/*
 * Copyright (c) 1995-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California. All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sig.c	8.7 (Berkeley) 4/18/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#define SIGPROP		/* include signal properties table */
#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/systm.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/acct.h>
#include <sys/file_internal.h>
#include <sys/kernel.h>
#include <sys/wait.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/stat.h>
#include <sys/lock.h>
#include <sys/kdebug.h>

#include <sys/mount.h>
#include <sys/sysproto.h>

#include <security/audit/audit.h>

#include <machine/spl.h>

#include <kern/cpu_number.h>

#include <sys/vm.h>
#include <sys/user.h>		/* for coredump */
#include <kern/ast.h>		/* for APC support */
#include <kern/task.h>		/* extern void *get_bsdtask_info(task_t); */
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/thread_call.h>
#include <mach/exception.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <libkern/OSAtomic.h>

#include <sys/sdt.h>

/*
 * Missing prototypes that Mach should export
 *
 * +++
 */
extern int thread_enable_fpe(thread_t act, int onoff);
extern thread_t port_name_to_thread(mach_port_name_t port_name);
extern kern_return_t get_signalact(task_t, thread_t *, int);
extern unsigned int get_useraddr(void);
extern kern_return_t task_suspend_internal(task_t);
extern kern_return_t task_resume_internal(task_t);

/*
 * ---
 */

extern void doexception(int exc, mach_exception_code_t code,
        mach_exception_subcode_t sub);

static void stop(proc_t, proc_t);
int cansignal(proc_t, kauth_cred_t, proc_t, int, int);
int killpg1(proc_t, int, int, int, int);
int setsigvec(proc_t, thread_t, int, struct __kern_sigaction *, boolean_t in_sigstart);
static void psignal_uthread(thread_t, int);
kern_return_t do_bsdexception(int, int, int);
void __posix_sem_syscall_return(kern_return_t);

/* implementations in osfmk/kern/sync_sema.c. We do not want port.h in this scope, so void * them */
kern_return_t semaphore_timedwait_signal_trap_internal(mach_port_name_t, mach_port_name_t, unsigned int, clock_res_t, void (*)(kern_return_t));
kern_return_t semaphore_timedwait_trap_internal(mach_port_name_t, unsigned int, clock_res_t, void (*)(kern_return_t));
kern_return_t semaphore_wait_signal_trap_internal(mach_port_name_t, mach_port_name_t, void (*)(kern_return_t));
kern_return_t semaphore_wait_trap_internal(mach_port_name_t, void (*)(kern_return_t));

static int filt_sigattach(struct knote *kn);
static void filt_sigdetach(struct knote *kn);
static int filt_signal(struct knote *kn, long hint);
static void filt_signaltouch(struct knote *kn, struct kevent64_s *kev,
        long type);

struct filterops sig_filtops = {
    .f_attach = filt_sigattach,
    .f_detach = filt_sigdetach,
    .f_event = filt_signal,
    .f_touch = filt_signaltouch,
};

/* structures and functions for the killpg1 iteration callback and filters */
struct killpg1_filtargs {
    int posix;
    proc_t cp;
};

struct killpg1_iterargs {
    proc_t cp;
    kauth_cred_t uc;
    int signum;
    int *nfoundp;
    int zombie;
};

static int killpg1_filt(proc_t p, void * arg);
static int killpg1_pgrpfilt(proc_t p, __unused void * arg);
static int killpg1_callback(proc_t p, void * arg);

static int pgsignal_filt(proc_t p, void * arg);
static int pgsignal_callback(proc_t p, void * arg);
static kern_return_t get_signalthread(proc_t, int, thread_t *);


/* flags for psignal_internal */
#define PSIG_LOCKED	0x1
#define PSIG_VFORK	0x2
#define PSIG_THREAD	0x4


static void psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum);

/*
 * NOTE: Source and target may *NOT* overlap! (target is smaller)
 */
static void
sigaltstack_kern_to_user32(struct kern_sigaltstack *in, struct user32_sigaltstack *out)
{
    out->ss_sp = CAST_DOWN_EXPLICIT(user32_addr_t, in->ss_sp);
    out->ss_size = CAST_DOWN_EXPLICIT(user32_size_t, in->ss_size);
    out->ss_flags = in->ss_flags;
}

static void
sigaltstack_kern_to_user64(struct kern_sigaltstack *in, struct user64_sigaltstack *out)
{
    out->ss_sp = in->ss_sp;
    out->ss_size = in->ss_size;
    out->ss_flags = in->ss_flags;
}

/*
 * NOTE: Source and target are permitted to overlap! (source is smaller);
 * this works because we copy fields in order from the end of the struct to
 * the beginning.
 */
static void
sigaltstack_user32_to_kern(struct user32_sigaltstack *in, struct kern_sigaltstack *out)
{
    out->ss_flags = in->ss_flags;
    out->ss_size = in->ss_size;
    out->ss_sp = CAST_USER_ADDR_T(in->ss_sp);
}

static void
sigaltstack_user64_to_kern(struct user64_sigaltstack *in, struct kern_sigaltstack *out)
{
    out->ss_flags = in->ss_flags;
    out->ss_size = in->ss_size;
    out->ss_sp = in->ss_sp;
}
230
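/*
 * Illustrative sketch (not part of xnu): why the field order above matters
 * when the smaller 32-bit user struct and the kernel struct alias the same
 * buffer. Widening in place, writing out->ss_sp (8 bytes at offset 0) first
 * would clobber in->ss_size and in->ss_flags before they were read; copying
 * ss_flags, then ss_size, then ss_sp reads each source field before the
 * widened destination write can overwrite it.
 *
 *	struct user32_sigaltstack *in = (struct user32_sigaltstack *)buf;
 *	struct kern_sigaltstack *out = (struct kern_sigaltstack *)buf;
 *	sigaltstack_user32_to_kern(in, out);	// safe: last-to-first copy
 */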
static void
sigaction_kern_to_user32(struct kern_sigaction *in, struct user32_sigaction *out)
{
    /* This assumes the 32 bit __sa_handler is of type sig_t */
    out->__sigaction_u.__sa_handler = CAST_DOWN_EXPLICIT(user32_addr_t, in->__sigaction_u.__sa_handler);
    out->sa_mask = in->sa_mask;
    out->sa_flags = in->sa_flags;
}

static void
sigaction_kern_to_user64(struct kern_sigaction *in, struct user64_sigaction *out)
{
    /* This assumes the 64 bit __sa_handler is of type sig_t */
    out->__sigaction_u.__sa_handler = in->__sigaction_u.__sa_handler;
    out->sa_mask = in->sa_mask;
    out->sa_flags = in->sa_flags;
}

static void
__sigaction_user32_to_kern(struct __user32_sigaction *in, struct __kern_sigaction *out)
{
    out->__sigaction_u.__sa_handler = CAST_USER_ADDR_T(in->__sigaction_u.__sa_handler);
    out->sa_tramp = CAST_USER_ADDR_T(in->sa_tramp);
    out->sa_mask = in->sa_mask;
    out->sa_flags = in->sa_flags;
}

static void
__sigaction_user64_to_kern(struct __user64_sigaction *in, struct __kern_sigaction *out)
{
    out->__sigaction_u.__sa_handler = in->__sigaction_u.__sa_handler;
    out->sa_tramp = in->sa_tramp;
    out->sa_mask = in->sa_mask;
    out->sa_flags = in->sa_flags;
}

#if SIGNAL_DEBUG
void ram_printf(int);
int ram_debug = 0;
unsigned int rdebug_proc = 0;
void
ram_printf(int x)
{
    printf("x is %d", x);
}
#endif /* SIGNAL_DEBUG */


void
signal_setast(thread_t sig_actthread)
{
    act_set_astbsd(sig_actthread);
}

/*
 * Can process p, with ucred uc, send the signal signum to process q?
 * uc is refcounted by the caller, so internal fields can be used safely;
 * when called with the zombie arg, the list lock is held.
 */
int
cansignal(proc_t p, kauth_cred_t uc, proc_t q, int signum, int zombie)
{
    kauth_cred_t my_cred;
    struct session *p_sessp = SESSION_NULL;
    struct session *q_sessp = SESSION_NULL;
#if CONFIG_MACF
    int error;

    error = mac_proc_check_signal(p, q, signum);
    if (error)
        return (0);
#endif

    /* you can signal yourself */
    if (p == q)
        return (1);

    if (!suser(uc, NULL))
        return (1);		/* root can always signal */

    if (zombie == 0)
        proc_list_lock();
    if (p->p_pgrp != PGRP_NULL)
        p_sessp = p->p_pgrp->pg_session;
    if (q->p_pgrp != PGRP_NULL)
        q_sessp = q->p_pgrp->pg_session;

    if (signum == SIGCONT && q_sessp == p_sessp) {
        if (zombie == 0)
            proc_list_unlock();
        return (1);		/* SIGCONT in session */
    }

    if (zombie == 0)
        proc_list_unlock();

    /*
     * If the real or effective UID of the sender matches the real
     * or saved UID of the target, permit the signal to
     * be sent.
     */
    if (zombie == 0)
        my_cred = kauth_cred_proc_ref(q);
    else
        my_cred = proc_ucred(q);

    if (kauth_cred_getruid(uc) == kauth_cred_getruid(my_cred) ||
        kauth_cred_getruid(uc) == kauth_cred_getsvuid(my_cred) ||
        kauth_cred_getuid(uc) == kauth_cred_getruid(my_cred) ||
        kauth_cred_getuid(uc) == kauth_cred_getsvuid(my_cred)) {
        if (zombie == 0)
            kauth_cred_unref(&my_cred);
        return (1);
    }

    if (zombie == 0)
        kauth_cred_unref(&my_cred);

    return (0);
}
351
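/*
 * Illustrative user-space sketch (not part of xnu): the UID matching above
 * is what decides whether kill(2) from an unprivileged process succeeds.
 * A sender whose real or effective UID matches neither the real nor the
 * saved UID of the target gets EPERM back.
 *
 *	#include <signal.h>
 *	#include <errno.h>
 *
 *	if (kill(target_pid, SIGTERM) == -1 && errno == EPERM) {
 *		// cansignal() returned 0 in the kernel: no UID match,
 *		// not root, and not a same-session SIGCONT.
 *	}
 */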

/*
 * Returns:	0			Success
 *		EINVAL
 *		copyout:EFAULT
 *		copyin:EFAULT
 *
 * Notes:	Uses current thread as a parameter to inform PPC to enable
 *		FPU exceptions via setsigvec(); this operation is not proxy
 *		safe!
 */
/* ARGSUSED */
int
sigaction(proc_t p, struct sigaction_args *uap, __unused int32_t *retval)
{
    struct kern_sigaction vec;
    struct __kern_sigaction __vec;

    struct kern_sigaction *sa = &vec;
    struct sigacts *ps = p->p_sigacts;

    int signum;
    int bit, error = 0;

    signum = uap->signum;
    if (signum <= 0 || signum >= NSIG ||
        signum == SIGKILL || signum == SIGSTOP)
        return (EINVAL);

    if (uap->osa) {
        sa->sa_handler = ps->ps_sigact[signum];
        sa->sa_mask = ps->ps_catchmask[signum];
        bit = sigmask(signum);
        sa->sa_flags = 0;
        if ((ps->ps_sigonstack & bit) != 0)
            sa->sa_flags |= SA_ONSTACK;
        if ((ps->ps_sigintr & bit) == 0)
            sa->sa_flags |= SA_RESTART;
        if (ps->ps_siginfo & bit)
            sa->sa_flags |= SA_SIGINFO;
        if (ps->ps_signodefer & bit)
            sa->sa_flags |= SA_NODEFER;
        if (ps->ps_64regset & bit)
            sa->sa_flags |= SA_64REGSET;
        if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDSTOP))
            sa->sa_flags |= SA_NOCLDSTOP;
        if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDWAIT))
            sa->sa_flags |= SA_NOCLDWAIT;

        if (IS_64BIT_PROCESS(p)) {
            struct user64_sigaction vec64;

            sigaction_kern_to_user64(sa, &vec64);
            error = copyout(&vec64, uap->osa, sizeof(vec64));
        } else {
            struct user32_sigaction vec32;

            sigaction_kern_to_user32(sa, &vec32);
            error = copyout(&vec32, uap->osa, sizeof(vec32));
        }
        if (error)
            return (error);
    }
    if (uap->nsa) {
        if (IS_64BIT_PROCESS(p)) {
            struct __user64_sigaction __vec64;

            error = copyin(uap->nsa, &__vec64, sizeof(__vec64));
            __sigaction_user64_to_kern(&__vec64, &__vec);
        } else {
            struct __user32_sigaction __vec32;

            error = copyin(uap->nsa, &__vec32, sizeof(__vec32));
            __sigaction_user32_to_kern(&__vec32, &__vec);
        }
        if (error)
            return (error);
        __vec.sa_flags &= SA_USERSPACE_MASK;	/* Only pass on valid sa_flags */
        error = setsigvec(p, current_thread(), signum, &__vec, FALSE);
    }
    return (error);
}
434
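/*
 * Illustrative user-space sketch (not part of xnu): installing a handler
 * through this syscall via the libc sigaction(2) wrapper. The trampoline
 * (sa_tramp) in __kern_sigaction above is supplied by libc, never by
 * application code.
 *
 *	#include <signal.h>
 *
 *	static void on_usr1(int sig, siginfo_t *info, void *uctx) {
 *		// async-signal-safe work only
 *	}
 *
 *	struct sigaction sa;
 *	sa.sa_sigaction = on_usr1;
 *	sa.sa_flags = SA_SIGINFO | SA_RESTART;	// sets ps_siginfo; clears ps_sigintr
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGUSR1, &sa, NULL);
 */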
/* Routines to manipulate bits on all threads */
int
clear_procsiglist(proc_t p, int bit, boolean_t in_signalstart)
{
    struct uthread *uth;
    thread_t thact;

    proc_lock(p);
    if (!in_signalstart)
        proc_signalstart(p, 1);

    if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
        thact = p->p_vforkact;
        uth = (struct uthread *)get_bsdthread_info(thact);
        if (uth) {
            uth->uu_siglist &= ~bit;
        }
        if (!in_signalstart)
            proc_signalend(p, 1);
        proc_unlock(p);
        return (0);
    }

    TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
        uth->uu_siglist &= ~bit;
    }
    p->p_siglist &= ~bit;
    if (!in_signalstart)
        proc_signalend(p, 1);
    proc_unlock(p);

    return (0);
}


static int
unblock_procsigmask(proc_t p, int bit)
{
    struct uthread *uth;
    thread_t thact;

    proc_lock(p);
    proc_signalstart(p, 1);

    if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
        thact = p->p_vforkact;
        uth = (struct uthread *)get_bsdthread_info(thact);
        if (uth) {
            uth->uu_sigmask &= ~bit;
        }
        p->p_sigmask &= ~bit;
        proc_signalend(p, 1);
        proc_unlock(p);
        return (0);
    }
    TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
        uth->uu_sigmask &= ~bit;
    }
    p->p_sigmask &= ~bit;

    proc_signalend(p, 1);
    proc_unlock(p);
    return (0);
}

static int
block_procsigmask(proc_t p, int bit)
{
    struct uthread *uth;
    thread_t thact;

    proc_lock(p);
    proc_signalstart(p, 1);

    if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
        thact = p->p_vforkact;
        uth = (struct uthread *)get_bsdthread_info(thact);
        if (uth) {
            uth->uu_sigmask |= bit;
        }
        p->p_sigmask |= bit;
        proc_signalend(p, 1);
        proc_unlock(p);
        return (0);
    }
    TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
        uth->uu_sigmask |= bit;
    }
    p->p_sigmask |= bit;

    proc_signalend(p, 1);
    proc_unlock(p);
    return (0);
}

int
set_procsigmask(proc_t p, int bit)
{
    struct uthread *uth;
    thread_t thact;

    proc_lock(p);
    proc_signalstart(p, 1);

    if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
        thact = p->p_vforkact;
        uth = (struct uthread *)get_bsdthread_info(thact);
        if (uth) {
            uth->uu_sigmask = bit;
        }
        p->p_sigmask = bit;
        proc_signalend(p, 1);
        proc_unlock(p);
        return (0);
    }
    TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
        uth->uu_sigmask = bit;
    }
    p->p_sigmask = bit;
    proc_signalend(p, 1);
    proc_unlock(p);

    return (0);
}
559
/* XXX should be static? */
/*
 * Notes:	The thread parameter is used in the PPC case to select the
 *		thread on which the floating point exception will be enabled
 *		or disabled. We can't simply take current_thread(), since
 *		this is called from posix_spawn() on the not currently running
 *		process/thread pair.
 *
 *		We mark thread as unused to allow compilation without warning
 *		on non-PPC platforms.
 */
int
setsigvec(proc_t p, __unused thread_t thread, int signum, struct __kern_sigaction *sa, boolean_t in_sigstart)
{
    struct sigacts *ps = p->p_sigacts;
    int bit;

    if ((signum == SIGKILL || signum == SIGSTOP) &&
        sa->sa_handler != SIG_DFL)
        return (EINVAL);
    bit = sigmask(signum);
    /*
     * Change setting atomically.
     */
    ps->ps_sigact[signum] = sa->sa_handler;
    ps->ps_trampact[signum] = sa->sa_tramp;
    ps->ps_catchmask[signum] = sa->sa_mask & ~sigcantmask;
    if (sa->sa_flags & SA_SIGINFO)
        ps->ps_siginfo |= bit;
    else
        ps->ps_siginfo &= ~bit;
    if (sa->sa_flags & SA_64REGSET)
        ps->ps_64regset |= bit;
    else
        ps->ps_64regset &= ~bit;
    if ((sa->sa_flags & SA_RESTART) == 0)
        ps->ps_sigintr |= bit;
    else
        ps->ps_sigintr &= ~bit;
    if (sa->sa_flags & SA_ONSTACK)
        ps->ps_sigonstack |= bit;
    else
        ps->ps_sigonstack &= ~bit;
    if (sa->sa_flags & SA_USERTRAMP)
        ps->ps_usertramp |= bit;
    else
        ps->ps_usertramp &= ~bit;
    if (sa->sa_flags & SA_RESETHAND)
        ps->ps_sigreset |= bit;
    else
        ps->ps_sigreset &= ~bit;
    if (sa->sa_flags & SA_NODEFER)
        ps->ps_signodefer |= bit;
    else
        ps->ps_signodefer &= ~bit;
    if (signum == SIGCHLD) {
        if (sa->sa_flags & SA_NOCLDSTOP)
            OSBitOrAtomic(P_NOCLDSTOP, &p->p_flag);
        else
            OSBitAndAtomic(~((uint32_t)P_NOCLDSTOP), &p->p_flag);
        if ((sa->sa_flags & SA_NOCLDWAIT) || (sa->sa_handler == SIG_IGN))
            OSBitOrAtomic(P_NOCLDWAIT, &p->p_flag);
        else
            OSBitAndAtomic(~((uint32_t)P_NOCLDWAIT), &p->p_flag);
    }

    /*
     * Set bit in p_sigignore for signals that are set to SIG_IGN,
     * and for signals set to SIG_DFL where the default is to ignore.
     * However, don't put SIGCONT in p_sigignore,
     * as we have to restart the process.
     */
    if (sa->sa_handler == SIG_IGN ||
        (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) {

        clear_procsiglist(p, bit, in_sigstart);
        if (signum != SIGCONT)
            p->p_sigignore |= bit;	/* easier in psignal */
        p->p_sigcatch &= ~bit;
    } else {
        p->p_sigignore &= ~bit;
        if (sa->sa_handler == SIG_DFL)
            p->p_sigcatch &= ~bit;
        else
            p->p_sigcatch |= bit;
    }
    return (0);
}
648
/*
 * Initialize signal state for process 0;
 * set to ignore signals that are ignored by default.
 */
void
siginit(proc_t p)
{
    int i;

    for (i = 1; i < NSIG; i++)
        if (sigprop[i] & SA_IGNORE && i != SIGCONT)
            p->p_sigignore |= sigmask(i);
}

/*
 * Reset signals for an exec of the specified process.
 */
void
execsigs(proc_t p, thread_t thread)
{
    struct sigacts *ps = p->p_sigacts;
    int nc, mask;
    struct uthread *ut;

    ut = (struct uthread *)get_bsdthread_info(thread);

    /*
     * Transfer saved signal states from the process
     * back to the current thread.
     *
     * NOTE: We do this without the process locked,
     * because we are guaranteed to be single-threaded
     * by this point in exec and the p_siglist is
     * only accessed by threads inside the process.
     */
    ut->uu_siglist |= p->p_siglist;
    p->p_siglist = 0;

    /*
     * Reset caught signals. Held signals remain held
     * through p_sigmask (unless they were caught,
     * and are now ignored by default).
     */
    while (p->p_sigcatch) {
        nc = ffs((long)p->p_sigcatch);
        mask = sigmask(nc);
        p->p_sigcatch &= ~mask;
        if (sigprop[nc] & SA_IGNORE) {
            if (nc != SIGCONT)
                p->p_sigignore |= mask;
            ut->uu_siglist &= ~mask;
        }
        ps->ps_sigact[nc] = SIG_DFL;
    }

    /*
     * Reset stack state to the user stack.
     * Clear set of signals caught on the signal stack.
     */
    /* thread */
    ut->uu_sigstk.ss_flags = SA_DISABLE;
    ut->uu_sigstk.ss_size = 0;
    ut->uu_sigstk.ss_sp = USER_ADDR_NULL;
    ut->uu_flag &= ~UT_ALTSTACK;
    /* process */
    ps->ps_sigonstack = 0;
}
716
/*
 * Manipulate signal mask.
 * Note: the new mask is copied in from user space (uap->mask), and the
 * previous mask is copied out through uap->omask when it is non-NULL;
 * the library stub does the rest.
 */
int
sigprocmask(proc_t p, struct sigprocmask_args *uap, __unused int32_t *retval)
{
    int error = 0;
    sigset_t oldmask, nmask;
    user_addr_t omask = uap->omask;
    struct uthread *ut;

    ut = (struct uthread *)get_bsdthread_info(current_thread());
    oldmask = ut->uu_sigmask;

    if (uap->mask == USER_ADDR_NULL) {
        /* just want old mask */
        goto out;
    }
    error = copyin(uap->mask, &nmask, sizeof(sigset_t));
    if (error)
        goto out;

    switch (uap->how) {
    case SIG_BLOCK:
        block_procsigmask(p, (nmask & ~sigcantmask));
        signal_setast(current_thread());
        break;

    case SIG_UNBLOCK:
        unblock_procsigmask(p, (nmask & ~sigcantmask));
        signal_setast(current_thread());
        break;

    case SIG_SETMASK:
        set_procsigmask(p, (nmask & ~sigcantmask));
        signal_setast(current_thread());
        break;

    default:
        error = EINVAL;
        break;
    }
out:
    if (!error && omask != USER_ADDR_NULL)
        copyout(&oldmask, omask, sizeof(sigset_t));
    return (error);
}
767
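/*
 * Illustrative user-space sketch (not part of xnu): the classic pattern
 * served by the syscall above — block a signal around a critical section,
 * then restore the saved mask.
 *
 *	#include <signal.h>
 *
 *	sigset_t block, saved;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGINT);
 *	sigprocmask(SIG_BLOCK, &block, &saved);	// kernel: block_procsigmask()
 *	// ... critical section: SIGINT stays pending ...
 *	sigprocmask(SIG_SETMASK, &saved, NULL);	// kernel: set_procsigmask()
 */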
int
sigpending(__unused proc_t p, struct sigpending_args *uap, __unused int32_t *retval)
{
    struct uthread *ut;
    sigset_t pendlist;

    ut = (struct uthread *)get_bsdthread_info(current_thread());
    pendlist = ut->uu_siglist;

    if (uap->osv)
        copyout(&pendlist, uap->osv, sizeof(sigset_t));
    return (0);
}

/*
 * Suspend process until signal, providing mask to be set
 * in the meantime. Note nonstandard calling convention:
 * libc stub passes mask, not pointer, to save a copyin.
 */

static int
sigcontinue(__unused int error)
{
//	struct uthread *ut = get_bsdthread_info(current_thread());
    unix_syscall_return(EINTR);
}

int
sigsuspend(proc_t p, struct sigsuspend_args *uap, int32_t *retval)
{
    __pthread_testcancel(1);
    return (sigsuspend_nocancel(p, (struct sigsuspend_nocancel_args *)uap, retval));
}

int
sigsuspend_nocancel(proc_t p, struct sigsuspend_nocancel_args *uap, __unused int32_t *retval)
{
    struct uthread *ut;

    ut = (struct uthread *)get_bsdthread_info(current_thread());

    /*
     * When returning from sigpause, we want
     * the old mask to be restored after the
     * signal handler has finished. Thus, we
     * save it here and mark the sigacts structure
     * to indicate this.
     */
    ut->uu_oldmask = ut->uu_sigmask;
    ut->uu_flag |= UT_SAS_OLDMASK;
    ut->uu_sigmask = (uap->mask & ~sigcantmask);
    (void) tsleep0((caddr_t)p, PPAUSE | PCATCH, "pause", 0, sigcontinue);
    /* always return EINTR rather than ERESTART... */
    return (EINTR);
}
823
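/*
 * Illustrative user-space sketch (not part of xnu): the race-free wait
 * that sigsuspend() exists for — block the signal, test the flag, then
 * atomically unblock and sleep. Here got_usr1 is a hypothetical volatile
 * sig_atomic_t flag set by the SIGUSR1 handler.
 *
 *	#include <signal.h>
 *
 *	sigset_t block, waitmask;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &waitmask);
 *	sigdelset(&waitmask, SIGUSR1);
 *	while (!got_usr1)
 *		sigsuspend(&waitmask);	// returns -1/EINTR after delivery
 */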
824
int
__disable_threadsignal(__unused proc_t p,
        __unused struct __disable_threadsignal_args *uap,
        __unused int32_t *retval)
{
    struct uthread *uth;

    uth = (struct uthread *)get_bsdthread_info(current_thread());

    /* No longer valid to have any signal delivered */
    uth->uu_flag |= (UT_NO_SIGMASK | UT_CANCELDISABLE);

    return (0);
}

void
__pthread_testcancel(int presyscall)
{
    thread_t self = current_thread();
    struct uthread *uthread;

    uthread = (struct uthread *)get_bsdthread_info(self);

    uthread->uu_flag &= ~UT_NOTCANCELPT;

    if ((uthread->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
        if (presyscall != 0) {
            unix_syscall_return(EINTR);
            /* NOTREACHED */
        } else
            thread_abort_safely(self);
    }
}

int
__pthread_markcancel(__unused proc_t p,
        struct __pthread_markcancel_args *uap, __unused int32_t *retval)
{
    thread_act_t target_act;
    int error = 0;
    struct uthread *uth;

    target_act = (thread_act_t)port_name_to_thread(uap->thread_port);

    if (target_act == THR_ACT_NULL)
        return (ESRCH);

    uth = (struct uthread *)get_bsdthread_info(target_act);

    /* if the thread is in vfork do not cancel */
    if ((uth->uu_flag & (UT_VFORK | UT_CANCEL | UT_CANCELED)) == 0) {
        uth->uu_flag |= (UT_CANCEL | UT_NO_SIGMASK);
        if (((uth->uu_flag & UT_NOTCANCELPT) == 0)
            && ((uth->uu_flag & UT_CANCELDISABLE) == 0))
            thread_abort_safely(target_act);
    }

    thread_deallocate(target_act);
    return (error);
}

/*
 * If action == 0, return the cancellation state and, if the thread is
 * marked for cancellation, transition it to canceled.
 * If action == 1, enable cancel handling.
 * If action == 2, disable cancel handling.
 */
int
__pthread_canceled(__unused proc_t p,
        struct __pthread_canceled_args *uap, __unused int32_t *retval)
{
    thread_act_t thread;
    struct uthread *uth;
    int action = uap->action;

    thread = current_thread();
    uth = (struct uthread *)get_bsdthread_info(thread);

    switch (action) {
    case 1:
        uth->uu_flag &= ~UT_CANCELDISABLE;
        return (0);
    case 2:
        uth->uu_flag |= UT_CANCELDISABLE;
        return (0);
    case 0:
    default:
        /* if marked for cancellation and not disabled, complete the cancel */
        if ((uth->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
            uth->uu_flag &= ~UT_CANCEL;
            uth->uu_flag |= (UT_CANCELED | UT_NO_SIGMASK);
            return (0);
        }
        return (EINVAL);
    }
}
926
void
__posix_sem_syscall_return(kern_return_t kern_result)
{
    int error = 0;

    if (kern_result == KERN_SUCCESS)
        error = 0;
    else if (kern_result == KERN_ABORTED)
        error = EINTR;
    else if (kern_result == KERN_OPERATION_TIMED_OUT)
        error = ETIMEDOUT;
    else
        error = EINVAL;
    unix_syscall_return(error);
    /* does not return */
}
943
#if OLD_SEMWAIT_SIGNAL
/*
 * Returns:	0			Success
 *		EINTR
 *		ETIMEDOUT
 *		EINVAL
 *		EFAULT if timespec is NULL
 */
int
__old_semwait_signal(proc_t p, struct __old_semwait_signal_args *uap,
        int32_t *retval)
{
    __pthread_testcancel(0);
    return (__old_semwait_signal_nocancel(p, (struct __old_semwait_signal_nocancel_args *)uap, retval));
}

int
__old_semwait_signal_nocancel(proc_t p, struct __old_semwait_signal_nocancel_args *uap,
        __unused int32_t *retval)
{
    kern_return_t kern_result;
    int error;
    mach_timespec_t then;
    struct timespec now;
    struct user_timespec ts;
    boolean_t truncated_timeout = FALSE;

    if (uap->timeout) {
        if (IS_64BIT_PROCESS(p)) {
            struct user64_timespec ts64;
            error = copyin(uap->ts, &ts64, sizeof(ts64));
            ts.tv_sec = ts64.tv_sec;
            ts.tv_nsec = ts64.tv_nsec;
        } else {
            struct user32_timespec ts32;
            error = copyin(uap->ts, &ts32, sizeof(ts32));
            ts.tv_sec = ts32.tv_sec;
            ts.tv_nsec = ts32.tv_nsec;
        }

        if (error) {
            return error;
        }

        if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) {
            ts.tv_sec = 0xFFFFFFFF;
            ts.tv_nsec = 0;
            truncated_timeout = TRUE;
        }

        if (uap->relative) {
            then.tv_sec = ts.tv_sec;
            then.tv_nsec = ts.tv_nsec;
        } else {
            nanotime(&now);

            /* if time has elapsed, set time to a zero timespec to bail out right away */
            if (now.tv_sec == ts.tv_sec ?
                now.tv_nsec > ts.tv_nsec :
                now.tv_sec > ts.tv_sec) {
                then.tv_sec = 0;
                then.tv_nsec = 0;
            } else {
                then.tv_sec = ts.tv_sec - now.tv_sec;
                then.tv_nsec = ts.tv_nsec - now.tv_nsec;
                if (then.tv_nsec < 0) {
                    then.tv_nsec += NSEC_PER_SEC;
                    then.tv_sec--;
                }
            }
        }

        if (uap->mutex_sem == 0)
            kern_result = semaphore_timedwait_trap_internal((mach_port_name_t)uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
        else
            kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
    } else {
        if (uap->mutex_sem == 0)
            kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return);
        else
            kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return);
    }

    if (kern_result == KERN_SUCCESS && !truncated_timeout)
        return (0);
    else if (kern_result == KERN_SUCCESS && truncated_timeout)
        return (EINTR);	/* simulate an exceptional condition because Mach doesn't support a longer timeout */
    else if (kern_result == KERN_ABORTED)
        return (EINTR);
    else if (kern_result == KERN_OPERATION_TIMED_OUT)
        return (ETIMEDOUT);
    else
        return (EINVAL);
}
#endif /* OLD_SEMWAIT_SIGNAL */
1044
/*
 * Returns:	0			Success
 *		EINTR
 *		ETIMEDOUT
 *		EINVAL
 *		EFAULT if timespec is NULL
 */
int
__semwait_signal(proc_t p, struct __semwait_signal_args *uap,
        int32_t *retval)
{
    __pthread_testcancel(0);
    return (__semwait_signal_nocancel(p, (struct __semwait_signal_nocancel_args *)uap, retval));
}

int
__semwait_signal_nocancel(__unused proc_t p, struct __semwait_signal_nocancel_args *uap,
        __unused int32_t *retval)
{
    kern_return_t kern_result;
    mach_timespec_t then;
    struct timespec now;
    struct user_timespec ts;
    boolean_t truncated_timeout = FALSE;

    if (uap->timeout) {
        ts.tv_sec = uap->tv_sec;
        ts.tv_nsec = uap->tv_nsec;

        if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) {
            ts.tv_sec = 0xFFFFFFFF;
            ts.tv_nsec = 0;
            truncated_timeout = TRUE;
        }

        if (uap->relative) {
            then.tv_sec = ts.tv_sec;
            then.tv_nsec = ts.tv_nsec;
        } else {
            nanotime(&now);

            /* if time has elapsed, set time to a zero timespec to bail out right away */
            if (now.tv_sec == ts.tv_sec ?
                now.tv_nsec > ts.tv_nsec :
                now.tv_sec > ts.tv_sec) {
                then.tv_sec = 0;
                then.tv_nsec = 0;
            } else {
                then.tv_sec = ts.tv_sec - now.tv_sec;
                then.tv_nsec = ts.tv_nsec - now.tv_nsec;
                if (then.tv_nsec < 0) {
                    then.tv_nsec += NSEC_PER_SEC;
                    then.tv_sec--;
                }
            }
        }

        if (uap->mutex_sem == 0)
            kern_result = semaphore_timedwait_trap_internal((mach_port_name_t)uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
        else
            kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
    } else {
        if (uap->mutex_sem == 0)
            kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return);
        else
            kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return);
    }

    if (kern_result == KERN_SUCCESS && !truncated_timeout)
        return (0);
    else if (kern_result == KERN_SUCCESS && truncated_timeout)
        return (EINTR);	/* simulate an exceptional condition because Mach doesn't support a longer timeout */
    else if (kern_result == KERN_ABORTED)
        return (EINTR);
    else if (kern_result == KERN_OPERATION_TIMED_OUT)
        return (ETIMEDOUT);
    else
        return (EINVAL);
}
1129
1130
int
__pthread_kill(__unused proc_t p, struct __pthread_kill_args *uap,
        __unused int32_t *retval)
{
    thread_t target_act;
    int error = 0;
    int signum = uap->sig;
    struct uthread *uth;

    target_act = (thread_t)port_name_to_thread(uap->thread_port);

    if (target_act == THREAD_NULL)
        return (ESRCH);
    if ((u_int)signum >= NSIG) {
        error = EINVAL;
        goto out;
    }

    uth = (struct uthread *)get_bsdthread_info(target_act);

    if (uth->uu_flag & UT_NO_SIGMASK) {
        error = ESRCH;
        goto out;
    }

    if (signum)
        psignal_uthread(target_act, signum);
out:
    thread_deallocate(target_act);
    return (error);
}
1162
1163
int
__pthread_sigmask(__unused proc_t p, struct __pthread_sigmask_args *uap,
        __unused int32_t *retval)
{
    user_addr_t set = uap->set;
    user_addr_t oset = uap->oset;
    sigset_t nset;
    int error = 0;
    struct uthread *ut;
    sigset_t oldset;

    ut = (struct uthread *)get_bsdthread_info(current_thread());
    oldset = ut->uu_sigmask;

    if (set == USER_ADDR_NULL) {
        /* need only old mask */
        goto out;
    }

    error = copyin(set, &nset, sizeof(sigset_t));
    if (error)
        goto out;

    switch (uap->how) {
    case SIG_BLOCK:
        ut->uu_sigmask |= (nset & ~sigcantmask);
        break;

    case SIG_UNBLOCK:
        ut->uu_sigmask &= ~(nset);
        signal_setast(current_thread());
        break;

    case SIG_SETMASK:
        ut->uu_sigmask = (nset & ~sigcantmask);
        signal_setast(current_thread());
        break;

    default:
        error = EINVAL;
        break;
    }
out:
    if (!error && oset != USER_ADDR_NULL)
        copyout(&oldset, oset, sizeof(sigset_t));

    return (error);
}
1212
/*
 * Returns:	0			Success
 *		EINVAL
 *		copyin:EFAULT
 *		copyout:EFAULT
 */
int
__sigwait(proc_t p, struct __sigwait_args *uap, int32_t *retval)
{
    __pthread_testcancel(1);
    return (__sigwait_nocancel(p, (struct __sigwait_nocancel_args *)uap, retval));
}

int
__sigwait_nocancel(proc_t p, struct __sigwait_nocancel_args *uap, __unused int32_t *retval)
{
    struct uthread *ut;
    struct uthread *uth;
    int error = 0;
    sigset_t mask;
    sigset_t siglist;
    sigset_t sigw = 0;
    int signum;

    ut = (struct uthread *)get_bsdthread_info(current_thread());

    if (uap->set == USER_ADDR_NULL)
        return (EINVAL);

    error = copyin(uap->set, &mask, sizeof(sigset_t));
    if (error)
        return (error);

    siglist = (mask & ~sigcantmask);

    if (siglist == 0)
        return (EINVAL);

    proc_lock(p);
    if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
        proc_unlock(p);
        return (EINVAL);
    } else {
        proc_signalstart(p, 1);
        TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
            if ((sigw = uth->uu_siglist & siglist)) {
                break;
            }
        }
        proc_signalend(p, 1);
    }

    if (sigw) {
        /* The signal was pending on a thread */
        goto sigwait1;
    }
    /*
     * When returning from sigwait, we want
     * the old mask to be restored after the
     * signal handler has finished. Thus, we
     * save it here and mark the sigacts structure
     * to indicate this.
     */
    uth = ut;		/* wait for it to be delivered to us */
    ut->uu_oldmask = ut->uu_sigmask;
    ut->uu_flag |= UT_SAS_OLDMASK;
    if (siglist == (sigset_t)0) {
        proc_unlock(p);
        return (EINVAL);
    }
    /* SIGKILL and SIGSTOP are not maskable as well */
    ut->uu_sigmask = ~(siglist | sigcantmask);
    ut->uu_sigwait = siglist;

    /* No Continuations for now */
    error = msleep((caddr_t)&ut->uu_sigwait, &p->p_mlock, PPAUSE | PCATCH, "pause", 0);

    if (error == ERESTART)
        error = 0;

    sigw = (ut->uu_sigwait & siglist);
    ut->uu_sigmask = ut->uu_oldmask;
    ut->uu_oldmask = 0;
    ut->uu_flag &= ~UT_SAS_OLDMASK;
sigwait1:
    ut->uu_sigwait = 0;
    if (!error) {
        signum = ffs((unsigned int)sigw);
        if (!signum)
            panic("sigwait with no signal wakeup");
        /* Clear the pending signal in the thread to which it was delivered */
        uth->uu_siglist &= ~(sigmask(signum));

#if CONFIG_DTRACE
        DTRACE_PROC2(signal__clear, int, signum, siginfo_t *, &(ut->t_dtrace_siginfo));
#endif

        proc_unlock(p);
        if (uap->sig != USER_ADDR_NULL)
            error = copyout(&signum, uap->sig, sizeof(int));
    } else
        proc_unlock(p);

    return (error);
}
1319
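/*
 * Illustrative user-space sketch (not part of xnu): a dedicated
 * signal-handling thread built on the sigwait path above. The set is
 * blocked first so the signals queue instead of being delivered
 * asynchronously.
 *
 *	#include <signal.h>
 *
 *	sigset_t set;
 *	int sig;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGTERM);
 *	pthread_sigmask(SIG_BLOCK, &set, NULL);
 *	if (sigwait(&set, &sig) == 0) {
 *		// sig == SIGTERM: handle it synchronously here
 *	}
 */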
int
sigaltstack(__unused proc_t p, struct sigaltstack_args *uap, __unused int32_t *retval)
{
    struct kern_sigaltstack ss;
    struct kern_sigaltstack *pstk;
    int error;
    struct uthread *uth;
    int onstack;

    uth = (struct uthread *)get_bsdthread_info(current_thread());

    pstk = &uth->uu_sigstk;
    if ((uth->uu_flag & UT_ALTSTACK) == 0)
        uth->uu_sigstk.ss_flags |= SA_DISABLE;
    onstack = pstk->ss_flags & SA_ONSTACK;
    if (uap->oss) {
        if (IS_64BIT_PROCESS(p)) {
            struct user64_sigaltstack ss64;
            sigaltstack_kern_to_user64(pstk, &ss64);
            error = copyout(&ss64, uap->oss, sizeof(ss64));
        } else {
            struct user32_sigaltstack ss32;
            sigaltstack_kern_to_user32(pstk, &ss32);
            error = copyout(&ss32, uap->oss, sizeof(ss32));
        }
        if (error)
            return (error);
    }
    if (uap->nss == USER_ADDR_NULL)
        return (0);
    if (IS_64BIT_PROCESS(p)) {
        struct user64_sigaltstack ss64;
        error = copyin(uap->nss, &ss64, sizeof(ss64));
        sigaltstack_user64_to_kern(&ss64, &ss);
    } else {
        struct user32_sigaltstack ss32;
        error = copyin(uap->nss, &ss32, sizeof(ss32));
        sigaltstack_user32_to_kern(&ss32, &ss);
    }
    if (error)
        return (error);
    if ((ss.ss_flags & ~SA_DISABLE) != 0) {
        return (EINVAL);
    }

    if (ss.ss_flags & SA_DISABLE) {
        /* if we are here we are not in the signal handler, so no need to check */
        if (uth->uu_sigstk.ss_flags & SA_ONSTACK)
            return (EINVAL);
        uth->uu_flag &= ~UT_ALTSTACK;
        uth->uu_sigstk.ss_flags = ss.ss_flags;
        return (0);
    }
    if (onstack)
        return (EPERM);
    /* The older stack size was 8K; enforce that one so there are no compat problems */
#define OLDMINSIGSTKSZ	(8 * 1024)
    if (ss.ss_size < OLDMINSIGSTKSZ)
        return (ENOMEM);
    uth->uu_flag |= UT_ALTSTACK;
    uth->uu_sigstk = ss;
    return (0);
}
1383
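/*
 * Illustrative user-space sketch (not part of xnu): pairing sigaltstack(2)
 * with SA_ONSTACK so a stack-overflow SIGSEGV can still run its handler on
 * a separate stack. segv_handler is a hypothetical handler.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	stack_t st;
 *	st.ss_sp = malloc(SIGSTKSZ);
 *	st.ss_size = SIGSTKSZ;
 *	st.ss_flags = 0;
 *	sigaltstack(&st, NULL);			// UT_ALTSTACK gets set above
 *
 *	struct sigaction sa;
 *	sa.sa_handler = segv_handler;
 *	sa.sa_flags = SA_ONSTACK;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGSEGV, &sa, NULL);
 */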
int
kill(proc_t cp, struct kill_args *uap, __unused int32_t *retval)
{
    proc_t p;
    kauth_cred_t uc = kauth_cred_get();
    int posix = uap->posix;		/* !0 if posix behaviour desired */

    AUDIT_ARG(pid, uap->pid);
    AUDIT_ARG(signum, uap->signum);

    if ((u_int)uap->signum >= NSIG)
        return (EINVAL);
    if (uap->pid > 0) {
        /* kill single process */
        if ((p = proc_find(uap->pid)) == NULL) {
            if ((p = pzfind(uap->pid)) != NULL) {
                /*
                 * IEEE Std 1003.1-2001: return success
                 * when killing a zombie.
                 */
                return (0);
            }
            return (ESRCH);
        }
        AUDIT_ARG(process, p);
        if (!cansignal(cp, uc, p, uap->signum, 0)) {
            proc_rele(p);
            return (EPERM);
        }
        if (uap->signum)
            psignal(p, uap->signum);
        proc_rele(p);
        return (0);
    }
    switch (uap->pid) {
    case -1:		/* broadcast signal */
        return (killpg1(cp, uap->signum, 0, 1, posix));
    case 0:		/* signal own process group */
        return (killpg1(cp, uap->signum, 0, 0, posix));
    default:		/* negative explicit process group */
        return (killpg1(cp, uap->signum, -(uap->pid), 0, posix));
    }
    /* NOTREACHED */
}
1428
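/*
 * Illustrative user-space sketch (not part of xnu): the pid conventions
 * dispatched in the switch above.
 *
 *	kill(1234, SIGTERM);	// pid > 0: exactly that process
 *	kill(0, SIGTERM);	// pid == 0: caller's own process group
 *	kill(-1234, SIGTERM);	// pid < -1: process group 1234
 *	kill(-1, SIGTERM);	// pid == -1: broadcast
 */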
static int
killpg1_filt(proc_t p, void * arg)
{
    struct killpg1_filtargs *kfargp = (struct killpg1_filtargs *)arg;
    proc_t cp = kfargp->cp;
    int posix = kfargp->posix;

    if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
        (!posix && p == cp))
        return (0);
    else
        return (1);
}


static int
killpg1_pgrpfilt(proc_t p, __unused void * arg)
{
    if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
        (p->p_stat == SZOMB))
        return (0);
    else
        return (1);
}


static int
killpg1_callback(proc_t p, void * arg)
{
    struct killpg1_iterargs *kargp = (struct killpg1_iterargs *)arg;
    proc_t cp = kargp->cp;
    kauth_cred_t uc = kargp->uc;	/* refcounted by the caller; safe to use internal fields */
    int signum = kargp->signum;
    int *nfoundp = kargp->nfoundp;
    int n;
    int zombie = 0;
    int error = 0;

    if ((kargp->zombie != 0) && ((p->p_listflag & P_LIST_EXITED) == P_LIST_EXITED))
        zombie = 1;

    if (zombie != 0) {
        proc_list_lock();
        error = cansignal(cp, uc, p, signum, zombie);
        proc_list_unlock();

        if (error != 0 && nfoundp != NULL) {
            n = *nfoundp;
            *nfoundp = n + 1;
        }
    } else {
        if (cansignal(cp, uc, p, signum, 0) == 0)
            return (PROC_RETURNED);

        if (nfoundp != NULL) {
            n = *nfoundp;
            *nfoundp = n + 1;
        }
        if (signum != 0)
            psignal(p, signum);
    }

    return (PROC_RETURNED);
}

/*
 * Common code for kill process group/broadcast kill.
 * cp is the calling process.
 */
int
killpg1(proc_t cp, int signum, int pgid, int all, int posix)
{
    kauth_cred_t uc;
    struct pgrp *pgrp;
    int nfound = 0;
    struct killpg1_iterargs karg;
    struct killpg1_filtargs kfarg;
    int error = 0;

    uc = kauth_cred_proc_ref(cp);
    if (all) {
        /*
         * broadcast
         */
        kfarg.posix = posix;
        kfarg.cp = cp;

        karg.cp = cp;
        karg.uc = uc;
        karg.nfoundp = &nfound;
        karg.signum = signum;
        karg.zombie = 1;

        proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST), killpg1_callback, &karg, killpg1_filt, (void *)&kfarg);
    } else {
        if (pgid == 0) {
            /*
             * zero pgid means send to my process group.
             */
            pgrp = proc_pgrp(cp);
        } else {
            pgrp = pgfind(pgid);
            if (pgrp == NULL) {
                error = ESRCH;
                goto out;
            }
        }

        karg.nfoundp = &nfound;
        karg.uc = uc;
        karg.signum = signum;
        karg.cp = cp;
        karg.zombie = 0;

        /* PGRP_DROPREF drops the pgrp reference */
        pgrp_iterate(pgrp, PGRP_BLOCKITERATE | PGRP_DROPREF, killpg1_callback, &karg,
            killpg1_pgrpfilt, NULL);
    }
    error = (nfound ? 0 : (posix ? EPERM : ESRCH));
out:
    kauth_cred_unref(&uc);
    return (error);
}
1556
1557
/*
 * Send a signal to a process group.
 */
void
gsignal(int pgid, int signum)
{
    struct pgrp *pgrp;

    if (pgid && (pgrp = pgfind(pgid))) {
        pgsignal(pgrp, signum, 0);
        pg_rele(pgrp);
    }
}

/*
 * Send a signal to a process group. If checkctty is 1,
 * limit to members which have a controlling terminal.
 */

static int
pgsignal_filt(proc_t p, void * arg)
{
    int checkctty = *(int *)arg;

    if ((checkctty == 0) || p->p_flag & P_CONTROLT)
        return (1);
    else
        return (0);
}


static int
pgsignal_callback(proc_t p, void * arg)
{
    int signum = *(int *)arg;

    psignal(p, signum);
    return (PROC_RETURNED);
}


void
pgsignal(struct pgrp *pgrp, int signum, int checkctty)
{
    if (pgrp != PGRP_NULL) {
        pgrp_iterate(pgrp, PGRP_BLOCKITERATE, pgsignal_callback, &signum, pgsignal_filt, &checkctty);
    }
}


void
tty_pgsignal(struct tty *tp, int signum, int checkctty)
{
    struct pgrp *pg;

    pg = tty_pgrp(tp);
    if (pg != PGRP_NULL) {
        pgrp_iterate(pg, PGRP_BLOCKITERATE, pgsignal_callback, &signum, pgsignal_filt, &checkctty);
        pg_rele(pg);
    }
}

/*
 * Send a signal caused by a trap to a specific thread.
 */
void
threadsignal(thread_t sig_actthread, int signum, mach_exception_code_t code)
{
    struct uthread *uth;
    struct task *sig_task;
    proc_t p;
    int mask;

    if ((u_int)signum >= NSIG || signum == 0)
        return;

    mask = sigmask(signum);
    if ((mask & threadmask) == 0)
        return;
    sig_task = get_threadtask(sig_actthread);
    p = (proc_t)(get_bsdtask_info(sig_task));

    uth = get_bsdthread_info(sig_actthread);
    if (uth->uu_flag & UT_VFORK)
        p = uth->uu_proc;

    proc_lock(p);
    if (!(p->p_lflag & P_LTRACED) && (p->p_sigignore & mask)) {
        proc_unlock(p);
        return;
    }

    uth->uu_siglist |= mask;
    uth->uu_code = code;
    proc_unlock(p);

    /* mark on process as well */
    signal_setast(sig_actthread);
}
1656
static kern_return_t
get_signalthread(proc_t p, int signum, thread_t *thr)
{
    struct uthread *uth;
    sigset_t mask = sigmask(signum);
    thread_t sig_thread;
    struct task *sig_task = p->task;
    kern_return_t kret;

    *thr = THREAD_NULL;

    if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
        sig_thread = p->p_vforkact;
        kret = check_actforsig(sig_task, sig_thread, 1);
        if (kret == KERN_SUCCESS) {
            *thr = sig_thread;
            return (KERN_SUCCESS);
        } else
            return (KERN_FAILURE);
    }

    proc_lock(p);
    TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
        if (((uth->uu_flag & UT_NO_SIGMASK) == 0) &&
            (((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask))) {
            if (check_actforsig(p->task, uth->uu_context.vc_thread, 1) == KERN_SUCCESS) {
                *thr = uth->uu_context.vc_thread;
                proc_unlock(p);
                return (KERN_SUCCESS);
            }
        }
    }
    proc_unlock(p);
    if (get_signalact(p->task, thr, 1) == KERN_SUCCESS) {
        return (KERN_SUCCESS);
    }

    return (KERN_FAILURE);
}
1696
/*
 * Send the signal to the process. If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *	o When a stop signal is sent to a sleeping process that takes the
 *	  default action, the process is stopped without awakening it.
 *	o SIGCONT restarts stopped processes (or puts them back to sleep)
 *	  regardless of the signal action (e.g., blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 */
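/*
 * Illustrative sketch (not part of xnu): the action classification that
 * psignal_internal() performs below for a non-traced, non-vfork target,
 * rewritten as a standalone decision function. Precedence matters: an
 * ignored signal is dropped before sigwait is checked, sigwait beats the
 * thread's mask, and the mask beats a registered catcher; the discard
 * branch stands in for the goto psigout in the real code.
 *
 *	static user_addr_t
 *	classify_action(proc_t sp, struct uthread *uth, int mask)
 *	{
 *		if (sp->p_sigignore & mask)
 *			return USER_ADDR_NULL;	// discard immediately
 *		if (uth->uu_sigwait & mask)
 *			return KERN_SIG_WAIT;	// a sigwait()er gets it
 *		if (uth->uu_sigmask & mask)
 *			return KERN_SIG_HOLD;	// blocked: stays pending
 *		if (sp->p_sigcatch & mask)
 *			return KERN_SIG_CATCH;	// handler registered
 *		return SIG_DFL;			// default disposition
 *	}
 */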
1710 static void
1711 psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum)
1712 {
1713 int prop;
1714 user_addr_t action = USER_ADDR_NULL;
1715 proc_t sig_proc;
1716 thread_t sig_thread;
1717 register task_t sig_task;
1718 int mask;
1719 struct uthread *uth;
1720 kern_return_t kret;
1721 uid_t r_uid;
1722 proc_t pp;
1723 kauth_cred_t my_cred;
1724
1725 if ((u_int)signum >= NSIG || signum == 0)
1726 panic("psignal signal number");
1727 mask = sigmask(signum);
1728 prop = sigprop[signum];
1729
1730 #if SIGNAL_DEBUG
1731 if(rdebug_proc && (p != PROC_NULL) && (p == rdebug_proc)) {
1732 ram_printf(3);
1733 }
1734 #endif /* SIGNAL_DEBUG */
1735
1736 /*
1737 * We will need the task pointer later. Grab it now to
1738 * check for a zombie process. Also don't send signals
1739 * to kernel internal tasks.
1740 */
1741 if (flavor & PSIG_VFORK) {
1742 sig_task = task;
1743 sig_thread = thread;
1744 sig_proc = p;
1745 } else if (flavor & PSIG_THREAD) {
1746 sig_task = get_threadtask(thread);
1747 sig_thread = thread;
1748 sig_proc = (proc_t)get_bsdtask_info(sig_task);
1749 } else {
1750 sig_task = p->task;
1751 sig_thread = (struct thread *)0;
1752 sig_proc = p;
1753 }
1754
1755 if ((sig_task == TASK_NULL) || is_kerneltask(sig_task))
1756 return;
1757
1758 /*
1759 * do not send signals to the process that has the thread
1760 * doing a reboot(). Not doing so will mark that thread aborted
1761 * and can cause IO failures wich will cause data loss. There's
1762 * also no need to send a signal to a process that is in the middle
1763 * of being torn down.
1764 */
1765 if (ISSET(sig_proc->p_flag, P_REBOOT) ||
1766 ISSET(sig_proc->p_lflag, P_LEXIT))
1767 return;
1768
1769 if( (flavor & (PSIG_VFORK | PSIG_THREAD)) == 0) {
1770 proc_knote(sig_proc, NOTE_SIGNAL | signum);
1771 }
1772
1773 if ((flavor & PSIG_LOCKED)== 0)
1774 proc_signalstart(sig_proc, 0);
1775
1776 /*
1777 * Deliver the signal to the first thread in the task. This
1778 * allows single threaded applications which use signals to
1779 * be able to be linked with multithreaded libraries. We have
1780 * an implicit reference to the current thread, but need
1781 * an explicit one otherwise. The thread reference keeps
1782 * the corresponding task data structures around too. This
1783 * reference is released by thread_deallocate.
1784 */
1785
1786
1787 if (((flavor & PSIG_VFORK) == 0) && ((sig_proc->p_lflag & P_LTRACED) == 0) && (sig_proc->p_sigignore & mask)) {
1788 DTRACE_PROC3(signal__discard, thread_t, sig_thread, proc_t, sig_proc, int, signum);
1789 goto psigout;
1790 }
1791
1792 if (flavor & PSIG_VFORK) {
1793 action = SIG_DFL;
1794 act_set_astbsd(sig_thread);
1795 kret = KERN_SUCCESS;
1796 } else if (flavor & PSIG_THREAD) {
1797 /* If successful return with ast set */
1798 kret = check_actforsig(sig_task, sig_thread, 1);
1799 } else {
1800 /* If successful return with ast set */
1801 kret = get_signalthread(sig_proc, signum, &sig_thread);
1802 }
1803 if (kret != KERN_SUCCESS) {
1804 #if SIGNAL_DEBUG
1805 ram_printf(1);
1806 #endif /* SIGNAL_DEBUG */
1807 goto psigout;
1808 }
1809
1810
1811 uth = get_bsdthread_info(sig_thread);
1812
1813 /*
1814 * If proc is traced, always give parent a chance.
1815 */
1816
1817 if ((flavor & PSIG_VFORK) == 0) {
1818 if (sig_proc->p_lflag & P_LTRACED)
1819 action = SIG_DFL;
1820 else {
1821 /*
1822 * If the signal is being ignored,
1823 * then we forget about it immediately.
1824 * (Note: we don't set SIGCONT in p_sigignore,
1825 * and if it is set to SIG_IGN,
1826 * action will be SIG_DFL here.)
1827 */
1828 if (sig_proc->p_sigignore & mask)
1829 goto psigout;
1830 if (uth->uu_sigwait & mask)
1831 action = KERN_SIG_WAIT;
1832 else if (uth->uu_sigmask & mask)
1833 action = KERN_SIG_HOLD;
1834 else if (sig_proc->p_sigcatch & mask)
1835 action = KERN_SIG_CATCH;
1836 else
1837 action = SIG_DFL;
1838 }
1839 }
1840
1841
1842 proc_lock(sig_proc);
1843
1844 if (sig_proc->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
1845 (sig_proc->p_lflag & P_LTRACED) == 0)
1846 sig_proc->p_nice = NZERO;
1847
1848 if (prop & SA_CONT)
1849 uth->uu_siglist &= ~stopsigmask;
1850
1851 if (prop & SA_STOP) {
1852 struct pgrp *pg;
1853 /*
1854 * If sending a tty stop signal to a member of an orphaned
1855 * process group, discard the signal here if the action
1856 * is default; don't stop the process below if sleeping,
1857 * and don't clear any pending SIGCONT.
1858 */
1859 proc_unlock(sig_proc);
1860 pg = proc_pgrp(sig_proc);
1861 if (prop & SA_TTYSTOP && pg->pg_jobc == 0 &&
1862 action == SIG_DFL) {
1863 pg_rele(pg);
1864 goto psigout;
1865 }
1866 pg_rele(pg);
1867 proc_lock(sig_proc);
1868 uth->uu_siglist &= ~contsigmask;
1869 }
1870
1871 uth->uu_siglist |= mask;
1872 /*
1873 * Repost AST incase sigthread has processed
1874 * ast and missed signal post.
1875 */
1876 if (action == KERN_SIG_CATCH)
1877 act_set_astbsd(sig_thread);
1878
1879
1880 /*
1881 * Defer further processing for signals which are held,
1882 * except that stopped processes must be continued by SIGCONT.
1883 */
1884 /* vfork will not go thru as action is SIG_DFL */
1885 if ((action == KERN_SIG_HOLD) && ((prop & SA_CONT) == 0 || sig_proc->p_stat != SSTOP)) {
1886 proc_unlock(sig_proc);
1887 goto psigout;
1888 }
1889 /*
1890 * SIGKILL priority twiddling moved here from above because
1891 * it needs sig_thread. Could merge it into large switch
1892 * below if we didn't care about priority for tracing
1893 * as SIGKILL's action is always SIG_DFL.
1894 */
1895 if ((signum == SIGKILL) && (sig_proc->p_nice > NZERO)) {
1896 sig_proc->p_nice = NZERO;
1897 }
1898
1899 /*
1900 * Process is traced - wake it up (if not already
1901 * stopped) so that it can discover the signal in
1902 * issig() and stop for the parent.
1903 */
1904 if (sig_proc->p_lflag & P_LTRACED) {
1905 if (sig_proc->p_stat != SSTOP)
1906 goto runlocked;
1907 else {
1908 proc_unlock(sig_proc);
1909 goto psigout;
1910 }
1911 }
1912 if ((flavor & PSIG_VFORK) != 0)
1913 goto runlocked;
1914
1915 if (action == KERN_SIG_WAIT) {
1916 #if CONFIG_DTRACE
1917 /*
1918 * DTrace proc signal-clear returns a siginfo_t. Collect the needed info.
1919 */
1920 r_uid = kauth_getruid(); /* per thread credential; protected by our thread context */
1921
1922 bzero((caddr_t)&(uth->t_dtrace_siginfo), sizeof(uth->t_dtrace_siginfo));
1923
1924 uth->t_dtrace_siginfo.si_signo = signum;
1925 uth->t_dtrace_siginfo.si_pid = current_proc()->p_pid;
1926 uth->t_dtrace_siginfo.si_status = W_EXITCODE(signum, 0);
1927 uth->t_dtrace_siginfo.si_uid = r_uid;
1928 uth->t_dtrace_siginfo.si_code = 0;
1929 #endif
1930 uth->uu_sigwait = mask;
1931 uth->uu_siglist &= ~mask;
1932 wakeup(&uth->uu_sigwait);
1933 /* if it is SIGCONT resume whole process */
1934 if (prop & SA_CONT) {
1935 OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
1936 sig_proc->p_contproc = current_proc()->p_pid;
1937
1938 proc_unlock(sig_proc);
1939 (void) task_resume_internal(sig_task);
1940 goto psigout;
1941 }
1942 proc_unlock(sig_proc);
1943 goto psigout;
1944 }
1945
1946 if (action != SIG_DFL) {
1947 /*
1948 * User wants to catch the signal.
1949 * Wake up the thread, but don't un-suspend it
1950 * (except for SIGCONT).
1951 */
1952 if (prop & SA_CONT) {
1953 OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
1954 proc_unlock(sig_proc);
1955 (void) task_resume_internal(sig_task);
1956 proc_lock(sig_proc);
1957 sig_proc->p_stat = SRUN;
1958 } else if (sig_proc->p_stat == SSTOP) {
1959 proc_unlock(sig_proc);
1960 goto psigout;
1961 }
1962 /*
1963 * Fill out siginfo structure information to pass to the
1964 * signalled process/thread sigaction handler, when it
1965 * wakes up. si_code is 0 because this is an ordinary
1966 * signal, not a SIGCHLD, and so si_status is the signal
1967 * number itself, instead of the child process exit status.
1968 * We shift this left because it will be shifted right before
1969 * it is passed to user space. kind of ugly to use W_EXITCODE
1970 * this way, but it beats defining a new macro.
1971 *
1972 * Note: Avoid the SIGCHLD recursion case!
1973 */
1974 if (signum != SIGCHLD) {
1975 proc_unlock(sig_proc);
1976 r_uid = kauth_getruid();
1977 proc_lock(sig_proc);
1978
1979 sig_proc->si_pid = current_proc()->p_pid;
1980 sig_proc->si_status = W_EXITCODE(signum, 0);
1981 sig_proc->si_uid = r_uid;
1982 sig_proc->si_code = 0;
1983 }
1984
1985 goto runlocked;
1986 } else {
1987 /* Default action - varies */
1988 if (mask & stopsigmask) {
1989 /*
1990 * These are the signals which by default
1991 * stop a process.
1992 *
1993 * Don't clog system with children of init
1994 * stopped from the keyboard.
1995 */
1996 if (!(prop & SA_STOP) && sig_proc->p_pptr == initproc) {
1997 proc_unlock(sig_proc);
1998 psignal_locked(sig_proc, SIGKILL);
1999 proc_lock(sig_proc);
2000 uth->uu_siglist &= ~mask;
2001 proc_unlock(sig_proc);
2002 goto psigout;
2003 }
2004
2005 /*
2006 * Stop the task
2007 * if task hasn't already been stopped by
2008 * a signal.
2009 */
2010 uth->uu_siglist &= ~mask;
2011 if (sig_proc->p_stat != SSTOP) {
2012 sig_proc->p_xstat = signum;
2013 sig_proc->p_stat = SSTOP;
2014 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &sig_proc->p_flag);
2015 sig_proc->p_lflag &= ~P_LWAITED;
2016 proc_unlock(sig_proc);
2017
2018 pp = proc_parentholdref(sig_proc);
2019 stop(sig_proc, pp);
2020 if ((pp != PROC_NULL) && ((pp->p_flag & P_NOCLDSTOP) == 0)) {
2021
2022 my_cred = kauth_cred_proc_ref(sig_proc);
2023 r_uid = kauth_cred_getruid(my_cred);
2024 kauth_cred_unref(&my_cred);
2025
2026 proc_lock(sig_proc);
2027 pp->si_pid = sig_proc->p_pid;
2028 /*
2029 * POSIX: sigaction for a stopped child
2030 * when sent to the parent must set the
2031 * child's signal number into si_status.
2032 */
2033 if (signum != SIGSTOP)
2034 pp->si_status = WEXITSTATUS(sig_proc->p_xstat);
2035 else
2036 pp->si_status = W_EXITCODE(signum, signum);
2037 pp->si_code = CLD_STOPPED;
2038 pp->si_uid = r_uid;
2039 proc_unlock(sig_proc);
2040
2041 psignal(pp, SIGCHLD);
2042 }
2043 if (pp != PROC_NULL)
2044 proc_parentdropref(pp, 0);
2045 } else
2046 proc_unlock(sig_proc);
2047 goto psigout;
2048 }
2049
2050 DTRACE_PROC3(signal__send, thread_t, sig_thread, proc_t, p, int, signum);
2051
2052 /*
2053 * We enter the switch with the sig_proc lock held; it
2054 * is dropped by the time control leaves the switch.
2055 */
2056 switch (signum) {
2057 /*
2058 * Signals ignored by default have been dealt
2059 * with already, since their bits are on in
2060 * p_sigignore.
2061 */
2062
2063 case SIGKILL:
2064 /*
2065 * Kill signal always sets process running and
2066 * unsuspends it.
2067 */
2068 /*
2069 * Process will be running after 'run'
2070 */
2071 sig_proc->p_stat = SRUN;
2072 /*
2073 * In scenarios where suspend/resume race with the
2074 * signal, the AST_BSD may already have been lost by
2075 * the time we get here; set it again to avoid the race.
2076 * This was observed with spindump-enabled shutdowns.
2077 * A more complete fix is needed down the line.
2078 */
2079 act_set_astbsd(sig_thread);
2080 thread_abort(sig_thread);
2081 proc_unlock(sig_proc);
2082
2083 goto psigout;
2084
2085 case SIGCONT:
2086 /*
2087 * Let the process run. If it's sleeping on an
2088 * event, it remains so.
2089 */
2090 OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
2091 sig_proc->p_contproc = sig_proc->p_pid;
2092
2093 proc_unlock(sig_proc);
2094 (void) task_resume_internal(sig_task);
2095 proc_lock(sig_proc);
2096 /*
2097 * When processing a SIGCONT, we need to check
2098 * to see if there are signals pending that
2099 * were not delivered because we had been
2100 * previously stopped. If that's the case,
2101 * we need to call thread_abort_safely() to trigger
2102 * interruption of the current system call to
2103 * cause their handlers to fire. If it's only
2104 * the SIGCONT, then don't wake up.
2105 */
2106 if (((flavor & (PSIG_VFORK|PSIG_THREAD)) == 0) && (((uth->uu_siglist & ~uth->uu_sigmask) & ~sig_proc->p_sigignore) & ~mask)) {
2107 uth->uu_siglist &= ~mask;
2108 sig_proc->p_stat = SRUN;
2109 goto runlocked;
2110 }
2111
2112 uth->uu_siglist &= ~mask;
2113 sig_proc->p_stat = SRUN;
2114 proc_unlock(sig_proc);
2115 goto psigout;
2116
2117 default:
2118 /*
2119 * A signal which has a default action of killing
2120 * the process, and for which there is no handler,
2121 * needs to act like SIGKILL
2122 */
2123 if (((flavor & (PSIG_VFORK|PSIG_THREAD)) == 0) && (action == SIG_DFL) && (prop & SA_KILL)) {
2124 sig_proc->p_stat = SRUN;
2125 proc_unlock(sig_proc);
2126 thread_abort(sig_thread);
2127 goto psigout;
2128 }
2129
2130 /*
2131 * All other signals wake up the process, but don't
2132 * resume it.
2133 */
2134 if (sig_proc->p_stat == SSTOP) {
2135 proc_unlock(sig_proc);
2136 goto psigout;
2137 }
2138 goto runlocked;
2139 }
2140 }
2141 /*NOTREACHED*/
2142
2143 runlocked:
2144 /*
2145 * If we're being traced (possibly because someone attached us
2146 * while we were stopped), check for a signal from the debugger.
2147 */
2148 if (sig_proc->p_stat == SSTOP) {
2149 if ((sig_proc->p_lflag & P_LTRACED) != 0 && sig_proc->p_xstat != 0)
2150 uth->uu_siglist |= sigmask(sig_proc->p_xstat);
2151 if ((flavor & PSIG_VFORK) != 0) {
2152 sig_proc->p_stat = SRUN;
2153 }
2154 proc_unlock(sig_proc);
2155 } else {
2156 /*
2157 * The BSD equivalent of setrunnable(p):
2158 * wake up the thread if it is interruptible.
2159 */
2160 sig_proc->p_stat = SRUN;
2161 proc_unlock(sig_proc);
2162 if ((flavor & PSIG_VFORK) == 0)
2163 thread_abort_safely(sig_thread);
2164 }
2165 psigout:
2166 if ((flavor & PSIG_LOCKED) == 0) {
2167 proc_signalend(sig_proc, 0);
2168 }
2169 }
2170
2171 void
2172 psignal(proc_t p, int signum)
2173 {
2174 psignal_internal(p, NULL, NULL, 0, signum);
2175 }
2176
2177 void
2178 psignal_locked(proc_t p, int signum)
2179 {
2180 psignal_internal(p, NULL, NULL, PSIG_LOCKED, signum);
2181 }
2182
2183 void
2184 psignal_vfork(proc_t p, task_t new_task, thread_t thread, int signum)
2185 {
2186 psignal_internal(p, new_task, thread, PSIG_VFORK, signum);
2187 }
2188
2189 static void
2190 psignal_uthread(thread_t thread, int signum)
2191 {
2192 psignal_internal(PROC_NULL, TASK_NULL, thread, PSIG_THREAD, signum);
2193 }
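/*
 * Summary of the psignal_internal() entry points above:
 *
 *	psignal(p, sig)        - signal a process (default flavor)
 *	psignal_locked(p, sig) - as above, but the caller is already inside a
 *	                         proc_signalstart()/proc_signalend() window
 *	                         (PSIG_LOCKED)
 *	psignal_vfork(...)     - deliver to the vfork()ed child's task and
 *	                         thread (PSIG_VFORK)
 *	psignal_uthread(...)   - deliver to one specific thread (PSIG_THREAD)
 */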
2194
2195
2196 /*
2197 * If the current process has received a signal (should be caught or cause
2198 * termination, should interrupt current syscall), return the signal number.
2199 * Stop signals with default action are processed immediately, then cleared;
2200 * they aren't returned. This is checked after each entry to the system for
2201 * a syscall or trap (though this can usually be done without calling issignal
2202 * by checking the pending signal masks in the CURSIG function). The normal call
2203 * sequence is
2204 *
2205 * while (signum = CURSIG(curproc))
2206 * postsig(signum);
2207 */
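/*
 * In this file the locked variants are what actually run; see bsd_ast()
 * below, which holds the proc lock around the equivalent loop:
 *
 *	while ((signum = issignal_locked(p)))
 *		postsig_locked(signum);
 */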
2208 int
2209 issignal_locked(proc_t p)
2210 {
2211 int signum, mask, prop, sigbits;
2212 thread_t cur_act;
2213 struct uthread * ut;
2214 proc_t pp;
2215 kauth_cred_t my_cred;
2216 int retval = 0;
2217 uid_t r_uid;
2218
2219 cur_act = current_thread();
2220
2221 #if SIGNAL_DEBUG
2222 if (rdebug_proc && (p == rdebug_proc)) {
2223 ram_printf(3);
2224 }
2225 #endif /* SIGNAL_DEBUG */
2226
2227 /*
2228 * Try to grab the signal lock.
2229 */
2230 if (sig_try_locked(p) <= 0) {
2231 return(0);
2232 }
2233
2234 proc_signalstart(p, 1);
2235
2236 ut = get_bsdthread_info(cur_act);
2237 for (;;) {
2238 sigbits = ut->uu_siglist & ~ut->uu_sigmask;
2239
2240 if (p->p_lflag & P_LPPWAIT)
2241 sigbits &= ~stopsigmask;
2242 if (sigbits == 0) { /* no signal to send */
2243 retval = 0;
2244 goto out;
2245 }
2246
2247 signum = ffs((long)sigbits);
2248 mask = sigmask(signum);
2249 prop = sigprop[signum];
2250
2251 /*
2252 * We should see pending but ignored signals
2253 * only if P_LTRACED was on when they were posted.
2254 */
2255 if (mask & p->p_sigignore && (p->p_lflag & P_LTRACED) == 0) {
2256 ut->uu_siglist &= ~mask; /* take the signal! */
2257 continue;
2258 }
2259 if (p->p_lflag & P_LTRACED && (p->p_lflag & P_LPPWAIT) == 0) {
2260 task_t task;
2261 /*
2262 * If traced, always stop, and stay
2263 * stopped until released by the debugger.
2264 */
2265 /* ptrace debugging */
2266 p->p_xstat = signum;
2267
2268 if (p->p_lflag & P_LSIGEXC) {
2269 p->sigwait = TRUE;
2270 p->sigwait_thread = cur_act;
2271 p->p_stat = SSTOP;
2272 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
2273 p->p_lflag &= ~P_LWAITED;
2274 ut->uu_siglist &= ~mask; /* clear the old signal */
2275 proc_signalend(p, 1);
2276 proc_unlock(p);
2277 do_bsdexception(EXC_SOFTWARE, EXC_SOFT_SIGNAL, signum);
2278 proc_lock(p);
2279 proc_signalstart(p, 1);
2280 } else {
2281 proc_unlock(p);
2282 my_cred = kauth_cred_proc_ref(p);
2283 r_uid = kauth_cred_getruid(my_cred);
2284 kauth_cred_unref(&my_cred);
2285
2286 pp = proc_parentholdref(p);
2287 if (pp != PROC_NULL) {
2288 proc_lock(pp);
2289
2290 pp->si_pid = p->p_pid;
2291 pp->si_status = p->p_xstat;
2292 pp->si_code = CLD_TRAPPED;
2293 pp->si_uid = r_uid;
2294
2295 proc_unlock(pp);
2296 }
2297
2298 /*
2299 * XXX Have to really stop for debuggers;
2300 * XXX stop() doesn't do the right thing.
2301 */
2302 task = p->task;
2303 task_suspend_internal(task);
2304
2305 proc_lock(p);
2306 p->sigwait = TRUE;
2307 p->sigwait_thread = cur_act;
2308 p->p_stat = SSTOP;
2309 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
2310 p->p_lflag &= ~P_LWAITED;
2311 ut->uu_siglist &= ~mask; /* clear the old signal */
2312
2313 proc_signalend(p, 1);
2314 proc_unlock(p);
2315
2316 if (pp != PROC_NULL) {
2317 psignal(pp, SIGCHLD);
2318 proc_list_lock();
2319 wakeup((caddr_t)pp);
2320 proc_parentdropref(pp, 1);
2321 proc_list_unlock();
2322 }
2323
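/*
 * Block here until the debugger resumes us; pt_setrunnable()
 * below does the matching wakeup() on &p->sigwait.
 */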
2324 assert_wait((caddr_t)&p->sigwait, (THREAD_INTERRUPTIBLE));
2325 thread_block(THREAD_CONTINUE_NULL);
2326 proc_lock(p);
2327 proc_signalstart(p, 1);
2328 }
2329
2330 p->sigwait = FALSE;
2331 p->sigwait_thread = NULL;
2332 wakeup((caddr_t)&p->sigwait_thread);
2333
2334 /*
2335 * Detect the case where the debugger (e.g. gdb) is
2336 * killed while the traced program is still attached:
2337 * pgsignal() delivers the SIGKILL to the traced
2338 * program, and that is what we check for here.
2339 */
2340 if (ut->uu_siglist & sigmask(SIGKILL)) {
2341 /*
2342 * Wait event may still be outstanding;
2343 * clear it, since sig_lock_to_exit will
2344 * wait.
2345 */
2346 clear_wait(current_thread(), THREAD_INTERRUPTED);
2347 sig_lock_to_exit(p);
2348 /*
2349 * Since this thread will be resumed
2350 * to allow the current syscall to
2351 * be completed, must save u_qsave
2352 * before calling exit(). (Since exit()
2353 * calls closef() which can trash u_qsave.)
2354 */
2355 proc_signalend(p, 1);
2356 proc_unlock(p);
2357 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_FRCEXIT) | DBG_FUNC_NONE,
2358 p->p_pid, W_EXITCODE(0, SIGKILL), 2, 0, 0);
2359 exit1(p, W_EXITCODE(0, SIGKILL), (int *)NULL);
2360 proc_lock(p);
2361 return(0);
2362 }
2363
2364 /*
2365 * We may have to quit
2366 */
2367 if (thread_should_abort(current_thread())) {
2368 retval = 0;
2369 goto out;
2370 }
2371 /*
2372 * If parent wants us to take the signal,
2373 * then it will leave it in p->p_xstat;
2374 * otherwise we just look for signals again.
2375 */
2376 signum = p->p_xstat;
2377 if (signum == 0)
2378 continue;
2379 /*
2380 * Put the new signal into p_siglist. If the
2381 * signal is being masked, look for other signals.
2382 */
2383 mask = sigmask(signum);
2384 ut->uu_siglist |= mask;
2385 if (ut->uu_sigmask & mask)
2386 continue;
2387 }
2388
2389 /*
2390 * Decide whether the signal should be returned.
2391 * Return the signal's number, or fall through
2392 * to clear it from the pending mask.
2393 */
2394
2395 switch ((long)p->p_sigacts->ps_sigact[signum]) {
2396
2397 case (long)SIG_DFL:
2398 /*
2399 * Don't take default actions on system processes.
2400 */
2401 if (p->p_ppid == 0) {
2402 #if DIAGNOSTIC
2403 /*
2404 * Are you sure you want to ignore SIGSEGV
2405 * in init? XXX
2406 */
2407 printf("Process (pid %d) got signal %d\n",
2408 p->p_pid, signum);
2409 #endif
2410 break; /* == ignore */
2411 }
2412
2413 /*
2414 * If there is a pending stop signal to process
2415 * with default action, stop here,
2416 * then clear the signal. However,
2417 * if process is member of an orphaned
2418 * process group, ignore tty stop signals.
2419 */
2420 if (prop & SA_STOP) {
2421 struct pgrp * pg;
2422
2423 proc_unlock(p);
2424 pg = proc_pgrp(p);
2425 if (p->p_lflag & P_LTRACED ||
2426 (pg->pg_jobc == 0 &&
2427 prop & SA_TTYSTOP)) {
2428 proc_lock(p);
2429 pg_rele(pg);
2430 break; /* == ignore */
2431 }
2432 pg_rele(pg);
2433 if (p->p_stat != SSTOP) {
2434 proc_lock(p);
2435 p->p_xstat = signum;
2436
2437 p->p_stat = SSTOP;
2438 p->p_lflag &= ~P_LWAITED;
2439 proc_unlock(p);
2440
2441 pp = proc_parentholdref(p);
2442 stop(p, pp);
2443 if ((pp != PROC_NULL) && ((pp->p_flag & P_NOCLDSTOP) == 0)) {
2444 my_cred = kauth_cred_proc_ref(p);
2445 r_uid = kauth_cred_getruid(my_cred);
2446 kauth_cred_unref(&my_cred);
2447
2448 proc_lock(pp);
2449 pp->si_pid = p->p_pid;
2450 pp->si_status = WEXITSTATUS(p->p_xstat);
2451 pp->si_code = CLD_STOPPED;
2452 pp->si_uid = r_uid;
2453 proc_unlock(pp);
2454
2455 psignal(pp, SIGCHLD);
2456 }
2457 if (pp != PROC_NULL)
2458 proc_parentdropref(pp, 0);
2459 }
2460 proc_lock(p);
2461 break;
2462 } else if (prop & SA_IGNORE) {
2463 /*
2464 * Except for SIGCONT, shouldn't get here.
2465 * Default action is to ignore; drop it.
2466 */
2467 break; /* == ignore */
2468 } else {
2469 ut->uu_siglist &= ~mask; /* take the signal! */
2470 retval = signum;
2471 goto out;
2472 }
2473
2474 /*NOTREACHED*/
2475 break;
2476
2477 case (long)SIG_IGN:
2478 /*
2479 * Masking above should prevent us ever trying
2480 * to take action on an ignored signal other
2481 * than SIGCONT, unless process is traced.
2482 */
2483 if ((prop & SA_CONT) == 0 &&
2484 (p->p_lflag & P_LTRACED) == 0)
2485 printf("issignal\n");
2486 break; /* == ignore */
2487
2488 default:
2489 /*
2490 * This signal has an action, let
2491 * postsig() process it.
2492 */
2493 ut->uu_siglist &= ~mask; /* take the signal! */
2494 retval = signum;
2495 goto out;
2496 }
2497 ut->uu_siglist &= ~mask; /* take the signal! */
2498 }
2499 /* NOTREACHED */
2500 out:
2501 proc_signalend(p, 1);
2502 return(retval);
2503 }
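/*
 * CURSIG() below is the side-effect-free counterpart of issignal_locked():
 * it scans for a deliverable signal without consuming it or stopping the
 * process, which is why _sleep() can use it to decide whether a sleep
 * should be interrupted.
 */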
2504
2505 /* called from _sleep */
2506 int
2507 CURSIG(proc_t p)
2508 {
2509 int signum, mask, prop, sigbits;
2510 thread_t cur_act;
2511 struct uthread * ut;
2512 int retnum = 0;
2513
2514
2515 cur_act = current_thread();
2516
2517 ut = get_bsdthread_info(cur_act);
2518
2519 if (ut->uu_siglist == 0)
2520 return (0);
2521
2522 if (((ut->uu_siglist & ~ut->uu_sigmask) == 0) && ((p->p_lflag & P_LTRACED) == 0))
2523 return (0);
2524
2525 sigbits = ut->uu_siglist & ~ut->uu_sigmask;
2526
2527 for (;;) {
2528 if (p->p_lflag & P_LPPWAIT)
2529 sigbits &= ~stopsigmask;
2530 if (sigbits == 0) { /* no signal to send */
2531 return (retnum);
2532 }
2533
2534 signum = ffs((long)sigbits);
2535 mask = sigmask(signum);
2536 prop = sigprop[signum];
2537 sigbits &= ~mask; /* take the signal out */
2538
2539 /*
2540 * We should see pending but ignored signals
2541 * only if P_LTRACED was on when they were posted.
2542 */
2543 if (mask & p->p_sigignore && (p->p_lflag & P_LTRACED) == 0) {
2544 continue;
2545 }
2546
2547 if (p->p_lflag & P_LTRACED && (p->p_lflag & P_LPPWAIT) == 0) {
2548 return(signum);
2549 }
2550
2551 /*
2552 * Decide whether the signal should be returned.
2553 * Return the signal's number, or fall through
2554 * to clear it from the pending mask.
2555 */
2556
2557 switch ((long)p->p_sigacts->ps_sigact[signum]) {
2558
2559 case (long)SIG_DFL:
2560 /*
2561 * Don't take default actions on system processes.
2562 */
2563 if (p->p_ppid == 0) {
2564 #if DIAGNOSTIC
2565 /*
2566 * Are you sure you want to ignore SIGSEGV
2567 * in init? XXX
2568 */
2569 printf("Process (pid %d) got signal %d\n",
2570 p->p_pid, signum);
2571 #endif
2572 break; /* == ignore */
2573 }
2574
2575 /*
2576 * If there is a pending stop signal to process
2577 * with default action, stop here,
2578 * then clear the signal. However,
2579 * if process is member of an orphaned
2580 * process group, ignore tty stop signals.
2581 */
2582 if (prop & SA_STOP) {
2583 struct pgrp *pg;
2584
2585 pg = proc_pgrp(p);
2586
2587 if (p->p_lflag & P_LTRACED ||
2588 (pg->pg_jobc == 0 &&
2589 prop & SA_TTYSTOP)) {
2590 pg_rele(pg);
2591 break; /* == ignore */
2592 }
2593 pg_rele(pg);
2594 retnum = signum;
2595 break;
2596 } else if (prop & SA_IGNORE) {
2597 /*
2598 * Except for SIGCONT, shouldn't get here.
2599 * Default action is to ignore; drop it.
2600 */
2601 break; /* == ignore */
2602 } else {
2603 return (signum);
2604 }
2605 /*NOTREACHED*/
2606
2607 case (long)SIG_IGN:
2608 /*
2609 * Masking above should prevent us ever trying
2610 * to take action on an ignored signal other
2611 * than SIGCONT, unless process is traced.
2612 */
2613 if ((prop & SA_CONT) == 0 &&
2614 (p->p_lflag & P_LTRACED) == 0)
2615 printf("issignal\n");
2616 break; /* == ignore */
2617
2618 default:
2619 /*
2620 * This signal has an action, let
2621 * postsig() process it.
2622 */
2623 return (signum);
2624 }
2625 }
2626 /* NOTREACHED */
2627 }
2628
2629 /*
2630 * Put the argument process into the stopped state and notify the parent
2631 * via wakeup. Signals are handled elsewhere. The process must not be
2632 * on the run queue.
2633 */
2634 static void
2635 stop(proc_t p, proc_t parent)
2636 {
2637 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
2638 if ((parent != PROC_NULL) && (parent->p_stat != SSTOP)) {
2639 proc_list_lock();
2640 wakeup((caddr_t)parent);
2641 proc_list_unlock();
2642 }
2643 (void) task_suspend_internal(p->task);
2644 }
2645
2646 /*
2647 * Take the action for the specified signal
2648 * from the current set of pending signals.
2649 */
2650 void
2651 postsig_locked(int signum)
2652 {
2653 proc_t p = current_proc();
2654 struct sigacts *ps = p->p_sigacts;
2655 user_addr_t catcher;
2656 uint32_t code;
2657 int mask, returnmask;
2658 struct uthread * ut;
2659
2660 #if DIAGNOSTIC
2661 if (signum == 0)
2662 panic("postsig");
2663 /*
2664 * This must be called on master cpu
2665 */
2666 if (cpu_number() != master_cpu)
2667 panic("psig not on master");
2668 #endif
2669
2670 /*
2671 * Try to grab the signal lock.
2672 */
2673 if (sig_try_locked(p) <= 0) {
2674 return;
2675 }
2676
2677 proc_signalstart(p, 1);
2678
2679 ut = (struct uthread *)get_bsdthread_info(current_thread());
2680 mask = sigmask(signum);
2681 ut->uu_siglist &= ~mask;
2682 catcher = ps->ps_sigact[signum];
2683 if (catcher == SIG_DFL) {
2684 /*
2685 * Default catcher, where the default is to kill
2686 * the process. (Other cases were ignored above.)
2687 */
2688 sig_lock_to_exit(p);
2689 p->p_acflag |= AXSIG;
2690 if (sigprop[signum] & SA_CORE) {
2691 p->p_sigacts->ps_sig = signum;
2692 proc_signalend(p, 1);
2693 proc_unlock(p);
2694 if (coredump(p, 0, 0) == 0)
2695 signum |= WCOREFLAG;
2696 } else {
2697 proc_signalend(p, 1);
2698 proc_unlock(p);
2699 }
2700
2701 #if CONFIG_DTRACE
2702 bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));
2703
2704 ut->t_dtrace_siginfo.si_signo = signum;
2705 ut->t_dtrace_siginfo.si_pid = p->si_pid;
2706 ut->t_dtrace_siginfo.si_uid = p->si_uid;
2707 ut->t_dtrace_siginfo.si_status = WEXITSTATUS(p->si_status);
2708
2709 /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
2710 switch (signum) {
2711 case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
2712 DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
2713 break;
2714 default:
2715 break;
2716 }
2717
2718
2719 DTRACE_PROC3(signal__handle, int, signum, siginfo_t *, &(ut->t_dtrace_siginfo),
2720 void (*)(void), SIG_DFL);
2721 #endif
2722
2723 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_FRCEXIT) | DBG_FUNC_NONE,
2724 p->p_pid, W_EXITCODE(0, signum), 3, 0, 0);
2725 exit1(p, W_EXITCODE(0, signum), (int *)NULL);
2726 proc_lock(p);
2727 return;
2728 } else {
2729 /*
2730 * If we get here, the signal must be caught.
2731 */
2732 #if DIAGNOSTIC
2733 if (catcher == SIG_IGN || (ut->uu_sigmask & mask))
2734 log(LOG_WARNING,
2735 "postsig: processing masked or ignored signal\n");
2736 #endif
2737
2738 /*
2739 * Set the new mask value and also defer further
2740 * occurrences of this signal.
2741 *
2742 * Special case: user has done a sigpause. Here the
2743 * current mask is not of interest, but rather the
2744 * mask from before the sigpause is what we want
2745 * restored after the signal processing is completed.
2746 */
2747 if (ut->uu_flag & UT_SAS_OLDMASK) {
2748 returnmask = ut->uu_oldmask;
2749 ut->uu_flag &= ~UT_SAS_OLDMASK;
2750 ut->uu_oldmask = 0;
2751 } else
2752 returnmask = ut->uu_sigmask;
2753 ut->uu_sigmask |= ps->ps_catchmask[signum];
2754 if ((ps->ps_signodefer & mask) == 0)
2755 ut->uu_sigmask |= mask;
2756 if ((signum != SIGILL) && (signum != SIGTRAP) && (ps->ps_sigreset & mask)) {
2757 if ((signum != SIGCONT) && (sigprop[signum] & SA_IGNORE))
2758 p->p_sigignore |= mask;
2759 ps->ps_sigact[signum] = SIG_DFL;
2760 ps->ps_siginfo &= ~mask;
2761 ps->ps_signodefer &= ~mask;
2762 }
2763
2764 if (ps->ps_sig != signum) {
2765 code = 0;
2766 } else {
2767 code = ps->ps_code;
2768 ps->ps_code = 0;
2769 }
2770 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_nsignals);
2771 sendsig(p, catcher, signum, returnmask, code);
2772 }
2773 proc_signalend(p, 1);
2774 }
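/*
 * Note: sendsig() (machine-dependent, defined elsewhere) builds the
 * user-mode signal frame and redirects the thread to the signal
 * trampoline; sigreturn(2) later restores the returnmask computed above.
 */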
2775
2776 /*
2777 * Attach a signal knote to the list of knotes for this process.
2778 *
2779 * Signal knotes share the knote list with proc knotes. This
2780 * could be avoided by using a signal-specific knote list, but
2781 * probably isn't worth the trouble.
2782 */
2783
2784 static int
2785 filt_sigattach(struct knote *kn)
2786 {
2787 proc_t p = current_proc(); /* can attach only to oneself */
2788
2789 proc_klist_lock();
2790
2791 kn->kn_ptr.p_proc = p;
2792 kn->kn_flags |= EV_CLEAR; /* automatically set */
2793
2794 KNOTE_ATTACH(&p->p_klist, kn);
2795
2796 proc_klist_unlock();
2797
2798 return (0);
2799 }
2800
2801 /*
2802 * remove the knote from the process list, if it hasn't already
2803 * been removed by exit processing.
2804 */
2805
2806 static void
2807 filt_sigdetach(struct knote *kn)
2808 {
2809 proc_t p = kn->kn_ptr.p_proc;
2810
2811 proc_klist_lock();
2812 kn->kn_ptr.p_proc = NULL;
2813 KNOTE_DETACH(&p->p_klist, kn);
2814 proc_klist_unlock();
2815 }
2816
2817 /*
2818 * Post an event to the signal filter. Because we share the same list
2819 * as process knotes, we have to filter out and handle only signal events.
2820 *
2821 * We assume that we process fdfree() before we post the NOTE_EXIT for
2822 * a process during exit. Therefore, since signal filters can only be
2823 * set up "in-process", we should have already torn down the kqueue
2824 * hosting the EVFILT_SIGNAL knote and should never see NOTE_EXIT.
2825 */
2826 static int
2827 filt_signal(struct knote *kn, long hint)
2828 {
2829
2830 if (hint & NOTE_SIGNAL) {
2831 hint &= ~NOTE_SIGNAL;
2832
2833 if (kn->kn_id == (unsigned int)hint)
2834 kn->kn_data++;
2835 } else if (hint & NOTE_EXIT) {
2836 panic("filt_signal: detected NOTE_EXIT event");
2837 }
2838
2839 return (kn->kn_data != 0);
2840 }
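/*
 * Illustrative userspace sketch (not part of the kernel source): one way
 * the EVFILT_SIGNAL filter above is typically consumed.  kn_data counts
 * deliveries between kevent() calls, and EV_CLEAR is set automatically by
 * filt_sigattach().
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <sys/time.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		int kq = kqueue();
 *		struct kevent ke;
 *
 *		signal(SIGUSR1, SIG_IGN);
 *		EV_SET(&ke, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
 *		kevent(kq, &ke, 1, NULL, 0, NULL);
 *		kill(getpid(), SIGUSR1);
 *		kevent(kq, NULL, 0, &ke, 1, NULL);
 *		printf("SIGUSR1 delivered %ld time(s)\n", (long)ke.data);
 *		return (0);
 *	}
 */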
2841
2842 static void
2843 filt_signaltouch(struct knote *kn, struct kevent64_s *kev, long type)
2844 {
2845 proc_klist_lock();
2846 switch (type) {
2847 case EVENT_REGISTER:
2848 kn->kn_sfflags = kev->fflags;
2849 kn->kn_sdata = kev->data;
2850 break;
2851 case EVENT_PROCESS:
2852 *kev = kn->kn_kevent;
2853 if (kn->kn_flags & EV_CLEAR) {
2854 kn->kn_data = 0;
2855 kn->kn_fflags = 0;
2856 }
2857 break;
2858 default:
2859 panic("filt_machporttouch() - invalid type (%ld)", type);
2860 break;
2861 }
2862 proc_klist_unlock();
2863 }
2864
2865 void
2866 bsd_ast(thread_t thread)
2867 {
2868 proc_t p = current_proc();
2869 struct uthread *ut = get_bsdthread_info(thread);
2870 int signum;
2871 user_addr_t pc;
2872 static int bsd_init_done = 0;
2873
2874 if (p == NULL)
2875 return;
2876
2877 if ((p->p_flag & P_OWEUPC) && (p->p_flag & P_PROFIL)) {
2878 pc = get_useraddr();
2879 addupc_task(p, pc, 1);
2880 OSBitAndAtomic(~((uint32_t)P_OWEUPC), &p->p_flag);
2881 }
2882
2883 if (timerisset(&p->p_vtimer_user.it_value)) {
2884 uint32_t microsecs;
2885
2886 task_vtimer_update(p->task, TASK_VTIMER_USER, &microsecs);
2887
2888 if (!itimerdecr(p, &p->p_vtimer_user, microsecs)) {
2889 if (timerisset(&p->p_vtimer_user.it_value))
2890 task_vtimer_set(p->task, TASK_VTIMER_USER);
2891 else
2892 task_vtimer_clear(p->task, TASK_VTIMER_USER);
2893
2894 psignal(p, SIGVTALRM);
2895 }
2896 }
2897
2898 if (timerisset(&p->p_vtimer_prof.it_value)) {
2899 uint32_t microsecs;
2900
2901 task_vtimer_update(p->task, TASK_VTIMER_PROF, &microsecs);
2902
2903 if (!itimerdecr(p, &p->p_vtimer_prof, microsecs)) {
2904 if (timerisset(&p->p_vtimer_prof.it_value))
2905 task_vtimer_set(p->task, TASK_VTIMER_PROF);
2906 else
2907 task_vtimer_clear(p->task, TASK_VTIMER_PROF);
2908
2909 psignal(p, SIGPROF);
2910 }
2911 }
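/*
 * The two blocks above are the delivery points for setitimer(2)'s
 * ITIMER_VIRTUAL and ITIMER_PROF timers.  An illustrative userspace
 * sketch that exercises the first one (handler being a user-supplied
 * signal handler):
 *
 *	struct itimerval itv = { { 0, 0 }, { 1, 0 } };	 (it_interval, it_value)
 *	signal(SIGVTALRM, handler);
 *	setitimer(ITIMER_VIRTUAL, &itv, NULL);
 */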
2912
2913 if (timerisset(&p->p_rlim_cpu)) {
2914 struct timeval tv;
2915
2916 task_vtimer_update(p->task, TASK_VTIMER_RLIM, (uint32_t *) &tv.tv_usec);
2917
2918 proc_spinlock(p);
2919 if (p->p_rlim_cpu.tv_sec > 0 || p->p_rlim_cpu.tv_usec > tv.tv_usec) {
2920 tv.tv_sec = 0;
2921 timersub(&p->p_rlim_cpu, &tv, &p->p_rlim_cpu);
2922 proc_spinunlock(p);
2923 } else {
2924
2925 timerclear(&p->p_rlim_cpu);
2926 proc_spinunlock(p);
2927
2928 task_vtimer_clear(p->task, TASK_VTIMER_RLIM);
2929
2930 psignal(p, SIGXCPU);
2931 }
2932 }
2933
2934 #if CONFIG_DTRACE
2935 if (ut->t_dtrace_sig) {
2936 uint8_t dt_action_sig = ut->t_dtrace_sig;
2937 ut->t_dtrace_sig = 0;
2938 psignal(p, dt_action_sig);
2939 }
2940
2941 if (ut->t_dtrace_stop) {
2942 ut->t_dtrace_stop = 0;
2943 proc_lock(p);
2944 p->p_dtrace_stop = 1;
2945 proc_unlock(p);
2946 (void)task_suspend_internal(p->task);
2947 }
2948
2949 if (ut->t_dtrace_resumepid) {
2950 proc_t resumeproc = proc_find(ut->t_dtrace_resumepid);
2951 ut->t_dtrace_resumepid = 0;
2952 if (resumeproc != PROC_NULL) {
2953 proc_lock(resumeproc);
2954 /* We only act on processes stopped by dtrace */
2955 if (resumeproc->p_dtrace_stop) {
2956 resumeproc->p_dtrace_stop = 0;
2957 proc_unlock(resumeproc);
2958 task_resume_internal(resumeproc->task);
2959 }
2960 else {
2961 proc_unlock(resumeproc);
2962 }
2963 proc_rele(resumeproc);
2964 }
2965 }
2966
2967 #endif /* CONFIG_DTRACE */
2968
2969 proc_lock(p);
2970 if (CHECK_SIGNALS(p, current_thread(), ut)) {
2971 while ( (signum = issignal_locked(p)) )
2972 postsig_locked(signum);
2973 }
2974 proc_unlock(p);
2975
2976 if (!bsd_init_done) {
2977 bsd_init_done = 1;
2978 bsdinit_task();
2979 }
2980
2981 }
2982
2983 /* ptrace set runnable */
2984 void
2985 pt_setrunnable(proc_t p)
2986 {
2987 task_t task;
2988
2989 task = p->task;
2990
2991 if (p->p_lflag & P_LTRACED) {
2992 proc_lock(p);
2993 p->p_stat = SRUN;
2994 proc_unlock(p);
2995 if (p->sigwait) {
2996 wakeup((caddr_t)&(p->sigwait));
2997 if ((p->p_lflag & P_LSIGEXC) == 0) { // 5878479
2998 task_release(task);
2999 }
3000 }
3001 }
3002 }
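/*
 * pt_setrunnable() is reached from the ptrace(2) resume path.  An
 * illustrative debugger-side call would be:
 *
 *	ptrace(PT_CONTINUE, pid, (caddr_t)1, 0);	 (resume where stopped)
 */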
3003
3004 kern_return_t
3005 do_bsdexception(
3006 int exc,
3007 int code,
3008 int sub)
3009 {
3010 mach_exception_data_type_t codes[EXCEPTION_CODE_MAX];
3011
3012 codes[0] = code;
3013 codes[1] = sub;
3014 return(bsd_exception(exc, codes, 2));
3015 }
3016
3017 int
3018 proc_pendingsignals(proc_t p, sigset_t mask)
3019 {
3020 struct uthread * uth;
3021 thread_t th;
3022 sigset_t bits = 0;
3023
3024 proc_lock(p);
3025 /* If the process is in proc exit, return no signal info */
3026 if (p->p_lflag & P_LPEXIT) {
3027 goto out;
3028 }
3029
3030 if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
3031 th = p->p_vforkact;
3032 uth = (struct uthread *)get_bsdthread_info(th);
3033 if (uth) {
3034 bits = (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
3035 }
3036 goto out;
3037 }
3038
3039 bits = 0;
3040 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
3041 bits |= (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
3042 }
3043 out:
3044 proc_unlock(p);
3045 return(bits);
3046 }
3047
3048 int
3049 thread_issignal(proc_t p, thread_t th, sigset_t mask)
3050 {
3051 struct uthread * uth;
3052 sigset_t bits=0;
3053
3054 proc_lock(p);
3055 uth = (struct uthread *)get_bsdthread_info(th);
3056 if (uth) {
3057 bits = (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
3058 }
3059 proc_unlock(p);
3060 return(bits);
3061 }
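/*
 * Both routines above evaluate the same predicate: a signal is reported
 * if it is pending on the thread (uu_siglist), not blocked by the thread
 * (uu_sigmask), not ignored by the process (p_sigignore), and selected by
 * the caller-supplied mask.
 */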
3062
3063 /*
3064 * Allow external reads of the sigprop array.
3065 */
3066 int
3067 hassigprop(int sig, int prop)
3068 {
3069 return (sigprop[sig] & prop);
3070 }
3071
3072 void
3073 pgsigio(pid_t pgid, int sig)
3074 {
3075 proc_t p = PROC_NULL;
3076
3077 if (pgid < 0)
3078 gsignal(-(pgid), sig);
3079
3080 else if (pgid > 0 && (p = proc_find(pgid)) != 0)
3081 psignal(p, sig);
3082 if (p != PROC_NULL)
3083 proc_rele(p);
3084 }
3085
3086 void
3087 proc_signalstart(proc_t p, int locked)
3088 {
3089 if (!locked)
3090 proc_lock(p);
3091
3092 if (p->p_signalholder == current_thread())
3093 panic("proc_signalstart: thread attempting to signal a process for which it holds the signal lock");
3094
3095 p->p_sigwaitcnt++;
3096 while ((p->p_lflag & P_LINSIGNAL) == P_LINSIGNAL)
3097 msleep(&p->p_sigmask, &p->p_mlock, 0, "proc_signstart", NULL);
3098 p->p_sigwaitcnt--;
3099
3100 p->p_lflag |= P_LINSIGNAL;
3101 p->p_signalholder = current_thread();
3102 if (!locked)
3103 proc_unlock(p);
3104 }
3105
3106 void
3107 proc_signalend(proc_t p, int locked)
3108 {
3109 if (!locked)
3110 proc_lock(p);
3111 p->p_lflag &= ~P_LINSIGNAL;
3112
3113 if (p->p_sigwaitcnt > 0)
3114 wakeup(&p->p_sigmask);
3115
3116 p->p_signalholder = NULL;
3117 if (!locked)
3118 proc_unlock(p);
3119 }
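/*
 * The pair above forms a sleep-based gate.  Typical calling pattern:
 *
 *	proc_signalstart(p, 0);		(take the gate; acquires p_mlock)
 *	... deliver or scan signals ...
 *	proc_signalend(p, 0);		(release the gate; wake any waiters)
 *
 * P_LINSIGNAL serializes signal processing for the process; contending
 * threads queue on &p->p_sigmask via msleep()/wakeup().
 */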
3120
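/*
 * sig_lock_to_exit() commits the calling thread to tearing the process
 * down: it records the thread as the exit thread, then holds the task and
 * waits for its other threads to quiesce.  The proc lock is dropped across
 * task_hold()/task_wait() and retaken before returning.
 */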
3121 void
3122 sig_lock_to_exit(proc_t p)
3123 {
3124 thread_t self = current_thread();
3125
3126 p->exit_thread = self;
3127 proc_unlock(p);
3128
3129 task_hold(p->task);
3130 task_wait(p->task, FALSE);
3131
3132 proc_lock(p);
3133 }
3134
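/*
 * sig_try_locked() attempts to enter signal processing for p.  Called,
 * and returns, with the proc lock held.  Returns 1 when processing may
 * proceed, 0 if the process is exiting, and -1 if this thread was aborted
 * while waiting; callers treat anything <= 0 as "do not proceed".
 */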
3135 int
3136 sig_try_locked(proc_t p)
3137 {
3138 thread_t self = current_thread();
3139
3140 while (p->sigwait || p->exit_thread) {
3141 if (p->exit_thread) {
3142 return(0);
3143 }
3144 msleep((caddr_t)&p->sigwait_thread, &p->p_mlock, PCATCH | PDROP, 0, 0);
3145 if (thread_should_abort(self)) {
3146 /*
3147 * Terminate request - clean up.
3148 */
3149 proc_lock(p);
3150 return -1;
3151 }
3152 proc_lock(p);
3153 }
3154 return 1;
3155 }