1 /*
2 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995-1998 Apple Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94
67 */
68
69 #define SIGPROP /* include signal properties table */
70 #include <sys/param.h>
71 #include <sys/resourcevar.h>
72 #include <sys/proc_internal.h>
73 #include <sys/kauth.h>
74 #include <sys/systm.h>
75 #include <sys/timeb.h>
76 #include <sys/times.h>
77 #include <sys/acct.h>
78 #include <sys/file_internal.h>
79 #include <sys/kernel.h>
80 #include <sys/wait.h>
81 #include <sys/signalvar.h>
82 #if KTRACE
83 #include <sys/ktrace.h>
84 #endif
85 #include <sys/syslog.h>
86 #include <sys/stat.h>
87 #include <sys/lock.h>
88 #include <sys/kdebug.h>
89
90 #include <sys/mount.h>
91 #include <sys/sysproto.h>
92
93 #include <bsm/audit_kernel.h>
94
95 #include <machine/spl.h>
96
97 #include <kern/cpu_number.h>
98
99 #include <sys/vm.h>
100 #include <sys/user.h> /* for coredump */
101 #include <kern/ast.h> /* for APC support */
102 #include <kern/lock.h>
103 #include <kern/task.h> /* extern void *get_bsdtask_info(task_t); */
104 #include <kern/thread.h>
105 #include <kern/sched_prim.h>
106 #include <kern/thread_call.h>
107 #include <mach/exception.h>
108 #include <mach/task.h>
109 #include <mach/thread_act.h>
110
111 /*
112 * Missing prototypes that Mach should export
113 *
114 * +++
115 */
116 extern int thread_enable_fpe(thread_t act, int onoff);
117 extern void unix_syscall_return(int error);
118 extern thread_t port_name_to_thread(mach_port_name_t port_name);
119 extern kern_return_t check_actforsig(task_t task, thread_t thread, int setast);
120 extern kern_return_t get_signalact(task_t , thread_t *, int);
121 extern boolean_t thread_should_abort(thread_t);
122 extern unsigned int get_useraddr(void);
123
124 /*
125 * ---
126 */
127
128 extern void doexception(int exc, int code, int sub);
129
130 void stop(struct proc *p);
131 int cansignal(struct proc *, kauth_cred_t, struct proc *, int);
132 int killpg1(struct proc *, int, int, int);
133 void sigexit_locked(struct proc *, int);
134 int setsigvec(struct proc *, int, struct __user_sigaction *);
135 void exit1(struct proc *, int, int *);
136 void psignal_uthread(thread_t, int);
137 kern_return_t do_bsdexception(int, int, int);
138 void __posix_sem_syscall_return(kern_return_t);
139
140 /* Implementations are in osfmk/kern/sync_sema.c. We do not want port.h in this scope, so the port arguments are declared as void *. */
141 kern_return_t semaphore_timedwait_signal_trap_internal(void *, void *,time_t, int32_t, void (*)(int));
142 kern_return_t semaphore_timedwait_trap_internal(void *, time_t, int32_t, void (*)(int));
143 kern_return_t semaphore_wait_signal_trap_internal(void *, void *, void (*)(int));
144 kern_return_t semaphore_wait_trap_internal(void *, void (*)(int));
145
146 static int filt_sigattach(struct knote *kn);
147 static void filt_sigdetach(struct knote *kn);
148 static int filt_signal(struct knote *kn, long hint);
149
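/*
 * kqueue filter operations used for EVFILT_SIGNAL events
 * (see the filt_sig* routines declared above).
 */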
150 struct filterops sig_filtops =
151 { 0, filt_sigattach, filt_sigdetach, filt_signal };
152
153
154 /*
155 * NOTE: Source and target may *NOT* overlap! (target is smaller)
156 */
157 static void
158 sigaltstack_64to32(struct user_sigaltstack *in, struct sigaltstack *out)
159 {
160 out->ss_sp = CAST_DOWN(void *,in->ss_sp);
161 out->ss_size = in->ss_size;
162 out->ss_flags = in->ss_flags;
163 }
164
165 /*
166 * NOTE: Source and target are permitted to overlap (source is smaller);
167 * this works because we copy fields in order from the end of the struct to
168 * the beginning.
169 */
170 static void
171 sigaltstack_32to64(struct sigaltstack *in, struct user_sigaltstack *out)
172 {
173 out->ss_flags = in->ss_flags;
174 out->ss_size = in->ss_size;
175 out->ss_sp = CAST_USER_ADDR_T(in->ss_sp);
176 }
177
178 static void
179 sigaction_64to32(struct user_sigaction *in, struct sigaction *out)
180 {
181 /* This assumes 32 bit __sa_handler is of type sig_t */
182 out->__sigaction_u.__sa_handler = CAST_DOWN(sig_t,in->__sigaction_u.__sa_handler);
183 out->sa_mask = in->sa_mask;
184 out->sa_flags = in->sa_flags;
185 }
186
187 static void
188 __sigaction_32to64(struct __sigaction *in, struct __user_sigaction *out)
189 {
190 out->__sigaction_u.__sa_handler = CAST_USER_ADDR_T(in->__sigaction_u.__sa_handler);
191 out->sa_tramp = CAST_USER_ADDR_T(in->sa_tramp);
192 out->sa_mask = in->sa_mask;
193 out->sa_flags = in->sa_flags;
194 }
195
196
197 #if SIGNAL_DEBUG
198 void ram_printf(int);
199 int ram_debug=0;
200 unsigned int rdebug_proc=0;
201 void
202 ram_printf(int x)
203 {
204 printf("x is %d",x);
205
206 }
207 #endif /* SIGNAL_DEBUG */
208
209 int
210 signal_lock(struct proc *p)
211 {
212 int error = 0;
213 #if DIAGNOSTIC
214 #if SIGNAL_DEBUG
215 #ifdef __ppc__
216 {
217 int register sp, *fp, numsaved;
218
219 __asm__ volatile("mr %0,r1" : "=r" (sp));
220
221 fp = (int *)*((int *)sp);
222 for (numsaved = 0; numsaved < 3; numsaved++) {
223 p->lockpc[numsaved] = fp[2];
224 if ((int)fp <= 0)
225 break;
226 fp = (int *)*fp;
227 }
228 }
229 #endif /* __ppc__ */
230 #endif /* SIGNAL_DEBUG */
231 #endif /* DIAGNOSTIC */
232
233 siglock_retry:
234 error = lockmgr((struct lock__bsd__ *)&p->signal_lock[0], LK_EXCLUSIVE, 0, (struct proc *)0);
235 if (error == EINTR)
236 goto siglock_retry;
237 return(error);
238 }
239
240 int
241 signal_unlock(struct proc *p)
242 {
243 #if DIAGNOSTIC
244 #if SIGNAL_DEBUG
245 #ifdef __ppc__
246 {
247 int register sp, *fp, numsaved;
248
249 __asm__ volatile("mr %0,r1" : "=r" (sp));
250
251 fp = (int *)*((int *)sp);
252 for (numsaved = 0; numsaved < 3; numsaved++) {
253 p->unlockpc[numsaved] = fp[2];
254 if ((int)fp <= 0)
255 break;
256 fp = (int *)*fp;
257 }
258 }
259 #endif /* __ppc__ */
260 #endif /* SIGNAL_DEBUG */
261 #endif /* DIAGNOSTIC */
262
263 /* TBD: check p last arg */
264 return(lockmgr((struct lock__bsd__ *)&p->signal_lock[0], LK_RELEASE, (simple_lock_t)0, (struct proc *)0));
265 }
266
267 void
268 signal_setast(sig_actthread)
269 thread_t sig_actthread;
270 {
271 act_set_astbsd(sig_actthread);
272 }
273
274 /*
275 * Can process p, with ucred uc, send the signal signum to process q?
276 */
277 int
278 cansignal(p, uc, q, signum)
279 struct proc *p;
280 kauth_cred_t uc;
281 struct proc *q;
282 int signum;
283 {
284 /* you can signal yourself */
285 if (p == q)
286 return(1);
287
288 if (!suser(uc, NULL))
289 return (1); /* root can always signal */
290
291 if (signum == SIGCONT && q->p_session == p->p_session)
292 return (1); /* SIGCONT in session */
293
294 /*
295 * Using kill(), only certain signals can be sent to setugid
296 * child processes
297 */
298 if (q->p_flag & P_SUGID) {
299 switch (signum) {
300 case 0:
301 case SIGKILL:
302 case SIGINT:
303 case SIGTERM:
304 case SIGSTOP:
305 case SIGTTIN:
306 case SIGTTOU:
307 case SIGTSTP:
308 case SIGHUP:
309 case SIGUSR1:
310 case SIGUSR2:
311 if (uc->cr_ruid == q->p_ucred->cr_ruid ||
312 kauth_cred_getuid(uc) == q->p_ucred->cr_ruid ||
313 uc->cr_ruid == kauth_cred_getuid(q->p_ucred) ||
314 kauth_cred_getuid(uc) == kauth_cred_getuid(q->p_ucred))
315 return (1);
316 }
317 return (0);
318 }
319
320 /* XXX
321 * because the P_SUGID test exists, this has extra tests which
322 * could be removed.
323 */
324 if (uc->cr_ruid == q->p_ucred->cr_ruid ||
325 uc->cr_ruid == q->p_ucred->cr_svuid ||
326 kauth_cred_getuid(uc) == q->p_ucred->cr_ruid ||
327 kauth_cred_getuid(uc) == q->p_ucred->cr_svuid ||
328 uc->cr_ruid == kauth_cred_getuid(q->p_ucred) ||
329 kauth_cred_getuid(uc) == kauth_cred_getuid(q->p_ucred))
330 return (1);
331 return (0);
332 }
333
334
335 /* ARGSUSED */
336 int
337 sigaction(struct proc *p, register struct sigaction_args *uap, __unused register_t *retval)
338 {
339 struct user_sigaction vec;
340 struct __user_sigaction __vec;
341
342 struct user_sigaction *sa = &vec;
343 register struct sigacts *ps = p->p_sigacts;
344
345 register int signum;
346 int bit, error=0;
347
348 signum = uap->signum;
349 if (signum <= 0 || signum >= NSIG ||
350 signum == SIGKILL || signum == SIGSTOP)
351 return (EINVAL);
352
353 if (uap->osa) {
354 sa->sa_handler = ps->ps_sigact[signum];
355 sa->sa_mask = ps->ps_catchmask[signum];
356 bit = sigmask(signum);
357 sa->sa_flags = 0;
358 if ((ps->ps_sigonstack & bit) != 0)
359 sa->sa_flags |= SA_ONSTACK;
360 if ((ps->ps_sigintr & bit) == 0)
361 sa->sa_flags |= SA_RESTART;
362 if (ps->ps_siginfo & bit)
363 sa->sa_flags |= SA_SIGINFO;
364 if (ps->ps_signodefer & bit)
365 sa->sa_flags |= SA_NODEFER;
366 if (ps->ps_64regset & bit)
367 sa->sa_flags |= SA_64REGSET;
368 if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDSTOP))
369 sa->sa_flags |= SA_NOCLDSTOP;
370 if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDWAIT))
371 sa->sa_flags |= SA_NOCLDWAIT;
372
373 if (IS_64BIT_PROCESS(p)) {
374 error = copyout(sa, uap->osa, sizeof(struct user_sigaction));
375 } else {
376 struct sigaction vec32;
377 sigaction_64to32(sa, &vec32);
378 error = copyout(&vec32, uap->osa, sizeof(struct sigaction));
379 }
380 if (error)
381 return (error);
382 }
383 if (uap->nsa) {
384 if (IS_64BIT_PROCESS(p)) {
385 error = copyin(uap->nsa, &__vec, sizeof(struct __user_sigaction));
386 } else {
387 struct __sigaction __vec32;
388 error = copyin(uap->nsa, &__vec32, sizeof(struct __sigaction));
389 __sigaction_32to64(&__vec32, &__vec);
390 }
391 if (error)
392 return (error);
393 error = setsigvec(p, signum, &__vec);
394 }
395 return (error);
396 }
397
398 /* Routines to manipulate bits on all threads */
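/*
 * Clear the given signal bit(s) from the pending-signal set of every
 * thread in the process (or of the vfork activation, if one is active).
 */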
399 int
400 clear_procsiglist(struct proc *p, int bit)
401 {
402 struct uthread * uth;
403 thread_t thact;
404
405 signal_lock(p);
406
407 if ((p->p_flag & P_INVFORK) && p->p_vforkact) {
408 thact = p->p_vforkact;
409 uth = (struct uthread *)get_bsdthread_info(thact);
410 if (uth) {
411 uth->uu_siglist &= ~bit;
412 }
413 p->p_siglist &= ~bit;
414 signal_unlock(p);
415 return(0);
416 }
417
418 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
419 uth->uu_siglist &= ~bit;
420 }
421 p->p_siglist &= ~bit;
422 signal_unlock(p);
423 return(0);
424 }
425
426
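/*
 * Remove the given bit(s) from the blocked-signal mask of every thread
 * in the process (or of the vfork activation), and from p_sigmask.
 */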
427 static int
428 unblock_procsigmask(struct proc *p, int bit)
429 {
430 struct uthread * uth;
431 thread_t thact;
432
433 signal_lock(p);
434 if ((p->p_flag & P_INVFORK) && p->p_vforkact) {
435 thact = p->p_vforkact;
436 uth = (struct uthread *)get_bsdthread_info(thact);
437 if (uth) {
438 uth->uu_sigmask &= ~bit;
439 }
440 p->p_sigmask &= ~bit;
441 signal_unlock(p);
442 return(0);
443 }
444 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
445 uth->uu_sigmask &= ~bit;
446 }
447 p->p_sigmask &= ~bit;
448 signal_unlock(p);
449 return(0);
450 }
451
452
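/*
 * Add the given bit(s) to the blocked-signal mask of every thread in
 * the process (or of the vfork activation), and to p_sigmask.
 */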
453 static int
454 block_procsigmask(struct proc *p, int bit)
455 {
456 struct uthread * uth;
457 thread_t thact;
458
459 signal_lock(p);
460 if ((p->p_flag & P_INVFORK) && p->p_vforkact) {
461 thact = p->p_vforkact;
462 uth = (struct uthread *)get_bsdthread_info(thact);
463 if (uth) {
464 uth->uu_sigmask |= bit;
465 }
466 p->p_sigmask |= bit;
467 signal_unlock(p);
468 return(0);
469 }
470 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
471 uth->uu_sigmask |= bit;
472 }
473 p->p_sigmask |= bit;
474 signal_unlock(p);
475 return(0);
476 }
477
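/*
 * Replace the blocked-signal mask of every thread in the process (or of
 * the vfork activation) and p_sigmask with the given value.
 */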
478 int
479 set_procsigmask(struct proc *p, int bit)
480 {
481 struct uthread * uth;
482 thread_t thact;
483
484 signal_lock(p);
485 if ((p->p_flag & P_INVFORK) && p->p_vforkact) {
486 thact = p->p_vforkact;
487 uth = (struct uthread *)get_bsdthread_info(thact);
488 if (uth) {
489 uth->uu_sigmask = bit;
490 }
491 p->p_sigmask = bit;
492 signal_unlock(p);
493 return(0);
494 }
495 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
496 uth->uu_sigmask = bit;
497 }
498 p->p_sigmask = bit;
499 signal_unlock(p);
500 return(0);
501 }
502
503 /* XXX should be static? */
504 int
505 setsigvec(struct proc *p, int signum, struct __user_sigaction *sa)
506 {
507 register struct sigacts *ps = p->p_sigacts;
508 register int bit;
509
510 if ((signum == SIGKILL || signum == SIGSTOP) &&
511 sa->sa_handler != SIG_DFL)
512 return(EINVAL);
513 bit = sigmask(signum);
514 /*
515 * Change setting atomically.
516 */
517 ps->ps_sigact[signum] = sa->sa_handler;
518 ps->ps_trampact[signum] = sa->sa_tramp;
519 ps->ps_catchmask[signum] = sa->sa_mask &~ sigcantmask;
520 if (sa->sa_flags & SA_SIGINFO)
521 ps->ps_siginfo |= bit;
522 else
523 ps->ps_siginfo &= ~bit;
524 if (sa->sa_flags & SA_64REGSET)
525 ps->ps_64regset |= bit;
526 else
527 ps->ps_64regset &= ~bit;
528 if ((sa->sa_flags & SA_RESTART) == 0)
529 ps->ps_sigintr |= bit;
530 else
531 ps->ps_sigintr &= ~bit;
532 if (sa->sa_flags & SA_ONSTACK)
533 ps->ps_sigonstack |= bit;
534 else
535 ps->ps_sigonstack &= ~bit;
536 if (sa->sa_flags & SA_USERTRAMP)
537 ps->ps_usertramp |= bit;
538 else
539 ps->ps_usertramp &= ~bit;
540 if (sa->sa_flags & SA_RESETHAND)
541 ps->ps_sigreset |= bit;
542 else
543 ps->ps_sigreset &= ~bit;
544 if (sa->sa_flags & SA_NODEFER)
545 ps->ps_signodefer |= bit;
546 else
547 ps->ps_signodefer &= ~bit;
548 if (signum == SIGCHLD) {
549 if (sa->sa_flags & SA_NOCLDSTOP)
550 p->p_flag |= P_NOCLDSTOP;
551 else
552 p->p_flag &= ~P_NOCLDSTOP;
553 if ((sa->sa_flags & SA_NOCLDWAIT) || (sa->sa_handler == SIG_IGN))
554 p->p_flag |= P_NOCLDWAIT;
555 else
556 p->p_flag &= ~P_NOCLDWAIT;
557 }
558
559 #ifdef __ppc__
560 if (signum == SIGFPE) {
561 if (sa->sa_handler == SIG_DFL || sa->sa_handler == SIG_IGN)
562 thread_enable_fpe(current_thread(), 0);
563 else
564 thread_enable_fpe(current_thread(), 1);
565 }
566 #endif /* __ppc__ */
567 /*
568 * Set bit in p_sigignore for signals that are set to SIG_IGN,
569 * and for signals set to SIG_DFL where the default is to ignore.
570 * However, don't put SIGCONT in p_sigignore,
571 * as we have to restart the process.
572 */
573 if (sa->sa_handler == SIG_IGN ||
574 (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) {
575
576 clear_procsiglist(p, bit);
577 if (signum != SIGCONT)
578 p->p_sigignore |= bit; /* easier in psignal */
579 p->p_sigcatch &= ~bit;
580 } else {
581 p->p_sigignore &= ~bit;
582 if (sa->sa_handler == SIG_DFL)
583 p->p_sigcatch &= ~bit;
584 else
585 p->p_sigcatch |= bit;
586 }
587 return(0);
588 }
589
590 /*
591 * Initialize signal state for process 0;
592 * set to ignore signals that are ignored by default.
593 */
594 void
595 siginit(p)
596 struct proc *p;
597 {
598 register int i;
599
600 for (i = 0; i < NSIG; i++)
601 if (sigprop[i] & SA_IGNORE && i != SIGCONT)
602 p->p_sigignore |= sigmask(i);
603 }
604
605 /*
606 * Reset signals for an exec of the specified process.
607 */
608 void
609 execsigs(p, thr_act)
610 register struct proc *p;
611 register thread_t thr_act;
612 {
613 register struct sigacts *ps = p->p_sigacts;
614 register int nc, mask;
615 struct uthread *ut;
616
617 /*
618 * Reset caught signals. Held signals remain held
619 * through p_sigmask (unless they were caught,
620 * and are now ignored by default).
621 */
622 while (p->p_sigcatch) {
623 nc = ffs((long)p->p_sigcatch);
624 mask = sigmask(nc);
625 p->p_sigcatch &= ~mask;
626 if (sigprop[nc] & SA_IGNORE) {
627 if (nc != SIGCONT)
628 p->p_sigignore |= mask;
629 if (thr_act){
630 ut = (struct uthread *)get_bsdthread_info(thr_act);
631 ut->uu_siglist &= ~mask;
632 p->p_siglist &= ~mask;
633 } else
634 clear_procsiglist(p, mask);
635 }
636 ps->ps_sigact[nc] = SIG_DFL;
637 }
638 /*
639 * Reset stack state to the user stack.
640 * Clear set of signals caught on the signal stack.
641 */
642 ps->ps_sigstk.ss_flags = SA_DISABLE;
643 ps->ps_sigstk.ss_size = 0;
644 ps->ps_sigstk.ss_sp = USER_ADDR_NULL;
645 ps->ps_flags = 0;
646 }
647
648 /*
649 * Manipulate the process signal mask.
650 * The new mask is copied in from uap->mask (if non-NULL) and the
651 * previous mask is copied out through uap->omask (if non-NULL).
652 */
654 int
655 sigprocmask(register struct proc *p, struct sigprocmask_args *uap, __unused register_t *retval)
656 {
657 int error = 0;
658 sigset_t oldmask, nmask;
659 user_addr_t omask = uap->omask;
660 struct uthread *ut;
661
662 ut = (struct uthread *)get_bsdthread_info(current_thread());
663 oldmask = ut->uu_sigmask;
664
665 if (uap->mask == USER_ADDR_NULL) {
666 /* just want old mask */
667 goto out;
668 }
669 error = copyin(uap->mask, &nmask, sizeof(sigset_t));
670 if (error)
671 goto out;
672
673 switch (uap->how) {
674 case SIG_BLOCK:
675 block_procsigmask(p, (nmask & ~sigcantmask));
676 signal_setast(current_thread());
677 break;
678
679 case SIG_UNBLOCK:
680 unblock_procsigmask(p, (nmask & ~sigcantmask));
681 signal_setast(current_thread());
682 break;
683
684 case SIG_SETMASK:
685 set_procsigmask(p, (nmask & ~sigcantmask));
686 signal_setast(current_thread());
687 break;
688
689 default:
690 error = EINVAL;
691 break;
692 }
693 out:
694 if (!error && omask != USER_ADDR_NULL)
695 copyout(&oldmask, omask, sizeof(sigset_t));
696 return (error);
697 }
698
699 int
700 sigpending(__unused struct proc *p, register struct sigpending_args *uap, __unused register_t *retval)
701 {
702 struct uthread *ut;
703 sigset_t pendlist;
704
705 ut = (struct uthread *)get_bsdthread_info(current_thread());
706 pendlist = ut->uu_siglist;
707
708 if (uap->osv)
709 copyout(&pendlist, uap->osv, sizeof(sigset_t));
710 return(0);
711 }
712
713
714 /*
715 * Suspend process until signal, providing mask to be set
716 * in the meantime. Note nonstandard calling convention:
717 * libc stub passes mask, not pointer, to save a copyin.
718 */
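/*
 * Purely illustrative sketch of the user-level side of that convention
 * (hypothetical stub, not the actual Libc source): the wrapper passes
 * the mask by value, so no copyin is needed in the kernel:
 *
 *	int sigsuspend(const sigset_t *mask)
 *	{
 *		return __sigsuspend_trap(*mask);	// hypothetical trap name; mask passed by value
 *	}
 */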
719
720 static int
721 sigcontinue(__unused int error)
722 {
723 // struct uthread *ut = get_bsdthread_info(current_thread());
724 unix_syscall_return(EINTR);
725 }
726
727 int
728 sigsuspend(register struct proc *p, struct sigsuspend_args *uap, __unused register_t *retval)
729 {
730 struct uthread *ut;
731
732 ut = (struct uthread *)get_bsdthread_info(current_thread());
733
734 /*
735 * When returning from sigpause, we want
736 * the old mask to be restored after the
737 * signal handler has finished. Thus, we
738 * save it here and mark the sigacts structure
739 * to indicate this.
740 */
741 ut->uu_oldmask = ut->uu_sigmask;
742 ut->uu_flag |= UT_SAS_OLDMASK;
743 ut->uu_sigmask = (uap->mask & ~sigcantmask);
744 (void) tsleep0((caddr_t) p, PPAUSE|PCATCH, "pause", 0, sigcontinue);
745 /* always return EINTR rather than ERESTART... */
746 return (EINTR);
747 }
748
749
750 int
751 __disable_threadsignal(struct proc *p,
752 __unused register struct __disable_threadsignal_args *uap,
753 __unused register_t *retval)
754 {
755 struct uthread *uth;
756
757 uth = (struct uthread *)get_bsdthread_info(current_thread());
758
759 /* No longer valid to have any signal delivered */
760 signal_lock(p);
761 uth->uu_flag |= UT_NO_SIGMASK;
762 signal_unlock(p);
763
764 return(0);
765
766 }
767
768
769 int
770 __pthread_markcancel(p, uap, retval)
771 struct proc *p;
772 register struct __pthread_markcancel_args *uap;
773 register_t *retval;
774 {
775 thread_act_t target_act;
776 int error = 0;
777 struct uthread *uth;
778
779 target_act = (thread_act_t)port_name_to_thread(uap->thread_port);
780
781 if (target_act == THR_ACT_NULL)
782 return (ESRCH);
783
784 uth = (struct uthread *)get_bsdthread_info(target_act);
785
786 /* if the thread is in vfork do not cancel */
787 if ((uth->uu_flag & (UT_VFORK | UT_CANCEL | UT_CANCELED)) == 0) {
788 uth->uu_flag |= (UT_CANCEL | UT_NO_SIGMASK);
789 if (((uth->uu_flag & UT_NOTCANCELPT) == 0)
790 && ((uth->uu_flag & UT_CANCELDISABLE) == 0))
791 thread_abort_safely(target_act);
792 }
793
794 thread_deallocate(target_act);
795 return (error);
796 }
797
798 /*
799 * action == 0: return the cancellation state; if marked for cancellation, mark the thread canceled.
800 * action == 1: enable cancel handling.
801 * action == 2: disable cancel handling.
802 */
803 int
804 __pthread_canceled(p, uap, retval)
805 struct proc *p;
806 register struct __pthread_canceled_args *uap;
807 register_t *retval;
808 {
809 thread_act_t thr_act;
810 struct uthread *uth;
811 int action = uap->action;
812
813 thr_act = current_act();
814 uth = (struct uthread *)get_bsdthread_info(thr_act);
815
816 switch (action) {
817 case 1:
818 uth->uu_flag &= ~UT_CANCELDISABLE;
819 return(0);
820 case 2:
821 uth->uu_flag |= UT_CANCELDISABLE;
822 return(0);
823 case 0:
824 default:
825 /* if the thread is in vfork do not cancel */
826 if((uth->uu_flag & ( UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
827 uth->uu_flag &= ~UT_CANCEL;
828 uth->uu_flag |= (UT_CANCELED | UT_NO_SIGMASK);
829 return(0);
830 }
831 return(EINVAL);
832 }
833 return(EINVAL);
834 }
835
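/*
 * Continuation used by the semaphore traps below: translate the Mach
 * wait result into an errno value and return to user space.
 */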
836 void
837 __posix_sem_syscall_return(kern_return_t kern_result)
838 {
839 int error = 0;
840
841 if (kern_result == KERN_SUCCESS)
842 error = 0;
843 else if (kern_result == KERN_ABORTED)
844 error = EINTR;
845 else if (kern_result == KERN_OPERATION_TIMED_OUT)
846 error = ETIMEDOUT;
847 else
848 error = EINVAL;
849 unix_syscall_return(error);
850 /* does not return */
851 }
852
853
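/*
 * __semwait_signal: wait on the condition semaphore (optionally pairing
 * a mutex semaphore to signal), with an optional relative or absolute
 * timeout, via the Mach semaphore traps.
 */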
854 int
855 __semwait_signal(p, uap, retval)
856 struct proc *p;
857 register struct __semwait_signal_args *uap;
858 register_t *retval;
859 {
860
861 kern_return_t kern_result;
862 mach_timespec_t then;
863 struct timespec now;
864
865 if(uap->timeout) {
866
867 if (uap->relative) {
868 then.tv_sec = uap->tv_sec;
869 then.tv_nsec = uap->tv_nsec;
870 } else {
871 nanotime(&now);
872 then.tv_sec = uap->tv_sec - now.tv_sec;
873 then.tv_nsec = uap->tv_nsec - now.tv_nsec;
874 if (then.tv_nsec < 0) {
875 then.tv_nsec += NSEC_PER_SEC;
876 then.tv_sec--;
877 }
878 }
879
880 if (uap->mutex_sem == (void *)NULL)
881 kern_result = semaphore_timedwait_trap_internal(uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
882 else
883 kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
884
885 } else {
886
887 if (uap->mutex_sem == (void *)NULL)
888 kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return);
889 else
890
891 kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return);
892 }
893
894 out:
895 if (kern_result == KERN_SUCCESS)
896 return(0);
897 else if (kern_result == KERN_ABORTED)
898 return(EINTR);
899 else if (kern_result == KERN_OPERATION_TIMED_OUT)
900 return(ETIMEDOUT);
901 else
902 return(EINVAL);
903 }
904
905
906 int
907 __pthread_kill(__unused struct proc *p,
908 register struct __pthread_kill_args *uap,
909 __unused register_t *retval)
910 {
911 thread_t target_act;
912 int error = 0;
913 int signum = uap->sig;
914 struct uthread *uth;
915
916 target_act = (thread_t)port_name_to_thread(uap->thread_port);
917
918 if (target_act == THREAD_NULL)
919 return (ESRCH);
920 if ((u_int)signum >= NSIG) {
921 error = EINVAL;
922 goto out;
923 }
924
925 uth = (struct uthread *)get_bsdthread_info(target_act);
926
927 if (uth->uu_flag & UT_NO_SIGMASK) {
928 error = ESRCH;
929 goto out;
930 }
931
932 if (signum)
933 psignal_uthread(target_act, signum);
934 out:
935 thread_deallocate(target_act);
936 return (error);
937 }
938
939
940 int
941 pthread_sigmask(__unused register struct proc *p,
942 register struct pthread_sigmask_args *uap,
943 __unused register_t *retval)
944 {
945 user_addr_t set = uap->set;
946 user_addr_t oset = uap->oset;
947 sigset_t nset;
948 int error = 0;
949 struct uthread *ut;
950 sigset_t oldset;
951
952 ut = (struct uthread *)get_bsdthread_info(current_thread());
953 oldset = ut->uu_sigmask;
954
955 if (set == USER_ADDR_NULL) {
956 /* need only old mask */
957 goto out;
958 }
959
960 error = copyin(set, &nset, sizeof(sigset_t));
961 if (error)
962 goto out;
963
964 switch (uap->how) {
965 case SIG_BLOCK:
966 ut->uu_sigmask |= (nset & ~sigcantmask);
967 break;
968
969 case SIG_UNBLOCK:
970 ut->uu_sigmask &= ~(nset);
971 signal_setast(current_thread());
972 break;
973
974 case SIG_SETMASK:
975 ut->uu_sigmask = (nset & ~sigcantmask);
976 signal_setast(current_thread());
977 break;
978
979 default:
980 error = EINVAL;
981
982 }
983 out:
984 if (!error && oset != USER_ADDR_NULL)
985 copyout(&oldset, oset, sizeof(sigset_t));
986
987 return(error);
988 }
989
990
991 int
992 sigwait(register struct proc *p, register struct sigwait_args *uap, __unused register_t *retval)
993 {
994 struct uthread *ut;
995 struct uthread *uth;
996 int error = 0;
997 sigset_t mask;
998 sigset_t siglist;
999 sigset_t sigw=0;
1000 int signum;
1001
1002 ut = (struct uthread *)get_bsdthread_info(current_thread());
1003
1004 if (uap->set == USER_ADDR_NULL)
1005 return(EINVAL);
1006
1007 error = copyin(uap->set, &mask, sizeof(sigset_t));
1008 if (error)
1009 return(error);
1010
1011 siglist = (mask & ~sigcantmask);
1012
1013 if (siglist == 0)
1014 return(EINVAL);
1015
1016 signal_lock(p);
1017 if ((p->p_flag & P_INVFORK) && p->p_vforkact) {
1018 signal_unlock(p);
1019 return(EINVAL);
1020 } else {
1021 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
1022 if ( (sigw = uth->uu_siglist & siglist) ) {
1023 break;
1024 }
1025 }
1026 }
1027 signal_unlock(p);
1028 if (sigw) {
1029 /* The signal was pending on a thread */
1030 goto sigwait1;
1031 }
1032 /*
1033 * When returning from sigwait, we want
1034 * the old mask to be restored after the
1035 * signal handler has finished. Thus, we
1036 * save it here and mark the sigacts structure
1037 * to indicate this.
1038 */
1039 ut->uu_oldmask = ut->uu_sigmask;
1040 ut->uu_flag |= UT_SAS_OLDMASK;
1041 if (siglist == (sigset_t)0)
1042 return(EINVAL);
1043 /* SIGKILL and SIGSTOP are not maskable either */
1044 ut->uu_sigmask = ~(siglist|sigcantmask);
1045 ut->uu_sigwait = siglist;
1046 /* No Continuations for now */
1047 error = tsleep((caddr_t)&ut->uu_sigwait, PPAUSE|PCATCH, "pause", 0);
1048
1049 if ((error == EINTR) || (error == ERESTART))
1050 error = 0;
1051
1052 sigw = (ut->uu_sigwait & siglist);
1053 ut->uu_sigmask = ut->uu_oldmask;
1054 ut->uu_oldmask = 0;
1055 ut->uu_flag &= ~UT_SAS_OLDMASK;
1056 sigwait1:
1057 ut->uu_sigwait = 0;
1058 if (!error) {
1059 signum = ffs((unsigned int)sigw);
1060 if (!signum)
1061 panic("sigwait with no signal wakeup");
1062 ut->uu_siglist &= ~(sigmask(signum));
1063 if (uap->sig != USER_ADDR_NULL)
1064 error = copyout(&signum, uap->sig, sizeof(int));
1065 }
1066
1067 return(error);
1068
1069 }
1070
1071
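/*
 * sigaltstack: get and/or set the per-process alternate signal stack
 * (SAS_ALTSTACK / SA_DISABLE bookkeeping lives in p_sigacts).
 */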
1072 int
1073 sigaltstack(struct proc *p, register struct sigaltstack_args *uap, __unused register_t *retval)
1074 {
1075 struct sigacts *psp;
1076 struct user_sigaltstack ss;
1077 int error;
1078
1079 psp = p->p_sigacts;
1080 if ((psp->ps_flags & SAS_ALTSTACK) == 0)
1081 psp->ps_sigstk.ss_flags |= SA_DISABLE;
1082 if (uap->oss) {
1083 if (IS_64BIT_PROCESS(p)) {
1084 error = copyout(&psp->ps_sigstk, uap->oss, sizeof(struct user_sigaltstack));
1085 } else {
1086 struct sigaltstack ss32;
1087 sigaltstack_64to32(&psp->ps_sigstk, &ss32);
1088 error = copyout(&ss32, uap->oss, sizeof(struct sigaltstack));
1089 }
1090 if (error)
1091 return (error);
1092 }
1093 if (uap->nss == USER_ADDR_NULL)
1094 return (0);
1095 if (IS_64BIT_PROCESS(p)) {
1096 error = copyin(uap->nss, &ss, sizeof(struct user_sigaltstack));
1097 } else {
1098 struct sigaltstack ss32;
1099 error = copyin(uap->nss, &ss32, sizeof(struct sigaltstack));
1100 sigaltstack_32to64(&ss32,&ss);
1101 }
1102 if (error)
1103 return (error);
1104 if ((ss.ss_flags & ~SA_DISABLE) != 0) {
1105 return(EINVAL);
1106 }
1107
1108 if (ss.ss_flags & SA_DISABLE) {
1109 if (psp->ps_sigstk.ss_flags & SA_ONSTACK)
1110 return (EINVAL);
1111 psp->ps_flags &= ~SAS_ALTSTACK;
1112 psp->ps_sigstk.ss_flags = ss.ss_flags;
1113 return (0);
1114 }
1115 /* The old minimum stack size was 8K; enforce it to avoid compatibility problems */
1116 #define OLDMINSIGSTKSZ 8*1024
1117 if (ss.ss_size < OLDMINSIGSTKSZ)
1118 return (ENOMEM);
1119 psp->ps_flags |= SAS_ALTSTACK;
1120 psp->ps_sigstk= ss;
1121 return (0);
1122 }
1123
1124 int
1125 kill(struct proc *cp, struct kill_args *uap, __unused register_t *retval)
1126 {
1127 register struct proc *p;
1128 kauth_cred_t uc = kauth_cred_get();
1129
1130 AUDIT_ARG(pid, uap->pid);
1131 AUDIT_ARG(signum, uap->signum);
1132
1133 if ((u_int)uap->signum >= NSIG)
1134 return (EINVAL);
1135 if (uap->pid > 0) {
1136 /* kill single process */
1137 if ((p = proc_findref(uap->pid)) == NULL) {
1138 if ((p = pzfind(uap->pid)) != NULL) {
1139 /*
1140 * IEEE Std 1003.1-2001: return success
1141 * when killing a zombie.
1142 */
1143 return (0);
1144 }
1145 return (ESRCH);
1146 }
1147 AUDIT_ARG(process, p);
1148 if (!cansignal(cp, uc, p, uap->signum)) {
1149 proc_dropref(p);
1150 return(EPERM);
1151 }
1152 if (uap->signum)
1153 psignal(p, uap->signum);
1154 proc_dropref(p);
1155 return (0);
1156 }
1157 switch (uap->pid) {
1158 case -1: /* broadcast signal */
1159 return (killpg1(cp, uap->signum, 0, 1));
1160 case 0: /* signal own process group */
1161 return (killpg1(cp, uap->signum, 0, 0));
1162 default: /* negative explicit process group */
1163 return (killpg1(cp, uap->signum, -(uap->pid), 0));
1164 }
1165 /* NOTREACHED */
1166 }
1167
1168
1169 /*
1170 * Common code for kill process group/broadcast kill.
1171 * cp is calling process.
1172 */
1173 int
1174 killpg1(cp, signum, pgid, all)
1175 register struct proc *cp;
1176 int signum, pgid, all;
1177 {
1178 register struct proc *p;
1179 kauth_cred_t uc = cp->p_ucred;
1180 struct pgrp *pgrp;
1181 int nfound = 0;
1182
1183 if (all) {
1184 /*
1185 * broadcast
1186 */
1187 for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
1188 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
1189 p == cp || !cansignal(cp, uc, p, signum))
1190 continue;
1191 nfound++;
1192 if (signum)
1193 psignal(p, signum);
1194 }
1195 } else {
1196 if (pgid == 0)
1197 /*
1198 * zero pgid means send to my process group.
1199 */
1200 pgrp = cp->p_pgrp;
1201 else {
1202 pgrp = pgfind(pgid);
1203 if (pgrp == NULL)
1204 return (ESRCH);
1205 }
1206 for (p = pgrp->pg_members.lh_first; p != 0;
1207 p = p->p_pglist.le_next) {
1208 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
1209 p->p_stat == SZOMB ||
1210 !cansignal(cp, uc, p, signum))
1211 continue;
1212 nfound++;
1213 if (signum)
1214 psignal(p, signum);
1215 }
1216 }
1217 return (nfound ? 0 : ESRCH);
1218 }
1219
1220 /*
1221 * Send a signal to a process group.
1222 */
1223 void
1224 gsignal(pgid, signum)
1225 int pgid, signum;
1226 {
1227 struct pgrp *pgrp;
1228
1229 if (pgid && (pgrp = pgfind(pgid)))
1230 pgsignal(pgrp, signum, 0);
1231 }
1232
1233 /*
1234 * Send a signal to a process group. If checkctty is 1,
1235 * limit to members which have a controlling terminal.
1236 */
1237 void
1238 pgsignal(pgrp, signum, checkctty)
1239 struct pgrp *pgrp;
1240 int signum, checkctty;
1241 {
1242 register struct proc *p;
1243
1244 if (pgrp)
1245 for (p = pgrp->pg_members.lh_first; p != 0;
1246 p = p->p_pglist.le_next)
1247 if (checkctty == 0 || p->p_flag & P_CONTROLT)
1248 psignal(p, signum);
1249 }
1250
1251 /*
1252 * Send a signal to a backgrounded process blocked due to tty access.
1253 * In FreeBSD, the backgrounded process wakes up every second and
1254 * checks whether it has been foregrounded. In our case, we block the
1255 * thread in tsleep, both to avoid a storm of periodic wakeups and
1256 * because the suspend happens only at AST level.
1257 */
1258 void
1259 tty_pgsignal(pgrp, signum)
1260 struct pgrp *pgrp;
1261 int signum;
1262 {
1263 register struct proc *p;
1264
1265 if (pgrp)
1266 for (p = pgrp->pg_members.lh_first; p != 0;
1267 p = p->p_pglist.le_next)
1268 if ((p->p_flag & P_TTYSLEEP) && (p->p_flag & P_CONTROLT))
1269 psignal(p, signum);
1270 }
1271
1272 /*
1273 * Send a signal caused by a trap to a specific thread.
1274 */
1275 void
1276 threadsignal(thread_t sig_actthread, int signum, u_long code)
1277 {
1278 register struct uthread *uth;
1279 register struct task * sig_task;
1280 register struct proc *p ;
1281 int mask;
1282
1283 if ((u_int)signum >= NSIG || signum == 0)
1284 return;
1285
1286 mask = sigmask(signum);
1287 if ((mask & threadmask) == 0)
1288 return;
1289 sig_task = get_threadtask(sig_actthread);
1290 p = (struct proc *)(get_bsdtask_info(sig_task));
1291
1292 uth = get_bsdthread_info(sig_actthread);
1293 if (uth && (uth->uu_flag & UT_VFORK))
1294 p = uth->uu_proc;
1295
1296 if (!(p->p_flag & P_TRACED) && (p->p_sigignore & mask))
1297 return;
1298
1299 uth->uu_siglist |= mask;
1300 p->p_siglist |= mask; /* just for lame ones looking here */
1301 uth->uu_code = code;
1302 /* mark on process as well */
1303 signal_setast(sig_actthread);
1304 }
1305
1306
1307 void
1308 psignal(p, signum)
1309 register struct proc *p;
1310 register int signum;
1311 {
1312 psignal_lock(p, signum, 1);
1313 }
1314
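/*
 * Post a signal directly to the given activation (thr_act) in new_task
 * on behalf of process p; used for signal delivery during vfork().
 */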
1315 void
1316 psignal_vfork(struct proc *p, task_t new_task, thread_t thr_act, int signum)
1317 {
1318 register int prop;
1319 register sig_t action;
1320 int mask;
1321 struct uthread *uth;
1322
1323 if ((u_int)signum >= NSIG || signum == 0)
1324 panic("psignal signal number");
1325 mask = sigmask(signum);
1326 prop = sigprop[signum];
1327
1328 #if SIGNAL_DEBUG
1329 if(rdebug_proc && (p == rdebug_proc)) {
1330 ram_printf(3);
1331 }
1332 #endif /* SIGNAL_DEBUG */
1333
1334 if ((new_task == TASK_NULL) || (thr_act == (thread_t)NULL) || is_kerneltask(new_task))
1335 return;
1336
1337
1338 uth = get_bsdthread_info(thr_act);
1339 signal_lock(p);
1340
1341 /*
1342 * proc is traced, always give parent a chance.
1343 */
1344 action = SIG_DFL;
1345
1346 if (p->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
1347 (p->p_flag & P_TRACED) == 0)
1348 p->p_nice = NZERO;
1349
1350 if (prop & SA_CONT) {
1351 p->p_siglist &= ~stopsigmask;
1352 uth->uu_siglist &= ~stopsigmask;
1353 }
1354
1355 if (prop & SA_STOP) {
1356 /*
1357 * If sending a tty stop signal to a member of an orphaned
1358 * process group, discard the signal here if the action
1359 * is default; don't stop the process below if sleeping,
1360 * and don't clear any pending SIGCONT.
1361 */
1362 if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0 &&
1363 action == SIG_DFL)
1364 goto psigout;
1365 uth->uu_siglist &= ~contsigmask;
1366 p->p_siglist &= ~contsigmask;
1367 }
1368 uth->uu_siglist |= mask;
1369 p->p_siglist |= mask; /* just for lame ones looking here */
1370
1371 /* Deliver signal to the activation passed in */
1372 act_set_astbsd(thr_act);
1373
1374 /*
1375 * SIGKILL priority twiddling moved here from above because
1376 * it needs sig_thread. Could merge it into large switch
1377 * below if we didn't care about priority for tracing
1378 * as SIGKILL's action is always SIG_DFL.
1379 */
1380 if ((signum == SIGKILL) && (p->p_nice > NZERO)) {
1381 p->p_nice = NZERO;
1382 }
1383
1384 /*
1385 * This Process is traced - wake it up (if not already
1386 * stopped) so that it can discover the signal in
1387 * issig() and stop for the parent.
1388 */
1389 if (p->p_flag & P_TRACED) {
1390 if (p->p_stat != SSTOP)
1391 goto run;
1392 else
1393 goto psigout;
1394 }
1395 run:
1396 /*
1397 * If we're being traced (possibly because someone attached us
1398 * while we were stopped), check for a signal from the debugger.
1399 */
1400 if (p->p_stat == SSTOP) {
1401 if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0) {
1402 uth->uu_siglist |= sigmask(p->p_xstat);
1403 p->p_siglist |= mask; /* just for lame ones looking here */
1404 }
1405 }
1406
1407 /*
1408 * setrunnable(p) in BSD
1409 */
1410 p->p_stat = SRUN;
1411
1412 psigout:
1413 signal_unlock(p);
1414 }
1415
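/*
 * Pick a thread in the process that is eligible to take the signal:
 * the vfork activation if one is active, otherwise the first thread
 * that is not blocking the signal (or is sigwait()ing on it); the AST
 * is set on the chosen thread before it is returned.
 */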
1416 static thread_t
1417 get_signalthread(struct proc *p, int signum)
1418 {
1419 struct uthread *uth;
1420 thread_t thr_act;
1421 sigset_t mask = sigmask(signum);
1422 thread_t sig_thread_act;
1423 struct task * sig_task = p->task;
1424 kern_return_t kret;
1425
1426 if ((p->p_flag & P_INVFORK) && p->p_vforkact) {
1427 sig_thread_act = p->p_vforkact;
1428 kret = check_actforsig(sig_task, sig_thread_act, 1);
1429 if (kret == KERN_SUCCESS)
1430 return(sig_thread_act);
1431 else
1432 return(THREAD_NULL);
1433 }
1434
1435 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
1436 if(((uth->uu_flag & UT_NO_SIGMASK)== 0) &&
1437 (((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask))) {
1438 if (check_actforsig(p->task, uth->uu_act, 1) == KERN_SUCCESS)
1439 return(uth->uu_act);
1440 }
1441 }
1442 if (get_signalact(p->task, &thr_act, 1) == KERN_SUCCESS) {
1443 return(thr_act);
1444 }
1445
1446 return(THREAD_NULL);
1447 }
1448
1449 /*
1450 * Send the signal to the process. If the signal has an action, the action
1451 * is usually performed by the target process rather than the caller; we add
1452 * the signal to the set of pending signals for the process.
1453 *
1454 * Exceptions:
1455 * o When a stop signal is sent to a sleeping process that takes the
1456 * default action, the process is stopped without awakening it.
1457 * o SIGCONT restarts stopped processes (or puts them back to sleep)
1458 * regardless of the signal action (eg, blocked or ignored).
1459 *
1460 * Other ignored signals are discarded immediately.
1461 */
1462 void
1463 psignal_lock(p, signum, withlock)
1464 register struct proc *p;
1465 register int signum;
1466 register int withlock;
1467 {
1468 register int s, prop;
1469 register sig_t action;
1470 thread_t sig_thread_act;
1471 register task_t sig_task;
1472 int mask;
1473 struct uthread *uth;
1474 boolean_t funnel_state = FALSE;
1475 int sw_funnel = 0;
1476
1477 if ((u_int)signum >= NSIG || signum == 0)
1478 panic("psignal signal number");
1479 mask = sigmask(signum);
1480 prop = sigprop[signum];
1481
1482 #if SIGNAL_DEBUG
1483 if(rdebug_proc && (p == rdebug_proc)) {
1484 ram_printf(3);
1485 }
1486 #endif /* SIGNAL_DEBUG */
1487
1488 if (thread_funnel_get() == (funnel_t *)0) {
1489 sw_funnel = 1;
1490 funnel_state = thread_funnel_set(kernel_flock, TRUE);
1491 }
1492 /*
1493 * We will need the task pointer later. Grab it now to
1494 * check for a zombie process. Also don't send signals
1495 * to kernel internal tasks.
1496 */
1497 if (((sig_task = p->task) == TASK_NULL) || is_kerneltask(sig_task)) {
1498 if (sw_funnel)
1499 thread_funnel_set(kernel_flock, funnel_state);
1500 return;
1501 }
1502
1503 s = splhigh();
1504 KNOTE(&p->p_klist, NOTE_SIGNAL | signum);
1505 splx(s);
1506
1507 /*
1508 * Do not send signals to a process whose thread is doing a reboot();
1509 * doing so would mark that thread aborted and can cause I/O failures,
1510 * which would cause data loss.
1511 */
1512 if (ISSET(p->p_flag, P_REBOOT)) {
1513 if (sw_funnel)
1514 thread_funnel_set(kernel_flock, funnel_state);
1515 return;
1516 }
1517
1518 if (withlock)
1519 signal_lock(p);
1520
1521 /*
1522 * Deliver the signal to the first thread in the task. This
1523 * allows single threaded applications which use signals to
1524 * be able to be linked with multithreaded libraries. We have
1525 * an implicit reference to the current thread, but need
1526 * an explicit one otherwise. The thread reference keeps
1527 * the corresponding task data structures around too. This
1528 * reference is released by thread_deallocate.
1529 */
1530
1531 if (((p->p_flag & P_TRACED) == 0) && (p->p_sigignore & mask))
1532 goto psigout;
1533
1534 /* If successful return with ast set */
1535 sig_thread_act = get_signalthread(p, signum);
1536
1537 if (sig_thread_act == THREAD_NULL) {
1538 /* XXXX FIXME
1539 * if it is SIGKILL, maybe we should
1540 * inject a thread to terminate
1541 */
1542 #if SIGNAL_DEBUG
1543 ram_printf(1);
1544 #endif /* SIGNAL_DEBUG */
1545 goto psigout;
1546 }
1547
1548 uth = get_bsdthread_info(sig_thread_act);
1549
1550 /*
1551 * If proc is traced, always give parent a chance.
1552 */
1553 if (p->p_flag & P_TRACED)
1554 action = SIG_DFL;
1555 else {
1556 /*
1557 * If the signal is being ignored,
1558 * then we forget about it immediately.
1559 * (Note: we don't set SIGCONT in p_sigignore,
1560 * and if it is set to SIG_IGN,
1561 * action will be SIG_DFL here.)
1562 */
1563 if (p->p_sigignore & mask)
1564 goto psigout;
1565 /* sigwait takes precedence */
1566 if (uth->uu_sigwait & mask)
1567 action = KERN_SIG_WAIT;
1568 else if (uth->uu_sigmask & mask)
1569 action = KERN_SIG_HOLD;
1570 else if (p->p_sigcatch & mask)
1571 action = KERN_SIG_CATCH;
1572 else
1573 action = SIG_DFL;
1574 }
1575
1576 if (p->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
1577 (p->p_flag & P_TRACED) == 0)
1578 p->p_nice = NZERO;
1579
1580 if (prop & SA_CONT) {
1581 uth->uu_siglist &= ~stopsigmask;
1582 p->p_siglist &= ~stopsigmask;
1583 }
1584
1585 if (prop & SA_STOP) {
1586 /*
1587 * If sending a tty stop signal to a member of an orphaned
1588 * process group, discard the signal here if the action
1589 * is default; don't stop the process below if sleeping,
1590 * and don't clear any pending SIGCONT.
1591 */
1592 if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0 &&
1593 action == SIG_DFL)
1594 goto psigout;
1595 uth->uu_siglist &= ~contsigmask;
1596 p->p_siglist &= ~contsigmask;
1597 }
1598 uth->uu_siglist |= mask;
1599 p->p_siglist |= mask; /* just for lame ones looking here */
1600
1601
1602 /*
1603 * Defer further processing for signals which are held,
1604 * except that stopped processes must be continued by SIGCONT.
1605 */
1606 if (action == KERN_SIG_HOLD && ((prop & SA_CONT) == 0 || p->p_stat != SSTOP)) {
1607 goto psigout;
1608 }
1609 /*
1610 * SIGKILL priority twiddling moved here from above because
1611 * it needs sig_thread. Could merge it into large switch
1612 * below if we didn't care about priority for tracing
1613 * as SIGKILL's action is always SIG_DFL.
1614 */
1615 if ((signum == SIGKILL) && (p->p_nice > NZERO)) {
1616 p->p_nice = NZERO;
1617 }
1618
1619 /*
1620 * Process is traced - wake it up (if not already
1621 * stopped) so that it can discover the signal in
1622 * issig() and stop for the parent.
1623 */
1624 if (p->p_flag & P_TRACED) {
1625 if (p->p_stat != SSTOP)
1626 goto run;
1627 else
1628 goto psigout;
1629 }
1630
1631 if (action == KERN_SIG_WAIT) {
1632 uth->uu_sigwait = mask;
1633 uth->uu_siglist &= ~mask;
1634 p->p_siglist &= ~mask;
1635 wakeup(&uth->uu_sigwait);
1636 /* if it is SIGCONT resume whole process */
1637 if (prop & SA_CONT) {
1638 p->p_flag |= P_CONTINUED;
1639 (void) task_resume(sig_task);
1640 }
1641 goto psigout;
1642 }
1643
1644 if (action != SIG_DFL) {
1645 /*
1646 * User wants to catch the signal.
1647 * Wake up the thread, but don't un-suspend it
1648 * (except for SIGCONT).
1649 */
1650 if (prop & SA_CONT) {
1651 if (p->p_flag & P_TTYSLEEP) {
1652 p->p_flag &= ~P_TTYSLEEP;
1653 wakeup(&p->p_siglist);
1654 } else {
1655 p->p_flag |= P_CONTINUED;
1656 (void) task_resume(sig_task);
1657 }
1658 p->p_stat = SRUN;
1659 } else if (p->p_stat == SSTOP)
1660 goto psigout;
1661 goto run;
1662 } else {
1663 /* Default action - varies */
1664 if (mask & stopsigmask) {
1665 /*
1666 * These are the signals which by default
1667 * stop a process.
1668 *
1669 * Don't clog system with children of init
1670 * stopped from the keyboard.
1671 */
1672 if (!(prop & SA_STOP) && p->p_pptr == initproc) {
1673 psignal_lock(p, SIGKILL, 0);
1674 uth->uu_siglist &= ~mask;
1675 p->p_siglist &= ~mask;
1676 goto psigout;
1677 }
1678
1679 /*
1680 * Stop the task
1681 * if task hasn't already been stopped by
1682 * a signal.
1683 */
1684 uth->uu_siglist &= ~mask;
1685 p->p_siglist &= ~mask;
1686 if (p->p_stat != SSTOP) {
1687 p->p_xstat = signum;
1688 stop(p);
1689 if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0) {
1690 struct proc *pp = p->p_pptr;
1691
1692 pp->si_pid = p->p_pid;
1693 pp->si_status = p->p_xstat;
1694 pp->si_code = CLD_STOPPED;
1695 pp->si_uid = p->p_ucred->cr_ruid;
1696 psignal(pp, SIGCHLD);
1697 }
1698 }
1699 goto psigout;
1700 }
1701
1702 switch (signum) {
1703 /*
1704 * Signals ignored by default have been dealt
1705 * with already, since their bits are on in
1706 * p_sigignore.
1707 */
1708
1709 case SIGKILL:
1710 /*
1711 * Kill signal always sets process running and
1712 * unsuspends it.
1713 */
1714 /*
1715 * Process will be running after 'run'
1716 */
1717 p->p_stat = SRUN;
1718
1719 thread_abort(sig_thread_act);
1720
1721 goto psigout;
1722
1723 case SIGCONT:
1724 /*
1725 * Let the process run. If it's sleeping on an
1726 * event, it remains so.
1727 */
1728 if (p->p_flag & P_TTYSLEEP) {
1729 p->p_flag &= ~P_TTYSLEEP;
1730 wakeup(&p->p_siglist);
1731 } else {
1732 p->p_flag |= P_CONTINUED;
1733 (void) task_resume(sig_task);
1734 }
1735 uth->uu_siglist &= ~mask;
1736 p->p_siglist &= ~mask;
1737 p->p_stat = SRUN;
1738
1739 goto psigout;
1740
1741 default:
1742 /*
1743 * All other signals wake up the process, but don't
1744 * resume it.
1745 */
1746 if (p->p_stat == SSTOP)
1747 goto psigout;
1748 goto run;
1749 }
1750 }
1751 /*NOTREACHED*/
1752 run:
1753 /*
1754 * If we're being traced (possibly because someone attached us
1755 * while we were stopped), check for a signal from the debugger.
1756 */
1757 if (p->p_stat == SSTOP) {
1758 if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0)
1759 uth->uu_siglist |= sigmask(p->p_xstat);
1760 } else {
1761 /*
1762 * setrunnable(p) in BSD and
1763 * Wake up the thread if it is interruptible.
1764 */
1765 p->p_stat = SRUN;
1766 thread_abort_safely(sig_thread_act);
1767 }
1768 psigout:
1769 if (withlock)
1770 signal_unlock(p);
1771 if (sw_funnel)
1772 thread_funnel_set(kernel_flock, funnel_state);
1773 }
1774
1775
1776 /* psignal_lock(p, signum, withlock ) */
1777 void
1778 psignal_uthread(thr_act, signum)
1779 thread_t thr_act;
1780 int signum;
1781 {
1782 struct proc *p;
1783 register int prop;
1784 register sig_t action;
1785 thread_t sig_thread_act;
1786 register task_t sig_task;
1787 int mask;
1788 struct uthread *uth;
1789 kern_return_t kret;
1790 int error = 0;
1791
1792 p = (struct proc *)get_bsdtask_info(get_threadtask(thr_act));
1793 if ((u_int)signum >= NSIG || signum == 0)
1794 panic("Invalid signal number in psignal_uthread");
1795 mask = sigmask(signum);
1796 prop = sigprop[signum];
1797
1798 #if SIGNAL_DEBUG
1799 if(rdebug_proc && (p == rdebug_proc)) {
1800 ram_printf(3);
1801 }
1802 #endif /* SIGNAL_DEBUG */
1803
1804 /*
1805 * We will need the task pointer later. Grab it now to
1806 * check for a zombie process. Also don't send signals
1807 * to kernel internal tasks.
1808 */
1809 if (((sig_task = p->task) == TASK_NULL) || is_kerneltask(sig_task)) {
1810 return;
1811 }
1812
1813 sig_thread_act = thr_act;
1814 /*
1815 * Do not send signals to a process whose thread is doing a reboot();
1816 * doing so would mark that thread aborted and can cause I/O failures,
1817 * which would cause data loss.
1818 */
1819 if (ISSET(p->p_flag, P_REBOOT)) {
1820 return;
1821 }
1822
1823 signal_lock(p);
1824
1825 /*
1826 * Deliver the signal to the first thread in the task. This
1827 * allows single threaded applications which use signals to
1828 * be able to be linked with multithreaded libraries. We have
1829 * an implicit reference to the current thread, but need
1830 * an explicit one otherwise. The thread reference keeps
1831 * the corresponding task data structures around too. This
1832 * reference is released by thread_deallocate.
1833 */
1834
1835 if (((p->p_flag & P_TRACED) == 0) && (p->p_sigignore & mask))
1836 goto puthout;
1837
1838 kret = check_actforsig(sig_task, sig_thread_act, 1);
1839
1840 if (kret != KERN_SUCCESS) {
1841 error = EINVAL;
1842 goto puthout;
1843 }
1844
1845
1846 uth = get_bsdthread_info(sig_thread_act);
1847
1848 /*
1849 * If proc is traced, always give parent a chance.
1850 */
1851 if (p->p_flag & P_TRACED)
1852 action = SIG_DFL;
1853 else {
1854 /*
1855 * If the signal is being ignored,
1856 * then we forget about it immediately.
1857 * (Note: we don't set SIGCONT in p_sigignore,
1858 * and if it is set to SIG_IGN,
1859 * action will be SIG_DFL here.)
1860 */
1861 if (p->p_sigignore & mask)
1862 goto puthout;
1863 /* sigwait takes precedence */
1864 if (uth->uu_sigwait & mask)
1865 action = KERN_SIG_WAIT;
1866 else if (uth->uu_sigmask & mask)
1867 action = KERN_SIG_HOLD;
1868 else if (p->p_sigcatch & mask)
1869 action = KERN_SIG_CATCH;
1870 else
1871 action = SIG_DFL;
1872 }
1873
1874 if (p->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
1875 (p->p_flag & P_TRACED) == 0)
1876 p->p_nice = NZERO;
1877
1878 if (prop & SA_CONT) {
1879 uth->uu_siglist &= ~stopsigmask;
1880 p->p_siglist &= ~stopsigmask;
1881 }
1882
1883 if (prop & SA_STOP) {
1884 /*
1885 * If sending a tty stop signal to a member of an orphaned
1886 * process group, discard the signal here if the action
1887 * is default; don't stop the process below if sleeping,
1888 * and don't clear any pending SIGCONT.
1889 */
1890 if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0 &&
1891 action == SIG_DFL)
1892 goto puthout;
1893 uth->uu_siglist &= ~contsigmask;
1894 p->p_siglist &= ~contsigmask;
1895 }
1896 uth->uu_siglist |= mask;
1897 p->p_siglist |= mask; /* just for lame ones looking here */
1898
1899 /*
1900 * Defer further processing for signals which are held,
1901 * except that stopped processes must be continued by SIGCONT.
1902 */
1903 if (action == KERN_SIG_HOLD && ((prop & SA_CONT) == 0 || p->p_stat != SSTOP))
1904 goto puthout;
1905
1906 /*
1907 * SIGKILL priority twiddling moved here from above because
1908 * it needs sig_thread. Could merge it into large switch
1909 * below if we didn't care about priority for tracing
1910 * as SIGKILL's action is always SIG_DFL.
1911 */
1912 if ((signum == SIGKILL) && (p->p_nice > NZERO)) {
1913 p->p_nice = NZERO;
1914 }
1915
1916 /*
1917 * Process is traced - wake it up (if not already
1918 * stopped) so that it can discover the signal in
1919 * issig() and stop for the parent.
1920 */
1921 if (p->p_flag & P_TRACED) {
1922 if (p->p_stat != SSTOP)
1923 goto psurun;
1924 else
1925 goto puthout;
1926 }
1927
1928 if (action == KERN_SIG_WAIT) {
1929 uth->uu_sigwait = mask;
1930 uth->uu_siglist &= ~mask;
1931 p->p_siglist &= ~mask;
1932 wakeup(&uth->uu_sigwait);
1933 /* if it is SIGCONT resume whole process */
1934 if (prop & SA_CONT) {
1935 p->p_flag |= P_CONTINUED;
1936 (void) task_resume(sig_task);
1937 }
1938 goto puthout;
1939 }
1940
1941 if (action != SIG_DFL) {
1942 /*
1943 * User wants to catch the signal.
1944 * Wake up the thread, but don't un-suspend it
1945 * (except for SIGCONT).
1946 */
1947 if (prop & SA_CONT) {
1948 p->p_flag |= P_CONTINUED;
1949 (void) task_resume(sig_task);
1950 }
1951 goto psurun;
1952 } else {
1953 /* Default action - varies */
1954 if (mask & stopsigmask) {
1955 /*
1956 * These are the signals which by default
1957 * stop a process.
1958 *
1959 * Don't clog system with children of init
1960 * stopped from the keyboard.
1961 */
1962 if (!(prop & SA_STOP) && p->p_pptr == initproc) {
1963 psignal_lock(p, SIGKILL, 0);
1964 uth->uu_siglist &= ~mask;
1965 p->p_siglist &= ~mask;
1966 goto puthout;
1967 }
1968
1969 /*
1970 * Stop the task
1971 * if task hasn't already been stopped by
1972 * a signal.
1973 */
1974 uth->uu_siglist &= ~mask;
1975 p->p_siglist &= ~mask;
1976 if (p->p_stat != SSTOP) {
1977 p->p_xstat = signum;
1978 if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0) {
1979 struct proc *pp = p->p_pptr;
1980
1981 pp->si_pid = p->p_pid;
1982 pp->si_status = p->p_xstat;
1983 pp->si_code = CLD_STOPPED;
1984 pp->si_uid = p->p_ucred->cr_ruid;
1985 psignal(pp, SIGCHLD);
1986 }
1987 stop(p);
1988 }
1989 goto puthout;
1990 }
1991
1992 switch (signum) {
1993 /*
1994 * Signals ignored by default have been dealt
1995 * with already, since their bits are on in
1996 * p_sigignore.
1997 */
1998
1999 case SIGKILL:
2000 /*
2001 * Kill signal always sets process running and
2002 * unsuspends it.
2003 */
2004 /*
2005 * Process will be running after 'run'
2006 */
2007 p->p_stat = SRUN;
2008
2009 thread_abort(sig_thread_act);
2010
2011 goto puthout;
2012
2013 case SIGCONT:
2014 /*
2015 * Let the process run. If it's sleeping on an
2016 * event, it remains so.
2017 */
2018 if (p->p_flag & P_TTYSLEEP) {
2019 p->p_flag &= ~P_TTYSLEEP;
2020 wakeup(&p->p_siglist);
2021 } else {
2022 p->p_flag |= P_CONTINUED;
2023 (void) task_resume(sig_task);
2024 }
2025 uth->uu_siglist &= ~mask;
2026 p->p_siglist &= ~mask;
2027 p->p_stat = SRUN;
2028 goto puthout;
2029
2030 default:
2031 /*
2032 * All other signals wake up the process, but don't
2033 * resume it.
2034 */
2035 goto psurun;
2036 }
2037 }
2038 /*NOTREACHED*/
2039 psurun:
2040 /*
2041 * If we're being traced (possibly because someone attached us
2042 * while we were stopped), check for a signal from the debugger.
2043 */
2044 if (p->p_stat == SSTOP) {
2045 if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0) {
2046 uth->uu_siglist |= sigmask(p->p_xstat);
2047 p->p_siglist |= sigmask(p->p_xstat);
2048 }
2049 } else {
2050 /*
2051 * setrunnable(p) in BSD and
2052 * Wake up the thread if it is interruptible.
2053 */
2054 p->p_stat = SRUN;
2055 thread_abort_safely(sig_thread_act);
2056 }
2057
2058 puthout:
2059 signal_unlock(p);
2060 }
2061
2062
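/*
 * Claim the process for exit: record the exiting thread and suspend
 * the task so no other thread keeps running while it is torn down.
 */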
2063 __inline__ void
2064 sig_lock_to_exit(struct proc *p)
2065 {
2066 thread_t self = current_thread();
2067
2068 p->exit_thread = self;
2069 (void) task_suspend(p->task);
2070 }
2071
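/*
 * sig_try_locked: called with the signal lock held.  Returns 1 when the
 * caller may proceed, 0 if another thread is already exiting the
 * process, and -1 if this thread was asked to abort while waiting.
 */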
2072 __inline__ int
2073 sig_try_locked(struct proc *p)
2074 {
2075 thread_t self = current_thread();
2076
2077 while (p->sigwait || p->exit_thread) {
2078 if (p->exit_thread) {
2079 if (p->exit_thread != self) {
2080 /*
2081 * Already exiting - no signals.
2082 */
2083 thread_abort(self);
2084 }
2085 return(0);
2086 }
2087 if(assert_wait_possible()) {
2088 assert_wait((caddr_t)&p->sigwait_thread,
2089 (THREAD_INTERRUPTIBLE));
2090 }
2091 signal_unlock(p);
2092 thread_block(THREAD_CONTINUE_NULL);
2093 signal_lock(p);
2094 if (thread_should_abort(self)) {
2095 /*
2096 * Terminate request - clean up.
2097 */
2098 return -1;
2099 }
2100 }
2101 return 1;
2102 }
2103
2104 /*
2105 * If the current process has received a signal (should be caught or cause
2106 * termination, should interrupt current syscall), return the signal number.
2107 * Stop signals with default action are processed immediately, then cleared;
2108 * they aren't returned. This is checked after each entry to the system for
2109 * a syscall or trap (though this can usually be done without calling issignal
2110 * by checking the pending signal masks in the CURSIG macro.) The normal call
2111 * sequence is
2112 *
2113 * while (signum = CURSIG(curproc))
2114 * postsig(signum);
2115 */
2116 int
2117 issignal(p)
2118 register struct proc *p;
2119 {
2120 register int signum, mask, prop, sigbits;
2121 thread_t cur_act;
2122 struct uthread * ut;
2123 struct proc *pp;
2124
2125 cur_act = current_thread();
2126
2127 #if SIGNAL_DEBUG
2128 if(rdebug_proc && (p == rdebug_proc)) {
2129 ram_printf(3);
2130 }
2131 #endif /* SIGNAL_DEBUG */
2132 signal_lock(p);
2133
2134 /*
2135 * Try to grab the signal lock.
2136 */
2137 if (sig_try_locked(p) <= 0) {
2138 signal_unlock(p);
2139 return (0);
2140 }
2141
2142 ut = get_bsdthread_info(cur_act);
2143 for(;;) {
2144 sigbits = ut->uu_siglist & ~ut->uu_sigmask;
2145
2146 if (p->p_flag & P_PPWAIT)
2147 sigbits &= ~stopsigmask;
2148 if (sigbits == 0) { /* no signal to send */
2149 signal_unlock(p);
2150 return (0);
2151 }
2152 signum = ffs((long)sigbits);
2153 mask = sigmask(signum);
2154 prop = sigprop[signum];
2155
2156 /*
2157 * We should see pending but ignored signals
2158 * only if P_TRACED was on when they were posted.
2159 */
2160 if (mask & p->p_sigignore && (p->p_flag & P_TRACED) == 0) {
2161 ut->uu_siglist &= ~mask; /* take the signal! */
2162 p->p_siglist &= ~mask; /* take the signal! */
2163 continue;
2164 }
2165 if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
2166 register task_t task;
2167 /*
2168 * If traced, always stop, and stay
2169 * stopped until released by the debugger.
2170 */
2171 /* ptrace debugging */
2172 p->p_xstat = signum;
2173 pp = p->p_pptr;
2174 if (p->p_flag & P_SIGEXC) {
2175 p->sigwait = TRUE;
2176 p->sigwait_thread = cur_act;
2177 p->p_stat = SSTOP;
2178 p->p_flag &= ~(P_WAITED|P_CONTINUED);
2179 ut->uu_siglist &= ~mask; /* clear the old signal */
2180 p->p_siglist &= ~mask; /* clear the old signal */
2181 signal_unlock(p);
2182 do_bsdexception(EXC_SOFTWARE, EXC_SOFT_SIGNAL, signum);
2183 signal_lock(p);
2184 } else {
2185 // panic("Unsupported gdb option\n");
2186 pp->si_pid = p->p_pid;
2187 pp->si_status = p->p_xstat;
2188 pp->si_code = CLD_TRAPPED;
2189 pp->si_uid = p->p_ucred->cr_ruid;
2190 psignal(pp, SIGCHLD);
2191 /*
2192 * XXX Have to really stop for debuggers;
2193 * XXX stop() doesn't do the right thing.
2194 * XXX Inline the task_suspend because we
2195 * XXX have to diddle Unix state in the
2196 * XXX middle of it.
2197 */
2198 task = p->task;
2199 task_hold(task);
2200 p->sigwait = TRUE;
2201 p->sigwait_thread = cur_act;
2202 p->p_stat = SSTOP;
2203 p->p_flag &= ~(P_WAITED|P_CONTINUED);
2204 ut->uu_siglist &= ~mask; /* clear the old signal */
2205 p->p_siglist &= ~mask; /* clear the old signal */
2206
2207 wakeup((caddr_t)p->p_pptr);
2208 signal_unlock(p);
2209 assert_wait((caddr_t)&p->sigwait, (THREAD_INTERRUPTIBLE));
2210 thread_block(THREAD_CONTINUE_NULL);
2211 signal_lock(p);
2212 }
2213
2214 p->sigwait = FALSE;
2215 p->sigwait_thread = NULL;
2216 wakeup((caddr_t)&p->sigwait_thread);
2217
2218 /*
2219 * Detect the case where gdb is killed while the traced
2220 * program is still attached: pgsignal() would deliver
2221 * the SIGKILL to the traced program, and that is what
2222 * we check for here.
2223 */
2224 if (ut->uu_siglist & sigmask(SIGKILL)) {
2225 /*
2226 * Wait event may still be outstanding;
2227 * clear it, since sig_lock_to_exit will
2228 * wait.
2229 */
2230 clear_wait(current_thread(), THREAD_INTERRUPTED);
2231 sig_lock_to_exit(p);
2232 /*
2233 * Since this thread will be resumed
2234 * to allow the current syscall to
2235 * be completed, we must save u_qsave
2236 * before calling exit(), since exit()
2237 * calls closef(), which can trash u_qsave.
2238 */
2239 signal_unlock(p);
2240 exit1(p,signum, (int *)NULL);
2241 return(0);
2242 }
2243
2244 /*
2245 * We may have to quit
2246 */
2247 if (thread_should_abort(current_thread())) {
2248 signal_unlock(p);
2249 return(0);
2250 }
2251 /*
2252 * If parent wants us to take the signal,
2253 * then it will leave it in p->p_xstat;
2254 * otherwise we just look for signals again.
2255 */
2256 signum = p->p_xstat;
2257 if (signum == 0)
2258 continue;
2259 /*
2260 * Put the new signal into p_siglist. If the
2261 * signal is being masked, look for other signals.
2262 */
2263 mask = sigmask(signum);
2264 ut->uu_siglist |= mask;
2265 p->p_siglist |= mask; /* just for lame ones looking here */
2266 if (ut->uu_sigmask & mask)
2267 continue;
2268 }
2269
2270 /*
2271 * Decide whether the signal should be returned.
2272 * Return the signal's number, or fall through
2273 * to clear it from the pending mask.
2274 */
2275
2276 switch ((long)p->p_sigacts->ps_sigact[signum]) {
2277
2278 case (long)SIG_DFL:
2279 /*
2280 * Don't take default actions on system processes.
2281 */
2282 if (p->p_pptr->p_pid == 0) {
2283 #if DIAGNOSTIC
2284 /*
2285 * Are you sure you want to ignore SIGSEGV
2286 * in init? XXX
2287 */
2288 printf("Process (pid %d) got signal %d\n",
2289 p->p_pid, signum);
2290 #endif
2291 break; /* == ignore */
2292 }
2293
2294 /*
2295 * If there is a pending stop signal to process
2296 * with default action, stop here,
2297 * then clear the signal. However,
2298 * if process is member of an orphaned
2299 * process group, ignore tty stop signals.
2300 */
2301 if (prop & SA_STOP) {
2302 if (p->p_flag & P_TRACED ||
2303 (p->p_pgrp->pg_jobc == 0 &&
2304 prop & SA_TTYSTOP))
2305 break; /* == ignore */
2306 if (p->p_stat != SSTOP) {
2307 p->p_xstat = signum;
2308 stop(p);
2309 if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0) {
2310 pp = p->p_pptr;
2311 pp->si_pid = p->p_pid;
2312 pp->si_status = p->p_xstat;
2313 pp->si_code = CLD_STOPPED;
2314 pp->si_uid = p->p_ucred->cr_ruid;
2315 psignal(pp, SIGCHLD);
2316 }
2317 }
2318 break;
2319 } else if (prop & SA_IGNORE) {
2320 /*
2321 * Except for SIGCONT, shouldn't get here.
2322 * Default action is to ignore; drop it.
2323 */
2324 break; /* == ignore */
2325 } else {
2326 ut->uu_siglist &= ~mask; /* take the signal! */
2327 p->p_siglist &= ~mask; /* take the signal! */
2328 signal_unlock(p);
2329 return (signum);
2330 }
2331 /*NOTREACHED*/
2332
2333 case (long)SIG_IGN:
2334 /*
2335 * Masking above should prevent us ever trying
2336 * to take action on an ignored signal other
2337 * than SIGCONT, unless process is traced.
2338 */
2339 if ((prop & SA_CONT) == 0 &&
2340 (p->p_flag & P_TRACED) == 0)
2341 printf("issignal\n");
2342 break; /* == ignore */
2343
2344 default:
2345 /*
2346 * This signal has an action, let
2347 * postsig() process it.
2348 */
2349 ut->uu_siglist &= ~mask; /* take the signal! */
2350 p->p_siglist &= ~mask; /* take the signal! */
2351 signal_unlock(p);
2352 return (signum);
2353 }
2354 ut->uu_siglist &= ~mask; /* take the signal! */
2355 p->p_siglist &= ~mask; /* take the signal! */
2356 }
2357 /* NOTREACHED */
2358 }
2359
2360 /* Called from _sleep: check for a deliverable pending signal without taking the signal lock or clearing pending bits */
2361 int
2362 CURSIG(p)
2363 register struct proc *p;
2364 {
2365 register int signum, mask, prop, sigbits;
2366 thread_t cur_act;
2367 struct uthread * ut;
2368 int retnum = 0;
2369
2370
2371 cur_act = current_thread();
2372
2373 ut = get_bsdthread_info(cur_act);
2374
2375 if (ut->uu_siglist == 0)
2376 return (0);
2377
2378 if (((ut->uu_siglist & ~ut->uu_sigmask) == 0) && ((p->p_flag & P_TRACED) == 0))
2379 return (0);
2380
2381 sigbits = ut->uu_siglist & ~ut->uu_sigmask;
2382
2383 for(;;) {
2384 if (p->p_flag & P_PPWAIT)
2385 sigbits &= ~stopsigmask;
2386 if (sigbits == 0) { /* no signal to send */
2387 return (retnum);
2388 }
2389
2390 signum = ffs((long)sigbits);
2391 mask = sigmask(signum);
2392 prop = sigprop[signum];
2393
2394 /*
2395 * We should see pending but ignored signals
2396 * only if P_TRACED was on when they were posted.
2397 */
2398 if (mask & p->p_sigignore && (p->p_flag & P_TRACED) == 0) {
2399 continue;
2400 }
2401 if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
2402 /*
2403 * Put the new signal into p_siglist. If the
2404 * signal is being masked, look for other signals.
2405 */
2406 mask = sigmask(signum);
2407 if (ut->uu_sigmask & mask)
2408 continue;
2409 return(signum);
2410 }
2411
2412 /*
2413 * Decide whether the signal should be returned.
2414 * Return the signal's number, or fall through
2415 * to clear it from the pending mask.
2416 */
2417
2418 switch ((long)p->p_sigacts->ps_sigact[signum]) {
2419
2420 case (long)SIG_DFL:
2421 /*
2422 * Don't take default actions on system processes.
2423 */
2424 if (p->p_pptr->p_pid == 0) {
2425 #if DIAGNOSTIC
2426 /*
2427 * Are you sure you want to ignore SIGSEGV
2428 * in init? XXX
2429 */
2430 printf("Process (pid %d) got signal %d\n",
2431 p->p_pid, signum);
2432 #endif
2433 break; /* == ignore */
2434 }
2435
2436 /*
2437 * If there is a pending stop signal to process
2438 * with default action, stop here,
2439 * then clear the signal. However,
2440 * if process is member of an orphaned
2441 * process group, ignore tty stop signals.
2442 */
2443 if (prop & SA_STOP) {
2444 if (p->p_flag & P_TRACED ||
2445 (p->p_pgrp->pg_jobc == 0 &&
2446 prop & SA_TTYSTOP))
2447 break; /* == ignore */
2448 retnum = signum;
2449 break;
2450 } else if (prop & SA_IGNORE) {
2451 /*
2452 * Except for SIGCONT, shouldn't get here.
2453 * Default action is to ignore; drop it.
2454 */
2455 break; /* == ignore */
2456 } else {
2457 return (signum);
2458 }
2459 /*NOTREACHED*/
2460
2461 case (long)SIG_IGN:
2462 /*
2463 * Masking above should prevent us ever trying
2464 * to take action on an ignored signal other
2465 * than SIGCONT, unless process is traced.
2466 */
2467 if ((prop & SA_CONT) == 0 &&
2468 (p->p_flag & P_TRACED) == 0)
2469 printf("issignal\n");
2470 break; /* == ignore */
2471
2472 default:
2473 /*
2474 * This signal has an action, let
2475 * postsig() process it.
2476 */
2477 return (signum);
2478 }
2479 sigbits &= ~mask; /* take the signal! */
2480 }
2481 /* NOTREACHED */
2482 }
2483
2484 /*
2485 * Put the argument process into the stopped state and notify the parent
2486 * via wakeup. Signals are handled elsewhere. The process must not be
2487 * on the run queue.
2488 */
2489 void
2490 stop(p)
2491 register struct proc *p;
2492 {
2493 p->p_stat = SSTOP;
2494 p->p_flag &= ~(P_WAITED|P_CONTINUED);
2495 if (p->p_pptr->p_stat != SSTOP)
2496 wakeup((caddr_t)p->p_pptr);
2497 (void) task_suspend(p->task); /*XXX*/
2498 }
2499
2500 /*
2501 * Take the action for the specified signal
2502 * from the current set of pending signals.
2503 */
2504 void
2505 postsig(int signum)
2506 {
2507 struct proc *p = current_proc();
2508 struct sigacts *ps = p->p_sigacts;
2509 user_addr_t catcher;
2510 u_long code;
2511 int mask, returnmask;
2512 struct uthread * ut;
2513
2514 #if DIAGNOSTIC
2515 if (signum == 0)
2516 panic("postsig");
2517 /*
2518 * This must be called on master cpu
2519 */
2520 if (cpu_number() != master_cpu)
2521 panic("psig not on master");
2522 #endif
2523
2524 signal_lock(p);
2525 /*
2526 * Try to grab the signal lock.
2527 */
2528 if (sig_try_locked(p) <= 0) {
2529 signal_unlock(p);
2530 return;
2531 }
2532
2533 ut = (struct uthread *)get_bsdthread_info(current_thread());
2534 mask = sigmask(signum);
2535 ut->uu_siglist &= ~mask;
2536 p->p_siglist &= ~mask;
2537 catcher = ps->ps_sigact[signum];
2538 #if KTRACE
2539 // LP64: catcher argument is a 64-bit user space handler address
2540 if (KTRPOINT(p, KTR_PSIG))
2541 ktrpsig(p->p_tracep,
2542 signum, CAST_DOWN(void *,catcher), ut->uu_flag & UT_SAS_OLDMASK ?
2543 &ut->uu_oldmask : &ut->uu_sigmask, 0);
2544 #endif
2545 if (catcher == SIG_DFL) {
2546 /*
2547 * Default catcher, where the default is to kill
2548 * the process. (Other cases were ignored above.)
2549 */
2550 /* called with signal_lock() held */
2551 sigexit_locked(p, signum);
2552 return;
2553 /* NOTREACHED */
2554 } else {
2555 /*
2556 * If we get here, the signal must be caught.
2557 */
2558 #if DIAGNOSTIC
2559 if (catcher == SIG_IGN || (ut->uu_sigmask & mask))
2560 log(LOG_WARNING,
2561 "postsig: processing masked or ignored signal\n");
2562 #endif
2563 /*
2564 * Set the new mask value and also defer further
2565 * occurrences of this signal.
2566 *
2567 * Special case: user has done a sigpause. Here the
2568 * current mask is not of interest, but rather the
2569 * mask from before the sigpause is what we want
2570 * restored after the signal processing is completed.
2571 */
2572 if (ut->uu_flag & UT_SAS_OLDMASK) {
2573 returnmask = ut->uu_oldmask;
2574 ut->uu_flag &= ~UT_SAS_OLDMASK;
2575 ut->uu_oldmask = 0;
2576 } else
2577 returnmask = ut->uu_sigmask;
2578 ut->uu_sigmask |= ps->ps_catchmask[signum];
2579 if ((ps->ps_signodefer & mask) == 0)
2580 ut->uu_sigmask |= mask;
2581 if ((signum != SIGILL) && (signum != SIGTRAP) && (ps->ps_sigreset & mask)) {
2582 if ((signum != SIGCONT) && (sigprop[signum] & SA_IGNORE))
2583 p->p_sigignore |= mask;
2584 ps->ps_sigact[signum] = SIG_DFL;
2585 ps->ps_siginfo &= ~mask;
2586 ps->ps_signodefer &= ~mask;
2587 }
2588 #ifdef __ppc__
2589 /* FP exceptions must be disabled before the handler can run in user mode */
2590 if (signum == SIGFPE) {
2591 thread_enable_fpe(current_thread(), 0);
2592 }
2593 #endif /* __ppc__ */
2594
2595 if (ps->ps_sig != signum) {
2596 code = 0;
2597 } else {
2598 code = ps->ps_code;
2599 ps->ps_code = 0;
2600 }
2601 p->p_stats->p_ru.ru_nsignals++;
2602 sendsig(p, catcher, signum, returnmask, code);
2603 }
2604 signal_unlock(p);
2605 }
2606
2607 /*
2608 * Force the current process to exit with the specified signal, dumping core
2609 * if appropriate. We bypass the normal tests for masked and caught signals,
2610 * allowing unrecoverable failures to terminate the process without changing
2611 * signal state. Mark the accounting record with the signal termination.
2612 * If dumping core, save the signal number for the debugger. Calls exit and
2613 * does not return.
2614 */
2615 /* called with signal lock */
2616 void
2617 sigexit_locked(p, signum)
2618 register struct proc *p;
2619 int signum;
2620 {
2621
2622 sig_lock_to_exit(p);
2623 p->p_acflag |= AXSIG;
2624 if (sigprop[signum] & SA_CORE) {
2625 p->p_sigacts->ps_sig = signum;
2626 signal_unlock(p);
2627 if (coredump(p) == 0)
2628 signum |= WCOREFLAG;
2629 } else
2630 signal_unlock(p);
2631
2632 exit1(p, W_EXITCODE(0, signum), (int *)NULL);
2633 /* NOTREACHED */
2634 }
2635
2636
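/*
 * Attach an EVFILT_SIGNAL knote to the current process's knote list.
 * EV_CLEAR is forced so the accumulated count resets each time the
 * event is retrieved.
 */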
2637 static int
2638 filt_sigattach(struct knote *kn)
2639 {
2640 struct proc *p = current_proc();
2641
2642 kn->kn_ptr.p_proc = p;
2643 kn->kn_flags |= EV_CLEAR; /* automatically set */
2644
2645 /* XXX lock the proc here while adding to the list? */
2646 KNOTE_ATTACH(&p->p_klist, kn);
2647
2648 return (0);
2649 }
2650
2651 static void
2652 filt_sigdetach(struct knote *kn)
2653 {
2654 struct proc *p = kn->kn_ptr.p_proc;
2655
2656 KNOTE_DETACH(&p->p_klist, kn);
2657 }
2658
2659 /*
2660 * signal knotes are shared with proc knotes, so we apply a mask to
2661 * the hint in order to differentiate them from process hints. This
2662 * could be avoided by using a signal-specific knote list, but probably
2663 * isn't worth the trouble.
2664 */
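/*
 * For illustration only (an assumption about the posting side, not
 * code from this filter): the signal delivery path is expected to
 * publish the hint with the signal number OR'ed into NOTE_SIGNAL, e.g.
 *
 *	KNOTE(&p->p_klist, NOTE_SIGNAL | signum);
 *
 * which is what lets filt_signal() strip NOTE_SIGNAL below and compare
 * the remainder against kn->kn_id.
 */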
2665 static int
2666 filt_signal(struct knote *kn, long hint)
2667 {
2668
2669 if (hint & NOTE_SIGNAL) {
2670 hint &= ~NOTE_SIGNAL;
2671
2672 if (kn->kn_id == (unsigned int)hint)
2673 kn->kn_data++;
2674 }
2675 return (kn->kn_data != 0);
2676 }
2677
2678
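/*
 * BSD AST handler: charge any deferred profiling ticks, then deliver
 * pending signals to the current thread via issignal()/postsig().
 * On the first pass it also finishes BSD initialization through
 * bsdinit_task().  Runs with the kernel funnel held.
 */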
2679 void
2680 bsd_ast(thread_t thr_act)
2681 {
2682 struct proc *p = current_proc();
2683 struct uthread *ut = get_bsdthread_info(thr_act);
2684 int signum;
2685 user_addr_t pc;
2686 boolean_t funnel_state;
2687 static int bsd_init_done = 0;
2688
2689 if (p == NULL)
2690 return;
2691
2692 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2693
2694 if ((p->p_flag & P_OWEUPC) && (p->p_flag & P_PROFIL)) {
2695 pc = get_useraddr();
2696 addupc_task(p, pc, 1);
2697 p->p_flag &= ~P_OWEUPC;
2698 }
2699
2700 if (CHECK_SIGNALS(p, current_thread(), ut)) {
2701 while ( (signum = issignal(p)) )
2702 postsig(signum);
2703 }
2704 if (!bsd_init_done) {
2705 bsd_init_done = 1;
2706 bsdinit_task();
2707 }
2708
2709 (void) thread_funnel_set(kernel_flock, FALSE);
2710 }
2711
2712 /*
2713 * The following routines are called via callout from bsd_hardclock
2714 * so that the psignal calls run in a thread context under the funnel.
2715 */
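/*
 * Sketch of the expected caller (an assumption, not part of this file):
 * bsd_hardclock would hand these off with something like
 *
 *	thread_call_func((thread_call_func_t)psignal_vtalarm, p, FALSE);
 *
 * so that psignal_lock() runs in a schedulable thread context rather
 * than at interrupt level.
 */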
2716 void
2717 psignal_vtalarm(struct proc *p)
2718 {
2719 boolean_t funnel_state;
2720
2721 if (p == NULL)
2722 return;
2723 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2724 psignal_lock(p, SIGVTALRM, 1);
2725 (void) thread_funnel_set(kernel_flock, FALSE);
2726 }
2727
2728 void
2729 psignal_xcpu(struct proc *p)
2730 {
2731 boolean_t funnel_state;
2732
2733 if (p == NULL)
2734 return;
2735 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2736 psignal_lock(p, SIGXCPU, 1);
2737 (void) thread_funnel_set(kernel_flock, FALSE);
2738 }
2739
2740 void
2741 psignal_sigprof(struct proc *p)
2742 {
2743 boolean_t funnel_state;
2744
2745 if (p == NULL)
2746 return;
2747 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2748 psignal_lock(p, SIGPROF, 1);
2749 (void) thread_funnel_set(kernel_flock, FALSE);
2750 }
2751
2752 /* ptrace: set the traced process runnable */
2753 void
2754 pt_setrunnable(struct proc *p)
2755 {
2756 task_t task;
2757
2758 task = p->task;
2759
2760 if (p->p_flag & P_TRACED) {
2761 p->p_stat = SRUN;
2762 if (p->sigwait) {
2763 wakeup((caddr_t)&(p->sigwait));
2764 task_release(task);
2765 }
2766 }
2767 }
2768
2769
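/*
 * Raise a Mach exception on behalf of BSD code (for example
 * EXC_SOFTWARE / EXC_SOFT_SIGNAL during ptrace stops) by packaging the
 * code and subcode into an exception data array for bsd_exception().
 */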
2770 kern_return_t
2771 do_bsdexception(
2772 int exc,
2773 int code,
2774 int sub)
2775 {
2776 exception_data_type_t codes[EXCEPTION_CODE_MAX];
2777
2778 codes[0] = code;
2779 codes[1] = sub;
2780 return(bsd_exception(exc, codes, 2));
2781 }
2782
2783 int
2784 proc_pendingsignals(struct proc *p, sigset_t mask)
2785 {
2786 struct uthread * uth;
2787 thread_t th;
2788 sigset_t bits = 0;
2789 int error;
2790
2791 /* If the process is in proc exit return no signal info */
2792 if (p->p_lflag & P_LPEXIT)
2793 return(0);
2794
2795 /* Duplicate the signal lock code here to allow recursion, since
2796 * exit holds the lock for too long.  All of this code is being
2797 * reworked; this is just a workaround for regressions until the
2798 * new code arrives.
2799 */
2800 ppend_retry:
2801 error = lockmgr((struct lock__bsd__ *)&p->signal_lock[0], (LK_EXCLUSIVE | LK_CANRECURSE), 0, (struct proc *)0);
2802 if (error == EINTR)
2803 goto ppend_retry;
2804
2805 if ((p->p_flag & P_INVFORK) && p->p_vforkact) {
2806 th = p->p_vforkact;
2807 uth = (struct uthread *)get_bsdthread_info(th);
2808 if (uth) {
2809 bits = (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
2810 }
2811 goto out;
2812 }
2813
2814 bits = 0;
2815 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
2816 bits |= (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
2817 }
2818 out:
2819 signal_unlock(p);
2820 return(bits);
2821 }
2822
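/*
 * Return the subset of 'mask' that is pending and unmasked on the
 * given thread, excluding signals the process ignores.
 */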
2823 int
2824 thread_issignal(proc_t p, thread_t th, sigset_t mask)
2825 {
2826 struct uthread * uth;
2827 sigset_t bits=0;
2828
2829
2830 uth = (struct uthread *)get_bsdthread_info(th);
2831 if (uth) {
2832 bits = (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
2833 }
2834 return(bits);
2835 }
2836