]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kern_sig.c
479b1f5e11b7b128d3bb19ea199f07df812174be
[apple/xnu.git] / bsd / kern / kern_sig.c
1 /*
2 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /* Copyright (c) 1995-1998 Apple Computer, Inc. All Rights Reserved */
23 /*
24 * Copyright (c) 1982, 1986, 1989, 1991, 1993
25 * The Regents of the University of California. All rights reserved.
26 * (c) UNIX System Laboratories, Inc.
27 * All or some portions of this file are derived from material licensed
28 * to the University of California by American Telephone and Telegraph
29 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
30 * the permission of UNIX System Laboratories, Inc.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94
61 */
62
63 #define SIGPROP /* include signal properties table */
64 #include <sys/param.h>
65 #include <sys/resourcevar.h>
66 #include <sys/proc_internal.h>
67 #include <sys/kauth.h>
68 #include <sys/systm.h>
69 #include <sys/timeb.h>
70 #include <sys/times.h>
71 #include <sys/acct.h>
72 #include <sys/file_internal.h>
73 #include <sys/kernel.h>
74 #include <sys/wait.h>
75 #include <sys/signalvar.h>
76 #if KTRACE
77 #include <sys/ktrace.h>
78 #endif
79 #include <sys/syslog.h>
80 #include <sys/stat.h>
81 #include <sys/lock.h>
82 #include <sys/kdebug.h>
83
84 #include <sys/mount.h>
85 #include <sys/sysproto.h>
86
87 #include <bsm/audit_kernel.h>
88
89 #include <machine/spl.h>
90
91 #include <kern/cpu_number.h>
92
93 #include <sys/vm.h>
94 #include <sys/user.h> /* for coredump */
95 #include <kern/ast.h> /* for APC support */
96 #include <kern/lock.h>
97 #include <kern/task.h> /* extern void *get_bsdtask_info(task_t); */
98 #include <kern/thread.h>
99 #include <kern/sched_prim.h>
100 #include <kern/thread_call.h>
101 #include <mach/exception.h>
102 #include <mach/task.h>
103 #include <mach/thread_act.h>
104
105 /*
106 * Missing prototypes that Mach should export
107 *
108 * +++
109 */
110 extern int thread_enable_fpe(thread_t act, int onoff);
111 extern void unix_syscall_return(int error);
112 extern thread_t port_name_to_thread(mach_port_name_t port_name);
113 extern kern_return_t check_actforsig(task_t task, thread_t thread, int setast);
114 extern kern_return_t get_signalact(task_t , thread_t *, int);
115 extern boolean_t thread_should_abort(thread_t);
116 extern unsigned int get_useraddr(void);
117
118 /*
119 * ---
120 */
121
122 extern void doexception(int exc, int code, int sub);
123
124 void stop(struct proc *p);
125 int cansignal(struct proc *, kauth_cred_t, struct proc *, int);
126 int killpg1(struct proc *, int, int, int);
127 void sigexit_locked(struct proc *, int);
128 int setsigvec(struct proc *, int, struct __user_sigaction *);
129 void exit1(struct proc *, int, int *);
130 void psignal_uthread(thread_t, int);
131 kern_return_t do_bsdexception(int, int, int);
132 void __posix_sem_syscall_return(kern_return_t);
133
134 /* implementations in osfmk/kern/sync_sema.c. We do not want port.h in this scope, so void * them */
135 kern_return_t semaphore_timedwait_signal_trap_internal(void *, void *,time_t, int32_t, void (*)(int));
136 kern_return_t semaphore_timedwait_trap_internal(void *, time_t, int32_t, void (*)(int));
137 kern_return_t semaphore_wait_signal_trap_internal(void *, void *, void (*)(int));
138 kern_return_t semaphore_wait_trap_internal(void *, void (*)(int));
139
140 static int filt_sigattach(struct knote *kn);
141 static void filt_sigdetach(struct knote *kn);
142 static int filt_signal(struct knote *kn, long hint);
143
/* kqueue EVFILT_SIGNAL filter operations: not file-descriptor backed (0),
 * with attach/detach/event hooks for signal knotes. */
struct filterops sig_filtops =
	{ 0, filt_sigattach, filt_sigdetach, filt_signal };
146
147
148 /*
149 * NOTE: Source and target may *NOT* overlap! (target is smaller)
150 */
151 static void
152 sigaltstack_64to32(struct user_sigaltstack *in, struct sigaltstack *out)
153 {
154 out->ss_sp = CAST_DOWN(void *,in->ss_sp);
155 out->ss_size = in->ss_size;
156 out->ss_flags = in->ss_flags;
157 }
158
/*
 * NOTE: Source and target are permitted to overlap! (source is smaller);
 * this works because we copy fields in order from the end of the struct to
 * the beginning.
 */
static void
sigaltstack_32to64(struct sigaltstack *in, struct user_sigaltstack *out)
{
	/*
	 * Widen a 32-bit sigaltstack into the 64-bit user layout.  Fields
	 * are copied last-to-first so an in-place (overlapping) conversion
	 * is safe; do not reorder these assignments.
	 */
	out->ss_flags = in->ss_flags;
	out->ss_size = in->ss_size;
	/* Extend the 32-bit stack pointer into a user_addr_t. */
	out->ss_sp = CAST_USER_ADDR_T(in->ss_sp);
}
171
172 static void
173 sigaction_64to32(struct user_sigaction *in, struct sigaction *out)
174 {
175 /* This assumes 32 bit __sa_handler is of type sig_t */
176 out->__sigaction_u.__sa_handler = CAST_DOWN(sig_t,in->__sigaction_u.__sa_handler);
177 out->sa_mask = in->sa_mask;
178 out->sa_flags = in->sa_flags;
179 }
180
181 static void
182 __sigaction_32to64(struct __sigaction *in, struct __user_sigaction *out)
183 {
184 out->__sigaction_u.__sa_handler = CAST_USER_ADDR_T(in->__sigaction_u.__sa_handler);
185 out->sa_tramp = CAST_USER_ADDR_T(in->sa_tramp);
186 out->sa_mask = in->sa_mask;
187 out->sa_flags = in->sa_flags;
188 }
189
190
#if SIGNAL_DEBUG
void ram_printf(int);
/* Debug knobs: ram_debug gates debug output; rdebug_proc selects a process. */
int ram_debug=0;
unsigned int rdebug_proc=0;
/* Trivial debug aid: print an integer via the kernel printf. */
void
ram_printf(int x)
{
	printf("x is %d",x);

}
#endif /* SIGNAL_DEBUG */
202
/*
 * Take the process's exclusive signal lock, retrying if the sleep inside
 * lockmgr() is interrupted.  Under DIAGNOSTIC+SIGNAL_DEBUG on ppc, the
 * caller's recent return addresses are recorded in p->lockpc[] to aid
 * lock-debugging.
 */
int
signal_lock(struct proc *p)
{
	int error = 0;
#if DIAGNOSTIC
#if SIGNAL_DEBUG
#ifdef __ppc__
{
	int register sp, *fp, numsaved;

	__asm__ volatile("mr %0,r1" : "=r" (sp));

	/* Walk up to three stack frames, stashing each saved LR (fp[2]). */
	fp = (int *)*((int *)sp);
	for (numsaved = 0; numsaved < 3; numsaved++) {
		p->lockpc[numsaved] = fp[2];
		if ((int)fp <= 0)
			break;
		fp = (int *)*fp;
	}
}
#endif /* __ppc__ */
#endif /* SIGNAL_DEBUG */
#endif /* DIAGNOSTIC */

siglock_retry:
	/* lockmgr() may return EINTR if the lock sleep is interrupted. */
	error = lockmgr((struct lock__bsd__ *)&p->signal_lock[0], LK_EXCLUSIVE, 0, (struct proc *)0);
	if (error == EINTR)
		goto siglock_retry;
	return(error);
}
233
/*
 * Release the process's signal lock.  Mirror of signal_lock(); under
 * DIAGNOSTIC+SIGNAL_DEBUG on ppc the caller's recent return addresses
 * are recorded in p->unlockpc[].
 */
int
signal_unlock(struct proc *p)
{
#if DIAGNOSTIC
#if SIGNAL_DEBUG
#ifdef __ppc__
{
	int register sp, *fp, numsaved;

	__asm__ volatile("mr %0,r1" : "=r" (sp));

	/* Walk up to three stack frames, stashing each saved LR (fp[2]). */
	fp = (int *)*((int *)sp);
	for (numsaved = 0; numsaved < 3; numsaved++) {
		p->unlockpc[numsaved] = fp[2];
		if ((int)fp <= 0)
			break;
		fp = (int *)*fp;
	}
}
#endif /* __ppc__ */
#endif /* SIGNAL_DEBUG */
#endif /* DIAGNOSTIC */

	/* TBD: check p last arg */
	return(lockmgr((struct lock__bsd__ *)&p->signal_lock[0], LK_RELEASE, (simple_lock_t)0, (struct proc *)0));
}
260
261 void
262 signal_setast(sig_actthread)
263 thread_t sig_actthread;
264 {
265 act_set_astbsd(sig_actthread);
266 }
267
268 /*
269 * Can process p, with ucred uc, send the signal signum to process q?
270 */
int
cansignal(p, uc, q, signum)
	struct proc *p;
	kauth_cred_t uc;
	struct proc *q;
	int signum;
{
	/* you can signal yourself */
	if (p == q)
		return(1);

	if (!suser(uc, NULL))
		return (1);		/* root can always signal */

	if (signum == SIGCONT && q->p_session == p->p_session)
		return (1);		/* SIGCONT in session */

	/*
	 * Using kill(), only certain signals can be sent to setugid
	 * child processes
	 */
	if (q->p_flag & P_SUGID) {
		switch (signum) {
		case 0:
		case SIGKILL:
		case SIGINT:
		case SIGTERM:
		case SIGSTOP:
		case SIGTTIN:
		case SIGTTOU:
		case SIGTSTP:
		case SIGHUP:
		case SIGUSR1:
		case SIGUSR2:
			/*
			 * Allowed only when one of the sender's real or
			 * effective uids matches one of the target's real
			 * or effective uids.
			 */
			if (uc->cr_ruid == q->p_ucred->cr_ruid ||
			    kauth_cred_getuid(uc) == q->p_ucred->cr_ruid ||
			    uc->cr_ruid == kauth_cred_getuid(q->p_ucred) ||
			    kauth_cred_getuid(uc) == kauth_cred_getuid(q->p_ucred))
				return (1);
		}
		return (0);
	}

	/* XXX
	 * because the P_SUGID test exists, this has extra tests which
	 * could be removed.
	 */
	/* Non-setugid target: a match on the saved uid is also accepted. */
	if (uc->cr_ruid == q->p_ucred->cr_ruid ||
	    uc->cr_ruid == q->p_ucred->cr_svuid ||
	    kauth_cred_getuid(uc) == q->p_ucred->cr_ruid ||
	    kauth_cred_getuid(uc) == q->p_ucred->cr_svuid ||
	    uc->cr_ruid == kauth_cred_getuid(q->p_ucred) ||
	    kauth_cred_getuid(uc) == kauth_cred_getuid(q->p_ucred))
		return (1);
	return (0);
}
327
328
329 /* ARGSUSED */
int
sigaction(struct proc *p, register struct sigaction_args *uap, __unused register_t *retval)
{
	struct user_sigaction vec;
	struct __user_sigaction __vec;

	struct user_sigaction *sa = &vec;
	register struct sigacts *ps = p->p_sigacts;

	register int signum;
	int bit, error=0;

	/*
	 * sigaction(2): examine and/or change the disposition of a signal.
	 * uap->osa, if non-null, receives the previous disposition;
	 * uap->nsa, if non-null, supplies the new one.  32-bit callers'
	 * structures are converted to/from the in-kernel "user_" layouts.
	 */
	signum = uap->signum;
	/* SIGKILL and SIGSTOP may never be caught, blocked, or ignored. */
	if (signum <= 0 || signum >= NSIG ||
	    signum == SIGKILL || signum == SIGSTOP)
		return (EINVAL);

	if (uap->osa) {
		/* Reconstruct the old sigaction from the per-signal bitmasks. */
		sa->sa_handler = ps->ps_sigact[signum];
		sa->sa_mask = ps->ps_catchmask[signum];
		bit = sigmask(signum);
		sa->sa_flags = 0;
		if ((ps->ps_sigonstack & bit) != 0)
			sa->sa_flags |= SA_ONSTACK;
		/* ps_sigintr records interruptibility; its absence means SA_RESTART. */
		if ((ps->ps_sigintr & bit) == 0)
			sa->sa_flags |= SA_RESTART;
		if (ps->ps_siginfo & bit)
			sa->sa_flags |= SA_SIGINFO;
		if (ps->ps_signodefer & bit)
			sa->sa_flags |= SA_NODEFER;
		if (ps->ps_64regset & bit)
			sa->sa_flags |= SA_64REGSET;
		/* SIGCHLD-only flags live in p_flag, not the sigacts masks. */
		if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDSTOP))
			sa->sa_flags |= SA_NOCLDSTOP;
		if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDWAIT))
			sa->sa_flags |= SA_NOCLDWAIT;

		if (IS_64BIT_PROCESS(p)) {
			error = copyout(sa, uap->osa, sizeof(struct user_sigaction));
		} else {
			struct sigaction vec32;
			sigaction_64to32(sa, &vec32);
			error = copyout(&vec32, uap->osa, sizeof(struct sigaction));
		}
		if (error)
			return (error);
	}
	if (uap->nsa) {
		if (IS_64BIT_PROCESS(p)) {
			error = copyin(uap->nsa, &__vec, sizeof(struct __user_sigaction));
		} else {
			struct __sigaction __vec32;
			error = copyin(uap->nsa, &__vec32, sizeof(struct __sigaction));
			__sigaction_32to64(&__vec32, &__vec);
		}
		if (error)
			return (error);
		error = setsigvec(p, signum, &__vec);
	}
	return (error);
}
391
392 /* Routines to manipulate bits on all threads */
393 int
394 clear_procsiglist(struct proc *p, int bit)
395 {
396 struct uthread * uth;
397 thread_t thact;
398
399 signal_lock(p);
400
401 if ((p->p_flag & P_INVFORK) && p->p_vforkact) {
402 thact = p->p_vforkact;
403 uth = (struct uthread *)get_bsdthread_info(thact);
404 if (uth) {
405 uth->uu_siglist &= ~bit;
406 }
407 p->p_siglist &= ~bit;
408 signal_unlock(p);
409 return(0);
410 }
411
412 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
413 uth->uu_siglist &= ~bit;
414 }
415 p->p_siglist &= ~bit;
416 signal_unlock(p);
417 return(0);
418 }
419
420
421 static int
422 unblock_procsigmask(struct proc *p, int bit)
423 {
424 struct uthread * uth;
425 thread_t thact;
426
427 signal_lock(p);
428 if ((p->p_flag & P_INVFORK) && p->p_vforkact) {
429 thact = p->p_vforkact;
430 uth = (struct uthread *)get_bsdthread_info(thact);
431 if (uth) {
432 uth->uu_sigmask &= ~bit;
433 }
434 p->p_sigmask &= ~bit;
435 signal_unlock(p);
436 return(0);
437 }
438 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
439 uth->uu_sigmask &= ~bit;
440 }
441 p->p_sigmask &= ~bit;
442 signal_unlock(p);
443 return(0);
444 }
445
446
447 static int
448 block_procsigmask(struct proc *p, int bit)
449 {
450 struct uthread * uth;
451 thread_t thact;
452
453 signal_lock(p);
454 if ((p->p_flag & P_INVFORK) && p->p_vforkact) {
455 thact = p->p_vforkact;
456 uth = (struct uthread *)get_bsdthread_info(thact);
457 if (uth) {
458 uth->uu_sigmask |= bit;
459 }
460 p->p_sigmask |= bit;
461 signal_unlock(p);
462 return(0);
463 }
464 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
465 uth->uu_sigmask |= bit;
466 }
467 p->p_sigmask |= bit;
468 signal_unlock(p);
469 return(0);
470 }
471
472 int
473 set_procsigmask(struct proc *p, int bit)
474 {
475 struct uthread * uth;
476 thread_t thact;
477
478 signal_lock(p);
479 if ((p->p_flag & P_INVFORK) && p->p_vforkact) {
480 thact = p->p_vforkact;
481 uth = (struct uthread *)get_bsdthread_info(thact);
482 if (uth) {
483 uth->uu_sigmask = bit;
484 }
485 p->p_sigmask = bit;
486 signal_unlock(p);
487 return(0);
488 }
489 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
490 uth->uu_sigmask = bit;
491 }
492 p->p_sigmask = bit;
493 signal_unlock(p);
494 return(0);
495 }
496
497 /* XXX should be static? */
/*
 * Install a new signal disposition for signum on process p.  Encodes
 * sa_flags into the per-signal bitmasks of struct sigacts and updates
 * the process-level catch/ignore masks.  The only validation done here
 * is refusing to change SIGKILL/SIGSTOP away from SIG_DFL; the caller
 * (sigaction) has already range-checked signum.
 */
int
setsigvec(struct proc *p, int signum, struct __user_sigaction *sa)
{
	register struct sigacts *ps = p->p_sigacts;
	register int bit;

	if ((signum == SIGKILL || signum == SIGSTOP) &&
		sa->sa_handler != SIG_DFL)
		return(EINVAL);
	bit = sigmask(signum);
	/*
	 * Change setting atomically.
	 */
	ps->ps_sigact[signum] = sa->sa_handler;
	ps->ps_trampact[signum] = sa->sa_tramp;
	/* SIGKILL/SIGSTOP can never be masked during handler execution. */
	ps->ps_catchmask[signum] = sa->sa_mask &~ sigcantmask;
	if (sa->sa_flags & SA_SIGINFO)
		ps->ps_siginfo |= bit;
	else
		ps->ps_siginfo &= ~bit;
	if (sa->sa_flags & SA_64REGSET)
		ps->ps_64regset |= bit;
	else
		ps->ps_64regset &= ~bit;
	/* ps_sigintr is the inverse sense of SA_RESTART. */
	if ((sa->sa_flags & SA_RESTART) == 0)
		ps->ps_sigintr |= bit;
	else
		ps->ps_sigintr &= ~bit;
	if (sa->sa_flags & SA_ONSTACK)
		ps->ps_sigonstack |= bit;
	else
		ps->ps_sigonstack &= ~bit;
	if (sa->sa_flags & SA_USERTRAMP)
		ps->ps_usertramp |= bit;
	else
		ps->ps_usertramp &= ~bit;
	if (sa->sa_flags & SA_RESETHAND)
		ps->ps_sigreset |= bit;
	else
		ps->ps_sigreset &= ~bit;
	if (sa->sa_flags & SA_NODEFER)
		ps->ps_signodefer |= bit;
	else
		ps->ps_signodefer &= ~bit;
	if (signum == SIGCHLD) {
		if (sa->sa_flags & SA_NOCLDSTOP)
			p->p_flag |= P_NOCLDSTOP;
		else
			p->p_flag &= ~P_NOCLDSTOP;
		/* Ignoring SIGCHLD implies SA_NOCLDWAIT semantics as well. */
		if ((sa->sa_flags & SA_NOCLDWAIT) || (sa->sa_handler == SIG_IGN))
			p->p_flag |= P_NOCLDWAIT;
		else
			p->p_flag &= ~P_NOCLDWAIT;
	}

#ifdef __ppc__
	if (signum == SIGFPE) {
		/* FP exceptions are delivered only while SIGFPE is caught. */
		if (sa->sa_handler == SIG_DFL || sa->sa_handler == SIG_IGN)
			thread_enable_fpe(current_thread(), 0);
		else
			thread_enable_fpe(current_thread(), 1);
	}
#endif /* __ppc__ */
	/*
	 * Set bit in p_sigignore for signals that are set to SIG_IGN,
	 * and for signals set to SIG_DFL where the default is to ignore.
	 * However, don't put SIGCONT in p_sigignore,
	 * as we have to restart the process.
	 */
	if (sa->sa_handler == SIG_IGN ||
	    (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) {

		/* Discard any pending instances on every thread. */
		clear_procsiglist(p, bit);
		if (signum != SIGCONT)
			p->p_sigignore |= bit;	/* easier in psignal */
		p->p_sigcatch &= ~bit;
	} else {
		p->p_sigignore &= ~bit;
		if (sa->sa_handler == SIG_DFL)
			p->p_sigcatch &= ~bit;
		else
			p->p_sigcatch |= bit;
	}
	return(0);
}
583
584 /*
585 * Initialize signal state for process 0;
586 * set to ignore signals that are ignored by default.
587 */
588 void
589 siginit(p)
590 struct proc *p;
591 {
592 register int i;
593
594 for (i = 0; i < NSIG; i++)
595 if (sigprop[i] & SA_IGNORE && i != SIGCONT)
596 p->p_sigignore |= sigmask(i);
597 }
598
599 /*
600 * Reset signals for an exec of the specified process.
601 */
void
execsigs(p, thr_act)
	register struct proc *p;
	register thread_t thr_act;
{
	register struct sigacts *ps = p->p_sigacts;
	register int nc, mask;
	struct uthread *ut = (struct uthread *)0;

	if (thr_act){
		ut = (struct uthread *)get_bsdthread_info(thr_act);
	}
	/*
	 * Reset caught signals. Held signals remain held
	 * through p_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	while (p->p_sigcatch) {
		/* Process caught signals lowest-numbered first. */
		nc = ffs((long)p->p_sigcatch);
		mask = sigmask(nc);
		p->p_sigcatch &= ~mask;
		if (sigprop[nc] & SA_IGNORE) {
			if (nc != SIGCONT)
				p->p_sigignore |= mask;
			/* Drop any pending instance of a now-ignored signal. */
			if (thr_act){
				ut->uu_siglist &= ~mask;
				p->p_siglist &= ~mask;
			} else
				clear_procsiglist(p, mask);
		}
		ps->ps_sigact[nc] = SIG_DFL;
	}
	/*
	 * Reset stack state to the user stack.
	 * Clear set of signals caught on the signal stack.
	 */
	ps->ps_sigstk.ss_flags = SA_DISABLE;
	ps->ps_sigstk.ss_size = 0;
	ps->ps_sigstk.ss_sp = USER_ADDR_NULL;
	ps->ps_flags = 0;
	if (thr_act) {
		/* Per-thread alternate stack state is reset as well. */
		ut->uu_sigstk.ss_flags = SA_DISABLE;
		ut->uu_sigstk.ss_size = 0;
		ut->uu_sigstk.ss_sp = USER_ADDR_NULL;
		ut->uu_flag &= ~UT_ALTSTACK;
	}
	ps->ps_sigonstack = 0;
}
650
651 /*
652 * Manipulate signal mask.
653 * Note that we receive new mask, not pointer,
654 * and return old mask as return value;
655 * the library stub does the rest.
656 */
int
sigprocmask(register struct proc *p, struct sigprocmask_args *uap, __unused register_t *retval)
{
	int error = 0;
	sigset_t oldmask, nmask;
	user_addr_t omask = uap->omask;
	struct uthread *ut;

	/* The "old" mask reported is the calling thread's current mask. */
	ut = (struct uthread *)get_bsdthread_info(current_thread());
	oldmask = ut->uu_sigmask;

	if (uap->mask == USER_ADDR_NULL) {
		/* just want old mask */
		goto out;
	}
	error = copyin(uap->mask, &nmask, sizeof(sigset_t));
	if (error)
		goto out;

	/* SIGKILL/SIGSTOP (sigcantmask) can never be blocked. */
	switch (uap->how) {
	case SIG_BLOCK:
		block_procsigmask(p, (nmask & ~sigcantmask));
		signal_setast(current_thread());
		break;

	case SIG_UNBLOCK:
		unblock_procsigmask(p, (nmask & ~sigcantmask));
		signal_setast(current_thread());
		break;

	case SIG_SETMASK:
		set_procsigmask(p, (nmask & ~sigcantmask));
		signal_setast(current_thread());
		break;

	default:
		error = EINVAL;
		break;
	}
out:
	/* NOTE(review): copyout failure is silently ignored — confirm intended. */
	if (!error && omask != USER_ADDR_NULL)
		copyout(&oldmask, omask, sizeof(sigset_t));
	return (error);
}
701
702 int
703 sigpending(__unused struct proc *p, register struct sigpending_args *uap, __unused register_t *retval)
704 {
705 struct uthread *ut;
706 sigset_t pendlist;
707
708 ut = (struct uthread *)get_bsdthread_info(current_thread());
709 pendlist = ut->uu_siglist;
710
711 if (uap->osv)
712 copyout(&pendlist, uap->osv, sizeof(sigset_t));
713 return(0);
714 }
715
716
717 /*
718 * Suspend process until signal, providing mask to be set
719 * in the meantime. Note nonstandard calling convention:
720 * libc stub passes mask, not pointer, to save a copyin.
721 */
722
/*
 * Continuation routine for sigsuspend()'s tsleep0(): runs when the sleep
 * is interrupted and completes the system call, always reporting EINTR.
 */
static int
sigcontinue(__unused int error)
{
//	struct uthread *ut = get_bsdthread_info(current_thread());
	unix_syscall_return(EINTR);
}
729
int
sigsuspend(register struct proc *p, struct sigsuspend_args *uap, __unused register_t *retval)
{
	struct uthread *ut;

	ut = (struct uthread *)get_bsdthread_info(current_thread());

	/*
	 * When returning from sigpause, we want
	 * the old mask to be restored after the
	 * signal handler has finished. Thus, we
	 * save it here and mark the sigacts structure
	 * to indicate this.
	 */
	ut->uu_oldmask = ut->uu_sigmask;
	ut->uu_flag |= UT_SAS_OLDMASK;
	/* SIGKILL/SIGSTOP (sigcantmask) can never be masked. */
	ut->uu_sigmask = (uap->mask & ~sigcantmask);
	/* Sleep with sigcontinue as the continuation; it reports EINTR. */
	(void) tsleep0((caddr_t) p, PPAUSE|PCATCH, "pause", 0, sigcontinue);
	/* always return EINTR rather than ERESTART... */
	return (EINTR);
}
751
752
753 int
754 __disable_threadsignal(struct proc *p,
755 __unused register struct __disable_threadsignal_args *uap,
756 __unused register_t *retval)
757 {
758 struct uthread *uth;
759
760 uth = (struct uthread *)get_bsdthread_info(current_thread());
761
762 /* No longer valid to have any signal delivered */
763 signal_lock(p);
764 uth->uu_flag |= UT_NO_SIGMASK;
765 signal_unlock(p);
766
767 return(0);
768
769 }
770
771
int
__pthread_markcancel(p, uap, retval)
	struct proc *p;
	register struct __pthread_markcancel_args *uap;
	register_t *retval;
{
	thread_act_t target_act;
	int error = 0;
	struct uthread *uth;

	/* Translate the caller-supplied port into a thread reference. */
	target_act = (thread_act_t)port_name_to_thread(uap->thread_port);

	if (target_act == THR_ACT_NULL)
		return (ESRCH);

	uth = (struct uthread *)get_bsdthread_info(target_act);

	/* if the thread is in vfork do not cancel */
	/*
	 * NOTE(review): P_VFORK is a proc p_flag bit, yet it is tested
	 * against the uthread uu_flag word here — presumably UT_VFORK was
	 * intended; confirm against the uthread flag definitions.
	 */
	if ((uth->uu_flag & (P_VFORK | UT_CANCEL | UT_CANCELED )) == 0) {
		uth->uu_flag |= (UT_CANCEL | UT_NO_SIGMASK);
		/* Knock a cancelable thread out of any interruptible wait. */
		if (((uth->uu_flag & UT_NOTCANCELPT) == 0)
			&& ((uth->uu_flag & UT_CANCELDISABLE) == 0))
			thread_abort_safely(target_act);
	}

	/* Drop the reference taken by port_name_to_thread(). */
	thread_deallocate(target_act);
	return (error);
}
800
/*
 * action == 0: query the cancellation state; if the thread is marked
 *              for cancellation, transition it to "canceled".
 * action == 1: enable cancel handling.
 * action == 2: disable cancel handling.
 */
int
__pthread_canceled(p, uap, retval)
	struct proc *p;
	register struct __pthread_canceled_args *uap;
	register_t *retval;
{
	thread_act_t thr_act;
	struct uthread *uth;
	int action = uap->action;

	thr_act = current_act();
	uth = (struct uthread *)get_bsdthread_info(thr_act);

	switch (action) {
	case 1:
		/* Enable cancellation handling. */
		uth->uu_flag &= ~UT_CANCELDISABLE;
		return(0);
	case 2:
		/* Disable cancellation handling. */
		uth->uu_flag |= UT_CANCELDISABLE;
		return(0);
	case 0:
	default:
		/*
		 * Query: succeed only when a cancel is pending, enabled,
		 * and not yet acted upon; then mark the thread canceled.
		 */
		if((uth->uu_flag & ( UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
			uth->uu_flag &= ~UT_CANCEL;
			uth->uu_flag |= (UT_CANCELED | UT_NO_SIGMASK);
			return(0);
		}
		return(EINVAL);
	}
	/* NOTREACHED: every switch arm returns above. */
	return(EINVAL);
}
838
839 void
840 __posix_sem_syscall_return(kern_return_t kern_result)
841 {
842 int error = 0;
843
844 if (kern_result == KERN_SUCCESS)
845 error = 0;
846 else if (kern_result == KERN_ABORTED)
847 error = EINTR;
848 else if (kern_result == KERN_OPERATION_TIMED_OUT)
849 error = ETIMEDOUT;
850 else
851 error = EINVAL;
852 unix_syscall_return(error);
853 /* does not return */
854 }
855
856
857 int
858 __semwait_signal(p, uap, retval)
859 struct proc *p;
860 register struct __semwait_signal_args *uap;
861 register_t *retval;
862 {
863
864 kern_return_t kern_result;
865 mach_timespec_t then;
866 struct timespec now;
867
868 if(uap->timeout) {
869
870 if (uap->relative) {
871 then.tv_sec = uap->tv_sec;
872 then.tv_nsec = uap->tv_nsec;
873 } else {
874 nanotime(&now);
875 then.tv_sec = uap->tv_sec - now.tv_sec;
876 then.tv_nsec = uap->tv_nsec - now.tv_nsec;
877 if (then.tv_nsec < 0) {
878 then.tv_nsec += NSEC_PER_SEC;
879 then.tv_sec--;
880 }
881 }
882
883 if (uap->mutex_sem == (void *)NULL)
884 kern_result = semaphore_timedwait_trap_internal(uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
885 else
886 kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
887
888 } else {
889
890 if (uap->mutex_sem == (void *)NULL)
891 kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return);
892 else
893
894 kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return);
895 }
896
897 out:
898 if (kern_result == KERN_SUCCESS)
899 return(0);
900 else if (kern_result == KERN_ABORTED)
901 return(EINTR);
902 else if (kern_result == KERN_OPERATION_TIMED_OUT)
903 return(ETIMEDOUT);
904 else
905 return(EINVAL);
906 }
907
908
909 int
910 __pthread_kill(__unused struct proc *p,
911 register struct __pthread_kill_args *uap,
912 __unused register_t *retval)
913 {
914 thread_t target_act;
915 int error = 0;
916 int signum = uap->sig;
917 struct uthread *uth;
918
919 target_act = (thread_t)port_name_to_thread(uap->thread_port);
920
921 if (target_act == THREAD_NULL)
922 return (ESRCH);
923 if ((u_int)signum >= NSIG) {
924 error = EINVAL;
925 goto out;
926 }
927
928 uth = (struct uthread *)get_bsdthread_info(target_act);
929
930 if (uth->uu_flag & UT_NO_SIGMASK) {
931 error = ESRCH;
932 goto out;
933 }
934
935 if (signum)
936 psignal_uthread(target_act, signum);
937 out:
938 thread_deallocate(target_act);
939 return (error);
940 }
941
942
int
pthread_sigmask(__unused register struct proc *p,
		register struct pthread_sigmask_args *uap,
		__unused register_t *retval)
{
	user_addr_t set = uap->set;
	user_addr_t oset = uap->oset;
	sigset_t nset;
	int error = 0;
	struct uthread *ut;
	sigset_t oldset;

	/* Per-thread variant: operates only on the calling thread's mask. */
	ut = (struct uthread *)get_bsdthread_info(current_thread());
	oldset = ut->uu_sigmask;

	if (set == USER_ADDR_NULL) {
		/* need only old mask */
		goto out;
	}

	error = copyin(set, &nset, sizeof(sigset_t));
	if (error)
		goto out;

	switch (uap->how) {
	case SIG_BLOCK:
		/* SIGKILL/SIGSTOP (sigcantmask) can never be blocked. */
		ut->uu_sigmask |= (nset & ~sigcantmask);
		break;

	case SIG_UNBLOCK:
		/* Unblocking may expose pending signals: post an AST. */
		ut->uu_sigmask &= ~(nset);
		signal_setast(current_thread());
		break;

	case SIG_SETMASK:
		ut->uu_sigmask = (nset & ~sigcantmask);
		signal_setast(current_thread());
		break;

	default:
		error = EINVAL;

	}
out:
	/* NOTE(review): copyout failure is silently ignored — confirm intended. */
	if (!error && oset != USER_ADDR_NULL)
		copyout(&oldset, oset, sizeof(sigset_t));

	return(error);
}
992
993
int
sigwait(register struct proc *p, register struct sigwait_args *uap, __unused register_t *retval)
{
	struct uthread *ut;
	struct uthread *uth;
	int error = 0;
	sigset_t mask;
	sigset_t siglist;
	sigset_t sigw=0;
	int signum;

	ut = (struct uthread *)get_bsdthread_info(current_thread());

	if (uap->set == USER_ADDR_NULL)
		return(EINVAL);

	error = copyin(uap->set, &mask, sizeof(sigset_t));
	if (error)
		return(error);

	/* SIGKILL/SIGSTOP cannot be waited for. */
	siglist = (mask & ~sigcantmask);

	if (siglist == 0)
		return(EINVAL);

	/* First, check whether a requested signal is already pending. */
	signal_lock(p);
	if ((p->p_flag & P_INVFORK) && p->p_vforkact) {
		signal_unlock(p);
		return(EINVAL);
	} else {
		TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
			if ( (sigw = uth->uu_siglist & siglist) ) {
				break;
			}
		}
	}
	signal_unlock(p);
	if (sigw) {
		/* The signal was pending on a thread */
		goto sigwait1;
	}
	/*
	 * When returning from sigwait, we want
	 * the old mask to be restored after the
	 * signal handler has finished. Thus, we
	 * save it here and mark the sigacts structure
	 * to indicate this.
	 */
	ut->uu_oldmask = ut->uu_sigmask;
	ut->uu_flag |= UT_SAS_OLDMASK;
	/* NOTE(review): dead check — siglist was already tested above. */
	if (siglist == (sigset_t)0)
		return(EINVAL);
	/* SIGKILL and SIGSTOP are not maskable as well */
	ut->uu_sigmask = ~(siglist|sigcantmask);
	ut->uu_sigwait = siglist;
	/* No Continuations for now */
	error = tsleep((caddr_t)&ut->uu_sigwait, PPAUSE|PCATCH, "pause", 0);

	/* An interrupted sleep means a signal arrived: that is success. */
	if ((error == EINTR) || (error == ERESTART))
		error = 0;

	sigw = (ut->uu_sigwait & siglist);
	ut->uu_sigmask = ut->uu_oldmask;
	ut->uu_oldmask = 0;
	ut->uu_flag &= ~UT_SAS_OLDMASK;
sigwait1:
	ut->uu_sigwait = 0;
	if (!error) {
		/* Report the lowest-numbered satisfied signal and consume it. */
		signum = ffs((unsigned int)sigw);
		if (!signum)
			panic("sigwait with no signal wakeup");
		ut->uu_siglist &= ~(sigmask(signum));
		if (uap->sig != USER_ADDR_NULL)
			error = copyout(&signum, uap->sig, sizeof(int));
	}

	return(error);

}
1073
1074
int
sigaltstack(struct proc *p, register struct sigaltstack_args *uap, __unused register_t *retval)
{
	struct sigacts *psp;
	struct user_sigaltstack *pstk;
	struct user_sigaltstack ss;
	struct uthread *uth;
	int uthsigaltstack = 0;
	int error;

	uth = (struct uthread *)get_bsdthread_info(current_thread());
	/* P_LTHSIGSTACK selects per-thread (vs per-process) alternate stacks. */
	uthsigaltstack = p->p_lflag & P_LTHSIGSTACK;

	psp = p->p_sigacts;
	if (uthsigaltstack != 0) {
		pstk = &uth->uu_sigstk;
		if ((uth->uu_flag & UT_ALTSTACK) == 0)
			uth->uu_sigstk.ss_flags |= SA_DISABLE;
	} else {
		pstk = &psp->ps_sigstk;
		if ((psp->ps_flags & SAS_ALTSTACK) == 0)
			psp->ps_sigstk.ss_flags |= SA_DISABLE;
	}
	if (uap->oss) {
		/* Return the current alternate-stack configuration. */
		if (IS_64BIT_PROCESS(p)) {
			error = copyout(pstk, uap->oss, sizeof(struct user_sigaltstack));
		} else {
			struct sigaltstack ss32;
			sigaltstack_64to32(pstk, &ss32);
			error = copyout(&ss32, uap->oss, sizeof(struct sigaltstack));
		}
		if (error)
			return (error);
	}
	if (uap->nss == USER_ADDR_NULL)
		return (0);
	if (IS_64BIT_PROCESS(p)) {
		error = copyin(uap->nss, &ss, sizeof(struct user_sigaltstack));
	} else {
		struct sigaltstack ss32;
		error = copyin(uap->nss, &ss32, sizeof(struct sigaltstack));
		sigaltstack_32to64(&ss32,&ss);
	}
	if (error)
		return (error);
	/* SA_DISABLE is the only flag accepted from user space. */
	if ((ss.ss_flags & ~SA_DISABLE) != 0) {
		return(EINVAL);
	}

	if (ss.ss_flags & SA_DISABLE) {
		if (uthsigaltstack != 0) {
			/* if we are here we are not in the signal handler ;so no need to check */
			if (uth->uu_sigstk.ss_flags & SA_ONSTACK)
				return (EINVAL);
			uth->uu_flag &= ~UT_ALTSTACK;
			uth->uu_sigstk.ss_flags = ss.ss_flags;
		} else {
			/* Cannot disable the stack while executing on it. */
			if (psp->ps_sigstk.ss_flags & SA_ONSTACK)
				return (EINVAL);
			psp->ps_flags &= ~SAS_ALTSTACK;
			psp->ps_sigstk.ss_flags = ss.ss_flags;
		}

		return (0);
	}
/* The older stacksize was 8K, enforce that one so no compat problems */
#define OLDMINSIGSTKSZ 8*1024
	if (ss.ss_size < OLDMINSIGSTKSZ)
		return (ENOMEM);
	if (uthsigaltstack != 0) {
		uth->uu_flag |= UT_ALTSTACK;
		uth->uu_sigstk= ss;
	} else {
		psp->ps_flags |= SAS_ALTSTACK;
		psp->ps_sigstk= ss;
	}
	return (0);
}
1153
int
kill(struct proc *cp, struct kill_args *uap, __unused register_t *retval)
{
	register struct proc *p;
	kauth_cred_t uc = kauth_cred_get();

	AUDIT_ARG(pid, uap->pid);
	AUDIT_ARG(signum, uap->signum);

	if ((u_int)uap->signum >= NSIG)
		return (EINVAL);
	if (uap->pid > 0) {
		/* kill single process */
		if ((p = proc_findref(uap->pid)) == NULL) {
			if ((p = pzfind(uap->pid)) != NULL) {
				/*
				 * IEEE Std 1003.1-2001: return success
				 * when killing a zombie.
				 */
				return (0);
			}
			return (ESRCH);
		}
		AUDIT_ARG(process, p);
		/* Permission check; the proc ref is dropped on every path. */
		if (!cansignal(cp, uc, p, uap->signum)) {
			proc_dropref(p);
			return(EPERM);
		}
		/* signum == 0 probes for existence/permission only. */
		if (uap->signum)
			psignal(p, uap->signum);
		proc_dropref(p);
		return (0);
	}
	switch (uap->pid) {
	case -1:		/* broadcast signal */
		return (killpg1(cp, uap->signum, 0, 1));
	case 0:			/* signal own process group */
		return (killpg1(cp, uap->signum, 0, 0));
	default:		/* negative explicit process group */
		return (killpg1(cp, uap->signum, -(uap->pid), 0));
	}
	/* NOTREACHED */
}
1197
1198
1199 /*
1200 * Common code for kill process group/broadcast kill.
1201 * cp is calling process.
1202 */
1203 int
1204 killpg1(cp, signum, pgid, all)
1205 register struct proc *cp;
1206 int signum, pgid, all;
1207 {
1208 register struct proc *p;
1209 kauth_cred_t uc = cp->p_ucred;
1210 struct pgrp *pgrp;
1211 int nfound = 0;
1212
1213 if (all) {
1214 /*
1215 * broadcast
1216 */
1217 for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
1218 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
1219 p == cp || !cansignal(cp, uc, p, signum))
1220 continue;
1221 nfound++;
1222 if (signum)
1223 psignal(p, signum);
1224 }
1225 } else {
1226 if (pgid == 0)
1227 /*
1228 * zero pgid means send to my process group.
1229 */
1230 pgrp = cp->p_pgrp;
1231 else {
1232 pgrp = pgfind(pgid);
1233 if (pgrp == NULL)
1234 return (ESRCH);
1235 }
1236 for (p = pgrp->pg_members.lh_first; p != 0;
1237 p = p->p_pglist.le_next) {
1238 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
1239 p->p_stat == SZOMB ||
1240 !cansignal(cp, uc, p, signum))
1241 continue;
1242 nfound++;
1243 if (signum)
1244 psignal(p, signum);
1245 }
1246 }
1247 return (nfound ? 0 : ESRCH);
1248 }
1249
1250 /*
1251 * Send a signal to a process group.
1252 */
1253 void
1254 gsignal(pgid, signum)
1255 int pgid, signum;
1256 {
1257 struct pgrp *pgrp;
1258
1259 if (pgid && (pgrp = pgfind(pgid)))
1260 pgsignal(pgrp, signum, 0);
1261 }
1262
1263 /*
1264 * Send a signal to a process group. If checktty is 1,
1265 * limit to members which have a controlling terminal.
1266 */
1267 void
1268 pgsignal(pgrp, signum, checkctty)
1269 struct pgrp *pgrp;
1270 int signum, checkctty;
1271 {
1272 register struct proc *p;
1273
1274 if (pgrp)
1275 for (p = pgrp->pg_members.lh_first; p != 0;
1276 p = p->p_pglist.le_next)
1277 if (checkctty == 0 || p->p_flag & P_CONTROLT)
1278 psignal(p, signum);
1279 }
1280
1281 /*
1282 * Send signal to a backgrounded process blocked due to tty access
1283 * In FreeBSD, the backgrounded process wakes up every second and
1284 * discovers whether it is foregounded or not. In our case, we block
1285 * the thread in tsleep as we want to avoid storm of processes as well
1286 * as the suspend is only at AST level
1287 */
1288 void
1289 tty_pgsignal(pgrp, signum)
1290 struct pgrp *pgrp;
1291 int signum;
1292 {
1293 register struct proc *p;
1294
1295 if (pgrp)
1296 for (p = pgrp->pg_members.lh_first; p != 0;
1297 p = p->p_pglist.le_next)
1298 if ((p->p_flag & P_TTYSLEEP) && (p->p_flag & P_CONTROLT))
1299 psignal(p, signum);
1300 }
1301
1302 /*
1303 * Send a signal caused by a trap to a specific thread.
1304 */
1305 void
1306 threadsignal(thread_t sig_actthread, int signum, u_long code)
1307 {
1308 register struct uthread *uth;
1309 register struct task * sig_task;
1310 register struct proc *p ;
1311 int mask;
1312
1313 if ((u_int)signum >= NSIG || signum == 0)
1314 return;
1315
1316 mask = sigmask(signum);
1317 if ((mask & threadmask) == 0)
1318 return;
1319 sig_task = get_threadtask(sig_actthread);
1320 p = (struct proc *)(get_bsdtask_info(sig_task));
1321
1322 uth = get_bsdthread_info(sig_actthread);
1323 if (uth && (uth->uu_flag & UT_VFORK))
1324 p = uth->uu_proc;
1325
1326 if (!(p->p_flag & P_TRACED) && (p->p_sigignore & mask))
1327 return;
1328
1329 uth->uu_siglist |= mask;
1330 p->p_siglist |= mask; /* just for lame ones looking here */
1331 uth->uu_code = code;
1332 /* mark on process as well */
1333 signal_setast(sig_actthread);
1334 }
1335
1336
/*
 * Post signal signum to process p.  Thin wrapper around
 * psignal_lock() that asks it to take the signal lock itself
 * (withlock == 1).
 */
void
psignal(p, signum)
	register struct proc *p;
	register int signum;
{
	psignal_lock(p, signum, 1);
}
1344
/*
 * psignal_vfork: post signal signum to vfork child p, whose execution
 * is borrowed from activation thr_act in new_task.  Delivery is forced
 * onto that specific activation; unlike psignal_lock(), no per-thread
 * sigmask/sigwait/sigcatch resolution is done — the action is treated
 * as SIG_DFL throughout.  Silently ignored for kernel tasks or null
 * task/thread.  Takes and releases the signal lock on p.
 */
1345 void
1346 psignal_vfork(struct proc *p, task_t new_task, thread_t thr_act, int signum)
1347 {
1348 	register int prop;
1349 	register sig_t action;
1350 	int mask;
1351 	struct uthread *uth;
1352 	
1353 	if ((u_int)signum >= NSIG || signum == 0)
1354 		panic("psignal signal number");
1355 	mask = sigmask(signum);
1356 	prop = sigprop[signum];
1357 
1358 #if SIGNAL_DEBUG
1359         if(rdebug_proc && (p == rdebug_proc)) {
1360                 ram_printf(3);
1361         }
1362 #endif /* SIGNAL_DEBUG */
1363 
1364 	if ((new_task == TASK_NULL) || (thr_act == (thread_t)NULL) || is_kerneltask(new_task))
1365 		return;
1366 
1367 
1368 	uth = get_bsdthread_info(thr_act);
1369 	signal_lock(p);
1370 
1371 	/*
1372 	 * proc is traced, always give parent a chance.
1373 	 */
	/* NOTE(review): action is always SIG_DFL here; the checks below that
	 * compare against SIG_DFL therefore always see SIG_DFL. */
1374 	action = SIG_DFL;
1375 
1376 	if (p->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
1377 		(p->p_flag & P_TRACED) == 0)
1378 			p->p_nice = NZERO;
1379 
	/* A continue signal cancels any pending stop signals, and vice versa. */
1380 	if (prop & SA_CONT) {
1381 		p->p_siglist &= ~stopsigmask;
1382 		uth->uu_siglist &= ~stopsigmask;
1383 	}
1384 
1385 	if (prop & SA_STOP) {
1386 		/*
1387 		 * If sending a tty stop signal to a member of an orphaned
1388 		 * process group, discard the signal here if the action
1389 		 * is default; don't stop the process below if sleeping,
1390 		 * and don't clear any pending SIGCONT.
1391 		 */
1392 		if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0 &&
1393 			action == SIG_DFL)
1394 			goto psigout;
1395 		uth->uu_siglist &= ~contsigmask;
1396 		p->p_siglist &= ~contsigmask;
1397 	}
1398 	uth->uu_siglist |= mask;
1399 	p->p_siglist |= mask; /* just for lame ones looking here */
1400 
1401 	/* Deliver signal to the activation passed in */
1402 	act_set_astbsd(thr_act);
1403 
1404 	/*
1405 	 *	SIGKILL priority twiddling moved here from above because
1406 	 *	it needs sig_thread.  Could merge it into large switch
1407 	 *	below if we didn't care about priority for tracing
1408 	 *	as SIGKILL's action is always SIG_DFL.
1409 	 */
1410 	if ((signum == SIGKILL) && (p->p_nice > NZERO)) {
1411 		p->p_nice = NZERO;
1412 	}
1413 
1414 	/*
1415 	 *	This Process is traced - wake it up (if not already
1416 	 *	stopped) so that it can discover the signal in
1417 	 *	issig() and stop for the parent.
1418 	 */
	/* NOTE(review): if p is not traced, control falls straight through
	 * into the run: label below — that fall-through is intentional. */
1419 	if (p->p_flag & P_TRACED) {
1420 	   	if (p->p_stat != SSTOP)
1421 			goto run;
1422 		else
1423 			goto psigout;
1424 	}
1425 run:
1426 	/*
1427 	 * If we're being traced (possibly because someone attached us
1428 	 * while we were stopped), check for a signal from the debugger.
1429 	 */
1430 	if (p->p_stat == SSTOP) {
1431 		if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0) {
1432 			uth->uu_siglist |= sigmask(p->p_xstat); 
1433 			p->p_siglist |= mask; /* just for lame ones looking here */
1434 		}
1435 	}
1436 
1437 	/*
1438 	 * setrunnable(p) in BSD
1439 	 */
1440 	p->p_stat = SRUN;
1441 
1442 psigout:
1443 	signal_unlock(p);
1444 }
1445
1446 static thread_t
1447 get_signalthread(struct proc *p, int signum)
1448 {
1449 struct uthread *uth;
1450 thread_t thr_act;
1451 sigset_t mask = sigmask(signum);
1452 thread_t sig_thread_act;
1453 struct task * sig_task = p->task;
1454 kern_return_t kret;
1455
1456 if ((p->p_flag & P_INVFORK) && p->p_vforkact) {
1457 sig_thread_act = p->p_vforkact;
1458 kret = check_actforsig(sig_task, sig_thread_act, 1);
1459 if (kret == KERN_SUCCESS)
1460 return(sig_thread_act);
1461 else
1462 return(THREAD_NULL);
1463 }
1464
1465 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
1466 if(((uth->uu_flag & UT_NO_SIGMASK)== 0) &&
1467 (((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask))) {
1468 if (check_actforsig(p->task, uth->uu_act, 1) == KERN_SUCCESS)
1469 return(uth->uu_act);
1470 }
1471 }
1472 if (get_signalact(p->task, &thr_act, 1) == KERN_SUCCESS) {
1473 return(thr_act);
1474 }
1475
1476 return(THREAD_NULL);
1477 }
1478
1479 /*
1480 * Send the signal to the process. If the signal has an action, the action
1481 * is usually performed by the target process rather than the caller; we add
1482 * the signal to the set of pending signals for the process.
1483 *
1484 * Exceptions:
1485 * o When a stop signal is sent to a sleeping process that takes the
1486 * default action, the process is stopped without awakening it.
1487 * o SIGCONT restarts stopped processes (or puts them back to sleep)
1488 * regardless of the signal action (eg, blocked or ignored).
1489 *
1490 * Other ignored signals are discarded immediately.
1491 */
/*
 * psignal_lock: core signal-posting routine.  Posts signum to process
 * p, choosing a delivery thread via get_signalthread().  withlock says
 * whether this routine must take/release the per-process signal lock
 * itself (callers that already hold it pass 0).  Acquires the kernel
 * funnel if the caller does not already hold one, and restores the
 * previous funnel state on every exit path.
 */
1492 void
1493 psignal_lock(p, signum, withlock)
1494 	register struct proc *p;
1495 	register int signum;
1496 	register int withlock;
1497 {
1498 	register int prop;
1499 	register sig_t action;
1500 	thread_t	sig_thread_act;
1501 	register task_t		sig_task;
1502 	int mask;
1503 	struct uthread *uth;
1504 	boolean_t funnel_state = FALSE;
1505 	int sw_funnel = 0;
1506 
1507 	if ((u_int)signum >= NSIG || signum == 0)
1508 		panic("psignal signal number");
1509 	mask = sigmask(signum);
1510 	prop = sigprop[signum];
1511 
1512 #if SIGNAL_DEBUG
1513         if(rdebug_proc && (p == rdebug_proc)) {
1514                 ram_printf(3);
1515         }
1516 #endif /* SIGNAL_DEBUG */
1517 
	/* Take the kernel funnel if we don't hold one; remember to restore. */
1518 	if (thread_funnel_get() == (funnel_t *)0) {
1519 		sw_funnel = 1;
1520 		funnel_state = thread_funnel_set(kernel_flock, TRUE);
1521 	}
1522 	/*
1523 	 *	We will need the task pointer later.  Grab it now to
1524 	 *	check for a zombie process.  Also don't send signals
1525 	 *	to kernel internal tasks.
1526 	 */
1527 	if (((sig_task = p->task) == TASK_NULL)  || is_kerneltask(sig_task)) {
1528 		if (sw_funnel)
1529 			thread_funnel_set(kernel_flock, funnel_state);
1530 		return;
1531 	}
1532 
	/* Notify kqueue/kevent watchers of the signal before delivery. */
1533 	KNOTE(&p->p_klist, NOTE_SIGNAL | signum);
1534 
1535 	/*
1536 	 * do not send signals to the process that has the thread
1537 	 * doing a reboot(). Not doing so will mark that thread aborted
1538 	 * and can cause IO failures wich will cause data loss.
1539 	 */
1540 	if (ISSET(p->p_flag, P_REBOOT)) {
1541 		if (sw_funnel)
1542 			thread_funnel_set(kernel_flock, funnel_state);
1543 		return;
1544 	}
1545 
1546 	if (withlock)
1547 		signal_lock(p);
1548 
1549 	/*
1550 	 *	Deliver the signal to the first thread in the task. This
1551 	 *	allows single threaded applications which use signals to
1552 	 *	be able to be linked with multithreaded libraries.  We have
1553 	 *	an implicit reference to the current thread, but need
1554 	 *	an explicit one otherwise.  The thread reference keeps
1555 	 *	the corresponding task data structures around too.  This
1556 	 *	reference is released by thread_deallocate.
1557 	 */
1558 	
	/* Untraced processes drop ignored signals immediately. */
1559 	if (((p->p_flag & P_TRACED) == 0) && (p->p_sigignore & mask))
1560 		goto psigout;
1561 
1562 	/* If successful return with ast set */
1563 	sig_thread_act = get_signalthread(p, signum);
1564 
1565 	if (sig_thread_act == THREAD_NULL) {
1566 		/* XXXX FIXME
1567 		 * if it is sigkill, may be we should
1568 		 * inject a thread to terminate
1569 		 */
1570 #if SIGNAL_DEBUG
1571        		ram_printf(1);
1572 #endif /* SIGNAL_DEBUG */
1573 		goto psigout;
1574 	}
1575 
1576 	uth = get_bsdthread_info(sig_thread_act);
1577 
1578 	/*
1579 	 *	If proc is traced, always give parent a chance.
1580 	 */
1581 	if (p->p_flag & P_TRACED)
1582 		action = SIG_DFL;
1583 	else {
1584 		/*
1585 		 *	If the signal is being ignored,
1586 		 *	then we forget about it immediately.
1587 		 *	(Note: we don't set SIGCONT in p_sigignore,
1588 		 *	and if it is set to SIG_IGN,
1589 		 *	action will be SIG_DFL here.)
1590 		 */
1591 		if (p->p_sigignore & mask)
1592 			goto psigout;
1593 		/* sigwait takes precedence */
1594 		if (uth->uu_sigwait & mask)
1595 			action = KERN_SIG_WAIT;
1596 		else if (uth->uu_sigmask & mask)
1597 			action = KERN_SIG_HOLD;
1598 		else if (p->p_sigcatch & mask)
1599 			action = KERN_SIG_CATCH;
1600 		else
1601 			action = SIG_DFL;
1602 	}
1603 
1604 	if (p->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
1605 		(p->p_flag & P_TRACED) == 0)
1606 			p->p_nice = NZERO;
1607 
	/* A continue signal cancels pending stop signals, and vice versa. */
1608 	if (prop & SA_CONT) {
1609 		uth->uu_siglist &= ~stopsigmask;
1610 		p->p_siglist &= ~stopsigmask;
1611 	}
1612 
1613 	if (prop & SA_STOP) {
1614 		/*
1615 		 *	If sending a tty stop signal to a member of an orphaned
1616 		 *	process group, discard the signal here if the action
1617 		 *	is default; don't stop the process below if sleeping,
1618 		 *	and don't clear any pending SIGCONT.
1619 		 */
1620 		if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0 &&
1621 			action == SIG_DFL)
1622 			goto psigout;
1623 		uth->uu_siglist &= ~contsigmask;
1624 		p->p_siglist &= ~contsigmask;
1625 	}
1626 	uth->uu_siglist |= mask;
1627 	p->p_siglist |= mask; /* just for lame ones looking here */
1628 
1629 	
1630 	/*
1631 	 * Defer further processing for signals which are held,
1632 	 * except that stopped processes must be continued by SIGCONT.
1633 	 */
1634 	if (action == KERN_SIG_HOLD && ((prop & SA_CONT) == 0 || p->p_stat != SSTOP)) {
1635 		goto psigout;
1636 	}
1637 	/*
1638 	 *	SIGKILL priority twiddling moved here from above because
1639 	 *	it needs sig_thread.  Could merge it into large switch
1640 	 *	below if we didn't care about priority for tracing
1641 	 *	as SIGKILL's action is always SIG_DFL.
1642 	 */
1643 	if ((signum == SIGKILL) && (p->p_nice > NZERO)) {
1644 		p->p_nice = NZERO;
1645 	}
1646 
1647 	/*
1648 	 *	Process is traced - wake it up (if not already
1649 	 *	stopped) so that it can discover the signal in
1650 	 *	issig() and stop for the parent.
1651 	 */
1652 	if (p->p_flag & P_TRACED) {
1653 	   	if (p->p_stat != SSTOP)
1654 			goto run;
1655 		else
1656 			goto psigout;
1657 	}
1658 
	/* A thread is blocked in sigwait() on this signal: hand it over. */
1659 	if (action == KERN_SIG_WAIT) {
1660 		uth->uu_sigwait = mask;
1661 		uth->uu_siglist &= ~mask;
1662 		p->p_siglist &= ~mask;
1663 		wakeup(&uth->uu_sigwait);
1664 		/* if it is SIGCONT resume whole process */
1665 		if (prop & SA_CONT) {
1666 			p->p_flag |= P_CONTINUED;
1667 			(void) task_resume(sig_task);
1668 		}
1669 		goto psigout;
1670 	}
1671 
	/* KERN_SIG_CATCH (and remaining KERN_SIG_HOLD) take this path. */
1672 	if (action != SIG_DFL) {
1673 		/*
1674 		 *	User wants to catch the signal.
1675 		 *	Wake up the thread, but don't un-suspend it
1676 		 *	(except for SIGCONT).
1677 		 */
1678 		if (prop & SA_CONT) {
1679 			if (p->p_flag & P_TTYSLEEP) {
1680 				p->p_flag &= ~P_TTYSLEEP;
1681 				wakeup(&p->p_siglist);
1682 			} else {
1683 				p->p_flag |= P_CONTINUED;
1684 				(void) task_resume(sig_task);
1685 			}
1686 			p->p_stat = SRUN;
1687 		} else if (p->p_stat == SSTOP)
1688 			goto psigout;
1689 		goto run;
1690 	} else {
1691 		/*	Default action - varies */
1692 		if (mask & stopsigmask) {
1693 			/*
1694 			 *	These are the signals which by default
1695 			 *	stop a process.
1696 			 *
1697 			 *	Don't clog system with children of init
1698 			 *	stopped from the keyboard.
1699 			 */
1700 			if (!(prop & SA_STOP) && p->p_pptr == initproc) {
1701 				psignal_lock(p, SIGKILL, 0);
1702 				uth->uu_siglist &= ~mask;
1703 				p->p_siglist &= ~mask;
1704 				goto psigout;
1705 			}
1706                         
1707 			/*
1708 			 *	Stop the task
1709 			 *	if task hasn't already been stopped by
1710 			 *	a signal.
1711 			 */
1712 			uth->uu_siglist &= ~mask;
1713 			p->p_siglist &= ~mask;
1714 			if (p->p_stat != SSTOP) {
1715 				p->p_xstat = signum;
1716 				stop(p);
1717 				if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0) {
1718 					struct proc *pp = p->p_pptr;
1719 
					/* Report the stop to the parent via SIGCHLD/siginfo. */
1720 					pp->si_pid = p->p_pid;
1721 					pp->si_status = p->p_xstat;
1722 					pp->si_code = CLD_STOPPED;
1723 					pp->si_uid = p->p_ucred->cr_ruid;
1724 					psignal(pp, SIGCHLD);
1725 				}
1726 			}
1727 			goto psigout;
1728 		}
1729 
1730 		switch (signum) {
1731 			/*
1732 			 *	Signals ignored by default have been dealt
1733 			 *	with already, since their bits are on in
1734 			 *	p_sigignore.
1735 			 */
1736 
1737 		case SIGKILL:
1738 			/*
1739 			 *	Kill signal always sets process running and
1740 			 *	unsuspends it.
1741 			 */
1742 			/*
1743 			 *	Process will be running after 'run'
1744 			 */
1745 			p->p_stat = SRUN;
1746 
1747 			thread_abort(sig_thread_act);
1748 
1749 			goto psigout;
1750 
1751 		case SIGCONT:
1752 			/*
1753 			 * Let the process run.  If it's sleeping on an
1754 			 * event, it remains so.
1755 			 */
1756 			if (p->p_flag & P_TTYSLEEP) {
1757 				p->p_flag &= ~P_TTYSLEEP;
1758 				wakeup(&p->p_siglist);
1759 			} else {
1760 				p->p_flag |= P_CONTINUED;
1761 				(void) task_resume(sig_task);
1762 			}
1763 			uth->uu_siglist &= ~mask;
1764 			p->p_siglist &= ~mask;
1765 			p->p_stat = SRUN;
1766 
1767 			goto psigout;
1768 
1769 		default:
1770 			/*
1771 			 * All other signals wake up the process, but don't
1772 			 * resume it.
1773 			 */
1774 			if (p->p_stat == SSTOP)
1775 				goto psigout;
1776 			goto run;
1777 		}
1778 	}
1779 	/*NOTREACHED*/
1780 run:
1781 	/*
1782 	 * If we're being traced (possibly because someone attached us
1783 	 * while we were stopped), check for a signal from the debugger.
1784 	 */
1785 	if (p->p_stat == SSTOP) {
1786 		if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0)
1787 			uth->uu_siglist |= sigmask(p->p_xstat);
1788 	} else {
1789 		/*
1790 	 	 * setrunnable(p) in BSD and
1791 	 	 * Wake up the thread if it is interruptible.
1792 	 	 */
1793 		p->p_stat = SRUN;
1794 		thread_abort_safely(sig_thread_act);
1795 	}
1796 psigout:
1797 	if (withlock) 
1798 		signal_unlock(p);
1799 	if (sw_funnel)
1800 		thread_funnel_set(kernel_flock, funnel_state);
1801 }
1802
1803
1804 /* Like psignal_lock(p, signum, withlock), but delivery is forced onto the given thread activation. */
/*
 * psignal_uthread: post signal signum directly to the given thread
 * activation thr_act (the owning proc is derived from the thread's
 * task).  Same delivery state machine as psignal_lock(), except the
 * target thread is fixed by the caller rather than chosen via
 * get_signalthread().  Takes and releases the signal lock on p.
 */
1805 void
1806 psignal_uthread(thr_act, signum)
1807 	thread_t thr_act;
1808 	int signum;
1809 {
1810 	struct proc *p;
1811 	register int prop;
1812 	register sig_t action;
1813 	thread_t	sig_thread_act;
1814 	register task_t		sig_task;
1815 	int mask;
1816 	struct uthread *uth;
1817 	kern_return_t kret;
	/* NOTE(review): error is set below but never returned — the
	 * function is void; kept for symmetry with related routines. */
1818 	int error = 0;
1819 
1820 	p = (struct proc *)get_bsdtask_info(get_threadtask(thr_act));
1821 	if ((u_int)signum >= NSIG || signum == 0)
1822 		panic("Invalid signal number in psignal_uthread"); 
1823 	mask = sigmask(signum);
1824 	prop = sigprop[signum];
1825 
1826 #if SIGNAL_DEBUG
1827         if(rdebug_proc && (p == rdebug_proc)) {
1828                 ram_printf(3);
1829         }
1830 #endif /* SIGNAL_DEBUG */
1831 
1832 	/*
1833 	 *	We will need the task pointer later.  Grab it now to
1834 	 *	check for a zombie process.  Also don't send signals
1835 	 *	to kernel internal tasks.
1836 	 */
1837 	if (((sig_task = p->task) == TASK_NULL)  || is_kerneltask(sig_task)) {
1838 		return;
1839 	}
1840 
1841 	sig_thread_act = thr_act;
1842 	/*
1843 	 * do not send signals to the process that has the thread
1844 	 * doing a reboot(). Not doing so will mark that thread aborted
1845 	 * and can cause IO failures wich will cause data loss.
1846 	 */
1847 	if (ISSET(p->p_flag, P_REBOOT)) {
1848 		return;
1849 	}
1850 
1851 	signal_lock(p);
1852 
1853 	/*
1854 	 *	Deliver the signal to the first thread in the task. This
1855 	 *	allows single threaded applications which use signals to
1856 	 *	be able to be linked with multithreaded libraries.  We have
1857 	 *	an implicit reference to the current thread, but need
1858 	 *	an explicit one otherwise.  The thread reference keeps
1859 	 *	the corresponding task data structures around too.  This
1860 	 *	reference is released by thread_deallocate.
1861 	 */
1862 	
	/* Untraced processes drop ignored signals immediately. */
1863 	if (((p->p_flag & P_TRACED) == 0) && (p->p_sigignore & mask))
1864 		goto puthout;
1865 
	/* Confirm the target activation can take a signal (and set its AST). */
1866 	kret = check_actforsig(sig_task, sig_thread_act, 1);
1867 
1868 	if (kret != KERN_SUCCESS) {
1869 		error = EINVAL;
1870 		goto puthout;
1871 	}
1872 
1873 
1874 	uth = get_bsdthread_info(sig_thread_act);
1875 
1876 	/*
1877 	 *	If proc is traced, always give parent a chance.
1878 	 */
1879 	if (p->p_flag & P_TRACED)
1880 		action = SIG_DFL;
1881 	else {
1882 		/*
1883 		 *	If the signal is being ignored,
1884 		 *	then we forget about it immediately.
1885 		 *	(Note: we don't set SIGCONT in p_sigignore,
1886 		 *	and if it is set to SIG_IGN,
1887 		 *	action will be SIG_DFL here.)
1888 		 */
1889 		if (p->p_sigignore & mask)
1890 			goto puthout;
1891 		/* sigwait takes precedence */
1892 		if (uth->uu_sigwait & mask)
1893 			action = KERN_SIG_WAIT;
1894 		else if (uth->uu_sigmask & mask)
1895 			action = KERN_SIG_HOLD;
1896 		else if (p->p_sigcatch & mask)
1897 			action = KERN_SIG_CATCH;
1898 		else
1899 			action = SIG_DFL;
1900 	}
1901 
1902 	if (p->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
1903 		(p->p_flag & P_TRACED) == 0)
1904 			p->p_nice = NZERO;
1905 
	/* A continue signal cancels pending stop signals, and vice versa. */
1906 	if (prop & SA_CONT) {
1907 		uth->uu_siglist &= ~stopsigmask;
1908 		p->p_siglist &= ~stopsigmask;
1909 	}
1910 
1911 	if (prop & SA_STOP) {
1912 		/*
1913 		 *	If sending a tty stop signal to a member of an orphaned
1914 		 *	process group, discard the signal here if the action
1915 		 *	is default; don't stop the process below if sleeping,
1916 		 *	and don't clear any pending SIGCONT.
1917 		 */
1918 		if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0 &&
1919 			action == SIG_DFL)
1920 			goto puthout;
1921 		uth->uu_siglist &= ~contsigmask;
1922 		p->p_siglist &= ~contsigmask;
1923 	}
1924 	uth->uu_siglist |= mask;
1925 	p->p_siglist |= mask; /* just for lame ones looking here */
1926 
1927 	/*
1928 	 * Defer further processing for signals which are held,
1929 	 * except that stopped processes must be continued by SIGCONT.
1930 	 */
1931 	if (action == KERN_SIG_HOLD && ((prop & SA_CONT) == 0 || p->p_stat != SSTOP))
1932 		goto puthout;
1933 
1934 	/*
1935 	 *	SIGKILL priority twiddling moved here from above because
1936 	 *	it needs sig_thread.  Could merge it into large switch
1937 	 *	below if we didn't care about priority for tracing
1938 	 *	as SIGKILL's action is always SIG_DFL.
1939 	 */
1940 	if ((signum == SIGKILL) && (p->p_nice > NZERO)) {
1941 		p->p_nice = NZERO;
1942 	}
1943 
1944 	/*
1945 	 *	Process is traced - wake it up (if not already
1946 	 *	stopped) so that it can discover the signal in
1947 	 *	issig() and stop for the parent.
1948 	 */
1949 	if (p->p_flag & P_TRACED) {
1950 	   	if (p->p_stat != SSTOP)
1951 			goto psurun;
1952 		else
1953 			goto puthout;
1954 	}
1955 
	/* A thread is blocked in sigwait() on this signal: hand it over. */
1956 	if (action == KERN_SIG_WAIT) {
1957 		uth->uu_sigwait = mask;
1958 		uth->uu_siglist &= ~mask;
1959 		p->p_siglist &= ~mask;
1960 		wakeup(&uth->uu_sigwait);
1961 		/* if it is SIGCONT resume whole process */
1962 		if (prop & SA_CONT) {
1963 			p->p_flag |= P_CONTINUED;
1964 			(void) task_resume(sig_task);
1965 		}
1966 		goto puthout;
1967 	}
1968 
1969 	if (action != SIG_DFL) {
1970 		/*
1971 		 *	User wants to catch the signal.
1972 		 *	Wake up the thread, but don't un-suspend it
1973 		 *	(except for SIGCONT).
1974 		 */
1975 		if (prop & SA_CONT) {
1976 			p->p_flag |= P_CONTINUED;
1977 			(void) task_resume(sig_task);
1978 		}
1979 		goto psurun;
1980 	} else {
1981 		/*	Default action - varies */
1982 		if (mask & stopsigmask) {
1983 			/*
1984 			 *	These are the signals which by default
1985 			 *	stop a process.
1986 			 *
1987 			 *	Don't clog system with children of init
1988 			 *	stopped from the keyboard.
1989 			 */
1990 			if (!(prop & SA_STOP) && p->p_pptr == initproc) {
1991 				psignal_lock(p, SIGKILL, 0);
1992 				uth->uu_siglist &= ~mask;
1993 				p->p_siglist &= ~mask;
1994 				goto puthout;
1995 			}
1996                         
1997 			/*
1998 			 *	Stop the task
1999 			 *	if task hasn't already been stopped by
2000 			 *	a signal.
2001 			 */
2002 			uth->uu_siglist &= ~mask;
2003 			p->p_siglist &= ~mask;
2004 			if (p->p_stat != SSTOP) {
2005 				p->p_xstat = signum;
2006 				if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0) {
2007 					struct proc *pp = p->p_pptr;
2008 
					/* Report the stop to the parent via SIGCHLD/siginfo. */
2009 					pp->si_pid = p->p_pid;
2010 					pp->si_status = p->p_xstat;
2011 					pp->si_code = CLD_STOPPED;
2012 					pp->si_uid = p->p_ucred->cr_ruid;
2013 					psignal(pp, SIGCHLD);
2014 				}
2015 				stop(p);
2016 			}
2017 			goto puthout;
2018 		}
2019 
2020 		switch (signum) {
2021 			/*
2022 			 *	Signals ignored by default have been dealt
2023 			 *	with already, since their bits are on in
2024 			 *	p_sigignore.
2025 			 */
2026 
2027 		case SIGKILL:
2028 			/*
2029 			 *	Kill signal always sets process running and
2030 			 *	unsuspends it.
2031 			 */
2032 			/*
2033 			 *	Process will be running after 'run'
2034 			 */
2035 			p->p_stat = SRUN;
2036 
2037 			thread_abort(sig_thread_act);
2038 
2039 			goto puthout;
2040 
2041 		case SIGCONT:
2042 			/*
2043 			 * Let the process run.  If it's sleeping on an
2044 			 * event, it remains so.
2045 			 */
2046 			if (p->p_flag & P_TTYSLEEP) {
2047 				p->p_flag &= ~P_TTYSLEEP;
2048 				wakeup(&p->p_siglist);
2049 			} else {
2050 				p->p_flag |= P_CONTINUED;
2051 				(void) task_resume(sig_task);
2052 			}
2053 			uth->uu_siglist &= ~mask;
2054 			p->p_siglist &= ~mask;
2055 			p->p_stat = SRUN;
2056 			goto puthout;
2057 
2058 		default:
2059 			/*
2060 			 * All other signals wake up the process, but don't
2061 			 * resume it.
2062 			 */
2063 			goto psurun;
2064 		}
2065 	}
2066 	/*NOTREACHED*/
2067 psurun:
2068 	/*
2069 	 * If we're being traced (possibly because someone attached us
2070 	 * while we were stopped), check for a signal from the debugger.
2071 	 */
2072 	if (p->p_stat == SSTOP) {
2073 		if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0) {
2074 			uth->uu_siglist |= sigmask(p->p_xstat); 
2075 			p->p_siglist |= sigmask(p->p_xstat);
2076 		}
2077 	} else {
2078 		/*
2079 	 	 * setrunnable(p) in BSD and
2080 	 	 * Wake up the thread if it is interruptible.
2081 	 	 */
2082 		p->p_stat = SRUN;
2083 		thread_abort_safely(sig_thread_act);
2084 	}
2085 
2086 puthout:
2087 	signal_unlock(p);
2088 }
2089
2090
2091 __inline__ void
2092 sig_lock_to_exit(struct proc *p)
2093 {
2094 thread_t self = current_thread();
2095
2096 p->exit_thread = self;
2097 (void) task_suspend(p->task);
2098 }
2099
/*
 * sig_try_locked: attempt to claim signal-processing rights on p.
 * Called (and returns) with the signal lock held; may drop and
 * re-take it while blocking.  Returns:
 *   1  — claimed; caller may proceed with signal processing
 *   0  — process is exiting (a non-self exit_thread gets aborted)
 *  -1  — this thread was asked to abort while waiting; caller cleans up
 */
2100 __inline__ int
2101 sig_try_locked(struct proc *p)
2102 {
2103 	thread_t self = current_thread();
2104 
	/* Wait out any in-progress sigwait exchange or exit. */
2105 	while (p->sigwait || p->exit_thread) {
2106 		if (p->exit_thread) {
2107 			if (p->exit_thread != self) {
2108 				/*
2109 				 * Already exiting - no signals.
2110 				 */
2111 				thread_abort(self);
2112 			}
2113 			return(0);
2114 		}
	/* Queue for a wakeup on sigwait_thread before releasing the lock. */
2115 		if(assert_wait_possible()) {
2116 			assert_wait((caddr_t)&p->sigwait_thread, 
2117 				(THREAD_INTERRUPTIBLE));
2118 		}
	/* Drop the signal lock across the block, then re-take it. */
2119 		signal_unlock(p);
2120 		thread_block(THREAD_CONTINUE_NULL);
2121 		signal_lock(p);
2122 		if (thread_should_abort(self)) {
2123 			/*
2124 			 * Terminate request - clean up.
2125 			 */
2126 			return -1;
2127 		}
2128 	}
2129 	return 1;
2130 }
2131
2132 /*
2133 * If the current process has received a signal (should be caught or cause
2134 * termination, should interrupt current syscall), return the signal number.
2135 * Stop signals with default action are processed immediately, then cleared;
2136 * they aren't returned. This is checked after each entry to the system for
2137 * a syscall or trap (though this can usually be done without calling issignal
2138 * by checking the pending signal masks in the CURSIG macro.) The normal call
2139 * sequence is
2140 *
2141 * while (signum = CURSIG(curproc))
2142 * postsig(signum);
2143 */
/*
 * issignal: return the number of a pending, deliverable signal for the
 * current thread of p, or 0 if none.  Takes the signal lock for the
 * duration (released on every return path).  Default-action stop
 * signals are processed here (process stopped, parent notified) and
 * not returned.  For traced processes this implements the ptrace stop
 * protocol: stop, notify the parent, block until the debugger decides
 * which signal (if any) to deliver via p_xstat.
 */
2144 int
2145 issignal(p)
2146 	register struct proc *p;
2147 {
2148 	register int signum, mask, prop, sigbits;
2149 	thread_t cur_act;
2150 	struct uthread * ut;
2151 	struct proc *pp;
2152 
2153 	cur_act = current_thread();
2154 
2155 #if SIGNAL_DEBUG
2156         if(rdebug_proc && (p == rdebug_proc)) {
2157                 ram_printf(3);
2158         }
2159 #endif /* SIGNAL_DEBUG */
2160 	signal_lock(p);
2161 
2162 	/*
2163 	 * Try to grab the signal lock.
2164 	 */
2165 	if (sig_try_locked(p) <= 0) {
2166 		signal_unlock(p);
2167 		return (0);
2168 	}
2169 
2170 	ut = get_bsdthread_info(cur_act);
2171 	for(;;) {
	/* Candidate signals: pending on this thread and not masked. */
2172 		sigbits = ut->uu_siglist  & ~ut->uu_sigmask;
2173 
	/* During vfork (P_PPWAIT) stop signals are deferred. */
2174 		if (p->p_flag & P_PPWAIT)
2175 			sigbits &= ~stopsigmask;
2176 		if (sigbits == 0) {	 	/* no signal to send */
2177 			signal_unlock(p);
2178 			return (0);
2179 		}
	/* Deliver the lowest-numbered pending signal first. */
2180 		signum = ffs((long)sigbits);
2181 		mask = sigmask(signum);
2182 		prop = sigprop[signum];
2183 
2184 		/*
2185 		 *	We should see pending but ignored signals
2186 		 *	only if P_TRACED was on when they were posted.
2187 		 */
2188 		if (mask & p->p_sigignore && (p->p_flag & P_TRACED) == 0) {
2189 			ut->uu_siglist &= ~mask;		/* take the signal! */
2190 			p->p_siglist &= ~mask;		/* take the signal! */
2191 			continue;
2192 		}
2193 		if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
2194 			register task_t	task;
2195 			/*
2196 			 * If traced, always stop, and stay
2197 			 * stopped until released by the debugger.
2198 			 */
2199 			/* ptrace debugging */
2200 			p->p_xstat = signum;
2201 			pp = p->p_pptr;
2202 			if (p->p_flag & P_SIGEXC) {
	/* P_SIGEXC: report via a Mach software exception instead of SIGCHLD. */
2203 				p->sigwait = TRUE;
2204 				p->sigwait_thread = cur_act;
2205 				p->p_stat = SSTOP;
2206 				p->p_flag &= ~(P_WAITED|P_CONTINUED);
2207 				ut->uu_siglist &= ~mask;	/* clear the old signal */
2208 				p->p_siglist &= ~mask;	/* clear the old signal */
2209 				signal_unlock(p);
2210 				do_bsdexception(EXC_SOFTWARE, EXC_SOFT_SIGNAL, signum);
2211 				signal_lock(p);
2212 			} else {
2213 //				panic("Unsupportef gdb option \n");;
	/* Classic ptrace stop: tell the parent, then block on sigwait. */
2214 				pp->si_pid = p->p_pid;
2215 				pp->si_status = p->p_xstat;
2216 				pp->si_code = CLD_TRAPPED;
2217 				pp->si_uid = p->p_ucred->cr_ruid;
2218 				psignal(pp, SIGCHLD);
2219 				/*
2220 			 	*	XXX Have to really stop for debuggers;
2221 			 	*	XXX stop() doesn't do the right thing.
2222 			 	*	XXX Inline the task_suspend because we
2223 			 	*	XXX have to diddle Unix state in the
2224 			 	*	XXX middle of it.
2225 			 	*/
2226 				task = p->task;
2227 				task_hold(task);
2228 				p->sigwait = TRUE;
2229 				p->sigwait_thread = cur_act;
2230 				p->p_stat = SSTOP;
2231 				p->p_flag &= ~(P_WAITED|P_CONTINUED);
2232 				ut->uu_siglist &= ~mask;	/* clear the old signal */
2233 				p->p_siglist &= ~mask;	/* clear the old signal */
2234 
2235 				wakeup((caddr_t)p->p_pptr);
2236 				signal_unlock(p);
2237 				assert_wait((caddr_t)&p->sigwait, (THREAD_INTERRUPTIBLE));
2238 				thread_block(THREAD_CONTINUE_NULL);
2239 				signal_lock(p);
2240 			}
2241 
	/* Debugger released us; let waiters on sigwait_thread proceed. */
2242 			p->sigwait = FALSE;
2243 			p->sigwait_thread = NULL;
2244 			wakeup((caddr_t)&p->sigwait_thread);
2245 
2246 			/*
2247 			 * This code is to detect when gdb is killed
2248 			 * even as the traced program is attached.
2249 			 * pgsignal would get the SIGKILL to traced program
2250 			 * That's what we are trying to see (I hope)
2251 			 */
2252 			if (ut->uu_siglist & sigmask(SIGKILL)) {
2253 				/*
2254 				 * Wait event may still be outstanding;
2255 				 * clear it, since sig_lock_to_exit will
2256 				 * wait.
2257 				 */
2258 				clear_wait(current_thread(), THREAD_INTERRUPTED);
2259 				sig_lock_to_exit(p);
2260 				/*
2261 			 	* Since this thread will be resumed
2262 			 	* to allow the current syscall to
2263 			 	* be completed, must save u_qsave
2264 			 	* before calling exit().  (Since exit()
2265 			 	* calls closef() which can trash u_qsave.)
2266 			 	*/
2267 				signal_unlock(p);
2268 				exit1(p,signum, (int *)NULL);
2269 				return(0);
2270 			}
2271 
2272 			/*
2273 			 *	We may have to quit
2274 			 */
2275 			if (thread_should_abort(current_thread())) {
2276 				signal_unlock(p);
2277 				return(0);
2278 			}
2279 			/*
2280 			 *	If parent wants us to take the signal,
2281 			 *	then it will leave it in p->p_xstat;
2282 			 *	otherwise we just look for signals again.
2283 			 */
2284 			signum = p->p_xstat;
2285 			if (signum == 0)
2286 				continue;
2287 			/*
2288 			 *	Put the new signal into p_siglist.  If the
2289 			 *	signal is being masked, look for other signals.
2290 			 */
2291 			mask = sigmask(signum);
2292 			ut->uu_siglist |= mask;
2293 			p->p_siglist |= mask;	/* just for lame ones looking here */
2294 			if (ut->uu_sigmask & mask)
2295 				continue;
2296 		}
2297 
2298 		/*
2299 		 *	Decide whether the signal should be returned.
2300 		 *	Return the signal's number, or fall through
2301 		 *	to clear it from the pending mask.
2302 		 */
2303 
2304 		switch ((long)p->p_sigacts->ps_sigact[signum]) {
2305 		
2306 		case (long)SIG_DFL:
2307 			/*
2308 			 *	Don't take default actions on system processes.
2309 			 */
2310 			if (p->p_pptr->p_pid == 0) {
2311 #if DIAGNOSTIC
2312 				/*
2313 				 * Are you sure you want to ignore SIGSEGV
2314 				 * in init? XXX
2315 				 */
2316 				printf("Process (pid %d) got signal %d\n",
2317 					p->p_pid, signum);
2318 #endif
2319 				break;	 		/* == ignore */
2320 			}
2321 			
2322 			/*
2323 			 *	If there is a pending stop signal to process
2324 			 *	with default action, stop here,
2325 			 *	then clear the signal.  However,
2326 			 *	if process is member of an orphaned
2327 			 *	process group, ignore tty stop signals.
2328 			 */
2329 			if (prop & SA_STOP) {
2330 				if (p->p_flag & P_TRACED ||
2331 		    			(p->p_pgrp->pg_jobc == 0 &&
2332 		    			prop & SA_TTYSTOP))
2333 					break;	/* == ignore */
2334 				if (p->p_stat != SSTOP) {
2335 					p->p_xstat = signum;
2336 					stop(p);
2337 					if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0) {
2338 						pp = p->p_pptr;
	/* Report the stop to the parent via SIGCHLD/siginfo. */
2339 						pp->si_pid = p->p_pid;
2340 						pp->si_status = p->p_xstat;
2341 						pp->si_code = CLD_STOPPED;
2342 						pp->si_uid = p->p_ucred->cr_ruid;
2343 						psignal(pp, SIGCHLD);
2344 					}
2345 				}
2346 				break;
2347 			} else if (prop & SA_IGNORE) {
2348 				/*
2349 				 *	Except for SIGCONT, shouldn't get here.
2350 				 *	Default action is to ignore; drop it.
2351 				 */
2352 				break;		/* == ignore */
2353 			} else {
2354 				ut->uu_siglist &= ~mask;	/* take the signal! */
2355 				p->p_siglist &= ~mask;		/* take the signal! */
2356 				signal_unlock(p);
2357 				return (signum);
2358 			}
2359 			/*NOTREACHED*/
2360 
2361 		case (long)SIG_IGN:
2362 			/*
2363 			 *	Masking above should prevent us ever trying
2364 			 *	to take action on an ignored signal other
2365 			 *	than SIGCONT, unless process is traced.
2366 			 */
2367 			if ((prop & SA_CONT) == 0 &&
2368 				(p->p_flag & P_TRACED) == 0)
2369 				printf("issignal\n");
2370 			break;		/* == ignore */
2371 
2372 		default:
2373 			/*
2374 			 *	This signal has an action, let
2375 			 *	postsig() process it.
2376 			 */
2377 			ut->uu_siglist &= ~mask;		/* take the signal! */
2378 			p->p_siglist &= ~mask;		/* take the signal! */
2379 			signal_unlock(p);
2380 			return (signum);
2381 		}
	/* Signal was ignored/handled in place: clear it and rescan. */
2382 		ut->uu_siglist &= ~mask;		/* take the signal! */
2383 		p->p_siglist &= ~mask;		/* take the signal! */
2384 	}
2385 	/* NOTREACHED */
2386 }
2387
2388 /* called from _sleep */
2389 int
2390 CURSIG(p)
2391 register struct proc *p;
2392 {
2393 register int signum, mask, prop, sigbits;
2394 thread_t cur_act;
2395 struct uthread * ut;
2396 int retnum = 0;
2397
2398
2399 cur_act = current_thread();
2400
2401 ut = get_bsdthread_info(cur_act);
2402
2403 if (ut->uu_siglist == 0)
2404 return (0);
2405
2406 if (((ut->uu_siglist & ~ut->uu_sigmask) == 0) && ((p->p_flag & P_TRACED) == 0))
2407 return (0);
2408
2409 sigbits = ut->uu_siglist & ~ut->uu_sigmask;
2410
2411 for(;;) {
2412 if (p->p_flag & P_PPWAIT)
2413 sigbits &= ~stopsigmask;
2414 if (sigbits == 0) { /* no signal to send */
2415 return (retnum);
2416 }
2417
2418 signum = ffs((long)sigbits);
2419 mask = sigmask(signum);
2420 prop = sigprop[signum];
2421
2422 /*
2423 * We should see pending but ignored signals
2424 * only if P_TRACED was on when they were posted.
2425 */
2426 if (mask & p->p_sigignore && (p->p_flag & P_TRACED) == 0) {
2427 continue;
2428 }
2429 if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
2430 /*
2431 * Put the new signal into p_siglist. If the
2432 * signal is being masked, look for other signals.
2433 */
2434 mask = sigmask(signum);
2435 if (ut->uu_sigmask & mask)
2436 continue;
2437 return(signum);
2438 }
2439
2440 /*
2441 * Decide whether the signal should be returned.
2442 * Return the signal's number, or fall through
2443 * to clear it from the pending mask.
2444 */
2445
2446 switch ((long)p->p_sigacts->ps_sigact[signum]) {
2447
2448 case (long)SIG_DFL:
2449 /*
2450 * Don't take default actions on system processes.
2451 */
2452 if (p->p_pptr->p_pid == 0) {
2453 #if DIAGNOSTIC
2454 /*
2455 * Are you sure you want to ignore SIGSEGV
2456 * in init? XXX
2457 */
2458 printf("Process (pid %d) got signal %d\n",
2459 p->p_pid, signum);
2460 #endif
2461 break; /* == ignore */
2462 }
2463
2464 /*
2465 * If there is a pending stop signal to process
2466 * with default action, stop here,
2467 * then clear the signal. However,
2468 * if process is member of an orphaned
2469 * process group, ignore tty stop signals.
2470 */
2471 if (prop & SA_STOP) {
2472 if (p->p_flag & P_TRACED ||
2473 (p->p_pgrp->pg_jobc == 0 &&
2474 prop & SA_TTYSTOP))
2475 break; /* == ignore */
2476 retnum = signum;
2477 break;
2478 } else if (prop & SA_IGNORE) {
2479 /*
2480 * Except for SIGCONT, shouldn't get here.
2481 * Default action is to ignore; drop it.
2482 */
2483 break; /* == ignore */
2484 } else {
2485 return (signum);
2486 }
2487 /*NOTREACHED*/
2488
2489 case (long)SIG_IGN:
2490 /*
2491 * Masking above should prevent us ever trying
2492 * to take action on an ignored signal other
2493 * than SIGCONT, unless process is traced.
2494 */
2495 if ((prop & SA_CONT) == 0 &&
2496 (p->p_flag & P_TRACED) == 0)
2497 printf("issignal\n");
2498 break; /* == ignore */
2499
2500 default:
2501 /*
2502 * This signal has an action, let
2503 * postsig() process it.
2504 */
2505 return (signum);
2506 }
2507 sigbits &= ~mask; /* take the signal! */
2508 }
2509 /* NOTREACHED */
2510 }
2511
2512 /*
2513 * Put the argument process into the stopped state and notify the parent
2514 * via wakeup. Signals are handled elsewhere. The process must not be
2515 * on the run queue.
2516 */
2517 void
2518 stop(p)
2519 register struct proc *p;
2520 {
2521 p->p_stat = SSTOP;
2522 p->p_flag &= ~(P_WAITED|P_CONTINUED);
2523 if (p->p_pptr->p_stat != SSTOP)
2524 wakeup((caddr_t)p->p_pptr);
2525 (void) task_suspend(p->task); /*XXX*/
2526 }
2527
/*
 * Take the action for the specified signal
 * from the current set of pending signals.
 *
 * Acquires the process signal lock, removes the signal from the pending
 * sets, and either terminates the process (SIG_DFL -- the non-fatal
 * default cases were filtered out before we got here) or arranges for
 * the user-mode handler to run via sendsig().
 */
void
postsig(int signum)
{
	struct proc *p = current_proc();
	struct sigacts *ps = p->p_sigacts;
	user_addr_t catcher;
	u_long code;
	int mask, returnmask;
	struct uthread * ut;

#if DIAGNOSTIC
	if (signum == 0)
		panic("postsig");
	/*
	 * This must be called on master cpu
	 */
	if (cpu_number() != master_cpu)
		panic("psig not on master");
#endif

	signal_lock(p);
	/*
	 * Try to grab the signal lock.
	 */
	if (sig_try_locked(p) <= 0) {
		signal_unlock(p);
		return;
	}

	/* Clear the signal from both the per-thread pending set and the
	 * (legacy) per-process set before acting on it. */
	ut = (struct uthread *)get_bsdthread_info(current_thread());
	mask = sigmask(signum);
	ut->uu_siglist &= ~mask;
	p->p_siglist &= ~mask;
	catcher = ps->ps_sigact[signum];
#if KTRACE
	//LP64: catcher argument is a 64 bit user space handler address
	if (KTRPOINT(p, KTR_PSIG))
		ktrpsig(p->p_tracep,
		    signum, CAST_DOWN(void *,catcher), ut->uu_flag & UT_SAS_OLDMASK ?
		    &ut->uu_oldmask : &ut->uu_sigmask, 0);
#endif
	if (catcher == SIG_DFL) {
		/*
		 * Default catcher, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		/* called with signal_lock() held */
		sigexit_locked(p, signum);
		return;
		/* NOTREACHED */
	} else {
		/*
		 * If we get here, the signal must be caught.
		 */
#if DIAGNOSTIC
		if (catcher == SIG_IGN || (ut->uu_sigmask & mask))
			log(LOG_WARNING,
				"postsig: processing masked or ignored signal\n");
#endif
		/*
		 * Set the new mask value and also defer further
		 * occurences of this signal.
		 *
		 * Special case: user has done a sigpause.  Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigpause is what we want
		 * restored after the signal processing is completed.
		 */
		if (ut->uu_flag & UT_SAS_OLDMASK) {
			returnmask = ut->uu_oldmask;
			ut->uu_flag &= ~UT_SAS_OLDMASK;
			ut->uu_oldmask = 0;
		} else
			returnmask = ut->uu_sigmask;
		ut->uu_sigmask |= ps->ps_catchmask[signum];
		if ((ps->ps_signodefer & mask) == 0)
			ut->uu_sigmask |= mask;
		/* One-shot handler (ps_sigreset): restore default disposition
		 * before delivery; SIGILL and SIGTRAP are exempted. */
		if ((signum != SIGILL) && (signum != SIGTRAP) && (ps->ps_sigreset & mask)) {
			if ((signum != SIGCONT) && (sigprop[signum] & SA_IGNORE))
				p->p_sigignore |= mask;
			ps->ps_sigact[signum] = SIG_DFL;
			ps->ps_siginfo &= ~mask;
			ps->ps_signodefer &= ~mask;
		}
#ifdef __ppc__
		/* Needs to disable to run in user mode */
		if (signum == SIGFPE) {
			thread_enable_fpe(current_thread(), 0);
		}
#endif  /* __ppc__ */

		/* Pass the saved fault code along only if it belongs to
		 * this signal; consume it either way. */
		if (ps->ps_sig != signum) {
			code = 0;
		} else {
			code = ps->ps_code;
			ps->ps_code = 0;
		}
		p->p_stats->p_ru.ru_nsignals++;
		sendsig(p, catcher, signum, returnmask, code);
	}
	signal_unlock(p);
}
2634
/*
 * Force the current process to exit with the specified signal, dumping core
 * if appropriate.  We bypass the normal tests for masked and caught signals,
 * allowing unrecoverable failures to terminate the process without changing
 * signal state.  Mark the accounting record with the signal termination.
 * If dumping core, save the signal number for the debugger.  Calls exit and
 * does not return.
 */
/* called with signal lock */
void
sigexit_locked(p, signum)
	register struct proc *p;
	int signum;
{

	/* Convert the held signal lock into exit state. */
	sig_lock_to_exit(p);
	p->p_acflag |= AXSIG;	/* note signal termination in accounting */
	if (sigprop[signum] & SA_CORE) {
		p->p_sigacts->ps_sig = signum;	/* saved for the debugger */
		/* The signal lock is dropped before the dump is attempted. */
		signal_unlock(p);
		if (coredump(p) == 0)
			signum |= WCOREFLAG;	/* report core in wait status */
	} else
		signal_unlock(p);

	exit1(p, W_EXITCODE(0, signum), (int *)NULL);
	/* NOTREACHED */
}
2663
2664
2665 static int
2666 filt_sigattach(struct knote *kn)
2667 {
2668 struct proc *p = current_proc();
2669 boolean_t funnel_state;
2670
2671 kn->kn_ptr.p_proc = p;
2672 kn->kn_flags |= EV_CLEAR; /* automatically set */
2673
2674 /* Take the funnel to protect the proc while adding to the list */
2675 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2676 KNOTE_ATTACH(&p->p_klist, kn);
2677 thread_funnel_set(kernel_flock, funnel_state);
2678
2679 return (0);
2680 }
2681
2682 static void
2683 filt_sigdetach(struct knote *kn)
2684 {
2685 struct proc *p = kn->kn_ptr.p_proc;
2686 boolean_t funnel_state;
2687
2688 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2689 KNOTE_DETACH(&p->p_klist, kn);
2690 thread_funnel_set(kernel_flock, funnel_state);
2691 }
2692
2693 /*
2694 * signal knotes are shared with proc knotes, so we apply a mask to
2695 * the hint in order to differentiate them from process hints. This
2696 * could be avoided by using a signal-specific knote list, but probably
2697 * isn't worth the trouble.
2698 */
2699 static int
2700 filt_signal(struct knote *kn, long hint)
2701 {
2702
2703 if (hint & NOTE_SIGNAL) {
2704 hint &= ~NOTE_SIGNAL;
2705
2706 if (kn->kn_id == (unsigned int)hint)
2707 kn->kn_data++;
2708 }
2709 return (kn->kn_data != 0);
2710 }
2711
2712
/*
 * bsd_ast: BSD-level AST handler for a thread.  Charges any deferred
 * profiling tick, delivers all pending deliverable signals via
 * issignal()/postsig(), and on the very first invocation system-wide
 * runs bsdinit_task().  All work is done under the kernel funnel.
 */
void
bsd_ast(thread_t thr_act)
{
	struct proc *p = current_proc();
	struct uthread *ut = get_bsdthread_info(thr_act);
	int	signum;
	user_addr_t pc;
	boolean_t funnel_state;
	/* set once, the first time any thread reaches this point */
	static int bsd_init_done = 0;

	if (p == NULL)
		return;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

	/* Deferred profiling tick owed (P_OWEUPC): credit it at the
	 * user PC, then clear the flag. */
	if ((p->p_flag & P_OWEUPC) && (p->p_flag & P_PROFIL)) {
		pc = get_useraddr();
		addupc_task(p, pc, 1);
		p->p_flag &= ~P_OWEUPC;
	}

	/* Drain every pending deliverable signal before returning. */
	if (CHECK_SIGNALS(p, current_thread(), ut)) {
		while ( (signum = issignal(p)) )
			postsig(signum);
	}
	if (!bsd_init_done) {
		bsd_init_done = 1;
		bsdinit_task();
	}

	(void) thread_funnel_set(kernel_flock, FALSE);
}
2745
/*
 * The following routines are invoked via callout from bsd_hardclock
 * so that the psignal calls run in a thread context and under the funnel.
 */
2750 void
2751 psignal_vtalarm(struct proc *p)
2752 {
2753 boolean_t funnel_state;
2754
2755 if (p == NULL)
2756 return;
2757 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2758 psignal_lock(p, SIGVTALRM, 1);
2759 (void) thread_funnel_set(kernel_flock, FALSE);
2760 }
2761
2762 void
2763 psignal_xcpu(struct proc *p)
2764 {
2765 boolean_t funnel_state;
2766
2767 if (p == NULL)
2768 return;
2769 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2770 psignal_lock(p, SIGXCPU, 1);
2771 (void) thread_funnel_set(kernel_flock, FALSE);
2772 }
2773
2774 void
2775 psignal_sigprof(struct proc *p)
2776 {
2777 boolean_t funnel_state;
2778
2779 if (p == NULL)
2780 return;
2781 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2782 psignal_lock(p, SIGPROF, 1);
2783 (void) thread_funnel_set(kernel_flock, FALSE);
2784 }
2785
2786 /* ptrace set runnalbe */
2787 void
2788 pt_setrunnable(struct proc *p)
2789 {
2790 task_t task;
2791
2792 task = p->task;
2793
2794 if (p->p_flag & P_TRACED) {
2795 p->p_stat = SRUN;
2796 if (p->sigwait) {
2797 wakeup((caddr_t)&(p->sigwait));
2798 task_release(task);
2799 }
2800 }
2801 }
2802
2803
2804 kern_return_t
2805 do_bsdexception(
2806 int exc,
2807 int code,
2808 int sub)
2809 {
2810 exception_data_type_t codes[EXCEPTION_CODE_MAX];
2811
2812 codes[0] = code;
2813 codes[1] = sub;
2814 return(bsd_exception(exc, codes, 2));
2815 }
2816
/*
 * proc_pendingsignals: return the subset of 'mask' that is pending and
 * deliverable somewhere in process 'p' -- pending on a thread, not
 * blocked by that thread's signal mask, and not ignored by the process.
 * Returns 0 for a process that is already in proc exit.
 */
int
proc_pendingsignals(struct proc *p, sigset_t mask)
{
	struct uthread * uth;
	thread_t th;
	sigset_t bits = 0;
	int error;

	/* If the process is in proc exit return no signal info */
	if (p->p_lflag & P_LPEXIT)
		return(0);

	/* duplicate the signal lock code to enable recursion; as exit
	 * holds the lock too long. All this code is being reworked
	 * this is just a workaround for regressions till new code
	 * arrives.
	 */
ppend_retry:
	error = lockmgr((struct lock__bsd__ *)&p->signal_lock[0], (LK_EXCLUSIVE | LK_CANRECURSE), 0, (struct proc *)0);
	if (error == EINTR)
		goto ppend_retry;

	/* In a vfork shadow, only the borrowed vfork thread's pending
	 * state is relevant. */
	if ((p->p_flag & P_INVFORK) && p->p_vforkact) {
		th = p->p_vforkact;
		uth = (struct uthread *)get_bsdthread_info(th);
		if (uth) {
			bits = (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
		}
		goto out;
	}

	/* Otherwise accumulate deliverable bits across all threads. */
	bits = 0;
	TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
		bits |= (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
	}
out:
	signal_unlock(p);
	return(bits);
}
2856
2857 int
2858 thread_issignal(proc_t p, thread_t th, sigset_t mask)
2859 {
2860 struct uthread * uth;
2861 sigset_t bits=0;
2862
2863
2864 uth = (struct uthread *)get_bsdthread_info(th);
2865 if (uth) {
2866 bits = (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
2867 }
2868 return(bits);
2869 }
2870