[apple/xnu.git] / bsd / kern / kern_sig.c (daa877ef96493af80d881ed51d64ac8148cd6437)
1 /*
2 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /* Copyright (c) 1995-1998 Apple Computer, Inc. All Rights Reserved */
31 /*
32 * Copyright (c) 1982, 1986, 1989, 1991, 1993
33 * The Regents of the University of California. All rights reserved.
34 * (c) UNIX System Laboratories, Inc.
35 * All or some portions of this file are derived from material licensed
36 * to the University of California by American Telephone and Telegraph
37 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
38 * the permission of UNIX System Laboratories, Inc.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by the University of
51 * California, Berkeley and its contributors.
52 * 4. Neither the name of the University nor the names of its contributors
53 * may be used to endorse or promote products derived from this software
54 * without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
60 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * SUCH DAMAGE.
67 *
68 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94
69 */
70
71 #define SIGPROP /* include signal properties table */
72 #include <sys/param.h>
73 #include <sys/resourcevar.h>
74 #include <sys/proc_internal.h>
75 #include <sys/kauth.h>
76 #include <sys/systm.h>
77 #include <sys/timeb.h>
78 #include <sys/times.h>
79 #include <sys/acct.h>
80 #include <sys/file_internal.h>
81 #include <sys/kernel.h>
82 #include <sys/wait.h>
83 #include <sys/signalvar.h>
84 #if KTRACE
85 #include <sys/ktrace.h>
86 #endif
87 #include <sys/syslog.h>
88 #include <sys/stat.h>
89 #include <sys/lock.h>
90 #include <sys/kdebug.h>
91
92 #include <sys/mount.h>
93 #include <sys/sysproto.h>
94
95 #include <bsm/audit_kernel.h>
96
97 #include <machine/spl.h>
98
99 #include <kern/cpu_number.h>
100
101 #include <sys/vm.h>
102 #include <sys/user.h> /* for coredump */
103 #include <kern/ast.h> /* for APC support */
104 #include <kern/lock.h>
105 #include <kern/task.h> /* extern void *get_bsdtask_info(task_t); */
106 #include <kern/thread.h>
107 #include <kern/sched_prim.h>
108 #include <kern/thread_call.h>
109 #include <mach/exception.h>
110 #include <mach/task.h>
111 #include <mach/thread_act.h>
112
113 /*
114 * Missing prototypes that Mach should export
115 *
116 * +++
117 */
118 extern int thread_enable_fpe(thread_t act, int onoff);
119 extern void unix_syscall_return(int error);
120 extern thread_t port_name_to_thread(mach_port_name_t port_name);
121 extern kern_return_t check_actforsig(task_t task, thread_t thread, int setast);
122 extern kern_return_t get_signalact(task_t , thread_t *, int);
123 extern boolean_t thread_should_abort(thread_t);
124 extern unsigned int get_useraddr(void);
125
126 /*
127 * ---
128 */
129
130 extern void doexception(int exc, int code, int sub);
131
132 void stop(struct proc *p);
133 int cansignal(struct proc *, kauth_cred_t, struct proc *, int);
134 int killpg1(struct proc *, int, int, int);
135 void sigexit_locked(struct proc *, int);
136 int setsigvec(struct proc *, int, struct __user_sigaction *);
137 void exit1(struct proc *, int, int *);
138 void psignal_uthread(thread_t, int);
139 kern_return_t do_bsdexception(int, int, int);
140 void __posix_sem_syscall_return(kern_return_t);
141
142 /* implementations in osfmk/kern/sync_sema.c. We do not want port.h in this scope, so void * them */
143 kern_return_t semaphore_timedwait_signal_trap_internal(void *, void *,time_t, int32_t, void (*)(int));
144 kern_return_t semaphore_timedwait_trap_internal(void *, time_t, int32_t, void (*)(int));
145 kern_return_t semaphore_wait_signal_trap_internal(void *, void *, void (*)(int));
146 kern_return_t semaphore_wait_trap_internal(void *, void (*)(int));
147
148 static int filt_sigattach(struct knote *kn);
149 static void filt_sigdetach(struct knote *kn);
150 static int filt_signal(struct knote *kn, long hint);
151
152 struct filterops sig_filtops =
153 { 0, filt_sigattach, filt_sigdetach, filt_signal };
154
155
156 /*
157 * NOTE: Source and target may *NOT* overlap! (target is smaller)
158 */
159 static void
160 sigaltstack_64to32(struct user_sigaltstack *in, struct sigaltstack *out)
161 {
162 out->ss_sp = CAST_DOWN(void *,in->ss_sp);
163 out->ss_size = in->ss_size;
164 out->ss_flags = in->ss_flags;
165 }
166
167 /*
168  * NOTE: Source and target are permitted to overlap! (source is smaller);
169 * this works because we copy fields in order from the end of the struct to
170 * the beginning.
171 */
172 static void
173 sigaltstack_32to64(struct sigaltstack *in, struct user_sigaltstack *out)
174 {
175 out->ss_flags = in->ss_flags;
176 out->ss_size = in->ss_size;
177 out->ss_sp = CAST_USER_ADDR_T(in->ss_sp);
178 }
179
180 static void
181 sigaction_64to32(struct user_sigaction *in, struct sigaction *out)
182 {
183 /* This assumes 32 bit __sa_handler is of type sig_t */
184 out->__sigaction_u.__sa_handler = CAST_DOWN(sig_t,in->__sigaction_u.__sa_handler);
185 out->sa_mask = in->sa_mask;
186 out->sa_flags = in->sa_flags;
187 }
188
189 static void
190 __sigaction_32to64(struct __sigaction *in, struct __user_sigaction *out)
191 {
192 out->__sigaction_u.__sa_handler = CAST_USER_ADDR_T(in->__sigaction_u.__sa_handler);
193 out->sa_tramp = CAST_USER_ADDR_T(in->sa_tramp);
194 out->sa_mask = in->sa_mask;
195 out->sa_flags = in->sa_flags;
196 }
197
198
199 #if SIGNAL_DEBUG
200 void ram_printf(int);
201 int ram_debug=0;
202 unsigned int rdebug_proc=0;
203 void
204 ram_printf(int x)
205 {
206 printf("x is %d",x);
207
208 }
209 #endif /* SIGNAL_DEBUG */
210
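/*
 * signal_lock()/signal_unlock() take and release the per-process BSD
 * signal lock.  signal_lock() retries if the lockmgr sleep is interrupted
 * (EINTR); when built with DIAGNOSTIC and SIGNAL_DEBUG on ppc, both
 * routines also record the callers' return addresses in the proc for
 * debugging.
 */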
211 int
212 signal_lock(struct proc *p)
213 {
214 int error = 0;
215 #if DIAGNOSTIC
216 #if SIGNAL_DEBUG
217 #ifdef __ppc__
218 {
219 int register sp, *fp, numsaved;
220
221 __asm__ volatile("mr %0,r1" : "=r" (sp));
222
223 fp = (int *)*((int *)sp);
224 for (numsaved = 0; numsaved < 3; numsaved++) {
225 p->lockpc[numsaved] = fp[2];
226 if ((int)fp <= 0)
227 break;
228 fp = (int *)*fp;
229 }
230 }
231 #endif /* __ppc__ */
232 #endif /* SIGNAL_DEBUG */
233 #endif /* DIAGNOSTIC */
234
235 siglock_retry:
236 error = lockmgr((struct lock__bsd__ *)&p->signal_lock[0], LK_EXCLUSIVE, 0, (struct proc *)0);
237 if (error == EINTR)
238 goto siglock_retry;
239 return(error);
240 }
241
242 int
243 signal_unlock(struct proc *p)
244 {
245 #if DIAGNOSTIC
246 #if SIGNAL_DEBUG
247 #ifdef __ppc__
248 {
249 int register sp, *fp, numsaved;
250
251 __asm__ volatile("mr %0,r1" : "=r" (sp));
252
253 fp = (int *)*((int *)sp);
254 for (numsaved = 0; numsaved < 3; numsaved++) {
255 p->unlockpc[numsaved] = fp[2];
256 if ((int)fp <= 0)
257 break;
258 fp = (int *)*fp;
259 }
260 }
261 #endif /* __ppc__ */
262 #endif /* SIGNAL_DEBUG */
263 #endif /* DIAGNOSTIC */
264
265 /* TBD: check p last arg */
266 return(lockmgr((struct lock__bsd__ *)&p->signal_lock[0], LK_RELEASE, (simple_lock_t)0, (struct proc *)0));
267 }
268
269 void
270 signal_setast(sig_actthread)
271 thread_t sig_actthread;
272 {
273 act_set_astbsd(sig_actthread);
274 }
275
276 /*
277 * Can process p, with ucred uc, send the signal signum to process q?
278 */
279 int
280 cansignal(p, uc, q, signum)
281 struct proc *p;
282 kauth_cred_t uc;
283 struct proc *q;
284 int signum;
285 {
286 /* you can signal yourself */
287 if (p == q)
288 return(1);
289
290 if (!suser(uc, NULL))
291 return (1); /* root can always signal */
292
293 if (signum == SIGCONT && q->p_session == p->p_session)
294 return (1); /* SIGCONT in session */
295
296 /*
297 * Using kill(), only certain signals can be sent to setugid
298 * child processes
299 */
300 if (q->p_flag & P_SUGID) {
301 switch (signum) {
302 case 0:
303 case SIGKILL:
304 case SIGINT:
305 case SIGTERM:
306 case SIGSTOP:
307 case SIGTTIN:
308 case SIGTTOU:
309 case SIGTSTP:
310 case SIGHUP:
311 case SIGUSR1:
312 case SIGUSR2:
313 if (uc->cr_ruid == q->p_ucred->cr_ruid ||
314 kauth_cred_getuid(uc) == q->p_ucred->cr_ruid ||
315 uc->cr_ruid == kauth_cred_getuid(q->p_ucred) ||
316 kauth_cred_getuid(uc) == kauth_cred_getuid(q->p_ucred))
317 return (1);
318 }
319 return (0);
320 }
321
322 /* XXX
323 * because the P_SUGID test exists, this has extra tests which
324 * could be removed.
325 */
326 if (uc->cr_ruid == q->p_ucred->cr_ruid ||
327 uc->cr_ruid == q->p_ucred->cr_svuid ||
328 kauth_cred_getuid(uc) == q->p_ucred->cr_ruid ||
329 kauth_cred_getuid(uc) == q->p_ucred->cr_svuid ||
330 uc->cr_ruid == kauth_cred_getuid(q->p_ucred) ||
331 kauth_cred_getuid(uc) == kauth_cred_getuid(q->p_ucred))
332 return (1);
333 return (0);
334 }
335
336
337 /* ARGSUSED */
338 int
339 sigaction(struct proc *p, register struct sigaction_args *uap, __unused register_t *retval)
340 {
341 struct user_sigaction vec;
342 struct __user_sigaction __vec;
343
344 struct user_sigaction *sa = &vec;
345 register struct sigacts *ps = p->p_sigacts;
346
347 register int signum;
348 int bit, error=0;
349
350 signum = uap->signum;
351 if (signum <= 0 || signum >= NSIG ||
352 signum == SIGKILL || signum == SIGSTOP)
353 return (EINVAL);
354
355 if (uap->osa) {
356 sa->sa_handler = ps->ps_sigact[signum];
357 sa->sa_mask = ps->ps_catchmask[signum];
358 bit = sigmask(signum);
359 sa->sa_flags = 0;
360 if ((ps->ps_sigonstack & bit) != 0)
361 sa->sa_flags |= SA_ONSTACK;
362 if ((ps->ps_sigintr & bit) == 0)
363 sa->sa_flags |= SA_RESTART;
364 if (ps->ps_siginfo & bit)
365 sa->sa_flags |= SA_SIGINFO;
366 if (ps->ps_signodefer & bit)
367 sa->sa_flags |= SA_NODEFER;
368 if (ps->ps_64regset & bit)
369 sa->sa_flags |= SA_64REGSET;
370 if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDSTOP))
371 sa->sa_flags |= SA_NOCLDSTOP;
372 if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDWAIT))
373 sa->sa_flags |= SA_NOCLDWAIT;
374
375 if (IS_64BIT_PROCESS(p)) {
376 error = copyout(sa, uap->osa, sizeof(struct user_sigaction));
377 } else {
378 struct sigaction vec32;
379 sigaction_64to32(sa, &vec32);
380 error = copyout(&vec32, uap->osa, sizeof(struct sigaction));
381 }
382 if (error)
383 return (error);
384 }
385 if (uap->nsa) {
386 if (IS_64BIT_PROCESS(p)) {
387 error = copyin(uap->nsa, &__vec, sizeof(struct __user_sigaction));
388 } else {
389 struct __sigaction __vec32;
390 error = copyin(uap->nsa, &__vec32, sizeof(struct __sigaction));
391 __sigaction_32to64(&__vec32, &__vec);
392 }
393 if (error)
394 return (error);
395 error = setsigvec(p, signum, &__vec);
396 }
397 return (error);
398 }
399
400 /* Routines to manipulate bits on all threads */
401 int
402 clear_procsiglist(struct proc *p, int bit)
403 {
404 struct uthread * uth;
405 thread_t thact;
406
407 signal_lock(p);
408
409 if ((p->p_flag & P_INVFORK) && p->p_vforkact) {
410 thact = p->p_vforkact;
411 uth = (struct uthread *)get_bsdthread_info(thact);
412 if (uth) {
413 uth->uu_siglist &= ~bit;
414 }
415 p->p_siglist &= ~bit;
416 signal_unlock(p);
417 return(0);
418 }
419
420 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
421 uth->uu_siglist &= ~bit;
422 }
423 p->p_siglist &= ~bit;
424 signal_unlock(p);
425 return(0);
426 }
427
428
429 static int
430 unblock_procsigmask(struct proc *p, int bit)
431 {
432 struct uthread * uth;
433 thread_t thact;
434
435 signal_lock(p);
436 if ((p->p_flag & P_INVFORK) && p->p_vforkact) {
437 thact = p->p_vforkact;
438 uth = (struct uthread *)get_bsdthread_info(thact);
439 if (uth) {
440 uth->uu_sigmask &= ~bit;
441 }
442 p->p_sigmask &= ~bit;
443 signal_unlock(p);
444 return(0);
445 }
446 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
447 uth->uu_sigmask &= ~bit;
448 }
449 p->p_sigmask &= ~bit;
450 signal_unlock(p);
451 return(0);
452 }
453
454
455 static int
456 block_procsigmask(struct proc *p, int bit)
457 {
458 struct uthread * uth;
459 thread_t thact;
460
461 signal_lock(p);
462 if ((p->p_flag & P_INVFORK) && p->p_vforkact) {
463 thact = p->p_vforkact;
464 uth = (struct uthread *)get_bsdthread_info(thact);
465 if (uth) {
466 uth->uu_sigmask |= bit;
467 }
468 p->p_sigmask |= bit;
469 signal_unlock(p);
470 return(0);
471 }
472 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
473 uth->uu_sigmask |= bit;
474 }
475 p->p_sigmask |= bit;
476 signal_unlock(p);
477 return(0);
478 }
479
480 int
481 set_procsigmask(struct proc *p, int bit)
482 {
483 struct uthread * uth;
484 thread_t thact;
485
486 signal_lock(p);
487 if ((p->p_flag & P_INVFORK) && p->p_vforkact) {
488 thact = p->p_vforkact;
489 uth = (struct uthread *)get_bsdthread_info(thact);
490 if (uth) {
491 uth->uu_sigmask = bit;
492 }
493 p->p_sigmask = bit;
494 signal_unlock(p);
495 return(0);
496 }
497 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
498 uth->uu_sigmask = bit;
499 }
500 p->p_sigmask = bit;
501 signal_unlock(p);
502 return(0);
503 }
504
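/*
 * setsigvec -- install the signal action described by sa for signum in
 * process p: record the handler, trampoline and catch mask in p_sigacts,
 * translate the SA_* flags into the per-signal bit sets, and update
 * p_sigignore/p_sigcatch.  SIGKILL and SIGSTOP may only be set to SIG_DFL.
 */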
505 /* XXX should be static? */
506 int
507 setsigvec(struct proc *p, int signum, struct __user_sigaction *sa)
508 {
509 register struct sigacts *ps = p->p_sigacts;
510 register int bit;
511
512 if ((signum == SIGKILL || signum == SIGSTOP) &&
513 sa->sa_handler != SIG_DFL)
514 return(EINVAL);
515 bit = sigmask(signum);
516 /*
517 * Change setting atomically.
518 */
519 ps->ps_sigact[signum] = sa->sa_handler;
520 ps->ps_trampact[signum] = sa->sa_tramp;
521 ps->ps_catchmask[signum] = sa->sa_mask &~ sigcantmask;
522 if (sa->sa_flags & SA_SIGINFO)
523 ps->ps_siginfo |= bit;
524 else
525 ps->ps_siginfo &= ~bit;
526 if (sa->sa_flags & SA_64REGSET)
527 ps->ps_64regset |= bit;
528 else
529 ps->ps_64regset &= ~bit;
530 if ((sa->sa_flags & SA_RESTART) == 0)
531 ps->ps_sigintr |= bit;
532 else
533 ps->ps_sigintr &= ~bit;
534 if (sa->sa_flags & SA_ONSTACK)
535 ps->ps_sigonstack |= bit;
536 else
537 ps->ps_sigonstack &= ~bit;
538 if (sa->sa_flags & SA_USERTRAMP)
539 ps->ps_usertramp |= bit;
540 else
541 ps->ps_usertramp &= ~bit;
542 if (sa->sa_flags & SA_RESETHAND)
543 ps->ps_sigreset |= bit;
544 else
545 ps->ps_sigreset &= ~bit;
546 if (sa->sa_flags & SA_NODEFER)
547 ps->ps_signodefer |= bit;
548 else
549 ps->ps_signodefer &= ~bit;
550 if (signum == SIGCHLD) {
551 if (sa->sa_flags & SA_NOCLDSTOP)
552 p->p_flag |= P_NOCLDSTOP;
553 else
554 p->p_flag &= ~P_NOCLDSTOP;
555 if ((sa->sa_flags & SA_NOCLDWAIT) || (sa->sa_handler == SIG_IGN))
556 p->p_flag |= P_NOCLDWAIT;
557 else
558 p->p_flag &= ~P_NOCLDWAIT;
559 }
560
561 #ifdef __ppc__
562 if (signum == SIGFPE) {
563 if (sa->sa_handler == SIG_DFL || sa->sa_handler == SIG_IGN)
564 thread_enable_fpe(current_thread(), 0);
565 else
566 thread_enable_fpe(current_thread(), 1);
567 }
568 #endif /* __ppc__ */
569 /*
570 * Set bit in p_sigignore for signals that are set to SIG_IGN,
571 * and for signals set to SIG_DFL where the default is to ignore.
572 * However, don't put SIGCONT in p_sigignore,
573 * as we have to restart the process.
574 */
575 if (sa->sa_handler == SIG_IGN ||
576 (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) {
577
578 clear_procsiglist(p, bit);
579 if (signum != SIGCONT)
580 p->p_sigignore |= bit; /* easier in psignal */
581 p->p_sigcatch &= ~bit;
582 } else {
583 p->p_sigignore &= ~bit;
584 if (sa->sa_handler == SIG_DFL)
585 p->p_sigcatch &= ~bit;
586 else
587 p->p_sigcatch |= bit;
588 }
589 return(0);
590 }
591
592 /*
593 * Initialize signal state for process 0;
594 * set to ignore signals that are ignored by default.
595 */
596 void
597 siginit(p)
598 struct proc *p;
599 {
600 register int i;
601
602 for (i = 0; i < NSIG; i++)
603 if (sigprop[i] & SA_IGNORE && i != SIGCONT)
604 p->p_sigignore |= sigmask(i);
605 }
606
607 /*
608 * Reset signals for an exec of the specified process.
609 */
610 void
611 execsigs(p, thr_act)
612 register struct proc *p;
613 register thread_t thr_act;
614 {
615 register struct sigacts *ps = p->p_sigacts;
616 register int nc, mask;
617 struct uthread *ut = (struct uthread *)0;
618
619 if (thr_act){
620 ut = (struct uthread *)get_bsdthread_info(thr_act);
621 }
622 /*
623 * Reset caught signals. Held signals remain held
624 * through p_sigmask (unless they were caught,
625 * and are now ignored by default).
626 */
627 while (p->p_sigcatch) {
628 nc = ffs((long)p->p_sigcatch);
629 mask = sigmask(nc);
630 p->p_sigcatch &= ~mask;
631 if (sigprop[nc] & SA_IGNORE) {
632 if (nc != SIGCONT)
633 p->p_sigignore |= mask;
634 if (thr_act){
635 ut->uu_siglist &= ~mask;
636 p->p_siglist &= ~mask;
637 } else
638 clear_procsiglist(p, mask);
639 }
640 ps->ps_sigact[nc] = SIG_DFL;
641 }
642 /*
643 * Reset stack state to the user stack.
644 * Clear set of signals caught on the signal stack.
645 */
646 ps->ps_sigstk.ss_flags = SA_DISABLE;
647 ps->ps_sigstk.ss_size = 0;
648 ps->ps_sigstk.ss_sp = USER_ADDR_NULL;
649 ps->ps_flags = 0;
650 if (thr_act) {
651 ut->uu_sigstk.ss_flags = SA_DISABLE;
652 ut->uu_sigstk.ss_size = 0;
653 ut->uu_sigstk.ss_sp = USER_ADDR_NULL;
654 ut->uu_flag &= ~UT_ALTSTACK;
655 }
656 ps->ps_sigonstack = 0;
657 }
658
659 /*
660  * Manipulate the process signal mask.
661  * The new mask and the location for the old mask are passed as
662  * user-space pointers; either may be null to skip that part of
663  * the operation.
664 */
665 int
666 sigprocmask(register struct proc *p, struct sigprocmask_args *uap, __unused register_t *retval)
667 {
668 int error = 0;
669 sigset_t oldmask, nmask;
670 user_addr_t omask = uap->omask;
671 struct uthread *ut;
672
673 ut = (struct uthread *)get_bsdthread_info(current_thread());
674 oldmask = ut->uu_sigmask;
675
676 if (uap->mask == USER_ADDR_NULL) {
677 /* just want old mask */
678 goto out;
679 }
680 error = copyin(uap->mask, &nmask, sizeof(sigset_t));
681 if (error)
682 goto out;
683
684 switch (uap->how) {
685 case SIG_BLOCK:
686 block_procsigmask(p, (nmask & ~sigcantmask));
687 signal_setast(current_thread());
688 break;
689
690 case SIG_UNBLOCK:
691 unblock_procsigmask(p, (nmask & ~sigcantmask));
692 signal_setast(current_thread());
693 break;
694
695 case SIG_SETMASK:
696 set_procsigmask(p, (nmask & ~sigcantmask));
697 signal_setast(current_thread());
698 break;
699
700 default:
701 error = EINVAL;
702 break;
703 }
704 out:
705 if (!error && omask != USER_ADDR_NULL)
706 copyout(&oldmask, omask, sizeof(sigset_t));
707 return (error);
708 }
709
710 int
711 sigpending(__unused struct proc *p, register struct sigpending_args *uap, __unused register_t *retval)
712 {
713 struct uthread *ut;
714 sigset_t pendlist;
715
716 ut = (struct uthread *)get_bsdthread_info(current_thread());
717 pendlist = ut->uu_siglist;
718
719 if (uap->osv)
720 copyout(&pendlist, uap->osv, sizeof(sigset_t));
721 return(0);
722 }
723
724
725 /*
726 * Suspend process until signal, providing mask to be set
727 * in the meantime. Note nonstandard calling convention:
728 * libc stub passes mask, not pointer, to save a copyin.
729 */
730
731 static int
732 sigcontinue(__unused int error)
733 {
734 // struct uthread *ut = get_bsdthread_info(current_thread());
735 unix_syscall_return(EINTR);
736 }
737
738 int
739 sigsuspend(register struct proc *p, struct sigsuspend_args *uap, __unused register_t *retval)
740 {
741 struct uthread *ut;
742
743 ut = (struct uthread *)get_bsdthread_info(current_thread());
744
745 /*
746 * When returning from sigpause, we want
747 * the old mask to be restored after the
748 * signal handler has finished. Thus, we
749 * save it here and mark the sigacts structure
750 * to indicate this.
751 */
752 ut->uu_oldmask = ut->uu_sigmask;
753 ut->uu_flag |= UT_SAS_OLDMASK;
754 ut->uu_sigmask = (uap->mask & ~sigcantmask);
755 (void) tsleep0((caddr_t) p, PPAUSE|PCATCH, "pause", 0, sigcontinue);
756 /* always return EINTR rather than ERESTART... */
757 return (EINTR);
758 }
759
760
761 int
762 __disable_threadsignal(struct proc *p,
763 __unused register struct __disable_threadsignal_args *uap,
764 __unused register_t *retval)
765 {
766 struct uthread *uth;
767
768 uth = (struct uthread *)get_bsdthread_info(current_thread());
769
770 /* No longer valid to have any signal delivered */
771 signal_lock(p);
772 uth->uu_flag |= UT_NO_SIGMASK;
773 signal_unlock(p);
774
775 return(0);
776
777 }
778
779
780 int
781 __pthread_markcancel(p, uap, retval)
782 struct proc *p;
783 register struct __pthread_markcancel_args *uap;
784 register_t *retval;
785 {
786 thread_act_t target_act;
787 int error = 0;
788 struct uthread *uth;
789
790 target_act = (thread_act_t)port_name_to_thread(uap->thread_port);
791
792 if (target_act == THR_ACT_NULL)
793 return (ESRCH);
794
795 uth = (struct uthread *)get_bsdthread_info(target_act);
796
797 /* if the thread is in vfork do not cancel */
798 if ((uth->uu_flag & (P_VFORK | UT_CANCEL | UT_CANCELED )) == 0) {
799 uth->uu_flag |= (UT_CANCEL | UT_NO_SIGMASK);
800 if (((uth->uu_flag & UT_NOTCANCELPT) == 0)
801 && ((uth->uu_flag & UT_CANCELDISABLE) == 0))
802 thread_abort_safely(target_act);
803 }
804
805 thread_deallocate(target_act);
806 return (error);
807 }
808
809 /* if action == 0: return the cancellation state; if the thread is
810  *                  marked for cancellation, mark it canceled
811  * if action == 1: enable cancel handling
812  * if action == 2: disable cancel handling
813 */
814 int
815 __pthread_canceled(p, uap, retval)
816 struct proc *p;
817 register struct __pthread_canceled_args *uap;
818 register_t *retval;
819 {
820 thread_act_t thr_act;
821 struct uthread *uth;
822 int action = uap->action;
823
824 thr_act = current_act();
825 uth = (struct uthread *)get_bsdthread_info(thr_act);
826
827 switch (action) {
828 case 1:
829 uth->uu_flag &= ~UT_CANCELDISABLE;
830 return(0);
831 case 2:
832 uth->uu_flag |= UT_CANCELDISABLE;
833 return(0);
834 case 0:
835 default:
836                 /* report cancellation only if it is pending, not disabled, and not already acted upon */
837 if((uth->uu_flag & ( UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
838 uth->uu_flag &= ~UT_CANCEL;
839 uth->uu_flag |= (UT_CANCELED | UT_NO_SIGMASK);
840 return(0);
841 }
842 return(EINVAL);
843 }
844 return(EINVAL);
845 }
846
847 void
848 __posix_sem_syscall_return(kern_return_t kern_result)
849 {
850 int error = 0;
851
852 if (kern_result == KERN_SUCCESS)
853 error = 0;
854 else if (kern_result == KERN_ABORTED)
855 error = EINTR;
856 else if (kern_result == KERN_OPERATION_TIMED_OUT)
857 error = ETIMEDOUT;
858 else
859 error = EINVAL;
860 unix_syscall_return(error);
861 /* does not return */
862 }
863
864
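/*
 * __semwait_signal -- wait on the semaphore cond_sem (optionally
 * signalling mutex_sem as part of the same trap), with either a relative
 * or an absolute timeout, and map the Mach wait result to a BSD errno
 * (0, EINTR, ETIMEDOUT or EINVAL).
 */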
865 int
866 __semwait_signal(p, uap, retval)
867 struct proc *p;
868 register struct __semwait_signal_args *uap;
869 register_t *retval;
870 {
871
872 kern_return_t kern_result;
873 mach_timespec_t then;
874 struct timespec now;
875
876 if(uap->timeout) {
877
878 if (uap->relative) {
879 then.tv_sec = uap->tv_sec;
880 then.tv_nsec = uap->tv_nsec;
881 } else {
882 nanotime(&now);
883 then.tv_sec = uap->tv_sec - now.tv_sec;
884 then.tv_nsec = uap->tv_nsec - now.tv_nsec;
885 if (then.tv_nsec < 0) {
886 then.tv_nsec += NSEC_PER_SEC;
887 then.tv_sec--;
888 }
889 }
890
891 if (uap->mutex_sem == (void *)NULL)
892 kern_result = semaphore_timedwait_trap_internal(uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
893 else
894 kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
895
896 } else {
897
898 if (uap->mutex_sem == (void *)NULL)
899 kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return);
900 else
901
902 kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return);
903 }
904
905 out:
906 if (kern_result == KERN_SUCCESS)
907 return(0);
908 else if (kern_result == KERN_ABORTED)
909 return(EINTR);
910 else if (kern_result == KERN_OPERATION_TIMED_OUT)
911 return(ETIMEDOUT);
912 else
913 return(EINVAL);
914 }
915
916
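/*
 * __pthread_kill -- send signal uap->sig to the single thread named by
 * the Mach port uap->thread_port, via psignal_uthread().  Threads that
 * have set UT_NO_SIGMASK are treated as not found (ESRCH).
 */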
917 int
918 __pthread_kill(__unused struct proc *p,
919 register struct __pthread_kill_args *uap,
920 __unused register_t *retval)
921 {
922 thread_t target_act;
923 int error = 0;
924 int signum = uap->sig;
925 struct uthread *uth;
926
927 target_act = (thread_t)port_name_to_thread(uap->thread_port);
928
929 if (target_act == THREAD_NULL)
930 return (ESRCH);
931 if ((u_int)signum >= NSIG) {
932 error = EINVAL;
933 goto out;
934 }
935
936 uth = (struct uthread *)get_bsdthread_info(target_act);
937
938 if (uth->uu_flag & UT_NO_SIGMASK) {
939 error = ESRCH;
940 goto out;
941 }
942
943 if (signum)
944 psignal_uthread(target_act, signum);
945 out:
946 thread_deallocate(target_act);
947 return (error);
948 }
949
950
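/*
 * pthread_sigmask -- block/unblock/set the signal mask of the calling
 * thread only, unlike sigprocmask() which applies the change to every
 * thread in the process.
 */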
951 int
952 pthread_sigmask(__unused register struct proc *p,
953 register struct pthread_sigmask_args *uap,
954 __unused register_t *retval)
955 {
956 user_addr_t set = uap->set;
957 user_addr_t oset = uap->oset;
958 sigset_t nset;
959 int error = 0;
960 struct uthread *ut;
961 sigset_t oldset;
962
963 ut = (struct uthread *)get_bsdthread_info(current_thread());
964 oldset = ut->uu_sigmask;
965
966 if (set == USER_ADDR_NULL) {
967 /* need only old mask */
968 goto out;
969 }
970
971 error = copyin(set, &nset, sizeof(sigset_t));
972 if (error)
973 goto out;
974
975 switch (uap->how) {
976 case SIG_BLOCK:
977 ut->uu_sigmask |= (nset & ~sigcantmask);
978 break;
979
980 case SIG_UNBLOCK:
981 ut->uu_sigmask &= ~(nset);
982 signal_setast(current_thread());
983 break;
984
985 case SIG_SETMASK:
986 ut->uu_sigmask = (nset & ~sigcantmask);
987 signal_setast(current_thread());
988 break;
989
990 default:
991 error = EINVAL;
992
993 }
994 out:
995 if (!error && oset != USER_ADDR_NULL)
996 copyout(&oldset, oset, sizeof(sigset_t));
997
998 return(error);
999 }
1000
1001
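/*
 * sigwait -- wait until one of the signals in the user-supplied set is
 * pending on some thread of the process, consume it, and copy its number
 * out to uap->sig.  If a matching signal is already pending the call
 * returns immediately; otherwise the caller sleeps with its mask set so
 * that only the awaited signals (plus SIGKILL/SIGSTOP) remain unblocked.
 */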
1002 int
1003 sigwait(register struct proc *p, register struct sigwait_args *uap, __unused register_t *retval)
1004 {
1005 struct uthread *ut;
1006 struct uthread *uth;
1007 int error = 0;
1008 sigset_t mask;
1009 sigset_t siglist;
1010 sigset_t sigw=0;
1011 int signum;
1012
1013 ut = (struct uthread *)get_bsdthread_info(current_thread());
1014
1015 if (uap->set == USER_ADDR_NULL)
1016 return(EINVAL);
1017
1018 error = copyin(uap->set, &mask, sizeof(sigset_t));
1019 if (error)
1020 return(error);
1021
1022 siglist = (mask & ~sigcantmask);
1023
1024 if (siglist == 0)
1025 return(EINVAL);
1026
1027 signal_lock(p);
1028 if ((p->p_flag & P_INVFORK) && p->p_vforkact) {
1029 signal_unlock(p);
1030 return(EINVAL);
1031 } else {
1032 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
1033 if ( (sigw = uth->uu_siglist & siglist) ) {
1034 break;
1035 }
1036 }
1037 }
1038 signal_unlock(p);
1039 if (sigw) {
1040 /* The signal was pending on a thread */
1041 goto sigwait1;
1042 }
1043 /*
1044 * When returning from sigwait, we want
1045 * the old mask to be restored after the
1046 * signal handler has finished. Thus, we
1047 * save it here and mark the sigacts structure
1048 * to indicate this.
1049 */
1050 ut->uu_oldmask = ut->uu_sigmask;
1051 ut->uu_flag |= UT_SAS_OLDMASK;
1052 if (siglist == (sigset_t)0)
1053 return(EINVAL);
1054 /* SIGKILL and SIGSTOP are not maskable as well */
1055 ut->uu_sigmask = ~(siglist|sigcantmask);
1056 ut->uu_sigwait = siglist;
1057 /* No Continuations for now */
1058 error = tsleep((caddr_t)&ut->uu_sigwait, PPAUSE|PCATCH, "pause", 0);
1059
1060 if ((error == EINTR) || (error == ERESTART))
1061 error = 0;
1062
1063 sigw = (ut->uu_sigwait & siglist);
1064 ut->uu_sigmask = ut->uu_oldmask;
1065 ut->uu_oldmask = 0;
1066 ut->uu_flag &= ~UT_SAS_OLDMASK;
1067 sigwait1:
1068 ut->uu_sigwait = 0;
1069 if (!error) {
1070 signum = ffs((unsigned int)sigw);
1071 if (!signum)
1072 panic("sigwait with no signal wakeup");
1073 ut->uu_siglist &= ~(sigmask(signum));
1074 if (uap->sig != USER_ADDR_NULL)
1075 error = copyout(&signum, uap->sig, sizeof(int));
1076 }
1077
1078 return(error);
1079
1080 }
1081
1082
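/*
 * sigaltstack -- get and/or set the alternate signal stack.  Depending on
 * P_LTHSIGSTACK the stack is kept per thread (uu_sigstk) or per process
 * (ps_sigstk); the stack cannot be disabled while it is in use
 * (SA_ONSTACK), and new stacks must be at least the historical 8K minimum.
 */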
1083 int
1084 sigaltstack(struct proc *p, register struct sigaltstack_args *uap, __unused register_t *retval)
1085 {
1086 struct sigacts *psp;
1087 struct user_sigaltstack *pstk;
1088 struct user_sigaltstack ss;
1089 struct uthread *uth;
1090 int uthsigaltstack = 0;
1091 int error;
1092
1093 uth = (struct uthread *)get_bsdthread_info(current_thread());
1094 uthsigaltstack = p->p_lflag & P_LTHSIGSTACK;
1095
1096 psp = p->p_sigacts;
1097 if (uthsigaltstack != 0) {
1098 pstk = &uth->uu_sigstk;
1099 if ((uth->uu_flag & UT_ALTSTACK) == 0)
1100 uth->uu_sigstk.ss_flags |= SA_DISABLE;
1101 } else {
1102 pstk = &psp->ps_sigstk;
1103 if ((psp->ps_flags & SAS_ALTSTACK) == 0)
1104 psp->ps_sigstk.ss_flags |= SA_DISABLE;
1105 }
1106 if (uap->oss) {
1107 if (IS_64BIT_PROCESS(p)) {
1108 error = copyout(pstk, uap->oss, sizeof(struct user_sigaltstack));
1109 } else {
1110 struct sigaltstack ss32;
1111 sigaltstack_64to32(pstk, &ss32);
1112 error = copyout(&ss32, uap->oss, sizeof(struct sigaltstack));
1113 }
1114 if (error)
1115 return (error);
1116 }
1117 if (uap->nss == USER_ADDR_NULL)
1118 return (0);
1119 if (IS_64BIT_PROCESS(p)) {
1120 error = copyin(uap->nss, &ss, sizeof(struct user_sigaltstack));
1121 } else {
1122 struct sigaltstack ss32;
1123 error = copyin(uap->nss, &ss32, sizeof(struct sigaltstack));
1124 sigaltstack_32to64(&ss32,&ss);
1125 }
1126 if (error)
1127 return (error);
1128 if ((ss.ss_flags & ~SA_DISABLE) != 0) {
1129 return(EINVAL);
1130 }
1131
1132 if (ss.ss_flags & SA_DISABLE) {
1133 if (uthsigaltstack != 0) {
1134                         /* if we are here we are not in the signal handler, so no need to check */
1135 if (uth->uu_sigstk.ss_flags & SA_ONSTACK)
1136 return (EINVAL);
1137 uth->uu_flag &= ~UT_ALTSTACK;
1138 uth->uu_sigstk.ss_flags = ss.ss_flags;
1139 } else {
1140 if (psp->ps_sigstk.ss_flags & SA_ONSTACK)
1141 return (EINVAL);
1142 psp->ps_flags &= ~SAS_ALTSTACK;
1143 psp->ps_sigstk.ss_flags = ss.ss_flags;
1144 }
1145
1146 return (0);
1147 }
1148 /* The older minimum stack size was 8K; enforce that one so there are no compat problems */
1149 #define OLDMINSIGSTKSZ 8*1024
1150 if (ss.ss_size < OLDMINSIGSTKSZ)
1151 return (ENOMEM);
1152 if (uthsigaltstack != 0) {
1153 uth->uu_flag |= UT_ALTSTACK;
1154 uth->uu_sigstk= ss;
1155 } else {
1156 psp->ps_flags |= SAS_ALTSTACK;
1157 psp->ps_sigstk= ss;
1158 }
1159 return (0);
1160 }
1161
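/*
 * kill -- send uap->signum to the target named by uap->pid:
 *	pid > 0		the single process with that pid
 *	pid == 0	every member of the caller's process group
 *	pid == -1	broadcast to every process the caller may signal
 *	pid < -1	every member of process group -pid
 * A signal number of 0 performs only the existence and permission checks.
 */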
1162 int
1163 kill(struct proc *cp, struct kill_args *uap, __unused register_t *retval)
1164 {
1165 register struct proc *p;
1166 kauth_cred_t uc = kauth_cred_get();
1167
1168 AUDIT_ARG(pid, uap->pid);
1169 AUDIT_ARG(signum, uap->signum);
1170
1171 if ((u_int)uap->signum >= NSIG)
1172 return (EINVAL);
1173 if (uap->pid > 0) {
1174 /* kill single process */
1175 if ((p = proc_findref(uap->pid)) == NULL) {
1176 if ((p = pzfind(uap->pid)) != NULL) {
1177 /*
1178 * IEEE Std 1003.1-2001: return success
1179 * when killing a zombie.
1180 */
1181 return (0);
1182 }
1183 return (ESRCH);
1184 }
1185 AUDIT_ARG(process, p);
1186 if (!cansignal(cp, uc, p, uap->signum)) {
1187 proc_dropref(p);
1188 return(EPERM);
1189 }
1190 if (uap->signum)
1191 psignal(p, uap->signum);
1192 proc_dropref(p);
1193 return (0);
1194 }
1195 switch (uap->pid) {
1196 case -1: /* broadcast signal */
1197 return (killpg1(cp, uap->signum, 0, 1));
1198 case 0: /* signal own process group */
1199 return (killpg1(cp, uap->signum, 0, 0));
1200 default: /* negative explicit process group */
1201 return (killpg1(cp, uap->signum, -(uap->pid), 0));
1202 }
1203 /* NOTREACHED */
1204 }
1205
1206
1207 /*
1208 * Common code for kill process group/broadcast kill.
1209 * cp is calling process.
1210 */
1211 int
1212 killpg1(cp, signum, pgid, all)
1213 register struct proc *cp;
1214 int signum, pgid, all;
1215 {
1216 register struct proc *p;
1217 kauth_cred_t uc = cp->p_ucred;
1218 struct pgrp *pgrp;
1219 int nfound = 0;
1220
1221 if (all) {
1222 /*
1223 * broadcast
1224 */
1225 for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
1226 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
1227 p == cp || !cansignal(cp, uc, p, signum))
1228 continue;
1229 nfound++;
1230 if (signum)
1231 psignal(p, signum);
1232 }
1233 } else {
1234 if (pgid == 0)
1235 /*
1236 * zero pgid means send to my process group.
1237 */
1238 pgrp = cp->p_pgrp;
1239 else {
1240 pgrp = pgfind(pgid);
1241 if (pgrp == NULL)
1242 return (ESRCH);
1243 }
1244 for (p = pgrp->pg_members.lh_first; p != 0;
1245 p = p->p_pglist.le_next) {
1246 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
1247 p->p_stat == SZOMB ||
1248 !cansignal(cp, uc, p, signum))
1249 continue;
1250 nfound++;
1251 if (signum)
1252 psignal(p, signum);
1253 }
1254 }
1255 return (nfound ? 0 : ESRCH);
1256 }
1257
1258 /*
1259 * Send a signal to a process group.
1260 */
1261 void
1262 gsignal(pgid, signum)
1263 int pgid, signum;
1264 {
1265 struct pgrp *pgrp;
1266
1267 if (pgid && (pgrp = pgfind(pgid)))
1268 pgsignal(pgrp, signum, 0);
1269 }
1270
1271 /*
1272  * Send a signal to a process group. If checkctty is 1,
1273 * limit to members which have a controlling terminal.
1274 */
1275 void
1276 pgsignal(pgrp, signum, checkctty)
1277 struct pgrp *pgrp;
1278 int signum, checkctty;
1279 {
1280 register struct proc *p;
1281
1282 if (pgrp)
1283 for (p = pgrp->pg_members.lh_first; p != 0;
1284 p = p->p_pglist.le_next)
1285 if (checkctty == 0 || p->p_flag & P_CONTROLT)
1286 psignal(p, signum);
1287 }
1288
1289 /*
1290  * Send a signal to a backgrounded process blocked due to tty access.
1291  * In FreeBSD, the backgrounded process wakes up every second and
1292  * discovers whether it is foregrounded or not. In our case, we block
1293  * the thread in tsleep because we want to avoid a storm of processes,
1294  * and the suspension happens only at AST level.
1295 */
1296 void
1297 tty_pgsignal(pgrp, signum)
1298 struct pgrp *pgrp;
1299 int signum;
1300 {
1301 register struct proc *p;
1302
1303 if (pgrp)
1304 for (p = pgrp->pg_members.lh_first; p != 0;
1305 p = p->p_pglist.le_next)
1306 if ((p->p_flag & P_TTYSLEEP) && (p->p_flag & P_CONTROLT))
1307 psignal(p, signum);
1308 }
1309
1310 /*
1311 * Send a signal caused by a trap to a specific thread.
1312 */
1313 void
1314 threadsignal(thread_t sig_actthread, int signum, u_long code)
1315 {
1316 register struct uthread *uth;
1317 register struct task * sig_task;
1318 register struct proc *p ;
1319 int mask;
1320
1321 if ((u_int)signum >= NSIG || signum == 0)
1322 return;
1323
1324 mask = sigmask(signum);
1325 if ((mask & threadmask) == 0)
1326 return;
1327 sig_task = get_threadtask(sig_actthread);
1328 p = (struct proc *)(get_bsdtask_info(sig_task));
1329
1330 uth = get_bsdthread_info(sig_actthread);
1331 if (uth && (uth->uu_flag & UT_VFORK))
1332 p = uth->uu_proc;
1333
1334 if (!(p->p_flag & P_TRACED) && (p->p_sigignore & mask))
1335 return;
1336
1337 uth->uu_siglist |= mask;
1338 p->p_siglist |= mask; /* just for lame ones looking here */
1339 uth->uu_code = code;
1340 /* mark on process as well */
1341 signal_setast(sig_actthread);
1342 }
1343
1344
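/*
 * psignal -- post signum to process p; a thin wrapper around
 * psignal_lock() that has it take the signal lock itself.
 */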
1345 void
1346 psignal(p, signum)
1347 register struct proc *p;
1348 register int signum;
1349 {
1350 psignal_lock(p, signum, 1);
1351 }
1352
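/*
 * psignal_vfork -- post signum directly to the vfork child's activation
 * thr_act in task new_task; the signal always takes the default action
 * here.
 */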
1353 void
1354 psignal_vfork(struct proc *p, task_t new_task, thread_t thr_act, int signum)
1355 {
1356 register int prop;
1357 register sig_t action;
1358 int mask;
1359 struct uthread *uth;
1360
1361 if ((u_int)signum >= NSIG || signum == 0)
1362 panic("psignal signal number");
1363 mask = sigmask(signum);
1364 prop = sigprop[signum];
1365
1366 #if SIGNAL_DEBUG
1367 if(rdebug_proc && (p == rdebug_proc)) {
1368 ram_printf(3);
1369 }
1370 #endif /* SIGNAL_DEBUG */
1371
1372 if ((new_task == TASK_NULL) || (thr_act == (thread_t)NULL) || is_kerneltask(new_task))
1373 return;
1374
1375
1376 uth = get_bsdthread_info(thr_act);
1377 signal_lock(p);
1378
1379 /*
1380          * In the vfork case the signal always takes the default action.
1381 */
1382 action = SIG_DFL;
1383
1384 if (p->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
1385 (p->p_flag & P_TRACED) == 0)
1386 p->p_nice = NZERO;
1387
1388 if (prop & SA_CONT) {
1389 p->p_siglist &= ~stopsigmask;
1390 uth->uu_siglist &= ~stopsigmask;
1391 }
1392
1393 if (prop & SA_STOP) {
1394 /*
1395 * If sending a tty stop signal to a member of an orphaned
1396 * process group, discard the signal here if the action
1397 * is default; don't stop the process below if sleeping,
1398 * and don't clear any pending SIGCONT.
1399 */
1400 if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0 &&
1401 action == SIG_DFL)
1402 goto psigout;
1403 uth->uu_siglist &= ~contsigmask;
1404 p->p_siglist &= ~contsigmask;
1405 }
1406 uth->uu_siglist |= mask;
1407 p->p_siglist |= mask; /* just for lame ones looking here */
1408
1409 /* Deliver signal to the activation passed in */
1410 act_set_astbsd(thr_act);
1411
1412 /*
1413 * SIGKILL priority twiddling moved here from above because
1414 * it needs sig_thread. Could merge it into large switch
1415 * below if we didn't care about priority for tracing
1416 * as SIGKILL's action is always SIG_DFL.
1417 */
1418 if ((signum == SIGKILL) && (p->p_nice > NZERO)) {
1419 p->p_nice = NZERO;
1420 }
1421
1422 /*
1423          * This process is traced - wake it up (if not already
1424 * stopped) so that it can discover the signal in
1425 * issig() and stop for the parent.
1426 */
1427 if (p->p_flag & P_TRACED) {
1428 if (p->p_stat != SSTOP)
1429 goto run;
1430 else
1431 goto psigout;
1432 }
1433 run:
1434 /*
1435 * If we're being traced (possibly because someone attached us
1436 * while we were stopped), check for a signal from the debugger.
1437 */
1438 if (p->p_stat == SSTOP) {
1439 if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0) {
1440 uth->uu_siglist |= sigmask(p->p_xstat);
1441 p->p_siglist |= mask; /* just for lame ones looking here */
1442 }
1443 }
1444
1445 /*
1446 * setrunnable(p) in BSD
1447 */
1448 p->p_stat = SRUN;
1449
1450 psigout:
1451 signal_unlock(p);
1452 }
1453
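/*
 * get_signalthread -- pick a thread of p that can take signum: the vfork
 * activation if there is one, otherwise the first thread that does not
 * have the signal masked (or is sigwait()ing on it), with the AST already
 * set by check_actforsig()/get_signalact().
 */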
1454 static thread_t
1455 get_signalthread(struct proc *p, int signum)
1456 {
1457 struct uthread *uth;
1458 thread_t thr_act;
1459 sigset_t mask = sigmask(signum);
1460 thread_t sig_thread_act;
1461 struct task * sig_task = p->task;
1462 kern_return_t kret;
1463
1464 if ((p->p_flag & P_INVFORK) && p->p_vforkact) {
1465 sig_thread_act = p->p_vforkact;
1466 kret = check_actforsig(sig_task, sig_thread_act, 1);
1467 if (kret == KERN_SUCCESS)
1468 return(sig_thread_act);
1469 else
1470 return(THREAD_NULL);
1471 }
1472
1473 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
1474 if(((uth->uu_flag & UT_NO_SIGMASK)== 0) &&
1475 (((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask))) {
1476 if (check_actforsig(p->task, uth->uu_act, 1) == KERN_SUCCESS)
1477 return(uth->uu_act);
1478 }
1479 }
1480 if (get_signalact(p->task, &thr_act, 1) == KERN_SUCCESS) {
1481 return(thr_act);
1482 }
1483
1484 return(THREAD_NULL);
1485 }
1486
1487 /*
1488 * Send the signal to the process. If the signal has an action, the action
1489 * is usually performed by the target process rather than the caller; we add
1490 * the signal to the set of pending signals for the process.
1491 *
1492 * Exceptions:
1493 * o When a stop signal is sent to a sleeping process that takes the
1494 * default action, the process is stopped without awakening it.
1495 * o SIGCONT restarts stopped processes (or puts them back to sleep)
1496 * regardless of the signal action (eg, blocked or ignored).
1497 *
1498 * Other ignored signals are discarded immediately.
1499 */
1500 void
1501 psignal_lock(p, signum, withlock)
1502 register struct proc *p;
1503 register int signum;
1504 register int withlock;
1505 {
1506 register int prop;
1507 register sig_t action;
1508 thread_t sig_thread_act;
1509 register task_t sig_task;
1510 int mask;
1511 struct uthread *uth;
1512 boolean_t funnel_state = FALSE;
1513 int sw_funnel = 0;
1514
1515 if ((u_int)signum >= NSIG || signum == 0)
1516 panic("psignal signal number");
1517 mask = sigmask(signum);
1518 prop = sigprop[signum];
1519
1520 #if SIGNAL_DEBUG
1521 if(rdebug_proc && (p == rdebug_proc)) {
1522 ram_printf(3);
1523 }
1524 #endif /* SIGNAL_DEBUG */
1525
1526 if (thread_funnel_get() == (funnel_t *)0) {
1527 sw_funnel = 1;
1528 funnel_state = thread_funnel_set(kernel_flock, TRUE);
1529 }
1530 /*
1531 * We will need the task pointer later. Grab it now to
1532 * check for a zombie process. Also don't send signals
1533 * to kernel internal tasks.
1534 */
1535 if (((sig_task = p->task) == TASK_NULL) || is_kerneltask(sig_task)) {
1536 if (sw_funnel)
1537 thread_funnel_set(kernel_flock, funnel_state);
1538 return;
1539 }
1540
1541 KNOTE(&p->p_klist, NOTE_SIGNAL | signum);
1542
1543 /*
1544          * Do not send signals to the process that has the thread
1545          * doing a reboot(). Doing so would mark that thread aborted
1546          * and can cause I/O failures, which will cause data loss.
1547 */
1548 if (ISSET(p->p_flag, P_REBOOT)) {
1549 if (sw_funnel)
1550 thread_funnel_set(kernel_flock, funnel_state);
1551 return;
1552 }
1553
1554 if (withlock)
1555 signal_lock(p);
1556
1557 /*
1558 * Deliver the signal to the first thread in the task. This
1559 * allows single threaded applications which use signals to
1560 * be able to be linked with multithreaded libraries. We have
1561 * an implicit reference to the current thread, but need
1562 * an explicit one otherwise. The thread reference keeps
1563 * the corresponding task data structures around too. This
1564 * reference is released by thread_deallocate.
1565 */
1566
1567 if (((p->p_flag & P_TRACED) == 0) && (p->p_sigignore & mask))
1568 goto psigout;
1569
1570 /* If successful return with ast set */
1571 sig_thread_act = get_signalthread(p, signum);
1572
1573 if (sig_thread_act == THREAD_NULL) {
1574 /* XXXX FIXME
1575                  * if it is SIGKILL, maybe we should
1576 * inject a thread to terminate
1577 */
1578 #if SIGNAL_DEBUG
1579 ram_printf(1);
1580 #endif /* SIGNAL_DEBUG */
1581 goto psigout;
1582 }
1583
1584 uth = get_bsdthread_info(sig_thread_act);
1585
1586 /*
1587 * If proc is traced, always give parent a chance.
1588 */
1589 if (p->p_flag & P_TRACED)
1590 action = SIG_DFL;
1591 else {
1592 /*
1593 * If the signal is being ignored,
1594 * then we forget about it immediately.
1595 * (Note: we don't set SIGCONT in p_sigignore,
1596 * and if it is set to SIG_IGN,
1597 * action will be SIG_DFL here.)
1598 */
1599 if (p->p_sigignore & mask)
1600 goto psigout;
1601 /* sigwait takes precedence */
1602 if (uth->uu_sigwait & mask)
1603 action = KERN_SIG_WAIT;
1604 else if (uth->uu_sigmask & mask)
1605 action = KERN_SIG_HOLD;
1606 else if (p->p_sigcatch & mask)
1607 action = KERN_SIG_CATCH;
1608 else
1609 action = SIG_DFL;
1610 }
1611
1612 if (p->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
1613 (p->p_flag & P_TRACED) == 0)
1614 p->p_nice = NZERO;
1615
1616 if (prop & SA_CONT) {
1617 uth->uu_siglist &= ~stopsigmask;
1618 p->p_siglist &= ~stopsigmask;
1619 }
1620
1621 if (prop & SA_STOP) {
1622 /*
1623 * If sending a tty stop signal to a member of an orphaned
1624 * process group, discard the signal here if the action
1625 * is default; don't stop the process below if sleeping,
1626 * and don't clear any pending SIGCONT.
1627 */
1628 if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0 &&
1629 action == SIG_DFL)
1630 goto psigout;
1631 uth->uu_siglist &= ~contsigmask;
1632 p->p_siglist &= ~contsigmask;
1633 }
1634 uth->uu_siglist |= mask;
1635 p->p_siglist |= mask; /* just for lame ones looking here */
1636
1637
1638 /*
1639 * Defer further processing for signals which are held,
1640 * except that stopped processes must be continued by SIGCONT.
1641 */
1642 if (action == KERN_SIG_HOLD && ((prop & SA_CONT) == 0 || p->p_stat != SSTOP)) {
1643 goto psigout;
1644 }
1645 /*
1646 * SIGKILL priority twiddling moved here from above because
1647 * it needs sig_thread. Could merge it into large switch
1648 * below if we didn't care about priority for tracing
1649 * as SIGKILL's action is always SIG_DFL.
1650 */
1651 if ((signum == SIGKILL) && (p->p_nice > NZERO)) {
1652 p->p_nice = NZERO;
1653 }
1654
1655 /*
1656 * Process is traced - wake it up (if not already
1657 * stopped) so that it can discover the signal in
1658 * issig() and stop for the parent.
1659 */
1660 if (p->p_flag & P_TRACED) {
1661 if (p->p_stat != SSTOP)
1662 goto run;
1663 else
1664 goto psigout;
1665 }
1666
1667 if (action == KERN_SIG_WAIT) {
1668 uth->uu_sigwait = mask;
1669 uth->uu_siglist &= ~mask;
1670 p->p_siglist &= ~mask;
1671 wakeup(&uth->uu_sigwait);
1672 /* if it is SIGCONT resume whole process */
1673 if (prop & SA_CONT) {
1674 p->p_flag |= P_CONTINUED;
1675 (void) task_resume(sig_task);
1676 }
1677 goto psigout;
1678 }
1679
1680 if (action != SIG_DFL) {
1681 /*
1682 * User wants to catch the signal.
1683 * Wake up the thread, but don't un-suspend it
1684 * (except for SIGCONT).
1685 */
1686 if (prop & SA_CONT) {
1687 if (p->p_flag & P_TTYSLEEP) {
1688 p->p_flag &= ~P_TTYSLEEP;
1689 wakeup(&p->p_siglist);
1690 } else {
1691 p->p_flag |= P_CONTINUED;
1692 (void) task_resume(sig_task);
1693 }
1694 p->p_stat = SRUN;
1695 } else if (p->p_stat == SSTOP)
1696 goto psigout;
1697 goto run;
1698 } else {
1699 /* Default action - varies */
1700 if (mask & stopsigmask) {
1701 /*
1702 * These are the signals which by default
1703 * stop a process.
1704 *
1705 * Don't clog system with children of init
1706 * stopped from the keyboard.
1707 */
1708 if (!(prop & SA_STOP) && p->p_pptr == initproc) {
1709 psignal_lock(p, SIGKILL, 0);
1710 uth->uu_siglist &= ~mask;
1711 p->p_siglist &= ~mask;
1712 goto psigout;
1713 }
1714
1715 /*
1716 * Stop the task
1717 * if task hasn't already been stopped by
1718 * a signal.
1719 */
1720 uth->uu_siglist &= ~mask;
1721 p->p_siglist &= ~mask;
1722 if (p->p_stat != SSTOP) {
1723 p->p_xstat = signum;
1724 stop(p);
1725 if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0) {
1726 struct proc *pp = p->p_pptr;
1727
1728 pp->si_pid = p->p_pid;
1729 pp->si_status = p->p_xstat;
1730 pp->si_code = CLD_STOPPED;
1731 pp->si_uid = p->p_ucred->cr_ruid;
1732 psignal(pp, SIGCHLD);
1733 }
1734 }
1735 goto psigout;
1736 }
1737
1738 switch (signum) {
1739 /*
1740 * Signals ignored by default have been dealt
1741 * with already, since their bits are on in
1742 * p_sigignore.
1743 */
1744
1745 case SIGKILL:
1746 /*
1747 * Kill signal always sets process running and
1748 * unsuspends it.
1749 */
1750 /*
1751 * Process will be running after 'run'
1752 */
1753 p->p_stat = SRUN;
1754
1755 thread_abort(sig_thread_act);
1756
1757 goto psigout;
1758
1759 case SIGCONT:
1760 /*
1761 * Let the process run. If it's sleeping on an
1762 * event, it remains so.
1763 */
1764 if (p->p_flag & P_TTYSLEEP) {
1765 p->p_flag &= ~P_TTYSLEEP;
1766 wakeup(&p->p_siglist);
1767 } else {
1768 p->p_flag |= P_CONTINUED;
1769 (void) task_resume(sig_task);
1770 }
1771 uth->uu_siglist &= ~mask;
1772 p->p_siglist &= ~mask;
1773 p->p_stat = SRUN;
1774
1775 goto psigout;
1776
1777 default:
1778 /*
1779 * All other signals wake up the process, but don't
1780 * resume it.
1781 */
1782 if (p->p_stat == SSTOP)
1783 goto psigout;
1784 goto run;
1785 }
1786 }
1787 /*NOTREACHED*/
1788 run:
1789 /*
1790 * If we're being traced (possibly because someone attached us
1791 * while we were stopped), check for a signal from the debugger.
1792 */
1793 if (p->p_stat == SSTOP) {
1794 if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0)
1795 uth->uu_siglist |= sigmask(p->p_xstat);
1796 } else {
1797 /*
1798                  * This is setrunnable(p) in BSD;
1799                  * wake up the thread if it is interruptible.
1800 */
1801 p->p_stat = SRUN;
1802 thread_abort_safely(sig_thread_act);
1803 }
1804 psigout:
1805 if (withlock)
1806 signal_unlock(p);
1807 if (sw_funnel)
1808 thread_funnel_set(kernel_flock, funnel_state);
1809 }
1810
1811
1812 /* Like psignal_lock(), but deliver the signal to the specific thread thr_act. */
1813 void
1814 psignal_uthread(thr_act, signum)
1815 thread_t thr_act;
1816 int signum;
1817 {
1818 struct proc *p;
1819 register int prop;
1820 register sig_t action;
1821 thread_t sig_thread_act;
1822 register task_t sig_task;
1823 int mask;
1824 struct uthread *uth;
1825 kern_return_t kret;
1826 int error = 0;
1827
1828 p = (struct proc *)get_bsdtask_info(get_threadtask(thr_act));
1829 if ((u_int)signum >= NSIG || signum == 0)
1830 panic("Invalid signal number in psignal_uthread");
1831 mask = sigmask(signum);
1832 prop = sigprop[signum];
1833
1834 #if SIGNAL_DEBUG
1835 if(rdebug_proc && (p == rdebug_proc)) {
1836 ram_printf(3);
1837 }
1838 #endif /* SIGNAL_DEBUG */
1839
1840 /*
1841 * We will need the task pointer later. Grab it now to
1842 * check for a zombie process. Also don't send signals
1843 * to kernel internal tasks.
1844 */
1845 if (((sig_task = p->task) == TASK_NULL) || is_kerneltask(sig_task)) {
1846 return;
1847 }
1848
1849 sig_thread_act = thr_act;
1850 /*
1851          * Do not send signals to the process that has the thread
1852          * doing a reboot(). Doing so would mark that thread aborted
1853          * and can cause I/O failures, which will cause data loss.
1854 */
1855 if (ISSET(p->p_flag, P_REBOOT)) {
1856 return;
1857 }
1858
1859 signal_lock(p);
1860
1861 /*
1862 * Deliver the signal to the first thread in the task. This
1863 * allows single threaded applications which use signals to
1864 * be able to be linked with multithreaded libraries. We have
1865 * an implicit reference to the current thread, but need
1866 * an explicit one otherwise. The thread reference keeps
1867 * the corresponding task data structures around too. This
1868 * reference is released by thread_deallocate.
1869 */
1870
1871 if (((p->p_flag & P_TRACED) == 0) && (p->p_sigignore & mask))
1872 goto puthout;
1873
1874 kret = check_actforsig(sig_task, sig_thread_act, 1);
1875
1876 if (kret != KERN_SUCCESS) {
1877 error = EINVAL;
1878 goto puthout;
1879 }
1880
1881
1882 uth = get_bsdthread_info(sig_thread_act);
1883
1884 /*
1885 * If proc is traced, always give parent a chance.
1886 */
1887 if (p->p_flag & P_TRACED)
1888 action = SIG_DFL;
1889 else {
1890 /*
1891 * If the signal is being ignored,
1892 * then we forget about it immediately.
1893 * (Note: we don't set SIGCONT in p_sigignore,
1894 * and if it is set to SIG_IGN,
1895 * action will be SIG_DFL here.)
1896 */
1897 if (p->p_sigignore & mask)
1898 goto puthout;
1899 /* sigwait takes precedence */
1900 if (uth->uu_sigwait & mask)
1901 action = KERN_SIG_WAIT;
1902 else if (uth->uu_sigmask & mask)
1903 action = KERN_SIG_HOLD;
1904 else if (p->p_sigcatch & mask)
1905 action = KERN_SIG_CATCH;
1906 else
1907 action = SIG_DFL;
1908 }
1909
1910 if (p->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
1911 (p->p_flag & P_TRACED) == 0)
1912 p->p_nice = NZERO;
1913
1914 if (prop & SA_CONT) {
1915 uth->uu_siglist &= ~stopsigmask;
1916 p->p_siglist &= ~stopsigmask;
1917 }
1918
1919 if (prop & SA_STOP) {
1920 /*
1921 * If sending a tty stop signal to a member of an orphaned
1922 * process group, discard the signal here if the action
1923 * is default; don't stop the process below if sleeping,
1924 * and don't clear any pending SIGCONT.
1925 */
1926 if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0 &&
1927 action == SIG_DFL)
1928 goto puthout;
1929 uth->uu_siglist &= ~contsigmask;
1930 p->p_siglist &= ~contsigmask;
1931 }
1932 uth->uu_siglist |= mask;
1933 p->p_siglist |= mask; /* just for lame ones looking here */
1934
1935 /*
1936 * Defer further processing for signals which are held,
1937 * except that stopped processes must be continued by SIGCONT.
1938 */
1939 if (action == KERN_SIG_HOLD && ((prop & SA_CONT) == 0 || p->p_stat != SSTOP))
1940 goto puthout;
1941
1942 /*
1943 * SIGKILL priority twiddling moved here from above because
1944 * it needs sig_thread. Could merge it into large switch
1945 * below if we didn't care about priority for tracing
1946 * as SIGKILL's action is always SIG_DFL.
1947 */
1948 if ((signum == SIGKILL) && (p->p_nice > NZERO)) {
1949 p->p_nice = NZERO;
1950 }
1951
1952 /*
1953 * Process is traced - wake it up (if not already
1954 * stopped) so that it can discover the signal in
1955 * issig() and stop for the parent.
1956 */
1957 if (p->p_flag & P_TRACED) {
1958 if (p->p_stat != SSTOP)
1959 goto psurun;
1960 else
1961 goto puthout;
1962 }
1963
1964 if (action == KERN_SIG_WAIT) {
1965 uth->uu_sigwait = mask;
1966 uth->uu_siglist &= ~mask;
1967 p->p_siglist &= ~mask;
1968 wakeup(&uth->uu_sigwait);
1969 /* if it is SIGCONT resume whole process */
1970 if (prop & SA_CONT) {
1971 p->p_flag |= P_CONTINUED;
1972 (void) task_resume(sig_task);
1973 }
1974 goto puthout;
1975 }
1976
1977 if (action != SIG_DFL) {
1978 /*
1979 * User wants to catch the signal.
1980 * Wake up the thread, but don't un-suspend it
1981 * (except for SIGCONT).
1982 */
1983 if (prop & SA_CONT) {
1984 p->p_flag |= P_CONTINUED;
1985 (void) task_resume(sig_task);
1986 }
1987 goto psurun;
1988 } else {
1989 /* Default action - varies */
1990 if (mask & stopsigmask) {
1991 /*
1992 * These are the signals which by default
1993 * stop a process.
1994 *
1995 * Don't clog system with children of init
1996 * stopped from the keyboard.
1997 */
1998 if (!(prop & SA_STOP) && p->p_pptr == initproc) {
1999 psignal_lock(p, SIGKILL, 0);
2000 uth->uu_siglist &= ~mask;
2001 p->p_siglist &= ~mask;
2002 goto puthout;
2003 }
2004
2005 /*
2006 * Stop the task
2007 * if task hasn't already been stopped by
2008 * a signal.
2009 */
2010 uth->uu_siglist &= ~mask;
2011 p->p_siglist &= ~mask;
2012 if (p->p_stat != SSTOP) {
2013 p->p_xstat = signum;
2014 if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0) {
2015 struct proc *pp = p->p_pptr;
2016
2017 pp->si_pid = p->p_pid;
2018 pp->si_status = p->p_xstat;
2019 pp->si_code = CLD_STOPPED;
2020 pp->si_uid = p->p_ucred->cr_ruid;
2021 psignal(pp, SIGCHLD);
2022 }
2023 stop(p);
2024 }
2025 goto puthout;
2026 }
2027
2028 switch (signum) {
2029 /*
2030 * Signals ignored by default have been dealt
2031 * with already, since their bits are on in
2032 * p_sigignore.
2033 */
2034
2035 case SIGKILL:
2036 /*
2037 * Kill signal always sets process running and
2038 * unsuspends it.
2039 */
2040 /*
2041 * Process will be running after 'run'
2042 */
2043 p->p_stat = SRUN;
2044
2045 thread_abort(sig_thread_act);
2046
2047 goto puthout;
2048
2049 case SIGCONT:
2050 /*
2051 * Let the process run. If it's sleeping on an
2052 * event, it remains so.
2053 */
2054 if (p->p_flag & P_TTYSLEEP) {
2055 p->p_flag &= ~P_TTYSLEEP;
2056 wakeup(&p->p_siglist);
2057 } else {
2058 p->p_flag |= P_CONTINUED;
2059 (void) task_resume(sig_task);
2060 }
2061 uth->uu_siglist &= ~mask;
2062 p->p_siglist &= ~mask;
2063 p->p_stat = SRUN;
2064 goto puthout;
2065
2066 default:
2067 /*
2068 * All other signals wake up the process, but don't
2069 * resume it.
2070 */
2071 goto psurun;
2072 }
2073 }
2074 /*NOTREACHED*/
2075 psurun:
2076 /*
2077 * If we're being traced (possibly because someone attached us
2078 * while we were stopped), check for a signal from the debugger.
2079 */
2080 if (p->p_stat == SSTOP) {
2081 if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0) {
2082 uth->uu_siglist |= sigmask(p->p_xstat);
2083 p->p_siglist |= sigmask(p->p_xstat);
2084 }
2085 } else {
2086 /*
2087 * Equivalent of setrunnable(p) in BSD:
2088 * wake up the thread if it is interruptible.
2089 */
2090 p->p_stat = SRUN;
2091 thread_abort_safely(sig_thread_act);
2092 }
2093
2094 puthout:
2095 signal_unlock(p);
2096 }
2097
2098
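/*
 * Claim the exit path for the current thread: record it as the
 * process's exit_thread and suspend the task so other threads stop
 * running while exit processing tears down signal state.
 */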
2099 __inline__ void
2100 sig_lock_to_exit(struct proc *p)
2101 {
2102 thread_t self = current_thread();
2103
2104 p->exit_thread = self;
2105 (void) task_suspend(p->task);
2106 }
2107
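/*
 * Wait, dropping and retaking the signal lock, while another thread
 * is in sigwait or owns the exit path.  Returns 1 once signal
 * delivery may proceed, 0 if the process is already exiting (no
 * further signals are processed), or -1 if this thread was asked to
 * terminate while it was blocked.
 */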
2108 __inline__ int
2109 sig_try_locked(struct proc *p)
2110 {
2111 thread_t self = current_thread();
2112
2113 while (p->sigwait || p->exit_thread) {
2114 if (p->exit_thread) {
2115 if (p->exit_thread != self) {
2116 /*
2117 * Already exiting - no signals.
2118 */
2119 thread_abort(self);
2120 }
2121 return(0);
2122 }
2123 if(assert_wait_possible()) {
2124 assert_wait((caddr_t)&p->sigwait_thread,
2125 (THREAD_INTERRUPTIBLE));
2126 }
2127 signal_unlock(p);
2128 thread_block(THREAD_CONTINUE_NULL);
2129 signal_lock(p);
2130 if (thread_should_abort(self)) {
2131 /*
2132 * Terminate request - clean up.
2133 */
2134 return -1;
2135 }
2136 }
2137 return 1;
2138 }
2139
2140 /*
2141 * If the current process has received a signal (should be caught or cause
2142 * termination, should interrupt current syscall), return the signal number.
2143 * Stop signals with default action are processed immediately, then cleared;
2144 * they aren't returned. This is checked after each entry to the system for
2145 * a syscall or trap (though this can usually be done without calling issignal
2146 * by checking the pending signal masks in the CURSIG macro.) The normal call
2147 * sequence is
2148 *
2149 * while (signum = CURSIG(curproc))
2150 * postsig(signum);
2151 */
2152 int
2153 issignal(p)
2154 register struct proc *p;
2155 {
2156 register int signum, mask, prop, sigbits;
2157 thread_t cur_act;
2158 struct uthread * ut;
2159 struct proc *pp;
2160
2161 cur_act = current_thread();
2162
2163 #if SIGNAL_DEBUG
2164 if(rdebug_proc && (p == rdebug_proc)) {
2165 ram_printf(3);
2166 }
2167 #endif /* SIGNAL_DEBUG */
2168 signal_lock(p);
2169
2170 /*
2171 * Try to grab the signal lock.
2172 */
2173 if (sig_try_locked(p) <= 0) {
2174 signal_unlock(p);
2175 return (0);
2176 }
2177
2178 ut = get_bsdthread_info(cur_act);
2179 for(;;) {
2180 sigbits = ut->uu_siglist & ~ut->uu_sigmask;
2181
2182 if (p->p_flag & P_PPWAIT)
2183 sigbits &= ~stopsigmask;
2184 if (sigbits == 0) { /* no signal to send */
2185 signal_unlock(p);
2186 return (0);
2187 }
2188 signum = ffs((long)sigbits);
2189 mask = sigmask(signum);
2190 prop = sigprop[signum];
2191
2192 /*
2193 * We should see pending but ignored signals
2194 * only if P_TRACED was on when they were posted.
2195 */
2196 if (mask & p->p_sigignore && (p->p_flag & P_TRACED) == 0) {
2197 ut->uu_siglist &= ~mask; /* take the signal! */
2198 p->p_siglist &= ~mask; /* take the signal! */
2199 continue;
2200 }
2201 if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
2202 register task_t task;
2203 /*
2204 * If traced, always stop, and stay
2205 * stopped until released by the debugger.
2206 */
2207 /* ptrace debugging */
2208 p->p_xstat = signum;
2209 pp = p->p_pptr;
2210 if (p->p_flag & P_SIGEXC) {
2211 p->sigwait = TRUE;
2212 p->sigwait_thread = cur_act;
2213 p->p_stat = SSTOP;
2214 p->p_flag &= ~(P_WAITED|P_CONTINUED);
2215 ut->uu_siglist &= ~mask; /* clear the old signal */
2216 p->p_siglist &= ~mask; /* clear the old signal */
2217 signal_unlock(p);
2218 do_bsdexception(EXC_SOFTWARE, EXC_SOFT_SIGNAL, signum);
2219 signal_lock(p);
2220 } else {
2221 // panic("Unsupported gdb option\n");
2222 pp->si_pid = p->p_pid;
2223 pp->si_status = p->p_xstat;
2224 pp->si_code = CLD_TRAPPED;
2225 pp->si_uid = p->p_ucred->cr_ruid;
2226 psignal(pp, SIGCHLD);
2227 /*
2228 * XXX Have to really stop for debuggers;
2229 * XXX stop() doesn't do the right thing.
2230 * XXX Inline the task_suspend because we
2231 * XXX have to diddle Unix state in the
2232 * XXX middle of it.
2233 */
2234 task = p->task;
2235 task_hold(task);
2236 p->sigwait = TRUE;
2237 p->sigwait_thread = cur_act;
2238 p->p_stat = SSTOP;
2239 p->p_flag &= ~(P_WAITED|P_CONTINUED);
2240 ut->uu_siglist &= ~mask; /* clear the old signal */
2241 p->p_siglist &= ~mask; /* clear the old signal */
2242
2243 wakeup((caddr_t)p->p_pptr);
2244 signal_unlock(p);
2245 assert_wait((caddr_t)&p->sigwait, (THREAD_INTERRUPTIBLE));
2246 thread_block(THREAD_CONTINUE_NULL);
2247 signal_lock(p);
2248 }
2249
2250 p->sigwait = FALSE;
2251 p->sigwait_thread = NULL;
2252 wakeup((caddr_t)&p->sigwait_thread);
2253
2254 /*
2255 * Detect the case where gdb is killed while the
2256 * traced program is still attached: pgsignal
2257 * would deliver the SIGKILL to the traced program,
2258 * and that is what we check for here.
2259 */
2260 if (ut->uu_siglist & sigmask(SIGKILL)) {
2261 /*
2262 * Wait event may still be outstanding;
2263 * clear it, since sig_lock_to_exit will
2264 * wait.
2265 */
2266 clear_wait(current_thread(), THREAD_INTERRUPTED);
2267 sig_lock_to_exit(p);
2268 /*
2269 * Since this thread will be resumed
2270 * to allow the current syscall to
2271 * be completed, must save u_qsave
2272 * before calling exit(). (Since exit()
2273 * calls closef() which can trash u_qsave.)
2274 */
2275 signal_unlock(p);
2276 exit1(p, signum, (int *)NULL);
2277 return(0);
2278 }
2279
2280 /*
2281 * We may have to quit
2282 */
2283 if (thread_should_abort(current_thread())) {
2284 signal_unlock(p);
2285 return(0);
2286 }
2287 /*
2288 * If parent wants us to take the signal,
2289 * then it will leave it in p->p_xstat;
2290 * otherwise we just look for signals again.
2291 */
2292 signum = p->p_xstat;
2293 if (signum == 0)
2294 continue;
2295 /*
2296 * Put the new signal into p_siglist. If the
2297 * signal is being masked, look for other signals.
2298 */
2299 mask = sigmask(signum);
2300 ut->uu_siglist |= mask;
2301 p->p_siglist |= mask; /* just for lame ones looking here */
2302 if (ut->uu_sigmask & mask)
2303 continue;
2304 }
2305
2306 /*
2307 * Decide whether the signal should be returned.
2308 * Return the signal's number, or fall through
2309 * to clear it from the pending mask.
2310 */
2311
2312 switch ((long)p->p_sigacts->ps_sigact[signum]) {
2313
2314 case (long)SIG_DFL:
2315 /*
2316 * Don't take default actions on system processes.
2317 */
2318 if (p->p_pptr->p_pid == 0) {
2319 #if DIAGNOSTIC
2320 /*
2321 * Are you sure you want to ignore SIGSEGV
2322 * in init? XXX
2323 */
2324 printf("Process (pid %d) got signal %d\n",
2325 p->p_pid, signum);
2326 #endif
2327 break; /* == ignore */
2328 }
2329
2330 /*
2331 * If there is a pending stop signal to process
2332 * with default action, stop here,
2333 * then clear the signal. However,
2334 * if process is member of an orphaned
2335 * process group, ignore tty stop signals.
2336 */
2337 if (prop & SA_STOP) {
2338 if (p->p_flag & P_TRACED ||
2339 (p->p_pgrp->pg_jobc == 0 &&
2340 prop & SA_TTYSTOP))
2341 break; /* == ignore */
2342 if (p->p_stat != SSTOP) {
2343 p->p_xstat = signum;
2344 stop(p);
2345 if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0) {
2346 pp = p->p_pptr;
2347 pp->si_pid = p->p_pid;
2348 pp->si_status = p->p_xstat;
2349 pp->si_code = CLD_STOPPED;
2350 pp->si_uid = p->p_ucred->cr_ruid;
2351 psignal(pp, SIGCHLD);
2352 }
2353 }
2354 break;
2355 } else if (prop & SA_IGNORE) {
2356 /*
2357 * Except for SIGCONT, shouldn't get here.
2358 * Default action is to ignore; drop it.
2359 */
2360 break; /* == ignore */
2361 } else {
2362 ut->uu_siglist &= ~mask; /* take the signal! */
2363 p->p_siglist &= ~mask; /* take the signal! */
2364 signal_unlock(p);
2365 return (signum);
2366 }
2367 /*NOTREACHED*/
2368
2369 case (long)SIG_IGN:
2370 /*
2371 * Masking above should prevent us ever trying
2372 * to take action on an ignored signal other
2373 * than SIGCONT, unless process is traced.
2374 */
2375 if ((prop & SA_CONT) == 0 &&
2376 (p->p_flag & P_TRACED) == 0)
2377 printf("issignal\n");
2378 break; /* == ignore */
2379
2380 default:
2381 /*
2382 * This signal has an action, let
2383 * postsig() process it.
2384 */
2385 ut->uu_siglist &= ~mask; /* take the signal! */
2386 p->p_siglist &= ~mask; /* take the signal! */
2387 signal_unlock(p);
2388 return (signum);
2389 }
2390 ut->uu_siglist &= ~mask; /* take the signal! */
2391 p->p_siglist &= ~mask; /* take the signal! */
2392 }
2393 /* NOTREACHED */
2394 }
2395
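/*
 * Non-blocking check used on the sleep path: report which pending
 * signal would be delivered to the current thread, without taking the
 * signal lock or modifying any pending-signal state.
 */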
2396 /* called from _sleep */
2397 int
2398 CURSIG(p)
2399 register struct proc *p;
2400 {
2401 register int signum, mask, prop, sigbits;
2402 thread_t cur_act;
2403 struct uthread * ut;
2404 int retnum = 0;
2405
2406
2407 cur_act = current_thread();
2408
2409 ut = get_bsdthread_info(cur_act);
2410
2411 if (ut->uu_siglist == 0)
2412 return (0);
2413
2414 if (((ut->uu_siglist & ~ut->uu_sigmask) == 0) && ((p->p_flag & P_TRACED) == 0))
2415 return (0);
2416
2417 sigbits = ut->uu_siglist & ~ut->uu_sigmask;
2418
2419 for(;;) {
2420 if (p->p_flag & P_PPWAIT)
2421 sigbits &= ~stopsigmask;
2422 if (sigbits == 0) { /* no signal to send */
2423 return (retnum);
2424 }
2425
2426 signum = ffs((long)sigbits);
2427 mask = sigmask(signum);
2428 prop = sigprop[signum];
2429
2430 /*
2431 * We should see pending but ignored signals
2432 * only if P_TRACED was on when they were posted.
2433 */
2434 if (mask & p->p_sigignore && (p->p_flag & P_TRACED) == 0) {
2435 continue;
2436 }
2437 if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
2438 /*
2439 * Put the new signal into p_siglist. If the
2440 * signal is being masked, look for other signals.
2441 */
2442 mask = sigmask(signum);
2443 if (ut->uu_sigmask & mask)
2444 continue;
2445 return(signum);
2446 }
2447
2448 /*
2449 * Decide whether the signal should be returned.
2450 * Return the signal's number, or fall through
2451 * to clear it from the pending mask.
2452 */
2453
2454 switch ((long)p->p_sigacts->ps_sigact[signum]) {
2455
2456 case (long)SIG_DFL:
2457 /*
2458 * Don't take default actions on system processes.
2459 */
2460 if (p->p_pptr->p_pid == 0) {
2461 #if DIAGNOSTIC
2462 /*
2463 * Are you sure you want to ignore SIGSEGV
2464 * in init? XXX
2465 */
2466 printf("Process (pid %d) got signal %d\n",
2467 p->p_pid, signum);
2468 #endif
2469 break; /* == ignore */
2470 }
2471
2472 /*
2473 * If there is a pending stop signal to process
2474 * with default action, stop here,
2475 * then clear the signal. However,
2476 * if process is member of an orphaned
2477 * process group, ignore tty stop signals.
2478 */
2479 if (prop & SA_STOP) {
2480 if (p->p_flag & P_TRACED ||
2481 (p->p_pgrp->pg_jobc == 0 &&
2482 prop & SA_TTYSTOP))
2483 break; /* == ignore */
2484 retnum = signum;
2485 break;
2486 } else if (prop & SA_IGNORE) {
2487 /*
2488 * Except for SIGCONT, shouldn't get here.
2489 * Default action is to ignore; drop it.
2490 */
2491 break; /* == ignore */
2492 } else {
2493 return (signum);
2494 }
2495 /*NOTREACHED*/
2496
2497 case (long)SIG_IGN:
2498 /*
2499 * Masking above should prevent us ever trying
2500 * to take action on an ignored signal other
2501 * than SIGCONT, unless process is traced.
2502 */
2503 if ((prop & SA_CONT) == 0 &&
2504 (p->p_flag & P_TRACED) == 0)
2505 printf("issignal\n");
2506 break; /* == ignore */
2507
2508 default:
2509 /*
2510 * This signal has an action, let
2511 * postsig() process it.
2512 */
2513 return (signum);
2514 }
2515 sigbits &= ~mask; /* take the signal! */
2516 }
2517 /* NOTREACHED */
2518 }
2519
2520 /*
2521 * Put the argument process into the stopped state and notify the parent
2522 * via wakeup. Signals are handled elsewhere. The process must not be
2523 * on the run queue.
2524 */
2525 void
2526 stop(p)
2527 register struct proc *p;
2528 {
2529 p->p_stat = SSTOP;
2530 p->p_flag &= ~(P_WAITED|P_CONTINUED);
2531 if (p->p_pptr->p_stat != SSTOP)
2532 wakeup((caddr_t)p->p_pptr);
2533 (void) task_suspend(p->task); /*XXX*/
2534 }
2535
2536 /*
2537 * Take the action for the specified signal
2538 * from the current set of pending signals.
2539 */
2540 void
2541 postsig(int signum)
2542 {
2543 struct proc *p = current_proc();
2544 struct sigacts *ps = p->p_sigacts;
2545 user_addr_t catcher;
2546 u_long code;
2547 int mask, returnmask;
2548 struct uthread * ut;
2549
2550 #if DIAGNOSTIC
2551 if (signum == 0)
2552 panic("postsig");
2553 /*
2554 * This must be called on master cpu
2555 */
2556 if (cpu_number() != master_cpu)
2557 panic("psig not on master");
2558 #endif
2559
2560 signal_lock(p);
2561 /*
2562 * Try to grab the signal lock.
2563 */
2564 if (sig_try_locked(p) <= 0) {
2565 signal_unlock(p);
2566 return;
2567 }
2568
2569 ut = (struct uthread *)get_bsdthread_info(current_thread());
2570 mask = sigmask(signum);
2571 ut->uu_siglist &= ~mask;
2572 p->p_siglist &= ~mask;
2573 catcher = ps->ps_sigact[signum];
2574 #if KTRACE
2575 // LP64: catcher argument is a 64-bit user-space handler address
2576 if (KTRPOINT(p, KTR_PSIG))
2577 ktrpsig(p->p_tracep,
2578 signum, CAST_DOWN(void *,catcher), ut->uu_flag & UT_SAS_OLDMASK ?
2579 &ut->uu_oldmask : &ut->uu_sigmask, 0);
2580 #endif
2581 if (catcher == SIG_DFL) {
2582 /*
2583 * Default catcher, where the default is to kill
2584 * the process. (Other cases were ignored above.)
2585 */
2586 /* called with signal_lock() held */
2587 sigexit_locked(p, signum);
2588 return;
2589 /* NOTREACHED */
2590 } else {
2591 /*
2592 * If we get here, the signal must be caught.
2593 */
2594 #if DIAGNOSTIC
2595 if (catcher == SIG_IGN || (ut->uu_sigmask & mask))
2596 log(LOG_WARNING,
2597 "postsig: processing masked or ignored signal\n");
2598 #endif
2599 /*
2600 * Set the new mask value and also defer further
2601 * occurrences of this signal.
2602 *
2603 * Special case: user has done a sigpause. Here the
2604 * current mask is not of interest, but rather the
2605 * mask from before the sigpause is what we want
2606 * restored after the signal processing is completed.
2607 */
2608 if (ut->uu_flag & UT_SAS_OLDMASK) {
2609 returnmask = ut->uu_oldmask;
2610 ut->uu_flag &= ~UT_SAS_OLDMASK;
2611 ut->uu_oldmask = 0;
2612 } else
2613 returnmask = ut->uu_sigmask;
2614 ut->uu_sigmask |= ps->ps_catchmask[signum];
2615 if ((ps->ps_signodefer & mask) == 0)
2616 ut->uu_sigmask |= mask;
2617 if ((signum != SIGILL) && (signum != SIGTRAP) && (ps->ps_sigreset & mask)) {
2618 if ((signum != SIGCONT) && (sigprop[signum] & SA_IGNORE))
2619 p->p_sigignore |= mask;
2620 ps->ps_sigact[signum] = SIG_DFL;
2621 ps->ps_siginfo &= ~mask;
2622 ps->ps_signodefer &= ~mask;
2623 }
2624 #ifdef __ppc__
2625 /* FP exceptions must be disabled before returning to user mode */
2626 if (signum == SIGFPE) {
2627 thread_enable_fpe(current_thread(), 0);
2628 }
2629 #endif /* __ppc__ */
2630
2631 if (ps->ps_sig != signum) {
2632 code = 0;
2633 } else {
2634 code = ps->ps_code;
2635 ps->ps_code = 0;
2636 }
2637 p->p_stats->p_ru.ru_nsignals++;
2638 sendsig(p, catcher, signum, returnmask, code);
2639 }
2640 signal_unlock(p);
2641 }
2642
2643 /*
2644 * Force the current process to exit with the specified signal, dumping core
2645 * if appropriate. We bypass the normal tests for masked and caught signals,
2646 * allowing unrecoverable failures to terminate the process without changing
2647 * signal state. Mark the accounting record with the signal termination.
2648 * If dumping core, save the signal number for the debugger. Calls exit and
2649 * does not return.
2650 */
2651 /* called with signal lock */
2652 void
2653 sigexit_locked(p, signum)
2654 register struct proc *p;
2655 int signum;
2656 {
2657
2658 sig_lock_to_exit(p);
2659 p->p_acflag |= AXSIG;
2660 if (sigprop[signum] & SA_CORE) {
2661 p->p_sigacts->ps_sig = signum;
2662 signal_unlock(p);
2663 if (coredump(p) == 0)
2664 signum |= WCOREFLAG;
2665 } else
2666 signal_unlock(p);
2667
2668 exit1(p, W_EXITCODE(0, signum), (int *)NULL);
2669 /* NOTREACHED */
2670 }
2671
2672
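/*
 * kqueue signal filter support: attach the knote to the current
 * process's knote list (shared with proc knotes) under the kernel
 * funnel.  EV_CLEAR is set so the accumulated signal count resets
 * once the event has been retrieved.
 */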
2673 static int
2674 filt_sigattach(struct knote *kn)
2675 {
2676 struct proc *p = current_proc();
2677 boolean_t funnel_state;
2678
2679 kn->kn_ptr.p_proc = p;
2680 kn->kn_flags |= EV_CLEAR; /* automatically set */
2681
2682 /* Take the funnel to protect the proc while adding to the list */
2683 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2684 KNOTE_ATTACH(&p->p_klist, kn);
2685 thread_funnel_set(kernel_flock, funnel_state);
2686
2687 return (0);
2688 }
2689
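/*
 * Detach the signal knote from its process, again taking the kernel
 * funnel to protect the shared knote list.
 */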
2690 static void
2691 filt_sigdetach(struct knote *kn)
2692 {
2693 struct proc *p = kn->kn_ptr.p_proc;
2694 boolean_t funnel_state;
2695
2696 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2697 KNOTE_DETACH(&p->p_klist, kn);
2698 thread_funnel_set(kernel_flock, funnel_state);
2699 }
2700
2701 /*
2702 * signal knotes are shared with proc knotes, so we apply a mask to
2703 * the hint in order to differentiate them from process hints. This
2704 * could be avoided by using a signal-specific knote list, but probably
2705 * isn't worth the trouble.
2706 */
2707 static int
2708 filt_signal(struct knote *kn, long hint)
2709 {
2710
2711 if (hint & NOTE_SIGNAL) {
2712 hint &= ~NOTE_SIGNAL;
2713
2714 if (kn->kn_id == (unsigned int)hint)
2715 kn->kn_data++;
2716 }
2717 return (kn->kn_data != 0);
2718 }
2719
2720
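/*
 * BSD portion of the AST handler, run as a thread heads back toward
 * user mode: charge any deferred profiling tick (P_OWEUPC), deliver
 * pending signals via the issignal()/postsig() loop, and perform the
 * one-time bsdinit_task() call, all under the kernel funnel.
 */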
2721 void
2722 bsd_ast(thread_t thr_act)
2723 {
2724 struct proc *p = current_proc();
2725 struct uthread *ut = get_bsdthread_info(thr_act);
2726 int signum;
2727 user_addr_t pc;
2728 boolean_t funnel_state;
2729 static int bsd_init_done = 0;
2730
2731 if (p == NULL)
2732 return;
2733
2734 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2735
2736 if ((p->p_flag & P_OWEUPC) && (p->p_flag & P_PROFIL)) {
2737 pc = get_useraddr();
2738 addupc_task(p, pc, 1);
2739 p->p_flag &= ~P_OWEUPC;
2740 }
2741
2742 if (CHECK_SIGNALS(p, current_thread(), ut)) {
2743 while ( (signum = issignal(p)) )
2744 postsig(signum);
2745 }
2746 if (!bsd_init_done) {
2747 bsd_init_done = 1;
2748 bsdinit_task();
2749 }
2750
2751 (void) thread_funnel_set(kernel_flock, FALSE);
2752 }
2753
2754 /*
2755 * The following routines are called via callout from bsd_hardclock
2756 * so that psignal runs in a thread context and under the funnel.
2757 */
2758 void
2759 psignal_vtalarm(struct proc *p)
2760 {
2761 boolean_t funnel_state;
2762
2763 if (p == NULL)
2764 return;
2765 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2766 psignal_lock(p, SIGVTALRM, 1);
2767 (void) thread_funnel_set(kernel_flock, FALSE);
2768 }
2769
2770 void
2771 psignal_xcpu(struct proc *p)
2772 {
2773 boolean_t funnel_state;
2774
2775 if (p == NULL)
2776 return;
2777 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2778 psignal_lock(p, SIGXCPU, 1);
2779 (void) thread_funnel_set(kernel_flock, FALSE);
2780 }
2781
2782 void
2783 psignal_sigprof(struct proc *p)
2784 {
2785 boolean_t funnel_state;
2786
2787 if (p == NULL)
2788 return;
2789 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2790 psignal_lock(p, SIGPROF, 1);
2791 (void) thread_funnel_set(kernel_flock, FALSE);
2792 }
2793
2794 /* ptrace: set a traced process runnable again */
2795 void
2796 pt_setrunnable(struct proc *p)
2797 {
2798 task_t task;
2799
2800 task = p->task;
2801
2802 if (p->p_flag & P_TRACED) {
2803 p->p_stat = SRUN;
2804 if (p->sigwait) {
2805 wakeup((caddr_t)&(p->sigwait));
2806 task_release(task);
2807 }
2808 }
2809 }
2810
2811
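/*
 * Raise a Mach exception on behalf of BSD signal code: pack the code
 * and subcode into an exception_data array and hand it to
 * bsd_exception().  issignal() above uses this with EXC_SOFTWARE /
 * EXC_SOFT_SIGNAL when delivering signals to a P_SIGEXC-traced
 * process.
 */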
2812 kern_return_t
2813 do_bsdexception(
2814 int exc,
2815 int code,
2816 int sub)
2817 {
2818 exception_data_type_t codes[EXCEPTION_CODE_MAX];
2819
2820 codes[0] = code;
2821 codes[1] = sub;
2822 return(bsd_exception(exc, codes, 2));
2823 }
2824
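/*
 * Return the subset of 'mask' that is pending and deliverable for the
 * process: pending on some thread, not blocked by that thread's signal
 * mask, and not ignored by the process.  For a vfork parent
 * (P_INVFORK) only the vfork thread is examined.  A process that is
 * already in proc exit reports no pending signals.
 */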
2825 int
2826 proc_pendingsignals(struct proc *p, sigset_t mask)
2827 {
2828 struct uthread * uth;
2829 thread_t th;
2830 sigset_t bits = 0;
2831 int error;
2832
2833 /* If the process is in proc exit return no signal info */
2834 if (p->p_lflag & P_LPEXIT)
2835 return(0);
2836
2837 /* Duplicate the signal lock code to allow recursion, since exit
2838 * holds the lock for too long. All of this code is being reworked;
2839 * this is just a workaround for regressions until the new code
2840 * arrives.
2841 */
2842 ppend_retry:
2843 error = lockmgr((struct lock__bsd__ *)&p->signal_lock[0], (LK_EXCLUSIVE | LK_CANRECURSE), 0, (struct proc *)0);
2844 if (error == EINTR)
2845 goto ppend_retry;
2846
2847 if ((p->p_flag & P_INVFORK) && p->p_vforkact) {
2848 th = p->p_vforkact;
2849 uth = (struct uthread *)get_bsdthread_info(th);
2850 if (uth) {
2851 bits = (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
2852 }
2853 goto out;
2854 }
2855
2856 bits = 0;
2857 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
2858 bits |= (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
2859 }
2860 out:
2861 signal_unlock(p);
2862 return(bits);
2863 }
2864
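/*
 * Per-thread variant of proc_pendingsignals(): return the subset of
 * 'mask' that is pending on the given thread and neither blocked nor
 * ignored.
 */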
2865 int
2866 thread_issignal(proc_t p, thread_t th, sigset_t mask)
2867 {
2868 struct uthread * uth;
2869 sigset_t bits=0;
2870
2871
2872 uth = (struct uthread *)get_bsdthread_info(th);
2873 if (uth) {
2874 bits = (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
2875 }
2876 return(bits);
2877 }
2878