1 /*
2 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995-1998 Apple Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94
67 */
68
69 #define SIGPROP /* include signal properties table */
70 #include <sys/param.h>
71 #include <sys/resourcevar.h>
72 #include <sys/proc_internal.h>
73 #include <sys/kauth.h>
74 #include <sys/systm.h>
75 #include <sys/timeb.h>
76 #include <sys/times.h>
77 #include <sys/acct.h>
78 #include <sys/file_internal.h>
79 #include <sys/kernel.h>
80 #include <sys/wait.h>
81 #include <sys/signalvar.h>
82 #if KTRACE
83 #include <sys/ktrace.h>
84 #endif
85 #include <sys/syslog.h>
86 #include <sys/stat.h>
87 #include <sys/lock.h>
88 #include <sys/kdebug.h>
89
90 #include <sys/mount.h>
91 #include <sys/sysproto.h>
92
93 #include <bsm/audit_kernel.h>
94
95 #include <machine/spl.h>
96
97 #include <kern/cpu_number.h>
98
99 #include <sys/vm.h>
100 #include <sys/user.h> /* for coredump */
101 #include <kern/ast.h> /* for APC support */
102 #include <kern/lock.h>
103 #include <kern/task.h> /* extern void *get_bsdtask_info(task_t); */
104 #include <kern/thread.h>
105 #include <kern/sched_prim.h>
106 #include <kern/thread_call.h>
107 #include <mach/exception.h>
108 #include <mach/task.h>
109 #include <mach/thread_act.h>
110
111 /*
112 * Missing prototypes that Mach should export
113 *
114 * +++
115 */
116 extern int thread_enable_fpe(thread_t act, int onoff);
117 extern void unix_syscall_return(int error);
118 extern thread_t port_name_to_thread(mach_port_name_t port_name);
119 extern kern_return_t check_actforsig(task_t task, thread_t thread, int setast);
120 extern kern_return_t get_signalact(task_t, thread_t *, int);
121 extern boolean_t thread_should_abort(thread_t);
122 extern unsigned int get_useraddr(void);
123
124 /*
125 * ---
126 */
127
128 extern void doexception(int exc, int code, int sub);
129
130 void stop(struct proc *p);
131 int cansignal(struct proc *, kauth_cred_t, struct proc *, int);
132 int killpg1(struct proc *, int, int, int);
133 void sigexit_locked(struct proc *, int);
134 int setsigvec(struct proc *, int, struct __user_sigaction *);
135 void exit1(struct proc *, int, int *);
136 void psignal_uthread(thread_t, int);
137 kern_return_t do_bsdexception(int, int, int);
138 void __posix_sem_syscall_return(kern_return_t);
139
140 /* Implementations are in osfmk/kern/sync_sema.c. We do not want port.h in this scope, so the port arguments are declared as void *. */
141 kern_return_t semaphore_timedwait_signal_trap_internal(void *, void *, time_t, int32_t, void (*)(int));
142 kern_return_t semaphore_timedwait_trap_internal(void *, time_t, int32_t, void (*)(int));
143 kern_return_t semaphore_wait_signal_trap_internal(void *, void *, void (*)(int));
144 kern_return_t semaphore_wait_trap_internal(void *, void (*)(int));
145
146 static int filt_sigattach(struct knote *kn);
147 static void filt_sigdetach(struct knote *kn);
148 static int filt_signal(struct knote *kn, long hint);
149
150 struct filterops sig_filtops =
151 { 0, filt_sigattach, filt_sigdetach, filt_signal };
152
153
154 /*
155 * NOTE: Source and target may *NOT* overlap! (target is smaller)
156 */
157 static void
158 sigaltstack_64to32(struct user_sigaltstack *in, struct sigaltstack *out)
159 {
160 out->ss_sp = CAST_DOWN(void *,in->ss_sp);
161 out->ss_size = in->ss_size;
162 out->ss_flags = in->ss_flags;
163 }
164
165 /*
166 * NOTE: Source and target are permitted to overlap! (source is smaller);
167 * this works because we copy fields in order from the end of the struct to
168 * the beginning.
169 */
170 static void
171 sigaltstack_32to64(struct sigaltstack *in, struct user_sigaltstack *out)
172 {
173 out->ss_flags = in->ss_flags;
174 out->ss_size = in->ss_size;
175 out->ss_sp = CAST_USER_ADDR_T(in->ss_sp);
176 }
177
178 static void
179 sigaction_64to32(struct user_sigaction *in, struct sigaction *out)
180 {
181 /* This assumes 32 bit __sa_handler is of type sig_t */
182 out->__sigaction_u.__sa_handler = CAST_DOWN(sig_t,in->__sigaction_u.__sa_handler);
183 out->sa_mask = in->sa_mask;
184 out->sa_flags = in->sa_flags;
185 }
186
187 static void
188 __sigaction_32to64(struct __sigaction *in, struct __user_sigaction *out)
189 {
190 out->__sigaction_u.__sa_handler = CAST_USER_ADDR_T(in->__sigaction_u.__sa_handler);
191 out->sa_tramp = CAST_USER_ADDR_T(in->sa_tramp);
192 out->sa_mask = in->sa_mask;
193 out->sa_flags = in->sa_flags;
194 }
195
196
197 #if SIGNAL_DEBUG
198 void ram_printf(int);
199 int ram_debug=0;
200 unsigned int rdebug_proc=0;
201 void
202 ram_printf(int x)
203 {
204 printf("x is %d",x);
205
206 }
207 #endif /* SIGNAL_DEBUG */
208
209 int
210 signal_lock(struct proc *p)
211 {
212 int error = 0;
213 #if DIAGNOSTIC
214 #if SIGNAL_DEBUG
215 #ifdef __ppc__
216 {
217 int register sp, *fp, numsaved;
218
219 __asm__ volatile("mr %0,r1" : "=r" (sp));
220
221 fp = (int *)*((int *)sp);
222 for (numsaved = 0; numsaved < 3; numsaved++) {
223 p->lockpc[numsaved] = fp[2];
224 if ((int)fp <= 0)
225 break;
226 fp = (int *)*fp;
227 }
228 }
229 #endif /* __ppc__ */
230 #endif /* SIGNAL_DEBUG */
231 #endif /* DIAGNOSTIC */
232
233 siglock_retry:
234 error = lockmgr((struct lock__bsd__ *)&p->signal_lock[0], LK_EXCLUSIVE, 0, (struct proc *)0);
235 if (error == EINTR)
236 goto siglock_retry;
237 return(error);
238 }
239
240 int
241 signal_unlock(struct proc *p)
242 {
243 #if DIAGNOSTIC
244 #if SIGNAL_DEBUG
245 #ifdef __ppc__
246 {
247 int register sp, *fp, numsaved;
248
249 __asm__ volatile("mr %0,r1" : "=r" (sp));
250
251 fp = (int *)*((int *)sp);
252 for (numsaved = 0; numsaved < 3; numsaved++) {
253 p->unlockpc[numsaved] = fp[2];
254 if ((int)fp <= 0)
255 break;
256 fp = (int *)*fp;
257 }
258 }
259 #endif /* __ppc__ */
260 #endif /* SIGNAL_DEBUG */
261 #endif /* DIAGNOSTIC */
262
263 /* TBD: check p last arg */
264 return(lockmgr((struct lock__bsd__ *)&p->signal_lock[0], LK_RELEASE, (simple_lock_t)0, (struct proc *)0));
265 }
266
267 void
268 signal_setast(sig_actthread)
269 thread_t sig_actthread;
270 {
271 act_set_astbsd(sig_actthread);
272 }
273
274 /*
275 * Can process p, with ucred uc, send the signal signum to process q?
276 */
277 int
278 cansignal(p, uc, q, signum)
279 struct proc *p;
280 kauth_cred_t uc;
281 struct proc *q;
282 int signum;
283 {
284 /* you can signal yourself */
285 if (p == q)
286 return(1);
287
288 if (!suser(uc, NULL))
289 return (1); /* root can always signal */
290
291 if (signum == SIGCONT && q->p_session == p->p_session)
292 return (1); /* SIGCONT in session */
293
294 /*
295 * Using kill(), only certain signals can be sent to setugid
296 * child processes
297 */
298 if (q->p_flag & P_SUGID) {
299 switch (signum) {
300 case 0:
301 case SIGKILL:
302 case SIGINT:
303 case SIGTERM:
304 case SIGSTOP:
305 case SIGTTIN:
306 case SIGTTOU:
307 case SIGTSTP:
308 case SIGHUP:
309 case SIGUSR1:
310 case SIGUSR2:
311 if (uc->cr_ruid == q->p_ucred->cr_ruid ||
312 kauth_cred_getuid(uc) == q->p_ucred->cr_ruid ||
313 uc->cr_ruid == kauth_cred_getuid(q->p_ucred) ||
314 kauth_cred_getuid(uc) == kauth_cred_getuid(q->p_ucred))
315 return (1);
316 }
317 return (0);
318 }
319
320 /* XXX
321 * because the P_SUGID test exists, this has extra tests which
322 * could be removed.
323 */
324 if (uc->cr_ruid == q->p_ucred->cr_ruid ||
325 uc->cr_ruid == q->p_ucred->cr_svuid ||
326 kauth_cred_getuid(uc) == q->p_ucred->cr_ruid ||
327 kauth_cred_getuid(uc) == q->p_ucred->cr_svuid ||
328 uc->cr_ruid == kauth_cred_getuid(q->p_ucred) ||
329 kauth_cred_getuid(uc) == kauth_cred_getuid(q->p_ucred))
330 return (1);
331 return (0);
332 }
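/*
 * Hedged user-space sketch (illustration only, not part of the kernel logic;
 * the target pid is hypothetical): the uid checks above are what make an
 * unprivileged kill(2) like this fail with EPERM when none of them pass.
 *
 *	#include <sys/types.h>
 *	#include <signal.h>
 *	#include <errno.h>
 *	#include <stdio.h>
 *
 *	static void try_signal(pid_t pid)
 *	{
 *		if (kill(pid, SIGTERM) == -1 && errno == EPERM)
 *			printf("not permitted to signal %d\n", (int)pid);
 *	}
 */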
333
334
335 /* ARGSUSED */
336 int
337 sigaction(struct proc *p, register struct sigaction_args *uap, __unused register_t *retval)
338 {
339 struct user_sigaction vec;
340 struct __user_sigaction __vec;
341
342 struct user_sigaction *sa = &vec;
343 register struct sigacts *ps = p->p_sigacts;
344
345 register int signum;
346 int bit, error=0;
347
348 signum = uap->signum;
349 if (signum <= 0 || signum >= NSIG ||
350 signum == SIGKILL || signum == SIGSTOP)
351 return (EINVAL);
352
353 if (uap->osa) {
354 sa->sa_handler = ps->ps_sigact[signum];
355 sa->sa_mask = ps->ps_catchmask[signum];
356 bit = sigmask(signum);
357 sa->sa_flags = 0;
358 if ((ps->ps_sigonstack & bit) != 0)
359 sa->sa_flags |= SA_ONSTACK;
360 if ((ps->ps_sigintr & bit) == 0)
361 sa->sa_flags |= SA_RESTART;
362 if (ps->ps_siginfo & bit)
363 sa->sa_flags |= SA_SIGINFO;
364 if (ps->ps_signodefer & bit)
365 sa->sa_flags |= SA_NODEFER;
366 if (ps->ps_64regset & bit)
367 sa->sa_flags |= SA_64REGSET;
368 if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDSTOP))
369 sa->sa_flags |= SA_NOCLDSTOP;
370 if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDWAIT))
371 sa->sa_flags |= SA_NOCLDWAIT;
372
373 if (IS_64BIT_PROCESS(p)) {
374 error = copyout(sa, uap->osa, sizeof(struct user_sigaction));
375 } else {
376 struct sigaction vec32;
377 sigaction_64to32(sa, &vec32);
378 error = copyout(&vec32, uap->osa, sizeof(struct sigaction));
379 }
380 if (error)
381 return (error);
382 }
383 if (uap->nsa) {
384 if (IS_64BIT_PROCESS(p)) {
385 error = copyin(uap->nsa, &__vec, sizeof(struct __user_sigaction));
386 } else {
387 struct __sigaction __vec32;
388 error = copyin(uap->nsa, &__vec32, sizeof(struct __sigaction));
389 __sigaction_32to64(&__vec32, &__vec);
390 }
391 if (error)
392 return (error);
393 error = setsigvec(p, signum, &__vec);
394 }
395 return (error);
396 }
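/*
 * Hedged user-space sketch (illustration only, not part of this file):
 * installing a handler whose flags map onto the ps_siginfo/ps_sigintr/
 * ps_sigonstack bookkeeping managed above.  The handler body and signal
 * choice are hypothetical.
 *
 *	#include <signal.h>
 *	#include <string.h>
 *
 *	static void on_usr1(int sig, siginfo_t *info, void *ctx)
 *	{
 *		(void)sig; (void)info; (void)ctx;
 *	}
 *
 *	static int install_usr1(void)
 *	{
 *		struct sigaction sa;
 *
 *		memset(&sa, 0, sizeof(sa));
 *		sa.sa_sigaction = on_usr1;
 *		sa.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
 *		sigemptyset(&sa.sa_mask);
 *		return sigaction(SIGUSR1, &sa, NULL);
 *	}
 */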
397
398 /* Routines to manipulate bits on all threads */
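/*
 * Each helper below takes signal_lock() and applies the change to every
 * uthread on p_uthlist (or, while the process is in the middle of a vfork,
 * only the vfork activation), then mirrors it in the proc-level
 * p_siglist/p_sigmask fields kept for code that only looks at the proc.
 */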
399 int
400 clear_procsiglist(struct proc *p, int bit)
401 {
402 struct uthread * uth;
403 thread_t thact;
404
405 signal_lock(p);
406
407 if ((p->p_flag & P_INVFORK) && p->p_vforkact) {
408 thact = p->p_vforkact;
409 uth = (struct uthread *)get_bsdthread_info(thact);
410 if (uth) {
411 uth->uu_siglist &= ~bit;
412 }
413 p->p_siglist &= ~bit;
414 signal_unlock(p);
415 return(0);
416 }
417
418 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
419 uth->uu_siglist &= ~bit;
420 }
421 p->p_siglist &= ~bit;
422 signal_unlock(p);
423 return(0);
424 }
425
426
427 static int
428 unblock_procsigmask(struct proc *p, int bit)
429 {
430 struct uthread * uth;
431 thread_t thact;
432
433 signal_lock(p);
434 if ((p->p_flag & P_INVFORK) && p->p_vforkact) {
435 thact = p->p_vforkact;
436 uth = (struct uthread *)get_bsdthread_info(thact);
437 if (uth) {
438 uth->uu_sigmask &= ~bit;
439 }
440 p->p_sigmask &= ~bit;
441 signal_unlock(p);
442 return(0);
443 }
444 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
445 uth->uu_sigmask &= ~bit;
446 }
447 p->p_sigmask &= ~bit;
448 signal_unlock(p);
449 return(0);
450 }
451
452
453 static int
454 block_procsigmask(struct proc *p, int bit)
455 {
456 struct uthread * uth;
457 thread_t thact;
458
459 signal_lock(p);
460 if ((p->p_flag & P_INVFORK) && p->p_vforkact) {
461 thact = p->p_vforkact;
462 uth = (struct uthread *)get_bsdthread_info(thact);
463 if (uth) {
464 uth->uu_sigmask |= bit;
465 }
466 p->p_sigmask |= bit;
467 signal_unlock(p);
468 return(0);
469 }
470 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
471 uth->uu_sigmask |= bit;
472 }
473 p->p_sigmask |= bit;
474 signal_unlock(p);
475 return(0);
476 }
477
478 int
479 set_procsigmask(struct proc *p, int bit)
480 {
481 struct uthread * uth;
482 thread_t thact;
483
484 signal_lock(p);
485 if ((p->p_flag & P_INVFORK) && p->p_vforkact) {
486 thact = p->p_vforkact;
487 uth = (struct uthread *)get_bsdthread_info(thact);
488 if (uth) {
489 uth->uu_sigmask = bit;
490 }
491 p->p_sigmask = bit;
492 signal_unlock(p);
493 return(0);
494 }
495 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
496 uth->uu_sigmask = bit;
497 }
498 p->p_sigmask = bit;
499 signal_unlock(p);
500 return(0);
501 }
502
503 /* XXX should be static? */
504 int
505 setsigvec(struct proc *p, int signum, struct __user_sigaction *sa)
506 {
507 register struct sigacts *ps = p->p_sigacts;
508 register int bit;
509
510 if ((signum == SIGKILL || signum == SIGSTOP) &&
511 sa->sa_handler != SIG_DFL)
512 return(EINVAL);
513 bit = sigmask(signum);
514 /*
515 * Change setting atomically.
516 */
517 ps->ps_sigact[signum] = sa->sa_handler;
518 ps->ps_trampact[signum] = sa->sa_tramp;
519 ps->ps_catchmask[signum] = sa->sa_mask &~ sigcantmask;
520 if (sa->sa_flags & SA_SIGINFO)
521 ps->ps_siginfo |= bit;
522 else
523 ps->ps_siginfo &= ~bit;
524 if (sa->sa_flags & SA_64REGSET)
525 ps->ps_64regset |= bit;
526 else
527 ps->ps_64regset &= ~bit;
528 if ((sa->sa_flags & SA_RESTART) == 0)
529 ps->ps_sigintr |= bit;
530 else
531 ps->ps_sigintr &= ~bit;
532 if (sa->sa_flags & SA_ONSTACK)
533 ps->ps_sigonstack |= bit;
534 else
535 ps->ps_sigonstack &= ~bit;
536 if (sa->sa_flags & SA_USERTRAMP)
537 ps->ps_usertramp |= bit;
538 else
539 ps->ps_usertramp &= ~bit;
540 if (sa->sa_flags & SA_RESETHAND)
541 ps->ps_sigreset |= bit;
542 else
543 ps->ps_sigreset &= ~bit;
544 if (sa->sa_flags & SA_NODEFER)
545 ps->ps_signodefer |= bit;
546 else
547 ps->ps_signodefer &= ~bit;
548 if (signum == SIGCHLD) {
549 if (sa->sa_flags & SA_NOCLDSTOP)
550 p->p_flag |= P_NOCLDSTOP;
551 else
552 p->p_flag &= ~P_NOCLDSTOP;
553 if ((sa->sa_flags & SA_NOCLDWAIT) || (sa->sa_handler == SIG_IGN))
554 p->p_flag |= P_NOCLDWAIT;
555 else
556 p->p_flag &= ~P_NOCLDWAIT;
557 }
558
559 #ifdef __ppc__
560 if (signum == SIGFPE) {
561 if (sa->sa_handler == SIG_DFL || sa->sa_handler == SIG_IGN)
562 thread_enable_fpe(current_thread(), 0);
563 else
564 thread_enable_fpe(current_thread(), 1);
565 }
566 #endif /* __ppc__ */
567 /*
568 * Set bit in p_sigignore for signals that are set to SIG_IGN,
569 * and for signals set to SIG_DFL where the default is to ignore.
570 * However, don't put SIGCONT in p_sigignore,
571 * as we have to restart the process.
572 */
573 if (sa->sa_handler == SIG_IGN ||
574 (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) {
575
576 clear_procsiglist(p, bit);
577 if (signum != SIGCONT)
578 p->p_sigignore |= bit; /* easier in psignal */
579 p->p_sigcatch &= ~bit;
580 } else {
581 p->p_sigignore &= ~bit;
582 if (sa->sa_handler == SIG_DFL)
583 p->p_sigcatch &= ~bit;
584 else
585 p->p_sigcatch |= bit;
586 }
587 return(0);
588 }
589
590 /*
591 * Initialize signal state for process 0;
592 * set to ignore signals that are ignored by default.
593 */
594 void
595 siginit(p)
596 struct proc *p;
597 {
598 register int i;
599
600 for (i = 0; i < NSIG; i++)
601 if (sigprop[i] & SA_IGNORE && i != SIGCONT)
602 p->p_sigignore |= sigmask(i);
603 }
604
605 /*
606 * Reset signals for an exec of the specified process.
607 */
608 void
609 execsigs(p, thr_act)
610 register struct proc *p;
611 register thread_t thr_act;
612 {
613 register struct sigacts *ps = p->p_sigacts;
614 register int nc, mask;
615 struct uthread *ut = (struct uthread *)0;
616
617 if (thr_act){
618 ut = (struct uthread *)get_bsdthread_info(thr_act);
619 }
620 /*
621 * Reset caught signals. Held signals remain held
622 * through p_sigmask (unless they were caught,
623 * and are now ignored by default).
624 */
625 while (p->p_sigcatch) {
626 nc = ffs((long)p->p_sigcatch);
627 mask = sigmask(nc);
628 p->p_sigcatch &= ~mask;
629 if (sigprop[nc] & SA_IGNORE) {
630 if (nc != SIGCONT)
631 p->p_sigignore |= mask;
632 if (thr_act){
633 ut->uu_siglist &= ~mask;
634 p->p_siglist &= ~mask;
635 } else
636 clear_procsiglist(p, mask);
637 }
638 ps->ps_sigact[nc] = SIG_DFL;
639 }
640 /*
641 * Reset stack state to the user stack.
642 * Clear set of signals caught on the signal stack.
643 */
644 ps->ps_sigstk.ss_flags = SA_DISABLE;
645 ps->ps_sigstk.ss_size = 0;
646 ps->ps_sigstk.ss_sp = USER_ADDR_NULL;
647 ps->ps_flags = 0;
648 if (thr_act) {
649 ut->uu_sigstk.ss_flags = SA_DISABLE;
650 ut->uu_sigstk.ss_size = 0;
651 ut->uu_sigstk.ss_sp = USER_ADDR_NULL;
652 ut->uu_flag &= ~UT_ALTSTACK;
653 }
654 ps->ps_sigonstack = 0;
655 }
656
657 /*
658 * Manipulate the signal mask for the process.
659 * Note that the new mask is read from user space through uap->mask
660 * (if non-NULL) and the previous mask is copied out through uap->omask;
661 * the return value is an errno, not the old mask.
662 */
663 int
664 sigprocmask(register struct proc *p, struct sigprocmask_args *uap, __unused register_t *retval)
665 {
666 int error = 0;
667 sigset_t oldmask, nmask;
668 user_addr_t omask = uap->omask;
669 struct uthread *ut;
670
671 ut = (struct uthread *)get_bsdthread_info(current_thread());
672 oldmask = ut->uu_sigmask;
673
674 if (uap->mask == USER_ADDR_NULL) {
675 /* just want old mask */
676 goto out;
677 }
678 error = copyin(uap->mask, &nmask, sizeof(sigset_t));
679 if (error)
680 goto out;
681
682 switch (uap->how) {
683 case SIG_BLOCK:
684 block_procsigmask(p, (nmask & ~sigcantmask));
685 signal_setast(current_thread());
686 break;
687
688 case SIG_UNBLOCK:
689 unblock_procsigmask(p, (nmask & ~sigcantmask));
690 signal_setast(current_thread());
691 break;
692
693 case SIG_SETMASK:
694 set_procsigmask(p, (nmask & ~sigcantmask));
695 signal_setast(current_thread());
696 break;
697
698 default:
699 error = EINVAL;
700 break;
701 }
702 out:
703 if (!error && omask != USER_ADDR_NULL)
704 copyout(&oldmask, omask, sizeof(sigset_t));
705 return (error);
706 }
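/*
 * Hedged user-space sketch (illustration only, not part of this file):
 * blocking SIGINT around a critical section.  As in the switch above, bits
 * in sigcantmask (SIGKILL, SIGSTOP) are silently discarded, so they cannot
 * be blocked this way.  The callback "fn" is hypothetical.
 *
 *	#include <signal.h>
 *
 *	static int with_sigint_blocked(void (*fn)(void))
 *	{
 *		sigset_t block, old;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGINT);
 *		if (sigprocmask(SIG_BLOCK, &block, &old) == -1)
 *			return -1;
 *		fn();
 *		return sigprocmask(SIG_SETMASK, &old, NULL);
 *	}
 */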
707
708 int
709 sigpending(__unused struct proc *p, register struct sigpending_args *uap, __unused register_t *retval)
710 {
711 struct uthread *ut;
712 sigset_t pendlist;
713
714 ut = (struct uthread *)get_bsdthread_info(current_thread());
715 pendlist = ut->uu_siglist;
716
717 if (uap->osv)
718 copyout(&pendlist, uap->osv, sizeof(sigset_t));
719 return(0);
720 }
721
722
723 /*
724 * Suspend process until signal, providing mask to be set
725 * in the meantime. Note nonstandard calling convention:
726 * libc stub passes mask, not pointer, to save a copyin.
727 */
728
729 static int
730 sigcontinue(__unused int error)
731 {
732 // struct uthread *ut = get_bsdthread_info(current_thread());
733 unix_syscall_return(EINTR);
734 }
735
736 int
737 sigsuspend(register struct proc *p, struct sigsuspend_args *uap, __unused register_t *retval)
738 {
739 struct uthread *ut;
740
741 ut = (struct uthread *)get_bsdthread_info(current_thread());
742
743 /*
744 * When returning from sigpause, we want
745 * the old mask to be restored after the
746 * signal handler has finished. Thus, we
747 * save it here and mark the sigacts structure
748 * to indicate this.
749 */
750 ut->uu_oldmask = ut->uu_sigmask;
751 ut->uu_flag |= UT_SAS_OLDMASK;
752 ut->uu_sigmask = (uap->mask & ~sigcantmask);
753 (void) tsleep0((caddr_t) p, PPAUSE|PCATCH, "pause", 0, sigcontinue);
754 /* always return EINTR rather than ERESTART... */
755 return (EINTR);
756 }
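/*
 * Hedged user-space sketch (illustration only, not part of this file): the
 * classic race-free wait built on the semantics above; block the signal,
 * test a flag, then atomically restore the mask and sleep.  As noted above,
 * sigsuspend() always reports EINTR.  "got_usr1" is a hypothetical flag set
 * by a SIGUSR1 handler installed elsewhere.
 *
 *	#include <signal.h>
 *
 *	extern volatile sig_atomic_t got_usr1;
 *
 *	static void wait_for_usr1(void)
 *	{
 *		sigset_t block, old;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, &old);
 *		while (!got_usr1)
 *			sigsuspend(&old);	// returns -1 with errno == EINTR
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *	}
 */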
757
758
759 int
760 __disable_threadsignal(struct proc *p,
761 __unused register struct __disable_threadsignal_args *uap,
762 __unused register_t *retval)
763 {
764 struct uthread *uth;
765
766 uth = (struct uthread *)get_bsdthread_info(current_thread());
767
768 /* No longer valid to have any signal delivered */
769 signal_lock(p);
770 uth->uu_flag |= UT_NO_SIGMASK;
771 signal_unlock(p);
772
773 return(0);
774
775 }
776
777
778 int
779 __pthread_markcancel(p, uap, retval)
780 struct proc *p;
781 register struct __pthread_markcancel_args *uap;
782 register_t *retval;
783 {
784 thread_act_t target_act;
785 int error = 0;
786 struct uthread *uth;
787
788 target_act = (thread_act_t)port_name_to_thread(uap->thread_port);
789
790 if (target_act == THR_ACT_NULL)
791 return (ESRCH);
792
793 uth = (struct uthread *)get_bsdthread_info(target_act);
794
795 /* if the thread is in vfork do not cancel */
796 if ((uth->uu_flag & (UT_VFORK | UT_CANCEL | UT_CANCELED)) == 0) {
797 uth->uu_flag |= (UT_CANCEL | UT_NO_SIGMASK);
798 if (((uth->uu_flag & UT_NOTCANCELPT) == 0)
799 && ((uth->uu_flag & UT_CANCELDISABLE) == 0))
800 thread_abort_safely(target_act);
801 }
802
803 thread_deallocate(target_act);
804 return (error);
805 }
806
807 /* if action == 0: return the cancellation state;
808 *   if the thread is marked for cancellation, mark it canceled
809 * if action == 1: enable cancel handling
810 * if action == 2: disable cancel handling
811 */
812 int
813 __pthread_canceled(p, uap, retval)
814 struct proc *p;
815 register struct __pthread_canceled_args *uap;
816 register_t *retval;
817 {
818 thread_act_t thr_act;
819 struct uthread *uth;
820 int action = uap->action;
821
822 thr_act = current_act();
823 uth = (struct uthread *)get_bsdthread_info(thr_act);
824
825 switch (action) {
826 case 1:
827 uth->uu_flag &= ~UT_CANCELDISABLE;
828 return(0);
829 case 2:
830 uth->uu_flag |= UT_CANCELDISABLE;
831 return(0);
832 case 0:
833 default:
834 /* if cancellation is pending and neither disabled nor already acted on, mark the thread canceled */
835 if((uth->uu_flag & ( UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
836 uth->uu_flag &= ~UT_CANCEL;
837 uth->uu_flag |= (UT_CANCELED | UT_NO_SIGMASK);
838 return(0);
839 }
840 return(EINVAL);
841 }
842 return(EINVAL);
843 }
844
845 void
846 __posix_sem_syscall_return(kern_return_t kern_result)
847 {
848 int error = 0;
849
850 if (kern_result == KERN_SUCCESS)
851 error = 0;
852 else if (kern_result == KERN_ABORTED)
853 error = EINTR;
854 else if (kern_result == KERN_OPERATION_TIMED_OUT)
855 error = ETIMEDOUT;
856 else
857 error = EINVAL;
858 unix_syscall_return(error);
859 /* does not return */
860 }
861
862
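/*
 * __semwait_signal() converts an absolute timeout into a relative one, then
 * waits on cond_sem via the Mach semaphore traps (optionally signalling
 * mutex_sem first), and maps the kern_return_t to 0/EINTR/ETIMEDOUT/EINVAL.
 * If the wait blocks, the result instead comes back through
 * __posix_sem_syscall_return() above; it appears to be intended as the
 * kernel side of user-space pthread semaphore/condition waits.
 */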
863 int
864 __semwait_signal(p, uap, retval)
865 struct proc *p;
866 register struct __semwait_signal_args *uap;
867 register_t *retval;
868 {
869
870 kern_return_t kern_result;
871 mach_timespec_t then;
872 struct timespec now;
873
874 if(uap->timeout) {
875
876 if (uap->relative) {
877 then.tv_sec = uap->tv_sec;
878 then.tv_nsec = uap->tv_nsec;
879 } else {
880 nanotime(&now);
881 then.tv_sec = uap->tv_sec - now.tv_sec;
882 then.tv_nsec = uap->tv_nsec - now.tv_nsec;
883 if (then.tv_nsec < 0) {
884 then.tv_nsec += NSEC_PER_SEC;
885 then.tv_sec--;
886 }
887 }
888
889 if (uap->mutex_sem == (void *)NULL)
890 kern_result = semaphore_timedwait_trap_internal(uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
891 else
892 kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
893
894 } else {
895
896 if (uap->mutex_sem == (void *)NULL)
897 kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return);
898 else
900 kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return);
901 }
902
903 out:
904 if (kern_result == KERN_SUCCESS)
905 return(0);
906 else if (kern_result == KERN_ABORTED)
907 return(EINTR);
908 else if (kern_result == KERN_OPERATION_TIMED_OUT)
909 return(ETIMEDOUT);
910 else
911 return(EINVAL);
912 }
913
914
915 int
916 __pthread_kill(__unused struct proc *p,
917 register struct __pthread_kill_args *uap,
918 __unused register_t *retval)
919 {
920 thread_t target_act;
921 int error = 0;
922 int signum = uap->sig;
923 struct uthread *uth;
924
925 target_act = (thread_t)port_name_to_thread(uap->thread_port);
926
927 if (target_act == THREAD_NULL)
928 return (ESRCH);
929 if ((u_int)signum >= NSIG) {
930 error = EINVAL;
931 goto out;
932 }
933
934 uth = (struct uthread *)get_bsdthread_info(target_act);
935
936 if (uth->uu_flag & UT_NO_SIGMASK) {
937 error = ESRCH;
938 goto out;
939 }
940
941 if (signum)
942 psignal_uthread(target_act, signum);
943 out:
944 thread_deallocate(target_act);
945 return (error);
946 }
947
948
949 int
950 pthread_sigmask(__unused register struct proc *p,
951 register struct pthread_sigmask_args *uap,
952 __unused register_t *retval)
953 {
954 user_addr_t set = uap->set;
955 user_addr_t oset = uap->oset;
956 sigset_t nset;
957 int error = 0;
958 struct uthread *ut;
959 sigset_t oldset;
960
961 ut = (struct uthread *)get_bsdthread_info(current_thread());
962 oldset = ut->uu_sigmask;
963
964 if (set == USER_ADDR_NULL) {
965 /* need only old mask */
966 goto out;
967 }
968
969 error = copyin(set, &nset, sizeof(sigset_t));
970 if (error)
971 goto out;
972
973 switch (uap->how) {
974 case SIG_BLOCK:
975 ut->uu_sigmask |= (nset & ~sigcantmask);
976 break;
977
978 case SIG_UNBLOCK:
979 ut->uu_sigmask &= ~(nset);
980 signal_setast(current_thread());
981 break;
982
983 case SIG_SETMASK:
984 ut->uu_sigmask = (nset & ~sigcantmask);
985 signal_setast(current_thread());
986 break;
987
988 default:
989 error = EINVAL;
990
991 }
992 out:
993 if (!error && oset != USER_ADDR_NULL)
994 copyout(&oldset, oset, sizeof(sigset_t));
995
996 return(error);
997 }
998
999
1000 int
1001 sigwait(register struct proc *p, register struct sigwait_args *uap, __unused register_t *retval)
1002 {
1003 struct uthread *ut;
1004 struct uthread *uth;
1005 int error = 0;
1006 sigset_t mask;
1007 sigset_t siglist;
1008 sigset_t sigw=0;
1009 int signum;
1010
1011 ut = (struct uthread *)get_bsdthread_info(current_thread());
1012
1013 if (uap->set == USER_ADDR_NULL)
1014 return(EINVAL);
1015
1016 error = copyin(uap->set, &mask, sizeof(sigset_t));
1017 if (error)
1018 return(error);
1019
1020 siglist = (mask & ~sigcantmask);
1021
1022 if (siglist == 0)
1023 return(EINVAL);
1024
1025 signal_lock(p);
1026 if ((p->p_flag & P_INVFORK) && p->p_vforkact) {
1027 signal_unlock(p);
1028 return(EINVAL);
1029 } else {
1030 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
1031 if ( (sigw = uth->uu_siglist & siglist) ) {
1032 break;
1033 }
1034 }
1035 }
1036 signal_unlock(p);
1037 if (sigw) {
1038 /* The signal was pending on a thread */
1039 goto sigwait1;
1040 }
1041 /*
1042 * When returning from sigwait, we want
1043 * the old mask to be restored after the
1044 * signal handler has finished. Thus, we
1045 * save it here and mark the sigacts structure
1046 * to indicate this.
1047 */
1048 ut->uu_oldmask = ut->uu_sigmask;
1049 ut->uu_flag |= UT_SAS_OLDMASK;
1050 if (siglist == (sigset_t)0)
1051 return(EINVAL);
1052 /* SIGKILL and SIGSTOP are not maskable as well */
1053 ut->uu_sigmask = ~(siglist|sigcantmask);
1054 ut->uu_sigwait = siglist;
1055 /* No Continuations for now */
1056 error = tsleep((caddr_t)&ut->uu_sigwait, PPAUSE|PCATCH, "pause", 0);
1057
1058 if ((error == EINTR) || (error == ERESTART))
1059 error = 0;
1060
1061 sigw = (ut->uu_sigwait & siglist);
1062 ut->uu_sigmask = ut->uu_oldmask;
1063 ut->uu_oldmask = 0;
1064 ut->uu_flag &= ~UT_SAS_OLDMASK;
1065 sigwait1:
1066 ut->uu_sigwait = 0;
1067 if (!error) {
1068 signum = ffs((unsigned int)sigw);
1069 if (!signum)
1070 panic("sigwait with no signal wakeup");
1071 ut->uu_siglist &= ~(sigmask(signum));
1072 if (uap->sig != USER_ADDR_NULL)
1073 error = copyout(&signum, uap->sig, sizeof(int));
1074 }
1075
1076 return(error);
1077
1078 }
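/*
 * Hedged user-space sketch (illustration only, not part of this file): a
 * dedicated signal-handling thread using sigwait(), matching the kernel path
 * above.  It assumes the signals are already blocked in every thread (for
 * example via pthread_sigmask() before the threads are created).
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	static void *signal_thread(void *arg)
 *	{
 *		sigset_t set;
 *		int sig;
 *
 *		(void)arg;
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGHUP);
 *		sigaddset(&set, SIGTERM);
 *		while (sigwait(&set, &sig) == 0)
 *			printf("received signal %d\n", sig);
 *		return NULL;
 *	}
 */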
1079
1080
1081 int
1082 sigaltstack(struct proc *p, register struct sigaltstack_args *uap, __unused register_t *retval)
1083 {
1084 struct sigacts *psp;
1085 struct user_sigaltstack *pstk;
1086 struct user_sigaltstack ss;
1087 struct uthread *uth;
1088 int uthsigaltstack = 0;
1089 int error;
1090
1091 uth = (struct uthread *)get_bsdthread_info(current_thread());
1092 uthsigaltstack = p->p_lflag & P_LTHSIGSTACK;
1093
1094 psp = p->p_sigacts;
1095 if (uthsigaltstack != 0) {
1096 pstk = &uth->uu_sigstk;
1097 if ((uth->uu_flag & UT_ALTSTACK) == 0)
1098 uth->uu_sigstk.ss_flags |= SA_DISABLE;
1099 } else {
1100 pstk = &psp->ps_sigstk;
1101 if ((psp->ps_flags & SAS_ALTSTACK) == 0)
1102 psp->ps_sigstk.ss_flags |= SA_DISABLE;
1103 }
1104 if (uap->oss) {
1105 if (IS_64BIT_PROCESS(p)) {
1106 error = copyout(pstk, uap->oss, sizeof(struct user_sigaltstack));
1107 } else {
1108 struct sigaltstack ss32;
1109 sigaltstack_64to32(pstk, &ss32);
1110 error = copyout(&ss32, uap->oss, sizeof(struct sigaltstack));
1111 }
1112 if (error)
1113 return (error);
1114 }
1115 if (uap->nss == USER_ADDR_NULL)
1116 return (0);
1117 if (IS_64BIT_PROCESS(p)) {
1118 error = copyin(uap->nss, &ss, sizeof(struct user_sigaltstack));
1119 } else {
1120 struct sigaltstack ss32;
1121 error = copyin(uap->nss, &ss32, sizeof(struct sigaltstack));
1122 sigaltstack_32to64(&ss32,&ss);
1123 }
1124 if (error)
1125 return (error);
1126 if ((ss.ss_flags & ~SA_DISABLE) != 0) {
1127 return(EINVAL);
1128 }
1129
1130 if (ss.ss_flags & SA_DISABLE) {
1131 if (uthsigaltstack != 0) {
1132 /* if we are here we are not in the signal handler, so no need to check */
1133 if (uth->uu_sigstk.ss_flags & SA_ONSTACK)
1134 return (EINVAL);
1135 uth->uu_flag &= ~UT_ALTSTACK;
1136 uth->uu_sigstk.ss_flags = ss.ss_flags;
1137 } else {
1138 if (psp->ps_sigstk.ss_flags & SA_ONSTACK)
1139 return (EINVAL);
1140 psp->ps_flags &= ~SAS_ALTSTACK;
1141 psp->ps_sigstk.ss_flags = ss.ss_flags;
1142 }
1143
1144 return (0);
1145 }
1146 /* The older minimum stack size was 8K; enforce it to avoid compatibility problems */
1147 #define OLDMINSIGSTKSZ 8*1024
1148 if (ss.ss_size < OLDMINSIGSTKSZ)
1149 return (ENOMEM);
1150 if (uthsigaltstack != 0) {
1151 uth->uu_flag |= UT_ALTSTACK;
1152 uth->uu_sigstk= ss;
1153 } else {
1154 psp->ps_flags |= SAS_ALTSTACK;
1155 psp->ps_sigstk= ss;
1156 }
1157 return (0);
1158 }
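/*
 * Hedged user-space sketch (illustration only, not part of this file): giving
 * SIGSEGV its own stack so a handler can still run after a stack overflow.
 * Note the 8K minimum enforced via OLDMINSIGSTKSZ above; SIGSTKSZ is larger.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	static int setup_altstack(void (*handler)(int))
 *	{
 *		stack_t ss;
 *		struct sigaction sa;
 *
 *		ss.ss_sp = malloc(SIGSTKSZ);
 *		ss.ss_size = SIGSTKSZ;
 *		ss.ss_flags = 0;
 *		if (ss.ss_sp == NULL || sigaltstack(&ss, NULL) == -1)
 *			return -1;
 *
 *		sa.sa_handler = handler;
 *		sa.sa_flags = SA_ONSTACK;
 *		sigemptyset(&sa.sa_mask);
 *		return sigaction(SIGSEGV, &sa, NULL);
 *	}
 */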
1159
1160 int
1161 kill(struct proc *cp, struct kill_args *uap, __unused register_t *retval)
1162 {
1163 register struct proc *p;
1164 kauth_cred_t uc = kauth_cred_get();
1165
1166 AUDIT_ARG(pid, uap->pid);
1167 AUDIT_ARG(signum, uap->signum);
1168
1169 if ((u_int)uap->signum >= NSIG)
1170 return (EINVAL);
1171 if (uap->pid > 0) {
1172 /* kill single process */
1173 if ((p = proc_findref(uap->pid)) == NULL) {
1174 if ((p = pzfind(uap->pid)) != NULL) {
1175 /*
1176 * IEEE Std 1003.1-2001: return success
1177 * when killing a zombie.
1178 */
1179 return (0);
1180 }
1181 return (ESRCH);
1182 }
1183 AUDIT_ARG(process, p);
1184 if (!cansignal(cp, uc, p, uap->signum)) {
1185 proc_dropref(p);
1186 return(EPERM);
1187 }
1188 if (uap->signum)
1189 psignal(p, uap->signum);
1190 proc_dropref(p);
1191 return (0);
1192 }
1193 switch (uap->pid) {
1194 case -1: /* broadcast signal */
1195 return (killpg1(cp, uap->signum, 0, 1));
1196 case 0: /* signal own process group */
1197 return (killpg1(cp, uap->signum, 0, 0));
1198 default: /* negative explicit process group */
1199 return (killpg1(cp, uap->signum, -(uap->pid), 0));
1200 }
1201 /* NOTREACHED */
1202 }
1203
1204
1205 /*
1206 * Common code for kill process group/broadcast kill.
1207 * cp is calling process.
1208 */
1209 int
1210 killpg1(cp, signum, pgid, all)
1211 register struct proc *cp;
1212 int signum, pgid, all;
1213 {
1214 register struct proc *p;
1215 kauth_cred_t uc = cp->p_ucred;
1216 struct pgrp *pgrp;
1217 int nfound = 0;
1218
1219 if (all) {
1220 /*
1221 * broadcast
1222 */
1223 for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
1224 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
1225 p == cp || !cansignal(cp, uc, p, signum))
1226 continue;
1227 nfound++;
1228 if (signum)
1229 psignal(p, signum);
1230 }
1231 } else {
1232 if (pgid == 0)
1233 /*
1234 * zero pgid means send to my process group.
1235 */
1236 pgrp = cp->p_pgrp;
1237 else {
1238 pgrp = pgfind(pgid);
1239 if (pgrp == NULL)
1240 return (ESRCH);
1241 }
1242 for (p = pgrp->pg_members.lh_first; p != 0;
1243 p = p->p_pglist.le_next) {
1244 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
1245 p->p_stat == SZOMB ||
1246 !cansignal(cp, uc, p, signum))
1247 continue;
1248 nfound++;
1249 if (signum)
1250 psignal(p, signum);
1251 }
1252 }
1253 return (nfound ? 0 : ESRCH);
1254 }
1255
1256 /*
1257 * Send a signal to a process group.
1258 */
1259 void
1260 gsignal(pgid, signum)
1261 int pgid, signum;
1262 {
1263 struct pgrp *pgrp;
1264
1265 if (pgid && (pgrp = pgfind(pgid)))
1266 pgsignal(pgrp, signum, 0);
1267 }
1268
1269 /*
1270 * Send a signal to a process group. If checkctty is 1,
1271 * limit to members which have a controlling terminal.
1272 */
1273 void
1274 pgsignal(pgrp, signum, checkctty)
1275 struct pgrp *pgrp;
1276 int signum, checkctty;
1277 {
1278 register struct proc *p;
1279
1280 if (pgrp)
1281 for (p = pgrp->pg_members.lh_first; p != 0;
1282 p = p->p_pglist.le_next)
1283 if (checkctty == 0 || p->p_flag & P_CONTROLT)
1284 psignal(p, signum);
1285 }
1286
1287 /*
1288 * Send a signal to a backgrounded process blocked due to tty access.
1289 * In FreeBSD, the backgrounded process wakes up every second and
1290 * discovers whether or not it has been foregrounded. In our case, we block
1291 * the thread in tsleep, both to avoid a storm of wakeups and because
1292 * the suspend only happens at AST level.
1293 */
1294 void
1295 tty_pgsignal(pgrp, signum)
1296 struct pgrp *pgrp;
1297 int signum;
1298 {
1299 register struct proc *p;
1300
1301 if (pgrp)
1302 for (p = pgrp->pg_members.lh_first; p != 0;
1303 p = p->p_pglist.le_next)
1304 if ((p->p_flag & P_TTYSLEEP) && (p->p_flag & P_CONTROLT))
1305 psignal(p, signum);
1306 }
1307
1308 /*
1309 * Send a signal caused by a trap to a specific thread.
1310 */
1311 void
1312 threadsignal(thread_t sig_actthread, int signum, u_long code)
1313 {
1314 register struct uthread *uth;
1315 register struct task * sig_task;
1316 register struct proc *p ;
1317 int mask;
1318
1319 if ((u_int)signum >= NSIG || signum == 0)
1320 return;
1321
1322 mask = sigmask(signum);
1323 if ((mask & threadmask) == 0)
1324 return;
1325 sig_task = get_threadtask(sig_actthread);
1326 p = (struct proc *)(get_bsdtask_info(sig_task));
1327
1328 uth = get_bsdthread_info(sig_actthread);
1329 if (uth && (uth->uu_flag & UT_VFORK))
1330 p = uth->uu_proc;
1331
1332 if (!(p->p_flag & P_TRACED) && (p->p_sigignore & mask))
1333 return;
1334
1335 uth->uu_siglist |= mask;
1336 p->p_siglist |= mask; /* just for lame ones looking here */
1337 uth->uu_code = code;
1338 /* mark on process as well */
1339 signal_setast(sig_actthread);
1340 }
1341
1342
1343 void
1344 psignal(p, signum)
1345 register struct proc *p;
1346 register int signum;
1347 {
1348 psignal_lock(p, signum, 1);
1349 }
1350
1351 void
1352 psignal_vfork(struct proc *p, task_t new_task, thread_t thr_act, int signum)
1353 {
1354 register int prop;
1355 register sig_t action;
1356 int mask;
1357 struct uthread *uth;
1358
1359 if ((u_int)signum >= NSIG || signum == 0)
1360 panic("psignal signal number");
1361 mask = sigmask(signum);
1362 prop = sigprop[signum];
1363
1364 #if SIGNAL_DEBUG
1365 if(rdebug_proc && (p == rdebug_proc)) {
1366 ram_printf(3);
1367 }
1368 #endif /* SIGNAL_DEBUG */
1369
1370 if ((new_task == TASK_NULL) || (thr_act == (thread_t)NULL) || is_kerneltask(new_task))
1371 return;
1372
1373
1374 uth = get_bsdthread_info(thr_act);
1375 signal_lock(p);
1376
1377 /*
1378 * proc is traced, always give parent a chance.
1379 */
1380 action = SIG_DFL;
1381
1382 if (p->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
1383 (p->p_flag & P_TRACED) == 0)
1384 p->p_nice = NZERO;
1385
1386 if (prop & SA_CONT) {
1387 p->p_siglist &= ~stopsigmask;
1388 uth->uu_siglist &= ~stopsigmask;
1389 }
1390
1391 if (prop & SA_STOP) {
1392 /*
1393 * If sending a tty stop signal to a member of an orphaned
1394 * process group, discard the signal here if the action
1395 * is default; don't stop the process below if sleeping,
1396 * and don't clear any pending SIGCONT.
1397 */
1398 if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0 &&
1399 action == SIG_DFL)
1400 goto psigout;
1401 uth->uu_siglist &= ~contsigmask;
1402 p->p_siglist &= ~contsigmask;
1403 }
1404 uth->uu_siglist |= mask;
1405 p->p_siglist |= mask; /* just for lame ones looking here */
1406
1407 /* Deliver signal to the activation passed in */
1408 act_set_astbsd(thr_act);
1409
1410 /*
1411 * SIGKILL priority twiddling moved here from above because
1412 * it needs sig_thread. Could merge it into large switch
1413 * below if we didn't care about priority for tracing
1414 * as SIGKILL's action is always SIG_DFL.
1415 */
1416 if ((signum == SIGKILL) && (p->p_nice > NZERO)) {
1417 p->p_nice = NZERO;
1418 }
1419
1420 /*
1421 * This Process is traced - wake it up (if not already
1422 * stopped) so that it can discover the signal in
1423 * issig() and stop for the parent.
1424 */
1425 if (p->p_flag & P_TRACED) {
1426 if (p->p_stat != SSTOP)
1427 goto run;
1428 else
1429 goto psigout;
1430 }
1431 run:
1432 /*
1433 * If we're being traced (possibly because someone attached us
1434 * while we were stopped), check for a signal from the debugger.
1435 */
1436 if (p->p_stat == SSTOP) {
1437 if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0) {
1438 uth->uu_siglist |= sigmask(p->p_xstat);
1439 p->p_siglist |= mask; /* just for lame ones looking here */
1440 }
1441 }
1442
1443 /*
1444 * setrunnable(p) in BSD
1445 */
1446 p->p_stat = SRUN;
1447
1448 psigout:
1449 signal_unlock(p);
1450 }
1451
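/*
 * Pick a thread that can take signum right now: prefer the vfork activation
 * if the process is in the middle of a vfork, otherwise the first uthread
 * that neither opts out of signals (UT_NO_SIGMASK) nor masks the signal, or
 * that is sigwait()ing on it; fall back to whatever activation the Mach
 * layer reports can take a signal.  On success the chosen thread is returned
 * with its BSD AST already requested.
 */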
1452 static thread_t
1453 get_signalthread(struct proc *p, int signum)
1454 {
1455 struct uthread *uth;
1456 thread_t thr_act;
1457 sigset_t mask = sigmask(signum);
1458 thread_t sig_thread_act;
1459 struct task * sig_task = p->task;
1460 kern_return_t kret;
1461
1462 if ((p->p_flag & P_INVFORK) && p->p_vforkact) {
1463 sig_thread_act = p->p_vforkact;
1464 kret = check_actforsig(sig_task, sig_thread_act, 1);
1465 if (kret == KERN_SUCCESS)
1466 return(sig_thread_act);
1467 else
1468 return(THREAD_NULL);
1469 }
1470
1471 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
1472 if(((uth->uu_flag & UT_NO_SIGMASK)== 0) &&
1473 (((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask))) {
1474 if (check_actforsig(p->task, uth->uu_act, 1) == KERN_SUCCESS)
1475 return(uth->uu_act);
1476 }
1477 }
1478 if (get_signalact(p->task, &thr_act, 1) == KERN_SUCCESS) {
1479 return(thr_act);
1480 }
1481
1482 return(THREAD_NULL);
1483 }
1484
1485 /*
1486 * Send the signal to the process. If the signal has an action, the action
1487 * is usually performed by the target process rather than the caller; we add
1488 * the signal to the set of pending signals for the process.
1489 *
1490 * Exceptions:
1491 * o When a stop signal is sent to a sleeping process that takes the
1492 * default action, the process is stopped without awakening it.
1493 * o SIGCONT restarts stopped processes (or puts them back to sleep)
1494 * regardless of the signal action (eg, blocked or ignored).
1495 *
1496 * Other ignored signals are discarded immediately.
1497 */
1498 void
1499 psignal_lock(p, signum, withlock)
1500 register struct proc *p;
1501 register int signum;
1502 register int withlock;
1503 {
1504 register int prop;
1505 register sig_t action;
1506 thread_t sig_thread_act;
1507 register task_t sig_task;
1508 int mask;
1509 struct uthread *uth;
1510 boolean_t funnel_state = FALSE;
1511 int sw_funnel = 0;
1512
1513 if ((u_int)signum >= NSIG || signum == 0)
1514 panic("psignal signal number");
1515 mask = sigmask(signum);
1516 prop = sigprop[signum];
1517
1518 #if SIGNAL_DEBUG
1519 if(rdebug_proc && (p == rdebug_proc)) {
1520 ram_printf(3);
1521 }
1522 #endif /* SIGNAL_DEBUG */
1523
1524 if (thread_funnel_get() == (funnel_t *)0) {
1525 sw_funnel = 1;
1526 funnel_state = thread_funnel_set(kernel_flock, TRUE);
1527 }
1528 /*
1529 * We will need the task pointer later. Grab it now to
1530 * check for a zombie process. Also don't send signals
1531 * to kernel internal tasks.
1532 */
1533 if (((sig_task = p->task) == TASK_NULL) || is_kerneltask(sig_task)) {
1534 if (sw_funnel)
1535 thread_funnel_set(kernel_flock, funnel_state);
1536 return;
1537 }
1538
1539 KNOTE(&p->p_klist, NOTE_SIGNAL | signum);
1540
1541 /*
1542 * Do not send signals to a process whose thread is
1543 * doing a reboot(). Doing so would mark that thread aborted
1544 * and can cause I/O failures which will cause data loss.
1545 */
1546 if (ISSET(p->p_flag, P_REBOOT)) {
1547 if (sw_funnel)
1548 thread_funnel_set(kernel_flock, funnel_state);
1549 return;
1550 }
1551
1552 if (withlock)
1553 signal_lock(p);
1554
1555 /*
1556 * Deliver the signal to the first thread in the task. This
1557 * allows single threaded applications which use signals to
1558 * be able to be linked with multithreaded libraries. We have
1559 * an implicit reference to the current thread, but need
1560 * an explicit one otherwise. The thread reference keeps
1561 * the corresponding task data structures around too. This
1562 * reference is released by thread_deallocate.
1563 */
1564
1565 if (((p->p_flag & P_TRACED) == 0) && (p->p_sigignore & mask))
1566 goto psigout;
1567
1568 /* If successful return with ast set */
1569 sig_thread_act = get_signalthread(p, signum);
1570
1571 if (sig_thread_act == THREAD_NULL) {
1572 /* XXXX FIXME
1573 * if it is SIGKILL, maybe we should
1574 * inject a thread to terminate the process
1575 */
1576 #if SIGNAL_DEBUG
1577 ram_printf(1);
1578 #endif /* SIGNAL_DEBUG */
1579 goto psigout;
1580 }
1581
1582 uth = get_bsdthread_info(sig_thread_act);
1583
1584 /*
1585 * If proc is traced, always give parent a chance.
1586 */
1587 if (p->p_flag & P_TRACED)
1588 action = SIG_DFL;
1589 else {
1590 /*
1591 * If the signal is being ignored,
1592 * then we forget about it immediately.
1593 * (Note: we don't set SIGCONT in p_sigignore,
1594 * and if it is set to SIG_IGN,
1595 * action will be SIG_DFL here.)
1596 */
1597 if (p->p_sigignore & mask)
1598 goto psigout;
1599 /* sigwait takes precedence */
1600 if (uth->uu_sigwait & mask)
1601 action = KERN_SIG_WAIT;
1602 else if (uth->uu_sigmask & mask)
1603 action = KERN_SIG_HOLD;
1604 else if (p->p_sigcatch & mask)
1605 action = KERN_SIG_CATCH;
1606 else
1607 action = SIG_DFL;
1608 }
1609
1610 if (p->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
1611 (p->p_flag & P_TRACED) == 0)
1612 p->p_nice = NZERO;
1613
1614 if (prop & SA_CONT) {
1615 uth->uu_siglist &= ~stopsigmask;
1616 p->p_siglist &= ~stopsigmask;
1617 }
1618
1619 if (prop & SA_STOP) {
1620 /*
1621 * If sending a tty stop signal to a member of an orphaned
1622 * process group, discard the signal here if the action
1623 * is default; don't stop the process below if sleeping,
1624 * and don't clear any pending SIGCONT.
1625 */
1626 if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0 &&
1627 action == SIG_DFL)
1628 goto psigout;
1629 uth->uu_siglist &= ~contsigmask;
1630 p->p_siglist &= ~contsigmask;
1631 }
1632 uth->uu_siglist |= mask;
1633 p->p_siglist |= mask; /* just for lame ones looking here */
1634
1635
1636 /*
1637 * Defer further processing for signals which are held,
1638 * except that stopped processes must be continued by SIGCONT.
1639 */
1640 if (action == KERN_SIG_HOLD && ((prop & SA_CONT) == 0 || p->p_stat != SSTOP)) {
1641 goto psigout;
1642 }
1643 /*
1644 * SIGKILL priority twiddling moved here from above because
1645 * it needs sig_thread. Could merge it into large switch
1646 * below if we didn't care about priority for tracing
1647 * as SIGKILL's action is always SIG_DFL.
1648 */
1649 if ((signum == SIGKILL) && (p->p_nice > NZERO)) {
1650 p->p_nice = NZERO;
1651 }
1652
1653 /*
1654 * Process is traced - wake it up (if not already
1655 * stopped) so that it can discover the signal in
1656 * issig() and stop for the parent.
1657 */
1658 if (p->p_flag & P_TRACED) {
1659 if (p->p_stat != SSTOP)
1660 goto run;
1661 else
1662 goto psigout;
1663 }
1664
1665 if (action == KERN_SIG_WAIT) {
1666 uth->uu_sigwait = mask;
1667 uth->uu_siglist &= ~mask;
1668 p->p_siglist &= ~mask;
1669 wakeup(&uth->uu_sigwait);
1670 /* if it is SIGCONT resume whole process */
1671 if (prop & SA_CONT) {
1672 p->p_flag |= P_CONTINUED;
1673 (void) task_resume(sig_task);
1674 }
1675 goto psigout;
1676 }
1677
1678 if (action != SIG_DFL) {
1679 /*
1680 * User wants to catch the signal.
1681 * Wake up the thread, but don't un-suspend it
1682 * (except for SIGCONT).
1683 */
1684 if (prop & SA_CONT) {
1685 if (p->p_flag & P_TTYSLEEP) {
1686 p->p_flag &= ~P_TTYSLEEP;
1687 wakeup(&p->p_siglist);
1688 } else {
1689 p->p_flag |= P_CONTINUED;
1690 (void) task_resume(sig_task);
1691 }
1692 p->p_stat = SRUN;
1693 } else if (p->p_stat == SSTOP)
1694 goto psigout;
1695 goto run;
1696 } else {
1697 /* Default action - varies */
1698 if (mask & stopsigmask) {
1699 /*
1700 * These are the signals which by default
1701 * stop a process.
1702 *
1703 * Don't clog system with children of init
1704 * stopped from the keyboard.
1705 */
1706 if (!(prop & SA_STOP) && p->p_pptr == initproc) {
1707 psignal_lock(p, SIGKILL, 0);
1708 uth->uu_siglist &= ~mask;
1709 p->p_siglist &= ~mask;
1710 goto psigout;
1711 }
1712
1713 /*
1714 * Stop the task
1715 * if task hasn't already been stopped by
1716 * a signal.
1717 */
1718 uth->uu_siglist &= ~mask;
1719 p->p_siglist &= ~mask;
1720 if (p->p_stat != SSTOP) {
1721 p->p_xstat = signum;
1722 stop(p);
1723 if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0) {
1724 struct proc *pp = p->p_pptr;
1725
1726 pp->si_pid = p->p_pid;
1727 pp->si_status = p->p_xstat;
1728 pp->si_code = CLD_STOPPED;
1729 pp->si_uid = p->p_ucred->cr_ruid;
1730 psignal(pp, SIGCHLD);
1731 }
1732 }
1733 goto psigout;
1734 }
1735
1736 switch (signum) {
1737 /*
1738 * Signals ignored by default have been dealt
1739 * with already, since their bits are on in
1740 * p_sigignore.
1741 */
1742
1743 case SIGKILL:
1744 /*
1745 * Kill signal always sets process running and
1746 * unsuspends it.
1747 */
1748 /*
1749 * Process will be running after 'run'
1750 */
1751 p->p_stat = SRUN;
1752
1753 thread_abort(sig_thread_act);
1754
1755 goto psigout;
1756
1757 case SIGCONT:
1758 /*
1759 * Let the process run. If it's sleeping on an
1760 * event, it remains so.
1761 */
1762 if (p->p_flag & P_TTYSLEEP) {
1763 p->p_flag &= ~P_TTYSLEEP;
1764 wakeup(&p->p_siglist);
1765 } else {
1766 p->p_flag |= P_CONTINUED;
1767 (void) task_resume(sig_task);
1768 }
1769 uth->uu_siglist &= ~mask;
1770 p->p_siglist &= ~mask;
1771 p->p_stat = SRUN;
1772
1773 goto psigout;
1774
1775 default:
1776 /*
1777 * All other signals wake up the process, but don't
1778 * resume it.
1779 */
1780 if (p->p_stat == SSTOP)
1781 goto psigout;
1782 goto run;
1783 }
1784 }
1785 /*NOTREACHED*/
1786 run:
1787 /*
1788 * If we're being traced (possibly because someone attached us
1789 * while we were stopped), check for a signal from the debugger.
1790 */
1791 if (p->p_stat == SSTOP) {
1792 if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0)
1793 uth->uu_siglist |= sigmask(p->p_xstat);
1794 } else {
1795 /*
1796 * setrunnable(p) in BSD and
1797 * Wake up the thread if it is interruptible.
1798 */
1799 p->p_stat = SRUN;
1800 thread_abort_safely(sig_thread_act);
1801 }
1802 psigout:
1803 if (withlock)
1804 signal_unlock(p);
1805 if (sw_funnel)
1806 thread_funnel_set(kernel_flock, funnel_state);
1807 }
1808
1809
1810 /* psignal_lock(p, signum, withlock ) */
1811 void
1812 psignal_uthread(thr_act, signum)
1813 thread_t thr_act;
1814 int signum;
1815 {
1816 struct proc *p;
1817 register int prop;
1818 register sig_t action;
1819 thread_t sig_thread_act;
1820 register task_t sig_task;
1821 int mask;
1822 struct uthread *uth;
1823 kern_return_t kret;
1824 int error = 0;
1825
1826 p = (struct proc *)get_bsdtask_info(get_threadtask(thr_act));
1827 if ((u_int)signum >= NSIG || signum == 0)
1828 panic("Invalid signal number in psignal_uthread");
1829 mask = sigmask(signum);
1830 prop = sigprop[signum];
1831
1832 #if SIGNAL_DEBUG
1833 if(rdebug_proc && (p == rdebug_proc)) {
1834 ram_printf(3);
1835 }
1836 #endif /* SIGNAL_DEBUG */
1837
1838 /*
1839 * We will need the task pointer later. Grab it now to
1840 * check for a zombie process. Also don't send signals
1841 * to kernel internal tasks.
1842 */
1843 if (((sig_task = p->task) == TASK_NULL) || is_kerneltask(sig_task)) {
1844 return;
1845 }
1846
1847 sig_thread_act = thr_act;
1848 /*
1849 * Do not send signals to a process whose thread is
1850 * doing a reboot(). Doing so would mark that thread aborted
1851 * and can cause I/O failures which will cause data loss.
1852 */
1853 if (ISSET(p->p_flag, P_REBOOT)) {
1854 return;
1855 }
1856
1857 signal_lock(p);
1858
1859 /*
1860 * Deliver the signal to the first thread in the task. This
1861 * allows single threaded applications which use signals to
1862 * be able to be linked with multithreaded libraries. We have
1863 * an implicit reference to the current thread, but need
1864 * an explicit one otherwise. The thread reference keeps
1865 * the corresponding task data structures around too. This
1866 * reference is released by thread_deallocate.
1867 */
1868
1869 if (((p->p_flag & P_TRACED) == 0) && (p->p_sigignore & mask))
1870 goto puthout;
1871
1872 kret = check_actforsig(sig_task, sig_thread_act, 1);
1873
1874 if (kret != KERN_SUCCESS) {
1875 error = EINVAL;
1876 goto puthout;
1877 }
1878
1879
1880 uth = get_bsdthread_info(sig_thread_act);
1881
1882 /*
1883 * If proc is traced, always give parent a chance.
1884 */
1885 if (p->p_flag & P_TRACED)
1886 action = SIG_DFL;
1887 else {
1888 /*
1889 * If the signal is being ignored,
1890 * then we forget about it immediately.
1891 * (Note: we don't set SIGCONT in p_sigignore,
1892 * and if it is set to SIG_IGN,
1893 * action will be SIG_DFL here.)
1894 */
1895 if (p->p_sigignore & mask)
1896 goto puthout;
1897 /* sigwait takes precedence */
1898 if (uth->uu_sigwait & mask)
1899 action = KERN_SIG_WAIT;
1900 else if (uth->uu_sigmask & mask)
1901 action = KERN_SIG_HOLD;
1902 else if (p->p_sigcatch & mask)
1903 action = KERN_SIG_CATCH;
1904 else
1905 action = SIG_DFL;
1906 }
1907
1908 if (p->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
1909 (p->p_flag & P_TRACED) == 0)
1910 p->p_nice = NZERO;
1911
1912 if (prop & SA_CONT) {
1913 uth->uu_siglist &= ~stopsigmask;
1914 p->p_siglist &= ~stopsigmask;
1915 }
1916
1917 if (prop & SA_STOP) {
1918 /*
1919 * If sending a tty stop signal to a member of an orphaned
1920 * process group, discard the signal here if the action
1921 * is default; don't stop the process below if sleeping,
1922 * and don't clear any pending SIGCONT.
1923 */
1924 if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0 &&
1925 action == SIG_DFL)
1926 goto puthout;
1927 uth->uu_siglist &= ~contsigmask;
1928 p->p_siglist &= ~contsigmask;
1929 }
1930 uth->uu_siglist |= mask;
1931 p->p_siglist |= mask; /* just for lame ones looking here */
1932
1933 /*
1934 * Defer further processing for signals which are held,
1935 * except that stopped processes must be continued by SIGCONT.
1936 */
1937 if (action == KERN_SIG_HOLD && ((prop & SA_CONT) == 0 || p->p_stat != SSTOP))
1938 goto puthout;
1939
1940 /*
1941 * SIGKILL priority twiddling moved here from above because
1942 * it needs sig_thread. Could merge it into large switch
1943 * below if we didn't care about priority for tracing
1944 * as SIGKILL's action is always SIG_DFL.
1945 */
1946 if ((signum == SIGKILL) && (p->p_nice > NZERO)) {
1947 p->p_nice = NZERO;
1948 }
1949
1950 /*
1951 * Process is traced - wake it up (if not already
1952 * stopped) so that it can discover the signal in
1953 * issig() and stop for the parent.
1954 */
1955 if (p->p_flag & P_TRACED) {
1956 if (p->p_stat != SSTOP)
1957 goto psurun;
1958 else
1959 goto puthout;
1960 }
1961
1962 if (action == KERN_SIG_WAIT) {
1963 uth->uu_sigwait = mask;
1964 uth->uu_siglist &= ~mask;
1965 p->p_siglist &= ~mask;
1966 wakeup(&uth->uu_sigwait);
1967 /* if it is SIGCONT resume whole process */
1968 if (prop & SA_CONT) {
1969 p->p_flag |= P_CONTINUED;
1970 (void) task_resume(sig_task);
1971 }
1972 goto puthout;
1973 }
1974
1975 if (action != SIG_DFL) {
1976 /*
1977 * User wants to catch the signal.
1978 * Wake up the thread, but don't un-suspend it
1979 * (except for SIGCONT).
1980 */
1981 if (prop & SA_CONT) {
1982 p->p_flag |= P_CONTINUED;
1983 (void) task_resume(sig_task);
1984 }
1985 goto psurun;
1986 } else {
1987 /* Default action - varies */
1988 if (mask & stopsigmask) {
1989 /*
1990 * These are the signals which by default
1991 * stop a process.
1992 *
1993 * Don't clog system with children of init
1994 * stopped from the keyboard.
1995 */
1996 if (!(prop & SA_STOP) && p->p_pptr == initproc) {
1997 psignal_lock(p, SIGKILL, 0);
1998 uth->uu_siglist &= ~mask;
1999 p->p_siglist &= ~mask;
2000 goto puthout;
2001 }
2002
2003 /*
2004 * Stop the task
2005 * if task hasn't already been stopped by
2006 * a signal.
2007 */
2008 uth->uu_siglist &= ~mask;
2009 p->p_siglist &= ~mask;
2010 if (p->p_stat != SSTOP) {
2011 p->p_xstat = signum;
2012 if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0) {
2013 struct proc *pp = p->p_pptr;
2014
2015 pp->si_pid = p->p_pid;
2016 pp->si_status = p->p_xstat;
2017 pp->si_code = CLD_STOPPED;
2018 pp->si_uid = p->p_ucred->cr_ruid;
2019 psignal(pp, SIGCHLD);
2020 }
2021 stop(p);
2022 }
2023 goto puthout;
2024 }
2025
2026 switch (signum) {
2027 /*
2028 * Signals ignored by default have been dealt
2029 * with already, since their bits are on in
2030 * p_sigignore.
2031 */
2032
2033 case SIGKILL:
2034 /*
2035 * Kill signal always sets process running and
2036 * unsuspends it.
2037 */
2038 /*
2039 * Process will be running after 'run'
2040 */
2041 p->p_stat = SRUN;
2042
2043 thread_abort(sig_thread_act);
2044
2045 goto puthout;
2046
2047 case SIGCONT:
2048 /*
2049 * Let the process run. If it's sleeping on an
2050 * event, it remains so.
2051 */
2052 if (p->p_flag & P_TTYSLEEP) {
2053 p->p_flag &= ~P_TTYSLEEP;
2054 wakeup(&p->p_siglist);
2055 } else {
2056 p->p_flag |= P_CONTINUED;
2057 (void) task_resume(sig_task);
2058 }
2059 uth->uu_siglist &= ~mask;
2060 p->p_siglist &= ~mask;
2061 p->p_stat = SRUN;
2062 goto puthout;
2063
2064 default:
2065 /*
2066 * All other signals wake up the process, but don't
2067 * resume it.
2068 */
2069 goto psurun;
2070 }
2071 }
2072 /*NOTREACHED*/
2073 psurun:
2074 /*
2075 * If we're being traced (possibly because someone attached us
2076 * while we were stopped), check for a signal from the debugger.
2077 */
2078 if (p->p_stat == SSTOP) {
2079 if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0) {
2080 uth->uu_siglist |= sigmask(p->p_xstat);
2081 p->p_siglist |= sigmask(p->p_xstat);
2082 }
2083 } else {
2084 /*
2085 * Equivalent of BSD setrunnable(p):
2086 * wake up the thread if it is interruptible.
2087 */
2088 p->p_stat = SRUN;
2089 thread_abort_safely(sig_thread_act);
2090 }
2091
2092 puthout:
2093 signal_unlock(p);
2094 }
2095
2096
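/*
 * Claim the process for exit: record the current thread as the exit
 * thread and suspend the task so no other thread makes further
 * progress while the exit proceeds.
 */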
2097 __inline__ void
2098 sig_lock_to_exit(struct proc *p)
2099 {
2100 thread_t self = current_thread();
2101
2102 p->exit_thread = self;
2103 (void) task_suspend(p->task);
2104 }
2105
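/*
 * Try to take the per-process signal state for the current thread.
 * Sleeps while another thread owns sigwait or an exit is in progress.
 * Returns 1 when the caller may proceed, 0 if an exit is already in
 * progress (no further signal work should be done), and -1 if this
 * thread has been asked to terminate.
 */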
2106 __inline__ int
2107 sig_try_locked(struct proc *p)
2108 {
2109 thread_t self = current_thread();
2110
2111 while (p->sigwait || p->exit_thread) {
2112 if (p->exit_thread) {
2113 if (p->exit_thread != self) {
2114 /*
2115 * Already exiting - no signals.
2116 */
2117 thread_abort(self);
2118 }
2119 return(0);
2120 }
2121 if (assert_wait_possible()) {
2122 assert_wait((caddr_t)&p->sigwait_thread,
2123 (THREAD_INTERRUPTIBLE));
2124 }
2125 signal_unlock(p);
2126 thread_block(THREAD_CONTINUE_NULL);
2127 signal_lock(p);
2128 if (thread_should_abort(self)) {
2129 /*
2130 * Terminate request - clean up.
2131 */
2132 return -1;
2133 }
2134 }
2135 return 1;
2136 }
2137
2138 /*
2139 * If the current process has received a signal (should be caught or cause
2140 * termination, should interrupt current syscall), return the signal number.
2141 * Stop signals with default action are processed immediately, then cleared;
2142 * they aren't returned. This is checked after each entry to the system for
2143 * a syscall or trap (though this can usually be done without calling issignal
2144 * by checking the pending signal masks in the CURSIG macro.) The normal call
2145 * sequence is
2146 *
2147 * while (signum = CURSIG(curproc))
2148 * postsig(signum);
2149 */
2150 int
2151 issignal(p)
2152 register struct proc *p;
2153 {
2154 register int signum, mask, prop, sigbits;
2155 thread_t cur_act;
2156 struct uthread * ut;
2157 struct proc *pp;
2158
2159 cur_act = current_thread();
2160
2161 #if SIGNAL_DEBUG
2162 if (rdebug_proc && (p == rdebug_proc)) {
2163 ram_printf(3);
2164 }
2165 #endif /* SIGNAL_DEBUG */
2166 signal_lock(p);
2167
2168 /*
2169 * Try to grab the signal lock.
2170 */
2171 if (sig_try_locked(p) <= 0) {
2172 signal_unlock(p);
2173 return (0);
2174 }
2175
2176 ut = get_bsdthread_info(cur_act);
2177 for(;;) {
2178 sigbits = ut->uu_siglist & ~ut->uu_sigmask;
2179
2180 if (p->p_flag & P_PPWAIT)
2181 sigbits &= ~stopsigmask;
2182 if (sigbits == 0) { /* no signal to send */
2183 signal_unlock(p);
2184 return (0);
2185 }
2186 signum = ffs((long)sigbits);
2187 mask = sigmask(signum);
2188 prop = sigprop[signum];
2189
2190 /*
2191 * We should see pending but ignored signals
2192 * only if P_TRACED was on when they were posted.
2193 */
2194 if (mask & p->p_sigignore && (p->p_flag & P_TRACED) == 0) {
2195 ut->uu_siglist &= ~mask; /* take the signal! */
2196 p->p_siglist &= ~mask; /* take the signal! */
2197 continue;
2198 }
2199 if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
2200 register task_t task;
2201 /*
2202 * If traced, always stop, and stay
2203 * stopped until released by the debugger.
2204 */
2205 /* ptrace debugging */
2206 p->p_xstat = signum;
2207 pp = p->p_pptr;
2208 if (p->p_flag & P_SIGEXC) {
2209 p->sigwait = TRUE;
2210 p->sigwait_thread = cur_act;
2211 p->p_stat = SSTOP;
2212 p->p_flag &= ~(P_WAITED|P_CONTINUED);
2213 ut->uu_siglist &= ~mask; /* clear the old signal */
2214 p->p_siglist &= ~mask; /* clear the old signal */
2215 signal_unlock(p);
2216 do_bsdexception(EXC_SOFTWARE, EXC_SOFT_SIGNAL, signum);
2217 signal_lock(p);
2218 } else {
2219 // panic("Unsupported gdb option\n");
2220 pp->si_pid = p->p_pid;
2221 pp->si_status = p->p_xstat;
2222 pp->si_code = CLD_TRAPPED;
2223 pp->si_uid = p->p_ucred->cr_ruid;
2224 psignal(pp, SIGCHLD);
2225 /*
2226 * XXX Have to really stop for debuggers;
2227 * XXX stop() doesn't do the right thing.
2228 * XXX Inline the task_suspend because we
2229 * XXX have to diddle Unix state in the
2230 * XXX middle of it.
2231 */
2232 task = p->task;
2233 task_hold(task);
2234 p->sigwait = TRUE;
2235 p->sigwait_thread = cur_act;
2236 p->p_stat = SSTOP;
2237 p->p_flag &= ~(P_WAITED|P_CONTINUED);
2238 ut->uu_siglist &= ~mask; /* clear the old signal */
2239 p->p_siglist &= ~mask; /* clear the old signal */
2240
2241 wakeup((caddr_t)p->p_pptr);
2242 signal_unlock(p);
2243 assert_wait((caddr_t)&p->sigwait, (THREAD_INTERRUPTIBLE));
2244 thread_block(THREAD_CONTINUE_NULL);
2245 signal_lock(p);
2246 }
2247
2248 p->sigwait = FALSE;
2249 p->sigwait_thread = NULL;
2250 wakeup((caddr_t)&p->sigwait_thread);
2251
2252 /*
2253 * Detect the case where gdb is killed while the traced
2254 * program is still attached: pgsignal delivers the
2255 * SIGKILL to the traced program, and that is what we
2256 * check for here.
2257 */
2258 if (ut->uu_siglist & sigmask(SIGKILL)) {
2259 /*
2260 * Wait event may still be outstanding;
2261 * clear it, since sig_lock_to_exit will
2262 * wait.
2263 */
2264 clear_wait(current_thread(), THREAD_INTERRUPTED);
2265 sig_lock_to_exit(p);
2266 /*
2267 * Since this thread will be resumed
2268 * to allow the current syscall to
2269 * be completed, must save u_qsave
2270 * before calling exit(). (Since exit()
2271 * calls closef() which can trash u_qsave.)
2272 */
2273 signal_unlock(p);
2274 exit1(p, signum, (int *)NULL);
2275 return(0);
2276 }
2277
2278 /*
2279 * We may have to quit
2280 */
2281 if (thread_should_abort(current_thread())) {
2282 signal_unlock(p);
2283 return(0);
2284 }
2285 /*
2286 * If parent wants us to take the signal,
2287 * then it will leave it in p->p_xstat;
2288 * otherwise we just look for signals again.
2289 */
2290 signum = p->p_xstat;
2291 if (signum == 0)
2292 continue;
2293 /*
2294 * Put the new signal into p_siglist. If the
2295 * signal is being masked, look for other signals.
2296 */
2297 mask = sigmask(signum);
2298 ut->uu_siglist |= mask;
2299 p->p_siglist |= mask; /* for code that still looks at p_siglist */
2300 if (ut->uu_sigmask & mask)
2301 continue;
2302 }
2303
2304 /*
2305 * Decide whether the signal should be returned.
2306 * Return the signal's number, or fall through
2307 * to clear it from the pending mask.
2308 */
2309
2310 switch ((long)p->p_sigacts->ps_sigact[signum]) {
2311
2312 case (long)SIG_DFL:
2313 /*
2314 * Don't take default actions on system processes.
2315 */
2316 if (p->p_pptr->p_pid == 0) {
2317 #if DIAGNOSTIC
2318 /*
2319 * Are you sure you want to ignore SIGSEGV
2320 * in init? XXX
2321 */
2322 printf("Process (pid %d) got signal %d\n",
2323 p->p_pid, signum);
2324 #endif
2325 break; /* == ignore */
2326 }
2327
2328 /*
2329 * If there is a pending stop signal to process
2330 * with default action, stop here,
2331 * then clear the signal. However,
2332 * if process is member of an orphaned
2333 * process group, ignore tty stop signals.
2334 */
2335 if (prop & SA_STOP) {
2336 if (p->p_flag & P_TRACED ||
2337 (p->p_pgrp->pg_jobc == 0 &&
2338 prop & SA_TTYSTOP))
2339 break; /* == ignore */
2340 if (p->p_stat != SSTOP) {
2341 p->p_xstat = signum;
2342 stop(p);
2343 if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0) {
2344 pp = p->p_pptr;
2345 pp->si_pid = p->p_pid;
2346 pp->si_status = p->p_xstat;
2347 pp->si_code = CLD_STOPPED;
2348 pp->si_uid = p->p_ucred->cr_ruid;
2349 psignal(pp, SIGCHLD);
2350 }
2351 }
2352 break;
2353 } else if (prop & SA_IGNORE) {
2354 /*
2355 * Except for SIGCONT, shouldn't get here.
2356 * Default action is to ignore; drop it.
2357 */
2358 break; /* == ignore */
2359 } else {
2360 ut->uu_siglist &= ~mask; /* take the signal! */
2361 p->p_siglist &= ~mask; /* take the signal! */
2362 signal_unlock(p);
2363 return (signum);
2364 }
2365 /*NOTREACHED*/
2366
2367 case (long)SIG_IGN:
2368 /*
2369 * Masking above should prevent us ever trying
2370 * to take action on an ignored signal other
2371 * than SIGCONT, unless process is traced.
2372 */
2373 if ((prop & SA_CONT) == 0 &&
2374 (p->p_flag & P_TRACED) == 0)
2375 printf("issignal\n");
2376 break; /* == ignore */
2377
2378 default:
2379 /*
2380 * This signal has an action, let
2381 * postsig() process it.
2382 */
2383 ut->uu_siglist &= ~mask; /* take the signal! */
2384 p->p_siglist &= ~mask; /* take the signal! */
2385 signal_unlock(p);
2386 return (signum);
2387 }
2388 ut->uu_siglist &= ~mask; /* take the signal! */
2389 p->p_siglist &= ~mask; /* take the signal! */
2390 }
2391 /* NOTREACHED */
2392 }
2393
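/*
 * Non-destructive variant of issignal(): scan the current thread's
 * pending signals for one that is unmasked, not ignored, and would
 * require action, and return its number without clearing it or
 * stopping the process; return 0 if there is nothing to act on.
 */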
2394 /* called from _sleep */
2395 int
2396 CURSIG(p)
2397 register struct proc *p;
2398 {
2399 register int signum, mask, prop, sigbits;
2400 thread_t cur_act;
2401 struct uthread * ut;
2402 int retnum = 0;
2403
2404
2405 cur_act = current_thread();
2406
2407 ut = get_bsdthread_info(cur_act);
2408
2409 if (ut->uu_siglist == 0)
2410 return (0);
2411
2412 if (((ut->uu_siglist & ~ut->uu_sigmask) == 0) && ((p->p_flag & P_TRACED) == 0))
2413 return (0);
2414
2415 sigbits = ut->uu_siglist & ~ut->uu_sigmask;
2416
2417 for(;;) {
2418 if (p->p_flag & P_PPWAIT)
2419 sigbits &= ~stopsigmask;
2420 if (sigbits == 0) { /* no signal to send */
2421 return (retnum);
2422 }
2423
2424 signum = ffs((long)sigbits);
2425 mask = sigmask(signum);
2426 prop = sigprop[signum];
2427
2428 /*
2429 * We should see pending but ignored signals
2430 * only if P_TRACED was on when they were posted.
2431 */
2432 if (mask & p->p_sigignore && (p->p_flag & P_TRACED) == 0) {
2433 continue;
2434 }
2435 if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
2436 /*
2437 * Put the new signal into p_siglist. If the
2438 * signal is being masked, look for other signals.
2439 */
2440 mask = sigmask(signum);
2441 if (ut->uu_sigmask & mask)
2442 continue;
2443 return(signum);
2444 }
2445
2446 /*
2447 * Decide whether the signal should be returned.
2448 * Return the signal's number, or fall through
2449 * to clear it from the pending mask.
2450 */
2451
2452 switch ((long)p->p_sigacts->ps_sigact[signum]) {
2453
2454 case (long)SIG_DFL:
2455 /*
2456 * Don't take default actions on system processes.
2457 */
2458 if (p->p_pptr->p_pid == 0) {
2459 #if DIAGNOSTIC
2460 /*
2461 * Are you sure you want to ignore SIGSEGV
2462 * in init? XXX
2463 */
2464 printf("Process (pid %d) got signal %d\n",
2465 p->p_pid, signum);
2466 #endif
2467 break; /* == ignore */
2468 }
2469
2470 /*
2471 * If there is a pending stop signal to process
2472 * with default action, stop here,
2473 * then clear the signal. However,
2474 * if process is member of an orphaned
2475 * process group, ignore tty stop signals.
2476 */
2477 if (prop & SA_STOP) {
2478 if (p->p_flag & P_TRACED ||
2479 (p->p_pgrp->pg_jobc == 0 &&
2480 prop & SA_TTYSTOP))
2481 break; /* == ignore */
2482 retnum = signum;
2483 break;
2484 } else if (prop & SA_IGNORE) {
2485 /*
2486 * Except for SIGCONT, shouldn't get here.
2487 * Default action is to ignore; drop it.
2488 */
2489 break; /* == ignore */
2490 } else {
2491 return (signum);
2492 }
2493 /*NOTREACHED*/
2494
2495 case (long)SIG_IGN:
2496 /*
2497 * Masking above should prevent us ever trying
2498 * to take action on an ignored signal other
2499 * than SIGCONT, unless process is traced.
2500 */
2501 if ((prop & SA_CONT) == 0 &&
2502 (p->p_flag & P_TRACED) == 0)
2503 printf("issignal\n");
2504 break; /* == ignore */
2505
2506 default:
2507 /*
2508 * This signal has an action, let
2509 * postsig() process it.
2510 */
2511 return (signum);
2512 }
2513 sigbits &= ~mask; /* take the signal! */
2514 }
2515 /* NOTREACHED */
2516 }
2517
2518 /*
2519 * Put the argument process into the stopped state and notify the parent
2520 * via wakeup. Signals are handled elsewhere. The process must not be
2521 * on the run queue.
2522 */
2523 void
2524 stop(p)
2525 register struct proc *p;
2526 {
2527 p->p_stat = SSTOP;
2528 p->p_flag &= ~(P_WAITED|P_CONTINUED);
2529 if (p->p_pptr->p_stat != SSTOP)
2530 wakeup((caddr_t)p->p_pptr);
2531 (void) task_suspend(p->task); /*XXX*/
2532 }
2533
2534 /*
2535 * Take the action for the specified signal
2536 * from the current set of pending signals.
2537 */
2538 void
2539 postsig(int signum)
2540 {
2541 struct proc *p = current_proc();
2542 struct sigacts *ps = p->p_sigacts;
2543 user_addr_t catcher;
2544 u_long code;
2545 int mask, returnmask;
2546 struct uthread * ut;
2547
2548 #if DIAGNOSTIC
2549 if (signum == 0)
2550 panic("postsig");
2551 /*
2552 * This must be called on master cpu
2553 */
2554 if (cpu_number() != master_cpu)
2555 panic("psig not on master");
2556 #endif
2557
2558 signal_lock(p);
2559 /*
2560 * Try to grab the signal lock.
2561 */
2562 if (sig_try_locked(p) <= 0) {
2563 signal_unlock(p);
2564 return;
2565 }
2566
2567 ut = (struct uthread *)get_bsdthread_info(current_thread());
2568 mask = sigmask(signum);
2569 ut->uu_siglist &= ~mask;
2570 p->p_siglist &= ~mask;
2571 catcher = ps->ps_sigact[signum];
2572 #if KTRACE
2573 //LP64: catcher argument is a 64 bit user space handler address
2574 if (KTRPOINT(p, KTR_PSIG))
2575 ktrpsig(p->p_tracep,
2576 signum, CAST_DOWN(void *,catcher), ut->uu_flag & UT_SAS_OLDMASK ?
2577 &ut->uu_oldmask : &ut->uu_sigmask, 0);
2578 #endif
2579 if (catcher == SIG_DFL) {
2580 /*
2581 * Default catcher, where the default is to kill
2582 * the process. (Other cases were ignored above.)
2583 */
2584 /* called with signal_lock() held */
2585 sigexit_locked(p, signum);
2586 return;
2587 /* NOTREACHED */
2588 } else {
2589 /*
2590 * If we get here, the signal must be caught.
2591 */
2592 #if DIAGNOSTIC
2593 if (catcher == SIG_IGN || (ut->uu_sigmask & mask))
2594 log(LOG_WARNING,
2595 "postsig: processing masked or ignored signal\n");
2596 #endif
2597 /*
2598 * Set the new mask value and also defer further
2599 * occurrences of this signal.
2600 *
2601 * Special case: user has done a sigpause. Here the
2602 * current mask is not of interest, but rather the
2603 * mask from before the sigpause is what we want
2604 * restored after the signal processing is completed.
2605 */
2606 if (ut->uu_flag & UT_SAS_OLDMASK) {
2607 returnmask = ut->uu_oldmask;
2608 ut->uu_flag &= ~UT_SAS_OLDMASK;
2609 ut->uu_oldmask = 0;
2610 } else
2611 returnmask = ut->uu_sigmask;
2612 ut->uu_sigmask |= ps->ps_catchmask[signum];
2613 if ((ps->ps_signodefer & mask) == 0)
2614 ut->uu_sigmask |= mask;
2615 if ((signum != SIGILL) && (signum != SIGTRAP) && (ps->ps_sigreset & mask)) {
2616 if ((signum != SIGCONT) && (sigprop[signum] & SA_IGNORE))
2617 p->p_sigignore |= mask;
2618 ps->ps_sigact[signum] = SIG_DFL;
2619 ps->ps_siginfo &= ~mask;
2620 ps->ps_signodefer &= ~mask;
2621 }
2622 #ifdef __ppc__
2623 /* FP exceptions need to be disabled before returning to user mode */
2624 if (signum == SIGFPE) {
2625 thread_enable_fpe(current_thread(), 0);
2626 }
2627 #endif /* __ppc__ */
2628
2629 if (ps->ps_sig != signum) {
2630 code = 0;
2631 } else {
2632 code = ps->ps_code;
2633 ps->ps_code = 0;
2634 }
2635 p->p_stats->p_ru.ru_nsignals++;
2636 sendsig(p, catcher, signum, returnmask, code);
2637 }
2638 signal_unlock(p);
2639 }
2640
2641 /*
2642 * Force the current process to exit with the specified signal, dumping core
2643 * if appropriate. We bypass the normal tests for masked and caught signals,
2644 * allowing unrecoverable failures to terminate the process without changing
2645 * signal state. Mark the accounting record with the signal termination.
2646 * If dumping core, save the signal number for the debugger. Calls exit and
2647 * does not return.
2648 */
2649 /* called with signal lock */
2650 void
2651 sigexit_locked(p, signum)
2652 register struct proc *p;
2653 int signum;
2654 {
2655
2656 sig_lock_to_exit(p);
2657 p->p_acflag |= AXSIG;
2658 if (sigprop[signum] & SA_CORE) {
2659 p->p_sigacts->ps_sig = signum;
2660 signal_unlock(p);
2661 if (coredump(p) == 0)
2662 signum |= WCOREFLAG;
2663 } else
2664 signal_unlock(p);
2665
2666 exit1(p, W_EXITCODE(0, signum), (int *)NULL);
2667 /* NOTREACHED */
2668 }
2669
2670
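/*
 * Attach a signal knote to the current process.  The knote goes on the
 * shared proc klist (see the comment above filt_signal below), so the
 * list is manipulated under the kernel funnel.
 */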
2671 static int
2672 filt_sigattach(struct knote *kn)
2673 {
2674 struct proc *p = current_proc();
2675 boolean_t funnel_state;
2676
2677 kn->kn_ptr.p_proc = p;
2678 kn->kn_flags |= EV_CLEAR; /* automatically set */
2679
2680 /* Take the funnel to protect the proc while adding to the list */
2681 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2682 KNOTE_ATTACH(&p->p_klist, kn);
2683 thread_funnel_set(kernel_flock, funnel_state);
2684
2685 return (0);
2686 }
2687
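/*
 * Detach a signal knote from its process's klist, again under the
 * kernel funnel.
 */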
2688 static void
2689 filt_sigdetach(struct knote *kn)
2690 {
2691 struct proc *p = kn->kn_ptr.p_proc;
2692 boolean_t funnel_state;
2693
2694 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2695 KNOTE_DETACH(&p->p_klist, kn);
2696 thread_funnel_set(kernel_flock, funnel_state);
2697 }
2698
2699 /*
2700 * signal knotes are shared with proc knotes, so we apply a mask to
2701 * the hint in order to differentiate them from process hints. This
2702 * could be avoided by using a signal-specific knote list, but probably
2703 * isn't worth the trouble.
2704 */
2705 static int
2706 filt_signal(struct knote *kn, long hint)
2707 {
2708
2709 if (hint & NOTE_SIGNAL) {
2710 hint &= ~NOTE_SIGNAL;
2711
2712 if (kn->kn_id == (unsigned int)hint)
2713 kn->kn_data++;
2714 }
2715 return (kn->kn_data != 0);
2716 }
2717
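/*
 * A minimal sketch of the posting side implied by filt_signal() above
 * (the actual call site lives in the psignal paths and is assumed here):
 * the hint is the signal number ORed with NOTE_SIGNAL, so the filter can
 * strip the marker and match the remainder against kn->kn_id, e.g.
 *
 *	KNOTE(&p->p_klist, NOTE_SIGNAL | signum);
 */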
2718
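/*
 * BSD portion of AST handling: charge any deferred profiling tick
 * (P_OWEUPC), deliver pending signals through issignal()/postsig(),
 * and run bsdinit_task() the first time through.  Everything here runs
 * under the kernel funnel.
 */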
2719 void
2720 bsd_ast(thread_t thr_act)
2721 {
2722 struct proc *p = current_proc();
2723 struct uthread *ut = get_bsdthread_info(thr_act);
2724 int signum;
2725 user_addr_t pc;
2726 boolean_t funnel_state;
2727 static int bsd_init_done = 0;
2728
2729 if (p == NULL)
2730 return;
2731
2732 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2733
2734 if ((p->p_flag & P_OWEUPC) && (p->p_flag & P_PROFIL)) {
2735 pc = get_useraddr();
2736 addupc_task(p, pc, 1);
2737 p->p_flag &= ~P_OWEUPC;
2738 }
2739
2740 if (CHECK_SIGNALS(p, current_thread(), ut)) {
2741 while ( (signum = issignal(p)) )
2742 postsig(signum);
2743 }
2744 if (!bsd_init_done) {
2745 bsd_init_done = 1;
2746 bsdinit_task();
2747 }
2748
2749 (void) thread_funnel_set(kernel_flock, FALSE);
2750 }
2751
2752 /*
2753 * The following routines are called via callout from bsd_hardclock
2754 * so that the psignals are issued in a thread context and under the funnel.
2755 */
2756 void
2757 psignal_vtalarm(struct proc *p)
2758 {
2759 boolean_t funnel_state;
2760
2761 if (p == NULL)
2762 return;
2763 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2764 psignal_lock(p, SIGVTALRM, 1);
2765 (void) thread_funnel_set(kernel_flock, FALSE);
2766 }
2767
2768 void
2769 psignal_xcpu(struct proc *p)
2770 {
2771 boolean_t funnel_state;
2772
2773 if (p == NULL)
2774 return;
2775 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2776 psignal_lock(p, SIGXCPU, 1);
2777 (void) thread_funnel_set(kernel_flock, FALSE);
2778 }
2779
2780 void
2781 psignal_sigprof(struct proc *p)
2782 {
2783 boolean_t funnel_state;
2784
2785 if (p == NULL)
2786 return;
2787 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2788 psignal_lock(p, SIGPROF, 1);
2789 (void) thread_funnel_set(kernel_flock, FALSE);
2790 }
2791
2792 /* ptrace: set the process runnable */
2793 void
2794 pt_setrunnable(struct proc *p)
2795 {
2796 task_t task;
2797
2798 task = p->task;
2799
2800 if (p->p_flag & P_TRACED) {
2801 p->p_stat = SRUN;
2802 if (p->sigwait) {
2803 wakeup((caddr_t)&(p->sigwait));
2804 task_release(task);
2805 }
2806 }
2807 }
2808
2809
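/*
 * Raise a Mach exception on behalf of BSD code: pack the code and
 * subcode into an exception_data array and hand it to bsd_exception().
 */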
2810 kern_return_t
2811 do_bsdexception(
2812 int exc,
2813 int code,
2814 int sub)
2815 {
2816 exception_data_type_t codes[EXCEPTION_CODE_MAX];
2817
2818 codes[0] = code;
2819 codes[1] = sub;
2820 return(bsd_exception(exc, codes, 2));
2821 }
2822
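/*
 * Return the subset of 'mask' that is pending, unblocked and not
 * ignored for any thread of the process (or only for the vfork-active
 * thread when P_INVFORK is set).  Returns 0 if the process is already
 * exiting.
 */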
2823 int
2824 proc_pendingsignals(struct proc *p, sigset_t mask)
2825 {
2826 struct uthread * uth;
2827 thread_t th;
2828 sigset_t bits = 0;
2829 int error;
2830
2831 /* If the process is in proc exit, return no signal info */
2832 if (p->p_lflag & P_LPEXIT)
2833 return(0);
2834
2835 /* Duplicate the signal lock code to allow recursion, since exit
2836 * holds the lock for too long.  All of this code is being reworked;
2837 * this is just a workaround for regressions until the new code
2838 * arrives.
2839 */
2840 ppend_retry:
2841 error = lockmgr((struct lock__bsd__ *)&p->signal_lock[0], (LK_EXCLUSIVE | LK_CANRECURSE), 0, (struct proc *)0);
2842 if (error == EINTR)
2843 goto ppend_retry;
2844
2845 if ((p->p_flag & P_INVFORK) && p->p_vforkact) {
2846 th = p->p_vforkact;
2847 uth = (struct uthread *)get_bsdthread_info(th);
2848 if (uth) {
2849 bits = (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
2850 }
2851 goto out;
2852 }
2853
2854 bits = 0;
2855 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
2856 bits |= (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
2857 }
2858 out:
2859 signal_unlock(p);
2860 return(bits);
2861 }
2862
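/*
 * Per-thread variant of proc_pendingsignals(): report which signals in
 * 'mask' are pending, unblocked and not ignored for the given thread.
 */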
2863 int
2864 thread_issignal(proc_t p, thread_t th, sigset_t mask)
2865 {
2866 struct uthread * uth;
2867 sigset_t bits=0;
2868
2869
2870 uth = (struct uthread *)get_bsdthread_info(th);
2871 if (uth) {
2872 bits = (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
2873 }
2874 return(bits);
2875 }
2876