[apple/xnu.git] / bsd / kern / kern_sig.c (xnu-2050.48.11)
1 /*
2 * Copyright (c) 1995-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1986, 1989, 1991, 1993
30 * The Regents of the University of California. All rights reserved.
31 * (c) UNIX System Laboratories, Inc.
32 * All or some portions of this file are derived from material licensed
33 * to the University of California by American Telephone and Telegraph
34 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
35 * the permission of UNIX System Laboratories, Inc.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement:
47 * This product includes software developed by the University of
48 * California, Berkeley and its contributors.
49 * 4. Neither the name of the University nor the names of its contributors
50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE.
64 *
65 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94
66 */
67 /*
68 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
69 * support for mandatory and extensible security protections. This notice
70 * is included in support of clause 2.2 (b) of the Apple Public License,
71 * Version 2.0.
72 */
73
74 #define SIGPROP /* include signal properties table */
75 #include <sys/param.h>
76 #include <sys/resourcevar.h>
77 #include <sys/proc_internal.h>
78 #include <sys/kauth.h>
79 #include <sys/systm.h>
80 #include <sys/timeb.h>
81 #include <sys/times.h>
82 #include <sys/acct.h>
83 #include <sys/file_internal.h>
84 #include <sys/kernel.h>
85 #include <sys/wait.h>
86 #include <sys/signalvar.h>
87 #include <sys/syslog.h>
88 #include <sys/stat.h>
89 #include <sys/lock.h>
90 #include <sys/kdebug.h>
91
92 #include <sys/mount.h>
93 #include <sys/sysproto.h>
94
95 #include <security/audit/audit.h>
96
97 #include <machine/spl.h>
98
99 #include <kern/cpu_number.h>
100
101 #include <sys/vm.h>
102 #include <sys/user.h> /* for coredump */
103 #include <kern/ast.h> /* for APC support */
104 #include <kern/lock.h>
105 #include <kern/task.h> /* extern void *get_bsdtask_info(task_t); */
106 #include <kern/thread.h>
107 #include <kern/sched_prim.h>
108 #include <kern/thread_call.h>
109 #include <mach/exception.h>
110 #include <mach/task.h>
111 #include <mach/thread_act.h>
112 #include <libkern/OSAtomic.h>
113
114 #include <sys/sdt.h>
115
116 /*
117 * Missing prototypes that Mach should export
118 *
119 * +++
120 */
121 extern int thread_enable_fpe(thread_t act, int onoff);
122 extern thread_t port_name_to_thread(mach_port_name_t port_name);
123 extern kern_return_t get_signalact(task_t , thread_t *, int);
124 extern unsigned int get_useraddr(void);
125
126 /*
127 * ---
128 */
129
130 extern void doexception(int exc, mach_exception_code_t code,
131 mach_exception_subcode_t sub);
132
133 static void stop(proc_t, proc_t);
134 int cansignal(proc_t, kauth_cred_t, proc_t, int, int);
135 int killpg1(proc_t, int, int, int, int);
136 int setsigvec(proc_t, thread_t, int, struct __kern_sigaction *, boolean_t in_sigstart);
137 static void psignal_uthread(thread_t, int);
138 kern_return_t do_bsdexception(int, int, int);
139 void __posix_sem_syscall_return(kern_return_t);
140
141 /* implementations in osfmk/kern/sync_sema.c. We do not want port.h in this scope, so void * them */
142 kern_return_t semaphore_timedwait_signal_trap_internal(mach_port_name_t, mach_port_name_t, unsigned int, clock_res_t, void (*)(kern_return_t));
143 kern_return_t semaphore_timedwait_trap_internal(mach_port_name_t, unsigned int, clock_res_t, void (*)(kern_return_t));
144 kern_return_t semaphore_wait_signal_trap_internal(mach_port_name_t, mach_port_name_t, void (*)(kern_return_t));
145 kern_return_t semaphore_wait_trap_internal(mach_port_name_t, void (*)(kern_return_t));
146
147 static int filt_sigattach(struct knote *kn);
148 static void filt_sigdetach(struct knote *kn);
149 static int filt_signal(struct knote *kn, long hint);
150 static void filt_signaltouch(struct knote *kn, struct kevent64_s *kev,
151 long type);
152
153 struct filterops sig_filtops = {
154 .f_attach = filt_sigattach,
155 .f_detach = filt_sigdetach,
156 .f_event = filt_signal,
157 .f_touch = filt_signaltouch,
158 };
159
160 /* structures and functions for killpg1 iteration callback and filters */
161 struct killpg1_filtargs {
162 int posix;
163 proc_t cp;
164 };
165
166 struct killpg1_iterargs {
167 proc_t cp;
168 kauth_cred_t uc;
169 int signum;
170 int * nfoundp;
171 int zombie;
172 };
173
174 static int killpg1_filt(proc_t p, void * arg);
175 static int killpg1_pgrpfilt(proc_t p, __unused void * arg);
176 static int killpg1_callback(proc_t p, void * arg);
177
178 static int pgsignal_filt(proc_t p, void * arg);
179 static int pgsignal_callback(proc_t p, void * arg);
180 static kern_return_t get_signalthread(proc_t, int, thread_t *);
181
182
183 /* flags for psignal_internal */
184 #define PSIG_LOCKED 0x1
185 #define PSIG_VFORK 0x2
186 #define PSIG_THREAD 0x4
187
188
189 static void psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum);
190
191 /*
192 * NOTE: Source and target may *NOT* overlap! (target is smaller)
193 */
194 static void
195 sigaltstack_kern_to_user32(struct kern_sigaltstack *in, struct user32_sigaltstack *out)
196 {
197 out->ss_sp = CAST_DOWN_EXPLICIT(user32_addr_t, in->ss_sp);
198 out->ss_size = CAST_DOWN_EXPLICIT(user32_size_t, in->ss_size);
199 out->ss_flags = in->ss_flags;
200 }
201
202 static void
203 sigaltstack_kern_to_user64(struct kern_sigaltstack *in, struct user64_sigaltstack *out)
204 {
205 out->ss_sp = in->ss_sp;
206 out->ss_size = in->ss_size;
207 out->ss_flags = in->ss_flags;
208 }
209
210 /*
211 * NOTE: Source and target are permitted to overlap! (source is smaller);
212 * this works because we copy fields in order from the end of the struct to
213 * the beginning.
214 */
215 static void
216 sigaltstack_user32_to_kern(struct user32_sigaltstack *in, struct kern_sigaltstack *out)
217 {
218 out->ss_flags = in->ss_flags;
219 out->ss_size = in->ss_size;
220 out->ss_sp = CAST_USER_ADDR_T(in->ss_sp);
221 }
222 static void
223 sigaltstack_user64_to_kern(struct user64_sigaltstack *in, struct kern_sigaltstack *out)
224 {
225 out->ss_flags = in->ss_flags;
226 out->ss_size = in->ss_size;
227 out->ss_sp = in->ss_sp;
228 }
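/*
 * Illustrative sketch (not compiled) of why the reverse field order in the
 * two *_to_kern converters above tolerates overlap: when the narrow user32
 * layout occupies a prefix of the kernel layout, reading ss_flags and
 * ss_size before the wider ss_sp store means no input byte is clobbered
 * before it is consumed. The aliasing pointer 'buf' is hypothetical.
 */
#if 0
static void overlap_sketch(void *buf)	/* buf aliases both layouts */
{
	struct user32_sigaltstack *in = (struct user32_sigaltstack *)buf;
	struct kern_sigaltstack *out = (struct kern_sigaltstack *)buf;

	out->ss_flags = in->ss_flags;	/* narrow fields read first... */
	out->ss_size  = in->ss_size;
	out->ss_sp    = CAST_USER_ADDR_T(in->ss_sp);	/* ...widest store lands last */
}
#endif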
229
230 static void
231 sigaction_kern_to_user32(struct kern_sigaction *in, struct user32_sigaction *out)
232 {
233 /* This assumes 32 bit __sa_handler is of type sig_t */
234 out->__sigaction_u.__sa_handler = CAST_DOWN_EXPLICIT(user32_addr_t,in->__sigaction_u.__sa_handler);
235 out->sa_mask = in->sa_mask;
236 out->sa_flags = in->sa_flags;
237 }
238 static void
239 sigaction_kern_to_user64(struct kern_sigaction *in, struct user64_sigaction *out)
240 {
242 /* 64 bit __sa_handler is stored directly; no narrowing cast needed */
242 out->__sigaction_u.__sa_handler = in->__sigaction_u.__sa_handler;
243 out->sa_mask = in->sa_mask;
244 out->sa_flags = in->sa_flags;
245 }
246
247 static void
248 __sigaction_user32_to_kern(struct __user32_sigaction *in, struct __kern_sigaction *out)
249 {
250 out->__sigaction_u.__sa_handler = CAST_USER_ADDR_T(in->__sigaction_u.__sa_handler);
251 out->sa_tramp = CAST_USER_ADDR_T(in->sa_tramp);
252 out->sa_mask = in->sa_mask;
253 out->sa_flags = in->sa_flags;
254 }
255
256 static void
257 __sigaction_user64_to_kern(struct __user64_sigaction *in, struct __kern_sigaction *out)
258 {
259 out->__sigaction_u.__sa_handler = in->__sigaction_u.__sa_handler;
260 out->sa_tramp = in->sa_tramp;
261 out->sa_mask = in->sa_mask;
262 out->sa_flags = in->sa_flags;
263 }
264
265 #if SIGNAL_DEBUG
266 void ram_printf(int);
267 int ram_debug=0;
268 unsigned int rdebug_proc=0;
269 void
270 ram_printf(int x)
271 {
272 printf("x is %d",x);
273
274 }
275 #endif /* SIGNAL_DEBUG */
276
277
278 void
279 signal_setast(thread_t sig_actthread)
280 {
281 act_set_astbsd(sig_actthread);
282 }
283
284 /*
285 * Can process p, with ucred uc, send the signal signum to process q?
286 * uc is refcounted by the caller, so its internal fields can be used safely.
287 * When called with a nonzero zombie arg, the proc list lock is held.
288 */
289 int
290 cansignal(proc_t p, kauth_cred_t uc, proc_t q, int signum, int zombie)
291 {
292 kauth_cred_t my_cred;
293 struct session * p_sessp = SESSION_NULL;
294 struct session * q_sessp = SESSION_NULL;
295 #if CONFIG_MACF
296 int error;
297
298 error = mac_proc_check_signal(p, q, signum);
299 if (error)
300 return (0);
301 #endif
302
303 /* you can signal yourself */
304 if (p == q)
305 return(1);
306
307 if (!suser(uc, NULL))
308 return (1); /* root can always signal */
309
310 if (zombie == 0)
311 proc_list_lock();
312 if (p->p_pgrp != PGRP_NULL)
313 p_sessp = p->p_pgrp->pg_session;
314 if (q->p_pgrp != PGRP_NULL)
315 q_sessp = q->p_pgrp->pg_session;
316
317 if (signum == SIGCONT && q_sessp == p_sessp) {
318 if (zombie == 0)
319 proc_list_unlock();
320 return (1); /* SIGCONT in session */
321 }
322
323 if (zombie == 0)
324 proc_list_unlock();
325
326 /*
327 * If the real or effective UID of the sender matches the real
328 * or saved UID of the target, permit the signal to
329 * be sent.
330 */
331 if (zombie == 0)
332 my_cred = kauth_cred_proc_ref(q);
333 else
334 my_cred = proc_ucred(q);
335
336 if (kauth_cred_getruid(uc) == kauth_cred_getruid(my_cred) ||
337 kauth_cred_getruid(uc) == kauth_cred_getsvuid(my_cred) ||
338 kauth_cred_getuid(uc) == kauth_cred_getruid(my_cred) ||
339 kauth_cred_getuid(uc) == kauth_cred_getsvuid(my_cred)) {
340 if (zombie == 0)
341 kauth_cred_unref(&my_cred);
342 return (1);
343 }
344
345 if (zombie == 0)
346 kauth_cred_unref(&my_cred);
347
348 return (0);
349 }
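/*
 * Userspace sketch of the policy cansignal() enforces: kill(pid, 0) runs
 * only this permission check without delivering anything, so it can probe
 * whether the caller could signal a target. Illustrative only; not
 * compiled into the kernel.
 */
#if 0
#include <signal.h>
#include <errno.h>
#include <stdio.h>

static void probe(pid_t pid)
{
	if (kill(pid, 0) == 0)
		printf("%d: uid checks pass, may signal\n", (int)pid);
	else if (errno == EPERM)
		printf("%d: exists, but the uid check above failed\n", (int)pid);
	else if (errno == ESRCH)
		printf("%d: no such process\n", (int)pid);
}
#endif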
350
351
352 /*
353 * Returns: 0 Success
354 * EINVAL
355 * copyout:EFAULT
356 * copyin:EFAULT
357 *
358 * Notes: Uses current thread as a parameter to inform PPC to enable
359 * FPU exceptions via setsigvec(); this operation is not proxy
360 * safe!
361 */
362 /* ARGSUSED */
363 int
364 sigaction(proc_t p, struct sigaction_args *uap, __unused int32_t *retval)
365 {
366 struct kern_sigaction vec;
367 struct __kern_sigaction __vec;
368
369 struct kern_sigaction *sa = &vec;
370 struct sigacts *ps = p->p_sigacts;
371
372 int signum;
373 int bit, error=0;
374
375 signum = uap->signum;
376 if (signum <= 0 || signum >= NSIG ||
377 signum == SIGKILL || signum == SIGSTOP)
378 return (EINVAL);
379
380 if (uap->osa) {
381 sa->sa_handler = ps->ps_sigact[signum];
382 sa->sa_mask = ps->ps_catchmask[signum];
383 bit = sigmask(signum);
384 sa->sa_flags = 0;
385 if ((ps->ps_sigonstack & bit) != 0)
386 sa->sa_flags |= SA_ONSTACK;
387 if ((ps->ps_sigintr & bit) == 0)
388 sa->sa_flags |= SA_RESTART;
389 if (ps->ps_siginfo & bit)
390 sa->sa_flags |= SA_SIGINFO;
391 if (ps->ps_signodefer & bit)
392 sa->sa_flags |= SA_NODEFER;
393 if (ps->ps_64regset & bit)
394 sa->sa_flags |= SA_64REGSET;
395 if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDSTOP))
396 sa->sa_flags |= SA_NOCLDSTOP;
397 if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDWAIT))
398 sa->sa_flags |= SA_NOCLDWAIT;
399
400 if (IS_64BIT_PROCESS(p)) {
401 struct user64_sigaction vec64;
402
403 sigaction_kern_to_user64(sa, &vec64);
404 error = copyout(&vec64, uap->osa, sizeof(vec64));
405 } else {
406 struct user32_sigaction vec32;
407
408 sigaction_kern_to_user32(sa, &vec32);
409 error = copyout(&vec32, uap->osa, sizeof(vec32));
410 }
411 if (error)
412 return (error);
413 }
414 if (uap->nsa) {
415 if (IS_64BIT_PROCESS(p)) {
416 struct __user64_sigaction __vec64;
417
418 error = copyin(uap->nsa, &__vec64, sizeof(__vec64));
419 __sigaction_user64_to_kern(&__vec64, &__vec);
420 } else {
421 struct __user32_sigaction __vec32;
422
423 error = copyin(uap->nsa, &__vec32, sizeof(__vec32));
424 __sigaction_user32_to_kern(&__vec32, &__vec);
425 }
426 if (error)
427 return (error);
428 __vec.sa_flags &= SA_USERSPACE_MASK; /* Only pass on valid sa_flags */
429 error = setsigvec(p, current_thread(), signum, &__vec, FALSE);
430 }
431 return (error);
432 }
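/*
 * Minimal userspace sketch of the syscall above: installing a SA_SIGINFO
 * handler. SA_SIGINFO sets the ps_siginfo bit and SA_RESTART keeps the
 * ps_sigintr bit clear in setsigvec() below. Illustrative only; the
 * handler name is hypothetical.
 */
#if 0
#include <signal.h>
#include <string.h>

static void on_usr1(int sig, siginfo_t *info, void *ctx)
{
	(void)sig; (void)info; (void)ctx;	/* async-signal-safe work only */
}

static int install(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_usr1;
	sa.sa_flags = SA_SIGINFO | SA_RESTART;
	sigemptyset(&sa.sa_mask);
	return sigaction(SIGUSR1, &sa, NULL);	/* old action via 3rd arg if non-NULL */
}
#endif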
433
434 /* Routines to manipulate bits on all threads */
435 int
436 clear_procsiglist(proc_t p, int bit, boolean_t in_signalstart)
437 {
438 struct uthread * uth;
439 thread_t thact;
440
441 proc_lock(p);
442 if (!in_signalstart)
443 proc_signalstart(p, 1);
444
445 if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
446 thact = p->p_vforkact;
447 uth = (struct uthread *)get_bsdthread_info(thact);
448 if (uth) {
449 uth->uu_siglist &= ~bit;
450 }
451 if (!in_signalstart)
452 proc_signalend(p, 1);
453 proc_unlock(p);
454 return(0);
455 }
456
457 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
458 uth->uu_siglist &= ~bit;
459 }
460 p->p_siglist &= ~bit;
461 if (!in_signalstart)
462 proc_signalend(p, 1);
463 proc_unlock(p);
464
465 return(0);
466 }
467
468
469 static int
470 unblock_procsigmask(proc_t p, int bit)
471 {
472 struct uthread * uth;
473 thread_t thact;
474
475 proc_lock(p);
476 proc_signalstart(p, 1);
477
478 if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
479 thact = p->p_vforkact;
480 uth = (struct uthread *)get_bsdthread_info(thact);
481 if (uth) {
482 uth->uu_sigmask &= ~bit;
483 }
484 p->p_sigmask &= ~bit;
485 proc_signalend(p, 1);
486 proc_unlock(p);
487 return(0);
488 }
489 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
490 uth->uu_sigmask &= ~bit;
491 }
492 p->p_sigmask &= ~bit;
493
494 proc_signalend(p, 1);
495 proc_unlock(p);
496 return(0);
497 }
498
499 static int
500 block_procsigmask(proc_t p, int bit)
501 {
502 struct uthread * uth;
503 thread_t thact;
504
505 proc_lock(p);
506 proc_signalstart(p, 1);
507
508 if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
509 thact = p->p_vforkact;
510 uth = (struct uthread *)get_bsdthread_info(thact);
511 if (uth) {
512 uth->uu_sigmask |= bit;
513 }
514 p->p_sigmask |= bit;
515 proc_signalend(p, 1);
516 proc_unlock(p);
517 return(0);
518 }
519 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
520 uth->uu_sigmask |= bit;
521 }
522 p->p_sigmask |= bit;
523
524 proc_signalend(p, 1);
525 proc_unlock(p);
526 return(0);
527 }
528
529 int
530 set_procsigmask(proc_t p, int bit)
531 {
532 struct uthread * uth;
533 thread_t thact;
534
535 proc_lock(p);
536 proc_signalstart(p, 1);
537
538 if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
539 thact = p->p_vforkact;
540 uth = (struct uthread *)get_bsdthread_info(thact);
541 if (uth) {
542 uth->uu_sigmask = bit;
543 }
544 p->p_sigmask = bit;
545 proc_signalend(p, 1);
546 proc_unlock(p);
547 return(0);
548 }
549 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
550 uth->uu_sigmask = bit;
551 }
552 p->p_sigmask = bit;
553 proc_signalend(p, 1);
554 proc_unlock(p);
555
556 return(0);
557 }
558
559 /* XXX should be static? */
560 /*
561 * Notes: The thread parameter is used in the PPC case to select the
562 * thread on which the floating point exception will be enabled
563 * or disabled. We can't simply take current_thread(), since
564 * this is called from posix_spawn() on the not currently running
565 * process/thread pair.
566 *
567 * We mark the thread as unused to allow compilation without warning
568 * on non-PPC platforms.
569 */
570 int
571 setsigvec(proc_t p, __unused thread_t thread, int signum, struct __kern_sigaction *sa, boolean_t in_sigstart)
572 {
573 struct sigacts *ps = p->p_sigacts;
574 int bit;
575
576 if ((signum == SIGKILL || signum == SIGSTOP) &&
577 sa->sa_handler != SIG_DFL)
578 return(EINVAL);
579 bit = sigmask(signum);
580 /*
581 * Change setting atomically.
582 */
583 ps->ps_sigact[signum] = sa->sa_handler;
584 ps->ps_trampact[signum] = sa->sa_tramp;
585 ps->ps_catchmask[signum] = sa->sa_mask &~ sigcantmask;
586 if (sa->sa_flags & SA_SIGINFO)
587 ps->ps_siginfo |= bit;
588 else
589 ps->ps_siginfo &= ~bit;
590 if (sa->sa_flags & SA_64REGSET)
591 ps->ps_64regset |= bit;
592 else
593 ps->ps_64regset &= ~bit;
594 if ((sa->sa_flags & SA_RESTART) == 0)
595 ps->ps_sigintr |= bit;
596 else
597 ps->ps_sigintr &= ~bit;
598 if (sa->sa_flags & SA_ONSTACK)
599 ps->ps_sigonstack |= bit;
600 else
601 ps->ps_sigonstack &= ~bit;
602 if (sa->sa_flags & SA_USERTRAMP)
603 ps->ps_usertramp |= bit;
604 else
605 ps->ps_usertramp &= ~bit;
606 if (sa->sa_flags & SA_RESETHAND)
607 ps->ps_sigreset |= bit;
608 else
609 ps->ps_sigreset &= ~bit;
610 if (sa->sa_flags & SA_NODEFER)
611 ps->ps_signodefer |= bit;
612 else
613 ps->ps_signodefer &= ~bit;
614 if (signum == SIGCHLD) {
615 if (sa->sa_flags & SA_NOCLDSTOP)
616 OSBitOrAtomic(P_NOCLDSTOP, &p->p_flag);
617 else
618 OSBitAndAtomic(~((uint32_t)P_NOCLDSTOP), &p->p_flag);
619 if ((sa->sa_flags & SA_NOCLDWAIT) || (sa->sa_handler == SIG_IGN))
620 OSBitOrAtomic(P_NOCLDWAIT, &p->p_flag);
621 else
622 OSBitAndAtomic(~((uint32_t)P_NOCLDWAIT), &p->p_flag);
623 }
624
625 /*
626 * Set bit in p_sigignore for signals that are set to SIG_IGN,
627 * and for signals set to SIG_DFL where the default is to ignore.
628 * However, don't put SIGCONT in p_sigignore,
629 * as we have to restart the process.
630 */
631 if (sa->sa_handler == SIG_IGN ||
632 (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) {
633
634 clear_procsiglist(p, bit, in_sigstart);
635 if (signum != SIGCONT)
636 p->p_sigignore |= bit; /* easier in psignal */
637 p->p_sigcatch &= ~bit;
638 } else {
639 p->p_sigignore &= ~bit;
640 if (sa->sa_handler == SIG_DFL)
641 p->p_sigcatch &= ~bit;
642 else
643 p->p_sigcatch |= bit;
644 }
645 return(0);
646 }
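/*
 * Userspace sketch of the SIGCHLD special case handled above: per the
 * P_NOCLDWAIT logic, setting the handler to SIG_IGN (or passing
 * SA_NOCLDWAIT) asks the kernel to reap children without a wait().
 * Illustrative only; not compiled here.
 */
#if 0
#include <signal.h>

static void no_zombies(void)
{
	struct sigaction sa;

	sa.sa_handler = SIG_IGN;	/* sa_handler == SIG_IGN sets P_NOCLDWAIT */
	sa.sa_flags = 0;		/* SA_NOCLDWAIT would have the same effect */
	sigemptyset(&sa.sa_mask);
	(void)sigaction(SIGCHLD, &sa, NULL);
}
#endif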
647
648 /*
649 * Initialize signal state for process 0;
650 * set to ignore signals that are ignored by default.
651 */
652 void
653 siginit(proc_t p)
654 {
655 int i;
656
657 for (i = 1; i < NSIG; i++)
658 if (sigprop[i] & SA_IGNORE && i != SIGCONT)
659 p->p_sigignore |= sigmask(i);
660 }
661
662 /*
663 * Reset signals for an exec of the specified process.
664 */
665 void
666 execsigs(proc_t p, thread_t thread)
667 {
668 struct sigacts *ps = p->p_sigacts;
669 int nc, mask;
670 struct uthread *ut;
671
672 ut = (struct uthread *)get_bsdthread_info(thread);
673
674 /*
675 * transfer saved signal states from the process
676 * back to the current thread.
677 *
678 * NOTE: We do this without the process locked,
679 * because we are guaranteed to be single-threaded
680 * by this point in exec and the p_siglist is
681 * only accessed by threads inside the process.
682 */
683 ut->uu_siglist |= p->p_siglist;
684 p->p_siglist = 0;
685
686 /*
687 * Reset caught signals. Held signals remain held
688 * through p_sigmask (unless they were caught,
689 * and are now ignored by default).
690 */
691 while (p->p_sigcatch) {
692 nc = ffs((long)p->p_sigcatch);
693 mask = sigmask(nc);
694 p->p_sigcatch &= ~mask;
695 if (sigprop[nc] & SA_IGNORE) {
696 if (nc != SIGCONT)
697 p->p_sigignore |= mask;
698 ut->uu_siglist &= ~mask;
699 }
700 ps->ps_sigact[nc] = SIG_DFL;
701 }
702
703 /*
704 * Reset stack state to the user stack.
705 * Clear set of signals caught on the signal stack.
706 */
707 /* thread */
708 ut->uu_sigstk.ss_flags = SA_DISABLE;
709 ut->uu_sigstk.ss_size = 0;
710 ut->uu_sigstk.ss_sp = USER_ADDR_NULL;
711 ut->uu_flag &= ~UT_ALTSTACK;
712 /* process */
713 ps->ps_sigonstack = 0;
714 }
715
716 /*
717 * Manipulate signal mask.
718 * Note that we receive new mask, not pointer,
719 * and return old mask as return value;
720 * the library stub does the rest.
721 */
722 int
723 sigprocmask(proc_t p, struct sigprocmask_args *uap, __unused int32_t *retval)
724 {
725 int error = 0;
726 sigset_t oldmask, nmask;
727 user_addr_t omask = uap->omask;
728 struct uthread *ut;
729
730 ut = (struct uthread *)get_bsdthread_info(current_thread());
731 oldmask = ut->uu_sigmask;
732
733 if (uap->mask == USER_ADDR_NULL) {
734 /* just want old mask */
735 goto out;
736 }
737 error = copyin(uap->mask, &nmask, sizeof(sigset_t));
738 if (error)
739 goto out;
740
741 switch (uap->how) {
742 case SIG_BLOCK:
743 block_procsigmask(p, (nmask & ~sigcantmask));
744 signal_setast(current_thread());
745 break;
746
747 case SIG_UNBLOCK:
748 unblock_procsigmask(p, (nmask & ~sigcantmask));
749 signal_setast(current_thread());
750 break;
751
752 case SIG_SETMASK:
753 set_procsigmask(p, (nmask & ~sigcantmask));
754 signal_setast(current_thread());
755 break;
756
757 default:
758 error = EINVAL;
759 break;
760 }
761 out:
762 if (!error && omask != USER_ADDR_NULL)
763 copyout(&oldmask, omask, sizeof(sigset_t));
764 return (error);
765 }
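/*
 * Userspace sketch of the three 'how' modes handled above. SIG_BLOCK ors
 * into the mask, SIG_SETMASK replaces it; SIGKILL/SIGSTOP are silently
 * excluded by the sigcantmask filtering. Illustrative only; not compiled
 * here.
 */
#if 0
#include <signal.h>

static void critical_section(void)
{
	sigset_t set, old;

	sigemptyset(&set);
	sigaddset(&set, SIGINT);
	sigprocmask(SIG_BLOCK, &set, &old);	/* SIG_BLOCK: or into the mask */

	/* ... work that must not be interrupted by SIGINT ... */

	sigprocmask(SIG_SETMASK, &old, NULL);	/* restore; a pending SIGINT delivers now */
}
#endif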
766
767 int
768 sigpending(__unused proc_t p, struct sigpending_args *uap, __unused int32_t *retval)
769 {
770 struct uthread *ut;
771 sigset_t pendlist;
772
773 ut = (struct uthread *)get_bsdthread_info(current_thread());
774 pendlist = ut->uu_siglist;
775
776 if (uap->osv)
777 copyout(&pendlist, uap->osv, sizeof(sigset_t));
778 return(0);
779 }
780
781 /*
782 * Suspend process until signal, providing mask to be set
783 * in the meantime. Note nonstandard calling convention:
784 * libc stub passes mask, not pointer, to save a copyin.
785 */
786
787 static int
788 sigcontinue(__unused int error)
789 {
790 // struct uthread *ut = get_bsdthread_info(current_thread());
791 unix_syscall_return(EINTR);
792 }
793
794 int
795 sigsuspend(proc_t p, struct sigsuspend_args *uap, int32_t *retval)
796 {
797 __pthread_testcancel(1);
798 return(sigsuspend_nocancel(p, (struct sigsuspend_nocancel_args *)uap, retval));
799 }
800
801 int
802 sigsuspend_nocancel(proc_t p, struct sigsuspend_nocancel_args *uap, __unused int32_t *retval)
803 {
804 struct uthread *ut;
805
806 ut = (struct uthread *)get_bsdthread_info(current_thread());
807
808 /*
809 * When returning from sigpause, we want
810 * the old mask to be restored after the
811 * signal handler has finished. Thus, we
812 * save it here and mark the sigacts structure
813 * to indicate this.
814 */
815 ut->uu_oldmask = ut->uu_sigmask;
816 ut->uu_flag |= UT_SAS_OLDMASK;
817 ut->uu_sigmask = (uap->mask & ~sigcantmask);
818 (void) tsleep0((caddr_t) p, PPAUSE|PCATCH, "pause", 0, sigcontinue);
819 /* always return EINTR rather than ERESTART... */
820 return (EINTR);
821 }
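/*
 * Userspace sketch of the classic race-free wait this syscall enables:
 * block the signal, test the flag, then atomically restore the old mask
 * and sleep. Assumes a SIGUSR1 handler that sets got_usr1 is installed
 * elsewhere (hypothetical). Illustrative only; not compiled here.
 */
#if 0
#include <signal.h>

static volatile sig_atomic_t got_usr1;

static void wait_for_usr1(void)
{
	sigset_t block, orig;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &orig);	/* close the race window */

	while (!got_usr1)
		sigsuspend(&orig);		/* unblock + sleep atomically; returns EINTR */

	sigprocmask(SIG_SETMASK, &orig, NULL);
}
#endif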
822
823
824 int
825 __disable_threadsignal(__unused proc_t p,
826 __unused struct __disable_threadsignal_args *uap,
827 __unused int32_t *retval)
828 {
829 struct uthread *uth;
830
831 uth = (struct uthread *)get_bsdthread_info(current_thread());
832
833 /* No longer valid to have any signal delivered */
834 uth->uu_flag |= (UT_NO_SIGMASK | UT_CANCELDISABLE);
835
836 return(0);
837
838 }
839
840 void
841 __pthread_testcancel(int presyscall)
842 {
843
844 thread_t self = current_thread();
845 struct uthread * uthread;
846
847 uthread = (struct uthread *)get_bsdthread_info(self);
848
849
850 uthread->uu_flag &= ~UT_NOTCANCELPT;
851
852 if ((uthread->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
853 if(presyscall != 0) {
854 unix_syscall_return(EINTR);
855 /* NOTREACHED */
856 } else
857 thread_abort_safely(self);
858 }
859 }
860
861
862
863 int
864 __pthread_markcancel(__unused proc_t p,
865 struct __pthread_markcancel_args *uap, __unused int32_t *retval)
866 {
867 thread_act_t target_act;
868 int error = 0;
869 struct uthread *uth;
870
871 target_act = (thread_act_t)port_name_to_thread(uap->thread_port);
872
873 if (target_act == THR_ACT_NULL)
874 return (ESRCH);
875
876 uth = (struct uthread *)get_bsdthread_info(target_act);
877
878 /* if the thread is in vfork do not cancel */
879 if ((uth->uu_flag & (UT_VFORK | UT_CANCEL | UT_CANCELED )) == 0) {
880 uth->uu_flag |= (UT_CANCEL | UT_NO_SIGMASK);
881 if (((uth->uu_flag & UT_NOTCANCELPT) == 0)
882 && ((uth->uu_flag & UT_CANCELDISABLE) == 0))
883 thread_abort_safely(target_act);
884 }
885
886 thread_deallocate(target_act);
887 return (error);
888 }
889
890 /* if action == 0: return the cancellation state;
891 * if marked for cancellation, mark the thread canceled.
892 * if action == 1: enable cancel handling.
893 * if action == 2: disable cancel handling.
894 */
895 int
896 __pthread_canceled(__unused proc_t p,
897 struct __pthread_canceled_args *uap, __unused int32_t *retval)
898 {
899 thread_act_t thread;
900 struct uthread *uth;
901 int action = uap->action;
902
903 thread = current_thread();
904 uth = (struct uthread *)get_bsdthread_info(thread);
905
906 switch (action) {
907 case 1:
908 uth->uu_flag &= ~UT_CANCELDISABLE;
909 return(0);
910 case 2:
911 uth->uu_flag |= UT_CANCELDISABLE;
912 return(0);
913 case 0:
914 default:
915 /* if the thread is in vfork do not cancel */
916 if((uth->uu_flag & ( UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
917 uth->uu_flag &= ~UT_CANCEL;
918 uth->uu_flag |= (UT_CANCELED | UT_NO_SIGMASK);
919 return(0);
920 }
921 return(EINVAL);
922 }
923 return(EINVAL);
924 }
925
926 void
927 __posix_sem_syscall_return(kern_return_t kern_result)
928 {
929 int error = 0;
930
931 if (kern_result == KERN_SUCCESS)
932 error = 0;
933 else if (kern_result == KERN_ABORTED)
934 error = EINTR;
935 else if (kern_result == KERN_OPERATION_TIMED_OUT)
936 error = ETIMEDOUT;
937 else
938 error = EINVAL;
939 unix_syscall_return(error);
940 /* does not return */
941 }
942
943 #if OLD_SEMWAIT_SIGNAL
944 /*
945 * Returns: 0 Success
946 * EINTR
947 * ETIMEDOUT
948 * EINVAL
949 * EFAULT if timespec is NULL
950 */
951 int
952 __old_semwait_signal(proc_t p, struct __old_semwait_signal_args *uap,
953 int32_t *retval)
954 {
955 __pthread_testcancel(0);
956 return(__old_semwait_signal_nocancel(p, (struct __old_semwait_signal_nocancel_args *)uap, retval));
957 }
958
959 int
960 __old_semwait_signal_nocancel(proc_t p, struct __old_semwait_signal_nocancel_args *uap,
961 __unused int32_t *retval)
962 {
963
964 kern_return_t kern_result;
965 int error;
966 mach_timespec_t then;
967 struct timespec now;
968 struct user_timespec ts;
969 boolean_t truncated_timeout = FALSE;
970
971 if(uap->timeout) {
972
973 if (IS_64BIT_PROCESS(p)) {
974 struct user64_timespec ts64;
975 error = copyin(uap->ts, &ts64, sizeof(ts64));
976 ts.tv_sec = ts64.tv_sec;
977 ts.tv_nsec = ts64.tv_nsec;
978 } else {
979 struct user32_timespec ts32;
980 error = copyin(uap->ts, &ts32, sizeof(ts32));
981 ts.tv_sec = ts32.tv_sec;
982 ts.tv_nsec = ts32.tv_nsec;
983 }
984
985 if (error) {
986 return error;
987 }
988
989 if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) {
990 ts.tv_sec = 0xFFFFFFFF;
991 ts.tv_nsec = 0;
992 truncated_timeout = TRUE;
993 }
994
995 if (uap->relative) {
996 then.tv_sec = ts.tv_sec;
997 then.tv_nsec = ts.tv_nsec;
998 } else {
999 nanotime(&now);
1000
1001 /* if the time has already elapsed, use a zero timespec to bail out right away */
1002 if (now.tv_sec == ts.tv_sec ?
1003 now.tv_nsec > ts.tv_nsec :
1004 now.tv_sec > ts.tv_sec) {
1005 then.tv_sec = 0;
1006 then.tv_nsec = 0;
1007 } else {
1008 then.tv_sec = ts.tv_sec - now.tv_sec;
1009 then.tv_nsec = ts.tv_nsec - now.tv_nsec;
1010 if (then.tv_nsec < 0) {
1011 then.tv_nsec += NSEC_PER_SEC;
1012 then.tv_sec--;
1013 }
1014 }
1015 }
1016
1017 if (uap->mutex_sem == 0)
1018 kern_result = semaphore_timedwait_trap_internal((mach_port_name_t)uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
1019 else
1020 kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
1021
1022 } else {
1023
1024 if (uap->mutex_sem == 0)
1025 kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return);
1026 else
1027
1028 kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return);
1029 }
1030
1031 if (kern_result == KERN_SUCCESS && !truncated_timeout)
1032 return(0);
1033 else if (kern_result == KERN_SUCCESS && truncated_timeout)
1034 return(EINTR); /* simulate an exceptional condition because Mach doesn't support a longer timeout */
1035 else if (kern_result == KERN_ABORTED)
1036 return(EINTR);
1037 else if (kern_result == KERN_OPERATION_TIMED_OUT)
1038 return(ETIMEDOUT);
1039 else
1040 return(EINVAL);
1041 }
1042 #endif /* OLD_SEMWAIT_SIGNAL*/
1043
1044 /*
1045 * Returns: 0 Success
1046 * EINTR
1047 * ETIMEDOUT
1048 * EINVAL
1049 * EFAULT if timespec is NULL
1050 */
1051 int
1052 __semwait_signal(proc_t p, struct __semwait_signal_args *uap,
1053 int32_t *retval)
1054 {
1055 __pthread_testcancel(0);
1056 return(__semwait_signal_nocancel(p, (struct __semwait_signal_nocancel_args *)uap, retval));
1057 }
1058
1059 int
1060 __semwait_signal_nocancel(__unused proc_t p, struct __semwait_signal_nocancel_args *uap,
1061 __unused int32_t *retval)
1062 {
1063
1064 kern_return_t kern_result;
1065 mach_timespec_t then;
1066 struct timespec now;
1067 struct user_timespec ts;
1068 boolean_t truncated_timeout = FALSE;
1069
1070 if(uap->timeout) {
1071
1072 ts.tv_sec = uap->tv_sec;
1073 ts.tv_nsec = uap->tv_nsec;
1074
1075 if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) {
1076 ts.tv_sec = 0xFFFFFFFF;
1077 ts.tv_nsec = 0;
1078 truncated_timeout = TRUE;
1079 }
1080
1081 if (uap->relative) {
1082 then.tv_sec = ts.tv_sec;
1083 then.tv_nsec = ts.tv_nsec;
1084 } else {
1085 nanotime(&now);
1086
1087 /* if the time has already elapsed, use a zero timespec to bail out right away */
1088 if (now.tv_sec == ts.tv_sec ?
1089 now.tv_nsec > ts.tv_nsec :
1090 now.tv_sec > ts.tv_sec) {
1091 then.tv_sec = 0;
1092 then.tv_nsec = 0;
1093 } else {
1094 then.tv_sec = ts.tv_sec - now.tv_sec;
1095 then.tv_nsec = ts.tv_nsec - now.tv_nsec;
1096 if (then.tv_nsec < 0) {
1097 then.tv_nsec += NSEC_PER_SEC;
1098 then.tv_sec--;
1099 }
1100 }
1101 }
1102
1103 if (uap->mutex_sem == 0)
1104 kern_result = semaphore_timedwait_trap_internal((mach_port_name_t)uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
1105 else
1106 kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
1107
1108 } else {
1109
1110 if (uap->mutex_sem == 0)
1111 kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return);
1112 else
1113
1114 kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return);
1115 }
1116
1117 if (kern_result == KERN_SUCCESS && !truncated_timeout)
1118 return(0);
1119 else if (kern_result == KERN_SUCCESS && truncated_timeout)
1120 return(EINTR); /* simulate an exceptional condition because Mach doesn't support a longer timeout */
1121 else if (kern_result == KERN_ABORTED)
1122 return(EINTR);
1123 else if (kern_result == KERN_OPERATION_TIMED_OUT)
1124 return(ETIMEDOUT);
1125 else
1126 return(EINVAL);
1127 }
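/*
 * Worked sketch of the absolute-to-relative conversion both semwait paths
 * above perform, including the nanosecond borrow. The deadline and current
 * time values are hypothetical. Illustrative only; not compiled here.
 */
#if 0
#include <time.h>

static void remaining_time_sketch(void)
{
	/* hypothetical deadline 5.2s, current time 3.9s */
	struct timespec ts  = { 5, 200000000 };
	struct timespec now = { 3, 900000000 };
	struct timespec then;

	then.tv_sec  = ts.tv_sec - now.tv_sec;		/* 2 */
	then.tv_nsec = ts.tv_nsec - now.tv_nsec;	/* -700000000 */
	if (then.tv_nsec < 0) {				/* borrow one second */
		then.tv_nsec += 1000000000L;		/* NSEC_PER_SEC; now 300000000 */
		then.tv_sec--;				/* 1: 1.3s remain */
	}
}
#endif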
1128
1129
1130 int
1131 __pthread_kill(__unused proc_t p, struct __pthread_kill_args *uap,
1132 __unused int32_t *retval)
1133 {
1134 thread_t target_act;
1135 int error = 0;
1136 int signum = uap->sig;
1137 struct uthread *uth;
1138
1139 target_act = (thread_t)port_name_to_thread(uap->thread_port);
1140
1141 if (target_act == THREAD_NULL)
1142 return (ESRCH);
1143 if ((u_int)signum >= NSIG) {
1144 error = EINVAL;
1145 goto out;
1146 }
1147
1148 uth = (struct uthread *)get_bsdthread_info(target_act);
1149
1150 if (uth->uu_flag & UT_NO_SIGMASK) {
1151 error = ESRCH;
1152 goto out;
1153 }
1154
1155 if (signum)
1156 psignal_uthread(target_act, signum);
1157 out:
1158 thread_deallocate(target_act);
1159 return (error);
1160 }
1161
1162
1163 int
1164 __pthread_sigmask(__unused proc_t p, struct __pthread_sigmask_args *uap,
1165 __unused int32_t *retval)
1166 {
1167 user_addr_t set = uap->set;
1168 user_addr_t oset = uap->oset;
1169 sigset_t nset;
1170 int error = 0;
1171 struct uthread *ut;
1172 sigset_t oldset;
1173
1174 ut = (struct uthread *)get_bsdthread_info(current_thread());
1175 oldset = ut->uu_sigmask;
1176
1177 if (set == USER_ADDR_NULL) {
1178 /* need only old mask */
1179 goto out;
1180 }
1181
1182 error = copyin(set, &nset, sizeof(sigset_t));
1183 if (error)
1184 goto out;
1185
1186 switch (uap->how) {
1187 case SIG_BLOCK:
1188 ut->uu_sigmask |= (nset & ~sigcantmask);
1189 break;
1190
1191 case SIG_UNBLOCK:
1192 ut->uu_sigmask &= ~(nset);
1193 signal_setast(current_thread());
1194 break;
1195
1196 case SIG_SETMASK:
1197 ut->uu_sigmask = (nset & ~sigcantmask);
1198 signal_setast(current_thread());
1199 break;
1200
1201 default:
1202 error = EINVAL;
1203
1204 }
1205 out:
1206 if (!error && oset != USER_ADDR_NULL)
1207 copyout(&oldset, oset, sizeof(sigset_t));
1208
1209 return(error);
1210 }
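/*
 * Userspace sketch of the per-thread mask update above: unlike
 * sigprocmask(), which walks every uthread, pthread_sigmask() changes
 * only the calling thread's uu_sigmask. Illustrative only; not compiled
 * here.
 */
#if 0
#include <pthread.h>
#include <signal.h>

static void block_usr2_in_this_thread(void)
{
	sigset_t set, old;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR2);
	pthread_sigmask(SIG_BLOCK, &set, &old);	/* this thread only */
}
#endif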
1211
1212 /*
1213 * Returns: 0 Success
1214 * EINVAL
1215 * copyin:EFAULT
1216 * copyout:EFAULT
1217 */
1218 int
1219 __sigwait(proc_t p, struct __sigwait_args *uap, int32_t *retval)
1220 {
1221 __pthread_testcancel(1);
1222 return(__sigwait_nocancel(p, (struct __sigwait_nocancel_args *)uap, retval));
1223 }
1224
1225 int
1226 __sigwait_nocancel(proc_t p, struct __sigwait_nocancel_args *uap, __unused int32_t *retval)
1227 {
1228 struct uthread *ut;
1229 struct uthread *uth;
1230 int error = 0;
1231 sigset_t mask;
1232 sigset_t siglist;
1233 sigset_t sigw=0;
1234 int signum;
1235
1236 ut = (struct uthread *)get_bsdthread_info(current_thread());
1237
1238 if (uap->set == USER_ADDR_NULL)
1239 return(EINVAL);
1240
1241 error = copyin(uap->set, &mask, sizeof(sigset_t));
1242 if (error)
1243 return(error);
1244
1245 siglist = (mask & ~sigcantmask);
1246
1247 if (siglist == 0)
1248 return(EINVAL);
1249
1250 proc_lock(p);
1251 if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
1252 proc_unlock(p);
1253 return(EINVAL);
1254 } else {
1255 proc_signalstart(p, 1);
1256 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
1257 if ( (sigw = uth->uu_siglist & siglist) ) {
1258 break;
1259 }
1260 }
1261 proc_signalend(p, 1);
1262 }
1263
1264 if (sigw) {
1265 /* The signal was pending on a thread */
1266 goto sigwait1;
1267 }
1268 /*
1269 * When returning from sigwait, we want
1270 * the old mask to be restored after the
1271 * signal handler has finished. Thus, we
1272 * save it here and mark the sigacts structure
1273 * to indicate this.
1274 */
1275 uth = ut; /* wait for it to be delivered to us */
1276 ut->uu_oldmask = ut->uu_sigmask;
1277 ut->uu_flag |= UT_SAS_OLDMASK;
1278 if (siglist == (sigset_t)0) {
1279 proc_unlock(p);
1280 return(EINVAL);
1281 }
1282 /* SIGKILL and SIGSTOP are not maskable either */
1283 ut->uu_sigmask = ~(siglist|sigcantmask);
1284 ut->uu_sigwait = siglist;
1285
1286 /* No Continuations for now */
1287 error = msleep((caddr_t)&ut->uu_sigwait, &p->p_mlock, PPAUSE|PCATCH, "pause", 0);
1288
1289 if (error == ERESTART)
1290 error = 0;
1291
1292 sigw = (ut->uu_sigwait & siglist);
1293 ut->uu_sigmask = ut->uu_oldmask;
1294 ut->uu_oldmask = 0;
1295 ut->uu_flag &= ~UT_SAS_OLDMASK;
1296 sigwait1:
1297 ut->uu_sigwait = 0;
1298 if (!error) {
1299 signum = ffs((unsigned int)sigw);
1300 if (!signum)
1301 panic("sigwait with no signal wakeup");
1302 /* Clear the pending signal in the thread it was delivered */
1303 uth->uu_siglist &= ~(sigmask(signum));
1304
1305 #if CONFIG_DTRACE
1306 DTRACE_PROC2(signal__clear, int, signum, siginfo_t *, &(ut->t_dtrace_siginfo));
1307 #endif
1308
1309 proc_unlock(p);
1310 if (uap->sig != USER_ADDR_NULL)
1311 error = copyout(&signum, uap->sig, sizeof(int));
1312 } else
1313 proc_unlock(p);
1314
1315 return(error);
1316
1317 }
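/*
 * Userspace sketch of the dedicated signal-handling-thread pattern this
 * syscall supports (the sigwait(3) wrapper is the usual entry point):
 * block the set in every thread, then have one thread loop on sigwait().
 * Illustrative only; not compiled here.
 */
#if 0
#include <pthread.h>
#include <signal.h>

static void *sig_thread(void *arg)
{
	sigset_t *set = arg;
	int sig;

	for (;;) {
		if (sigwait(set, &sig) == 0) {
			/* sig identifies the delivered signal */
		}
	}
	return NULL;
}

static void start(void)
{
	static sigset_t set;
	pthread_t tid;

	sigemptyset(&set);
	sigaddset(&set, SIGTERM);
	pthread_sigmask(SIG_BLOCK, &set, NULL);	/* block in all threads first */
	pthread_create(&tid, NULL, sig_thread, &set);
}
#endif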
1318
1319 int
1320 sigaltstack(__unused proc_t p, struct sigaltstack_args *uap, __unused int32_t *retval)
1321 {
1322 struct kern_sigaltstack ss;
1323 struct kern_sigaltstack *pstk;
1324 int error;
1325 struct uthread *uth;
1326 int onstack;
1327
1328 uth = (struct uthread *)get_bsdthread_info(current_thread());
1329
1330 pstk = &uth->uu_sigstk;
1331 if ((uth->uu_flag & UT_ALTSTACK) == 0)
1332 uth->uu_sigstk.ss_flags |= SA_DISABLE;
1333 onstack = pstk->ss_flags & SA_ONSTACK;
1334 if (uap->oss) {
1335 if (IS_64BIT_PROCESS(p)) {
1336 struct user64_sigaltstack ss64;
1337 sigaltstack_kern_to_user64(pstk, &ss64);
1338 error = copyout(&ss64, uap->oss, sizeof(ss64));
1339 } else {
1340 struct user32_sigaltstack ss32;
1341 sigaltstack_kern_to_user32(pstk, &ss32);
1342 error = copyout(&ss32, uap->oss, sizeof(ss32));
1343 }
1344 if (error)
1345 return (error);
1346 }
1347 if (uap->nss == USER_ADDR_NULL)
1348 return (0);
1349 if (IS_64BIT_PROCESS(p)) {
1350 struct user64_sigaltstack ss64;
1351 error = copyin(uap->nss, &ss64, sizeof(ss64));
1352 sigaltstack_user64_to_kern(&ss64, &ss);
1353 } else {
1354 struct user32_sigaltstack ss32;
1355 error = copyin(uap->nss, &ss32, sizeof(ss32));
1356 sigaltstack_user32_to_kern(&ss32, &ss);
1357 }
1358 if (error)
1359 return (error);
1360 if ((ss.ss_flags & ~SA_DISABLE) != 0) {
1361 return(EINVAL);
1362 }
1363
1364 if (ss.ss_flags & SA_DISABLE) {
1365 /* if we are here, we are not in the signal handler, so no need to check */
1366 if (uth->uu_sigstk.ss_flags & SA_ONSTACK)
1367 return (EINVAL);
1368 uth->uu_flag &= ~UT_ALTSTACK;
1369 uth->uu_sigstk.ss_flags = ss.ss_flags;
1370 return (0);
1371 }
1372 if (onstack)
1373 return (EPERM);
1374 /* The older stack size was 8K; enforce that minimum to avoid compat problems */
1375 #define OLDMINSIGSTKSZ 8*1024
1376 if (ss.ss_size < OLDMINSIGSTKSZ)
1377 return (ENOMEM);
1378 uth->uu_flag |= UT_ALTSTACK;
1379 uth->uu_sigstk= ss;
1380 return (0);
1381 }
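/*
 * Userspace sketch of the syscall above: register an alternate stack,
 * then install a handler with SA_ONSTACK so it runs there (the usual way
 * to catch SIGSEGV from stack overflow). SIGSTKSZ comfortably exceeds the
 * OLDMINSIGSTKSZ floor enforced above. Illustrative only; not compiled
 * here.
 */
#if 0
#include <signal.h>
#include <stdlib.h>

static int use_altstack(void (*handler)(int))
{
	stack_t ss;
	struct sigaction sa;

	ss.ss_sp = malloc(SIGSTKSZ);		/* >= the 8K minimum enforced above */
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	if (ss.ss_sp == NULL || sigaltstack(&ss, NULL) == -1)
		return -1;

	sa.sa_handler = handler;
	sa.sa_flags = SA_ONSTACK;		/* sets the ps_sigonstack bit for this signal */
	sigemptyset(&sa.sa_mask);
	return sigaction(SIGSEGV, &sa, NULL);
}
#endif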
1382
1383 int
1384 kill(proc_t cp, struct kill_args *uap, __unused int32_t *retval)
1385 {
1386 proc_t p;
1387 kauth_cred_t uc = kauth_cred_get();
1388 int posix = uap->posix; /* !0 if posix behaviour desired */
1389
1390 AUDIT_ARG(pid, uap->pid);
1391 AUDIT_ARG(signum, uap->signum);
1392
1393 if ((u_int)uap->signum >= NSIG)
1394 return (EINVAL);
1395 if (uap->pid > 0) {
1396 /* kill single process */
1397 if ((p = proc_find(uap->pid)) == NULL) {
1398 if ((p = pzfind(uap->pid)) != NULL) {
1399 /*
1400 * IEEE Std 1003.1-2001: return success
1401 * when killing a zombie.
1402 */
1403 return (0);
1404 }
1405 return (ESRCH);
1406 }
1407 AUDIT_ARG(process, p);
1408 if (!cansignal(cp, uc, p, uap->signum, 0)) {
1409 proc_rele(p);
1410 return(EPERM);
1411 }
1412 if (uap->signum)
1413 psignal(p, uap->signum);
1414 proc_rele(p);
1415 return (0);
1416 }
1417 switch (uap->pid) {
1418 case -1: /* broadcast signal */
1419 return (killpg1(cp, uap->signum, 0, 1, posix));
1420 case 0: /* signal own process group */
1421 return (killpg1(cp, uap->signum, 0, 0, posix));
1422 default: /* negative explicit process group */
1423 return (killpg1(cp, uap->signum, -(uap->pid), 0, posix));
1424 }
1425 /* NOTREACHED */
1426 }
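/*
 * Userspace sketch of the pid encodings dispatched above (the pid values
 * are hypothetical). Illustrative only; not compiled here.
 */
#if 0
#include <signal.h>

static void kill_examples(void)
{
	kill(1234, SIGTERM);	/* pid > 0: exactly one process */
	kill(0, SIGTERM);	/* pid == 0: caller's own process group */
	kill(-1, SIGTERM);	/* pid == -1: broadcast via killpg1() */
	kill(-5678, SIGTERM);	/* pid < -1: process group 5678 */
	kill(1234, 0);		/* signum 0: permission/existence probe only */
}
#endif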
1427
1428 static int
1429 killpg1_filt(proc_t p, void * arg)
1430 {
1431 struct killpg1_filtargs * kfargp = (struct killpg1_filtargs *)arg;
1432 proc_t cp = kfargp->cp;
1433 int posix = kfargp->posix;
1434
1435
1436 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
1437 (!posix && p == cp))
1438 return(0);
1439 else
1440 return(1);
1441 }
1442
1443
1444 static int
1445 killpg1_pgrpfilt(proc_t p, __unused void * arg)
1446 {
1447 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
1448 (p->p_stat == SZOMB))
1449 return(0);
1450 else
1451 return(1);
1452 }
1453
1454
1455
1456 static int
1457 killpg1_callback(proc_t p, void * arg)
1458 {
1459 struct killpg1_iterargs * kargp = (struct killpg1_iterargs *)arg;
1460 proc_t cp = kargp->cp;
1461 kauth_cred_t uc = kargp->uc; /* refcounted by the caller; safe to use internal fields */
1462 int signum = kargp->signum;
1463 int * nfoundp = kargp->nfoundp;
1464 int n;
1465 int zombie = 0;
1466 int error = 0;
1467
1468 if ((kargp->zombie != 0) && ((p->p_listflag & P_LIST_EXITED) == P_LIST_EXITED))
1469 zombie = 1;
1470
1471 if (zombie != 0) {
1472 proc_list_lock();
1473 error = cansignal(cp, uc, p, signum, zombie);
1474 proc_list_unlock();
1475
1476 if (error != 0 && nfoundp != NULL) {
1477 n = *nfoundp;
1478 *nfoundp = n+1;
1479 }
1480 } else {
1481 if (cansignal(cp, uc, p, signum, 0) == 0)
1482 return(PROC_RETURNED);
1483
1484 if (nfoundp != NULL) {
1485 n = *nfoundp;
1486 *nfoundp = n+1;
1487 }
1488 if (signum != 0)
1489 psignal(p, signum);
1490 }
1491
1492 return(PROC_RETURNED);
1493 }
1494
1495 /*
1496 * Common code for kill process group/broadcast kill.
1497 * cp is calling process.
1498 */
1499 int
1500 killpg1(proc_t cp, int signum, int pgid, int all, int posix)
1501 {
1502 kauth_cred_t uc;
1503 struct pgrp *pgrp;
1504 int nfound = 0;
1505 struct killpg1_iterargs karg;
1506 struct killpg1_filtargs kfarg;
1507 int error = 0;
1508
1509 uc = kauth_cred_proc_ref(cp);
1510 if (all) {
1511 /*
1512 * broadcast
1513 */
1514 kfarg.posix = posix;
1515 kfarg.cp = cp;
1516
1517 karg.cp = cp;
1518 karg.uc = uc;
1519 karg.nfoundp = &nfound;
1520 karg.signum = signum;
1521 karg.zombie = 1;
1522
1523 proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST), killpg1_callback, &karg, killpg1_filt, (void *)&kfarg);
1524
1525 } else {
1526 if (pgid == 0) {
1527 /*
1528 * zero pgid means send to my process group.
1529 */
1530 pgrp = proc_pgrp(cp);
1531 } else {
1532 pgrp = pgfind(pgid);
1533 if (pgrp == NULL) {
1534 error = ESRCH;
1535 goto out;
1536 }
1537 }
1538
1539 karg.nfoundp = &nfound;
1540 karg.uc = uc;
1541 karg.signum = signum;
1542 karg.cp = cp;
1543 karg.zombie = 0;
1544
1545
1546 /* PGRP_DROPREF drops the pgrp reference */
1547 pgrp_iterate(pgrp, PGRP_BLOCKITERATE | PGRP_DROPREF, killpg1_callback, &karg,
1548 killpg1_pgrpfilt, NULL);
1549 }
1550 error = (nfound ? 0 : (posix ? EPERM : ESRCH));
1551 out:
1552 kauth_cred_unref(&uc);
1553 return (error);
1554 }
1555
1556
1557 /*
1558 * Send a signal to a process group.
1559 */
1560 void
1561 gsignal(int pgid, int signum)
1562 {
1563 struct pgrp *pgrp;
1564
1565 if (pgid && (pgrp = pgfind(pgid))) {
1566 pgsignal(pgrp, signum, 0);
1567 pg_rele(pgrp);
1568 }
1569 }
1570
1571 /*
1572 * Send a signal to a process group. If checkctty is 1,
1573 * limit to members which have a controlling terminal.
1574 */
1575
1576 static int
1577 pgsignal_filt(proc_t p, void * arg)
1578 {
1579 int checkctty = *(int*)arg;
1580
1581 if ((checkctty == 0) || p->p_flag & P_CONTROLT)
1582 return(1);
1583 else
1584 return(0);
1585 }
1586
1587
1588 static int
1589 pgsignal_callback(proc_t p, void * arg)
1590 {
1591 int signum = *(int*)arg;
1592
1593 psignal(p, signum);
1594 return(PROC_RETURNED);
1595 }
1596
1597
1598 void
1599 pgsignal(struct pgrp *pgrp, int signum, int checkctty)
1600 {
1601 if (pgrp != PGRP_NULL) {
1602 pgrp_iterate(pgrp, PGRP_BLOCKITERATE, pgsignal_callback, &signum, pgsignal_filt, &checkctty);
1603 }
1604 }
1605
1606
1607 void
1608 tty_pgsignal(struct tty *tp, int signum, int checkctty)
1609 {
1610 struct pgrp * pg;
1611
1612 pg = tty_pgrp(tp);
1613 if (pg != PGRP_NULL) {
1614 pgrp_iterate(pg, PGRP_BLOCKITERATE, pgsignal_callback, &signum, pgsignal_filt, &checkctty);
1615 pg_rele(pg);
1616 }
1617 }
1618 /*
1619 * Send a signal caused by a trap to a specific thread.
1620 */
1621 void
1622 threadsignal(thread_t sig_actthread, int signum, mach_exception_code_t code)
1623 {
1624 struct uthread *uth;
1625 struct task * sig_task;
1626 proc_t p;
1627 int mask;
1628
1629 if ((u_int)signum >= NSIG || signum == 0)
1630 return;
1631
1632 mask = sigmask(signum);
1633 if ((mask & threadmask) == 0)
1634 return;
1635 sig_task = get_threadtask(sig_actthread);
1636 p = (proc_t)(get_bsdtask_info(sig_task));
1637
1638 uth = get_bsdthread_info(sig_actthread);
1639 if (uth->uu_flag & UT_VFORK)
1640 p = uth->uu_proc;
1641
1642 proc_lock(p);
1643 if (!(p->p_lflag & P_LTRACED) && (p->p_sigignore & mask)) {
1644 proc_unlock(p);
1645 return;
1646 }
1647
1648 uth->uu_siglist |= mask;
1649 uth->uu_code = code;
1650 proc_unlock(p);
1651
1652 /* mark on process as well */
1653 signal_setast(sig_actthread);
1654 }
1655
1656 static kern_return_t
1657 get_signalthread(proc_t p, int signum, thread_t * thr)
1658 {
1659 struct uthread *uth;
1660 sigset_t mask = sigmask(signum);
1661 thread_t sig_thread;
1662 struct task * sig_task = p->task;
1663 kern_return_t kret;
1664
1665 *thr = THREAD_NULL;
1666
1667 if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
1668 sig_thread = p->p_vforkact;
1669 kret = check_actforsig(sig_task, sig_thread, 1);
1670 if (kret == KERN_SUCCESS) {
1671 *thr = sig_thread;
1672 return(KERN_SUCCESS);
1673 }else
1674 return(KERN_FAILURE);
1675 }
1676
1677 proc_lock(p);
1678 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
1679 if(((uth->uu_flag & UT_NO_SIGMASK)== 0) &&
1680 (((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask))) {
1681 if (check_actforsig(p->task, uth->uu_context.vc_thread, 1) == KERN_SUCCESS) {
1682 *thr = uth->uu_context.vc_thread;
1683 proc_unlock(p);
1684 return(KERN_SUCCESS);
1685 }
1686 }
1687 }
1688 proc_unlock(p);
1689 if (get_signalact(p->task, thr, 1) == KERN_SUCCESS) {
1690 return(KERN_SUCCESS);
1691 }
1692
1693 return(KERN_FAILURE);
1694 }
1695
1696 /*
1697 * Send the signal to the process. If the signal has an action, the action
1698 * is usually performed by the target process rather than the caller; we add
1699 * the signal to the set of pending signals for the process.
1700 *
1701 * Exceptions:
1702 * o When a stop signal is sent to a sleeping process that takes the
1703 * default action, the process is stopped without awakening it.
1704 * o SIGCONT restarts stopped processes (or puts them back to sleep)
1705 * regardless of the signal action (eg, blocked or ignored).
1706 *
1707 * Other ignored signals are discarded immediately.
1708 */
1709 static void
1710 psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum)
1711 {
1712 int prop;
1713 sig_t action = NULL;
1714 proc_t sig_proc;
1715 thread_t sig_thread;
1716 register task_t sig_task;
1717 int mask;
1718 struct uthread *uth;
1719 kern_return_t kret;
1720 uid_t r_uid;
1721 proc_t pp;
1722 kauth_cred_t my_cred;
1723
1724 if ((u_int)signum >= NSIG || signum == 0)
1725 panic("psignal signal number");
1726 mask = sigmask(signum);
1727 prop = sigprop[signum];
1728
1729 #if SIGNAL_DEBUG
1730 if(rdebug_proc && (p != PROC_NULL) && (p == rdebug_proc)) {
1731 ram_printf(3);
1732 }
1733 #endif /* SIGNAL_DEBUG */
1734
1735 /*
1736 * We will need the task pointer later. Grab it now to
1737 * check for a zombie process. Also don't send signals
1738 * to kernel internal tasks.
1739 */
1740 if (flavor & PSIG_VFORK) {
1741 sig_task = task;
1742 sig_thread = thread;
1743 sig_proc = p;
1744 } else if (flavor & PSIG_THREAD) {
1745 sig_task = get_threadtask(thread);
1746 sig_thread = thread;
1747 sig_proc = (proc_t)get_bsdtask_info(sig_task);
1748 } else {
1749 sig_task = p->task;
1750 sig_thread = (struct thread *)0;
1751 sig_proc = p;
1752 }
1753
1754 if ((sig_task == TASK_NULL) || is_kerneltask(sig_task))
1755 return;
1756
1757 /*
1758 * do not send signals to the process that has the thread
1759 * doing a reboot(); doing so would mark that thread aborted
1760 * and can cause I/O failures which will cause data loss. There's
1761 * also no need to send a signal to a process that is in the middle
1762 * of being torn down.
1763 */
1764 if (ISSET(sig_proc->p_flag, P_REBOOT) ||
1765 ISSET(sig_proc->p_lflag, P_LEXIT))
1766 return;
1767
1768 if( (flavor & (PSIG_VFORK | PSIG_THREAD)) == 0) {
1769 proc_knote(sig_proc, NOTE_SIGNAL | signum);
1770 }
1771
1772 if ((flavor & PSIG_LOCKED)== 0)
1773 proc_signalstart(sig_proc, 0);
1774
1775 /*
1776 * Deliver the signal to the first thread in the task. This
1777 * allows single threaded applications which use signals to
1778 * be able to be linked with multithreaded libraries. We have
1779 * an implicit reference to the current thread, but need
1780 * an explicit one otherwise. The thread reference keeps
1781 * the corresponding task data structures around too. This
1782 * reference is released by thread_deallocate.
1783 */
1784
1785
1786 if (((flavor & PSIG_VFORK) == 0) && ((sig_proc->p_lflag & P_LTRACED) == 0) && (sig_proc->p_sigignore & mask)) {
1787 DTRACE_PROC3(signal__discard, thread_t, sig_thread, proc_t, sig_proc, int, signum);
1788 goto psigout;
1789 }
1790
1791 if (flavor & PSIG_VFORK) {
1792 action = SIG_DFL;
1793 act_set_astbsd(sig_thread);
1794 kret = KERN_SUCCESS;
1795 } else if (flavor & PSIG_THREAD) {
1796 /* If successful return with ast set */
1797 kret = check_actforsig(sig_task, sig_thread, 1);
1798 } else {
1799 /* If successful return with ast set */
1800 kret = get_signalthread(sig_proc, signum, &sig_thread);
1801 }
1802 if (kret != KERN_SUCCESS) {
1803 #if SIGNAL_DEBUG
1804 ram_printf(1);
1805 #endif /* SIGNAL_DEBUG */
1806 goto psigout;
1807 }
1808
1809
1810 uth = get_bsdthread_info(sig_thread);
1811
1812 /*
1813 * If proc is traced, always give parent a chance.
1814 */
1815
1816 if ((flavor & PSIG_VFORK) == 0) {
1817 if (sig_proc->p_lflag & P_LTRACED)
1818 action = SIG_DFL;
1819 else {
1820 /*
1821 * If the signal is being ignored,
1822 * then we forget about it immediately.
1823 * (Note: we don't set SIGCONT in p_sigignore,
1824 * and if it is set to SIG_IGN,
1825 * action will be SIG_DFL here.)
1826 */
1827 if (sig_proc->p_sigignore & mask)
1828 goto psigout;
1829 if (uth->uu_sigwait & mask)
1830 action = KERN_SIG_WAIT;
1831 else if (uth->uu_sigmask & mask)
1832 action = KERN_SIG_HOLD;
1833 else if (sig_proc->p_sigcatch & mask)
1834 action = KERN_SIG_CATCH;
1835 else
1836 action = SIG_DFL;
1837 }
1838 }
1839
1840
1841 proc_lock(sig_proc);
1842
1843 if (sig_proc->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
1844 (sig_proc->p_lflag & P_LTRACED) == 0)
1845 sig_proc->p_nice = NZERO;
1846
1847 if (prop & SA_CONT)
1848 uth->uu_siglist &= ~stopsigmask;
1849
1850 if (prop & SA_STOP) {
1851 struct pgrp *pg;
1852 /*
1853 * If sending a tty stop signal to a member of an orphaned
1854 * process group, discard the signal here if the action
1855 * is default; don't stop the process below if sleeping,
1856 * and don't clear any pending SIGCONT.
1857 */
1858 proc_unlock(sig_proc);
1859 pg = proc_pgrp(sig_proc);
1860 if (prop & SA_TTYSTOP && pg->pg_jobc == 0 &&
1861 action == SIG_DFL) {
1862 pg_rele(pg);
1863 goto psigout;
1864 }
1865 pg_rele(pg);
1866 proc_lock(sig_proc);
1867 uth->uu_siglist &= ~contsigmask;
1868 }
1869
1870 uth->uu_siglist |= mask;
1871 /*
1872 * Repost the AST in case the signal thread has already processed
1873 * its AST and missed the signal post.
1874 */
1875 if (action == KERN_SIG_CATCH)
1876 act_set_astbsd(sig_thread);
1877
1878
1879 /*
1880 * Defer further processing for signals which are held,
1881 * except that stopped processes must be continued by SIGCONT.
1882 */
1883 /* vfork will not go thru as action is SIG_DFL */
1884 if ((action == KERN_SIG_HOLD) && ((prop & SA_CONT) == 0 || sig_proc->p_stat != SSTOP)) {
1885 proc_unlock(sig_proc);
1886 goto psigout;
1887 }
1888 /*
1889 * SIGKILL priority twiddling moved here from above because
1890 * it needs sig_thread. Could merge it into large switch
1891 * below if we didn't care about priority for tracing
1892 * as SIGKILL's action is always SIG_DFL.
1893 */
1894 if ((signum == SIGKILL) && (sig_proc->p_nice > NZERO)) {
1895 sig_proc->p_nice = NZERO;
1896 }
1897
1898 /*
1899 * Process is traced - wake it up (if not already
1900 * stopped) so that it can discover the signal in
1901 * issig() and stop for the parent.
1902 */
1903 if (sig_proc->p_lflag & P_LTRACED) {
1904 if (sig_proc->p_stat != SSTOP)
1905 goto runlocked;
1906 else {
1907 proc_unlock(sig_proc);
1908 goto psigout;
1909 }
1910 }
1911 if ((flavor & PSIG_VFORK) != 0)
1912 goto runlocked;
1913
1914 if (action == KERN_SIG_WAIT) {
1915 #if CONFIG_DTRACE
1916 /*
1917 * DTrace proc signal-clear returns a siginfo_t. Collect the needed info.
1918 */
1919 r_uid = kauth_getruid(); /* per thread credential; protected by our thread context */
1920
1921 bzero((caddr_t)&(uth->t_dtrace_siginfo), sizeof(uth->t_dtrace_siginfo));
1922
1923 uth->t_dtrace_siginfo.si_signo = signum;
1924 uth->t_dtrace_siginfo.si_pid = current_proc()->p_pid;
1925 uth->t_dtrace_siginfo.si_status = W_EXITCODE(signum, 0);
1926 uth->t_dtrace_siginfo.si_uid = r_uid;
1927 uth->t_dtrace_siginfo.si_code = 0;
1928 #endif
1929 uth->uu_sigwait = mask;
1930 uth->uu_siglist &= ~mask;
1931 wakeup(&uth->uu_sigwait);
1932 /* if it is SIGCONT resume whole process */
1933 if (prop & SA_CONT) {
1934 OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
1935 sig_proc->p_contproc = current_proc()->p_pid;
1936
1937 proc_unlock(sig_proc);
1938 (void) task_resume(sig_task);
1939 goto psigout;
1940 }
1941 proc_unlock(sig_proc);
1942 goto psigout;
1943 }
1944
1945 if (action != SIG_DFL) {
1946 /*
1947 * User wants to catch the signal.
1948 * Wake up the thread, but don't un-suspend it
1949 * (except for SIGCONT).
1950 */
1951 if (prop & SA_CONT) {
1952 OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
1953 proc_unlock(sig_proc);
1954 (void) task_resume(sig_task);
1955 proc_lock(sig_proc);
1956 sig_proc->p_stat = SRUN;
1957 } else if (sig_proc->p_stat == SSTOP) {
1958 proc_unlock(sig_proc);
1959 goto psigout;
1960 }
1961 /*
1962 * Fill out siginfo structure information to pass to the
1963 * signalled process/thread sigaction handler, when it
1964 * wakes up. si_code is 0 because this is an ordinary
1965 * signal, not a SIGCHLD, and so si_status is the signal
1966 * number itself, instead of the child process exit status.
1967 * We shift this left because it will be shifted right before
1968 * it is passed to user space. kind of ugly to use W_EXITCODE
1969 * this way, but it beats defining a new macro.
1970 *
1971 * Note: Avoid the SIGCHLD recursion case!
1972 */
1973 if (signum != SIGCHLD) {
1974 proc_unlock(sig_proc);
1975 r_uid = kauth_getruid();
1976 proc_lock(sig_proc);
1977
1978 sig_proc->si_pid = current_proc()->p_pid;
1979 sig_proc->si_status = W_EXITCODE(signum, 0);
1980 sig_proc->si_uid = r_uid;
1981 sig_proc->si_code = 0;
1982 }
1983
1984 goto runlocked;
1985 } else {
1986 /* Default action - varies */
1987 if (mask & stopsigmask) {
1988 /*
1989 * These are the signals which by default
1990 * stop a process.
1991 *
1992 * Don't clog system with children of init
1993 * stopped from the keyboard.
1994 */
1995 if (!(prop & SA_STOP) && sig_proc->p_pptr == initproc) {
1996 proc_unlock(sig_proc);
1997 psignal_locked(sig_proc, SIGKILL);
1998 proc_lock(sig_proc);
1999 uth->uu_siglist &= ~mask;
2000 proc_unlock(sig_proc);
2001 goto psigout;
2002 }
2003
2004 /*
2005 * Stop the task
2006 * if task hasn't already been stopped by
2007 * a signal.
2008 */
2009 uth->uu_siglist &= ~mask;
2010 if (sig_proc->p_stat != SSTOP) {
2011 sig_proc->p_xstat = signum;
2012 sig_proc->p_stat = SSTOP;
2013 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &sig_proc->p_flag);
2014 sig_proc->p_lflag &= ~P_LWAITED;
2015 proc_unlock(sig_proc);
2016
2017 pp = proc_parentholdref(sig_proc);
2018 stop(sig_proc, pp);
2019 if ((pp != PROC_NULL) && ((pp->p_flag & P_NOCLDSTOP) == 0)) {
2020
2021 my_cred = kauth_cred_proc_ref(sig_proc);
2022 r_uid = kauth_cred_getruid(my_cred);
2023 kauth_cred_unref(&my_cred);
2024
2025 proc_lock(sig_proc);
2026 pp->si_pid = sig_proc->p_pid;
2027 /*
2028 * POSIX: the SIGCHLD sent to the parent
2029 * for a stopped child must carry the
2030 * child's signal number in si_status.
2031 */
2032 if (signum != SIGSTOP)
2033 pp->si_status = WEXITSTATUS(sig_proc->p_xstat);
2034 else
2035 pp->si_status = W_EXITCODE(signum, signum);
2036 pp->si_code = CLD_STOPPED;
2037 pp->si_uid = r_uid;
2038 proc_unlock(sig_proc);
2039
2040 psignal(pp, SIGCHLD);
2041 }
2042 if (pp != PROC_NULL)
2043 proc_parentdropref(pp, 0);
2044 } else
2045 proc_unlock(sig_proc);
2046 goto psigout;
2047 }
2048
2049 DTRACE_PROC3(signal__send, thread_t, sig_thread, proc_t, p, int, signum);
2050
2051 /*
2052 * We enter the switch with the sig_proc lock held;
2053 * it is dropped by the time we get out of the switch.
2054 */
2055 switch (signum) {
2056 /*
2057 * Signals ignored by default have been dealt
2058 * with already, since their bits are on in
2059 * p_sigignore.
2060 */
2061
2062 case SIGKILL:
2063 /*
2064 * Kill signal always sets process running and
2065 * unsuspends it.
2066 */
2067 /*
2068 * Process will be running after 'run'
2069 */
2070 sig_proc->p_stat = SRUN;
2071 /*
2072 * In scenarios where suspend/resume races the signal,
2073 * the AST_BSD flag may be missing by the time we get
2074 * here; set it again to avoid the race. This was the
2075 * scenario with spindump-enabled shutdowns. We should
2076 * cover this properly down the line.
2077 */
2078 act_set_astbsd(sig_thread);
2079 thread_abort(sig_thread);
2080 proc_unlock(sig_proc);
2081
2082 goto psigout;
2083
2084 case SIGCONT:
2085 /*
2086 * Let the process run. If it's sleeping on an
2087 * event, it remains so.
2088 */
2089 OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
2090 sig_proc->p_contproc = sig_proc->p_pid;
2091
2092 proc_unlock(sig_proc);
2093 (void) task_resume(sig_task);
2094 proc_lock(sig_proc);
2095 /*
2096 * When processing a SIGCONT, we need to check
2097 * to see if there are signals pending that
2098 * were not delivered because we had been
2099 * previously stopped. If that's the case,
2100 * we need to thread_abort_safely() to trigger
2101 * interruption of the current system call to
2102 * cause their handlers to fire. If it's only
2103 * the SIGCONT, then don't wake up.
2104 */
2105 if (((flavor & (PSIG_VFORK|PSIG_THREAD)) == 0) && (((uth->uu_siglist & ~uth->uu_sigmask) & ~sig_proc->p_sigignore) & ~mask)) {
2106 uth->uu_siglist &= ~mask;
2107 sig_proc->p_stat = SRUN;
2108 goto runlocked;
2109 }
2110
2111 uth->uu_siglist &= ~mask;
2112 sig_proc->p_stat = SRUN;
2113 proc_unlock(sig_proc);
2114 goto psigout;
2115
2116 default:
2117 /*
2118 * A signal which has a default action of killing
2119 * the process, and for which there is no handler,
2120 * needs to act like SIGKILL
2121 */
2122 if (((flavor & (PSIG_VFORK|PSIG_THREAD)) == 0) && (action == SIG_DFL) && (prop & SA_KILL)) {
2123 sig_proc->p_stat = SRUN;
2124 proc_unlock(sig_proc);
2125 thread_abort(sig_thread);
2126 goto psigout;
2127 }
2128
2129 /*
2130 * All other signals wake up the process, but don't
2131 * resume it.
2132 */
2133 if (sig_proc->p_stat == SSTOP) {
2134 proc_unlock(sig_proc);
2135 goto psigout;
2136 }
2137 goto runlocked;
2138 }
2139 }
2140 /*NOTREACHED*/
2141
2142 runlocked:
2143 /*
2144 * If we're being traced (possibly because someone attached us
2145 * while we were stopped), check for a signal from the debugger.
2146 */
2147 if (sig_proc->p_stat == SSTOP) {
2148 if ((sig_proc->p_lflag & P_LTRACED) != 0 && sig_proc->p_xstat != 0)
2149 uth->uu_siglist |= sigmask(sig_proc->p_xstat);
2150 if ((flavor & PSIG_VFORK) != 0) {
2151 sig_proc->p_stat = SRUN;
2152 }
2153 proc_unlock(sig_proc);
2154 } else {
2155 /*
2156 * The BSD equivalent of setrunnable(p):
2157 * wake up the thread if it is interruptible.
2158 */
2159 sig_proc->p_stat = SRUN;
2160 proc_unlock(sig_proc);
2161 if ((flavor & PSIG_VFORK) == 0)
2162 thread_abort_safely(sig_thread);
2163 }
2164 psigout:
2165 if ((flavor & PSIG_LOCKED) == 0) {
2166 proc_signalend(sig_proc, 0);
2167 }
2168 }
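
/*
 * Both exit paths of psignal_internal() converge above: "runlocked"
 * makes the target runnable (re-queueing the debugger's signal if the
 * process is stopped and traced, and aborting an interruptible wait so
 * the thread notices the signal), while "psigout" drops the signal
 * serialization (proc_signalend()) unless the PSIG_LOCKED flavor told
 * us the caller manages it.
 */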
2169
2170 void
2171 psignal(proc_t p, int signum)
2172 {
2173 psignal_internal(p, NULL, NULL, 0, signum);
2174 }
2175
2176 void
2177 psignal_locked(proc_t p, int signum)
2178 {
2179 psignal_internal(p, NULL, NULL, PSIG_LOCKED, signum);
2180 }
2181
2182 void
2183 psignal_vfork(proc_t p, task_t new_task, thread_t thread, int signum)
2184 {
2185 psignal_internal(p, new_task, thread, PSIG_VFORK, signum);
2186 }
2187
2188 static void
2189 psignal_uthread(thread_t thread, int signum)
2190 {
2191 psignal_internal(PROC_NULL, TASK_NULL, thread, PSIG_THREAD, signum);
2192 }
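
/*
 * The four entry points above differ only in the PSIG_* flavor they
 * pass to psignal_internal(): psignal() posts a signal to a process,
 * psignal_locked() is for callers that already hold the signal
 * serialization (PSIG_LOCKED suppresses the proc_signalend() at
 * psigout), psignal_vfork() targets the thread acting for a vfork()ed
 * child, and psignal_uthread() targets one specific thread.
 */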
2193
2194
2195 /*
2196 * If the current process has received a signal (one that should be caught or
2197 * cause termination, or should interrupt the current syscall), return the signal number.
2198 * Stop signals with default action are processed immediately, then cleared;
2199 * they aren't returned. This is checked after each entry to the system for
2200 * a syscall or trap (though this can usually be done without calling issignal
2201 * by checking the pending signal masks in the CURSIG macro). The normal call
2202 * sequence is
2203 *
2204 * while (signum = CURSIG(curproc))
2205 * postsig(signum);
2206 */
2207 int
2208 issignal_locked(proc_t p)
2209 {
2210 int signum, mask, prop, sigbits;
2211 thread_t cur_act;
2212 struct uthread * ut;
2213 proc_t pp;
2214 kauth_cred_t my_cred;
2215 int retval = 0;
2216 uid_t r_uid;
2217
2218 cur_act = current_thread();
2219
2220 #if SIGNAL_DEBUG
2221 if(rdebug_proc && (p == rdebug_proc)) {
2222 ram_printf(3);
2223 }
2224 #endif /* SIGNAL_DEBUG */
2225
2226 /*
2227 * Try to grab the signal lock.
2228 */
2229 if (sig_try_locked(p) <= 0) {
2230 return(0);
2231 }
2232
2233 proc_signalstart(p, 1);
2234
2235 ut = get_bsdthread_info(cur_act);
2236 for(;;) {
2237 sigbits = ut->uu_siglist & ~ut->uu_sigmask;
2238
2239 if (p->p_lflag & P_LPPWAIT)
2240 sigbits &= ~stopsigmask;
2241 if (sigbits == 0) { /* no signal to send */
2242 retval = 0;
2243 goto out;
2244 }
2245
2246 signum = ffs((long)sigbits);
2247 mask = sigmask(signum);
2248 prop = sigprop[signum];
2249
2250 /*
2251 * We should see pending but ignored signals
2252 * only if P_LTRACED was on when they were posted.
2253 */
2254 if (mask & p->p_sigignore && (p->p_lflag & P_LTRACED) == 0) {
2255 ut->uu_siglist &= ~mask; /* take the signal! */
2256 continue;
2257 }
2258 if (p->p_lflag & P_LTRACED && (p->p_lflag & P_LPPWAIT) == 0) {
2259 task_t task;
2260 /*
2261 * If traced, always stop, and stay
2262 * stopped until released by the debugger.
2263 */
2264 /* ptrace debugging */
2265 p->p_xstat = signum;
2266
2267 if (p->p_lflag & P_LSIGEXC) {
2268 p->sigwait = TRUE;
2269 p->sigwait_thread = cur_act;
2270 p->p_stat = SSTOP;
2271 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
2272 p->p_lflag &= ~P_LWAITED;
2273 ut->uu_siglist &= ~mask; /* clear the old signal */
2274 proc_signalend(p, 1);
2275 proc_unlock(p);
2276 do_bsdexception(EXC_SOFTWARE, EXC_SOFT_SIGNAL, signum);
2277 proc_lock(p);
2278 proc_signalstart(p, 1);
2279 } else {
2280 proc_unlock(p);
2281 my_cred = kauth_cred_proc_ref(p);
2282 r_uid = kauth_cred_getruid(my_cred);
2283 kauth_cred_unref(&my_cred);
2284
2285 pp = proc_parentholdref(p);
2286 if (pp != PROC_NULL) {
2287 proc_lock(pp);
2288
2289 pp->si_pid = p->p_pid;
2290 pp->si_status = p->p_xstat;
2291 pp->si_code = CLD_TRAPPED;
2292 pp->si_uid = r_uid;
2293
2294 proc_unlock(pp);
2295 }
2296
2297 /*
2298 * XXX Have to really stop for debuggers;
2299 * XXX stop() doesn't do the right thing.
2300 * XXX Inline the task_suspend because we
2301 * XXX have to diddle Unix state in the
2302 * XXX middle of it.
2303 */
2304 task = p->task;
2305 task_suspend(task);
2306
2307 proc_lock(p);
2308 p->sigwait = TRUE;
2309 p->sigwait_thread = cur_act;
2310 p->p_stat = SSTOP;
2311 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
2312 p->p_lflag &= ~P_LWAITED;
2313 ut->uu_siglist &= ~mask; /* clear the old signal */
2314
2315 proc_signalend(p, 1);
2316 proc_unlock(p);
2317
2318 if (pp != PROC_NULL) {
2319 psignal(pp, SIGCHLD);
2320 proc_list_lock();
2321 wakeup((caddr_t)pp);
2322 proc_parentdropref(pp, 1);
2323 proc_list_unlock();
2324 }
2325
2326 assert_wait((caddr_t)&p->sigwait, (THREAD_INTERRUPTIBLE));
2327 thread_block(THREAD_CONTINUE_NULL);
2328 proc_lock(p);
2329 proc_signalstart(p, 1);
2330 }
2331
2332 p->sigwait = FALSE;
2333 p->sigwait_thread = NULL;
2334 wakeup((caddr_t)&p->sigwait_thread);
2335
2336 /*
2337 * This code detects when gdb is killed while the
2338 * traced program is still attached: pgsignal would
2339 * deliver the SIGKILL to the traced program, and
2340 * that is what we are looking for here.
2341 */
2342 if (ut->uu_siglist & sigmask(SIGKILL)) {
2343 /*
2344 * Wait event may still be outstanding;
2345 * clear it, since sig_lock_to_exit will
2346 * wait.
2347 */
2348 clear_wait(current_thread(), THREAD_INTERRUPTED);
2349 sig_lock_to_exit(p);
2350 /*
2351 * Since this thread will be resumed
2352 * to allow the current syscall to
2353 * complete, we must save u_qsave
2354 * before calling exit(), because exit()
2355 * calls closef(), which can trash u_qsave.
2356 */
2357 proc_signalend(p, 1);
2358 proc_unlock(p);
2359 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_FRCEXIT) | DBG_FUNC_NONE,
2360 p->p_pid, W_EXITCODE(0, SIGKILL), 2, 0, 0);
2361 exit1(p, W_EXITCODE(0, SIGKILL), (int *)NULL);
2362 proc_lock(p);
2363 return(0);
2364 }
2365
2366 /*
2367 * We may have to quit
2368 */
2369 if (thread_should_abort(current_thread())) {
2370 retval = 0;
2371 goto out;
2372 }
2373 /*
2374 * If parent wants us to take the signal,
2375 * then it will leave it in p->p_xstat;
2376 * otherwise we just look for signals again.
2377 */
2378 signum = p->p_xstat;
2379 if (signum == 0)
2380 continue;
2381 /*
2382 * Put the new signal into p_siglist. If the
2383 * signal is being masked, look for other signals.
2384 */
2385 mask = sigmask(signum);
2386 ut->uu_siglist |= mask;
2387 if (ut->uu_sigmask & mask)
2388 continue;
2389 }
2390
2391 /*
2392 * Decide whether the signal should be returned.
2393 * Return the signal's number, or fall through
2394 * to clear it from the pending mask.
2395 */
2396
2397 switch ((long)p->p_sigacts->ps_sigact[signum]) {
2398
2399 case (long)SIG_DFL:
2400 /*
2401 * Don't take default actions on system processes.
2402 */
2403 if (p->p_ppid == 0) {
2404 #if DIAGNOSTIC
2405 /*
2406 * Are you sure you want to ignore SIGSEGV
2407 * in init? XXX
2408 */
2409 printf("Process (pid %d) got signal %d\n",
2410 p->p_pid, signum);
2411 #endif
2412 break; /* == ignore */
2413 }
2414
2415 /*
2416 * If there is a pending stop signal to process
2417 * with default action, stop here,
2418 * then clear the signal. However,
2419 * if the process is a member of an orphaned
2420 * process group, ignore tty stop signals.
2421 */
2422 if (prop & SA_STOP) {
2423 struct pgrp * pg;
2424
2425 proc_unlock(p);
2426 pg = proc_pgrp(p);
2427 if (p->p_lflag & P_LTRACED ||
2428 (pg->pg_jobc == 0 &&
2429 prop & SA_TTYSTOP)) {
2430 proc_lock(p);
2431 pg_rele(pg);
2432 break; /* == ignore */
2433 }
2434 pg_rele(pg);
2435 if (p->p_stat != SSTOP) {
2436 proc_lock(p);
2437 p->p_xstat = signum;
2438
2439 p->p_stat = SSTOP;
2440 p->p_lflag &= ~P_LWAITED;
2441 proc_unlock(p);
2442
2443 pp = proc_parentholdref(p);
2444 stop(p, pp);
2445 if ((pp != PROC_NULL) && ((pp->p_flag & P_NOCLDSTOP) == 0)) {
2446 my_cred = kauth_cred_proc_ref(p);
2447 r_uid = kauth_cred_getruid(my_cred);
2448 kauth_cred_unref(&my_cred);
2449
2450 proc_lock(pp);
2451 pp->si_pid = p->p_pid;
2452 pp->si_status = WEXITSTATUS(p->p_xstat);
2453 pp->si_code = CLD_STOPPED;
2454 pp->si_uid = r_uid;
2455 proc_unlock(pp);
2456
2457 psignal(pp, SIGCHLD);
2458 }
2459 if (pp != PROC_NULL)
2460 proc_parentdropref(pp, 0);
2461 }
2462 proc_lock(p);
2463 break;
2464 } else if (prop & SA_IGNORE) {
2465 /*
2466 * Except for SIGCONT, shouldn't get here.
2467 * Default action is to ignore; drop it.
2468 */
2469 break; /* == ignore */
2470 } else {
2471 ut->uu_siglist &= ~mask; /* take the signal! */
2472 retval = signum;
2473 goto out;
2474 }
2475
2476 /*NOTREACHED*/
2477 break;
2478
2479 case (long)SIG_IGN:
2480 /*
2481 * Masking above should prevent us from ever trying
2482 * to take action on an ignored signal other
2483 * than SIGCONT, unless the process is traced.
2484 */
2485 if ((prop & SA_CONT) == 0 &&
2486 (p->p_lflag & P_LTRACED) == 0)
2487 printf("issignal\n");
2488 break; /* == ignore */
2489
2490 default:
2491 /*
2492 * This signal has an action; let
2493 * postsig() process it.
2494 */
2495 ut->uu_siglist &= ~mask; /* take the signal! */
2496 retval = signum;
2497 goto out;
2498 }
2499 ut->uu_siglist &= ~mask; /* take the signal! */
2500 }
2501 /* NOTREACHED */
2502 out:
2503 proc_signalend(p, 1);
2504 return(retval);
2505 }
2506
2507 /* called from _sleep */
2508 int
2509 CURSIG(proc_t p)
2510 {
2511 int signum, mask, prop, sigbits;
2512 thread_t cur_act;
2513 struct uthread * ut;
2514 int retnum = 0;
2515
2516
2517 cur_act = current_thread();
2518
2519 ut = get_bsdthread_info(cur_act);
2520
2521 if (ut->uu_siglist == 0)
2522 return (0);
2523
2524 if (((ut->uu_siglist & ~ut->uu_sigmask) == 0) && ((p->p_lflag & P_LTRACED) == 0))
2525 return (0);
2526
2527 sigbits = ut->uu_siglist & ~ut->uu_sigmask;
2528
2529 for(;;) {
2530 if (p->p_lflag & P_LPPWAIT)
2531 sigbits &= ~stopsigmask;
2532 if (sigbits == 0) { /* no signal to send */
2533 return (retnum);
2534 }
2535
2536 signum = ffs((long)sigbits);
2537 mask = sigmask(signum);
2538 prop = sigprop[signum];
2539 sigbits &= ~mask; /* take the signal out */
2540
2541 /*
2542 * We should see pending but ignored signals
2543 * only if P_LTRACED was on when they were posted.
2544 */
2545 if (mask & p->p_sigignore && (p->p_lflag & P_LTRACED) == 0) {
2546 continue;
2547 }
2548
2549 if (p->p_lflag & P_LTRACED && (p->p_lflag & P_LPPWAIT) == 0) {
2550 return(signum);
2551 }
2552
2553 /*
2554 * Decide whether the signal should be returned.
2555 * Return the signal's number, or fall through
2556 * to clear it from the pending mask.
2557 */
2558
2559 switch ((long)p->p_sigacts->ps_sigact[signum]) {
2560
2561 case (long)SIG_DFL:
2562 /*
2563 * Don't take default actions on system processes.
2564 */
2565 if (p->p_ppid == 0) {
2566 #if DIAGNOSTIC
2567 /*
2568 * Are you sure you want to ignore SIGSEGV
2569 * in init? XXX
2570 */
2571 printf("Process (pid %d) got signal %d\n",
2572 p->p_pid, signum);
2573 #endif
2574 break; /* == ignore */
2575 }
2576
2577 /*
2578 * If there is a pending stop signal to process
2579 * with default action, stop here,
2580 * then clear the signal. However,
2581 * if the process is a member of an orphaned
2582 * process group, ignore tty stop signals.
2583 */
2584 if (prop & SA_STOP) {
2585 struct pgrp *pg;
2586
2587 pg = proc_pgrp(p);
2588
2589 if (p->p_lflag & P_LTRACED ||
2590 (pg->pg_jobc == 0 &&
2591 prop & SA_TTYSTOP)) {
2592 pg_rele(pg);
2593 break; /* == ignore */
2594 }
2595 pg_rele(pg);
2596 retnum = signum;
2597 break;
2598 } else if (prop & SA_IGNORE) {
2599 /*
2600 * Except for SIGCONT, shouldn't get here.
2601 * Default action is to ignore; drop it.
2602 */
2603 break; /* == ignore */
2604 } else {
2605 return (signum);
2606 }
2607 /*NOTREACHED*/
2608
2609 case (long)SIG_IGN:
2610 /*
2611 * Masking above should prevent us from ever trying
2612 * to take action on an ignored signal other
2613 * than SIGCONT, unless the process is traced.
2614 */
2615 if ((prop & SA_CONT) == 0 &&
2616 (p->p_lflag & P_LTRACED) == 0)
2617 printf("issignal\n");
2618 break; /* == ignore */
2619
2620 default:
2621 /*
2622 * This signal has an action; let
2623 * postsig() process it.
2624 */
2625 return (signum);
2626 }
2627 }
2628 /* NOTREACHED */
2629 }
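
/*
 * Unlike issignal_locked(), CURSIG() is purely an advisory check: it
 * clears bits only in its local copy of the pending set, never
 * consumes a signal or stops the process, and takes no locks, which is
 * what makes it usable from _sleep() to decide whether a sleeping
 * thread needs to wake up for signal processing.
 */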
2630
2631 /*
2632 * Put the argument process into the stopped state and notify the parent
2633 * via wakeup. Signals are handled elsewhere. The process must not be
2634 * on the run queue.
2635 */
2636 static void
2637 stop(proc_t p, proc_t parent)
2638 {
2639 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
2640 if ((parent != PROC_NULL) && (parent->p_stat != SSTOP)) {
2641 proc_list_lock();
2642 wakeup((caddr_t)parent);
2643 proc_list_unlock();
2644 }
2645 (void) task_suspend(p->task); /*XXX*/
2646 }
2647
2648 /*
2649 * Take the action for the specified signal
2650 * from the current set of pending signals.
2651 */
2652 void
2653 postsig_locked(int signum)
2654 {
2655 proc_t p = current_proc();
2656 struct sigacts *ps = p->p_sigacts;
2657 user_addr_t catcher;
2658 uint32_t code;
2659 int mask, returnmask;
2660 struct uthread * ut;
2661
2662 #if DIAGNOSTIC
2663 if (signum == 0)
2664 panic("postsig");
2665 /*
2666 * This must be called on master cpu
2667 */
2668 if (cpu_number() != master_cpu)
2669 panic("psig not on master");
2670 #endif
2671
2672 /*
2673 * Try to grab the signal lock.
2674 */
2675 if (sig_try_locked(p) <= 0) {
2676 return;
2677 }
2678
2679 proc_signalstart(p, 1);
2680
2681 ut = (struct uthread *)get_bsdthread_info(current_thread());
2682 mask = sigmask(signum);
2683 ut->uu_siglist &= ~mask;
2684 catcher = ps->ps_sigact[signum];
2685 if (catcher == SIG_DFL) {
2686 /*
2687 * Default catcher, where the default is to kill
2688 * the process. (Other cases were ignored above.)
2689 */
2690 sig_lock_to_exit(p);
2691 p->p_acflag |= AXSIG;
2692 if (sigprop[signum] & SA_CORE) {
2693 p->p_sigacts->ps_sig = signum;
2694 proc_signalend(p, 1);
2695 proc_unlock(p);
2696 if (coredump(p) == 0)
2697 signum |= WCOREFLAG;
2698 } else {
2699 proc_signalend(p, 1);
2700 proc_unlock(p);
2701 }
2702
2703 #if CONFIG_DTRACE
2704 bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));
2705
2706 ut->t_dtrace_siginfo.si_signo = signum;
2707 ut->t_dtrace_siginfo.si_pid = p->si_pid;
2708 ut->t_dtrace_siginfo.si_uid = p->si_uid;
2709 ut->t_dtrace_siginfo.si_status = WEXITSTATUS(p->si_status);
2710
2711 /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
2712 switch (signum) {
2713 case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
2714 DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
2715 break;
2716 default:
2717 break;
2718 }
2719
2720
2721 DTRACE_PROC3(signal__handle, int, signum, siginfo_t *, &(ut->t_dtrace_siginfo),
2722 void (*)(void), SIG_DFL);
2723 #endif
2724
2725 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_FRCEXIT) | DBG_FUNC_NONE,
2726 p->p_pid, W_EXITCODE(0, signum), 3, 0, 0);
2727 exit1(p, W_EXITCODE(0, signum), (int *)NULL);
2728 proc_lock(p);
2729 return;
2730 } else {
2731 /*
2732 * If we get here, the signal must be caught.
2733 */
2734 #if DIAGNOSTIC
2735 if (catcher == SIG_IGN || (ut->uu_sigmask & mask))
2736 log(LOG_WARNING,
2737 "postsig: processing masked or ignored signal\n");
2738 #endif
2739
2740 /*
2741 * Set the new mask value and also defer further
2742 * occurrences of this signal.
2743 *
2744 * Special case: user has done a sigpause. Here the
2745 * current mask is not of interest, but rather the
2746 * mask from before the sigpause is what we want
2747 * restored after the signal processing is completed.
2748 */
2749 if (ut->uu_flag & UT_SAS_OLDMASK) {
2750 returnmask = ut->uu_oldmask;
2751 ut->uu_flag &= ~UT_SAS_OLDMASK;
2752 ut->uu_oldmask = 0;
2753 } else
2754 returnmask = ut->uu_sigmask;
2755 ut->uu_sigmask |= ps->ps_catchmask[signum];
2756 if ((ps->ps_signodefer & mask) == 0)
2757 ut->uu_sigmask |= mask;
2758 if ((signum != SIGILL) && (signum != SIGTRAP) && (ps->ps_sigreset & mask)) {
2759 if ((signum != SIGCONT) && (sigprop[signum] & SA_IGNORE))
2760 p->p_sigignore |= mask;
2761 ps->ps_sigact[signum] = SIG_DFL;
2762 ps->ps_siginfo &= ~mask;
2763 ps->ps_signodefer &= ~mask;
2764 }
2765
2766 if (ps->ps_sig != signum) {
2767 code = 0;
2768 } else {
2769 code = ps->ps_code;
2770 ps->ps_code = 0;
2771 }
2772 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_nsignals);
2773 sendsig(p, catcher, signum, returnmask, code);
2774 }
2775 proc_signalend(p, 1);
2776 }
2777
2778 /*
2779 * Attach a signal knote to the list of knotes for this process.
2780 *
2781 * Signal knotes share the knote list with proc knotes. This
2782 * could be avoided by using a signal-specific knote list, but
2783 * probably isn't worth the trouble.
2784 */
2785
2786 static int
2787 filt_sigattach(struct knote *kn)
2788 {
2789 proc_t p = current_proc(); /* can attach only to oneself */
2790
2791 proc_klist_lock();
2792
2793 kn->kn_ptr.p_proc = p;
2794 kn->kn_flags |= EV_CLEAR; /* automatically set */
2795
2796 KNOTE_ATTACH(&p->p_klist, kn);
2797
2798 proc_klist_unlock();
2799
2800 return (0);
2801 }
2802
2803 /*
2804 * remove the knote from the process list, if it hasn't already
2805 * been removed by exit processing.
2806 */
2807
2808 static void
2809 filt_sigdetach(struct knote *kn)
2810 {
2811 proc_t p = kn->kn_ptr.p_proc;
2812
2813 proc_klist_lock();
2814 kn->kn_ptr.p_proc = NULL;
2815 KNOTE_DETACH(&p->p_klist, kn);
2816 proc_klist_unlock();
2817 }
2818
2819 /*
2820 * Post an event to the signal filter. Because we share the same list
2821 * as process knotes, we have to filter out and handle only signal events.
2822 *
2823 * We assume that we process fdfree() before we post the NOTE_EXIT for
2824 * a process during exit. Therefore, since signal filters can only be
2825 * set up "in-process", we should have already torn down the kqueue
2826 * hosting the EVFILT_SIGNAL knote and should never see NOTE_EXIT.
2827 */
2828 static int
2829 filt_signal(struct knote *kn, long hint)
2830 {
2831
2832 if (hint & NOTE_SIGNAL) {
2833 hint &= ~NOTE_SIGNAL;
2834
2835 if (kn->kn_id == (unsigned int)hint)
2836 kn->kn_data++;
2837 } else if (hint & NOTE_EXIT) {
2838 panic("filt_signal: detected NOTE_EXIT event");
2839 }
2840
2841 return (kn->kn_data != 0);
2842 }
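
/*
 * For illustration only (a user-space sketch, not part of this file):
 * the filter above backs EVFILT_SIGNAL registrations made through
 * kqueue(2)/kevent(2). A process watching its own SIGUSR1 deliveries
 * might do:
 *
 *	int kq = kqueue();
 *	struct kevent ke;
 *	EV_SET(&ke, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &ke, 1, NULL, 0, NULL);	// register
 *	kevent(kq, NULL, 0, &ke, 1, NULL);	// ke.data counts deliveries
 *
 * Because kn_data is only ever incremented here, EVFILT_SIGNAL
 * observes signal delivery; it does not change the signal's normal
 * disposition.
 */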
2843
2844 static void
2845 filt_signaltouch(struct knote *kn, struct kevent64_s *kev, long type)
2846 {
2847 proc_klist_lock();
2848 switch (type) {
2849 case EVENT_REGISTER:
2850 kn->kn_sfflags = kev->fflags;
2851 kn->kn_sdata = kev->data;
2852 break;
2853 case EVENT_PROCESS:
2854 *kev = kn->kn_kevent;
2855 if (kn->kn_flags & EV_CLEAR) {
2856 kn->kn_data = 0;
2857 kn->kn_fflags = 0;
2858 }
2859 break;
2860 default:
2861 panic("filt_machporttouch() - invalid type (%ld)", type);
2862 break;
2863 }
2864 proc_klist_unlock();
2865 }
2866
2867 void
2868 bsd_ast(thread_t thread)
2869 {
2870 proc_t p = current_proc();
2871 struct uthread *ut = get_bsdthread_info(thread);
2872 int signum;
2873 user_addr_t pc;
2874 static int bsd_init_done = 0;
2875
2876 if (p == NULL)
2877 return;
2878
2879 if ((p->p_flag & P_OWEUPC) && (p->p_flag & P_PROFIL)) {
2880 pc = get_useraddr();
2881 addupc_task(p, pc, 1);
2882 OSBitAndAtomic(~((uint32_t)P_OWEUPC), &p->p_flag);
2883 }
2884
2885 if (timerisset(&p->p_vtimer_user.it_value)) {
2886 uint32_t microsecs;
2887
2888 task_vtimer_update(p->task, TASK_VTIMER_USER, &microsecs);
2889
2890 if (!itimerdecr(p, &p->p_vtimer_user, microsecs)) {
2891 if (timerisset(&p->p_vtimer_user.it_value))
2892 task_vtimer_set(p->task, TASK_VTIMER_USER);
2893 else
2894 task_vtimer_clear(p->task, TASK_VTIMER_USER);
2895
2896 psignal(p, SIGVTALRM);
2897 }
2898 }
2899
2900 if (timerisset(&p->p_vtimer_prof.it_value)) {
2901 uint32_t microsecs;
2902
2903 task_vtimer_update(p->task, TASK_VTIMER_PROF, &microsecs);
2904
2905 if (!itimerdecr(p, &p->p_vtimer_prof, microsecs)) {
2906 if (timerisset(&p->p_vtimer_prof.it_value))
2907 task_vtimer_set(p->task, TASK_VTIMER_PROF);
2908 else
2909 task_vtimer_clear(p->task, TASK_VTIMER_PROF);
2910
2911 psignal(p, SIGPROF);
2912 }
2913 }
2914
2915 if (timerisset(&p->p_rlim_cpu)) {
2916 struct timeval tv;
2917
2918 task_vtimer_update(p->task, TASK_VTIMER_RLIM, (uint32_t *) &tv.tv_usec);
2919
2920 proc_spinlock(p);
2921 if (p->p_rlim_cpu.tv_sec > 0 || p->p_rlim_cpu.tv_usec > tv.tv_usec) {
2922 tv.tv_sec = 0;
2923 timersub(&p->p_rlim_cpu, &tv, &p->p_rlim_cpu);
2924 proc_spinunlock(p);
2925 } else {
2926
2927 timerclear(&p->p_rlim_cpu);
2928 proc_spinunlock(p);
2929
2930 task_vtimer_clear(p->task, TASK_VTIMER_RLIM);
2931
2932 psignal(p, SIGXCPU);
2933 }
2934 }
2935
2936 #if CONFIG_DTRACE
2937 if (ut->t_dtrace_sig) {
2938 uint8_t dt_action_sig = ut->t_dtrace_sig;
2939 ut->t_dtrace_sig = 0;
2940 psignal(p, dt_action_sig);
2941 }
2942
2943 if (ut->t_dtrace_stop) {
2944 ut->t_dtrace_stop = 0;
2945 proc_lock(p);
2946 p->p_dtrace_stop = 1;
2947 proc_unlock(p);
2948 (void)task_suspend(p->task);
2949 }
2950
2951 if (ut->t_dtrace_resumepid) {
2952 proc_t resumeproc = proc_find(ut->t_dtrace_resumepid);
2953 ut->t_dtrace_resumepid = 0;
2954 if (resumeproc != PROC_NULL) {
2955 proc_lock(resumeproc);
2956 /* We only act on processes stopped by dtrace */
2957 if (resumeproc->p_dtrace_stop) {
2958 resumeproc->p_dtrace_stop = 0;
2959 proc_unlock(resumeproc);
2960 task_resume(resumeproc->task);
2961 }
2962 else {
2963 proc_unlock(resumeproc);
2964 }
2965 proc_rele(resumeproc);
2966 }
2967 }
2968
2969 #endif /* CONFIG_DTRACE */
2970
2971 proc_lock(p);
2972 if (CHECK_SIGNALS(p, current_thread(), ut)) {
2973 while ( (signum = issignal_locked(p)) )
2974 postsig_locked(signum);
2975 }
2976 proc_unlock(p);
2977
2978 if (!bsd_init_done) {
2979 bsd_init_done = 1;
2980 bsdinit_task();
2981 }
2982
2983 }
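
/*
 * bsd_ast() is the BSD side of AST handling, run on the way back to
 * user mode once AST_BSD has been posted (see act_set_astbsd() in
 * psignal_internal() above): it charges profiling ticks, decrements
 * the user and profiling interval timers and the CPU rlimit timer,
 * performs deferred DTrace signal/stop/resume actions, and finally
 * drains pending signals via the issignal_locked()/postsig_locked()
 * loop.
 */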
2984
2985 /* ptrace set runnable */
2986 void
2987 pt_setrunnable(proc_t p)
2988 {
2989 task_t task;
2990
2991 task = p->task;
2992
2993 if (p->p_lflag & P_LTRACED) {
2994 proc_lock(p);
2995 p->p_stat = SRUN;
2996 proc_unlock(p);
2997 if (p->sigwait) {
2998 wakeup((caddr_t)&(p->sigwait));
2999 if ((p->p_lflag & P_LSIGEXC) == 0) { // 5878479
3000 task_release(task);
3001 }
3002 }
3003 }
3004 }
3005
3006 kern_return_t
3007 do_bsdexception(
3008 int exc,
3009 int code,
3010 int sub)
3011 {
3012 mach_exception_data_type_t codes[EXCEPTION_CODE_MAX];
3013
3014 codes[0] = code;
3015 codes[1] = sub;
3016 return(bsd_exception(exc, codes, 2));
3017 }
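
/*
 * do_bsdexception() wraps a BSD-level condition into a two-code Mach
 * exception; for example, issignal_locked() above raises
 * do_bsdexception(EXC_SOFTWARE, EXC_SOFT_SIGNAL, signum) when a traced
 * process with P_LSIGEXC takes a signal.
 */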
3018
3019 int
3020 proc_pendingsignals(proc_t p, sigset_t mask)
3021 {
3022 struct uthread * uth;
3023 thread_t th;
3024 sigset_t bits = 0;
3025
3026 proc_lock(p);
3027 /* If the process is in proc exit, return no signal info */
3028 if (p->p_lflag & P_LPEXIT) {
3029 goto out;
3030 }
3031
3032 if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
3033 th = p->p_vforkact;
3034 uth = (struct uthread *)get_bsdthread_info(th);
3035 if (uth) {
3036 bits = (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
3037 }
3038 goto out;
3039 }
3040
3041 bits = 0;
3042 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
3043 bits |= (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
3044 }
3045 out:
3046 proc_unlock(p);
3047 return(bits);
3048 }
3049
3050 int
3051 thread_issignal(proc_t p, thread_t th, sigset_t mask)
3052 {
3053 struct uthread * uth;
3054 sigset_t bits=0;
3055
3056 proc_lock(p);
3057 uth = (struct uthread *)get_bsdthread_info(th);
3058 if (uth) {
3059 bits = (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
3060 }
3061 proc_unlock(p);
3062 return(bits);
3063 }
3064
3065 /*
3066 * Allow external reads of the sigprop array.
3067 */
3068 int
3069 hassigprop(int sig, int prop)
3070 {
3071 return (sigprop[sig] & prop);
3072 }
3073
3074 void
3075 pgsigio(pid_t pgid, int sig)
3076 {
3077 proc_t p = PROC_NULL;
3078
3079 if (pgid < 0)
3080 gsignal(-(pgid), sig);
3081
3082 else if (pgid > 0 && (p = proc_find(pgid)) != 0)
3083 psignal(p, sig);
3084 if (p != PROC_NULL)
3085 proc_rele(p);
3086 }
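
/*
 * pgsigio() follows the usual F_SETOWN sign convention for SIGIO-style
 * delivery: a negative pgid names a process group (signalled via
 * gsignal()), while a positive pgid names a single process.
 */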
3087
3088 void
3089 proc_signalstart(proc_t p, int locked)
3090 {
3091 if (!locked)
3092 proc_lock(p);
3093 p->p_sigwaitcnt++;
3094 while ((p->p_lflag & P_LINSIGNAL) == P_LINSIGNAL)
3095 msleep(&p->p_sigmask, &p->p_mlock, 0, "proc_signstart", NULL);
3096 p->p_sigwaitcnt--;
3097
3098 p->p_lflag |= P_LINSIGNAL;
3099 p->p_signalholder = current_thread();
3100 if (!locked)
3101 proc_unlock(p);
3102 }
3103
3104 void
3105 proc_signalend(proc_t p, int locked)
3106 {
3107 if (!locked)
3108 proc_lock(p);
3109 p->p_lflag &= ~P_LINSIGNAL;
3110
3111 if (p->p_sigwaitcnt > 0)
3112 wakeup(&p->p_sigmask);
3113
3114 p->p_signalholder = NULL;
3115 if (!locked)
3116 proc_unlock(p);
3117 }
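
/*
 * proc_signalstart()/proc_signalend() bracket signal posting so that
 * only one thread at a time runs the signal machinery for a given
 * process: P_LINSIGNAL marks the owner (p_signalholder), and later
 * arrivals sleep on p_sigmask until the flag clears.
 */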
3118
3119 void
3120 sig_lock_to_exit(proc_t p)
3121 {
3122 thread_t self = current_thread();
3123
3124 p->exit_thread = self;
3125 proc_unlock(p);
3126
3127 task_hold(p->task);
3128 task_wait(p->task, FALSE);
3129
3130 proc_lock(p);
3131 }
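
/*
 * sig_lock_to_exit() claims the process for termination: it records
 * the current thread as exit_thread, then suspends and waits for the
 * task's other threads (task_hold()/task_wait()) before re-taking the
 * proc lock, leaving the caller free to drive the exit alone.
 */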
3132
3133 int
3134 sig_try_locked(proc_t p)
3135 {
3136 thread_t self = current_thread();
3137
3138 while (p->sigwait || p->exit_thread) {
3139 if (p->exit_thread) {
3140 return(0);
3141 }
3142 msleep((caddr_t)&p->sigwait_thread, &p->p_mlock, PCATCH | PDROP, 0, 0);
3143 if (thread_should_abort(self)) {
3144 /*
3145 * Terminate request - clean up.
3146 */
3147 proc_lock(p);
3148 return -1;
3149 }
3150 proc_lock(p);
3151 }
3152 return 1;
3153 }
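
/*
 * sig_try_locked() is entered and exited with the proc lock held: it
 * returns 1 once it is safe to process signals, 0 if another thread
 * has already claimed the process for exit, and -1 if this thread was
 * aborted while waiting.
 */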