]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kern_sig.c
xnu-3248.30.4.tar.gz
[apple/xnu.git] / bsd / kern / kern_sig.c
1 /*
2 * Copyright (c) 1995-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1986, 1989, 1991, 1993
30 * The Regents of the University of California. All rights reserved.
31 * (c) UNIX System Laboratories, Inc.
32 * All or some portions of this file are derived from material licensed
33 * to the University of California by American Telephone and Telegraph
34 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
35 * the permission of UNIX System Laboratories, Inc.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement:
47 * This product includes software developed by the University of
48 * California, Berkeley and its contributors.
49 * 4. Neither the name of the University nor the names of its contributors
50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE.
64 *
65 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94
66 */
67 /*
68 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
69 * support for mandatory and extensible security protections. This notice
70 * is included in support of clause 2.2 (b) of the Apple Public License,
71 * Version 2.0.
72 */
73
74 #define SIGPROP /* include signal properties table */
75 #include <sys/param.h>
76 #include <sys/resourcevar.h>
77 #include <sys/proc_internal.h>
78 #include <sys/kauth.h>
79 #include <sys/systm.h>
80 #include <sys/timeb.h>
81 #include <sys/times.h>
82 #include <sys/acct.h>
83 #include <sys/file_internal.h>
84 #include <sys/kernel.h>
85 #include <sys/wait.h>
86 #include <sys/signalvar.h>
87 #include <sys/syslog.h>
88 #include <sys/stat.h>
89 #include <sys/lock.h>
90 #include <sys/kdebug.h>
91
92 #include <sys/mount.h>
93 #include <sys/sysproto.h>
94
95 #include <security/audit/audit.h>
96
97 #include <machine/spl.h>
98
99 #include <kern/cpu_number.h>
100
101 #include <sys/vm.h>
102 #include <sys/user.h> /* for coredump */
103 #include <kern/ast.h> /* for APC support */
104 #include <kern/task.h> /* extern void *get_bsdtask_info(task_t); */
105 #include <kern/thread.h>
106 #include <kern/sched_prim.h>
107 #include <kern/thread_call.h>
108 #include <mach/exception.h>
109 #include <mach/task.h>
110 #include <mach/thread_act.h>
111 #include <libkern/OSAtomic.h>
112
113 #include <sys/sdt.h>
114 #include <sys/codesign.h>
115
116 /*
117 * Missing prototypes that Mach should export
118 *
119 * +++
120 */
121 extern int thread_enable_fpe(thread_t act, int onoff);
122 extern thread_t port_name_to_thread(mach_port_name_t port_name);
123 extern kern_return_t get_signalact(task_t , thread_t *, int);
124 extern unsigned int get_useraddr(void);
125
126 /*
127 * ---
128 */
129
130 extern void doexception(int exc, mach_exception_code_t code,
131 mach_exception_subcode_t sub);
132
133 static void stop(proc_t, proc_t);
134 int cansignal(proc_t, kauth_cred_t, proc_t, int, int);
135 int killpg1(proc_t, int, int, int, int);
136 static void psignal_uthread(thread_t, int);
137 static void psignal_try_thread(proc_t, thread_t, int signum);
138 kern_return_t do_bsdexception(int, int, int);
139 void __posix_sem_syscall_return(kern_return_t);
140 char *proc_name_address(void *p);
141
142 /* implementations in osfmk/kern/sync_sema.c. We do not want port.h in this scope, so void * them */
143 kern_return_t semaphore_timedwait_signal_trap_internal(mach_port_name_t, mach_port_name_t, unsigned int, clock_res_t, void (*)(kern_return_t));
144 kern_return_t semaphore_timedwait_trap_internal(mach_port_name_t, unsigned int, clock_res_t, void (*)(kern_return_t));
145 kern_return_t semaphore_wait_signal_trap_internal(mach_port_name_t, mach_port_name_t, void (*)(kern_return_t));
146 kern_return_t semaphore_wait_trap_internal(mach_port_name_t, void (*)(kern_return_t));
147
148 static int filt_sigattach(struct knote *kn);
149 static void filt_sigdetach(struct knote *kn);
150 static int filt_signal(struct knote *kn, long hint);
151 static void filt_signaltouch(struct knote *kn, struct kevent_internal_s *kev,
152 long type);
153
154 struct filterops sig_filtops = {
155 .f_attach = filt_sigattach,
156 .f_detach = filt_sigdetach,
157 .f_event = filt_signal,
158 .f_touch = filt_signaltouch,
159 };
160
/* structures and fns for killpg1 iteration callback and filters */
162 struct killpg1_filtargs {
163 int posix;
164 proc_t cp;
165 };
166
167 struct killpg1_iterargs {
168 proc_t cp;
169 kauth_cred_t uc;
170 int signum;
171 int * nfoundp;
172 int zombie;
173 };
174
175 static int killpg1_filt(proc_t p, void * arg);
176 static int killpg1_pgrpfilt(proc_t p, __unused void * arg);
177 static int killpg1_callback(proc_t p, void * arg);
178
179 static int pgsignal_filt(proc_t p, void * arg);
180 static int pgsignal_callback(proc_t p, void * arg);
181 static kern_return_t get_signalthread(proc_t, int, thread_t *);
182
183
184 /* flags for psignal_internal */
185 #define PSIG_LOCKED 0x1
186 #define PSIG_VFORK 0x2
187 #define PSIG_THREAD 0x4
188 #define PSIG_TRY_THREAD 0x8
189
190
191 static void psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum);
192
193 /*
194 * NOTE: Source and target may *NOT* overlap! (target is smaller)
195 */
196 static void
197 sigaltstack_kern_to_user32(struct kern_sigaltstack *in, struct user32_sigaltstack *out)
198 {
199 out->ss_sp = CAST_DOWN_EXPLICIT(user32_addr_t, in->ss_sp);
200 out->ss_size = CAST_DOWN_EXPLICIT(user32_size_t, in->ss_size);
201 out->ss_flags = in->ss_flags;
202 }
203
/*
 * Convert a kernel sigaltstack to its 64-bit user representation.
 * Field widths match, so this is a straight field-by-field copy.
 */
static void
sigaltstack_kern_to_user64(struct kern_sigaltstack *in, struct user64_sigaltstack *out)
{
	out->ss_sp = in->ss_sp;
	out->ss_size = in->ss_size;
	out->ss_flags = in->ss_flags;
}
211
/*
 * NOTE: Source and target are permitted to overlap! (source is smaller);
 * this works because we copy fields in order from the end of the struct to
 * the beginning.
 */
static void
sigaltstack_user32_to_kern(struct user32_sigaltstack *in, struct kern_sigaltstack *out)
{
	/* do NOT reorder these stores -- overlap safety depends on it */
	out->ss_flags = in->ss_flags;
	out->ss_size = in->ss_size;
	out->ss_sp = CAST_USER_ADDR_T(in->ss_sp);
}
/*
 * Convert a 64-bit user sigaltstack to the kernel representation.
 * Copies fields end-to-beginning like the 32-bit variant above, so an
 * overlapping source/target remains safe here as well.
 */
static void
sigaltstack_user64_to_kern(struct user64_sigaltstack *in, struct kern_sigaltstack *out)
{
	out->ss_flags = in->ss_flags;
	out->ss_size = in->ss_size;
	out->ss_sp = in->ss_sp;
}
231
/*
 * Convert a kernel sigaction to its 32-bit user representation,
 * narrowing the handler pointer.
 */
static void
sigaction_kern_to_user32(struct kern_sigaction *in, struct user32_sigaction *out)
{
	/* This assumes 32 bit __sa_handler is of type sig_t */
	out->__sigaction_u.__sa_handler = CAST_DOWN_EXPLICIT(user32_addr_t,in->__sigaction_u.__sa_handler);
	out->sa_mask = in->sa_mask;
	out->sa_flags = in->sa_flags;
}
/*
 * Convert a kernel sigaction to its 64-bit user representation.
 * The handler is a full-width user address here, so it copies through
 * unchanged (no narrowing cast needed, unlike the 32-bit variant).
 */
static void
sigaction_kern_to_user64(struct kern_sigaction *in, struct user64_sigaction *out)
{
	out->__sigaction_u.__sa_handler = in->__sigaction_u.__sa_handler;
	out->sa_mask = in->sa_mask;
	out->sa_flags = in->sa_flags;
}
248
/*
 * Convert a 32-bit user __sigaction (handler + trampoline form, as passed
 * by the libc stub) to the kernel representation, widening the handler
 * and trampoline addresses.
 */
static void
__sigaction_user32_to_kern(struct __user32_sigaction *in, struct __kern_sigaction *out)
{
	out->__sigaction_u.__sa_handler = CAST_USER_ADDR_T(in->__sigaction_u.__sa_handler);
	out->sa_tramp = CAST_USER_ADDR_T(in->sa_tramp);
	out->sa_mask = in->sa_mask;
	out->sa_flags = in->sa_flags;
}
257
/*
 * Convert a 64-bit user __sigaction (handler + trampoline form) to the
 * kernel representation; all fields copy through unchanged.
 */
static void
__sigaction_user64_to_kern(struct __user64_sigaction *in, struct __kern_sigaction *out)
{
	out->__sigaction_u.__sa_handler = in->__sigaction_u.__sa_handler;
	out->sa_tramp = in->sa_tramp;
	out->sa_mask = in->sa_mask;
	out->sa_flags = in->sa_flags;
}
266
267 #if SIGNAL_DEBUG
268 void ram_printf(int);
269 int ram_debug=0;
270 unsigned int rdebug_proc=0;
/*
 * Debug helper (SIGNAL_DEBUG builds only): print the value passed in.
 */
void
ram_printf(int x)
{
	printf("x is %d",x);

}
277 #endif /* SIGNAL_DEBUG */
278
279
/*
 * Request a BSD AST on the given thread so it will check for and
 * deliver pending signals on its way back to user mode.
 */
void
signal_setast(thread_t sig_actthread)
{
	act_set_astbsd(sig_actthread);
}
285
/*
 * Can process p, with ucred uc, send the signal signum to process q?
 * Returns 1 if allowed, 0 if denied.
 *
 * uc is refcounted by the caller so internal fields can be used safely.
 * When called with a non-zero zombie arg, the proc list lock is already
 * held by the caller, so this routine neither takes that lock nor an
 * extra credential reference.
 */
int
cansignal(proc_t p, kauth_cred_t uc, proc_t q, int signum, int zombie)
{
	kauth_cred_t my_cred;
	struct session * p_sessp = SESSION_NULL;
	struct session * q_sessp = SESSION_NULL;
#if CONFIG_MACF
	int error;

	/* Give MAC policies first right of refusal. */
	error = mac_proc_check_signal(p, q, signum);
	if (error)
		return (0);
#endif

	/* you can signal yourself */
	if (p == q)
		return(1);

	/* you can't send launchd SIGKILL, even if root */
	if (signum == SIGKILL && q == initproc)
		return(0);

	if (!suser(uc, NULL))
		return (1);		/* root can always signal */

	/* Session comparison: pg_session access requires the proc list lock. */
	if (zombie == 0)
		proc_list_lock();
	if (p->p_pgrp != PGRP_NULL)
		p_sessp = p->p_pgrp->pg_session;
	if (q->p_pgrp != PGRP_NULL)
		q_sessp = q->p_pgrp->pg_session;

	if (signum == SIGCONT && q_sessp == p_sessp) {
		if (zombie == 0)
			proc_list_unlock();
		return (1);		/* SIGCONT in session */
	}

	if (zombie == 0)
		proc_list_unlock();

	/*
	 * If the real or effective UID of the sender matches the real
	 * or saved UID of the target, permit the signal to
	 * be sent.
	 */
	if (zombie == 0)
		my_cred = kauth_cred_proc_ref(q);
	else
		my_cred = proc_ucred(q);	/* zombie: no extra ref taken */

	if (kauth_cred_getruid(uc) == kauth_cred_getruid(my_cred) ||
	    kauth_cred_getruid(uc) == kauth_cred_getsvuid(my_cred) ||
	    kauth_cred_getuid(uc) == kauth_cred_getruid(my_cred) ||
	    kauth_cred_getuid(uc) == kauth_cred_getsvuid(my_cred)) {
		if (zombie == 0)
			kauth_cred_unref(&my_cred);
		return (1);
	}

	if (zombie == 0)
		kauth_cred_unref(&my_cred);

	return (0);
}
356
357 /*
358 * <rdar://problem/21952708> Some signals can be restricted from being handled,
359 * forcing the default action for that signal. This behavior applies only to
360 * non-root (EUID != 0) processes, and is configured with the "sigrestrict=x"
361 * bootarg:
362 *
363 * 0 (default): Disallow use of restricted signals. Trying to register a handler
364 * returns ENOTSUP, which userspace may use to take special action (e.g. abort).
365 * 1: As above, but return EINVAL. Restricted signals behave similarly to SIGKILL.
366 * 2: Usual POSIX semantics.
367 */
368 unsigned sigrestrict_arg = 0;
369
370 #if PLATFORM_WatchOS
371 static int
372 sigrestrictmask(void)
373 {
374 if (kauth_getuid() != 0 && sigrestrict_arg != 2) {
375 return SIGRESTRICTMASK;
376 }
377 return 0;
378 }
379
380 static int
381 signal_is_restricted(proc_t p, int signum)
382 {
383 if (sigmask(signum) & sigrestrictmask()) {
384 if (sigrestrict_arg == 0 &&
385 task_get_apptype(p->task) == TASK_APPTYPE_APP_DEFAULT) {
386 return ENOTSUP;
387 } else {
388 return EINVAL;
389 }
390 }
391 return 0;
392 }
393
394 #else
395
396 static inline int
397 signal_is_restricted(proc_t p, int signum)
398 {
399 (void)p;
400 (void)signum;
401 return 0;
402 }
403 #endif /* !PLATFORM_WatchOS */
404
/*
 * sigaction system call: examine and/or install the action for signum.
 *
 * Returns:	0			Success
 *		EINVAL
 *	copyout:EFAULT
 *	copyin:EFAULT
 *
 * Notes:	Uses current thread as a parameter to inform PPC to enable
 *		FPU exceptions via setsigvec(); this operation is not proxy
 *		safe!
 */
/* ARGSUSED */
int
sigaction(proc_t p, struct sigaction_args *uap, __unused int32_t *retval)
{
	struct kern_sigaction vec;
	struct __kern_sigaction __vec;

	struct kern_sigaction *sa = &vec;
	struct sigacts *ps = p->p_sigacts;

	int signum;
	int bit, error=0;

	/* SIGKILL and SIGSTOP may never have their actions changed */
	signum = uap->signum;
	if (signum <= 0 || signum >= NSIG ||
	    signum == SIGKILL || signum == SIGSTOP)
		return (EINVAL);

	if (uap->nsa) {
		/* copy in the new action, widening the 32-bit layout if needed */
		if (IS_64BIT_PROCESS(p)) {
			struct __user64_sigaction __vec64;
			error = copyin(uap->nsa, &__vec64, sizeof(__vec64));
			__sigaction_user64_to_kern(&__vec64, &__vec);
		} else {
			struct __user32_sigaction __vec32;
			error = copyin(uap->nsa, &__vec32, sizeof(__vec32));
			__sigaction_user32_to_kern(&__vec32, &__vec);
		}
		if (error)
			return (error);
		__vec.sa_flags &= SA_USERSPACE_MASK; /* Only pass on valid sa_flags */

		/* restricted signals may not get a non-default disposition */
		if ((__vec.sa_flags & SA_SIGINFO) || __vec.sa_handler != SIG_DFL) {
			if ((error = signal_is_restricted(p, signum))) {
				if (error == ENOTSUP) {
					printf("%s(%d): denied attempt to register action for signal %d\n",
					       proc_name_address(p), proc_pid(p), signum);
				}
				return error;
			}
		}
	}

	if (uap->osa) {
		/* reconstruct the old action from the per-signal bit vectors */
		sa->sa_handler = ps->ps_sigact[signum];
		sa->sa_mask = ps->ps_catchmask[signum];
		bit = sigmask(signum);
		sa->sa_flags = 0;
		if ((ps->ps_sigonstack & bit) != 0)
			sa->sa_flags |= SA_ONSTACK;
		if ((ps->ps_sigintr & bit) == 0)
			sa->sa_flags |= SA_RESTART;
		if (ps->ps_siginfo & bit)
			sa->sa_flags |= SA_SIGINFO;
		if (ps->ps_signodefer & bit)
			sa->sa_flags |= SA_NODEFER;
		if (ps->ps_64regset & bit)
			sa->sa_flags |= SA_64REGSET;
		if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDSTOP))
			sa->sa_flags |= SA_NOCLDSTOP;
		if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDWAIT))
			sa->sa_flags |= SA_NOCLDWAIT;

		if (IS_64BIT_PROCESS(p)) {
			struct user64_sigaction vec64;
			sigaction_kern_to_user64(sa, &vec64);
			error = copyout(&vec64, uap->osa, sizeof(vec64));
		} else {
			struct user32_sigaction vec32;
			sigaction_kern_to_user32(sa, &vec32);
			error = copyout(&vec32, uap->osa, sizeof(vec32));
		}
		if (error)
			return (error);
	}

	/* install the new action only after the old one was reported */
	if (uap->nsa) {
		error = setsigvec(p, current_thread(), signum, &__vec, FALSE);
	}

	return (error);
}
497
498 /* Routines to manipulate bits on all threads */
/*
 * Clear the pending-signal bit(s) 'bit' on every thread of process p and
 * on the process-wide p_siglist.  In the vfork case only the borrowed
 * (acting) thread carries signals, so only it is touched.
 *
 * 'in_signalstart' is TRUE when the caller has already entered a
 * proc_signalstart()/proc_signalend() critical section; this routine
 * must not nest another one in that case.
 */
int
clear_procsiglist(proc_t p, int bit, boolean_t in_signalstart)
{
	struct uthread * uth;
	thread_t thact;

	proc_lock(p);
	if (!in_signalstart)
		proc_signalstart(p, 1);

	if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
		thact = p->p_vforkact;
		uth = (struct uthread *)get_bsdthread_info(thact);
		if (uth) {
			uth->uu_siglist &= ~bit;
		}
		if (!in_signalstart)
			proc_signalend(p, 1);
		proc_unlock(p);
		return(0);
	}

	TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
		uth->uu_siglist &= ~bit;
	}
	p->p_siglist &= ~bit;
	if (!in_signalstart)
		proc_signalend(p, 1);
	proc_unlock(p);

	return(0);
}
531
532
/*
 * Remove 'bit' from the blocked-signal mask of every thread in p and
 * from the process-wide p_sigmask (vfork: only the acting thread).
 */
static int
unblock_procsigmask(proc_t p, int bit)
{
	struct uthread * uth;
	thread_t thact;

	proc_lock(p);
	proc_signalstart(p, 1);

	if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
		/* vfork: only the borrowed (acting) thread carries signal state */
		thact = p->p_vforkact;
		uth = (struct uthread *)get_bsdthread_info(thact);
		if (uth) {
			uth->uu_sigmask &= ~bit;
		}
		p->p_sigmask &= ~bit;
		proc_signalend(p, 1);
		proc_unlock(p);
		return(0);
	}
	TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
		uth->uu_sigmask &= ~bit;
	}
	p->p_sigmask &= ~bit;

	proc_signalend(p, 1);
	proc_unlock(p);
	return(0);
}
562
/*
 * Add 'bit' to the blocked-signal mask of every thread in p and to the
 * process-wide p_sigmask (vfork: only the acting thread).
 */
static int
block_procsigmask(proc_t p, int bit)
{
	struct uthread * uth;
	thread_t thact;

	proc_lock(p);
	proc_signalstart(p, 1);

	if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
		/* vfork: only the borrowed (acting) thread carries signal state */
		thact = p->p_vforkact;
		uth = (struct uthread *)get_bsdthread_info(thact);
		if (uth) {
			uth->uu_sigmask |= bit;
		}
		p->p_sigmask |= bit;
		proc_signalend(p, 1);
		proc_unlock(p);
		return(0);
	}
	TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
		uth->uu_sigmask |= bit;
	}
	p->p_sigmask |= bit;

	proc_signalend(p, 1);
	proc_unlock(p);
	return(0);
}
592
/*
 * Replace the blocked-signal mask of every thread in p, and the
 * process-wide p_sigmask, with 'bit' (vfork: only the acting thread).
 */
int
set_procsigmask(proc_t p, int bit)
{
	struct uthread * uth;
	thread_t thact;

	proc_lock(p);
	proc_signalstart(p, 1);

	if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
		/* vfork: only the borrowed (acting) thread carries signal state */
		thact = p->p_vforkact;
		uth = (struct uthread *)get_bsdthread_info(thact);
		if (uth) {
			uth->uu_sigmask = bit;
		}
		p->p_sigmask =  bit;
		proc_signalend(p, 1);
		proc_unlock(p);
		return(0);
	}
	TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
		uth->uu_sigmask = bit;
	}
	p->p_sigmask =  bit;
	proc_signalend(p, 1);
	proc_unlock(p);

	return(0);
}
622
/* XXX should be static? */
/*
 * Install signal action *sa for signal 'signum' in process p, updating
 * the per-signal bit vectors in p_sigacts and the process-wide
 * ignore/catch sets.  Returns 0 on success, EINVAL for attempts to
 * change SIGKILL/SIGSTOP away from SIG_DFL.
 *
 * Notes:	The thread parameter is used in the PPC case to select the
 *		thread on which the floating point exception will be enabled
 *		or disabled.  We can't simply take current_thread(), since
 *		this is called from posix_spawn() on the not currently running
 *		process/thread pair.
 *
 *		We mark thread as unused to allow compilation without warning
 *		on non-PPC platforms.
 */
int
setsigvec(proc_t p, __unused thread_t thread, int signum, struct __kern_sigaction *sa, boolean_t in_sigstart)
{
	struct sigacts *ps = p->p_sigacts;
	int bit;

	if ((signum == SIGKILL || signum == SIGSTOP) &&
		sa->sa_handler != SIG_DFL)
		return(EINVAL);
	bit = sigmask(signum);
	/*
	 * Change setting atomically.
	 */
	ps->ps_sigact[signum] = sa->sa_handler;
	ps->ps_trampact[signum] = sa->sa_tramp;
	ps->ps_catchmask[signum] = sa->sa_mask &~ sigcantmask;
	/* fold each sa_flags bit into the corresponding per-signal vector */
	if (sa->sa_flags & SA_SIGINFO)
		ps->ps_siginfo |= bit;
	else
		ps->ps_siginfo &= ~bit;
	if (sa->sa_flags & SA_64REGSET)
		ps->ps_64regset |= bit;
	else
		ps->ps_64regset &= ~bit;
	/* note inverted sense: ps_sigintr marks NON-restarting signals */
	if ((sa->sa_flags & SA_RESTART) == 0)
		ps->ps_sigintr |= bit;
	else
		ps->ps_sigintr &= ~bit;
	if (sa->sa_flags & SA_ONSTACK)
		ps->ps_sigonstack |= bit;
	else
		ps->ps_sigonstack &= ~bit;
	if (sa->sa_flags & SA_USERTRAMP)
		ps->ps_usertramp |= bit;
	else
		ps->ps_usertramp &= ~bit;
	if (sa->sa_flags & SA_RESETHAND)
		ps->ps_sigreset |= bit;
	else
		ps->ps_sigreset &= ~bit;
	if (sa->sa_flags & SA_NODEFER)
		ps->ps_signodefer |= bit;
	else
		ps->ps_signodefer &= ~bit;
	if (signum == SIGCHLD) {
		/* SIGCHLD options live in p_flag; update them atomically */
		if (sa->sa_flags & SA_NOCLDSTOP)
			OSBitOrAtomic(P_NOCLDSTOP, &p->p_flag);
		else
			OSBitAndAtomic(~((uint32_t)P_NOCLDSTOP), &p->p_flag);
		if ((sa->sa_flags & SA_NOCLDWAIT) || (sa->sa_handler == SIG_IGN))
			OSBitOrAtomic(P_NOCLDWAIT, &p->p_flag);
		else
			OSBitAndAtomic(~((uint32_t)P_NOCLDWAIT), &p->p_flag);
	}

	/*
	 * Set bit in p_sigignore for signals that are set to SIG_IGN,
	 * and for signals set to SIG_DFL where the default is to ignore.
	 * However, don't put SIGCONT in p_sigignore,
	 * as we have to restart the process.
	 */
	if (sa->sa_handler == SIG_IGN ||
	    (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) {

		clear_procsiglist(p, bit, in_sigstart);
		if (signum != SIGCONT)
			p->p_sigignore |= bit;	/* easier in psignal */
		p->p_sigcatch &= ~bit;
	} else {
		p->p_sigignore &= ~bit;
		if (sa->sa_handler == SIG_DFL)
			p->p_sigcatch &= ~bit;
		else
			p->p_sigcatch |= bit;
	}
	return(0);
}
711
712 /*
713 * Initialize signal state for process 0;
714 * set to ignore signals that are ignored by default.
715 */
716 void
717 siginit(proc_t p)
718 {
719 int i;
720
721 for (i = 1; i < NSIG; i++)
722 if (sigprop[i] & SA_IGNORE && i != SIGCONT)
723 p->p_sigignore |= sigmask(i);
724 }
725
/*
 * Reset signals for an exec of the specified process: caught signals go
 * back to SIG_DFL, pending process signals are handed to 'thread', and
 * the alternate signal stack is disabled.
 */
void
execsigs(proc_t p, thread_t thread)
{
	struct sigacts *ps = p->p_sigacts;
	int nc, mask;
	struct uthread *ut;

	ut = (struct uthread *)get_bsdthread_info(thread);

	/*
	 * transfer saved signal states from the process
	 * back to the current thread.
	 *
	 * NOTE: We do this without the process locked,
	 * because we are guaranteed to be single-threaded
	 * by this point in exec and the p_siglist is
	 * only accessed by threads inside the process.
	 */
	ut->uu_siglist |= p->p_siglist;
	p->p_siglist = 0;

	/*
	 * Reset caught signals.  Held signals remain held
	 * through p_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	while (p->p_sigcatch) {
		nc = ffs((long)p->p_sigcatch);
		mask = sigmask(nc);
		p->p_sigcatch &= ~mask;
		if (sigprop[nc] & SA_IGNORE) {
			if (nc != SIGCONT)
				p->p_sigignore |= mask;
			/* drop any pending instance of a now-ignored signal */
			ut->uu_siglist &= ~mask;
		}
		ps->ps_sigact[nc] = SIG_DFL;
	}

	/*
	 * Reset stack state to the user stack.
	 * Clear set of signals caught on the signal stack.
	 */
	/* thread */
	ut->uu_sigstk.ss_flags = SA_DISABLE;
	ut->uu_sigstk.ss_size = 0;
	ut->uu_sigstk.ss_sp = USER_ADDR_NULL;
	ut->uu_flag &= ~UT_ALTSTACK;
	/* process */
	ps->ps_sigonstack = 0;
}
779
/*
 * Manipulate signal mask.
 * Note that we receive new mask, not pointer,
 * and return old mask as return value;
 * the library stub does the rest.
 */
int
sigprocmask(proc_t p, struct sigprocmask_args *uap, __unused int32_t *retval)
{
	int error = 0;
	sigset_t oldmask, nmask;
	user_addr_t omask = uap->omask;
	struct uthread *ut;

	ut = (struct uthread *)get_bsdthread_info(current_thread());
	oldmask  = ut->uu_sigmask;

	if (uap->mask == USER_ADDR_NULL) {
		/* just want old mask */
		goto out;
	}
	error = copyin(uap->mask, &nmask, sizeof(sigset_t));
	if (error)
		goto out;

	/* bits in sigcantmask may never be blocked; strip them in all cases */
	switch (uap->how) {
	case SIG_BLOCK:
		block_procsigmask(p, (nmask & ~sigcantmask));
		signal_setast(current_thread());
		break;

	case SIG_UNBLOCK:
		unblock_procsigmask(p, (nmask & ~sigcantmask));
		signal_setast(current_thread());
		break;

	case SIG_SETMASK:
		set_procsigmask(p, (nmask & ~sigcantmask));
		signal_setast(current_thread());
		break;

	default:
		error = EINVAL;
		break;
	}
out:
	/* a failed copyout of the old mask is deliberately not reported */
	if (!error && omask != USER_ADDR_NULL)
		copyout(&oldmask, omask, sizeof(sigset_t));
	return (error);
}
830
831 int
832 sigpending(__unused proc_t p, struct sigpending_args *uap, __unused int32_t *retval)
833 {
834 struct uthread *ut;
835 sigset_t pendlist;
836
837 ut = (struct uthread *)get_bsdthread_info(current_thread());
838 pendlist = ut->uu_siglist;
839
840 if (uap->osv)
841 copyout(&pendlist, uap->osv, sizeof(sigset_t));
842 return(0);
843 }
844
845 /*
846 * Suspend process until signal, providing mask to be set
847 * in the meantime. Note nonstandard calling convention:
848 * libc stub passes mask, not pointer, to save a copyin.
849 */
850
/*
 * Continuation routine for sigsuspend_nocancel()'s tsleep0(); completes
 * the interrupted syscall with EINTR (never returns to its caller).
 */
static int
sigcontinue(__unused int error)
{
//	struct uthread *ut = get_bsdthread_info(current_thread());
	unix_syscall_return(EINTR);
}
857
/*
 * Cancellation-point wrapper around sigsuspend_nocancel().
 */
int
sigsuspend(proc_t p, struct sigsuspend_args *uap, int32_t *retval)
{
	__pthread_testcancel(1);
	return(sigsuspend_nocancel(p, (struct sigsuspend_nocancel_args *)uap, retval));
}
864
/*
 * Atomically set the thread's signal mask from uap->mask and sleep until
 * a signal arrives; the saved mask is restored after the handler runs.
 */
int
sigsuspend_nocancel(proc_t p, struct sigsuspend_nocancel_args *uap, __unused int32_t *retval)
{
	struct uthread *ut;

	ut = (struct uthread *)get_bsdthread_info(current_thread());

	/*
	 * When returning from sigpause, we want
	 * the old mask to be restored after the
	 * signal handler has finished.  Thus, we
	 * save it here and mark the sigacts structure
	 * to indicate this.
	 */
	ut->uu_oldmask = ut->uu_sigmask;
	ut->uu_flag |= UT_SAS_OLDMASK;
	/* signals in sigcantmask can never be blocked */
	ut->uu_sigmask = (uap->mask & ~sigcantmask);
	(void) tsleep0((caddr_t) p, PPAUSE|PCATCH, "pause", 0, sigcontinue);
	/* always return EINTR rather than ERESTART... */
	return (EINTR);
}
886
887
888 int
889 __disable_threadsignal(__unused proc_t p,
890 __unused struct __disable_threadsignal_args *uap,
891 __unused int32_t *retval)
892 {
893 struct uthread *uth;
894
895 uth = (struct uthread *)get_bsdthread_info(current_thread());
896
897 /* No longer valid to have any signal delivered */
898 uth->uu_flag |= (UT_NO_SIGMASK | UT_CANCELDISABLE);
899
900 return(0);
901
902 }
903
/*
 * Act on a pending pthread cancellation of the calling thread, if any:
 * a thread with UT_CANCEL set (and neither disabled nor already acted
 * upon) either returns EINTR to userspace (presyscall != 0) or is
 * aborted safely.  Also clears UT_NOTCANCELPT, marking this point as a
 * cancellation point.
 */
void
__pthread_testcancel(int presyscall)
{

	thread_t self = current_thread();
	struct uthread * uthread;

	uthread = (struct uthread *)get_bsdthread_info(self);


	uthread->uu_flag &= ~UT_NOTCANCELPT;

	if ((uthread->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
		if(presyscall != 0) {
			unix_syscall_return(EINTR);
			/* NOTREACHED */
		} else
			thread_abort_safely(self);
	}
}
924
925
926
/*
 * Mark the thread named by uap->thread_port for pthread cancellation.
 * Threads in vfork, already marked, or already cancelled are left
 * untouched; an eligible thread sitting at a cancellation point with
 * cancellation enabled is aborted safely.
 */
int
__pthread_markcancel(__unused proc_t p,
	struct __pthread_markcancel_args *uap, __unused int32_t *retval)
{
	thread_act_t target_act;
	int error = 0;
	struct uthread *uth;

	target_act = (thread_act_t)port_name_to_thread(uap->thread_port);

	if (target_act == THR_ACT_NULL)
		return (ESRCH);

	uth = (struct uthread *)get_bsdthread_info(target_act);

	/* if the thread is in vfork do not cancel */
	if ((uth->uu_flag & (UT_VFORK | UT_CANCEL | UT_CANCELED )) == 0) {
		uth->uu_flag |= (UT_CANCEL | UT_NO_SIGMASK);
		if (((uth->uu_flag & UT_NOTCANCELPT) == 0)
			&& ((uth->uu_flag & UT_CANCELDISABLE) == 0))
			thread_abort_safely(target_act);
	}

	/* drop the reference taken by port_name_to_thread() */
	thread_deallocate(target_act);
	return (error);
}
953
/*
 * if action = 0: return the cancellation state; if marked for
 *                cancellation, transition the thread to canceled
 * if action = 1: enable the cancel handling
 * if action = 2: disable the cancel handling
 */
int
__pthread_canceled(__unused proc_t p,
	struct __pthread_canceled_args *uap, __unused int32_t *retval)
{
	thread_act_t thread;
	struct uthread *uth;
	int action = uap->action;

	thread = current_thread();
	uth = (struct uthread *)get_bsdthread_info(thread);

	switch (action) {
		case 1:
			uth->uu_flag &= ~UT_CANCELDISABLE;
			return(0);
		case 2:
			uth->uu_flag |= UT_CANCELDISABLE;
			return(0);
		case 0:
		default:
			/* if the thread is in vfork do not cancel */
			if((uth->uu_flag & ( UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
				uth->uu_flag &= ~UT_CANCEL;
				uth->uu_flag |= (UT_CANCELED | UT_NO_SIGMASK);
				return(0);
			}
			return(EINVAL);
	}
	/* NOTREACHED: every switch arm returns above */
	return(EINVAL);
}
989
990 void
991 __posix_sem_syscall_return(kern_return_t kern_result)
992 {
993 int error = 0;
994
995 if (kern_result == KERN_SUCCESS)
996 error = 0;
997 else if (kern_result == KERN_ABORTED)
998 error = EINTR;
999 else if (kern_result == KERN_OPERATION_TIMED_OUT)
1000 error = ETIMEDOUT;
1001 else
1002 error = EINVAL;
1003 unix_syscall_return(error);
1004 /* does not return */
1005 }
1006
1007 #if OLD_SEMWAIT_SIGNAL
/*
 * Cancellation-point wrapper around __old_semwait_signal_nocancel().
 *
 * Returns:	0			Success
 *		EINTR
 *		ETIMEDOUT
 *		EINVAL
 *		EFAULT if timespec is NULL
 */
int
__old_semwait_signal(proc_t p, struct __old_semwait_signal_args *uap,
	int32_t *retval)
{
	__pthread_testcancel(0);
	return(__old_semwait_signal_nocancel(p, (struct __old_semwait_signal_nocancel_args *)uap, retval));
}
1022
/*
 * Backend for __old_semwait_signal(): perform the Mach semaphore
 * (timed)wait -- optionally signalling a mutex semaphore first -- without
 * acting as a cancellation point.  The user timespec is copied in from
 * uap->ts; uap->relative selects relative vs. absolute deadline.
 */
int
__old_semwait_signal_nocancel(proc_t p, struct __old_semwait_signal_nocancel_args *uap,
	__unused int32_t *retval)
{

	kern_return_t kern_result;
	int error;
	mach_timespec_t then;
	struct timespec now;
	struct user_timespec ts;
	boolean_t truncated_timeout = FALSE;

	if(uap->timeout) {

		/* copy in the user timespec, widening the 32-bit layout if needed */
		if (IS_64BIT_PROCESS(p)) {
			struct user64_timespec ts64;
			error = copyin(uap->ts, &ts64, sizeof(ts64));
			ts.tv_sec = ts64.tv_sec;
			ts.tv_nsec = ts64.tv_nsec;
		} else {
			struct user32_timespec ts32;
			error = copyin(uap->ts, &ts32, sizeof(ts32));
			ts.tv_sec = ts32.tv_sec;
			ts.tv_nsec = ts32.tv_nsec;
		}

		if (error) {
			return error;
		}

		/*
		 * Mach timeouts carry only 32 bits of seconds; clamp larger
		 * values and remember that we did, so we can report EINTR
		 * instead of pretending the full wait completed.
		 */
		if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) {
			ts.tv_sec = 0xFFFFFFFF;
			ts.tv_nsec = 0;
			truncated_timeout = TRUE;
		}

		if (uap->relative) {
			then.tv_sec = ts.tv_sec;
			then.tv_nsec = ts.tv_nsec;
		} else {
			nanotime(&now);

			/* if time has elapsed, set time to null timespec to bailout rightaway */
			if (now.tv_sec == ts.tv_sec ?
				now.tv_nsec > ts.tv_nsec :
				now.tv_sec > ts.tv_sec) {
				then.tv_sec = 0;
				then.tv_nsec = 0;
			} else {
				/* convert the absolute deadline to a relative interval */
				then.tv_sec = ts.tv_sec - now.tv_sec;
				then.tv_nsec = ts.tv_nsec - now.tv_nsec;
				if (then.tv_nsec < 0) {
					then.tv_nsec += NSEC_PER_SEC;
					then.tv_sec--;
				}
			}
		}

		if (uap->mutex_sem == 0)
			kern_result = semaphore_timedwait_trap_internal((mach_port_name_t)uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
		else
			kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);

	} else {

		if (uap->mutex_sem == 0)
			kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return);
		else

			kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return);
	}

	/* map the Mach result onto POSIX errno values */
	if (kern_result == KERN_SUCCESS && !truncated_timeout)
		return(0);
	else if (kern_result == KERN_SUCCESS && truncated_timeout)
		return(EINTR); /* simulate an exceptional condition because Mach doesn't support a longer timeout */
	else if (kern_result == KERN_ABORTED)
		return(EINTR);
	else if (kern_result == KERN_OPERATION_TIMED_OUT)
		return(ETIMEDOUT);
	else
		return(EINVAL);
}
1106 #endif /* OLD_SEMWAIT_SIGNAL*/
1107
/*
 * __semwait_signal
 *
 * Cancelable variant of the semwait-signal syscall: marks a pthread
 * cancellation point, then forwards to __semwait_signal_nocancel()
 * (the argument structures are layout-compatible).
 *
 * Returns: 0 Success
 * EINTR
 * ETIMEDOUT
 * EINVAL
 * EFAULT if timespec is NULL
 */
int
__semwait_signal(proc_t p, struct __semwait_signal_args *uap,
    int32_t *retval)
{
	/* 0 presumably selects the "mark only" cancellation behavior
	 * (compare __sigwait, which passes 1) — TODO confirm against
	 * __pthread_testcancel()'s definition. */
	__pthread_testcancel(0);
	return(__semwait_signal_nocancel(p, (struct __semwait_signal_nocancel_args *)uap, retval));
}
1122
/*
 * __semwait_signal_nocancel
 *
 * Wait on the Mach semaphore named by uap->cond_sem, optionally
 * signalling uap->mutex_sem first (the pthread condvar wait pattern).
 * uap->tv_sec/tv_nsec carry an optional timeout, interpreted as
 * relative or absolute per uap->relative; uap->timeout selects whether
 * a timeout is used at all.
 *
 * Returns: 0 Success
 * EINTR wait aborted, or the timeout had to be truncated to
 * fit Mach's 32-bit seconds field
 * ETIMEDOUT
 * EINVAL any other Mach status
 */
int
__semwait_signal_nocancel(__unused proc_t p, struct __semwait_signal_nocancel_args *uap,
    __unused int32_t *retval)
{

	kern_return_t kern_result;
	mach_timespec_t then;		/* relative timeout handed to Mach */
	struct timespec now;
	struct user_timespec ts;
	boolean_t truncated_timeout = FALSE;

	if(uap->timeout) {

		ts.tv_sec = uap->tv_sec;
		ts.tv_nsec = uap->tv_nsec;

		/* mach_timespec_t has only 32 bits of seconds; clamp larger
		 * values and remember that we did, so the caller gets EINTR
		 * instead of a false "timed out"/success indication. */
		if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) {
			ts.tv_sec = 0xFFFFFFFF;
			ts.tv_nsec = 0;
			truncated_timeout = TRUE;
		}

		if (uap->relative) {
			then.tv_sec = ts.tv_sec;
			then.tv_nsec = ts.tv_nsec;
		} else {
			nanotime(&now);

			/* if time has elapsed, set time to null timepsec to bailout rightaway */
			if (now.tv_sec == ts.tv_sec ?
					now.tv_nsec > ts.tv_nsec :
					now.tv_sec > ts.tv_sec) {
				then.tv_sec = 0;
				then.tv_nsec = 0;
			} else {
				/* convert absolute deadline to a relative wait,
				 * borrowing a second if nanoseconds underflow */
				then.tv_sec = ts.tv_sec - now.tv_sec;
				then.tv_nsec = ts.tv_nsec - now.tv_nsec;
				if (then.tv_nsec < 0) {
					then.tv_nsec += NSEC_PER_SEC;
					then.tv_sec--;
				}
			}
		}

		/* mutex_sem == 0 means "wait only"; otherwise signal it first */
		if (uap->mutex_sem == 0)
			kern_result = semaphore_timedwait_trap_internal((mach_port_name_t)uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
		else
			kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);

	} else {

		if (uap->mutex_sem == 0)
			kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return);
		else

			kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return);
	}

	/* Map the Mach status onto the BSD errno space. */
	if (kern_result == KERN_SUCCESS && !truncated_timeout)
		return(0);
	else if (kern_result == KERN_SUCCESS && truncated_timeout)
		return(EINTR); /* simulate an exceptional condition because Mach doesn't support a longer timeout */
	else if (kern_result == KERN_ABORTED)
		return(EINTR);
	else if (kern_result == KERN_OPERATION_TIMED_OUT)
		return(ETIMEDOUT);
	else
		return(EINVAL);
}
1192
1193
1194 int
1195 __pthread_kill(__unused proc_t p, struct __pthread_kill_args *uap,
1196 __unused int32_t *retval)
1197 {
1198 thread_t target_act;
1199 int error = 0;
1200 int signum = uap->sig;
1201 struct uthread *uth;
1202
1203 target_act = (thread_t)port_name_to_thread(uap->thread_port);
1204
1205 if (target_act == THREAD_NULL)
1206 return (ESRCH);
1207 if ((u_int)signum >= NSIG) {
1208 error = EINVAL;
1209 goto out;
1210 }
1211
1212 uth = (struct uthread *)get_bsdthread_info(target_act);
1213
1214 if (uth->uu_flag & UT_NO_SIGMASK) {
1215 error = ESRCH;
1216 goto out;
1217 }
1218
1219 if (signum)
1220 psignal_uthread(target_act, signum);
1221 out:
1222 thread_deallocate(target_act);
1223 return (error);
1224 }
1225
1226
1227 int
1228 __pthread_sigmask(__unused proc_t p, struct __pthread_sigmask_args *uap,
1229 __unused int32_t *retval)
1230 {
1231 user_addr_t set = uap->set;
1232 user_addr_t oset = uap->oset;
1233 sigset_t nset;
1234 int error = 0;
1235 struct uthread *ut;
1236 sigset_t oldset;
1237
1238 ut = (struct uthread *)get_bsdthread_info(current_thread());
1239 oldset = ut->uu_sigmask;
1240
1241 if (set == USER_ADDR_NULL) {
1242 /* need only old mask */
1243 goto out;
1244 }
1245
1246 error = copyin(set, &nset, sizeof(sigset_t));
1247 if (error)
1248 goto out;
1249
1250 switch (uap->how) {
1251 case SIG_BLOCK:
1252 ut->uu_sigmask |= (nset & ~sigcantmask);
1253 break;
1254
1255 case SIG_UNBLOCK:
1256 ut->uu_sigmask &= ~(nset);
1257 signal_setast(current_thread());
1258 break;
1259
1260 case SIG_SETMASK:
1261 ut->uu_sigmask = (nset & ~sigcantmask);
1262 signal_setast(current_thread());
1263 break;
1264
1265 default:
1266 error = EINVAL;
1267
1268 }
1269 out:
1270 if (!error && oset != USER_ADDR_NULL)
1271 copyout(&oldset, oset, sizeof(sigset_t));
1272
1273 return(error);
1274 }
1275
/*
 * __sigwait
 *
 * Cancelable front end for __sigwait_nocancel(): marks this syscall
 * as a pthread cancellation point, then forwards (the argument
 * structures are layout-compatible).
 *
 * Returns: 0 Success
 * EINVAL
 * copyin:EFAULT
 * copyout:EFAULT
 */
int
__sigwait(proc_t p, struct __sigwait_args *uap, int32_t *retval)
{
	/* 1 presumably requests an immediate check for a pending cancel
	 * (compare __semwait_signal, which passes 0) — TODO confirm. */
	__pthread_testcancel(1);
	return(__sigwait_nocancel(p, (struct __sigwait_nocancel_args *)uap, retval));
}
1288
/*
 * __sigwait_nocancel
 *
 * Wait for one of the signals in the user-supplied set (uap->set) to
 * become pending on some thread of the process. If one is already
 * pending on any thread it is consumed immediately; otherwise the
 * calling thread unblocks exactly that set and sleeps until delivery.
 * The winning signal number is copied out to uap->sig when non-NULL.
 *
 * Returns: 0 Success
 * EINVAL NULL set, empty effective set, or active vfork parent
 * copyin:EFAULT
 * copyout:EFAULT
 */
int
__sigwait_nocancel(proc_t p, struct __sigwait_nocancel_args *uap, __unused int32_t *retval)
{
	struct uthread *ut;		/* the calling thread */
	struct uthread *uth;		/* thread the signal is pending on */
	int error = 0;
	sigset_t mask;
	sigset_t siglist;		/* waitable set, minus unmaskable signals */
	sigset_t sigw=0;		/* matched pending signals */
	int signum;

	ut = (struct uthread *)get_bsdthread_info(current_thread());

	if (uap->set == USER_ADDR_NULL)
		return(EINVAL);

	error = copyin(uap->set, &mask, sizeof(sigset_t));
	if (error)
		return(error);

	/* SIGKILL/SIGSTOP cannot be sigwait()ed for */
	siglist = (mask & ~sigcantmask);

	if (siglist == 0)
		return(EINVAL);

	proc_lock(p);
	if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
		/* parent borrowed by an active vfork child: refuse */
		proc_unlock(p);
		return(EINVAL);
	} else {
		/* scan every thread for an already-pending match */
		proc_signalstart(p, 1);
		TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
			if ( (sigw = uth->uu_siglist & siglist) ) {
				break;
			}
		}
		proc_signalend(p, 1);
	}

	if (sigw) {
		/* The signal was pending on a thread */
		goto sigwait1;
	}
	/*
	 * When returning from sigwait, we want
	 * the old mask to be restored after the
	 * signal handler has finished. Thus, we
	 * save it here and mark the sigacts structure
	 * to indicate this.
	 */
	uth = ut; /* wait for it to be delivered to us */
	ut->uu_oldmask = ut->uu_sigmask;
	ut->uu_flag |= UT_SAS_OLDMASK;
	/* NOTE(review): siglist was already checked for zero above and has
	 * not changed, so this branch appears unreachable. */
	if (siglist == (sigset_t)0) {
		proc_unlock(p);
		return(EINVAL);
	}
	/* SIGKILL and SIGSTOP are not maskable as well */
	ut->uu_sigmask = ~(siglist|sigcantmask);
	ut->uu_sigwait = siglist;

	/* No Continuations for now */
	error = msleep((caddr_t)&ut->uu_sigwait, &p->p_mlock, PPAUSE|PCATCH, "pause", 0);

	if (error == ERESTART)
		error = 0;

	sigw = (ut->uu_sigwait & siglist);
	/* restore the mask saved before sleeping */
	ut->uu_sigmask = ut->uu_oldmask;
	ut->uu_oldmask = 0;
	ut->uu_flag &= ~UT_SAS_OLDMASK;
sigwait1:
	ut->uu_sigwait = 0;
	if (!error) {
		signum = ffs((unsigned int)sigw);
		if (!signum)
			panic("sigwait with no signal wakeup");
		/* Clear the pending signal in the thread it was delivered */
		uth->uu_siglist &= ~(sigmask(signum));

#if CONFIG_DTRACE
		DTRACE_PROC2(signal__clear, int, signum, siginfo_t *, &(ut->t_dtrace_siginfo));
#endif

		proc_unlock(p);
		if (uap->sig != USER_ADDR_NULL)
			error = copyout(&signum, uap->sig, sizeof(int));
	} else
		proc_unlock(p);

	return(error);

}
1382
1383 int
1384 sigaltstack(__unused proc_t p, struct sigaltstack_args *uap, __unused int32_t *retval)
1385 {
1386 struct kern_sigaltstack ss;
1387 struct kern_sigaltstack *pstk;
1388 int error;
1389 struct uthread *uth;
1390 int onstack;
1391
1392 uth = (struct uthread *)get_bsdthread_info(current_thread());
1393
1394 pstk = &uth->uu_sigstk;
1395 if ((uth->uu_flag & UT_ALTSTACK) == 0)
1396 uth->uu_sigstk.ss_flags |= SA_DISABLE;
1397 onstack = pstk->ss_flags & SA_ONSTACK;
1398 if (uap->oss) {
1399 if (IS_64BIT_PROCESS(p)) {
1400 struct user64_sigaltstack ss64;
1401 sigaltstack_kern_to_user64(pstk, &ss64);
1402 error = copyout(&ss64, uap->oss, sizeof(ss64));
1403 } else {
1404 struct user32_sigaltstack ss32;
1405 sigaltstack_kern_to_user32(pstk, &ss32);
1406 error = copyout(&ss32, uap->oss, sizeof(ss32));
1407 }
1408 if (error)
1409 return (error);
1410 }
1411 if (uap->nss == USER_ADDR_NULL)
1412 return (0);
1413 if (IS_64BIT_PROCESS(p)) {
1414 struct user64_sigaltstack ss64;
1415 error = copyin(uap->nss, &ss64, sizeof(ss64));
1416 sigaltstack_user64_to_kern(&ss64, &ss);
1417 } else {
1418 struct user32_sigaltstack ss32;
1419 error = copyin(uap->nss, &ss32, sizeof(ss32));
1420 sigaltstack_user32_to_kern(&ss32, &ss);
1421 }
1422 if (error)
1423 return (error);
1424 if ((ss.ss_flags & ~SA_DISABLE) != 0) {
1425 return(EINVAL);
1426 }
1427
1428 if (ss.ss_flags & SA_DISABLE) {
1429 /* if we are here we are not in the signal handler ;so no need to check */
1430 if (uth->uu_sigstk.ss_flags & SA_ONSTACK)
1431 return (EINVAL);
1432 uth->uu_flag &= ~UT_ALTSTACK;
1433 uth->uu_sigstk.ss_flags = ss.ss_flags;
1434 return (0);
1435 }
1436 if (onstack)
1437 return (EPERM);
1438 /* The older stacksize was 8K, enforce that one so no compat problems */
1439 #define OLDMINSIGSTKSZ 8*1024
1440 if (ss.ss_size < OLDMINSIGSTKSZ)
1441 return (ENOMEM);
1442 uth->uu_flag |= UT_ALTSTACK;
1443 uth->uu_sigstk= ss;
1444 return (0);
1445 }
1446
1447 int
1448 kill(proc_t cp, struct kill_args *uap, __unused int32_t *retval)
1449 {
1450 proc_t p;
1451 kauth_cred_t uc = kauth_cred_get();
1452 int posix = uap->posix; /* !0 if posix behaviour desired */
1453
1454 AUDIT_ARG(pid, uap->pid);
1455 AUDIT_ARG(signum, uap->signum);
1456
1457 if ((u_int)uap->signum >= NSIG)
1458 return (EINVAL);
1459 if (uap->pid > 0) {
1460 /* kill single process */
1461 if ((p = proc_find(uap->pid)) == NULL) {
1462 if ((p = pzfind(uap->pid)) != NULL) {
1463 /*
1464 * IEEE Std 1003.1-2001: return success
1465 * when killing a zombie.
1466 */
1467 return (0);
1468 }
1469 return (ESRCH);
1470 }
1471 AUDIT_ARG(process, p);
1472 if (!cansignal(cp, uc, p, uap->signum, 0)) {
1473 proc_rele(p);
1474 return(EPERM);
1475 }
1476 if (uap->signum)
1477 psignal(p, uap->signum);
1478 proc_rele(p);
1479 return (0);
1480 }
1481 switch (uap->pid) {
1482 case -1: /* broadcast signal */
1483 return (killpg1(cp, uap->signum, 0, 1, posix));
1484 case 0: /* signal own process group */
1485 return (killpg1(cp, uap->signum, 0, 0, posix));
1486 default: /* negative explicit process group */
1487 return (killpg1(cp, uap->signum, -(uap->pid), 0, posix));
1488 }
1489 /* NOTREACHED */
1490 }
1491
1492 static int
1493 killpg1_filt(proc_t p, void * arg)
1494 {
1495 struct killpg1_filtargs * kfargp = (struct killpg1_filtargs *)arg;
1496 proc_t cp = kfargp->cp;
1497 int posix = kfargp->posix;
1498
1499
1500 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
1501 (!posix && p == cp))
1502 return(0);
1503 else
1504 return(1);
1505 }
1506
1507
1508 static int
1509 killpg1_pgrpfilt(proc_t p, __unused void * arg)
1510 {
1511 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
1512 (p->p_stat == SZOMB))
1513 return(0);
1514 else
1515 return(1);
1516 }
1517
1518
1519
1520 static int
1521 killpg1_callback(proc_t p, void * arg)
1522 {
1523 struct killpg1_iterargs * kargp = (struct killpg1_iterargs *)arg;
1524 proc_t cp = kargp->cp;
1525 kauth_cred_t uc = kargp->uc; /* refcounted by the caller safe to use internal fields */
1526 int signum = kargp->signum;
1527 int * nfoundp = kargp->nfoundp;
1528 int n;
1529 int zombie = 0;
1530 int error = 0;
1531
1532 if ((kargp->zombie != 0) && ((p->p_listflag & P_LIST_EXITED) == P_LIST_EXITED))
1533 zombie = 1;
1534
1535 if (zombie != 0) {
1536 proc_list_lock();
1537 error = cansignal(cp, uc, p, signum, zombie);
1538 proc_list_unlock();
1539
1540 if (error != 0 && nfoundp != NULL) {
1541 n = *nfoundp;
1542 *nfoundp = n+1;
1543 }
1544 } else {
1545 if (cansignal(cp, uc, p, signum, 0) == 0)
1546 return(PROC_RETURNED);
1547
1548 if (nfoundp != NULL) {
1549 n = *nfoundp;
1550 *nfoundp = n+1;
1551 }
1552 if (signum != 0)
1553 psignal(p, signum);
1554 }
1555
1556 return(PROC_RETURNED);
1557 }
1558
/*
 * Common code for kill process group/broadcast kill.
 * cp is calling process.
 *
 * all != 0 => broadcast to every signalable process (including
 * zombies, which are counted but not signalled); otherwise signal the
 * group 'pgid' (0 meaning the caller's own group).
 *
 * Returns: 0 Success
 * ESRCH no such process group / nothing found (non-POSIX)
 * EPERM nothing signalable found (POSIX semantics)
 */
int
killpg1(proc_t cp, int signum, int pgid, int all, int posix)
{
	kauth_cred_t uc;
	struct pgrp *pgrp;
	int nfound = 0;		/* incremented by killpg1_callback */
	struct killpg1_iterargs karg;
	struct killpg1_filtargs kfarg;
	int error = 0;

	/* take a credential reference that outlives the iteration */
	uc = kauth_cred_proc_ref(cp);
	if (all) {
		/*
		 * broadcast
		 */
		kfarg.posix = posix;
		kfarg.cp = cp;

		karg.cp = cp;
		karg.uc = uc;
		karg.nfoundp = &nfound;
		karg.signum = signum;
		karg.zombie = 1;

		proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST), killpg1_callback, &karg, killpg1_filt, (void *)&kfarg);

	} else {
		if (pgid == 0) {
			/*
			 * zero pgid means send to my process group.
			 */
			pgrp = proc_pgrp(cp);
		} else {
			pgrp = pgfind(pgid);
			if (pgrp == NULL) {
				error = ESRCH;
				goto out;
			}
		}

		karg.nfoundp = &nfound;
		karg.uc = uc;
		karg.signum = signum;
		karg.cp = cp;
		karg.zombie = 0;


		/* PGRP_DROPREF drops the pgrp reference */
		pgrp_iterate(pgrp, PGRP_BLOCKITERATE | PGRP_DROPREF, killpg1_callback, &karg,
			killpg1_pgrpfilt, NULL);
	}
	/* POSIX reports EPERM when targets existed but none were signalable */
	error = (nfound ? 0 : (posix ? EPERM : ESRCH));
out:
	kauth_cred_unref(&uc);
	return (error);
}
1619
1620
1621 /*
1622 * Send a signal to a process group.
1623 */
1624 void
1625 gsignal(int pgid, int signum)
1626 {
1627 struct pgrp *pgrp;
1628
1629 if (pgid && (pgrp = pgfind(pgid))) {
1630 pgsignal(pgrp, signum, 0);
1631 pg_rele(pgrp);
1632 }
1633 }
1634
1635 /*
1636 * Send a signal to a process group. If checkctty is 1,
1637 * limit to members which have a controlling terminal.
1638 */
1639
1640 static int
1641 pgsignal_filt(proc_t p, void * arg)
1642 {
1643 int checkctty = *(int*)arg;
1644
1645 if ((checkctty == 0) || p->p_flag & P_CONTROLT)
1646 return(1);
1647 else
1648 return(0);
1649 }
1650
1651
1652 static int
1653 pgsignal_callback(proc_t p, void * arg)
1654 {
1655 int signum = *(int*)arg;
1656
1657 psignal(p, signum);
1658 return(PROC_RETURNED);
1659 }
1660
1661
1662 void
1663 pgsignal(struct pgrp *pgrp, int signum, int checkctty)
1664 {
1665 if (pgrp != PGRP_NULL) {
1666 pgrp_iterate(pgrp, PGRP_BLOCKITERATE, pgsignal_callback, &signum, pgsignal_filt, &checkctty);
1667 }
1668 }
1669
1670
1671 void
1672 tty_pgsignal(struct tty *tp, int signum, int checkctty)
1673 {
1674 struct pgrp * pg;
1675
1676 pg = tty_pgrp(tp);
1677 if (pg != PGRP_NULL) {
1678 pgrp_iterate(pg, PGRP_BLOCKITERATE, pgsignal_callback, &signum, pgsignal_filt, &checkctty);
1679 pg_rele(pg);
1680 }
1681 }
/*
 * Send a signal caused by a trap to a specific thread.
 *
 * Only signals in 'threadmask' are deliverable this way; others are
 * silently dropped, as are signals the process ignores (unless it is
 * being traced). 'code' is stashed in the uthread for the handler.
 */
void
threadsignal(thread_t sig_actthread, int signum, mach_exception_code_t code)
{
	struct uthread *uth;
	struct task * sig_task;
	proc_t p;
	int mask;

	if ((u_int)signum >= NSIG || signum == 0)
		return;

	/* only thread-directed (trap) signals may be posted here */
	mask = sigmask(signum);
	if ((mask & threadmask) == 0)
		return;
	sig_task = get_threadtask(sig_actthread);
	p = (proc_t)(get_bsdtask_info(sig_task));

	uth = get_bsdthread_info(sig_actthread);
	/* in a vfork child, account against the borrowed child proc */
	if (uth->uu_flag & UT_VFORK)
		p = uth->uu_proc;

	proc_lock(p);
	/* ignored and untraced => drop without recording anything */
	if (!(p->p_lflag & P_LTRACED) && (p->p_sigignore & mask)) {
		proc_unlock(p);
		return;
	}

	uth->uu_siglist |= mask;
	uth->uu_code = code;
	proc_unlock(p);

	/* mark on process as well */
	signal_setast(sig_actthread);
}
1719
/*
 * get_signalthread
 *
 * Picks an appropriate thread from a process to target with a signal.
 *
 * Called with proc locked.
 * Returns thread with BSD ast set.
 *
 * We attempt to deliver a proc-wide signal to the first thread in the task.
 * This allows single threaded applications which use signals to
 * be able to be linked with multithreaded libraries.
 *
 * Returns: KERN_SUCCESS *thr set to the chosen thread (AST set)
 * KERN_FAILURE no eligible thread found
 */
static kern_return_t
get_signalthread(proc_t p, int signum, thread_t * thr)
{
	struct uthread *uth;
	sigset_t mask = sigmask(signum);
	thread_t sig_thread;
	struct task * sig_task = p->task;
	kern_return_t kret;

	*thr = THREAD_NULL;

	/* a vfork parent must deliver via the borrowed child thread */
	if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
		sig_thread = p->p_vforkact;
		kret = check_actforsig(sig_task, sig_thread, 1);
		if (kret == KERN_SUCCESS) {
			*thr = sig_thread;
			return(KERN_SUCCESS);
		}else
			return(KERN_FAILURE);
	}

	/* prefer a thread that has not masked the signal (or is in
	 * sigwait for it) and has not opted out of signals entirely */
	TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
		if(((uth->uu_flag & UT_NO_SIGMASK)== 0) &&
			(((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask))) {
			if (check_actforsig(p->task, uth->uu_context.vc_thread, 1) == KERN_SUCCESS) {
				*thr = uth->uu_context.vc_thread;
				return(KERN_SUCCESS);
			}
		}
	}
	/* fall back to any thread the task considers signalable */
	if (get_signalact(p->task, thr, 1) == KERN_SUCCESS) {
		return(KERN_SUCCESS);
	}

	return(KERN_FAILURE);
}
1768
/*
 * Send the signal to the process. If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 * o When a stop signal is sent to a sleeping process that takes the
 * default action, the process is stopped without awakening it.
 * o SIGCONT restarts stopped processes (or puts them back to sleep)
 * regardless of the signal action (eg, blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 *
 * 'flavor' selects the delivery mode (see the psignal* wrappers below):
 * PSIG_VFORK target is the borrowed vfork thread; task/thread args used
 * PSIG_THREAD target exactly 'thread'; proc derived from its task
 * PSIG_TRY_THREAD prefer 'thread', fall back to any willing thread
 * PSIG_LOCKED caller already bracketed with proc_signalstart/end
 */
static void
psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum)
{
	int prop;
	user_addr_t action = USER_ADDR_NULL;
	proc_t sig_proc;
	thread_t sig_thread;
	task_t sig_task;
	int mask;
	struct uthread *uth;
	kern_return_t kret;
	uid_t r_uid;
	proc_t pp;
	kauth_cred_t my_cred;

	if ((u_int)signum >= NSIG || signum == 0)
		panic("psignal: bad signal number %d", signum);

	mask = sigmask(signum);
	prop = sigprop[signum];

#if SIGNAL_DEBUG
	if(rdebug_proc && (p != PROC_NULL) && (p == rdebug_proc)) {
		ram_printf(3);
	}
#endif /* SIGNAL_DEBUG */

	/* catch unexpected initproc kills early for easier debuggging */
	if (signum == SIGKILL && p == initproc)
		panic_plain("unexpected SIGKILL of %s %s",
			(p->p_name[0] != '\0' ? p->p_name : "initproc"),
			((p->p_csflags & CS_KILLED) ? "(CS_KILLED)" : ""));

	/*
	 * We will need the task pointer later. Grab it now to
	 * check for a zombie process. Also don't send signals
	 * to kernel internal tasks.
	 */
	if (flavor & PSIG_VFORK) {
		sig_task = task;
		sig_thread = thread;
		sig_proc = p;
	} else if (flavor & PSIG_THREAD) {
		sig_task = get_threadtask(thread);
		sig_thread = thread;
		sig_proc = (proc_t)get_bsdtask_info(sig_task);
	} else if (flavor & PSIG_TRY_THREAD) {
		assert((thread == current_thread()) && (p == current_proc()));
		sig_task = p->task;
		sig_thread = thread;
		sig_proc = p;
	} else {
		sig_task = p->task;
		sig_thread = THREAD_NULL;
		sig_proc = p;
	}

	if ((sig_task == TASK_NULL) || is_kerneltask(sig_task))
		return;

	/*
	 * do not send signals to the process that has the thread
	 * doing a reboot(). Not doing so will mark that thread aborted
	 * and can cause IO failures wich will cause data loss. There's
	 * also no need to send a signal to a process that is in the middle
	 * of being torn down.
	 */
	if (ISSET(sig_proc->p_flag, P_REBOOT) || ISSET(sig_proc->p_lflag, P_LEXIT)) {
		DTRACE_PROC3(signal__discard, thread_t, sig_thread, proc_t, sig_proc, int, signum);
		return;
	}

	if( (flavor & (PSIG_VFORK | PSIG_THREAD)) == 0) {
		proc_knote(sig_proc, NOTE_SIGNAL | signum);
	}

	if ((flavor & PSIG_LOCKED)== 0)
		proc_signalstart(sig_proc, 0);

	/* Don't send signals to a process that has ignored them. */
	if (((flavor & PSIG_VFORK) == 0) && ((sig_proc->p_lflag & P_LTRACED) == 0) && (sig_proc->p_sigignore & mask)) {
		DTRACE_PROC3(signal__discard, thread_t, sig_thread, proc_t, sig_proc, int, signum);
		goto sigout_unlocked;
	}

	/*
	 * The proc_lock prevents the targeted thread from being deallocated
	 * or handling the signal until we're done signaling it.
	 *
	 * Once the proc_lock is dropped, we have no guarantee the thread or uthread exists anymore.
	 *
	 * XXX: What if the thread goes inactive after the thread passes bsd ast point?
	 */
	proc_lock(sig_proc);

	/* pick the thread that will carry the signal, per flavor */
	if (flavor & PSIG_VFORK) {
		action = SIG_DFL;
		act_set_astbsd(sig_thread);
		kret = KERN_SUCCESS;
	} else if (flavor & PSIG_TRY_THREAD) {
		uth = get_bsdthread_info(sig_thread);
		if (((uth->uu_flag & UT_NO_SIGMASK) == 0) &&
				(((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask)) &&
				((kret = check_actforsig(sig_proc->task, sig_thread, 1)) == KERN_SUCCESS)) {
			/* deliver to specified thread */
		} else {
			/* deliver to any willing thread */
			kret = get_signalthread(sig_proc, signum, &sig_thread);
		}
	} else if (flavor & PSIG_THREAD) {
		/* If successful return with ast set */
		kret = check_actforsig(sig_task, sig_thread, 1);
	} else {
		/* If successful return with ast set */
		kret = get_signalthread(sig_proc, signum, &sig_thread);
	}

	if (kret != KERN_SUCCESS) {
		DTRACE_PROC3(signal__discard, thread_t, sig_thread, proc_t, sig_proc, int, signum);
		proc_unlock(sig_proc);
		goto sigout_unlocked;
	}

	uth = get_bsdthread_info(sig_thread);

	/*
	 * If proc is traced, always give parent a chance.
	 */

	if ((flavor & PSIG_VFORK) == 0) {
		if (sig_proc->p_lflag & P_LTRACED)
			action = SIG_DFL;
		else {
			/*
			 * If the signal is being ignored,
			 * then we forget about it immediately.
			 * (Note: we don't set SIGCONT in p_sigignore,
			 * and if it is set to SIG_IGN,
			 * action will be SIG_DFL here.)
			 */
			if (sig_proc->p_sigignore & mask)
				goto sigout_locked;

			if (uth->uu_sigwait & mask)
				action = KERN_SIG_WAIT;
			else if (uth->uu_sigmask & mask)
				action = KERN_SIG_HOLD;
			else if (sig_proc->p_sigcatch & mask)
				action = KERN_SIG_CATCH;
			else
				action = SIG_DFL;
		}
	}

	/* TODO: p_nice isn't hooked up to the scheduler... */
	if (sig_proc->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
		(sig_proc->p_lflag & P_LTRACED) == 0)
		sig_proc->p_nice = NZERO;

	/* a continue signal cancels pending stop signals, and vice versa */
	if (prop & SA_CONT)
		uth->uu_siglist &= ~stopsigmask;

	if (prop & SA_STOP) {
		struct pgrp *pg;
		/*
		 * If sending a tty stop signal to a member of an orphaned
		 * process group, discard the signal here if the action
		 * is default; don't stop the process below if sleeping,
		 * and don't clear any pending SIGCONT.
		 */
		pg = proc_pgrp(sig_proc);
		if (prop & SA_TTYSTOP && pg->pg_jobc == 0 &&
			action == SIG_DFL) {
			pg_rele(pg);
			goto sigout_locked;
		}
		pg_rele(pg);
		uth->uu_siglist &= ~contsigmask;
	}

	uth->uu_siglist |= mask;

	/*
	 * Defer further processing for signals which are held,
	 * except that stopped processes must be continued by SIGCONT.
	 */
	/* vfork will not go thru as action is SIG_DFL */
	if ((action == KERN_SIG_HOLD) && ((prop & SA_CONT) == 0 || sig_proc->p_stat != SSTOP))
		goto sigout_locked;

	/*
	 * SIGKILL priority twiddling moved here from above because
	 * it needs sig_thread. Could merge it into large switch
	 * below if we didn't care about priority for tracing
	 * as SIGKILL's action is always SIG_DFL.
	 *
	 * TODO: p_nice isn't hooked up to the scheduler...
	 */
	if ((signum == SIGKILL) && (sig_proc->p_nice > NZERO)) {
		sig_proc->p_nice = NZERO;
	}

	/*
	 * Process is traced - wake it up (if not already
	 * stopped) so that it can discover the signal in
	 * issig() and stop for the parent.
	 */
	if (sig_proc->p_lflag & P_LTRACED) {
		if (sig_proc->p_stat != SSTOP)
			goto runlocked;
		else
			goto sigout_locked;
	}

	if ((flavor & PSIG_VFORK) != 0)
		goto runlocked;

	if (action == KERN_SIG_WAIT) {
#if CONFIG_DTRACE
		/*
		 * DTrace proc signal-clear returns a siginfo_t. Collect the needed info.
		 */
		r_uid = kauth_getruid(); /* per thread credential; protected by our thread context */

		bzero((caddr_t)&(uth->t_dtrace_siginfo), sizeof(uth->t_dtrace_siginfo));

		uth->t_dtrace_siginfo.si_signo = signum;
		uth->t_dtrace_siginfo.si_pid = current_proc()->p_pid;
		uth->t_dtrace_siginfo.si_status = W_EXITCODE(signum, 0);
		uth->t_dtrace_siginfo.si_uid = r_uid;
		uth->t_dtrace_siginfo.si_code = 0;
#endif
		/* hand the signal straight to the sigwait()ing thread */
		uth->uu_sigwait = mask;
		uth->uu_siglist &= ~mask;
		wakeup(&uth->uu_sigwait);
		/* if it is SIGCONT resume whole process */
		if (prop & SA_CONT) {
			OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
			sig_proc->p_contproc = current_proc()->p_pid;
			(void) task_resume_internal(sig_task);
		}
		goto sigout_locked;
	}

	if (action != SIG_DFL) {
		/*
		 * User wants to catch the signal.
		 * Wake up the thread, but don't un-suspend it
		 * (except for SIGCONT).
		 */
		if (prop & SA_CONT) {
			OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
			(void) task_resume_internal(sig_task);
			sig_proc->p_stat = SRUN;
		} else if (sig_proc->p_stat == SSTOP) {
			goto sigout_locked;
		}
		/*
		 * Fill out siginfo structure information to pass to the
		 * signalled process/thread sigaction handler, when it
		 * wakes up. si_code is 0 because this is an ordinary
		 * signal, not a SIGCHLD, and so si_status is the signal
		 * number itself, instead of the child process exit status.
		 * We shift this left because it will be shifted right before
		 * it is passed to user space. kind of ugly to use W_EXITCODE
		 * this way, but it beats defining a new macro.
		 *
		 * Note: Avoid the SIGCHLD recursion case!
		 */
		if (signum != SIGCHLD) {
			r_uid = kauth_getruid();

			sig_proc->si_pid = current_proc()->p_pid;
			sig_proc->si_status = W_EXITCODE(signum, 0);
			sig_proc->si_uid = r_uid;
			sig_proc->si_code = 0;
		}

		goto runlocked;
	} else {
		/* Default action - varies */
		if (mask & stopsigmask) {
			/*
			 * These are the signals which by default
			 * stop a process.
			 *
			 * Don't clog system with children of init
			 * stopped from the keyboard.
			 */
			if (!(prop & SA_STOP) && sig_proc->p_pptr == initproc) {
				uth->uu_siglist &= ~mask;
				proc_unlock(sig_proc);
				/* siglock still locked, proc_lock not locked */
				psignal_locked(sig_proc, SIGKILL);
				goto sigout_unlocked;
			}

			/*
			 * Stop the task
			 * if task hasn't already been stopped by
			 * a signal.
			 */
			uth->uu_siglist &= ~mask;
			if (sig_proc->p_stat != SSTOP) {
				sig_proc->p_xstat = signum;
				sig_proc->p_stat = SSTOP;
				OSBitAndAtomic(~((uint32_t)P_CONTINUED), &sig_proc->p_flag);
				sig_proc->p_lflag &= ~P_LWAITED;
				proc_unlock(sig_proc);

				pp = proc_parentholdref(sig_proc);
				stop(sig_proc, pp);
				if (( pp != PROC_NULL) && ((pp->p_flag & P_NOCLDSTOP) == 0)) {

					my_cred = kauth_cred_proc_ref(sig_proc);
					r_uid = kauth_cred_getruid(my_cred);
					kauth_cred_unref(&my_cred);

					proc_lock(sig_proc);
					pp->si_pid = sig_proc->p_pid;
					/*
					 * POSIX: sigaction for a stopped child
					 * when sent to the parent must set the
					 * child's signal number into si_status.
					 */
					if (signum != SIGSTOP)
						pp->si_status = WEXITSTATUS(sig_proc->p_xstat);
					else
						pp->si_status = W_EXITCODE(signum, signum);
					pp->si_code = CLD_STOPPED;
					pp->si_uid = r_uid;
					proc_unlock(sig_proc);

					psignal(pp, SIGCHLD);
				}
				if (pp != PROC_NULL) {
					proc_parentdropref(pp, 0);
				}

				goto sigout_unlocked;
			}

			goto sigout_locked;
		}

		/* NOTE(review): passes 'p' (may be PROC_NULL for PSIG_THREAD)
		 * rather than sig_proc — confirm intended. */
		DTRACE_PROC3(signal__send, thread_t, sig_thread, proc_t, p, int, signum);

		switch (signum) {
			/*
			 * Signals ignored by default have been dealt
			 * with already, since their bits are on in
			 * p_sigignore.
			 */

		case SIGKILL:
			/*
			 * Kill signal always sets process running and
			 * unsuspends it.
			 */
			/*
			 * Process will be running after 'run'
			 */
			sig_proc->p_stat = SRUN;
			/*
			 * In scenarios where suspend/resume are racing
			 * the signal we are missing AST_BSD by the time
			 * we get here, set again to avoid races. This
			 * was the scenario with spindump enabled shutdowns.
			 * We would need to cover this approp down the line.
			 */
			act_set_astbsd(sig_thread);
			thread_abort(sig_thread);

			goto sigout_locked;

		case SIGCONT:
			/*
			 * Let the process run. If it's sleeping on an
			 * event, it remains so.
			 */
			OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
			sig_proc->p_contproc = sig_proc->p_pid;

			(void) task_resume_internal(sig_task);

			/*
			 * When processing a SIGCONT, we need to check
			 * to see if there are signals pending that
			 * were not delivered because we had been
			 * previously stopped. If that's the case,
			 * we need to thread_abort_safely() to trigger
			 * interruption of the current system call to
			 * cause their handlers to fire. If it's only
			 * the SIGCONT, then don't wake up.
			 */
			if (((flavor & (PSIG_VFORK|PSIG_THREAD)) == 0) && (((uth->uu_siglist & ~uth->uu_sigmask) & ~sig_proc->p_sigignore) & ~mask)) {
				uth->uu_siglist &= ~mask;
				sig_proc->p_stat = SRUN;
				goto runlocked;
			}

			uth->uu_siglist &= ~mask;
			sig_proc->p_stat = SRUN;
			goto sigout_locked;

		default:
			/*
			 * A signal which has a default action of killing
			 * the process, and for which there is no handler,
			 * needs to act like SIGKILL
			 */
			if (((flavor & (PSIG_VFORK|PSIG_THREAD)) == 0) && (action == SIG_DFL) && (prop & SA_KILL)) {
				sig_proc->p_stat = SRUN;
				thread_abort(sig_thread);
				goto sigout_locked;
			}

			/*
			 * All other signals wake up the process, but don't
			 * resume it.
			 */
			if (sig_proc->p_stat == SSTOP) {
				goto sigout_locked;
			}
			goto runlocked;
		}
	}
	/*NOTREACHED*/

runlocked:
	/*
	 * If we're being traced (possibly because someone attached us
	 * while we were stopped), check for a signal from the debugger.
	 */
	if (sig_proc->p_stat == SSTOP) {
		if ((sig_proc->p_lflag & P_LTRACED) != 0 && sig_proc->p_xstat != 0)
			uth->uu_siglist |= sigmask(sig_proc->p_xstat);

		if ((flavor & PSIG_VFORK) != 0) {
			sig_proc->p_stat = SRUN;
		}
	} else {
		/*
		 * setrunnable(p) in BSD and
		 * Wake up the thread if it is interruptible.
		 */
		sig_proc->p_stat = SRUN;
		if ((flavor & PSIG_VFORK) == 0)
			thread_abort_safely(sig_thread);
	}

sigout_locked:
	proc_unlock(sig_proc);

sigout_unlocked:
	if ((flavor & PSIG_LOCKED)== 0) {
		proc_signalend(sig_proc, 0);
	}
}
2241
/*
 * Post 'signum' to process 'p'; standard process-wide delivery (no
 * flavor flags — psignal_internal() picks a willing thread).
 */
void
psignal(proc_t p, int signum)
{
	psignal_internal(p, NULL, NULL, 0, signum);
}
2247
/*
 * Like psignal(), but PSIG_LOCKED tells psignal_internal() to skip
 * proc_signalstart()/proc_signalend() — the caller has already
 * bracketed the signal transaction.
 */
void
psignal_locked(proc_t p, int signum)
{
	psignal_internal(p, NULL, NULL, PSIG_LOCKED, signum);
}
2253
/*
 * Post signal 'signum' for a vfork() child: delivery is directed at the
 * given task/thread pair rather than chosen by psignal_internal().
 */
void
psignal_vfork(proc_t p, task_t new_task, thread_t thread, int signum)
{
	psignal_internal(p, new_task, thread, PSIG_VFORK, signum);
}
2259
/*
 * Post signal 'signum' directly to a specific thread; the owning
 * process/task are derived from 'thread' by psignal_internal().
 */
static void
psignal_uthread(thread_t thread, int signum)
{
	psignal_internal(PROC_NULL, TASK_NULL, thread, PSIG_THREAD, signum);
}
2265
/*
 * Same as psignal(), but prefer delivery to 'thread' if possible
 * (falls back to normal thread selection when that thread cannot
 * take the signal).
 */
static void
psignal_try_thread(proc_t p, thread_t thread, int signum)
{
	psignal_internal(p, NULL, thread, PSIG_TRY_THREAD, signum);
}
2272
2273 /*
2274 * If the current process has received a signal (should be caught or cause
2275 * termination, should interrupt current syscall), return the signal number.
2276 * Stop signals with default action are processed immediately, then cleared;
2277 * they aren't returned. This is checked after each entry to the system for
2278 * a syscall or trap (though this can usually be done without calling issignal
2279 * by checking the pending signal masks in the CURSIG macro.) The normal call
2280 * sequence is
2281 *
2282 * while (signum = CURSIG(curproc))
2283 * postsig(signum);
2284 */
/*
 * Scan the current thread's pending signals and return the number of the
 * first one that needs user-visible action (caught, or default-fatal).
 * Default-action stop signals and ptrace stops are handled inline and
 * consumed here rather than returned.  Called and returns with the proc
 * lock held; the lock is dropped and retaken internally around stopping,
 * exception delivery and parent notification.  Returns 0 when nothing is
 * deliverable.
 */
int
issignal_locked(proc_t p)
{
	int signum, mask, prop, sigbits;
	thread_t cur_act;
	struct uthread * ut;
	proc_t pp;
	kauth_cred_t my_cred;
	int retval = 0;
	uid_t r_uid;

	cur_act = current_thread();

#if SIGNAL_DEBUG
	if(rdebug_proc && (p == rdebug_proc)) {
		ram_printf(3);
	}
#endif /* SIGNAL_DEBUG */

	/*
	 * Try to grab the signal lock.
	 */
	if (sig_try_locked(p) <= 0) {
		return(0);
	}

	proc_signalstart(p, 1);

	ut = get_bsdthread_info(cur_act);
	for(;;) {
		/* Deliverable set: pending on this thread and not blocked by it. */
		sigbits = ut->uu_siglist & ~ut->uu_sigmask;

		if (p->p_lflag & P_LPPWAIT)
			sigbits &= ~stopsigmask;
		if (sigbits == 0) {	 	/* no signal to send */
			retval = 0;
			goto out;
		}

		/* Service the lowest-numbered pending signal first. */
		signum = ffs((long)sigbits);
		mask = sigmask(signum);
		prop = sigprop[signum];

		/*
		 * We should see pending but ignored signals
		 * only if P_LTRACED was on when they were posted.
		 */
		if (mask & p->p_sigignore && (p->p_lflag & P_LTRACED) == 0) {
			ut->uu_siglist &= ~mask;		/* take the signal! */
			continue;
		}
		if (p->p_lflag & P_LTRACED && (p->p_lflag & P_LPPWAIT) == 0) {
			task_t task;
			/*
			 * If traced, always stop, and stay
			 * stopped until released by the debugger.
			 */
			/* ptrace debugging */
			p->p_xstat = signum;

			if (p->p_lflag & P_LSIGEXC) {
				/*
				 * Debugger requested Mach-exception-style
				 * notification: raise a software exception
				 * instead of suspending the task.
				 */
				p->sigwait = TRUE;
				p->sigwait_thread = cur_act;
				p->p_stat = SSTOP;
				OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
				p->p_lflag &= ~P_LWAITED;
				ut->uu_siglist &= ~mask;	/* clear the old signal */
				proc_signalend(p, 1);
				proc_unlock(p);
				do_bsdexception(EXC_SOFTWARE, EXC_SOFT_SIGNAL, signum);
				proc_lock(p);
				proc_signalstart(p, 1);
			} else {
				proc_unlock(p);
				my_cred = kauth_cred_proc_ref(p);
				r_uid = kauth_cred_getruid(my_cred);
				kauth_cred_unref(&my_cred);

				/* Record CLD_TRAPPED wait-status info in the parent. */
				pp = proc_parentholdref(p);
				if (pp != PROC_NULL) {
					proc_lock(pp);

					pp->si_pid = p->p_pid;
					pp->si_status = p->p_xstat;
					pp->si_code = CLD_TRAPPED;
					pp->si_uid = r_uid;

					proc_unlock(pp);
				}

				/*
				 * XXX Have to really stop for debuggers;
				 * XXX stop() doesn't do the right thing.
				 */
				task = p->task;
				task_suspend_internal(task);

				proc_lock(p);
				p->sigwait = TRUE;
				p->sigwait_thread = cur_act;
				p->p_stat = SSTOP;
				OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
				p->p_lflag &= ~P_LWAITED;
				ut->uu_siglist &= ~mask;	/* clear the old signal */

				proc_signalend(p, 1);
				proc_unlock(p);

				if (pp != PROC_NULL) {
					psignal(pp, SIGCHLD);
					proc_list_lock();
					wakeup((caddr_t)pp);
					proc_parentdropref(pp, 1);
					proc_list_unlock();
				}

				/* Block until the debugger resumes us (pt_setrunnable). */
				assert_wait((caddr_t)&p->sigwait, (THREAD_INTERRUPTIBLE));
				thread_block(THREAD_CONTINUE_NULL);
				proc_lock(p);
				proc_signalstart(p, 1);
			}

			p->sigwait = FALSE;
			p->sigwait_thread = NULL;
			wakeup((caddr_t)&p->sigwait_thread);

			/*
			 * This code is to detect when gdb is killed
			 * even as the traced program is attached.
			 * pgsignal would get the SIGKILL to traced program
			 * That's what we are trying to see (I hope)
			 */
			if (ut->uu_siglist & sigmask(SIGKILL)) {
				/*
				 * Wait event may still be outstanding;
				 * clear it, since sig_lock_to_exit will
				 * wait.
				 */
				clear_wait(current_thread(), THREAD_INTERRUPTED);
				sig_lock_to_exit(p);
				/*
				 * Since this thread will be resumed
				 * to allow the current syscall to
				 * be completed, must save u_qsave
				 * before calling exit(). (Since exit()
				 * calls closef() which can trash u_qsave.)
				 */
				proc_signalend(p, 1);
				proc_unlock(p);
				KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_FRCEXIT) | DBG_FUNC_NONE,
					p->p_pid, W_EXITCODE(0, SIGKILL), 2, 0, 0);
				exit1(p, W_EXITCODE(0, SIGKILL), (int *)NULL);
				proc_lock(p);
				return(0);
			}

			/*
			 * We may have to quit
			 */
			if (thread_should_abort(current_thread())) {
				retval = 0;
				goto out;
			}
			/*
			 * If parent wants us to take the signal,
			 * then it will leave it in p->p_xstat;
			 * otherwise we just look for signals again.
			 */
			signum = p->p_xstat;
			if (signum == 0)
				continue;
			/*
			 * Put the new signal into p_siglist. If the
			 * signal is being masked, look for other signals.
			 *
			 * NOTE(review): 'prop' still describes the original
			 * signal at this point, not the debugger-substituted
			 * one, so the switch below may consult stale
			 * properties -- TODO confirm this is intentional.
			 */
			mask = sigmask(signum);
			ut->uu_siglist |= mask;
			if (ut->uu_sigmask & mask)
				continue;
		}

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */

		switch ((long)p->p_sigacts->ps_sigact[signum]) {

		case (long)SIG_DFL:
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal. However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				struct pgrp * pg;

				proc_unlock(p);
				pg = proc_pgrp(p);
				if (p->p_lflag & P_LTRACED ||
					(pg->pg_jobc == 0 &&
					prop & SA_TTYSTOP)) {
					proc_lock(p);
					pg_rele(pg);
					break;	/* == ignore */
				}
				pg_rele(pg);
				if (p->p_stat != SSTOP) {
					proc_lock(p);
					p->p_xstat = signum;

					p->p_stat = SSTOP;
					p->p_lflag &= ~P_LWAITED;
					proc_unlock(p);

					/* Stop ourselves and notify the parent (SIGCHLD). */
					pp = proc_parentholdref(p);
					stop(p, pp);
					if ((pp != PROC_NULL) && ((pp->p_flag & P_NOCLDSTOP) == 0)) {
						my_cred = kauth_cred_proc_ref(p);
						r_uid = kauth_cred_getruid(my_cred);
						kauth_cred_unref(&my_cred);

						proc_lock(pp);
						pp->si_pid = p->p_pid;
						pp->si_status = WEXITSTATUS(p->p_xstat);
						pp->si_code = CLD_STOPPED;
						pp->si_uid = r_uid;
						proc_unlock(pp);

						psignal(pp, SIGCHLD);
					}
					if (pp != PROC_NULL)
						proc_parentdropref(pp, 0);
				}
				proc_lock(p);
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else {
				ut->uu_siglist &= ~mask;	/* take the signal! */
				retval = signum;
				goto out;
			}

			/*NOTREACHED*/
			break;

		case (long)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
			if ((prop & SA_CONT) == 0 &&
				(p->p_lflag & P_LTRACED) == 0)
				printf("issignal\n");
			break;		/* == ignore */

		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			ut->uu_siglist &= ~mask;	/* take the signal! */
			retval = signum;
			goto out;
		}
		/* Signal was handled inline above; consume it and rescan. */
		ut->uu_siglist &= ~mask;	/* take the signal! */
	}
	/* NOTREACHED */
out:
	proc_signalend(p, 1);
	return(retval);
}
2566
2567 /* called from _sleep */
/*
 * Poll variant of issignal_locked(): report (without consuming) the
 * first pending, deliverable signal for the current thread, or 0 if
 * none.  Takes no locks and, other than the printf diagnostic, has no
 * side effects on process state.
 */
int
CURSIG(proc_t p)
{
	int signum, mask, prop, sigbits;
	thread_t cur_act;
	struct uthread * ut;
	int retnum = 0;


	cur_act = current_thread();

	ut = get_bsdthread_info(cur_act);

	/* Fast paths: nothing pending, or everything pending is blocked. */
	if (ut->uu_siglist == 0)
		return (0);

	if (((ut->uu_siglist & ~ut->uu_sigmask) == 0) && ((p->p_lflag & P_LTRACED) == 0))
		return (0);

	sigbits = ut->uu_siglist & ~ut->uu_sigmask;

	for(;;) {
		if (p->p_lflag & P_LPPWAIT)
			sigbits &= ~stopsigmask;
		if (sigbits == 0) {	 	/* no signal to send */
			return (retnum);
		}

		/* Examine the lowest-numbered candidate first. */
		signum = ffs((long)sigbits);
		mask = sigmask(signum);
		prop = sigprop[signum];
		sigbits &= ~mask;		/* take the signal out */

		/*
		 * We should see pending but ignored signals
		 * only if P_LTRACED was on when they were posted.
		 */
		if (mask & p->p_sigignore && (p->p_lflag & P_LTRACED) == 0) {
			continue;
		}

		if (p->p_lflag & P_LTRACED && (p->p_lflag & P_LPPWAIT) == 0) {
			/* Traced processes report every signal. */
			return(signum);
		}

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */

		switch ((long)p->p_sigacts->ps_sigact[signum]) {

		case (long)SIG_DFL:
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal. However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				struct pgrp *pg;

				pg = proc_pgrp(p);

				if (p->p_lflag & P_LTRACED ||
					(pg->pg_jobc == 0 &&
					prop & SA_TTYSTOP)) {
					pg_rele(pg);
					break;	/* == ignore */
				}
				pg_rele(pg);
				/* Remember the stop signal but keep scanning. */
				retnum = signum;
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else {
				return (signum);
			}
			/*NOTREACHED*/

		case (long)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
			if ((prop & SA_CONT) == 0 &&
				(p->p_lflag & P_LTRACED) == 0)
				printf("issignal\n");
			break;		/* == ignore */

		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			return (signum);
		}
	}
	/* NOTREACHED */
}
2675
2676 /*
2677 * Put the argument process into the stopped state and notify the parent
2678 * via wakeup. Signals are handled elsewhere. The process must not be
2679 * on the run queue.
2680 */
static void
stop(proc_t p, proc_t parent)
{
	OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
	/* Rouse a parent sleeping in wait*() so it can observe the stop. */
	if ((parent != PROC_NULL) && (parent->p_stat != SSTOP)) {
		proc_list_lock();
		wakeup((caddr_t)parent);
		proc_list_unlock();
	}
	(void) task_suspend_internal(p->task);
}
2692
2693 /*
2694 * Take the action for the specified signal
2695 * from the current set of pending signals.
2696 */
/*
 * Deliver signal 'signum' to the current process: either kill it
 * (SIG_DFL for a fatal signal, possibly dumping core) or invoke the
 * user's handler via sendsig().  Called and returns with the proc
 * lock held; drops it internally across coredump/exit/handler setup.
 */
void
postsig_locked(int signum)
{
	proc_t p = current_proc();
	struct sigacts *ps = p->p_sigacts;
	user_addr_t catcher;
	uint32_t code;
	int mask, returnmask;
	struct uthread * ut;

#if DIAGNOSTIC
	if (signum == 0)
		panic("postsig");
	/*
	 * This must be called on master cpu
	 */
	if (cpu_number() != master_cpu)
		panic("psig not on master");
#endif

	/*
	 * Try to grab the signal lock.
	 */
	if (sig_try_locked(p) <= 0) {
		return;
	}

	proc_signalstart(p, 1);

	ut = (struct uthread *)get_bsdthread_info(current_thread());
	mask = sigmask(signum);
	ut->uu_siglist &= ~mask;	/* consume the signal */
	catcher = ps->ps_sigact[signum];
	if (catcher == SIG_DFL) {
		/*
		 * Default catcher, where the default is to kill
		 * the process. (Other cases were ignored above.)
		 */
		sig_lock_to_exit(p);
		p->p_acflag |= AXSIG;
		if (sigprop[signum] & SA_CORE) {
			p->p_sigacts->ps_sig = signum;
			proc_signalend(p, 1);
			proc_unlock(p);
			/* Dump core first; flag success in the wait status. */
			if (coredump(p, 0, 0) == 0)
				signum |= WCOREFLAG;
		} else {
			proc_signalend(p, 1);
			proc_unlock(p);
		}

#if CONFIG_DTRACE
		bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));

		ut->t_dtrace_siginfo.si_signo = signum;
		ut->t_dtrace_siginfo.si_pid = p->si_pid;
		ut->t_dtrace_siginfo.si_uid = p->si_uid;
		ut->t_dtrace_siginfo.si_status = WEXITSTATUS(p->si_status);

		/* Fire DTrace proc:::fault probe when signal is generated by hardware. */
		switch (signum) {
		case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
			DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
			break;
		default:
			break;
		}


		DTRACE_PROC3(signal__handle, int, signum, siginfo_t *, &(ut->t_dtrace_siginfo),
			void (*)(void), SIG_DFL);
#endif

		KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_FRCEXIT) | DBG_FUNC_NONE,
			p->p_pid, W_EXITCODE(0, signum), 3, 0, 0);
		exit1(p, W_EXITCODE(0, signum), (int *)NULL);
		proc_lock(p);
		return;
	} else {
		/*
		 * If we get here, the signal must be caught.
		 */
#if DIAGNOSTIC
		if (catcher == SIG_IGN || (ut->uu_sigmask & mask))
			log(LOG_WARNING,
				"postsig: processing masked or ignored signal\n");
#endif

		/*
		 * Set the new mask value and also defer further
		 * occurences of this signal.
		 *
		 * Special case: user has done a sigpause. Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigpause is what we want
		 * restored after the signal processing is completed.
		 */
		if (ut->uu_flag & UT_SAS_OLDMASK) {
			returnmask = ut->uu_oldmask;
			ut->uu_flag &= ~UT_SAS_OLDMASK;
			ut->uu_oldmask = 0;
		} else
			returnmask = ut->uu_sigmask;
		ut->uu_sigmask |= ps->ps_catchmask[signum];
		if ((ps->ps_signodefer & mask) == 0)
			ut->uu_sigmask |= mask;
		/* One-shot handler: revert to SIG_DFL after one delivery
		 * (never for SIGILL/SIGTRAP). */
		if ((signum != SIGILL) && (signum != SIGTRAP) && (ps->ps_sigreset & mask)) {
			if ((signum != SIGCONT) && (sigprop[signum] & SA_IGNORE))
				p->p_sigignore |= mask;
			ps->ps_sigact[signum] = SIG_DFL;
			ps->ps_siginfo &= ~mask;
			ps->ps_signodefer &= ~mask;
		}

		/* Pass along the fault code only if it matches this signal. */
		if (ps->ps_sig != signum) {
			code = 0;
		} else {
			code = ps->ps_code;
			ps->ps_code = 0;
		}
		OSIncrementAtomicLong(&p->p_stats->p_ru.ru_nsignals);
		sendsig(p, catcher, signum, returnmask, code);
	}
	proc_signalend(p, 1);
}
2822
2823 /*
2824 * Attach a signal knote to the list of knotes for this process.
2825 *
2826 * Signal knotes share the knote list with proc knotes. This
2827 * could be avoided by using a signal-specific knote list, but
2828 * probably isn't worth the trouble.
2829 */
2830
static int
filt_sigattach(struct knote *kn)
{
	proc_t p = current_proc(); /* can attach only to oneself */

	proc_klist_lock();

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR; /* automatically set */

	/* Signal knotes share the proc's klist with proc knotes. */
	KNOTE_ATTACH(&p->p_klist, kn);

	proc_klist_unlock();

	/* Attaching to one's own process cannot fail. */
	return (0);
}
2847
2848 /*
2849 * remove the knote from the process list, if it hasn't already
2850 * been removed by exit processing.
2851 */
2852
static void
filt_sigdetach(struct knote *kn)
{
	proc_t p = kn->kn_ptr.p_proc;

	proc_klist_lock();
	/* Clear the back-pointer before unhooking from the shared klist. */
	kn->kn_ptr.p_proc = NULL;
	KNOTE_DETACH(&p->p_klist, kn);
	proc_klist_unlock();
}
2863
2864 /*
2865 * Post an event to the signal filter. Because we share the same list
2866 * as process knotes, we have to filter out and handle only signal events.
2867 *
2868 * We assume that we process fdfree() before we post the NOTE_EXIT for
2869 * a process during exit. Therefore, since signal filters can only be
2870 * set up "in-process", we should have already torn down the kqueue
2871 * hosting the EVFILT_SIGNAL knote and should never see NOTE_EXIT.
2872 */
2873 static int
2874 filt_signal(struct knote *kn, long hint)
2875 {
2876
2877 if (hint & NOTE_SIGNAL) {
2878 hint &= ~NOTE_SIGNAL;
2879
2880 if (kn->kn_id == (unsigned int)hint)
2881 kn->kn_data++;
2882 } else if (hint & NOTE_EXIT) {
2883 panic("filt_signal: detected NOTE_EXIT event");
2884 }
2885
2886 return (kn->kn_data != 0);
2887 }
2888
/*
 * EVFILT_SIGNAL touch: update filter parameters at registration time,
 * or harvest (and, for EV_CLEAR, reset) state at delivery time.  All
 * under the process klist lock.
 */
static void
filt_signaltouch(struct knote *kn, struct kevent_internal_s *kev, long type)
{
	proc_klist_lock();
	switch (type) {
	case EVENT_REGISTER:
		/* Adopt the fflags/data requested by userspace. */
		kn->kn_sfflags = kev->fflags;
		kn->kn_sdata = kev->data;
		break;
	case EVENT_PROCESS:
		/* Report the event; EV_CLEAR knotes reset their counters. */
		*kev = kn->kn_kevent;
		if (kn->kn_flags & EV_CLEAR) {
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;
	default:
		panic("filt_signaltouch() - invalid type (%ld)", type);
		break;
	}
	proc_klist_unlock();
}
2911
/*
 * BSD AST handler, run on return to user mode: charge deferred profiling
 * ticks, fire expired interval timers (SIGVTALRM/SIGPROF) and the CPU
 * rlimit (SIGXCPU), process deferred DTrace actions, and finally deliver
 * any pending signals to the current thread.  Also kicks off bsdinit_task()
 * exactly once, on the first AST ever taken.
 */
void
bsd_ast(thread_t thread)
{
	proc_t p = current_proc();
	struct uthread *ut = get_bsdthread_info(thread);
	int signum;
	user_addr_t pc;
	static int bsd_init_done = 0;

	if (p == NULL)
		return;

	/* Deferred profiling tick, charged against the user PC. */
	if ((p->p_flag & P_OWEUPC) && (p->p_flag & P_PROFIL)) {
		pc = get_useraddr();
		addupc_task(p, pc, 1);
		OSBitAndAtomic(~((uint32_t)P_OWEUPC), &p->p_flag);
	}

	/* ITIMER_VIRTUAL: user-time interval timer. */
	if (timerisset(&p->p_vtimer_user.it_value)) {
		uint32_t microsecs;

		task_vtimer_update(p->task, TASK_VTIMER_USER, &microsecs);

		if (!itimerdecr(p, &p->p_vtimer_user, microsecs)) {
			/* Timer expired: re-arm if it reloaded, else disarm. */
			if (timerisset(&p->p_vtimer_user.it_value))
				task_vtimer_set(p->task, TASK_VTIMER_USER);
			else
				task_vtimer_clear(p->task, TASK_VTIMER_USER);

			psignal_try_thread(p, thread, SIGVTALRM);
		}
	}

	/* ITIMER_PROF: profiling interval timer. */
	if (timerisset(&p->p_vtimer_prof.it_value)) {
		uint32_t microsecs;

		task_vtimer_update(p->task, TASK_VTIMER_PROF, &microsecs);

		if (!itimerdecr(p, &p->p_vtimer_prof, microsecs)) {
			if (timerisset(&p->p_vtimer_prof.it_value))
				task_vtimer_set(p->task, TASK_VTIMER_PROF);
			else
				task_vtimer_clear(p->task, TASK_VTIMER_PROF);

			psignal_try_thread(p, thread, SIGPROF);
		}
	}

	/* RLIMIT_CPU countdown. */
	if (timerisset(&p->p_rlim_cpu)) {
		struct timeval tv;

		task_vtimer_update(p->task, TASK_VTIMER_RLIM, (uint32_t *) &tv.tv_usec);

		proc_spinlock(p);
		if (p->p_rlim_cpu.tv_sec > 0 || p->p_rlim_cpu.tv_usec > tv.tv_usec) {
			/* Budget remains: subtract the consumed time. */
			tv.tv_sec = 0;
			timersub(&p->p_rlim_cpu, &tv, &p->p_rlim_cpu);
			proc_spinunlock(p);
		} else {

			timerclear(&p->p_rlim_cpu);
			proc_spinunlock(p);

			task_vtimer_clear(p->task, TASK_VTIMER_RLIM);

			psignal_try_thread(p, thread, SIGXCPU);
		}
	}

#if CONFIG_DTRACE
	/* Deferred DTrace raise() action. */
	if (ut->t_dtrace_sig) {
	    uint8_t dt_action_sig = ut->t_dtrace_sig;
	    ut->t_dtrace_sig = 0;
	    psignal(p, dt_action_sig);
	}

	/* Deferred DTrace stop() action. */
	if (ut->t_dtrace_stop) {
		ut->t_dtrace_stop = 0;
		proc_lock(p);
		p->p_dtrace_stop = 1;
		proc_unlock(p);
		(void)task_suspend_internal(p->task);
	}

	/* Deferred DTrace resume of another process. */
	if (ut->t_dtrace_resumepid) {
		proc_t resumeproc = proc_find(ut->t_dtrace_resumepid);
		ut->t_dtrace_resumepid = 0;
		if (resumeproc != PROC_NULL) {
			proc_lock(resumeproc);
			/* We only act on processes stopped by dtrace */
			if (resumeproc->p_dtrace_stop) {
				resumeproc->p_dtrace_stop = 0;
				proc_unlock(resumeproc);
				task_resume_internal(resumeproc->task);
			}
			else {
				proc_unlock(resumeproc);
			}
			proc_rele(resumeproc);
		}
	}

#endif /* CONFIG_DTRACE */

	/* Deliver all pending signals for this thread. */
	proc_lock(p);
	if (CHECK_SIGNALS(p, current_thread(), ut)) {
		while ( (signum = issignal_locked(p)) )
			postsig_locked(signum);
	}
	proc_unlock(p);

	/* First AST in the system bootstraps the BSD init task. */
	if (!bsd_init_done) {
		bsd_init_done = 1;
		bsdinit_task();
	}

}
3029
/*
 * ptrace set runnable: mark a traced process SRUN again and, if it is
 * parked in a debugger stop (p->sigwait), wake the stopped thread and
 * release the suspended task (unless Mach-exception delivery, P_LSIGEXC,
 * is in effect, where no task_suspend was done).
 */
void
pt_setrunnable(proc_t p)
{
	task_t task;

	task = p->task;

	if (p->p_lflag & P_LTRACED) {
		proc_lock(p);
		p->p_stat = SRUN;
		proc_unlock(p);
		if (p->sigwait) {
			wakeup((caddr_t)&(p->sigwait));
			if ((p->p_lflag & P_LSIGEXC) == 0) {	// 5878479
				task_release(task);
			}
		}
	}
}
3050
3051 kern_return_t
3052 do_bsdexception(
3053 int exc,
3054 int code,
3055 int sub)
3056 {
3057 mach_exception_data_type_t codes[EXCEPTION_CODE_MAX];
3058
3059 codes[0] = code;
3060 codes[1] = sub;
3061 return(bsd_exception(exc, codes, 2));
3062 }
3063
3064 int
3065 proc_pendingsignals(proc_t p, sigset_t mask)
3066 {
3067 struct uthread * uth;
3068 thread_t th;
3069 sigset_t bits = 0;
3070
3071 proc_lock(p);
3072 /* If the process is in proc exit return no signal info */
3073 if (p->p_lflag & P_LPEXIT) {
3074 goto out;
3075 }
3076
3077 if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
3078 th = p->p_vforkact;
3079 uth = (struct uthread *)get_bsdthread_info(th);
3080 if (uth) {
3081 bits = (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
3082 }
3083 goto out;
3084 }
3085
3086 bits = 0;
3087 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
3088 bits |= (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
3089 }
3090 out:
3091 proc_unlock(p);
3092 return(bits);
3093 }
3094
/*
 * Return the subset of 'mask' that is pending and deliverable on the
 * specific thread 'th' of process 'p' (pending on the thread, not
 * blocked by it, and not ignored by the process).
 */
int
thread_issignal(proc_t p, thread_t th, sigset_t mask)
{
	struct uthread * uth;
	sigset_t bits=0;

	proc_lock(p);
	uth = (struct uthread *)get_bsdthread_info(th);
	if (uth) {
		bits = (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
	}
	proc_unlock(p);
	return(bits);
}
3109
3110 /*
3111 * Allow external reads of the sigprop array.
3112 */
3113 int
3114 hassigprop(int sig, int prop)
3115 {
3116 return (sigprop[sig] & prop);
3117 }
3118
3119 void
3120 pgsigio(pid_t pgid, int sig)
3121 {
3122 proc_t p = PROC_NULL;
3123
3124 if (pgid < 0)
3125 gsignal(-(pgid), sig);
3126
3127 else if (pgid > 0 && (p = proc_find(pgid)) != 0)
3128 psignal(p, sig);
3129 if (p != PROC_NULL)
3130 proc_rele(p);
3131 }
3132
/*
 * Enter the per-process signal critical section: sleep until no other
 * thread holds P_LINSIGNAL for 'p', then claim it and record this thread
 * as the holder.  'locked' indicates whether the caller already holds
 * the proc lock.  Recursive acquisition by the same thread panics.
 */
void
proc_signalstart(proc_t p, int locked)
{
	if (!locked)
		proc_lock(p);

	if(p->p_signalholder == current_thread())
		panic("proc_signalstart: thread attempting to signal a process for which it holds the signal lock");

	/* Count ourselves as a waiter so proc_signalend knows to wake us. */
	p->p_sigwaitcnt++;
	while ((p->p_lflag & P_LINSIGNAL) == P_LINSIGNAL)
		msleep(&p->p_sigmask, &p->p_mlock, 0, "proc_signstart", NULL);
	p->p_sigwaitcnt--;

	p->p_lflag |= P_LINSIGNAL;
	p->p_signalholder = current_thread();
	if (!locked)
		proc_unlock(p);
}
3152
/*
 * Leave the per-process signal critical section entered by
 * proc_signalstart(): clear P_LINSIGNAL, wake any waiters, and drop
 * the holder record.  'locked' indicates whether the caller already
 * holds the proc lock.
 */
void
proc_signalend(proc_t p, int locked)
{
	if (!locked)
		proc_lock(p);
	p->p_lflag &= ~P_LINSIGNAL;

	if (p->p_sigwaitcnt > 0)
		wakeup(&p->p_sigmask);

	p->p_signalholder = NULL;
	if (!locked)
		proc_unlock(p);
}
3167
/*
 * Claim the process for exit processing: record the calling thread as
 * the exit thread and quiesce all other threads in the task.  Called
 * and returns with the proc lock held; the lock is dropped across
 * task_hold()/task_wait().
 */
void
sig_lock_to_exit(proc_t p)
{
	thread_t self = current_thread();

	p->exit_thread = self;
	proc_unlock(p);

	task_hold(p->task);
	task_wait(p->task, FALSE);

	proc_lock(p);
}
3181
/*
 * Attempt to acquire the per-process signal interlock.  Returns 1 on
 * success, 0 if the process is already committed to exiting, and -1 if
 * the calling thread was aborted while waiting.  Called with the proc
 * lock held; msleep() with PDROP releases it while sleeping, and it is
 * retaken before every return.
 */
int
sig_try_locked(proc_t p)
{
	thread_t self = current_thread();

	while (p->sigwait || p->exit_thread) {
		if (p->exit_thread) {
			return(0);
		}
		msleep((caddr_t)&p->sigwait_thread, &p->p_mlock, PCATCH | PDROP, 0, 0);
		if (thread_should_abort(self)) {
			/*
			 * Terminate request - clean up.
			 */
			proc_lock(p);
			return -1;
		}
		proc_lock(p);
	}
	return 1;
}