/*
 * Copyright (c) 1995-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sig.c	8.7 (Berkeley) 4/18/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#define	SIGPROP		/* include signal properties table */
#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/systm.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/acct.h>
#include <sys/file_internal.h>
#include <sys/kernel.h>
#include <sys/wait.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/stat.h>
#include <sys/lock.h>
#include <sys/kdebug.h>

#include <sys/mount.h>
#include <sys/sysproto.h>

#include <security/audit/audit.h>

#include <machine/spl.h>

#include <kern/cpu_number.h>

#include <sys/vm.h>
#include <sys/user.h>		/* for coredump */
#include <kern/ast.h>		/* for APC support */
#include <kern/task.h>		/* extern void *get_bsdtask_info(task_t); */
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/thread_call.h>
#include <mach/exception.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <libkern/OSAtomic.h>

#include <sys/sdt.h>
#include <sys/codesign.h>

/*
 * Missing prototypes that Mach should export
 *
 * +++
 */
extern int thread_enable_fpe(thread_t act, int onoff);
extern thread_t port_name_to_thread(mach_port_name_t port_name);
extern kern_return_t get_signalact(task_t, thread_t *, int);
extern unsigned int get_useraddr(void);

/*
 * ---
 */

extern void doexception(int exc, mach_exception_code_t code,
		mach_exception_subcode_t sub);

static void stop(proc_t, proc_t);
int cansignal(proc_t, kauth_cred_t, proc_t, int, int);
int killpg1(proc_t, int, int, int, int);
static void psignal_uthread(thread_t, int);
static void psignal_try_thread(proc_t, thread_t, int signum);
kern_return_t do_bsdexception(int, int, int);
void __posix_sem_syscall_return(kern_return_t);
char *proc_name_address(void *p);

/* implementations in osfmk/kern/sync_sema.c. We do not want port.h in this scope, so void * them */
kern_return_t semaphore_timedwait_signal_trap_internal(mach_port_name_t, mach_port_name_t, unsigned int, clock_res_t, void (*)(kern_return_t));
kern_return_t semaphore_timedwait_trap_internal(mach_port_name_t, unsigned int, clock_res_t, void (*)(kern_return_t));
kern_return_t semaphore_wait_signal_trap_internal(mach_port_name_t, mach_port_name_t, void (*)(kern_return_t));
kern_return_t semaphore_wait_trap_internal(mach_port_name_t, void (*)(kern_return_t));

static int	filt_sigattach(struct knote *kn);
static void	filt_sigdetach(struct knote *kn);
static int	filt_signal(struct knote *kn, long hint);
static void	filt_signaltouch(struct knote *kn, struct kevent_internal_s *kev,
		long type);

struct filterops sig_filtops = {
	.f_attach = filt_sigattach,
	.f_detach = filt_sigdetach,
	.f_event = filt_signal,
	.f_touch = filt_signaltouch,
};
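
/*
 * Usage sketch (editor's illustration, not compiled here): sig_filtops backs
 * the EVFILT_SIGNAL kqueue filter, which counts signal deliveries to the
 * process and is independent of the signal's disposition.  A minimal
 * userspace consumer, assuming the standard <sys/event.h> API, might be:
 *
 *	#include <sys/event.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int kq = kqueue();
 *		struct kevent ev;
 *
 *		// EVFILT_SIGNAL reports delivery attempts even for a
 *		// signal set to SIG_IGN, so ignore SIGUSR1 here.
 *		signal(SIGUSR1, SIG_IGN);
 *		EV_SET(&ev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
 *		kevent(kq, &ev, 1, NULL, 0, NULL);	// register
 *
 *		kill(getpid(), SIGUSR1);
 *		kevent(kq, NULL, 0, &ev, 1, NULL);	// wait for the event
 *		printf("signal %lu delivered %ld time(s)\n",
 *		    (unsigned long)ev.ident, (long)ev.data);
 *		return 0;
 *	}
 */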

/* structures and fns for killpg1 iteration callback and filters */
struct killpg1_filtargs {
	int posix;
	proc_t cp;
};

struct killpg1_iterargs {
	proc_t cp;
	kauth_cred_t uc;
	int signum;
	int * nfoundp;
	int zombie;
};

static int killpg1_filt(proc_t p, void * arg);
static int killpg1_pgrpfilt(proc_t p, __unused void * arg);
static int killpg1_callback(proc_t p, void * arg);

static int pgsignal_filt(proc_t p, void * arg);
static int pgsignal_callback(proc_t p, void * arg);
static kern_return_t get_signalthread(proc_t, int, thread_t *);


/* flags for psignal_internal */
#define PSIG_LOCKED	0x1
#define PSIG_VFORK	0x2
#define PSIG_THREAD	0x4
#define PSIG_TRY_THREAD	0x8


static void psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum);

/*
 * NOTE: Source and target may *NOT* overlap! (target is smaller)
 */
static void
sigaltstack_kern_to_user32(struct kern_sigaltstack *in, struct user32_sigaltstack *out)
{
	out->ss_sp = CAST_DOWN_EXPLICIT(user32_addr_t, in->ss_sp);
	out->ss_size = CAST_DOWN_EXPLICIT(user32_size_t, in->ss_size);
	out->ss_flags = in->ss_flags;
}

static void
sigaltstack_kern_to_user64(struct kern_sigaltstack *in, struct user64_sigaltstack *out)
{
	out->ss_sp = in->ss_sp;
	out->ss_size = in->ss_size;
	out->ss_flags = in->ss_flags;
}

/*
 * NOTE: Source and target are permitted to overlap! (source is smaller);
 * this works because we copy fields in order from the end of the struct to
 * the beginning.
 */
static void
sigaltstack_user32_to_kern(struct user32_sigaltstack *in, struct kern_sigaltstack *out)
{
	out->ss_flags = in->ss_flags;
	out->ss_size = in->ss_size;
	out->ss_sp = CAST_USER_ADDR_T(in->ss_sp);
}

static void
sigaltstack_user64_to_kern(struct user64_sigaltstack *in, struct kern_sigaltstack *out)
{
	out->ss_flags = in->ss_flags;
	out->ss_size = in->ss_size;
	out->ss_sp = in->ss_sp;
}
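
/*
 * Editor's note, for illustration: the overlap tolerated in the user32 case
 * is safe only because of the copy order above.  If "in" and "out" share a
 * base address A (offsets below assume typical alignment and are
 * illustrative only):
 *
 *	user32 layout:	A+0 ss_sp (4)	A+4 ss_size (4)	A+8  ss_flags (4)
 *	kern layout:	A+0 ss_sp (8)	A+8 ss_size (8)	A+16 ss_flags (4)
 *
 * Writing out->ss_sp first would clobber in->ss_size before it was read;
 * copying ss_flags, then ss_size, then ss_sp consumes each source field
 * before any wider destination write can overwrite it.
 */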

static void
sigaction_kern_to_user32(struct kern_sigaction *in, struct user32_sigaction *out)
{
	/* This assumes the 32-bit __sa_handler is of type sig_t */
	out->__sigaction_u.__sa_handler = CAST_DOWN_EXPLICIT(user32_addr_t, in->__sigaction_u.__sa_handler);
	out->sa_mask = in->sa_mask;
	out->sa_flags = in->sa_flags;
}

static void
sigaction_kern_to_user64(struct kern_sigaction *in, struct user64_sigaction *out)
{
	/* This assumes the 64-bit __sa_handler is of type sig_t */
	out->__sigaction_u.__sa_handler = in->__sigaction_u.__sa_handler;
	out->sa_mask = in->sa_mask;
	out->sa_flags = in->sa_flags;
}

static void
__sigaction_user32_to_kern(struct __user32_sigaction *in, struct __kern_sigaction *out)
{
	out->__sigaction_u.__sa_handler = CAST_USER_ADDR_T(in->__sigaction_u.__sa_handler);
	out->sa_tramp = CAST_USER_ADDR_T(in->sa_tramp);
	out->sa_mask = in->sa_mask;
	out->sa_flags = in->sa_flags;
}

static void
__sigaction_user64_to_kern(struct __user64_sigaction *in, struct __kern_sigaction *out)
{
	out->__sigaction_u.__sa_handler = in->__sigaction_u.__sa_handler;
	out->sa_tramp = in->sa_tramp;
	out->sa_mask = in->sa_mask;
	out->sa_flags = in->sa_flags;
}

#if SIGNAL_DEBUG
void ram_printf(int);
int ram_debug = 0;
unsigned int rdebug_proc = 0;
void
ram_printf(int x)
{
	printf("x is %d", x);
}
#endif /* SIGNAL_DEBUG */


void
signal_setast(thread_t sig_actthread)
{
	act_set_astbsd(sig_actthread);
}

/*
 * Can process p, with ucred uc, send the signal signum to process q?
 * uc is refcounted by the caller, so its internal fields can be used safely;
 * when called with the zombie arg set, the proc list lock is held.
 */
int
cansignal(proc_t p, kauth_cred_t uc, proc_t q, int signum, int zombie)
{
	kauth_cred_t my_cred;
	struct session * p_sessp = SESSION_NULL;
	struct session * q_sessp = SESSION_NULL;
#if CONFIG_MACF
	int error;

	error = mac_proc_check_signal(p, q, signum);
	if (error)
		return (0);
#endif

	/* you can signal yourself */
	if (p == q)
		return (1);

	/* you can't send launchd SIGKILL, even if root */
	if (signum == SIGKILL && q == initproc)
		return (0);

	if (!suser(uc, NULL))
		return (1);		/* root can always signal */

	if (zombie == 0)
		proc_list_lock();
	if (p->p_pgrp != PGRP_NULL)
		p_sessp = p->p_pgrp->pg_session;
	if (q->p_pgrp != PGRP_NULL)
		q_sessp = q->p_pgrp->pg_session;

	if (signum == SIGCONT && q_sessp == p_sessp) {
		if (zombie == 0)
			proc_list_unlock();
		return (1);		/* SIGCONT in session */
	}

	if (zombie == 0)
		proc_list_unlock();

	/*
	 * If the real or effective UID of the sender matches the real
	 * or saved UID of the target, permit the signal to
	 * be sent.
	 */
	if (zombie == 0)
		my_cred = kauth_cred_proc_ref(q);
	else
		my_cred = proc_ucred(q);

	if (kauth_cred_getruid(uc) == kauth_cred_getruid(my_cred) ||
	    kauth_cred_getruid(uc) == kauth_cred_getsvuid(my_cred) ||
	    kauth_cred_getuid(uc) == kauth_cred_getruid(my_cred) ||
	    kauth_cred_getuid(uc) == kauth_cred_getsvuid(my_cred)) {
		if (zombie == 0)
			kauth_cred_unref(&my_cred);
		return (1);
	}

	if (zombie == 0)
		kauth_cred_unref(&my_cred);

	return (0);
}
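
/*
 * Example (editor's illustration) of the UID rule above: an unprivileged
 * shell (ruid=501, euid=501) may signal its own setuid-root child
 * (ruid=501, euid=0, svuid=0) because the sender's real UID matches the
 * target's real UID.  A different user (ruid=502, euid=502) matches none
 * of the four ruid/euid-vs-ruid/svuid pairs and gets EPERM from kill(2).
 * Note that the target's *effective* UID never enters the check, matching
 * the POSIX rule of comparing against the target's real and saved UIDs.
 */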

/*
 * <rdar://problem/21952708> Some signals can be restricted from being handled,
 * forcing the default action for that signal.  This behavior applies only to
 * non-root (EUID != 0) processes, and is configured with the "sigrestrict=x"
 * bootarg:
 *
 *   0 (default): Disallow use of restricted signals.  Trying to register a handler
 *	returns ENOTSUP, which userspace may use to take special action (e.g. abort).
 *   1: As above, but return EINVAL.  Restricted signals behave similarly to SIGKILL.
 *   2: Usual POSIX semantics.
 */
unsigned sigrestrict_arg = 0;

#if PLATFORM_WatchOS || PLATFORM_AppleTVOS
static int
sigrestrictmask(void)
{
	if (kauth_getuid() != 0 && sigrestrict_arg != 2) {
		return SIGRESTRICTMASK;
	}
	return 0;
}

static int
signal_is_restricted(proc_t p, int signum)
{
	if (sigmask(signum) & sigrestrictmask()) {
		if (sigrestrict_arg == 0 &&
		    task_get_apptype(p->task) == TASK_APPTYPE_APP_DEFAULT) {
			return ENOTSUP;
		} else {
			return EINVAL;
		}
	}
	return 0;
}

#else

static inline int
signal_is_restricted(proc_t p, int signum)
{
	(void)p;
	(void)signum;
	return 0;
}
#endif /* !(PLATFORM_WatchOS || PLATFORM_AppleTVOS) */
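
/*
 * Userspace sketch (editor's illustration, not compiled here): on platforms
 * where SIGRESTRICTMASK applies, a non-root app should be prepared for
 * sigaction(2) to fail when registering a handler for a restricted signal:
 *
 *	#include <signal.h>
 *	#include <errno.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *
 *	static void handler(int sig) { (void)sig; }
 *
 *	int main(void)
 *	{
 *		struct sigaction sa;
 *
 *		memset(&sa, 0, sizeof(sa));
 *		sa.sa_handler = handler;
 *		sigemptyset(&sa.sa_mask);
 *		if (sigaction(SIGPIPE, &sa, NULL) == -1) {
 *			// With sigrestrict=0 the kernel reports ENOTSUP for
 *			// restricted signals; take special action, as the
 *			// comment above suggests (e.g. abort()).
 *			if (errno == ENOTSUP)
 *				abort();
 *			perror("sigaction");
 *			exit(1);
 *		}
 *		return 0;
 *	}
 *
 * (Whether SIGPIPE is in SIGRESTRICTMASK is platform-dependent; it is used
 * here only as a stand-in for a restricted signal.)
 */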

/*
 * Returns:	0			Success
 *		EINVAL
 *	copyout:EFAULT
 *	copyin:EFAULT
 *
 * Notes:	Uses current thread as a parameter to inform PPC to enable
 *		FPU exceptions via setsigvec(); this operation is not proxy
 *		safe!
 */
/* ARGSUSED */
int
sigaction(proc_t p, struct sigaction_args *uap, __unused int32_t *retval)
{
	struct kern_sigaction vec;
	struct __kern_sigaction __vec;

	struct kern_sigaction *sa = &vec;
	struct sigacts *ps = p->p_sigacts;

	int signum;
	int bit, error = 0;

	signum = uap->signum;
	if (signum <= 0 || signum >= NSIG ||
	    signum == SIGKILL || signum == SIGSTOP)
		return (EINVAL);

	if ((error = signal_is_restricted(p, signum))) {
		if (error == ENOTSUP) {
			printf("%s(%d): denied attempt to register action for signal %d\n",
			    proc_name_address(p), proc_pid(p), signum);
		}
		return error;
	}

	if (uap->osa) {
		sa->sa_handler = ps->ps_sigact[signum];
		sa->sa_mask = ps->ps_catchmask[signum];
		bit = sigmask(signum);
		sa->sa_flags = 0;
		if ((ps->ps_sigonstack & bit) != 0)
			sa->sa_flags |= SA_ONSTACK;
		if ((ps->ps_sigintr & bit) == 0)
			sa->sa_flags |= SA_RESTART;
		if (ps->ps_siginfo & bit)
			sa->sa_flags |= SA_SIGINFO;
		if (ps->ps_signodefer & bit)
			sa->sa_flags |= SA_NODEFER;
		if (ps->ps_64regset & bit)
			sa->sa_flags |= SA_64REGSET;
		if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDSTOP))
			sa->sa_flags |= SA_NOCLDSTOP;
		if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDWAIT))
			sa->sa_flags |= SA_NOCLDWAIT;

		if (IS_64BIT_PROCESS(p)) {
			struct user64_sigaction vec64;

			sigaction_kern_to_user64(sa, &vec64);
			error = copyout(&vec64, uap->osa, sizeof(vec64));
		} else {
			struct user32_sigaction vec32;

			sigaction_kern_to_user32(sa, &vec32);
			error = copyout(&vec32, uap->osa, sizeof(vec32));
		}
		if (error)
			return (error);
	}
	if (uap->nsa) {
		if (IS_64BIT_PROCESS(p)) {
			struct __user64_sigaction __vec64;

			error = copyin(uap->nsa, &__vec64, sizeof(__vec64));
			__sigaction_user64_to_kern(&__vec64, &__vec);
		} else {
			struct __user32_sigaction __vec32;

			error = copyin(uap->nsa, &__vec32, sizeof(__vec32));
			__sigaction_user32_to_kern(&__vec32, &__vec);
		}
		if (error)
			return (error);
		__vec.sa_flags &= SA_USERSPACE_MASK;	/* Only pass on valid sa_flags */
		error = setsigvec(p, current_thread(), signum, &__vec, FALSE);
	}
	return (error);
}
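
/*
 * Userspace sketch (editor's illustration): the syscall above is normally
 * reached through the libc sigaction(3) wrapper, which supplies the signal
 * trampoline (sa_tramp) automatically.  A typical SA_SIGINFO registration:
 *
 *	#include <signal.h>
 *	#include <string.h>
 *
 *	static void on_sig(int sig, siginfo_t *info, void *ctx)
 *	{
 *		// si_pid/si_uid are filled in by psignal_internal() below.
 *		(void)sig; (void)info; (void)ctx;
 *	}
 *
 *	int install(void)
 *	{
 *		struct sigaction sa;
 *
 *		memset(&sa, 0, sizeof(sa));
 *		sa.sa_sigaction = on_sig;
 *		sa.sa_flags = SA_SIGINFO | SA_RESTART;
 *		sigemptyset(&sa.sa_mask);
 *		// SIGKILL/SIGSTOP are rejected with EINVAL, as above.
 *		return sigaction(SIGUSR1, &sa, NULL);
 *	}
 */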

/* Routines to manipulate bits on all threads */
int
clear_procsiglist(proc_t p, int bit, boolean_t in_signalstart)
{
	struct uthread * uth;
	thread_t thact;

	proc_lock(p);
	if (!in_signalstart)
		proc_signalstart(p, 1);

	if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
		thact = p->p_vforkact;
		uth = (struct uthread *)get_bsdthread_info(thact);
		if (uth) {
			uth->uu_siglist &= ~bit;
		}
		if (!in_signalstart)
			proc_signalend(p, 1);
		proc_unlock(p);
		return (0);
	}

	TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
		uth->uu_siglist &= ~bit;
	}
	p->p_siglist &= ~bit;
	if (!in_signalstart)
		proc_signalend(p, 1);
	proc_unlock(p);

	return (0);
}


static int
unblock_procsigmask(proc_t p, int bit)
{
	struct uthread * uth;
	thread_t thact;

	proc_lock(p);
	proc_signalstart(p, 1);

	if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
		thact = p->p_vforkact;
		uth = (struct uthread *)get_bsdthread_info(thact);
		if (uth) {
			uth->uu_sigmask &= ~bit;
		}
		p->p_sigmask &= ~bit;
		proc_signalend(p, 1);
		proc_unlock(p);
		return (0);
	}
	TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
		uth->uu_sigmask &= ~bit;
	}
	p->p_sigmask &= ~bit;

	proc_signalend(p, 1);
	proc_unlock(p);
	return (0);
}

static int
block_procsigmask(proc_t p, int bit)
{
	struct uthread * uth;
	thread_t thact;

	proc_lock(p);
	proc_signalstart(p, 1);

	if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
		thact = p->p_vforkact;
		uth = (struct uthread *)get_bsdthread_info(thact);
		if (uth) {
			uth->uu_sigmask |= bit;
		}
		p->p_sigmask |= bit;
		proc_signalend(p, 1);
		proc_unlock(p);
		return (0);
	}
	TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
		uth->uu_sigmask |= bit;
	}
	p->p_sigmask |= bit;

	proc_signalend(p, 1);
	proc_unlock(p);
	return (0);
}

int
set_procsigmask(proc_t p, int bit)
{
	struct uthread * uth;
	thread_t thact;

	proc_lock(p);
	proc_signalstart(p, 1);

	if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
		thact = p->p_vforkact;
		uth = (struct uthread *)get_bsdthread_info(thact);
		if (uth) {
			uth->uu_sigmask = bit;
		}
		p->p_sigmask = bit;
		proc_signalend(p, 1);
		proc_unlock(p);
		return (0);
	}
	TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
		uth->uu_sigmask = bit;
	}
	p->p_sigmask = bit;
	proc_signalend(p, 1);
	proc_unlock(p);

	return (0);
}

/* XXX should be static? */
/*
 * Notes:	The thread parameter is used in the PPC case to select the
 *		thread on which the floating point exception will be enabled
 *		or disabled.  We can't simply take current_thread(), since
 *		this is called from posix_spawn() on the not currently running
 *		process/thread pair.
 *
 *		We mark thread as unused to allow compilation without warning
 *		on non-PPC platforms.
 */
int
setsigvec(proc_t p, __unused thread_t thread, int signum, struct __kern_sigaction *sa, boolean_t in_sigstart)
{
	struct sigacts *ps = p->p_sigacts;
	int bit;

	if ((signum == SIGKILL || signum == SIGSTOP) &&
	    sa->sa_handler != SIG_DFL)
		return (EINVAL);
	bit = sigmask(signum);
	/*
	 * Change setting atomically.
	 */
	ps->ps_sigact[signum] = sa->sa_handler;
	ps->ps_trampact[signum] = sa->sa_tramp;
	ps->ps_catchmask[signum] = sa->sa_mask & ~sigcantmask;
	if (sa->sa_flags & SA_SIGINFO)
		ps->ps_siginfo |= bit;
	else
		ps->ps_siginfo &= ~bit;
	if (sa->sa_flags & SA_64REGSET)
		ps->ps_64regset |= bit;
	else
		ps->ps_64regset &= ~bit;
	if ((sa->sa_flags & SA_RESTART) == 0)
		ps->ps_sigintr |= bit;
	else
		ps->ps_sigintr &= ~bit;
	if (sa->sa_flags & SA_ONSTACK)
		ps->ps_sigonstack |= bit;
	else
		ps->ps_sigonstack &= ~bit;
	if (sa->sa_flags & SA_USERTRAMP)
		ps->ps_usertramp |= bit;
	else
		ps->ps_usertramp &= ~bit;
	if (sa->sa_flags & SA_RESETHAND)
		ps->ps_sigreset |= bit;
	else
		ps->ps_sigreset &= ~bit;
	if (sa->sa_flags & SA_NODEFER)
		ps->ps_signodefer |= bit;
	else
		ps->ps_signodefer &= ~bit;
	if (signum == SIGCHLD) {
		if (sa->sa_flags & SA_NOCLDSTOP)
			OSBitOrAtomic(P_NOCLDSTOP, &p->p_flag);
		else
			OSBitAndAtomic(~((uint32_t)P_NOCLDSTOP), &p->p_flag);
		if ((sa->sa_flags & SA_NOCLDWAIT) || (sa->sa_handler == SIG_IGN))
			OSBitOrAtomic(P_NOCLDWAIT, &p->p_flag);
		else
			OSBitAndAtomic(~((uint32_t)P_NOCLDWAIT), &p->p_flag);
	}

	/*
	 * Set bit in p_sigignore for signals that are set to SIG_IGN,
	 * and for signals set to SIG_DFL where the default is to ignore.
	 * However, don't put SIGCONT in p_sigignore,
	 * as we have to restart the process.
	 */
	if (sa->sa_handler == SIG_IGN ||
	    (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) {

		clear_procsiglist(p, bit, in_sigstart);
		if (signum != SIGCONT)
			p->p_sigignore |= bit;	/* easier in psignal */
		p->p_sigcatch &= ~bit;
	} else {
		p->p_sigignore &= ~bit;
		if (sa->sa_handler == SIG_DFL)
			p->p_sigcatch &= ~bit;
		else
			p->p_sigcatch |= bit;
	}
	return (0);
}

/*
 * Initialize signal state for process 0;
 * set to ignore signals that are ignored by default.
 */
void
siginit(proc_t p)
{
	int i;

	for (i = 1; i < NSIG; i++)
		if (sigprop[i] & SA_IGNORE && i != SIGCONT)
			p->p_sigignore |= sigmask(i);
}

/*
 * Reset signals for an exec of the specified process.
 */
void
execsigs(proc_t p, thread_t thread)
{
	struct sigacts *ps = p->p_sigacts;
	int nc, mask;
	struct uthread *ut;

	ut = (struct uthread *)get_bsdthread_info(thread);

	/*
	 * transfer saved signal states from the process
	 * back to the current thread.
	 *
	 * NOTE: We do this without the process locked,
	 * because we are guaranteed to be single-threaded
	 * by this point in exec and the p_siglist is
	 * only accessed by threads inside the process.
	 */
	ut->uu_siglist |= p->p_siglist;
	p->p_siglist = 0;

	/*
	 * Reset caught signals.  Held signals remain held
	 * through p_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	while (p->p_sigcatch) {
		nc = ffs((long)p->p_sigcatch);
		mask = sigmask(nc);
		p->p_sigcatch &= ~mask;
		if (sigprop[nc] & SA_IGNORE) {
			if (nc != SIGCONT)
				p->p_sigignore |= mask;
			ut->uu_siglist &= ~mask;
		}
		ps->ps_sigact[nc] = SIG_DFL;
	}

	/*
	 * Reset stack state to the user stack.
	 * Clear set of signals caught on the signal stack.
	 */
	/* thread */
	ut->uu_sigstk.ss_flags = SA_DISABLE;
	ut->uu_sigstk.ss_size = 0;
	ut->uu_sigstk.ss_sp = USER_ADDR_NULL;
	ut->uu_flag &= ~UT_ALTSTACK;
	/* process */
	ps->ps_sigonstack = 0;
}

/*
 * Manipulate signal mask.
 * The new mask is copied in from user space (unless uap->mask is NULL, in
 * which case only the old mask is wanted), and the old mask is copied out
 * through uap->omask; the libc stub does the rest.
 */
int
sigprocmask(proc_t p, struct sigprocmask_args *uap, __unused int32_t *retval)
{
	int error = 0;
	sigset_t oldmask, nmask;
	user_addr_t omask = uap->omask;
	struct uthread *ut;

	ut = (struct uthread *)get_bsdthread_info(current_thread());
	oldmask = ut->uu_sigmask;

	if (uap->mask == USER_ADDR_NULL) {
		/* just want old mask */
		goto out;
	}
	error = copyin(uap->mask, &nmask, sizeof(sigset_t));
	if (error)
		goto out;

	switch (uap->how) {
	case SIG_BLOCK:
		block_procsigmask(p, (nmask & ~sigcantmask));
		signal_setast(current_thread());
		break;

	case SIG_UNBLOCK:
		unblock_procsigmask(p, (nmask & ~sigcantmask));
		signal_setast(current_thread());
		break;

	case SIG_SETMASK:
		set_procsigmask(p, (nmask & ~sigcantmask));
		signal_setast(current_thread());
		break;

	default:
		error = EINVAL;
		break;
	}
out:
	if (!error && omask != USER_ADDR_NULL)
		copyout(&oldmask, omask, sizeof(sigset_t));
	return (error);
}
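
/*
 * Userspace sketch (editor's illustration): blocking a signal around a
 * critical section with sigprocmask(2).  In a multithreaded process,
 * pthread_sigmask(3) (backed by __pthread_sigmask below) is the per-thread
 * equivalent.
 *
 *	#include <signal.h>
 *
 *	void critical_section(void)
 *	{
 *		sigset_t block, old;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGINT);
 *		sigprocmask(SIG_BLOCK, &block, &old);	// SIGINT now held
 *		// ... work that must not be interrupted ...
 *		sigprocmask(SIG_SETMASK, &old, NULL);	// restore; a pending
 *							// SIGINT is delivered now
 *	}
 */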

int
sigpending(__unused proc_t p, struct sigpending_args *uap, __unused int32_t *retval)
{
	struct uthread *ut;
	sigset_t pendlist;

	ut = (struct uthread *)get_bsdthread_info(current_thread());
	pendlist = ut->uu_siglist;

	if (uap->osv)
		copyout(&pendlist, uap->osv, sizeof(sigset_t));
	return (0);
}

/*
 * Suspend process until signal, providing mask to be set
 * in the meantime.  Note nonstandard calling convention:
 * libc stub passes mask, not pointer, to save a copyin.
 */

static int
sigcontinue(__unused int error)
{
//	struct uthread *ut = get_bsdthread_info(current_thread());
	unix_syscall_return(EINTR);
}

int
sigsuspend(proc_t p, struct sigsuspend_args *uap, int32_t *retval)
{
	__pthread_testcancel(1);
	return (sigsuspend_nocancel(p, (struct sigsuspend_nocancel_args *)uap, retval));
}

int
sigsuspend_nocancel(proc_t p, struct sigsuspend_nocancel_args *uap, __unused int32_t *retval)
{
	struct uthread *ut;

	ut = (struct uthread *)get_bsdthread_info(current_thread());

	/*
	 * When returning from sigpause, we want
	 * the old mask to be restored after the
	 * signal handler has finished.  Thus, we
	 * save it here and mark the sigacts structure
	 * to indicate this.
	 */
	ut->uu_oldmask = ut->uu_sigmask;
	ut->uu_flag |= UT_SAS_OLDMASK;
	ut->uu_sigmask = (uap->mask & ~sigcantmask);
	(void) tsleep0((caddr_t) p, PPAUSE|PCATCH, "pause", 0, sigcontinue);
	/* always return EINTR rather than ERESTART... */
	return (EINTR);
}
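
/*
 * Userspace sketch (editor's illustration): the canonical race-free wait
 * built on the semantics above -- block the signal, test the predicate,
 * then atomically unblock and sleep in sigsuspend(2):
 *
 *	#include <signal.h>
 *
 *	static volatile sig_atomic_t got_usr1;
 *	static void on_usr1(int s) { (void)s; got_usr1 = 1; }
 *
 *	void wait_for_usr1(void)
 *	{
 *		sigset_t block, old;
 *
 *		signal(SIGUSR1, on_usr1);
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, &old);
 *		while (!got_usr1)
 *			sigsuspend(&old);	// atomically unblock + sleep;
 *						// returns -1/EINTR, as above
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *	}
 */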


int
__disable_threadsignal(__unused proc_t p,
    __unused struct __disable_threadsignal_args *uap,
    __unused int32_t *retval)
{
	struct uthread *uth;

	uth = (struct uthread *)get_bsdthread_info(current_thread());

	/* No longer valid to have any signal delivered */
	uth->uu_flag |= (UT_NO_SIGMASK | UT_CANCELDISABLE);

	return (0);
}

void
__pthread_testcancel(int presyscall)
{
	thread_t self = current_thread();
	struct uthread * uthread;

	uthread = (struct uthread *)get_bsdthread_info(self);

	uthread->uu_flag &= ~UT_NOTCANCELPT;

	if ((uthread->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
		if (presyscall != 0) {
			unix_syscall_return(EINTR);
			/* NOTREACHED */
		} else
			thread_abort_safely(self);
	}
}

int
__pthread_markcancel(__unused proc_t p,
    struct __pthread_markcancel_args *uap, __unused int32_t *retval)
{
	thread_act_t target_act;
	int error = 0;
	struct uthread *uth;

	target_act = (thread_act_t)port_name_to_thread(uap->thread_port);

	if (target_act == THR_ACT_NULL)
		return (ESRCH);

	uth = (struct uthread *)get_bsdthread_info(target_act);

	/* if the thread is in vfork do not cancel */
	if ((uth->uu_flag & (UT_VFORK | UT_CANCEL | UT_CANCELED)) == 0) {
		uth->uu_flag |= (UT_CANCEL | UT_NO_SIGMASK);
		if (((uth->uu_flag & UT_NOTCANCELPT) == 0)
		    && ((uth->uu_flag & UT_CANCELDISABLE) == 0))
			thread_abort_safely(target_act);
	}

	thread_deallocate(target_act);
	return (error);
}

/*
 * If action == 0: return the cancellation state and, if the thread is
 * marked for cancellation, make it canceled.
 * If action == 1: enable cancel handling.
 * If action == 2: disable cancel handling.
 */
int
__pthread_canceled(__unused proc_t p,
    struct __pthread_canceled_args *uap, __unused int32_t *retval)
{
	thread_act_t thread;
	struct uthread *uth;
	int action = uap->action;

	thread = current_thread();
	uth = (struct uthread *)get_bsdthread_info(thread);

	switch (action) {
	case 1:
		uth->uu_flag &= ~UT_CANCELDISABLE;
		return (0);
	case 2:
		uth->uu_flag |= UT_CANCELDISABLE;
		return (0);
	case 0:
	default:
		/* marked for cancel, not disabled, and not yet canceled */
		if ((uth->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
			uth->uu_flag &= ~UT_CANCEL;
			uth->uu_flag |= (UT_CANCELED | UT_NO_SIGMASK);
			return (0);
		}
		return (EINVAL);
	}
}
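
/*
 * Userspace sketch (editor's illustration): these syscalls underlie the
 * pthread cancellation API (the exact mapping is a detail of Apple's
 * libpthread and is assumed here, not confirmed by this file).  A typical
 * cancellable worker:
 *
 *	#include <pthread.h>
 *
 *	void *worker(void *arg)
 *	{
 *		(void)arg;
 *		// roughly action 2 above: mark the thread cancel-disabled
 *		pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
 *		// ... non-cancellable setup ...
 *		// roughly action 1 above: re-enable cancel handling
 *		pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
 *		for (;;) {
 *			// consume a pending cancel, if any (action 0 path)
 *			pthread_testcancel();
 *			// ... cancellable work ...
 *		}
 *		return NULL;
 *	}
 *
 * pthread_cancel(3) in turn reaches __pthread_markcancel() above.
 */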

void
__posix_sem_syscall_return(kern_return_t kern_result)
{
	int error = 0;

	if (kern_result == KERN_SUCCESS)
		error = 0;
	else if (kern_result == KERN_ABORTED)
		error = EINTR;
	else if (kern_result == KERN_OPERATION_TIMED_OUT)
		error = ETIMEDOUT;
	else
		error = EINVAL;
	unix_syscall_return(error);
	/* does not return */
}

#if OLD_SEMWAIT_SIGNAL
/*
 * Returns:	0			Success
 *		EINTR
 *		ETIMEDOUT
 *		EINVAL
 *		EFAULT if timespec is NULL
 */
int
__old_semwait_signal(proc_t p, struct __old_semwait_signal_args *uap,
    int32_t *retval)
{
	__pthread_testcancel(0);
	return (__old_semwait_signal_nocancel(p, (struct __old_semwait_signal_nocancel_args *)uap, retval));
}

int
__old_semwait_signal_nocancel(proc_t p, struct __old_semwait_signal_nocancel_args *uap,
    __unused int32_t *retval)
{
	kern_return_t kern_result;
	int error;
	mach_timespec_t then;
	struct timespec now;
	struct user_timespec ts;
	boolean_t truncated_timeout = FALSE;

	if (uap->timeout) {
		if (IS_64BIT_PROCESS(p)) {
			struct user64_timespec ts64;
			error = copyin(uap->ts, &ts64, sizeof(ts64));
			ts.tv_sec = ts64.tv_sec;
			ts.tv_nsec = ts64.tv_nsec;
		} else {
			struct user32_timespec ts32;
			error = copyin(uap->ts, &ts32, sizeof(ts32));
			ts.tv_sec = ts32.tv_sec;
			ts.tv_nsec = ts32.tv_nsec;
		}

		if (error) {
			return error;
		}

		if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) {
			ts.tv_sec = 0xFFFFFFFF;
			ts.tv_nsec = 0;
			truncated_timeout = TRUE;
		}

		if (uap->relative) {
			then.tv_sec = ts.tv_sec;
			then.tv_nsec = ts.tv_nsec;
		} else {
			nanotime(&now);

			/* if time has elapsed, set time to a null timespec to bail out right away */
			if (now.tv_sec == ts.tv_sec ?
			    now.tv_nsec > ts.tv_nsec :
			    now.tv_sec > ts.tv_sec) {
				then.tv_sec = 0;
				then.tv_nsec = 0;
			} else {
				then.tv_sec = ts.tv_sec - now.tv_sec;
				then.tv_nsec = ts.tv_nsec - now.tv_nsec;
				if (then.tv_nsec < 0) {
					then.tv_nsec += NSEC_PER_SEC;
					then.tv_sec--;
				}
			}
		}

		if (uap->mutex_sem == 0)
			kern_result = semaphore_timedwait_trap_internal((mach_port_name_t)uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
		else
			kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
	} else {
		if (uap->mutex_sem == 0)
			kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return);
		else
			kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return);
	}

	if (kern_result == KERN_SUCCESS && !truncated_timeout)
		return (0);
	else if (kern_result == KERN_SUCCESS && truncated_timeout)
		return (EINTR);	/* simulate an exceptional condition because Mach doesn't support a longer timeout */
	else if (kern_result == KERN_ABORTED)
		return (EINTR);
	else if (kern_result == KERN_OPERATION_TIMED_OUT)
		return (ETIMEDOUT);
	else
		return (EINVAL);
}
#endif /* OLD_SEMWAIT_SIGNAL */
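
/*
 * Worked example (editor's illustration) of the absolute-to-relative
 * conversion above: with now = {100, 900000000} and an absolute deadline
 * ts = {102, 100000000}, the subtraction gives
 *
 *	then.tv_sec  = 102 - 100		      =  2
 *	then.tv_nsec = 100000000 - 900000000	      = -800000000  (< 0: borrow)
 *	then.tv_nsec += NSEC_PER_SEC		      ->  200000000
 *	then.tv_sec--				      ->  1
 *
 * i.e. a relative wait of 1.2s, which is what the Mach timed-wait traps
 * expect.  A deadline already in the past collapses to {0, 0} so the trap
 * returns immediately.
 */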

/*
 * Returns:	0			Success
 *		EINTR
 *		ETIMEDOUT
 *		EINVAL
 *		EFAULT if timespec is NULL
 */
int
__semwait_signal(proc_t p, struct __semwait_signal_args *uap,
    int32_t *retval)
{
	__pthread_testcancel(0);
	return (__semwait_signal_nocancel(p, (struct __semwait_signal_nocancel_args *)uap, retval));
}

int
__semwait_signal_nocancel(__unused proc_t p, struct __semwait_signal_nocancel_args *uap,
    __unused int32_t *retval)
{
	kern_return_t kern_result;
	mach_timespec_t then;
	struct timespec now;
	struct user_timespec ts;
	boolean_t truncated_timeout = FALSE;

	if (uap->timeout) {
		ts.tv_sec = uap->tv_sec;
		ts.tv_nsec = uap->tv_nsec;

		if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) {
			ts.tv_sec = 0xFFFFFFFF;
			ts.tv_nsec = 0;
			truncated_timeout = TRUE;
		}

		if (uap->relative) {
			then.tv_sec = ts.tv_sec;
			then.tv_nsec = ts.tv_nsec;
		} else {
			nanotime(&now);

			/* if time has elapsed, set time to a null timespec to bail out right away */
			if (now.tv_sec == ts.tv_sec ?
			    now.tv_nsec > ts.tv_nsec :
			    now.tv_sec > ts.tv_sec) {
				then.tv_sec = 0;
				then.tv_nsec = 0;
			} else {
				then.tv_sec = ts.tv_sec - now.tv_sec;
				then.tv_nsec = ts.tv_nsec - now.tv_nsec;
				if (then.tv_nsec < 0) {
					then.tv_nsec += NSEC_PER_SEC;
					then.tv_sec--;
				}
			}
		}

		if (uap->mutex_sem == 0)
			kern_result = semaphore_timedwait_trap_internal((mach_port_name_t)uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
		else
			kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
	} else {
		if (uap->mutex_sem == 0)
			kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return);
		else
			kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return);
	}

	if (kern_result == KERN_SUCCESS && !truncated_timeout)
		return (0);
	else if (kern_result == KERN_SUCCESS && truncated_timeout)
		return (EINTR);	/* simulate an exceptional condition because Mach doesn't support a longer timeout */
	else if (kern_result == KERN_ABORTED)
		return (EINTR);
	else if (kern_result == KERN_OPERATION_TIMED_OUT)
		return (ETIMEDOUT);
	else
		return (EINVAL);
}


int
__pthread_kill(__unused proc_t p, struct __pthread_kill_args *uap,
    __unused int32_t *retval)
{
	thread_t target_act;
	int error = 0;
	int signum = uap->sig;
	struct uthread *uth;

	target_act = (thread_t)port_name_to_thread(uap->thread_port);

	if (target_act == THREAD_NULL)
		return (ESRCH);
	if ((u_int)signum >= NSIG) {
		error = EINVAL;
		goto out;
	}

	uth = (struct uthread *)get_bsdthread_info(target_act);

	if (uth->uu_flag & UT_NO_SIGMASK) {
		error = ESRCH;
		goto out;
	}

	if (signum)
		psignal_uthread(target_act, signum);
out:
	thread_deallocate(target_act);
	return (error);
}


int
__pthread_sigmask(__unused proc_t p, struct __pthread_sigmask_args *uap,
    __unused int32_t *retval)
{
	user_addr_t set = uap->set;
	user_addr_t oset = uap->oset;
	sigset_t nset;
	int error = 0;
	struct uthread *ut;
	sigset_t oldset;

	ut = (struct uthread *)get_bsdthread_info(current_thread());
	oldset = ut->uu_sigmask;

	if (set == USER_ADDR_NULL) {
		/* need only old mask */
		goto out;
	}

	error = copyin(set, &nset, sizeof(sigset_t));
	if (error)
		goto out;

	switch (uap->how) {
	case SIG_BLOCK:
		ut->uu_sigmask |= (nset & ~sigcantmask);
		break;

	case SIG_UNBLOCK:
		ut->uu_sigmask &= ~(nset);
		signal_setast(current_thread());
		break;

	case SIG_SETMASK:
		ut->uu_sigmask = (nset & ~sigcantmask);
		signal_setast(current_thread());
		break;

	default:
		error = EINVAL;
	}
out:
	if (!error && oset != USER_ADDR_NULL)
		copyout(&oldset, oset, sizeof(sigset_t));

	return (error);
}

/*
 * Returns:	0			Success
 *		EINVAL
 *	copyin:EFAULT
 *	copyout:EFAULT
 */
int
__sigwait(proc_t p, struct __sigwait_args *uap, int32_t *retval)
{
	__pthread_testcancel(1);
	return (__sigwait_nocancel(p, (struct __sigwait_nocancel_args *)uap, retval));
}

int
__sigwait_nocancel(proc_t p, struct __sigwait_nocancel_args *uap, __unused int32_t *retval)
{
	struct uthread *ut;
	struct uthread *uth;
	int error = 0;
	sigset_t mask;
	sigset_t siglist;
	sigset_t sigw = 0;
	int signum;

	ut = (struct uthread *)get_bsdthread_info(current_thread());

	if (uap->set == USER_ADDR_NULL)
		return (EINVAL);

	error = copyin(uap->set, &mask, sizeof(sigset_t));
	if (error)
		return (error);

	siglist = (mask & ~sigcantmask);

	if (siglist == 0)
		return (EINVAL);

	proc_lock(p);
	if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
		proc_unlock(p);
		return (EINVAL);
	} else {
		proc_signalstart(p, 1);
		TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
			if ((sigw = uth->uu_siglist & siglist)) {
				break;
			}
		}
		proc_signalend(p, 1);
	}

	if (sigw) {
		/* The signal was pending on a thread */
		goto sigwait1;
	}
	/*
	 * When returning from sigwait, we want
	 * the old mask to be restored after the
	 * signal handler has finished.  Thus, we
	 * save it here and mark the sigacts structure
	 * to indicate this.
	 */
	uth = ut;		/* wait for it to be delivered to us */
	ut->uu_oldmask = ut->uu_sigmask;
	ut->uu_flag |= UT_SAS_OLDMASK;
	if (siglist == (sigset_t)0) {
		proc_unlock(p);
		return (EINVAL);
	}
	/* SIGKILL and SIGSTOP are not maskable, either */
	ut->uu_sigmask = ~(siglist|sigcantmask);
	ut->uu_sigwait = siglist;

	/* No Continuations for now */
	error = msleep((caddr_t)&ut->uu_sigwait, &p->p_mlock, PPAUSE|PCATCH, "pause", 0);

	if (error == ERESTART)
		error = 0;

	sigw = (ut->uu_sigwait & siglist);
	ut->uu_sigmask = ut->uu_oldmask;
	ut->uu_oldmask = 0;
	ut->uu_flag &= ~UT_SAS_OLDMASK;
sigwait1:
	ut->uu_sigwait = 0;
	if (!error) {
		signum = ffs((unsigned int)sigw);
		if (!signum)
			panic("sigwait with no signal wakeup");
		/* Clear the pending signal in the thread it was delivered to */
		uth->uu_siglist &= ~(sigmask(signum));

#if CONFIG_DTRACE
		DTRACE_PROC2(signal__clear, int, signum, siginfo_t *, &(ut->t_dtrace_siginfo));
#endif

		proc_unlock(p);
		if (uap->sig != USER_ADDR_NULL)
			error = copyout(&signum, uap->sig, sizeof(int));
	} else
		proc_unlock(p);

	return (error);
}
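
/*
 * Userspace sketch (editor's illustration): a dedicated signal-handling
 * thread built on the primitive above, with the signals blocked in every
 * thread so they stay pending until sigwait(3) collects them:
 *
 *	#include <pthread.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	void *sig_thread(void *arg)
 *	{
 *		sigset_t *set = arg;
 *		int sig;
 *
 *		for (;;) {
 *			if (sigwait(set, &sig) == 0)
 *				printf("caught signal %d\n", sig);
 *		}
 *		return NULL;
 *	}
 *
 *	int main(void)
 *	{
 *		sigset_t set;
 *		pthread_t t;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGTERM);
 *		pthread_sigmask(SIG_BLOCK, &set, NULL);	// inherited by t
 *		pthread_create(&t, NULL, sig_thread, &set);
 *		pthread_join(t, NULL);
 *		return 0;
 *	}
 */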

int
sigaltstack(__unused proc_t p, struct sigaltstack_args *uap, __unused int32_t *retval)
{
	struct kern_sigaltstack ss;
	struct kern_sigaltstack *pstk;
	int error;
	struct uthread *uth;
	int onstack;

	uth = (struct uthread *)get_bsdthread_info(current_thread());

	pstk = &uth->uu_sigstk;
	if ((uth->uu_flag & UT_ALTSTACK) == 0)
		uth->uu_sigstk.ss_flags |= SA_DISABLE;
	onstack = pstk->ss_flags & SA_ONSTACK;
	if (uap->oss) {
		if (IS_64BIT_PROCESS(p)) {
			struct user64_sigaltstack ss64;
			sigaltstack_kern_to_user64(pstk, &ss64);
			error = copyout(&ss64, uap->oss, sizeof(ss64));
		} else {
			struct user32_sigaltstack ss32;
			sigaltstack_kern_to_user32(pstk, &ss32);
			error = copyout(&ss32, uap->oss, sizeof(ss32));
		}
		if (error)
			return (error);
	}
	if (uap->nss == USER_ADDR_NULL)
		return (0);
	if (IS_64BIT_PROCESS(p)) {
		struct user64_sigaltstack ss64;
		error = copyin(uap->nss, &ss64, sizeof(ss64));
		sigaltstack_user64_to_kern(&ss64, &ss);
	} else {
		struct user32_sigaltstack ss32;
		error = copyin(uap->nss, &ss32, sizeof(ss32));
		sigaltstack_user32_to_kern(&ss32, &ss);
	}
	if (error)
		return (error);
	if ((ss.ss_flags & ~SA_DISABLE) != 0) {
		return (EINVAL);
	}

	if (ss.ss_flags & SA_DISABLE) {
		/* if we are here we are not in the signal handler, so no need to check */
		if (uth->uu_sigstk.ss_flags & SA_ONSTACK)
			return (EINVAL);
		uth->uu_flag &= ~UT_ALTSTACK;
		uth->uu_sigstk.ss_flags = ss.ss_flags;
		return (0);
	}
	if (onstack)
		return (EPERM);
	/* The older minimum stack size was 8K; enforce it to avoid compatibility problems */
#define OLDMINSIGSTKSZ	(8 * 1024)
	if (ss.ss_size < OLDMINSIGSTKSZ)
		return (ENOMEM);
	uth->uu_flag |= UT_ALTSTACK;
	uth->uu_sigstk = ss;
	return (0);
}
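
/*
 * Userspace sketch (editor's illustration): pairing sigaltstack(2) with
 * SA_ONSTACK so a SIGSEGV caused by stack exhaustion can still run its
 * handler:
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *
 *	static void on_segv(int s) { (void)s; _exit(1); }
 *
 *	int install(void)
 *	{
 *		stack_t ss;
 *		struct sigaction sa;
 *
 *		ss.ss_sp = malloc(SIGSTKSZ);	// well above the 8K floor
 *		ss.ss_size = SIGSTKSZ;		// enforced by OLDMINSIGSTKSZ
 *		ss.ss_flags = 0;
 *		if (ss.ss_sp == NULL || sigaltstack(&ss, NULL) == -1)
 *			return -1;
 *
 *		memset(&sa, 0, sizeof(sa));
 *		sa.sa_handler = on_segv;
 *		sa.sa_flags = SA_ONSTACK;	// run handler on the alt stack
 *		sigemptyset(&sa.sa_mask);
 *		return sigaction(SIGSEGV, &sa, NULL);
 *	}
 */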

int
kill(proc_t cp, struct kill_args *uap, __unused int32_t *retval)
{
	proc_t p;
	kauth_cred_t uc = kauth_cred_get();
	int posix = uap->posix;		/* !0 if POSIX behaviour desired */

	AUDIT_ARG(pid, uap->pid);
	AUDIT_ARG(signum, uap->signum);

	if ((u_int)uap->signum >= NSIG)
		return (EINVAL);
	if (uap->pid > 0) {
		/* kill single process */
		if ((p = proc_find(uap->pid)) == NULL) {
			if ((p = pzfind(uap->pid)) != NULL) {
				/*
				 * IEEE Std 1003.1-2001: return success
				 * when killing a zombie.
				 */
				return (0);
			}
			return (ESRCH);
		}
		AUDIT_ARG(process, p);
		if (!cansignal(cp, uc, p, uap->signum, 0)) {
			proc_rele(p);
			return (EPERM);
		}
		if (uap->signum)
			psignal(p, uap->signum);
		proc_rele(p);
		return (0);
	}
	switch (uap->pid) {
	case -1:		/* broadcast signal */
		return (killpg1(cp, uap->signum, 0, 1, posix));
	case 0:			/* signal own process group */
		return (killpg1(cp, uap->signum, 0, 0, posix));
	default:		/* negative explicit process group */
		return (killpg1(cp, uap->signum, -(uap->pid), 0, posix));
	}
	/* NOTREACHED */
}
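
/*
 * Example (editor's illustration) of the pid dispatch above, from the
 * caller's point of view:
 *
 *	kill(1234, SIGTERM);	// signal process 1234
 *	kill(0, SIGTERM);	// signal the caller's own process group
 *	kill(-1, SIGTERM);	// broadcast to all signalable processes
 *	kill(-1234, SIGTERM);	// signal process group 1234
 *	kill(1234, 0);		// no signal sent; only the permission and
 *				// existence checks run, the standard way
 *				// to probe whether a process exists
 */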

static int
killpg1_filt(proc_t p, void * arg)
{
	struct killpg1_filtargs * kfargp = (struct killpg1_filtargs *)arg;
	proc_t cp = kfargp->cp;
	int posix = kfargp->posix;

	if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
	    (!posix && p == cp))
		return (0);
	else
		return (1);
}


static int
killpg1_pgrpfilt(proc_t p, __unused void * arg)
{
	if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
	    (p->p_stat == SZOMB))
		return (0);
	else
		return (1);
}


static int
killpg1_callback(proc_t p, void * arg)
{
	struct killpg1_iterargs * kargp = (struct killpg1_iterargs *)arg;
	proc_t cp = kargp->cp;
	kauth_cred_t uc = kargp->uc;	/* refcounted by the caller; safe to use internal fields */
	int signum = kargp->signum;
	int * nfoundp = kargp->nfoundp;
	int n;
	int zombie = 0;
	int error = 0;

	if ((kargp->zombie != 0) && ((p->p_listflag & P_LIST_EXITED) == P_LIST_EXITED))
		zombie = 1;

	if (zombie != 0) {
		proc_list_lock();
		error = cansignal(cp, uc, p, signum, zombie);
		proc_list_unlock();

		if (error != 0 && nfoundp != NULL) {
			n = *nfoundp;
			*nfoundp = n + 1;
		}
	} else {
		if (cansignal(cp, uc, p, signum, 0) == 0)
			return (PROC_RETURNED);

		if (nfoundp != NULL) {
			n = *nfoundp;
			*nfoundp = n + 1;
		}
		if (signum != 0)
			psignal(p, signum);
	}

	return (PROC_RETURNED);
}

/*
 * Common code for kill process group/broadcast kill.
 * cp is calling process.
 */
int
killpg1(proc_t cp, int signum, int pgid, int all, int posix)
{
	kauth_cred_t uc;
	struct pgrp *pgrp;
	int nfound = 0;
	struct killpg1_iterargs karg;
	struct killpg1_filtargs kfarg;
	int error = 0;

	uc = kauth_cred_proc_ref(cp);
	if (all) {
		/*
		 * broadcast
		 */
		kfarg.posix = posix;
		kfarg.cp = cp;

		karg.cp = cp;
		karg.uc = uc;
		karg.nfoundp = &nfound;
		karg.signum = signum;
		karg.zombie = 1;

		proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST), killpg1_callback, &karg, killpg1_filt, (void *)&kfarg);
	} else {
		if (pgid == 0) {
			/*
			 * zero pgid means send to my process group.
			 */
			pgrp = proc_pgrp(cp);
		} else {
			pgrp = pgfind(pgid);
			if (pgrp == NULL) {
				error = ESRCH;
				goto out;
			}
		}

		karg.nfoundp = &nfound;
		karg.uc = uc;
		karg.signum = signum;
		karg.cp = cp;
		karg.zombie = 0;

		/* PGRP_DROPREF drops the pgrp reference */
		pgrp_iterate(pgrp, PGRP_BLOCKITERATE | PGRP_DROPREF, killpg1_callback, &karg,
		    killpg1_pgrpfilt, NULL);
	}
	error = (nfound ? 0 : (posix ? EPERM : ESRCH));
out:
	kauth_cred_unref(&uc);
	return (error);
}


/*
 * Send a signal to a process group.
 */
void
gsignal(int pgid, int signum)
{
	struct pgrp *pgrp;

	if (pgid && (pgrp = pgfind(pgid))) {
		pgsignal(pgrp, signum, 0);
		pg_rele(pgrp);
	}
}

/*
 * Send a signal to a process group.  If checkctty is 1,
 * limit to members which have a controlling terminal.
 */

static int
pgsignal_filt(proc_t p, void * arg)
{
	int checkctty = *(int *)arg;

	if ((checkctty == 0) || p->p_flag & P_CONTROLT)
		return (1);
	else
		return (0);
}


static int
pgsignal_callback(proc_t p, void * arg)
{
	int signum = *(int *)arg;

	psignal(p, signum);
	return (PROC_RETURNED);
}


void
pgsignal(struct pgrp *pgrp, int signum, int checkctty)
{
	if (pgrp != PGRP_NULL) {
		pgrp_iterate(pgrp, PGRP_BLOCKITERATE, pgsignal_callback, &signum, pgsignal_filt, &checkctty);
	}
}


void
tty_pgsignal(struct tty *tp, int signum, int checkctty)
{
	struct pgrp * pg;

	pg = tty_pgrp(tp);
	if (pg != PGRP_NULL) {
		pgrp_iterate(pg, PGRP_BLOCKITERATE, pgsignal_callback, &signum, pgsignal_filt, &checkctty);
		pg_rele(pg);
	}
}

/*
 * Send a signal caused by a trap to a specific thread.
 */
void
threadsignal(thread_t sig_actthread, int signum, mach_exception_code_t code)
{
	struct uthread *uth;
	struct task * sig_task;
	proc_t p;
	int mask;

	if ((u_int)signum >= NSIG || signum == 0)
		return;

	mask = sigmask(signum);
	if ((mask & threadmask) == 0)
		return;
	sig_task = get_threadtask(sig_actthread);
	p = (proc_t)(get_bsdtask_info(sig_task));

	uth = get_bsdthread_info(sig_actthread);
	if (uth->uu_flag & UT_VFORK)
		p = uth->uu_proc;

	proc_lock(p);
	if (!(p->p_lflag & P_LTRACED) && (p->p_sigignore & mask)) {
		proc_unlock(p);
		return;
	}

	uth->uu_siglist |= mask;
	uth->uu_code = code;
	proc_unlock(p);

	/* mark on process as well */
	signal_setast(sig_actthread);
}

static kern_return_t
get_signalthread(proc_t p, int signum, thread_t * thr)
{
	struct uthread *uth;
	sigset_t mask = sigmask(signum);
	thread_t sig_thread;
	struct task * sig_task = p->task;
	kern_return_t kret;

	*thr = THREAD_NULL;

	if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
		sig_thread = p->p_vforkact;
		kret = check_actforsig(sig_task, sig_thread, 1);
		if (kret == KERN_SUCCESS) {
			*thr = sig_thread;
			return (KERN_SUCCESS);
		} else
			return (KERN_FAILURE);
	}

	proc_lock(p);

	TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
		if (((uth->uu_flag & UT_NO_SIGMASK) == 0) &&
		    (((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask))) {
			if (check_actforsig(p->task, uth->uu_context.vc_thread, 1) == KERN_SUCCESS) {
				*thr = uth->uu_context.vc_thread;
				proc_unlock(p);
				return (KERN_SUCCESS);
			}
		}
	}
	proc_unlock(p);
	if (get_signalact(p->task, thr, 1) == KERN_SUCCESS) {
		return (KERN_SUCCESS);
	}

	return (KERN_FAILURE);
}

/*
 * Send the signal to the process.  If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *   o When a stop signal is sent to a sleeping process that takes the
 *     default action, the process is stopped without awakening it.
 *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 *     regardless of the signal action (eg, blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 */
static void
psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum)
{
	int prop;
	user_addr_t action = USER_ADDR_NULL;
	proc_t sig_proc;
	thread_t sig_thread;
	register task_t sig_task;
	int mask;
	struct uthread *uth;
	kern_return_t kret;
	uid_t r_uid;
	proc_t pp;
	kauth_cred_t my_cred;

	if ((u_int)signum >= NSIG || signum == 0)
		panic("psignal signal number");
	mask = sigmask(signum);
	prop = sigprop[signum];

#if SIGNAL_DEBUG
	if (rdebug_proc && (p != PROC_NULL) && (p == rdebug_proc)) {
		ram_printf(3);
	}
#endif /* SIGNAL_DEBUG */

	/* catch unexpected initproc kills early for easier debugging */
	if (signum == SIGKILL && p == initproc)
		panic_plain("unexpected SIGKILL of %s %s",
		    (p->p_name[0] != '\0' ? p->p_name : "initproc"),
		    ((p->p_csflags & CS_KILLED) ? "(CS_KILLED)" : ""));

	/*
	 * We will need the task pointer later.  Grab it now to
	 * check for a zombie process.  Also don't send signals
	 * to kernel internal tasks.
	 */
	if (flavor & PSIG_VFORK) {
		sig_task = task;
		sig_thread = thread;
		sig_proc = p;
	} else if (flavor & PSIG_THREAD) {
		sig_task = get_threadtask(thread);
		sig_thread = thread;
		sig_proc = (proc_t)get_bsdtask_info(sig_task);
	} else if (flavor & PSIG_TRY_THREAD) {
		sig_task = p->task;
		sig_thread = thread;
		sig_proc = p;
	} else {
		sig_task = p->task;
		sig_thread = (struct thread *)0;
		sig_proc = p;
	}

	if ((sig_task == TASK_NULL) || is_kerneltask(sig_task))
		return;

	/*
	 * Do not send signals to the process that has the thread
	 * doing a reboot().  Doing so will mark that thread aborted
	 * and can cause IO failures which will cause data loss.  There's
	 * also no need to send a signal to a process that is in the middle
	 * of being torn down.
	 */
	if (ISSET(sig_proc->p_flag, P_REBOOT) ||
	    ISSET(sig_proc->p_lflag, P_LEXIT))
		return;

	if ((flavor & (PSIG_VFORK | PSIG_THREAD)) == 0) {
		proc_knote(sig_proc, NOTE_SIGNAL | signum);
	}

	if ((flavor & PSIG_LOCKED) == 0)
		proc_signalstart(sig_proc, 0);

	/*
	 * Deliver the signal to the first thread in the task.  This
	 * allows single threaded applications which use signals to
	 * be able to be linked with multithreaded libraries.  We have
	 * an implicit reference to the current thread, but need
	 * an explicit one otherwise.  The thread reference keeps
	 * the corresponding task data structures around too.  This
	 * reference is released by thread_deallocate.
	 */


	if (((flavor & PSIG_VFORK) == 0) && ((sig_proc->p_lflag & P_LTRACED) == 0) && (sig_proc->p_sigignore & mask)) {
		DTRACE_PROC3(signal__discard, thread_t, sig_thread, proc_t, sig_proc, int, signum);
		goto psigout;
	}

	if (flavor & PSIG_VFORK) {
		action = SIG_DFL;
		act_set_astbsd(sig_thread);
		kret = KERN_SUCCESS;
	} else if (flavor & PSIG_TRY_THREAD) {
		uth = get_bsdthread_info(sig_thread);
		if (((uth->uu_flag & UT_NO_SIGMASK) == 0) &&
		    (((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask)) &&
		    ((kret = check_actforsig(sig_proc->task, sig_thread, 1)) == KERN_SUCCESS)) {
			/* deliver to specified thread */
		} else {
			/* deliver to any willing thread */
			kret = get_signalthread(sig_proc, signum, &sig_thread);
		}
	} else if (flavor & PSIG_THREAD) {
		/* If successful return with ast set */
		kret = check_actforsig(sig_task, sig_thread, 1);
	} else {
		/* If successful return with ast set */
		kret = get_signalthread(sig_proc, signum, &sig_thread);
	}
	if (kret != KERN_SUCCESS) {
#if SIGNAL_DEBUG
		ram_printf(1);
#endif /* SIGNAL_DEBUG */
		goto psigout;
	}

	uth = get_bsdthread_info(sig_thread);

	/*
	 * If proc is traced, always give parent a chance.
	 */

	if ((flavor & PSIG_VFORK) == 0) {
		if (sig_proc->p_lflag & P_LTRACED)
			action = SIG_DFL;
		else {
			/*
			 * If the signal is being ignored,
			 * then we forget about it immediately.
			 * (Note: we don't set SIGCONT in p_sigignore,
			 * and if it is set to SIG_IGN,
			 * action will be SIG_DFL here.)
			 */
			if (sig_proc->p_sigignore & mask)
				goto psigout;
			if (uth->uu_sigwait & mask)
				action = KERN_SIG_WAIT;
			else if (uth->uu_sigmask & mask)
				action = KERN_SIG_HOLD;
			else if (sig_proc->p_sigcatch & mask)
				action = KERN_SIG_CATCH;
			else
				action = SIG_DFL;
		}
	}

	proc_lock(sig_proc);

	if (sig_proc->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
	    (sig_proc->p_lflag & P_LTRACED) == 0)
		sig_proc->p_nice = NZERO;

	if (prop & SA_CONT)
		uth->uu_siglist &= ~stopsigmask;

	if (prop & SA_STOP) {
		struct pgrp *pg;
		/*
		 * If sending a tty stop signal to a member of an orphaned
		 * process group, discard the signal here if the action
		 * is default; don't stop the process below if sleeping,
		 * and don't clear any pending SIGCONT.
		 */
		proc_unlock(sig_proc);
		pg = proc_pgrp(sig_proc);
		if (prop & SA_TTYSTOP && pg->pg_jobc == 0 &&
		    action == SIG_DFL) {
			pg_rele(pg);
			goto psigout;
		}
		pg_rele(pg);
		proc_lock(sig_proc);
		uth->uu_siglist &= ~contsigmask;
	}

	uth->uu_siglist |= mask;
	/*
	 * Repost AST in case sigthread has already processed
	 * an AST and missed the signal post.
	 */
	if (action == KERN_SIG_CATCH)
		act_set_astbsd(sig_thread);

	/*
	 * Defer further processing for signals which are held,
	 * except that stopped processes must be continued by SIGCONT.
	 */
	/* vfork will not go thru as action is SIG_DFL */
	if ((action == KERN_SIG_HOLD) && ((prop & SA_CONT) == 0 || sig_proc->p_stat != SSTOP)) {
		proc_unlock(sig_proc);
		goto psigout;
	}
	/*
	 * SIGKILL priority twiddling moved here from above because
	 * it needs sig_thread.  Could merge it into large switch
	 * below if we didn't care about priority for tracing
	 * as SIGKILL's action is always SIG_DFL.
	 */
	if ((signum == SIGKILL) && (sig_proc->p_nice > NZERO)) {
		sig_proc->p_nice = NZERO;
	}

1978 /*
1979 * Process is traced - wake it up (if not already
1980 * stopped) so that it can discover the signal in
1981 * issig() and stop for the parent.
1982 */
1983 if (sig_proc->p_lflag & P_LTRACED) {
1984 if (sig_proc->p_stat != SSTOP)
1985 goto runlocked;
1986 else {
1987 proc_unlock(sig_proc);
1988 goto psigout;
1989 }
1990 }
1991 if ((flavor & PSIG_VFORK) != 0)
1992 goto runlocked;
1993
1994 if (action == KERN_SIG_WAIT) {
1995 #if CONFIG_DTRACE
1996 /*
1997 * DTrace proc signal-clear returns a siginfo_t. Collect the needed info.
1998 */
1999 r_uid = kauth_getruid(); /* per thread credential; protected by our thread context */
2000
2001 bzero((caddr_t)&(uth->t_dtrace_siginfo), sizeof(uth->t_dtrace_siginfo));
2002
2003 uth->t_dtrace_siginfo.si_signo = signum;
2004 uth->t_dtrace_siginfo.si_pid = current_proc()->p_pid;
2005 uth->t_dtrace_siginfo.si_status = W_EXITCODE(signum, 0);
2006 uth->t_dtrace_siginfo.si_uid = r_uid;
2007 uth->t_dtrace_siginfo.si_code = 0;
2008 #endif
2009 uth->uu_sigwait = mask;
2010 uth->uu_siglist &= ~mask;
2011 wakeup(&uth->uu_sigwait);
2012 /* if it is SIGCONT, resume the whole process */
2013 if (prop & SA_CONT) {
2014 OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
2015 sig_proc->p_contproc = current_proc()->p_pid;
2016
2017 proc_unlock(sig_proc);
2018 (void) task_resume_internal(sig_task);
2019 goto psigout;
2020 }
2021 proc_unlock(sig_proc);
2022 goto psigout;
2023 }
2024
2025 if (action != SIG_DFL) {
2026 /*
2027 * User wants to catch the signal.
2028 * Wake up the thread, but don't un-suspend it
2029 * (except for SIGCONT).
2030 */
2031 if (prop & SA_CONT) {
2032 OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
2033 proc_unlock(sig_proc);
2034 (void) task_resume_internal(sig_task);
2035 proc_lock(sig_proc);
2036 sig_proc->p_stat = SRUN;
2037 } else if (sig_proc->p_stat == SSTOP) {
2038 proc_unlock(sig_proc);
2039 goto psigout;
2040 }
2041 /*
2042 * Fill out siginfo structure information to pass to the
2043 * signalled process/thread sigaction handler, when it
2044 * wakes up. si_code is 0 because this is an ordinary
2045 * signal, not a SIGCHLD, and so si_status is the signal
2046 * number itself, instead of the child process exit status.
2047 * We shift this left because it will be shifted right before
2048 * it is passed to user space. Kind of ugly to use W_EXITCODE
2049 * this way, but it beats defining a new macro.
2050 *
2051 * Note: Avoid the SIGCHLD recursion case!
2052 */
2053 if (signum != SIGCHLD) {
2054 proc_unlock(sig_proc);
2055 r_uid = kauth_getruid();
2056 proc_lock(sig_proc);
2057
2058 sig_proc->si_pid = current_proc()->p_pid;
2059 sig_proc->si_status = W_EXITCODE(signum, 0);
2060 sig_proc->si_uid = r_uid;
2061 sig_proc->si_code = 0;
2062 }
2063
2064 goto runlocked;
2065 } else {
2066 /* Default action - varies */
2067 if (mask & stopsigmask) {
2068 /*
2069 * These are the signals which by default
2070 * stop a process.
2071 *
2072 * Don't clog system with children of init
2073 * stopped from the keyboard.
2074 */
2075 if (!(prop & SA_STOP) && sig_proc->p_pptr == initproc) {
2076 proc_unlock(sig_proc);
2077 psignal_locked(sig_proc, SIGKILL);
2078 proc_lock(sig_proc);
2079 uth->uu_siglist &= ~mask;
2080 proc_unlock(sig_proc);
2081 goto psigout;
2082 }
2083
2084 /*
2085 * Stop the task
2086 * if task hasn't already been stopped by
2087 * a signal.
2088 */
2089 uth->uu_siglist &= ~mask;
2090 if (sig_proc->p_stat != SSTOP) {
2091 sig_proc->p_xstat = signum;
2092 sig_proc->p_stat = SSTOP;
2093 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &sig_proc->p_flag);
2094 sig_proc->p_lflag &= ~P_LWAITED;
2095 proc_unlock(sig_proc);
2096
2097 pp = proc_parentholdref(sig_proc);
2098 stop(sig_proc, pp);
2099 if ((pp != PROC_NULL) && ((pp->p_flag & P_NOCLDSTOP) == 0)) {
2100
2101 my_cred = kauth_cred_proc_ref(sig_proc);
2102 r_uid = kauth_cred_getruid(my_cred);
2103 kauth_cred_unref(&my_cred);
2104
2105 proc_lock(sig_proc);
2106 pp->si_pid = sig_proc->p_pid;
2107 /*
2108 * POSIX: the SIGCHLD sent to the parent for a
2109 * stopped child must carry the child's stopping
2110 * signal number in si_status.
2111 */
2112 if (signum != SIGSTOP)
2113 pp->si_status = WEXITSTATUS(sig_proc->p_xstat);
2114 else
2115 pp->si_status = W_EXITCODE(signum, signum);
2116 pp->si_code = CLD_STOPPED;
2117 pp->si_uid = r_uid;
2118 proc_unlock(sig_proc);
2119
2120 psignal(pp, SIGCHLD);
2121 }
2122 if (pp != PROC_NULL)
2123 proc_parentdropref(pp, 0);
2124 } else
2125 proc_unlock(sig_proc);
2126 goto psigout;
2127 }
2128
2129 DTRACE_PROC3(signal__send, thread_t, sig_thread, proc_t, p, int, signum);
2130
2131 /*
2132 * We enter the switch with the sig_proc lock held; it is
2133 * dropped by the time we leave the switch.
2134 */
2135 switch (signum) {
2136 /*
2137 * Signals ignored by default have been dealt
2138 * with already, since their bits are on in
2139 * p_sigignore.
2140 */
2141
2142 case SIGKILL:
2143 /*
2144 * Kill signal always sets process running and
2145 * unsuspends it.
2146 */
2147 /*
2148 * Process will be running after 'run'
2149 */
2150 sig_proc->p_stat = SRUN;
2151 /*
2152 * In scenarios where suspend/resume race the
2153 * signal, AST_BSD may already have been cleared by
2154 * the time we get here, so set it again to avoid
2155 * the race. This was seen with spindump-enabled
2156 * shutdowns; it should be covered more thoroughly down the line.
2157 */
2158 act_set_astbsd(sig_thread);
2159 thread_abort(sig_thread);
2160 proc_unlock(sig_proc);
2161
2162 goto psigout;
2163
2164 case SIGCONT:
2165 /*
2166 * Let the process run. If it's sleeping on an
2167 * event, it remains so.
2168 */
2169 OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
2170 sig_proc->p_contproc = sig_proc->p_pid;
2171
2172 proc_unlock(sig_proc);
2173 (void) task_resume_internal(sig_task);
2174 proc_lock(sig_proc);
2175 /*
2176 * When processing a SIGCONT, we need to check
2177 * to see if there are signals pending that
2178 * were not delivered because we had been
2179 * previously stopped. If that's the case,
2180 * we need to thread_abort_safely() to trigger
2181 * interruption of the current system call to
2182 * cause their handlers to fire. If it's only
2183 * the SIGCONT, then don't wake up.
2184 */
2185 if (((flavor & (PSIG_VFORK|PSIG_THREAD)) == 0) && (((uth->uu_siglist & ~uth->uu_sigmask) & ~sig_proc->p_sigignore) & ~mask)) {
2186 uth->uu_siglist &= ~mask;
2187 sig_proc->p_stat = SRUN;
2188 goto runlocked;
2189 }
2190
2191 uth->uu_siglist &= ~mask;
2192 sig_proc->p_stat = SRUN;
2193 proc_unlock(sig_proc);
2194 goto psigout;
2195
2196 default:
2197 /*
2198 * A signal which has a default action of killing
2199 * the process, and for which there is no handler,
2200 * needs to act like SIGKILL
2201 */
2202 if (((flavor & (PSIG_VFORK|PSIG_THREAD)) == 0) && (action == SIG_DFL) && (prop & SA_KILL)) {
2203 sig_proc->p_stat = SRUN;
2204 proc_unlock(sig_proc);
2205 thread_abort(sig_thread);
2206 goto psigout;
2207 }
2208
2209 /*
2210 * All other signals wake up the process, but don't
2211 * resume it.
2212 */
2213 if (sig_proc->p_stat == SSTOP) {
2214 proc_unlock(sig_proc);
2215 goto psigout;
2216 }
2217 goto runlocked;
2218 }
2219 }
2220 /*NOTREACHED*/
2221
2222 runlocked:
2223 /*
2224 * If we're being traced (possibly because someone attached us
2225 * while we were stopped), check for a signal from the debugger.
2226 */
2227 if (sig_proc->p_stat == SSTOP) {
2228 if ((sig_proc->p_lflag & P_LTRACED) != 0 && sig_proc->p_xstat != 0)
2229 uth->uu_siglist |= sigmask(sig_proc->p_xstat);
2230 if ((flavor & PSIG_VFORK) != 0) {
2231 sig_proc->p_stat = SRUN;
2232 }
2233 proc_unlock(sig_proc);
2234 } else {
2235 /*
2236 * The BSD equivalent of setrunnable(p):
2237 * wake up the thread if it is interruptible.
2238 */
2239 sig_proc->p_stat = SRUN;
2240 proc_unlock(sig_proc);
2241 if ((flavor & PSIG_VFORK) == 0)
2242 thread_abort_safely(sig_thread);
2243 }
2244 psigout:
2245 if ((flavor & PSIG_LOCKED) == 0) {
2246 proc_signalend(sig_proc, 0);
2247 }
2248 }
2249
2250 void
2251 psignal(proc_t p, int signum)
2252 {
2253 psignal_internal(p, NULL, NULL, 0, signum);
2254 }
2255
2256 void
2257 psignal_locked(proc_t p, int signum)
2258 {
2259 psignal_internal(p, NULL, NULL, PSIG_LOCKED, signum);
2260 }
2261
2262 void
2263 psignal_vfork(proc_t p, task_t new_task, thread_t thread, int signum)
2264 {
2265 psignal_internal(p, new_task, thread, PSIG_VFORK, signum);
2266 }
2267
2268 static void
2269 psignal_uthread(thread_t thread, int signum)
2270 {
2271 psignal_internal(PROC_NULL, TASK_NULL, thread, PSIG_THREAD, signum);
2272 }
2273
2274 /* same as psignal(), but prefer delivery to 'thread' if possible */
2275 static void
2276 psignal_try_thread(proc_t p, thread_t thread, int signum)
2277 {
2278 psignal_internal(p, NULL, thread, PSIG_TRY_THREAD, signum);
2279 }
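
/*
 * Summary of the psignal() entry points above, for reference:
 *
 *	psignal()            - ordinary delivery to a process
 *	psignal_locked()     - PSIG_LOCKED: the signal bracket is managed
 *	                       by the caller, so the psigout path above
 *	                       skips proc_signalend()
 *	psignal_vfork()      - PSIG_VFORK: deliver in the vfork context
 *	psignal_uthread()    - PSIG_THREAD: deliver to a specific thread
 *	psignal_try_thread() - PSIG_TRY_THREAD: prefer 'thread' if possible
 *
 * A minimal in-kernel usage sketch (hypothetical caller, assuming a
 * reference obtained via proc_find()):
 *
 *	proc_t p = proc_find(pid);
 *	if (p != PROC_NULL) {
 *		psignal(p, SIGTERM);
 *		proc_rele(p);
 *	}
 */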
2280
2281 /*
2282 * If the current process has received a signal (should be caught or cause
2283 * termination, should interrupt current syscall), return the signal number.
2284 * Stop signals with default action are processed immediately, then cleared;
2285 * they aren't returned. This is checked after each entry to the system for
2286 * a syscall or trap (though this can usually be done without calling issignal
2287 * by checking the pending signal masks in the CURSIG macro.) The normal call
2288 * sequence is
2289 *
2290 * while (signum = CURSIG(curproc))
2291 * postsig(signum);
2292 */
2293 int
2294 issignal_locked(proc_t p)
2295 {
2296 int signum, mask, prop, sigbits;
2297 thread_t cur_act;
2298 struct uthread * ut;
2299 proc_t pp;
2300 kauth_cred_t my_cred;
2301 int retval = 0;
2302 uid_t r_uid;
2303
2304 cur_act = current_thread();
2305
2306 #if SIGNAL_DEBUG
2307 if(rdebug_proc && (p == rdebug_proc)) {
2308 ram_printf(3);
2309 }
2310 #endif /* SIGNAL_DEBUG */
2311
2312 /*
2313 * Try to grab the signal lock.
2314 */
2315 if (sig_try_locked(p) <= 0) {
2316 return(0);
2317 }
2318
2319 proc_signalstart(p, 1);
2320
2321 ut = get_bsdthread_info(cur_act);
2322 for(;;) {
2323 sigbits = ut->uu_siglist & ~ut->uu_sigmask;
2324
2325 if (p->p_lflag & P_LPPWAIT)
2326 sigbits &= ~stopsigmask;
2327 if (sigbits == 0) { /* no signal to send */
2328 retval = 0;
2329 goto out;
2330 }
2331
2332 signum = ffs((long)sigbits);
2333 mask = sigmask(signum);
2334 prop = sigprop[signum];
2335
2336 /*
2337 * We should see pending but ignored signals
2338 * only if P_LTRACED was on when they were posted.
2339 */
2340 if (mask & p->p_sigignore && (p->p_lflag & P_LTRACED) == 0) {
2341 ut->uu_siglist &= ~mask; /* take the signal! */
2342 continue;
2343 }
2344 if (p->p_lflag & P_LTRACED && (p->p_lflag & P_LPPWAIT) == 0) {
2345 task_t task;
2346 /*
2347 * If traced, always stop, and stay
2348 * stopped until released by the debugger.
2349 */
2350 /* ptrace debugging */
2351 p->p_xstat = signum;
2352
2353 if (p->p_lflag & P_LSIGEXC) {
2354 p->sigwait = TRUE;
2355 p->sigwait_thread = cur_act;
2356 p->p_stat = SSTOP;
2357 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
2358 p->p_lflag &= ~P_LWAITED;
2359 ut->uu_siglist &= ~mask; /* clear the old signal */
2360 proc_signalend(p, 1);
2361 proc_unlock(p);
2362 do_bsdexception(EXC_SOFTWARE, EXC_SOFT_SIGNAL, signum);
2363 proc_lock(p);
2364 proc_signalstart(p, 1);
2365 } else {
2366 proc_unlock(p);
2367 my_cred = kauth_cred_proc_ref(p);
2368 r_uid = kauth_cred_getruid(my_cred);
2369 kauth_cred_unref(&my_cred);
2370
2371 pp = proc_parentholdref(p);
2372 if (pp != PROC_NULL) {
2373 proc_lock(pp);
2374
2375 pp->si_pid = p->p_pid;
2376 pp->si_status = p->p_xstat;
2377 pp->si_code = CLD_TRAPPED;
2378 pp->si_uid = r_uid;
2379
2380 proc_unlock(pp);
2381 }
2382
2383 /*
2384 * XXX Have to really stop for debuggers;
2385 * XXX stop() doesn't do the right thing.
2386 */
2387 task = p->task;
2388 task_suspend_internal(task);
2389
2390 proc_lock(p);
2391 p->sigwait = TRUE;
2392 p->sigwait_thread = cur_act;
2393 p->p_stat = SSTOP;
2394 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
2395 p->p_lflag &= ~P_LWAITED;
2396 ut->uu_siglist &= ~mask; /* clear the old signal */
2397
2398 proc_signalend(p, 1);
2399 proc_unlock(p);
2400
2401 if (pp != PROC_NULL) {
2402 psignal(pp, SIGCHLD);
2403 proc_list_lock();
2404 wakeup((caddr_t)pp);
2405 proc_parentdropref(pp, 1);
2406 proc_list_unlock();
2407 }
2408
2409 assert_wait((caddr_t)&p->sigwait, (THREAD_INTERRUPTIBLE));
2410 thread_block(THREAD_CONTINUE_NULL);
2411 proc_lock(p);
2412 proc_signalstart(p, 1);
2413 }
2414
2415 p->sigwait = FALSE;
2416 p->sigwait_thread = NULL;
2417 wakeup((caddr_t)&p->sigwait_thread);
2418
2419 /*
2420 * This code detects the case where gdb is killed
2421 * while the traced program is still attached:
2422 * pgsignal would deliver the SIGKILL to the traced
2423 * program, and that is what we check for here.
2424 */
2425 if (ut->uu_siglist & sigmask(SIGKILL)) {
2426 /*
2427 * Wait event may still be outstanding;
2428 * clear it, since sig_lock_to_exit will
2429 * wait.
2430 */
2431 clear_wait(current_thread(), THREAD_INTERRUPTED);
2432 sig_lock_to_exit(p);
2433 /*
2434 * Since this thread will be resumed
2435 * to allow the current syscall to
2436 * be completed, we must save u_qsave
2437 * before calling exit(). (exit()
2438 * calls closef(), which can trash u_qsave.)
2439 */
2440 proc_signalend(p, 1);
2441 proc_unlock(p);
2442 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_FRCEXIT) | DBG_FUNC_NONE,
2443 p->p_pid, W_EXITCODE(0, SIGKILL), 2, 0, 0);
2444 exit1(p, W_EXITCODE(0, SIGKILL), (int *)NULL);
2445 proc_lock(p);
2446 return(0);
2447 }
2448
2449 /*
2450 * We may have to quit
2451 */
2452 if (thread_should_abort(current_thread())) {
2453 retval = 0;
2454 goto out;
2455 }
2456 /*
2457 * If parent wants us to take the signal,
2458 * then it will leave it in p->p_xstat;
2459 * otherwise we just look for signals again.
2460 */
2461 signum = p->p_xstat;
2462 if (signum == 0)
2463 continue;
2464 /*
2465 * Put the new signal into p_siglist. If the
2466 * signal is being masked, look for other signals.
2467 */
2468 mask = sigmask(signum);
2469 ut->uu_siglist |= mask;
2470 if (ut->uu_sigmask & mask)
2471 continue;
2472 }
2473
2474 /*
2475 * Decide whether the signal should be returned.
2476 * Return the signal's number, or fall through
2477 * to clear it from the pending mask.
2478 */
2479
2480 switch ((long)p->p_sigacts->ps_sigact[signum]) {
2481
2482 case (long)SIG_DFL:
2483 /*
2484 * If there is a pending stop signal to process
2485 * with default action, stop here,
2486 * then clear the signal. However,
2487 * if process is member of an orphaned
2488 * process group, ignore tty stop signals.
2489 */
2490 if (prop & SA_STOP) {
2491 struct pgrp * pg;
2492
2493 proc_unlock(p);
2494 pg = proc_pgrp(p);
2495 if (p->p_lflag & P_LTRACED ||
2496 (pg->pg_jobc == 0 &&
2497 prop & SA_TTYSTOP)) {
2498 proc_lock(p);
2499 pg_rele(pg);
2500 break; /* == ignore */
2501 }
2502 pg_rele(pg);
2503 if (p->p_stat != SSTOP) {
2504 proc_lock(p);
2505 p->p_xstat = signum;
2506
2507 p->p_stat = SSTOP;
2508 p->p_lflag &= ~P_LWAITED;
2509 proc_unlock(p);
2510
2511 pp = proc_parentholdref(p);
2512 stop(p, pp);
2513 if ((pp != PROC_NULL) && ((pp->p_flag & P_NOCLDSTOP) == 0)) {
2514 my_cred = kauth_cred_proc_ref(p);
2515 r_uid = kauth_cred_getruid(my_cred);
2516 kauth_cred_unref(&my_cred);
2517
2518 proc_lock(pp);
2519 pp->si_pid = p->p_pid;
2520 pp->si_status = WEXITSTATUS(p->p_xstat);
2521 pp->si_code = CLD_STOPPED;
2522 pp->si_uid = r_uid;
2523 proc_unlock(pp);
2524
2525 psignal(pp, SIGCHLD);
2526 }
2527 if (pp != PROC_NULL)
2528 proc_parentdropref(pp, 0);
2529 }
2530 proc_lock(p);
2531 break;
2532 } else if (prop & SA_IGNORE) {
2533 /*
2534 * Except for SIGCONT, shouldn't get here.
2535 * Default action is to ignore; drop it.
2536 */
2537 break; /* == ignore */
2538 } else {
2539 ut->uu_siglist &= ~mask; /* take the signal! */
2540 retval = signum;
2541 goto out;
2542 }
2543
2544 /*NOTREACHED*/
2545 break;
2546
2547 case (long)SIG_IGN:
2548 /*
2549 * Masking above should prevent us from ever trying
2550 * to take action on an ignored signal other
2551 * than SIGCONT, unless the process is traced.
2552 */
2553 if ((prop & SA_CONT) == 0 &&
2554 (p->p_lflag & P_LTRACED) == 0)
2555 printf("issignal\n");
2556 break; /* == ignore */
2557
2558 default:
2559 /*
2560 * This signal has an action, let
2561 * postsig() process it.
2562 */
2563 ut->uu_siglist &= ~mask; /* take the signal! */
2564 retval = signum;
2565 goto out;
2566 }
2567 ut->uu_siglist &= ~mask; /* take the signal! */
2568 }
2569 /* NOTREACHED */
2570 out:
2571 proc_signalend(p, 1);
2572 return(retval);
2573 }
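
/*
 * Locked calling convention (sketch; this is the pattern bsd_ast()
 * below uses, with the proc lock held across the loop):
 *
 *	proc_lock(p);
 *	while ((signum = issignal_locked(p)))
 *		postsig_locked(signum);
 *	proc_unlock(p);
 */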
2574
2575 /* called from _sleep */
2576 int
2577 CURSIG(proc_t p)
2578 {
2579 int signum, mask, prop, sigbits;
2580 thread_t cur_act;
2581 struct uthread * ut;
2582 int retnum = 0;
2583
2584
2585 cur_act = current_thread();
2586
2587 ut = get_bsdthread_info(cur_act);
2588
2589 if (ut->uu_siglist == 0)
2590 return (0);
2591
2592 if (((ut->uu_siglist & ~ut->uu_sigmask) == 0) && ((p->p_lflag & P_LTRACED) == 0))
2593 return (0);
2594
2595 sigbits = ut->uu_siglist & ~ut->uu_sigmask;
2596
2597 for(;;) {
2598 if (p->p_lflag & P_LPPWAIT)
2599 sigbits &= ~stopsigmask;
2600 if (sigbits == 0) { /* no signal to send */
2601 return (retnum);
2602 }
2603
2604 signum = ffs((long)sigbits);
2605 mask = sigmask(signum);
2606 prop = sigprop[signum];
2607 sigbits &= ~mask; /* take the signal out */
2608
2609 /*
2610 * We should see pending but ignored signals
2611 * only if P_LTRACED was on when they were posted.
2612 */
2613 if (mask & p->p_sigignore && (p->p_lflag & P_LTRACED) == 0) {
2614 continue;
2615 }
2616
2617 if (p->p_lflag & P_LTRACED && (p->p_lflag & P_LPPWAIT) == 0) {
2618 return(signum);
2619 }
2620
2621 /*
2622 * Decide whether the signal should be returned.
2623 * Return the signal's number, or fall through
2624 * to clear it from the pending mask.
2625 */
2626
2627 switch ((long)p->p_sigacts->ps_sigact[signum]) {
2628
2629 case (long)SIG_DFL:
2630 /*
2631 * If there is a pending stop signal to process
2632 * with default action, stop here,
2633 * then clear the signal. However,
2634 * if process is member of an orphaned
2635 * process group, ignore tty stop signals.
2636 */
2637 if (prop & SA_STOP) {
2638 struct pgrp *pg;
2639
2640 pg = proc_pgrp(p);
2641
2642 if (p->p_lflag & P_LTRACED ||
2643 (pg->pg_jobc == 0 &&
2644 prop & SA_TTYSTOP)) {
2645 pg_rele(pg);
2646 break; /* == ignore */
2647 }
2648 pg_rele(pg);
2649 retnum = signum;
2650 break;
2651 } else if (prop & SA_IGNORE) {
2652 /*
2653 * Except for SIGCONT, shouldn't get here.
2654 * Default action is to ignore; drop it.
2655 */
2656 break; /* == ignore */
2657 } else {
2658 return (signum);
2659 }
2660 /*NOTREACHED*/
2661
2662 case (long)SIG_IGN:
2663 /*
2664 * Masking above should prevent us from ever trying
2665 * to take action on an ignored signal other
2666 * than SIGCONT, unless the process is traced.
2667 */
2668 if ((prop & SA_CONT) == 0 &&
2669 (p->p_lflag & P_LTRACED) == 0)
2670 printf("issignal\n");
2671 break; /* == ignore */
2672
2673 default:
2674 /*
2675 * This signal has an action, let
2676 * postsig() process it.
2677 */
2678 return (signum);
2679 }
2680 }
2681 /* NOTREACHED */
2682 }
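
/*
 * Note: unlike issignal_locked(), CURSIG() is non-destructive. It
 * clears bits only in its local 'sigbits' copy, never in
 * ut->uu_siglist, so _sleep() can poll for a deliverable signal
 * without consuming it.
 */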
2683
2684 /*
2685 * Put the argument process into the stopped state and notify the parent
2686 * via wakeup. Signals are handled elsewhere. The process must not be
2687 * on the run queue.
2688 */
2689 static void
2690 stop(proc_t p, proc_t parent)
2691 {
2692 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
2693 if ((parent != PROC_NULL) && (parent->p_stat != SSTOP)) {
2694 proc_list_lock();
2695 wakeup((caddr_t)parent);
2696 proc_list_unlock();
2697 }
2698 (void) task_suspend_internal(p->task);
2699 }
2700
2701 /*
2702 * Take the action for the specified signal
2703 * from the current set of pending signals.
2704 */
2705 void
2706 postsig_locked(int signum)
2707 {
2708 proc_t p = current_proc();
2709 struct sigacts *ps = p->p_sigacts;
2710 user_addr_t catcher;
2711 uint32_t code;
2712 int mask, returnmask;
2713 struct uthread * ut;
2714
2715 #if DIAGNOSTIC
2716 if (signum == 0)
2717 panic("postsig");
2718 /*
2719 * This must be called on master cpu
2720 */
2721 if (cpu_number() != master_cpu)
2722 panic("psig not on master");
2723 #endif
2724
2725 /*
2726 * Try to grab the signal lock.
2727 */
2728 if (sig_try_locked(p) <= 0) {
2729 return;
2730 }
2731
2732 proc_signalstart(p, 1);
2733
2734 ut = (struct uthread *)get_bsdthread_info(current_thread());
2735 mask = sigmask(signum);
2736 ut->uu_siglist &= ~mask;
2737 catcher = ps->ps_sigact[signum];
2738 if (catcher == SIG_DFL) {
2739 /*
2740 * Default catcher, where the default is to kill
2741 * the process. (Other cases were ignored above.)
2742 */
2743 sig_lock_to_exit(p);
2744 p->p_acflag |= AXSIG;
2745 if (sigprop[signum] & SA_CORE) {
2746 p->p_sigacts->ps_sig = signum;
2747 proc_signalend(p, 1);
2748 proc_unlock(p);
2749 if (coredump(p, 0, 0) == 0)
2750 signum |= WCOREFLAG;
2751 } else {
2752 proc_signalend(p, 1);
2753 proc_unlock(p);
2754 }
2755
2756 #if CONFIG_DTRACE
2757 bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));
2758
2759 ut->t_dtrace_siginfo.si_signo = signum;
2760 ut->t_dtrace_siginfo.si_pid = p->si_pid;
2761 ut->t_dtrace_siginfo.si_uid = p->si_uid;
2762 ut->t_dtrace_siginfo.si_status = WEXITSTATUS(p->si_status);
2763
2764 /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
2765 switch (signum) {
2766 case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
2767 DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
2768 break;
2769 default:
2770 break;
2771 }
2772
2773
2774 DTRACE_PROC3(signal__handle, int, signum, siginfo_t *, &(ut->t_dtrace_siginfo),
2775 void (*)(void), SIG_DFL);
2776 #endif
2777
2778 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_FRCEXIT) | DBG_FUNC_NONE,
2779 p->p_pid, W_EXITCODE(0, signum), 3, 0, 0);
2780 exit1(p, W_EXITCODE(0, signum), (int *)NULL);
2781 proc_lock(p);
2782 return;
2783 } else {
2784 /*
2785 * If we get here, the signal must be caught.
2786 */
2787 #if DIAGNOSTIC
2788 if (catcher == SIG_IGN || (ut->uu_sigmask & mask))
2789 log(LOG_WARNING,
2790 "postsig: processing masked or ignored signal\n");
2791 #endif
2792
2793 /*
2794 * Set the new mask value and also defer further
2795 * occurrences of this signal.
2796 *
2797 * Special case: user has done a sigpause. Here the
2798 * current mask is not of interest, but rather the
2799 * mask from before the sigpause is what we want
2800 * restored after the signal processing is completed.
2801 */
2802 if (ut->uu_flag & UT_SAS_OLDMASK) {
2803 returnmask = ut->uu_oldmask;
2804 ut->uu_flag &= ~UT_SAS_OLDMASK;
2805 ut->uu_oldmask = 0;
2806 } else
2807 returnmask = ut->uu_sigmask;
2808 ut->uu_sigmask |= ps->ps_catchmask[signum];
2809 if ((ps->ps_signodefer & mask) == 0)
2810 ut->uu_sigmask |= mask;
2811 if ((signum != SIGILL) && (signum != SIGTRAP) && (ps->ps_sigreset & mask)) {
2812 if ((signum != SIGCONT) && (sigprop[signum] & SA_IGNORE))
2813 p->p_sigignore |= mask;
2814 ps->ps_sigact[signum] = SIG_DFL;
2815 ps->ps_siginfo &= ~mask;
2816 ps->ps_signodefer &= ~mask;
2817 }
2818
2819 if (ps->ps_sig != signum) {
2820 code = 0;
2821 } else {
2822 code = ps->ps_code;
2823 ps->ps_code = 0;
2824 }
2825 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_nsignals);
2826 sendsig(p, catcher, signum, returnmask, code);
2827 }
2828 proc_signalend(p, 1);
2829 }
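
/*
 * The ps_sigreset handling above provides SA_RESETHAND semantics:
 * after one delivery the disposition reverts to SIG_DFL (SIGILL and
 * SIGTRAP are exempted so debuggers keep working). A userspace sketch
 * of what arms it, assuming the standard sigaction(2) API:
 *
 *	struct sigaction sa = { .sa_handler = handler,
 *	    .sa_flags = SA_RESETHAND };
 *	sigaction(SIGUSR1, &sa, NULL);
 */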
2830
2831 /*
2832 * Attach a signal knote to the list of knotes for this process.
2833 *
2834 * Signal knotes share the knote list with proc knotes. This
2835 * could be avoided by using a signal-specific knote list, but
2836 * probably isn't worth the trouble.
2837 */
2838
2839 static int
2840 filt_sigattach(struct knote *kn)
2841 {
2842 proc_t p = current_proc(); /* can attach only to oneself */
2843
2844 proc_klist_lock();
2845
2846 kn->kn_ptr.p_proc = p;
2847 kn->kn_flags |= EV_CLEAR; /* automatically set */
2848
2849 KNOTE_ATTACH(&p->p_klist, kn);
2850
2851 proc_klist_unlock();
2852
2853 return (0);
2854 }
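
/*
 * Userspace sketch of what lands here, assuming the standard
 * <sys/event.h> API (illustrative only):
 *
 *	int kq = kqueue();
 *	struct kevent kev;
 *	EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * Since EV_CLEAR is forced on at attach time, kn_data (the delivery
 * count maintained by filt_signal() below) resets on each retrieval.
 */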
2855
2856 /*
2857 * Remove the knote from the process list, if it hasn't already
2858 * been removed by exit processing.
2859 */
2860
2861 static void
2862 filt_sigdetach(struct knote *kn)
2863 {
2864 proc_t p = kn->kn_ptr.p_proc;
2865
2866 proc_klist_lock();
2867 kn->kn_ptr.p_proc = NULL;
2868 KNOTE_DETACH(&p->p_klist, kn);
2869 proc_klist_unlock();
2870 }
2871
2872 /*
2873 * Post an event to the signal filter. Because we share the same list
2874 * as process knotes, we have to filter out and handle only signal events.
2875 *
2876 * We assume that we process fdfree() before we post the NOTE_EXIT for
2877 * a process during exit. Therefore, since signal filters can only be
2878 * set up "in-process", we should have already torn down the kqueue
2879 * hosting the EVFILT_SIGNAL knote and should never see NOTE_EXIT.
2880 */
2881 static int
2882 filt_signal(struct knote *kn, long hint)
2883 {
2884
2885 if (hint & NOTE_SIGNAL) {
2886 hint &= ~NOTE_SIGNAL;
2887
2888 if (kn->kn_id == (unsigned int)hint)
2889 kn->kn_data++;
2890 } else if (hint & NOTE_EXIT) {
2891 panic("filt_signal: detected NOTE_EXIT event");
2892 }
2893
2894 return (kn->kn_data != 0);
2895 }
2896
2897 static void
2898 filt_signaltouch(struct knote *kn, struct kevent_internal_s *kev, long type)
2899 {
2900 proc_klist_lock();
2901 switch (type) {
2902 case EVENT_REGISTER:
2903 kn->kn_sfflags = kev->fflags;
2904 kn->kn_sdata = kev->data;
2905 break;
2906 case EVENT_PROCESS:
2907 *kev = kn->kn_kevent;
2908 if (kn->kn_flags & EV_CLEAR) {
2909 kn->kn_data = 0;
2910 kn->kn_fflags = 0;
2911 }
2912 break;
2913 default:
2914 panic("filt_signaltouch() - invalid type (%ld)", type);
2915 break;
2916 }
2917 proc_klist_unlock();
2918 }
2919
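/*
 * bsd_ast() runs on the AST_BSD asynchronous system trap, requested
 * via act_set_astbsd() in the delivery paths above. It charges
 * profiling ticks, expires the virtual interval timers and the CPU
 * rlimit timer, and then drains pending signals through
 * issignal_locked()/postsig_locked().
 */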
2920 void
2921 bsd_ast(thread_t thread)
2922 {
2923 proc_t p = current_proc();
2924 struct uthread *ut = get_bsdthread_info(thread);
2925 int signum;
2926 user_addr_t pc;
2927 static int bsd_init_done = 0;
2928
2929 if (p == NULL)
2930 return;
2931
2932 if ((p->p_flag & P_OWEUPC) && (p->p_flag & P_PROFIL)) {
2933 pc = get_useraddr();
2934 addupc_task(p, pc, 1);
2935 OSBitAndAtomic(~((uint32_t)P_OWEUPC), &p->p_flag);
2936 }
2937
2938 if (timerisset(&p->p_vtimer_user.it_value)) {
2939 uint32_t microsecs;
2940
2941 task_vtimer_update(p->task, TASK_VTIMER_USER, &microsecs);
2942
2943 if (!itimerdecr(p, &p->p_vtimer_user, microsecs)) {
2944 if (timerisset(&p->p_vtimer_user.it_value))
2945 task_vtimer_set(p->task, TASK_VTIMER_USER);
2946 else
2947 task_vtimer_clear(p->task, TASK_VTIMER_USER);
2948
2949 psignal_try_thread(p, thread, SIGVTALRM);
2950 }
2951 }
2952
2953 if (timerisset(&p->p_vtimer_prof.it_value)) {
2954 uint32_t microsecs;
2955
2956 task_vtimer_update(p->task, TASK_VTIMER_PROF, &microsecs);
2957
2958 if (!itimerdecr(p, &p->p_vtimer_prof, microsecs)) {
2959 if (timerisset(&p->p_vtimer_prof.it_value))
2960 task_vtimer_set(p->task, TASK_VTIMER_PROF);
2961 else
2962 task_vtimer_clear(p->task, TASK_VTIMER_PROF);
2963
2964 psignal_try_thread(p, thread, SIGPROF);
2965 }
2966 }
2967
2968 if (timerisset(&p->p_rlim_cpu)) {
2969 struct timeval tv;
2970
2971 task_vtimer_update(p->task, TASK_VTIMER_RLIM, (uint32_t *) &tv.tv_usec);
2972
2973 proc_spinlock(p);
2974 if (p->p_rlim_cpu.tv_sec > 0 || p->p_rlim_cpu.tv_usec > tv.tv_usec) {
2975 tv.tv_sec = 0;
2976 timersub(&p->p_rlim_cpu, &tv, &p->p_rlim_cpu);
2977 proc_spinunlock(p);
2978 } else {
2979
2980 timerclear(&p->p_rlim_cpu);
2981 proc_spinunlock(p);
2982
2983 task_vtimer_clear(p->task, TASK_VTIMER_RLIM);
2984
2985 psignal_try_thread(p, thread, SIGXCPU);
2986 }
2987 }
2988
2989 #if CONFIG_DTRACE
2990 if (ut->t_dtrace_sig) {
2991 uint8_t dt_action_sig = ut->t_dtrace_sig;
2992 ut->t_dtrace_sig = 0;
2993 psignal(p, dt_action_sig);
2994 }
2995
2996 if (ut->t_dtrace_stop) {
2997 ut->t_dtrace_stop = 0;
2998 proc_lock(p);
2999 p->p_dtrace_stop = 1;
3000 proc_unlock(p);
3001 (void)task_suspend_internal(p->task);
3002 }
3003
3004 if (ut->t_dtrace_resumepid) {
3005 proc_t resumeproc = proc_find(ut->t_dtrace_resumepid);
3006 ut->t_dtrace_resumepid = 0;
3007 if (resumeproc != PROC_NULL) {
3008 proc_lock(resumeproc);
3009 /* We only act on processes stopped by dtrace */
3010 if (resumeproc->p_dtrace_stop) {
3011 resumeproc->p_dtrace_stop = 0;
3012 proc_unlock(resumeproc);
3013 task_resume_internal(resumeproc->task);
3014 }
3015 else {
3016 proc_unlock(resumeproc);
3017 }
3018 proc_rele(resumeproc);
3019 }
3020 }
3021
3022 #endif /* CONFIG_DTRACE */
3023
3024 proc_lock(p);
3025 if (CHECK_SIGNALS(p, current_thread(), ut)) {
3026 while ( (signum = issignal_locked(p)) )
3027 postsig_locked(signum);
3028 }
3029 proc_unlock(p);
3030
3031 if (!bsd_init_done) {
3032 bsd_init_done = 1;
3033 bsdinit_task();
3034 }
3035
3036 }
3037
3038 /* ptrace set runnable */
3039 void
3040 pt_setrunnable(proc_t p)
3041 {
3042 task_t task;
3043
3044 task = p->task;
3045
3046 if (p->p_lflag & P_LTRACED) {
3047 proc_lock(p);
3048 p->p_stat = SRUN;
3049 proc_unlock(p);
3050 if (p->sigwait) {
3051 wakeup((caddr_t)&(p->sigwait));
3052 if ((p->p_lflag & P_LSIGEXC) == 0) { // 5878479
3053 task_release(task);
3054 }
3055 }
3056 }
3057 }
3058
3059 kern_return_t
3060 do_bsdexception(
3061 int exc,
3062 int code,
3063 int sub)
3064 {
3065 mach_exception_data_type_t codes[EXCEPTION_CODE_MAX];
3066
3067 codes[0] = code;
3068 codes[1] = sub;
3069 return(bsd_exception(exc, codes, 2));
3070 }
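
/*
 * Example: issignal_locked() above uses this to route a signal
 * through the Mach exception path when P_LSIGEXC is set:
 *
 *	do_bsdexception(EXC_SOFTWARE, EXC_SOFT_SIGNAL, signum);
 */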
3071
3072 int
3073 proc_pendingsignals(proc_t p, sigset_t mask)
3074 {
3075 struct uthread * uth;
3076 thread_t th;
3077 sigset_t bits = 0;
3078
3079 proc_lock(p);
3080 /* If the process is in proc exit, return no signal info */
3081 if (p->p_lflag & P_LPEXIT) {
3082 goto out;
3083 }
3084
3085 if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
3086 th = p->p_vforkact;
3087 uth = (struct uthread *)get_bsdthread_info(th);
3088 if (uth) {
3089 bits = (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
3090 }
3091 goto out;
3092 }
3093
3094 bits = 0;
3095 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
3096 bits |= (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
3097 }
3098 out:
3099 proc_unlock(p);
3100 return(bits);
3101 }
3102
3103 int
3104 thread_issignal(proc_t p, thread_t th, sigset_t mask)
3105 {
3106 struct uthread * uth;
3107 sigset_t bits=0;
3108
3109 proc_lock(p);
3110 uth = (struct uthread *)get_bsdthread_info(th);
3111 if (uth) {
3112 bits = (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
3113 }
3114 proc_unlock(p);
3115 return(bits);
3116 }
3117
3118 /*
3119 * Allow external reads of the sigprop array.
3120 */
3121 int
3122 hassigprop(int sig, int prop)
3123 {
3124 return (sigprop[sig] & prop);
3125 }
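
/*
 * Usage sketch (hypothetical caller): test whether a signal is a
 * tty-generated stop signal, as the orphaned-process-group checks
 * above do with sigprop directly:
 *
 *	if (hassigprop(SIGTSTP, SA_TTYSTOP))
 *		...
 */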
3126
3127 void
3128 pgsigio(pid_t pgid, int sig)
3129 {
3130 proc_t p = PROC_NULL;
3131
3132 if (pgid < 0)
3133 gsignal(-(pgid), sig);
3134
3135 else if (pgid > 0 && (p = proc_find(pgid)) != 0)
3136 psignal(p, sig);
3137 if (p != PROC_NULL)
3138 proc_rele(p);
3139 }
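
/*
 * Note: 'pgid' here follows the usual F_SETOWN convention of the
 * async-I/O (SIGIO/SIGURG) callers: a negative value names a process
 * group, a positive value a single process.
 */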
3140
3141 void
3142 proc_signalstart(proc_t p, int locked)
3143 {
3144 if (!locked)
3145 proc_lock(p);
3146
3147 if(p->p_signalholder == current_thread())
3148 panic("proc_signalstart: thread attempting to signal a process for which it holds the signal lock");
3149
3150 p->p_sigwaitcnt++;
3151 while ((p->p_lflag & P_LINSIGNAL) == P_LINSIGNAL)
3152 msleep(&p->p_sigmask, &p->p_mlock, 0, "proc_signstart", NULL);
3153 p->p_sigwaitcnt--;
3154
3155 p->p_lflag |= P_LINSIGNAL;
3156 p->p_signalholder = current_thread();
3157 if (!locked)
3158 proc_unlock(p);
3159 }
3160
3161 void
3162 proc_signalend(proc_t p, int locked)
3163 {
3164 if (!locked)
3165 proc_lock(p);
3166 p->p_lflag &= ~P_LINSIGNAL;
3167
3168 if (p->p_sigwaitcnt > 0)
3169 wakeup(&p->p_sigmask);
3170
3171 p->p_signalholder = NULL;
3172 if (!locked)
3173 proc_unlock(p);
3174 }
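
/*
 * proc_signalstart()/proc_signalend() serialize delivery so that only
 * one thread signals a process at a time (P_LINSIGNAL). Sketch of the
 * bracket used throughout this file ('locked' says whether the caller
 * already holds the proc lock):
 *
 *	proc_signalstart(p, 1);
 *	... post or process the signal ...
 *	proc_signalend(p, 1);
 */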
3175
3176 void
3177 sig_lock_to_exit(proc_t p)
3178 {
3179 thread_t self = current_thread();
3180
3181 p->exit_thread = self;
3182 proc_unlock(p);
3183
3184 task_hold(p->task);
3185 task_wait(p->task, FALSE);
3186
3187 proc_lock(p);
3188 }
3189
3190 int
3191 sig_try_locked(proc_t p)
3192 {
3193 thread_t self = current_thread();
3194
3195 while (p->sigwait || p->exit_thread) {
3196 if (p->exit_thread) {
3197 return(0);
3198 }
3199 msleep((caddr_t)&p->sigwait_thread, &p->p_mlock, PCATCH | PDROP, 0, 0);
3200 if (thread_should_abort(self)) {
3201 /*
3202 * Terminate request - clean up.
3203 */
3204 proc_lock(p);
3205 return -1;
3206 }
3207 proc_lock(p);
3208 }
3209 return 1;
3210 }