2 * Copyright (c) 1995-2007 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 * Copyright (c) 1982, 1986, 1989, 1991, 1993
30 * The Regents of the University of California. All rights reserved.
31 * (c) UNIX System Laboratories, Inc.
32 * All or some portions of this file are derived from material licensed
33 * to the University of California by American Telephone and Telegraph
34 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
35 * the permission of UNIX System Laboratories, Inc.
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement:
47 * This product includes software developed by the University of
48 * California, Berkeley and its contributors.
49 * 4. Neither the name of the University nor the names of its contributors
50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission.
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94
68 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
69 * support for mandatory and extensible security protections. This notice
70 * is included in support of clause 2.2 (b) of the Apple Public License,
74 #define SIGPROP /* include signal properties table */
75 #include <sys/param.h>
76 #include <sys/resourcevar.h>
77 #include <sys/proc_internal.h>
78 #include <sys/kauth.h>
79 #include <sys/systm.h>
80 #include <sys/timeb.h>
81 #include <sys/times.h>
83 #include <sys/file_internal.h>
84 #include <sys/kernel.h>
86 #include <sys/signalvar.h>
87 #include <sys/syslog.h>
90 #include <sys/kdebug.h>
92 #include <sys/mount.h>
93 #include <sys/sysproto.h>
95 #include <security/audit/audit.h>
97 #include <machine/spl.h>
99 #include <kern/cpu_number.h>
102 #include <sys/user.h> /* for coredump */
103 #include <kern/ast.h> /* for APC support */
104 #include <kern/task.h> /* extern void *get_bsdtask_info(task_t); */
105 #include <kern/thread.h>
106 #include <kern/sched_prim.h>
107 #include <kern/thread_call.h>
108 #include <mach/exception.h>
109 #include <mach/task.h>
110 #include <mach/thread_act.h>
111 #include <libkern/OSAtomic.h>
114 #include <sys/codesign.h>
117 * Missing prototypes that Mach should export
121 extern int thread_enable_fpe(thread_t act
, int onoff
);
122 extern thread_t
port_name_to_thread(mach_port_name_t port_name
);
123 extern kern_return_t
get_signalact(task_t
, thread_t
*, int);
124 extern unsigned int get_useraddr(void);
130 extern void doexception(int exc
, mach_exception_code_t code
,
131 mach_exception_subcode_t sub
);
133 static void stop(proc_t
, proc_t
);
134 int cansignal(proc_t
, kauth_cred_t
, proc_t
, int, int);
135 int killpg1(proc_t
, int, int, int, int);
136 static void psignal_uthread(thread_t
, int);
137 static void psignal_try_thread(proc_t
, thread_t
, int signum
);
138 kern_return_t
do_bsdexception(int, int, int);
139 void __posix_sem_syscall_return(kern_return_t
);
140 char *proc_name_address(void *p
);
142 /* implementations in osfmk/kern/sync_sema.c. We do not want port.h in this scope, so void * them */
143 kern_return_t
semaphore_timedwait_signal_trap_internal(mach_port_name_t
, mach_port_name_t
, unsigned int, clock_res_t
, void (*)(kern_return_t
));
144 kern_return_t
semaphore_timedwait_trap_internal(mach_port_name_t
, unsigned int, clock_res_t
, void (*)(kern_return_t
));
145 kern_return_t
semaphore_wait_signal_trap_internal(mach_port_name_t
, mach_port_name_t
, void (*)(kern_return_t
));
146 kern_return_t
semaphore_wait_trap_internal(mach_port_name_t
, void (*)(kern_return_t
));
148 static int filt_sigattach(struct knote
*kn
);
149 static void filt_sigdetach(struct knote
*kn
);
150 static int filt_signal(struct knote
*kn
, long hint
);
151 static void filt_signaltouch(struct knote
*kn
, struct kevent_internal_s
*kev
,
154 struct filterops sig_filtops
= {
155 .f_attach
= filt_sigattach
,
156 .f_detach
= filt_sigdetach
,
157 .f_event
= filt_signal
,
158 .f_touch
= filt_signaltouch
,
161 /* structures and fns for killpg1 iterartion callback and filters */
162 struct killpg1_filtargs
{
167 struct killpg1_iterargs
{
175 static int killpg1_filt(proc_t p
, void * arg
);
176 static int killpg1_pgrpfilt(proc_t p
, __unused
void * arg
);
177 static int killpg1_callback(proc_t p
, void * arg
);
179 static int pgsignal_filt(proc_t p
, void * arg
);
180 static int pgsignal_callback(proc_t p
, void * arg
);
181 static kern_return_t
get_signalthread(proc_t
, int, thread_t
*);
184 /* flags for psignal_internal */
185 #define PSIG_LOCKED 0x1
186 #define PSIG_VFORK 0x2
187 #define PSIG_THREAD 0x4
188 #define PSIG_TRY_THREAD 0x8
191 static void psignal_internal(proc_t p
, task_t task
, thread_t thread
, int flavor
, int signum
);
194 * NOTE: Source and target may *NOT* overlap! (target is smaller)
197 sigaltstack_kern_to_user32(struct kern_sigaltstack
*in
, struct user32_sigaltstack
*out
)
199 out
->ss_sp
= CAST_DOWN_EXPLICIT(user32_addr_t
, in
->ss_sp
);
200 out
->ss_size
= CAST_DOWN_EXPLICIT(user32_size_t
, in
->ss_size
);
201 out
->ss_flags
= in
->ss_flags
;
205 sigaltstack_kern_to_user64(struct kern_sigaltstack
*in
, struct user64_sigaltstack
*out
)
207 out
->ss_sp
= in
->ss_sp
;
208 out
->ss_size
= in
->ss_size
;
209 out
->ss_flags
= in
->ss_flags
;
213 * NOTE: Source and target may are permitted to overlap! (source is smaller);
214 * this works because we copy fields in order from the end of the struct to
218 sigaltstack_user32_to_kern(struct user32_sigaltstack
*in
, struct kern_sigaltstack
*out
)
220 out
->ss_flags
= in
->ss_flags
;
221 out
->ss_size
= in
->ss_size
;
222 out
->ss_sp
= CAST_USER_ADDR_T(in
->ss_sp
);
225 sigaltstack_user64_to_kern(struct user64_sigaltstack
*in
, struct kern_sigaltstack
*out
)
227 out
->ss_flags
= in
->ss_flags
;
228 out
->ss_size
= in
->ss_size
;
229 out
->ss_sp
= in
->ss_sp
;
233 sigaction_kern_to_user32(struct kern_sigaction
*in
, struct user32_sigaction
*out
)
235 /* This assumes 32 bit __sa_handler is of type sig_t */
236 out
->__sigaction_u
.__sa_handler
= CAST_DOWN_EXPLICIT(user32_addr_t
,in
->__sigaction_u
.__sa_handler
);
237 out
->sa_mask
= in
->sa_mask
;
238 out
->sa_flags
= in
->sa_flags
;
241 sigaction_kern_to_user64(struct kern_sigaction
*in
, struct user64_sigaction
*out
)
243 /* This assumes 32 bit __sa_handler is of type sig_t */
244 out
->__sigaction_u
.__sa_handler
= in
->__sigaction_u
.__sa_handler
;
245 out
->sa_mask
= in
->sa_mask
;
246 out
->sa_flags
= in
->sa_flags
;
250 __sigaction_user32_to_kern(struct __user32_sigaction
*in
, struct __kern_sigaction
*out
)
252 out
->__sigaction_u
.__sa_handler
= CAST_USER_ADDR_T(in
->__sigaction_u
.__sa_handler
);
253 out
->sa_tramp
= CAST_USER_ADDR_T(in
->sa_tramp
);
254 out
->sa_mask
= in
->sa_mask
;
255 out
->sa_flags
= in
->sa_flags
;
259 __sigaction_user64_to_kern(struct __user64_sigaction
*in
, struct __kern_sigaction
*out
)
261 out
->__sigaction_u
.__sa_handler
= in
->__sigaction_u
.__sa_handler
;
262 out
->sa_tramp
= in
->sa_tramp
;
263 out
->sa_mask
= in
->sa_mask
;
264 out
->sa_flags
= in
->sa_flags
;
268 void ram_printf(int);
270 unsigned int rdebug_proc
=0;
277 #endif /* SIGNAL_DEBUG */
281 signal_setast(thread_t sig_actthread
)
283 act_set_astbsd(sig_actthread
);
287 * Can process p, with ucred uc, send the signal signum to process q?
288 * uc is refcounted by the caller so internal fileds can be used safely
289 * when called with zombie arg, list lock is held
292 cansignal(proc_t p
, kauth_cred_t uc
, proc_t q
, int signum
, int zombie
)
294 kauth_cred_t my_cred
;
295 struct session
* p_sessp
= SESSION_NULL
;
296 struct session
* q_sessp
= SESSION_NULL
;
300 error
= mac_proc_check_signal(p
, q
, signum
);
305 /* you can signal yourself */
309 /* you can't send launchd SIGKILL, even if root */
310 if (signum
== SIGKILL
&& q
== initproc
)
313 if (!suser(uc
, NULL
))
314 return (1); /* root can always signal */
318 if (p
->p_pgrp
!= PGRP_NULL
)
319 p_sessp
= p
->p_pgrp
->pg_session
;
320 if (q
->p_pgrp
!= PGRP_NULL
)
321 q_sessp
= q
->p_pgrp
->pg_session
;
323 if (signum
== SIGCONT
&& q_sessp
== p_sessp
) {
326 return (1); /* SIGCONT in session */
333 * If the real or effective UID of the sender matches the real
334 * or saved UID of the target, permit the signal to
338 my_cred
= kauth_cred_proc_ref(q
);
340 my_cred
= proc_ucred(q
);
342 if (kauth_cred_getruid(uc
) == kauth_cred_getruid(my_cred
) ||
343 kauth_cred_getruid(uc
) == kauth_cred_getsvuid(my_cred
) ||
344 kauth_cred_getuid(uc
) == kauth_cred_getruid(my_cred
) ||
345 kauth_cred_getuid(uc
) == kauth_cred_getsvuid(my_cred
)) {
347 kauth_cred_unref(&my_cred
);
352 kauth_cred_unref(&my_cred
);
358 * <rdar://problem/21952708> Some signals can be restricted from being handled,
359 * forcing the default action for that signal. This behavior applies only to
360 * non-root (EUID != 0) processes, and is configured with the "sigrestrict=x"
363 * 0 (default): Disallow use of restricted signals. Trying to register a handler
364 * returns ENOTSUP, which userspace may use to take special action (e.g. abort).
365 * 1: As above, but return EINVAL. Restricted signals behave similarly to SIGKILL.
366 * 2: Usual POSIX semantics.
368 unsigned sigrestrict_arg
= 0;
370 #if PLATFORM_WatchOS || PLATFORM_AppleTVOS
372 sigrestrictmask(void)
374 if (kauth_getuid() != 0 && sigrestrict_arg
!= 2) {
375 return SIGRESTRICTMASK
;
381 signal_is_restricted(proc_t p
, int signum
)
383 if (sigmask(signum
) & sigrestrictmask()) {
384 if (sigrestrict_arg
== 0 &&
385 task_get_apptype(p
->task
) == TASK_APPTYPE_APP_DEFAULT
) {
397 signal_is_restricted(proc_t p
, int signum
)
403 #endif /* !(PLATFORM_WatchOS || PLATFORM_AppleTVOS) */
411 * Notes: Uses current thread as a parameter to inform PPC to enable
412 * FPU exceptions via setsigvec(); this operation is not proxy
417 sigaction(proc_t p
, struct sigaction_args
*uap
, __unused
int32_t *retval
)
419 struct kern_sigaction vec
;
420 struct __kern_sigaction __vec
;
422 struct kern_sigaction
*sa
= &vec
;
423 struct sigacts
*ps
= p
->p_sigacts
;
428 signum
= uap
->signum
;
429 if (signum
<= 0 || signum
>= NSIG
||
430 signum
== SIGKILL
|| signum
== SIGSTOP
)
433 if ((error
= signal_is_restricted(p
, signum
))) {
434 if (error
== ENOTSUP
) {
435 printf("%s(%d): denied attempt to register action for signal %d\n",
436 proc_name_address(p
), proc_pid(p
), signum
);
442 sa
->sa_handler
= ps
->ps_sigact
[signum
];
443 sa
->sa_mask
= ps
->ps_catchmask
[signum
];
444 bit
= sigmask(signum
);
446 if ((ps
->ps_sigonstack
& bit
) != 0)
447 sa
->sa_flags
|= SA_ONSTACK
;
448 if ((ps
->ps_sigintr
& bit
) == 0)
449 sa
->sa_flags
|= SA_RESTART
;
450 if (ps
->ps_siginfo
& bit
)
451 sa
->sa_flags
|= SA_SIGINFO
;
452 if (ps
->ps_signodefer
& bit
)
453 sa
->sa_flags
|= SA_NODEFER
;
454 if (ps
->ps_64regset
& bit
)
455 sa
->sa_flags
|= SA_64REGSET
;
456 if ((signum
== SIGCHLD
) && (p
->p_flag
& P_NOCLDSTOP
))
457 sa
->sa_flags
|= SA_NOCLDSTOP
;
458 if ((signum
== SIGCHLD
) && (p
->p_flag
& P_NOCLDWAIT
))
459 sa
->sa_flags
|= SA_NOCLDWAIT
;
461 if (IS_64BIT_PROCESS(p
)) {
462 struct user64_sigaction vec64
;
464 sigaction_kern_to_user64(sa
, &vec64
);
465 error
= copyout(&vec64
, uap
->osa
, sizeof(vec64
));
467 struct user32_sigaction vec32
;
469 sigaction_kern_to_user32(sa
, &vec32
);
470 error
= copyout(&vec32
, uap
->osa
, sizeof(vec32
));
476 if (IS_64BIT_PROCESS(p
)) {
477 struct __user64_sigaction __vec64
;
479 error
= copyin(uap
->nsa
, &__vec64
, sizeof(__vec64
));
480 __sigaction_user64_to_kern(&__vec64
, &__vec
);
482 struct __user32_sigaction __vec32
;
484 error
= copyin(uap
->nsa
, &__vec32
, sizeof(__vec32
));
485 __sigaction_user32_to_kern(&__vec32
, &__vec
);
489 __vec
.sa_flags
&= SA_USERSPACE_MASK
; /* Only pass on valid sa_flags */
490 error
= setsigvec(p
, current_thread(), signum
, &__vec
, FALSE
);
495 /* Routines to manipulate bits on all threads */
497 clear_procsiglist(proc_t p
, int bit
, boolean_t in_signalstart
)
499 struct uthread
* uth
;
504 proc_signalstart(p
, 1);
506 if ((p
->p_lflag
& P_LINVFORK
) && p
->p_vforkact
) {
507 thact
= p
->p_vforkact
;
508 uth
= (struct uthread
*)get_bsdthread_info(thact
);
510 uth
->uu_siglist
&= ~bit
;
513 proc_signalend(p
, 1);
518 TAILQ_FOREACH(uth
, &p
->p_uthlist
, uu_list
) {
519 uth
->uu_siglist
&= ~bit
;
521 p
->p_siglist
&= ~bit
;
523 proc_signalend(p
, 1);
531 unblock_procsigmask(proc_t p
, int bit
)
533 struct uthread
* uth
;
537 proc_signalstart(p
, 1);
539 if ((p
->p_lflag
& P_LINVFORK
) && p
->p_vforkact
) {
540 thact
= p
->p_vforkact
;
541 uth
= (struct uthread
*)get_bsdthread_info(thact
);
543 uth
->uu_sigmask
&= ~bit
;
545 p
->p_sigmask
&= ~bit
;
546 proc_signalend(p
, 1);
550 TAILQ_FOREACH(uth
, &p
->p_uthlist
, uu_list
) {
551 uth
->uu_sigmask
&= ~bit
;
553 p
->p_sigmask
&= ~bit
;
555 proc_signalend(p
, 1);
561 block_procsigmask(proc_t p
, int bit
)
563 struct uthread
* uth
;
567 proc_signalstart(p
, 1);
569 if ((p
->p_lflag
& P_LINVFORK
) && p
->p_vforkact
) {
570 thact
= p
->p_vforkact
;
571 uth
= (struct uthread
*)get_bsdthread_info(thact
);
573 uth
->uu_sigmask
|= bit
;
576 proc_signalend(p
, 1);
580 TAILQ_FOREACH(uth
, &p
->p_uthlist
, uu_list
) {
581 uth
->uu_sigmask
|= bit
;
585 proc_signalend(p
, 1);
591 set_procsigmask(proc_t p
, int bit
)
593 struct uthread
* uth
;
597 proc_signalstart(p
, 1);
599 if ((p
->p_lflag
& P_LINVFORK
) && p
->p_vforkact
) {
600 thact
= p
->p_vforkact
;
601 uth
= (struct uthread
*)get_bsdthread_info(thact
);
603 uth
->uu_sigmask
= bit
;
606 proc_signalend(p
, 1);
610 TAILQ_FOREACH(uth
, &p
->p_uthlist
, uu_list
) {
611 uth
->uu_sigmask
= bit
;
614 proc_signalend(p
, 1);
620 /* XXX should be static? */
622 * Notes: The thread parameter is used in the PPC case to select the
623 * thread on which the floating point exception will be enabled
624 * or disabled. We can't simply take current_thread(), since
625 * this is called from posix_spawn() on the not currently running
626 * process/thread pair.
628 * We mark thread as unused to alow compilation without warning
629 * on non-PPC platforms.
632 setsigvec(proc_t p
, __unused thread_t thread
, int signum
, struct __kern_sigaction
*sa
, boolean_t in_sigstart
)
634 struct sigacts
*ps
= p
->p_sigacts
;
637 if ((signum
== SIGKILL
|| signum
== SIGSTOP
) &&
638 sa
->sa_handler
!= SIG_DFL
)
640 bit
= sigmask(signum
);
642 * Change setting atomically.
644 ps
->ps_sigact
[signum
] = sa
->sa_handler
;
645 ps
->ps_trampact
[signum
] = sa
->sa_tramp
;
646 ps
->ps_catchmask
[signum
] = sa
->sa_mask
&~ sigcantmask
;
647 if (sa
->sa_flags
& SA_SIGINFO
)
648 ps
->ps_siginfo
|= bit
;
650 ps
->ps_siginfo
&= ~bit
;
651 if (sa
->sa_flags
& SA_64REGSET
)
652 ps
->ps_64regset
|= bit
;
654 ps
->ps_64regset
&= ~bit
;
655 if ((sa
->sa_flags
& SA_RESTART
) == 0)
656 ps
->ps_sigintr
|= bit
;
658 ps
->ps_sigintr
&= ~bit
;
659 if (sa
->sa_flags
& SA_ONSTACK
)
660 ps
->ps_sigonstack
|= bit
;
662 ps
->ps_sigonstack
&= ~bit
;
663 if (sa
->sa_flags
& SA_USERTRAMP
)
664 ps
->ps_usertramp
|= bit
;
666 ps
->ps_usertramp
&= ~bit
;
667 if (sa
->sa_flags
& SA_RESETHAND
)
668 ps
->ps_sigreset
|= bit
;
670 ps
->ps_sigreset
&= ~bit
;
671 if (sa
->sa_flags
& SA_NODEFER
)
672 ps
->ps_signodefer
|= bit
;
674 ps
->ps_signodefer
&= ~bit
;
675 if (signum
== SIGCHLD
) {
676 if (sa
->sa_flags
& SA_NOCLDSTOP
)
677 OSBitOrAtomic(P_NOCLDSTOP
, &p
->p_flag
);
679 OSBitAndAtomic(~((uint32_t)P_NOCLDSTOP
), &p
->p_flag
);
680 if ((sa
->sa_flags
& SA_NOCLDWAIT
) || (sa
->sa_handler
== SIG_IGN
))
681 OSBitOrAtomic(P_NOCLDWAIT
, &p
->p_flag
);
683 OSBitAndAtomic(~((uint32_t)P_NOCLDWAIT
), &p
->p_flag
);
687 * Set bit in p_sigignore for signals that are set to SIG_IGN,
688 * and for signals set to SIG_DFL where the default is to ignore.
689 * However, don't put SIGCONT in p_sigignore,
690 * as we have to restart the process.
692 if (sa
->sa_handler
== SIG_IGN
||
693 (sigprop
[signum
] & SA_IGNORE
&& sa
->sa_handler
== SIG_DFL
)) {
695 clear_procsiglist(p
, bit
, in_sigstart
);
696 if (signum
!= SIGCONT
)
697 p
->p_sigignore
|= bit
; /* easier in psignal */
698 p
->p_sigcatch
&= ~bit
;
700 p
->p_sigignore
&= ~bit
;
701 if (sa
->sa_handler
== SIG_DFL
)
702 p
->p_sigcatch
&= ~bit
;
704 p
->p_sigcatch
|= bit
;
710 * Initialize signal state for process 0;
711 * set to ignore signals that are ignored by default.
718 for (i
= 1; i
< NSIG
; i
++)
719 if (sigprop
[i
] & SA_IGNORE
&& i
!= SIGCONT
)
720 p
->p_sigignore
|= sigmask(i
);
724 * Reset signals for an exec of the specified process.
727 execsigs(proc_t p
, thread_t thread
)
729 struct sigacts
*ps
= p
->p_sigacts
;
733 ut
= (struct uthread
*)get_bsdthread_info(thread
);
736 * transfer saved signal states from the process
737 * back to the current thread.
739 * NOTE: We do this without the process locked,
740 * because we are guaranteed to be single-threaded
741 * by this point in exec and the p_siglist is
742 * only accessed by threads inside the process.
744 ut
->uu_siglist
|= p
->p_siglist
;
748 * Reset caught signals. Held signals remain held
749 * through p_sigmask (unless they were caught,
750 * and are now ignored by default).
752 while (p
->p_sigcatch
) {
753 nc
= ffs((long)p
->p_sigcatch
);
755 p
->p_sigcatch
&= ~mask
;
756 if (sigprop
[nc
] & SA_IGNORE
) {
758 p
->p_sigignore
|= mask
;
759 ut
->uu_siglist
&= ~mask
;
761 ps
->ps_sigact
[nc
] = SIG_DFL
;
765 * Reset stack state to the user stack.
766 * Clear set of signals caught on the signal stack.
769 ut
->uu_sigstk
.ss_flags
= SA_DISABLE
;
770 ut
->uu_sigstk
.ss_size
= 0;
771 ut
->uu_sigstk
.ss_sp
= USER_ADDR_NULL
;
772 ut
->uu_flag
&= ~UT_ALTSTACK
;
774 ps
->ps_sigonstack
= 0;
778 * Manipulate signal mask.
779 * Note that we receive new mask, not pointer,
780 * and return old mask as return value;
781 * the library stub does the rest.
784 sigprocmask(proc_t p
, struct sigprocmask_args
*uap
, __unused
int32_t *retval
)
787 sigset_t oldmask
, nmask
;
788 user_addr_t omask
= uap
->omask
;
791 ut
= (struct uthread
*)get_bsdthread_info(current_thread());
792 oldmask
= ut
->uu_sigmask
;
794 if (uap
->mask
== USER_ADDR_NULL
) {
795 /* just want old mask */
798 error
= copyin(uap
->mask
, &nmask
, sizeof(sigset_t
));
804 block_procsigmask(p
, (nmask
& ~sigcantmask
));
805 signal_setast(current_thread());
809 unblock_procsigmask(p
, (nmask
& ~sigcantmask
));
810 signal_setast(current_thread());
814 set_procsigmask(p
, (nmask
& ~sigcantmask
));
815 signal_setast(current_thread());
823 if (!error
&& omask
!= USER_ADDR_NULL
)
824 copyout(&oldmask
, omask
, sizeof(sigset_t
));
829 sigpending(__unused proc_t p
, struct sigpending_args
*uap
, __unused
int32_t *retval
)
834 ut
= (struct uthread
*)get_bsdthread_info(current_thread());
835 pendlist
= ut
->uu_siglist
;
838 copyout(&pendlist
, uap
->osv
, sizeof(sigset_t
));
843 * Suspend process until signal, providing mask to be set
844 * in the meantime. Note nonstandard calling convention:
845 * libc stub passes mask, not pointer, to save a copyin.
849 sigcontinue(__unused
int error
)
851 // struct uthread *ut = get_bsdthread_info(current_thread());
852 unix_syscall_return(EINTR
);
856 sigsuspend(proc_t p
, struct sigsuspend_args
*uap
, int32_t *retval
)
858 __pthread_testcancel(1);
859 return(sigsuspend_nocancel(p
, (struct sigsuspend_nocancel_args
*)uap
, retval
));
863 sigsuspend_nocancel(proc_t p
, struct sigsuspend_nocancel_args
*uap
, __unused
int32_t *retval
)
867 ut
= (struct uthread
*)get_bsdthread_info(current_thread());
870 * When returning from sigpause, we want
871 * the old mask to be restored after the
872 * signal handler has finished. Thus, we
873 * save it here and mark the sigacts structure
876 ut
->uu_oldmask
= ut
->uu_sigmask
;
877 ut
->uu_flag
|= UT_SAS_OLDMASK
;
878 ut
->uu_sigmask
= (uap
->mask
& ~sigcantmask
);
879 (void) tsleep0((caddr_t
) p
, PPAUSE
|PCATCH
, "pause", 0, sigcontinue
);
880 /* always return EINTR rather than ERESTART... */
886 __disable_threadsignal(__unused proc_t p
,
887 __unused
struct __disable_threadsignal_args
*uap
,
888 __unused
int32_t *retval
)
892 uth
= (struct uthread
*)get_bsdthread_info(current_thread());
894 /* No longer valid to have any signal delivered */
895 uth
->uu_flag
|= (UT_NO_SIGMASK
| UT_CANCELDISABLE
);
902 __pthread_testcancel(int presyscall
)
905 thread_t self
= current_thread();
906 struct uthread
* uthread
;
908 uthread
= (struct uthread
*)get_bsdthread_info(self
);
911 uthread
->uu_flag
&= ~UT_NOTCANCELPT
;
913 if ((uthread
->uu_flag
& (UT_CANCELDISABLE
| UT_CANCEL
| UT_CANCELED
)) == UT_CANCEL
) {
914 if(presyscall
!= 0) {
915 unix_syscall_return(EINTR
);
918 thread_abort_safely(self
);
925 __pthread_markcancel(__unused proc_t p
,
926 struct __pthread_markcancel_args
*uap
, __unused
int32_t *retval
)
928 thread_act_t target_act
;
932 target_act
= (thread_act_t
)port_name_to_thread(uap
->thread_port
);
934 if (target_act
== THR_ACT_NULL
)
937 uth
= (struct uthread
*)get_bsdthread_info(target_act
);
939 /* if the thread is in vfork do not cancel */
940 if ((uth
->uu_flag
& (UT_VFORK
| UT_CANCEL
| UT_CANCELED
)) == 0) {
941 uth
->uu_flag
|= (UT_CANCEL
| UT_NO_SIGMASK
);
942 if (((uth
->uu_flag
& UT_NOTCANCELPT
) == 0)
943 && ((uth
->uu_flag
& UT_CANCELDISABLE
) == 0))
944 thread_abort_safely(target_act
);
947 thread_deallocate(target_act
);
951 /* if action =0 ; return the cancellation state ,
952 * if marked for cancellation, make the thread canceled
953 * if action = 1 ; Enable the cancel handling
954 * if action = 2; Disable the cancel handling
957 __pthread_canceled(__unused proc_t p
,
958 struct __pthread_canceled_args
*uap
, __unused
int32_t *retval
)
962 int action
= uap
->action
;
964 thread
= current_thread();
965 uth
= (struct uthread
*)get_bsdthread_info(thread
);
969 uth
->uu_flag
&= ~UT_CANCELDISABLE
;
972 uth
->uu_flag
|= UT_CANCELDISABLE
;
976 /* if the thread is in vfork do not cancel */
977 if((uth
->uu_flag
& ( UT_CANCELDISABLE
| UT_CANCEL
| UT_CANCELED
)) == UT_CANCEL
) {
978 uth
->uu_flag
&= ~UT_CANCEL
;
979 uth
->uu_flag
|= (UT_CANCELED
| UT_NO_SIGMASK
);
988 __posix_sem_syscall_return(kern_return_t kern_result
)
992 if (kern_result
== KERN_SUCCESS
)
994 else if (kern_result
== KERN_ABORTED
)
996 else if (kern_result
== KERN_OPERATION_TIMED_OUT
)
1000 unix_syscall_return(error
);
1001 /* does not return */
1004 #if OLD_SEMWAIT_SIGNAL
1006 * Returns: 0 Success
1010 * EFAULT if timespec is NULL
1013 __old_semwait_signal(proc_t p
, struct __old_semwait_signal_args
*uap
,
1016 __pthread_testcancel(0);
1017 return(__old_semwait_signal_nocancel(p
, (struct __old_semwait_signal_nocancel_args
*)uap
, retval
));
1021 __old_semwait_signal_nocancel(proc_t p
, struct __old_semwait_signal_nocancel_args
*uap
,
1022 __unused
int32_t *retval
)
1025 kern_return_t kern_result
;
1027 mach_timespec_t then
;
1028 struct timespec now
;
1029 struct user_timespec ts
;
1030 boolean_t truncated_timeout
= FALSE
;
1034 if (IS_64BIT_PROCESS(p
)) {
1035 struct user64_timespec ts64
;
1036 error
= copyin(uap
->ts
, &ts64
, sizeof(ts64
));
1037 ts
.tv_sec
= ts64
.tv_sec
;
1038 ts
.tv_nsec
= ts64
.tv_nsec
;
1040 struct user32_timespec ts32
;
1041 error
= copyin(uap
->ts
, &ts32
, sizeof(ts32
));
1042 ts
.tv_sec
= ts32
.tv_sec
;
1043 ts
.tv_nsec
= ts32
.tv_nsec
;
1050 if ((ts
.tv_sec
& 0xFFFFFFFF00000000ULL
) != 0) {
1051 ts
.tv_sec
= 0xFFFFFFFF;
1053 truncated_timeout
= TRUE
;
1056 if (uap
->relative
) {
1057 then
.tv_sec
= ts
.tv_sec
;
1058 then
.tv_nsec
= ts
.tv_nsec
;
1062 /* if time has elapsed, set time to null timepsec to bailout rightaway */
1063 if (now
.tv_sec
== ts
.tv_sec
?
1064 now
.tv_nsec
> ts
.tv_nsec
:
1065 now
.tv_sec
> ts
.tv_sec
) {
1069 then
.tv_sec
= ts
.tv_sec
- now
.tv_sec
;
1070 then
.tv_nsec
= ts
.tv_nsec
- now
.tv_nsec
;
1071 if (then
.tv_nsec
< 0) {
1072 then
.tv_nsec
+= NSEC_PER_SEC
;
1078 if (uap
->mutex_sem
== 0)
1079 kern_result
= semaphore_timedwait_trap_internal((mach_port_name_t
)uap
->cond_sem
, then
.tv_sec
, then
.tv_nsec
, __posix_sem_syscall_return
);
1081 kern_result
= semaphore_timedwait_signal_trap_internal(uap
->cond_sem
, uap
->mutex_sem
, then
.tv_sec
, then
.tv_nsec
, __posix_sem_syscall_return
);
1085 if (uap
->mutex_sem
== 0)
1086 kern_result
= semaphore_wait_trap_internal(uap
->cond_sem
, __posix_sem_syscall_return
);
1089 kern_result
= semaphore_wait_signal_trap_internal(uap
->cond_sem
, uap
->mutex_sem
, __posix_sem_syscall_return
);
1092 if (kern_result
== KERN_SUCCESS
&& !truncated_timeout
)
1094 else if (kern_result
== KERN_SUCCESS
&& truncated_timeout
)
1095 return(EINTR
); /* simulate an exceptional condition because Mach doesn't support a longer timeout */
1096 else if (kern_result
== KERN_ABORTED
)
1098 else if (kern_result
== KERN_OPERATION_TIMED_OUT
)
1103 #endif /* OLD_SEMWAIT_SIGNAL*/
1106 * Returns: 0 Success
1110 * EFAULT if timespec is NULL
1113 __semwait_signal(proc_t p
, struct __semwait_signal_args
*uap
,
1116 __pthread_testcancel(0);
1117 return(__semwait_signal_nocancel(p
, (struct __semwait_signal_nocancel_args
*)uap
, retval
));
1121 __semwait_signal_nocancel(__unused proc_t p
, struct __semwait_signal_nocancel_args
*uap
,
1122 __unused
int32_t *retval
)
1125 kern_return_t kern_result
;
1126 mach_timespec_t then
;
1127 struct timespec now
;
1128 struct user_timespec ts
;
1129 boolean_t truncated_timeout
= FALSE
;
1133 ts
.tv_sec
= uap
->tv_sec
;
1134 ts
.tv_nsec
= uap
->tv_nsec
;
1136 if ((ts
.tv_sec
& 0xFFFFFFFF00000000ULL
) != 0) {
1137 ts
.tv_sec
= 0xFFFFFFFF;
1139 truncated_timeout
= TRUE
;
1142 if (uap
->relative
) {
1143 then
.tv_sec
= ts
.tv_sec
;
1144 then
.tv_nsec
= ts
.tv_nsec
;
1148 /* if time has elapsed, set time to null timepsec to bailout rightaway */
1149 if (now
.tv_sec
== ts
.tv_sec
?
1150 now
.tv_nsec
> ts
.tv_nsec
:
1151 now
.tv_sec
> ts
.tv_sec
) {
1155 then
.tv_sec
= ts
.tv_sec
- now
.tv_sec
;
1156 then
.tv_nsec
= ts
.tv_nsec
- now
.tv_nsec
;
1157 if (then
.tv_nsec
< 0) {
1158 then
.tv_nsec
+= NSEC_PER_SEC
;
1164 if (uap
->mutex_sem
== 0)
1165 kern_result
= semaphore_timedwait_trap_internal((mach_port_name_t
)uap
->cond_sem
, then
.tv_sec
, then
.tv_nsec
, __posix_sem_syscall_return
);
1167 kern_result
= semaphore_timedwait_signal_trap_internal(uap
->cond_sem
, uap
->mutex_sem
, then
.tv_sec
, then
.tv_nsec
, __posix_sem_syscall_return
);
1171 if (uap
->mutex_sem
== 0)
1172 kern_result
= semaphore_wait_trap_internal(uap
->cond_sem
, __posix_sem_syscall_return
);
1175 kern_result
= semaphore_wait_signal_trap_internal(uap
->cond_sem
, uap
->mutex_sem
, __posix_sem_syscall_return
);
1178 if (kern_result
== KERN_SUCCESS
&& !truncated_timeout
)
1180 else if (kern_result
== KERN_SUCCESS
&& truncated_timeout
)
1181 return(EINTR
); /* simulate an exceptional condition because Mach doesn't support a longer timeout */
1182 else if (kern_result
== KERN_ABORTED
)
1184 else if (kern_result
== KERN_OPERATION_TIMED_OUT
)
1192 __pthread_kill(__unused proc_t p
, struct __pthread_kill_args
*uap
,
1193 __unused
int32_t *retval
)
1195 thread_t target_act
;
1197 int signum
= uap
->sig
;
1198 struct uthread
*uth
;
1200 target_act
= (thread_t
)port_name_to_thread(uap
->thread_port
);
1202 if (target_act
== THREAD_NULL
)
1204 if ((u_int
)signum
>= NSIG
) {
1209 uth
= (struct uthread
*)get_bsdthread_info(target_act
);
1211 if (uth
->uu_flag
& UT_NO_SIGMASK
) {
1217 psignal_uthread(target_act
, signum
);
1219 thread_deallocate(target_act
);
1225 __pthread_sigmask(__unused proc_t p
, struct __pthread_sigmask_args
*uap
,
1226 __unused
int32_t *retval
)
1228 user_addr_t set
= uap
->set
;
1229 user_addr_t oset
= uap
->oset
;
1235 ut
= (struct uthread
*)get_bsdthread_info(current_thread());
1236 oldset
= ut
->uu_sigmask
;
1238 if (set
== USER_ADDR_NULL
) {
1239 /* need only old mask */
1243 error
= copyin(set
, &nset
, sizeof(sigset_t
));
1249 ut
->uu_sigmask
|= (nset
& ~sigcantmask
);
1253 ut
->uu_sigmask
&= ~(nset
);
1254 signal_setast(current_thread());
1258 ut
->uu_sigmask
= (nset
& ~sigcantmask
);
1259 signal_setast(current_thread());
1267 if (!error
&& oset
!= USER_ADDR_NULL
)
1268 copyout(&oldset
, oset
, sizeof(sigset_t
));
1274 * Returns: 0 Success
1280 __sigwait(proc_t p
, struct __sigwait_args
*uap
, int32_t *retval
)
1282 __pthread_testcancel(1);
1283 return(__sigwait_nocancel(p
, (struct __sigwait_nocancel_args
*)uap
, retval
));
1287 __sigwait_nocancel(proc_t p
, struct __sigwait_nocancel_args
*uap
, __unused
int32_t *retval
)
1290 struct uthread
*uth
;
1297 ut
= (struct uthread
*)get_bsdthread_info(current_thread());
1299 if (uap
->set
== USER_ADDR_NULL
)
1302 error
= copyin(uap
->set
, &mask
, sizeof(sigset_t
));
1306 siglist
= (mask
& ~sigcantmask
);
1312 if ((p
->p_lflag
& P_LINVFORK
) && p
->p_vforkact
) {
1316 proc_signalstart(p
, 1);
1317 TAILQ_FOREACH(uth
, &p
->p_uthlist
, uu_list
) {
1318 if ( (sigw
= uth
->uu_siglist
& siglist
) ) {
1322 proc_signalend(p
, 1);
1326 /* The signal was pending on a thread */
1330 * When returning from sigwait, we want
1331 * the old mask to be restored after the
1332 * signal handler has finished. Thus, we
1333 * save it here and mark the sigacts structure
1336 uth
= ut
; /* wait for it to be delivered to us */
1337 ut
->uu_oldmask
= ut
->uu_sigmask
;
1338 ut
->uu_flag
|= UT_SAS_OLDMASK
;
1339 if (siglist
== (sigset_t
)0) {
1343 /* SIGKILL and SIGSTOP are not maskable as well */
1344 ut
->uu_sigmask
= ~(siglist
|sigcantmask
);
1345 ut
->uu_sigwait
= siglist
;
1347 /* No Continuations for now */
1348 error
= msleep((caddr_t
)&ut
->uu_sigwait
, &p
->p_mlock
, PPAUSE
|PCATCH
, "pause", 0);
1350 if (error
== ERESTART
)
1353 sigw
= (ut
->uu_sigwait
& siglist
);
1354 ut
->uu_sigmask
= ut
->uu_oldmask
;
1356 ut
->uu_flag
&= ~UT_SAS_OLDMASK
;
1360 signum
= ffs((unsigned int)sigw
);
1362 panic("sigwait with no signal wakeup");
1363 /* Clear the pending signal in the thread it was delivered */
1364 uth
->uu_siglist
&= ~(sigmask(signum
));
1367 DTRACE_PROC2(signal__clear
, int, signum
, siginfo_t
*, &(ut
->t_dtrace_siginfo
));
1371 if (uap
->sig
!= USER_ADDR_NULL
)
1372 error
= copyout(&signum
, uap
->sig
, sizeof(int));
1381 sigaltstack(__unused proc_t p
, struct sigaltstack_args
*uap
, __unused
int32_t *retval
)
1383 struct kern_sigaltstack ss
;
1384 struct kern_sigaltstack
*pstk
;
1386 struct uthread
*uth
;
1389 uth
= (struct uthread
*)get_bsdthread_info(current_thread());
1391 pstk
= &uth
->uu_sigstk
;
1392 if ((uth
->uu_flag
& UT_ALTSTACK
) == 0)
1393 uth
->uu_sigstk
.ss_flags
|= SA_DISABLE
;
1394 onstack
= pstk
->ss_flags
& SA_ONSTACK
;
1396 if (IS_64BIT_PROCESS(p
)) {
1397 struct user64_sigaltstack ss64
;
1398 sigaltstack_kern_to_user64(pstk
, &ss64
);
1399 error
= copyout(&ss64
, uap
->oss
, sizeof(ss64
));
1401 struct user32_sigaltstack ss32
;
1402 sigaltstack_kern_to_user32(pstk
, &ss32
);
1403 error
= copyout(&ss32
, uap
->oss
, sizeof(ss32
));
1408 if (uap
->nss
== USER_ADDR_NULL
)
1410 if (IS_64BIT_PROCESS(p
)) {
1411 struct user64_sigaltstack ss64
;
1412 error
= copyin(uap
->nss
, &ss64
, sizeof(ss64
));
1413 sigaltstack_user64_to_kern(&ss64
, &ss
);
1415 struct user32_sigaltstack ss32
;
1416 error
= copyin(uap
->nss
, &ss32
, sizeof(ss32
));
1417 sigaltstack_user32_to_kern(&ss32
, &ss
);
1421 if ((ss
.ss_flags
& ~SA_DISABLE
) != 0) {
1425 if (ss
.ss_flags
& SA_DISABLE
) {
1426 /* if we are here we are not in the signal handler ;so no need to check */
1427 if (uth
->uu_sigstk
.ss_flags
& SA_ONSTACK
)
1429 uth
->uu_flag
&= ~UT_ALTSTACK
;
1430 uth
->uu_sigstk
.ss_flags
= ss
.ss_flags
;
1435 /* The older stacksize was 8K, enforce that one so no compat problems */
1436 #define OLDMINSIGSTKSZ 8*1024
1437 if (ss
.ss_size
< OLDMINSIGSTKSZ
)
1439 uth
->uu_flag
|= UT_ALTSTACK
;
1445 kill(proc_t cp
, struct kill_args
*uap
, __unused
int32_t *retval
)
1448 kauth_cred_t uc
= kauth_cred_get();
1449 int posix
= uap
->posix
; /* !0 if posix behaviour desired */
1451 AUDIT_ARG(pid
, uap
->pid
);
1452 AUDIT_ARG(signum
, uap
->signum
);
1454 if ((u_int
)uap
->signum
>= NSIG
)
1457 /* kill single process */
1458 if ((p
= proc_find(uap
->pid
)) == NULL
) {
1459 if ((p
= pzfind(uap
->pid
)) != NULL
) {
1461 * IEEE Std 1003.1-2001: return success
1462 * when killing a zombie.
1468 AUDIT_ARG(process
, p
);
1469 if (!cansignal(cp
, uc
, p
, uap
->signum
, 0)) {
1474 psignal(p
, uap
->signum
);
1479 case -1: /* broadcast signal */
1480 return (killpg1(cp
, uap
->signum
, 0, 1, posix
));
1481 case 0: /* signal own process group */
1482 return (killpg1(cp
, uap
->signum
, 0, 0, posix
));
1483 default: /* negative explicit process group */
1484 return (killpg1(cp
, uap
->signum
, -(uap
->pid
), 0, posix
));
1490 killpg1_filt(proc_t p
, void * arg
)
1492 struct killpg1_filtargs
* kfargp
= (struct killpg1_filtargs
*)arg
;
1493 proc_t cp
= kfargp
->cp
;
1494 int posix
= kfargp
->posix
;
1497 if (p
->p_pid
<= 1 || p
->p_flag
& P_SYSTEM
||
1498 (!posix
&& p
== cp
))
1506 killpg1_pgrpfilt(proc_t p
, __unused
void * arg
)
1508 if (p
->p_pid
<= 1 || p
->p_flag
& P_SYSTEM
||
1509 (p
->p_stat
== SZOMB
))
1518 killpg1_callback(proc_t p
, void * arg
)
1520 struct killpg1_iterargs
* kargp
= (struct killpg1_iterargs
*)arg
;
1521 proc_t cp
= kargp
->cp
;
1522 kauth_cred_t uc
= kargp
->uc
; /* refcounted by the caller safe to use internal fields */
1523 int signum
= kargp
->signum
;
1524 int * nfoundp
= kargp
->nfoundp
;
1529 if ((kargp
->zombie
!= 0) && ((p
->p_listflag
& P_LIST_EXITED
) == P_LIST_EXITED
))
1534 error
= cansignal(cp
, uc
, p
, signum
, zombie
);
1537 if (error
!= 0 && nfoundp
!= NULL
) {
1542 if (cansignal(cp
, uc
, p
, signum
, 0) == 0)
1543 return(PROC_RETURNED
);
1545 if (nfoundp
!= NULL
) {
1553 return(PROC_RETURNED
);
1557 * Common code for kill process group/broadcast kill.
1558 * cp is calling process.
1561 killpg1(proc_t cp
, int signum
, int pgid
, int all
, int posix
)
1566 struct killpg1_iterargs karg
;
1567 struct killpg1_filtargs kfarg
;
1570 uc
= kauth_cred_proc_ref(cp
);
1575 kfarg
.posix
= posix
;
1580 karg
.nfoundp
= &nfound
;
1581 karg
.signum
= signum
;
1584 proc_iterate((PROC_ALLPROCLIST
| PROC_ZOMBPROCLIST
), killpg1_callback
, &karg
, killpg1_filt
, (void *)&kfarg
);
1589 * zero pgid means send to my process group.
1591 pgrp
= proc_pgrp(cp
);
1593 pgrp
= pgfind(pgid
);
1600 karg
.nfoundp
= &nfound
;
1602 karg
.signum
= signum
;
1607 /* PGRP_DROPREF drops the pgrp refernce */
1608 pgrp_iterate(pgrp
, PGRP_BLOCKITERATE
| PGRP_DROPREF
, killpg1_callback
, &karg
,
1609 killpg1_pgrpfilt
, NULL
);
1611 error
= (nfound
? 0 : (posix
? EPERM
: ESRCH
));
1613 kauth_cred_unref(&uc
);
/*
 * Send a signal to the process group identified by pgid.
 * A pgid of 0 or an unknown group is silently ignored.
 */
void
gsignal(int pgid, int signum)
{
	struct pgrp *pgrp;

	if (pgid && (pgrp = pgfind(pgid))) {
		pgsignal(pgrp, signum, 0);
		pg_rele(pgrp);	/* drop the ref taken by pgfind */
	}
}
1633 * Send a signal to a process group. If checkctty is 1,
1634 * limit to members which have a controlling terminal.
1638 pgsignal_filt(proc_t p
, void * arg
)
1640 int checkctty
= *(int*)arg
;
1642 if ((checkctty
== 0) || p
->p_flag
& P_CONTROLT
)
1650 pgsignal_callback(proc_t p
, void * arg
)
1652 int signum
= *(int*)arg
;
1655 return(PROC_RETURNED
);
1660 pgsignal(struct pgrp
*pgrp
, int signum
, int checkctty
)
1662 if (pgrp
!= PGRP_NULL
) {
1663 pgrp_iterate(pgrp
, PGRP_BLOCKITERATE
, pgsignal_callback
, &signum
, pgsignal_filt
, &checkctty
);
1669 tty_pgsignal(struct tty
*tp
, int signum
, int checkctty
)
1674 if (pg
!= PGRP_NULL
) {
1675 pgrp_iterate(pg
, PGRP_BLOCKITERATE
, pgsignal_callback
, &signum
, pgsignal_filt
, &checkctty
);
1680 * Send a signal caused by a trap to a specific thread.
1683 threadsignal(thread_t sig_actthread
, int signum
, mach_exception_code_t code
)
1685 struct uthread
*uth
;
1686 struct task
* sig_task
;
1690 if ((u_int
)signum
>= NSIG
|| signum
== 0)
1693 mask
= sigmask(signum
);
1694 if ((mask
& threadmask
) == 0)
1696 sig_task
= get_threadtask(sig_actthread
);
1697 p
= (proc_t
)(get_bsdtask_info(sig_task
));
1699 uth
= get_bsdthread_info(sig_actthread
);
1700 if (uth
->uu_flag
& UT_VFORK
)
1704 if (!(p
->p_lflag
& P_LTRACED
) && (p
->p_sigignore
& mask
)) {
1709 uth
->uu_siglist
|= mask
;
1710 uth
->uu_code
= code
;
1713 /* mark on process as well */
1714 signal_setast(sig_actthread
);
1717 static kern_return_t
1718 get_signalthread(proc_t p
, int signum
, thread_t
* thr
)
1720 struct uthread
*uth
;
1721 sigset_t mask
= sigmask(signum
);
1722 thread_t sig_thread
;
1723 struct task
* sig_task
= p
->task
;
1728 if ((p
->p_lflag
& P_LINVFORK
) && p
->p_vforkact
) {
1729 sig_thread
= p
->p_vforkact
;
1730 kret
= check_actforsig(sig_task
, sig_thread
, 1);
1731 if (kret
== KERN_SUCCESS
) {
1733 return(KERN_SUCCESS
);
1735 return(KERN_FAILURE
);
1740 TAILQ_FOREACH(uth
, &p
->p_uthlist
, uu_list
) {
1741 if(((uth
->uu_flag
& UT_NO_SIGMASK
)== 0) &&
1742 (((uth
->uu_sigmask
& mask
) == 0) || (uth
->uu_sigwait
& mask
))) {
1743 if (check_actforsig(p
->task
, uth
->uu_context
.vc_thread
, 1) == KERN_SUCCESS
) {
1744 *thr
= uth
->uu_context
.vc_thread
;
1746 return(KERN_SUCCESS
);
1751 if (get_signalact(p
->task
, thr
, 1) == KERN_SUCCESS
) {
1752 return(KERN_SUCCESS
);
1755 return(KERN_FAILURE
);
1759 * Send the signal to the process. If the signal has an action, the action
1760 * is usually performed by the target process rather than the caller; we add
1761 * the signal to the set of pending signals for the process.
1764 * o When a stop signal is sent to a sleeping process that takes the
1765 * default action, the process is stopped without awakening it.
1766 * o SIGCONT restarts stopped processes (or puts them back to sleep)
1767 * regardless of the signal action (eg, blocked or ignored).
1769 * Other ignored signals are discarded immediately.
1772 psignal_internal(proc_t p
, task_t task
, thread_t thread
, int flavor
, int signum
)
1775 user_addr_t action
= USER_ADDR_NULL
;
1777 thread_t sig_thread
;
1778 register task_t sig_task
;
1780 struct uthread
*uth
;
1784 kauth_cred_t my_cred
;
1786 if ((u_int
)signum
>= NSIG
|| signum
== 0)
1787 panic("psignal signal number");
1788 mask
= sigmask(signum
);
1789 prop
= sigprop
[signum
];
1792 if(rdebug_proc
&& (p
!= PROC_NULL
) && (p
== rdebug_proc
)) {
1795 #endif /* SIGNAL_DEBUG */
1797 /* catch unexpected initproc kills early for easier debugging */
1798 if (signum
== SIGKILL
&& p
== initproc
)
1799 panic_plain("unexpected SIGKILL of %s %s",
1800 (p
->p_name
[0] != '\0' ? p
->p_name
: "initproc"),
1801 ((p
->p_csflags
& CS_KILLED
) ? "(CS_KILLED)" : ""));
1804 * We will need the task pointer later. Grab it now to
1805 * check for a zombie process. Also don't send signals
1806 * to kernel internal tasks.
1808 if (flavor
& PSIG_VFORK
) {
1810 sig_thread
= thread
;
1812 } else if (flavor
& PSIG_THREAD
) {
1813 sig_task
= get_threadtask(thread
);
1814 sig_thread
= thread
;
1815 sig_proc
= (proc_t
)get_bsdtask_info(sig_task
);
1816 } else if (flavor
& PSIG_TRY_THREAD
) {
1818 sig_thread
= thread
;
1822 sig_thread
= (struct thread
*)0;
1826 if ((sig_task
== TASK_NULL
) || is_kerneltask(sig_task
))
1830 * do not send signals to the process that has the thread
1831 * doing a reboot(). Not doing so will mark that thread aborted
1832 * and can cause IO failures which will cause data loss. There's
1833 * also no need to send a signal to a process that is in the middle
1834 * of being torn down.
1836 if (ISSET(sig_proc
->p_flag
, P_REBOOT
) ||
1837 ISSET(sig_proc
->p_lflag
, P_LEXIT
))
1840 if( (flavor
& (PSIG_VFORK
| PSIG_THREAD
)) == 0) {
1841 proc_knote(sig_proc
, NOTE_SIGNAL
| signum
);
1844 if ((flavor
& PSIG_LOCKED
)== 0)
1845 proc_signalstart(sig_proc
, 0);
1848 * Deliver the signal to the first thread in the task. This
1849 * allows single threaded applications which use signals to
1850 * be able to be linked with multithreaded libraries. We have
1851 * an implicit reference to the current thread, but need
1852 * an explicit one otherwise. The thread reference keeps
1853 * the corresponding task data structures around too. This
1854 * reference is released by thread_deallocate.
1858 if (((flavor
& PSIG_VFORK
) == 0) && ((sig_proc
->p_lflag
& P_LTRACED
) == 0) && (sig_proc
->p_sigignore
& mask
)) {
1859 DTRACE_PROC3(signal__discard
, thread_t
, sig_thread
, proc_t
, sig_proc
, int, signum
);
1863 if (flavor
& PSIG_VFORK
) {
1865 act_set_astbsd(sig_thread
);
1866 kret
= KERN_SUCCESS
;
1867 } else if (flavor
& PSIG_TRY_THREAD
) {
1868 uth
= get_bsdthread_info(sig_thread
);
1869 if (((uth
->uu_flag
& UT_NO_SIGMASK
) == 0) &&
1870 (((uth
->uu_sigmask
& mask
) == 0) || (uth
->uu_sigwait
& mask
)) &&
1871 ((kret
= check_actforsig(sig_proc
->task
, sig_thread
, 1)) == KERN_SUCCESS
)) {
1872 /* deliver to specified thread */
1874 /* deliver to any willing thread */
1875 kret
= get_signalthread(sig_proc
, signum
, &sig_thread
);
1877 } else if (flavor
& PSIG_THREAD
) {
1878 /* If successful return with ast set */
1879 kret
= check_actforsig(sig_task
, sig_thread
, 1);
1881 /* If successful return with ast set */
1882 kret
= get_signalthread(sig_proc
, signum
, &sig_thread
);
1884 if (kret
!= KERN_SUCCESS
) {
1887 #endif /* SIGNAL_DEBUG */
1891 uth
= get_bsdthread_info(sig_thread
);
1894 * If proc is traced, always give parent a chance.
1897 if ((flavor
& PSIG_VFORK
) == 0) {
1898 if (sig_proc
->p_lflag
& P_LTRACED
)
1902 * If the signal is being ignored,
1903 * then we forget about it immediately.
1904 * (Note: we don't set SIGCONT in p_sigignore,
1905 * and if it is set to SIG_IGN,
1906 * action will be SIG_DFL here.)
1908 if (sig_proc
->p_sigignore
& mask
)
1910 if (uth
->uu_sigwait
& mask
)
1911 action
= KERN_SIG_WAIT
;
1912 else if (uth
->uu_sigmask
& mask
)
1913 action
= KERN_SIG_HOLD
;
1914 else if (sig_proc
->p_sigcatch
& mask
)
1915 action
= KERN_SIG_CATCH
;
1921 proc_lock(sig_proc
);
1923 if (sig_proc
->p_nice
> NZERO
&& action
== SIG_DFL
&& (prop
& SA_KILL
) &&
1924 (sig_proc
->p_lflag
& P_LTRACED
) == 0)
1925 sig_proc
->p_nice
= NZERO
;
1928 uth
->uu_siglist
&= ~stopsigmask
;
1930 if (prop
& SA_STOP
) {
1933 * If sending a tty stop signal to a member of an orphaned
1934 * process group, discard the signal here if the action
1935 * is default; don't stop the process below if sleeping,
1936 * and don't clear any pending SIGCONT.
1938 proc_unlock(sig_proc
);
1939 pg
= proc_pgrp(sig_proc
);
1940 if (prop
& SA_TTYSTOP
&& pg
->pg_jobc
== 0 &&
1941 action
== SIG_DFL
) {
1946 proc_lock(sig_proc
);
1947 uth
->uu_siglist
&= ~contsigmask
;
1950 uth
->uu_siglist
|= mask
;
1952 * Repost AST incase sigthread has processed
1953 * ast and missed signal post.
1955 if (action
== KERN_SIG_CATCH
)
1956 act_set_astbsd(sig_thread
);
1960 * Defer further processing for signals which are held,
1961 * except that stopped processes must be continued by SIGCONT.
1963 /* vfork will not go thru as action is SIG_DFL */
1964 if ((action
== KERN_SIG_HOLD
) && ((prop
& SA_CONT
) == 0 || sig_proc
->p_stat
!= SSTOP
)) {
1965 proc_unlock(sig_proc
);
1969 * SIGKILL priority twiddling moved here from above because
1970 * it needs sig_thread. Could merge it into large switch
1971 * below if we didn't care about priority for tracing
1972 * as SIGKILL's action is always SIG_DFL.
1974 if ((signum
== SIGKILL
) && (sig_proc
->p_nice
> NZERO
)) {
1975 sig_proc
->p_nice
= NZERO
;
1979 * Process is traced - wake it up (if not already
1980 * stopped) so that it can discover the signal in
1981 * issig() and stop for the parent.
1983 if (sig_proc
->p_lflag
& P_LTRACED
) {
1984 if (sig_proc
->p_stat
!= SSTOP
)
1987 proc_unlock(sig_proc
);
1991 if ((flavor
& PSIG_VFORK
) != 0)
1994 if (action
== KERN_SIG_WAIT
) {
1997 * DTrace proc signal-clear returns a siginfo_t. Collect the needed info.
1999 r_uid
= kauth_getruid(); /* per thread credential; protected by our thread context */
2001 bzero((caddr_t
)&(uth
->t_dtrace_siginfo
), sizeof(uth
->t_dtrace_siginfo
));
2003 uth
->t_dtrace_siginfo
.si_signo
= signum
;
2004 uth
->t_dtrace_siginfo
.si_pid
= current_proc()->p_pid
;
2005 uth
->t_dtrace_siginfo
.si_status
= W_EXITCODE(signum
, 0);
2006 uth
->t_dtrace_siginfo
.si_uid
= r_uid
;
2007 uth
->t_dtrace_siginfo
.si_code
= 0;
2009 uth
->uu_sigwait
= mask
;
2010 uth
->uu_siglist
&= ~mask
;
2011 wakeup(&uth
->uu_sigwait
);
2012 /* if it is SIGCONT resume whole process */
2013 if (prop
& SA_CONT
) {
2014 OSBitOrAtomic(P_CONTINUED
, &sig_proc
->p_flag
);
2015 sig_proc
->p_contproc
= current_proc()->p_pid
;
2017 proc_unlock(sig_proc
);
2018 (void) task_resume_internal(sig_task
);
2021 proc_unlock(sig_proc
);
2025 if (action
!= SIG_DFL
) {
2027 * User wants to catch the signal.
2028 * Wake up the thread, but don't un-suspend it
2029 * (except for SIGCONT).
2031 if (prop
& SA_CONT
) {
2032 OSBitOrAtomic(P_CONTINUED
, &sig_proc
->p_flag
);
2033 proc_unlock(sig_proc
);
2034 (void) task_resume_internal(sig_task
);
2035 proc_lock(sig_proc
);
2036 sig_proc
->p_stat
= SRUN
;
2037 } else if (sig_proc
->p_stat
== SSTOP
) {
2038 proc_unlock(sig_proc
);
2042 * Fill out siginfo structure information to pass to the
2043 * signalled process/thread sigaction handler, when it
2044 * wakes up. si_code is 0 because this is an ordinary
2045 * signal, not a SIGCHLD, and so si_status is the signal
2046 * number itself, instead of the child process exit status.
2047 * We shift this left because it will be shifted right before
2048 * it is passed to user space. kind of ugly to use W_EXITCODE
2049 * this way, but it beats defining a new macro.
2051 * Note: Avoid the SIGCHLD recursion case!
2053 if (signum
!= SIGCHLD
) {
2054 proc_unlock(sig_proc
);
2055 r_uid
= kauth_getruid();
2056 proc_lock(sig_proc
);
2058 sig_proc
->si_pid
= current_proc()->p_pid
;
2059 sig_proc
->si_status
= W_EXITCODE(signum
, 0);
2060 sig_proc
->si_uid
= r_uid
;
2061 sig_proc
->si_code
= 0;
2066 /* Default action - varies */
2067 if (mask
& stopsigmask
) {
2069 * These are the signals which by default
2072 * Don't clog system with children of init
2073 * stopped from the keyboard.
2075 if (!(prop
& SA_STOP
) && sig_proc
->p_pptr
== initproc
) {
2076 proc_unlock(sig_proc
);
2077 psignal_locked(sig_proc
, SIGKILL
);
2078 proc_lock(sig_proc
);
2079 uth
->uu_siglist
&= ~mask
;
2080 proc_unlock(sig_proc
);
2086 * if task hasn't already been stopped by
2089 uth
->uu_siglist
&= ~mask
;
2090 if (sig_proc
->p_stat
!= SSTOP
) {
2091 sig_proc
->p_xstat
= signum
;
2092 sig_proc
->p_stat
= SSTOP
;
2093 OSBitAndAtomic(~((uint32_t)P_CONTINUED
), &sig_proc
->p_flag
);
2094 sig_proc
->p_lflag
&= ~P_LWAITED
;
2095 proc_unlock(sig_proc
);
2097 pp
= proc_parentholdref(sig_proc
);
2099 if (( pp
!= PROC_NULL
) && ((pp
->p_flag
& P_NOCLDSTOP
) == 0)) {
2101 my_cred
= kauth_cred_proc_ref(sig_proc
);
2102 r_uid
= kauth_cred_getruid(my_cred
);
2103 kauth_cred_unref(&my_cred
);
2105 proc_lock(sig_proc
);
2106 pp
->si_pid
= sig_proc
->p_pid
;
2108 * POSIX: sigaction for a stopped child
2109 * when sent to the parent must set the
2110 * child's signal number into si_status.
2112 if (signum
!= SIGSTOP
)
2113 pp
->si_status
= WEXITSTATUS(sig_proc
->p_xstat
);
2115 pp
->si_status
= W_EXITCODE(signum
, signum
);
2116 pp
->si_code
= CLD_STOPPED
;
2118 proc_unlock(sig_proc
);
2120 psignal(pp
, SIGCHLD
);
2122 if (pp
!= PROC_NULL
)
2123 proc_parentdropref(pp
, 0);
2125 proc_unlock(sig_proc
);
2129 DTRACE_PROC3(signal__send
, thread_t
, sig_thread
, proc_t
, p
, int, signum
);
2132 * enters switch with sig_proc lock held but dropped when
2133 * gets out of switch
2137 * Signals ignored by default have been dealt
2138 * with already, since their bits are on in
2144 * Kill signal always sets process running and
2148 * Process will be running after 'run'
2150 sig_proc
->p_stat
= SRUN
;
2152 * In scenarios where suspend/resume are racing
2153 * the signal we are missing AST_BSD by the time
2154 * we get here, set again to avoid races. This
2155 * was the scenario with spindump enabled shutdowns.
2156 * We would need to cover this approp down the line.
2158 act_set_astbsd(sig_thread
);
2159 thread_abort(sig_thread
);
2160 proc_unlock(sig_proc
);
2166 * Let the process run. If it's sleeping on an
2167 * event, it remains so.
2169 OSBitOrAtomic(P_CONTINUED
, &sig_proc
->p_flag
);
2170 sig_proc
->p_contproc
= sig_proc
->p_pid
;
2172 proc_unlock(sig_proc
);
2173 (void) task_resume_internal(sig_task
);
2174 proc_lock(sig_proc
);
2176 * When processing a SIGCONT, we need to check
2177 * to see if there are signals pending that
2178 * were not delivered because we had been
2179 * previously stopped. If that's the case,
2180 * we need to thread_abort_safely() to trigger
2181 * interruption of the current system call to
2182 * cause their handlers to fire. If it's only
2183 * the SIGCONT, then don't wake up.
2185 if (((flavor
& (PSIG_VFORK
|PSIG_THREAD
)) == 0) && (((uth
->uu_siglist
& ~uth
->uu_sigmask
) & ~sig_proc
->p_sigignore
) & ~mask
)) {
2186 uth
->uu_siglist
&= ~mask
;
2187 sig_proc
->p_stat
= SRUN
;
2191 uth
->uu_siglist
&= ~mask
;
2192 sig_proc
->p_stat
= SRUN
;
2193 proc_unlock(sig_proc
);
2198 * A signal which has a default action of killing
2199 * the process, and for which there is no handler,
2200 * needs to act like SIGKILL
2202 if (((flavor
& (PSIG_VFORK
|PSIG_THREAD
)) == 0) && (action
== SIG_DFL
) && (prop
& SA_KILL
)) {
2203 sig_proc
->p_stat
= SRUN
;
2204 proc_unlock(sig_proc
);
2205 thread_abort(sig_thread
);
2210 * All other signals wake up the process, but don't
2213 if (sig_proc
->p_stat
== SSTOP
) {
2214 proc_unlock(sig_proc
);
2224 * If we're being traced (possibly because someone attached us
2225 * while we were stopped), check for a signal from the debugger.
2227 if (sig_proc
->p_stat
== SSTOP
) {
2228 if ((sig_proc
->p_lflag
& P_LTRACED
) != 0 && sig_proc
->p_xstat
!= 0)
2229 uth
->uu_siglist
|= sigmask(sig_proc
->p_xstat
);
2230 if ((flavor
& PSIG_VFORK
) != 0) {
2231 sig_proc
->p_stat
= SRUN
;
2233 proc_unlock(sig_proc
);
2236 * setrunnable(p) in BSD and
2237 * Wake up the thread if it is interruptible.
2239 sig_proc
->p_stat
= SRUN
;
2240 proc_unlock(sig_proc
);
2241 if ((flavor
& PSIG_VFORK
) == 0)
2242 thread_abort_safely(sig_thread
);
2245 if ((flavor
& PSIG_LOCKED
)== 0) {
2246 proc_signalend(sig_proc
, 0);
2251 psignal(proc_t p
, int signum
)
2253 psignal_internal(p
, NULL
, NULL
, 0, signum
);
2257 psignal_locked(proc_t p
, int signum
)
2259 psignal_internal(p
, NULL
, NULL
, PSIG_LOCKED
, signum
);
2263 psignal_vfork(proc_t p
, task_t new_task
, thread_t thread
, int signum
)
2265 psignal_internal(p
, new_task
, thread
, PSIG_VFORK
, signum
);
2269 psignal_uthread(thread_t thread
, int signum
)
2271 psignal_internal(PROC_NULL
, TASK_NULL
, thread
, PSIG_THREAD
, signum
);
2274 /* same as psignal(), but prefer delivery to 'thread' if possible */
2276 psignal_try_thread(proc_t p
, thread_t thread
, int signum
)
2278 psignal_internal(p
, NULL
, thread
, PSIG_TRY_THREAD
, signum
);
2282 * If the current process has received a signal (should be caught or cause
2283 * termination, should interrupt current syscall), return the signal number.
2284 * Stop signals with default action are processed immediately, then cleared;
2285 * they aren't returned. This is checked after each entry to the system for
2286 * a syscall or trap (though this can usually be done without calling issignal
2287 * by checking the pending signal masks in the CURSIG macro.) The normal call
2290 * while (signum = CURSIG(curproc))
2294 issignal_locked(proc_t p
)
2296 int signum
, mask
, prop
, sigbits
;
2298 struct uthread
* ut
;
2300 kauth_cred_t my_cred
;
2304 cur_act
= current_thread();
2307 if(rdebug_proc
&& (p
== rdebug_proc
)) {
2310 #endif /* SIGNAL_DEBUG */
2313 * Try to grab the signal lock.
2315 if (sig_try_locked(p
) <= 0) {
2319 proc_signalstart(p
, 1);
2321 ut
= get_bsdthread_info(cur_act
);
2323 sigbits
= ut
->uu_siglist
& ~ut
->uu_sigmask
;
2325 if (p
->p_lflag
& P_LPPWAIT
)
2326 sigbits
&= ~stopsigmask
;
2327 if (sigbits
== 0) { /* no signal to send */
2332 signum
= ffs((long)sigbits
);
2333 mask
= sigmask(signum
);
2334 prop
= sigprop
[signum
];
2337 * We should see pending but ignored signals
2338 * only if P_LTRACED was on when they were posted.
2340 if (mask
& p
->p_sigignore
&& (p
->p_lflag
& P_LTRACED
) == 0) {
2341 ut
->uu_siglist
&= ~mask
; /* take the signal! */
2344 if (p
->p_lflag
& P_LTRACED
&& (p
->p_lflag
& P_LPPWAIT
) == 0) {
2347 * If traced, always stop, and stay
2348 * stopped until released by the debugger.
2350 /* ptrace debugging */
2351 p
->p_xstat
= signum
;
2353 if (p
->p_lflag
& P_LSIGEXC
) {
2355 p
->sigwait_thread
= cur_act
;
2357 OSBitAndAtomic(~((uint32_t)P_CONTINUED
), &p
->p_flag
);
2358 p
->p_lflag
&= ~P_LWAITED
;
2359 ut
->uu_siglist
&= ~mask
; /* clear the old signal */
2360 proc_signalend(p
, 1);
2362 do_bsdexception(EXC_SOFTWARE
, EXC_SOFT_SIGNAL
, signum
);
2364 proc_signalstart(p
, 1);
2367 my_cred
= kauth_cred_proc_ref(p
);
2368 r_uid
= kauth_cred_getruid(my_cred
);
2369 kauth_cred_unref(&my_cred
);
2371 pp
= proc_parentholdref(p
);
2372 if (pp
!= PROC_NULL
) {
2375 pp
->si_pid
= p
->p_pid
;
2376 pp
->si_status
= p
->p_xstat
;
2377 pp
->si_code
= CLD_TRAPPED
;
2384 * XXX Have to really stop for debuggers;
2385 * XXX stop() doesn't do the right thing.
2388 task_suspend_internal(task
);
2392 p
->sigwait_thread
= cur_act
;
2394 OSBitAndAtomic(~((uint32_t)P_CONTINUED
), &p
->p_flag
);
2395 p
->p_lflag
&= ~P_LWAITED
;
2396 ut
->uu_siglist
&= ~mask
; /* clear the old signal */
2398 proc_signalend(p
, 1);
2401 if (pp
!= PROC_NULL
) {
2402 psignal(pp
, SIGCHLD
);
2404 wakeup((caddr_t
)pp
);
2405 proc_parentdropref(pp
, 1);
2409 assert_wait((caddr_t
)&p
->sigwait
, (THREAD_INTERRUPTIBLE
));
2410 thread_block(THREAD_CONTINUE_NULL
);
2412 proc_signalstart(p
, 1);
2416 p
->sigwait_thread
= NULL
;
2417 wakeup((caddr_t
)&p
->sigwait_thread
);
2420 * This code is to detect when gdb is killed
2421 * even as the traced program is attached.
2422 * pgsignal would get the SIGKILL to traced program
2423 * That's what we are trying to see (I hope)
2425 if (ut
->uu_siglist
& sigmask(SIGKILL
)) {
2427 * Wait event may still be outstanding;
2428 * clear it, since sig_lock_to_exit will
2431 clear_wait(current_thread(), THREAD_INTERRUPTED
);
2432 sig_lock_to_exit(p
);
2434 * Since this thread will be resumed
2435 * to allow the current syscall to
2436 * be completed, must save u_qsave
2437 * before calling exit(). (Since exit()
2438 * calls closef() which can trash u_qsave.)
2440 proc_signalend(p
, 1);
2442 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC
, BSD_PROC_FRCEXIT
) | DBG_FUNC_NONE
,
2443 p
->p_pid
, W_EXITCODE(0, SIGKILL
), 2, 0, 0);
2444 exit1(p
, W_EXITCODE(0, SIGKILL
), (int *)NULL
);
2450 * We may have to quit
2452 if (thread_should_abort(current_thread())) {
2457 * If parent wants us to take the signal,
2458 * then it will leave it in p->p_xstat;
2459 * otherwise we just look for signals again.
2461 signum
= p
->p_xstat
;
2465 * Put the new signal into p_siglist. If the
2466 * signal is being masked, look for other signals.
2468 mask
= sigmask(signum
);
2469 ut
->uu_siglist
|= mask
;
2470 if (ut
->uu_sigmask
& mask
)
2475 * Decide whether the signal should be returned.
2476 * Return the signal's number, or fall through
2477 * to clear it from the pending mask.
2480 switch ((long)p
->p_sigacts
->ps_sigact
[signum
]) {
2484 * If there is a pending stop signal to process
2485 * with default action, stop here,
2486 * then clear the signal. However,
2487 * if process is member of an orphaned
2488 * process group, ignore tty stop signals.
2490 if (prop
& SA_STOP
) {
2495 if (p
->p_lflag
& P_LTRACED
||
2496 (pg
->pg_jobc
== 0 &&
2497 prop
& SA_TTYSTOP
)) {
2500 break; /* == ignore */
2503 if (p
->p_stat
!= SSTOP
) {
2505 p
->p_xstat
= signum
;
2508 p
->p_lflag
&= ~P_LWAITED
;
2511 pp
= proc_parentholdref(p
);
2513 if ((pp
!= PROC_NULL
) && ((pp
->p_flag
& P_NOCLDSTOP
) == 0)) {
2514 my_cred
= kauth_cred_proc_ref(p
);
2515 r_uid
= kauth_cred_getruid(my_cred
);
2516 kauth_cred_unref(&my_cred
);
2519 pp
->si_pid
= p
->p_pid
;
2520 pp
->si_status
= WEXITSTATUS(p
->p_xstat
);
2521 pp
->si_code
= CLD_STOPPED
;
2525 psignal(pp
, SIGCHLD
);
2527 if (pp
!= PROC_NULL
)
2528 proc_parentdropref(pp
, 0);
2532 } else if (prop
& SA_IGNORE
) {
2534 * Except for SIGCONT, shouldn't get here.
2535 * Default action is to ignore; drop it.
2537 break; /* == ignore */
2539 ut
->uu_siglist
&= ~mask
; /* take the signal! */
2549 * Masking above should prevent us ever trying
2550 * to take action on an ignored signal other
2551 * than SIGCONT, unless process is traced.
2553 if ((prop
& SA_CONT
) == 0 &&
2554 (p
->p_lflag
& P_LTRACED
) == 0)
2555 printf("issignal\n");
2556 break; /* == ignore */
2560 * This signal has an action, let
2561 * postsig() process it.
2563 ut
->uu_siglist
&= ~mask
; /* take the signal! */
2567 ut
->uu_siglist
&= ~mask
; /* take the signal! */
2571 proc_signalend(p
, 1);
2575 /* called from _sleep */
2579 int signum
, mask
, prop
, sigbits
;
2581 struct uthread
* ut
;
2585 cur_act
= current_thread();
2587 ut
= get_bsdthread_info(cur_act
);
2589 if (ut
->uu_siglist
== 0)
2592 if (((ut
->uu_siglist
& ~ut
->uu_sigmask
) == 0) && ((p
->p_lflag
& P_LTRACED
) == 0))
2595 sigbits
= ut
->uu_siglist
& ~ut
->uu_sigmask
;
2598 if (p
->p_lflag
& P_LPPWAIT
)
2599 sigbits
&= ~stopsigmask
;
2600 if (sigbits
== 0) { /* no signal to send */
2604 signum
= ffs((long)sigbits
);
2605 mask
= sigmask(signum
);
2606 prop
= sigprop
[signum
];
2607 sigbits
&= ~mask
; /* take the signal out */
2610 * We should see pending but ignored signals
2611 * only if P_LTRACED was on when they were posted.
2613 if (mask
& p
->p_sigignore
&& (p
->p_lflag
& P_LTRACED
) == 0) {
2617 if (p
->p_lflag
& P_LTRACED
&& (p
->p_lflag
& P_LPPWAIT
) == 0) {
2622 * Decide whether the signal should be returned.
2623 * Return the signal's number, or fall through
2624 * to clear it from the pending mask.
2627 switch ((long)p
->p_sigacts
->ps_sigact
[signum
]) {
2631 * If there is a pending stop signal to process
2632 * with default action, stop here,
2633 * then clear the signal. However,
2634 * if process is member of an orphaned
2635 * process group, ignore tty stop signals.
2637 if (prop
& SA_STOP
) {
2642 if (p
->p_lflag
& P_LTRACED
||
2643 (pg
->pg_jobc
== 0 &&
2644 prop
& SA_TTYSTOP
)) {
2646 break; /* == ignore */
2651 } else if (prop
& SA_IGNORE
) {
2653 * Except for SIGCONT, shouldn't get here.
2654 * Default action is to ignore; drop it.
2656 break; /* == ignore */
2664 * Masking above should prevent us ever trying
2665 * to take action on an ignored signal other
2666 * than SIGCONT, unless process is traced.
2668 if ((prop
& SA_CONT
) == 0 &&
2669 (p
->p_lflag
& P_LTRACED
) == 0)
2670 printf("issignal\n");
2671 break; /* == ignore */
2675 * This signal has an action, let
2676 * postsig() process it.
2685 * Put the argument process into the stopped state and notify the parent
2686 * via wakeup. Signals are handled elsewhere. The process must not be
2690 stop(proc_t p
, proc_t parent
)
2692 OSBitAndAtomic(~((uint32_t)P_CONTINUED
), &p
->p_flag
);
2693 if ((parent
!= PROC_NULL
) && (parent
->p_stat
!= SSTOP
)) {
2695 wakeup((caddr_t
)parent
);
2698 (void) task_suspend_internal(p
->task
);
2702 * Take the action for the specified signal
2703 * from the current set of pending signals.
2706 postsig_locked(int signum
)
2708 proc_t p
= current_proc();
2709 struct sigacts
*ps
= p
->p_sigacts
;
2710 user_addr_t catcher
;
2712 int mask
, returnmask
;
2713 struct uthread
* ut
;
2719 * This must be called on master cpu
2721 if (cpu_number() != master_cpu
)
2722 panic("psig not on master");
2726 * Try to grab the signal lock.
2728 if (sig_try_locked(p
) <= 0) {
2732 proc_signalstart(p
, 1);
2734 ut
= (struct uthread
*)get_bsdthread_info(current_thread());
2735 mask
= sigmask(signum
);
2736 ut
->uu_siglist
&= ~mask
;
2737 catcher
= ps
->ps_sigact
[signum
];
2738 if (catcher
== SIG_DFL
) {
2740 * Default catcher, where the default is to kill
2741 * the process. (Other cases were ignored above.)
2743 sig_lock_to_exit(p
);
2744 p
->p_acflag
|= AXSIG
;
2745 if (sigprop
[signum
] & SA_CORE
) {
2746 p
->p_sigacts
->ps_sig
= signum
;
2747 proc_signalend(p
, 1);
2749 if (coredump(p
, 0, 0) == 0)
2750 signum
|= WCOREFLAG
;
2752 proc_signalend(p
, 1);
2757 bzero((caddr_t
)&(ut
->t_dtrace_siginfo
), sizeof(ut
->t_dtrace_siginfo
));
2759 ut
->t_dtrace_siginfo
.si_signo
= signum
;
2760 ut
->t_dtrace_siginfo
.si_pid
= p
->si_pid
;
2761 ut
->t_dtrace_siginfo
.si_uid
= p
->si_uid
;
2762 ut
->t_dtrace_siginfo
.si_status
= WEXITSTATUS(p
->si_status
);
2764 /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
2766 case SIGILL
: case SIGBUS
: case SIGSEGV
: case SIGFPE
: case SIGTRAP
:
2767 DTRACE_PROC2(fault
, int, (int)(ut
->uu_code
), siginfo_t
*, &(ut
->t_dtrace_siginfo
));
2774 DTRACE_PROC3(signal__handle
, int, signum
, siginfo_t
*, &(ut
->t_dtrace_siginfo
),
2775 void (*)(void), SIG_DFL
);
2778 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC
, BSD_PROC_FRCEXIT
) | DBG_FUNC_NONE
,
2779 p
->p_pid
, W_EXITCODE(0, signum
), 3, 0, 0);
2780 exit1(p
, W_EXITCODE(0, signum
), (int *)NULL
);
2785 * If we get here, the signal must be caught.
2788 if (catcher
== SIG_IGN
|| (ut
->uu_sigmask
& mask
))
2790 "postsig: processing masked or ignored signal\n");
2794 * Set the new mask value and also defer further
2795 * occurrences of this signal.
2797 * Special case: user has done a sigpause. Here the
2798 * current mask is not of interest, but rather the
2799 * mask from before the sigpause is what we want
2800 * restored after the signal processing is completed.
2802 if (ut
->uu_flag
& UT_SAS_OLDMASK
) {
2803 returnmask
= ut
->uu_oldmask
;
2804 ut
->uu_flag
&= ~UT_SAS_OLDMASK
;
2807 returnmask
= ut
->uu_sigmask
;
2808 ut
->uu_sigmask
|= ps
->ps_catchmask
[signum
];
2809 if ((ps
->ps_signodefer
& mask
) == 0)
2810 ut
->uu_sigmask
|= mask
;
2811 if ((signum
!= SIGILL
) && (signum
!= SIGTRAP
) && (ps
->ps_sigreset
& mask
)) {
2812 if ((signum
!= SIGCONT
) && (sigprop
[signum
] & SA_IGNORE
))
2813 p
->p_sigignore
|= mask
;
2814 ps
->ps_sigact
[signum
] = SIG_DFL
;
2815 ps
->ps_siginfo
&= ~mask
;
2816 ps
->ps_signodefer
&= ~mask
;
2819 if (ps
->ps_sig
!= signum
) {
2825 OSIncrementAtomicLong(&p
->p_stats
->p_ru
.ru_nsignals
);
2826 sendsig(p
, catcher
, signum
, returnmask
, code
);
2828 proc_signalend(p
, 1);
2832 * Attach a signal knote to the list of knotes for this process.
2834 * Signal knotes share the knote list with proc knotes. This
2835 * could be avoided by using a signal-specific knote list, but
2836 * probably isn't worth the trouble.
2840 filt_sigattach(struct knote
*kn
)
2842 proc_t p
= current_proc(); /* can attach only to oneself */
2846 kn
->kn_ptr
.p_proc
= p
;
2847 kn
->kn_flags
|= EV_CLEAR
; /* automatically set */
2849 KNOTE_ATTACH(&p
->p_klist
, kn
);
2851 proc_klist_unlock();
2857 * remove the knote from the process list, if it hasn't already
2858 * been removed by exit processing.
2862 filt_sigdetach(struct knote
*kn
)
2864 proc_t p
= kn
->kn_ptr
.p_proc
;
2867 kn
->kn_ptr
.p_proc
= NULL
;
2868 KNOTE_DETACH(&p
->p_klist
, kn
);
2869 proc_klist_unlock();
2873 * Post an event to the signal filter. Because we share the same list
2874 * as process knotes, we have to filter out and handle only signal events.
2876 * We assume that we process fdfree() before we post the NOTE_EXIT for
2877 * a process during exit. Therefore, since signal filters can only be
2878 * set up "in-process", we should have already torn down the kqueue
2879 * hosting the EVFILT_SIGNAL knote and should never see NOTE_EXIT.
2882 filt_signal(struct knote
*kn
, long hint
)
2885 if (hint
& NOTE_SIGNAL
) {
2886 hint
&= ~NOTE_SIGNAL
;
2888 if (kn
->kn_id
== (unsigned int)hint
)
2890 } else if (hint
& NOTE_EXIT
) {
2891 panic("filt_signal: detected NOTE_EXIT event");
2894 return (kn
->kn_data
!= 0);
2898 filt_signaltouch(struct knote
*kn
, struct kevent_internal_s
*kev
, long type
)
2902 case EVENT_REGISTER
:
2903 kn
->kn_sfflags
= kev
->fflags
;
2904 kn
->kn_sdata
= kev
->data
;
2907 *kev
= kn
->kn_kevent
;
2908 if (kn
->kn_flags
& EV_CLEAR
) {
2914 panic("filt_signaltouch() - invalid type (%ld)", type
);
2917 proc_klist_unlock();
/*
 * bsd_ast -- BSD-side asynchronous system trap handler, run on the way back
 * to user mode for the current thread. Visible responsibilities here:
 *   - credit a deferred profiling tick (P_OWEUPC) to the user PC;
 *   - decrement the per-process virtual timers (user / prof) and post
 *     SIGVTALRM / SIGPROF when they expire;
 *   - enforce the CPU rlimit countdown and post SIGXCPU;
 *   - service pending DTrace actions (signal, stop, resume-pid);
 *   - deliver any pending catchable signals to the thread;
 *   - run one-time BSD initialization on the first pass.
 *
 * NOTE(review): many structural lines (braces, local declarations such as
 * pc/tv/microsecs/signum, the proc_lock around signal delivery, and the
 * function tail calling bsd_init()) were dropped by the extraction, and
 * "µsecs" below is mojibake for "&microsecs" (an HTML '&micro;' entity
 * substitution) -- restore from the pristine file; do not compile as-is.
 */
2921 bsd_ast(thread_t thread
)
2923 proc_t p
= current_proc();
2924 struct uthread
*ut
= get_bsdthread_info(thread
);
/* Guards the one-time bsd_init() call at the bottom of this function. */
2927 static int bsd_init_done
= 0;
/*
 * Deferred profiling: a tick was owed (P_OWEUPC) while profiling is on
 * (P_PROFIL); charge it to the user-mode PC, then atomically clear the flag.
 */
2932 if ((p
->p_flag
& P_OWEUPC
) && (p
->p_flag
& P_PROFIL
)) {
2933 pc
= get_useraddr();
2934 addupc_task(p
, pc
, 1);
2935 OSBitAndAtomic(~((uint32_t)P_OWEUPC
), &p
->p_flag
);
/*
 * ITIMER_VIRTUAL: pull the elapsed user time from the Mach task layer,
 * decrement the interval timer, and on expiry either re-arm it (interval
 * timer) or tear the task vtimer down, then post SIGVTALRM.
 */
2938 if (timerisset(&p
->p_vtimer_user
.it_value
)) {
2941 task_vtimer_update(p
->task
, TASK_VTIMER_USER
, µsecs
);
2943 if (!itimerdecr(p
, &p
->p_vtimer_user
, microsecs
)) {
2944 if (timerisset(&p
->p_vtimer_user
.it_value
))
2945 task_vtimer_set(p
->task
, TASK_VTIMER_USER
);
2947 task_vtimer_clear(p
->task
, TASK_VTIMER_USER
);
2949 psignal_try_thread(p
, thread
, SIGVTALRM
);
/* ITIMER_PROF: same pattern as above, expiring into SIGPROF. */
2953 if (timerisset(&p
->p_vtimer_prof
.it_value
)) {
2956 task_vtimer_update(p
->task
, TASK_VTIMER_PROF
, µsecs
);
2958 if (!itimerdecr(p
, &p
->p_vtimer_prof
, microsecs
)) {
2959 if (timerisset(&p
->p_vtimer_prof
.it_value
))
2960 task_vtimer_set(p
->task
, TASK_VTIMER_PROF
);
2962 task_vtimer_clear(p
->task
, TASK_VTIMER_PROF
);
2964 psignal_try_thread(p
, thread
, SIGPROF
);
/*
 * CPU resource limit: subtract consumed CPU time from the remaining
 * p_rlim_cpu budget; when the budget is exhausted, clear the vtimer and
 * post SIGXCPU.
 */
2968 if (timerisset(&p
->p_rlim_cpu
)) {
2971 task_vtimer_update(p
->task
, TASK_VTIMER_RLIM
, (uint32_t *) &tv
.tv_usec
);
2974 if (p
->p_rlim_cpu
.tv_sec
> 0 || p
->p_rlim_cpu
.tv_usec
> tv
.tv_usec
) {
/* Budget remains: deduct what was just consumed. */
2976 timersub(&p
->p_rlim_cpu
, &tv
, &p
->p_rlim_cpu
);
/* Budget exhausted: stop accounting and signal the process. */
2980 timerclear(&p
->p_rlim_cpu
);
2983 task_vtimer_clear(p
->task
, TASK_VTIMER_RLIM
);
2985 psignal_try_thread(p
, thread
, SIGXCPU
);
/* DTrace pended a signal for this thread: consume the slot and post it. */
2990 if (ut
->t_dtrace_sig
) {
2991 uint8_t dt_action_sig
= ut
->t_dtrace_sig
;
2992 ut
->t_dtrace_sig
= 0;
2993 psignal(p
, dt_action_sig
);
/*
 * DTrace stop() action: mark the proc dtrace-stopped and suspend the
 * whole Mach task.
 */
2996 if (ut
->t_dtrace_stop
) {
2997 ut
->t_dtrace_stop
= 0;
2999 p
->p_dtrace_stop
= 1;
3001 (void)task_suspend_internal(p
->task
);
/*
 * DTrace resume(pid) action: look up the target, and only if it was
 * stopped by dtrace (p_dtrace_stop), clear that state and resume its task.
 */
3004 if (ut
->t_dtrace_resumepid
) {
3005 proc_t resumeproc
= proc_find(ut
->t_dtrace_resumepid
);
3006 ut
->t_dtrace_resumepid
= 0;
3007 if (resumeproc
!= PROC_NULL
) {
3008 proc_lock(resumeproc
);
3009 /* We only act on processes stopped by dtrace */
3010 if (resumeproc
->p_dtrace_stop
) {
3011 resumeproc
->p_dtrace_stop
= 0;
/* Drop the proc lock before resuming the task. */
3012 proc_unlock(resumeproc
);
3013 task_resume_internal(resumeproc
->task
);
3016 proc_unlock(resumeproc
);
/* Balance the proc_find() reference. */
3018 proc_rele(resumeproc
);
3022 #endif /* CONFIG_DTRACE */
/* Deliver every pending catchable signal to this thread, one at a time. */
3025 if (CHECK_SIGNALS(p
, current_thread(), ut
)) {
3026 while ( (signum
= issignal_locked(p
)) )
3027 postsig_locked(signum
);
/* First pass through the AST: run one-time BSD initialization. */
3031 if (!bsd_init_done
) {
3038 /* ptrace set runnable */
/*
 * pt_setrunnable -- make a ptrace-stopped process runnable again.
 * For a traced process (P_LTRACED) the stopped thread is parked on
 * p->sigwait; waking that channel releases it. The P_LSIGEXC check
 * distinguishes Mach-exception-based tracing from signal-based tracing
 * (see radar 5878479 referenced inline).
 *
 * NOTE(review): the locking calls, the sigwait flag manipulation, and the
 * body of the P_LSIGEXC branch are missing from this extraction -- consult
 * the full file for the complete sequence.
 */
3040 pt_setrunnable(proc_t p
)
/* Only traced processes have a stopped waiter to release. */
3046 if (p
->p_lflag
& P_LTRACED
) {
/* Wake the thread sleeping in sigwait (stopped by the tracer). */
3051 wakeup((caddr_t
)&(p
->sigwait
));
/* Signal-based tracing only; Mach-exception tracing takes another path. */
3052 if ((p
->p_lflag
& P_LSIGEXC
) == 0) { // 5878479
/*
 * NOTE(review): interior of an exception-raising helper whose signature was
 * lost in extraction (presumably do_bsdexception -- confirm). It packs two
 * Mach exception codes into a local array and forwards them, with the
 * exception type 'exc', to bsd_exception().
 */
3065 mach_exception_data_type_t codes
[EXCEPTION_CODE_MAX
];
/* Raise the BSD-level exception with exactly 2 code words. */
3069 return(bsd_exception(exc
, codes
, 2));
/*
 * proc_pendingsignals -- compute the set of pending, unmasked, un-ignored
 * signals for process p, restricted to 'mask'. For a vfork parent
 * (P_LINVFORK with an active vfork thread) only the acting thread is
 * examined; otherwise the uthread list is walked and the per-thread
 * pending sets are OR-ed together. A process in exit (P_LPEXIT) reports
 * nothing.
 *
 * NOTE(review): the proc locking, the 'bits'/'th' declarations, and the
 * return statements were dropped by the extraction.
 */
3073 proc_pendingsignals(proc_t p
, sigset_t mask
)
3075 struct uthread
* uth
;
3080 /* If the process is in proc exit return no signal info */
3081 if (p
->p_lflag
& P_LPEXIT
) {
/* vfork parent: only the vfork-acting thread can have deliverable signals. */
3085 if ((p
->p_lflag
& P_LINVFORK
) && p
->p_vforkact
) {
3087 uth
= (struct uthread
*)get_bsdthread_info(th
);
/* pending & ~blocked & ~ignored, filtered by the caller's mask. */
3089 bits
= (((uth
->uu_siglist
& ~uth
->uu_sigmask
) & ~p
->p_sigignore
) & mask
);
/* Normal case: union the deliverable sets across every thread. */
3095 TAILQ_FOREACH(uth
, &p
->p_uthlist
, uu_list
) {
3096 bits
|= (((uth
->uu_siglist
& ~uth
->uu_sigmask
) & ~p
->p_sigignore
) & mask
);
/*
 * thread_issignal -- deliverable-signal set for one specific thread 'th' of
 * process p: pending (uu_siglist) minus blocked (uu_sigmask) minus ignored
 * (p_sigignore), intersected with the caller's mask.
 *
 * NOTE(review): the 'bits' declaration, locking, and the return statement
 * are missing from this extraction.
 */
3104 thread_issignal(proc_t p
, thread_t th
, sigset_t mask
)
3106 struct uthread
* uth
;
/* Map the Mach thread to its BSD uthread to read per-thread signal state. */
3110 uth
= (struct uthread
*)get_bsdthread_info(th
);
/* pending & ~blocked & ~ignored, filtered by the caller's mask. */
3112 bits
= (((uth
->uu_siglist
& ~uth
->uu_sigmask
) & ~p
->p_sigignore
) & mask
);
3119 * Allow external reads of the sigprop array.
/*
 * hassigprop -- test whether signal 'sig' has property bits 'prop' set in
 * the static sigprop[] table (external accessor for that array, per the
 * comment above).
 */
3122 hassigprop(int sig
, int prop
)
3124 return (sigprop
[sig
] & prop
);
/*
 * pgsigio -- deliver SIGIO-style signal 'sig' to the owner recorded in a
 * pgid value: a negative pgid names a process group (signalled via
 * gsignal), a positive pgid names a single process (looked up with
 * proc_find). pgid == 0 delivers nothing.
 *
 * NOTE(review): the negative-pgid test, the psignal() call on the found
 * proc, and the proc_rele() are missing from this extraction.
 */
3128 pgsigio(pid_t pgid
, int sig
)
3130 proc_t p
= PROC_NULL
;
/* Process group: gsignal takes the (positive) group id. */
3133 gsignal(-(pgid
), sig
);
/* Single process: take a proc_find reference before signalling. */
3135 else if (pgid
> 0 && (p
= proc_find(pgid
)) != 0)
/*
 * proc_signalstart -- acquire the per-process signal "lock": serialize
 * signal delivery into process p. Panics if the calling thread already
 * holds it (self-deadlock), otherwise sleeps on &p->p_sigmask (under
 * p_mlock) until P_LINSIGNAL clears, then claims it and records the
 * holder thread.
 *
 * @param p       process whose signal state is being serialized
 * @param locked  caller already holds the proc lock (the conditional
 *                proc_lock/proc_unlock lines are missing from this
 *                extraction -- NOTE(review): verify against the full file)
 */
3142 proc_signalstart(proc_t p
, int locked
)
/* Re-entry by the same thread would deadlock below -- fail loudly. */
3147 if(p
->p_signalholder
== current_thread())
3148 panic("proc_signalstart: thread attempting to signal a process for which it holds the signal lock");
/* Wait until no other thread is mid-signal on this process. */
3151 while ((p
->p_lflag
& P_LINSIGNAL
) == P_LINSIGNAL
)
3152 msleep(&p
->p_sigmask
, &p
->p_mlock
, 0, "proc_signstart", NULL
);
/* Claim the signal lock and record ownership for the panic check above. */
3155 p
->p_lflag
|= P_LINSIGNAL
;
3156 p
->p_signalholder
= current_thread();
/*
 * proc_signalend -- release the per-process signal "lock" taken by
 * proc_signalstart(): clear P_LINSIGNAL, wake any waiters sleeping on
 * &p->p_sigmask, and clear the recorded holder.
 *
 * @param p       process whose signal lock is released
 * @param locked  caller already holds the proc lock (the conditional
 *                proc_lock/proc_unlock lines are missing from this
 *                extraction -- NOTE(review): verify against the full file)
 */
3162 proc_signalend(proc_t p
, int locked
)
3166 p
->p_lflag
&= ~P_LINSIGNAL
;
/* Wake threads blocked in proc_signalstart's msleep, if any. */
3168 if (p
->p_sigwaitcnt
> 0)
3169 wakeup(&p
->p_sigmask
);
3171 p
->p_signalholder
= NULL
;
/*
 * sig_lock_to_exit -- claim exclusive exit rights on process p for the
 * current thread: record it as p->exit_thread, then wait (task_wait, not
 * waiting on the caller's own thread via FALSE) for the task's other
 * threads to quiesce.
 *
 * NOTE(review): the proc unlock/relock around task_wait() is missing from
 * this extraction -- task_wait must not be called with the proc lock held;
 * verify against the full file.
 */
3177 sig_lock_to_exit(proc_t p
)
3179 thread_t self
= current_thread();
/* Mark this thread as the one performing the exit. */
3181 p
->exit_thread
= self
;
/* Wait for sibling threads; FALSE = do not wait for the current thread. */
3185 task_wait(p
->task
, FALSE
);
3191 sig_try_locked(proc_t p
)
3193 thread_t self
= current_thread();
3195 while (p
->sigwait
|| p
->exit_thread
) {
3196 if (p
->exit_thread
) {
3199 msleep((caddr_t
)&p
->sigwait_thread
, &p
->p_mlock
, PCATCH
| PDROP
, 0, 0);
3200 if (thread_should_abort(self
)) {
3202 * Terminate request - clean up.