2 * Copyright (c) 1995-2016 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 * Copyright (c) 1982, 1986, 1989, 1991, 1993
30 * The Regents of the University of California. All rights reserved.
31 * (c) UNIX System Laboratories, Inc.
32 * All or some portions of this file are derived from material licensed
33 * to the University of California by American Telephone and Telegraph
34 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
35 * the permission of UNIX System Laboratories, Inc.
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement:
47 * This product includes software developed by the University of
48 * California, Berkeley and its contributors.
49 * 4. Neither the name of the University nor the names of its contributors
50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission.
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94
68 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
69 * support for mandatory and extensible security protections. This notice
70 * is included in support of clause 2.2 (b) of the Apple Public License,
74 #define SIGPROP /* include signal properties table */
75 #include <sys/param.h>
76 #include <sys/resourcevar.h>
77 #include <sys/proc_internal.h>
78 #include <sys/kauth.h>
79 #include <sys/systm.h>
80 #include <sys/timeb.h>
81 #include <sys/times.h>
83 #include <sys/file_internal.h>
84 #include <sys/kernel.h>
86 #include <sys/signalvar.h>
87 #include <sys/syslog.h>
90 #include <sys/kdebug.h>
91 #include <sys/reason.h>
93 #include <sys/mount.h>
94 #include <sys/sysproto.h>
96 #include <security/audit/audit.h>
98 #include <kern/cpu_number.h>
101 #include <sys/user.h> /* for coredump */
102 #include <kern/ast.h> /* for APC support */
103 #include <kern/kalloc.h>
104 #include <kern/task.h> /* extern void *get_bsdtask_info(task_t); */
105 #include <kern/thread.h>
106 #include <kern/sched_prim.h>
107 #include <kern/thread_call.h>
108 #include <kern/policy_internal.h>
110 #include <mach/exception.h>
111 #include <mach/task.h>
112 #include <mach/thread_act.h>
113 #include <libkern/OSAtomic.h>
116 #include <sys/codesign.h>
117 #include <sys/random.h>
118 #include <libkern/section_keywords.h>
121 #include <security/mac_framework.h>
125 * Missing prototypes that Mach should export
129 extern int thread_enable_fpe(thread_t act
, int onoff
);
130 extern kern_return_t
get_signalact(task_t
, thread_t
*, int);
131 extern unsigned int get_useraddr(void);
132 extern boolean_t
task_did_exec(task_t task
);
133 extern boolean_t
task_is_exec_copy(task_t task
);
134 extern void vm_shared_region_reslide_stale(void);
140 extern void doexception(int exc
, mach_exception_code_t code
,
141 mach_exception_subcode_t sub
);
143 static void stop(proc_t
, proc_t
);
144 static int cansignal_nomac(proc_t
, kauth_cred_t
, proc_t
, int);
145 int cansignal(proc_t
, kauth_cred_t
, proc_t
, int);
146 int killpg1(proc_t
, int, int, int, int);
147 kern_return_t
do_bsdexception(int, int, int);
148 void __posix_sem_syscall_return(kern_return_t
);
149 char *proc_name_address(void *p
);
151 /* implementations in osfmk/kern/sync_sema.c. We do not want port.h in this scope, so void * them */
152 kern_return_t
semaphore_timedwait_signal_trap_internal(mach_port_name_t
, mach_port_name_t
, unsigned int, clock_res_t
, void (*)(kern_return_t
));
153 kern_return_t
semaphore_timedwait_trap_internal(mach_port_name_t
, unsigned int, clock_res_t
, void (*)(kern_return_t
));
154 kern_return_t
semaphore_wait_signal_trap_internal(mach_port_name_t
, mach_port_name_t
, void (*)(kern_return_t
));
155 kern_return_t
semaphore_wait_trap_internal(mach_port_name_t
, void (*)(kern_return_t
));
157 static int filt_sigattach(struct knote
*kn
, struct kevent_qos_s
*kev
);
158 static void filt_sigdetach(struct knote
*kn
);
159 static int filt_signal(struct knote
*kn
, long hint
);
160 static int filt_signaltouch(struct knote
*kn
, struct kevent_qos_s
*kev
);
161 static int filt_signalprocess(struct knote
*kn
, struct kevent_qos_s
*kev
);
163 SECURITY_READ_ONLY_EARLY(struct filterops
) sig_filtops
= {
164 .f_attach
= filt_sigattach
,
165 .f_detach
= filt_sigdetach
,
166 .f_event
= filt_signal
,
167 .f_touch
= filt_signaltouch
,
168 .f_process
= filt_signalprocess
,
171 /* structures and fns for killpg1 iteration callback and filters */
172 struct killpg1_filtargs
{
177 struct killpg1_iterargs
{
184 static int killpg1_allfilt(proc_t p
, void * arg
);
185 static int killpg1_pgrpfilt(proc_t p
, __unused
void * arg
);
186 static int killpg1_callback(proc_t p
, void * arg
);
188 static int pgsignal_filt(proc_t p
, void * arg
);
189 static int pgsignal_callback(proc_t p
, void * arg
);
190 static kern_return_t
get_signalthread(proc_t
, int, thread_t
*);
193 /* flags for psignal_internal */
194 #define PSIG_LOCKED 0x1
195 #define PSIG_VFORK 0x2
196 #define PSIG_THREAD 0x4
197 #define PSIG_TRY_THREAD 0x8
199 static os_reason_t
build_signal_reason(int signum
, const char *procname
);
200 static void psignal_internal(proc_t p
, task_t task
, thread_t thread
, int flavor
, int signum
, os_reason_t signal_reason
);
203 * NOTE: Source and target may *NOT* overlap! (target is smaller)
206 sigaltstack_kern_to_user32(struct kern_sigaltstack
*in
, struct user32_sigaltstack
*out
)
208 out
->ss_sp
= CAST_DOWN_EXPLICIT(user32_addr_t
, in
->ss_sp
);
209 out
->ss_size
= CAST_DOWN_EXPLICIT(user32_size_t
, in
->ss_size
);
210 out
->ss_flags
= in
->ss_flags
;
214 sigaltstack_kern_to_user64(struct kern_sigaltstack
*in
, struct user64_sigaltstack
*out
)
216 out
->ss_sp
= in
->ss_sp
;
217 out
->ss_size
= in
->ss_size
;
218 out
->ss_flags
= in
->ss_flags
;
222 * NOTE: Source and target are permitted to overlap! (source is smaller);
223 * this works because we copy fields in order from the end of the struct to
227 sigaltstack_user32_to_kern(struct user32_sigaltstack
*in
, struct kern_sigaltstack
*out
)
229 out
->ss_flags
= in
->ss_flags
;
230 out
->ss_size
= in
->ss_size
;
231 out
->ss_sp
= CAST_USER_ADDR_T(in
->ss_sp
);
234 sigaltstack_user64_to_kern(struct user64_sigaltstack
*in
, struct kern_sigaltstack
*out
)
236 out
->ss_flags
= in
->ss_flags
;
237 out
->ss_size
= (user_size_t
)in
->ss_size
;
238 out
->ss_sp
= (user_addr_t
)in
->ss_sp
;
242 sigaction_kern_to_user32(struct kern_sigaction
*in
, struct user32_sigaction
*out
)
244 /* This assumes 32 bit __sa_handler is of type sig_t */
245 out
->__sigaction_u
.__sa_handler
= CAST_DOWN_EXPLICIT(user32_addr_t
, in
->__sigaction_u
.__sa_handler
);
246 out
->sa_mask
= in
->sa_mask
;
247 out
->sa_flags
= in
->sa_flags
;
250 sigaction_kern_to_user64(struct kern_sigaction
*in
, struct user64_sigaction
*out
)
252 /* This assumes 32 bit __sa_handler is of type sig_t */
253 out
->__sigaction_u
.__sa_handler
= in
->__sigaction_u
.__sa_handler
;
254 out
->sa_mask
= in
->sa_mask
;
255 out
->sa_flags
= in
->sa_flags
;
259 __sigaction_user32_to_kern(struct __user32_sigaction
*in
, struct __kern_sigaction
*out
)
261 out
->__sigaction_u
.__sa_handler
= CAST_USER_ADDR_T(in
->__sigaction_u
.__sa_handler
);
262 out
->sa_tramp
= CAST_USER_ADDR_T(in
->sa_tramp
);
263 out
->sa_mask
= in
->sa_mask
;
264 out
->sa_flags
= in
->sa_flags
;
267 kr
= machine_thread_function_pointers_convert_from_user(current_thread(),
269 assert(kr
== KERN_SUCCESS
);
273 __sigaction_user64_to_kern(struct __user64_sigaction
*in
, struct __kern_sigaction
*out
)
275 out
->__sigaction_u
.__sa_handler
= (user_addr_t
)in
->__sigaction_u
.__sa_handler
;
276 out
->sa_tramp
= (user_addr_t
)in
->sa_tramp
;
277 out
->sa_mask
= in
->sa_mask
;
278 out
->sa_flags
= in
->sa_flags
;
281 kr
= machine_thread_function_pointers_convert_from_user(current_thread(),
283 assert(kr
== KERN_SUCCESS
);
287 void ram_printf(int);
289 unsigned int rdebug_proc
= 0;
293 printf("x is %d", x
);
295 #endif /* SIGNAL_DEBUG */
299 signal_setast(thread_t sig_actthread
)
301 act_set_astbsd(sig_actthread
);
305 cansignal_nomac(proc_t src
, kauth_cred_t uc_src
, proc_t dst
, int signum
)
307 /* you can signal yourself */
312 /* you can't send the init proc SIGKILL, even if root */
313 if (signum
== SIGKILL
&& dst
== initproc
) {
317 /* otherwise, root can always signal */
318 if (kauth_cred_issuser(uc_src
)) {
322 /* processes in the same session can send SIGCONT to each other */
324 struct session
*sess_src
= SESSION_NULL
;
325 struct session
*sess_dst
= SESSION_NULL
;
327 /* The session field is protected by the list lock. */
329 if (src
->p_pgrp
!= PGRP_NULL
) {
330 sess_src
= src
->p_pgrp
->pg_session
;
332 if (dst
->p_pgrp
!= PGRP_NULL
) {
333 sess_dst
= dst
->p_pgrp
->pg_session
;
337 /* allow SIGCONT within session and for processes without session */
338 if (signum
== SIGCONT
&& sess_src
== sess_dst
) {
343 /* the source process must be authorized to signal the target */
346 kauth_cred_t uc_dst
= NOCRED
, uc_ref
= NOCRED
;
348 uc_dst
= uc_ref
= kauth_cred_proc_ref(dst
);
351 * If the real or effective UID of the sender matches the real or saved
352 * UID of the target, allow the signal to be sent.
354 if (kauth_cred_getruid(uc_src
) == kauth_cred_getruid(uc_dst
) ||
355 kauth_cred_getruid(uc_src
) == kauth_cred_getsvuid(uc_dst
) ||
356 kauth_cred_getuid(uc_src
) == kauth_cred_getruid(uc_dst
) ||
357 kauth_cred_getuid(uc_src
) == kauth_cred_getsvuid(uc_dst
)) {
361 if (uc_ref
!= NOCRED
) {
362 kauth_cred_unref(&uc_ref
);
371 * Can process `src`, with ucred `uc_src`, send the signal `signum` to process
372 * `dst`? The ucred is referenced by the caller so internal fields can be used
376 cansignal(proc_t src
, kauth_cred_t uc_src
, proc_t dst
, int signum
)
379 if (mac_proc_check_signal(src
, dst
, signum
)) {
384 return cansignal_nomac(src
, uc_src
, dst
, signum
);
388 * <rdar://problem/21952708> Some signals can be restricted from being handled,
389 * forcing the default action for that signal. This behavior applies only to
390 * non-root (EUID != 0) processes, and is configured with the "sigrestrict=x"
393 * 0 (default): Disallow use of restricted signals. Trying to register a handler
394 * returns ENOTSUP, which userspace may use to take special action (e.g. abort).
395 * 1: As above, but return EINVAL. Restricted signals behave similarly to SIGKILL.
396 * 2: Usual POSIX semantics.
398 unsigned sigrestrict_arg
= 0;
402 sigrestrictmask(void)
404 if (kauth_getuid() != 0 && sigrestrict_arg
!= 2) {
405 return SIGRESTRICTMASK
;
411 signal_is_restricted(proc_t p
, int signum
)
413 if (sigmask(signum
) & sigrestrictmask()) {
414 if (sigrestrict_arg
== 0 &&
415 task_get_apptype(p
->task
) == TASK_APPTYPE_APP_DEFAULT
) {
427 signal_is_restricted(proc_t p
, int signum
)
433 #endif /* !PLATFORM_WatchOS */
441 * Notes: Uses current thread as a parameter to inform PPC to enable
442 * FPU exceptions via setsigvec(); this operation is not proxy
447 sigaction(proc_t p
, struct sigaction_args
*uap
, __unused
int32_t *retval
)
449 struct kern_sigaction vec
;
450 struct __kern_sigaction __vec
;
452 struct kern_sigaction
*sa
= &vec
;
453 struct sigacts
*ps
= p
->p_sigacts
;
457 uint32_t sigreturn_validation
= PS_SIGRETURN_VALIDATION_DEFAULT
;
459 signum
= uap
->signum
;
460 if (signum
<= 0 || signum
>= NSIG
||
461 signum
== SIGKILL
|| signum
== SIGSTOP
) {
466 if (IS_64BIT_PROCESS(p
)) {
467 struct __user64_sigaction __vec64
;
468 error
= copyin(uap
->nsa
, &__vec64
, sizeof(__vec64
));
469 __sigaction_user64_to_kern(&__vec64
, &__vec
);
471 struct __user32_sigaction __vec32
;
472 error
= copyin(uap
->nsa
, &__vec32
, sizeof(__vec32
));
473 __sigaction_user32_to_kern(&__vec32
, &__vec
);
479 sigreturn_validation
= (__vec
.sa_flags
& SA_VALIDATE_SIGRETURN_FROM_SIGTRAMP
) ?
480 PS_SIGRETURN_VALIDATION_ENABLED
: PS_SIGRETURN_VALIDATION_DISABLED
;
481 __vec
.sa_flags
&= SA_USERSPACE_MASK
; /* Only pass on valid sa_flags */
483 if ((__vec
.sa_flags
& SA_SIGINFO
) || __vec
.sa_handler
!= SIG_DFL
) {
484 if ((error
= signal_is_restricted(p
, signum
))) {
485 if (error
== ENOTSUP
) {
486 printf("%s(%d): denied attempt to register action for signal %d\n",
487 proc_name_address(p
), proc_pid(p
), signum
);
495 sa
->sa_handler
= ps
->ps_sigact
[signum
];
496 sa
->sa_mask
= ps
->ps_catchmask
[signum
];
497 bit
= sigmask(signum
);
499 if ((ps
->ps_sigonstack
& bit
) != 0) {
500 sa
->sa_flags
|= SA_ONSTACK
;
502 if ((ps
->ps_sigintr
& bit
) == 0) {
503 sa
->sa_flags
|= SA_RESTART
;
505 if (ps
->ps_siginfo
& bit
) {
506 sa
->sa_flags
|= SA_SIGINFO
;
508 if (ps
->ps_signodefer
& bit
) {
509 sa
->sa_flags
|= SA_NODEFER
;
511 if ((signum
== SIGCHLD
) && (p
->p_flag
& P_NOCLDSTOP
)) {
512 sa
->sa_flags
|= SA_NOCLDSTOP
;
514 if ((signum
== SIGCHLD
) && (p
->p_flag
& P_NOCLDWAIT
)) {
515 sa
->sa_flags
|= SA_NOCLDWAIT
;
518 if (IS_64BIT_PROCESS(p
)) {
519 struct user64_sigaction vec64
= {};
520 sigaction_kern_to_user64(sa
, &vec64
);
521 error
= copyout(&vec64
, uap
->osa
, sizeof(vec64
));
523 struct user32_sigaction vec32
= {};
524 sigaction_kern_to_user32(sa
, &vec32
);
525 error
= copyout(&vec32
, uap
->osa
, sizeof(vec32
));
533 uint32_t old_sigreturn_validation
= atomic_load_explicit(
534 &ps
->ps_sigreturn_validation
, memory_order_relaxed
);
535 if (old_sigreturn_validation
== PS_SIGRETURN_VALIDATION_DEFAULT
) {
536 atomic_compare_exchange_strong_explicit(&ps
->ps_sigreturn_validation
,
537 &old_sigreturn_validation
, sigreturn_validation
,
538 memory_order_relaxed
, memory_order_relaxed
);
540 error
= setsigvec(p
, current_thread(), signum
, &__vec
, FALSE
);
546 /* Routines to manipulate bits on all threads */
548 clear_procsiglist(proc_t p
, int bit
, boolean_t in_signalstart
)
550 struct uthread
* uth
;
554 if (!in_signalstart
) {
555 proc_signalstart(p
, 1);
558 if ((p
->p_lflag
& P_LINVFORK
) && p
->p_vforkact
) {
559 thact
= p
->p_vforkact
;
560 uth
= (struct uthread
*)get_bsdthread_info(thact
);
562 uth
->uu_siglist
&= ~bit
;
564 if (!in_signalstart
) {
565 proc_signalend(p
, 1);
571 TAILQ_FOREACH(uth
, &p
->p_uthlist
, uu_list
) {
572 uth
->uu_siglist
&= ~bit
;
574 p
->p_siglist
&= ~bit
;
575 if (!in_signalstart
) {
576 proc_signalend(p
, 1);
585 unblock_procsigmask(proc_t p
, int bit
)
587 struct uthread
* uth
;
591 proc_signalstart(p
, 1);
593 if ((p
->p_lflag
& P_LINVFORK
) && p
->p_vforkact
) {
594 thact
= p
->p_vforkact
;
595 uth
= (struct uthread
*)get_bsdthread_info(thact
);
597 uth
->uu_sigmask
&= ~bit
;
599 p
->p_sigmask
&= ~bit
;
600 proc_signalend(p
, 1);
604 TAILQ_FOREACH(uth
, &p
->p_uthlist
, uu_list
) {
605 uth
->uu_sigmask
&= ~bit
;
607 p
->p_sigmask
&= ~bit
;
609 proc_signalend(p
, 1);
615 block_procsigmask(proc_t p
, int bit
)
617 struct uthread
* uth
;
621 proc_signalstart(p
, 1);
623 if ((p
->p_lflag
& P_LINVFORK
) && p
->p_vforkact
) {
624 thact
= p
->p_vforkact
;
625 uth
= (struct uthread
*)get_bsdthread_info(thact
);
627 uth
->uu_sigmask
|= bit
;
630 proc_signalend(p
, 1);
634 TAILQ_FOREACH(uth
, &p
->p_uthlist
, uu_list
) {
635 uth
->uu_sigmask
|= bit
;
639 proc_signalend(p
, 1);
645 set_procsigmask(proc_t p
, int bit
)
647 struct uthread
* uth
;
651 proc_signalstart(p
, 1);
653 if ((p
->p_lflag
& P_LINVFORK
) && p
->p_vforkact
) {
654 thact
= p
->p_vforkact
;
655 uth
= (struct uthread
*)get_bsdthread_info(thact
);
657 uth
->uu_sigmask
= bit
;
660 proc_signalend(p
, 1);
664 TAILQ_FOREACH(uth
, &p
->p_uthlist
, uu_list
) {
665 uth
->uu_sigmask
= bit
;
668 proc_signalend(p
, 1);
674 /* XXX should be static? */
676 * Notes: The thread parameter is used in the PPC case to select the
677 * thread on which the floating point exception will be enabled
678 * or disabled. We can't simply take current_thread(), since
679 * this is called from posix_spawn() on the not currently running
680 * process/thread pair.
682 * We mark thread as unused to allow compilation without warning
683 * on non-PPC platforms.
686 setsigvec(proc_t p
, __unused thread_t thread
, int signum
, struct __kern_sigaction
*sa
, boolean_t in_sigstart
)
688 struct sigacts
*ps
= p
->p_sigacts
;
691 assert(signum
< NSIG
);
693 if ((signum
== SIGKILL
|| signum
== SIGSTOP
) &&
694 sa
->sa_handler
!= SIG_DFL
) {
697 bit
= sigmask(signum
);
699 * Change setting atomically.
701 ps
->ps_sigact
[signum
] = sa
->sa_handler
;
702 ps
->ps_trampact
[signum
] = sa
->sa_tramp
;
703 ps
->ps_catchmask
[signum
] = sa
->sa_mask
& ~sigcantmask
;
704 if (sa
->sa_flags
& SA_SIGINFO
) {
705 ps
->ps_siginfo
|= bit
;
707 ps
->ps_siginfo
&= ~bit
;
709 if ((sa
->sa_flags
& SA_RESTART
) == 0) {
710 ps
->ps_sigintr
|= bit
;
712 ps
->ps_sigintr
&= ~bit
;
714 if (sa
->sa_flags
& SA_ONSTACK
) {
715 ps
->ps_sigonstack
|= bit
;
717 ps
->ps_sigonstack
&= ~bit
;
719 if (sa
->sa_flags
& SA_RESETHAND
) {
720 ps
->ps_sigreset
|= bit
;
722 ps
->ps_sigreset
&= ~bit
;
724 if (sa
->sa_flags
& SA_NODEFER
) {
725 ps
->ps_signodefer
|= bit
;
727 ps
->ps_signodefer
&= ~bit
;
729 if (signum
== SIGCHLD
) {
730 if (sa
->sa_flags
& SA_NOCLDSTOP
) {
731 OSBitOrAtomic(P_NOCLDSTOP
, &p
->p_flag
);
733 OSBitAndAtomic(~((uint32_t)P_NOCLDSTOP
), &p
->p_flag
);
735 if ((sa
->sa_flags
& SA_NOCLDWAIT
) || (sa
->sa_handler
== SIG_IGN
)) {
736 OSBitOrAtomic(P_NOCLDWAIT
, &p
->p_flag
);
738 OSBitAndAtomic(~((uint32_t)P_NOCLDWAIT
), &p
->p_flag
);
743 * Set bit in p_sigignore for signals that are set to SIG_IGN,
744 * and for signals set to SIG_DFL where the default is to ignore.
745 * However, don't put SIGCONT in p_sigignore,
746 * as we have to restart the process.
748 if (sa
->sa_handler
== SIG_IGN
||
749 (sigprop
[signum
] & SA_IGNORE
&& sa
->sa_handler
== SIG_DFL
)) {
750 clear_procsiglist(p
, bit
, in_sigstart
);
751 if (signum
!= SIGCONT
) {
752 p
->p_sigignore
|= bit
; /* easier in psignal */
754 p
->p_sigcatch
&= ~bit
;
756 p
->p_sigignore
&= ~bit
;
757 if (sa
->sa_handler
== SIG_DFL
) {
758 p
->p_sigcatch
&= ~bit
;
760 p
->p_sigcatch
|= bit
;
767 * Initialize signal state for process 0;
768 * set to ignore signals that are ignored by default.
775 for (i
= 1; i
< NSIG
; i
++) {
776 if (sigprop
[i
] & SA_IGNORE
&& i
!= SIGCONT
) {
777 p
->p_sigignore
|= sigmask(i
);
783 * Reset signals for an exec of the specified process.
786 execsigs(proc_t p
, thread_t thread
)
788 struct sigacts
*ps
= p
->p_sigacts
;
792 ut
= (struct uthread
*)get_bsdthread_info(thread
);
795 * transfer saved signal states from the process
796 * back to the current thread.
798 * NOTE: We do this without the process locked,
799 * because we are guaranteed to be single-threaded
800 * by this point in exec and the p_siglist is
801 * only accessed by threads inside the process.
803 ut
->uu_siglist
|= p
->p_siglist
;
807 * Reset caught signals. Held signals remain held
808 * through p_sigmask (unless they were caught,
809 * and are now ignored by default).
811 while (p
->p_sigcatch
) {
812 nc
= ffs((unsigned int)p
->p_sigcatch
);
814 p
->p_sigcatch
&= ~mask
;
815 if (sigprop
[nc
] & SA_IGNORE
) {
817 p
->p_sigignore
|= mask
;
819 ut
->uu_siglist
&= ~mask
;
821 ps
->ps_sigact
[nc
] = SIG_DFL
;
824 atomic_store_explicit(&ps
->ps_sigreturn_validation
,
825 PS_SIGRETURN_VALIDATION_DEFAULT
, memory_order_relaxed
);
826 /* Generate random token value used to validate sigreturn arguments */
827 read_random(&ps
->ps_sigreturn_token
, sizeof(ps
->ps_sigreturn_token
));
830 * Reset stack state to the user stack.
831 * Clear set of signals caught on the signal stack.
834 ut
->uu_sigstk
.ss_flags
= SA_DISABLE
;
835 ut
->uu_sigstk
.ss_size
= 0;
836 ut
->uu_sigstk
.ss_sp
= USER_ADDR_NULL
;
837 ut
->uu_flag
&= ~UT_ALTSTACK
;
839 ps
->ps_sigonstack
= 0;
843 * Manipulate signal mask.
844 * Note that we receive new mask, not pointer,
845 * and return old mask as return value;
846 * the library stub does the rest.
849 sigprocmask(proc_t p
, struct sigprocmask_args
*uap
, __unused
int32_t *retval
)
852 sigset_t oldmask
, nmask
;
853 user_addr_t omask
= uap
->omask
;
856 ut
= (struct uthread
*)get_bsdthread_info(current_thread());
857 oldmask
= ut
->uu_sigmask
;
859 if (uap
->mask
== USER_ADDR_NULL
) {
860 /* just want old mask */
863 error
= copyin(uap
->mask
, &nmask
, sizeof(sigset_t
));
870 block_procsigmask(p
, (nmask
& ~sigcantmask
));
871 signal_setast(current_thread());
875 unblock_procsigmask(p
, (nmask
& ~sigcantmask
));
876 signal_setast(current_thread());
880 set_procsigmask(p
, (nmask
& ~sigcantmask
));
881 signal_setast(current_thread());
889 if (!error
&& omask
!= USER_ADDR_NULL
) {
890 copyout(&oldmask
, omask
, sizeof(sigset_t
));
896 sigpending(__unused proc_t p
, struct sigpending_args
*uap
, __unused
int32_t *retval
)
901 ut
= (struct uthread
*)get_bsdthread_info(current_thread());
902 pendlist
= ut
->uu_siglist
;
905 copyout(&pendlist
, uap
->osv
, sizeof(sigset_t
));
911 * Suspend process until signal, providing mask to be set
912 * in the meantime. Note nonstandard calling convention:
913 * libc stub passes mask, not pointer, to save a copyin.
917 sigcontinue(__unused
int error
)
919 // struct uthread *ut = get_bsdthread_info(current_thread());
920 unix_syscall_return(EINTR
);
924 sigsuspend(proc_t p
, struct sigsuspend_args
*uap
, int32_t *retval
)
926 __pthread_testcancel(1);
927 return sigsuspend_nocancel(p
, (struct sigsuspend_nocancel_args
*)uap
, retval
);
931 sigsuspend_nocancel(proc_t p
, struct sigsuspend_nocancel_args
*uap
, __unused
int32_t *retval
)
935 ut
= (struct uthread
*)get_bsdthread_info(current_thread());
938 * When returning from sigpause, we want
939 * the old mask to be restored after the
940 * signal handler has finished. Thus, we
941 * save it here and mark the sigacts structure
944 ut
->uu_oldmask
= ut
->uu_sigmask
;
945 ut
->uu_flag
|= UT_SAS_OLDMASK
;
946 ut
->uu_sigmask
= (uap
->mask
& ~sigcantmask
);
947 (void) tsleep0((caddr_t
) p
, PPAUSE
| PCATCH
, "pause", 0, sigcontinue
);
948 /* always return EINTR rather than ERESTART... */
954 __disable_threadsignal(__unused proc_t p
,
955 __unused
struct __disable_threadsignal_args
*uap
,
956 __unused
int32_t *retval
)
960 uth
= (struct uthread
*)get_bsdthread_info(current_thread());
962 /* No longer valid to have any signal delivered */
963 uth
->uu_flag
|= (UT_NO_SIGMASK
| UT_CANCELDISABLE
);
969 __pthread_testcancel(int presyscall
)
971 thread_t self
= current_thread();
972 struct uthread
* uthread
;
974 uthread
= (struct uthread
*)get_bsdthread_info(self
);
977 uthread
->uu_flag
&= ~UT_NOTCANCELPT
;
979 if ((uthread
->uu_flag
& (UT_CANCELDISABLE
| UT_CANCEL
| UT_CANCELED
)) == UT_CANCEL
) {
980 if (presyscall
!= 0) {
981 unix_syscall_return(EINTR
);
984 thread_abort_safely(self
);
992 __pthread_markcancel(__unused proc_t p
,
993 struct __pthread_markcancel_args
*uap
, __unused
int32_t *retval
)
995 thread_act_t target_act
;
999 target_act
= (thread_act_t
)port_name_to_thread(uap
->thread_port
,
1000 PORT_TO_THREAD_IN_CURRENT_TASK
);
1002 if (target_act
== THR_ACT_NULL
) {
1006 uth
= (struct uthread
*)get_bsdthread_info(target_act
);
1008 /* if the thread is in vfork do not cancel */
1009 if ((uth
->uu_flag
& (UT_VFORK
| UT_CANCEL
| UT_CANCELED
)) == 0) {
1010 uth
->uu_flag
|= (UT_CANCEL
| UT_NO_SIGMASK
);
1011 if (((uth
->uu_flag
& UT_NOTCANCELPT
) == 0)
1012 && ((uth
->uu_flag
& UT_CANCELDISABLE
) == 0)) {
1013 thread_abort_safely(target_act
);
1017 thread_deallocate(target_act
);
1021 /* if action == 0: return the cancellation state;
1022 * if marked for cancellation, make the thread canceled
1023 * if action = 1 ; Enable the cancel handling
1024 * if action = 2; Disable the cancel handling
1027 __pthread_canceled(__unused proc_t p
,
1028 struct __pthread_canceled_args
*uap
, __unused
int32_t *retval
)
1030 thread_act_t thread
;
1031 struct uthread
*uth
;
1032 int action
= uap
->action
;
1034 thread
= current_thread();
1035 uth
= (struct uthread
*)get_bsdthread_info(thread
);
1039 uth
->uu_flag
&= ~UT_CANCELDISABLE
;
1042 uth
->uu_flag
|= UT_CANCELDISABLE
;
1046 /* if the thread is in vfork do not cancel */
1047 if ((uth
->uu_flag
& (UT_CANCELDISABLE
| UT_CANCEL
| UT_CANCELED
)) == UT_CANCEL
) {
1048 uth
->uu_flag
&= ~UT_CANCEL
;
1049 uth
->uu_flag
|= (UT_CANCELED
| UT_NO_SIGMASK
);
1057 __attribute__((noreturn
))
1059 __posix_sem_syscall_return(kern_return_t kern_result
)
1063 if (kern_result
== KERN_SUCCESS
) {
1065 } else if (kern_result
== KERN_ABORTED
) {
1067 } else if (kern_result
== KERN_OPERATION_TIMED_OUT
) {
1072 unix_syscall_return(error
);
1073 /* does not return */
1076 #if OLD_SEMWAIT_SIGNAL
1078 * Returns: 0 Success
1082 * EFAULT if timespec is NULL
1085 __old_semwait_signal(proc_t p
, struct __old_semwait_signal_args
*uap
,
1088 __pthread_testcancel(0);
1089 return __old_semwait_signal_nocancel(p
, (struct __old_semwait_signal_nocancel_args
*)uap
, retval
);
1093 __old_semwait_signal_nocancel(proc_t p
, struct __old_semwait_signal_nocancel_args
*uap
,
1094 __unused
int32_t *retval
)
1096 kern_return_t kern_result
;
1098 mach_timespec_t then
;
1099 struct timespec now
;
1100 struct user_timespec ts
;
1101 boolean_t truncated_timeout
= FALSE
;
1104 if (IS_64BIT_PROCESS(p
)) {
1105 struct user64_timespec ts64
;
1106 error
= copyin(uap
->ts
, &ts64
, sizeof(ts64
));
1107 ts
.tv_sec
= (user_time_t
)ts64
.tv_sec
;
1108 ts
.tv_nsec
= (user_long_t
)ts64
.tv_nsec
;
1110 struct user32_timespec ts32
;
1111 error
= copyin(uap
->ts
, &ts32
, sizeof(ts32
));
1112 ts
.tv_sec
= ts32
.tv_sec
;
1113 ts
.tv_nsec
= ts32
.tv_nsec
;
1120 if ((ts
.tv_sec
& 0xFFFFFFFF00000000ULL
) != 0) {
1121 ts
.tv_sec
= 0xFFFFFFFF;
1123 truncated_timeout
= TRUE
;
1126 if (uap
->relative
) {
1127 then
.tv_sec
= (unsigned int)ts
.tv_sec
;
1128 then
.tv_nsec
= (clock_res_t
)ts
.tv_nsec
;
1132 /* if time has elapsed, set time to a null timespec to bail out right away */
1133 if (now
.tv_sec
== ts
.tv_sec
?
1134 now
.tv_nsec
> ts
.tv_nsec
:
1135 now
.tv_sec
> ts
.tv_sec
) {
1139 then
.tv_sec
= (unsigned int)(ts
.tv_sec
- now
.tv_sec
);
1140 then
.tv_nsec
= (clock_res_t
)(ts
.tv_nsec
- now
.tv_nsec
);
1141 if (then
.tv_nsec
< 0) {
1142 then
.tv_nsec
+= NSEC_PER_SEC
;
1148 if (uap
->mutex_sem
== 0) {
1149 kern_result
= semaphore_timedwait_trap_internal((mach_port_name_t
)uap
->cond_sem
, then
.tv_sec
, then
.tv_nsec
, __posix_sem_syscall_return
);
1151 kern_result
= semaphore_timedwait_signal_trap_internal(uap
->cond_sem
, uap
->mutex_sem
, then
.tv_sec
, then
.tv_nsec
, __posix_sem_syscall_return
);
1154 if (uap
->mutex_sem
== 0) {
1155 kern_result
= semaphore_wait_trap_internal(uap
->cond_sem
, __posix_sem_syscall_return
);
1157 kern_result
= semaphore_wait_signal_trap_internal(uap
->cond_sem
, uap
->mutex_sem
, __posix_sem_syscall_return
);
1161 if (kern_result
== KERN_SUCCESS
&& !truncated_timeout
) {
1163 } else if (kern_result
== KERN_SUCCESS
&& truncated_timeout
) {
1164 return EINTR
; /* simulate an exceptional condition because Mach doesn't support a longer timeout */
1165 } else if (kern_result
== KERN_ABORTED
) {
1167 } else if (kern_result
== KERN_OPERATION_TIMED_OUT
) {
1173 #endif /* OLD_SEMWAIT_SIGNAL*/
1176 * Returns: 0 Success
1180 * EFAULT if timespec is NULL
1183 __semwait_signal(proc_t p
, struct __semwait_signal_args
*uap
,
1186 __pthread_testcancel(0);
1187 return __semwait_signal_nocancel(p
, (struct __semwait_signal_nocancel_args
*)uap
, retval
);
1191 __semwait_signal_nocancel(__unused proc_t p
, struct __semwait_signal_nocancel_args
*uap
,
1192 __unused
int32_t *retval
)
1194 kern_return_t kern_result
;
1195 mach_timespec_t then
;
1196 struct timespec now
;
1197 struct user_timespec ts
;
1198 boolean_t truncated_timeout
= FALSE
;
1201 ts
.tv_sec
= (user_time_t
)uap
->tv_sec
;
1202 ts
.tv_nsec
= uap
->tv_nsec
;
1204 if ((ts
.tv_sec
& 0xFFFFFFFF00000000ULL
) != 0) {
1205 ts
.tv_sec
= 0xFFFFFFFF;
1207 truncated_timeout
= TRUE
;
1210 if (uap
->relative
) {
1211 then
.tv_sec
= (unsigned int)ts
.tv_sec
;
1212 then
.tv_nsec
= (clock_res_t
)ts
.tv_nsec
;
1216 /* if time has elapsed, set time to a null timespec to bail out right away */
1217 if (now
.tv_sec
== ts
.tv_sec
?
1218 now
.tv_nsec
> ts
.tv_nsec
:
1219 now
.tv_sec
> ts
.tv_sec
) {
1223 then
.tv_sec
= (unsigned int)(ts
.tv_sec
- now
.tv_sec
);
1224 then
.tv_nsec
= (clock_res_t
)(ts
.tv_nsec
- now
.tv_nsec
);
1225 if (then
.tv_nsec
< 0) {
1226 then
.tv_nsec
+= NSEC_PER_SEC
;
1232 if (uap
->mutex_sem
== 0) {
1233 kern_result
= semaphore_timedwait_trap_internal((mach_port_name_t
)uap
->cond_sem
, then
.tv_sec
, then
.tv_nsec
, __posix_sem_syscall_return
);
1235 kern_result
= semaphore_timedwait_signal_trap_internal(uap
->cond_sem
, uap
->mutex_sem
, then
.tv_sec
, then
.tv_nsec
, __posix_sem_syscall_return
);
1238 if (uap
->mutex_sem
== 0) {
1239 kern_result
= semaphore_wait_trap_internal(uap
->cond_sem
, __posix_sem_syscall_return
);
1241 kern_result
= semaphore_wait_signal_trap_internal(uap
->cond_sem
, uap
->mutex_sem
, __posix_sem_syscall_return
);
1245 if (kern_result
== KERN_SUCCESS
&& !truncated_timeout
) {
1247 } else if (kern_result
== KERN_SUCCESS
&& truncated_timeout
) {
1248 return EINTR
; /* simulate an exceptional condition because Mach doesn't support a longer timeout */
1249 } else if (kern_result
== KERN_ABORTED
) {
1251 } else if (kern_result
== KERN_OPERATION_TIMED_OUT
) {
/*
 * __pthread_kill: deliver signal `uap->sig` to the single thread named by
 * the Mach port `uap->thread_port` (pthread_kill(2) backend).
 * NOTE(review): this extract has elided lines (error returns / braces missing
 * between the visible statements) — compare against the full source.
 */
1260 __pthread_kill(__unused proc_t p
, struct __pthread_kill_args
*uap
,
1261 __unused
int32_t *retval
)
1263 thread_t target_act
;
1265 int signum
= uap
->sig
;
1266 struct uthread
*uth
;
/* Translate the userspace thread port name into a kernel thread reference. */
1268 target_act
= (thread_t
)port_name_to_thread(uap
->thread_port
,
1269 PORT_TO_THREAD_NONE
);
1271 if (target_act
== THREAD_NULL
) {
/* Validate the signal number before doing anything with the thread. */
1274 if ((u_int
)signum
>= NSIG
) {
1279 uth
= (struct uthread
*)get_bsdthread_info(target_act
);
/* Threads marked UT_NO_SIGMASK do not participate in signal delivery. */
1281 if (uth
->uu_flag
& UT_NO_SIGMASK
) {
/* Workqueue threads only accept pthread_kill when explicitly allowed. */
1286 if ((thread_get_tag(target_act
) & THREAD_TAG_WORKQUEUE
) && !uth
->uu_workq_pthread_kill_allowed
) {
1292 psignal_uthread(target_act
, signum
);
/* Drop the reference taken by port_name_to_thread(). */
1295 thread_deallocate(target_act
);
/*
 * __pthread_sigmask: per-thread sigprocmask. Reads the new mask from
 * `uap->set` (if non-NULL), applies it to the current uthread according to
 * the requested how-operation, and copies the previous mask out to
 * `uap->oset` (if non-NULL). sigcantmask (SIGKILL/SIGSTOP) can never be
 * blocked. NOTE(review): the switch on `uap->how` and several returns are
 * elided in this extract.
 */
1301 __pthread_sigmask(__unused proc_t p
, struct __pthread_sigmask_args
*uap
,
1302 __unused
int32_t *retval
)
1304 user_addr_t set
= uap
->set
;
1305 user_addr_t oset
= uap
->oset
;
1311 ut
= (struct uthread
*)get_bsdthread_info(current_thread());
/* Snapshot the old mask before any modification, for the oset copyout. */
1312 oldset
= ut
->uu_sigmask
;
1314 if (set
== USER_ADDR_NULL
) {
1315 /* Caller passed no new mask: only report the old mask. */
1319 error
= copyin(set
, &nset
, sizeof(sigset_t
));
/* SIG_BLOCK: add bits, never allowing the unmaskable signals. */
1326 ut
->uu_sigmask
|= (nset
& ~sigcantmask
);
/* SIG_UNBLOCK: clear bits and re-check for newly deliverable signals. */
1330 ut
->uu_sigmask
&= ~(nset
);
1331 signal_setast(current_thread());
/* SIG_SETMASK: replace the mask wholesale (minus unmaskable signals). */
1335 ut
->uu_sigmask
= (nset
& ~sigcantmask
);
1336 signal_setast(current_thread());
/* On success, return the previous mask to the caller if requested. */
1343 if (!error
&& oset
!= USER_ADDR_NULL
) {
1344 copyout(&oldset
, oset
, sizeof(sigset_t
));
/*
 * __sigwait: cancellable wrapper for sigwait(2).
 * Checks for a pending pthread cancellation, then forwards to the
 * non-cancellable implementation.
 * Returns: 0 on success; otherwise the error from __sigwait_nocancel().
 */
1357 __sigwait(proc_t p
, struct __sigwait_args
*uap
, int32_t *retval
)
1359 __pthread_testcancel(1);
1360 return __sigwait_nocancel(p
, (struct __sigwait_nocancel_args
*)uap
, retval
);
/*
 * __sigwait_nocancel: wait for one of the signals in the user-supplied set
 * to become pending, consume it, and report its number via `uap->sig`.
 * First scans all threads for an already-pending matching signal; otherwise
 * sleeps on uu_sigwait until psignal delivers one. NOTE(review): several
 * error-return lines and braces are elided in this extract.
 */
1364 __sigwait_nocancel(proc_t p
, struct __sigwait_nocancel_args
*uap
, __unused
int32_t *retval
)
1367 struct uthread
*uth
;
1374 ut
= (struct uthread
*)get_bsdthread_info(current_thread());
1376 if (uap
->set
== USER_ADDR_NULL
) {
1380 error
= copyin(uap
->set
, &mask
, sizeof(sigset_t
));
/* SIGKILL/SIGSTOP cannot be sigwait()ed for. */
1385 siglist
= (mask
& ~sigcantmask
);
/* Vfork parent borrowing a child context cannot sigwait. */
1392 if ((p
->p_lflag
& P_LINVFORK
) && p
->p_vforkact
) {
1396 proc_signalstart(p
, 1);
/* First pass: is one of the requested signals already pending on any thread? */
1397 TAILQ_FOREACH(uth
, &p
->p_uthlist
, uu_list
) {
1398 if ((sigw
= uth
->uu_siglist
& siglist
)) {
1402 proc_signalend(p
, 1);
1406 /* A matching signal was already pending on a thread — consume it below. */
/*
 * No pending match: block everything except the awaited set, remember the
 * old mask so it is restored after delivery, and sleep.
 */
1416 uth
= ut
; /* wait for it to be delivered to us */
1417 ut
->uu_oldmask
= ut
->uu_sigmask
;
1418 ut
->uu_flag
|= UT_SAS_OLDMASK
;
1419 if (siglist
== (sigset_t
)0) {
1423 /* SIGKILL and SIGSTOP are not maskable as well */
1424 ut
->uu_sigmask
= ~(siglist
| sigcantmask
);
1425 ut
->uu_sigwait
= siglist
;
1427 /* No Continuations for now */
1428 error
= msleep((caddr_t
)&ut
->uu_sigwait
, &p
->p_mlock
, PPAUSE
| PCATCH
, "pause", 0);
/* A restartable sleep is surfaced to the caller as an interrupt. */
1430 if (error
== ERESTART
) {
/* sigw is the signal psignal handed us; restore the pre-wait mask. */
1434 sigw
= (ut
->uu_sigwait
& siglist
);
1435 ut
->uu_sigmask
= ut
->uu_oldmask
;
1437 ut
->uu_flag
&= ~UT_SAS_OLDMASK
;
/* ffs() picks the lowest-numbered pending signal from the set. */
1441 signum
= ffs((unsigned int)sigw
);
1443 panic("sigwait with no signal wakeup");
1445 /* Clear the pending signal in the thread it was delivered */
1446 uth
->uu_siglist
&= ~(sigmask(signum
));
1449 DTRACE_PROC2(signal__clear
, int, signum
, siginfo_t
*, &(ut
->t_dtrace_siginfo
));
/* Report the consumed signal number back to userspace. */
1453 if (uap
->sig
!= USER_ADDR_NULL
) {
1454 error
= copyout(&signum
, uap
->sig
, sizeof(int));
/*
 * sigaltstack(2): query and/or install the per-thread alternate signal
 * stack. Old state (if `uap->oss` is set) is copied out in the 32/64-bit
 * user layout; the new state (if `uap->nss` is set) is validated and
 * installed. NOTE(review): several error returns and the copyout guard for
 * `uap->oss` are elided in this extract.
 */
1464 sigaltstack(__unused proc_t p
, struct sigaltstack_args
*uap
, __unused
int32_t *retval
)
1466 struct kern_sigaltstack ss
;
1467 struct kern_sigaltstack
*pstk
;
1469 struct uthread
*uth
;
1472 uth
= (struct uthread
*)get_bsdthread_info(current_thread());
1474 pstk
= &uth
->uu_sigstk
;
/* If no altstack was ever installed, report it as disabled. */
1475 if ((uth
->uu_flag
& UT_ALTSTACK
) == 0) {
1476 uth
->uu_sigstk
.ss_flags
|= SA_DISABLE
;
1478 onstack
= pstk
->ss_flags
& SA_ONSTACK
;
/* Copy the current stack state out in the caller's ABI layout. */
1480 if (IS_64BIT_PROCESS(p
)) {
1481 struct user64_sigaltstack ss64
= {};
1482 sigaltstack_kern_to_user64(pstk
, &ss64
);
1483 error
= copyout(&ss64
, uap
->oss
, sizeof(ss64
));
1485 struct user32_sigaltstack ss32
= {};
1486 sigaltstack_kern_to_user32(pstk
, &ss32
);
1487 error
= copyout(&ss32
, uap
->oss
, sizeof(ss32
));
/* No new stack supplied: we are done after the query. */
1493 if (uap
->nss
== USER_ADDR_NULL
) {
/* Copy the new stack descriptor in, again per caller ABI. */
1496 if (IS_64BIT_PROCESS(p
)) {
1497 struct user64_sigaltstack ss64
;
1498 error
= copyin(uap
->nss
, &ss64
, sizeof(ss64
));
1499 sigaltstack_user64_to_kern(&ss64
, &ss
);
1501 struct user32_sigaltstack ss32
;
1502 error
= copyin(uap
->nss
, &ss32
, sizeof(ss32
));
1503 sigaltstack_user32_to_kern(&ss32
, &ss
);
/* Only SA_DISABLE may be set by the caller; anything else is invalid. */
1508 if ((ss
.ss_flags
& ~SA_DISABLE
) != 0) {
1512 if (ss
.ss_flags
& SA_DISABLE
) {
1513 /* Cannot disable the altstack while a handler is running on it. */
1514 if (uth
->uu_sigstk
.ss_flags
& SA_ONSTACK
) {
1517 uth
->uu_flag
&= ~UT_ALTSTACK
;
1518 uth
->uu_sigstk
.ss_flags
= ss
.ss_flags
;
1524 /* The older stacksize was 8K, enforce that one so no compat problems */
1525 #define OLDMINSIGSTKSZ 8*1024
1526 if (ss
.ss_size
< OLDMINSIGSTKSZ
) {
/* Install the validated alternate stack on this thread. */
1529 uth
->uu_flag
|= UT_ALTSTACK
;
1530 uth
->uu_sigstk
= ss
;
/*
 * kill(2): send `uap->signum` to the target named by `uap->pid`:
 *   pid > 0  -> that single process
 *   pid == -1-> broadcast to all signallable processes
 *   pid == 0 -> the caller's process group
 *   pid < -1 -> the process group -pid
 * `uap->posix` selects POSIX error semantics in killpg1().
 * NOTE(review): the pid>0/else switch framing and several returns are
 * elided in this extract.
 */
1535 kill(proc_t cp
, struct kill_args
*uap
, __unused
int32_t *retval
)
1538 kauth_cred_t uc
= kauth_cred_get();
1539 int posix
= uap
->posix
; /* !0 if posix behaviour desired */
1541 AUDIT_ARG(pid
, uap
->pid
);
1542 AUDIT_ARG(signum
, uap
->signum
);
/* Reject out-of-range signal numbers up front. */
1544 if ((u_int
)uap
->signum
>= NSIG
) {
1548 /* kill single process */
1549 if ((p
= proc_find(uap
->pid
)) == NULL
) {
1550 if ((p
= pzfind(uap
->pid
)) != NULL
) {
/*
 * POSIX 1003.1-2001 requires returning success when killing a
 * zombie; see Rationale for kill(2).
 */
1559 AUDIT_ARG(process
, p
);
/* Permission check: may the caller signal this process? */
1560 if (!cansignal(cp
, uc
, p
, uap
->signum
)) {
1565 psignal(p
, uap
->signum
);
1571 case -1: /* broadcast signal */
1572 return killpg1(cp
, uap
->signum
, 0, 1, posix
);
1573 case 0: /* signal own process group */
1574 return killpg1(cp
, uap
->signum
, 0, 0, posix
);
1575 default: /* negative explicit process group */
1576 return killpg1(cp
, uap
->signum
, -(uap
->pid
), 0, posix
);
/*
 * build_userspace_exit_reason: construct an os_reason_t from
 * userspace-supplied namespace/code plus optional payload bytes and
 * reason string. Disallowed flags are masked; payload and string are
 * bounded, copied in, and packed into the reason's kcdata buffer.
 * On copyin failure the reason is kept but marked FAILED_DATA_COPYIN
 * with an empty buffer. Caller owns the returned reference.
 * NOTE(review): several braces/else arms and the final return are elided
 * in this extract.
 */
1582 build_userspace_exit_reason(uint32_t reason_namespace
, uint64_t reason_code
, user_addr_t payload
, uint32_t payload_size
,
1583 user_addr_t reason_string
, uint64_t reason_flags
)
1585 os_reason_t exit_reason
= OS_REASON_NULL
;
1588 int num_items_to_copy
= 0;
1589 uint32_t user_data_to_copy
= 0;
1590 char *reason_user_desc
= NULL
;
1591 size_t reason_user_desc_len
= 0;
1593 exit_reason
= os_reason_create(reason_namespace
, reason_code
);
1594 if (exit_reason
== OS_REASON_NULL
) {
1595 printf("build_userspace_exit_reason: failed to allocate exit reason\n");
1599 exit_reason
->osr_flags
|= OS_REASON_FLAG_FROM_USERSPACE
;
/* Only apply flags that are allowed to be passed from userspace. */
1604 exit_reason
->osr_flags
|= (reason_flags
& OS_REASON_FLAG_MASK_ALLOWED_FROM_USER
);
1605 if ((reason_flags
& OS_REASON_FLAG_MASK_ALLOWED_FROM_USER
) != reason_flags
) {
1606 printf("build_userspace_exit_reason: illegal flags passed from userspace (some masked off) 0x%llx, ns: %u, code 0x%llx\n",
1607 reason_flags
, reason_namespace
, reason_code
);
/* Default to generating a crash report unless explicitly suppressed. */
1610 if (!(exit_reason
->osr_flags
& OS_REASON_FLAG_NO_CRASH_REPORT
)) {
1611 exit_reason
->osr_flags
|= OS_REASON_FLAG_GENERATE_CRASH_REPORT
;
/* Validate and bound the optional payload. */
1614 if (payload
!= USER_ADDR_NULL
) {
1615 if (payload_size
== 0) {
1616 printf("build_userspace_exit_reason: exit reason with namespace %u, nonzero payload but zero length\n",
1618 exit_reason
->osr_flags
|= OS_REASON_FLAG_BAD_PARAMS
;
1619 payload
= USER_ADDR_NULL
;
1621 num_items_to_copy
++;
/* Clamp oversized payloads and remember that truncation happened. */
1623 if (payload_size
> EXIT_REASON_PAYLOAD_MAX_LEN
) {
1624 exit_reason
->osr_flags
|= OS_REASON_FLAG_PAYLOAD_TRUNCATED
;
1625 payload_size
= EXIT_REASON_PAYLOAD_MAX_LEN
;
1628 user_data_to_copy
+= payload_size
;
/* Copy in the optional human-readable reason string. */
1632 if (reason_string
!= USER_ADDR_NULL
) {
1633 reason_user_desc
= kheap_alloc(KHEAP_TEMP
,
1634 EXIT_REASON_USER_DESC_MAX_LEN
, Z_WAITOK
);
1636 if (reason_user_desc
!= NULL
) {
1637 error
= copyinstr(reason_string
, (void *) reason_user_desc
,
1638 EXIT_REASON_USER_DESC_MAX_LEN
, &reason_user_desc_len
);
1641 num_items_to_copy
++;
1642 user_data_to_copy
+= reason_user_desc_len
;
1643 } else if (error
== ENAMETOOLONG
) {
/* Too-long string: keep a forcibly NUL-terminated truncation. */
1644 num_items_to_copy
++;
1645 reason_user_desc
[EXIT_REASON_USER_DESC_MAX_LEN
- 1] = '\0';
1646 user_data_to_copy
+= reason_user_desc_len
;
/* Any other copyin failure: drop the string and flag the reason. */
1648 exit_reason
->osr_flags
|= OS_REASON_FLAG_FAILED_DATA_COPYIN
;
1649 kheap_free(KHEAP_TEMP
, reason_user_desc
,
1650 EXIT_REASON_USER_DESC_MAX_LEN
);
1651 reason_user_desc
= NULL
;
1652 reason_user_desc_len
= 0;
/* Size and fill the kcdata buffer with whatever items we collected. */
1657 if (num_items_to_copy
!= 0) {
1658 uint32_t reason_buffer_size_estimate
= 0;
1659 mach_vm_address_t data_addr
= 0;
1661 reason_buffer_size_estimate
= kcdata_estimate_required_buffer_size(num_items_to_copy
, user_data_to_copy
);
1663 error
= os_reason_alloc_buffer(exit_reason
, reason_buffer_size_estimate
);
1665 printf("build_userspace_exit_reason: failed to allocate signal reason buffer\n");
1666 goto out_failed_copyin
;
/* Pack the reason string into the kcdata descriptor. */
1669 if (reason_user_desc
!= NULL
&& reason_user_desc_len
!= 0) {
1670 if (KERN_SUCCESS
== kcdata_get_memory_addr(&exit_reason
->osr_kcd_descriptor
,
1671 EXIT_REASON_USER_DESC
,
1672 (uint32_t)reason_user_desc_len
,
1674 kcdata_memcpy(&exit_reason
->osr_kcd_descriptor
, (mach_vm_address_t
) data_addr
,
1675 reason_user_desc
, (uint32_t)reason_user_desc_len
);
1677 printf("build_userspace_exit_reason: failed to allocate space for reason string\n");
1678 goto out_failed_copyin
;
/* Pack the raw payload bytes, copying directly from userspace. */
1682 if (payload
!= USER_ADDR_NULL
) {
1684 kcdata_get_memory_addr(&exit_reason
->osr_kcd_descriptor
,
1685 EXIT_REASON_USER_PAYLOAD
,
1688 error
= copyin(payload
, (void *) data_addr
, payload_size
);
1690 printf("build_userspace_exit_reason: failed to copy in payload data with error %d\n", error
);
1691 goto out_failed_copyin
;
1694 printf("build_userspace_exit_reason: failed to allocate space for payload data\n");
1695 goto out_failed_copyin
;
/* Success path: release the temporary string buffer. */
1700 if (reason_user_desc
!= NULL
) {
1701 kheap_free(KHEAP_TEMP
, reason_user_desc
, EXIT_REASON_USER_DESC_MAX_LEN
);
1702 reason_user_desc
= NULL
;
1703 reason_user_desc_len
= 0;
/* out_failed_copyin path: free the buffer and mark the reason degraded. */
1710 if (reason_user_desc
!= NULL
) {
1711 kheap_free(KHEAP_TEMP
, reason_user_desc
, EXIT_REASON_USER_DESC_MAX_LEN
);
1712 reason_user_desc
= NULL
;
1713 reason_user_desc_len
= 0;
1716 exit_reason
->osr_flags
|= OS_REASON_FLAG_FAILED_DATA_COPYIN
;
1717 os_reason_alloc_buffer(exit_reason
, 0);
/*
 * terminate_with_payload_internal: SIGKILL `target_pid` with an exit reason
 * built from the caller-supplied namespace/code/payload/string. Self-kill
 * routes through psignal_thread_with_reason() so the calling thread never
 * returns to userspace. NOTE(review): several returns/braces are elided in
 * this extract.
 */
1722 terminate_with_payload_internal(struct proc
*cur_proc
, int target_pid
, uint32_t reason_namespace
,
1723 uint64_t reason_code
, user_addr_t payload
, uint32_t payload_size
,
1724 user_addr_t reason_string
, uint64_t reason_flags
)
1726 proc_t target_proc
= PROC_NULL
;
1727 kauth_cred_t cur_cred
= kauth_cred_get();
1729 os_reason_t signal_reason
= OS_REASON_NULL
;
1731 AUDIT_ARG(pid
, target_pid
);
/* Only positive, explicit pids are accepted — no group/broadcast forms. */
1732 if ((target_pid
<= 0)) {
1736 target_proc
= proc_find(target_pid
);
1737 if (target_proc
== PROC_NULL
) {
1741 AUDIT_ARG(process
, target_proc
);
/* Same permission model as kill(2) with SIGKILL. */
1743 if (!cansignal(cur_proc
, cur_cred
, target_proc
, SIGKILL
)) {
1744 proc_rele(target_proc
);
1748 if (target_pid
!= cur_proc
->p_pid
) {
/*
 * FLAG_ABORT should only be set on terminate_with_reason(getpid()) that
 * was a fallback from an unsuccessful abort_with_reason(). In that case
 * caller's pid matches the target one. Otherwise remove the flag.
 */
1754 reason_flags
&= ~((typeof(reason_flags
))OS_REASON_FLAG_ABORT
);
1757 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC
, BSD_PROC_EXITREASON_CREATE
) | DBG_FUNC_NONE
,
1758 target_proc
->p_pid
, reason_namespace
,
1761 signal_reason
= build_userspace_exit_reason(reason_namespace
, reason_code
, payload
, payload_size
,
1762 reason_string
, (reason_flags
| OS_REASON_FLAG_NO_CRASHED_TID
));
1764 if (target_pid
== cur_proc
->p_pid
) {
/*
 * psignal_thread_with_reason() will pend a SIGKILL on the specified thread or
 * return if the thread and/or task are already terminating. Either way, the
 * current thread won't return to userspace.
 */
1770 psignal_thread_with_reason(target_proc
, current_thread(), SIGKILL
, signal_reason
);
1772 psignal_with_reason(target_proc
, SIGKILL
, signal_reason
);
/* Drop the reference taken by proc_find(). */
1775 proc_rele(target_proc
);
/*
 * terminate_with_payload: syscall entry point — unpacks the uap-style args
 * struct and forwards to terminate_with_payload_internal().
 */
1781 terminate_with_payload(struct proc
*cur_proc
, struct terminate_with_payload_args
*args
,
1782 __unused
int32_t *retval
)
1784 return terminate_with_payload_internal(cur_proc
, args
->pid
, args
->reason_namespace
, args
->reason_code
, args
->payload
,
1785 args
->payload_size
, args
->reason_string
, args
->reason_flags
);
/*
 * killpg1_allfilt: proc_iterate filter for the kill(-1) broadcast case.
 * Accepts every process except initproc (pid 1 and below), system
 * processes, and — in non-POSIX mode — the calling process itself.
 */
1789 killpg1_allfilt(proc_t p
, void * arg
)
1791 struct killpg1_filtargs
* kfargp
= (struct killpg1_filtargs
*)arg
;
/*
 * Don't signal initproc, a system process, or the current process if POSIX
 * semantics were not requested.
 */
1797 return p
->p_pid
> 1 && !(p
->p_flag
& P_SYSTEM
) &&
1798 (kfargp
->posix
? true : p
!= kfargp
->curproc
);
/*
 * killpg1_pgrpfilt: pgrp_iterate filter for the process-group kill case.
 * Skips init, system processes, and zombies.
 */
1802 killpg1_pgrpfilt(proc_t p
, __unused
void * arg
)
1804 /* XXX shouldn't this allow signalling zombies? */
1805 return p
->p_pid
> 1 && !(p
->p_flag
& P_SYSTEM
) && p
->p_stat
!= SZOMB
;
/*
 * killpg1_callback: per-process action for killpg1() iteration. Performs
 * the permission check and (for live processes) sends the signal; bumps
 * the shared nfound counter so killpg1() can distinguish ESRCH/EPERM.
 * NOTE(review): the nfound increments and psignal call inside the branches
 * are elided in this extract.
 */
1809 killpg1_callback(proc_t p
, void *arg
)
1811 struct killpg1_iterargs
*kargp
= (struct killpg1_iterargs
*)arg
;
1812 int signum
= kargp
->signum
;
/* Exited (zombie) processes get counted but not actually signalled. */
1814 if ((p
->p_listflag
& P_LIST_EXITED
) == P_LIST_EXITED
) {
/*
 * Count zombies as found for the purposes of signalling, since POSIX
 * 1003.1-2001 sees signalling zombies as successful. If killpg(2) or
 * kill(2) with pid -1 only finds zombies that can be signalled, it
 * shouldn't return ESRCH. See the Rationale for kill(2).
 *
 * Don't call into MAC -- it's not expecting signal checks for exited
 * processes.
 */
1824 if (cansignal_nomac(kargp
->curproc
, kargp
->uc
, p
, signum
)) {
1827 } else if (cansignal(kargp
->curproc
, kargp
->uc
, p
, signum
)) {
1835 return PROC_RETURNED
;
/*
 * killpg1: common code for kill process group / broadcast kill.
 * all != 0 -> broadcast over all processes (including zombies);
 * pgid == 0 -> caller's own process group; otherwise the group `pgid`.
 * Returns 0 if at least one process was found, else EPERM (posix) or ESRCH.
 * NOTE(review): the if/else framing between the broadcast and pgrp paths is
 * elided in this extract.
 */
1842 killpg1(proc_t curproc
, int signum
, int pgid
, int all
, int posix
)
1848 uc
= kauth_cred_proc_ref(curproc
);
/* Shared iteration state: credential, signal, and found-count. */
1849 struct killpg1_iterargs karg
= {
1850 .curproc
= curproc
, .uc
= uc
, .nfound
= 0, .signum
= signum
/* Broadcast to all processes that the user can signal (pid was -1). */
1857 struct killpg1_filtargs kfarg
= {
1858 .posix
= posix
, .curproc
= curproc
1860 proc_iterate(PROC_ALLPROCLIST
| PROC_ZOMBPROCLIST
, killpg1_callback
,
1861 &karg
, killpg1_allfilt
, &kfarg
);
/* Send to the caller's own process group (pgid == 0). */
1867 pgrp
= proc_pgrp(curproc
);
1869 pgrp
= pgfind(pgid
);
1876 /* PGRP_DROPREF drops the pgrp refernce */
1877 pgrp_iterate(pgrp
, PGRP_DROPREF
, killpg1_callback
, &karg
,
1878 killpg1_pgrpfilt
, NULL
);
/* Nothing found: ESRCH normally, EPERM under POSIX semantics. */
1880 error
= (karg
.nfound
> 0 ? 0 : (posix
? EPERM
: ESRCH
));
1882 kauth_cred_unref(&uc
);
/*
 * gsignal: send `signum` to process group `pgid`, if that group exists.
 * A pgid of 0 is a no-op.
 */
1890 gsignal(int pgid
, int signum
)
1894 if (pgid
&& (pgrp
= pgfind(pgid
))) {
1895 pgsignal(pgrp
, signum
, 0);
/*
 * pgsignal_filt: pgrp_iterate filter for pgsignal()/tty_pgsignal().
 * If checkctty is nonzero, only admit group members that have a
 * controlling terminal (P_CONTROLT); otherwise admit everyone.
 */
1906 pgsignal_filt(proc_t p
, void * arg
)
1908 int checkctty
= *(int*)arg
;
1910 if ((checkctty
== 0) || p
->p_flag
& P_CONTROLT
) {
/*
 * pgsignal_callback: per-process action for pgsignal() iteration —
 * delivers the signal carried in *arg to each admitted member.
 * NOTE(review): the psignal call itself is elided in this extract.
 */
1919 pgsignal_callback(proc_t p
, void * arg
)
1921 int signum
= *(int*)arg
;
1924 return PROC_RETURNED
;
/*
 * pgsignal: send `signum` to every member of `pgrp`, optionally limited
 * (checkctty) to members with a controlling terminal. NULL group is a no-op.
 */
1929 pgsignal(struct pgrp
*pgrp
, int signum
, int checkctty
)
1931 if (pgrp
!= PGRP_NULL
) {
1932 pgrp_iterate(pgrp
, 0, pgsignal_callback
, &signum
, pgsignal_filt
, &checkctty
);
/*
 * tty_pgsignal: send `signum` to the foreground process group of tty `tp`
 * (the group lookup from tp is elided in this extract), honoring the same
 * checkctty filtering as pgsignal().
 */
1938 tty_pgsignal(struct tty
*tp
, int signum
, int checkctty
)
1943 if (pg
!= PGRP_NULL
) {
1944 pgrp_iterate(pg
, 0, pgsignal_callback
, &signum
, pgsignal_filt
, &checkctty
);
/*
 * threadsignal: send a signal caused by a Mach exception/trap directly to a
 * specific thread, bypassing process-wide delivery. Only signals in
 * `threadmask` qualify. Optionally pre-populates the thread's exit reason
 * when the signal looks fatal. NOTE(review): early-return braces are elided
 * in this extract.
 */
1952 threadsignal(thread_t sig_actthread
, int signum
, mach_exception_code_t code
, boolean_t set_exitreason
)
1954 struct uthread
*uth
;
1955 struct task
* sig_task
;
/* Validate the signal number. */
1959 if ((u_int
)signum
>= NSIG
|| signum
== 0) {
1963 mask
= sigmask(signum
);
/* Only thread-directed (trap-style) signals are handled here. */
1964 if ((mask
& threadmask
) == 0) {
1967 sig_task
= get_threadtask(sig_actthread
);
1968 p
= (proc_t
)(get_bsdtask_info(sig_task
));
1970 uth
= get_bsdthread_info(sig_actthread
);
/* Threads in vfork limbo do not take direct signals. */
1971 if (uth
->uu_flag
& UT_VFORK
) {
/* Untraced processes that ignore this signal: drop it. */
1976 if (!(p
->p_lflag
& P_LTRACED
) && (p
->p_sigignore
& mask
)) {
/* Pend the signal on the thread and stash the exception code. */
1981 uth
->uu_siglist
|= mask
;
1982 uth
->uu_code
= code
;
1984 /* Attempt to establish whether the signal will be fatal (mirrors logic in psignal_internal()) */
1985 if (set_exitreason
&& ((p
->p_lflag
& P_LTRACED
) || (!(uth
->uu_sigwait
& mask
)
1986 && !(uth
->uu_sigmask
& mask
) && !(p
->p_sigcatch
& mask
))) &&
1987 !(mask
& stopsigmask
) && !(mask
& contsigmask
)) {
/* Only install a reason if the thread doesn't already have one. */
1988 if (uth
->uu_exit_reason
== OS_REASON_NULL
) {
1989 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC
, BSD_PROC_EXITREASON_CREATE
) | DBG_FUNC_NONE
,
1990 p
->p_pid
, OS_REASON_SIGNAL
, signum
, 0, 0);
1992 os_reason_t signal_reason
= build_signal_reason(signum
, "exc handler");
1994 set_thread_exit_reason(sig_actthread
, signal_reason
, TRUE
);
1996 /* We dropped/consumed the reference in set_thread_exit_reason() */
1997 signal_reason
= OS_REASON_NULL
;
2003 /* Wake the thread so it notices the pending signal via the BSD AST. */
2004 signal_setast(sig_actthread
);
/*
 * set_thread_extra_flags: annotate an exit reason with extra context derived
 * from the faulting thread. Currently: for SIGSEGV reasons, flag faults
 * whose address lies in the shared region so launchd can react (and, on
 * ptrauth hardware, trigger a shared-cache reslide).
 * Called with proc locked.
 */
2009 set_thread_extra_flags(struct uthread
*uth
, os_reason_t reason
)
2011 extern int vm_shared_region_reslide_restrict
;
2012 assert(uth
!= NULL
);
/*
 * Check whether the userland fault address falls within the shared
 * region and notify userland if so. This allows launchd to apply
 * special policies around this fault type.
 */
2018 if (reason
->osr_namespace
== OS_REASON_SIGNAL
&&
2019 reason
->osr_code
== SIGSEGV
) {
/* uu_subcode carries the fault address for SIGSEGV. */
2020 mach_vm_address_t fault_address
= uth
->uu_subcode
;
2022 #if defined(__arm64__)
2023 /* taken from osfmk/arm/misc_protos.h */
2024 #define TBI_MASK 0xff00000000000000
2025 #define tbi_clear(addr) ((addr) & ~(TBI_MASK))
/* Strip the top-byte-ignore bits before the range comparison. */
2026 fault_address
= tbi_clear(fault_address
);
2027 #endif /* __arm64__ */
2029 if (fault_address
>= SHARED_REGION_BASE
&&
2030 fault_address
<= SHARED_REGION_BASE
+ SHARED_REGION_SIZE
) {
/*
 * Always report whether the fault happened within the shared cache
 * region, but only stale the slide if the resliding is extended
 * to all processes or if the process faulting is a platform one.
 */
2036 reason
->osr_flags
|= OS_REASON_FLAG_SHAREDREGION_FAULT
;
2038 #if __has_feature(ptrauth_calls)
2039 if (!vm_shared_region_reslide_restrict
|| csproc_get_platform_binary(current_proc())) {
2040 vm_shared_region_reslide_stale();
2042 #endif /* __has_feature(ptrauth_calls) */
/*
 * set_thread_exit_reason: attach `reason` to thread `th` as its exit reason.
 * Takes the proc lock itself unless `proc_locked` says the caller already
 * holds it (the conditional locking around proc_lock/unlock is partially
 * elided in this extract). Consumes the caller's reference on the reason:
 * either it is stored on the uthread, or it is freed here if the thread
 * already has one.
 */
2048 set_thread_exit_reason(void *th
, void *reason
, boolean_t proc_locked
)
2050 struct uthread
*targ_uth
= get_bsdthread_info(th
);
2051 struct task
*targ_task
= NULL
;
2052 proc_t targ_proc
= NULL
;
2054 os_reason_t exit_reason
= (os_reason_t
)reason
;
/* Nothing to attach. */
2056 if (exit_reason
== OS_REASON_NULL
) {
2061 targ_task
= get_threadtask(th
);
2062 targ_proc
= (proc_t
)(get_bsdtask_info(targ_task
));
2064 proc_lock(targ_proc
);
2067 set_thread_extra_flags(targ_uth
, exit_reason
);
/* First reason wins; later ones are dropped. */
2069 if (targ_uth
->uu_exit_reason
== OS_REASON_NULL
) {
2070 targ_uth
->uu_exit_reason
= exit_reason
;
2072 /* The caller expects that we drop a reference on the exit reason */
2073 os_reason_free(exit_reason
);
2077 assert(targ_proc
!= NULL
);
2078 proc_unlock(targ_proc
);
/*
 * Picks an appropriate thread from a process to target with a signal.
 *
 * Called with proc locked.
 * Returns (via *thr) a thread with the BSD AST set, or KERN_FAILURE.
 *
 * We attempt to deliver a proc-wide signal to the first eligible thread:
 * one not opted out (UT_NO_SIGMASK) that either doesn't block the signal
 * or is sigwait()ing for it. Workqueue threads are skipped on a first pass
 * and only considered on a second pass; if no thread qualifies,
 * get_signalact() picks a fallback. NOTE(review): the *thr assignments and
 * the second-pass loop framing are elided in this extract.
 */
2094 static kern_return_t
2095 get_signalthread(proc_t p
, int signum
, thread_t
* thr
)
2097 struct uthread
*uth
;
2098 sigset_t mask
= sigmask(signum
);
2099 thread_t sig_thread
;
2100 struct task
* sig_task
= p
->task
;
2102 bool skip_wqthreads
= true;
/* Vfork: the only deliverable thread is the borrowed vfork activation. */
2106 if ((p
->p_lflag
& P_LINVFORK
) && p
->p_vforkact
) {
2107 sig_thread
= p
->p_vforkact
;
2108 kret
= check_actforsig(sig_task
, sig_thread
, 1);
2109 if (kret
== KERN_SUCCESS
) {
2111 return KERN_SUCCESS
;
2113 return KERN_FAILURE
;
/* Scan all uthreads for one that can take the signal right now. */
2118 TAILQ_FOREACH(uth
, &p
->p_uthlist
, uu_list
) {
2119 if (((uth
->uu_flag
& UT_NO_SIGMASK
) == 0) &&
2120 (((uth
->uu_sigmask
& mask
) == 0) || (uth
->uu_sigwait
& mask
))) {
2121 thread_t th
= uth
->uu_context
.vc_thread
;
2122 if (skip_wqthreads
&& (thread_get_tag(th
) & THREAD_TAG_WORKQUEUE
)) {
2123 /* Workqueue threads may be parked in the kernel unable to
2124 * deliver signals for an extended period of time, so skip them
2125 * in favor of pthreads in a first pass. (rdar://50054475). */
2126 } else if (check_actforsig(p
->task
, th
, 1) == KERN_SUCCESS
) {
2128 return KERN_SUCCESS
;
/* First pass found only workqueue threads: retry admitting them. */
2132 if (skip_wqthreads
) {
2133 skip_wqthreads
= false;
/* Last resort: let the Mach layer pick any signallable activation. */
2136 if (get_signalact(p
->task
, thr
, 1) == KERN_SUCCESS
) {
2137 return KERN_SUCCESS
;
2140 return KERN_FAILURE
;
/*
 * build_signal_reason: create an OS_REASON_SIGNAL os_reason_t for `signum`,
 * recording the sending process's pid and name (or the caller-supplied
 * `procname` override, or "unknown") in the reason's kcdata buffer.
 * Uses the non-blocking buffer allocator so it is safe from signal paths.
 * Returns the reason (possibly with a partially filled buffer) or
 * OS_REASON_NULL on allocation failure. NOTE(review): some braces/else
 * arms are elided in this extract.
 */
2144 build_signal_reason(int signum
, const char *procname
)
2146 os_reason_t signal_reason
= OS_REASON_NULL
;
2147 proc_t sender_proc
= current_proc();
2148 uint32_t reason_buffer_size_estimate
= 0, proc_name_length
= 0;
2149 const char *default_sender_procname
= "unknown";
2150 mach_vm_address_t data_addr
;
2153 signal_reason
= os_reason_create(OS_REASON_SIGNAL
, signum
);
2154 if (signal_reason
== OS_REASON_NULL
) {
2155 printf("build_signal_reason: unable to allocate signal reason structure.\n");
2156 return signal_reason
;
/* Two kcdata items: sender pid and sender process name. */
2159 reason_buffer_size_estimate
= kcdata_estimate_required_buffer_size(2, sizeof(sender_proc
->p_name
) +
2160 sizeof(sender_proc
->p_pid
));
2162 ret
= os_reason_alloc_buffer_noblock(signal_reason
, reason_buffer_size_estimate
);
2164 printf("build_signal_reason: unable to allocate signal reason buffer.\n");
2165 return signal_reason
;
/* Record the sending pid. */
2168 if (KERN_SUCCESS
== kcdata_get_memory_addr(&signal_reason
->osr_kcd_descriptor
, KCDATA_TYPE_PID
,
2169 sizeof(sender_proc
->p_pid
), &data_addr
)) {
2170 kcdata_memcpy(&signal_reason
->osr_kcd_descriptor
, data_addr
, &sender_proc
->p_pid
,
2171 sizeof(sender_proc
->p_pid
));
2173 printf("build_signal_reason: exceeded space in signal reason buf, unable to log PID\n");
/* Record the sender's name: override, own p_name, or "unknown". */
2176 proc_name_length
= sizeof(sender_proc
->p_name
);
2177 if (KERN_SUCCESS
== kcdata_get_memory_addr(&signal_reason
->osr_kcd_descriptor
, KCDATA_TYPE_PROCNAME
,
2178 proc_name_length
, &data_addr
)) {
/* Bounded copy of the caller-supplied name; forced NUL termination. */
2180 char truncated_procname
[proc_name_length
];
2181 strncpy((char *) &truncated_procname
, procname
, proc_name_length
);
2182 truncated_procname
[proc_name_length
- 1] = '\0';
2184 kcdata_memcpy(&signal_reason
->osr_kcd_descriptor
, data_addr
, truncated_procname
,
2185 (uint32_t)strlen((char *) &truncated_procname
));
2186 } else if (*sender_proc
->p_name
) {
2187 kcdata_memcpy(&signal_reason
->osr_kcd_descriptor
, data_addr
, &sender_proc
->p_name
,
2188 sizeof(sender_proc
->p_name
));
2190 kcdata_memcpy(&signal_reason
->osr_kcd_descriptor
, data_addr
, &default_sender_procname
,
2191 (uint32_t)strlen(default_sender_procname
) + 1);
2194 printf("build_signal_reason: exceeded space in signal reason buf, unable to log procname\n");
2197 return signal_reason
;
2201 * Send the signal to the process. If the signal has an action, the action
2202 * is usually performed by the target process rather than the caller; we add
2203 * the signal to the set of pending signals for the process.
2205 * Always drops a reference on a signal_reason if one is provided, whether via
2206 * passing it to a thread or deallocating directly.
2209 * o When a stop signal is sent to a sleeping process that takes the
2210 * default action, the process is stopped without awakening it.
2211 * o SIGCONT restarts stopped processes (or puts them back to sleep)
2212 * regardless of the signal action (eg, blocked or ignored).
2214 * Other ignored signals are discarded immediately.
2217 psignal_internal(proc_t p
, task_t task
, thread_t thread
, int flavor
, int signum
, os_reason_t signal_reason
)
2220 user_addr_t action
= USER_ADDR_NULL
;
2222 thread_t sig_thread
;
2225 struct uthread
*uth
;
2229 kauth_cred_t my_cred
;
2230 char *launchd_exit_reason_desc
= NULL
;
2231 boolean_t update_thread_policy
= FALSE
;
2233 if ((u_int
)signum
>= NSIG
|| signum
== 0) {
2234 panic("psignal: bad signal number %d", signum
);
2237 mask
= sigmask(signum
);
2238 prop
= sigprop
[signum
];
2241 if (rdebug_proc
&& (p
!= PROC_NULL
) && (p
== rdebug_proc
)) {
2244 #endif /* SIGNAL_DEBUG */
2246 /* catch unexpected initproc kills early for easier debuggging */
2247 if (signum
== SIGKILL
&& p
== initproc
) {
2248 if (signal_reason
== NULL
) {
2249 panic_plain("unexpected SIGKILL of %s %s (no reason provided)",
2250 (p
->p_name
[0] != '\0' ? p
->p_name
: "initproc"),
2251 ((p
->p_csflags
& CS_KILLED
) ? "(CS_KILLED)" : ""));
2253 launchd_exit_reason_desc
= launchd_exit_reason_get_string_desc(signal_reason
);
2254 panic_plain("unexpected SIGKILL of %s %s with reason -- namespace %d code 0x%llx description %." LAUNCHD_PANIC_REASON_STRING_MAXLEN
"s",
2255 (p
->p_name
[0] != '\0' ? p
->p_name
: "initproc"),
2256 ((p
->p_csflags
& CS_KILLED
) ? "(CS_KILLED)" : ""),
2257 signal_reason
->osr_namespace
, signal_reason
->osr_code
,
2258 launchd_exit_reason_desc
? launchd_exit_reason_desc
: "none");
2263 * We will need the task pointer later. Grab it now to
2264 * check for a zombie process. Also don't send signals
2265 * to kernel internal tasks.
2267 if (flavor
& PSIG_VFORK
) {
2269 sig_thread
= thread
;
2271 } else if (flavor
& PSIG_THREAD
) {
2272 sig_task
= get_threadtask(thread
);
2273 sig_thread
= thread
;
2274 sig_proc
= (proc_t
)get_bsdtask_info(sig_task
);
2275 } else if (flavor
& PSIG_TRY_THREAD
) {
2276 assert((thread
== current_thread()) && (p
== current_proc()));
2278 sig_thread
= thread
;
2282 sig_thread
= THREAD_NULL
;
2286 if ((sig_task
== TASK_NULL
) || is_kerneltask(sig_task
)) {
2287 os_reason_free(signal_reason
);
2292 * do not send signals to the process that has the thread
2293 * doing a reboot(). Not doing so will mark that thread aborted
2294 * and can cause IO failures wich will cause data loss. There's
2295 * also no need to send a signal to a process that is in the middle
2296 * of being torn down.
2298 if (ISSET(sig_proc
->p_flag
, P_REBOOT
) || ISSET(sig_proc
->p_lflag
, P_LEXIT
)) {
2299 DTRACE_PROC3(signal__discard
, thread_t
, sig_thread
, proc_t
, sig_proc
, int, signum
);
2300 os_reason_free(signal_reason
);
2304 if ((flavor
& (PSIG_VFORK
| PSIG_THREAD
)) == 0) {
2305 proc_knote(sig_proc
, NOTE_SIGNAL
| signum
);
2308 if ((flavor
& PSIG_LOCKED
) == 0) {
2309 proc_signalstart(sig_proc
, 0);
2312 /* Don't send signals to a process that has ignored them. */
2313 if (((flavor
& PSIG_VFORK
) == 0) && ((sig_proc
->p_lflag
& P_LTRACED
) == 0) && (sig_proc
->p_sigignore
& mask
)) {
2314 DTRACE_PROC3(signal__discard
, thread_t
, sig_thread
, proc_t
, sig_proc
, int, signum
);
2315 goto sigout_unlocked
;
2319 * The proc_lock prevents the targeted thread from being deallocated
2320 * or handling the signal until we're done signaling it.
2322 * Once the proc_lock is dropped, we have no guarantee the thread or uthread exists anymore.
2324 * XXX: What if the thread goes inactive after the thread passes bsd ast point?
2326 proc_lock(sig_proc
);
2328 if (flavor
& PSIG_VFORK
) {
2330 act_set_astbsd(sig_thread
);
2331 kret
= KERN_SUCCESS
;
2332 } else if (flavor
& PSIG_TRY_THREAD
) {
2333 uth
= get_bsdthread_info(sig_thread
);
2334 if (((uth
->uu_flag
& UT_NO_SIGMASK
) == 0) &&
2335 (((uth
->uu_sigmask
& mask
) == 0) || (uth
->uu_sigwait
& mask
)) &&
2336 ((kret
= check_actforsig(sig_proc
->task
, sig_thread
, 1)) == KERN_SUCCESS
)) {
2337 /* deliver to specified thread */
2339 /* deliver to any willing thread */
2340 kret
= get_signalthread(sig_proc
, signum
, &sig_thread
);
2342 } else if (flavor
& PSIG_THREAD
) {
2343 /* If successful return with ast set */
2344 kret
= check_actforsig(sig_task
, sig_thread
, 1);
2346 /* If successful return with ast set */
2347 kret
= get_signalthread(sig_proc
, signum
, &sig_thread
);
2350 if (kret
!= KERN_SUCCESS
) {
2351 DTRACE_PROC3(signal__discard
, thread_t
, sig_thread
, proc_t
, sig_proc
, int, signum
);
2352 proc_unlock(sig_proc
);
2353 goto sigout_unlocked
;
2356 uth
= get_bsdthread_info(sig_thread
);
2359 * If proc is traced, always give parent a chance.
2362 if ((flavor
& PSIG_VFORK
) == 0) {
2363 if (sig_proc
->p_lflag
& P_LTRACED
) {
2367 * If the signal is being ignored,
2368 * then we forget about it immediately.
2369 * (Note: we don't set SIGCONT in p_sigignore,
2370 * and if it is set to SIG_IGN,
2371 * action will be SIG_DFL here.)
2373 if (sig_proc
->p_sigignore
& mask
) {
2377 if (uth
->uu_sigwait
& mask
) {
2378 action
= KERN_SIG_WAIT
;
2379 } else if (uth
->uu_sigmask
& mask
) {
2380 action
= KERN_SIG_HOLD
;
2381 } else if (sig_proc
->p_sigcatch
& mask
) {
2382 action
= KERN_SIG_CATCH
;
2389 /* TODO: p_nice isn't hooked up to the scheduler... */
2390 if (sig_proc
->p_nice
> NZERO
&& action
== SIG_DFL
&& (prop
& SA_KILL
) &&
2391 (sig_proc
->p_lflag
& P_LTRACED
) == 0) {
2392 sig_proc
->p_nice
= NZERO
;
2395 if (prop
& SA_CONT
) {
2396 uth
->uu_siglist
&= ~stopsigmask
;
2399 if (prop
& SA_STOP
) {
2402 * If sending a tty stop signal to a member of an orphaned
2403 * process group, discard the signal here if the action
2404 * is default; don't stop the process below if sleeping,
2405 * and don't clear any pending SIGCONT.
2407 pg
= proc_pgrp(sig_proc
);
2408 if (prop
& SA_TTYSTOP
&& pg
->pg_jobc
== 0 &&
2409 action
== SIG_DFL
) {
2414 uth
->uu_siglist
&= ~contsigmask
;
2417 uth
->uu_siglist
|= mask
;
2420 * Defer further processing for signals which are held,
2421 * except that stopped processes must be continued by SIGCONT.
2423 /* vfork will not go thru as action is SIG_DFL */
2424 if ((action
== KERN_SIG_HOLD
) && ((prop
& SA_CONT
) == 0 || sig_proc
->p_stat
!= SSTOP
)) {
2429 * SIGKILL priority twiddling moved here from above because
2430 * it needs sig_thread. Could merge it into large switch
2431 * below if we didn't care about priority for tracing
2432 * as SIGKILL's action is always SIG_DFL.
2434 * TODO: p_nice isn't hooked up to the scheduler...
2436 if ((signum
== SIGKILL
) && (sig_proc
->p_nice
> NZERO
)) {
2437 sig_proc
->p_nice
= NZERO
;
2441 * Process is traced - wake it up (if not already
2442 * stopped) so that it can discover the signal in
2443 * issig() and stop for the parent.
2445 if (sig_proc
->p_lflag
& P_LTRACED
) {
2446 if (sig_proc
->p_stat
!= SSTOP
) {
2453 if ((flavor
& PSIG_VFORK
) != 0) {
2457 if (action
== KERN_SIG_WAIT
) {
2460 * DTrace proc signal-clear returns a siginfo_t. Collect the needed info.
2462 r_uid
= kauth_getruid(); /* per thread credential; protected by our thread context */
2464 bzero((caddr_t
)&(uth
->t_dtrace_siginfo
), sizeof(uth
->t_dtrace_siginfo
));
2466 uth
->t_dtrace_siginfo
.si_signo
= signum
;
2467 uth
->t_dtrace_siginfo
.si_pid
= current_proc()->p_pid
;
2468 uth
->t_dtrace_siginfo
.si_status
= W_EXITCODE(signum
, 0);
2469 uth
->t_dtrace_siginfo
.si_uid
= r_uid
;
2470 uth
->t_dtrace_siginfo
.si_code
= 0;
2472 uth
->uu_sigwait
= mask
;
2473 uth
->uu_siglist
&= ~mask
;
2474 wakeup(&uth
->uu_sigwait
);
2475 /* if it is SIGCONT resume whole process */
2476 if (prop
& SA_CONT
) {
2477 OSBitOrAtomic(P_CONTINUED
, &sig_proc
->p_flag
);
2478 sig_proc
->p_contproc
= current_proc()->p_pid
;
2479 (void) task_resume_internal(sig_task
);
2484 if (action
!= SIG_DFL
) {
2486 * User wants to catch the signal.
2487 * Wake up the thread, but don't un-suspend it
2488 * (except for SIGCONT).
2490 if (prop
& SA_CONT
) {
2491 OSBitOrAtomic(P_CONTINUED
, &sig_proc
->p_flag
);
2492 (void) task_resume_internal(sig_task
);
2493 sig_proc
->p_stat
= SRUN
;
2494 } else if (sig_proc
->p_stat
== SSTOP
) {
2498 * Fill out siginfo structure information to pass to the
2499 * signalled process/thread sigaction handler, when it
2500 * wakes up. si_code is 0 because this is an ordinary
2501 * signal, not a SIGCHLD, and so si_status is the signal
2502 * number itself, instead of the child process exit status.
2503 * We shift this left because it will be shifted right before
2504 * it is passed to user space. kind of ugly to use W_EXITCODE
2505 * this way, but it beats defining a new macro.
2507 * Note: Avoid the SIGCHLD recursion case!
2509 if (signum
!= SIGCHLD
) {
2510 r_uid
= kauth_getruid();
2512 sig_proc
->si_pid
= current_proc()->p_pid
;
2513 sig_proc
->si_status
= W_EXITCODE(signum
, 0);
2514 sig_proc
->si_uid
= r_uid
;
2515 sig_proc
->si_code
= 0;
2520 /* Default action - varies */
2521 if (mask
& stopsigmask
) {
2522 assert(signal_reason
== NULL
);
2524 * These are the signals which by default
2527 * Don't clog system with children of init
2528 * stopped from the keyboard.
2530 if (!(prop
& SA_STOP
) && sig_proc
->p_pptr
== initproc
) {
2531 uth
->uu_siglist
&= ~mask
;
2532 proc_unlock(sig_proc
);
2533 /* siglock still locked, proc_lock not locked */
2534 psignal_locked(sig_proc
, SIGKILL
);
2535 goto sigout_unlocked
;
2540 * if task hasn't already been stopped by
2543 uth
->uu_siglist
&= ~mask
;
2544 if (sig_proc
->p_stat
!= SSTOP
) {
2545 sig_proc
->p_xstat
= signum
;
2546 sig_proc
->p_stat
= SSTOP
;
2547 OSBitAndAtomic(~((uint32_t)P_CONTINUED
), &sig_proc
->p_flag
);
2548 sig_proc
->p_lflag
&= ~P_LWAITED
;
2549 proc_unlock(sig_proc
);
2551 pp
= proc_parentholdref(sig_proc
);
2553 if ((pp
!= PROC_NULL
) && ((pp
->p_flag
& P_NOCLDSTOP
) == 0)) {
2554 my_cred
= kauth_cred_proc_ref(sig_proc
);
2555 r_uid
= kauth_cred_getruid(my_cred
);
2556 kauth_cred_unref(&my_cred
);
2558 proc_lock(sig_proc
);
2559 pp
->si_pid
= sig_proc
->p_pid
;
2561 * POSIX: sigaction for a stopped child
2562 * when sent to the parent must set the
2563 * child's signal number into si_status.
2565 if (signum
!= SIGSTOP
) {
2566 pp
->si_status
= WEXITSTATUS(sig_proc
->p_xstat
);
2568 pp
->si_status
= W_EXITCODE(signum
, signum
);
2570 pp
->si_code
= CLD_STOPPED
;
2572 proc_unlock(sig_proc
);
2574 psignal(pp
, SIGCHLD
);
2576 if (pp
!= PROC_NULL
) {
2577 proc_parentdropref(pp
, 0);
2580 goto sigout_unlocked
;
2586 DTRACE_PROC3(signal__send
, thread_t
, sig_thread
, proc_t
, p
, int, signum
);
2590 * Signals ignored by default have been dealt
2591 * with already, since their bits are on in
2597 * Kill signal always sets process running and
2601 * Process will be running after 'run'
2603 sig_proc
->p_stat
= SRUN
;
2605 * In scenarios where suspend/resume are racing
2606 * the signal we are missing AST_BSD by the time
2607 * we get here, set again to avoid races. This
2608 * was the scenario with spindump enabled shutdowns.
2609 * We would need to cover this approp down the line.
2611 act_set_astbsd(sig_thread
);
2612 kret
= thread_abort(sig_thread
);
2613 update_thread_policy
= (kret
== KERN_SUCCESS
);
2615 if (uth
->uu_exit_reason
== OS_REASON_NULL
) {
2616 if (signal_reason
== OS_REASON_NULL
) {
2617 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC
, BSD_PROC_EXITREASON_CREATE
) | DBG_FUNC_NONE
,
2618 sig_proc
->p_pid
, OS_REASON_SIGNAL
, signum
, 0, 0);
2620 signal_reason
= build_signal_reason(signum
, NULL
);
2623 os_reason_ref(signal_reason
);
2624 set_thread_exit_reason(sig_thread
, signal_reason
, TRUE
);
2631 * Let the process run. If it's sleeping on an
2632 * event, it remains so.
2634 assert(signal_reason
== NULL
);
2635 OSBitOrAtomic(P_CONTINUED
, &sig_proc
->p_flag
);
2636 sig_proc
->p_contproc
= sig_proc
->p_pid
;
2637 sig_proc
->p_xstat
= signum
;
2639 (void) task_resume_internal(sig_task
);
2642 * When processing a SIGCONT, we need to check
2643 * to see if there are signals pending that
2644 * were not delivered because we had been
2645 * previously stopped. If that's the case,
2646 * we need to thread_abort_safely() to trigger
2647 * interruption of the current system call to
2648 * cause their handlers to fire. If it's only
2649 * the SIGCONT, then don't wake up.
2651 if (((flavor
& (PSIG_VFORK
| PSIG_THREAD
)) == 0) && (((uth
->uu_siglist
& ~uth
->uu_sigmask
) & ~sig_proc
->p_sigignore
) & ~mask
)) {
2652 uth
->uu_siglist
&= ~mask
;
2653 sig_proc
->p_stat
= SRUN
;
2657 uth
->uu_siglist
&= ~mask
;
2658 sig_proc
->p_stat
= SRUN
;
2663 * A signal which has a default action of killing
2664 * the process, and for which there is no handler,
2665 * needs to act like SIGKILL
2667 if (((flavor
& (PSIG_VFORK
| PSIG_THREAD
)) == 0) && (action
== SIG_DFL
) && (prop
& SA_KILL
)) {
2668 sig_proc
->p_stat
= SRUN
;
2669 kret
= thread_abort(sig_thread
);
2670 update_thread_policy
= (kret
== KERN_SUCCESS
);
2672 if (uth
->uu_exit_reason
== OS_REASON_NULL
) {
2673 if (signal_reason
== OS_REASON_NULL
) {
2674 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC
, BSD_PROC_EXITREASON_CREATE
) | DBG_FUNC_NONE
,
2675 sig_proc
->p_pid
, OS_REASON_SIGNAL
, signum
, 0, 0);
2677 signal_reason
= build_signal_reason(signum
, NULL
);
2680 os_reason_ref(signal_reason
);
2681 set_thread_exit_reason(sig_thread
, signal_reason
, TRUE
);
2688 * All other signals wake up the process, but don't
2691 if (sig_proc
->p_stat
== SSTOP
) {
2701 * If we're being traced (possibly because someone attached us
2702 * while we were stopped), check for a signal from the debugger.
2704 if (sig_proc
->p_stat
== SSTOP
) {
2705 if ((sig_proc
->p_lflag
& P_LTRACED
) != 0 && sig_proc
->p_xstat
!= 0) {
2706 uth
->uu_siglist
|= sigmask(sig_proc
->p_xstat
);
2709 if ((flavor
& PSIG_VFORK
) != 0) {
2710 sig_proc
->p_stat
= SRUN
;
2714 * setrunnable(p) in BSD and
2715 * Wake up the thread if it is interruptible.
2717 sig_proc
->p_stat
= SRUN
;
2718 if ((flavor
& PSIG_VFORK
) == 0) {
2719 thread_abort_safely(sig_thread
);
2724 if (update_thread_policy
) {
2726 * Update the thread policy to heading to terminate, increase priority if
2727 * necessary. This needs to be done before we drop the proc lock because the
2728 * thread can take the fatal signal once it's dropped.
2730 proc_set_thread_policy(sig_thread
, TASK_POLICY_ATTRIBUTE
, TASK_POLICY_TERMINATED
, TASK_POLICY_ENABLE
);
2733 proc_unlock(sig_proc
);
2736 os_reason_free(signal_reason
);
2737 if ((flavor
& PSIG_LOCKED
) == 0) {
2738 proc_signalend(sig_proc
, 0);
2743 psignal(proc_t p
, int signum
)
2745 psignal_internal(p
, NULL
, NULL
, 0, signum
, NULL
);
2749 psignal_with_reason(proc_t p
, int signum
, struct os_reason
*signal_reason
)
2751 psignal_internal(p
, NULL
, NULL
, 0, signum
, signal_reason
);
2755 psignal_sigkill_with_reason(struct proc
*p
, struct os_reason
*signal_reason
)
2757 psignal_internal(p
, NULL
, NULL
, 0, SIGKILL
, signal_reason
);
2761 psignal_locked(proc_t p
, int signum
)
2763 psignal_internal(p
, NULL
, NULL
, PSIG_LOCKED
, signum
, NULL
);
2767 psignal_vfork_with_reason(proc_t p
, task_t new_task
, thread_t thread
, int signum
, struct os_reason
*signal_reason
)
2769 psignal_internal(p
, new_task
, thread
, PSIG_VFORK
, signum
, signal_reason
);
2774 psignal_vfork(proc_t p
, task_t new_task
, thread_t thread
, int signum
)
2776 psignal_internal(p
, new_task
, thread
, PSIG_VFORK
, signum
, NULL
);
2780 psignal_uthread(thread_t thread
, int signum
)
2782 psignal_internal(PROC_NULL
, TASK_NULL
, thread
, PSIG_THREAD
, signum
, NULL
);
2785 /* same as psignal(), but prefer delivery to 'thread' if possible */
2787 psignal_try_thread(proc_t p
, thread_t thread
, int signum
)
2789 psignal_internal(p
, NULL
, thread
, PSIG_TRY_THREAD
, signum
, NULL
);
2793 psignal_try_thread_with_reason(proc_t p
, thread_t thread
, int signum
, struct os_reason
*signal_reason
)
2795 psignal_internal(p
, TASK_NULL
, thread
, PSIG_TRY_THREAD
, signum
, signal_reason
);
2799 psignal_thread_with_reason(proc_t p
, thread_t thread
, int signum
, struct os_reason
*signal_reason
)
2801 psignal_internal(p
, TASK_NULL
, thread
, PSIG_THREAD
, signum
, signal_reason
);
2805 * If the current process has received a signal (should be caught or cause
2806 * termination, should interrupt current syscall), return the signal number.
2807 * Stop signals with default action are processed immediately, then cleared;
2808 * they aren't returned. This is checked after each entry to the system for
2809 * a syscall or trap (though this can usually be done without calling issignal
2810 * by checking the pending signal masks in the CURSIG macro.) The normal call
2813 * while (signum = CURSIG(curproc))
2817 issignal_locked(proc_t p
)
2819 int signum
, mask
, prop
, sigbits
;
2821 struct uthread
* ut
;
2823 kauth_cred_t my_cred
;
2827 cur_act
= current_thread();
2830 if (rdebug_proc
&& (p
== rdebug_proc
)) {
2833 #endif /* SIGNAL_DEBUG */
2836 * Try to grab the signal lock.
2838 if (sig_try_locked(p
) <= 0) {
2842 proc_signalstart(p
, 1);
2844 ut
= get_bsdthread_info(cur_act
);
2846 sigbits
= ut
->uu_siglist
& ~ut
->uu_sigmask
;
2848 if (p
->p_lflag
& P_LPPWAIT
) {
2849 sigbits
&= ~stopsigmask
;
2851 if (sigbits
== 0) { /* no signal to send */
2856 signum
= ffs((unsigned int)sigbits
);
2857 mask
= sigmask(signum
);
2858 prop
= sigprop
[signum
];
2861 * We should see pending but ignored signals
2862 * only if P_LTRACED was on when they were posted.
2864 if (mask
& p
->p_sigignore
&& (p
->p_lflag
& P_LTRACED
) == 0) {
2865 ut
->uu_siglist
&= ~mask
;
2869 if (p
->p_lflag
& P_LTRACED
&& (p
->p_lflag
& P_LPPWAIT
) == 0) {
2871 * If traced, deliver the signal to the debugger, and wait to be
2875 p
->p_xstat
= signum
;
2877 if (p
->p_lflag
& P_LSIGEXC
) {
2879 p
->sigwait_thread
= cur_act
;
2881 OSBitAndAtomic(~((uint32_t)P_CONTINUED
), &p
->p_flag
);
2882 p
->p_lflag
&= ~P_LWAITED
;
2883 ut
->uu_siglist
&= ~mask
; /* clear the current signal from the pending list */
2884 proc_signalend(p
, 1);
2886 do_bsdexception(EXC_SOFTWARE
, EXC_SOFT_SIGNAL
, signum
);
2888 proc_signalstart(p
, 1);
2891 my_cred
= kauth_cred_proc_ref(p
);
2892 r_uid
= kauth_cred_getruid(my_cred
);
2893 kauth_cred_unref(&my_cred
);
2895 pp
= proc_parentholdref(p
);
2896 if (pp
!= PROC_NULL
) {
2899 pp
->si_pid
= p
->p_pid
;
2900 pp
->p_xhighbits
= p
->p_xhighbits
;
2902 pp
->si_status
= p
->p_xstat
;
2903 pp
->si_code
= CLD_TRAPPED
;
2910 * XXX Have to really stop for debuggers;
2911 * XXX stop() doesn't do the right thing.
2914 task_suspend_internal(task
);
2918 p
->sigwait_thread
= cur_act
;
2920 OSBitAndAtomic(~((uint32_t)P_CONTINUED
), &p
->p_flag
);
2921 p
->p_lflag
&= ~P_LWAITED
;
2922 ut
->uu_siglist
&= ~mask
;
2924 proc_signalend(p
, 1);
2927 if (pp
!= PROC_NULL
) {
2928 psignal(pp
, SIGCHLD
);
2930 wakeup((caddr_t
)pp
);
2931 proc_parentdropref(pp
, 1);
2935 assert_wait((caddr_t
)&p
->sigwait
, (THREAD_INTERRUPTIBLE
));
2936 thread_block(THREAD_CONTINUE_NULL
);
2938 proc_signalstart(p
, 1);
2942 p
->sigwait_thread
= NULL
;
2943 wakeup((caddr_t
)&p
->sigwait_thread
);
2945 if (signum
== SIGKILL
|| ut
->uu_siglist
& sigmask(SIGKILL
)) {
2947 * Deliver a pending sigkill even if it's not the current signal.
2948 * Necessary for PT_KILL, which should not be delivered to the
2949 * debugger, but we can't differentiate it from any other KILL.
2955 /* We may have to quit. */
2956 if (thread_should_abort(current_thread())) {
2962 * If parent wants us to take the signal,
2963 * then it will leave it in p->p_xstat;
2964 * otherwise we just look for signals again.
2966 signum
= p
->p_xstat
;
2972 * Put the new signal into p_siglist. If the
2973 * signal is being masked, look for other signals.
2975 mask
= sigmask(signum
);
2976 ut
->uu_siglist
|= mask
;
2977 if (ut
->uu_sigmask
& mask
) {
2983 * Decide whether the signal should be returned.
2984 * Return the signal's number, or fall through
2985 * to clear it from the pending mask.
2988 switch ((long)p
->p_sigacts
->ps_sigact
[signum
]) {
2991 * If there is a pending stop signal to process
2992 * with default action, stop here,
2993 * then clear the signal. However,
2994 * if process is member of an orphaned
2995 * process group, ignore tty stop signals.
2997 if (prop
& SA_STOP
) {
3002 if (p
->p_lflag
& P_LTRACED
||
3003 (pg
->pg_jobc
== 0 &&
3004 prop
& SA_TTYSTOP
)) {
3007 break; /* ignore signal */
3010 if (p
->p_stat
!= SSTOP
) {
3012 p
->p_xstat
= signum
;
3014 p
->p_lflag
&= ~P_LWAITED
;
3017 pp
= proc_parentholdref(p
);
3019 if ((pp
!= PROC_NULL
) && ((pp
->p_flag
& P_NOCLDSTOP
) == 0)) {
3020 my_cred
= kauth_cred_proc_ref(p
);
3021 r_uid
= kauth_cred_getruid(my_cred
);
3022 kauth_cred_unref(&my_cred
);
3025 pp
->si_pid
= p
->p_pid
;
3026 pp
->si_status
= WEXITSTATUS(p
->p_xstat
);
3027 pp
->si_code
= CLD_STOPPED
;
3031 psignal(pp
, SIGCHLD
);
3033 if (pp
!= PROC_NULL
) {
3034 proc_parentdropref(pp
, 0);
3039 } else if (prop
& SA_IGNORE
) {
3041 * Except for SIGCONT, shouldn't get here.
3042 * Default action is to ignore; drop it.
3044 break; /* ignore signal */
3051 * Masking above should prevent us ever trying
3052 * to take action on an ignored signal other
3053 * than SIGCONT, unless process is traced.
3055 if ((prop
& SA_CONT
) == 0 &&
3056 (p
->p_lflag
& P_LTRACED
) == 0) {
3057 printf("issignal\n");
3059 break; /* ignore signal */
3062 /* This signal has an action - deliver it. */
3066 /* If we dropped through, the signal was ignored - remove it from pending list. */
3067 ut
->uu_siglist
&= ~mask
;
3073 ut
->uu_siglist
&= ~mask
;
3077 proc_signalend(p
, 1);
3081 /* called from _sleep */
3085 int signum
, mask
, prop
, sigbits
;
3087 struct uthread
* ut
;
3091 cur_act
= current_thread();
3093 ut
= get_bsdthread_info(cur_act
);
3095 if (ut
->uu_siglist
== 0) {
3099 if (((ut
->uu_siglist
& ~ut
->uu_sigmask
) == 0) && ((p
->p_lflag
& P_LTRACED
) == 0)) {
3103 sigbits
= ut
->uu_siglist
& ~ut
->uu_sigmask
;
3106 if (p
->p_lflag
& P_LPPWAIT
) {
3107 sigbits
&= ~stopsigmask
;
3109 if (sigbits
== 0) { /* no signal to send */
3113 signum
= ffs((unsigned int)sigbits
);
3114 mask
= sigmask(signum
);
3115 prop
= sigprop
[signum
];
3116 sigbits
&= ~mask
; /* take the signal out */
3119 * We should see pending but ignored signals
3120 * only if P_LTRACED was on when they were posted.
3122 if (mask
& p
->p_sigignore
&& (p
->p_lflag
& P_LTRACED
) == 0) {
3126 if (p
->p_lflag
& P_LTRACED
&& (p
->p_lflag
& P_LPPWAIT
) == 0) {
3131 * Decide whether the signal should be returned.
3132 * Return the signal's number, or fall through
3133 * to clear it from the pending mask.
3136 switch ((long)p
->p_sigacts
->ps_sigact
[signum
]) {
3139 * If there is a pending stop signal to process
3140 * with default action, stop here,
3141 * then clear the signal. However,
3142 * if process is member of an orphaned
3143 * process group, ignore tty stop signals.
3145 if (prop
& SA_STOP
) {
3150 if (p
->p_lflag
& P_LTRACED
||
3151 (pg
->pg_jobc
== 0 &&
3152 prop
& SA_TTYSTOP
)) {
3154 break; /* == ignore */
3159 } else if (prop
& SA_IGNORE
) {
3161 * Except for SIGCONT, shouldn't get here.
3162 * Default action is to ignore; drop it.
3164 break; /* == ignore */
3172 * Masking above should prevent us ever trying
3173 * to take action on an ignored signal other
3174 * than SIGCONT, unless process is traced.
3176 if ((prop
& SA_CONT
) == 0 &&
3177 (p
->p_lflag
& P_LTRACED
) == 0) {
3178 printf("issignal\n");
3180 break; /* == ignore */
3184 * This signal has an action, let
3185 * postsig() process it.
3194 * Put the argument process into the stopped state and notify the parent
3195 * via wakeup. Signals are handled elsewhere. The process must not be
3199 stop(proc_t p
, proc_t parent
)
3201 OSBitAndAtomic(~((uint32_t)P_CONTINUED
), &p
->p_flag
);
3202 if ((parent
!= PROC_NULL
) && (parent
->p_stat
!= SSTOP
)) {
3204 wakeup((caddr_t
)parent
);
3207 (void) task_suspend_internal(p
->task
);
3211 * Take the action for the specified signal
3212 * from the current set of pending signals.
3215 postsig_locked(int signum
)
3217 proc_t p
= current_proc();
3218 struct sigacts
*ps
= p
->p_sigacts
;
3219 user_addr_t catcher
;
3221 int mask
, returnmask
;
3222 struct uthread
* ut
;
3223 os_reason_t ut_exit_reason
= OS_REASON_NULL
;
3230 * This must be called on master cpu
3232 if (cpu_number() != master_cpu
) {
3233 panic("psig not on master");
3238 * Try to grab the signal lock.
3240 if (sig_try_locked(p
) <= 0) {
3244 proc_signalstart(p
, 1);
3246 ut
= (struct uthread
*)get_bsdthread_info(current_thread());
3247 mask
= sigmask(signum
);
3248 ut
->uu_siglist
&= ~mask
;
3249 catcher
= ps
->ps_sigact
[signum
];
3250 if (catcher
== SIG_DFL
) {
3252 * Default catcher, where the default is to kill
3253 * the process. (Other cases were ignored above.)
3255 sig_lock_to_exit(p
);
3258 * exit_with_reason() below will consume a reference to the thread's exit reason, so we take another
3259 * reference so the thread still has one even after we call exit_with_reason(). The thread's reference will
3260 * ultimately be destroyed in uthread_cleanup().
3262 ut_exit_reason
= ut
->uu_exit_reason
;
3263 os_reason_ref(ut_exit_reason
);
3265 p
->p_acflag
|= AXSIG
;
3266 if (sigprop
[signum
] & SA_CORE
) {
3267 p
->p_sigacts
->ps_sig
= signum
;
3268 proc_signalend(p
, 1);
3271 if (coredump(p
, 0, 0) == 0) {
3272 signum
|= WCOREFLAG
;
3276 proc_signalend(p
, 1);
3281 bzero((caddr_t
)&(ut
->t_dtrace_siginfo
), sizeof(ut
->t_dtrace_siginfo
));
3283 ut
->t_dtrace_siginfo
.si_signo
= signum
;
3284 ut
->t_dtrace_siginfo
.si_pid
= p
->si_pid
;
3285 ut
->t_dtrace_siginfo
.si_uid
= p
->si_uid
;
3286 ut
->t_dtrace_siginfo
.si_status
= WEXITSTATUS(p
->si_status
);
3288 /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
3290 case SIGILL
: case SIGBUS
: case SIGSEGV
: case SIGFPE
: case SIGTRAP
:
3291 DTRACE_PROC2(fault
, int, (int)(ut
->uu_code
), siginfo_t
*, &(ut
->t_dtrace_siginfo
));
3298 DTRACE_PROC3(signal__handle
, int, signum
, siginfo_t
*, &(ut
->t_dtrace_siginfo
),
3299 void (*)(void), SIG_DFL
);
3302 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC
, BSD_PROC_FRCEXIT
) | DBG_FUNC_NONE
,
3303 p
->p_pid
, W_EXITCODE(0, signum
), 3, 0, 0);
3305 exit_with_reason(p
, W_EXITCODE(0, signum
), (int *)NULL
, TRUE
, TRUE
, 0, ut_exit_reason
);
3311 * If we get here, the signal must be caught.
3314 if (catcher
== SIG_IGN
|| (ut
->uu_sigmask
& mask
)) {
3316 "postsig: processing masked or ignored signal\n");
3321 * Set the new mask value and also defer further
3322 * occurences of this signal.
3324 * Special case: user has done a sigpause. Here the
3325 * current mask is not of interest, but rather the
3326 * mask from before the sigpause is what we want
3327 * restored after the signal processing is completed.
3329 if (ut
->uu_flag
& UT_SAS_OLDMASK
) {
3330 returnmask
= ut
->uu_oldmask
;
3331 ut
->uu_flag
&= ~UT_SAS_OLDMASK
;
3334 returnmask
= ut
->uu_sigmask
;
3336 ut
->uu_sigmask
|= ps
->ps_catchmask
[signum
];
3337 if ((ps
->ps_signodefer
& mask
) == 0) {
3338 ut
->uu_sigmask
|= mask
;
3340 sigset_t siginfo
= ps
->ps_siginfo
;
3341 if ((signum
!= SIGILL
) && (signum
!= SIGTRAP
) && (ps
->ps_sigreset
& mask
)) {
3342 if ((signum
!= SIGCONT
) && (sigprop
[signum
] & SA_IGNORE
)) {
3343 p
->p_sigignore
|= mask
;
3345 ps
->ps_sigact
[signum
] = SIG_DFL
;
3346 ps
->ps_siginfo
&= ~mask
;
3347 ps
->ps_signodefer
&= ~mask
;
3350 if (ps
->ps_sig
!= signum
) {
3356 OSIncrementAtomicLong(&p
->p_stats
->p_ru
.ru_nsignals
);
3357 sendsig(p
, catcher
, signum
, returnmask
, code
, siginfo
);
3359 proc_signalend(p
, 1);
3363 * Attach a signal knote to the list of knotes for this process.
3365 * Signal knotes share the knote list with proc knotes. This
3366 * could be avoided by using a signal-specific knote list, but
3367 * probably isn't worth the trouble.
3371 filt_sigattach(struct knote
*kn
, __unused
struct kevent_qos_s
*kev
)
3373 proc_t p
= current_proc(); /* can attach only to oneself */
3378 kn
->kn_flags
|= EV_CLEAR
; /* automatically set */
3379 kn
->kn_sdata
= 0; /* incoming data is ignored */
3381 KNOTE_ATTACH(&p
->p_klist
, kn
);
3383 proc_klist_unlock();
3385 /* edge-triggered events can't have fired before we attached */
3390 * remove the knote from the process list, if it hasn't already
3391 * been removed by exit processing.
3395 filt_sigdetach(struct knote
*kn
)
3397 proc_t p
= kn
->kn_proc
;
3401 KNOTE_DETACH(&p
->p_klist
, kn
);
3402 proc_klist_unlock();
3406 * Post an event to the signal filter. Because we share the same list
3407 * as process knotes, we have to filter out and handle only signal events.
3409 * We assume that we process fdfree() before we post the NOTE_EXIT for
3410 * a process during exit. Therefore, since signal filters can only be
3411 * set up "in-process", we should have already torn down the kqueue
3412 * hosting the EVFILT_SIGNAL knote and should never see NOTE_EXIT.
3415 filt_signal(struct knote
*kn
, long hint
)
3417 if (hint
& NOTE_SIGNAL
) {
3418 hint
&= ~NOTE_SIGNAL
;
3420 if (kn
->kn_id
== (unsigned int)hint
) {
3423 } else if (hint
& NOTE_EXIT
) {
3424 panic("filt_signal: detected NOTE_EXIT event");
3427 return kn
->kn_hook32
!= 0;
3431 filt_signaltouch(struct knote
*kn
, struct kevent_qos_s
*kev
)
3440 * No data to save - just capture if it is already fired
3442 res
= (kn
->kn_hook32
> 0);
3444 proc_klist_unlock();
3450 filt_signalprocess(struct knote
*kn
, struct kevent_qos_s
*kev
)
3455 * Snapshot the event data.
3459 if (kn
->kn_hook32
) {
3460 knote_fill_kevent(kn
, kev
, kn
->kn_hook32
);
3464 proc_klist_unlock();
3469 bsd_ast(thread_t thread
)
3471 proc_t p
= current_proc();
3472 struct uthread
*ut
= get_bsdthread_info(thread
);
3474 static int bsd_init_done
= 0;
3480 /* don't run bsd ast on exec copy or exec'ed tasks */
3481 if (task_did_exec(current_task()) || task_is_exec_copy(current_task())) {
3485 if (timerisset(&p
->p_vtimer_user
.it_value
)) {
3488 task_vtimer_update(p
->task
, TASK_VTIMER_USER
, µsecs
);
3490 if (!itimerdecr(p
, &p
->p_vtimer_user
, microsecs
)) {
3491 if (timerisset(&p
->p_vtimer_user
.it_value
)) {
3492 task_vtimer_set(p
->task
, TASK_VTIMER_USER
);
3494 task_vtimer_clear(p
->task
, TASK_VTIMER_USER
);
3497 psignal_try_thread(p
, thread
, SIGVTALRM
);
3501 if (timerisset(&p
->p_vtimer_prof
.it_value
)) {
3504 task_vtimer_update(p
->task
, TASK_VTIMER_PROF
, µsecs
);
3506 if (!itimerdecr(p
, &p
->p_vtimer_prof
, microsecs
)) {
3507 if (timerisset(&p
->p_vtimer_prof
.it_value
)) {
3508 task_vtimer_set(p
->task
, TASK_VTIMER_PROF
);
3510 task_vtimer_clear(p
->task
, TASK_VTIMER_PROF
);
3513 psignal_try_thread(p
, thread
, SIGPROF
);
3517 if (timerisset(&p
->p_rlim_cpu
)) {
3520 task_vtimer_update(p
->task
, TASK_VTIMER_RLIM
, (uint32_t *) &tv
.tv_usec
);
3523 if (p
->p_rlim_cpu
.tv_sec
> 0 || p
->p_rlim_cpu
.tv_usec
> tv
.tv_usec
) {
3525 timersub(&p
->p_rlim_cpu
, &tv
, &p
->p_rlim_cpu
);
3528 timerclear(&p
->p_rlim_cpu
);
3531 task_vtimer_clear(p
->task
, TASK_VTIMER_RLIM
);
3533 psignal_try_thread(p
, thread
, SIGXCPU
);
3538 if (ut
->t_dtrace_sig
) {
3539 uint8_t dt_action_sig
= ut
->t_dtrace_sig
;
3540 ut
->t_dtrace_sig
= 0;
3541 psignal(p
, dt_action_sig
);
3544 if (ut
->t_dtrace_stop
) {
3545 ut
->t_dtrace_stop
= 0;
3547 p
->p_dtrace_stop
= 1;
3549 (void)task_suspend_internal(p
->task
);
3552 if (ut
->t_dtrace_resumepid
) {
3553 proc_t resumeproc
= proc_find((int)ut
->t_dtrace_resumepid
);
3554 ut
->t_dtrace_resumepid
= 0;
3555 if (resumeproc
!= PROC_NULL
) {
3556 proc_lock(resumeproc
);
3557 /* We only act on processes stopped by dtrace */
3558 if (resumeproc
->p_dtrace_stop
) {
3559 resumeproc
->p_dtrace_stop
= 0;
3560 proc_unlock(resumeproc
);
3561 task_resume_internal(resumeproc
->task
);
3563 proc_unlock(resumeproc
);
3565 proc_rele(resumeproc
);
3569 #endif /* CONFIG_DTRACE */
3572 if (CHECK_SIGNALS(p
, current_thread(), ut
)) {
3573 while ((signum
= issignal_locked(p
))) {
3574 postsig_locked(signum
);
3579 #ifdef CONFIG_32BIT_TELEMETRY
3580 if (task_consume_32bit_log_flag(p
->task
)) {
3581 proc_log_32bit_telemetry(p
);
3583 #endif /* CONFIG_32BIT_TELEMETRY */
3585 if (!bsd_init_done
) {
3591 /* ptrace set runnable */
3593 pt_setrunnable(proc_t p
)
3599 if (p
->p_lflag
& P_LTRACED
) {
3604 wakeup((caddr_t
)&(p
->sigwait
));
3605 if ((p
->p_lflag
& P_LSIGEXC
) == 0) { // 5878479
3618 mach_exception_data_type_t codes
[EXCEPTION_CODE_MAX
];
3622 return bsd_exception(exc
, codes
, 2);
3626 proc_pendingsignals(proc_t p
, sigset_t mask
)
3628 struct uthread
* uth
;
3633 /* If the process is in proc exit return no signal info */
3634 if (p
->p_lflag
& P_LPEXIT
) {
3638 if ((p
->p_lflag
& P_LINVFORK
) && p
->p_vforkact
) {
3640 uth
= (struct uthread
*)get_bsdthread_info(th
);
3642 bits
= (((uth
->uu_siglist
& ~uth
->uu_sigmask
) & ~p
->p_sigignore
) & mask
);
3648 TAILQ_FOREACH(uth
, &p
->p_uthlist
, uu_list
) {
3649 bits
|= (((uth
->uu_siglist
& ~uth
->uu_sigmask
) & ~p
->p_sigignore
) & mask
);
3657 thread_issignal(proc_t p
, thread_t th
, sigset_t mask
)
3659 struct uthread
* uth
;
3663 uth
= (struct uthread
*)get_bsdthread_info(th
);
3665 bits
= (((uth
->uu_siglist
& ~uth
->uu_sigmask
) & ~p
->p_sigignore
) & mask
);
3672 * Allow external reads of the sigprop array.
3675 hassigprop(int sig
, int prop
)
3677 return sigprop
[sig
] & prop
;
3681 pgsigio(pid_t pgid
, int sig
)
3683 proc_t p
= PROC_NULL
;
3686 gsignal(-(pgid
), sig
);
3687 } else if (pgid
> 0 && (p
= proc_find(pgid
)) != 0) {
3690 if (p
!= PROC_NULL
) {
3696 proc_signalstart(proc_t p
, int locked
)
3702 if (p
->p_signalholder
== current_thread()) {
3703 panic("proc_signalstart: thread attempting to signal a process for which it holds the signal lock");
3707 while ((p
->p_lflag
& P_LINSIGNAL
) == P_LINSIGNAL
) {
3708 msleep(&p
->p_sigmask
, &p
->p_mlock
, 0, "proc_signstart", NULL
);
3712 p
->p_lflag
|= P_LINSIGNAL
;
3713 p
->p_signalholder
= current_thread();
3720 proc_signalend(proc_t p
, int locked
)
3725 p
->p_lflag
&= ~P_LINSIGNAL
;
3727 if (p
->p_sigwaitcnt
> 0) {
3728 wakeup(&p
->p_sigmask
);
3731 p
->p_signalholder
= NULL
;
3738 sig_lock_to_exit(proc_t p
)
3740 thread_t self
= current_thread();
3742 p
->exit_thread
= self
;
3746 task_wait(p
->task
, FALSE
);
3752 sig_try_locked(proc_t p
)
3754 thread_t self
= current_thread();
3756 while (p
->sigwait
|| p
->exit_thread
) {
3757 if (p
->exit_thread
) {
3760 msleep((caddr_t
)&p
->sigwait_thread
, &p
->p_mlock
, PCATCH
| PDROP
, 0, 0);
3761 if (thread_should_abort(self
)) {
3763 * Terminate request - clean up.