/*
 * Copyright (c) 1995-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)kern_sig.c  8.7 (Berkeley) 4/18/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
#define SIGPROP         /* include signal properties table */
#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/systm.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/file_internal.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/kdebug.h>
#include <sys/reason.h>
#include <sys/mount.h>
#include <sys/sysproto.h>

#include <security/audit/audit.h>

#include <machine/spl.h>

#include <kern/cpu_number.h>

#include <sys/user.h>           /* for coredump */
#include <kern/ast.h>           /* for APC support */
#include <kern/kalloc.h>
#include <kern/task.h>          /* extern void   *get_bsdtask_info(task_t); */
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/thread_call.h>
#include <kern/policy_internal.h>

#include <mach/exception.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <libkern/OSAtomic.h>

#include <sys/codesign.h>
/* Missing prototypes that Mach should export */
extern int thread_enable_fpe(thread_t act, int onoff);
extern thread_t port_name_to_thread(mach_port_name_t port_name);
extern kern_return_t get_signalact(task_t, thread_t *, int);
extern unsigned int get_useraddr(void);
extern void doexception(int exc, mach_exception_code_t code,
                mach_exception_subcode_t sub);

static void stop(proc_t, proc_t);
int cansignal(proc_t, kauth_cred_t, proc_t, int, int);
int killpg1(proc_t, int, int, int, int);
kern_return_t do_bsdexception(int, int, int);
void __posix_sem_syscall_return(kern_return_t);
char *proc_name_address(void *p);
/* implementations in osfmk/kern/sync_sema.c. We do not want port.h in this scope, so void * them */
kern_return_t semaphore_timedwait_signal_trap_internal(mach_port_name_t, mach_port_name_t, unsigned int, clock_res_t, void (*)(kern_return_t));
kern_return_t semaphore_timedwait_trap_internal(mach_port_name_t, unsigned int, clock_res_t, void (*)(kern_return_t));
kern_return_t semaphore_wait_signal_trap_internal(mach_port_name_t, mach_port_name_t, void (*)(kern_return_t));
kern_return_t semaphore_wait_trap_internal(mach_port_name_t, void (*)(kern_return_t));
static int      filt_sigattach(struct knote *kn);
static void     filt_sigdetach(struct knote *kn);
static int      filt_signal(struct knote *kn, long hint);
static int      filt_signaltouch(struct knote *kn, struct kevent_internal_s *kev);
static int      filt_signalprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);

struct filterops sig_filtops = {
        .f_attach = filt_sigattach,
        .f_detach = filt_sigdetach,
        .f_event = filt_signal,
        .f_touch = filt_signaltouch,
        .f_process = filt_signalprocess,
};
/* structures and fns for killpg1 iteration callback and filters */
struct killpg1_filtargs {
        int posix;
        proc_t cp;
};

struct killpg1_iterargs {
        proc_t cp;
        kauth_cred_t uc;
        int signum;
        int * nfoundp;
        int zombie;
};

static int killpg1_filt(proc_t p, void * arg);
static int killpg1_pgrpfilt(proc_t p, __unused void * arg);
static int killpg1_callback(proc_t p, void * arg);

static int pgsignal_filt(proc_t p, void * arg);
static int pgsignal_callback(proc_t p, void * arg);
static kern_return_t get_signalthread(proc_t, int, thread_t *);
/* flags for psignal_internal */
#define PSIG_LOCKED     0x1
#define PSIG_VFORK      0x2
#define PSIG_THREAD     0x4
#define PSIG_TRY_THREAD 0x8

static os_reason_t build_signal_reason(int signum, const char *procname);
static void psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum, os_reason_t signal_reason);
/*
 * NOTE: Source and target may *NOT* overlap! (target is smaller)
 */
static void
sigaltstack_kern_to_user32(struct kern_sigaltstack *in, struct user32_sigaltstack *out)
{
        out->ss_sp = CAST_DOWN_EXPLICIT(user32_addr_t, in->ss_sp);
        out->ss_size = CAST_DOWN_EXPLICIT(user32_size_t, in->ss_size);
        out->ss_flags = in->ss_flags;
}

static void
sigaltstack_kern_to_user64(struct kern_sigaltstack *in, struct user64_sigaltstack *out)
{
        out->ss_sp = in->ss_sp;
        out->ss_size = in->ss_size;
        out->ss_flags = in->ss_flags;
}

/*
 * NOTE: Source and target are permitted to overlap! (source is smaller);
 * this works because we copy fields in order from the end of the struct to
 * the beginning.
 */
static void
sigaltstack_user32_to_kern(struct user32_sigaltstack *in, struct kern_sigaltstack *out)
{
        out->ss_flags = in->ss_flags;
        out->ss_size = in->ss_size;
        out->ss_sp = CAST_USER_ADDR_T(in->ss_sp);
}

static void
sigaltstack_user64_to_kern(struct user64_sigaltstack *in, struct kern_sigaltstack *out)
{
        out->ss_flags = in->ss_flags;
        out->ss_size = in->ss_size;
        out->ss_sp = in->ss_sp;
}
static void
sigaction_kern_to_user32(struct kern_sigaction *in, struct user32_sigaction *out)
{
        /* This assumes 32 bit __sa_handler is of type sig_t */
        out->__sigaction_u.__sa_handler = CAST_DOWN_EXPLICIT(user32_addr_t, in->__sigaction_u.__sa_handler);
        out->sa_mask = in->sa_mask;
        out->sa_flags = in->sa_flags;
}

static void
sigaction_kern_to_user64(struct kern_sigaction *in, struct user64_sigaction *out)
{
        /* This assumes 32 bit __sa_handler is of type sig_t */
        out->__sigaction_u.__sa_handler = in->__sigaction_u.__sa_handler;
        out->sa_mask = in->sa_mask;
        out->sa_flags = in->sa_flags;
}

static void
__sigaction_user32_to_kern(struct __user32_sigaction *in, struct __kern_sigaction *out)
{
        out->__sigaction_u.__sa_handler = CAST_USER_ADDR_T(in->__sigaction_u.__sa_handler);
        out->sa_tramp = CAST_USER_ADDR_T(in->sa_tramp);
        out->sa_mask = in->sa_mask;
        out->sa_flags = in->sa_flags;
}

static void
__sigaction_user64_to_kern(struct __user64_sigaction *in, struct __kern_sigaction *out)
{
        out->__sigaction_u.__sa_handler = in->__sigaction_u.__sa_handler;
        out->sa_tramp = in->sa_tramp;
        out->sa_mask = in->sa_mask;
        out->sa_flags = in->sa_flags;
}
void ram_printf(int);
unsigned int rdebug_proc = 0;
#endif /* SIGNAL_DEBUG */

static void
signal_setast(thread_t sig_actthread)
{
        act_set_astbsd(sig_actthread);
}
/*
 * Can process p, with ucred uc, send the signal signum to process q?
 * uc is refcounted by the caller so internal fields can be used safely
 * when called with zombie arg, list lock is held
 */
cansignal(proc_t p, kauth_cred_t uc, proc_t q, int signum, int zombie)
        kauth_cred_t my_cred;
        struct session * p_sessp = SESSION_NULL;
        struct session * q_sessp = SESSION_NULL;

        error = mac_proc_check_signal(p, q, signum);

        /* you can signal yourself */

        /* you can't send launchd SIGKILL, even if root */
        if (signum == SIGKILL && q == initproc)

        if (!suser(uc, NULL))
                return (1);             /* root can always signal */

        if (p->p_pgrp != PGRP_NULL)
                p_sessp = p->p_pgrp->pg_session;
        if (q->p_pgrp != PGRP_NULL)
                q_sessp = q->p_pgrp->pg_session;

        if (signum == SIGCONT && q_sessp == p_sessp) {
                return (1);             /* SIGCONT in session */

        /*
         * If the real or effective UID of the sender matches the real
         * or saved UID of the target, permit the signal to be sent.
         */
                my_cred = kauth_cred_proc_ref(q);
                my_cred = proc_ucred(q);

        if (kauth_cred_getruid(uc) == kauth_cred_getruid(my_cred) ||
            kauth_cred_getruid(uc) == kauth_cred_getsvuid(my_cred) ||
            kauth_cred_getuid(uc) == kauth_cred_getruid(my_cred) ||
            kauth_cred_getuid(uc) == kauth_cred_getsvuid(my_cred)) {
                        kauth_cred_unref(&my_cred);
                kauth_cred_unref(&my_cred);
/*
 * <rdar://problem/21952708> Some signals can be restricted from being handled,
 * forcing the default action for that signal. This behavior applies only to
 * non-root (EUID != 0) processes, and is configured with the "sigrestrict=x"
 * boot-arg:
 *
 *   0 (default): Disallow use of restricted signals. Trying to register a handler
 *              returns ENOTSUP, which userspace may use to take special action (e.g. abort).
 *   1: As above, but return EINVAL. Restricted signals behave similarly to SIGKILL.
 *   2: Usual POSIX semantics.
 */
unsigned sigrestrict_arg = 0;

sigrestrictmask(void)
        if (kauth_getuid() != 0 && sigrestrict_arg != 2) {
                return SIGRESTRICTMASK;

signal_is_restricted(proc_t p, int signum)
        if (sigmask(signum) & sigrestrictmask()) {
                if (sigrestrict_arg == 0 &&
                                task_get_apptype(p->task) == TASK_APPTYPE_APP_DEFAULT) {

signal_is_restricted(proc_t p, int signum)

#endif /* !PLATFORM_WatchOS */
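/*
 * Illustrative sketch (not part of the original file): how a userspace caller
 * might react to the ENOTSUP behavior described above, under the default
 * sigrestrict_arg of 0. The signal number and fallback are hypothetical.
 *
 *      struct sigaction sa = { .sa_handler = my_handler };
 *      if (sigaction(SIGSEGV, &sa, NULL) == -1 && errno == ENOTSUP) {
 *              // Restricted signal: the kernel forces the default action,
 *              // so skip installing the handler (or abort, as noted above).
 *      }
 */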
/*
 * Notes:       Uses current thread as a parameter to inform PPC to enable
 *              FPU exceptions via setsigvec(); this operation is not proxy
 *              safe!
 */
sigaction(proc_t p, struct sigaction_args *uap, __unused int32_t *retval)
        struct kern_sigaction vec;
        struct __kern_sigaction __vec;

        struct kern_sigaction *sa = &vec;
        struct sigacts *ps = p->p_sigacts;

        signum = uap->signum;
        if (signum <= 0 || signum >= NSIG ||
                        signum == SIGKILL || signum == SIGSTOP)

                if (IS_64BIT_PROCESS(p)) {
                        struct __user64_sigaction       __vec64;
                        error = copyin(uap->nsa, &__vec64, sizeof(__vec64));
                        __sigaction_user64_to_kern(&__vec64, &__vec);

                        struct __user32_sigaction       __vec32;
                        error = copyin(uap->nsa, &__vec32, sizeof(__vec32));
                        __sigaction_user32_to_kern(&__vec32, &__vec);

                __vec.sa_flags &= SA_USERSPACE_MASK; /* Only pass on valid sa_flags */

                if ((__vec.sa_flags & SA_SIGINFO) || __vec.sa_handler != SIG_DFL) {
                        if ((error = signal_is_restricted(p, signum))) {
                                if (error == ENOTSUP) {
                                        printf("%s(%d): denied attempt to register action for signal %d\n",
                                                        proc_name_address(p), proc_pid(p), signum);

                sa->sa_handler = ps->ps_sigact[signum];
                sa->sa_mask = ps->ps_catchmask[signum];
                bit = sigmask(signum);

                if ((ps->ps_sigonstack & bit) != 0)
                        sa->sa_flags |= SA_ONSTACK;
                if ((ps->ps_sigintr & bit) == 0)
                        sa->sa_flags |= SA_RESTART;
                if (ps->ps_siginfo & bit)
                        sa->sa_flags |= SA_SIGINFO;
                if (ps->ps_signodefer & bit)
                        sa->sa_flags |= SA_NODEFER;
                if (ps->ps_64regset & bit)
                        sa->sa_flags |= SA_64REGSET;
                if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDSTOP))
                        sa->sa_flags |= SA_NOCLDSTOP;
                if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDWAIT))
                        sa->sa_flags |= SA_NOCLDWAIT;

                if (IS_64BIT_PROCESS(p)) {
                        struct user64_sigaction vec64;
                        sigaction_kern_to_user64(sa, &vec64);
                        error = copyout(&vec64, uap->osa, sizeof(vec64));

                        struct user32_sigaction vec32;
                        sigaction_kern_to_user32(sa, &vec32);
                        error = copyout(&vec32, uap->osa, sizeof(vec32));

                error = setsigvec(p, current_thread(), signum, &__vec, FALSE);
/* Routines to manipulate bits on all threads */
clear_procsiglist(proc_t p, int bit, boolean_t in_signalstart)
        struct uthread * uth;

                proc_signalstart(p, 1);

        if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
                thact = p->p_vforkact;
                uth = (struct uthread *)get_bsdthread_info(thact);
                        uth->uu_siglist &= ~bit;
                        proc_signalend(p, 1);

        TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
                uth->uu_siglist &= ~bit;
        p->p_siglist &= ~bit;
                proc_signalend(p, 1);

unblock_procsigmask(proc_t p, int bit)
        struct uthread * uth;

        proc_signalstart(p, 1);

        if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
                thact = p->p_vforkact;
                uth = (struct uthread *)get_bsdthread_info(thact);
                        uth->uu_sigmask &= ~bit;
                p->p_sigmask &= ~bit;
                proc_signalend(p, 1);

        TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
                uth->uu_sigmask &= ~bit;
        p->p_sigmask &= ~bit;
        proc_signalend(p, 1);

block_procsigmask(proc_t p, int bit)
        struct uthread * uth;

        proc_signalstart(p, 1);

        if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
                thact = p->p_vforkact;
                uth = (struct uthread *)get_bsdthread_info(thact);
                        uth->uu_sigmask |= bit;
                proc_signalend(p, 1);

        TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
                uth->uu_sigmask |= bit;
        proc_signalend(p, 1);

set_procsigmask(proc_t p, int bit)
        struct uthread * uth;

        proc_signalstart(p, 1);

        if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
                thact = p->p_vforkact;
                uth = (struct uthread *)get_bsdthread_info(thact);
                        uth->uu_sigmask = bit;
                proc_signalend(p, 1);

        TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
                uth->uu_sigmask = bit;
        proc_signalend(p, 1);
/* XXX should be static? */
/*
 * Notes:       The thread parameter is used in the PPC case to select the
 *              thread on which the floating point exception will be enabled
 *              or disabled.  We can't simply take current_thread(), since
 *              this is called from posix_spawn() on the not currently running
 *              process/thread pair.
 *
 *              We mark thread as unused to allow compilation without warning
 *              on non-PPC platforms.
 */
setsigvec(proc_t p, __unused thread_t thread, int signum, struct __kern_sigaction *sa, boolean_t in_sigstart)
        struct sigacts *ps = p->p_sigacts;

        assert(signum < NSIG);

        if ((signum == SIGKILL || signum == SIGSTOP) &&
                sa->sa_handler != SIG_DFL)

        bit = sigmask(signum);
        /*
         * Change setting atomically.
         */
        ps->ps_sigact[signum] = sa->sa_handler;
        ps->ps_trampact[signum] = sa->sa_tramp;
        ps->ps_catchmask[signum] = sa->sa_mask &~ sigcantmask;
        if (sa->sa_flags & SA_SIGINFO)
                ps->ps_siginfo |= bit;
        else
                ps->ps_siginfo &= ~bit;
        if (sa->sa_flags & SA_64REGSET)
                ps->ps_64regset |= bit;
        else
                ps->ps_64regset &= ~bit;
        if ((sa->sa_flags & SA_RESTART) == 0)
                ps->ps_sigintr |= bit;
        else
                ps->ps_sigintr &= ~bit;
        if (sa->sa_flags & SA_ONSTACK)
                ps->ps_sigonstack |= bit;
        else
                ps->ps_sigonstack &= ~bit;
        if (sa->sa_flags & SA_USERTRAMP)
                ps->ps_usertramp |= bit;
        else
                ps->ps_usertramp &= ~bit;
        if (sa->sa_flags & SA_RESETHAND)
                ps->ps_sigreset |= bit;
        else
                ps->ps_sigreset &= ~bit;
        if (sa->sa_flags & SA_NODEFER)
                ps->ps_signodefer |= bit;
        else
                ps->ps_signodefer &= ~bit;
        if (signum == SIGCHLD) {
                if (sa->sa_flags & SA_NOCLDSTOP)
                        OSBitOrAtomic(P_NOCLDSTOP, &p->p_flag);
                else
                        OSBitAndAtomic(~((uint32_t)P_NOCLDSTOP), &p->p_flag);
                if ((sa->sa_flags & SA_NOCLDWAIT) || (sa->sa_handler == SIG_IGN))
                        OSBitOrAtomic(P_NOCLDWAIT, &p->p_flag);
                else
                        OSBitAndAtomic(~((uint32_t)P_NOCLDWAIT), &p->p_flag);

        /*
         * Set bit in p_sigignore for signals that are set to SIG_IGN,
         * and for signals set to SIG_DFL where the default is to ignore.
         * However, don't put SIGCONT in p_sigignore,
         * as we have to restart the process.
         */
        if (sa->sa_handler == SIG_IGN ||
            (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) {

                clear_procsiglist(p, bit, in_sigstart);
                if (signum != SIGCONT)
                        p->p_sigignore |= bit;  /* easier in psignal */
                p->p_sigcatch &= ~bit;

                p->p_sigignore &= ~bit;
                if (sa->sa_handler == SIG_DFL)
                        p->p_sigcatch &= ~bit;
                else
                        p->p_sigcatch |= bit;
/*
 * Initialize signal state for process 0;
 * set to ignore signals that are ignored by default.
 */
        for (i = 1; i < NSIG; i++)
                if (sigprop[i] & SA_IGNORE && i != SIGCONT)
                        p->p_sigignore |= sigmask(i);

/*
 * Reset signals for an exec of the specified process.
 */
execsigs(proc_t p, thread_t thread)
        struct sigacts *ps = p->p_sigacts;

        ut = (struct uthread *)get_bsdthread_info(thread);

        /*
         * transfer saved signal states from the process
         * back to the current thread.
         *
         * NOTE: We do this without the process locked,
         * because we are guaranteed to be single-threaded
         * by this point in exec and the p_siglist is
         * only accessed by threads inside the process.
         */
        ut->uu_siglist |= p->p_siglist;

        /*
         * Reset caught signals.  Held signals remain held
         * through p_sigmask (unless they were caught,
         * and are now ignored by default).
         */
        while (p->p_sigcatch) {
                nc = ffs((long)p->p_sigcatch);
                p->p_sigcatch &= ~mask;
                if (sigprop[nc] & SA_IGNORE) {
                                p->p_sigignore |= mask;
                        ut->uu_siglist &= ~mask;
                ps->ps_sigact[nc] = SIG_DFL;

        /*
         * Reset stack state to the user stack.
         * Clear set of signals caught on the signal stack.
         */
        ut->uu_sigstk.ss_flags = SA_DISABLE;
        ut->uu_sigstk.ss_size = 0;
        ut->uu_sigstk.ss_sp = USER_ADDR_NULL;
        ut->uu_flag &= ~UT_ALTSTACK;

        ps->ps_sigonstack = 0;
/*
 * Manipulate signal mask.
 * Note that we receive new mask, not pointer,
 * and return old mask as return value;
 * the library stub does the rest.
 */
sigprocmask(proc_t p, struct sigprocmask_args *uap, __unused int32_t *retval)
        sigset_t oldmask, nmask;
        user_addr_t omask = uap->omask;

        ut = (struct uthread *)get_bsdthread_info(current_thread());
        oldmask = ut->uu_sigmask;

        if (uap->mask == USER_ADDR_NULL) {
                /* just want old mask */

        error = copyin(uap->mask, &nmask, sizeof(sigset_t));

                block_procsigmask(p, (nmask & ~sigcantmask));
                signal_setast(current_thread());

                unblock_procsigmask(p, (nmask & ~sigcantmask));
                signal_setast(current_thread());

                set_procsigmask(p, (nmask & ~sigcantmask));
                signal_setast(current_thread());

        if (!error && omask != USER_ADDR_NULL)
                copyout(&oldmask, omask, sizeof(sigset_t));

sigpending(__unused proc_t p, struct sigpending_args *uap, __unused int32_t *retval)
        ut = (struct uthread *)get_bsdthread_info(current_thread());
        pendlist = ut->uu_siglist;

                copyout(&pendlist, uap->osv, sizeof(sigset_t));
/*
 * Suspend process until signal, providing mask to be set
 * in the meantime.  Note nonstandard calling convention:
 * libc stub passes mask, not pointer, to save a copyin.
 */
sigcontinue(__unused int error)
//      struct uthread *ut = get_bsdthread_info(current_thread());
        unix_syscall_return(EINTR);

sigsuspend(proc_t p, struct sigsuspend_args *uap, int32_t *retval)
        __pthread_testcancel(1);
        return(sigsuspend_nocancel(p, (struct sigsuspend_nocancel_args *)uap, retval));

sigsuspend_nocancel(proc_t p, struct sigsuspend_nocancel_args *uap, __unused int32_t *retval)
        ut = (struct uthread *)get_bsdthread_info(current_thread());

        /*
         * When returning from sigpause, we want
         * the old mask to be restored after the
         * signal handler has finished.  Thus, we
         * save it here and mark the sigacts structure
         * to indicate this.
         */
        ut->uu_oldmask = ut->uu_sigmask;
        ut->uu_flag |= UT_SAS_OLDMASK;
        ut->uu_sigmask = (uap->mask & ~sigcantmask);
        (void) tsleep0((caddr_t) p, PPAUSE|PCATCH, "pause", 0, sigcontinue);
        /* always return EINTR rather than ERESTART... */
__disable_threadsignal(__unused proc_t p,
                       __unused struct __disable_threadsignal_args *uap,
                       __unused int32_t *retval)
        uth = (struct uthread *)get_bsdthread_info(current_thread());

        /* No longer valid to have any signal delivered */
        uth->uu_flag |= (UT_NO_SIGMASK | UT_CANCELDISABLE);

__pthread_testcancel(int presyscall)
        thread_t self = current_thread();
        struct uthread * uthread;

        uthread = (struct uthread *)get_bsdthread_info(self);

        uthread->uu_flag &= ~UT_NOTCANCELPT;

        if ((uthread->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
                if (presyscall != 0) {
                        unix_syscall_return(EINTR);

                        thread_abort_safely(self);

__pthread_markcancel(__unused proc_t p,
        struct __pthread_markcancel_args *uap, __unused int32_t *retval)
        thread_act_t target_act;

        target_act = (thread_act_t)port_name_to_thread(uap->thread_port);

        if (target_act == THR_ACT_NULL)

        uth = (struct uthread *)get_bsdthread_info(target_act);

        /* if the thread is in vfork do not cancel */
        if ((uth->uu_flag & (UT_VFORK | UT_CANCEL | UT_CANCELED)) == 0) {
                uth->uu_flag |= (UT_CANCEL | UT_NO_SIGMASK);
                if (((uth->uu_flag & UT_NOTCANCELPT) == 0)
                        && ((uth->uu_flag & UT_CANCELDISABLE) == 0))
                                thread_abort_safely(target_act);

        thread_deallocate(target_act);
/* if action = 0: return the cancellation state;
 *      if marked for cancellation, make the thread canceled
 * if action = 1: enable the cancel handling
 * if action = 2: disable the cancel handling
 */
__pthread_canceled(__unused proc_t p,
        struct __pthread_canceled_args *uap, __unused int32_t *retval)
        int action = uap->action;

        thread = current_thread();
        uth = (struct uthread *)get_bsdthread_info(thread);

                        uth->uu_flag &= ~UT_CANCELDISABLE;

                        uth->uu_flag |= UT_CANCELDISABLE;

                        /* if the thread is in vfork do not cancel */
                        if ((uth->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
                                uth->uu_flag &= ~UT_CANCEL;
                                uth->uu_flag |= (UT_CANCELED | UT_NO_SIGMASK);
__attribute__((noreturn))
__posix_sem_syscall_return(kern_return_t kern_result)
        if (kern_result == KERN_SUCCESS)

        else if (kern_result == KERN_ABORTED)

        else if (kern_result == KERN_OPERATION_TIMED_OUT)

        unix_syscall_return(error);
        /* does not return */
#if OLD_SEMWAIT_SIGNAL
/*
 * Returns:     0                       Success
 *      EFAULT if timespec is NULL
 */
__old_semwait_signal(proc_t p, struct __old_semwait_signal_args *uap,
                     int32_t *retval)
        __pthread_testcancel(0);
        return(__old_semwait_signal_nocancel(p, (struct __old_semwait_signal_nocancel_args *)uap, retval));

__old_semwait_signal_nocancel(proc_t p, struct __old_semwait_signal_nocancel_args *uap,
                              __unused int32_t *retval)
        kern_return_t kern_result;
        mach_timespec_t then;
        struct timespec now;
        struct user_timespec ts;
        boolean_t truncated_timeout = FALSE;

                if (IS_64BIT_PROCESS(p)) {
                        struct user64_timespec ts64;
                        error = copyin(uap->ts, &ts64, sizeof(ts64));
                        ts.tv_sec = ts64.tv_sec;
                        ts.tv_nsec = ts64.tv_nsec;

                        struct user32_timespec ts32;
                        error = copyin(uap->ts, &ts32, sizeof(ts32));
                        ts.tv_sec = ts32.tv_sec;
                        ts.tv_nsec = ts32.tv_nsec;

                if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) {
                        ts.tv_sec = 0xFFFFFFFF;
                        truncated_timeout = TRUE;

                if (uap->relative) {
                        then.tv_sec = ts.tv_sec;
                        then.tv_nsec = ts.tv_nsec;

                        /* if time has elapsed, set time to null timespec to bail out right away */
                        if (now.tv_sec == ts.tv_sec ?
                                now.tv_nsec > ts.tv_nsec :
                                now.tv_sec > ts.tv_sec) {

                                then.tv_sec = ts.tv_sec - now.tv_sec;
                                then.tv_nsec = ts.tv_nsec - now.tv_nsec;
                                if (then.tv_nsec < 0) {
                                        then.tv_nsec += NSEC_PER_SEC;

                if (uap->mutex_sem == 0)
                        kern_result = semaphore_timedwait_trap_internal((mach_port_name_t)uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
                else
                        kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);

                if (uap->mutex_sem == 0)
                        kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return);
                else
                        kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return);

        if (kern_result == KERN_SUCCESS && !truncated_timeout)

        else if (kern_result == KERN_SUCCESS && truncated_timeout)
                return(EINTR); /* simulate an exceptional condition because Mach doesn't support a longer timeout */
        else if (kern_result == KERN_ABORTED)

        else if (kern_result == KERN_OPERATION_TIMED_OUT)

#endif /* OLD_SEMWAIT_SIGNAL */
/*
 * Returns:     0                       Success
 *      EFAULT if timespec is NULL
 */
__semwait_signal(proc_t p, struct __semwait_signal_args *uap,
                 int32_t *retval)
        __pthread_testcancel(0);
        return(__semwait_signal_nocancel(p, (struct __semwait_signal_nocancel_args *)uap, retval));

__semwait_signal_nocancel(__unused proc_t p, struct __semwait_signal_nocancel_args *uap,
                              __unused int32_t *retval)
        kern_return_t kern_result;
        mach_timespec_t then;
        struct timespec now;
        struct user_timespec ts;
        boolean_t truncated_timeout = FALSE;

                ts.tv_sec = uap->tv_sec;
                ts.tv_nsec = uap->tv_nsec;

                if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) {
                        ts.tv_sec = 0xFFFFFFFF;
                        truncated_timeout = TRUE;

                if (uap->relative) {
                        then.tv_sec = ts.tv_sec;
                        then.tv_nsec = ts.tv_nsec;

                        /* if time has elapsed, set time to null timespec to bail out right away */
                        if (now.tv_sec == ts.tv_sec ?
                                now.tv_nsec > ts.tv_nsec :
                                now.tv_sec > ts.tv_sec) {

                                then.tv_sec = ts.tv_sec - now.tv_sec;
                                then.tv_nsec = ts.tv_nsec - now.tv_nsec;
                                if (then.tv_nsec < 0) {
                                        then.tv_nsec += NSEC_PER_SEC;

                if (uap->mutex_sem == 0)
                        kern_result = semaphore_timedwait_trap_internal((mach_port_name_t)uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
                else
                        kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);

                if (uap->mutex_sem == 0)
                        kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return);
                else
                        kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return);

        if (kern_result == KERN_SUCCESS && !truncated_timeout)

        else if (kern_result == KERN_SUCCESS && truncated_timeout)
                return(EINTR); /* simulate an exceptional condition because Mach doesn't support a longer timeout */
        else if (kern_result == KERN_ABORTED)

        else if (kern_result == KERN_OPERATION_TIMED_OUT)
__pthread_kill(__unused proc_t p, struct __pthread_kill_args *uap,
               __unused int32_t *retval)
        thread_t target_act;

        int signum = uap->sig;
        struct uthread *uth;

        target_act = (thread_t)port_name_to_thread(uap->thread_port);

        if (target_act == THREAD_NULL)

        if ((u_int)signum >= NSIG) {

        uth = (struct uthread *)get_bsdthread_info(target_act);

        if (uth->uu_flag & UT_NO_SIGMASK) {

                psignal_uthread(target_act, signum);

        thread_deallocate(target_act);

__pthread_sigmask(__unused proc_t p, struct __pthread_sigmask_args *uap,
                  __unused int32_t *retval)
        user_addr_t set = uap->set;
        user_addr_t oset = uap->oset;

        ut = (struct uthread *)get_bsdthread_info(current_thread());
        oldset = ut->uu_sigmask;

        if (set == USER_ADDR_NULL) {
                /* need only old mask */

        error = copyin(set, &nset, sizeof(sigset_t));

                ut->uu_sigmask |= (nset & ~sigcantmask);

                ut->uu_sigmask &= ~(nset);
                signal_setast(current_thread());

                ut->uu_sigmask = (nset & ~sigcantmask);
                signal_setast(current_thread());

        if (!error && oset != USER_ADDR_NULL)
                copyout(&oldset, oset, sizeof(sigset_t));
/*
 * Returns:     0                       Success
 */
__sigwait(proc_t p, struct __sigwait_args *uap, int32_t *retval)
        __pthread_testcancel(1);
        return(__sigwait_nocancel(p, (struct __sigwait_nocancel_args *)uap, retval));

__sigwait_nocancel(proc_t p, struct __sigwait_nocancel_args *uap, __unused int32_t *retval)
        struct uthread *uth;

        ut = (struct uthread *)get_bsdthread_info(current_thread());

        if (uap->set == USER_ADDR_NULL)

        error = copyin(uap->set, &mask, sizeof(sigset_t));

        siglist = (mask & ~sigcantmask);

        if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {

                proc_signalstart(p, 1);
                TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
                        if ((sigw = uth->uu_siglist & siglist)) {

                proc_signalend(p, 1);

                /* The signal was pending on a thread */

        /*
         * When returning from sigwait, we want
         * the old mask to be restored after the
         * signal handler has finished.  Thus, we
         * save it here and mark the sigacts structure
         * to indicate this.
         */
        uth = ut;               /* wait for it to be delivered to us */
        ut->uu_oldmask = ut->uu_sigmask;
        ut->uu_flag |= UT_SAS_OLDMASK;
        if (siglist == (sigset_t)0) {

        /* SIGKILL and SIGSTOP are not maskable as well */
        ut->uu_sigmask = ~(siglist|sigcantmask);
        ut->uu_sigwait = siglist;

        /* No Continuations for now */
        error = msleep((caddr_t)&ut->uu_sigwait, &p->p_mlock, PPAUSE|PCATCH, "pause", 0);

        if (error == ERESTART)

        sigw = (ut->uu_sigwait & siglist);
        ut->uu_sigmask = ut->uu_oldmask;

        ut->uu_flag &= ~UT_SAS_OLDMASK;

                signum = ffs((unsigned int)sigw);
                if (!signum)
                        panic("sigwait with no signal wakeup");
                /* Clear the pending signal in the thread it was delivered */
                uth->uu_siglist &= ~(sigmask(signum));

                DTRACE_PROC2(signal__clear, int, signum, siginfo_t *, &(ut->t_dtrace_siginfo));

                if (uap->sig != USER_ADDR_NULL)
                                error = copyout(&signum, uap->sig, sizeof(int));
sigaltstack(__unused proc_t p, struct sigaltstack_args *uap, __unused int32_t *retval)
        struct kern_sigaltstack ss;
        struct kern_sigaltstack *pstk;

        struct uthread *uth;

        uth = (struct uthread *)get_bsdthread_info(current_thread());

        pstk = &uth->uu_sigstk;
        if ((uth->uu_flag & UT_ALTSTACK) == 0)
                uth->uu_sigstk.ss_flags |= SA_DISABLE;
        onstack = pstk->ss_flags & SA_ONSTACK;

                if (IS_64BIT_PROCESS(p)) {
                        struct user64_sigaltstack ss64;
                        sigaltstack_kern_to_user64(pstk, &ss64);
                        error = copyout(&ss64, uap->oss, sizeof(ss64));

                        struct user32_sigaltstack ss32;
                        sigaltstack_kern_to_user32(pstk, &ss32);
                        error = copyout(&ss32, uap->oss, sizeof(ss32));

        if (uap->nss == USER_ADDR_NULL)

        if (IS_64BIT_PROCESS(p)) {
                struct user64_sigaltstack ss64;
                error = copyin(uap->nss, &ss64, sizeof(ss64));
                sigaltstack_user64_to_kern(&ss64, &ss);

                struct user32_sigaltstack ss32;
                error = copyin(uap->nss, &ss32, sizeof(ss32));
                sigaltstack_user32_to_kern(&ss32, &ss);

        if ((ss.ss_flags & ~SA_DISABLE) != 0) {

        if (ss.ss_flags & SA_DISABLE) {
                /* if we are here we are not in the signal handler; so no need to check */
                if (uth->uu_sigstk.ss_flags & SA_ONSTACK)

                uth->uu_flag &= ~UT_ALTSTACK;
                uth->uu_sigstk.ss_flags = ss.ss_flags;

/* The older stacksize was 8K, enforce that one so no compat problems */
#define OLDMINSIGSTKSZ 8*1024
        if (ss.ss_size < OLDMINSIGSTKSZ)

        uth->uu_flag |= UT_ALTSTACK;
kill(proc_t cp, struct kill_args *uap, __unused int32_t *retval)
        kauth_cred_t uc = kauth_cred_get();
        int posix = uap->posix;         /* !0 if posix behaviour desired */

        AUDIT_ARG(pid, uap->pid);
        AUDIT_ARG(signum, uap->signum);

        if ((u_int)uap->signum >= NSIG)

                /* kill single process */
                if ((p = proc_find(uap->pid)) == NULL) {
                        if ((p = pzfind(uap->pid)) != NULL) {
                                /*
                                 * IEEE Std 1003.1-2001: return success
                                 * when killing a zombie.
                                 */

                AUDIT_ARG(process, p);
                if (!cansignal(cp, uc, p, uap->signum, 0)) {

                        psignal(p, uap->signum);

        case -1:                /* broadcast signal */
                return (killpg1(cp, uap->signum, 0, 1, posix));
        case 0:                 /* signal own process group */
                return (killpg1(cp, uap->signum, 0, 0, posix));
        default:                /* negative explicit process group */
                return (killpg1(cp, uap->signum, -(uap->pid), 0, posix));
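/*
 * Illustrative sketch (not part of the original file): how the pid argument
 * maps onto the cases above when kill(2) is called from userspace. The pid
 * values are examples only.
 *
 *      kill(1234, SIGTERM);    // pid > 0: signal a single process
 *      kill(-1, SIGTERM);      // pid == -1: broadcast (killpg1 with all == 1)
 *      kill(0, SIGTERM);       // pid == 0: caller's own process group
 *      kill(-5678, SIGTERM);   // pid < -1: process group 5678
 */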
build_userspace_exit_reason(uint32_t reason_namespace, uint64_t reason_code, user_addr_t payload, uint32_t payload_size,
                                        user_addr_t reason_string, uint64_t reason_flags)
        os_reason_t exit_reason = OS_REASON_NULL;

        int num_items_to_copy = 0;
        uint32_t user_data_to_copy = 0;
        char *reason_user_desc = NULL;
        size_t reason_user_desc_len = 0;

        exit_reason = os_reason_create(reason_namespace, reason_code);
        if (exit_reason == OS_REASON_NULL) {
                printf("build_userspace_exit_reason: failed to allocate exit reason\n");

        exit_reason->osr_flags |= OS_REASON_FLAG_FROM_USERSPACE;

        /*
         * Only apply flags that are allowed to be passed from userspace.
         */
        exit_reason->osr_flags |= (reason_flags & OS_REASON_FLAG_MASK_ALLOWED_FROM_USER);
        if ((reason_flags & OS_REASON_FLAG_MASK_ALLOWED_FROM_USER) != reason_flags) {
                printf("build_userspace_exit_reason: illegal flags passed from userspace (some masked off) 0x%llx, ns: %u, code 0x%llx\n",
                        reason_flags, reason_namespace, reason_code);

        if (!(exit_reason->osr_flags & OS_REASON_FLAG_NO_CRASH_REPORT)) {
                exit_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;

        if (payload != USER_ADDR_NULL) {
                if (payload_size == 0) {
                        printf("build_userspace_exit_reason: exit reason with namespace %u, nonzero payload but zero length\n",

                        exit_reason->osr_flags |= OS_REASON_FLAG_BAD_PARAMS;
                        payload = USER_ADDR_NULL;

                        num_items_to_copy++;

                        if (payload_size > EXIT_REASON_PAYLOAD_MAX_LEN) {
                                exit_reason->osr_flags |= OS_REASON_FLAG_PAYLOAD_TRUNCATED;
                                payload_size = EXIT_REASON_PAYLOAD_MAX_LEN;

                        user_data_to_copy += payload_size;

        if (reason_string != USER_ADDR_NULL) {
                reason_user_desc = (char *) kalloc(EXIT_REASON_USER_DESC_MAX_LEN);

                if (reason_user_desc != NULL) {
                        error = copyinstr(reason_string, (void *) reason_user_desc,
                                                EXIT_REASON_USER_DESC_MAX_LEN, &reason_user_desc_len);

                                num_items_to_copy++;
                                user_data_to_copy += reason_user_desc_len;
                        } else if (error == ENAMETOOLONG) {
                                num_items_to_copy++;
                                reason_user_desc[EXIT_REASON_USER_DESC_MAX_LEN - 1] = '\0';
                                user_data_to_copy += reason_user_desc_len;

                                exit_reason->osr_flags |= OS_REASON_FLAG_FAILED_DATA_COPYIN;
                                kfree(reason_user_desc, EXIT_REASON_USER_DESC_MAX_LEN);
                                reason_user_desc = NULL;
                                reason_user_desc_len = 0;

        if (num_items_to_copy != 0) {
                uint32_t reason_buffer_size_estimate = 0;
                mach_vm_address_t data_addr = 0;

                reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(num_items_to_copy, user_data_to_copy);

                error = os_reason_alloc_buffer(exit_reason, reason_buffer_size_estimate);

                        printf("build_userspace_exit_reason: failed to allocate signal reason buffer\n");
                        goto out_failed_copyin;

                if (reason_user_desc != NULL && reason_user_desc_len != 0) {
                        if (KERN_SUCCESS == kcdata_get_memory_addr(&exit_reason->osr_kcd_descriptor,
                                                EXIT_REASON_USER_DESC,
                                                reason_user_desc_len,

                                kcdata_memcpy(&exit_reason->osr_kcd_descriptor, (mach_vm_address_t) data_addr,
                                                reason_user_desc, reason_user_desc_len);

                                printf("build_userspace_exit_reason: failed to allocate space for reason string\n");
                                goto out_failed_copyin;

                if (payload != USER_ADDR_NULL) {

                                        kcdata_get_memory_addr(&exit_reason->osr_kcd_descriptor,
                                                EXIT_REASON_USER_PAYLOAD,

                                error = copyin(payload, (void *) data_addr, payload_size);

                                        printf("build_userspace_exit_reason: failed to copy in payload data with error %d\n", error);
                                        goto out_failed_copyin;

                                printf("build_userspace_exit_reason: failed to allocate space for payload data\n");
                                goto out_failed_copyin;

        if (reason_user_desc != NULL) {
                kfree(reason_user_desc, EXIT_REASON_USER_DESC_MAX_LEN);
                reason_user_desc = NULL;
                reason_user_desc_len = 0;

        if (reason_user_desc != NULL) {
                kfree(reason_user_desc, EXIT_REASON_USER_DESC_MAX_LEN);
                reason_user_desc = NULL;
                reason_user_desc_len = 0;

        exit_reason->osr_flags |= OS_REASON_FLAG_FAILED_DATA_COPYIN;
        os_reason_alloc_buffer(exit_reason, 0);

terminate_with_payload_internal(struct proc *cur_proc, int target_pid, uint32_t reason_namespace,
                                uint64_t reason_code, user_addr_t payload, uint32_t payload_size,
                                user_addr_t reason_string, uint64_t reason_flags)
        proc_t target_proc = PROC_NULL;
        kauth_cred_t cur_cred = kauth_cred_get();
        int signum = SIGKILL;

        os_reason_t signal_reason = OS_REASON_NULL;

        AUDIT_ARG(pid, target_pid);
        if ((target_pid <= 0) || (cur_proc->p_pid == target_pid)) {

        if (reason_namespace == OS_REASON_INVALID ||
                reason_namespace > OS_REASON_MAX_VALID_NAMESPACE) {

        target_proc = proc_find(target_pid);
        if (target_proc == PROC_NULL) {

        AUDIT_ARG(process, target_proc);

        if (!cansignal(cur_proc, cur_cred, target_proc, signum, 0)) {
                proc_rele(target_proc);

        KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
                                        target_proc->p_pid, reason_namespace,

        signal_reason = build_userspace_exit_reason(reason_namespace, reason_code, payload, payload_size,
                                                        reason_string, reason_flags);

        psignal_with_reason(target_proc, signum, signal_reason);
        proc_rele(target_proc);

terminate_with_payload(struct proc *cur_proc, struct terminate_with_payload_args *args,
                                __unused int32_t *retval)
        return terminate_with_payload_internal(cur_proc, args->pid, args->reason_namespace, args->reason_code, args->payload,
                                                args->payload_size, args->reason_string, args->reason_flags);
killpg1_filt(proc_t p, void * arg)
        struct killpg1_filtargs * kfargp = (struct killpg1_filtargs *)arg;
        proc_t cp = kfargp->cp;
        int posix = kfargp->posix;

        if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
                (!posix && p == cp))

killpg1_pgrpfilt(proc_t p, __unused void * arg)
        if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
                (p->p_stat == SZOMB))

killpg1_callback(proc_t p, void * arg)
        struct killpg1_iterargs * kargp = (struct killpg1_iterargs *)arg;
        proc_t cp = kargp->cp;
        kauth_cred_t uc = kargp->uc;   /* refcounted by the caller; safe to use internal fields */
        int signum = kargp->signum;
        int * nfoundp = kargp->nfoundp;

        if ((kargp->zombie != 0) && ((p->p_listflag & P_LIST_EXITED) == P_LIST_EXITED))

                error = cansignal(cp, uc, p, signum, zombie);

                if (error != 0 && nfoundp != NULL) {

                if (cansignal(cp, uc, p, signum, 0) == 0)
                        return(PROC_RETURNED);

                if (nfoundp != NULL) {

        return(PROC_RETURNED);

/*
 * Common code for kill process group/broadcast kill.
 * cp is calling process.
 */
killpg1(proc_t cp, int signum, int pgid, int all, int posix)
        struct killpg1_iterargs karg;
        struct killpg1_filtargs kfarg;

        uc = kauth_cred_proc_ref(cp);

                kfarg.posix = posix;

                karg.nfoundp = &nfound;
                karg.signum = signum;

                proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST), killpg1_callback, &karg, killpg1_filt, (void *)&kfarg);

                        /*
                         * zero pgid means send to my process group.
                         */
                        pgrp = proc_pgrp(cp);

                        pgrp = pgfind(pgid);

                karg.nfoundp = &nfound;

                karg.signum = signum;

                /* PGRP_DROPREF drops the pgrp reference */
                pgrp_iterate(pgrp, PGRP_DROPREF, killpg1_callback, &karg,
                        killpg1_pgrpfilt, NULL);

        error = (nfound ? 0 : (posix ? EPERM : ESRCH));

        kauth_cred_unref(&uc);
1822  * Send a signal to a process group. 
1825 gsignal(int pgid
, int signum
) 
1829         if (pgid 
&& (pgrp 
= pgfind(pgid
))) { 
1830                 pgsignal(pgrp
, signum
, 0); 
1836  * Send a signal to a process group.  If checkctty is 1, 
1837  * limit to members which have a controlling terminal. 
1841 pgsignal_filt(proc_t p
, void * arg
) 
1843         int checkctty 
= *(int*)arg
; 
1845         if ((checkctty 
== 0) || p
->p_flag 
& P_CONTROLT
) 
1853 pgsignal_callback(proc_t p
, void * arg
) 
1855         int  signum 
= *(int*)arg
; 
1858         return(PROC_RETURNED
); 
1863 pgsignal(struct pgrp 
*pgrp
, int signum
, int checkctty
) 
1865         if (pgrp 
!= PGRP_NULL
) { 
1866                 pgrp_iterate(pgrp
, 0, pgsignal_callback
, &signum
, pgsignal_filt
, &checkctty
); 
1872 tty_pgsignal(struct tty 
*tp
, int signum
, int checkctty
) 
1877         if (pg 
!= PGRP_NULL
) { 
1878                 pgrp_iterate(pg
, 0, pgsignal_callback
, &signum
, pgsignal_filt
, &checkctty
); 
1883  * Send a signal caused by a trap to a specific thread.
1886 threadsignal(thread_t sig_actthread, int signum, mach_exception_code_t code, boolean_t set_exitreason)
1888         struct uthread *uth;
1889         struct task * sig_task;
1893         if ((u_int)signum >= NSIG || signum == 0)
1896         mask = sigmask(signum);
1897         if ((mask & threadmask) == 0)
1899         sig_task = get_threadtask(sig_actthread);
1900         p = (proc_t)(get_bsdtask_info(sig_task));
1902         uth = get_bsdthread_info(sig_actthread);
1903         if (uth->uu_flag & UT_VFORK)
1907         if (!(p->p_lflag & P_LTRACED) && (p->p_sigignore & mask)) {
1912         uth->uu_siglist |= mask;
1913         uth->uu_code = code;
1915         /* Attempt to establish whether the signal will be fatal (mirrors logic in psignal_internal()) */
1916         if (set_exitreason && ((p->p_lflag & P_LTRACED) || (!(uth->uu_sigwait & mask)
1917                 && !(uth->uu_sigmask & mask) && !(p->p_sigcatch & mask))) &&
1918                 !(mask & stopsigmask) && !(mask & contsigmask)) {
1920                 if (uth->uu_exit_reason == OS_REASON_NULL) {
1921                         KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1922                                                                 p->p_pid, OS_REASON_SIGNAL, signum, 0, 0);
1924                         os_reason_t signal_reason = build_signal_reason(signum, "exc handler");
1926                         set_thread_exit_reason(sig_actthread, signal_reason, TRUE);
1928                         /* We dropped/consumed the reference in set_thread_exit_reason() */
1929                         signal_reason = OS_REASON_NULL;
1935         /* mark on process as well */
1936         signal_setast(sig_actthread);
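/*
 * Illustrative sketch (editor's addition): threadsignal() is how a trap or
 * Mach exception that should surface as a UNIX signal gets queued on the
 * faulting thread itself rather than on the whole process.  A hypothetical
 * translation-path caller (faulting_thread and exc_code are assumed names):
 */
static void example_fault_to_signal(thread_t faulting_thread, mach_exception_code_t exc_code)
{
        /* queue SIGSEGV on the offending thread, record the exception code,
         * and let threadsignal() decide whether to pre-build an exit reason */
        threadsignal(faulting_thread, SIGSEGV, exc_code, TRUE);
}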
1940 set_thread_exit_reason(void *th, void *reason, boolean_t proc_locked)
1942         struct uthread *targ_uth = get_bsdthread_info(th);
1943         struct task *targ_task = NULL;
1944         proc_t targ_proc = NULL;
1946         os_reason_t exit_reason = (os_reason_t)reason;
1948         if (exit_reason == OS_REASON_NULL)
1952                 targ_task = get_threadtask(th);
1953                 targ_proc = (proc_t)(get_bsdtask_info(targ_task));
1955                 proc_lock(targ_proc);
1958         if (targ_uth->uu_exit_reason == OS_REASON_NULL) {
1959                 targ_uth->uu_exit_reason = exit_reason;
1961                 /* The caller expects that we drop a reference on the exit reason */
1962                 os_reason_free(exit_reason);
1966                 assert(targ_proc != NULL);
1967                 proc_unlock(targ_proc);
1974  * Picks an appropriate thread from a process to target with a signal.
1976  * Called with proc locked.
1977  * Returns thread with BSD ast set.
1979  * We attempt to deliver a proc-wide signal to the first thread in the task.
1980  * This allows single-threaded applications which use signals to
1981  * be linked with multithreaded libraries.
1983 static kern_return_t
1984 get_signalthread(proc_t p, int signum, thread_t * thr)
1986         struct uthread *uth;
1987         sigset_t mask = sigmask(signum);
1988         thread_t sig_thread;
1989         struct task * sig_task = p->task;
1994         if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
1995                 sig_thread = p->p_vforkact;
1996                 kret = check_actforsig(sig_task, sig_thread, 1);
1997                 if (kret == KERN_SUCCESS)  {
1999                         return(KERN_SUCCESS);
2001                         return(KERN_FAILURE);
2004         TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
2005                 if (((uth->uu_flag & UT_NO_SIGMASK) == 0) &&
2006                         (((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask))) {
2007                         if (check_actforsig(p->task, uth->uu_context.vc_thread, 1) == KERN_SUCCESS) {
2008                                 *thr = uth->uu_context.vc_thread;
2009                                 return(KERN_SUCCESS);
2013         if (get_signalact(p->task, thr, 1) == KERN_SUCCESS) {
2014                 return(KERN_SUCCESS);
2017         return(KERN_FAILURE);
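/*
 * Illustrative sketch (editor's addition): a hypothetical caller holding the
 * proc lock would use get_signalthread() to pick a delivery target before
 * marking the signal pending:
 */
static void example_pick_target(proc_t p, int signum)
{
        thread_t thr = THREAD_NULL;

        /* proc lock is held by the caller, per the comment above */
        if (get_signalthread(p, signum, &thr) == KERN_SUCCESS) {
                /* thr has its BSD AST set; mark the signal pending on it */
        } else {
                /* every thread is masking the signal (or the vfork parent
                 * cannot take it); leave the signal pending instead */
        }
}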
2021 build_signal_reason(int signum, const char *procname)
2023         os_reason_t signal_reason = OS_REASON_NULL;
2024         proc_t sender_proc = current_proc();
2025         uint32_t reason_buffer_size_estimate = 0, proc_name_length = 0;
2026         const char *default_sender_procname = "unknown";
2027         mach_vm_address_t data_addr;
2030         signal_reason = os_reason_create(OS_REASON_SIGNAL, signum);
2031         if (signal_reason == OS_REASON_NULL) {
2032                 printf("build_signal_reason: unable to allocate signal reason structure.\n");
2033                 return signal_reason;
2036         reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(2, sizeof(sender_proc->p_name) +
2037                                                                                 sizeof(sender_proc->p_pid));
2039         ret = os_reason_alloc_buffer(signal_reason, reason_buffer_size_estimate);
2041                 printf("build_signal_reason: unable to allocate signal reason buffer.\n");
2042                 return signal_reason;
2045         if (KERN_SUCCESS == kcdata_get_memory_addr(&signal_reason->osr_kcd_descriptor, KCDATA_TYPE_PID,
2046                                                         sizeof(sender_proc->p_pid), &data_addr)) {
2047                 kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, &sender_proc->p_pid,
2048                                         sizeof(sender_proc->p_pid));
2050                 printf("build_signal_reason: exceeded space in signal reason buf, unable to log PID\n");
2053         proc_name_length = sizeof(sender_proc->p_name);
2054         if (KERN_SUCCESS == kcdata_get_memory_addr(&signal_reason->osr_kcd_descriptor, KCDATA_TYPE_PROCNAME,
2055                                                         proc_name_length, &data_addr)) {
2057                         char truncated_procname[proc_name_length];
2058                         strncpy((char *) &truncated_procname, procname, proc_name_length);
2059                         truncated_procname[proc_name_length - 1] = '\0';
2061                         kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, truncated_procname,
2062                                         strlen((char *) &truncated_procname));
2063                 } else if (*sender_proc->p_name) {
2064                         kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, &sender_proc->p_name,
2065                                         sizeof(sender_proc->p_name));
2067                         kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, &default_sender_procname,
2068                                         strlen(default_sender_procname) + 1);
2071                 printf("build_signal_reason: exceeded space in signal reason buf, unable to log procname\n");
2074         return signal_reason;
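/*
 * Illustrative sketch (editor's addition): build_signal_reason() packages the
 * sender's pid and name into an os_reason_t so the receiver's exit can be
 * attributed.  A hypothetical fatal-signal sender (target_proc is an assumed
 * name) would pair it with psignal_with_reason(), defined later in this file:
 */
static void example_kill_with_reason(proc_t target_proc)
{
        os_reason_t reason = build_signal_reason(SIGKILL, "example sender");

        /* psignal_internal() always drops the reference on the reason,
         * even on its discard paths, so no cleanup is needed here */
        psignal_with_reason(target_proc, SIGKILL, reason);
}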
2078  * Send the signal to the process.  If the signal has an action, the action
2079  * is usually performed by the target process rather than the caller; we add
2080  * the signal to the set of pending signals for the process.
2082  * Always drops a reference on a signal_reason if one is provided, whether via
2083  * passing it to a thread or deallocating directly.
2086  *   o When a stop signal is sent to a sleeping process that takes the
2087  *     default action, the process is stopped without awakening it.
2088  *   o SIGCONT restarts stopped processes (or puts them back to sleep)
2089  *     regardless of the signal action (eg, blocked or ignored).
2091  * Other ignored signals are discarded immediately.
2094 psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum, os_reason_t signal_reason)
2097         user_addr_t action = USER_ADDR_NULL;
2099         thread_t                sig_thread;
2102         struct uthread          *uth;
2106         kauth_cred_t            my_cred;
2107         char                    *launchd_exit_reason_desc = NULL;
2108         boolean_t               update_thread_policy = FALSE;
2110         if ((u_int)signum >= NSIG || signum == 0)
2111                 panic("psignal: bad signal number %d", signum);
2113         mask = sigmask(signum);
2114         prop = sigprop[signum];
2117         if (rdebug_proc && (p != PROC_NULL) && (p == rdebug_proc)) {
2120 #endif /* SIGNAL_DEBUG */
2122         /* catch unexpected initproc kills early for easier debugging */
2123         if (signum == SIGKILL && p == initproc) {
2124                 if (signal_reason == NULL) {
2125                         panic_plain("unexpected SIGKILL of %s %s (no reason provided)",
2126                             (p->p_name[0] != '\0' ? p->p_name : "initproc"),
2127                             ((p->p_csflags & CS_KILLED) ? "(CS_KILLED)" : ""));
2129                         launchd_exit_reason_desc = launchd_exit_reason_get_string_desc(signal_reason);
2130                         panic_plain("unexpected SIGKILL of %s %s with reason -- namespace %d code 0x%llx description %." LAUNCHD_PANIC_REASON_STRING_MAXLEN "s",
2131                             (p->p_name[0] != '\0' ? p->p_name : "initproc"),
2132                             ((p->p_csflags & CS_KILLED) ? "(CS_KILLED)" : ""),
2133                             signal_reason->osr_namespace, signal_reason->osr_code,
2134                             launchd_exit_reason_desc ? launchd_exit_reason_desc : "none");
2139          *      We will need the task pointer later.  Grab it now to
2140          *      check for a zombie process.  Also don't send signals
2141          *      to kernel internal tasks.
2143         if (flavor & PSIG_VFORK) {
2145                 sig_thread = thread;
2147         } else if (flavor & PSIG_THREAD) {
2148                 sig_task = get_threadtask(thread);
2149                 sig_thread = thread;
2150                 sig_proc = (proc_t)get_bsdtask_info(sig_task);
2151         } else if (flavor & PSIG_TRY_THREAD) {
2152                 assert((thread == current_thread()) && (p == current_proc()));
2154                 sig_thread = thread;
2158                 sig_thread = THREAD_NULL;
2162         if ((sig_task == TASK_NULL) || is_kerneltask(sig_task)) {
2163                 os_reason_free(signal_reason);
2168          * do not send signals to the process that has the thread
2169          * doing a reboot(). Not doing so will mark that thread aborted
2170          * and can cause IO failures which will cause data loss.  There's
2171          * also no need to send a signal to a process that is in the middle
2172          * of being torn down.
2174         if (ISSET(sig_proc->p_flag, P_REBOOT) || ISSET(sig_proc->p_lflag, P_LEXIT)) {
2175                 DTRACE_PROC3(signal__discard, thread_t, sig_thread, proc_t, sig_proc, int, signum);
2176                 os_reason_free(signal_reason);
2180         if ((flavor & (PSIG_VFORK | PSIG_THREAD)) == 0) {
2181                 proc_knote(sig_proc, NOTE_SIGNAL | signum);
2184         if ((flavor & PSIG_LOCKED) == 0)
2185                 proc_signalstart(sig_proc, 0);
2187         /* Don't send signals to a process that has ignored them. */
2188         if (((flavor & PSIG_VFORK) == 0) && ((sig_proc->p_lflag & P_LTRACED) == 0) && (sig_proc->p_sigignore & mask)) {
2189                 DTRACE_PROC3(signal__discard, thread_t, sig_thread, proc_t, sig_proc, int, signum);
2190                 goto sigout_unlocked;
2194          * The proc_lock prevents the targeted thread from being deallocated
2195          * or handling the signal until we're done signaling it.
2197          * Once the proc_lock is dropped, we have no guarantee the thread or uthread exists anymore.
2199          * XXX: What if the thread goes inactive after the thread passes bsd ast point?
2201         proc_lock(sig_proc);
2203         if (flavor & PSIG_VFORK) {
2205                 act_set_astbsd(sig_thread);
2206                 kret = KERN_SUCCESS;
2207         } else if (flavor & PSIG_TRY_THREAD) {
2208                 uth = get_bsdthread_info(sig_thread);
2209                 if (((uth->uu_flag & UT_NO_SIGMASK) == 0) &&
2210                                 (((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask)) &&
2211                                 ((kret = check_actforsig(sig_proc->task, sig_thread, 1)) == KERN_SUCCESS)) {
2212                         /* deliver to specified thread */
2214                         /* deliver to any willing thread */
2215                         kret = get_signalthread(sig_proc, signum, &sig_thread);
2217         } else if (flavor & PSIG_THREAD) {
2218                 /* If successful return with ast set */
2219                 kret = check_actforsig(sig_task, sig_thread, 1);
2221                 /* If successful return with ast set */
2222                 kret = get_signalthread(sig_proc, signum, &sig_thread);
2225         if (kret != KERN_SUCCESS) {
2226                 DTRACE_PROC3(signal__discard, thread_t, sig_thread, proc_t, sig_proc, int, signum);
2227                 proc_unlock(sig_proc);
2228                 goto sigout_unlocked;
2231         uth = get_bsdthread_info(sig_thread);
2234          * If proc is traced, always give parent a chance.
2237         if ((flavor & PSIG_VFORK) == 0) {
2238                 if (sig_proc->p_lflag & P_LTRACED)
2242                          * If the signal is being ignored,
2243                          * then we forget about it immediately.
2244                          * (Note: we don't set SIGCONT in p_sigignore,
2245                          * and if it is set to SIG_IGN,
2246                          * action will be SIG_DFL here.)
2248                         if (sig_proc->p_sigignore & mask)
2251                         if (uth->uu_sigwait & mask)
2252                                 action = KERN_SIG_WAIT;
2253                         else if (uth->uu_sigmask & mask)
2254                                 action = KERN_SIG_HOLD;
2255                         else if (sig_proc->p_sigcatch & mask)
2256                                 action = KERN_SIG_CATCH;
2262         /* TODO: p_nice isn't hooked up to the scheduler... */
2263         if (sig_proc->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
2264                 (sig_proc->p_lflag & P_LTRACED) == 0)
2265                         sig_proc->p_nice = NZERO;
2268                 uth->uu_siglist &= ~stopsigmask;
2270         if (prop & SA_STOP) {
2273                  * If sending a tty stop signal to a member of an orphaned
2274                  * process group, discard the signal here if the action
2275                  * is default; don't stop the process below if sleeping,
2276                  * and don't clear any pending SIGCONT.
2278                 pg = proc_pgrp(sig_proc);
2279                 if (prop & SA_TTYSTOP && pg->pg_jobc == 0 &&
2280                         action == SIG_DFL) {
2285                 uth->uu_siglist &= ~contsigmask;
2288         uth->uu_siglist |= mask;
2291          * Defer further processing for signals which are held,
2292          * except that stopped processes must be continued by SIGCONT.
2294         /* vfork will not go thru as action is SIG_DFL */
2295         if ((action == KERN_SIG_HOLD) && ((prop & SA_CONT) == 0 || sig_proc->p_stat != SSTOP))
2299          *      SIGKILL priority twiddling moved here from above because
2300          *      it needs sig_thread.  Could merge it into large switch
2301          *      below if we didn't care about priority for tracing
2302          *      as SIGKILL's action is always SIG_DFL.
2304          *      TODO: p_nice isn't hooked up to the scheduler...
2306         if ((signum == SIGKILL) && (sig_proc->p_nice > NZERO)) {
2307                 sig_proc->p_nice = NZERO;
2311          *      Process is traced - wake it up (if not already
2312          *      stopped) so that it can discover the signal in
2313          *      issig() and stop for the parent.
2315         if (sig_proc->p_lflag & P_LTRACED) {
2316                 if (sig_proc->p_stat != SSTOP)
2322         if ((flavor & PSIG_VFORK) != 0)
2325         if (action == KERN_SIG_WAIT) {
2328                  * DTrace proc signal-clear returns a siginfo_t. Collect the needed info.
2330                 r_uid = kauth_getruid(); /* per thread credential; protected by our thread context */
2332                 bzero((caddr_t)&(uth->t_dtrace_siginfo), sizeof(uth->t_dtrace_siginfo));
2334                 uth->t_dtrace_siginfo.si_signo = signum;
2335                 uth->t_dtrace_siginfo.si_pid = current_proc()->p_pid;
2336                 uth->t_dtrace_siginfo.si_status = W_EXITCODE(signum, 0);
2337                 uth->t_dtrace_siginfo.si_uid = r_uid;
2338                 uth->t_dtrace_siginfo.si_code = 0;
2340                 uth->uu_sigwait = mask;
2341                 uth->uu_siglist &= ~mask;
2342                 wakeup(&uth->uu_sigwait);
2343                 /* if it is SIGCONT resume whole process */
2344                 if (prop & SA_CONT) {
2345                         OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
2346                         sig_proc->p_contproc = current_proc()->p_pid;
2347                         (void) task_resume_internal(sig_task);
2352         if (action != SIG_DFL) {
2354                  *      User wants to catch the signal.
2355                  *      Wake up the thread, but don't un-suspend it
2356                  *      (except for SIGCONT).
2358                 if (prop & SA_CONT) {
2359                         OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
2360                         (void) task_resume_internal(sig_task);
2361                         sig_proc->p_stat = SRUN;
2362                 } else if (sig_proc->p_stat == SSTOP) {
2366                  * Fill out siginfo structure information to pass to the
2367                  * signalled process/thread sigaction handler, when it
2368                  * wakes up.  si_code is 0 because this is an ordinary
2369                  * signal, not a SIGCHLD, and so si_status is the signal
2370                  * number itself, instead of the child process exit status.
2371                  * We shift this left because it will be shifted right before
2372                  * it is passed to user space.  kind of ugly to use W_EXITCODE
2373                  * this way, but it beats defining a new macro.
2375                  * Note:        Avoid the SIGCHLD recursion case!
2377                 if (signum != SIGCHLD) {
2378                         r_uid = kauth_getruid();
2380                         sig_proc->si_pid = current_proc()->p_pid;
2381                         sig_proc->si_status = W_EXITCODE(signum, 0);
2382                         sig_proc->si_uid = r_uid;
2383                         sig_proc->si_code = 0;
2388                 /*      Default action - varies */
2389                 if (mask & stopsigmask) {
2390                         assert(signal_reason == NULL);
2392                          * These are the signals which by default
2395                          * Don't clog system with children of init
2396                          * stopped from the keyboard.
2398                         if (!(prop & SA_STOP) && sig_proc->p_pptr == initproc) {
2399                                 uth->uu_siglist &= ~mask;
2400                                 proc_unlock(sig_proc);
2401                                 /* siglock still locked, proc_lock not locked */
2402                                 psignal_locked(sig_proc, SIGKILL);
2403                                 goto sigout_unlocked;
2408                          *      if task hasn't already been stopped by
2411                         uth->uu_siglist &= ~mask;
2412                         if (sig_proc->p_stat != SSTOP) {
2413                                 sig_proc->p_xstat = signum;
2414                                 sig_proc->p_stat = SSTOP;
2415                                 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &sig_proc->p_flag);
2416                                 sig_proc->p_lflag &= ~P_LWAITED;
2417                                 proc_unlock(sig_proc);
2419                                 pp = proc_parentholdref(sig_proc);
2421                                 if ((pp != PROC_NULL) && ((pp->p_flag & P_NOCLDSTOP) == 0)) {
2423                                         my_cred = kauth_cred_proc_ref(sig_proc);
2424                                         r_uid = kauth_cred_getruid(my_cred);
2425                                         kauth_cred_unref(&my_cred);
2427                                         proc_lock(sig_proc);
2428                                         pp->si_pid = sig_proc->p_pid;
2430                                          * POSIX: sigaction for a stopped child
2431                                          * when sent to the parent must set the
2432                                          * child's signal number into si_status.
2434                                         if (signum != SIGSTOP)
2435                                                 pp->si_status = WEXITSTATUS(sig_proc->p_xstat);
2437                                                 pp->si_status = W_EXITCODE(signum, signum);
2438                                         pp->si_code = CLD_STOPPED;
2440                                         proc_unlock(sig_proc);
2442                                         psignal(pp, SIGCHLD);
2444                                 if (pp != PROC_NULL) {
2445                                         proc_parentdropref(pp, 0);
2448                                 goto sigout_unlocked;
2454                 DTRACE_PROC3(signal__send, thread_t, sig_thread, proc_t, p, int, signum);
2458                          * Signals ignored by default have been dealt
2459                          * with already, since their bits are on in
2465                          * Kill signal always sets process running and
2469                          *      Process will be running after 'run'
2471                         sig_proc->p_stat = SRUN;
2473                          * In scenarios where suspend/resume are racing
2474                          * the signal we are missing AST_BSD by the time
2475                          * we get here, set again to avoid races. This
2476                          * was the scenario with spindump enabled shutdowns.
2477                          * We would need to cover this appropriately down the line.
2479                         act_set_astbsd(sig_thread);
2480                         kret = thread_abort(sig_thread);
2481                         update_thread_policy = (kret == KERN_SUCCESS);
2483                         if (uth->uu_exit_reason == OS_REASON_NULL) {
2484                                 if (signal_reason == OS_REASON_NULL) {
2485                                         KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
2486                                                                 sig_proc->p_pid, OS_REASON_SIGNAL, signum, 0, 0);
2488                                         signal_reason = build_signal_reason(signum, NULL);
2491                                 os_reason_ref(signal_reason);
2492                                 set_thread_exit_reason(sig_thread, signal_reason, TRUE);
2499                          * Let the process run.  If it's sleeping on an
2500                          * event, it remains so.
2502                         assert(signal_reason == NULL);
2503                         OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
2504                         sig_proc->p_contproc = sig_proc->p_pid;
2506                         (void) task_resume_internal(sig_task);
2509                          * When processing a SIGCONT, we need to check
2510                          * to see if there are signals pending that
2511                          * were not delivered because we had been
2512                          * previously stopped.  If that's the case,
2513                          * we need to thread_abort_safely() to trigger
2514                          * interruption of the current system call to
2515                          * cause their handlers to fire.  If it's only
2516                          * the SIGCONT, then don't wake up.
2518                         if (((flavor & (PSIG_VFORK|PSIG_THREAD)) == 0) && (((uth->uu_siglist & ~uth->uu_sigmask) & ~sig_proc->p_sigignore) & ~mask)) {
2519                                 uth->uu_siglist &= ~mask;
2520                                 sig_proc->p_stat = SRUN;
2524                         uth->uu_siglist &= ~mask;
2525                         sig_proc->p_stat = SRUN;
2530                          * A signal which has a default action of killing
2531                          * the process, and for which there is no handler,
2532                          * needs to act like SIGKILL
2534                         if (((flavor & (PSIG_VFORK|PSIG_THREAD)) == 0) && (action == SIG_DFL) && (prop & SA_KILL)) {
2535                                 sig_proc->p_stat = SRUN;
2536                                 kret = thread_abort(sig_thread);
2537                                 update_thread_policy = (kret == KERN_SUCCESS);
2539                                 if (uth->uu_exit_reason == OS_REASON_NULL) {
2540                                         if (signal_reason == OS_REASON_NULL) {
2541                                                 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
2542                                                                         sig_proc->p_pid, OS_REASON_SIGNAL, signum, 0, 0);
2544                                                 signal_reason = build_signal_reason(signum, NULL);
2547                                         os_reason_ref(signal_reason);
2548                                         set_thread_exit_reason(sig_thread, signal_reason, TRUE);
2555                          * All other signals wake up the process, but don't
2558                         if (sig_proc->p_stat == SSTOP) {
2568          * If we're being traced (possibly because someone attached us
2569          * while we were stopped), check for a signal from the debugger.
2571         if (sig_proc->p_stat == SSTOP) {
2572                 if ((sig_proc->p_lflag & P_LTRACED) != 0 && sig_proc->p_xstat != 0)
2573                         uth->uu_siglist |= sigmask(sig_proc->p_xstat);
2575                 if ((flavor & PSIG_VFORK) != 0) {
2576                         sig_proc->p_stat = SRUN;
2580                  * setrunnable(p) in BSD and
2581                  * Wake up the thread if it is interruptible.
2583                 sig_proc->p_stat = SRUN;
2584                 if ((flavor & PSIG_VFORK) == 0)
2585                         thread_abort_safely(sig_thread);
2589         if (update_thread_policy) {
2591                  * Update the thread policy to heading to terminate, increase priority if
2592                  * necessary. This needs to be done before we drop the proc lock because the
2593                  * thread can take the fatal signal once it's dropped.
2595                 proc_set_thread_policy(sig_thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
2598         proc_unlock(sig_proc);
2601         os_reason_free(signal_reason);
2602         if ((flavor & PSIG_LOCKED) == 0) {
2603                 proc_signalend(sig_proc, 0);
2608 psignal(proc_t p, int signum)
2610         psignal_internal(p, NULL, NULL, 0, signum, NULL);
2614 psignal_with_reason(proc_t p, int signum, struct os_reason *signal_reason)
2616         psignal_internal(p, NULL, NULL, 0, signum, signal_reason);
2620 psignal_locked(proc_t p, int signum)
2622         psignal_internal(p, NULL, NULL, PSIG_LOCKED, signum, NULL);
2626 psignal_vfork_with_reason(proc_t p, task_t new_task, thread_t thread, int signum, struct os_reason *signal_reason)
2628         psignal_internal(p, new_task, thread, PSIG_VFORK, signum, signal_reason);
2633 psignal_vfork(proc_t p, task_t new_task, thread_t thread, int signum)
2635         psignal_internal(p, new_task, thread, PSIG_VFORK, signum, NULL);
2639 psignal_uthread(thread_t thread, int signum)
2641         psignal_internal(PROC_NULL, TASK_NULL, thread, PSIG_THREAD, signum, NULL);
2644 /* same as psignal(), but prefer delivery to 'thread' if possible */
2646 psignal_try_thread(proc_t p, thread_t thread, int signum)
2648         psignal_internal(p, NULL, thread, PSIG_TRY_THREAD, signum, NULL);
2652 psignal_try_thread_with_reason(proc_t p, thread_t thread, int signum, struct os_reason *signal_reason)
2654         psignal_internal(p, TASK_NULL, thread, PSIG_TRY_THREAD, signum, signal_reason);
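/*
 * Illustrative sketch (editor's addition): the wrappers above differ only in
 * the flavor bits and the optional os_reason_t they forward to
 * psignal_internal().  A hypothetical in-kernel caller picks the narrowest
 * wrapper for its context:
 */
static void example_wrapper_choices(proc_t p)
{
        psignal(p, SIGTERM);                              /* whole process; any willing thread */
        psignal_try_thread(p, current_thread(), SIGPROF); /* prefer this thread, as bsd_ast() does for itimers */
}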
2658  * If the current process has received a signal (should be caught or cause
2659  * termination, should interrupt current syscall), return the signal number.
2660  * Stop signals with default action are processed immediately, then cleared;
2661  * they aren't returned.  This is checked after each entry to the system for
2662  * a syscall or trap (though this can usually be done without calling issignal
2663  * by checking the pending signal masks in the CURSIG macro.) The normal call
2666  *      while (signum = CURSIG(curproc))
2670 issignal_locked(proc_t p)
2672         int signum, mask, prop, sigbits;
2674         struct uthread * ut;
2676         kauth_cred_t my_cred;
2680         cur_act = current_thread();
2683         if (rdebug_proc && (p == rdebug_proc)) {
2686 #endif /* SIGNAL_DEBUG */
2689          * Try to grab the signal lock.
2691         if (sig_try_locked(p) <= 0) {
2695         proc_signalstart(p, 1);
2697         ut = get_bsdthread_info(cur_act);
2699                 sigbits = ut->uu_siglist & ~ut->uu_sigmask;
2701                 if (p->p_lflag & P_LPPWAIT)
2702                         sigbits &= ~stopsigmask;
2703                 if (sigbits == 0) {             /* no signal to send */
2708                 signum = ffs((long)sigbits);
2709                 mask = sigmask(signum);
2710                 prop = sigprop[signum];
2713                  * We should see pending but ignored signals
2714                  * only if P_LTRACED was on when they were posted.
2716                 if (mask & p->p_sigignore && (p->p_lflag & P_LTRACED) == 0) {
2717                         ut->uu_siglist &= ~mask;
2721                 if (p->p_lflag & P_LTRACED && (p->p_lflag & P_LPPWAIT) == 0)  {
2723                          * If traced, deliver the signal to the debugger, and wait to be
2727                         p->p_xstat = signum;
2729                         if (p->p_lflag & P_LSIGEXC) {
2731                                 p->sigwait_thread = cur_act;
2733                                 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
2734                                 p->p_lflag &= ~P_LWAITED;
2735                                 ut->uu_siglist &= ~mask; /* clear the current signal from the pending list */
2736                                 proc_signalend(p, 1);
2738                                 do_bsdexception(EXC_SOFTWARE, EXC_SOFT_SIGNAL, signum);
2740                                 proc_signalstart(p, 1);
2743                                 my_cred = kauth_cred_proc_ref(p);
2744                                 r_uid = kauth_cred_getruid(my_cred);
2745                                 kauth_cred_unref(&my_cred);
2747                                 pp = proc_parentholdref(p);
2748                                 if (pp != PROC_NULL) {
2751                                         pp->si_pid = p->p_pid;
2752                                         pp->si_status = p->p_xstat;
2753                                         pp->si_code = CLD_TRAPPED;
2760                                 *       XXX Have to really stop for debuggers;
2761                                 *       XXX stop() doesn't do the right thing.
2764                                 task_suspend_internal(task);
2768                                 p->sigwait_thread = cur_act;
2770                                 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
2771                                 p->p_lflag &= ~P_LWAITED;
2772                                 ut->uu_siglist &= ~mask;
2774                                 proc_signalend(p, 1);
2777                                 if (pp != PROC_NULL) {
2778                                         psignal(pp, SIGCHLD);
2780                                         wakeup((caddr_t)pp);
2781                                         proc_parentdropref(pp, 1);
2785                                 assert_wait((caddr_t)&p->sigwait, (THREAD_INTERRUPTIBLE));
2786                                 thread_block(THREAD_CONTINUE_NULL);
2788                                 proc_signalstart(p, 1);
2792                         p->sigwait_thread = NULL;
2793                         wakeup((caddr_t)&p->sigwait_thread);
2795                         if (signum == SIGKILL || ut->uu_siglist & sigmask(SIGKILL)) {
2797                                  * Deliver a pending sigkill even if it's not the current signal.
2798                                  * Necessary for PT_KILL, which should not be delivered to the
2799                                  * debugger, but we can't differentiate it from any other KILL.
2805                         /* We may have to quit. */
2806                         if (thread_should_abort(current_thread())) {
2812                          * If parent wants us to take the signal,
2813                          * then it will leave it in p->p_xstat;
2814                          * otherwise we just look for signals again.
2816                         signum = p->p_xstat;
2821                          * Put the new signal into p_siglist.  If the
2822                          * signal is being masked, look for other signals.
2824                         mask = sigmask(signum);
2825                         ut->uu_siglist |= mask;
2826                         if (ut->uu_sigmask & mask)
2831                  * Decide whether the signal should be returned.
2832                  * Return the signal's number, or fall through
2833                  * to clear it from the pending mask.
2836                 switch ((long)p->p_sigacts->ps_sigact[signum]) {
2840                          * If there is a pending stop signal to process
2841                          * with default action, stop here,
2842                          * then clear the signal.  However,
2843                          * if process is member of an orphaned
2844                          * process group, ignore tty stop signals.
2846                         if (prop & SA_STOP) {
2851                                 if (p->p_lflag & P_LTRACED ||
2852                                         (pg->pg_jobc == 0 &&
2853                                         prop & SA_TTYSTOP)) {
2856                                         break; /* ignore signal */
2859                                 if (p->p_stat != SSTOP) {
2861                                         p->p_xstat = signum;
2863                                         p->p_lflag &= ~P_LWAITED;
2866                                         pp = proc_parentholdref(p);
2868                                         if ((pp != PROC_NULL) && ((pp->p_flag & P_NOCLDSTOP) == 0)) {
2869                                                 my_cred = kauth_cred_proc_ref(p);
2870                                                 r_uid = kauth_cred_getruid(my_cred);
2871                                                 kauth_cred_unref(&my_cred);
2874                                                 pp->si_pid = p->p_pid;
2875                                                 pp->si_status = WEXITSTATUS(p->p_xstat);
2876                                                 pp->si_code = CLD_STOPPED;
2880                                                 psignal(pp, SIGCHLD);
2882                                         if (pp != PROC_NULL)
2883                                                 proc_parentdropref(pp, 0);
2887                         } else if (prop & SA_IGNORE) {
2889                                  * Except for SIGCONT, shouldn't get here.
2890                                  * Default action is to ignore; drop it.
2892                                 break; /* ignore signal */
2899                          * Masking above should prevent us ever trying
2900                          * to take action on an ignored signal other
2901                          * than SIGCONT, unless process is traced.
2903                         if ((prop & SA_CONT) == 0 &&
2904                                 (p->p_lflag & P_LTRACED) == 0)
2905                                 printf("issignal\n");
2906                         break; /* ignore signal */
2909                         /* This signal has an action - deliver it. */
2913                 /* If we dropped through, the signal was ignored - remove it from pending list. */
2914                 ut->uu_siglist &= ~mask;
2921         ut->uu_siglist &= ~mask;
2925         proc_signalend(p, 1);
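/*
 * Illustrative sketch (editor's addition): issignal_locked() is consumed in a
 * loop that keeps dequeuing deliverable signals until none remain, the same
 * shape bsd_ast() uses further down in this file:
 */
static void example_consume_pending(proc_t p)
{
        int signum;

        while ((signum = issignal_locked(p)))
                postsig_locked(signum);
}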
2929 /* called from _sleep */
2933         int signum, mask, prop, sigbits;
2935         struct uthread * ut;
2939         cur_act = current_thread();
2941         ut = get_bsdthread_info(cur_act);
2943         if (ut->uu_siglist == 0)
2946         if (((ut->uu_siglist & ~ut->uu_sigmask) == 0) && ((p->p_lflag & P_LTRACED) == 0))
2949         sigbits = ut->uu_siglist & ~ut->uu_sigmask;
2952                 if (p->p_lflag & P_LPPWAIT)
2953                         sigbits &= ~stopsigmask;
2954                 if (sigbits == 0) {             /* no signal to send */
2958                 signum = ffs((long)sigbits);
2959                 mask = sigmask(signum);
2960                 prop = sigprop[signum];
2961                 sigbits &= ~mask;               /* take the signal out */
2964                  * We should see pending but ignored signals
2965                  * only if P_LTRACED was on when they were posted.
2967                 if (mask & p->p_sigignore && (p->p_lflag & P_LTRACED) == 0) {
2971                 if (p->p_lflag & P_LTRACED && (p->p_lflag & P_LPPWAIT) == 0) {
2976                  * Decide whether the signal should be returned.
2977                  * Return the signal's number, or fall through
2978                  * to clear it from the pending mask.
2981                 switch ((long)p->p_sigacts->ps_sigact[signum]) {
2985                          * If there is a pending stop signal to process
2986                          * with default action, stop here,
2987                          * then clear the signal.  However,
2988                          * if process is member of an orphaned
2989                          * process group, ignore tty stop signals.
2991                         if (prop & SA_STOP) {
2996                                 if (p->p_lflag & P_LTRACED ||
2997                                         (pg->pg_jobc == 0 &&
2998                                         prop & SA_TTYSTOP)) {
3000                                         break;  /* == ignore */
3005                         } else if (prop & SA_IGNORE) {
3007                                  * Except for SIGCONT, shouldn't get here.
3008                                  * Default action is to ignore; drop it.
3010                                 break;          /* == ignore */
3018                          * Masking above should prevent us ever trying
3019                          * to take action on an ignored signal other
3020                          * than SIGCONT, unless process is traced.
3022                         if ((prop & SA_CONT) == 0 &&
3023                                 (p->p_lflag & P_LTRACED) == 0)
3024                                 printf("issignal\n");
3025                         break;          /* == ignore */
3029                          * This signal has an action, let
3030                          * postsig() process it.
3039  * Put the argument process into the stopped state and notify the parent
3040  * via wakeup.  Signals are handled elsewhere.  The process must not be
3044 stop(proc_t p, proc_t parent)
3046         OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
3047         if ((parent != PROC_NULL) && (parent->p_stat != SSTOP)) {
3049                 wakeup((caddr_t)parent);
3052         (void) task_suspend_internal(p->task);
3056  * Take the action for the specified signal
3057  * from the current set of pending signals.
3060 postsig_locked(int signum)
3062         proc_t p = current_proc();
3063         struct sigacts *ps = p->p_sigacts;
3064         user_addr_t catcher;
3066         int mask, returnmask;
3067         struct uthread * ut;
3073          *      This must be called on the master cpu
3075         if (cpu_number() != master_cpu)
3076                 panic("psig not on master");
3080          * Try to grab the signal lock.
3082         if (sig_try_locked(p) <= 0) {
3086         proc_signalstart(p, 1);
3088         ut = (struct uthread *)get_bsdthread_info(current_thread());
3089         mask = sigmask(signum);
3090         ut->uu_siglist &= ~mask;
3091         catcher = ps->ps_sigact[signum];
3092         if (catcher == SIG_DFL) {
3094                  * Default catcher, where the default is to kill
3095                  * the process.  (Other cases were ignored above.)
3097                 sig_lock_to_exit(p);
3098                 p->p_acflag |= AXSIG;
3099                 if (sigprop[signum] & SA_CORE) {
3100                         p->p_sigacts->ps_sig = signum;
3101                         proc_signalend(p, 1);
3104                         if (coredump(p, 0, 0) == 0)
3105                                 signum |= WCOREFLAG;
3108                         proc_signalend(p, 1);
3113                 bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));
3115                 ut->t_dtrace_siginfo.si_signo = signum;
3116                 ut->t_dtrace_siginfo.si_pid = p->si_pid;
3117                 ut->t_dtrace_siginfo.si_uid = p->si_uid;
3118                 ut->t_dtrace_siginfo.si_status = WEXITSTATUS(p->si_status);
3120                 /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
3122                 case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
3123                         DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
3130                 DTRACE_PROC3(signal__handle, int, signum, siginfo_t *, &(ut->t_dtrace_siginfo),
3131                                         void (*)(void), SIG_DFL);
3134                 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_FRCEXIT) | DBG_FUNC_NONE,
3135                                               p->p_pid, W_EXITCODE(0, signum), 3, 0, 0);
3138                  * exit_with_reason() will consume a reference to the thread's exit reason, so we take another
3139                  * reference for the thread. This reference will be destroyed in uthread_cleanup().
3141                 os_reason_ref(ut->uu_exit_reason);
3142                 exit_with_reason(p, W_EXITCODE(0, signum), (int *)NULL, TRUE, TRUE, 0, ut->uu_exit_reason);
3148                  * If we get here, the signal must be caught.
3151                 if (catcher == SIG_IGN || (ut->uu_sigmask & mask))
3153                                 "postsig: processing masked or ignored signal\n");
3157                  * Set the new mask value and also defer further
3158                  * occurrences of this signal.
3160                  * Special case: user has done a sigpause.  Here the
3161                  * current mask is not of interest, but rather the
3162                  * mask from before the sigpause is what we want
3163                  * restored after the signal processing is completed.
3165                 if (ut->uu_flag & UT_SAS_OLDMASK) {
3166                         returnmask = ut->uu_oldmask;
3167                         ut->uu_flag &= ~UT_SAS_OLDMASK;
3170                         returnmask = ut->uu_sigmask;
3171                 ut->uu_sigmask |= ps->ps_catchmask[signum];
3172                 if ((ps->ps_signodefer & mask) == 0)
3173                         ut->uu_sigmask |= mask;
3174                 if ((signum != SIGILL) && (signum != SIGTRAP) && (ps->ps_sigreset & mask)) {
3175                         if ((signum != SIGCONT) && (sigprop[signum] & SA_IGNORE))
3176                                 p->p_sigignore |= mask;
3177                         ps->ps_sigact[signum] = SIG_DFL;
3178                         ps->ps_siginfo &= ~mask;
3179                         ps->ps_signodefer &= ~mask;
3182                 if (ps->ps_sig != signum) {
3188                 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_nsignals);
3189                 sendsig(p, catcher, signum, returnmask, code);
3191         proc_signalend(p, 1);
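/*
 * Illustrative sketch (editor's addition): whether postsig_locked() takes the
 * kill/coredump path or calls sendsig() is decided by what userspace installed
 * with sigaction(2).  A minimal userspace counterpart, assuming a process that
 * wants SIGTERM caught rather than defaulted:
 */
#include <signal.h>

static void on_term(int signo)
{
        /* runs on the signal trampoline that sendsig() sets up */
        (void)signo;
}

static void install_handler(void)
{
        struct sigaction sa;

        sa.sa_handler = on_term;        /* catcher != SIG_DFL, so postsig delivers */
        sigemptyset(&sa.sa_mask);       /* extra signals blocked during delivery */
        sa.sa_flags = 0;                /* flags such as SA_NODEFER feed the ps_signodefer mask consulted above */
        sigaction(SIGTERM, &sa, NULL);
}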
3195  * Attach a signal knote to the list of knotes for this process.
3197  * Signal knotes share the knote list with proc knotes.  This
3198  * could be avoided by using a signal-specific knote list, but
3199  * probably isn't worth the trouble.
3203 filt_sigattach(struct knote *kn)
3205         proc_t p = current_proc();  /* can attach only to oneself */
3209         kn->kn_ptr.p_proc = p;
3211         KNOTE_ATTACH(&p->p_klist, kn);
3213         proc_klist_unlock();
3215         /* edge-triggered events can't have fired before we attached */
3220  * remove the knote from the process list, if it hasn't already
3221  * been removed by exit processing.
3225 filt_sigdetach(struct knote *kn)
3227         proc_t p = kn->kn_ptr.p_proc;
3230         kn->kn_ptr.p_proc = NULL;
3231         KNOTE_DETACH(&p->p_klist, kn);
3232         proc_klist_unlock();
3236  * Post an event to the signal filter.  Because we share the same list
3237  * as process knotes, we have to filter out and handle only signal events.
3239  * We assume that we process fdfree() before we post the NOTE_EXIT for
3240  * a process during exit.  Therefore, since signal filters can only be
3241  * set up "in-process", we should have already torn down the kqueue
3242  * hosting the EVFILT_SIGNAL knote and should never see NOTE_EXIT.
3245 filt_signal(struct knote *kn, long hint)
3248         if (hint & NOTE_SIGNAL) {
3249                 hint &= ~NOTE_SIGNAL;
3251                 if (kn->kn_id == (unsigned int)hint)
3253         } else if (hint & NOTE_EXIT) {
3254                 panic("filt_signal: detected NOTE_EXIT event");
3257         return (kn->kn_data != 0);
3263         struct kevent_internal_s *kev)
3271         if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
3272                 kn->kn_udata = kev->udata;
3275          * just capture if it is already fired
3277         res = (kn->kn_data > 0);
3279         proc_klist_unlock();
3287         __unused struct filt_process_s *data,
3288         struct kevent_internal_s *kev)
3292         if (kn->kn_data == 0) {
3293                 proc_klist_unlock();
3298          * Snapshot the event data.
3299          * All signal events are EV_CLEAR, so
3300          * add that and clear out the data field.
3302         *kev = kn->kn_kevent;
3303         kev->flags |= EV_CLEAR;
3306         proc_klist_unlock();
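/*
 * Illustrative sketch (editor's addition): these filter routines back the
 * userspace EVFILT_SIGNAL kqueue filter, which counts deliveries (kn_data)
 * independently of whether a handler runs, since the knote is posted before
 * the ignore check in psignal_internal().  A minimal userspace consumer:
 */
#include <signal.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <unistd.h>

static void watch_sigusr1(void)
{
        int kq = kqueue();
        struct kevent kev;

        /* EVFILT_SIGNAL still observes SIGUSR1 even while it is ignored */
        signal(SIGUSR1, SIG_IGN);
        EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD | EV_ENABLE, 0, 0, NULL);
        kevent(kq, &kev, 1, NULL, 0, NULL);

        /* blocks until a SIGUSR1 arrives; kev.data holds the delivery count */
        kevent(kq, NULL, 0, &kev, 1, NULL);
        close(kq);
}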
3311 bsd_ast(thread_t thread)
3313         proc_t p = current_proc();
3314         struct uthread *ut = get_bsdthread_info(thread);
3317         static int bsd_init_done = 0;
3322         if ((p->p_flag & P_OWEUPC) && (p->p_flag & P_PROFIL)) {
3323                 pc = get_useraddr();
3324                 addupc_task(p, pc, 1);
3325                 OSBitAndAtomic(~((uint32_t)P_OWEUPC), &p->p_flag);
3328         if (timerisset(&p->p_vtimer_user.it_value)) {
3331                 task_vtimer_update(p->task, TASK_VTIMER_USER, &microsecs);
3333                 if (!itimerdecr(p, &p->p_vtimer_user, microsecs)) {
3334                         if (timerisset(&p->p_vtimer_user.it_value))
3335                                 task_vtimer_set(p->task, TASK_VTIMER_USER);
3337                                 task_vtimer_clear(p->task, TASK_VTIMER_USER);
3339                         psignal_try_thread(p, thread, SIGVTALRM);
3343         if (timerisset(&p->p_vtimer_prof.it_value)) {
3346                 task_vtimer_update(p->task, TASK_VTIMER_PROF, &microsecs);
3348                 if (!itimerdecr(p, &p->p_vtimer_prof, microsecs)) {
3349                         if (timerisset(&p->p_vtimer_prof.it_value))
3350                                 task_vtimer_set(p->task, TASK_VTIMER_PROF);
3352                                 task_vtimer_clear(p->task, TASK_VTIMER_PROF);
3354                         psignal_try_thread(p, thread, SIGPROF);
3358         if (timerisset(&p->p_rlim_cpu)) {
3361                 task_vtimer_update(p->task, TASK_VTIMER_RLIM, (uint32_t *) &tv.tv_usec);
3364                 if (p->p_rlim_cpu.tv_sec > 0 || p->p_rlim_cpu.tv_usec > tv.tv_usec) {
3366                         timersub(&p->p_rlim_cpu, &tv, &p->p_rlim_cpu);
3370                         timerclear(&p->p_rlim_cpu);
3373                         task_vtimer_clear(p->task, TASK_VTIMER_RLIM);
3375                         psignal_try_thread(p, thread, SIGXCPU);
3380         if (ut->t_dtrace_sig) {
3381                 uint8_t dt_action_sig = ut->t_dtrace_sig;
3382                 ut->t_dtrace_sig = 0;
3383                 psignal(p, dt_action_sig);
3386         if (ut->t_dtrace_stop) {
3387                 ut->t_dtrace_stop = 0;
3389                 p->p_dtrace_stop = 1;
3391                 (void)task_suspend_internal(p->task);
3394         if (ut->t_dtrace_resumepid) {
3395                 proc_t resumeproc = proc_find(ut->t_dtrace_resumepid);
3396                 ut->t_dtrace_resumepid = 0;
3397                 if (resumeproc != PROC_NULL) {
3398                         proc_lock(resumeproc);
3399                         /* We only act on processes stopped by dtrace */
3400                         if (resumeproc->p_dtrace_stop) {
3401                                 resumeproc->p_dtrace_stop = 0;
3402                                 proc_unlock(resumeproc);
3403                                 task_resume_internal(resumeproc->task);
3406                                 proc_unlock(resumeproc);
3408                         proc_rele(resumeproc);
3412 #endif /* CONFIG_DTRACE */
3415         if (CHECK_SIGNALS(p, current_thread(), ut)) {
3416                 while ( (signum = issignal_locked(p)) )
3417                         postsig_locked(signum);
3421         if (!bsd_init_done) {
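/*
 * Illustrative sketch (editor's addition): the itimer blocks above are what
 * service a userspace setitimer(2) request; when the virtual or profiling
 * timer expires, the AST path sends SIGVTALRM or SIGPROF preferentially to
 * the thread that was running.  A minimal userspace trigger:
 */
#include <sys/time.h>
#include <signal.h>

static void arm_profiling_timer(void)
{
        struct itimerval itv;

        itv.it_interval.tv_sec = 0;
        itv.it_interval.tv_usec = 10000;        /* re-arm every 10ms */
        itv.it_value = itv.it_interval;

        signal(SIGPROF, SIG_IGN);               /* or install a real handler */
        setitimer(ITIMER_PROF, &itv, NULL);     /* expiry surfaces as SIGPROF via bsd_ast() */
}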
3428 /* ptrace set runnable */
3430 pt_setrunnable(proc_t p)
3436         if (p->p_lflag & P_LTRACED) {
3441                         wakeup((caddr_t)&(p->sigwait));
3442                         if ((p->p_lflag & P_LSIGEXC) == 0) {    // 5878479
3455         mach_exception_data_type_t   codes[EXCEPTION_CODE_MAX];
3459         return(bsd_exception(exc, codes, 2));
3463 proc_pendingsignals(proc_t p, sigset_t mask)
3465         struct uthread * uth;
3470         /* If the process is in proc exit return no signal info */
3471         if (p->p_lflag & P_LPEXIT)  {
3475         if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
3477                 uth = (struct uthread *)get_bsdthread_info(th);
3479                         bits = (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
3485         TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
3486                 bits |= (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
3494 thread_issignal(proc_t p, thread_t th, sigset_t mask)
3496         struct uthread * uth;
3500         uth = (struct uthread *)get_bsdthread_info(th);
3502                 bits = (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
3509  * Allow external reads of the sigprop array.
3512 hassigprop(int sig, int prop)
3514         return (sigprop[sig] & prop);
3518 pgsigio(pid_t pgid, int sig)
3520         proc_t p = PROC_NULL;
3523                 gsignal(-(pgid), sig);
3525         else if (pgid > 0 && (p = proc_find(pgid)) != 0)
3532 proc_signalstart(proc_t p, int locked)
3537         if (p->p_signalholder == current_thread())
3538                 panic("proc_signalstart: thread attempting to signal a process for which it holds the signal lock");
3541         while ((p->p_lflag & P_LINSIGNAL) == P_LINSIGNAL)
3542                 msleep(&p->p_sigmask, &p->p_mlock, 0, "proc_signstart", NULL);
3545         p->p_lflag |= P_LINSIGNAL;
3546         p->p_signalholder = current_thread();
3552 proc_signalend(proc_t p, int locked)
3556         p->p_lflag &= ~P_LINSIGNAL;
3558         if (p->p_sigwaitcnt > 0)
3559                 wakeup(&p->p_sigmask);
3561         p->p_signalholder = NULL;
3567 sig_lock_to_exit(proc_t p)
3569         thread_t        self = current_thread();
3571         p->exit_thread = self;
3575         task_wait(p->task, FALSE);
3581 sig_try_locked(proc_t p)
3583         thread_t        self = current_thread();
3585         while (p->sigwait || p->exit_thread) {
3586                 if (p->exit_thread) {
3589                 msleep((caddr_t)&p->sigwait_thread, &p->p_mlock, PCATCH | PDROP, 0, 0);
3590                 if (thread_should_abort(self)) {
3592                          * Terminate request - clean up.