1c79356b 1/*
2d21ac55 2 * Copyright (c) 1995-2007 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b 27 */
1c79356b
A
28/*
29 * Copyright (c) 1982, 1986, 1989, 1991, 1993
30 * The Regents of the University of California. All rights reserved.
31 * (c) UNIX System Laboratories, Inc.
32 * All or some portions of this file are derived from material licensed
33 * to the University of California by American Telephone and Telegraph
34 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
35 * the permission of UNIX System Laboratories, Inc.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement:
47 * This product includes software developed by the University of
48 * California, Berkeley and its contributors.
49 * 4. Neither the name of the University nor the names of its contributors
50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE.
64 *
65 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94
66 */
2d21ac55
A
67/*
68 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
69 * support for mandatory and extensible security protections. This notice
70 * is included in support of clause 2.2 (b) of the Apple Public License,
71 * Version 2.0.
72 */
1c79356b
A
73
74#define SIGPROP /* include signal properties table */
75#include <sys/param.h>
1c79356b 76#include <sys/resourcevar.h>
91447636
A
77#include <sys/proc_internal.h>
78#include <sys/kauth.h>
1c79356b
A
79#include <sys/systm.h>
80#include <sys/timeb.h>
81#include <sys/times.h>
1c79356b 82#include <sys/acct.h>
91447636 83#include <sys/file_internal.h>
1c79356b
A
84#include <sys/kernel.h>
85#include <sys/wait.h>
9bccf70c 86#include <sys/signalvar.h>
1c79356b
A
87#include <sys/syslog.h>
88#include <sys/stat.h>
89#include <sys/lock.h>
9bccf70c 90#include <sys/kdebug.h>
91447636 91
1c79356b 92#include <sys/mount.h>
91447636 93#include <sys/sysproto.h>
1c79356b 94
b0d623f7 95#include <security/audit/audit.h>
e5568f75 96
91447636
A
97#include <machine/spl.h>
98
1c79356b
A
99#include <kern/cpu_number.h>
100
101#include <sys/vm.h>
102#include <sys/user.h> /* for coredump */
103#include <kern/ast.h> /* for APC support */
91447636 104#include <kern/task.h> /* extern void *get_bsdtask_info(task_t); */
1c79356b 105#include <kern/thread.h>
9bccf70c 106#include <kern/sched_prim.h>
1c79356b 107#include <kern/thread_call.h>
9bccf70c 108#include <mach/exception.h>
91447636
A
109#include <mach/task.h>
110#include <mach/thread_act.h>
2d21ac55
A
111#include <libkern/OSAtomic.h>
112
113#include <sys/sdt.h>
3e170ce0 114#include <sys/codesign.h>
91447636
A
115
116/*
117 * Missing prototypes that Mach should export
118 *
119 * +++
120 */
121extern int thread_enable_fpe(thread_t act, int onoff);
91447636 122extern thread_t port_name_to_thread(mach_port_name_t port_name);
91447636 123extern kern_return_t get_signalact(task_t , thread_t *, int);
91447636
A
124extern unsigned int get_useraddr(void);
125
126/*
127 * ---
128 */
9bccf70c 129
2d21ac55
A
130extern void doexception(int exc, mach_exception_code_t code,
131 mach_exception_subcode_t sub);
1c79356b 132
2d21ac55
A
133static void stop(proc_t, proc_t);
134int cansignal(proc_t, kauth_cred_t, proc_t, int, int);
135int killpg1(proc_t, int, int, int, int);
2d21ac55 136static void psignal_uthread(thread_t, int);
3e170ce0 137static void psignal_try_thread(proc_t, thread_t, int signum);
9bccf70c 138kern_return_t do_bsdexception(int, int, int);
91447636 139void __posix_sem_syscall_return(kern_return_t);
3e170ce0 140char *proc_name_address(void *p);
91447636
A
141
142/* implementations in osfmk/kern/sync_sema.c. We do not want port.h in this scope, so void * them */
2d21ac55
A
143kern_return_t semaphore_timedwait_signal_trap_internal(mach_port_name_t, mach_port_name_t, unsigned int, clock_res_t, void (*)(kern_return_t));
144kern_return_t semaphore_timedwait_trap_internal(mach_port_name_t, unsigned int, clock_res_t, void (*)(kern_return_t));
145kern_return_t semaphore_wait_signal_trap_internal(mach_port_name_t, mach_port_name_t, void (*)(kern_return_t));
146kern_return_t semaphore_wait_trap_internal(mach_port_name_t, void (*)(kern_return_t));
1c79356b 147
55e303ae
A
148static int filt_sigattach(struct knote *kn);
149static void filt_sigdetach(struct knote *kn);
150static int filt_signal(struct knote *kn, long hint);
3e170ce0 151static void filt_signaltouch(struct knote *kn, struct kevent_internal_s *kev,
b0d623f7
A
152 long type);
153
154struct filterops sig_filtops = {
155 .f_attach = filt_sigattach,
156 .f_detach = filt_sigdetach,
157 .f_event = filt_signal,
158 .f_touch = filt_signaltouch,
159};
55e303ae 160
2d21ac55
A
 161/* structures and fns for killpg1 iteration callback and filters */
162struct killpg1_filtargs {
163 int posix;
164 proc_t cp;
165};
166
167struct killpg1_iterargs {
168 proc_t cp;
169 kauth_cred_t uc;
170 int signum;
171 int * nfoundp;
b0d623f7 172 int zombie;
2d21ac55
A
173};
174
175static int killpg1_filt(proc_t p, void * arg);
176static int killpg1_pgrpfilt(proc_t p, __unused void * arg);
177static int killpg1_callback(proc_t p, void * arg);
178
179static int pgsignal_filt(proc_t p, void * arg);
180static int pgsignal_callback(proc_t p, void * arg);
181static kern_return_t get_signalthread(proc_t, int, thread_t *);
182
183
184/* flags for psignal_internal */
185#define PSIG_LOCKED 0x1
186#define PSIG_VFORK 0x2
187#define PSIG_THREAD 0x4
3e170ce0 188#define PSIG_TRY_THREAD 0x8
2d21ac55
A
189
190
191static void psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum);
91447636
A
192
193/*
194 * NOTE: Source and target may *NOT* overlap! (target is smaller)
195 */
196static void
b0d623f7 197sigaltstack_kern_to_user32(struct kern_sigaltstack *in, struct user32_sigaltstack *out)
91447636 198{
b0d623f7
A
199 out->ss_sp = CAST_DOWN_EXPLICIT(user32_addr_t, in->ss_sp);
200 out->ss_size = CAST_DOWN_EXPLICIT(user32_size_t, in->ss_size);
201 out->ss_flags = in->ss_flags;
202}
203
204static void
205sigaltstack_kern_to_user64(struct kern_sigaltstack *in, struct user64_sigaltstack *out)
206{
207 out->ss_sp = in->ss_sp;
91447636
A
208 out->ss_size = in->ss_size;
209 out->ss_flags = in->ss_flags;
210}
211
212/*
 213 * NOTE: Source and target are permitted to overlap! (source is smaller);
214 * this works because we copy fields in order from the end of the struct to
215 * the beginning.
216 */
217static void
b0d623f7
A
218sigaltstack_user32_to_kern(struct user32_sigaltstack *in, struct kern_sigaltstack *out)
219{
220 out->ss_flags = in->ss_flags;
221 out->ss_size = in->ss_size;
222 out->ss_sp = CAST_USER_ADDR_T(in->ss_sp);
223}
224static void
225sigaltstack_user64_to_kern(struct user64_sigaltstack *in, struct kern_sigaltstack *out)
91447636
A
226{
227 out->ss_flags = in->ss_flags;
228 out->ss_size = in->ss_size;
b0d623f7 229 out->ss_sp = in->ss_sp;
91447636
A
230}
231
232static void
b0d623f7 233sigaction_kern_to_user32(struct kern_sigaction *in, struct user32_sigaction *out)
91447636
A
234{
235 /* This assumes 32 bit __sa_handler is of type sig_t */
b0d623f7
A
236 out->__sigaction_u.__sa_handler = CAST_DOWN_EXPLICIT(user32_addr_t,in->__sigaction_u.__sa_handler);
237 out->sa_mask = in->sa_mask;
238 out->sa_flags = in->sa_flags;
239}
240static void
241sigaction_kern_to_user64(struct kern_sigaction *in, struct user64_sigaction *out)
242{
243 /* This assumes 32 bit __sa_handler is of type sig_t */
244 out->__sigaction_u.__sa_handler = in->__sigaction_u.__sa_handler;
91447636
A
245 out->sa_mask = in->sa_mask;
246 out->sa_flags = in->sa_flags;
247}
248
249static void
b0d623f7 250__sigaction_user32_to_kern(struct __user32_sigaction *in, struct __kern_sigaction *out)
91447636
A
251{
252 out->__sigaction_u.__sa_handler = CAST_USER_ADDR_T(in->__sigaction_u.__sa_handler);
253 out->sa_tramp = CAST_USER_ADDR_T(in->sa_tramp);
254 out->sa_mask = in->sa_mask;
255 out->sa_flags = in->sa_flags;
256}
257
b0d623f7
A
258static void
259__sigaction_user64_to_kern(struct __user64_sigaction *in, struct __kern_sigaction *out)
260{
261 out->__sigaction_u.__sa_handler = in->__sigaction_u.__sa_handler;
262 out->sa_tramp = in->sa_tramp;
263 out->sa_mask = in->sa_mask;
264 out->sa_flags = in->sa_flags;
265}
91447636 266
1c79356b 267#if SIGNAL_DEBUG
91447636 268void ram_printf(int);
1c79356b
A
269int ram_debug=0;
270unsigned int rdebug_proc=0;
271void
272ram_printf(int x)
273{
274 printf("x is %d",x);
275
276}
1c79356b 277#endif /* SIGNAL_DEBUG */
9bccf70c 278
1c79356b
A
279
280void
2d21ac55 281signal_setast(thread_t sig_actthread)
1c79356b 282{
9bccf70c 283 act_set_astbsd(sig_actthread);
1c79356b
A
284}
285
286/*
91447636 287 * Can process p, with ucred uc, send the signal signum to process q?
2d21ac55
A
 288 * uc is refcounted by the caller so internal fields can be used safely.
 289 * When called with a nonzero zombie arg, the proc list lock is already held.
1c79356b
A
290 */
291int
2d21ac55 292cansignal(proc_t p, kauth_cred_t uc, proc_t q, int signum, int zombie)
1c79356b 293{
2d21ac55
A
294 kauth_cred_t my_cred;
295 struct session * p_sessp = SESSION_NULL;
296 struct session * q_sessp = SESSION_NULL;
297#if CONFIG_MACF
298 int error;
299
300 error = mac_proc_check_signal(p, q, signum);
301 if (error)
302 return (0);
303#endif
304
9bccf70c
A
305 /* you can signal yourself */
306 if (p == q)
307 return(1);
308
3e170ce0
A
309 /* you can't send launchd SIGKILL, even if root */
310 if (signum == SIGKILL && q == initproc)
311 return(0);
312
91447636 313 if (!suser(uc, NULL))
1c79356b
A
314 return (1); /* root can always signal */
315
2d21ac55
A
316 if (zombie == 0)
317 proc_list_lock();
318 if (p->p_pgrp != PGRP_NULL)
319 p_sessp = p->p_pgrp->pg_session;
320 if (q->p_pgrp != PGRP_NULL)
321 q_sessp = q->p_pgrp->pg_session;
322
323 if (signum == SIGCONT && q_sessp == p_sessp) {
324 if (zombie == 0)
325 proc_list_unlock();
1c79356b 326 return (1); /* SIGCONT in session */
2d21ac55
A
327 }
328
329 if (zombie == 0)
330 proc_list_unlock();
1c79356b
A
331
332 /*
b0d623f7
A
333 * If the real or effective UID of the sender matches the real
334 * or saved UID of the target, permit the signal to
2d21ac55 335 * be sent.
1c79356b 336 */
2d21ac55
A
337 if (zombie == 0)
338 my_cred = kauth_cred_proc_ref(q);
339 else
340 my_cred = proc_ucred(q);
341
6d2010ae
A
342 if (kauth_cred_getruid(uc) == kauth_cred_getruid(my_cred) ||
343 kauth_cred_getruid(uc) == kauth_cred_getsvuid(my_cred) ||
344 kauth_cred_getuid(uc) == kauth_cred_getruid(my_cred) ||
345 kauth_cred_getuid(uc) == kauth_cred_getsvuid(my_cred)) {
2d21ac55
A
346 if (zombie == 0)
347 kauth_cred_unref(&my_cred);
348 return (1);
1c79356b
A
349 }
350
2d21ac55
A
351 if (zombie == 0)
352 kauth_cred_unref(&my_cred);
353
1c79356b
A
354 return (0);
355}
356
3e170ce0
A
357/*
358 * <rdar://problem/21952708> Some signals can be restricted from being handled,
359 * forcing the default action for that signal. This behavior applies only to
360 * non-root (EUID != 0) processes, and is configured with the "sigrestrict=x"
361 * bootarg:
362 *
363 * 0 (default): Disallow use of restricted signals. Trying to register a handler
364 * returns ENOTSUP, which userspace may use to take special action (e.g. abort).
365 * 1: As above, but return EINVAL. Restricted signals behave similarly to SIGKILL.
366 * 2: Usual POSIX semantics.
367 */
368unsigned sigrestrict_arg = 0;
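/*
 * Illustrative sketch (not part of the kernel sources): one way userspace
 * might react to the ENOTSUP described above when registering a handler for
 * a restricted signal.  The helper name and the abort-on-ENOTSUP policy are
 * assumptions chosen purely for illustration; only the ENOTSUP return itself
 * comes from the comment above.
 *
 *	#include <signal.h>
 *	#include <errno.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	static void on_sig(int sig) { (void)sig; }
 *
 *	static void install_handler(int sig)
 *	{
 *		struct sigaction sa = { .sa_handler = on_sig };
 *		sigemptyset(&sa.sa_mask);
 *		if (sigaction(sig, &sa, NULL) == -1 && errno == ENOTSUP) {
 *			// restricted signal: take special action (e.g. abort)
 *			fprintf(stderr, "signal %d is restricted\n", sig);
 *			abort();
 *		}
 *	}
 */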
369
370#if PLATFORM_WatchOS || PLATFORM_AppleTVOS
371static int
372sigrestrictmask(void)
373{
374 if (kauth_getuid() != 0 && sigrestrict_arg != 2) {
375 return SIGRESTRICTMASK;
376 }
377 return 0;
378}
379
380static int
381signal_is_restricted(proc_t p, int signum)
382{
383 if (sigmask(signum) & sigrestrictmask()) {
384 if (sigrestrict_arg == 0 &&
385 task_get_apptype(p->task) == TASK_APPTYPE_APP_DEFAULT) {
386 return ENOTSUP;
387 } else {
388 return EINVAL;
389 }
390 }
391 return 0;
392}
393
394#else
395
396static inline int
397signal_is_restricted(proc_t p, int signum)
398{
399 (void)p;
400 (void)signum;
401 return 0;
402}
403#endif /* !(PLATFORM_WatchOS || PLATFORM_AppleTVOS) */
1c79356b 404
2d21ac55
A
405/*
406 * Returns: 0 Success
407 * EINVAL
408 * copyout:EFAULT
409 * copyin:EFAULT
b0d623f7
A
410 *
411 * Notes: Uses current thread as a parameter to inform PPC to enable
412 * FPU exceptions via setsigvec(); this operation is not proxy
413 * safe!
2d21ac55 414 */
1c79356b
A
415/* ARGSUSED */
416int
b0d623f7 417sigaction(proc_t p, struct sigaction_args *uap, __unused int32_t *retval)
1c79356b 418{
b0d623f7
A
419 struct kern_sigaction vec;
420 struct __kern_sigaction __vec;
9bccf70c 421
b0d623f7 422 struct kern_sigaction *sa = &vec;
2d21ac55 423 struct sigacts *ps = p->p_sigacts;
91447636 424
2d21ac55 425 int signum;
9bccf70c 426 int bit, error=0;
1c79356b
A
427
428 signum = uap->signum;
429 if (signum <= 0 || signum >= NSIG ||
3e170ce0 430 signum == SIGKILL || signum == SIGSTOP)
1c79356b 431 return (EINVAL);
91447636 432
3e170ce0
A
433 if ((error = signal_is_restricted(p, signum))) {
434 if (error == ENOTSUP) {
435 printf("%s(%d): denied attempt to register action for signal %d\n",
436 proc_name_address(p), proc_pid(p), signum);
437 }
438 return error;
439 }
440
1c79356b
A
441 if (uap->osa) {
442 sa->sa_handler = ps->ps_sigact[signum];
443 sa->sa_mask = ps->ps_catchmask[signum];
444 bit = sigmask(signum);
445 sa->sa_flags = 0;
446 if ((ps->ps_sigonstack & bit) != 0)
447 sa->sa_flags |= SA_ONSTACK;
448 if ((ps->ps_sigintr & bit) == 0)
449 sa->sa_flags |= SA_RESTART;
9bccf70c
A
450 if (ps->ps_siginfo & bit)
451 sa->sa_flags |= SA_SIGINFO;
452 if (ps->ps_signodefer & bit)
453 sa->sa_flags |= SA_NODEFER;
55e303ae
A
454 if (ps->ps_64regset & bit)
455 sa->sa_flags |= SA_64REGSET;
9bccf70c 456 if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDSTOP))
1c79356b 457 sa->sa_flags |= SA_NOCLDSTOP;
9bccf70c
A
458 if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDWAIT))
459 sa->sa_flags |= SA_NOCLDWAIT;
91447636
A
460
461 if (IS_64BIT_PROCESS(p)) {
b0d623f7
A
462 struct user64_sigaction vec64;
463
464 sigaction_kern_to_user64(sa, &vec64);
465 error = copyout(&vec64, uap->osa, sizeof(vec64));
91447636 466 } else {
b0d623f7
A
467 struct user32_sigaction vec32;
468
469 sigaction_kern_to_user32(sa, &vec32);
470 error = copyout(&vec32, uap->osa, sizeof(vec32));
91447636
A
471 }
472 if (error)
1c79356b
A
473 return (error);
474 }
475 if (uap->nsa) {
91447636 476 if (IS_64BIT_PROCESS(p)) {
b0d623f7
A
477 struct __user64_sigaction __vec64;
478
479 error = copyin(uap->nsa, &__vec64, sizeof(__vec64));
480 __sigaction_user64_to_kern(&__vec64, &__vec);
91447636 481 } else {
b0d623f7
A
482 struct __user32_sigaction __vec32;
483
484 error = copyin(uap->nsa, &__vec32, sizeof(__vec32));
485 __sigaction_user32_to_kern(&__vec32, &__vec);
91447636
A
486 }
487 if (error)
1c79356b 488 return (error);
2d21ac55 489 __vec.sa_flags &= SA_USERSPACE_MASK; /* Only pass on valid sa_flags */
b0d623f7 490 error = setsigvec(p, current_thread(), signum, &__vec, FALSE);
1c79356b 491 }
9bccf70c
A
492 return (error);
493}
494
495/* Routines to manipulate bits on all threads */
496int
b0d623f7 497clear_procsiglist(proc_t p, int bit, boolean_t in_signalstart)
9bccf70c
A
498{
499 struct uthread * uth;
91447636 500 thread_t thact;
9bccf70c 501
2d21ac55 502 proc_lock(p);
b0d623f7
A
503 if (!in_signalstart)
504 proc_signalstart(p, 1);
9bccf70c 505
2d21ac55 506 if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
9bccf70c
A
507 thact = p->p_vforkact;
508 uth = (struct uthread *)get_bsdthread_info(thact);
509 if (uth) {
510 uth->uu_siglist &= ~bit;
511 }
b0d623f7
A
512 if (!in_signalstart)
513 proc_signalend(p, 1);
2d21ac55 514 proc_unlock(p);
9bccf70c
A
515 return(0);
516 }
517
518 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
519 uth->uu_siglist &= ~bit;
520 }
b0d623f7
A
521 p->p_siglist &= ~bit;
522 if (!in_signalstart)
523 proc_signalend(p, 1);
2d21ac55
A
524 proc_unlock(p);
525
9bccf70c 526 return(0);
1c79356b
A
527}
528
91447636
A
529
530static int
2d21ac55 531unblock_procsigmask(proc_t p, int bit)
1c79356b 532{
9bccf70c 533 struct uthread * uth;
91447636 534 thread_t thact;
9bccf70c 535
2d21ac55
A
536 proc_lock(p);
537 proc_signalstart(p, 1);
538
539 if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
9bccf70c
A
540 thact = p->p_vforkact;
541 uth = (struct uthread *)get_bsdthread_info(thact);
542 if (uth) {
543 uth->uu_sigmask &= ~bit;
544 }
545 p->p_sigmask &= ~bit;
2d21ac55
A
546 proc_signalend(p, 1);
547 proc_unlock(p);
9bccf70c
A
548 return(0);
549 }
550 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
551 uth->uu_sigmask &= ~bit;
1c79356b 552 }
9bccf70c 553 p->p_sigmask &= ~bit;
2d21ac55
A
554
555 proc_signalend(p, 1);
556 proc_unlock(p);
9bccf70c 557 return(0);
1c79356b
A
558}
559
91447636 560static int
2d21ac55 561block_procsigmask(proc_t p, int bit)
1c79356b 562{
9bccf70c 563 struct uthread * uth;
91447636 564 thread_t thact;
1c79356b 565
2d21ac55
A
566 proc_lock(p);
567 proc_signalstart(p, 1);
568
569 if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
9bccf70c
A
570 thact = p->p_vforkact;
571 uth = (struct uthread *)get_bsdthread_info(thact);
572 if (uth) {
573 uth->uu_sigmask |= bit;
574 }
575 p->p_sigmask |= bit;
2d21ac55
A
576 proc_signalend(p, 1);
577 proc_unlock(p);
9bccf70c
A
578 return(0);
579 }
580 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
581 uth->uu_sigmask |= bit;
582 }
583 p->p_sigmask |= bit;
2d21ac55
A
584
585 proc_signalend(p, 1);
586 proc_unlock(p);
1c79356b
A
587 return(0);
588}
91447636 589
9bccf70c 590int
2d21ac55 591set_procsigmask(proc_t p, int bit)
9bccf70c
A
592{
593 struct uthread * uth;
91447636 594 thread_t thact;
1c79356b 595
2d21ac55
A
596 proc_lock(p);
597 proc_signalstart(p, 1);
598
599 if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
9bccf70c
A
600 thact = p->p_vforkact;
601 uth = (struct uthread *)get_bsdthread_info(thact);
602 if (uth) {
603 uth->uu_sigmask = bit;
604 }
605 p->p_sigmask = bit;
2d21ac55
A
606 proc_signalend(p, 1);
607 proc_unlock(p);
9bccf70c
A
608 return(0);
609 }
610 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
611 uth->uu_sigmask = bit;
612 }
613 p->p_sigmask = bit;
2d21ac55
A
614 proc_signalend(p, 1);
615 proc_unlock(p);
616
9bccf70c
A
617 return(0);
618}
1c79356b 619
91447636 620/* XXX should be static? */
b0d623f7
A
621/*
622 * Notes: The thread parameter is used in the PPC case to select the
623 * thread on which the floating point exception will be enabled
624 * or disabled. We can't simply take current_thread(), since
 625 * this is called from posix_spawn() on a process/thread pair that is
 626 * not currently running.
 627 *
 628 * We mark thread as unused to allow compilation without warning
6d2010ae 629 * on non-PPC platforms.
b0d623f7 630 */
9bccf70c 631int
b0d623f7 632setsigvec(proc_t p, __unused thread_t thread, int signum, struct __kern_sigaction *sa, boolean_t in_sigstart)
1c79356b 633{
2d21ac55
A
634 struct sigacts *ps = p->p_sigacts;
635 int bit;
1c79356b 636
9bccf70c
A
637 if ((signum == SIGKILL || signum == SIGSTOP) &&
638 sa->sa_handler != SIG_DFL)
639 return(EINVAL);
1c79356b
A
640 bit = sigmask(signum);
641 /*
642 * Change setting atomically.
643 */
644 ps->ps_sigact[signum] = sa->sa_handler;
91447636 645 ps->ps_trampact[signum] = sa->sa_tramp;
1c79356b 646 ps->ps_catchmask[signum] = sa->sa_mask &~ sigcantmask;
9bccf70c
A
647 if (sa->sa_flags & SA_SIGINFO)
648 ps->ps_siginfo |= bit;
649 else
650 ps->ps_siginfo &= ~bit;
55e303ae
A
651 if (sa->sa_flags & SA_64REGSET)
652 ps->ps_64regset |= bit;
653 else
654 ps->ps_64regset &= ~bit;
1c79356b
A
655 if ((sa->sa_flags & SA_RESTART) == 0)
656 ps->ps_sigintr |= bit;
657 else
658 ps->ps_sigintr &= ~bit;
659 if (sa->sa_flags & SA_ONSTACK)
660 ps->ps_sigonstack |= bit;
661 else
662 ps->ps_sigonstack &= ~bit;
663 if (sa->sa_flags & SA_USERTRAMP)
664 ps->ps_usertramp |= bit;
665 else
666 ps->ps_usertramp &= ~bit;
9bccf70c
A
667 if (sa->sa_flags & SA_RESETHAND)
668 ps->ps_sigreset |= bit;
669 else
670 ps->ps_sigreset &= ~bit;
671 if (sa->sa_flags & SA_NODEFER)
672 ps->ps_signodefer |= bit;
673 else
674 ps->ps_signodefer &= ~bit;
1c79356b
A
675 if (signum == SIGCHLD) {
676 if (sa->sa_flags & SA_NOCLDSTOP)
b0d623f7 677 OSBitOrAtomic(P_NOCLDSTOP, &p->p_flag);
1c79356b 678 else
b0d623f7 679 OSBitAndAtomic(~((uint32_t)P_NOCLDSTOP), &p->p_flag);
9bccf70c 680 if ((sa->sa_flags & SA_NOCLDWAIT) || (sa->sa_handler == SIG_IGN))
b0d623f7 681 OSBitOrAtomic(P_NOCLDWAIT, &p->p_flag);
9bccf70c 682 else
b0d623f7 683 OSBitAndAtomic(~((uint32_t)P_NOCLDWAIT), &p->p_flag);
1c79356b 684 }
9bccf70c 685
1c79356b
A
686 /*
687 * Set bit in p_sigignore for signals that are set to SIG_IGN,
688 * and for signals set to SIG_DFL where the default is to ignore.
689 * However, don't put SIGCONT in p_sigignore,
690 * as we have to restart the process.
691 */
692 if (sa->sa_handler == SIG_IGN ||
693 (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) {
1c79356b 694
b0d623f7 695 clear_procsiglist(p, bit, in_sigstart);
1c79356b
A
696 if (signum != SIGCONT)
697 p->p_sigignore |= bit; /* easier in psignal */
698 p->p_sigcatch &= ~bit;
699 } else {
700 p->p_sigignore &= ~bit;
701 if (sa->sa_handler == SIG_DFL)
702 p->p_sigcatch &= ~bit;
703 else
704 p->p_sigcatch |= bit;
705 }
9bccf70c 706 return(0);
1c79356b
A
707}
708
709/*
710 * Initialize signal state for process 0;
711 * set to ignore signals that are ignored by default.
712 */
713void
2d21ac55 714siginit(proc_t p)
1c79356b 715{
2d21ac55 716 int i;
1c79356b 717
316670eb 718 for (i = 1; i < NSIG; i++)
1c79356b
A
719 if (sigprop[i] & SA_IGNORE && i != SIGCONT)
720 p->p_sigignore |= sigmask(i);
721}
722
723/*
724 * Reset signals for an exec of the specified process.
725 */
726void
2d21ac55 727execsigs(proc_t p, thread_t thread)
1c79356b 728{
2d21ac55
A
729 struct sigacts *ps = p->p_sigacts;
730 int nc, mask;
731 struct uthread *ut;
1c79356b 732
2d21ac55 733 ut = (struct uthread *)get_bsdthread_info(thread);
b0d623f7
A
734
735 /*
736 * transfer saved signal states from the process
737 * back to the current thread.
738 *
739 * NOTE: We do this without the process locked,
740 * because we are guaranteed to be single-threaded
741 * by this point in exec and the p_siglist is
742 * only accessed by threads inside the process.
743 */
744 ut->uu_siglist |= p->p_siglist;
745 p->p_siglist = 0;
746
1c79356b
A
747 /*
748 * Reset caught signals. Held signals remain held
749 * through p_sigmask (unless they were caught,
750 * and are now ignored by default).
751 */
752 while (p->p_sigcatch) {
753 nc = ffs((long)p->p_sigcatch);
754 mask = sigmask(nc);
755 p->p_sigcatch &= ~mask;
756 if (sigprop[nc] & SA_IGNORE) {
757 if (nc != SIGCONT)
758 p->p_sigignore |= mask;
b0d623f7 759 ut->uu_siglist &= ~mask;
1c79356b
A
760 }
761 ps->ps_sigact[nc] = SIG_DFL;
762 }
b0d623f7 763
1c79356b
A
764 /*
765 * Reset stack state to the user stack.
766 * Clear set of signals caught on the signal stack.
767 */
2d21ac55
A
768 /* thread */
769 ut->uu_sigstk.ss_flags = SA_DISABLE;
770 ut->uu_sigstk.ss_size = 0;
771 ut->uu_sigstk.ss_sp = USER_ADDR_NULL;
772 ut->uu_flag &= ~UT_ALTSTACK;
773 /* process */
0c530ab8 774 ps->ps_sigonstack = 0;
1c79356b
A
775}
776
777/*
778 * Manipulate signal mask.
 779 * Note that the new mask is copied in from user space and the
 780 * old mask, if requested, is copied out to user space;
 781 * the library stub does the rest.
782 */
1c79356b 783int
b0d623f7 784sigprocmask(proc_t p, struct sigprocmask_args *uap, __unused int32_t *retval)
1c79356b
A
785{
786 int error = 0;
9bccf70c 787 sigset_t oldmask, nmask;
91447636 788 user_addr_t omask = uap->omask;
9bccf70c 789 struct uthread *ut;
1c79356b 790
91447636 791 ut = (struct uthread *)get_bsdthread_info(current_thread());
9bccf70c
A
792 oldmask = ut->uu_sigmask;
793
91447636 794 if (uap->mask == USER_ADDR_NULL) {
9bccf70c
A
795 /* just want old mask */
796 goto out;
797 }
91447636 798 error = copyin(uap->mask, &nmask, sizeof(sigset_t));
9bccf70c
A
799 if (error)
800 goto out;
1c79356b
A
801
802 switch (uap->how) {
803 case SIG_BLOCK:
9bccf70c 804 block_procsigmask(p, (nmask & ~sigcantmask));
91447636 805 signal_setast(current_thread());
1c79356b
A
806 break;
807
808 case SIG_UNBLOCK:
9bccf70c 809 unblock_procsigmask(p, (nmask & ~sigcantmask));
91447636 810 signal_setast(current_thread());
1c79356b
A
811 break;
812
813 case SIG_SETMASK:
9bccf70c 814 set_procsigmask(p, (nmask & ~sigcantmask));
91447636 815 signal_setast(current_thread());
1c79356b
A
816 break;
817
818 default:
819 error = EINVAL;
820 break;
821 }
9bccf70c 822out:
91447636 823 if (!error && omask != USER_ADDR_NULL)
9bccf70c 824 copyout(&oldmask, omask, sizeof(sigset_t));
1c79356b
A
825 return (error);
826}
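/*
 * Illustrative sketch (not part of the kernel sources): typical userspace
 * use of the SIG_BLOCK/SIG_SETMASK operations dispatched above.  SIGINT and
 * the notion of a "critical section" are assumptions for illustration only.
 *
 *	#include <signal.h>
 *
 *	void run_critical_section(void (*work)(void))
 *	{
 *		sigset_t block, saved;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGINT);
 *		sigprocmask(SIG_BLOCK, &block, &saved);	// defer SIGINT
 *		work();
 *		sigprocmask(SIG_SETMASK, &saved, NULL);	// restore previous mask
 *	}
 */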
827
1c79356b 828int
b0d623f7 829sigpending(__unused proc_t p, struct sigpending_args *uap, __unused int32_t *retval)
1c79356b 830{
9bccf70c
A
831 struct uthread *ut;
832 sigset_t pendlist;
1c79356b 833
91447636 834 ut = (struct uthread *)get_bsdthread_info(current_thread());
9bccf70c
A
835 pendlist = ut->uu_siglist;
836
837 if (uap->osv)
838 copyout(&pendlist, uap->osv, sizeof(sigset_t));
839 return(0);
1c79356b
A
840}
841
1c79356b
A
842/*
843 * Suspend process until signal, providing mask to be set
844 * in the meantime. Note nonstandard calling convention:
845 * libc stub passes mask, not pointer, to save a copyin.
846 */
847
91447636
A
848static int
849sigcontinue(__unused int error)
1c79356b 850{
91447636 851// struct uthread *ut = get_bsdthread_info(current_thread());
2d21ac55
A
852 unix_syscall_return(EINTR);
853}
854
855int
b0d623f7 856sigsuspend(proc_t p, struct sigsuspend_args *uap, int32_t *retval)
2d21ac55
A
857{
858 __pthread_testcancel(1);
859 return(sigsuspend_nocancel(p, (struct sigsuspend_nocancel_args *)uap, retval));
1c79356b
A
860}
861
1c79356b 862int
b0d623f7 863sigsuspend_nocancel(proc_t p, struct sigsuspend_nocancel_args *uap, __unused int32_t *retval)
1c79356b 864{
9bccf70c
A
865 struct uthread *ut;
866
91447636 867 ut = (struct uthread *)get_bsdthread_info(current_thread());
1c79356b
A
868
869 /*
870 * When returning from sigpause, we want
871 * the old mask to be restored after the
872 * signal handler has finished. Thus, we
873 * save it here and mark the sigacts structure
874 * to indicate this.
875 */
9bccf70c 876 ut->uu_oldmask = ut->uu_sigmask;
91447636 877 ut->uu_flag |= UT_SAS_OLDMASK;
9bccf70c 878 ut->uu_sigmask = (uap->mask & ~sigcantmask);
1c79356b
A
879 (void) tsleep0((caddr_t) p, PPAUSE|PCATCH, "pause", 0, sigcontinue);
880 /* always return EINTR rather than ERESTART... */
881 return (EINTR);
882}
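/*
 * Illustrative sketch (not part of the kernel sources): the classic
 * userspace pattern the semantics above support -- block a signal, test a
 * flag, then atomically unblock and wait with sigsuspend(), which returns
 * EINTR with the original mask restored once a handler has run.  The signal
 * choice and flag name are assumptions for illustration only.
 *
 *	#include <signal.h>
 *
 *	static volatile sig_atomic_t got_usr1;
 *	static void on_usr1(int sig) { (void)sig; got_usr1 = 1; }
 *
 *	static void wait_for_usr1(void)
 *	{
 *		sigset_t block, orig;
 *		struct sigaction sa = { .sa_handler = on_usr1 };
 *
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGUSR1, &sa, NULL);
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, &orig);	// close the race window
 *		while (!got_usr1)
 *			sigsuspend(&orig);		// returns -1/EINTR after the handler
 *		sigprocmask(SIG_SETMASK, &orig, NULL);	// restore original mask
 *	}
 */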
883
9bccf70c
A
884
885int
2d21ac55
A
886__disable_threadsignal(__unused proc_t p,
887 __unused struct __disable_threadsignal_args *uap,
b0d623f7 888 __unused int32_t *retval)
9bccf70c
A
889{
890 struct uthread *uth;
891
91447636 892 uth = (struct uthread *)get_bsdthread_info(current_thread());
9bccf70c
A
893
894 /* No longer valid to have any signal delivered */
2d21ac55 895 uth->uu_flag |= (UT_NO_SIGMASK | UT_CANCELDISABLE);
9bccf70c
A
896
897 return(0);
898
899}
900
2d21ac55
A
901void
902__pthread_testcancel(int presyscall)
903{
904
905 thread_t self = current_thread();
906 struct uthread * uthread;
907
908 uthread = (struct uthread *)get_bsdthread_info(self);
909
910
911 uthread->uu_flag &= ~UT_NOTCANCELPT;
912
913 if ((uthread->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
914 if(presyscall != 0) {
915 unix_syscall_return(EINTR);
916 /* NOTREACHED */
917 } else
918 thread_abort_safely(self);
919 }
920}
921
922
9bccf70c 923
91447636 924int
2d21ac55 925__pthread_markcancel(__unused proc_t p,
b0d623f7 926 struct __pthread_markcancel_args *uap, __unused int32_t *retval)
9bccf70c
A
927{
928 thread_act_t target_act;
929 int error = 0;
9bccf70c
A
930 struct uthread *uth;
931
91447636 932 target_act = (thread_act_t)port_name_to_thread(uap->thread_port);
9bccf70c
A
933
934 if (target_act == THR_ACT_NULL)
935 return (ESRCH);
91447636
A
936
937 uth = (struct uthread *)get_bsdthread_info(target_act);
938
939 /* if the thread is in vfork do not cancel */
2d21ac55 940 if ((uth->uu_flag & (UT_VFORK | UT_CANCEL | UT_CANCELED )) == 0) {
91447636
A
941 uth->uu_flag |= (UT_CANCEL | UT_NO_SIGMASK);
942 if (((uth->uu_flag & UT_NOTCANCELPT) == 0)
943 && ((uth->uu_flag & UT_CANCELDISABLE) == 0))
944 thread_abort_safely(target_act);
945 }
946
947 thread_deallocate(target_act);
948 return (error);
949}
950
 951/* if action == 0: return the cancellation state; if the thread is
 952 * marked for cancellation, transition it to the canceled state
 953 * if action == 1: enable cancel handling
 954 * if action == 2: disable cancel handling
955 */
956int
2d21ac55 957__pthread_canceled(__unused proc_t p,
b0d623f7 958 struct __pthread_canceled_args *uap, __unused int32_t *retval)
91447636 959{
2d21ac55 960 thread_act_t thread;
91447636
A
961 struct uthread *uth;
962 int action = uap->action;
963
2d21ac55
A
964 thread = current_thread();
965 uth = (struct uthread *)get_bsdthread_info(thread);
91447636
A
966
967 switch (action) {
968 case 1:
969 uth->uu_flag &= ~UT_CANCELDISABLE;
970 return(0);
971 case 2:
972 uth->uu_flag |= UT_CANCELDISABLE;
973 return(0);
974 case 0:
975 default:
976 /* if the thread is in vfork do not cancel */
977 if((uth->uu_flag & ( UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
978 uth->uu_flag &= ~UT_CANCEL;
979 uth->uu_flag |= (UT_CANCELED | UT_NO_SIGMASK);
980 return(0);
981 }
982 return(EINVAL);
983 }
984 return(EINVAL);
985}
986
987void
988__posix_sem_syscall_return(kern_return_t kern_result)
989{
990 int error = 0;
991
992 if (kern_result == KERN_SUCCESS)
993 error = 0;
994 else if (kern_result == KERN_ABORTED)
995 error = EINTR;
996 else if (kern_result == KERN_OPERATION_TIMED_OUT)
997 error = ETIMEDOUT;
998 else
999 error = EINVAL;
1000 unix_syscall_return(error);
1001 /* does not return */
1002}
1003
b0d623f7 1004#if OLD_SEMWAIT_SIGNAL
2d21ac55
A
1005/*
1006 * Returns: 0 Success
1007 * EINTR
1008 * ETIMEDOUT
1009 * EINVAL
b0d623f7 1010 * EFAULT if timespec is NULL
2d21ac55
A
1011 */
1012int
b0d623f7
A
1013__old_semwait_signal(proc_t p, struct __old_semwait_signal_args *uap,
1014 int32_t *retval)
2d21ac55
A
1015{
1016 __pthread_testcancel(0);
b0d623f7 1017 return(__old_semwait_signal_nocancel(p, (struct __old_semwait_signal_nocancel_args *)uap, retval));
2d21ac55 1018}
91447636
A
1019
1020int
b0d623f7
A
1021__old_semwait_signal_nocancel(proc_t p, struct __old_semwait_signal_nocancel_args *uap,
1022 __unused int32_t *retval)
91447636 1023{
b0d623f7 1024
91447636 1025 kern_return_t kern_result;
b0d623f7 1026 int error;
91447636
A
1027 mach_timespec_t then;
1028 struct timespec now;
b0d623f7
A
1029 struct user_timespec ts;
1030 boolean_t truncated_timeout = FALSE;
1031
91447636 1032 if(uap->timeout) {
b0d623f7
A
1033
1034 if (IS_64BIT_PROCESS(p)) {
1035 struct user64_timespec ts64;
1036 error = copyin(uap->ts, &ts64, sizeof(ts64));
1037 ts.tv_sec = ts64.tv_sec;
1038 ts.tv_nsec = ts64.tv_nsec;
1039 } else {
1040 struct user32_timespec ts32;
1041 error = copyin(uap->ts, &ts32, sizeof(ts32));
1042 ts.tv_sec = ts32.tv_sec;
1043 ts.tv_nsec = ts32.tv_nsec;
1044 }
1045
1046 if (error) {
1047 return error;
1048 }
1049
1050 if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) {
1051 ts.tv_sec = 0xFFFFFFFF;
1052 ts.tv_nsec = 0;
1053 truncated_timeout = TRUE;
1054 }
1055
91447636 1056 if (uap->relative) {
b0d623f7
A
1057 then.tv_sec = ts.tv_sec;
1058 then.tv_nsec = ts.tv_nsec;
91447636
A
1059 } else {
1060 nanotime(&now);
b0d623f7 1061
2d21ac55 1062 /* if time has elapsed, set time to a null timespec to bail out right away */
b0d623f7
A
1063 if (now.tv_sec == ts.tv_sec ?
1064 now.tv_nsec > ts.tv_nsec :
1065 now.tv_sec > ts.tv_sec) {
2d21ac55
A
1066 then.tv_sec = 0;
1067 then.tv_nsec = 0;
b0d623f7
A
1068 } else {
1069 then.tv_sec = ts.tv_sec - now.tv_sec;
1070 then.tv_nsec = ts.tv_nsec - now.tv_nsec;
1071 if (then.tv_nsec < 0) {
1072 then.tv_nsec += NSEC_PER_SEC;
1073 then.tv_sec--;
1074 }
2d21ac55 1075 }
91447636 1076 }
b0d623f7 1077
2d21ac55
A
1078 if (uap->mutex_sem == 0)
1079 kern_result = semaphore_timedwait_trap_internal((mach_port_name_t)uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
91447636
A
1080 else
1081 kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
b0d623f7 1082
91447636 1083 } else {
b0d623f7 1084
2d21ac55 1085 if (uap->mutex_sem == 0)
91447636
A
1086 kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return);
1087 else
b0d623f7 1088
91447636
A
1089 kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return);
1090 }
b0d623f7
A
1091
1092 if (kern_result == KERN_SUCCESS && !truncated_timeout)
91447636 1093 return(0);
b0d623f7
A
1094 else if (kern_result == KERN_SUCCESS && truncated_timeout)
1095 return(EINTR); /* simulate an exceptional condition because Mach doesn't support a longer timeout */
1096 else if (kern_result == KERN_ABORTED)
1097 return(EINTR);
1098 else if (kern_result == KERN_OPERATION_TIMED_OUT)
1099 return(ETIMEDOUT);
1100 else
1101 return(EINVAL);
1102}
1103#endif /* OLD_SEMWAIT_SIGNAL*/
1104
1105/*
1106 * Returns: 0 Success
1107 * EINTR
1108 * ETIMEDOUT
1109 * EINVAL
1110 * EFAULT if timespec is NULL
1111 */
1112int
1113__semwait_signal(proc_t p, struct __semwait_signal_args *uap,
1114 int32_t *retval)
1115{
1116 __pthread_testcancel(0);
1117 return(__semwait_signal_nocancel(p, (struct __semwait_signal_nocancel_args *)uap, retval));
1118}
1119
1120int
1121__semwait_signal_nocancel(__unused proc_t p, struct __semwait_signal_nocancel_args *uap,
1122 __unused int32_t *retval)
1123{
1124
1125 kern_return_t kern_result;
1126 mach_timespec_t then;
1127 struct timespec now;
1128 struct user_timespec ts;
1129 boolean_t truncated_timeout = FALSE;
1130
1131 if(uap->timeout) {
1132
1133 ts.tv_sec = uap->tv_sec;
1134 ts.tv_nsec = uap->tv_nsec;
1135
1136 if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) {
1137 ts.tv_sec = 0xFFFFFFFF;
1138 ts.tv_nsec = 0;
1139 truncated_timeout = TRUE;
1140 }
1141
1142 if (uap->relative) {
1143 then.tv_sec = ts.tv_sec;
1144 then.tv_nsec = ts.tv_nsec;
1145 } else {
1146 nanotime(&now);
1147
 1148 /* if time has elapsed, set time to a null timespec to bail out right away */
1149 if (now.tv_sec == ts.tv_sec ?
1150 now.tv_nsec > ts.tv_nsec :
1151 now.tv_sec > ts.tv_sec) {
1152 then.tv_sec = 0;
1153 then.tv_nsec = 0;
1154 } else {
1155 then.tv_sec = ts.tv_sec - now.tv_sec;
1156 then.tv_nsec = ts.tv_nsec - now.tv_nsec;
1157 if (then.tv_nsec < 0) {
1158 then.tv_nsec += NSEC_PER_SEC;
1159 then.tv_sec--;
1160 }
1161 }
1162 }
1163
1164 if (uap->mutex_sem == 0)
1165 kern_result = semaphore_timedwait_trap_internal((mach_port_name_t)uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
1166 else
1167 kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
1168
1169 } else {
1170
1171 if (uap->mutex_sem == 0)
1172 kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return);
1173 else
1174
1175 kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return);
1176 }
1177
1178 if (kern_result == KERN_SUCCESS && !truncated_timeout)
1179 return(0);
1180 else if (kern_result == KERN_SUCCESS && truncated_timeout)
1181 return(EINTR); /* simulate an exceptional condition because Mach doesn't support a longer timeout */
91447636
A
1182 else if (kern_result == KERN_ABORTED)
1183 return(EINTR);
1184 else if (kern_result == KERN_OPERATION_TIMED_OUT)
1185 return(ETIMEDOUT);
1186 else
1187 return(EINVAL);
1188}
1189
b0d623f7 1190
91447636 1191int
2d21ac55 1192__pthread_kill(__unused proc_t p, struct __pthread_kill_args *uap,
b0d623f7 1193 __unused int32_t *retval)
91447636
A
1194{
1195 thread_t target_act;
1196 int error = 0;
1197 int signum = uap->sig;
1198 struct uthread *uth;
1199
1200 target_act = (thread_t)port_name_to_thread(uap->thread_port);
1201
1202 if (target_act == THREAD_NULL)
1203 return (ESRCH);
9bccf70c
A
1204 if ((u_int)signum >= NSIG) {
1205 error = EINVAL;
1206 goto out;
1207 }
1208
1209 uth = (struct uthread *)get_bsdthread_info(target_act);
55e303ae 1210
91447636 1211 if (uth->uu_flag & UT_NO_SIGMASK) {
9bccf70c
A
1212 error = ESRCH;
1213 goto out;
1214 }
1215
1216 if (signum)
1217 psignal_uthread(target_act, signum);
1218out:
91447636 1219 thread_deallocate(target_act);
9bccf70c
A
1220 return (error);
1221}
1222
1223
9bccf70c 1224int
2d21ac55 1225__pthread_sigmask(__unused proc_t p, struct __pthread_sigmask_args *uap,
b0d623f7 1226 __unused int32_t *retval)
9bccf70c 1227{
91447636
A
1228 user_addr_t set = uap->set;
1229 user_addr_t oset = uap->oset;
1230 sigset_t nset;
9bccf70c
A
1231 int error = 0;
1232 struct uthread *ut;
1233 sigset_t oldset;
1234
91447636 1235 ut = (struct uthread *)get_bsdthread_info(current_thread());
9bccf70c
A
1236 oldset = ut->uu_sigmask;
1237
91447636 1238 if (set == USER_ADDR_NULL) {
9bccf70c
A
1239 /* need only old mask */
1240 goto out;
1241 }
1242
91447636 1243 error = copyin(set, &nset, sizeof(sigset_t));
9bccf70c
A
1244 if (error)
1245 goto out;
1246
1247 switch (uap->how) {
1248 case SIG_BLOCK:
1249 ut->uu_sigmask |= (nset & ~sigcantmask);
1250 break;
1251
1252 case SIG_UNBLOCK:
1253 ut->uu_sigmask &= ~(nset);
91447636 1254 signal_setast(current_thread());
9bccf70c
A
1255 break;
1256
1257 case SIG_SETMASK:
1258 ut->uu_sigmask = (nset & ~sigcantmask);
91447636 1259 signal_setast(current_thread());
9bccf70c
A
1260 break;
1261
1262 default:
1263 error = EINVAL;
1264
1265 }
1266out:
91447636
A
1267 if (!error && oset != USER_ADDR_NULL)
1268 copyout(&oldset, oset, sizeof(sigset_t));
9bccf70c
A
1269
1270 return(error);
1271}
1272
2d21ac55
A
1273/*
1274 * Returns: 0 Success
1275 * EINVAL
1276 * copyin:EFAULT
1277 * copyout:EFAULT
1278 */
1279int
b0d623f7 1280__sigwait(proc_t p, struct __sigwait_args *uap, int32_t *retval)
2d21ac55
A
1281{
1282 __pthread_testcancel(1);
1283 return(__sigwait_nocancel(p, (struct __sigwait_nocancel_args *)uap, retval));
1284}
9bccf70c 1285
9bccf70c 1286int
b0d623f7 1287__sigwait_nocancel(proc_t p, struct __sigwait_nocancel_args *uap, __unused int32_t *retval)
9bccf70c 1288{
9bccf70c
A
1289 struct uthread *ut;
1290 struct uthread *uth;
9bccf70c
A
1291 int error = 0;
1292 sigset_t mask;
1293 sigset_t siglist;
1294 sigset_t sigw=0;
1295 int signum;
1296
91447636 1297 ut = (struct uthread *)get_bsdthread_info(current_thread());
9bccf70c 1298
91447636 1299 if (uap->set == USER_ADDR_NULL)
9bccf70c
A
1300 return(EINVAL);
1301
91447636 1302 error = copyin(uap->set, &mask, sizeof(sigset_t));
9bccf70c
A
1303 if (error)
1304 return(error);
1305
1306 siglist = (mask & ~sigcantmask);
1307
1308 if (siglist == 0)
1309 return(EINVAL);
1310
2d21ac55
A
1311 proc_lock(p);
1312 if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
1313 proc_unlock(p);
9bccf70c
A
1314 return(EINVAL);
1315 } else {
2d21ac55 1316 proc_signalstart(p, 1);
9bccf70c 1317 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
91447636 1318 if ( (sigw = uth->uu_siglist & siglist) ) {
9bccf70c
A
1319 break;
1320 }
1321 }
2d21ac55 1322 proc_signalend(p, 1);
9bccf70c 1323 }
2d21ac55 1324
9bccf70c
A
1325 if (sigw) {
1326 /* The signal was pending on a thread */
1327 goto sigwait1;
1328 }
1329 /*
1330 * When returning from sigwait, we want
1331 * the old mask to be restored after the
1332 * signal handler has finished. Thus, we
1333 * save it here and mark the sigacts structure
1334 * to indicate this.
1335 */
2d21ac55 1336 uth = ut; /* wait for it to be delivered to us */
9bccf70c 1337 ut->uu_oldmask = ut->uu_sigmask;
91447636 1338 ut->uu_flag |= UT_SAS_OLDMASK;
2d21ac55
A
1339 if (siglist == (sigset_t)0) {
1340 proc_unlock(p);
9bccf70c 1341 return(EINVAL);
2d21ac55 1342 }
9bccf70c
A
1343 /* SIGKILL and SIGSTOP are not maskable as well */
1344 ut->uu_sigmask = ~(siglist|sigcantmask);
1345 ut->uu_sigwait = siglist;
2d21ac55 1346
9bccf70c 1347 /* No Continuations for now */
2d21ac55 1348 error = msleep((caddr_t)&ut->uu_sigwait, &p->p_mlock, PPAUSE|PCATCH, "pause", 0);
9bccf70c 1349
b0d623f7 1350 if (error == ERESTART)
9bccf70c
A
1351 error = 0;
1352
1353 sigw = (ut->uu_sigwait & siglist);
1354 ut->uu_sigmask = ut->uu_oldmask;
1355 ut->uu_oldmask = 0;
91447636 1356 ut->uu_flag &= ~UT_SAS_OLDMASK;
9bccf70c
A
1357sigwait1:
1358 ut->uu_sigwait = 0;
1359 if (!error) {
1360 signum = ffs((unsigned int)sigw);
1361 if (!signum)
1362 panic("sigwait with no signal wakeup");
2d21ac55
A
1363 /* Clear the pending signal in the thread it was delivered */
1364 uth->uu_siglist &= ~(sigmask(signum));
b0d623f7
A
1365
1366#if CONFIG_DTRACE
1367 DTRACE_PROC2(signal__clear, int, signum, siginfo_t *, &(ut->t_dtrace_siginfo));
1368#endif
1369
2d21ac55 1370 proc_unlock(p);
91447636 1371 if (uap->sig != USER_ADDR_NULL)
9bccf70c 1372 error = copyout(&signum, uap->sig, sizeof(int));
2d21ac55
A
1373 } else
1374 proc_unlock(p);
9bccf70c
A
1375
1376 return(error);
1377
1378}
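/*
 * Illustrative sketch (not part of the kernel sources): the userspace
 * sigwait() idiom served by __sigwait above -- a dedicated thread blocks a
 * set of signals and then synchronously waits for one of them.  The signal
 * set and loop structure are assumptions for illustration only.
 *
 *	#include <pthread.h>
 *	#include <signal.h>
 *
 *	static void *signal_thread(void *arg)
 *	{
 *		sigset_t set;
 *		int sig;
 *
 *		(void)arg;
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGTERM);
 *		sigaddset(&set, SIGHUP);
 *		pthread_sigmask(SIG_BLOCK, &set, NULL);	// keep them pending
 *		for (;;) {
 *			if (sigwait(&set, &sig) == 0) {
 *				// handle sig synchronously here
 *			}
 *		}
 *		return NULL;
 *	}
 */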
1379
1c79356b 1380int
b0d623f7 1381sigaltstack(__unused proc_t p, struct sigaltstack_args *uap, __unused int32_t *retval)
1c79356b 1382{
b0d623f7
A
1383 struct kern_sigaltstack ss;
1384 struct kern_sigaltstack *pstk;
1c79356b 1385 int error;
2d21ac55
A
1386 struct uthread *uth;
1387 int onstack;
1c79356b 1388
0c530ab8 1389 uth = (struct uthread *)get_bsdthread_info(current_thread());
0c530ab8 1390
2d21ac55
A
1391 pstk = &uth->uu_sigstk;
1392 if ((uth->uu_flag & UT_ALTSTACK) == 0)
1393 uth->uu_sigstk.ss_flags |= SA_DISABLE;
1394 onstack = pstk->ss_flags & SA_ONSTACK;
91447636
A
1395 if (uap->oss) {
1396 if (IS_64BIT_PROCESS(p)) {
b0d623f7
A
1397 struct user64_sigaltstack ss64;
1398 sigaltstack_kern_to_user64(pstk, &ss64);
1399 error = copyout(&ss64, uap->oss, sizeof(ss64));
91447636 1400 } else {
b0d623f7
A
1401 struct user32_sigaltstack ss32;
1402 sigaltstack_kern_to_user32(pstk, &ss32);
1403 error = copyout(&ss32, uap->oss, sizeof(ss32));
91447636
A
1404 }
1405 if (error)
1406 return (error);
1407 }
1408 if (uap->nss == USER_ADDR_NULL)
1c79356b 1409 return (0);
91447636 1410 if (IS_64BIT_PROCESS(p)) {
b0d623f7
A
1411 struct user64_sigaltstack ss64;
1412 error = copyin(uap->nss, &ss64, sizeof(ss64));
1413 sigaltstack_user64_to_kern(&ss64, &ss);
91447636 1414 } else {
b0d623f7
A
1415 struct user32_sigaltstack ss32;
1416 error = copyin(uap->nss, &ss32, sizeof(ss32));
1417 sigaltstack_user32_to_kern(&ss32, &ss);
91447636
A
1418 }
1419 if (error)
1c79356b 1420 return (error);
9bccf70c
A
1421 if ((ss.ss_flags & ~SA_DISABLE) != 0) {
1422 return(EINVAL);
1423 }
1424
1c79356b 1425 if (ss.ss_flags & SA_DISABLE) {
2d21ac55
A
 1426 /* if we are here we are not in the signal handler; so no need to check */
1427 if (uth->uu_sigstk.ss_flags & SA_ONSTACK)
1428 return (EINVAL);
1429 uth->uu_flag &= ~UT_ALTSTACK;
1430 uth->uu_sigstk.ss_flags = ss.ss_flags;
1c79356b
A
1431 return (0);
1432 }
2d21ac55
A
1433 if (onstack)
1434 return (EPERM);
55e303ae
A
 1435/* The older stack size was 8K; enforce that minimum to avoid compatibility problems */
1436#define OLDMINSIGSTKSZ 8*1024
1437 if (ss.ss_size < OLDMINSIGSTKSZ)
1c79356b 1438 return (ENOMEM);
2d21ac55
A
1439 uth->uu_flag |= UT_ALTSTACK;
1440 uth->uu_sigstk= ss;
1c79356b
A
1441 return (0);
1442}
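/*
 * Illustrative sketch (not part of the kernel sources): installing an
 * alternate signal stack large enough to satisfy the minimum enforced above,
 * then catching SIGSEGV on it with SA_ONSTACK.  The stack size and the
 * choice of SIGSEGV are assumptions for illustration only.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	static void on_segv(int sig) { (void)sig; _Exit(1); }
 *
 *	static int install_altstack(void)
 *	{
 *		stack_t ss;
 *		struct sigaction sa;
 *
 *		ss.ss_sp = malloc(SIGSTKSZ);	// comfortably above the 8K minimum
 *		ss.ss_size = SIGSTKSZ;
 *		ss.ss_flags = 0;
 *		if (ss.ss_sp == NULL || sigaltstack(&ss, NULL) == -1)
 *			return -1;
 *
 *		sa.sa_handler = on_segv;
 *		sa.sa_flags = SA_ONSTACK;	// deliver on the alternate stack
 *		sigemptyset(&sa.sa_mask);
 *		return sigaction(SIGSEGV, &sa, NULL);
 *	}
 */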
1443
1c79356b 1444int
b0d623f7 1445kill(proc_t cp, struct kill_args *uap, __unused int32_t *retval)
1c79356b 1446{
2d21ac55 1447 proc_t p;
91447636 1448 kauth_cred_t uc = kauth_cred_get();
2d21ac55 1449 int posix = uap->posix; /* !0 if posix behaviour desired */
91447636
A
1450
1451 AUDIT_ARG(pid, uap->pid);
1452 AUDIT_ARG(signum, uap->signum);
1c79356b
A
1453
1454 if ((u_int)uap->signum >= NSIG)
1455 return (EINVAL);
1456 if (uap->pid > 0) {
1457 /* kill single process */
2d21ac55 1458 if ((p = proc_find(uap->pid)) == NULL) {
55e303ae
A
1459 if ((p = pzfind(uap->pid)) != NULL) {
1460 /*
1461 * IEEE Std 1003.1-2001: return success
1462 * when killing a zombie.
1463 */
1464 return (0);
1465 }
1c79356b 1466 return (ESRCH);
55e303ae 1467 }
e5568f75 1468 AUDIT_ARG(process, p);
2d21ac55
A
1469 if (!cansignal(cp, uc, p, uap->signum, 0)) {
1470 proc_rele(p);
ff6e181a
A
1471 return(EPERM);
1472 }
1c79356b
A
1473 if (uap->signum)
1474 psignal(p, uap->signum);
2d21ac55 1475 proc_rele(p);
1c79356b
A
1476 return (0);
1477 }
1478 switch (uap->pid) {
1479 case -1: /* broadcast signal */
2d21ac55 1480 return (killpg1(cp, uap->signum, 0, 1, posix));
1c79356b 1481 case 0: /* signal own process group */
2d21ac55 1482 return (killpg1(cp, uap->signum, 0, 0, posix));
1c79356b 1483 default: /* negative explicit process group */
2d21ac55 1484 return (killpg1(cp, uap->signum, -(uap->pid), 0, posix));
1c79356b
A
1485 }
1486 /* NOTREACHED */
1487}
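/*
 * Illustrative sketch (not part of the kernel sources): the pid encodings
 * dispatched by the switch above, as seen from userspace kill(2).  The
 * specific pids and signal are assumptions for illustration only.
 *
 *	#include <signal.h>
 *	#include <sys/types.h>
 *
 *	void kill_examples(pid_t child, pid_t pgid)
 *	{
 *		kill(child, SIGTERM);	// pid > 0: signal that single process
 *		kill(0, SIGTERM);	// pid == 0: signal the caller's process group
 *		kill(-pgid, SIGTERM);	// pid < -1: signal process group |pid|
 *		kill(-1, SIGTERM);	// pid == -1: broadcast (permissions permitting)
 *	}
 */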
1488
2d21ac55
A
1489static int
1490killpg1_filt(proc_t p, void * arg)
1491{
1492 struct killpg1_filtargs * kfargp = (struct killpg1_filtargs *)arg;
1493 proc_t cp = kfargp->cp;
1494 int posix = kfargp->posix;
1495
1496
1497 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
1498 (!posix && p == cp))
1499 return(0);
1500 else
1501 return(1);
1502}
1503
1504
1505static int
1506killpg1_pgrpfilt(proc_t p, __unused void * arg)
1507{
1508 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
1509 (p->p_stat == SZOMB))
1510 return(0);
1511 else
1512 return(1);
1513}
1514
1515
1516
1517static int
1518killpg1_callback(proc_t p, void * arg)
1519{
1520 struct killpg1_iterargs * kargp = (struct killpg1_iterargs *)arg;
1521 proc_t cp = kargp->cp;
1522 kauth_cred_t uc = kargp->uc; /* refcounted by the caller safe to use internal fields */
1523 int signum = kargp->signum;
1524 int * nfoundp = kargp->nfoundp;
1525 int n;
b0d623f7
A
1526 int zombie = 0;
1527 int error = 0;
2d21ac55 1528
b0d623f7
A
1529 if ((kargp->zombie != 0) && ((p->p_listflag & P_LIST_EXITED) == P_LIST_EXITED))
1530 zombie = 1;
2d21ac55 1531
b0d623f7
A
1532 if (zombie != 0) {
1533 proc_list_lock();
1534 error = cansignal(cp, uc, p, signum, zombie);
1535 proc_list_unlock();
1536
1537 if (error != 0 && nfoundp != NULL) {
1538 n = *nfoundp;
1539 *nfoundp = n+1;
1540 }
1541 } else {
1542 if (cansignal(cp, uc, p, signum, 0) == 0)
1543 return(PROC_RETURNED);
2d21ac55 1544
b0d623f7
A
1545 if (nfoundp != NULL) {
1546 n = *nfoundp;
1547 *nfoundp = n+1;
1548 }
1549 if (signum != 0)
1550 psignal(p, signum);
2d21ac55 1551 }
2d21ac55
A
1552
1553 return(PROC_RETURNED);
2d21ac55 1554}
1c79356b
A
1555
1556/*
1557 * Common code for kill process group/broadcast kill.
1558 * cp is calling process.
1559 */
1560int
2d21ac55 1561killpg1(proc_t cp, int signum, int pgid, int all, int posix)
1c79356b 1562{
2d21ac55 1563 kauth_cred_t uc;
1c79356b
A
1564 struct pgrp *pgrp;
1565 int nfound = 0;
2d21ac55
A
1566 struct killpg1_iterargs karg;
1567 struct killpg1_filtargs kfarg;
1568 int error = 0;
1c79356b 1569
2d21ac55 1570 uc = kauth_cred_proc_ref(cp);
1c79356b
A
1571 if (all) {
1572 /*
1573 * broadcast
1574 */
2d21ac55
A
1575 kfarg.posix = posix;
1576 kfarg.cp = cp;
1577
1578 karg.cp = cp;
1579 karg.uc = uc;
1580 karg.nfoundp = &nfound;
1581 karg.signum = signum;
b0d623f7 1582 karg.zombie = 1;
2d21ac55 1583
b0d623f7 1584 proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST), killpg1_callback, &karg, killpg1_filt, (void *)&kfarg);
2d21ac55 1585
1c79356b 1586 } else {
2d21ac55 1587 if (pgid == 0) {
1c79356b
A
1588 /*
1589 * zero pgid means send to my process group.
1590 */
2d21ac55
A
1591 pgrp = proc_pgrp(cp);
1592 } else {
1c79356b 1593 pgrp = pgfind(pgid);
2d21ac55
A
1594 if (pgrp == NULL) {
1595 error = ESRCH;
1596 goto out;
1597 }
1c79356b 1598 }
2d21ac55
A
1599
1600 karg.nfoundp = &nfound;
1601 karg.uc = uc;
1602 karg.signum = signum;
1603 karg.cp = cp;
b0d623f7 1604 karg.zombie = 0;
2d21ac55
A
1605
1606
 1607 /* PGRP_DROPREF drops the pgrp reference */
1608 pgrp_iterate(pgrp, PGRP_BLOCKITERATE | PGRP_DROPREF, killpg1_callback, &karg,
1609 killpg1_pgrpfilt, NULL);
1c79356b 1610 }
2d21ac55
A
1611 error = (nfound ? 0 : (posix ? EPERM : ESRCH));
1612out:
1613 kauth_cred_unref(&uc);
1614 return (error);
1c79356b
A
1615}
1616
2d21ac55 1617
1c79356b
A
1618/*
1619 * Send a signal to a process group.
1620 */
1621void
2d21ac55 1622gsignal(int pgid, int signum)
1c79356b
A
1623{
1624 struct pgrp *pgrp;
1625
2d21ac55 1626 if (pgid && (pgrp = pgfind(pgid))) {
1c79356b 1627 pgsignal(pgrp, signum, 0);
2d21ac55
A
1628 pg_rele(pgrp);
1629 }
1c79356b
A
1630}
1631
1632/*
2d21ac55 1633 * Send a signal to a process group. If checkctty is 1,
1c79356b
A
1634 * limit to members which have a controlling terminal.
1635 */
2d21ac55
A
1636
1637static int
1638pgsignal_filt(proc_t p, void * arg)
1c79356b 1639{
b0d623f7 1640 int checkctty = *(int*)arg;
1c79356b 1641
2d21ac55
A
1642 if ((checkctty == 0) || p->p_flag & P_CONTROLT)
1643 return(1);
1644 else
1645 return(0);
1c79356b
A
1646}
1647
2d21ac55
A
1648
1649static int
1650pgsignal_callback(proc_t p, void * arg)
9bccf70c 1651{
b0d623f7 1652 int signum = *(int*)arg;
9bccf70c 1653
2d21ac55
A
1654 psignal(p, signum);
1655 return(PROC_RETURNED);
1656}
1657
1658
1659void
1660pgsignal(struct pgrp *pgrp, int signum, int checkctty)
1661{
1662 if (pgrp != PGRP_NULL) {
b0d623f7 1663 pgrp_iterate(pgrp, PGRP_BLOCKITERATE, pgsignal_callback, &signum, pgsignal_filt, &checkctty);
2d21ac55 1664 }
9bccf70c
A
1665}
1666
2d21ac55
A
1667
1668void
1669tty_pgsignal(struct tty *tp, int signum, int checkctty)
1670{
1671 struct pgrp * pg;
1672
1673 pg = tty_pgrp(tp);
1674 if (pg != PGRP_NULL) {
b0d623f7 1675 pgrp_iterate(pg, PGRP_BLOCKITERATE, pgsignal_callback, &signum, pgsignal_filt, &checkctty);
2d21ac55
A
1676 pg_rele(pg);
1677 }
1678}
1c79356b
A
1679/*
1680 * Send a signal caused by a trap to a specific thread.
1681 */
1682void
2d21ac55 1683threadsignal(thread_t sig_actthread, int signum, mach_exception_code_t code)
1c79356b 1684{
2d21ac55
A
1685 struct uthread *uth;
1686 struct task * sig_task;
1687 proc_t p;
1c79356b
A
1688 int mask;
1689
1690 if ((u_int)signum >= NSIG || signum == 0)
1691 return;
1692
1693 mask = sigmask(signum);
1694 if ((mask & threadmask) == 0)
1695 return;
1696 sig_task = get_threadtask(sig_actthread);
2d21ac55 1697 p = (proc_t)(get_bsdtask_info(sig_task));
1c79356b 1698
0b4e3aa0 1699 uth = get_bsdthread_info(sig_actthread);
316670eb 1700 if (uth->uu_flag & UT_VFORK)
0b4e3aa0
A
1701 p = uth->uu_proc;
1702
2d21ac55
A
1703 proc_lock(p);
1704 if (!(p->p_lflag & P_LTRACED) && (p->p_sigignore & mask)) {
1705 proc_unlock(p);
1c79356b 1706 return;
2d21ac55 1707 }
1c79356b 1708
9bccf70c 1709 uth->uu_siglist |= mask;
1c79356b 1710 uth->uu_code = code;
2d21ac55
A
1711 proc_unlock(p);
1712
1c79356b 1713 /* mark on process as well */
1c79356b
A
1714 signal_setast(sig_actthread);
1715}
1716
2d21ac55
A
1717static kern_return_t
1718get_signalthread(proc_t p, int signum, thread_t * thr)
1c79356b 1719{
2d21ac55
A
1720 struct uthread *uth;
1721 sigset_t mask = sigmask(signum);
1722 thread_t sig_thread;
1723 struct task * sig_task = p->task;
1724 kern_return_t kret;
3e170ce0 1725
2d21ac55
A
1726 *thr = THREAD_NULL;
1727
1728 if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
1729 sig_thread = p->p_vforkact;
1730 kret = check_actforsig(sig_task, sig_thread, 1);
1731 if (kret == KERN_SUCCESS) {
1732 *thr = sig_thread;
1733 return(KERN_SUCCESS);
1734 }else
1735 return(KERN_FAILURE);
3e170ce0 1736 }
2d21ac55
A
1737
1738 proc_lock(p);
3e170ce0 1739
2d21ac55
A
1740 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
1741 if(((uth->uu_flag & UT_NO_SIGMASK)== 0) &&
1742 (((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask))) {
1743 if (check_actforsig(p->task, uth->uu_context.vc_thread, 1) == KERN_SUCCESS) {
1744 *thr = uth->uu_context.vc_thread;
1745 proc_unlock(p);
1746 return(KERN_SUCCESS);
1747 }
1748 }
1749 }
1750 proc_unlock(p);
1751 if (get_signalact(p->task, thr, 1) == KERN_SUCCESS) {
1752 return(KERN_SUCCESS);
1753 }
1754
1755 return(KERN_FAILURE);
1c79356b
A
1756}
1757
2d21ac55
A
1758/*
1759 * Send the signal to the process. If the signal has an action, the action
1760 * is usually performed by the target process rather than the caller; we add
1761 * the signal to the set of pending signals for the process.
1762 *
1763 * Exceptions:
1764 * o When a stop signal is sent to a sleeping process that takes the
1765 * default action, the process is stopped without awakening it.
1766 * o SIGCONT restarts stopped processes (or puts them back to sleep)
1767 * regardless of the signal action (eg, blocked or ignored).
1768 *
1769 * Other ignored signals are discarded immediately.
1770 */
1771static void
1772psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum)
0b4e3aa0 1773{
2d21ac55 1774 int prop;
fe8ab488 1775 user_addr_t action = USER_ADDR_NULL;
2d21ac55
A
1776 proc_t sig_proc;
1777 thread_t sig_thread;
1778 register task_t sig_task;
0b4e3aa0 1779 int mask;
9bccf70c 1780 struct uthread *uth;
2d21ac55
A
1781 kern_return_t kret;
1782 uid_t r_uid;
1783 proc_t pp;
1784 kauth_cred_t my_cred;
0b4e3aa0
A
1785
1786 if ((u_int)signum >= NSIG || signum == 0)
1787 panic("psignal signal number");
1788 mask = sigmask(signum);
1789 prop = sigprop[signum];
1790
1791#if SIGNAL_DEBUG
2d21ac55 1792 if(rdebug_proc && (p != PROC_NULL) && (p == rdebug_proc)) {
0b4e3aa0
A
1793 ram_printf(3);
1794 }
1795#endif /* SIGNAL_DEBUG */
1796
3e170ce0
A
 1797 /* catch unexpected initproc kills early for easier debugging */
1798 if (signum == SIGKILL && p == initproc)
1799 panic_plain("unexpected SIGKILL of %s %s",
1800 (p->p_name[0] != '\0' ? p->p_name : "initproc"),
1801 ((p->p_csflags & CS_KILLED) ? "(CS_KILLED)" : ""));
1802
1c79356b
A
1803 /*
1804 * We will need the task pointer later. Grab it now to
1805 * check for a zombie process. Also don't send signals
1806 * to kernel internal tasks.
1807 */
2d21ac55
A
1808 if (flavor & PSIG_VFORK) {
1809 sig_task = task;
1810 sig_thread = thread;
6d2010ae 1811 sig_proc = p;
2d21ac55
A
1812 } else if (flavor & PSIG_THREAD) {
1813 sig_task = get_threadtask(thread);
1814 sig_thread = thread;
1815 sig_proc = (proc_t)get_bsdtask_info(sig_task);
3e170ce0
A
1816 } else if (flavor & PSIG_TRY_THREAD) {
1817 sig_task = p->task;
1818 sig_thread = thread;
1819 sig_proc = p;
2d21ac55
A
1820 } else {
1821 sig_task = p->task;
2d21ac55 1822 sig_thread = (struct thread *)0;
6d2010ae 1823 sig_proc = p;
2d21ac55 1824 }
6d2010ae
A
1825
1826 if ((sig_task == TASK_NULL) || is_kerneltask(sig_task))
1c79356b
A
1827 return;
1828
1829 /*
1830 * do not send signals to the process that has the thread
 1831 * doing a reboot(). Doing so would mark that thread aborted
6d2010ae
A
 1832 * and can cause IO failures which will cause data loss. There's
1833 * also no need to send a signal to a process that is in the middle
1834 * of being torn down.
1c79356b 1835 */
6d2010ae
A
1836 if (ISSET(sig_proc->p_flag, P_REBOOT) ||
1837 ISSET(sig_proc->p_lflag, P_LEXIT))
1c79356b
A
1838 return;
1839
2d21ac55
A
1840 if( (flavor & (PSIG_VFORK | PSIG_THREAD)) == 0) {
1841 proc_knote(sig_proc, NOTE_SIGNAL | signum);
1842 }
1843
2d21ac55
A
1844 if ((flavor & PSIG_LOCKED)== 0)
1845 proc_signalstart(sig_proc, 0);
9bccf70c
A
1846
1847 /*
1848 * Deliver the signal to the first thread in the task. This
 1849 * allows single-threaded applications that use signals to
 1850 * be linked with multithreaded libraries. We have
55e303ae 1851 * an implicit reference to the current thread, but need
9bccf70c
A
1852 * an explicit one otherwise. The thread reference keeps
1853 * the corresponding task data structures around too. This
1854 * reference is released by thread_deallocate.
1c79356b 1855 */
3e170ce0 1856
9bccf70c 1857
2d21ac55
A
1858 if (((flavor & PSIG_VFORK) == 0) && ((sig_proc->p_lflag & P_LTRACED) == 0) && (sig_proc->p_sigignore & mask)) {
1859 DTRACE_PROC3(signal__discard, thread_t, sig_thread, proc_t, sig_proc, int, signum);
1860 goto psigout;
1861 }
9bccf70c 1862
2d21ac55
A
1863 if (flavor & PSIG_VFORK) {
1864 action = SIG_DFL;
1865 act_set_astbsd(sig_thread);
1866 kret = KERN_SUCCESS;
3e170ce0
A
1867 } else if (flavor & PSIG_TRY_THREAD) {
1868 uth = get_bsdthread_info(sig_thread);
1869 if (((uth->uu_flag & UT_NO_SIGMASK) == 0) &&
1870 (((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask)) &&
1871 ((kret = check_actforsig(sig_proc->task, sig_thread, 1)) == KERN_SUCCESS)) {
1872 /* deliver to specified thread */
1873 } else {
1874 /* deliver to any willing thread */
1875 kret = get_signalthread(sig_proc, signum, &sig_thread);
1876 }
2d21ac55
A
1877 } else if (flavor & PSIG_THREAD) {
1878 /* If successful return with ast set */
1879 kret = check_actforsig(sig_task, sig_thread, 1);
1880 } else {
1881 /* If successful return with ast set */
1882 kret = get_signalthread(sig_proc, signum, &sig_thread);
1883 }
1884 if (kret != KERN_SUCCESS) {
9bccf70c
A
1885#if SIGNAL_DEBUG
1886 ram_printf(1);
1887#endif /* SIGNAL_DEBUG */
1888 goto psigout;
1c79356b
A
1889 }
1890
2d21ac55 1891 uth = get_bsdthread_info(sig_thread);
1c79356b
A
1892
1893 /*
1894 * If proc is traced, always give parent a chance.
1895 */
2d21ac55
A
1896
1897 if ((flavor & PSIG_VFORK) == 0) {
1898 if (sig_proc->p_lflag & P_LTRACED)
1c79356b 1899 action = SIG_DFL;
2d21ac55
A
1900 else {
1901 /*
1902 * If the signal is being ignored,
1903 * then we forget about it immediately.
1904 * (Note: we don't set SIGCONT in p_sigignore,
1905 * and if it is set to SIG_IGN,
1906 * action will be SIG_DFL here.)
1907 */
1908 if (sig_proc->p_sigignore & mask)
1909 goto psigout;
1910 if (uth->uu_sigwait & mask)
1911 action = KERN_SIG_WAIT;
1912 else if (uth->uu_sigmask & mask)
1913 action = KERN_SIG_HOLD;
1914 else if (sig_proc->p_sigcatch & mask)
1915 action = KERN_SIG_CATCH;
1916 else
1917 action = SIG_DFL;
1918 }
1c79356b
A
1919 }
1920
2d21ac55
A
1921 proc_lock(sig_proc);
1922
1923 if (sig_proc->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
1924 (sig_proc->p_lflag & P_LTRACED) == 0)
1925 sig_proc->p_nice = NZERO;
1926
1927 if (prop & SA_CONT)
9bccf70c 1928 uth->uu_siglist &= ~stopsigmask;
1c79356b
A
1929
1930 if (prop & SA_STOP) {
2d21ac55 1931 struct pgrp *pg;
1c79356b
A
1932 /*
1933 * If sending a tty stop signal to a member of an orphaned
1934 * process group, discard the signal here if the action
1935 * is default; don't stop the process below if sleeping,
1936 * and don't clear any pending SIGCONT.
1937 */
2d21ac55
A
1938 proc_unlock(sig_proc);
1939 pg = proc_pgrp(sig_proc);
1940 if (prop & SA_TTYSTOP && pg->pg_jobc == 0 &&
1941 action == SIG_DFL) {
1942 pg_rele(pg);
1c79356b 1943 goto psigout;
2d21ac55
A
1944 }
1945 pg_rele(pg);
1946 proc_lock(sig_proc);
9bccf70c 1947 uth->uu_siglist &= ~contsigmask;
1c79356b 1948 }
2d21ac55 1949
9bccf70c 1950 uth->uu_siglist |= mask;
2d21ac55
A
1951 /*
 1952 * Repost the AST in case sigthread has already processed
 1953 * its AST and missed the signal post.
1954 */
1955 if (action == KERN_SIG_CATCH)
1956 act_set_astbsd(sig_thread);
1c79356b 1957
9bccf70c 1958
1c79356b
A
1959 /*
1960 * Defer further processing for signals which are held,
1961 * except that stopped processes must be continued by SIGCONT.
1962 */
2d21ac55
A
 1963 /* vfork will not go through here, as its action is SIG_DFL */
1964 if ((action == KERN_SIG_HOLD) && ((prop & SA_CONT) == 0 || sig_proc->p_stat != SSTOP)) {
1965 proc_unlock(sig_proc);
1c79356b 1966 goto psigout;
9bccf70c
A
1967 }
1968 /*
1969 * SIGKILL priority twiddling moved here from above because
1970 * it needs sig_thread. Could merge it into large switch
1971 * below if we didn't care about priority for tracing
1972 * as SIGKILL's action is always SIG_DFL.
1973 */
2d21ac55
A
1974 if ((signum == SIGKILL) && (sig_proc->p_nice > NZERO)) {
1975 sig_proc->p_nice = NZERO;
9bccf70c
A
1976 }
1977
1978 /*
1979 * Process is traced - wake it up (if not already
1980 * stopped) so that it can discover the signal in
1981 * issig() and stop for the parent.
1982 */
2d21ac55
A
1983 if (sig_proc->p_lflag & P_LTRACED) {
1984 if (sig_proc->p_stat != SSTOP)
1985 goto runlocked;
1986 else {
1987 proc_unlock(sig_proc);
9bccf70c 1988 goto psigout;
2d21ac55 1989 }
9bccf70c 1990 }
2d21ac55
A
1991 if ((flavor & PSIG_VFORK) != 0)
1992 goto runlocked;
9bccf70c 1993
91447636 1994 if (action == KERN_SIG_WAIT) {
b0d623f7
A
1995#if CONFIG_DTRACE
1996 /*
1997 * DTrace proc signal-clear returns a siginfo_t. Collect the needed info.
1998 */
1999 r_uid = kauth_getruid(); /* per thread credential; protected by our thread context */
2000
2001 bzero((caddr_t)&(uth->t_dtrace_siginfo), sizeof(uth->t_dtrace_siginfo));
2002
2003 uth->t_dtrace_siginfo.si_signo = signum;
2004 uth->t_dtrace_siginfo.si_pid = current_proc()->p_pid;
2005 uth->t_dtrace_siginfo.si_status = W_EXITCODE(signum, 0);
2006 uth->t_dtrace_siginfo.si_uid = r_uid;
2007 uth->t_dtrace_siginfo.si_code = 0;
2008#endif
9bccf70c
A
2009 uth->uu_sigwait = mask;
2010 uth->uu_siglist &= ~mask;
9bccf70c
A
2011 wakeup(&uth->uu_sigwait);
2012 /* if it is SIGCONT resume whole process */
91447636 2013 if (prop & SA_CONT) {
b0d623f7 2014 OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
2d21ac55
A
2015 sig_proc->p_contproc = current_proc()->p_pid;
2016
2017 proc_unlock(sig_proc);
39236c6e 2018 (void) task_resume_internal(sig_task);
2d21ac55 2019 goto psigout;
91447636 2020 }
2d21ac55 2021 proc_unlock(sig_proc);
9bccf70c
A
2022 goto psigout;
2023 }
2024
2025 if (action != SIG_DFL) {
2026 /*
2027 * User wants to catch the signal.
2028 * Wake up the thread, but don't un-suspend it
2029 * (except for SIGCONT).
2030 */
55e303ae 2031 if (prop & SA_CONT) {
b0d623f7 2032 OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
2d21ac55 2033 proc_unlock(sig_proc);
39236c6e 2034 (void) task_resume_internal(sig_task);
2d21ac55
A
2035 proc_lock(sig_proc);
2036 sig_proc->p_stat = SRUN;
2037 } else if (sig_proc->p_stat == SSTOP) {
2038 proc_unlock(sig_proc);
9bccf70c 2039 goto psigout;
9bccf70c 2040 }
9bccf70c 2041 /*
2d21ac55
A
2042 * Fill out siginfo structure information to pass to the
2043 * signalled process/thread sigaction handler, when it
2044 * wakes up. si_code is 0 because this is an ordinary
2045 * signal, not a SIGCHLD, and so si_status is the signal
2046 * number itself, instead of the child process exit status.
2047 * We shift this left because it will be shifted right before
 2048 * it is passed to user space. Kind of ugly to use W_EXITCODE
2049 * this way, but it beats defining a new macro.
2050 *
2051 * Note: Avoid the SIGCHLD recursion case!
9bccf70c 2052 */
2d21ac55
A
2053 if (signum != SIGCHLD) {
2054 proc_unlock(sig_proc);
2055 r_uid = kauth_getruid();
2056 proc_lock(sig_proc);
2057
2058 sig_proc->si_pid = current_proc()->p_pid;
2059 sig_proc->si_status = W_EXITCODE(signum, 0);
2060 sig_proc->si_uid = r_uid;
2061 sig_proc->si_code = 0;
91447636 2062 }
1c79356b 2063
2d21ac55 2064 goto runlocked;
1c79356b
A
2065 } else {
2066 /* Default action - varies */
2067 if (mask & stopsigmask) {
2068 /*
2069 * These are the signals which by default
2070 * stop a process.
2071 *
2072 * Don't clog system with children of init
2073 * stopped from the keyboard.
2074 */
2d21ac55
A
2075 if (!(prop & SA_STOP) && sig_proc->p_pptr == initproc) {
2076 proc_unlock(sig_proc);
2077 psignal_locked(sig_proc, SIGKILL);
2078 proc_lock(sig_proc);
9bccf70c 2079 uth->uu_siglist &= ~mask;
2d21ac55
A
2080 proc_unlock(sig_proc);
2081 goto psigout;
1c79356b
A
2082 }
2083
2084 /*
9bccf70c
A
2085 * Stop the task
2086 * if task hasn't already been stopped by
2087 * a signal.
1c79356b 2088 */
2d21ac55
A
2089 uth->uu_siglist &= ~mask;
2090 if (sig_proc->p_stat != SSTOP) {
2091 sig_proc->p_xstat = signum;
2092 sig_proc->p_stat = SSTOP;
b0d623f7 2093 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &sig_proc->p_flag);
2d21ac55
A
2094 sig_proc->p_lflag &= ~P_LWAITED;
2095 proc_unlock(sig_proc);
2096
2097 pp = proc_parentholdref(sig_proc);
2098 stop(sig_proc, pp);
2099 if (( pp != PROC_NULL) && ((pp->p_flag & P_NOCLDSTOP) == 0)) {
2100
2101 my_cred = kauth_cred_proc_ref(sig_proc);
6d2010ae 2102 r_uid = kauth_cred_getruid(my_cred);
2d21ac55
A
2103 kauth_cred_unref(&my_cred);
2104
2105 proc_lock(sig_proc);
2106 pp->si_pid = sig_proc->p_pid;
2107 /*
2108 * POSIX: sigaction for a stopped child
2109 * when sent to the parent must set the
2110 * child's signal number into si_status.
2111 */
2112 if (signum != SIGSTOP)
2113 pp->si_status = WEXITSTATUS(sig_proc->p_xstat);
2114 else
2115 pp->si_status = W_EXITCODE(signum, signum);
9bccf70c 2116 pp->si_code = CLD_STOPPED;
2d21ac55
A
2117 pp->si_uid = r_uid;
2118 proc_unlock(sig_proc);
2119
9bccf70c 2120 psignal(pp, SIGCHLD);
1c79356b 2121 }
2d21ac55
A
2122 if (pp != PROC_NULL)
2123 proc_parentdropref(pp, 0);
2124 } else
2125 proc_unlock(sig_proc);
2126 goto psigout;
1c79356b
A
2127 }
2128
2d21ac55
A
2129 DTRACE_PROC3(signal__send, thread_t, sig_thread, proc_t, p, int, signum);
2130
2131 /*
2132 * enters switch with sig_proc lock held but dropped when
2133 * gets out of switch
2134 */
1c79356b
A
2135 switch (signum) {
2136 /*
2137 * Signals ignored by default have been dealt
2138 * with already, since their bits are on in
2139 * p_sigignore.
2140 */
2141
2142 case SIGKILL:
2143 /*
2144 * Kill signal always sets process running and
2145 * unsuspends it.
2146 */
2147 /*
2148 * Process will be running after 'run'
2149 */
2d21ac55 2150 sig_proc->p_stat = SRUN;
6d2010ae
A
2151 /*
 2152 * In scenarios where suspend/resume are racing the
 2153 * signal, AST_BSD may already be missing by the time we
 2154 * get here, so set it again to avoid the race. This was
 2155 * the scenario with spindump-enabled shutdowns. We would
 2156 * need to cover this appropriately down the line.
2157 */
2158 act_set_astbsd(sig_thread);
2d21ac55 2159 thread_abort(sig_thread);
316670eb 2160 proc_unlock(sig_proc);
1c79356b 2161
2d21ac55 2162 goto psigout;
1c79356b
A
2163
2164 case SIGCONT:
2165 /*
2166 * Let the process run. If it's sleeping on an
2167 * event, it remains so.
2168 */
b0d623f7 2169 OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
2d21ac55
A
2170 sig_proc->p_contproc = sig_proc->p_pid;
2171
2172 proc_unlock(sig_proc);
39236c6e 2173 (void) task_resume_internal(sig_task);
2d21ac55
A
2174 proc_lock(sig_proc);
2175 /*
2176 * When processing a SIGCONT, we need to check
2177 * to see if there are signals pending that
2178 * were not delivered because we had been
2179 * previously stopped. If that's the case,
2180 * we need to thread_abort_safely() to trigger
2181 * interruption of the current system call to
2182 * cause their handlers to fire. If it's only
2183 * the SIGCONT, then don't wake up.
2184 */
2185 if (((flavor & (PSIG_VFORK|PSIG_THREAD)) == 0) && (((uth->uu_siglist & ~uth->uu_sigmask) & ~sig_proc->p_sigignore) & ~mask)) {
2186 uth->uu_siglist &= ~mask;
2187 sig_proc->p_stat = SRUN;
2188 goto runlocked;
1c79356b 2189 }
2d21ac55 2190
9bccf70c 2191 uth->uu_siglist &= ~mask;
2d21ac55
A
2192 sig_proc->p_stat = SRUN;
2193 proc_unlock(sig_proc);
2194 goto psigout;
1c79356b
A
2195
2196 default:
2d21ac55
A
2197 /*
2198 * A signal which has a default action of killing
2199 * the process, and for which there is no handler,
2200 * needs to act like SIGKILL
2201 */
2202 if (((flavor & (PSIG_VFORK|PSIG_THREAD)) == 0) && (action == SIG_DFL) && (prop & SA_KILL)) {
2203 sig_proc->p_stat = SRUN;
2204 proc_unlock(sig_proc);
2205 thread_abort(sig_thread);
2206 goto psigout;
2207 }
2208
1c79356b
A
2209 /*
2210 * All other signals wake up the process, but don't
2211 * resume it.
2212 */
2d21ac55
A
2213 if (sig_proc->p_stat == SSTOP) {
2214 proc_unlock(sig_proc);
2215 goto psigout;
2216 }
2217 goto runlocked;
1c79356b
A
2218 }
2219 }
2220 /*NOTREACHED*/
2d21ac55
A
2221
2222runlocked:
1c79356b
A
2223 /*
2224 * If we're being traced (possibly because someone attached us
2225 * while we were stopped), check for a signal from the debugger.
2226 */
2d21ac55
A
2227 if (sig_proc->p_stat == SSTOP) {
2228 if ((sig_proc->p_lflag & P_LTRACED) != 0 && sig_proc->p_xstat != 0)
2229 uth->uu_siglist |= sigmask(sig_proc->p_xstat);
2230 if ((flavor & PSIG_VFORK) != 0) {
2231 sig_proc->p_stat = SRUN;
9bccf70c 2232 }
2d21ac55 2233 proc_unlock(sig_proc);
9bccf70c
A
2234 } else {
2235 /*
 2236 * Equivalent of setrunnable(p) in BSD:
 2237 * wake up the thread if it is interruptible.
2238 */
2d21ac55
A
2239 sig_proc->p_stat = SRUN;
2240 proc_unlock(sig_proc);
2241 if ((flavor & PSIG_VFORK) == 0)
2242 thread_abort_safely(sig_thread);
2243 }
2244psigout:
2245 if ((flavor & PSIG_LOCKED)== 0) {
2246 proc_signalend(sig_proc, 0);
1c79356b 2247 }
1c79356b
A
2248}
2249
2d21ac55
A
2250void
2251psignal(proc_t p, int signum)
1c79356b 2252{
2d21ac55
A
2253 psignal_internal(p, NULL, NULL, 0, signum);
2254}
1c79356b 2255
2d21ac55
A
2256void
2257psignal_locked(proc_t p, int signum)
2258{
2259 psignal_internal(p, NULL, NULL, PSIG_LOCKED, signum);
1c79356b
A
2260}
2261
2d21ac55
A
2262void
2263psignal_vfork(proc_t p, task_t new_task, thread_t thread, int signum)
1c79356b 2264{
2d21ac55
A
2265 psignal_internal(p, new_task, thread, PSIG_VFORK, signum);
2266}
1c79356b 2267
2d21ac55
A
2268static void
2269psignal_uthread(thread_t thread, int signum)
2270{
2271 psignal_internal(PROC_NULL, TASK_NULL, thread, PSIG_THREAD, signum);
1c79356b
A
2272}
2273
3e170ce0
A
2274/* same as psignal(), but prefer delivery to 'thread' if possible */
2275static void
2276psignal_try_thread(proc_t p, thread_t thread, int signum)
2277{
2278 psignal_internal(p, NULL, thread, PSIG_TRY_THREAD, signum);
2279}
2d21ac55 2280
1c79356b
A
2281/*
2282 * If the current process has received a signal (should be caught or cause
2283 * termination, should interrupt current syscall), return the signal number.
2284 * Stop signals with default action are processed immediately, then cleared;
2285 * they aren't returned. This is checked after each entry to the system for
2286 * a syscall or trap (though this can usually be done without calling issignal
2287 * by checking the pending signal masks in the CURSIG macro.) The normal call
2288 * sequence is
2289 *
2290 * while (signum = CURSIG(curproc))
2291 * postsig(signum);
2292 */
2293int
316670eb 2294issignal_locked(proc_t p)
1c79356b 2295{
2d21ac55 2296 int signum, mask, prop, sigbits;
91447636 2297 thread_t cur_act;
1c79356b 2298 struct uthread * ut;
2d21ac55
A
2299 proc_t pp;
2300 kauth_cred_t my_cred;
2301 int retval = 0;
2302 uid_t r_uid;
1c79356b 2303
91447636 2304 cur_act = current_thread();
1c79356b 2305
9bccf70c
A
2306#if SIGNAL_DEBUG
2307 if(rdebug_proc && (p == rdebug_proc)) {
2308 ram_printf(3);
2309 }
2310#endif /* SIGNAL_DEBUG */
1c79356b 2311
1c79356b
A
2312 /*
2313 * Try to grab the signal lock.
2314 */
2315 if (sig_try_locked(p) <= 0) {
2d21ac55 2316 return(0);
1c79356b
A
2317 }
2318
2d21ac55
A
2319 proc_signalstart(p, 1);
2320
1c79356b
A
2321 ut = get_bsdthread_info(cur_act);
2322 for(;;) {
9bccf70c 2323 sigbits = ut->uu_siglist & ~ut->uu_sigmask;
1c79356b 2324
2d21ac55 2325 if (p->p_lflag & P_LPPWAIT)
1c79356b
A
2326 sigbits &= ~stopsigmask;
2327 if (sigbits == 0) { /* no signal to send */
2d21ac55
A
2328 retval = 0;
2329 goto out;
1c79356b 2330 }
2d21ac55 2331
1c79356b
A
2332 signum = ffs((long)sigbits);
2333 mask = sigmask(signum);
2334 prop = sigprop[signum];
2335
1c79356b
A
2336 /*
2337 * We should see pending but ignored signals
2d21ac55 2338 * only if P_LTRACED was on when they were posted.
1c79356b 2339 */
2d21ac55 2340 if (mask & p->p_sigignore && (p->p_lflag & P_LTRACED) == 0) {
9bccf70c 2341 ut->uu_siglist &= ~mask; /* take the signal! */
1c79356b
A
2342 continue;
2343 }
2d21ac55
A
2344 if (p->p_lflag & P_LTRACED && (p->p_lflag & P_LPPWAIT) == 0) {
2345 task_t task;
1c79356b
A
2346 /*
2347 * If traced, always stop, and stay
2348 * stopped until released by the debugger.
2349 */
2350 /* ptrace debugging */
2351 p->p_xstat = signum;
2d21ac55
A
2352
2353 if (p->p_lflag & P_LSIGEXC) {
9bccf70c
A
2354 p->sigwait = TRUE;
2355 p->sigwait_thread = cur_act;
2356 p->p_stat = SSTOP;
b0d623f7 2357 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
2d21ac55 2358 p->p_lflag &= ~P_LWAITED;
9bccf70c 2359 ut->uu_siglist &= ~mask; /* clear the old signal */
2d21ac55
A
2360 proc_signalend(p, 1);
2361 proc_unlock(p);
9bccf70c 2362 do_bsdexception(EXC_SOFTWARE, EXC_SOFT_SIGNAL, signum);
2d21ac55
A
2363 proc_lock(p);
2364 proc_signalstart(p, 1);
9bccf70c 2365 } else {
2d21ac55
A
2366 proc_unlock(p);
2367 my_cred = kauth_cred_proc_ref(p);
6d2010ae 2368 r_uid = kauth_cred_getruid(my_cred);
2d21ac55
A
2369 kauth_cred_unref(&my_cred);
2370
2371 pp = proc_parentholdref(p);
2372 if (pp != PROC_NULL) {
2373 proc_lock(pp);
2374
2375 pp->si_pid = p->p_pid;
2376 pp->si_status = p->p_xstat;
2377 pp->si_code = CLD_TRAPPED;
2378 pp->si_uid = r_uid;
2379
2380 proc_unlock(pp);
2381 }
2382
9bccf70c
A
2383 /*
2384 * XXX Have to really stop for debuggers;
2385 * XXX stop() doesn't do the right thing.
9bccf70c
A
2386 */
2387 task = p->task;
39236c6e 2388 task_suspend_internal(task);
2d21ac55
A
2389
2390 proc_lock(p);
9bccf70c
A
2391 p->sigwait = TRUE;
2392 p->sigwait_thread = cur_act;
2393 p->p_stat = SSTOP;
b0d623f7 2394 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
2d21ac55 2395 p->p_lflag &= ~P_LWAITED;
9bccf70c 2396 ut->uu_siglist &= ~mask; /* clear the old signal */
9bccf70c 2397
2d21ac55
A
2398 proc_signalend(p, 1);
2399 proc_unlock(p);
2400
2401 if (pp != PROC_NULL) {
2402 psignal(pp, SIGCHLD);
2403 proc_list_lock();
2404 wakeup((caddr_t)pp);
2405 proc_parentdropref(pp, 1);
2406 proc_list_unlock();
2407 }
2408
9bccf70c
A
2409 assert_wait((caddr_t)&p->sigwait, (THREAD_INTERRUPTIBLE));
2410 thread_block(THREAD_CONTINUE_NULL);
2d21ac55
A
2411 proc_lock(p);
2412 proc_signalstart(p, 1);
9bccf70c
A
2413 }
2414
1c79356b
A
2415 p->sigwait = FALSE;
2416 p->sigwait_thread = NULL;
2417 wakeup((caddr_t)&p->sigwait_thread);
2418
2419 /*
 2420 * This code detects the case where gdb is killed
 2421 * while the traced program is still attached:
 2422 * pgsignal would deliver the SIGKILL to the traced
 2423 * program, and that is the case we want to catch here.
2424 */
9bccf70c 2425 if (ut->uu_siglist & sigmask(SIGKILL)) {
1c79356b
A
2426 /*
2427 * Wait event may still be outstanding;
2428 * clear it, since sig_lock_to_exit will
2429 * wait.
2430 */
91447636 2431 clear_wait(current_thread(), THREAD_INTERRUPTED);
1c79356b
A
2432 sig_lock_to_exit(p);
2433 /*
2434 * Since this thread will be resumed
2435 * to allow the current syscall to
2436 * be completed, must save u_qsave
2437 * before calling exit(). (Since exit()
2438 * calls closef() which can trash u_qsave.)
2439 */
2d21ac55
A
2440 proc_signalend(p, 1);
2441 proc_unlock(p);
b0d623f7
A
2442 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_FRCEXIT) | DBG_FUNC_NONE,
2443 p->p_pid, W_EXITCODE(0, SIGKILL), 2, 0, 0);
2d21ac55 2444 exit1(p, W_EXITCODE(0, SIGKILL), (int *)NULL);
316670eb 2445 proc_lock(p);
1c79356b
A
2446 return(0);
2447 }
2448
2449 /*
2450 * We may have to quit
2451 */
91447636 2452 if (thread_should_abort(current_thread())) {
2d21ac55
A
2453 retval = 0;
2454 goto out;
1c79356b
A
2455 }
2456 /*
2457 * If parent wants us to take the signal,
2458 * then it will leave it in p->p_xstat;
2459 * otherwise we just look for signals again.
2460 */
2461 signum = p->p_xstat;
2462 if (signum == 0)
2463 continue;
2464 /*
2465 * Put the new signal into p_siglist. If the
2466 * signal is being masked, look for other signals.
2467 */
2468 mask = sigmask(signum);
9bccf70c 2469 ut->uu_siglist |= mask;
9bccf70c 2470 if (ut->uu_sigmask & mask)
1c79356b
A
2471 continue;
2472 }
2473
2474 /*
2475 * Decide whether the signal should be returned.
2476 * Return the signal's number, or fall through
2477 * to clear it from the pending mask.
2478 */
2479
2480 switch ((long)p->p_sigacts->ps_sigact[signum]) {
2481
2482 case (long)SIG_DFL:
1c79356b
A
2483 /*
2484 * If there is a pending stop signal to process
2485 * with default action, stop here,
2486 * then clear the signal. However,
2487 * if process is member of an orphaned
2488 * process group, ignore tty stop signals.
2489 */
2490 if (prop & SA_STOP) {
2d21ac55
A
2491 struct pgrp * pg;
2492
2493 proc_unlock(p);
2494 pg = proc_pgrp(p);
2495 if (p->p_lflag & P_LTRACED ||
2496 (pg->pg_jobc == 0 &&
2497 prop & SA_TTYSTOP)) {
2498 proc_lock(p);
2499 pg_rele(pg);
1c79356b 2500 break; /* == ignore */
2d21ac55
A
2501 }
2502 pg_rele(pg);
9bccf70c 2503 if (p->p_stat != SSTOP) {
2d21ac55 2504 proc_lock(p);
9bccf70c 2505 p->p_xstat = signum;
2d21ac55
A
2506
2507 p->p_stat = SSTOP;
2508 p->p_lflag &= ~P_LWAITED;
2509 proc_unlock(p);
2510
2511 pp = proc_parentholdref(p);
2512 stop(p, pp);
2513 if ((pp != PROC_NULL) && ((pp->p_flag & P_NOCLDSTOP) == 0)) {
2514 my_cred = kauth_cred_proc_ref(p);
6d2010ae 2515 r_uid = kauth_cred_getruid(my_cred);
2d21ac55
A
2516 kauth_cred_unref(&my_cred);
2517
2518 proc_lock(pp);
9bccf70c 2519 pp->si_pid = p->p_pid;
2d21ac55 2520 pp->si_status = WEXITSTATUS(p->p_xstat);
9bccf70c 2521 pp->si_code = CLD_STOPPED;
2d21ac55
A
2522 pp->si_uid = r_uid;
2523 proc_unlock(pp);
2524
9bccf70c
A
2525 psignal(pp, SIGCHLD);
2526 }
2d21ac55
A
2527 if (pp != PROC_NULL)
2528 proc_parentdropref(pp, 0);
1c79356b 2529 }
2d21ac55 2530 proc_lock(p);
1c79356b
A
2531 break;
2532 } else if (prop & SA_IGNORE) {
2533 /*
2534 * Except for SIGCONT, shouldn't get here.
2535 * Default action is to ignore; drop it.
2536 */
2537 break; /* == ignore */
2538 } else {
9bccf70c 2539 ut->uu_siglist &= ~mask; /* take the signal! */
2d21ac55
A
2540 retval = signum;
2541 goto out;
1c79356b 2542 }
2d21ac55 2543
1c79356b 2544 /*NOTREACHED*/
2d21ac55 2545 break;
1c79356b
A
2546
2547 case (long)SIG_IGN:
2548 /*
2549 * Masking above should prevent us ever trying
2550 * to take action on an ignored signal other
2551 * than SIGCONT, unless process is traced.
2552 */
2553 if ((prop & SA_CONT) == 0 &&
2d21ac55 2554 (p->p_lflag & P_LTRACED) == 0)
1c79356b
A
2555 printf("issignal\n");
2556 break; /* == ignore */
2557
2558 default:
2559 /*
2560 * This signal has an action, let
2561 * postsig() process it.
2562 */
9bccf70c 2563 ut->uu_siglist &= ~mask; /* take the signal! */
2d21ac55
A
2564 retval = signum;
2565 goto out;
1c79356b 2566 }
9bccf70c 2567 ut->uu_siglist &= ~mask; /* take the signal! */
1c79356b
A
2568 }
2569 /* NOTREACHED */
2d21ac55 2570out:
6d2010ae 2571 proc_signalend(p, 1);
2d21ac55 2572 return(retval);
1c79356b
A
2573}
2574
2575/* called from _sleep */
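/*
 * Non-destructive variant of issignal_locked(): report the signal number
 * that would be taken (or 0), but do not clear pending bits, stop the
 * process, or notify the parent.  Runs without taking the proc lock.
 */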
2576int
2d21ac55 2577CURSIG(proc_t p)
1c79356b 2578{
2d21ac55 2579 int signum, mask, prop, sigbits;
91447636 2580 thread_t cur_act;
1c79356b
A
2581 struct uthread * ut;
2582 int retnum = 0;
2583
1c79356b 2584
91447636 2585 cur_act = current_thread();
1c79356b
A
2586
2587 ut = get_bsdthread_info(cur_act);
2588
9bccf70c
A
2589 if (ut->uu_siglist == 0)
2590 return (0);
2591
2d21ac55 2592 if (((ut->uu_siglist & ~ut->uu_sigmask) == 0) && ((p->p_lflag & P_LTRACED) == 0))
9bccf70c
A
2593 return (0);
2594
2595 sigbits = ut->uu_siglist & ~ut->uu_sigmask;
1c79356b
A
2596
2597 for(;;) {
2d21ac55 2598 if (p->p_lflag & P_LPPWAIT)
1c79356b
A
2599 sigbits &= ~stopsigmask;
2600 if (sigbits == 0) { /* no signal to send */
2601 return (retnum);
2602 }
2603
2604 signum = ffs((long)sigbits);
2605 mask = sigmask(signum);
2606 prop = sigprop[signum];
6d2010ae 2607 sigbits &= ~mask; /* take the signal out */
1c79356b
A
2608
2609 /*
2610 * We should see pending but ignored signals
2d21ac55 2611 * only if P_LTRACED was on when they were posted.
1c79356b 2612 */
2d21ac55 2613 if (mask & p->p_sigignore && (p->p_lflag & P_LTRACED) == 0) {
1c79356b
A
2614 continue;
2615 }
6d2010ae 2616
2d21ac55 2617 if (p->p_lflag & P_LTRACED && (p->p_lflag & P_LPPWAIT) == 0) {
1c79356b
A
2618 return(signum);
2619 }
2620
2621 /*
2622 * Decide whether the signal should be returned.
2623 * Return the signal's number, or fall through
2624 * to clear it from the pending mask.
2625 */
2626
2627 switch ((long)p->p_sigacts->ps_sigact[signum]) {
2628
2629 case (long)SIG_DFL:
1c79356b
A
2630 /*
2631 * If there is a pending stop signal to process
2632 * with default action, stop here,
2633 * then clear the signal. However,
2634 * if process is member of an orphaned
2635 * process group, ignore tty stop signals.
2636 */
2637 if (prop & SA_STOP) {
2d21ac55
A
2638 struct pgrp *pg;
2639
2640 pg = proc_pgrp(p);
2641
2642 if (p->p_lflag & P_LTRACED ||
2643 (pg->pg_jobc == 0 &&
2644 prop & SA_TTYSTOP)) {
2645 pg_rele(pg);
1c79356b 2646 break; /* == ignore */
2d21ac55
A
2647 }
2648 pg_rele(pg);
1c79356b
A
2649 retnum = signum;
2650 break;
2651 } else if (prop & SA_IGNORE) {
2652 /*
2653 * Except for SIGCONT, shouldn't get here.
2654 * Default action is to ignore; drop it.
2655 */
2656 break; /* == ignore */
2657 } else {
2658 return (signum);
2659 }
2660 /*NOTREACHED*/
2661
2662 case (long)SIG_IGN:
2663 /*
2664 * Masking above should prevent us ever trying
2665 * to take action on an ignored signal other
2666 * than SIGCONT, unless process is traced.
2667 */
2668 if ((prop & SA_CONT) == 0 &&
2d21ac55 2669 (p->p_lflag & P_LTRACED) == 0)
1c79356b
A
2670 printf("issignal\n");
2671 break; /* == ignore */
2672
2673 default:
2674 /*
2675 * This signal has an action, let
2676 * postsig() process it.
2677 */
2678 return (signum);
2679 }
1c79356b
A
2680 }
2681 /* NOTREACHED */
2682}
2683
2684/*
2685 * Put the argument process into the stopped state and notify the parent
2686 * via wakeup. Signals are handled elsewhere. The process must not be
2687 * on the run queue.
2688 */
2d21ac55
A
2689static void
2690stop(proc_t p, proc_t parent)
1c79356b 2691{
b0d623f7 2692 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
2d21ac55
A
2693 if ((parent != PROC_NULL) && (parent->p_stat != SSTOP)) {
2694 proc_list_lock();
2695 wakeup((caddr_t)parent);
2696 proc_list_unlock();
2697 }
39236c6e 2698 (void) task_suspend_internal(p->task);
1c79356b
A
2699}
2700
2701/*
2702 * Take the action for the specified signal
2703 * from the current set of pending signals.
2704 */
2705void
316670eb 2706postsig_locked(int signum)
1c79356b 2707{
2d21ac55 2708 proc_t p = current_proc();
91447636
A
2709 struct sigacts *ps = p->p_sigacts;
2710 user_addr_t catcher;
b0d623f7 2711 uint32_t code;
1c79356b 2712 int mask, returnmask;
9bccf70c 2713 struct uthread * ut;
1c79356b
A
2714
2715#if DIAGNOSTIC
2716 if (signum == 0)
2717 panic("postsig");
2718 /*
2719 * This must be called on master cpu
2720 */
2721 if (cpu_number() != master_cpu)
2722 panic("psig not on master");
2723#endif
2724
1c79356b
A
2725 /*
2726 * Try to grab the signal lock.
2727 */
2728 if (sig_try_locked(p) <= 0) {
1c79356b
A
2729 return;
2730 }
2731
2d21ac55
A
2732 proc_signalstart(p, 1);
2733
91447636 2734 ut = (struct uthread *)get_bsdthread_info(current_thread());
1c79356b 2735 mask = sigmask(signum);
9bccf70c 2736 ut->uu_siglist &= ~mask;
91447636 2737 catcher = ps->ps_sigact[signum];
91447636 2738 if (catcher == SIG_DFL) {
1c79356b 2739 /*
91447636 2740 * Default catcher, where the default is to kill
1c79356b
A
2741 * the process. (Other cases were ignored above.)
2742 */
2d21ac55
A
2743 sig_lock_to_exit(p);
2744 p->p_acflag |= AXSIG;
2745 if (sigprop[signum] & SA_CORE) {
2746 p->p_sigacts->ps_sig = signum;
2747 proc_signalend(p, 1);
2748 proc_unlock(p);
39236c6e 2749 if (coredump(p, 0, 0) == 0)
2d21ac55
A
2750 signum |= WCOREFLAG;
2751 } else {
2752 proc_signalend(p, 1);
2753 proc_unlock(p);
2754 }
2755
b0d623f7
A
2756#if CONFIG_DTRACE
2757 bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));
2758
2759 ut->t_dtrace_siginfo.si_signo = signum;
2760 ut->t_dtrace_siginfo.si_pid = p->si_pid;
2761 ut->t_dtrace_siginfo.si_uid = p->si_uid;
2762 ut->t_dtrace_siginfo.si_status = WEXITSTATUS(p->si_status);
2d21ac55 2763
316670eb
A
2764 /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
2765 switch (signum) {
2766 case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
2767 DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
2768 break;
2769 default:
2770 break;
2771 }
2772
2773
b0d623f7 2774 DTRACE_PROC3(signal__handle, int, signum, siginfo_t *, &(ut->t_dtrace_siginfo),
2d21ac55 2775 void (*)(void), SIG_DFL);
b0d623f7 2776#endif
2d21ac55 2777
b0d623f7
A
2778 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_FRCEXIT) | DBG_FUNC_NONE,
2779 p->p_pid, W_EXITCODE(0, signum), 3, 0, 0);
2d21ac55 2780 exit1(p, W_EXITCODE(0, signum), (int *)NULL);
316670eb 2781 proc_lock(p);
1c79356b 2782 return;
1c79356b
A
2783 } else {
2784 /*
2785 * If we get here, the signal must be caught.
2786 */
2787#if DIAGNOSTIC
91447636 2788 if (catcher == SIG_IGN || (ut->uu_sigmask & mask))
1c79356b
A
2789 log(LOG_WARNING,
2790 "postsig: processing masked or ignored signal\n");
2791#endif
2d21ac55 2792
1c79356b
A
2793 /*
2794 * Set the new mask value and also defer further
 2795 * occurrences of this signal.
2796 *
2797 * Special case: user has done a sigpause. Here the
2798 * current mask is not of interest, but rather the
2799 * mask from before the sigpause is what we want
2800 * restored after the signal processing is completed.
2801 */
91447636 2802 if (ut->uu_flag & UT_SAS_OLDMASK) {
9bccf70c 2803 returnmask = ut->uu_oldmask;
91447636 2804 ut->uu_flag &= ~UT_SAS_OLDMASK;
9bccf70c 2805 ut->uu_oldmask = 0;
1c79356b 2806 } else
9bccf70c
A
2807 returnmask = ut->uu_sigmask;
2808 ut->uu_sigmask |= ps->ps_catchmask[signum];
2809 if ((ps->ps_signodefer & mask) == 0)
2810 ut->uu_sigmask |= mask;
2811 if ((signum != SIGILL) && (signum != SIGTRAP) && (ps->ps_sigreset & mask)) {
2812 if ((signum != SIGCONT) && (sigprop[signum] & SA_IGNORE))
2813 p->p_sigignore |= mask;
2814 ps->ps_sigact[signum] = SIG_DFL;
2815 ps->ps_siginfo &= ~mask;
2816 ps->ps_signodefer &= ~mask;
2817 }
9bccf70c 2818
1c79356b
A
2819 if (ps->ps_sig != signum) {
2820 code = 0;
2821 } else {
2822 code = ps->ps_code;
2823 ps->ps_code = 0;
2824 }
b0d623f7 2825 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_nsignals);
91447636 2826 sendsig(p, catcher, signum, returnmask, code);
1c79356b 2827 }
2d21ac55 2828 proc_signalend(p, 1);
1c79356b
A
2829}
2830
2831/*
2d21ac55
A
2832 * Attach a signal knote to the list of knotes for this process.
2833 *
2834 * Signal knotes share the knote list with proc knotes. This
2835 * could be avoided by using a signal-specific knote list, but
2836 * probably isn't worth the trouble.
1c79356b 2837 */
55e303ae
A
2838
2839static int
2840filt_sigattach(struct knote *kn)
2841{
2d21ac55
A
2842 proc_t p = current_proc(); /* can attach only to oneself */
2843
2844 proc_klist_lock();
55e303ae
A
2845
2846 kn->kn_ptr.p_proc = p;
2847 kn->kn_flags |= EV_CLEAR; /* automatically set */
2848
55e303ae 2849 KNOTE_ATTACH(&p->p_klist, kn);
2d21ac55
A
2850
2851 proc_klist_unlock();
55e303ae
A
2852
2853 return (0);
2854}
2855
2d21ac55
A
2856/*
2857 * remove the knote from the process list, if it hasn't already
2858 * been removed by exit processing.
2859 */
2860
55e303ae
A
2861static void
2862filt_sigdetach(struct knote *kn)
2863{
2d21ac55 2864 proc_t p = kn->kn_ptr.p_proc;
55e303ae 2865
2d21ac55
A
2866 proc_klist_lock();
2867 kn->kn_ptr.p_proc = NULL;
55e303ae 2868 KNOTE_DETACH(&p->p_klist, kn);
2d21ac55 2869 proc_klist_unlock();
55e303ae
A
2870}
2871
2872/*
2d21ac55
A
2873 * Post an event to the signal filter. Because we share the same list
2874 * as process knotes, we have to filter out and handle only signal events.
2875 *
2876 * We assume that we process fdfree() before we post the NOTE_EXIT for
2877 * a process during exit. Therefore, since signal filters can only be
2878 * set up "in-process", we should have already torn down the kqueue
2879 * hosting the EVFILT_SIGNAL knote and should never see NOTE_EXIT.
55e303ae
A
2880 */
2881static int
2882filt_signal(struct knote *kn, long hint)
2883{
2884
2885 if (hint & NOTE_SIGNAL) {
2886 hint &= ~NOTE_SIGNAL;
2887
91447636 2888 if (kn->kn_id == (unsigned int)hint)
55e303ae 2889 kn->kn_data++;
2d21ac55
A
2890 } else if (hint & NOTE_EXIT) {
2891 panic("filt_signal: detected NOTE_EXIT event");
55e303ae 2892 }
2d21ac55 2893
55e303ae
A
2894 return (kn->kn_data != 0);
2895}
2896
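/*
 * EVFILT_SIGNAL touch routine: on EVENT_REGISTER, refresh the saved
 * filter flags and data from the incoming kevent; on EVENT_PROCESS,
 * copy the knote out to the caller and, for EV_CLEAR knotes, reset the
 * counters.  Both paths run under the proc klist lock.
 */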
b0d623f7 2897static void
3e170ce0 2898filt_signaltouch(struct knote *kn, struct kevent_internal_s *kev, long type)
b0d623f7
A
2899{
2900 proc_klist_lock();
2901 switch (type) {
2902 case EVENT_REGISTER:
2903 kn->kn_sfflags = kev->fflags;
2904 kn->kn_sdata = kev->data;
2905 break;
2906 case EVENT_PROCESS:
2907 *kev = kn->kn_kevent;
2908 if (kn->kn_flags & EV_CLEAR) {
2909 kn->kn_data = 0;
2910 kn->kn_fflags = 0;
2911 }
2912 break;
2913 default:
3e170ce0 2914 panic("filt_signaltouch() - invalid type (%ld)", type);
b0d623f7
A
2915 break;
2916 }
2917 proc_klist_unlock();
2918}
2919
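/*
 * BSD portion of the AST handler for a thread: charge any deferred
 * profiling tick, decrement the per-process virtual timers (posting
 * SIGVTALRM, SIGPROF or SIGXCPU as they expire), service DTrace-requested
 * signal/stop/resume actions, and finally deliver any pending signals via
 * issignal_locked()/postsig_locked().  Also runs bsdinit_task() once.
 */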
1c79356b 2920void
2d21ac55 2921bsd_ast(thread_t thread)
1c79356b 2922{
2d21ac55
A
2923 proc_t p = current_proc();
2924 struct uthread *ut = get_bsdthread_info(thread);
1c79356b 2925 int signum;
91447636 2926 user_addr_t pc;
91447636 2927 static int bsd_init_done = 0;
1c79356b
A
2928
2929 if (p == NULL)
2930 return;
2931
1c79356b
A
2932 if ((p->p_flag & P_OWEUPC) && (p->p_flag & P_PROFIL)) {
2933 pc = get_useraddr();
2934 addupc_task(p, pc, 1);
b0d623f7 2935 OSBitAndAtomic(~((uint32_t)P_OWEUPC), &p->p_flag);
1c79356b
A
2936 }
2937
2d21ac55
A
2938 if (timerisset(&p->p_vtimer_user.it_value)) {
2939 uint32_t microsecs;
2940
2941 task_vtimer_update(p->task, TASK_VTIMER_USER, &microsecs);
2942
2943 if (!itimerdecr(p, &p->p_vtimer_user, microsecs)) {
2944 if (timerisset(&p->p_vtimer_user.it_value))
2945 task_vtimer_set(p->task, TASK_VTIMER_USER);
2946 else
2947 task_vtimer_clear(p->task, TASK_VTIMER_USER);
2948
3e170ce0 2949 psignal_try_thread(p, thread, SIGVTALRM);
b0d623f7 2950 }
9bccf70c 2951 }
1c79356b 2952
2d21ac55
A
2953 if (timerisset(&p->p_vtimer_prof.it_value)) {
2954 uint32_t microsecs;
1c79356b 2955
2d21ac55 2956 task_vtimer_update(p->task, TASK_VTIMER_PROF, &microsecs);
1c79356b 2957
2d21ac55
A
2958 if (!itimerdecr(p, &p->p_vtimer_prof, microsecs)) {
2959 if (timerisset(&p->p_vtimer_prof.it_value))
2960 task_vtimer_set(p->task, TASK_VTIMER_PROF);
2961 else
2962 task_vtimer_clear(p->task, TASK_VTIMER_PROF);
2963
3e170ce0 2964 psignal_try_thread(p, thread, SIGPROF);
2d21ac55 2965 }
b0d623f7 2966 }
1c79356b 2967
2d21ac55
A
2968 if (timerisset(&p->p_rlim_cpu)) {
2969 struct timeval tv;
1c79356b 2970
2d21ac55 2971 task_vtimer_update(p->task, TASK_VTIMER_RLIM, (uint32_t *) &tv.tv_usec);
1c79356b 2972
2d21ac55
A
2973 proc_spinlock(p);
2974 if (p->p_rlim_cpu.tv_sec > 0 || p->p_rlim_cpu.tv_usec > tv.tv_usec) {
2975 tv.tv_sec = 0;
2976 timersub(&p->p_rlim_cpu, &tv, &p->p_rlim_cpu);
2977 proc_spinunlock(p);
2978 } else {
2979
2980 timerclear(&p->p_rlim_cpu);
2981 proc_spinunlock(p);
2982
2983 task_vtimer_clear(p->task, TASK_VTIMER_RLIM);
2984
3e170ce0 2985 psignal_try_thread(p, thread, SIGXCPU);
2d21ac55
A
2986 }
2987 }
2988
b0d623f7
A
2989#if CONFIG_DTRACE
2990 if (ut->t_dtrace_sig) {
2991 uint8_t dt_action_sig = ut->t_dtrace_sig;
2992 ut->t_dtrace_sig = 0;
2993 psignal(p, dt_action_sig);
2994 }
6d2010ae 2995
b0d623f7 2996 if (ut->t_dtrace_stop) {
6d2010ae
A
2997 ut->t_dtrace_stop = 0;
2998 proc_lock(p);
2999 p->p_dtrace_stop = 1;
3000 proc_unlock(p);
39236c6e 3001 (void)task_suspend_internal(p->task);
6d2010ae
A
3002 }
3003
3004 if (ut->t_dtrace_resumepid) {
3005 proc_t resumeproc = proc_find(ut->t_dtrace_resumepid);
3006 ut->t_dtrace_resumepid = 0;
3007 if (resumeproc != PROC_NULL) {
3008 proc_lock(resumeproc);
3009 /* We only act on processes stopped by dtrace */
3010 if (resumeproc->p_dtrace_stop) {
3011 resumeproc->p_dtrace_stop = 0;
3012 proc_unlock(resumeproc);
39236c6e 3013 task_resume_internal(resumeproc->task);
6d2010ae
A
3014 }
3015 else {
3016 proc_unlock(resumeproc);
3017 }
3018 proc_rele(resumeproc);
3019 }
b0d623f7 3020 }
6d2010ae 3021
b0d623f7
A
3022#endif /* CONFIG_DTRACE */
3023
316670eb 3024 proc_lock(p);
2d21ac55 3025 if (CHECK_SIGNALS(p, current_thread(), ut)) {
316670eb
A
3026 while ( (signum = issignal_locked(p)) )
3027 postsig_locked(signum);
2d21ac55 3028 }
316670eb 3029 proc_unlock(p);
2d21ac55
A
3030
3031 if (!bsd_init_done) {
3032 bsd_init_done = 1;
3033 bsdinit_task();
3034 }
1c79356b 3035
1c79356b
A
3036}
3037
593a1d5f 3038/* ptrace set runnable */
1c79356b 3039void
2d21ac55 3040pt_setrunnable(proc_t p)
1c79356b 3041{
2d21ac55 3042 task_t task;
1c79356b
A
3043
3044 task = p->task;
3045
2d21ac55
A
3046 if (p->p_lflag & P_LTRACED) {
3047 proc_lock(p);
1c79356b 3048 p->p_stat = SRUN;
2d21ac55 3049 proc_unlock(p);
1c79356b
A
3050 if (p->sigwait) {
3051 wakeup((caddr_t)&(p->sigwait));
593a1d5f
A
3052 if ((p->p_lflag & P_LSIGEXC) == 0) { // 5878479
3053 task_release(task);
3054 }
1c79356b
A
3055 }
3056 }
3057}
9bccf70c
A
3058
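/*
 * Package a (code, subcode) pair into Mach exception codes and hand
 * them to bsd_exception() for delivery of exception 'exc'.
 */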
3059kern_return_t
3060do_bsdexception(
3061 int exc,
3062 int code,
3063 int sub)
3064{
2d21ac55 3065 mach_exception_data_type_t codes[EXCEPTION_CODE_MAX];
9bccf70c
A
3066
3067 codes[0] = code;
3068 codes[1] = sub;
3069 return(bsd_exception(exc, codes, 2));
3070}
3071
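/*
 * Return the set of signals in 'mask' that are pending and unblocked on
 * some thread of 'p' and not ignored by the process.  Returns 0 if the
 * process is already in proc exit; during a vfork only the vfork-active
 * thread is consulted.
 */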
91447636 3072int
2d21ac55 3073proc_pendingsignals(proc_t p, sigset_t mask)
91447636
A
3074{
3075 struct uthread * uth;
3076 thread_t th;
3077 sigset_t bits = 0;
91447636 3078
2d21ac55 3079 proc_lock(p);
91447636 3080 /* If the process is in proc exit return no signal info */
2d21ac55
A
3081 if (p->p_lflag & P_LPEXIT) {
3082 goto out;
3083 }
91447636 3084
2d21ac55 3085 if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
91447636
A
3086 th = p->p_vforkact;
3087 uth = (struct uthread *)get_bsdthread_info(th);
3088 if (uth) {
3089 bits = (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
3090 }
3091 goto out;
3092 }
3093
3094 bits = 0;
3095 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
3096 bits |= (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
3097 }
3098out:
2d21ac55 3099 proc_unlock(p);
91447636
A
3100 return(bits);
3101}
3102
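/*
 * Same computation as above, restricted to the single thread 'th'.
 */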
3103int
3104thread_issignal(proc_t p, thread_t th, sigset_t mask)
3105{
3106 struct uthread * uth;
3107 sigset_t bits=0;
3108
2d21ac55
A
3109 proc_lock(p);
3110 uth = (struct uthread *)get_bsdthread_info(th);
3111 if (uth) {
3112 bits = (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
3113 }
3114 proc_unlock(p);
3115 return(bits);
3116}
91447636 3117
2d21ac55
A
3118/*
3119 * Allow external reads of the sigprop array.
3120 */
3121int
3122hassigprop(int sig, int prop)
3123{
3124 return (sigprop[sig] & prop);
3125}
3126
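/*
 * Deliver 'sig' to the owner named by 'pgid': a negative value names a
 * process group (signaled via gsignal()), a positive value names a
 * single process (signaled via psignal()).
 */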
3127void
3128pgsigio(pid_t pgid, int sig)
3129{
3130 proc_t p = PROC_NULL;
3131
3132 if (pgid < 0)
3133 gsignal(-(pgid), sig);
3134
3135 else if (pgid > 0 && (p = proc_find(pgid)) != 0)
3136 psignal(p, sig);
3137 if (p != PROC_NULL)
3138 proc_rele(p);
3139}
3140
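/*
 * proc_signalstart()/proc_signalend() serialize signal delivery to 'p':
 * a caller waits until no other thread holds P_LINSIGNAL, then records
 * itself as the signal holder.  'locked' indicates whether the caller
 * already holds the proc lock.
 */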
2d21ac55
A
3141void
3142proc_signalstart(proc_t p, int locked)
3143{
6d2010ae 3144 if (!locked)
2d21ac55 3145 proc_lock(p);
fe8ab488
A
3146
3147 if(p->p_signalholder == current_thread())
3148 panic("proc_signalstart: thread attempting to signal a process for which it holds the signal lock");
3149
6d2010ae
A
3150 p->p_sigwaitcnt++;
3151 while ((p->p_lflag & P_LINSIGNAL) == P_LINSIGNAL)
2d21ac55 3152 msleep(&p->p_sigmask, &p->p_mlock, 0, "proc_signstart", NULL);
6d2010ae
A
3153 p->p_sigwaitcnt--;
3154
2d21ac55 3155 p->p_lflag |= P_LINSIGNAL;
2d21ac55 3156 p->p_signalholder = current_thread();
6d2010ae 3157 if (!locked)
2d21ac55 3158 proc_unlock(p);
2d21ac55
A
3159}
3160
3161void
3162proc_signalend(proc_t p, int locked)
3163{
6d2010ae 3164 if (!locked)
2d21ac55
A
3165 proc_lock(p);
3166 p->p_lflag &= ~P_LINSIGNAL;
3167
6d2010ae 3168 if (p->p_sigwaitcnt > 0)
2d21ac55 3169 wakeup(&p->p_sigmask);
6d2010ae 3170
2d21ac55 3171 p->p_signalholder = NULL;
6d2010ae 3172 if (!locked)
2d21ac55
A
3173 proc_unlock(p);
3174}
3175
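/*
 * Claim 'p' for exit processing: record the exiting thread, then hold
 * and wait the task so the remaining threads are suspended before
 * exit1() proceeds.  Called and returns with the proc lock held.
 */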
2d21ac55
A
3176void
3177sig_lock_to_exit(proc_t p)
3178{
3179 thread_t self = current_thread();
3180
3181 p->exit_thread = self;
3182 proc_unlock(p);
316670eb
A
3183
3184 task_hold(p->task);
3185 task_wait(p->task, FALSE);
3186
2d21ac55 3187 proc_lock(p);
91447636
A
3188}
3189
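/*
 * Try to enter signal-processing state for 'p' (proc lock held on entry
 * and on return): sleep while another thread is in sigwait or the
 * process is exiting.  Returns 1 on success, 0 if the process is
 * exiting, and -1 if this thread has been asked to abort.
 */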
2d21ac55
A
3190int
3191sig_try_locked(proc_t p)
3192{
3193 thread_t self = current_thread();
3194
3195 while (p->sigwait || p->exit_thread) {
3196 if (p->exit_thread) {
3197 return(0);
3198 }
3199 msleep((caddr_t)&p->sigwait_thread, &p->p_mlock, PCATCH | PDROP, 0, 0);
3200 if (thread_should_abort(self)) {
3201 /*
3202 * Terminate request - clean up.
3203 */
3204 proc_lock(p);
3205 return -1;
3206 }
3207 proc_lock(p);
3208 }
3209 return 1;
3210}