1 /*
2 * Copyright (c) 1995-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1986, 1989, 1991, 1993
30 * The Regents of the University of California. All rights reserved.
31 * (c) UNIX System Laboratories, Inc.
32 * All or some portions of this file are derived from material licensed
33 * to the University of California by American Telephone and Telegraph
34 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
35 * the permission of UNIX System Laboratories, Inc.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement:
47 * This product includes software developed by the University of
48 * California, Berkeley and its contributors.
49 * 4. Neither the name of the University nor the names of its contributors
50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE.
64 *
65 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94
66 */
67 /*
68 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
69 * support for mandatory and extensible security protections. This notice
70 * is included in support of clause 2.2 (b) of the Apple Public License,
71 * Version 2.0.
72 */
73
74 #define SIGPROP /* include signal properties table */
75 #include <sys/param.h>
76 #include <sys/resourcevar.h>
77 #include <sys/proc_internal.h>
78 #include <sys/kauth.h>
79 #include <sys/systm.h>
80 #include <sys/timeb.h>
81 #include <sys/times.h>
82 #include <sys/acct.h>
83 #include <sys/file_internal.h>
84 #include <sys/kernel.h>
85 #include <sys/wait.h>
86 #include <sys/signalvar.h>
87 #include <sys/syslog.h>
88 #include <sys/stat.h>
89 #include <sys/lock.h>
90 #include <sys/kdebug.h>
91 #include <sys/reason.h>
92
93 #include <sys/mount.h>
94 #include <sys/sysproto.h>
95
96 #include <security/audit/audit.h>
97
98 #include <kern/cpu_number.h>
99
100 #include <sys/vm.h>
101 #include <sys/user.h> /* for coredump */
102 #include <kern/ast.h> /* for APC support */
103 #include <kern/kalloc.h>
104 #include <kern/task.h> /* extern void *get_bsdtask_info(task_t); */
105 #include <kern/thread.h>
106 #include <kern/sched_prim.h>
107 #include <kern/thread_call.h>
108 #include <kern/policy_internal.h>
109
110 #include <mach/exception.h>
111 #include <mach/task.h>
112 #include <mach/thread_act.h>
113 #include <libkern/OSAtomic.h>
114
115 #include <sys/sdt.h>
116 #include <sys/codesign.h>
117 #include <sys/random.h>
118 #include <libkern/section_keywords.h>
119
120 #if CONFIG_MACF
121 #include <security/mac_framework.h>
122 #endif
123
124 /*
125 * Missing prototypes that Mach should export
126 *
127 * +++
128 */
129 extern int thread_enable_fpe(thread_t act, int onoff);
130 extern thread_t port_name_to_thread(mach_port_name_t port_name);
131 extern kern_return_t get_signalact(task_t, thread_t *, int);
132 extern unsigned int get_useraddr(void);
133 extern boolean_t task_did_exec(task_t task);
134 extern boolean_t task_is_exec_copy(task_t task);
135
136 /*
137 * ---
138 */
139
140 extern void doexception(int exc, mach_exception_code_t code,
141 mach_exception_subcode_t sub);
142
143 static void stop(proc_t, proc_t);
144 static int cansignal_nomac(proc_t, kauth_cred_t, proc_t, int);
145 int cansignal(proc_t, kauth_cred_t, proc_t, int);
146 int killpg1(proc_t, int, int, int, int);
147 kern_return_t do_bsdexception(int, int, int);
148 void __posix_sem_syscall_return(kern_return_t);
149 char *proc_name_address(void *p);
150
151 /* implementations in osfmk/kern/sync_sema.c. We do not want port.h in this scope, so void * them */
152 kern_return_t semaphore_timedwait_signal_trap_internal(mach_port_name_t, mach_port_name_t, unsigned int, clock_res_t, void (*)(kern_return_t));
153 kern_return_t semaphore_timedwait_trap_internal(mach_port_name_t, unsigned int, clock_res_t, void (*)(kern_return_t));
154 kern_return_t semaphore_wait_signal_trap_internal(mach_port_name_t, mach_port_name_t, void (*)(kern_return_t));
155 kern_return_t semaphore_wait_trap_internal(mach_port_name_t, void (*)(kern_return_t));
156
157 static int filt_sigattach(struct knote *kn, struct kevent_internal_s *kev);
158 static void filt_sigdetach(struct knote *kn);
159 static int filt_signal(struct knote *kn, long hint);
160 static int filt_signaltouch(struct knote *kn, struct kevent_internal_s *kev);
161 static int filt_signalprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
162
163 SECURITY_READ_ONLY_EARLY(struct filterops) sig_filtops = {
164 .f_attach = filt_sigattach,
165 .f_detach = filt_sigdetach,
166 .f_event = filt_signal,
167 .f_touch = filt_signaltouch,
168 .f_process = filt_signalprocess,
169 };
170
171 /* structures and functions for killpg1 iteration callbacks and filters */
172 struct killpg1_filtargs {
173 bool posix;
174 proc_t curproc;
175 };
176
177 struct killpg1_iterargs {
178 proc_t curproc;
179 kauth_cred_t uc;
180 int signum;
181 int nfound;
182 };
183
184 static int killpg1_allfilt(proc_t p, void * arg);
185 static int killpg1_pgrpfilt(proc_t p, __unused void * arg);
186 static int killpg1_callback(proc_t p, void * arg);
187
188 static int pgsignal_filt(proc_t p, void * arg);
189 static int pgsignal_callback(proc_t p, void * arg);
190 static kern_return_t get_signalthread(proc_t, int, thread_t *);
191
192
193 /* flags for psignal_internal */
194 #define PSIG_LOCKED 0x1
195 #define PSIG_VFORK 0x2
196 #define PSIG_THREAD 0x4
197 #define PSIG_TRY_THREAD 0x8
198
199 static os_reason_t build_signal_reason(int signum, const char *procname);
200 static void psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum, os_reason_t signal_reason);
201
202 /*
203 * NOTE: Source and target may *NOT* overlap! (target is smaller)
204 */
205 static void
206 sigaltstack_kern_to_user32(struct kern_sigaltstack *in, struct user32_sigaltstack *out)
207 {
208 out->ss_sp = CAST_DOWN_EXPLICIT(user32_addr_t, in->ss_sp);
209 out->ss_size = CAST_DOWN_EXPLICIT(user32_size_t, in->ss_size);
210 out->ss_flags = in->ss_flags;
211 }
212
213 static void
214 sigaltstack_kern_to_user64(struct kern_sigaltstack *in, struct user64_sigaltstack *out)
215 {
216 out->ss_sp = in->ss_sp;
217 out->ss_size = in->ss_size;
218 out->ss_flags = in->ss_flags;
219 }
220
221 /*
222 * NOTE: Source and target are permitted to overlap! (source is smaller);
223 * this works because we copy fields in order from the end of the struct to
224 * the beginning.
225 */
226 static void
227 sigaltstack_user32_to_kern(struct user32_sigaltstack *in, struct kern_sigaltstack *out)
228 {
229 out->ss_flags = in->ss_flags;
230 out->ss_size = in->ss_size;
231 out->ss_sp = CAST_USER_ADDR_T(in->ss_sp);
232 }
233 static void
234 sigaltstack_user64_to_kern(struct user64_sigaltstack *in, struct kern_sigaltstack *out)
235 {
236 out->ss_flags = in->ss_flags;
237 out->ss_size = in->ss_size;
238 out->ss_sp = in->ss_sp;
239 }
240
241 static void
242 sigaction_kern_to_user32(struct kern_sigaction *in, struct user32_sigaction *out)
243 {
244 /* This assumes 32 bit __sa_handler is of type sig_t */
245 out->__sigaction_u.__sa_handler = CAST_DOWN_EXPLICIT(user32_addr_t, in->__sigaction_u.__sa_handler);
246 out->sa_mask = in->sa_mask;
247 out->sa_flags = in->sa_flags;
248 }
249 static void
250 sigaction_kern_to_user64(struct kern_sigaction *in, struct user64_sigaction *out)
251 {
252 /* The 64-bit __sa_handler is passed through without conversion */
253 out->__sigaction_u.__sa_handler = in->__sigaction_u.__sa_handler;
254 out->sa_mask = in->sa_mask;
255 out->sa_flags = in->sa_flags;
256 }
257
258 static void
259 __sigaction_user32_to_kern(struct __user32_sigaction *in, struct __kern_sigaction *out)
260 {
261 out->__sigaction_u.__sa_handler = CAST_USER_ADDR_T(in->__sigaction_u.__sa_handler);
262 out->sa_tramp = CAST_USER_ADDR_T(in->sa_tramp);
263 out->sa_mask = in->sa_mask;
264 out->sa_flags = in->sa_flags;
265
266 kern_return_t kr;
267 kr = machine_thread_function_pointers_convert_from_user(current_thread(),
268 &out->sa_tramp, 1);
269 assert(kr == KERN_SUCCESS);
270 }
271
272 static void
273 __sigaction_user64_to_kern(struct __user64_sigaction *in, struct __kern_sigaction *out)
274 {
275 out->__sigaction_u.__sa_handler = in->__sigaction_u.__sa_handler;
276 out->sa_tramp = in->sa_tramp;
277 out->sa_mask = in->sa_mask;
278 out->sa_flags = in->sa_flags;
279
280 kern_return_t kr;
281 kr = machine_thread_function_pointers_convert_from_user(current_thread(),
282 &out->sa_tramp, 1);
283 assert(kr == KERN_SUCCESS);
284 }
285
286 #if SIGNAL_DEBUG
287 void ram_printf(int);
288 int ram_debug = 0;
289 unsigned int rdebug_proc = 0;
290 void
291 ram_printf(int x)
292 {
293 printf("x is %d", x);
294 }
295 #endif /* SIGNAL_DEBUG */
296
297
298 void
299 signal_setast(thread_t sig_actthread)
300 {
301 act_set_astbsd(sig_actthread);
302 }
303
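/*
 * Policy check for signal delivery, ignoring MAC: returns nonzero if
 * `src`, holding credential `uc_src`, may deliver `signum` to `dst`.
 * A process may always signal itself; nobody may SIGKILL initproc;
 * root may signal anyone else; SIGCONT may cross within a session;
 * otherwise the sender's real or effective UID must match the
 * target's real or saved UID.
 */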
304 static int
305 cansignal_nomac(proc_t src, kauth_cred_t uc_src, proc_t dst, int signum)
306 {
307 /* you can signal yourself */
308 if (src == dst) {
309 return 1;
310 }
311
312 /* you can't send the init proc SIGKILL, even if root */
313 if (signum == SIGKILL && dst == initproc) {
314 return 0;
315 }
316
317 /* otherwise, root can always signal */
318 if (kauth_cred_issuser(uc_src)) {
319 return 1;
320 }
321
322 /* processes in the same session can send SIGCONT to each other */
323 {
324 struct session *sess_src = SESSION_NULL;
325 struct session *sess_dst = SESSION_NULL;
326
327 /* The session field is protected by the list lock. */
328 proc_list_lock();
329 if (src->p_pgrp != PGRP_NULL) {
330 sess_src = src->p_pgrp->pg_session;
331 }
332 if (dst->p_pgrp != PGRP_NULL) {
333 sess_dst = dst->p_pgrp->pg_session;
334 }
335 proc_list_unlock();
336
337 /* allow SIGCONT within session and for processes without session */
338 if (signum == SIGCONT && sess_src == sess_dst) {
339 return 1;
340 }
341 }
342
343 /* the source process must be authorized to signal the target */
344 {
345 int allowed = 0;
346 kauth_cred_t uc_dst = NOCRED, uc_ref = NOCRED;
347
348 uc_dst = uc_ref = kauth_cred_proc_ref(dst);
349
350 /*
351 * If the real or effective UID of the sender matches the real or saved
352 * UID of the target, allow the signal to be sent.
353 */
354 if (kauth_cred_getruid(uc_src) == kauth_cred_getruid(uc_dst) ||
355 kauth_cred_getruid(uc_src) == kauth_cred_getsvuid(uc_dst) ||
356 kauth_cred_getuid(uc_src) == kauth_cred_getruid(uc_dst) ||
357 kauth_cred_getuid(uc_src) == kauth_cred_getsvuid(uc_dst)) {
358 allowed = 1;
359 }
360
361 if (uc_ref != NOCRED) {
362 kauth_cred_unref(&uc_ref);
363 uc_ref = NOCRED;
364 }
365
366 return allowed;
367 }
368 }
369
370 /*
371 * Can process `src`, with ucred `uc_src`, send the signal `signum` to process
372 * `dst`? The ucred is referenced by the caller so internal fields can be used
373 * safely.
374 */
375 int
376 cansignal(proc_t src, kauth_cred_t uc_src, proc_t dst, int signum)
377 {
378 #if CONFIG_MACF
379 if (mac_proc_check_signal(src, dst, signum)) {
380 return 0;
381 }
382 #endif
383
384 return cansignal_nomac(src, uc_src, dst, signum);
385 }
386
387 /*
388 * <rdar://problem/21952708> Some signals can be restricted from being handled,
389 * forcing the default action for that signal. This behavior applies only to
390 * non-root (EUID != 0) processes, and is configured with the "sigrestrict=x"
391 * bootarg:
392 *
393 * 0 (default): Disallow use of restricted signals. Trying to register a handler
394 * returns ENOTSUP, which userspace may use to take special action (e.g. abort).
395 * 1: As above, but return EINVAL. Restricted signals behave similarly to SIGKILL.
396 * 2: Usual POSIX semantics.
397 */
398 unsigned sigrestrict_arg = 0;
399
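/*
 * Illustrative userspace sketch (not kernel code): under the default
 * sigrestrict policy, an attempt to register a handler for a restricted
 * signal fails, and the caller can detect that before aborting:
 *
 *	struct sigaction sa = { .sa_handler = my_handler };
 *	if (sigaction(signum, &sa, NULL) == -1 && errno == ENOTSUP) {
 *		// restricted signal; the default action will apply
 *	}
 */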
400 #if PLATFORM_WatchOS
401 static int
402 sigrestrictmask(void)
403 {
404 if (kauth_getuid() != 0 && sigrestrict_arg != 2) {
405 return SIGRESTRICTMASK;
406 }
407 return 0;
408 }
409
410 static int
411 signal_is_restricted(proc_t p, int signum)
412 {
413 if (sigmask(signum) & sigrestrictmask()) {
414 if (sigrestrict_arg == 0 &&
415 task_get_apptype(p->task) == TASK_APPTYPE_APP_DEFAULT) {
416 return ENOTSUP;
417 } else {
418 return EINVAL;
419 }
420 }
421 return 0;
422 }
423
424 #else
425
426 static inline int
427 signal_is_restricted(proc_t p, int signum)
428 {
429 (void)p;
430 (void)signum;
431 return 0;
432 }
433 #endif /* !PLATFORM_WatchOS */
434
435 /*
436 * Returns: 0 Success
437 * EINVAL
438 * copyout:EFAULT
439 * copyin:EFAULT
440 *
441 * Notes: Uses current thread as a parameter to inform PPC to enable
442 * FPU exceptions via setsigvec(); this operation is not proxy
443 * safe!
444 */
445 /* ARGSUSED */
446 int
447 sigaction(proc_t p, struct sigaction_args *uap, __unused int32_t *retval)
448 {
449 struct kern_sigaction vec;
450 struct __kern_sigaction __vec;
451
452 struct kern_sigaction *sa = &vec;
453 struct sigacts *ps = p->p_sigacts;
454
455 int signum;
456 int bit, error = 0;
457 uint32_t sigreturn_validation = PS_SIGRETURN_VALIDATION_DEFAULT;
458
459 signum = uap->signum;
460 if (signum <= 0 || signum >= NSIG ||
461 signum == SIGKILL || signum == SIGSTOP) {
462 return EINVAL;
463 }
464
465 if (uap->nsa) {
466 if (IS_64BIT_PROCESS(p)) {
467 struct __user64_sigaction __vec64;
468 error = copyin(uap->nsa, &__vec64, sizeof(__vec64));
469 __sigaction_user64_to_kern(&__vec64, &__vec);
470 } else {
471 struct __user32_sigaction __vec32;
472 error = copyin(uap->nsa, &__vec32, sizeof(__vec32));
473 __sigaction_user32_to_kern(&__vec32, &__vec);
474 }
475 if (error) {
476 return error;
477 }
478
479 sigreturn_validation = (__vec.sa_flags & SA_VALIDATE_SIGRETURN_FROM_SIGTRAMP) ?
480 PS_SIGRETURN_VALIDATION_ENABLED : PS_SIGRETURN_VALIDATION_DISABLED;
481 __vec.sa_flags &= SA_USERSPACE_MASK; /* Only pass on valid sa_flags */
482
483 if ((__vec.sa_flags & SA_SIGINFO) || __vec.sa_handler != SIG_DFL) {
484 if ((error = signal_is_restricted(p, signum))) {
485 if (error == ENOTSUP) {
486 printf("%s(%d): denied attempt to register action for signal %d\n",
487 proc_name_address(p), proc_pid(p), signum);
488 }
489 return error;
490 }
491 }
492 }
493
494 if (uap->osa) {
495 sa->sa_handler = ps->ps_sigact[signum];
496 sa->sa_mask = ps->ps_catchmask[signum];
497 bit = sigmask(signum);
498 sa->sa_flags = 0;
499 if ((ps->ps_sigonstack & bit) != 0) {
500 sa->sa_flags |= SA_ONSTACK;
501 }
502 if ((ps->ps_sigintr & bit) == 0) {
503 sa->sa_flags |= SA_RESTART;
504 }
505 if (ps->ps_siginfo & bit) {
506 sa->sa_flags |= SA_SIGINFO;
507 }
508 if (ps->ps_signodefer & bit) {
509 sa->sa_flags |= SA_NODEFER;
510 }
511 if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDSTOP)) {
512 sa->sa_flags |= SA_NOCLDSTOP;
513 }
514 if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDWAIT)) {
515 sa->sa_flags |= SA_NOCLDWAIT;
516 }
517
518 if (IS_64BIT_PROCESS(p)) {
519 struct user64_sigaction vec64 = {};
520 sigaction_kern_to_user64(sa, &vec64);
521 error = copyout(&vec64, uap->osa, sizeof(vec64));
522 } else {
523 struct user32_sigaction vec32 = {};
524 sigaction_kern_to_user32(sa, &vec32);
525 error = copyout(&vec32, uap->osa, sizeof(vec32));
526 }
527 if (error) {
528 return error;
529 }
530 }
531
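/*
 * The first sigaction() call in a process latches the sigreturn
 * validation policy: the compare-exchange below succeeds only while
 * ps_sigreturn_validation still holds its DEFAULT value, so later
 * callers cannot flip an already-latched setting.
 */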
532 if (uap->nsa) {
533 uint32_t old_sigreturn_validation = atomic_load_explicit(
534 &ps->ps_sigreturn_validation, memory_order_relaxed);
535 if (old_sigreturn_validation == PS_SIGRETURN_VALIDATION_DEFAULT) {
536 atomic_compare_exchange_strong_explicit(&ps->ps_sigreturn_validation,
537 &old_sigreturn_validation, sigreturn_validation,
538 memory_order_relaxed, memory_order_relaxed);
539 }
540 error = setsigvec(p, current_thread(), signum, &__vec, FALSE);
541 }
542
543 return error;
544 }
545
546 /* Routines to manipulate bits on all threads */
547 int
548 clear_procsiglist(proc_t p, int bit, boolean_t in_signalstart)
549 {
550 struct uthread * uth;
551 thread_t thact;
552
553 proc_lock(p);
554 if (!in_signalstart) {
555 proc_signalstart(p, 1);
556 }
557
558 if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
559 thact = p->p_vforkact;
560 uth = (struct uthread *)get_bsdthread_info(thact);
561 if (uth) {
562 uth->uu_siglist &= ~bit;
563 }
564 if (!in_signalstart) {
565 proc_signalend(p, 1);
566 }
567 proc_unlock(p);
568 return 0;
569 }
570
571 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
572 uth->uu_siglist &= ~bit;
573 }
574 p->p_siglist &= ~bit;
575 if (!in_signalstart) {
576 proc_signalend(p, 1);
577 }
578 proc_unlock(p);
579
580 return 0;
581 }
582
583
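/*
 * unblock_procsigmask(), block_procsigmask() and set_procsigmask()
 * clear, set or replace `bit` in the signal mask of every thread in
 * `p` (or of only the vfork parent thread while the process is in
 * vfork), mirroring the change in the process-wide p_sigmask.
 */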
584 static int
585 unblock_procsigmask(proc_t p, int bit)
586 {
587 struct uthread * uth;
588 thread_t thact;
589
590 proc_lock(p);
591 proc_signalstart(p, 1);
592
593 if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
594 thact = p->p_vforkact;
595 uth = (struct uthread *)get_bsdthread_info(thact);
596 if (uth) {
597 uth->uu_sigmask &= ~bit;
598 }
599 p->p_sigmask &= ~bit;
600 proc_signalend(p, 1);
601 proc_unlock(p);
602 return 0;
603 }
604 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
605 uth->uu_sigmask &= ~bit;
606 }
607 p->p_sigmask &= ~bit;
608
609 proc_signalend(p, 1);
610 proc_unlock(p);
611 return 0;
612 }
613
614 static int
615 block_procsigmask(proc_t p, int bit)
616 {
617 struct uthread * uth;
618 thread_t thact;
619
620 proc_lock(p);
621 proc_signalstart(p, 1);
622
623 if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
624 thact = p->p_vforkact;
625 uth = (struct uthread *)get_bsdthread_info(thact);
626 if (uth) {
627 uth->uu_sigmask |= bit;
628 }
629 p->p_sigmask |= bit;
630 proc_signalend(p, 1);
631 proc_unlock(p);
632 return 0;
633 }
634 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
635 uth->uu_sigmask |= bit;
636 }
637 p->p_sigmask |= bit;
638
639 proc_signalend(p, 1);
640 proc_unlock(p);
641 return 0;
642 }
643
644 int
645 set_procsigmask(proc_t p, int bit)
646 {
647 struct uthread * uth;
648 thread_t thact;
649
650 proc_lock(p);
651 proc_signalstart(p, 1);
652
653 if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
654 thact = p->p_vforkact;
655 uth = (struct uthread *)get_bsdthread_info(thact);
656 if (uth) {
657 uth->uu_sigmask = bit;
658 }
659 p->p_sigmask = bit;
660 proc_signalend(p, 1);
661 proc_unlock(p);
662 return 0;
663 }
664 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
665 uth->uu_sigmask = bit;
666 }
667 p->p_sigmask = bit;
668 proc_signalend(p, 1);
669 proc_unlock(p);
670
671 return 0;
672 }
673
674 /* XXX should be static? */
675 /*
676 * Notes: The thread parameter is used in the PPC case to select the
677 * thread on which the floating point exception will be enabled
678 * or disabled. We can't simply take current_thread(), since
679 * this is called from posix_spawn() on the not currently running
680 * process/thread pair.
681 *
682 * We mark the thread parameter as unused to allow compilation without warnings
683 * on non-PPC platforms.
684 */
685 int
686 setsigvec(proc_t p, __unused thread_t thread, int signum, struct __kern_sigaction *sa, boolean_t in_sigstart)
687 {
688 struct sigacts *ps = p->p_sigacts;
689 int bit;
690
691 assert(signum < NSIG);
692
693 if ((signum == SIGKILL || signum == SIGSTOP) &&
694 sa->sa_handler != SIG_DFL) {
695 return EINVAL;
696 }
697 bit = sigmask(signum);
698 /*
699 * Change setting atomically.
700 */
701 ps->ps_sigact[signum] = sa->sa_handler;
702 ps->ps_trampact[signum] = sa->sa_tramp;
703 ps->ps_catchmask[signum] = sa->sa_mask & ~sigcantmask;
704 if (sa->sa_flags & SA_SIGINFO) {
705 ps->ps_siginfo |= bit;
706 } else {
707 ps->ps_siginfo &= ~bit;
708 }
709 if ((sa->sa_flags & SA_RESTART) == 0) {
710 ps->ps_sigintr |= bit;
711 } else {
712 ps->ps_sigintr &= ~bit;
713 }
714 if (sa->sa_flags & SA_ONSTACK) {
715 ps->ps_sigonstack |= bit;
716 } else {
717 ps->ps_sigonstack &= ~bit;
718 }
719 if (sa->sa_flags & SA_RESETHAND) {
720 ps->ps_sigreset |= bit;
721 } else {
722 ps->ps_sigreset &= ~bit;
723 }
724 if (sa->sa_flags & SA_NODEFER) {
725 ps->ps_signodefer |= bit;
726 } else {
727 ps->ps_signodefer &= ~bit;
728 }
729 if (signum == SIGCHLD) {
730 if (sa->sa_flags & SA_NOCLDSTOP) {
731 OSBitOrAtomic(P_NOCLDSTOP, &p->p_flag);
732 } else {
733 OSBitAndAtomic(~((uint32_t)P_NOCLDSTOP), &p->p_flag);
734 }
735 if ((sa->sa_flags & SA_NOCLDWAIT) || (sa->sa_handler == SIG_IGN)) {
736 OSBitOrAtomic(P_NOCLDWAIT, &p->p_flag);
737 } else {
738 OSBitAndAtomic(~((uint32_t)P_NOCLDWAIT), &p->p_flag);
739 }
740 }
741
742 /*
743 * Set bit in p_sigignore for signals that are set to SIG_IGN,
744 * and for signals set to SIG_DFL where the default is to ignore.
745 * However, don't put SIGCONT in p_sigignore,
746 * as we have to restart the process.
747 */
748 if (sa->sa_handler == SIG_IGN ||
749 (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) {
750 clear_procsiglist(p, bit, in_sigstart);
751 if (signum != SIGCONT) {
752 p->p_sigignore |= bit; /* easier in psignal */
753 }
754 p->p_sigcatch &= ~bit;
755 } else {
756 p->p_sigignore &= ~bit;
757 if (sa->sa_handler == SIG_DFL) {
758 p->p_sigcatch &= ~bit;
759 } else {
760 p->p_sigcatch |= bit;
761 }
762 }
763 return 0;
764 }
765
766 /*
767 * Initialize signal state for process 0;
768 * set to ignore signals that are ignored by default.
769 */
770 void
771 siginit(proc_t p)
772 {
773 int i;
774
775 for (i = 1; i < NSIG; i++) {
776 if (sigprop[i] & SA_IGNORE && i != SIGCONT) {
777 p->p_sigignore |= sigmask(i);
778 }
779 }
780 }
781
782 /*
783 * Reset signals for an exec of the specified process.
784 */
785 void
786 execsigs(proc_t p, thread_t thread)
787 {
788 struct sigacts *ps = p->p_sigacts;
789 int nc, mask;
790 struct uthread *ut;
791
792 ut = (struct uthread *)get_bsdthread_info(thread);
793
794 /*
795 * transfer saved signal states from the process
796 * back to the current thread.
797 *
798 * NOTE: We do this without the process locked,
799 * because we are guaranteed to be single-threaded
800 * by this point in exec and the p_siglist is
801 * only accessed by threads inside the process.
802 */
803 ut->uu_siglist |= p->p_siglist;
804 p->p_siglist = 0;
805
806 /*
807 * Reset caught signals. Held signals remain held
808 * through p_sigmask (unless they were caught,
809 * and are now ignored by default).
810 */
811 while (p->p_sigcatch) {
812 nc = ffs((long)p->p_sigcatch);
813 mask = sigmask(nc);
814 p->p_sigcatch &= ~mask;
815 if (sigprop[nc] & SA_IGNORE) {
816 if (nc != SIGCONT) {
817 p->p_sigignore |= mask;
818 }
819 ut->uu_siglist &= ~mask;
820 }
821 ps->ps_sigact[nc] = SIG_DFL;
822 }
823
824 atomic_store_explicit(&ps->ps_sigreturn_validation,
825 PS_SIGRETURN_VALIDATION_DEFAULT, memory_order_relaxed);
826 /* Generate random token value used to validate sigreturn arguments */
827 read_random(&ps->ps_sigreturn_token, sizeof(ps->ps_sigreturn_token));
828
829 /*
830 * Reset stack state to the user stack.
831 * Clear set of signals caught on the signal stack.
832 */
833 /* thread */
834 ut->uu_sigstk.ss_flags = SA_DISABLE;
835 ut->uu_sigstk.ss_size = 0;
836 ut->uu_sigstk.ss_sp = USER_ADDR_NULL;
837 ut->uu_flag &= ~UT_ALTSTACK;
838 /* process */
839 ps->ps_sigonstack = 0;
840 }
841
842 /*
843 * Manipulate signal mask.
844 * Note that we receive new mask, not pointer,
845 * and return old mask as return value;
846 * the library stub does the rest.
847 */
848 int
849 sigprocmask(proc_t p, struct sigprocmask_args *uap, __unused int32_t *retval)
850 {
851 int error = 0;
852 sigset_t oldmask, nmask;
853 user_addr_t omask = uap->omask;
854 struct uthread *ut;
855
856 ut = (struct uthread *)get_bsdthread_info(current_thread());
857 oldmask = ut->uu_sigmask;
858
859 if (uap->mask == USER_ADDR_NULL) {
860 /* just want old mask */
861 goto out;
862 }
863 error = copyin(uap->mask, &nmask, sizeof(sigset_t));
864 if (error) {
865 goto out;
866 }
867
868 switch (uap->how) {
869 case SIG_BLOCK:
870 block_procsigmask(p, (nmask & ~sigcantmask));
871 signal_setast(current_thread());
872 break;
873
874 case SIG_UNBLOCK:
875 unblock_procsigmask(p, (nmask & ~sigcantmask));
876 signal_setast(current_thread());
877 break;
878
879 case SIG_SETMASK:
880 set_procsigmask(p, (nmask & ~sigcantmask));
881 signal_setast(current_thread());
882 break;
883
884 default:
885 error = EINVAL;
886 break;
887 }
888 out:
889 if (!error && omask != USER_ADDR_NULL) {
890 copyout(&oldmask, omask, sizeof(sigset_t));
891 }
892 return error;
893 }
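
/*
 * Illustrative userspace usage of sigprocmask() (not kernel code):
 *
 *	sigset_t s, old;
 *	sigemptyset(&s);
 *	sigaddset(&s, SIGINT);
 *	sigprocmask(SIG_BLOCK, &s, &old);	// block SIGINT, save old mask
 */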
894
895 int
896 sigpending(__unused proc_t p, struct sigpending_args *uap, __unused int32_t *retval)
897 {
898 struct uthread *ut;
899 sigset_t pendlist;
900
901 ut = (struct uthread *)get_bsdthread_info(current_thread());
902 pendlist = ut->uu_siglist;
903
904 if (uap->osv) {
905 copyout(&pendlist, uap->osv, sizeof(sigset_t));
906 }
907 return 0;
908 }
909
910 /*
911 * Suspend process until signal, providing mask to be set
912 * in the meantime. Note nonstandard calling convention:
913 * libc stub passes mask, not pointer, to save a copyin.
914 */
915
916 static int
917 sigcontinue(__unused int error)
918 {
919 // struct uthread *ut = get_bsdthread_info(current_thread());
920 unix_syscall_return(EINTR);
921 }
922
923 int
924 sigsuspend(proc_t p, struct sigsuspend_args *uap, int32_t *retval)
925 {
926 __pthread_testcancel(1);
927 return sigsuspend_nocancel(p, (struct sigsuspend_nocancel_args *)uap, retval);
928 }
929
930 int
931 sigsuspend_nocancel(proc_t p, struct sigsuspend_nocancel_args *uap, __unused int32_t *retval)
932 {
933 struct uthread *ut;
934
935 ut = (struct uthread *)get_bsdthread_info(current_thread());
936
937 /*
938 * When returning from sigpause, we want
939 * the old mask to be restored after the
940 * signal handler has finished. Thus, we
941 * save it here and mark the sigacts structure
942 * to indicate this.
943 */
944 ut->uu_oldmask = ut->uu_sigmask;
945 ut->uu_flag |= UT_SAS_OLDMASK;
946 ut->uu_sigmask = (uap->mask & ~sigcantmask);
947 (void) tsleep0((caddr_t) p, PPAUSE | PCATCH, "pause", 0, sigcontinue);
948 /* always return EINTR rather than ERESTART... */
949 return EINTR;
950 }
951
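/*
 * Illustrative userspace usage of sigsuspend() (not kernel code):
 * atomically unblock SIGUSR1 and wait for a signal to arrive:
 *
 *	sigset_t waitmask;
 *	sigfillset(&waitmask);
 *	sigdelset(&waitmask, SIGUSR1);
 *	sigsuspend(&waitmask);	// always returns -1 with errno == EINTR
 */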
952
953 int
954 __disable_threadsignal(__unused proc_t p,
955 __unused struct __disable_threadsignal_args *uap,
956 __unused int32_t *retval)
957 {
958 struct uthread *uth;
959
960 uth = (struct uthread *)get_bsdthread_info(current_thread());
961
962 /* No longer valid to have any signal delivered */
963 uth->uu_flag |= (UT_NO_SIGMASK | UT_CANCELDISABLE);
964
965 return 0;
966 }
967
968 void
969 __pthread_testcancel(int presyscall)
970 {
971 thread_t self = current_thread();
972 struct uthread * uthread;
973
974 uthread = (struct uthread *)get_bsdthread_info(self);
975
976
977 uthread->uu_flag &= ~UT_NOTCANCELPT;
978
979 if ((uthread->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
980 if (presyscall != 0) {
981 unix_syscall_return(EINTR);
982 /* NOTREACHED */
983 } else {
984 thread_abort_safely(self);
985 }
986 }
987 }
988
989
990
991 int
992 __pthread_markcancel(__unused proc_t p,
993 struct __pthread_markcancel_args *uap, __unused int32_t *retval)
994 {
995 thread_act_t target_act;
996 int error = 0;
997 struct uthread *uth;
998
999 target_act = (thread_act_t)port_name_to_thread(uap->thread_port);
1000
1001 if (target_act == THR_ACT_NULL) {
1002 return ESRCH;
1003 }
1004
1005 uth = (struct uthread *)get_bsdthread_info(target_act);
1006
1007 /* if the thread is in vfork do not cancel */
1008 if ((uth->uu_flag & (UT_VFORK | UT_CANCEL | UT_CANCELED)) == 0) {
1009 uth->uu_flag |= (UT_CANCEL | UT_NO_SIGMASK);
1010 if (((uth->uu_flag & UT_NOTCANCELPT) == 0)
1011 && ((uth->uu_flag & UT_CANCELDISABLE) == 0)) {
1012 thread_abort_safely(target_act);
1013 }
1014 }
1015
1016 thread_deallocate(target_act);
1017 return error;
1018 }
1019
1020 /* if action = 0: return the cancellation state;
1021 * if marked for cancellation, mark the thread canceled
1022 * if action = 1: enable cancel handling
1023 * if action = 2: disable cancel handling
1024 */
1025 int
1026 __pthread_canceled(__unused proc_t p,
1027 struct __pthread_canceled_args *uap, __unused int32_t *retval)
1028 {
1029 thread_act_t thread;
1030 struct uthread *uth;
1031 int action = uap->action;
1032
1033 thread = current_thread();
1034 uth = (struct uthread *)get_bsdthread_info(thread);
1035
1036 switch (action) {
1037 case 1:
1038 uth->uu_flag &= ~UT_CANCELDISABLE;
1039 return 0;
1040 case 2:
1041 uth->uu_flag |= UT_CANCELDISABLE;
1042 return 0;
1043 case 0:
1044 default:
1045 /* if marked for cancellation and not disabled, latch the thread as canceled */
1046 if ((uth->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
1047 uth->uu_flag &= ~UT_CANCEL;
1048 uth->uu_flag |= (UT_CANCELED | UT_NO_SIGMASK);
1049 return 0;
1050 }
1051 return EINVAL;
1052 }
1053 return EINVAL;
1054 }
1055
1056 __attribute__((noreturn))
1057 void
1058 __posix_sem_syscall_return(kern_return_t kern_result)
1059 {
1060 int error = 0;
1061
1062 if (kern_result == KERN_SUCCESS) {
1063 error = 0;
1064 } else if (kern_result == KERN_ABORTED) {
1065 error = EINTR;
1066 } else if (kern_result == KERN_OPERATION_TIMED_OUT) {
1067 error = ETIMEDOUT;
1068 } else {
1069 error = EINVAL;
1070 }
1071 unix_syscall_return(error);
1072 /* does not return */
1073 }
1074
1075 #if OLD_SEMWAIT_SIGNAL
1076 /*
1077 * Returns: 0 Success
1078 * EINTR
1079 * ETIMEDOUT
1080 * EINVAL
1081 * EFAULT if timespec is NULL
1082 */
1083 int
1084 __old_semwait_signal(proc_t p, struct __old_semwait_signal_args *uap,
1085 int32_t *retval)
1086 {
1087 __pthread_testcancel(0);
1088 return __old_semwait_signal_nocancel(p, (struct __old_semwait_signal_nocancel_args *)uap, retval);
1089 }
1090
1091 int
1092 __old_semwait_signal_nocancel(proc_t p, struct __old_semwait_signal_nocancel_args *uap,
1093 __unused int32_t *retval)
1094 {
1095 kern_return_t kern_result;
1096 int error;
1097 mach_timespec_t then;
1098 struct timespec now;
1099 struct user_timespec ts;
1100 boolean_t truncated_timeout = FALSE;
1101
1102 if (uap->timeout) {
1103 if (IS_64BIT_PROCESS(p)) {
1104 struct user64_timespec ts64;
1105 error = copyin(uap->ts, &ts64, sizeof(ts64));
1106 ts.tv_sec = ts64.tv_sec;
1107 ts.tv_nsec = ts64.tv_nsec;
1108 } else {
1109 struct user32_timespec ts32;
1110 error = copyin(uap->ts, &ts32, sizeof(ts32));
1111 ts.tv_sec = ts32.tv_sec;
1112 ts.tv_nsec = ts32.tv_nsec;
1113 }
1114
1115 if (error) {
1116 return error;
1117 }
1118
1119 if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) {
1120 ts.tv_sec = 0xFFFFFFFF;
1121 ts.tv_nsec = 0;
1122 truncated_timeout = TRUE;
1123 }
1124
1125 if (uap->relative) {
1126 then.tv_sec = ts.tv_sec;
1127 then.tv_nsec = ts.tv_nsec;
1128 } else {
1129 nanotime(&now);
1130
1131 /* if the time has already elapsed, use a zero timespec to bail out right away */
1132 if (now.tv_sec == ts.tv_sec ?
1133 now.tv_nsec > ts.tv_nsec :
1134 now.tv_sec > ts.tv_sec) {
1135 then.tv_sec = 0;
1136 then.tv_nsec = 0;
1137 } else {
1138 then.tv_sec = ts.tv_sec - now.tv_sec;
1139 then.tv_nsec = ts.tv_nsec - now.tv_nsec;
1140 if (then.tv_nsec < 0) {
1141 then.tv_nsec += NSEC_PER_SEC;
1142 then.tv_sec--;
1143 }
1144 }
1145 }
1146
1147 if (uap->mutex_sem == 0) {
1148 kern_result = semaphore_timedwait_trap_internal((mach_port_name_t)uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
1149 } else {
1150 kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
1151 }
1152 } else {
1153 if (uap->mutex_sem == 0) {
1154 kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return);
1155 } else {
1156 kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return);
1157 }
1158 }
1159
1160 if (kern_result == KERN_SUCCESS && !truncated_timeout) {
1161 return 0;
1162 } else if (kern_result == KERN_SUCCESS && truncated_timeout) {
1163 return EINTR; /* simulate an exceptional condition because Mach doesn't support a longer timeout */
1164 } else if (kern_result == KERN_ABORTED) {
1165 return EINTR;
1166 } else if (kern_result == KERN_OPERATION_TIMED_OUT) {
1167 return ETIMEDOUT;
1168 } else {
1169 return EINVAL;
1170 }
1171 }
1172 #endif /* OLD_SEMWAIT_SIGNAL*/
1173
1174 /*
1175 * Returns: 0 Success
1176 * EINTR
1177 * ETIMEDOUT
1178 * EINVAL
1179 * EFAULT if timespec is NULL
1180 */
1181 int
1182 __semwait_signal(proc_t p, struct __semwait_signal_args *uap,
1183 int32_t *retval)
1184 {
1185 __pthread_testcancel(0);
1186 return __semwait_signal_nocancel(p, (struct __semwait_signal_nocancel_args *)uap, retval);
1187 }
1188
1189 int
1190 __semwait_signal_nocancel(__unused proc_t p, struct __semwait_signal_nocancel_args *uap,
1191 __unused int32_t *retval)
1192 {
1193 kern_return_t kern_result;
1194 mach_timespec_t then;
1195 struct timespec now;
1196 struct user_timespec ts;
1197 boolean_t truncated_timeout = FALSE;
1198
1199 if (uap->timeout) {
1200 ts.tv_sec = uap->tv_sec;
1201 ts.tv_nsec = uap->tv_nsec;
1202
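/*
 * mach_timespec_t carries only 32 bits of seconds, so larger timeouts
 * are clamped here and the truncation remembered; a wait that then
 * "succeeds" is reported as EINTR below rather than as success.
 */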
1203 if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) {
1204 ts.tv_sec = 0xFFFFFFFF;
1205 ts.tv_nsec = 0;
1206 truncated_timeout = TRUE;
1207 }
1208
1209 if (uap->relative) {
1210 then.tv_sec = ts.tv_sec;
1211 then.tv_nsec = ts.tv_nsec;
1212 } else {
1213 nanotime(&now);
1214
1215 /* if the time has already elapsed, use a zero timespec to bail out right away */
1216 if (now.tv_sec == ts.tv_sec ?
1217 now.tv_nsec > ts.tv_nsec :
1218 now.tv_sec > ts.tv_sec) {
1219 then.tv_sec = 0;
1220 then.tv_nsec = 0;
1221 } else {
1222 then.tv_sec = ts.tv_sec - now.tv_sec;
1223 then.tv_nsec = ts.tv_nsec - now.tv_nsec;
1224 if (then.tv_nsec < 0) {
1225 then.tv_nsec += NSEC_PER_SEC;
1226 then.tv_sec--;
1227 }
1228 }
1229 }
1230
1231 if (uap->mutex_sem == 0) {
1232 kern_result = semaphore_timedwait_trap_internal((mach_port_name_t)uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
1233 } else {
1234 kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
1235 }
1236 } else {
1237 if (uap->mutex_sem == 0) {
1238 kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return);
1239 } else {
1240 kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return);
1241 }
1242 }
1243
1244 if (kern_result == KERN_SUCCESS && !truncated_timeout) {
1245 return 0;
1246 } else if (kern_result == KERN_SUCCESS && truncated_timeout) {
1247 return EINTR; /* simulate an exceptional condition because Mach doesn't support a longer timeout */
1248 } else if (kern_result == KERN_ABORTED) {
1249 return EINTR;
1250 } else if (kern_result == KERN_OPERATION_TIMED_OUT) {
1251 return ETIMEDOUT;
1252 } else {
1253 return EINVAL;
1254 }
1255 }
1256
1257
1258 int
1259 __pthread_kill(__unused proc_t p, struct __pthread_kill_args *uap,
1260 __unused int32_t *retval)
1261 {
1262 thread_t target_act;
1263 int error = 0;
1264 int signum = uap->sig;
1265 struct uthread *uth;
1266
1267 target_act = (thread_t)port_name_to_thread(uap->thread_port);
1268
1269 if (target_act == THREAD_NULL) {
1270 return ESRCH;
1271 }
1272 if ((u_int)signum >= NSIG) {
1273 error = EINVAL;
1274 goto out;
1275 }
1276
1277 uth = (struct uthread *)get_bsdthread_info(target_act);
1278
1279 if (uth->uu_flag & UT_NO_SIGMASK) {
1280 error = ESRCH;
1281 goto out;
1282 }
1283
1284 if (signum) {
1285 psignal_uthread(target_act, signum);
1286 }
1287 out:
1288 thread_deallocate(target_act);
1289 return error;
1290 }
1291
1292
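/*
 * Per-thread analogue of sigprocmask(): manipulates only the calling
 * thread's uu_sigmask rather than walking every thread in the process.
 */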
1293 int
1294 __pthread_sigmask(__unused proc_t p, struct __pthread_sigmask_args *uap,
1295 __unused int32_t *retval)
1296 {
1297 user_addr_t set = uap->set;
1298 user_addr_t oset = uap->oset;
1299 sigset_t nset;
1300 int error = 0;
1301 struct uthread *ut;
1302 sigset_t oldset;
1303
1304 ut = (struct uthread *)get_bsdthread_info(current_thread());
1305 oldset = ut->uu_sigmask;
1306
1307 if (set == USER_ADDR_NULL) {
1308 /* need only old mask */
1309 goto out;
1310 }
1311
1312 error = copyin(set, &nset, sizeof(sigset_t));
1313 if (error) {
1314 goto out;
1315 }
1316
1317 switch (uap->how) {
1318 case SIG_BLOCK:
1319 ut->uu_sigmask |= (nset & ~sigcantmask);
1320 break;
1321
1322 case SIG_UNBLOCK:
1323 ut->uu_sigmask &= ~(nset);
1324 signal_setast(current_thread());
1325 break;
1326
1327 case SIG_SETMASK:
1328 ut->uu_sigmask = (nset & ~sigcantmask);
1329 signal_setast(current_thread());
1330 break;
1331
1332 default:
1333 error = EINVAL;
1334 }
1335 out:
1336 if (!error && oset != USER_ADDR_NULL) {
1337 copyout(&oldset, oset, sizeof(sigset_t));
1338 }
1339
1340 return error;
1341 }
1342
1343 /*
1344 * Returns: 0 Success
1345 * EINVAL
1346 * copyin:EFAULT
1347 * copyout:EFAULT
1348 */
1349 int
1350 __sigwait(proc_t p, struct __sigwait_args *uap, int32_t *retval)
1351 {
1352 __pthread_testcancel(1);
1353 return __sigwait_nocancel(p, (struct __sigwait_nocancel_args *)uap, retval);
1354 }
1355
1356 int
1357 __sigwait_nocancel(proc_t p, struct __sigwait_nocancel_args *uap, __unused int32_t *retval)
1358 {
1359 struct uthread *ut;
1360 struct uthread *uth;
1361 int error = 0;
1362 sigset_t mask;
1363 sigset_t siglist;
1364 sigset_t sigw = 0;
1365 int signum;
1366
1367 ut = (struct uthread *)get_bsdthread_info(current_thread());
1368
1369 if (uap->set == USER_ADDR_NULL) {
1370 return EINVAL;
1371 }
1372
1373 error = copyin(uap->set, &mask, sizeof(sigset_t));
1374 if (error) {
1375 return error;
1376 }
1377
1378 siglist = (mask & ~sigcantmask);
1379
1380 if (siglist == 0) {
1381 return EINVAL;
1382 }
1383
1384 proc_lock(p);
1385 if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
1386 proc_unlock(p);
1387 return EINVAL;
1388 } else {
1389 proc_signalstart(p, 1);
1390 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
1391 if ((sigw = uth->uu_siglist & siglist)) {
1392 break;
1393 }
1394 }
1395 proc_signalend(p, 1);
1396 }
1397
1398 if (sigw) {
1399 /* The signal was pending on a thread */
1400 goto sigwait1;
1401 }
1402 /*
1403 * When returning from sigwait, we want
1404 * the old mask to be restored after the
1405 * signal handler has finished. Thus, we
1406 * save it here and mark the sigacts structure
1407 * to indicate this.
1408 */
1409 uth = ut; /* wait for it to be delivered to us */
1410 ut->uu_oldmask = ut->uu_sigmask;
1411 ut->uu_flag |= UT_SAS_OLDMASK;
1412 if (siglist == (sigset_t)0) {
1413 proc_unlock(p);
1414 return EINVAL;
1415 }
1416 /* SIGKILL and SIGSTOP are not maskable either */
1417 ut->uu_sigmask = ~(siglist | sigcantmask);
1418 ut->uu_sigwait = siglist;
1419
1420 /* No Continuations for now */
1421 error = msleep((caddr_t)&ut->uu_sigwait, &p->p_mlock, PPAUSE | PCATCH, "pause", 0);
1422
1423 if (error == ERESTART) {
1424 error = 0;
1425 }
1426
1427 sigw = (ut->uu_sigwait & siglist);
1428 ut->uu_sigmask = ut->uu_oldmask;
1429 ut->uu_oldmask = 0;
1430 ut->uu_flag &= ~UT_SAS_OLDMASK;
1431 sigwait1:
1432 ut->uu_sigwait = 0;
1433 if (!error) {
1434 signum = ffs((unsigned int)sigw);
1435 if (!signum) {
1436 panic("sigwait with no signal wakeup");
1437 }
1438 /* Clear the pending signal in the thread it was delivered to */
1439 uth->uu_siglist &= ~(sigmask(signum));
1440
1441 #if CONFIG_DTRACE
1442 DTRACE_PROC2(signal__clear, int, signum, siginfo_t *, &(ut->t_dtrace_siginfo));
1443 #endif
1444
1445 proc_unlock(p);
1446 if (uap->sig != USER_ADDR_NULL) {
1447 error = copyout(&signum, uap->sig, sizeof(int));
1448 }
1449 } else {
1450 proc_unlock(p);
1451 }
1452
1453 return error;
1454 }
1455
1456 int
1457 sigaltstack(__unused proc_t p, struct sigaltstack_args *uap, __unused int32_t *retval)
1458 {
1459 struct kern_sigaltstack ss;
1460 struct kern_sigaltstack *pstk;
1461 int error;
1462 struct uthread *uth;
1463 int onstack;
1464
1465 uth = (struct uthread *)get_bsdthread_info(current_thread());
1466
1467 pstk = &uth->uu_sigstk;
1468 if ((uth->uu_flag & UT_ALTSTACK) == 0) {
1469 uth->uu_sigstk.ss_flags |= SA_DISABLE;
1470 }
1471 onstack = pstk->ss_flags & SA_ONSTACK;
1472 if (uap->oss) {
1473 if (IS_64BIT_PROCESS(p)) {
1474 struct user64_sigaltstack ss64 = {};
1475 sigaltstack_kern_to_user64(pstk, &ss64);
1476 error = copyout(&ss64, uap->oss, sizeof(ss64));
1477 } else {
1478 struct user32_sigaltstack ss32 = {};
1479 sigaltstack_kern_to_user32(pstk, &ss32);
1480 error = copyout(&ss32, uap->oss, sizeof(ss32));
1481 }
1482 if (error) {
1483 return error;
1484 }
1485 }
1486 if (uap->nss == USER_ADDR_NULL) {
1487 return 0;
1488 }
1489 if (IS_64BIT_PROCESS(p)) {
1490 struct user64_sigaltstack ss64;
1491 error = copyin(uap->nss, &ss64, sizeof(ss64));
1492 sigaltstack_user64_to_kern(&ss64, &ss);
1493 } else {
1494 struct user32_sigaltstack ss32;
1495 error = copyin(uap->nss, &ss32, sizeof(ss32));
1496 sigaltstack_user32_to_kern(&ss32, &ss);
1497 }
1498 if (error) {
1499 return error;
1500 }
1501 if ((ss.ss_flags & ~SA_DISABLE) != 0) {
1502 return EINVAL;
1503 }
1504
1505 if (ss.ss_flags & SA_DISABLE) {
1506 /* if we are here we are not in the signal handler, so no need to check */
1507 if (uth->uu_sigstk.ss_flags & SA_ONSTACK) {
1508 return EINVAL;
1509 }
1510 uth->uu_flag &= ~UT_ALTSTACK;
1511 uth->uu_sigstk.ss_flags = ss.ss_flags;
1512 return 0;
1513 }
1514 if (onstack) {
1515 return EPERM;
1516 }
1517 /* The historical minimum stack size was 8K; enforce it to avoid compatibility problems */
1518 #define OLDMINSIGSTKSZ (8 * 1024)
1519 if (ss.ss_size < OLDMINSIGSTKSZ) {
1520 return ENOMEM;
1521 }
1522 uth->uu_flag |= UT_ALTSTACK;
1523 uth->uu_sigstk = ss;
1524 return 0;
1525 }
1526
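/*
 * Illustrative userspace usage of sigaltstack() (not kernel code):
 *
 *	stack_t ss = { .ss_sp = malloc(SIGSTKSZ), .ss_size = SIGSTKSZ };
 *	sigaltstack(&ss, NULL);			// install the alternate stack
 *	struct sigaction sa = { .sa_handler = h, .sa_flags = SA_ONSTACK };
 *	sigaction(SIGSEGV, &sa, NULL);		// deliver SIGSEGV on it
 */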
1527 int
1528 kill(proc_t cp, struct kill_args *uap, __unused int32_t *retval)
1529 {
1530 proc_t p;
1531 kauth_cred_t uc = kauth_cred_get();
1532 int posix = uap->posix; /* !0 if posix behaviour desired */
1533
1534 AUDIT_ARG(pid, uap->pid);
1535 AUDIT_ARG(signum, uap->signum);
1536
1537 if ((u_int)uap->signum >= NSIG) {
1538 return EINVAL;
1539 }
1540 if (uap->pid > 0) {
1541 /* kill single process */
1542 if ((p = proc_find(uap->pid)) == NULL) {
1543 if ((p = pzfind(uap->pid)) != NULL) {
1544 /*
1545 * POSIX 1003.1-2001 requires returning success when killing a
1546 * zombie; see Rationale for kill(2).
1547 */
1548 return 0;
1549 }
1550 return ESRCH;
1551 }
1552 AUDIT_ARG(process, p);
1553 if (!cansignal(cp, uc, p, uap->signum)) {
1554 proc_rele(p);
1555 return EPERM;
1556 }
1557 if (uap->signum) {
1558 psignal(p, uap->signum);
1559 }
1560 proc_rele(p);
1561 return 0;
1562 }
1563 switch (uap->pid) {
1564 case -1: /* broadcast signal */
1565 return killpg1(cp, uap->signum, 0, 1, posix);
1566 case 0: /* signal own process group */
1567 return killpg1(cp, uap->signum, 0, 0, posix);
1568 default: /* negative explicit process group */
1569 return killpg1(cp, uap->signum, -(uap->pid), 0, posix);
1570 }
1571 /* NOTREACHED */
1572 }
1573
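/*
 * Illustrative userspace view (not kernel code) of the pid decoding in
 * kill() above:
 *
 *	kill(1234, SIGTERM);	// signal process 1234
 *	kill(0, SIGTERM);	// signal the caller's process group
 *	kill(-1, SIGTERM);	// broadcast to all signalable processes
 *	kill(-5678, SIGTERM);	// signal process group 5678
 */
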
1574 os_reason_t
1575 build_userspace_exit_reason(uint32_t reason_namespace, uint64_t reason_code, user_addr_t payload, uint32_t payload_size,
1576 user_addr_t reason_string, uint64_t reason_flags)
1577 {
1578 os_reason_t exit_reason = OS_REASON_NULL;
1579
1580 int error = 0;
1581 int num_items_to_copy = 0;
1582 uint32_t user_data_to_copy = 0;
1583 char *reason_user_desc = NULL;
1584 size_t reason_user_desc_len = 0;
1585
1586 exit_reason = os_reason_create(reason_namespace, reason_code);
1587 if (exit_reason == OS_REASON_NULL) {
1588 printf("build_userspace_exit_reason: failed to allocate exit reason\n");
1589 return exit_reason;
1590 }
1591
1592 exit_reason->osr_flags |= OS_REASON_FLAG_FROM_USERSPACE;
1593
1594 /*
1595 * Only apply flags that are allowed to be passed from userspace.
1596 */
1597 exit_reason->osr_flags |= (reason_flags & OS_REASON_FLAG_MASK_ALLOWED_FROM_USER);
1598 if ((reason_flags & OS_REASON_FLAG_MASK_ALLOWED_FROM_USER) != reason_flags) {
1599 printf("build_userspace_exit_reason: illegal flags passed from userspace (some masked off) 0x%llx, ns: %u, code 0x%llx\n",
1600 reason_flags, reason_namespace, reason_code);
1601 }
1602
1603 if (!(exit_reason->osr_flags & OS_REASON_FLAG_NO_CRASH_REPORT)) {
1604 exit_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
1605 }
1606
1607 if (payload != USER_ADDR_NULL) {
1608 if (payload_size == 0) {
1609 printf("build_userspace_exit_reason: exit reason with namespace %u, nonzero payload but zero length\n",
1610 reason_namespace);
1611 exit_reason->osr_flags |= OS_REASON_FLAG_BAD_PARAMS;
1612 payload = USER_ADDR_NULL;
1613 } else {
1614 num_items_to_copy++;
1615
1616 if (payload_size > EXIT_REASON_PAYLOAD_MAX_LEN) {
1617 exit_reason->osr_flags |= OS_REASON_FLAG_PAYLOAD_TRUNCATED;
1618 payload_size = EXIT_REASON_PAYLOAD_MAX_LEN;
1619 }
1620
1621 user_data_to_copy += payload_size;
1622 }
1623 }
1624
1625 if (reason_string != USER_ADDR_NULL) {
1626 reason_user_desc = (char *) kalloc(EXIT_REASON_USER_DESC_MAX_LEN);
1627
1628 if (reason_user_desc != NULL) {
1629 error = copyinstr(reason_string, (void *) reason_user_desc,
1630 EXIT_REASON_USER_DESC_MAX_LEN, &reason_user_desc_len);
1631
1632 if (error == 0) {
1633 num_items_to_copy++;
1634 user_data_to_copy += reason_user_desc_len;
1635 } else if (error == ENAMETOOLONG) {
1636 num_items_to_copy++;
1637 reason_user_desc[EXIT_REASON_USER_DESC_MAX_LEN - 1] = '\0';
1638 user_data_to_copy += reason_user_desc_len;
1639 } else {
1640 exit_reason->osr_flags |= OS_REASON_FLAG_FAILED_DATA_COPYIN;
1641 kfree(reason_user_desc, EXIT_REASON_USER_DESC_MAX_LEN);
1642 reason_user_desc = NULL;
1643 reason_user_desc_len = 0;
1644 }
1645 }
1646 }
1647
1648 if (num_items_to_copy != 0) {
1649 uint32_t reason_buffer_size_estimate = 0;
1650 mach_vm_address_t data_addr = 0;
1651
1652 reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(num_items_to_copy, user_data_to_copy);
1653
1654 error = os_reason_alloc_buffer(exit_reason, reason_buffer_size_estimate);
1655 if (error != 0) {
1656 printf("build_userspace_exit_reason: failed to allocate signal reason buffer\n");
1657 goto out_failed_copyin;
1658 }
1659
1660 if (reason_user_desc != NULL && reason_user_desc_len != 0) {
1661 if (KERN_SUCCESS == kcdata_get_memory_addr(&exit_reason->osr_kcd_descriptor,
1662 EXIT_REASON_USER_DESC,
1663 reason_user_desc_len,
1664 &data_addr)) {
1665 kcdata_memcpy(&exit_reason->osr_kcd_descriptor, (mach_vm_address_t) data_addr,
1666 reason_user_desc, reason_user_desc_len);
1667 } else {
1668 printf("build_userspace_exit_reason: failed to allocate space for reason string\n");
1669 goto out_failed_copyin;
1670 }
1671 }
1672
1673 if (payload != USER_ADDR_NULL) {
1674 if (KERN_SUCCESS ==
1675 kcdata_get_memory_addr(&exit_reason->osr_kcd_descriptor,
1676 EXIT_REASON_USER_PAYLOAD,
1677 payload_size,
1678 &data_addr)) {
1679 error = copyin(payload, (void *) data_addr, payload_size);
1680 if (error) {
1681 printf("build_userspace_exit_reason: failed to copy in payload data with error %d\n", error);
1682 goto out_failed_copyin;
1683 }
1684 } else {
1685 printf("build_userspace_exit_reason: failed to allocate space for payload data\n");
1686 goto out_failed_copyin;
1687 }
1688 }
1689 }
1690
1691 if (reason_user_desc != NULL) {
1692 kfree(reason_user_desc, EXIT_REASON_USER_DESC_MAX_LEN);
1693 reason_user_desc = NULL;
1694 reason_user_desc_len = 0;
1695 }
1696
1697 return exit_reason;
1698
1699 out_failed_copyin:
1700
1701 if (reason_user_desc != NULL) {
1702 kfree(reason_user_desc, EXIT_REASON_USER_DESC_MAX_LEN);
1703 reason_user_desc = NULL;
1704 reason_user_desc_len = 0;
1705 }
1706
1707 exit_reason->osr_flags |= OS_REASON_FLAG_FAILED_DATA_COPYIN;
1708 os_reason_alloc_buffer(exit_reason, 0);
1709 return exit_reason;
1710 }
1711
1712 static int
1713 terminate_with_payload_internal(struct proc *cur_proc, int target_pid, uint32_t reason_namespace,
1714 uint64_t reason_code, user_addr_t payload, uint32_t payload_size,
1715 user_addr_t reason_string, uint64_t reason_flags)
1716 {
1717 proc_t target_proc = PROC_NULL;
1718 kauth_cred_t cur_cred = kauth_cred_get();
1719
1720 os_reason_t signal_reason = OS_REASON_NULL;
1721
1722 AUDIT_ARG(pid, target_pid);
1723 if (target_pid <= 0) {
1724 return EINVAL;
1725 }
1726
1727 target_proc = proc_find(target_pid);
1728 if (target_proc == PROC_NULL) {
1729 return ESRCH;
1730 }
1731
1732 AUDIT_ARG(process, target_proc);
1733
1734 if (!cansignal(cur_proc, cur_cred, target_proc, SIGKILL)) {
1735 proc_rele(target_proc);
1736 return EPERM;
1737 }
1738
1739 if (target_pid != cur_proc->p_pid) {
1740 /*
1741 * FLAG_ABORT should only be set on terminate_with_reason(getpid()) that
1742 * was a fallback from an unsuccessful abort_with_reason(). In that case
1743 * caller's pid matches the target one. Otherwise remove the flag.
1744 */
1745 reason_flags &= ~((typeof(reason_flags))OS_REASON_FLAG_ABORT);
1746 }
1747
1748 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1749 target_proc->p_pid, reason_namespace,
1750 reason_code, 0, 0);
1751
1752 signal_reason = build_userspace_exit_reason(reason_namespace, reason_code, payload, payload_size,
1753 reason_string, (reason_flags | OS_REASON_FLAG_NO_CRASHED_TID));
1754
1755 if (target_pid == cur_proc->p_pid) {
1756 /*
1757 * psignal_thread_with_reason() will pend a SIGKILL on the specified thread or
1758 * return if the thread and/or task are already terminating. Either way, the
1759 * current thread won't return to userspace.
1760 */
1761 psignal_thread_with_reason(target_proc, current_thread(), SIGKILL, signal_reason);
1762 } else {
1763 psignal_with_reason(target_proc, SIGKILL, signal_reason);
1764 }
1765
1766 proc_rele(target_proc);
1767
1768 return 0;
1769 }
1770
1771 int
1772 terminate_with_payload(struct proc *cur_proc, struct terminate_with_payload_args *args,
1773 __unused int32_t *retval)
1774 {
1775 return terminate_with_payload_internal(cur_proc, args->pid, args->reason_namespace, args->reason_code, args->payload,
1776 args->payload_size, args->reason_string, args->reason_flags);
1777 }
1778
1779 static int
1780 killpg1_allfilt(proc_t p, void * arg)
1781 {
1782 struct killpg1_filtargs * kfargp = (struct killpg1_filtargs *)arg;
1783
1784 /*
1785 * Never signal initproc or system processes; skip the current process
1786 * unless POSIX semantics were requested.
1787 */
1788 return p->p_pid > 1 && !(p->p_flag & P_SYSTEM) &&
1789 (kfargp->posix ? true : p != kfargp->curproc);
1790 }
1791
1792 static int
1793 killpg1_pgrpfilt(proc_t p, __unused void * arg)
1794 {
1795 /* XXX shouldn't this allow signalling zombies? */
1796 return p->p_pid > 1 && !(p->p_flag & P_SYSTEM) && p->p_stat != SZOMB;
1797 }
1798
1799 static int
1800 killpg1_callback(proc_t p, void *arg)
1801 {
1802 struct killpg1_iterargs *kargp = (struct killpg1_iterargs *)arg;
1803 int signum = kargp->signum;
1804
1805 if ((p->p_listflag & P_LIST_EXITED) == P_LIST_EXITED) {
1806 /*
1807 * Count zombies as found for the purposes of signalling, since POSIX
1808 * 1003.1-2001 sees signalling zombies as successful. If killpg(2) or
1809 * kill(2) with pid -1 only finds zombies that can be signalled, it
1810 * shouldn't return ESRCH. See the Rationale for kill(2).
1811 *
1812 * Don't call into MAC -- it's not expecting signal checks for exited
1813 * processes.
1814 */
1815 if (cansignal_nomac(kargp->curproc, kargp->uc, p, signum)) {
1816 kargp->nfound++;
1817 }
1818 } else if (cansignal(kargp->curproc, kargp->uc, p, signum)) {
1819 kargp->nfound++;
1820
1821 if (signum != 0) {
1822 psignal(p, signum);
1823 }
1824 }
1825
1826 return PROC_RETURNED;
1827 }
1828
1829 /*
1830 * Common code for kill process group/broadcast kill.
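 * Iterates either all processes (broadcast kill) or a single process
 * group, applying the filter/callback pairs defined above; returns 0
 * if at least one process the caller may signal was found.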
1831 */
1832 int
1833 killpg1(proc_t curproc, int signum, int pgid, int all, int posix)
1834 {
1835 kauth_cred_t uc;
1836 struct pgrp *pgrp;
1837 int error = 0;
1838
1839 uc = kauth_cred_proc_ref(curproc);
1840 struct killpg1_iterargs karg = {
1841 .curproc = curproc, .uc = uc, .nfound = 0, .signum = signum
1842 };
1843
1844 if (all) {
1845 /*
1846 * Broadcast to all processes that the user can signal (pid was -1).
1847 */
1848 struct killpg1_filtargs kfarg = {
1849 .posix = posix, .curproc = curproc
1850 };
1851 proc_iterate(PROC_ALLPROCLIST | PROC_ZOMBPROCLIST, killpg1_callback,
1852 &karg, killpg1_allfilt, &kfarg);
1853 } else {
1854 if (pgid == 0) {
1855 /*
1856 * Send to the current process' process group.
1857 */
1858 pgrp = proc_pgrp(curproc);
1859 } else {
1860 pgrp = pgfind(pgid);
1861 if (pgrp == NULL) {
1862 error = ESRCH;
1863 goto out;
1864 }
1865 }
1866
1867 /* PGRP_DROPREF drops the pgrp reference */
1868 pgrp_iterate(pgrp, PGRP_DROPREF, killpg1_callback, &karg,
1869 killpg1_pgrpfilt, NULL);
1870 }
1871 error = (karg.nfound > 0 ? 0 : (posix ? EPERM : ESRCH));
1872 out:
1873 kauth_cred_unref(&uc);
1874 return error;
1875 }
1876
1877 /*
1878 * Send a signal to a process group.
1879 */
1880 void
1881 gsignal(int pgid, int signum)
1882 {
1883 struct pgrp *pgrp;
1884
1885 if (pgid && (pgrp = pgfind(pgid))) {
1886 pgsignal(pgrp, signum, 0);
1887 pg_rele(pgrp);
1888 }
1889 }
1890
1891 /*
1892 * Send a signal to a process group. If checkctty is 1,
1893 * limit to members which have a controlling terminal.
1894 */
1895
1896 static int
1897 pgsignal_filt(proc_t p, void * arg)
1898 {
1899 int checkctty = *(int*)arg;
1900
1901 if ((checkctty == 0) || (p->p_flag & P_CONTROLT)) {
1902 return 1;
1903 } else {
1904 return 0;
1905 }
1906 }
1907
1908
1909 static int
1910 pgsignal_callback(proc_t p, void * arg)
1911 {
1912 int signum = *(int*)arg;
1913
1914 psignal(p, signum);
1915 return PROC_RETURNED;
1916 }
1917
1918
1919 void
1920 pgsignal(struct pgrp *pgrp, int signum, int checkctty)
1921 {
1922 if (pgrp != PGRP_NULL) {
1923 pgrp_iterate(pgrp, 0, pgsignal_callback, &signum, pgsignal_filt, &checkctty);
1924 }
1925 }
1926
1927
1928 void
1929 tty_pgsignal(struct tty *tp, int signum, int checkctty)
1930 {
1931 struct pgrp * pg;
1932
1933 pg = tty_pgrp(tp);
1934 if (pg != PGRP_NULL) {
1935 pgrp_iterate(pg, 0, pgsignal_callback, &signum, pgsignal_filt, &checkctty);
1936 pg_rele(pg);
1937 }
1938 }
1939 /*
1940 * Send a signal caused by a trap to a specific thread.
1941 */
1942 void
1943 threadsignal(thread_t sig_actthread, int signum, mach_exception_code_t code, boolean_t set_exitreason)
1944 {
1945 struct uthread *uth;
1946 struct task * sig_task;
1947 proc_t p;
1948 int mask;
1949
1950 if ((u_int)signum >= NSIG || signum == 0) {
1951 return;
1952 }
1953
1954 mask = sigmask(signum);
1955 if ((mask & threadmask) == 0) {
1956 return;
1957 }
1958 sig_task = get_threadtask(sig_actthread);
1959 p = (proc_t)(get_bsdtask_info(sig_task));
1960
1961 uth = get_bsdthread_info(sig_actthread);
1962 if (uth->uu_flag & UT_VFORK) {
1963 p = uth->uu_proc;
1964 }
1965
1966 proc_lock(p);
1967 if (!(p->p_lflag & P_LTRACED) && (p->p_sigignore & mask)) {
1968 proc_unlock(p);
1969 return;
1970 }
1971
1972 uth->uu_siglist |= mask;
1973 uth->uu_code = code;
1974
1975 /* Attempt to establish whether the signal will be fatal (mirrors logic in psignal_internal()) */
1976 if (set_exitreason && ((p->p_lflag & P_LTRACED) || (!(uth->uu_sigwait & mask)
1977 && !(uth->uu_sigmask & mask) && !(p->p_sigcatch & mask))) &&
1978 !(mask & stopsigmask) && !(mask & contsigmask)) {
1979 if (uth->uu_exit_reason == OS_REASON_NULL) {
1980 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1981 p->p_pid, OS_REASON_SIGNAL, signum, 0, 0);
1982
1983 os_reason_t signal_reason = build_signal_reason(signum, "exc handler");
1984
1985 set_thread_exit_reason(sig_actthread, signal_reason, TRUE);
1986
1987 /* We dropped/consumed the reference in set_thread_exit_reason() */
1988 signal_reason = OS_REASON_NULL;
1989 }
1990 }
1991
1992 proc_unlock(p);
1993
1994 /* set the BSD AST so the target thread notices the signal */
1995 signal_setast(sig_actthread);
1996 }
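/*
 * threadsignal() targets the specific faulting thread with trap-generated
 * signals. The closest user-space analog is pthread_kill(2), which also
 * directs a signal at one thread rather than the process; a minimal
 * sketch (not part of the kernel build):
 */
#if 0
#include <pthread.h>
#include <signal.h>
#include <unistd.h>

static void
handler(int sig)
{
	(void)sig;
	/* async-signal-safe output only */
	write(STDOUT_FILENO, "thread got signal\n", 18);
}

static void *
worker(void *arg)
{
	(void)arg;
	pause();                        /* wait for thread-directed signal */
	return NULL;
}

int
main(void)
{
	pthread_t t;

	signal(SIGUSR1, handler);
	pthread_create(&t, NULL, worker, NULL);
	sleep(1);                       /* crude: let worker reach pause() */
	pthread_kill(t, SIGUSR1);       /* delivered to that thread only */
	pthread_join(t, NULL);
	return 0;
}
#endif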
1997
1998 void
1999 set_thread_exit_reason(void *th, void *reason, boolean_t proc_locked)
2000 {
2001 struct uthread *targ_uth = get_bsdthread_info(th);
2002 struct task *targ_task = NULL;
2003 proc_t targ_proc = NULL;
2004
2005 os_reason_t exit_reason = (os_reason_t)reason;
2006
2007 if (exit_reason == OS_REASON_NULL) {
2008 return;
2009 }
2010
2011 if (!proc_locked) {
2012 targ_task = get_threadtask(th);
2013 targ_proc = (proc_t)(get_bsdtask_info(targ_task));
2014
2015 proc_lock(targ_proc);
2016 }
2017
2018 if (targ_uth->uu_exit_reason == OS_REASON_NULL) {
2019 targ_uth->uu_exit_reason = exit_reason;
2020 } else {
2021 /* The caller expects that we drop a reference on the exit reason */
2022 os_reason_free(exit_reason);
2023 }
2024
2025 if (!proc_locked) {
2026 assert(targ_proc != NULL);
2027 proc_unlock(targ_proc);
2028 }
2029 }
2030
2031 /*
2032 * get_signalthread
2033 *
2034 * Picks an appropriate thread from a process to target with a signal.
2035 *
2036 * Called with proc locked.
2037 * Returns thread with BSD ast set.
2038 *
2039 * We attempt to deliver a proc-wide signal to the first thread in the task.
2040 * This allows single-threaded applications that use signals
2041 * to be linked with multithreaded libraries.
2042 */
2043 static kern_return_t
2044 get_signalthread(proc_t p, int signum, thread_t * thr)
2045 {
2046 struct uthread *uth;
2047 sigset_t mask = sigmask(signum);
2048 thread_t sig_thread;
2049 struct task * sig_task = p->task;
2050 kern_return_t kret;
2051
2052 *thr = THREAD_NULL;
2053
2054 if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
2055 sig_thread = p->p_vforkact;
2056 kret = check_actforsig(sig_task, sig_thread, 1);
2057 if (kret == KERN_SUCCESS) {
2058 *thr = sig_thread;
2059 return KERN_SUCCESS;
2060 } else {
2061 return KERN_FAILURE;
2062 }
2063 }
2064
2065 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
2066 if (((uth->uu_flag & UT_NO_SIGMASK) == 0) &&
2067 (((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask))) {
2068 if (check_actforsig(p->task, uth->uu_context.vc_thread, 1) == KERN_SUCCESS) {
2069 *thr = uth->uu_context.vc_thread;
2070 return KERN_SUCCESS;
2071 }
2072 }
2073 }
2074 if (get_signalact(p->task, thr, 1) == KERN_SUCCESS) {
2075 return KERN_SUCCESS;
2076 }
2077
2078 return KERN_FAILURE;
2079 }
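/*
 * Because get_signalthread() prefers a thread whose mask does not block
 * the signal (or that is sigwait()ing on it), user space can steer
 * process-wide signals to a dedicated thread by blocking them everywhere
 * else. A minimal sketch using standard pthread APIs (not part of the
 * kernel build):
 */
#if 0
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void *
sig_thread(void *arg)
{
	sigset_t *set = arg;
	int sig;

	/* Only this thread waits on SIGUSR1, so the selection loop in
	 * get_signalthread() finds it via the uu_sigwait check. */
	if (sigwait(set, &sig) == 0) {
		printf("got signal %d\n", sig);
	}
	return NULL;
}

int
main(void)
{
	sigset_t set;
	pthread_t t;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	/* Block SIGUSR1 here; new threads inherit the mask. */
	pthread_sigmask(SIG_BLOCK, &set, NULL);
	pthread_create(&t, NULL, sig_thread, &set);

	kill(getpid(), SIGUSR1);        /* process-directed: kernel picks
	                                 * an eligible thread */
	pthread_join(t, NULL);
	return 0;
}
#endif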
2080
2081 static os_reason_t
2082 build_signal_reason(int signum, const char *procname)
2083 {
2084 os_reason_t signal_reason = OS_REASON_NULL;
2085 proc_t sender_proc = current_proc();
2086 uint32_t reason_buffer_size_estimate = 0, proc_name_length = 0;
2087 const char *default_sender_procname = "unknown";
2088 mach_vm_address_t data_addr;
2089 int ret;
2090
2091 signal_reason = os_reason_create(OS_REASON_SIGNAL, signum);
2092 if (signal_reason == OS_REASON_NULL) {
2093 printf("build_signal_reason: unable to allocate signal reason structure.\n");
2094 return signal_reason;
2095 }
2096
2097 reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(2, sizeof(sender_proc->p_name) +
2098 sizeof(sender_proc->p_pid));
2099
2100 ret = os_reason_alloc_buffer_noblock(signal_reason, reason_buffer_size_estimate);
2101 if (ret != 0) {
2102 printf("build_signal_reason: unable to allocate signal reason buffer.\n");
2103 return signal_reason;
2104 }
2105
2106 if (KERN_SUCCESS == kcdata_get_memory_addr(&signal_reason->osr_kcd_descriptor, KCDATA_TYPE_PID,
2107 sizeof(sender_proc->p_pid), &data_addr)) {
2108 kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, &sender_proc->p_pid,
2109 sizeof(sender_proc->p_pid));
2110 } else {
2111 printf("build_signal_reason: exceeded space in signal reason buf, unable to log PID\n");
2112 }
2113
2114 proc_name_length = sizeof(sender_proc->p_name);
2115 if (KERN_SUCCESS == kcdata_get_memory_addr(&signal_reason->osr_kcd_descriptor, KCDATA_TYPE_PROCNAME,
2116 proc_name_length, &data_addr)) {
2117 if (procname) {
2118 char truncated_procname[proc_name_length];
2119 strncpy((char *) &truncated_procname, procname, proc_name_length);
2120 truncated_procname[proc_name_length - 1] = '\0';
2121
2122 kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, truncated_procname,
2123 strlen((char *) &truncated_procname));
2124 } else if (*sender_proc->p_name) {
2125 kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, &sender_proc->p_name,
2126 sizeof(sender_proc->p_name));
2127 } else {
2128 kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, &default_sender_procname,
2129 strlen(default_sender_procname) + 1);
2130 }
2131 } else {
2132 printf("build_signal_reason: exceeded space in signal reason buf, unable to log procname\n");
2133 }
2134
2135 return signal_reason;
2136 }
2137
2138 /*
2139 * Send the signal to the process. If the signal has an action, the action
2140 * is usually performed by the target process rather than the caller; we add
2141 * the signal to the set of pending signals for the process.
2142 *
2143 * Always drops a reference on a signal_reason if one is provided, whether via
2144 * passing it to a thread or deallocating directly.
2145 *
2146 * Exceptions:
2147 * o When a stop signal is sent to a sleeping process that takes the
2148 * default action, the process is stopped without awakening it.
2149 * o SIGCONT restarts stopped processes (or puts them back to sleep)
2150 * regardless of the signal action (e.g., blocked or ignored).
2151 *
2152 * Other ignored signals are discarded immediately.
2153 */
2154 static void
2155 psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum, os_reason_t signal_reason)
2156 {
2157 int prop;
2158 user_addr_t action = USER_ADDR_NULL;
2159 proc_t sig_proc;
2160 thread_t sig_thread;
2161 task_t sig_task;
2162 int mask;
2163 struct uthread *uth;
2164 kern_return_t kret;
2165 uid_t r_uid;
2166 proc_t pp;
2167 kauth_cred_t my_cred;
2168 char *launchd_exit_reason_desc = NULL;
2169 boolean_t update_thread_policy = FALSE;
2170
2171 if ((u_int)signum >= NSIG || signum == 0) {
2172 panic("psignal: bad signal number %d", signum);
2173 }
2174
2175 mask = sigmask(signum);
2176 prop = sigprop[signum];
2177
2178 #if SIGNAL_DEBUG
2179 if (rdebug_proc && (p != PROC_NULL) && (p == rdebug_proc)) {
2180 ram_printf(3);
2181 }
2182 #endif /* SIGNAL_DEBUG */
2183
2184 /* catch unexpected initproc kills early for easier debugging */
2185 if (signum == SIGKILL && p == initproc) {
2186 if (signal_reason == NULL) {
2187 panic_plain("unexpected SIGKILL of %s %s (no reason provided)",
2188 (p->p_name[0] != '\0' ? p->p_name : "initproc"),
2189 ((p->p_csflags & CS_KILLED) ? "(CS_KILLED)" : ""));
2190 } else {
2191 launchd_exit_reason_desc = launchd_exit_reason_get_string_desc(signal_reason);
2192 panic_plain("unexpected SIGKILL of %s %s with reason -- namespace %d code 0x%llx description %." LAUNCHD_PANIC_REASON_STRING_MAXLEN "s",
2193 (p->p_name[0] != '\0' ? p->p_name : "initproc"),
2194 ((p->p_csflags & CS_KILLED) ? "(CS_KILLED)" : ""),
2195 signal_reason->osr_namespace, signal_reason->osr_code,
2196 launchd_exit_reason_desc ? launchd_exit_reason_desc : "none");
2197 }
2198 }
2199
2200 /*
2201 * We will need the task pointer later. Grab it now to
2202 * check for a zombie process. Also don't send signals
2203 * to kernel internal tasks.
2204 */
2205 if (flavor & PSIG_VFORK) {
2206 sig_task = task;
2207 sig_thread = thread;
2208 sig_proc = p;
2209 } else if (flavor & PSIG_THREAD) {
2210 sig_task = get_threadtask(thread);
2211 sig_thread = thread;
2212 sig_proc = (proc_t)get_bsdtask_info(sig_task);
2213 } else if (flavor & PSIG_TRY_THREAD) {
2214 assert((thread == current_thread()) && (p == current_proc()));
2215 sig_task = p->task;
2216 sig_thread = thread;
2217 sig_proc = p;
2218 } else {
2219 sig_task = p->task;
2220 sig_thread = THREAD_NULL;
2221 sig_proc = p;
2222 }
2223
2224 if ((sig_task == TASK_NULL) || is_kerneltask(sig_task)) {
2225 os_reason_free(signal_reason);
2226 return;
2227 }
2228
2229 /*
2230 * Do not send signals to a process whose thread is doing a
2231 * reboot(); doing so would mark that thread aborted
2232 * and can cause IO failures which will cause data loss. There's
2233 * also no need to send a signal to a process that is in the middle
2234 * of being torn down.
2235 */
2236 if (ISSET(sig_proc->p_flag, P_REBOOT) || ISSET(sig_proc->p_lflag, P_LEXIT)) {
2237 DTRACE_PROC3(signal__discard, thread_t, sig_thread, proc_t, sig_proc, int, signum);
2238 os_reason_free(signal_reason);
2239 return;
2240 }
2241
2242 if ((flavor & (PSIG_VFORK | PSIG_THREAD)) == 0) {
2243 proc_knote(sig_proc, NOTE_SIGNAL | signum);
2244 }
2245
2246 if ((flavor & PSIG_LOCKED) == 0) {
2247 proc_signalstart(sig_proc, 0);
2248 }
2249
2250 /* Don't send signals to a process that has ignored them. */
2251 if (((flavor & PSIG_VFORK) == 0) && ((sig_proc->p_lflag & P_LTRACED) == 0) && (sig_proc->p_sigignore & mask)) {
2252 DTRACE_PROC3(signal__discard, thread_t, sig_thread, proc_t, sig_proc, int, signum);
2253 goto sigout_unlocked;
2254 }
2255
2256 /*
2257 * The proc_lock prevents the targeted thread from being deallocated
2258 * or handling the signal until we're done signaling it.
2259 *
2260 * Once the proc_lock is dropped, we have no guarantee the thread or uthread exists anymore.
2261 *
2262 * XXX: What if the thread goes inactive after the thread passes bsd ast point?
2263 */
2264 proc_lock(sig_proc);
2265
2266 if (flavor & PSIG_VFORK) {
2267 action = SIG_DFL;
2268 act_set_astbsd(sig_thread);
2269 kret = KERN_SUCCESS;
2270 } else if (flavor & PSIG_TRY_THREAD) {
2271 uth = get_bsdthread_info(sig_thread);
2272 if (((uth->uu_flag & UT_NO_SIGMASK) == 0) &&
2273 (((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask)) &&
2274 ((kret = check_actforsig(sig_proc->task, sig_thread, 1)) == KERN_SUCCESS)) {
2275 /* deliver to specified thread */
2276 } else {
2277 /* deliver to any willing thread */
2278 kret = get_signalthread(sig_proc, signum, &sig_thread);
2279 }
2280 } else if (flavor & PSIG_THREAD) {
2281 /* If successful, return with AST set */
2282 kret = check_actforsig(sig_task, sig_thread, 1);
2283 } else {
2284 /* If successful, return with AST set */
2285 kret = get_signalthread(sig_proc, signum, &sig_thread);
2286 }
2287
2288 if (kret != KERN_SUCCESS) {
2289 DTRACE_PROC3(signal__discard, thread_t, sig_thread, proc_t, sig_proc, int, signum);
2290 proc_unlock(sig_proc);
2291 goto sigout_unlocked;
2292 }
2293
2294 uth = get_bsdthread_info(sig_thread);
2295
2296 /*
2297 * If proc is traced, always give parent a chance.
2298 */
2299
2300 if ((flavor & PSIG_VFORK) == 0) {
2301 if (sig_proc->p_lflag & P_LTRACED) {
2302 action = SIG_DFL;
2303 } else {
2304 /*
2305 * If the signal is being ignored,
2306 * then we forget about it immediately.
2307 * (Note: we don't set SIGCONT in p_sigignore,
2308 * and if it is set to SIG_IGN,
2309 * action will be SIG_DFL here.)
2310 */
2311 if (sig_proc->p_sigignore & mask) {
2312 goto sigout_locked;
2313 }
2314
2315 if (uth->uu_sigwait & mask) {
2316 action = KERN_SIG_WAIT;
2317 } else if (uth->uu_sigmask & mask) {
2318 action = KERN_SIG_HOLD;
2319 } else if (sig_proc->p_sigcatch & mask) {
2320 action = KERN_SIG_CATCH;
2321 } else {
2322 action = SIG_DFL;
2323 }
2324 }
2325 }
2326
2327 /* TODO: p_nice isn't hooked up to the scheduler... */
2328 if (sig_proc->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
2329 (sig_proc->p_lflag & P_LTRACED) == 0) {
2330 sig_proc->p_nice = NZERO;
2331 }
2332
2333 if (prop & SA_CONT) {
2334 uth->uu_siglist &= ~stopsigmask;
2335 }
2336
2337 if (prop & SA_STOP) {
2338 struct pgrp *pg;
2339 /*
2340 * If sending a tty stop signal to a member of an orphaned
2341 * process group, discard the signal here if the action
2342 * is default; don't stop the process below if sleeping,
2343 * and don't clear any pending SIGCONT.
2344 */
2345 pg = proc_pgrp(sig_proc);
2346 if (prop & SA_TTYSTOP && pg->pg_jobc == 0 &&
2347 action == SIG_DFL) {
2348 pg_rele(pg);
2349 goto sigout_locked;
2350 }
2351 pg_rele(pg);
2352 uth->uu_siglist &= ~contsigmask;
2353 }
2354
2355 uth->uu_siglist |= mask;
2356
2357 /*
2358 * Defer further processing for signals which are held,
2359 * except that stopped processes must be continued by SIGCONT.
2360 */
2361 /* vfork will not go through as action is SIG_DFL */
2362 if ((action == KERN_SIG_HOLD) && ((prop & SA_CONT) == 0 || sig_proc->p_stat != SSTOP)) {
2363 goto sigout_locked;
2364 }
2365
2366 /*
2367 * SIGKILL priority twiddling moved here from above because
2368 * it needs sig_thread. Could merge it into large switch
2369 * below if we didn't care about priority for tracing
2370 * as SIGKILL's action is always SIG_DFL.
2371 *
2372 * TODO: p_nice isn't hooked up to the scheduler...
2373 */
2374 if ((signum == SIGKILL) && (sig_proc->p_nice > NZERO)) {
2375 sig_proc->p_nice = NZERO;
2376 }
2377
2378 /*
2379 * Process is traced - wake it up (if not already
2380 * stopped) so that it can discover the signal in
2381 * issig() and stop for the parent.
2382 */
2383 if (sig_proc->p_lflag & P_LTRACED) {
2384 if (sig_proc->p_stat != SSTOP) {
2385 goto runlocked;
2386 } else {
2387 goto sigout_locked;
2388 }
2389 }
2390
2391 if ((flavor & PSIG_VFORK) != 0) {
2392 goto runlocked;
2393 }
2394
2395 if (action == KERN_SIG_WAIT) {
2396 #if CONFIG_DTRACE
2397 /*
2398 * DTrace proc signal-clear returns a siginfo_t. Collect the needed info.
2399 */
2400 r_uid = kauth_getruid(); /* per thread credential; protected by our thread context */
2401
2402 bzero((caddr_t)&(uth->t_dtrace_siginfo), sizeof(uth->t_dtrace_siginfo));
2403
2404 uth->t_dtrace_siginfo.si_signo = signum;
2405 uth->t_dtrace_siginfo.si_pid = current_proc()->p_pid;
2406 uth->t_dtrace_siginfo.si_status = W_EXITCODE(signum, 0);
2407 uth->t_dtrace_siginfo.si_uid = r_uid;
2408 uth->t_dtrace_siginfo.si_code = 0;
2409 #endif
2410 uth->uu_sigwait = mask;
2411 uth->uu_siglist &= ~mask;
2412 wakeup(&uth->uu_sigwait);
2413 /* if it is SIGCONT resume whole process */
2414 if (prop & SA_CONT) {
2415 OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
2416 sig_proc->p_contproc = current_proc()->p_pid;
2417 (void) task_resume_internal(sig_task);
2418 }
2419 goto sigout_locked;
2420 }
2421
2422 if (action != SIG_DFL) {
2423 /*
2424 * User wants to catch the signal.
2425 * Wake up the thread, but don't un-suspend it
2426 * (except for SIGCONT).
2427 */
2428 if (prop & SA_CONT) {
2429 OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
2430 (void) task_resume_internal(sig_task);
2431 sig_proc->p_stat = SRUN;
2432 } else if (sig_proc->p_stat == SSTOP) {
2433 goto sigout_locked;
2434 }
2435 /*
2436 * Fill out siginfo structure information to pass to the
2437 * signalled process/thread sigaction handler, when it
2438 * wakes up. si_code is 0 because this is an ordinary
2439 * signal, not a SIGCHLD, and so si_status is the signal
2440 * number itself, instead of the child process exit status.
2441 * We shift this left because it will be shifted right before
2442 * it is passed to user space. Kind of ugly to use W_EXITCODE
2443 * this way, but it beats defining a new macro.
2444 *
2445 * Note: Avoid the SIGCHLD recursion case!
2446 */
2447 if (signum != SIGCHLD) {
2448 r_uid = kauth_getruid();
2449
2450 sig_proc->si_pid = current_proc()->p_pid;
2451 sig_proc->si_status = W_EXITCODE(signum, 0);
2452 sig_proc->si_uid = r_uid;
2453 sig_proc->si_code = 0;
2454 }
2455
2456 goto runlocked;
2457 } else {
2458 /* Default action - varies */
2459 if (mask & stopsigmask) {
2460 assert(signal_reason == NULL);
2461 /*
2462 * These are the signals which by default
2463 * stop a process.
2464 *
2465 * Don't clog system with children of init
2466 * stopped from the keyboard.
2467 */
2468 if (!(prop & SA_STOP) && sig_proc->p_pptr == initproc) {
2469 uth->uu_siglist &= ~mask;
2470 proc_unlock(sig_proc);
2471 /* siglock still locked, proc_lock not locked */
2472 psignal_locked(sig_proc, SIGKILL);
2473 goto sigout_unlocked;
2474 }
2475
2476 /*
2477 * Stop the task
2478 * if task hasn't already been stopped by
2479 * a signal.
2480 */
2481 uth->uu_siglist &= ~mask;
2482 if (sig_proc->p_stat != SSTOP) {
2483 sig_proc->p_xstat = signum;
2484 sig_proc->p_stat = SSTOP;
2485 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &sig_proc->p_flag);
2486 sig_proc->p_lflag &= ~P_LWAITED;
2487 proc_unlock(sig_proc);
2488
2489 pp = proc_parentholdref(sig_proc);
2490 stop(sig_proc, pp);
2491 if ((pp != PROC_NULL) && ((pp->p_flag & P_NOCLDSTOP) == 0)) {
2492 my_cred = kauth_cred_proc_ref(sig_proc);
2493 r_uid = kauth_cred_getruid(my_cred);
2494 kauth_cred_unref(&my_cred);
2495
2496 proc_lock(sig_proc);
2497 pp->si_pid = sig_proc->p_pid;
2498 /*
2499 * POSIX: the SIGCHLD sent to the parent for a
2500 * stopped child must carry the child's stop
2501 * signal number in si_status.
2502 */
2503 if (signum != SIGSTOP) {
2504 pp->si_status = WEXITSTATUS(sig_proc->p_xstat);
2505 } else {
2506 pp->si_status = W_EXITCODE(signum, signum);
2507 }
2508 pp->si_code = CLD_STOPPED;
2509 pp->si_uid = r_uid;
2510 proc_unlock(sig_proc);
2511
2512 psignal(pp, SIGCHLD);
2513 }
2514 if (pp != PROC_NULL) {
2515 proc_parentdropref(pp, 0);
2516 }
2517
2518 goto sigout_unlocked;
2519 }
2520
2521 goto sigout_locked;
2522 }
2523
2524 DTRACE_PROC3(signal__send, thread_t, sig_thread, proc_t, p, int, signum);
2525
2526 switch (signum) {
2527 /*
2528 * Signals ignored by default have been dealt
2529 * with already, since their bits are on in
2530 * p_sigignore.
2531 */
2532
2533 case SIGKILL:
2534 /*
2535 * Kill signal always sets process running and
2536 * unsuspends it.
2537 */
2538 /*
2539 * Process will be running after 'run'
2540 */
2541 sig_proc->p_stat = SRUN;
2542 /*
2543 * In scenarios where suspend/resume race with
2544 * the signal, we may miss AST_BSD by the time
2545 * we get here; set it again to avoid the race. This
2546 * was the scenario with spindump-enabled shutdowns.
2547 * We would need to cover this appropriately down the line.
2548 */
2549 act_set_astbsd(sig_thread);
2550 kret = thread_abort(sig_thread);
2551 update_thread_policy = (kret == KERN_SUCCESS);
2552
2553 if (uth->uu_exit_reason == OS_REASON_NULL) {
2554 if (signal_reason == OS_REASON_NULL) {
2555 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
2556 sig_proc->p_pid, OS_REASON_SIGNAL, signum, 0, 0);
2557
2558 signal_reason = build_signal_reason(signum, NULL);
2559 }
2560
2561 os_reason_ref(signal_reason);
2562 set_thread_exit_reason(sig_thread, signal_reason, TRUE);
2563 }
2564
2565 goto sigout_locked;
2566
2567 case SIGCONT:
2568 /*
2569 * Let the process run. If it's sleeping on an
2570 * event, it remains so.
2571 */
2572 assert(signal_reason == NULL);
2573 OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
2574 sig_proc->p_contproc = sig_proc->p_pid;
2575 sig_proc->p_xstat = signum;
2576
2577 (void) task_resume_internal(sig_task);
2578
2579 /*
2580 * When processing a SIGCONT, we need to check
2581 * to see if there are signals pending that
2582 * were not delivered because we had been
2583 * previously stopped. If that's the case,
2584 * we need to thread_abort_safely() to trigger
2585 * interruption of the current system call to
2586 * cause their handlers to fire. If it's only
2587 * the SIGCONT, then don't wake up.
2588 */
2589 if (((flavor & (PSIG_VFORK | PSIG_THREAD)) == 0) && (((uth->uu_siglist & ~uth->uu_sigmask) & ~sig_proc->p_sigignore) & ~mask)) {
2590 uth->uu_siglist &= ~mask;
2591 sig_proc->p_stat = SRUN;
2592 goto runlocked;
2593 }
2594
2595 uth->uu_siglist &= ~mask;
2596 sig_proc->p_stat = SRUN;
2597 goto sigout_locked;
2598
2599 default:
2600 /*
2601 * A signal which has a default action of killing
2602 * the process, and for which there is no handler,
2603 * needs to act like SIGKILL
2604 */
2605 if (((flavor & (PSIG_VFORK | PSIG_THREAD)) == 0) && (action == SIG_DFL) && (prop & SA_KILL)) {
2606 sig_proc->p_stat = SRUN;
2607 kret = thread_abort(sig_thread);
2608 update_thread_policy = (kret == KERN_SUCCESS);
2609
2610 if (uth->uu_exit_reason == OS_REASON_NULL) {
2611 if (signal_reason == OS_REASON_NULL) {
2612 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
2613 sig_proc->p_pid, OS_REASON_SIGNAL, signum, 0, 0);
2614
2615 signal_reason = build_signal_reason(signum, NULL);
2616 }
2617
2618 os_reason_ref(signal_reason);
2619 set_thread_exit_reason(sig_thread, signal_reason, TRUE);
2620 }
2621
2622 goto sigout_locked;
2623 }
2624
2625 /*
2626 * All other signals wake up the process, but don't
2627 * resume it.
2628 */
2629 if (sig_proc->p_stat == SSTOP) {
2630 goto sigout_locked;
2631 }
2632 goto runlocked;
2633 }
2634 }
2635 /*NOTREACHED*/
2636
2637 runlocked:
2638 /*
2639 * If we're being traced (possibly because someone attached us
2640 * while we were stopped), check for a signal from the debugger.
2641 */
2642 if (sig_proc->p_stat == SSTOP) {
2643 if ((sig_proc->p_lflag & P_LTRACED) != 0 && sig_proc->p_xstat != 0) {
2644 uth->uu_siglist |= sigmask(sig_proc->p_xstat);
2645 }
2646
2647 if ((flavor & PSIG_VFORK) != 0) {
2648 sig_proc->p_stat = SRUN;
2649 }
2650 } else {
2651 /*
2652 * BSD equivalent of setrunnable(p):
2653 * wake up the thread if it is interruptible.
2654 */
2655 sig_proc->p_stat = SRUN;
2656 if ((flavor & PSIG_VFORK) == 0) {
2657 thread_abort_safely(sig_thread);
2658 }
2659 }
2660
2661 sigout_locked:
2662 if (update_thread_policy) {
2663 /*
2664 * Update the thread policy to heading to terminate, increase priority if
2665 * necessary. This needs to be done before we drop the proc lock because the
2666 * thread can take the fatal signal once it's dropped.
2667 */
2668 proc_set_thread_policy(sig_thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
2669 }
2670
2671 proc_unlock(sig_proc);
2672
2673 sigout_unlocked:
2674 os_reason_free(signal_reason);
2675 if ((flavor & PSIG_LOCKED) == 0) {
2676 proc_signalend(sig_proc, 0);
2677 }
2678 }
2679
2680 void
2681 psignal(proc_t p, int signum)
2682 {
2683 psignal_internal(p, NULL, NULL, 0, signum, NULL);
2684 }
2685
2686 void
2687 psignal_with_reason(proc_t p, int signum, struct os_reason *signal_reason)
2688 {
2689 psignal_internal(p, NULL, NULL, 0, signum, signal_reason);
2690 }
2691
2692 void
2693 psignal_locked(proc_t p, int signum)
2694 {
2695 psignal_internal(p, NULL, NULL, PSIG_LOCKED, signum, NULL);
2696 }
2697
2698 void
2699 psignal_vfork_with_reason(proc_t p, task_t new_task, thread_t thread, int signum, struct os_reason *signal_reason)
2700 {
2701 psignal_internal(p, new_task, thread, PSIG_VFORK, signum, signal_reason);
2702 }
2703
2704
2705 void
2706 psignal_vfork(proc_t p, task_t new_task, thread_t thread, int signum)
2707 {
2708 psignal_internal(p, new_task, thread, PSIG_VFORK, signum, NULL);
2709 }
2710
2711 void
2712 psignal_uthread(thread_t thread, int signum)
2713 {
2714 psignal_internal(PROC_NULL, TASK_NULL, thread, PSIG_THREAD, signum, NULL);
2715 }
2716
2717 /* same as psignal(), but prefer delivery to 'thread' if possible */
2718 void
2719 psignal_try_thread(proc_t p, thread_t thread, int signum)
2720 {
2721 psignal_internal(p, NULL, thread, PSIG_TRY_THREAD, signum, NULL);
2722 }
2723
2724 void
2725 psignal_try_thread_with_reason(proc_t p, thread_t thread, int signum, struct os_reason *signal_reason)
2726 {
2727 psignal_internal(p, TASK_NULL, thread, PSIG_TRY_THREAD, signum, signal_reason);
2728 }
2729
2730 void
2731 psignal_thread_with_reason(proc_t p, thread_t thread, int signum, struct os_reason *signal_reason)
2732 {
2733 psignal_internal(p, TASK_NULL, thread, PSIG_THREAD, signum, signal_reason);
2734 }
2735
2736 /*
2737 * If the current process has received a signal (should be caught or cause
2738 * termination, should interrupt current syscall), return the signal number.
2739 * Stop signals with default action are processed immediately, then cleared;
2740 * they aren't returned. This is checked after each entry to the system for
2741 * a syscall or trap (though this can usually be done without calling issignal
2742 * by checking the pending signal masks in the CURSIG macro.) The normal call
2743 * sequence is
2744 *
2745 * while (signum = CURSIG(curproc))
2746 * postsig(signum);
2747 */
2748 int
2749 issignal_locked(proc_t p)
2750 {
2751 int signum, mask, prop, sigbits;
2752 thread_t cur_act;
2753 struct uthread * ut;
2754 proc_t pp;
2755 kauth_cred_t my_cred;
2756 int retval = 0;
2757 uid_t r_uid;
2758
2759 cur_act = current_thread();
2760
2761 #if SIGNAL_DEBUG
2762 if (rdebug_proc && (p == rdebug_proc)) {
2763 ram_printf(3);
2764 }
2765 #endif /* SIGNAL_DEBUG */
2766
2767 /*
2768 * Try to grab the signal lock.
2769 */
2770 if (sig_try_locked(p) <= 0) {
2771 return 0;
2772 }
2773
2774 proc_signalstart(p, 1);
2775
2776 ut = get_bsdthread_info(cur_act);
2777 for (;;) {
2778 sigbits = ut->uu_siglist & ~ut->uu_sigmask;
2779
2780 if (p->p_lflag & P_LPPWAIT) {
2781 sigbits &= ~stopsigmask;
2782 }
2783 if (sigbits == 0) { /* no signal to send */
2784 retval = 0;
2785 goto out;
2786 }
2787
2788 signum = ffs((long)sigbits);
2789 mask = sigmask(signum);
2790 prop = sigprop[signum];
2791
2792 /*
2793 * We should see pending but ignored signals
2794 * only if P_LTRACED was on when they were posted.
2795 */
2796 if (mask & p->p_sigignore && (p->p_lflag & P_LTRACED) == 0) {
2797 ut->uu_siglist &= ~mask;
2798 continue;
2799 }
2800
2801 if (p->p_lflag & P_LTRACED && (p->p_lflag & P_LPPWAIT) == 0) {
2802 /*
2803 * If traced, deliver the signal to the debugger, and wait to be
2804 * released.
2805 */
2806 task_t task;
2807 p->p_xstat = signum;
2808
2809 if (p->p_lflag & P_LSIGEXC) {
2810 p->sigwait = TRUE;
2811 p->sigwait_thread = cur_act;
2812 p->p_stat = SSTOP;
2813 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
2814 p->p_lflag &= ~P_LWAITED;
2815 ut->uu_siglist &= ~mask; /* clear the current signal from the pending list */
2816 proc_signalend(p, 1);
2817 proc_unlock(p);
2818 do_bsdexception(EXC_SOFTWARE, EXC_SOFT_SIGNAL, signum);
2819 proc_lock(p);
2820 proc_signalstart(p, 1);
2821 } else {
2822 proc_unlock(p);
2823 my_cred = kauth_cred_proc_ref(p);
2824 r_uid = kauth_cred_getruid(my_cred);
2825 kauth_cred_unref(&my_cred);
2826
2827 pp = proc_parentholdref(p);
2828 if (pp != PROC_NULL) {
2829 proc_lock(pp);
2830
2831 pp->si_pid = p->p_pid;
2832 pp->p_xhighbits = p->p_xhighbits;
2833 p->p_xhighbits = 0;
2834 pp->si_status = p->p_xstat;
2835 pp->si_code = CLD_TRAPPED;
2836 pp->si_uid = r_uid;
2837
2838 proc_unlock(pp);
2839 }
2840
2841 /*
2842 * XXX Have to really stop for debuggers;
2843 * XXX stop() doesn't do the right thing.
2844 */
2845 task = p->task;
2846 task_suspend_internal(task);
2847
2848 proc_lock(p);
2849 p->sigwait = TRUE;
2850 p->sigwait_thread = cur_act;
2851 p->p_stat = SSTOP;
2852 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
2853 p->p_lflag &= ~P_LWAITED;
2854 ut->uu_siglist &= ~mask;
2855
2856 proc_signalend(p, 1);
2857 proc_unlock(p);
2858
2859 if (pp != PROC_NULL) {
2860 psignal(pp, SIGCHLD);
2861 proc_list_lock();
2862 wakeup((caddr_t)pp);
2863 proc_parentdropref(pp, 1);
2864 proc_list_unlock();
2865 }
2866
2867 assert_wait((caddr_t)&p->sigwait, (THREAD_INTERRUPTIBLE));
2868 thread_block(THREAD_CONTINUE_NULL);
2869 proc_lock(p);
2870 proc_signalstart(p, 1);
2871 }
2872
2873 p->sigwait = FALSE;
2874 p->sigwait_thread = NULL;
2875 wakeup((caddr_t)&p->sigwait_thread);
2876
2877 if (signum == SIGKILL || ut->uu_siglist & sigmask(SIGKILL)) {
2878 /*
2879 * Deliver a pending SIGKILL even if it's not the current signal.
2880 * Necessary for PT_KILL, which should not be delivered to the
2881 * debugger, but we can't differentiate it from any other KILL.
2882 */
2883 signum = SIGKILL;
2884 goto deliver_sig;
2885 }
2886
2887 /* We may have to quit. */
2888 if (thread_should_abort(current_thread())) {
2889 retval = 0;
2890 goto out;
2891 }
2892
2893 /*
2894 * If parent wants us to take the signal,
2895 * then it will leave it in p->p_xstat;
2896 * otherwise we just look for signals again.
2897 */
2898 signum = p->p_xstat;
2899 if (signum == 0) {
2900 continue;
2901 }
2902
2903 /*
2904 * Put the new signal into p_siglist. If the
2905 * signal is being masked, look for other signals.
2906 */
2907 mask = sigmask(signum);
2908 ut->uu_siglist |= mask;
2909 if (ut->uu_sigmask & mask) {
2910 continue;
2911 }
2912 }
2913
2914 /*
2915 * Decide whether the signal should be returned.
2916 * Return the signal's number, or fall through
2917 * to clear it from the pending mask.
2918 */
2919
2920 switch ((long)p->p_sigacts->ps_sigact[signum]) {
2921 case (long)SIG_DFL:
2922 /*
2923 * If there is a pending stop signal to process
2924 * with default action, stop here,
2925 * then clear the signal. However,
2926 * if process is member of an orphaned
2927 * process group, ignore tty stop signals.
2928 */
2929 if (prop & SA_STOP) {
2930 struct pgrp * pg;
2931
2932 proc_unlock(p);
2933 pg = proc_pgrp(p);
2934 if (p->p_lflag & P_LTRACED ||
2935 (pg->pg_jobc == 0 &&
2936 prop & SA_TTYSTOP)) {
2937 proc_lock(p);
2938 pg_rele(pg);
2939 break; /* ignore signal */
2940 }
2941 pg_rele(pg);
2942 if (p->p_stat != SSTOP) {
2943 proc_lock(p);
2944 p->p_xstat = signum;
2945 p->p_stat = SSTOP;
2946 p->p_lflag &= ~P_LWAITED;
2947 proc_unlock(p);
2948
2949 pp = proc_parentholdref(p);
2950 stop(p, pp);
2951 if ((pp != PROC_NULL) && ((pp->p_flag & P_NOCLDSTOP) == 0)) {
2952 my_cred = kauth_cred_proc_ref(p);
2953 r_uid = kauth_cred_getruid(my_cred);
2954 kauth_cred_unref(&my_cred);
2955
2956 proc_lock(pp);
2957 pp->si_pid = p->p_pid;
2958 pp->si_status = WEXITSTATUS(p->p_xstat);
2959 pp->si_code = CLD_STOPPED;
2960 pp->si_uid = r_uid;
2961 proc_unlock(pp);
2962
2963 psignal(pp, SIGCHLD);
2964 }
2965 if (pp != PROC_NULL) {
2966 proc_parentdropref(pp, 0);
2967 }
2968 }
2969 proc_lock(p);
2970 break;
2971 } else if (prop & SA_IGNORE) {
2972 /*
2973 * Except for SIGCONT, shouldn't get here.
2974 * Default action is to ignore; drop it.
2975 */
2976 break; /* ignore signal */
2977 } else {
2978 goto deliver_sig;
2979 }
2980
2981 case (long)SIG_IGN:
2982 /*
2983 * Masking above should prevent us ever trying
2984 * to take action on an ignored signal other
2985 * than SIGCONT, unless process is traced.
2986 */
2987 if ((prop & SA_CONT) == 0 &&
2988 (p->p_lflag & P_LTRACED) == 0) {
2989 printf("issignal\n");
2990 }
2991 break; /* ignore signal */
2992
2993 default:
2994 /* This signal has an action - deliver it. */
2995 goto deliver_sig;
2996 }
2997
2998 /* If we dropped through, the signal was ignored - remove it from pending list. */
2999 ut->uu_siglist &= ~mask;
3000 } /* for(;;) */
3001
3002 /* NOTREACHED */
3003
3004 deliver_sig:
3005 ut->uu_siglist &= ~mask;
3006 retval = signum;
3007
3008 out:
3009 proc_signalend(p, 1);
3010 return retval;
3011 }
3012
3013 /* called from _sleep */
3014 int
3015 CURSIG(proc_t p)
3016 {
3017 int signum, mask, prop, sigbits;
3018 thread_t cur_act;
3019 struct uthread * ut;
3020 int retnum = 0;
3021
3022
3023 cur_act = current_thread();
3024
3025 ut = get_bsdthread_info(cur_act);
3026
3027 if (ut->uu_siglist == 0) {
3028 return 0;
3029 }
3030
3031 if (((ut->uu_siglist & ~ut->uu_sigmask) == 0) && ((p->p_lflag & P_LTRACED) == 0)) {
3032 return 0;
3033 }
3034
3035 sigbits = ut->uu_siglist & ~ut->uu_sigmask;
3036
3037 for (;;) {
3038 if (p->p_lflag & P_LPPWAIT) {
3039 sigbits &= ~stopsigmask;
3040 }
3041 if (sigbits == 0) { /* no signal to send */
3042 return retnum;
3043 }
3044
3045 signum = ffs((long)sigbits);
3046 mask = sigmask(signum);
3047 prop = sigprop[signum];
3048 sigbits &= ~mask; /* take the signal out */
3049
3050 /*
3051 * We should see pending but ignored signals
3052 * only if P_LTRACED was on when they were posted.
3053 */
3054 if (mask & p->p_sigignore && (p->p_lflag & P_LTRACED) == 0) {
3055 continue;
3056 }
3057
3058 if (p->p_lflag & P_LTRACED && (p->p_lflag & P_LPPWAIT) == 0) {
3059 return signum;
3060 }
3061
3062 /*
3063 * Decide whether the signal should be returned.
3064 * Return the signal's number, or fall through
3065 * to clear it from the pending mask.
3066 */
3067
3068 switch ((long)p->p_sigacts->ps_sigact[signum]) {
3069 case (long)SIG_DFL:
3070 /*
3071 * If there is a pending stop signal to process
3072 * with default action, stop here,
3073 * then clear the signal. However,
3074 * if process is member of an orphaned
3075 * process group, ignore tty stop signals.
3076 */
3077 if (prop & SA_STOP) {
3078 struct pgrp *pg;
3079
3080 pg = proc_pgrp(p);
3081
3082 if (p->p_lflag & P_LTRACED ||
3083 (pg->pg_jobc == 0 &&
3084 prop & SA_TTYSTOP)) {
3085 pg_rele(pg);
3086 break; /* == ignore */
3087 }
3088 pg_rele(pg);
3089 retnum = signum;
3090 break;
3091 } else if (prop & SA_IGNORE) {
3092 /*
3093 * Except for SIGCONT, shouldn't get here.
3094 * Default action is to ignore; drop it.
3095 */
3096 break; /* == ignore */
3097 } else {
3098 return signum;
3099 }
3100 /*NOTREACHED*/
3101
3102 case (long)SIG_IGN:
3103 /*
3104 * Masking above should prevent us ever trying
3105 * to take action on an ignored signal other
3106 * than SIGCONT, unless process is traced.
3107 */
3108 if ((prop & SA_CONT) == 0 &&
3109 (p->p_lflag & P_LTRACED) == 0) {
3110 printf("issignal\n");
3111 }
3112 break; /* == ignore */
3113
3114 default:
3115 /*
3116 * This signal has an action, let
3117 * postsig() process it.
3118 */
3119 return signum;
3120 }
3121 }
3122 /* NOTREACHED */
3123 }
3124
3125 /*
3126 * Put the argument process into the stopped state and notify the parent
3127 * via wakeup. Signals are handled elsewhere. The process must not be
3128 * on the run queue.
3129 */
3130 static void
3131 stop(proc_t p, proc_t parent)
3132 {
3133 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
3134 if ((parent != PROC_NULL) && (parent->p_stat != SSTOP)) {
3135 proc_list_lock();
3136 wakeup((caddr_t)parent);
3137 proc_list_unlock();
3138 }
3139 (void) task_suspend_internal(p->task);
3140 }
3141
3142 /*
3143 * Take the action for the specified signal
3144 * from the current set of pending signals.
3145 */
3146 void
3147 postsig_locked(int signum)
3148 {
3149 proc_t p = current_proc();
3150 struct sigacts *ps = p->p_sigacts;
3151 user_addr_t catcher;
3152 uint32_t code;
3153 int mask, returnmask;
3154 struct uthread * ut;
3155 os_reason_t ut_exit_reason = OS_REASON_NULL;
3156
3157 #if DIAGNOSTIC
3158 if (signum == 0) {
3159 panic("postsig");
3160 }
3161 /*
3162 * This must be called on master cpu
3163 */
3164 if (cpu_number() != master_cpu) {
3165 panic("psig not on master");
3166 }
3167 #endif
3168
3169 /*
3170 * Try to grab the signal lock.
3171 */
3172 if (sig_try_locked(p) <= 0) {
3173 return;
3174 }
3175
3176 proc_signalstart(p, 1);
3177
3178 ut = (struct uthread *)get_bsdthread_info(current_thread());
3179 mask = sigmask(signum);
3180 ut->uu_siglist &= ~mask;
3181 catcher = ps->ps_sigact[signum];
3182 if (catcher == SIG_DFL) {
3183 /*
3184 * Default catcher, where the default is to kill
3185 * the process. (Other cases were ignored above.)
3186 */
3187 sig_lock_to_exit(p);
3188
3189 /*
3190 * exit_with_reason() below will consume a reference to the thread's exit reason, so we take another
3191 * reference so the thread still has one even after we call exit_with_reason(). The thread's reference will
3192 * ultimately be destroyed in uthread_cleanup().
3193 */
3194 ut_exit_reason = ut->uu_exit_reason;
3195 os_reason_ref(ut_exit_reason);
3196
3197 p->p_acflag |= AXSIG;
3198 if (sigprop[signum] & SA_CORE) {
3199 p->p_sigacts->ps_sig = signum;
3200 proc_signalend(p, 1);
3201 proc_unlock(p);
3202 #if CONFIG_COREDUMP
3203 if (coredump(p, 0, 0) == 0) {
3204 signum |= WCOREFLAG;
3205 }
3206 #endif
3207 } else {
3208 proc_signalend(p, 1);
3209 proc_unlock(p);
3210 }
3211
3212 #if CONFIG_DTRACE
3213 bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));
3214
3215 ut->t_dtrace_siginfo.si_signo = signum;
3216 ut->t_dtrace_siginfo.si_pid = p->si_pid;
3217 ut->t_dtrace_siginfo.si_uid = p->si_uid;
3218 ut->t_dtrace_siginfo.si_status = WEXITSTATUS(p->si_status);
3219
3220 /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
3221 switch (signum) {
3222 case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
3223 DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
3224 break;
3225 default:
3226 break;
3227 }
3228
3229
3230 DTRACE_PROC3(signal__handle, int, signum, siginfo_t *, &(ut->t_dtrace_siginfo),
3231 void (*)(void), SIG_DFL);
3232 #endif
3233
3234 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_FRCEXIT) | DBG_FUNC_NONE,
3235 p->p_pid, W_EXITCODE(0, signum), 3, 0, 0);
3236
3237 exit_with_reason(p, W_EXITCODE(0, signum), (int *)NULL, TRUE, TRUE, 0, ut_exit_reason);
3238
3239 proc_lock(p);
3240 return;
3241 } else {
3242 /*
3243 * If we get here, the signal must be caught.
3244 */
3245 #if DIAGNOSTIC
3246 if (catcher == SIG_IGN || (ut->uu_sigmask & mask)) {
3247 log(LOG_WARNING,
3248 "postsig: processing masked or ignored signal\n");
3249 }
3250 #endif
3251
3252 /*
3253 * Set the new mask value and also defer further
3254 * occurrences of this signal.
3255 *
3256 * Special case: user has done a sigpause. Here the
3257 * current mask is not of interest, but rather the
3258 * mask from before the sigpause is what we want
3259 * restored after the signal processing is completed.
3260 */
3261 if (ut->uu_flag & UT_SAS_OLDMASK) {
3262 returnmask = ut->uu_oldmask;
3263 ut->uu_flag &= ~UT_SAS_OLDMASK;
3264 ut->uu_oldmask = 0;
3265 } else {
3266 returnmask = ut->uu_sigmask;
3267 }
3268 ut->uu_sigmask |= ps->ps_catchmask[signum];
3269 if ((ps->ps_signodefer & mask) == 0) {
3270 ut->uu_sigmask |= mask;
3271 }
3272 if ((signum != SIGILL) && (signum != SIGTRAP) && (ps->ps_sigreset & mask)) {
3273 if ((signum != SIGCONT) && (sigprop[signum] & SA_IGNORE)) {
3274 p->p_sigignore |= mask;
3275 }
3276 ps->ps_sigact[signum] = SIG_DFL;
3277 ps->ps_siginfo &= ~mask;
3278 ps->ps_signodefer &= ~mask;
3279 }
3280
3281 if (ps->ps_sig != signum) {
3282 code = 0;
3283 } else {
3284 code = ps->ps_code;
3285 ps->ps_code = 0;
3286 }
3287 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_nsignals);
3288 sendsig(p, catcher, signum, returnmask, code);
3289 }
3290 proc_signalend(p, 1);
3291 }
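/*
 * The ps_sigreset branch above implements sigaction(2)'s SA_RESETHAND:
 * once the caught signal is posted, the disposition reverts to SIG_DFL.
 * A minimal user-space sketch of that behavior (not part of the kernel
 * build):
 */
#if 0
#include <signal.h>
#include <stdio.h>

static void
once(int sig)
{
	(void)sig;
	/* With SA_RESETHAND the disposition is already back to SIG_DFL
	 * when this runs (the ps_sigact[signum] = SIG_DFL reset above). */
}

int
main(void)
{
	struct sigaction sa;

	sa.sa_handler = once;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_RESETHAND;
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);                 /* caught once, handler then reset */
	printf("first delivery caught\n");
	/* A second raise(SIGUSR1) here would take the default action
	 * (termination), matching the reset-to-default path in postsig. */
	return 0;
}
#endif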
3292
3293 /*
3294 * Attach a signal knote to the list of knotes for this process.
3295 *
3296 * Signal knotes share the knote list with proc knotes. This
3297 * could be avoided by using a signal-specific knote list, but
3298 * probably isn't worth the trouble.
3299 */
3300
3301 static int
3302 filt_sigattach(struct knote *kn, __unused struct kevent_internal_s *kev)
3303 {
3304 proc_t p = current_proc(); /* can attach only to oneself */
3305
3306 proc_klist_lock();
3307
3308 kn->kn_ptr.p_proc = p;
3309
3310 KNOTE_ATTACH(&p->p_klist, kn);
3311
3312 proc_klist_unlock();
3313
3314 /* edge-triggered events can't have fired before we attached */
3315 return 0;
3316 }
3317
3318 /*
3319 * remove the knote from the process list, if it hasn't already
3320 * been removed by exit processing.
3321 */
3322
3323 static void
3324 filt_sigdetach(struct knote *kn)
3325 {
3326 proc_t p = kn->kn_ptr.p_proc;
3327
3328 proc_klist_lock();
3329 kn->kn_ptr.p_proc = NULL;
3330 KNOTE_DETACH(&p->p_klist, kn);
3331 proc_klist_unlock();
3332 }
3333
3334 /*
3335 * Post an event to the signal filter. Because we share the same list
3336 * as process knotes, we have to filter out and handle only signal events.
3337 *
3338 * We assume that we process fdfree() before we post the NOTE_EXIT for
3339 * a process during exit. Therefore, since signal filters can only be
3340 * set up "in-process", we should have already torn down the kqueue
3341 * hosting the EVFILT_SIGNAL knote and should never see NOTE_EXIT.
3342 */
3343 static int
3344 filt_signal(struct knote *kn, long hint)
3345 {
3346 if (hint & NOTE_SIGNAL) {
3347 hint &= ~NOTE_SIGNAL;
3348
3349 if (kn->kn_id == (unsigned int)hint) {
3350 kn->kn_data++;
3351 }
3352 } else if (hint & NOTE_EXIT) {
3353 panic("filt_signal: detected NOTE_EXIT event");
3354 }
3355
3356 return kn->kn_data != 0;
3357 }
3358
3359 static int
3360 filt_signaltouch(
3361 struct knote *kn,
3362 struct kevent_internal_s *kev)
3363 {
3364 #pragma unused(kev)
3365
3366 int res;
3367
3368 proc_klist_lock();
3369
3370 /*
3371 * No data to save - just capture if it is already fired
3372 */
3373 res = (kn->kn_data > 0);
3374
3375 proc_klist_unlock();
3376
3377 return res;
3378 }
3379
3380 static int
3381 filt_signalprocess(
3382 struct knote *kn,
3383 __unused struct filt_process_s *data,
3384 struct kevent_internal_s *kev)
3385 {
3386 proc_klist_lock();
3387
3388 if (kn->kn_data == 0) {
3389 proc_klist_unlock();
3390 return 0;
3391 }
3392
3393 /*
3394 * Snapshot the event data.
3395 * All signal events are EV_CLEAR, so
3396 * add that and clear out the data field.
3397 */
3398 *kev = kn->kn_kevent;
3399 kev->flags |= EV_CLEAR;
3400 kn->kn_data = 0;
3401
3402 proc_klist_unlock();
3403 return 1;
3404 }
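/*
 * These filter routines back EVFILT_SIGNAL. A minimal user-space consumer
 * (not part of the kernel build); kqueue(2) observes delivery counts
 * independently of the signal disposition, so the signal is typically
 * also set to SIG_IGN:
 */
#if 0
#include <sys/event.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int kq = kqueue();
	struct kevent kev;

	signal(SIGUSR1, SIG_IGN);
	EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);     /* filt_sigattach() */

	kill(getpid(), SIGUSR1);                /* bumps kn_data */

	/* filt_signalprocess() snapshots kn_data and clears it (EV_CLEAR) */
	if (kevent(kq, NULL, 0, &kev, 1, NULL) == 1) {
		printf("signal %lu fired %ld time(s)\n",
		    (unsigned long)kev.ident, (long)kev.data);
	}
	close(kq);
	return 0;
}
#endif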
3405
3406 void
3407 bsd_ast(thread_t thread)
3408 {
3409 proc_t p = current_proc();
3410 struct uthread *ut = get_bsdthread_info(thread);
3411 int signum;
3412 user_addr_t pc;
3413 static int bsd_init_done = 0;
3414
3415 if (p == NULL) {
3416 return;
3417 }
3418
3419 /* don't run bsd ast on exec copy or exec'ed tasks */
3420 if (task_did_exec(current_task()) || task_is_exec_copy(current_task())) {
3421 return;
3422 }
3423
3424 if ((p->p_flag & P_OWEUPC) && (p->p_flag & P_PROFIL)) {
3425 pc = get_useraddr();
3426 addupc_task(p, pc, 1);
3427 OSBitAndAtomic(~((uint32_t)P_OWEUPC), &p->p_flag);
3428 }
3429
3430 if (timerisset(&p->p_vtimer_user.it_value)) {
3431 uint32_t microsecs;
3432
3433 task_vtimer_update(p->task, TASK_VTIMER_USER, &microsecs);
3434
3435 if (!itimerdecr(p, &p->p_vtimer_user, microsecs)) {
3436 if (timerisset(&p->p_vtimer_user.it_value)) {
3437 task_vtimer_set(p->task, TASK_VTIMER_USER);
3438 } else {
3439 task_vtimer_clear(p->task, TASK_VTIMER_USER);
3440 }
3441
3442 psignal_try_thread(p, thread, SIGVTALRM);
3443 }
3444 }
3445
3446 if (timerisset(&p->p_vtimer_prof.it_value)) {
3447 uint32_t microsecs;
3448
3449 task_vtimer_update(p->task, TASK_VTIMER_PROF, &microsecs);
3450
3451 if (!itimerdecr(p, &p->p_vtimer_prof, microsecs)) {
3452 if (timerisset(&p->p_vtimer_prof.it_value)) {
3453 task_vtimer_set(p->task, TASK_VTIMER_PROF);
3454 } else {
3455 task_vtimer_clear(p->task, TASK_VTIMER_PROF);
3456 }
3457
3458 psignal_try_thread(p, thread, SIGPROF);
3459 }
3460 }
3461
3462 if (timerisset(&p->p_rlim_cpu)) {
3463 struct timeval tv;
3464
3465 task_vtimer_update(p->task, TASK_VTIMER_RLIM, (uint32_t *) &tv.tv_usec);
3466
3467 proc_spinlock(p);
3468 if (p->p_rlim_cpu.tv_sec > 0 || p->p_rlim_cpu.tv_usec > tv.tv_usec) {
3469 tv.tv_sec = 0;
3470 timersub(&p->p_rlim_cpu, &tv, &p->p_rlim_cpu);
3471 proc_spinunlock(p);
3472 } else {
3473 timerclear(&p->p_rlim_cpu);
3474 proc_spinunlock(p);
3475
3476 task_vtimer_clear(p->task, TASK_VTIMER_RLIM);
3477
3478 psignal_try_thread(p, thread, SIGXCPU);
3479 }
3480 }
3481
3482 #if CONFIG_DTRACE
3483 if (ut->t_dtrace_sig) {
3484 uint8_t dt_action_sig = ut->t_dtrace_sig;
3485 ut->t_dtrace_sig = 0;
3486 psignal(p, dt_action_sig);
3487 }
3488
3489 if (ut->t_dtrace_stop) {
3490 ut->t_dtrace_stop = 0;
3491 proc_lock(p);
3492 p->p_dtrace_stop = 1;
3493 proc_unlock(p);
3494 (void)task_suspend_internal(p->task);
3495 }
3496
3497 if (ut->t_dtrace_resumepid) {
3498 proc_t resumeproc = proc_find(ut->t_dtrace_resumepid);
3499 ut->t_dtrace_resumepid = 0;
3500 if (resumeproc != PROC_NULL) {
3501 proc_lock(resumeproc);
3502 /* We only act on processes stopped by dtrace */
3503 if (resumeproc->p_dtrace_stop) {
3504 resumeproc->p_dtrace_stop = 0;
3505 proc_unlock(resumeproc);
3506 task_resume_internal(resumeproc->task);
3507 } else {
3508 proc_unlock(resumeproc);
3509 }
3510 proc_rele(resumeproc);
3511 }
3512 }
3513
3514 #endif /* CONFIG_DTRACE */
3515
3516 proc_lock(p);
3517 if (CHECK_SIGNALS(p, current_thread(), ut)) {
3518 while ((signum = issignal_locked(p))) {
3519 postsig_locked(signum);
3520 }
3521 }
3522 proc_unlock(p);
3523
3524 #ifdef CONFIG_32BIT_TELEMETRY
3525 if (task_consume_32bit_log_flag(p->task)) {
3526 proc_log_32bit_telemetry(p);
3527 }
3528 #endif /* CONFIG_32BIT_TELEMETRY */
3529
3530 if (!bsd_init_done) {
3531 bsd_init_done = 1;
3532 bsdinit_task();
3533 }
3534 }
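/*
 * The vtimer blocks above are the kernel half of setitimer(2)'s
 * ITIMER_VIRTUAL/ITIMER_PROF: when itimerdecr() reports expiry, bsd_ast()
 * posts SIGVTALRM or SIGPROF with psignal_try_thread(), preferring the
 * thread that took the AST. A minimal user-space sketch (not part of the
 * kernel build):
 */
#if 0
#include <sys/time.h>
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t fired;

static void
on_vtalrm(int sig)
{
	(void)sig;
	fired = 1;
}

int
main(void)
{
	struct itimerval itv = {
		.it_value = { .tv_sec = 0, .tv_usec = 100000 }  /* 100 ms */
	};

	signal(SIGVTALRM, on_vtalrm);
	/* ITIMER_VIRTUAL counts user CPU time only. */
	setitimer(ITIMER_VIRTUAL, &itv, NULL);

	while (!fired) {
		/* burn user CPU so the virtual timer advances */
	}
	printf("SIGVTALRM delivered\n");
	return 0;
}
#endif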
3535
3536 /* ptrace set runnable */
3537 void
3538 pt_setrunnable(proc_t p)
3539 {
3540 task_t task;
3541
3542 task = p->task;
3543
3544 if (p->p_lflag & P_LTRACED) {
3545 proc_lock(p);
3546 p->p_stat = SRUN;
3547 proc_unlock(p);
3548 if (p->sigwait) {
3549 wakeup((caddr_t)&(p->sigwait));
3550 if ((p->p_lflag & P_LSIGEXC) == 0) { // 5878479
3551 task_release(task);
3552 }
3553 }
3554 }
3555 }
3556
3557 kern_return_t
3558 do_bsdexception(
3559 int exc,
3560 int code,
3561 int sub)
3562 {
3563 mach_exception_data_type_t codes[EXCEPTION_CODE_MAX];
3564
3565 codes[0] = code;
3566 codes[1] = sub;
3567 return bsd_exception(exc, codes, 2);
3568 }
3569
3570 int
3571 proc_pendingsignals(proc_t p, sigset_t mask)
3572 {
3573 struct uthread * uth;
3574 thread_t th;
3575 sigset_t bits = 0;
3576
3577 proc_lock(p);
3578 /* If the process is in proc exit, return no signal info */
3579 if (p->p_lflag & P_LPEXIT) {
3580 goto out;
3581 }
3582
3583 if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
3584 th = p->p_vforkact;
3585 uth = (struct uthread *)get_bsdthread_info(th);
3586 if (uth) {
3587 bits = (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
3588 }
3589 goto out;
3590 }
3591
3592 bits = 0;
3593 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
3594 bits |= (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
3595 }
3596 out:
3597 proc_unlock(p);
3598 return bits;
3599 }
3600
3601 int
3602 thread_issignal(proc_t p, thread_t th, sigset_t mask)
3603 {
3604 struct uthread * uth;
3605 sigset_t bits = 0;
3606
3607 proc_lock(p);
3608 uth = (struct uthread *)get_bsdthread_info(th);
3609 if (uth) {
3610 bits = (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
3611 }
3612 proc_unlock(p);
3613 return bits;
3614 }
3615
3616 /*
3617 * Allow external reads of the sigprop array.
3618 */
3619 int
3620 hassigprop(int sig, int prop)
3621 {
3622 return sigprop[sig] & prop;
3623 }
3624
3625 void
3626 pgsigio(pid_t pgid, int sig)
3627 {
3628 proc_t p = PROC_NULL;
3629
3630 if (pgid < 0) {
3631 gsignal(-(pgid), sig);
3632 } else if (pgid > 0 && (p = proc_find(pgid)) != 0) {
3633 psignal(p, sig);
3634 }
3635 if (p != PROC_NULL) {
3636 proc_rele(p);
3637 }
3638 }
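/*
 * pgsigio() is how SIGIO/SIGURG reach an async-I/O owner set with
 * fcntl(2) F_SETOWN; a negative owner names a process group, which is
 * the gsignal(-pgid) branch above. A rough user-space sketch (not part
 * of the kernel build; O_ASYNC support varies by file type, sockets
 * being the most reliable):
 */
#if 0
#include <sys/socket.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static void
on_sigio(int sig)
{
	(void)sig;
	write(STDOUT_FILENO, "SIGIO\n", 6);
}

int
main(void)
{
	int fds[2];

	signal(SIGIO, on_sigio);
	socketpair(AF_UNIX, SOCK_STREAM, 0, fds);

	/* Negative owner == process group: routed via pgsigio(pgid < 0). */
	fcntl(fds[0], F_SETOWN, -getpgrp());
	fcntl(fds[0], F_SETFL, O_ASYNC);

	write(fds[1], "x", 1);          /* fds[0] readable -> SIGIO */
	pause();
	return 0;
}
#endif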
3639
3640 void
3641 proc_signalstart(proc_t p, int locked)
3642 {
3643 if (!locked) {
3644 proc_lock(p);
3645 }
3646
3647 if (p->p_signalholder == current_thread()) {
3648 panic("proc_signalstart: thread attempting to signal a process for which it holds the signal lock");
3649 }
3650
3651 p->p_sigwaitcnt++;
3652 while ((p->p_lflag & P_LINSIGNAL) == P_LINSIGNAL) {
3653 msleep(&p->p_sigmask, &p->p_mlock, 0, "proc_signstart", NULL);
3654 }
3655 p->p_sigwaitcnt--;
3656
3657 p->p_lflag |= P_LINSIGNAL;
3658 p->p_signalholder = current_thread();
3659 if (!locked) {
3660 proc_unlock(p);
3661 }
3662 }
3663
3664 void
3665 proc_signalend(proc_t p, int locked)
3666 {
3667 if (!locked) {
3668 proc_lock(p);
3669 }
3670 p->p_lflag &= ~P_LINSIGNAL;
3671
3672 if (p->p_sigwaitcnt > 0) {
3673 wakeup(&p->p_sigmask);
3674 }
3675
3676 p->p_signalholder = NULL;
3677 if (!locked) {
3678 proc_unlock(p);
3679 }
3680 }
3681
3682 void
3683 sig_lock_to_exit(proc_t p)
3684 {
3685 thread_t self = current_thread();
3686
3687 p->exit_thread = self;
3688 proc_unlock(p);
3689
3690 task_hold(p->task);
3691 task_wait(p->task, FALSE);
3692
3693 proc_lock(p);
3694 }
3695
3696 int
3697 sig_try_locked(proc_t p)
3698 {
3699 thread_t self = current_thread();
3700
3701 while (p->sigwait || p->exit_thread) {
3702 if (p->exit_thread) {
3703 return 0;
3704 }
3705 msleep((caddr_t)&p->sigwait_thread, &p->p_mlock, PCATCH | PDROP, 0, 0);
3706 if (thread_should_abort(self)) {
3707 /*
3708 * Terminate request - clean up.
3709 */
3710 proc_lock(p);
3711 return -1;
3712 }
3713 proc_lock(p);
3714 }
3715 return 1;
3716 }