/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Center for Software Science (CSS). All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 * Author: Bryan Ford, University of Utah CSS
 *
 * Thread management routines
 */

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/thread_act_server.h>

#include <kern/kern_types.h>
#include <kern/ast.h>
#include <kern/mach_param.h>
#include <kern/zalloc.h>
#include <kern/extmod_statistics.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/exception.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/machine.h>
#include <kern/spl.h>
#include <kern/syscall_subr.h>
#include <kern/sync_lock.h>
#include <kern/processor.h>
#include <kern/timer.h>
#include <kern/affinity.h>

#include <stdatomic.h>

#include <security/mac_mach_internal.h>

static void act_abort(thread_t thread);

static void thread_suspended(void *arg, wait_result_t result);
static void thread_set_apc_ast(thread_t thread);
static void thread_set_apc_ast_locked(thread_t thread);

/*
 * Internal routine to mark a thread as started.
 * Always called with the thread mutex locked.
 */
void
thread_start(
    thread_t thread)
{
    clear_wait(thread, THREAD_AWAKENED);
    thread->started = TRUE;
}

/*
 * Internal routine to mark a thread as waiting
 * right after it has been created. The caller is
 * responsible for calling wakeup()/thread_wakeup()
 * or thread_terminate() to get it going.
 *
 * Always called with the thread mutex locked.
 *
 * Task and task_threads mutexes also held
 * (so nobody can set the thread running before
 * this point)
 *
 * Converts TH_UNINT wait to THREAD_INTERRUPTIBLE
 * to allow termination from this point forward.
 */
void
thread_start_in_assert_wait(
    thread_t thread,
    event_t event,
    wait_interrupt_t interruptible)
{
    struct waitq *waitq = assert_wait_queue(event);
    wait_result_t wait_result;
    spl_t spl;

    spl = splsched();
    waitq_lock(waitq);

    /* clear out startup condition (safe because thread not started yet) */
    thread_lock(thread);
    assert(!thread->started);
    assert((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT));
    thread->state &= ~(TH_WAIT | TH_UNINT);
    thread_unlock(thread);

    /* assert wait interruptibly forever */
    wait_result = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
        interruptible,
        TIMEOUT_URGENCY_SYS_NORMAL,
        TIMEOUT_WAIT_FOREVER,
        TIMEOUT_NO_LEEWAY,
        thread);
    assert(wait_result == THREAD_WAITING);

    /* mark thread started while we still hold the waitq lock */
    thread_lock(thread);
    thread->started = TRUE;
    thread_unlock(thread);

    waitq_unlock(waitq);
    splx(spl);
}

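/*
 * Illustrative sketch (not compiled): how a create-waiting path might
 * pair with thread_start_in_assert_wait().  The surrounding helper is
 * hypothetical; the point is only the start/wakeup pairing described
 * in the comment above.
 *
 *     // with the task, task_threads, and thread mutexes held:
 *     thread_start_in_assert_wait(new_thread, event, THREAD_INTERRUPTIBLE);
 *     // ... later, some other context gets it running:
 *     thread_wakeup(event);    // or thread_terminate(new_thread)
 */
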
/*
 * Internal routine to terminate a thread.
 * Sometimes called with task already locked.
 */
kern_return_t
thread_terminate_internal(
    thread_t thread)
{
    kern_return_t result = KERN_SUCCESS;

    thread_mtx_lock(thread);

    if (thread->active) {
        thread->active = FALSE;

        act_abort(thread);

        if (thread->started) {
            clear_wait(thread, THREAD_INTERRUPTED);
        } else {
            thread_start(thread);
        }
    } else {
        result = KERN_TERMINATED;
    }

    if (thread->affinity_set != NULL) {
        thread_affinity_terminate(thread);
    }

    thread_mtx_unlock(thread);

    if (thread != current_thread() && result == KERN_SUCCESS) {
        thread_wait(thread, FALSE);
    }

    return result;
}

/*
 * Terminate a thread.
 */
kern_return_t
thread_terminate(
    thread_t thread)
{
    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    /* Kernel threads can't be terminated without their own cooperation */
    if (thread->task == kernel_task && thread != current_thread()) {
        return KERN_FAILURE;
    }

    kern_return_t result = thread_terminate_internal(thread);

    /*
     * If a kernel thread is terminating itself, force handle the APC_AST here.
     * Kernel threads don't pass through the return-to-user AST checking code,
     * but all threads must finish their own termination in thread_apc_ast.
     */
    if (thread->task == kernel_task) {
        assert(thread->active == FALSE);
        thread_ast_clear(thread, AST_APC);
        thread_apc_ast(thread);

        panic("thread_terminate");
        /* NOTREACHED */
    }

    return result;
}

/*
 * Suspend execution of the specified thread.
 * This is a recursive-style suspension of the thread;
 * a count of suspends is maintained.
 *
 * Called with thread mutex held.
 */
void
thread_hold(thread_t thread)
{
    if (thread->suspend_count++ == 0) {
        thread_set_apc_ast(thread);
        assert(thread->suspend_parked == FALSE);
    }
}

/*
 * Decrement internal suspension count, setting thread
 * runnable when count falls to zero.
 *
 * Because the wait is abortsafe, we can't be guaranteed that the thread
 * is currently actually waiting even if suspend_parked is set.
 *
 * Called with thread mutex held.
 */
void
thread_release(thread_t thread)
{
    assertf(thread->suspend_count > 0, "thread %p over-resumed", thread);

    /* fail-safe on non-assert builds */
    if (thread->suspend_count == 0) {
        return;
    }

    if (--thread->suspend_count == 0) {
        if (!thread->started) {
            thread_start(thread);
        } else if (thread->suspend_parked) {
            thread->suspend_parked = FALSE;
            thread_wakeup_thread(&thread->suspend_count, thread);
        }
    }
}

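/*
 * thread_suspend:
 *
 * User-visible (MIG) suspend.  Maintains a separate user_stop_count on
 * top of the internal suspend_count; only the 0 -> 1 transition takes
 * an internal hold.  If the target is not the caller, this waits until
 * the thread is actually off-CPU, so the suspension is in effect when
 * the call returns.
 */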
kern_return_t
thread_suspend(thread_t thread)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL || thread->task == kernel_task) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread->user_stop_count++ == 0) {
            thread_hold(thread);
        }
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    if (thread != current_thread() && result == KERN_SUCCESS) {
        thread_wait(thread, FALSE);
    }

    return result;
}

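/*
 * thread_resume:
 *
 * Undo one thread_suspend().  Fails with KERN_FAILURE if the thread
 * was not suspended by the user (user_stop_count == 0).
 */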
kern_return_t
thread_resume(thread_t thread)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL || thread->task == kernel_task) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread->user_stop_count > 0) {
            if (--thread->user_stop_count == 0) {
                thread_release(thread);
            }
        } else {
            result = KERN_FAILURE;
        }
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return result;
}

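/*
 * Illustrative sketch (not compiled): the suspend/resume pair as seen
 * from user space via the MIG interface that lands in the two routines
 * above.  `t` is assumed to be a valid send right; error handling is
 * elided.
 *
 *     thread_act_t t;       // Mach port for some thread in a task
 *     thread_suspend(t);    // thread is stopped when this returns
 *     // ... inspect or modify the thread ...
 *     thread_resume(t);     // balance the suspend
 */
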
/*
 * thread_depress_abort_from_user:
 *
 * Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort_from_user(thread_t thread)
{
    kern_return_t result;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        result = thread_depress_abort(thread);
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return result;
}


/*
 * Indicate that the thread should run the AST_APC callback
 * to detect an abort condition.
 *
 * Called with thread mutex held.
 */
static void
act_abort(
    thread_t thread)
{
    spl_t s = splsched();

    thread_lock(thread);

    if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
        thread->sched_flags |= TH_SFLAG_ABORT;
        thread_set_apc_ast_locked(thread);
        thread_depress_abort_locked(thread);
    } else {
        thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;
    }

    thread_unlock(thread);
    splx(s);
}

kern_return_t
thread_abort(
    thread_t thread)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        act_abort(thread);
        clear_wait(thread, THREAD_INTERRUPTED);
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return result;
}

kern_return_t
thread_abort_safely(
    thread_t thread)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        spl_t s = splsched();

        thread_lock(thread);
        if (!thread->at_safe_point ||
            clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
            if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
                thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
                thread_set_apc_ast_locked(thread);
                thread_depress_abort_locked(thread);
            }
        }
        thread_unlock(thread);
        splx(s);
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return result;
}

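/*
 * Note: thread_abort() unconditionally interrupts the target's current
 * wait (clear_wait with THREAD_INTERRUPTED), while thread_abort_safely()
 * interrupts it only if the thread is at a safe point; otherwise it
 * arms TH_SFLAG_ABORTED_MASK so the abort is noticed the next time the
 * thread reaches a safe point.  This is the distinction user space sees
 * between aborting a thread outright and aborting it "safely".
 */
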
/*** backward compatibility hacks ***/
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <ipc/ipc_port.h>

kern_return_t
thread_info(
    thread_t thread,
    thread_flavor_t flavor,
    thread_info_t thread_info_out,
    mach_msg_type_number_t *thread_info_count)
{
    kern_return_t result;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active || thread->inspection) {
        result = thread_info_internal(
            thread, flavor, thread_info_out, thread_info_count);
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return result;
}

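/*
 * The get/set-state style routines below share one pattern for
 * operating on another thread's machine state:
 *
 *     thread_hold(thread);              // block resumption
 *     thread_mtx_unlock(thread);
 *     if (thread_stop(thread, ...)) {   // wait for it to stop running
 *         thread_mtx_lock(thread);
 *         ... machine_thread_*() ...    // safe: target is off-CPU
 *         thread_unstop(thread);
 *     } else {
 *         thread_mtx_lock(thread);
 *         result = KERN_ABORTED;        // the stop was interrupted
 *     }
 *     thread_release(thread);           // balance the hold
 *
 * The current thread can operate on its own state directly, without
 * the hold/stop dance.
 */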
static inline kern_return_t
thread_get_state_internal(
    thread_t thread,
    int flavor,
    thread_state_t state,                   /* pointer to OUT array */
    mach_msg_type_number_t *state_count,    /* IN/OUT */
    boolean_t to_user)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread != current_thread()) {
            thread_hold(thread);

            thread_mtx_unlock(thread);

            if (thread_stop(thread, FALSE)) {
                thread_mtx_lock(thread);
                result = machine_thread_get_state(
                    thread, flavor, state, state_count);
                thread_unstop(thread);
            } else {
                thread_mtx_lock(thread);
                result = KERN_ABORTED;
            }

            thread_release(thread);
        } else {
            result = machine_thread_get_state(
                thread, flavor, state, state_count);
        }
    } else if (thread->inspection) {
        result = machine_thread_get_state(
            thread, flavor, state, state_count);
    } else {
        result = KERN_TERMINATED;
    }

    if (to_user && result == KERN_SUCCESS) {
        result = machine_thread_state_convert_to_user(thread, flavor, state,
            state_count);
    }

    thread_mtx_unlock(thread);

    return result;
}

/* No prototype, since thread_act_server.h has the _to_user version if KERNEL_SERVER */

kern_return_t
thread_get_state(
    thread_t thread,
    int flavor,
    thread_state_t state,
    mach_msg_type_number_t *state_count);

kern_return_t
thread_get_state(
    thread_t thread,
    int flavor,
    thread_state_t state,                   /* pointer to OUT array */
    mach_msg_type_number_t *state_count)    /* IN/OUT */
{
    return thread_get_state_internal(thread, flavor, state, state_count, FALSE);
}

kern_return_t
thread_get_state_to_user(
    thread_t thread,
    int flavor,
    thread_state_t state,                   /* pointer to OUT array */
    mach_msg_type_number_t *state_count)    /* IN/OUT */
{
    return thread_get_state_internal(thread, flavor, state, state_count, TRUE);
}

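/*
 * Illustrative sketch (not compiled): what a user-space caller of the
 * MIG thread_get_state() routine looks like.  The flavor and state type
 * are platform-specific; x86_THREAD_STATE64 is used here as an example,
 * and `thread_port` is assumed to be a valid send right.
 *
 *     x86_thread_state64_t ts;
 *     mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
 *     kern_return_t kr = thread_get_state(thread_port, x86_THREAD_STATE64,
 *         (thread_state_t)&ts, &count);
 *     // on success, ts.__rip etc. hold the (user-converted) register state
 */
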
/*
 * Change thread's machine-dependent state.  Called with nothing
 * locked.  Returns same way.
 */
static inline kern_return_t
thread_set_state_internal(
    thread_t thread,
    int flavor,
    thread_state_t state,
    mach_msg_type_number_t state_count,
    boolean_t from_user)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        if (from_user) {
            result = machine_thread_state_convert_from_user(thread, flavor,
                state, state_count);
            if (result != KERN_SUCCESS) {
                goto out;
            }
        }
        if (thread != current_thread()) {
            thread_hold(thread);

            thread_mtx_unlock(thread);

            if (thread_stop(thread, TRUE)) {
                thread_mtx_lock(thread);
                result = machine_thread_set_state(
                    thread, flavor, state, state_count);
                thread_unstop(thread);
            } else {
                thread_mtx_lock(thread);
                result = KERN_ABORTED;
            }

            thread_release(thread);
        } else {
            result = machine_thread_set_state(
                thread, flavor, state, state_count);
        }
    } else {
        result = KERN_TERMINATED;
    }

    if ((result == KERN_SUCCESS) && from_user) {
        extmod_statistics_incr_thread_set_state(thread);
    }

out:
    thread_mtx_unlock(thread);

    return result;
}

/* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
kern_return_t
thread_set_state(
    thread_t thread,
    int flavor,
    thread_state_t state,
    mach_msg_type_number_t state_count);

kern_return_t
thread_set_state(
    thread_t thread,
    int flavor,
    thread_state_t state,
    mach_msg_type_number_t state_count)
{
    return thread_set_state_internal(thread, flavor, state, state_count, FALSE);
}

kern_return_t
thread_set_state_from_user(
    thread_t thread,
    int flavor,
    thread_state_t state,
    mach_msg_type_number_t state_count)
{
    return thread_set_state_internal(thread, flavor, state, state_count, TRUE);
}

/*
 * Kernel-internal "thread" interfaces used outside this file:
 */

/*
 * Initialize (or re-initialize) a thread state.  Called from execve
 * with nothing locked, returns same way.
 */
kern_return_t
thread_state_initialize(
    thread_t thread)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread != current_thread()) {
            thread_hold(thread);

            thread_mtx_unlock(thread);

            if (thread_stop(thread, TRUE)) {
                thread_mtx_lock(thread);
                result = machine_thread_state_initialize(thread);
                thread_unstop(thread);
            } else {
                thread_mtx_lock(thread);
                result = KERN_ABORTED;
            }

            thread_release(thread);
        } else {
            result = machine_thread_state_initialize(thread);
        }
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return result;
}


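/*
 * thread_dup:
 *
 * Copy the current thread's machine state into `target` (e.g. on the
 * fork path).  Uses the same hold/stop/unstop/release pattern as the
 * set-state routines above, and propagates the caller's affinity set.
 */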
kern_return_t
thread_dup(
    thread_t target)
{
    thread_t self = current_thread();
    kern_return_t result = KERN_SUCCESS;

    if (target == THREAD_NULL || target == self) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(target);

    if (target->active) {
        thread_hold(target);

        thread_mtx_unlock(target);

        if (thread_stop(target, TRUE)) {
            thread_mtx_lock(target);
            result = machine_thread_dup(self, target, FALSE);

            if (self->affinity_set != AFFINITY_SET_NULL) {
                thread_affinity_dup(self, target);
            }
            thread_unstop(target);
        } else {
            thread_mtx_lock(target);
            result = KERN_ABORTED;
        }

        thread_release(target);
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(target);

    return result;
}


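/*
 * thread_dup2:
 *
 * Like thread_dup(), but copies machine state from an explicit
 * `source` thread rather than from the caller, and will also operate
 * on a target that is merely under inspection (e.g. a corpse thread
 * being populated).
 */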
kern_return_t
thread_dup2(
    thread_t source,
    thread_t target)
{
    kern_return_t result = KERN_SUCCESS;
    uint32_t active = 0;

    if (source == THREAD_NULL || target == THREAD_NULL || target == source) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(source);
    active = source->active;
    thread_mtx_unlock(source);

    if (!active) {
        return KERN_TERMINATED;
    }

    thread_mtx_lock(target);

    if (target->active || target->inspection) {
        thread_hold(target);

        thread_mtx_unlock(target);

        if (thread_stop(target, TRUE)) {
            thread_mtx_lock(target);
            result = machine_thread_dup(source, target, TRUE);
            if (source->affinity_set != AFFINITY_SET_NULL) {
                thread_affinity_dup(source, target);
            }
            thread_unstop(target);
        } else {
            thread_mtx_lock(target);
            result = KERN_ABORTED;
        }

        thread_release(target);
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(target);

    return result;
}

/*
 * thread_setstatus:
 *
 * Set the status of the specified thread.
 * Called with (and returns with) no locks held.
 */
kern_return_t
thread_setstatus(
    thread_t thread,
    int flavor,
    thread_state_t tstate,
    mach_msg_type_number_t count)
{
    return thread_set_state(thread, flavor, tstate, count);
}

kern_return_t
thread_setstatus_from_user(
    thread_t thread,
    int flavor,
    thread_state_t tstate,
    mach_msg_type_number_t count)
{
    return thread_set_state_from_user(thread, flavor, tstate, count);
}

/*
 * thread_getstatus:
 *
 * Get the status of the specified thread.
 */
kern_return_t
thread_getstatus(
    thread_t thread,
    int flavor,
    thread_state_t tstate,
    mach_msg_type_number_t *count)
{
    return thread_get_state(thread, flavor, tstate, count);
}

kern_return_t
thread_getstatus_to_user(
    thread_t thread,
    int flavor,
    thread_state_t tstate,
    mach_msg_type_number_t *count)
{
    return thread_get_state_to_user(thread, flavor, tstate, count);
}

/*
 * Change thread's machine-dependent userspace TSD base.
 * Called with nothing locked.  Returns same way.
 */
kern_return_t
thread_set_tsd_base(
    thread_t thread,
    mach_vm_offset_t tsd_base)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread != current_thread()) {
            thread_hold(thread);

            thread_mtx_unlock(thread);

            if (thread_stop(thread, TRUE)) {
                thread_mtx_lock(thread);
                result = machine_thread_set_tsd_base(thread, tsd_base);
                thread_unstop(thread);
            } else {
                thread_mtx_lock(thread);
                result = KERN_ABORTED;
            }

            thread_release(thread);
        } else {
            result = machine_thread_set_tsd_base(thread, tsd_base);
        }
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return result;
}

/*
 * thread_set_apc_ast:
 *
 * Register the AST_APC callback that handles suspension and
 * termination, if it hasn't been installed already.
 *
 * Called with the thread mutex held.
 */
static void
thread_set_apc_ast(thread_t thread)
{
    spl_t s = splsched();

    thread_lock(thread);
    thread_set_apc_ast_locked(thread);
    thread_unlock(thread);

    splx(s);
}

/*
 * thread_set_apc_ast_locked:
 *
 * Do the work of registering for the AST_APC callback.
 *
 * Called with the thread mutex and scheduling lock held.
 */
static void
thread_set_apc_ast_locked(thread_t thread)
{
    thread_ast_set(thread, AST_APC);

    if (thread == current_thread()) {
        ast_propagate(thread);
    } else {
        processor_t processor = thread->last_processor;

        if (processor != PROCESSOR_NULL &&
            processor->state == PROCESSOR_RUNNING &&
            processor->active_thread == thread) {
            cause_ast_check(processor);
        }
    }
}

/*
 * Activation control support routines internal to this file:
 *
 */

/*
 * thread_suspended
 *
 * Continuation routine for thread suspension.  It checks
 * to see whether there have been any new suspensions.  If so, it
 * installs the AST_APC handler again.
 */
__attribute__((noreturn))
static void
thread_suspended(__unused void *parameter, wait_result_t result)
{
    thread_t thread = current_thread();

    thread_mtx_lock(thread);

    if (result == THREAD_INTERRUPTED) {
        thread->suspend_parked = FALSE;
    } else {
        assert(thread->suspend_parked == FALSE);
    }

    if (thread->suspend_count > 0) {
        thread_set_apc_ast(thread);
    }

    thread_mtx_unlock(thread);

    thread_exception_return();
    /*NOTREACHED*/
}

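/*
 * Note on delivery: AST_APC is normally handled on the way back to
 * user space (the return-to-user AST check calls into thread_apc_ast
 * when the bit is set).  Kernel threads never take that path, which is
 * why thread_terminate() above invokes thread_apc_ast() by hand for a
 * self-terminating kernel thread.
 */
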
/*
 * thread_apc_ast - handles AST_APC and drives thread suspension and termination.
 * Called with nothing locked.  Returns (if it returns) the same way.
 */
void
thread_apc_ast(thread_t thread)
{
    thread_mtx_lock(thread);

    assert(thread->suspend_parked == FALSE);

    spl_t s = splsched();
    thread_lock(thread);

    /* TH_SFLAG_POLLDEPRESS is OK to have here */
    assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);

    thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
    thread_unlock(thread);
    splx(s);

    if (!thread->active) {
        /* Thread is ready to terminate, time to tear it down */
        thread_mtx_unlock(thread);

        thread_terminate_self();
        /*NOTREACHED*/
    }

    /* If we're suspended, go to sleep and wait for someone to wake us up. */
    if (thread->suspend_count > 0) {
        thread->suspend_parked = TRUE;
        assert_wait(&thread->suspend_count,
            THREAD_ABORTSAFE | THREAD_WAIT_NOREPORT_USER);
        thread_mtx_unlock(thread);

        thread_block(thread_suspended);
        /*NOTREACHED*/
    }

    thread_mtx_unlock(thread);
}

/* Prototype, see justification above */
kern_return_t
act_set_state(
    thread_t thread,
    int flavor,
    thread_state_t state,
    mach_msg_type_number_t count);

kern_return_t
act_set_state(
    thread_t thread,
    int flavor,
    thread_state_t state,
    mach_msg_type_number_t count)
{
    if (thread == current_thread()) {
        return KERN_INVALID_ARGUMENT;
    }

    return thread_set_state(thread, flavor, state, count);
}

kern_return_t
act_set_state_from_user(
    thread_t thread,
    int flavor,
    thread_state_t state,
    mach_msg_type_number_t count)
{
    if (thread == current_thread()) {
        return KERN_INVALID_ARGUMENT;
    }

    return thread_set_state_from_user(thread, flavor, state, count);
}

/* Prototype, see justification above */
kern_return_t
act_get_state(
    thread_t thread,
    int flavor,
    thread_state_t state,
    mach_msg_type_number_t *count);

kern_return_t
act_get_state(
    thread_t thread,
    int flavor,
    thread_state_t state,
    mach_msg_type_number_t *count)
{
    if (thread == current_thread()) {
        return KERN_INVALID_ARGUMENT;
    }

    return thread_get_state(thread, flavor, state, count);
}

kern_return_t
act_get_state_to_user(
    thread_t thread,
    int flavor,
    thread_state_t state,
    mach_msg_type_number_t *count)
{
    if (thread == current_thread()) {
        return KERN_INVALID_ARGUMENT;
    }

    return thread_get_state_to_user(thread, flavor, state, count);
}

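/*
 * act_set_ast:
 *
 * Set an AST bit on a thread and make sure it is noticed promptly:
 * propagate immediately for the current thread, or IPI the processor
 * the target was last seen running on.
 *
 * Called with nothing locked; takes the thread lock for remote targets.
 */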
static void
act_set_ast(
    thread_t thread,
    ast_t ast)
{
    spl_t s = splsched();

    if (thread == current_thread()) {
        thread_ast_set(thread, ast);
        ast_propagate(thread);
    } else {
        processor_t processor;

        thread_lock(thread);
        thread_ast_set(thread, ast);
        processor = thread->last_processor;
        if (processor != PROCESSOR_NULL &&
            processor->state == PROCESSOR_RUNNING &&
            processor->active_thread == thread) {
            cause_ast_check(processor);
        }
        thread_unlock(thread);
    }

    splx(s);
}

/*
 * Set an AST on a thread without causing an AST check
 * and without taking the thread lock.
 *
 * If the thread is not the current thread, then it may take
 * up until the next context switch or quantum expiration
 * on that thread for it to notice the AST.
 */
static void
act_set_ast_async(thread_t thread,
    ast_t ast)
{
    thread_ast_set(thread, ast);

    if (thread == current_thread()) {
        spl_t s = splsched();
        ast_propagate(thread);
        splx(s);
    }
}

void
act_set_astbsd(
    thread_t thread)
{
    act_set_ast(thread, AST_BSD);
}

void
act_set_astkevent(thread_t thread, uint16_t bits)
{
    os_atomic_or(&thread->kevent_ast_bits, bits, relaxed);

    /* kevent AST shouldn't send immediate IPIs */
    act_set_ast_async(thread, AST_KEVENT);
}

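/*
 * act_clear_astkevent:
 *
 * Clear the requested kevent AST bits, returning the subset of `bits`
 * that was actually set (so the caller knows which work it now owns).
 */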
uint16_t
act_clear_astkevent(thread_t thread, uint16_t bits)
{
    /*
     * avoid the atomic operation if none of the bits is set,
     * which will be the common case.
     */
    uint16_t cur = os_atomic_load(&thread->kevent_ast_bits, relaxed);
    if (cur & bits) {
        cur = os_atomic_andnot_orig(&thread->kevent_ast_bits, bits, relaxed);
    }
    return cur & bits;
}

void
act_set_ast_reset_pcs(thread_t thread)
{
    act_set_ast(thread, AST_RESET_PCS);
}

void
act_set_kperf(
    thread_t thread)
{
    /* safety check */
    if (thread != current_thread()) {
        if (!ml_get_interrupts_enabled()) {
            panic("unsafe act_set_kperf operation");
        }
    }

    act_set_ast(thread, AST_KPERF);
}

#if CONFIG_MACF
void
act_set_astmacf(
    thread_t thread)
{
    act_set_ast(thread, AST_MACF);
}
#endif

void
act_set_astledger(thread_t thread)
{
    act_set_ast(thread, AST_LEDGER);
}

/*
 * The ledger AST may need to be set while already holding
 * the thread lock.  This routine skips sending the IPI,
 * allowing us to avoid the lock hold.
 *
 * However, it means the targeted thread must context switch
 * to recognize the ledger AST.
 */
void
act_set_astledger_async(thread_t thread)
{
    act_set_ast_async(thread, AST_LEDGER);
}

void
act_set_io_telemetry_ast(thread_t thread)
{
    act_set_ast(thread, AST_TELEMETRY_IO);
}