/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Center for Software Science (CSS).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 * Author: Bryan Ford, University of Utah CSS
 *
 * Thread management routines
 */

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/thread_act_server.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/zalloc.h>
#include <kern/extmod_statistics.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/exception.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/machine.h>
#include <kern/syscall_subr.h>
#include <kern/sync_lock.h>
#include <kern/processor.h>
#include <kern/timer.h>
#include <kern/affinity.h>

#include <stdatomic.h>

#include <security/mac_mach_internal.h>

static void act_abort(thread_t thread);

static void thread_suspended(void *arg, wait_result_t result);
static void thread_set_apc_ast(thread_t thread);
static void thread_set_apc_ast_locked(thread_t thread);

/*
 * Internal routine to mark a thread as started.
 * Always called with the thread mutex locked.
 */
void
thread_start(
    thread_t    thread)
{
    clear_wait(thread, THREAD_AWAKENED);
    thread->started = TRUE;
}

/*
 * Internal routine to mark a thread as waiting
 * right after it has been created.  The caller
 * is responsible to call wakeup()/thread_wakeup()
 * or thread_terminate() to get it going.
 *
 * Always called with the thread mutex locked.
 *
 * Task and task_threads mutexes also held
 * (so nobody can set the thread running before
 * this point)
 *
 * Converts TH_UNINT wait to THREAD_INTERRUPTIBLE
 * to allow termination from this point forward.
 */
void
thread_start_in_assert_wait(
    thread_t            thread,
    event_t             event,
    wait_interrupt_t    interruptible)
{
    struct waitq *waitq = assert_wait_queue(event);
    wait_result_t wait_result;
    spl_t spl;

    spl = splsched();
    waitq_lock(waitq);

    /* clear out startup condition (safe because thread not started yet) */
    thread_lock(thread);
    assert(!thread->started);
    assert((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT));
    thread->state &= ~(TH_WAIT | TH_UNINT);
    thread_unlock(thread);

    /* assert wait interruptibly forever */
    wait_result = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
        interruptible,
        TIMEOUT_URGENCY_SYS_NORMAL,
        TIMEOUT_WAIT_FOREVER, TIMEOUT_NO_LEEWAY,
        thread);
    assert(wait_result == THREAD_WAITING);

    /* mark thread started while we still hold the waitq lock */
    thread_lock(thread);
    thread->started = TRUE;
    thread_unlock(thread);

    waitq_unlock(waitq);
    splx(spl);
}
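
/*
 * Illustrative sketch, not part of the original file: per the comment
 * above, a caller that creates a thread which must begin life blocked
 * would, with the thread/task/task_threads mutexes held, do roughly
 * (the names "new_thread" and "event" are hypothetical):
 *
 *	thread_start_in_assert_wait(new_thread, event, THREAD_INTERRUPTIBLE);
 *	...
 *	thread_wakeup(event);	// later: makes the thread runnable
 */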

/*
 * Internal routine to terminate a thread.
 * Sometimes called with task already locked.
 */
kern_return_t
thread_terminate_internal(
    thread_t    thread)
{
    kern_return_t result = KERN_SUCCESS;

    thread_mtx_lock(thread);

    if (thread->active) {
        thread->active = FALSE;

        act_abort(thread);

        if (thread->started) {
            clear_wait(thread, THREAD_INTERRUPTED);
        } else {
            thread_start(thread);
        }
    } else {
        result = KERN_TERMINATED;
    }

    if (thread->affinity_set != NULL) {
        thread_affinity_terminate(thread);
    }

    thread_mtx_unlock(thread);

    if (thread != current_thread() && result == KERN_SUCCESS) {
        thread_wait(thread, FALSE);
    }

    return result;
}

/*
 * Terminate a thread.
 */
kern_return_t
thread_terminate(
    thread_t    thread)
{
    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    /* Kernel threads can't be terminated without their own cooperation */
    if (thread->task == kernel_task && thread != current_thread()) {
        return KERN_FAILURE;
    }

    kern_return_t result = thread_terminate_internal(thread);

    /*
     * If a kernel thread is terminating itself, force handle the APC_AST here.
     * Kernel threads don't pass through the return-to-user AST checking code,
     * but all threads must finish their own termination in thread_apc_ast.
     */
    if (thread->task == kernel_task) {
        assert(thread->active == FALSE);
        thread_ast_clear(thread, AST_APC);
        thread_apc_ast(thread);

        panic("thread_terminate");
        /* NOTREACHED */
    }

    return result;
}

/*
 * Suspend execution of the specified thread.
 * This is a recursive-style suspension of the thread, a count of
 * suspends is maintained.
 *
 * Called with thread mutex held.
 */
void
thread_hold(thread_t thread)
{
    if (thread->suspend_count++ == 0) {
        thread_set_apc_ast(thread);
        assert(thread->suspend_parked == FALSE);
    }
}

/*
 * Decrement internal suspension count, setting thread
 * runnable when count falls to zero.
 *
 * Because the wait is abortsafe, we can't be guaranteed that the thread
 * is currently actually waiting even if suspend_parked is set.
 *
 * Called with thread mutex held.
 */
void
thread_release(thread_t thread)
{
    assertf(thread->suspend_count > 0, "thread %p over-resumed", thread);

    /* fail-safe on non-assert builds */
    if (thread->suspend_count == 0) {
        return;
    }

    if (--thread->suspend_count == 0) {
        if (!thread->started) {
            thread_start(thread);
        } else if (thread->suspend_parked) {
            thread->suspend_parked = FALSE;
            thread_wakeup_thread(&thread->suspend_count, thread);
        }
    }
}
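
/*
 * Illustrative sketch, not part of the original file: thread_hold() and
 * thread_release() are counted and must balance before the thread runs
 * again. With the thread mutex held:
 *
 *	thread_hold(thread);	// count 0 -> 1, arms the AST_APC handler
 *	thread_hold(thread);	// count 1 -> 2, no extra work
 *	...
 *	thread_release(thread);	// count 2 -> 1, still suspended
 *	thread_release(thread);	// count 1 -> 0, wakes a parked thread
 */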

kern_return_t
thread_suspend(thread_t thread)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL || thread->task == kernel_task) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread->user_stop_count++ == 0) {
            thread_hold(thread);
        }
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    if (thread != current_thread() && result == KERN_SUCCESS) {
        thread_wait(thread, FALSE);
    }

    return result;
}

kern_return_t
thread_resume(thread_t thread)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL || thread->task == kernel_task) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread->user_stop_count > 0) {
            if (--thread->user_stop_count == 0) {
                thread_release(thread);
            }
        } else {
            result = KERN_FAILURE;
        }
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return result;
}
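
/*
 * Illustrative sketch, not part of the original file: these two routines
 * back the Mach thread_suspend()/thread_resume() calls, and the user stop
 * count nests the same way ("port" is a hypothetical thread port):
 *
 *	thread_suspend(port);	// stop count 0 -> 1, thread held
 *	thread_suspend(port);	// stop count 1 -> 2
 *	thread_resume(port);	// stop count 2 -> 1, still suspended
 *	thread_resume(port);	// stop count 1 -> 0, thread released
 *	thread_resume(port);	// KERN_FAILURE: count already zero
 */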

/*
 * thread_depress_abort_from_user:
 *
 * Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort_from_user(thread_t thread)
{
    kern_return_t result;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        result = thread_depress_abort(thread);
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return result;
}

/*
 * Indicate that the thread should run the AST_APC callback
 * to detect an abort condition.
 *
 * Called with thread mutex held.
 */
static void
act_abort(
    thread_t    thread)
{
    spl_t s = splsched();

    thread_lock(thread);

    if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
        thread->sched_flags |= TH_SFLAG_ABORT;
        thread_set_apc_ast_locked(thread);
        thread_depress_abort_locked(thread);
    } else {
        thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;
    }

    thread_unlock(thread);
    splx(s);
}

kern_return_t
thread_abort(
    thread_t    thread)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        act_abort(thread);
        clear_wait(thread, THREAD_INTERRUPTED);
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return result;
}

kern_return_t
thread_abort_safely(
    thread_t    thread)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        spl_t s = splsched();

        thread_lock(thread);
        if (!thread->at_safe_point ||
            clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
            if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
                thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
                thread_set_apc_ast_locked(thread);
                thread_depress_abort_locked(thread);
            }
        }
        thread_unlock(thread);
        splx(s);
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return result;
}

/*** backward compatibility hacks ***/
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <ipc/ipc_port.h>

kern_return_t
thread_info(
    thread_t                thread,
    thread_flavor_t         flavor,
    thread_info_t           thread_info_out,
    mach_msg_type_number_t  *thread_info_count)
{
    kern_return_t result;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active || thread->inspection) {
        result = thread_info_internal(
            thread, flavor, thread_info_out, thread_info_count);
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return result;
}

static inline kern_return_t
thread_get_state_internal(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,          /* pointer to OUT array */
    mach_msg_type_number_t  *state_count,   /*IN/OUT*/
    boolean_t               to_user)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread != current_thread()) {
            thread_hold(thread);

            thread_mtx_unlock(thread);

            if (thread_stop(thread, FALSE)) {
                thread_mtx_lock(thread);
                result = machine_thread_get_state(
                    thread, flavor, state, state_count);
                thread_unstop(thread);
            } else {
                thread_mtx_lock(thread);
                result = KERN_ABORTED;
            }

            thread_release(thread);
        } else {
            result = machine_thread_get_state(
                thread, flavor, state, state_count);
        }
    } else if (thread->inspection) {
        result = machine_thread_get_state(
            thread, flavor, state, state_count);
    } else {
        result = KERN_TERMINATED;
    }

    if (to_user && result == KERN_SUCCESS) {
        result = machine_thread_state_convert_to_user(thread, flavor, state,
            state_count);
    }

    thread_mtx_unlock(thread);

    return result;
}
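
/*
 * Illustrative note, not part of the original file: the remote-thread
 * branch above is the hold/stop/operate/unstop/release pattern this file
 * reuses in thread_set_state_internal(), thread_state_initialize(),
 * thread_dup(), thread_dup2(), and thread_set_tsd_base():
 *
 *	thread_hold(thread);			// keep it from running again
 *	thread_mtx_unlock(thread);
 *	if (thread_stop(thread, ...)) {		// wait until it is off-core
 *		thread_mtx_lock(thread);
 *		... machine_thread_*() operation ...
 *		thread_unstop(thread);
 *	} else {				// the stop was interrupted
 *		thread_mtx_lock(thread);
 *		result = KERN_ABORTED;
 *	}
 *	thread_release(thread);
 */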

/* No prototype, since thread_act_server.h has the _to_user version if KERNEL_SERVER */
kern_return_t
thread_get_state(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  *state_count);

kern_return_t
thread_get_state(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,          /* pointer to OUT array */
    mach_msg_type_number_t  *state_count)   /*IN/OUT*/
{
    return thread_get_state_internal(thread, flavor, state, state_count, FALSE);
}

kern_return_t
thread_get_state_to_user(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,          /* pointer to OUT array */
    mach_msg_type_number_t  *state_count)   /*IN/OUT*/
{
    return thread_get_state_internal(thread, flavor, state, state_count, TRUE);
}
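
/*
 * Illustrative sketch, not part of the original file: a user-space
 * debugger reaches this path through the MIG-generated thread_get_state()
 * with a machine flavor, e.g. on arm64:
 *
 *	arm_thread_state64_t ts;
 *	mach_msg_type_number_t count = ARM_THREAD_STATE64_COUNT;
 *	kern_return_t kr = thread_get_state(thread_port, ARM_THREAD_STATE64,
 *	    (thread_state_t)&ts, &count);
 */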

/*
 * Change thread's machine-dependent state.  Called with nothing
 * locked.  Returns same way.
 */
static inline kern_return_t
thread_set_state_internal(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  state_count,
    boolean_t               from_user)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        if (from_user) {
            result = machine_thread_state_convert_from_user(thread, flavor,
                state, state_count);
            if (result != KERN_SUCCESS) {
                goto out;
            }
        }
        if (thread != current_thread()) {
            thread_hold(thread);

            thread_mtx_unlock(thread);

            if (thread_stop(thread, TRUE)) {
                thread_mtx_lock(thread);
                result = machine_thread_set_state(
                    thread, flavor, state, state_count);
                thread_unstop(thread);
            } else {
                thread_mtx_lock(thread);
                result = KERN_ABORTED;
            }

            thread_release(thread);
        } else {
            result = machine_thread_set_state(
                thread, flavor, state, state_count);
        }
    } else {
        result = KERN_TERMINATED;
    }

    if ((result == KERN_SUCCESS) && from_user) {
        extmod_statistics_incr_thread_set_state(thread);
    }

out:
    thread_mtx_unlock(thread);

    return result;
}

/* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
kern_return_t
thread_set_state(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  state_count);

kern_return_t
thread_set_state(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  state_count)
{
    return thread_set_state_internal(thread, flavor, state, state_count, FALSE);
}

kern_return_t
thread_set_state_from_user(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  state_count)
{
    return thread_set_state_internal(thread, flavor, state, state_count, TRUE);
}

/*
 * Kernel-internal "thread" interfaces used outside this file:
 */

/* Initialize (or re-initialize) a thread state.  Called from execve
 * with nothing locked, returns same way.
 */
kern_return_t
thread_state_initialize(
    thread_t    thread)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread != current_thread()) {
            thread_hold(thread);

            thread_mtx_unlock(thread);

            if (thread_stop(thread, TRUE)) {
                thread_mtx_lock(thread);
                result = machine_thread_state_initialize(thread);
                thread_unstop(thread);
            } else {
                thread_mtx_lock(thread);
                result = KERN_ABORTED;
            }

            thread_release(thread);
        } else {
            result = machine_thread_state_initialize(thread);
        }
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return result;
}

kern_return_t
thread_dup(
    thread_t    target)
{
    thread_t self = current_thread();
    kern_return_t result = KERN_SUCCESS;

    if (target == THREAD_NULL || target == self) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(target);

    if (target->active) {
        thread_hold(target);

        thread_mtx_unlock(target);

        if (thread_stop(target, TRUE)) {
            thread_mtx_lock(target);
            result = machine_thread_dup(self, target, FALSE);

            if (self->affinity_set != AFFINITY_SET_NULL) {
                thread_affinity_dup(self, target);
            }
            thread_unstop(target);
        } else {
            thread_mtx_lock(target);
            result = KERN_ABORTED;
        }

        thread_release(target);
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(target);

    return result;
}

kern_return_t
thread_dup2(
    thread_t    source,
    thread_t    target)
{
    kern_return_t result = KERN_SUCCESS;
    uint32_t active = 0;

    if (source == THREAD_NULL || target == THREAD_NULL || target == source) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(source);
    active = source->active;
    thread_mtx_unlock(source);

    if (!active) {
        return KERN_TERMINATED;
    }

    thread_mtx_lock(target);

    if (target->active || target->inspection) {
        thread_hold(target);

        thread_mtx_unlock(target);

        if (thread_stop(target, TRUE)) {
            thread_mtx_lock(target);
            result = machine_thread_dup(source, target, TRUE);
            if (source->affinity_set != AFFINITY_SET_NULL) {
                thread_affinity_dup(source, target);
            }
            thread_unstop(target);
        } else {
            thread_mtx_lock(target);
            result = KERN_ABORTED;
        }

        thread_release(target);
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(target);

    return result;
}

/*
 * thread_setstatus:
 *
 * Set the status of the specified thread.
 * Called with (and returns with) no locks held.
 */
kern_return_t
thread_setstatus(
    thread_t                thread,
    int                     flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  count)
{
    return thread_set_state(thread, flavor, tstate, count);
}

kern_return_t
thread_setstatus_from_user(
    thread_t                thread,
    int                     flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  count)
{
    return thread_set_state_from_user(thread, flavor, tstate, count);
}

/*
 * thread_getstatus:
 *
 * Get the status of the specified thread.
 */
kern_return_t
thread_getstatus(
    thread_t                thread,
    int                     flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  *count)
{
    return thread_get_state(thread, flavor, tstate, count);
}

kern_return_t
thread_getstatus_to_user(
    thread_t                thread,
    int                     flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  *count)
{
    return thread_get_state_to_user(thread, flavor, tstate, count);
}

/*
 * Change thread's machine-dependent userspace TSD base.
 * Called with nothing locked.  Returns same way.
 */
kern_return_t
thread_set_tsd_base(
    thread_t            thread,
    mach_vm_offset_t    tsd_base)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread != current_thread()) {
            thread_hold(thread);

            thread_mtx_unlock(thread);

            if (thread_stop(thread, TRUE)) {
                thread_mtx_lock(thread);
                result = machine_thread_set_tsd_base(thread, tsd_base);
                thread_unstop(thread);
            } else {
                thread_mtx_lock(thread);
                result = KERN_ABORTED;
            }

            thread_release(thread);
        } else {
            result = machine_thread_set_tsd_base(thread, tsd_base);
        }
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return result;
}

/*
 * thread_set_apc_ast:
 *
 * Register the AST_APC callback that handles suspension and
 * termination, if it hasn't been installed already.
 *
 * Called with the thread mutex held.
 */
static void
thread_set_apc_ast(thread_t thread)
{
    spl_t s = splsched();

    thread_lock(thread);
    thread_set_apc_ast_locked(thread);
    thread_unlock(thread);

    splx(s);
}

/*
 * thread_set_apc_ast_locked:
 *
 * Do the work of registering for the AST_APC callback.
 *
 * Called with the thread mutex and scheduling lock held.
 */
static void
thread_set_apc_ast_locked(thread_t thread)
{
    thread_ast_set(thread, AST_APC);

    if (thread == current_thread()) {
        ast_propagate(thread);
    } else {
        processor_t processor = thread->last_processor;

        if (processor != PROCESSOR_NULL &&
            processor->state == PROCESSOR_RUNNING &&
            processor->active_thread == thread) {
            cause_ast_check(processor);
        }
    }
}
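
/*
 * Illustrative note, not part of the original file: AST bits are only
 * examined at defined points (return to user, context switch), so for a
 * thread on-core on another processor the bit alone is not enough.
 * Schematically:
 *
 *	thread_ast_set(thread, AST_APC);	// bit set, not yet noticed
 *	cause_ast_check(processor);		// IPI: target re-checks ASTs now
 */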

/*
 * Activation control support routines internal to this file:
 */

/*
 * thread_suspended
 *
 * Continuation routine for thread suspension.  It checks
 * to see whether there has been any new suspensions.  If so, it
 * installs the AST_APC handler again.
 */
__attribute__((noreturn))
static void
thread_suspended(__unused void *parameter, wait_result_t result)
{
    thread_t thread = current_thread();

    thread_mtx_lock(thread);

    if (result == THREAD_INTERRUPTED) {
        thread->suspend_parked = FALSE;
    } else {
        assert(thread->suspend_parked == FALSE);
    }

    if (thread->suspend_count > 0) {
        thread_set_apc_ast(thread);
    }

    thread_mtx_unlock(thread);

    thread_exception_return();
}

/*
 * thread_apc_ast - handles AST_APC and drives thread suspension and termination.
 * Called with nothing locked.  Returns (if it returns) the same way.
 */
void
thread_apc_ast(thread_t thread)
{
    thread_mtx_lock(thread);

    assert(thread->suspend_parked == FALSE);

    spl_t s = splsched();
    thread_lock(thread);

    /* TH_SFLAG_POLLDEPRESS is OK to have here */
    assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);

    thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
    thread_unlock(thread);
    splx(s);

    if (!thread->active) {
        /* Thread is ready to terminate, time to tear it down */
        thread_mtx_unlock(thread);

        thread_terminate_self();
        /* NOTREACHED */
    }

    /* If we're suspended, go to sleep and wait for someone to wake us up. */
    if (thread->suspend_count > 0) {
        thread->suspend_parked = TRUE;
        assert_wait(&thread->suspend_count,
            THREAD_ABORTSAFE | THREAD_WAIT_NOREPORT_USER);
        thread_mtx_unlock(thread);

        thread_block(thread_suspended);
        /* NOTREACHED */
    }

    thread_mtx_unlock(thread);
}
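
/*
 * Illustrative note, not part of the original file: the suspension round
 * trip pairs the park here with the wakeup in thread_release():
 *
 *	thread_apc_ast():   suspend_parked = TRUE;
 *	                    assert_wait(&thread->suspend_count, ...);
 *	                    thread_block(thread_suspended);
 *	thread_release():   suspend_parked = FALSE;
 *	                    thread_wakeup_thread(&thread->suspend_count, thread);
 */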

/* Prototype, see justification above */
kern_return_t
act_set_state(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  count);

kern_return_t
act_set_state(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  count)
{
    if (thread == current_thread()) {
        return KERN_INVALID_ARGUMENT;
    }

    return thread_set_state(thread, flavor, state, count);
}

kern_return_t
act_set_state_from_user(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  count)
{
    if (thread == current_thread()) {
        return KERN_INVALID_ARGUMENT;
    }

    return thread_set_state_from_user(thread, flavor, state, count);
}

/* Prototype, see justification above */
kern_return_t
act_get_state(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  *count);

kern_return_t
act_get_state(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  *count)
{
    if (thread == current_thread()) {
        return KERN_INVALID_ARGUMENT;
    }

    return thread_get_state(thread, flavor, state, count);
}

kern_return_t
act_get_state_to_user(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  *count)
{
    if (thread == current_thread()) {
        return KERN_INVALID_ARGUMENT;
    }

    return thread_get_state_to_user(thread, flavor, state, count);
}

static void
act_set_ast(
    thread_t    thread,
    ast_t       ast)
{
    spl_t s = splsched();

    if (thread == current_thread()) {
        thread_ast_set(thread, ast);
        ast_propagate(thread);
    } else {
        processor_t processor;

        thread_lock(thread);
        thread_ast_set(thread, ast);
        processor = thread->last_processor;
        if (processor != PROCESSOR_NULL &&
            processor->state == PROCESSOR_RUNNING &&
            processor->active_thread == thread) {
            cause_ast_check(processor);
        }
        thread_unlock(thread);
    }

    splx(s);
}

/*
 * set AST on thread without causing an AST check
 * and without taking the thread lock
 *
 * If thread is not the current thread, then it may take
 * up until the next context switch or quantum expiration
 * on that thread for it to notice the AST.
 */
static void
act_set_ast_async(thread_t thread,
    ast_t ast)
{
    thread_ast_set(thread, ast);

    if (thread == current_thread()) {
        spl_t s = splsched();
        ast_propagate(thread);
        splx(s);
    }
}

void
act_set_astbsd(
    thread_t    thread)
{
    act_set_ast(thread, AST_BSD);
}

void
act_set_astkevent(thread_t thread, uint16_t bits)
{
    os_atomic_or(&thread->kevent_ast_bits, bits, relaxed);

    /* kevent AST shouldn't send immediate IPIs */
    act_set_ast_async(thread, AST_KEVENT);
}

uint16_t
act_clear_astkevent(thread_t thread, uint16_t bits)
{
    /*
     * avoid the atomic operation if none of the bits is set,
     * which will be the common case.
     */
    uint16_t cur = os_atomic_load(&thread->kevent_ast_bits, relaxed);
    if (cur & bits) {
        cur = os_atomic_andnot_orig(&thread->kevent_ast_bits, bits, relaxed);
    }
    return cur & bits;
}

void
act_set_ast_reset_pcs(thread_t thread)
{
    act_set_ast(thread, AST_RESET_PCS);
}

void
act_set_kperf(thread_t thread)
{
    /* safety check */
    if (thread != current_thread()) {
        if (!ml_get_interrupts_enabled()) {
            panic("unsafe act_set_kperf operation");
        }
    }

    act_set_ast(thread, AST_KPERF);
}

#ifdef MACH_BSD
void
act_set_astmacf(
    thread_t    thread)
{
    act_set_ast(thread, AST_MACF);
}
#endif

void
act_set_astledger(thread_t thread)
{
    act_set_ast(thread, AST_LEDGER);
}

/*
 * The ledger AST may need to be set while already holding
 * the thread lock. This routine skips sending the IPI,
 * allowing us to avoid the lock hold.
 *
 * However, it means the targeted thread must context switch
 * to recognize the ledger AST.
 */
void
act_set_astledger_async(thread_t thread)
{
    act_set_ast_async(thread, AST_LEDGER);
}

void
act_set_io_telemetry_ast(thread_t thread)
{
    act_set_ast(thread, AST_TELEMETRY_IO);
}