/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Center for Software Science (CSS).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 *	Author: Bryan Ford, University of Utah CSS
 *
 *	Thread management routines
 */
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/thread_act_server.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/zalloc.h>
#include <kern/extmod_statistics.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/exception.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/machine.h>
#include <kern/syscall_subr.h>
#include <kern/sync_lock.h>
#include <kern/processor.h>
#include <kern/timer.h>
#include <kern/affinity.h>

#include <stdatomic.h>

#include <security/mac_mach_internal.h>
static void act_abort(thread_t thread);
static void thread_suspended(void *arg, wait_result_t result);
static void thread_set_apc_ast(thread_t thread);
static void thread_set_apc_ast_locked(thread_t thread);
/*
 * Internal routine to mark a thread as started.
 * Always called with the thread mutex locked.
 */
void
thread_start(
	thread_t		thread)
{
	clear_wait(thread, THREAD_AWAKENED);
	thread->started = TRUE;
}
/*
 * Internal routine to mark a thread as waiting
 * right after it has been created.  The caller
 * is responsible to call wakeup()/thread_wakeup()
 * or thread_terminate() to get it going.
 *
 * Always called with the thread mutex locked.
 *
 * Task and task_threads mutexes also held
 * (so nobody can set the thread running before
 * this point)
 *
 * Converts TH_UNINT wait to THREAD_INTERRUPTIBLE
 * to allow termination from this point forward.
 */
void
thread_start_in_assert_wait(
	thread_t		thread,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	struct waitq *waitq = assert_wait_queue(event);
	wait_result_t wait_result;
	spl_t spl;

	spl = splsched();
	waitq_lock(waitq);

	/* clear out startup condition (safe because thread not started yet) */
	thread_lock(thread);
	assert(!thread->started);
	assert((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT));
	thread->state &= ~(TH_WAIT | TH_UNINT);
	thread_unlock(thread);

	/* assert wait interruptibly forever */
	wait_result = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
	                                         interruptible,
	                                         TIMEOUT_URGENCY_SYS_NORMAL,
	                                         TIMEOUT_WAIT_FOREVER,
	                                         TIMEOUT_NO_LEEWAY,
	                                         thread);
	assert(wait_result == THREAD_WAITING);

	/* mark thread started while we still hold the waitq lock */
	thread_lock(thread);
	thread->started = TRUE;
	thread_unlock(thread);

	waitq_unlock(waitq);
	splx(spl);
}
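/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * caller creates a thread, parks it with thread_start_in_assert_wait(),
 * finishes setup, and then posts the event to get it running.  The
 * `setup_done` event and `some_object` are invented for the example.
 */
#if 0	/* example only, not compiled */
	event_t setup_done = (event_t)&some_object;

	thread_mtx_lock(new_thread);
	thread_start_in_assert_wait(new_thread, setup_done, THREAD_INTERRUPTIBLE);
	thread_mtx_unlock(new_thread);

	/* ... finish initialization, then wake the parked thread ... */
	thread_wakeup(setup_done);
#endif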
/*
 * Internal routine to terminate a thread.
 * Sometimes called with task already locked.
 */
kern_return_t
thread_terminate_internal(
	thread_t		thread)
{
	kern_return_t		result = KERN_SUCCESS;

	thread_mtx_lock(thread);

	if (thread->active) {
		thread->active = FALSE;

		act_abort(thread);

		if (thread->started)
			clear_wait(thread, THREAD_INTERRUPTED);
		else {
			thread_start(thread);
		}
	}
	else
		result = KERN_TERMINATED;

	if (thread->affinity_set != NULL)
		thread_affinity_terminate(thread);

	thread_mtx_unlock(thread);

	if (thread != current_thread() && result == KERN_SUCCESS)
		thread_wait(thread, FALSE);

	return (result);
}
/*
 * Terminate a thread.
 */
kern_return_t
thread_terminate(
	thread_t		thread)
{
	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	/* Kernel threads can't be terminated without their own cooperation */
	if (thread->task == kernel_task && thread != current_thread())
		return (KERN_FAILURE);

	kern_return_t result = thread_terminate_internal(thread);

	/*
	 * If a kernel thread is terminating itself, force handle the APC_AST here.
	 * Kernel threads don't pass through the return-to-user AST checking code,
	 * but all threads must finish their own termination in thread_apc_ast.
	 */
	if (thread->task == kernel_task) {
		assert(thread->active == FALSE);
		thread_ast_clear(thread, AST_APC);
		thread_apc_ast(thread);

		panic("thread_terminate");
		/* NOTREACHED */
	}

	return (result);
}
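/*
 * Illustrative sketch (not part of the original file): because kernel
 * threads never pass through the return-to-user AST path, a kernel
 * thread that wants to die terminates itself; the call below ends up
 * driving thread_apc_ast() directly and does not return.
 */
#if 0	/* example only, not compiled */
static void
my_kernel_thread_fn(void *arg, __unused wait_result_t wr)
{
	/* ... do work ... */

	thread_terminate(current_thread());
	/* NOTREACHED */
}
#endif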
/*
 * Suspend execution of the specified thread.
 * This is a recursive-style suspension of the thread, a count of
 * suspends is maintained.
 *
 * Called with thread mutex held.
 */
void
thread_hold(thread_t thread)
{
	if (thread->suspend_count++ == 0) {
		thread_set_apc_ast(thread);
		assert(thread->suspend_parked == FALSE);
	}
}
/*
 * Decrement internal suspension count, setting thread
 * runnable when count falls to zero.
 *
 * Because the wait is abortsafe, we can't be guaranteed that the thread
 * is currently actually waiting even if suspend_parked is set.
 *
 * Called with thread mutex held.
 */
void
thread_release(thread_t thread)
{
	assertf(thread->suspend_count > 0, "thread %p over-resumed", thread);

	/* fail-safe on non-assert builds */
	if (thread->suspend_count == 0)
		return;

	if (--thread->suspend_count == 0) {
		if (!thread->started) {
			thread_start(thread);
		} else if (thread->suspend_parked) {
			thread->suspend_parked = FALSE;
			thread_wakeup_thread(&thread->suspend_count, thread);
		}
	}
}
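/*
 * Illustrative sketch (not part of the original file): suspension is
 * counted, so holds nest and the thread only becomes runnable again
 * when the final release drops the count back to zero.
 */
#if 0	/* example only, not compiled */
	thread_mtx_lock(thread);
	thread_hold(thread);	/* count 0 -> 1: APC AST installed  */
	thread_hold(thread);	/* count 1 -> 2: no additional work */
	thread_release(thread);	/* count 2 -> 1: still suspended    */
	thread_release(thread);	/* count 1 -> 0: thread woken       */
	thread_mtx_unlock(thread);
#endif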
kern_return_t
thread_suspend(thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread->user_stop_count++ == 0)
			thread_hold(thread);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	if (thread != current_thread() && result == KERN_SUCCESS)
		thread_wait(thread, FALSE);

	return (result);
}
kern_return_t
thread_resume(thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread->user_stop_count > 0) {
			if (--thread->user_stop_count == 0)
				thread_release(thread);
		} else {
			result = KERN_FAILURE;
		}
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return (result);
}
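/*
 * Illustrative sketch (not part of the original file): these routines
 * back the Mach thread_suspend()/thread_resume() calls.  A hypothetical
 * user-space caller, given a thread port, might pair them like this.
 */
#if 0	/* user-space example, not part of the kernel build */
#include <mach/mach.h>

void
pause_briefly(thread_act_t thr)
{
	if (thread_suspend(thr) == KERN_SUCCESS) {
		/* ... inspect the stopped thread ... */
		(void)thread_resume(thr);	/* balance the suspend count */
	}
}
#endif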
/*
 * thread_depress_abort_from_user:
 *
 * Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort_from_user(thread_t thread)
{
	kern_return_t result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active)
		result = thread_depress_abort(thread);
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
/*
 * Indicate that the thread should run the AST_APC callback
 * to detect an abort condition.
 *
 * Called with thread mutex held.
 */
static void
act_abort(
	thread_t	thread)
{
	spl_t	s = splsched();

	thread_lock(thread);

	if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
		thread->sched_flags |= TH_SFLAG_ABORT;
		thread_set_apc_ast_locked(thread);
		thread_depress_abort_locked(thread);
	} else {
		thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;
	}

	thread_unlock(thread);
	splx(s);
}
kern_return_t
thread_abort(
	thread_t	thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		act_abort(thread);
		clear_wait(thread, THREAD_INTERRUPTED);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
kern_return_t
thread_abort_safely(
	thread_t	thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		spl_t s = splsched();

		thread_lock(thread);
		if (!thread->at_safe_point ||
		    clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
			if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
				thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
				thread_set_apc_ast_locked(thread);
				thread_depress_abort_locked(thread);
			}
		}
		thread_unlock(thread);
		splx(s);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return (result);
}
/*** backward compatibility hacks ***/
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <ipc/ipc_port.h>
kern_return_t
thread_info(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_info_t		thread_info_out,
	mach_msg_type_number_t	*thread_info_count)
{
	kern_return_t		result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active || thread->inspection)
		result = thread_info_internal(
				thread, flavor, thread_info_out, thread_info_count);
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
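/*
 * Illustrative sketch (not part of the original file): the matching
 * user-space MIG routine queries a thread by flavor.  THREAD_BASIC_INFO
 * is a standard flavor; `thr` is a thread port the caller already holds.
 */
#if 0	/* user-space example, not part of the kernel build */
#include <mach/mach.h>

kern_return_t
get_basic_info(thread_act_t thr, thread_basic_info_data_t *out)
{
	mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;

	/* count is IN/OUT: on return it holds the number of integers filled in */
	return thread_info(thr, THREAD_BASIC_INFO, (thread_info_t)out, &count);
}
#endif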
static inline kern_return_t
thread_get_state_internal(
	thread_t		thread,
	int			flavor,
	thread_state_t		state,		/* pointer to OUT array */
	mach_msg_type_number_t	*state_count,	/*IN/OUT*/
	boolean_t		to_user)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, FALSE)) {
				thread_mtx_lock(thread);
				result = machine_thread_get_state(
						thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_get_state(
					thread, flavor, state, state_count);
	}
	else if (thread->inspection)
	{
		result = machine_thread_get_state(
				thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	if (to_user && result == KERN_SUCCESS) {
		result = machine_thread_state_convert_to_user(thread, flavor, state,
				state_count);
	}

	thread_mtx_unlock(thread);

	return (result);
}
/* No prototype, since thread_act_server.h has the _to_user version if KERNEL_SERVER */
kern_return_t
thread_get_state(
	thread_t		thread,
	int			flavor,
	thread_state_t		state,
	mach_msg_type_number_t	*state_count);

kern_return_t
thread_get_state(
	thread_t		thread,
	int			flavor,
	thread_state_t		state,		/* pointer to OUT array */
	mach_msg_type_number_t	*state_count)	/*IN/OUT*/
{
	return thread_get_state_internal(thread, flavor, state, state_count, FALSE);
}

kern_return_t
thread_get_state_to_user(
	thread_t		thread,
	int			flavor,
	thread_state_t		state,		/* pointer to OUT array */
	mach_msg_type_number_t	*state_count)	/*IN/OUT*/
{
	return thread_get_state_internal(thread, flavor, state, state_count, TRUE);
}
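/*
 * Illustrative sketch (not part of the original file): from user space,
 * thread_get_state() retrieves a thread's register file.  The flavor
 * below is the arm64 one; x86_64 callers would use x86_THREAD_STATE64
 * instead.
 */
#if 0	/* user-space example, not part of the kernel build */
#include <mach/mach.h>

kern_return_t
read_pc(thread_act_t thr, uint64_t *pc)
{
	arm_thread_state64_t ts;
	mach_msg_type_number_t count = ARM_THREAD_STATE64_COUNT;
	kern_return_t kr;

	kr = thread_get_state(thr, ARM_THREAD_STATE64,
	                      (thread_state_t)&ts, &count);
	if (kr == KERN_SUCCESS)
		*pc = arm_thread_state64_get_pc(ts);
	return kr;
}
#endif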
/*
 * Change thread's machine-dependent state.  Called with nothing
 * locked.  Returns same way.
 */
static inline kern_return_t
thread_set_state_internal(
	thread_t		thread,
	int			flavor,
	thread_state_t		state,
	mach_msg_type_number_t	state_count,
	boolean_t		from_user)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (from_user) {
			result = machine_thread_state_convert_from_user(thread, flavor,
					state, state_count);
			if (result != KERN_SUCCESS) {
				goto out;
			}
		}
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_state(
						thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_set_state(
					thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	if ((result == KERN_SUCCESS) && from_user)
		extmod_statistics_incr_thread_set_state(thread);

out:
	thread_mtx_unlock(thread);

	return (result);
}
/* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
kern_return_t
thread_set_state(
	thread_t		thread,
	int			flavor,
	thread_state_t		state,
	mach_msg_type_number_t	state_count);

kern_return_t
thread_set_state(
	thread_t		thread,
	int			flavor,
	thread_state_t		state,
	mach_msg_type_number_t	state_count)
{
	return thread_set_state_internal(thread, flavor, state, state_count, FALSE);
}

kern_return_t
thread_set_state_from_user(
	thread_t		thread,
	int			flavor,
	thread_state_t		state,
	mach_msg_type_number_t	state_count)
{
	return thread_set_state_internal(thread, flavor, state, state_count, TRUE);
}
/*
 * Kernel-internal "thread" interfaces used outside this file:
 */

/* Initialize (or re-initialize) a thread state.  Called from execve
 * with nothing locked, returns same way.
 */
kern_return_t
thread_state_initialize(
	thread_t		thread)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_state_initialize( thread );
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_state_initialize( thread );
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
kern_return_t
thread_dup(
	thread_t	target)
{
	thread_t		self = current_thread();
	kern_return_t		result = KERN_SUCCESS;

	if (target == THREAD_NULL || target == self)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(target);

	if (target->active) {
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(self, target, FALSE);

			if (self->affinity_set != AFFINITY_SET_NULL)
				thread_affinity_dup(self, target);
			thread_unstop(target);
		}
		else {
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(target);

	return (result);
}
kern_return_t
thread_dup2(
	thread_t	source,
	thread_t	target)
{
	kern_return_t		result = KERN_SUCCESS;
	uint32_t		active = 0;

	if (source == THREAD_NULL || target == THREAD_NULL || target == source)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(source);
	active = source->active;
	thread_mtx_unlock(source);

	if (!active) {
		return KERN_TERMINATED;
	}

	thread_mtx_lock(target);

	if (target->active || target->inspection) {
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(source, target, TRUE);
			if (source->affinity_set != AFFINITY_SET_NULL)
				thread_affinity_dup(source, target);
			thread_unstop(target);
		}
		else {
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(target);

	return (result);
}
/*
 *	thread_setstatus:
 *
 *	Set the status of the specified thread.
 *	Called with (and returns with) no locks held.
 */
kern_return_t
thread_setstatus(
	thread_t		thread,
	int			flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	count)
{
	return (thread_set_state(thread, flavor, tstate, count));
}

kern_return_t
thread_setstatus_from_user(
	thread_t		thread,
	int			flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	count)
{
	return (thread_set_state_from_user(thread, flavor, tstate, count));
}
/*
 *	thread_getstatus:
 *
 *	Get the status of the specified thread.
 */
kern_return_t
thread_getstatus(
	thread_t		thread,
	int			flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count)
{
	return (thread_get_state(thread, flavor, tstate, count));
}

kern_return_t
thread_getstatus_to_user(
	thread_t		thread,
	int			flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count)
{
	return (thread_get_state_to_user(thread, flavor, tstate, count));
}
/*
 *	Change thread's machine-dependent userspace TSD base.
 *	Called with nothing locked.  Returns same way.
 */
kern_return_t
thread_set_tsd_base(
	thread_t		thread,
	mach_vm_offset_t	tsd_base)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_tsd_base(thread, tsd_base);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_set_tsd_base(thread, tsd_base);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
/*
 * thread_set_apc_ast:
 *
 * Register the AST_APC callback that handles suspension and
 * termination, if it hasn't been installed already.
 *
 * Called with the thread mutex held.
 */
static void
thread_set_apc_ast(thread_t thread)
{
	spl_t s = splsched();

	thread_lock(thread);
	thread_set_apc_ast_locked(thread);
	thread_unlock(thread);

	splx(s);
}
/*
 * thread_set_apc_ast_locked:
 *
 * Do the work of registering for the AST_APC callback.
 *
 * Called with the thread mutex and scheduling lock held.
 */
static void
thread_set_apc_ast_locked(thread_t thread)
{
	thread_ast_set(thread, AST_APC);

	if (thread == current_thread()) {
		ast_propagate(thread);
	} else {
		processor_t processor = thread->last_processor;

		if (processor != PROCESSOR_NULL &&
		    processor->state == PROCESSOR_RUNNING &&
		    processor->active_thread == thread) {
			cause_ast_check(processor);
		}
	}
}
/*
 * Activation control support routines internal to this file:
 *
 */

/*
 * thread_suspended
 *
 * Continuation routine for thread suspension.  It checks
 * to see whether there has been any new suspensions.  If so, it
 * installs the AST_APC handler again.
 */
__attribute__((noreturn))
static void
thread_suspended(__unused void *parameter, wait_result_t result)
{
	thread_t thread = current_thread();

	thread_mtx_lock(thread);

	if (result == THREAD_INTERRUPTED)
		thread->suspend_parked = FALSE;
	else
		assert(thread->suspend_parked == FALSE);

	if (thread->suspend_count > 0)
		thread_set_apc_ast(thread);

	thread_mtx_unlock(thread);

	thread_exception_return();
	/*NOTREACHED*/
}
/*
 * thread_apc_ast - handles AST_APC and drives thread suspension and termination.
 * Called with nothing locked.  Returns (if it returns) the same way.
 */
void
thread_apc_ast(thread_t thread)
{
	thread_mtx_lock(thread);

	assert(thread->suspend_parked == FALSE);

	spl_t s = splsched();
	thread_lock(thread);

	/* TH_SFLAG_POLLDEPRESS is OK to have here */
	assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);

	thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
	thread_unlock(thread);
	splx(s);

	if (!thread->active) {
		/* Thread is ready to terminate, time to tear it down */
		thread_mtx_unlock(thread);

		thread_terminate_self();
		/*NOTREACHED*/
	}

	/* If we're suspended, go to sleep and wait for someone to wake us up. */
	if (thread->suspend_count > 0) {
		thread->suspend_parked = TRUE;
		assert_wait(&thread->suspend_count,
				THREAD_ABORTSAFE | THREAD_WAIT_NOREPORT_USER);
		thread_mtx_unlock(thread);

		thread_block(thread_suspended);
		/*NOTREACHED*/
	}

	thread_mtx_unlock(thread);
}
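/*
 * Illustrative sketch (not part of the original file): thread_apc_ast()
 * parks a suspended thread using the standard xnu wait/continuation
 * idiom.  thread_block() does not return here; the kernel stack is
 * discarded and the continuation runs from scratch on wakeup, which is
 * why thread_suspended() must re-derive all of its state.
 */
#if 0	/* example of the idiom, not compiled */
static void
my_continuation(__unused void *param, wait_result_t wresult)
{
	/* runs on a fresh stack after wakeup; nothing survives from park() */
}

static void
park(event_t event)
{
	assert_wait(event, THREAD_UNINT);
	thread_block(my_continuation);
	/* NOTREACHED */
}
#endif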
/* Prototype, see justification above */
kern_return_t
act_set_state(
	thread_t		thread,
	int			flavor,
	thread_state_t		state,
	mach_msg_type_number_t	count);

kern_return_t
act_set_state(
	thread_t		thread,
	int			flavor,
	thread_state_t		state,
	mach_msg_type_number_t	count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_set_state(thread, flavor, state, count));
}
kern_return_t
act_set_state_from_user(
	thread_t		thread,
	int			flavor,
	thread_state_t		state,
	mach_msg_type_number_t	count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_set_state_from_user(thread, flavor, state, count));
}
/* Prototype, see justification above */
kern_return_t
act_get_state(
	thread_t		thread,
	int			flavor,
	thread_state_t		state,
	mach_msg_type_number_t	*count);

kern_return_t
act_get_state(
	thread_t		thread,
	int			flavor,
	thread_state_t		state,
	mach_msg_type_number_t	*count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_get_state(thread, flavor, state, count));
}

kern_return_t
act_get_state_to_user(
	thread_t		thread,
	int			flavor,
	thread_state_t		state,
	mach_msg_type_number_t	*count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_get_state_to_user(thread, flavor, state, count));
}
static void
act_set_ast(
	thread_t	thread,
	ast_t		ast)
{
	spl_t s = splsched();

	if (thread == current_thread()) {
		thread_ast_set(thread, ast);
		ast_propagate(thread);
	} else {
		processor_t processor;

		thread_lock(thread);
		thread_ast_set(thread, ast);
		processor = thread->last_processor;
		if ( processor != PROCESSOR_NULL            &&
		     processor->state == PROCESSOR_RUNNING  &&
		     processor->active_thread == thread     )
			cause_ast_check(processor);
		thread_unlock(thread);
	}

	splx(s);
}
/*
 * set AST on thread without causing an AST check
 * and without taking the thread lock
 *
 * If thread is not the current thread, then it may take
 * up until the next context switch or quantum expiration
 * on that thread for it to notice the AST.
 */
static void
act_set_ast_async(thread_t thread,
                  ast_t ast)
{
	thread_ast_set(thread, ast);

	if (thread == current_thread()) {
		spl_t s = splsched();
		ast_propagate(thread);
		splx(s);
	}
}
void
act_set_astbsd(
	thread_t	thread)
{
	act_set_ast( thread, AST_BSD );
}
void
act_set_astkevent(thread_t thread, uint16_t bits)
{
	atomic_fetch_or(&thread->kevent_ast_bits, bits);

	/* kevent AST shouldn't send immediate IPIs */
	act_set_ast_async(thread, AST_KEVENT);
}
void
act_set_kperf(
	thread_t	thread)
{
	/* safety check */
	if (thread != current_thread())
		if( !ml_get_interrupts_enabled() )
			panic("unsafe act_set_kperf operation");

	act_set_ast( thread, AST_KPERF );
}
#if CONFIG_MACF
void
act_set_astmacf(
	thread_t	thread)
{
	act_set_ast( thread, AST_MACF);
}
#endif
void
act_set_astledger(thread_t thread)
{
	act_set_ast(thread, AST_LEDGER);
}
/*
 * The ledger AST may need to be set while already holding
 * the thread lock. This routine skips sending the IPI,
 * allowing us to avoid the lock hold.
 *
 * However, it means the targeted thread must context switch
 * to recognize the ledger AST.
 */
void
act_set_astledger_async(thread_t thread)
{
	act_set_ast_async(thread, AST_LEDGER);
}
void
act_set_io_telemetry_ast(thread_t thread)
{
	act_set_ast(thread, AST_TELEMETRY_IO);
}