/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Center for Software Science (CSS).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 *	Author:	Bryan Ford, University of Utah CSS
 *
 *	Thread management routines
 */

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/thread_act_server.h>

#include <kern/kern_types.h>
#include <kern/cpu_data.h>
#include <kern/mach_param.h>
#include <kern/zalloc.h>
#include <kern/extmod_statistics.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/exception.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/machine.h>
#include <kern/spl.h>
#include <kern/syscall_subr.h>
#include <kern/sync_lock.h>
#include <kern/processor.h>
#include <kern/timer.h>
#include <kern/affinity.h>

#include <stdatomic.h>

#include <security/mac_mach_internal.h>

static void act_abort(thread_t thread);

static void thread_suspended(void *arg, wait_result_t result);
static void thread_set_apc_ast(thread_t thread);
static void thread_set_apc_ast_locked(thread_t thread);

/*
 * Internal routine to mark a thread as started.
 * Always called with the thread mutex locked.
 */
void
thread_start(
	thread_t			thread)
{
	clear_wait(thread, THREAD_AWAKENED);
	thread->started = TRUE;
}

/*
 * Internal routine to mark a thread as waiting
 * right after it has been created.  The caller
 * is responsible for calling wakeup()/thread_wakeup()
 * or thread_terminate() to get it going.
 *
 * Always called with the thread mutex locked.
 *
 * Task and task_threads mutexes also held
 * (so nobody can set the thread running before
 * this point)
 *
 * Converts TH_UNINT wait to THREAD_INTERRUPTIBLE
 * to allow termination from this point forward.
 */
void
thread_start_in_assert_wait(
	thread_t			thread,
	event_t				event,
	wait_interrupt_t	interruptible)
{
	struct waitq *waitq = assert_wait_queue(event);
	wait_result_t wait_result;
	spl_t spl;

	spl = splsched();
	waitq_lock(waitq);

	/* clear out startup condition (safe because thread not started yet) */
	thread_lock(thread);
	assert(!thread->started);
	assert((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT));
	thread->state &= ~(TH_WAIT | TH_UNINT);
	thread_unlock(thread);

	/* assert wait interruptibly forever */
	wait_result = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
	                                         interruptible,
	                                         TIMEOUT_URGENCY_SYS_NORMAL,
	                                         TIMEOUT_WAIT_FOREVER,
	                                         TIMEOUT_NO_LEEWAY,
	                                         thread);
	assert(wait_result == THREAD_WAITING);

	/* mark thread started while we still hold the waitq lock */
	thread_lock(thread);
	thread->started = TRUE;
	thread_unlock(thread);

	waitq_unlock(waitq);
	splx(spl);
}

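/*
 * Illustrative sketch (not part of the original file, not compiled): the
 * intended call sequence for a creator that parks a newly created thread on
 * an event until setup completes.  `new_thread' and `setup_event' are
 * hypothetical names.
 */
#if 0
	/* with the task, task_threads, and thread mutexes held: */
	thread_start_in_assert_wait(new_thread, setup_event, THREAD_INTERRUPTIBLE);

	/* ... later, after dropping the locks and finishing setup ... */
	thread_wakeup(setup_event);	/* or thread_terminate(new_thread) */
#endif
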
/*
 * Internal routine to terminate a thread.
 * Sometimes called with task already locked.
 */
kern_return_t
thread_terminate_internal(
	thread_t			thread)
{
	kern_return_t		result = KERN_SUCCESS;

	thread_mtx_lock(thread);

	if (thread->active) {
		thread->active = FALSE;

		act_abort(thread);

		if (thread->started)
			clear_wait(thread, THREAD_INTERRUPTED);
		else {
			thread_start(thread);
		}
	}
	else
		result = KERN_TERMINATED;

	if (thread->affinity_set != NULL)
		thread_affinity_terminate(thread);

	thread_mtx_unlock(thread);

	if (thread != current_thread() && result == KERN_SUCCESS)
		thread_wait(thread, FALSE);

	return (result);
}

/*
 * Terminate a thread.
 */
kern_return_t
thread_terminate(
	thread_t		thread)
{
	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	/* Kernel threads can't be terminated without their own cooperation */
	if (thread->task == kernel_task && thread != current_thread())
		return (KERN_FAILURE);

	kern_return_t result = thread_terminate_internal(thread);

	/*
	 * If a kernel thread is terminating itself, force handle the APC_AST here.
	 * Kernel threads don't pass through the return-to-user AST checking code,
	 * but all threads must finish their own termination in thread_apc_ast.
	 */
	if (thread->task == kernel_task) {
		assert(thread->active == FALSE);
		thread_ast_clear(thread, AST_APC);
		thread_apc_ast(thread);

		panic("thread_terminate");
		/* NOTREACHED */
	}

	return (result);
}

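/*
 * Illustrative sketch (hypothetical, not compiled): a kernel thread must
 * terminate itself; thread_terminate() on it from another thread returns
 * KERN_FAILURE, and self-termination never returns, per the panic above.
 */
#if 0
static void
my_kernel_thread_continue(void *param, wait_result_t wr)	/* hypothetical */
{
	/* ... do work ... */
	thread_terminate(current_thread());
	/* NOTREACHED */
}
#endif
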
/*
 * Suspend execution of the specified thread.
 * This is a recursive-style suspension of the thread, a count of
 * suspends is maintained.
 *
 * Called with thread mutex held.
 */
void
thread_hold(thread_t thread)
{
	if (thread->suspend_count++ == 0) {
		thread_set_apc_ast(thread);
		assert(thread->suspend_parked == FALSE);
	}
}

/*
 * Decrement internal suspension count, setting thread
 * runnable when count falls to zero.
 *
 * Because the wait is abortsafe, we can't be guaranteed that the thread
 * is currently actually waiting even if suspend_parked is set.
 *
 * Called with thread mutex held.
 */
void
thread_release(thread_t thread)
{
	assertf(thread->suspend_count > 0, "thread %p over-resumed", thread);

	/* fail-safe on non-assert builds */
	if (thread->suspend_count == 0)
		return;

	if (--thread->suspend_count == 0) {
		if (!thread->started) {
			thread_start(thread);
		} else if (thread->suspend_parked) {
			thread->suspend_parked = FALSE;
			thread_wakeup_thread(&thread->suspend_count, thread);
		}
	}
}

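/*
 * Illustrative sketch (not compiled): thread_hold()/thread_release() nest,
 * so only the 0 <-> 1 transitions of suspend_count change runnability.
 */
#if 0
	thread_mtx_lock(thread);
	thread_hold(thread);	/* suspend_count 0 -> 1: AST_APC installed    */
	thread_hold(thread);	/* suspend_count 1 -> 2: no additional effect */
	thread_release(thread);	/* suspend_count 2 -> 1: still suspended      */
	thread_release(thread);	/* suspend_count 1 -> 0: thread set running   */
	thread_mtx_unlock(thread);
#endif
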
kern_return_t
thread_suspend(thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread->user_stop_count++ == 0)
			thread_hold(thread);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	if (thread != current_thread() && result == KERN_SUCCESS)
		thread_wait(thread, FALSE);

	return (result);
}

kern_return_t
thread_resume(thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread->user_stop_count > 0) {
			if (--thread->user_stop_count == 0)
				thread_release(thread);
		} else {
			result = KERN_FAILURE;
		}
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return (result);
}

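/*
 * Illustrative sketch (not compiled): the user-visible pair above nests via
 * user_stop_count, and an unbalanced resume fails rather than underflowing.
 */
#if 0
	kern_return_t kr;

	kr = thread_suspend(thread);	/* user_stop_count 0 -> 1: thread held     */
	kr = thread_resume(thread);	/* user_stop_count 1 -> 0: thread released */
	kr = thread_resume(thread);	/* KERN_FAILURE: no outstanding suspend    */
#endif
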
/*
 *	thread_depress_abort:
 *
 *	Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort(
	thread_t				thread)
{
	kern_return_t			result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active)
		result = thread_depress_abort_internal(thread);
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}

/*
 * Indicate that the thread should run the AST_APC callback
 * to detect an abort condition.
 *
 * Called with thread mutex held.
 */
static void
act_abort(
	thread_t	thread)
{
	spl_t	s = splsched();

	thread_lock(thread);

	if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
		thread->sched_flags |= TH_SFLAG_ABORT;
		thread_set_apc_ast_locked(thread);
	} else {
		thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;
	}

	thread_unlock(thread);
	splx(s);
}

kern_return_t
thread_abort(
	thread_t	thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		act_abort(thread);
		clear_wait(thread, THREAD_INTERRUPTED);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}

kern_return_t
thread_abort_safely(
	thread_t		thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		spl_t s = splsched();

		thread_lock(thread);
		if (!thread->at_safe_point ||
		    clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
			if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
				thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
				thread_set_apc_ast_locked(thread);
			}
		}
		thread_unlock(thread);
		splx(s);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return (result);
}

/*** backward compatibility hacks ***/
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <ipc/ipc_port.h>

kern_return_t
thread_info(
	thread_t				thread,
	thread_flavor_t			flavor,
	thread_info_t			thread_info_out,
	mach_msg_type_number_t	*thread_info_count)
{
	kern_return_t			result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active || thread->inspection)
		result = thread_info_internal(
						thread, flavor, thread_info_out, thread_info_count);
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}

kern_return_t
thread_get_state(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,			/* pointer to OUT array */
	mach_msg_type_number_t	*state_count)	/*IN/OUT*/
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, FALSE)) {
				thread_mtx_lock(thread);
				result = machine_thread_get_state(
										thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_get_state(
									thread, flavor, state, state_count);
	}
	else if (thread->inspection) {
		result = machine_thread_get_state(
								thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}

/*
 *	Change thread's machine-dependent state.  Called with
 *	nothing locked.  Returns same way.
 */
static kern_return_t
thread_set_state_internal(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	state_count,
	boolean_t				from_user)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_state(
										thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_set_state(
									thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	if ((result == KERN_SUCCESS) && from_user)
		extmod_statistics_incr_thread_set_state(thread);

	thread_mtx_unlock(thread);

	return (result);
}

/* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
kern_return_t
thread_set_state(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	state_count);

kern_return_t
thread_set_state(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	state_count)
{
	return thread_set_state_internal(thread, flavor, state, state_count, FALSE);
}

kern_return_t
thread_set_state_from_user(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	state_count)
{
	return thread_set_state_internal(thread, flavor, state, state_count, TRUE);
}

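/*
 * Note: the two wrappers above differ only in the from_user flag, which
 * drives external-modification accounting in thread_set_state_internal();
 * user-originated state changes are counted via
 * extmod_statistics_incr_thread_set_state(), kernel-internal ones are not.
 */
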
/*
 * Kernel-internal "thread" interfaces used outside this file:
 */

/* Initialize (or re-initialize) a thread state.  Called from execve
 * with nothing locked, returns same way.
 */
kern_return_t
thread_state_initialize(
	thread_t		thread)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_state_initialize(thread);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_state_initialize(thread);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}

kern_return_t
thread_dup(
	thread_t	target)
{
	thread_t			self = current_thread();
	kern_return_t		result = KERN_SUCCESS;

	if (target == THREAD_NULL || target == self)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(target);

	if (target->active) {
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(self, target);
			if (self->affinity_set != AFFINITY_SET_NULL)
				thread_affinity_dup(self, target);
			thread_unstop(target);
		}
		else {
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(target);

	return (result);
}

kern_return_t
thread_dup2(
	thread_t	source,
	thread_t	target)
{
	kern_return_t		result = KERN_SUCCESS;
	uint32_t			active = 0;

	if (source == THREAD_NULL || target == THREAD_NULL || target == source)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(source);
	active = source->active;
	thread_mtx_unlock(source);

	if (!active) {
		return KERN_TERMINATED;
	}

	thread_mtx_lock(target);

	if (target->active || target->inspection) {
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(source, target);
			if (source->affinity_set != AFFINITY_SET_NULL)
				thread_affinity_dup(source, target);
			thread_unstop(target);
		}
		else {
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(target);

	return (result);
}

/*
 *	thread_setstatus:
 *
 *	Set the status of the specified thread.
 *	Called with (and returns with) no locks held.
 */
kern_return_t
thread_setstatus(
	thread_t				thread,
	int						flavor,
	thread_state_t			tstate,
	mach_msg_type_number_t	count)
{
	return (thread_set_state(thread, flavor, tstate, count));
}

/*
 *	thread_getstatus:
 *
 *	Get the status of the specified thread.
 */
kern_return_t
thread_getstatus(
	thread_t				thread,
	int						flavor,
	thread_state_t			tstate,
	mach_msg_type_number_t	*count)
{
	return (thread_get_state(thread, flavor, tstate, count));
}

/*
 *	Change thread's machine-dependent userspace TSD base.
 *	Called with nothing locked.  Returns same way.
 */
kern_return_t
thread_set_tsd_base(
	thread_t			thread,
	mach_vm_offset_t	tsd_base)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_tsd_base(thread, tsd_base);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_set_tsd_base(thread, tsd_base);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}

/*
 * thread_set_apc_ast:
 *
 * Register the AST_APC callback that handles suspension and
 * termination, if it hasn't been installed already.
 *
 * Called with the thread mutex held.
 */
static void
thread_set_apc_ast(thread_t thread)
{
	spl_t s = splsched();

	thread_lock(thread);
	thread_set_apc_ast_locked(thread);
	thread_unlock(thread);

	splx(s);
}

/*
 * thread_set_apc_ast_locked:
 *
 * Do the work of registering for the AST_APC callback.
 *
 * Called with the thread mutex and scheduling lock held.
 */
static void
thread_set_apc_ast_locked(thread_t thread)
{
	/*
	 * Temporarily undepress, so target has
	 * a chance to do locking required to
	 * block itself in thread_suspended.
	 *
	 * Leaves the depress flag set so we can reinstate when it's blocked.
	 */
	if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)
		thread_recompute_sched_pri(thread, TRUE);

	thread_ast_set(thread, AST_APC);

	if (thread == current_thread()) {
		ast_propagate(thread);
	} else {
		processor_t processor = thread->last_processor;

		if (processor != PROCESSOR_NULL &&
		    processor->state == PROCESSOR_RUNNING &&
		    processor->active_thread == thread) {
			cause_ast_check(processor);
		}
	}
}

/*
 * Activation control support routines internal to this file:
 *
 */

/*
 * thread_suspended
 *
 * Continuation routine for thread suspension.  It checks
 * to see whether there have been any new suspensions.  If so, it
 * installs the AST_APC handler again.  Otherwise, it checks to see
 * if the current depression needs to be re-instated (it may have
 * been temporarily removed in order to get to this point in a hurry).
 */
__attribute__((noreturn))
static void
thread_suspended(__unused void *parameter, wait_result_t result)
{
	thread_t thread = current_thread();

	thread_mtx_lock(thread);

	if (result == THREAD_INTERRUPTED)
		thread->suspend_parked = FALSE;
	else
		assert(thread->suspend_parked == FALSE);

	if (thread->suspend_count > 0) {
		thread_set_apc_ast(thread);
	} else {
		spl_t s = splsched();

		thread_lock(thread);
		if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
			thread->sched_pri = DEPRESSPRI;
			thread->last_processor->current_pri = thread->sched_pri;
			thread->last_processor->current_perfctl_class = thread_get_perfcontrol_class(thread);

			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
			                      (uintptr_t)thread_tid(thread),
			                      thread->base_pri,
			                      thread->sched_pri,
			                      0, /* eventually, 'reason' */
			                      0);
		}
		thread_unlock(thread);
		splx(s);
	}

	thread_mtx_unlock(thread);

	thread_exception_return();
	/*NOTREACHED*/
}

/*
 * thread_apc_ast - handles AST_APC and drives thread suspension and termination.
 * Called with nothing locked.  Returns (if it returns) the same way.
 */
void
thread_apc_ast(thread_t thread)
{
	thread_mtx_lock(thread);

	assert(thread->suspend_parked == FALSE);

	spl_t s = splsched();
	thread_lock(thread);

	/* TH_SFLAG_POLLDEPRESS is OK to have here */
	assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);

	thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
	thread_unlock(thread);
	splx(s);

	if (!thread->active) {
		/* Thread is ready to terminate, time to tear it down */
		thread_mtx_unlock(thread);

		thread_terminate_self();
		/*NOTREACHED*/
	}

	/* If we're suspended, go to sleep and wait for someone to wake us up. */
	if (thread->suspend_count > 0) {
		thread->suspend_parked = TRUE;
		assert_wait(&thread->suspend_count, THREAD_ABORTSAFE);
		thread_mtx_unlock(thread);

		thread_block(thread_suspended);
		/*NOTREACHED*/
	}

	thread_mtx_unlock(thread);
}

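/*
 * Summary of the suspension protocol implemented above:
 *
 *	thread_hold()       suspend_count 0 -> 1; installs AST_APC
 *	thread_apc_ast()    runs on the target; parks it on &thread->suspend_count
 *	thread_release()    suspend_count 1 -> 0; wakes the parked thread
 *	thread_suspended()  continuation; re-installs AST_APC if new holds
 *	                    arrived, else reinstates any priority depression
 */
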
/* Prototype, see justification above */
kern_return_t
act_set_state(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	count);

kern_return_t
act_set_state(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_set_state(thread, flavor, state, count));
}

kern_return_t
act_set_state_from_user(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_set_state_from_user(thread, flavor, state, count));
}

kern_return_t
act_get_state(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	*count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_get_state(thread, flavor, state, count));
}

static void
act_set_ast(
	thread_t	thread,
	ast_t		ast)
{
	spl_t s = splsched();

	if (thread == current_thread()) {
		thread_ast_set(thread, ast);
		ast_propagate(thread);
	} else {
		processor_t processor;

		thread_lock(thread);
		thread_ast_set(thread, ast);
		processor = thread->last_processor;
		if (processor != PROCESSOR_NULL &&
		    processor->state == PROCESSOR_RUNNING &&
		    processor->active_thread == thread)
			cause_ast_check(processor);
		thread_unlock(thread);
	}

	splx(s);
}

/*
 * set AST on thread without causing an AST check
 * and without taking the thread lock
 *
 * If thread is not the current thread, then it may take
 * up until the next context switch or quantum expiration
 * on that thread for it to notice the AST.
 */
static void
act_set_ast_async(thread_t	thread,
                  ast_t		ast)
{
	thread_ast_set(thread, ast);

	if (thread == current_thread()) {
		spl_t s = splsched();
		ast_propagate(thread);
		splx(s);
	}
}

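/*
 * Illustrative sketch (not compiled): the act_set_ast* entry points below
 * are thin wrappers that bind one AST bit each; a hypothetical new AST
 * reason would follow the same shape (AST_FOO is not a real bit):
 */
#if 0
void
act_set_astfoo(thread_t thread)
{
	act_set_ast(thread, AST_FOO);
}
#endif
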
void
act_set_astbsd(
	thread_t	thread)
{
	act_set_ast(thread, AST_BSD);
}

void
act_set_astkevent(thread_t thread, uint16_t bits)
{
	atomic_fetch_or(&thread->kevent_ast_bits, bits);

	/* kevent AST shouldn't send immediate IPIs */
	act_set_ast_async(thread, AST_KEVENT);
}

void
act_set_kperf(
	thread_t	thread)
{
	/* safety check */
	if (thread != current_thread())
		if (!ml_get_interrupts_enabled())
			panic("unsafe act_set_kperf operation");

	act_set_ast(thread, AST_KPERF);
}

#if CONFIG_MACF
void
act_set_astmacf(
	thread_t	thread)
{
	act_set_ast(thread, AST_MACF);
}
#endif

void
act_set_astledger(thread_t thread)
{
	act_set_ast(thread, AST_LEDGER);
}

/*
 * The ledger AST may need to be set while already holding
 * the thread lock. This routine skips sending the IPI,
 * allowing us to avoid the lock hold.
 *
 * However, it means the targeted thread must context switch
 * to recognize the ledger AST.
 */
void
act_set_astledger_async(thread_t thread)
{
	act_set_ast_async(thread, AST_LEDGER);
}

void
act_set_io_telemetry_ast(thread_t thread)
{
	act_set_ast(thread, AST_TELEMETRY_IO);
}