/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Center for Software Science (CSS). All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 *  Author: Bryan Ford, University of Utah CSS
 *
 *  Thread management routines
 */
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/thread_act_server.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/zalloc.h>
#include <kern/extmod_statistics.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/exception.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/machine.h>
#include <kern/syscall_subr.h>
#include <kern/sync_lock.h>
#include <kern/processor.h>
#include <kern/timer.h>
#include <kern/affinity.h>
#include <kern/host.h>

#include <stdatomic.h>

#include <security/mac_mach_internal.h>
static void act_abort(thread_t thread);

static void thread_suspended(void *arg, wait_result_t result);
static void thread_set_apc_ast(thread_t thread);
static void thread_set_apc_ast_locked(thread_t thread);
/*
 * Internal routine to mark a thread as started.
 * Always called with the thread mutex locked.
 */
void
thread_start(
    thread_t    thread)
{
    clear_wait(thread, THREAD_AWAKENED);
    thread->started = TRUE;
}
/*
 * Internal routine to mark a thread as waiting
 * right after it has been created. The caller
 * is responsible for calling wakeup()/thread_wakeup()
 * or thread_terminate() to get it going.
 *
 * Always called with the thread mutex locked.
 *
 * Task and task_threads mutexes also held
 * (so nobody can set the thread running before
 * this point)
 *
 * Converts TH_UNINT wait to THREAD_INTERRUPTIBLE
 * to allow termination from this point forward.
 */
void
thread_start_in_assert_wait(
    thread_t            thread,
    event_t             event,
    wait_interrupt_t    interruptible)
{
    struct waitq *waitq = assert_wait_queue(event);
    wait_result_t wait_result;
    spl_t spl;

    spl = splsched();
    waitq_lock(waitq);

    /* clear out startup condition (safe because thread not started yet) */
    thread_lock(thread);
    assert(!thread->started);
    assert((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT));
    thread->state &= ~(TH_WAIT | TH_UNINT);
    thread_unlock(thread);

    /* assert wait interruptibly forever */
    wait_result = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
        interruptible,
        TIMEOUT_URGENCY_SYS_NORMAL,
        TIMEOUT_WAIT_FOREVER, TIMEOUT_NO_LEEWAY,
        thread);
    assert(wait_result == THREAD_WAITING);

    /* mark thread started while we still hold the waitq lock */
    thread_lock(thread);
    thread->started = TRUE;
    thread_unlock(thread);

    waitq_unlock(waitq);
    splx(spl);
}
/*
 * Internal routine to terminate a thread.
 * Sometimes called with task already locked.
 */
kern_return_t
thread_terminate_internal(
    thread_t                        thread,
    thread_terminate_options_t      options)
{
    kern_return_t result = KERN_SUCCESS;
    boolean_t test_pin_bit = false;

    thread_mtx_lock(thread);

    if (thread->active) {
        thread->active = FALSE;

        act_abort(thread);

        if (thread->started) {
            clear_wait(thread, THREAD_INTERRUPTED);
        } else {
            thread_start(thread);
        }
        /* This bit can be reliably tested only if the thread is still active */
        test_pin_bit = (options == TH_TERMINATE_OPTION_UNPIN) ? true : false;
    } else {
        result = KERN_TERMINATED;
    }

    if (thread->affinity_set != NULL) {
        thread_affinity_terminate(thread);
    }

    /*
     * <rdar://problem/53562036> thread_terminate shouldn't be allowed on pthread
     * Until thread_terminate is disallowed for pthreads, always unpin the pinned port
     * when the thread is being terminated.
     */
    ipc_thread_port_unpin(thread->ith_self, test_pin_bit);

    thread_mtx_unlock(thread);

    if (thread != current_thread() && result == KERN_SUCCESS) {
        thread_wait(thread, FALSE);
    }

    return result;
}
/*
 * Terminate a thread.
 */
kern_return_t
thread_terminate(
    thread_t    thread)
{
    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    /* Kernel threads can't be terminated without their own cooperation */
    if (thread->task == kernel_task && thread != current_thread()) {
        return KERN_FAILURE;
    }

    kern_return_t result = thread_terminate_internal(thread, TH_TERMINATE_OPTION_NONE);

    /*
     * If a kernel thread is terminating itself, force handle the APC_AST here.
     * Kernel threads don't pass through the return-to-user AST checking code,
     * but all threads must finish their own termination in thread_apc_ast.
     */
    if (thread->task == kernel_task) {
        assert(thread->active == FALSE);
        thread_ast_clear(thread, AST_APC);
        thread_apc_ast(thread);

        panic("thread_terminate");
        /*NOTREACHED*/
    }

    return result;
}
kern_return_t
thread_terminate_pinned(
    thread_t    thread)
{
    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    assert(thread->task != kernel_task);

    kern_return_t result = thread_terminate_internal(thread, TH_TERMINATE_OPTION_UNPIN);
    return result;
}
/*
 * Suspend execution of the specified thread.
 * This is a recursive-style suspension of the thread, a count of
 * suspends is maintained.
 *
 * Called with thread mutex held.
 */
void
thread_hold(thread_t thread)
{
    if (thread->suspend_count++ == 0) {
        thread_set_apc_ast(thread);
        assert(thread->suspend_parked == FALSE);
    }
}
/*
 * Decrement internal suspension count, setting thread
 * runnable when count falls to zero.
 *
 * Because the wait is abortsafe, we can't be guaranteed that the thread
 * is currently actually waiting even if suspend_parked is set.
 *
 * Called with thread mutex held.
 */
void
thread_release(thread_t thread)
{
    assertf(thread->suspend_count > 0, "thread %p over-resumed", thread);

    /* fail-safe on non-assert builds */
    if (thread->suspend_count == 0) {
        return;
    }

    if (--thread->suspend_count == 0) {
        if (!thread->started) {
            thread_start(thread);
        } else if (thread->suspend_parked) {
            thread->suspend_parked = FALSE;
            thread_wakeup_thread(&thread->suspend_count, thread);
        }
    }
}
kern_return_t
thread_suspend(thread_t thread)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL || thread->task == kernel_task) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread->user_stop_count++ == 0) {
            thread_hold(thread);
        }
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    if (thread != current_thread() && result == KERN_SUCCESS) {
        thread_wait(thread, FALSE);
    }

    return result;
}
kern_return_t
thread_resume(thread_t thread)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL || thread->task == kernel_task) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread->user_stop_count > 0) {
            if (--thread->user_stop_count == 0) {
                thread_release(thread);
            }
        } else {
            result = KERN_FAILURE;
        }
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return result;
}
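
/*
 * Illustrative sketch, not part of the original file: thread_suspend() and
 * thread_resume() maintain user_stop_count, so nested suspends must be
 * balanced by the same number of resumes. example_suspend_once() is a
 * hypothetical caller.
 */
static kern_return_t __attribute__((unused))
example_suspend_once(thread_t thread)
{
    kern_return_t kr = thread_suspend(thread);  /* user_stop_count 0 -> 1 */
    if (kr != KERN_SUCCESS) {
        return kr;  /* KERN_TERMINATED or KERN_INVALID_ARGUMENT */
    }

    /* ... target cannot run in user mode here ... */

    return thread_resume(thread);               /* user_stop_count 1 -> 0 */
}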
/*
 * thread_depress_abort_from_user:
 *
 * Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort_from_user(thread_t thread)
{
    kern_return_t result;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        result = thread_depress_abort(thread);
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return result;
}
/*
 * Indicate that the thread should run the AST_APC callback
 * to detect an abort condition.
 *
 * Called with thread mutex held.
 */
static void
act_abort(
    thread_t    thread)
{
    spl_t s = splsched();

    thread_lock(thread);

    if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
        thread->sched_flags |= TH_SFLAG_ABORT;
        thread_set_apc_ast_locked(thread);
        thread_depress_abort_locked(thread);
    } else {
        thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;
    }

    thread_unlock(thread);
    splx(s);
}
kern_return_t
thread_abort(
    thread_t    thread)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        act_abort(thread);
        clear_wait(thread, THREAD_INTERRUPTED);
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return result;
}
kern_return_t
thread_abort_safely(
    thread_t    thread)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        spl_t s = splsched();

        thread_lock(thread);
        if (!thread->at_safe_point ||
            clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
            if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
                thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
                thread_set_apc_ast_locked(thread);
                thread_depress_abort_locked(thread);
            }
        }
        thread_unlock(thread);
        splx(s);
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return result;
}
/*** backward compatibility hacks ***/
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <ipc/ipc_port.h>

kern_return_t
thread_info(
    thread_t                thread,
    thread_flavor_t         flavor,
    thread_info_t           thread_info_out,
    mach_msg_type_number_t  *thread_info_count)
{
    kern_return_t result;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active || thread->inspection) {
        result = thread_info_internal(
            thread, flavor, thread_info_out, thread_info_count);
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return result;
}
static inline kern_return_t
thread_get_state_internal(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,          /* pointer to OUT array */
    mach_msg_type_number_t  *state_count,   /*IN/OUT*/
    boolean_t               to_user)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread != current_thread()) {
            thread_hold(thread);

            thread_mtx_unlock(thread);

            if (thread_stop(thread, FALSE)) {
                thread_mtx_lock(thread);
                result = machine_thread_get_state(
                    thread, flavor, state, state_count);
                thread_unstop(thread);
            } else {
                thread_mtx_lock(thread);
                result = KERN_ABORTED;
            }

            thread_release(thread);
        } else {
            result = machine_thread_get_state(
                thread, flavor, state, state_count);
        }
    } else if (thread->inspection) {
        result = machine_thread_get_state(
            thread, flavor, state, state_count);
    } else {
        result = KERN_TERMINATED;
    }

    if (to_user && result == KERN_SUCCESS) {
        result = machine_thread_state_convert_to_user(thread, flavor, state,
            state_count);
    }

    thread_mtx_unlock(thread);

    return result;
}
/* No prototype, since thread_act_server.h has the _to_user version if KERNEL_SERVER */
kern_return_t
thread_get_state(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  *state_count);

kern_return_t
thread_get_state(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,          /* pointer to OUT array */
    mach_msg_type_number_t  *state_count)   /*IN/OUT*/
{
    return thread_get_state_internal(thread, flavor, state, state_count, FALSE);
}

kern_return_t
thread_get_state_to_user(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,          /* pointer to OUT array */
    mach_msg_type_number_t  *state_count)   /*IN/OUT*/
{
    return thread_get_state_internal(thread, flavor, state, state_count, TRUE);
}
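
/*
 * Illustrative sketch, not part of the original file: exporting another
 * thread's register state to userspace. EXAMPLE_FLAVOR and EXAMPLE_COUNT
 * stand in for a machine-specific flavor and its word count (for instance
 * ARM_THREAD_STATE64 / ARM_THREAD_STATE64_COUNT); they and
 * example_capture_state() are assumptions, not XNU definitions.
 */
#define EXAMPLE_FLAVOR 1    /* hypothetical flavor */
#define EXAMPLE_COUNT  32   /* hypothetical natural_t word count */

static kern_return_t __attribute__((unused))
example_capture_state(thread_t thread, natural_t out[EXAMPLE_COUNT])
{
    mach_msg_type_number_t count = EXAMPLE_COUNT;

    /* the _to_user variant converts the kernel representation (e.g.
     * pointer-authenticated register values) into the user representation */
    return thread_get_state_to_user(thread, EXAMPLE_FLAVOR,
        (thread_state_t)out, &count);
}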
/*
 * Change thread's machine-dependent state.  Called with nothing
 * locked.  Returns same way.
 */
static inline kern_return_t
thread_set_state_internal(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  state_count,
    boolean_t               from_user)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        if (from_user) {
            result = machine_thread_state_convert_from_user(thread, flavor,
                state, state_count);
            if (result != KERN_SUCCESS) {
                goto out;
            }
        }
        if (thread != current_thread()) {
            thread_hold(thread);

            thread_mtx_unlock(thread);

            if (thread_stop(thread, TRUE)) {
                thread_mtx_lock(thread);
                result = machine_thread_set_state(
                    thread, flavor, state, state_count);
                thread_unstop(thread);
            } else {
                thread_mtx_lock(thread);
                result = KERN_ABORTED;
            }

            thread_release(thread);
        } else {
            result = machine_thread_set_state(
                thread, flavor, state, state_count);
        }
    } else {
        result = KERN_TERMINATED;
    }

    if ((result == KERN_SUCCESS) && from_user) {
        extmod_statistics_incr_thread_set_state(thread);
    }

out:
    thread_mtx_unlock(thread);

    return result;
}
/* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
kern_return_t
thread_set_state(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  state_count);

kern_return_t
thread_set_state(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  state_count)
{
    return thread_set_state_internal(thread, flavor, state, state_count, FALSE);
}

kern_return_t
thread_set_state_from_user(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  state_count)
{
    return thread_set_state_internal(thread, flavor, state, state_count, TRUE);
}
kern_return_t
thread_convert_thread_state(
    thread_t                thread,
    int                     direction,
    thread_state_flavor_t   flavor,
    thread_state_t          in_state,          /* pointer to IN array */
    mach_msg_type_number_t  in_state_count,
    thread_state_t          out_state,         /* pointer to OUT array */
    mach_msg_type_number_t  *out_state_count)  /*IN/OUT*/
{
    kern_return_t kr;
    thread_t to_thread = THREAD_NULL;
    thread_t from_thread = THREAD_NULL;
    mach_msg_type_number_t state_count = in_state_count;

    if (direction != THREAD_CONVERT_THREAD_STATE_TO_SELF &&
        direction != THREAD_CONVERT_THREAD_STATE_FROM_SELF) {
        return KERN_INVALID_ARGUMENT;
    }

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (state_count > *out_state_count) {
        return KERN_INSUFFICIENT_BUFFER_SIZE;
    }

    if (direction == THREAD_CONVERT_THREAD_STATE_FROM_SELF) {
        to_thread = thread;
        from_thread = current_thread();
    } else {
        to_thread = current_thread();
        from_thread = thread;
    }

    /* Authenticate and convert thread state to kernel representation */
    kr = machine_thread_state_convert_from_user(from_thread, flavor,
        in_state, state_count);

    /* Return early if one of the threads was JOP disabled while the other wasn't */
    if (kr != KERN_SUCCESS) {
        return kr;
    }

    /* Convert thread state to target thread user representation */
    kr = machine_thread_state_convert_to_user(to_thread, flavor,
        in_state, &state_count);

    if (kr == KERN_SUCCESS) {
        if (state_count <= *out_state_count) {
            memcpy(out_state, in_state, state_count * sizeof(uint32_t));
            *out_state_count = state_count;
        } else {
            kr = KERN_INSUFFICIENT_BUFFER_SIZE;
        }
    }

    return kr;
}
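
/*
 * Illustrative sketch, not part of the original file: converting a state
 * buffer captured from another thread into the caller's own user
 * representation. example_convert_to_self() is a hypothetical helper; it
 * assumes in_state holds `count` words captured from `other`.
 */
static kern_return_t __attribute__((unused))
example_convert_to_self(thread_t other, thread_state_flavor_t flavor,
    thread_state_t in_state, mach_msg_type_number_t count,
    thread_state_t out_state, mach_msg_type_number_t *out_count)
{
    /* TO_SELF: `other` supplies the state, current_thread() receives it */
    return thread_convert_thread_state(other,
        THREAD_CONVERT_THREAD_STATE_TO_SELF, flavor,
        in_state, count, out_state, out_count);
}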
/*
 * Kernel-internal "thread" interfaces used outside this file:
 */

/* Initialize (or re-initialize) a thread state.  Called from execve
 * with nothing locked, returns same way.
 */
kern_return_t
thread_state_initialize(
    thread_t    thread)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread != current_thread()) {
            thread_hold(thread);

            thread_mtx_unlock(thread);

            if (thread_stop(thread, TRUE)) {
                thread_mtx_lock(thread);
                result = machine_thread_state_initialize(thread);
                thread_unstop(thread);
            } else {
                thread_mtx_lock(thread);
                result = KERN_ABORTED;
            }

            thread_release(thread);
        } else {
            result = machine_thread_state_initialize(thread);
        }
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return result;
}
kern_return_t
thread_dup(
    thread_t    target)
{
    thread_t self = current_thread();
    kern_return_t result = KERN_SUCCESS;

    if (target == THREAD_NULL || target == self) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(target);

    if (target->active) {
        thread_hold(target);

        thread_mtx_unlock(target);

        if (thread_stop(target, TRUE)) {
            thread_mtx_lock(target);
            result = machine_thread_dup(self, target, FALSE);

            if (self->affinity_set != AFFINITY_SET_NULL) {
                thread_affinity_dup(self, target);
            }
            thread_unstop(target);
        } else {
            thread_mtx_lock(target);
            result = KERN_ABORTED;
        }

        thread_release(target);
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(target);

    return result;
}
kern_return_t
thread_dup2(
    thread_t    source,
    thread_t    target)
{
    kern_return_t result = KERN_SUCCESS;
    uint32_t active = 0;

    if (source == THREAD_NULL || target == THREAD_NULL || target == source) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(source);
    active = source->active;
    thread_mtx_unlock(source);

    if (!active) {
        return KERN_TERMINATED;
    }

    thread_mtx_lock(target);

    if (target->active || target->inspection) {
        thread_hold(target);

        thread_mtx_unlock(target);

        if (thread_stop(target, TRUE)) {
            thread_mtx_lock(target);
            result = machine_thread_dup(source, target, TRUE);
            if (source->affinity_set != AFFINITY_SET_NULL) {
                thread_affinity_dup(source, target);
            }
            thread_unstop(target);
        } else {
            thread_mtx_lock(target);
            result = KERN_ABORTED;
        }

        thread_release(target);
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(target);

    return result;
}
/*
 * thread_setstatus:
 *
 * Set the status of the specified thread.
 * Called with (and returns with) no locks held.
 */
kern_return_t
thread_setstatus(
    thread_t                thread,
    int                     flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  count)
{
    return thread_set_state(thread, flavor, tstate, count);
}

kern_return_t
thread_setstatus_from_user(
    thread_t                thread,
    int                     flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  count)
{
    return thread_set_state_from_user(thread, flavor, tstate, count);
}

/*
 * thread_getstatus:
 *
 * Get the status of the specified thread.
 */
kern_return_t
thread_getstatus(
    thread_t                thread,
    int                     flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  *count)
{
    return thread_get_state(thread, flavor, tstate, count);
}

kern_return_t
thread_getstatus_to_user(
    thread_t                thread,
    int                     flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  *count)
{
    return thread_get_state_to_user(thread, flavor, tstate, count);
}
/*
 * Change thread's machine-dependent userspace TSD base.
 * Called with nothing locked.  Returns same way.
 */
kern_return_t
thread_set_tsd_base(
    thread_t            thread,
    mach_vm_offset_t    tsd_base)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread != current_thread()) {
            thread_hold(thread);

            thread_mtx_unlock(thread);

            if (thread_stop(thread, TRUE)) {
                thread_mtx_lock(thread);
                result = machine_thread_set_tsd_base(thread, tsd_base);
                thread_unstop(thread);
            } else {
                thread_mtx_lock(thread);
                result = KERN_ABORTED;
            }

            thread_release(thread);
        } else {
            result = machine_thread_set_tsd_base(thread, tsd_base);
        }
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return result;
}
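
/*
 * Illustrative sketch, not part of the original file: the hold/stop/unstop/
 * release pattern shared by thread_state_initialize(), thread_set_tsd_base()
 * and the get/set-state paths above. example_with_thread_stopped() and the
 * example_op callback are hypothetical.
 */
static kern_return_t __attribute__((unused))
example_with_thread_stopped(thread_t thread,
    kern_return_t (*example_op)(thread_t))
{
    kern_return_t result;

    thread_mtx_lock(thread);

    if (!thread->active) {
        thread_mtx_unlock(thread);
        return KERN_TERMINATED;
    }

    if (thread != current_thread()) {
        thread_hold(thread);            /* keep it from running user code */

        thread_mtx_unlock(thread);

        if (thread_stop(thread, TRUE)) {    /* wait for it to go off-CPU */
            thread_mtx_lock(thread);
            result = example_op(thread);
            thread_unstop(thread);
        } else {
            thread_mtx_lock(thread);
            result = KERN_ABORTED;          /* the stop was interrupted */
        }

        thread_release(thread);
    } else {
        result = example_op(thread);
    }

    thread_mtx_unlock(thread);

    return result;
}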
/*
 * thread_set_apc_ast:
 *
 * Register the AST_APC callback that handles suspension and
 * termination, if it hasn't been installed already.
 *
 * Called with the thread mutex held.
 */
static void
thread_set_apc_ast(thread_t thread)
{
    spl_t s = splsched();

    thread_lock(thread);
    thread_set_apc_ast_locked(thread);
    thread_unlock(thread);

    splx(s);
}
/*
 * thread_set_apc_ast_locked:
 *
 * Do the work of registering for the AST_APC callback.
 *
 * Called with the thread mutex and scheduling lock held.
 */
static void
thread_set_apc_ast_locked(thread_t thread)
{
    thread_ast_set(thread, AST_APC);

    if (thread == current_thread()) {
        ast_propagate(thread);
    } else {
        processor_t processor = thread->last_processor;

        if (processor != PROCESSOR_NULL &&
            processor->state == PROCESSOR_RUNNING &&
            processor->active_thread == thread) {
            cause_ast_check(processor);
        }
    }
}
/*
 * Activation control support routines internal to this file:
 *
 */

/*
 * thread_suspended
 *
 * Continuation routine for thread suspension.  It checks
 * to see whether there have been any new suspensions.  If so, it
 * installs the AST_APC handler again.
 */
__attribute__((noreturn))
static void
thread_suspended(__unused void *parameter, wait_result_t result)
{
    thread_t thread = current_thread();

    thread_mtx_lock(thread);

    if (result == THREAD_INTERRUPTED) {
        thread->suspend_parked = FALSE;
    } else {
        assert(thread->suspend_parked == FALSE);
    }

    if (thread->suspend_count > 0) {
        thread_set_apc_ast(thread);
    }

    thread_mtx_unlock(thread);

    thread_exception_return();
    /*NOTREACHED*/
}
/*
 * thread_apc_ast - handles AST_APC and drives thread suspension and termination.
 * Called with nothing locked.  Returns (if it returns) the same way.
 */
void
thread_apc_ast(thread_t thread)
{
    thread_mtx_lock(thread);

    assert(thread->suspend_parked == FALSE);

    spl_t s = splsched();
    thread_lock(thread);

    /* TH_SFLAG_POLLDEPRESS is OK to have here */
    assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);

    thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
    thread_unlock(thread);
    splx(s);

    if (!thread->active) {
        /* Thread is ready to terminate, time to tear it down */
        thread_mtx_unlock(thread);

        thread_terminate_self();
        /*NOTREACHED*/
    }

    /* If we're suspended, go to sleep and wait for someone to wake us up. */
    if (thread->suspend_count > 0) {
        thread->suspend_parked = TRUE;
        assert_wait(&thread->suspend_count,
            THREAD_ABORTSAFE | THREAD_WAIT_NOREPORT_USER);
        thread_mtx_unlock(thread);

        thread_block(thread_suspended);
        /*NOTREACHED*/
    }

    thread_mtx_unlock(thread);
}
/* Prototype, see justification above */
kern_return_t
act_set_state(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  count);

kern_return_t
act_set_state(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  count)
{
    if (thread == current_thread()) {
        return KERN_INVALID_ARGUMENT;
    }

    return thread_set_state(thread, flavor, state, count);
}

kern_return_t
act_set_state_from_user(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  count)
{
    if (thread == current_thread()) {
        return KERN_INVALID_ARGUMENT;
    }

    return thread_set_state_from_user(thread, flavor, state, count);
}

/* Prototype, see justification above */
kern_return_t
act_get_state(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  *count);

kern_return_t
act_get_state(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  *count)
{
    if (thread == current_thread()) {
        return KERN_INVALID_ARGUMENT;
    }

    return thread_get_state(thread, flavor, state, count);
}

kern_return_t
act_get_state_to_user(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  *count)
{
    if (thread == current_thread()) {
        return KERN_INVALID_ARGUMENT;
    }

    return thread_get_state_to_user(thread, flavor, state, count);
}
static void
act_set_ast(
    thread_t    thread,
    ast_t       ast)
{
    spl_t s = splsched();

    if (thread == current_thread()) {
        thread_ast_set(thread, ast);
        ast_propagate(thread);
    } else {
        processor_t processor;

        thread_lock(thread);
        thread_ast_set(thread, ast);
        processor = thread->last_processor;
        if (processor != PROCESSOR_NULL &&
            processor->state == PROCESSOR_RUNNING &&
            processor->active_thread == thread) {
            cause_ast_check(processor);
        }
        thread_unlock(thread);
    }

    splx(s);
}
/*
 * set AST on thread without causing an AST check
 * and without taking the thread lock
 *
 * If thread is not the current thread, then it may take
 * up until the next context switch or quantum expiration
 * on that thread for it to notice the AST.
 */
static void
act_set_ast_async(thread_t thread,
    ast_t ast)
{
    thread_ast_set(thread, ast);

    if (thread == current_thread()) {
        spl_t s = splsched();
        ast_propagate(thread);
        splx(s);
    }
}
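
/*
 * Illustrative sketch, not part of the original file: choosing between the
 * synchronous and asynchronous AST setters. example_set_ledger_ast() is a
 * hypothetical caller; the real entry points are act_set_astledger() and
 * act_set_astledger_async() below.
 */
static void __attribute__((unused))
example_set_ledger_ast(thread_t thread, boolean_t holding_thread_lock)
{
    if (holding_thread_lock) {
        /* cannot retake the thread lock; delivery waits for the
         * target's next context switch or quantum expiration */
        act_set_ast_async(thread, AST_LEDGER);
    } else {
        /* may IPI the target's processor for prompt delivery */
        act_set_ast(thread, AST_LEDGER);
    }
}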
void
act_set_astbsd(
    thread_t    thread)
{
    act_set_ast(thread, AST_BSD);
}
void
act_set_astkevent(thread_t thread, uint16_t bits)
{
    os_atomic_or(&thread->kevent_ast_bits, bits, relaxed);

    /* kevent AST shouldn't send immediate IPIs */
    act_set_ast_async(thread, AST_KEVENT);
}
uint16_t
act_clear_astkevent(thread_t thread, uint16_t bits)
{
    /*
     * avoid the atomic operation if none of the bits is set,
     * which will be the common case.
     */
    uint16_t cur = os_atomic_load(&thread->kevent_ast_bits, relaxed);
    if (cur & bits) {
        cur = os_atomic_andnot_orig(&thread->kevent_ast_bits, bits, relaxed);
    }

    return cur & bits;
}
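
/*
 * Illustrative sketch, not part of the original file: setting and draining
 * kevent AST bits. EXAMPLE_KQ_BIT and example_drain_kevent_bit() are
 * hypothetical; real bit values are owned by the kevent subsystem.
 */
#define EXAMPLE_KQ_BIT ((uint16_t)0x1)

static void __attribute__((unused))
example_drain_kevent_bit(thread_t thread)
{
    act_set_astkevent(thread, EXAMPLE_KQ_BIT);

    /* returns the requested bits that were still set, clearing them */
    uint16_t was_set = act_clear_astkevent(thread, EXAMPLE_KQ_BIT);
    (void)was_set;
}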
void
act_set_ast_reset_pcs(thread_t thread)
{
    act_set_ast(thread, AST_RESET_PCS);
}
void
act_set_kperf(thread_t thread)
{
    /* safety check */
    if (thread != current_thread()) {
        if (!ml_get_interrupts_enabled()) {
            panic("unsafe act_set_kperf operation");
        }
    }

    act_set_ast(thread, AST_KPERF);
}
#if CONFIG_MACF
void
act_set_astmacf(
    thread_t    thread)
{
    act_set_ast(thread, AST_MACF);
}
#endif
void
act_set_astledger(thread_t thread)
{
    act_set_ast(thread, AST_LEDGER);
}
/*
 * The ledger AST may need to be set while already holding
 * the thread lock. This routine skips sending the IPI,
 * allowing us to avoid the lock hold.
 *
 * However, it means the targeted thread must context switch
 * to recognize the ledger AST.
 */
void
act_set_astledger_async(thread_t thread)
{
    act_set_ast_async(thread, AST_LEDGER);
}
void
act_set_io_telemetry_ast(thread_t thread)
{
    act_set_ast(thread, AST_TELEMETRY_IO);
}