/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Center for Software Science (CSS).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 *	Author:	Bryan Ford, University of Utah CSS
 *
 *	Thread management routines
 */
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/thread_act_server.h>

#include <kern/kern_types.h>
#include <kern/ast.h>
#include <kern/mach_param.h>
#include <kern/zalloc.h>
#include <kern/extmod_statistics.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/exception.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/machine.h>
#include <kern/spl.h>
#include <kern/syscall_subr.h>
#include <kern/sync_lock.h>
#include <kern/processor.h>
#include <kern/timer.h>
#include <kern/affinity.h>

#include <security/mac_mach_internal.h>
static void act_abort(thread_t thread);

static void thread_suspended(void *arg, wait_result_t result);
static void thread_set_apc_ast(thread_t thread);
static void thread_set_apc_ast_locked(thread_t thread);
/*
 * Internal routine to mark a thread as started.
 * Always called with the thread mutex locked.
 */
void
thread_start(
	thread_t			thread)
{
	clear_wait(thread, THREAD_AWAKENED);
	thread->started = TRUE;
}
/*
 * Internal routine to mark a thread as waiting
 * right after it has been created.  The caller
 * is responsible to call wakeup()/thread_wakeup()
 * or thread_terminate() to get it going.
 *
 * Always called with the thread mutex locked.
 *
 * Task and task_threads mutexes also held
 * (so nobody can set the thread running before
 * this point)
 *
 * Converts TH_UNINT wait to THREAD_INTERRUPTIBLE
 * to allow termination from this point forward.
 */
void
thread_start_in_assert_wait(
	thread_t			thread,
	event_t				event,
	wait_interrupt_t	interruptible)
{
	struct waitq *waitq = assert_wait_queue(event);
	wait_result_t wait_result;
	spl_t spl;

	spl = splsched();
	waitq_lock(waitq);

	/* clear out startup condition (safe because thread not started yet) */
	thread_lock(thread);
	assert(!thread->started);
	assert((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT));
	thread->state &= ~(TH_WAIT | TH_UNINT);
	thread_unlock(thread);

	/* assert wait interruptibly forever */
	wait_result = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
	                                         interruptible,
	                                         TIMEOUT_URGENCY_SYS_NORMAL,
	                                         TIMEOUT_WAIT_FOREVER,
	                                         TIMEOUT_NO_LEEWAY,
	                                         thread);
	assert(wait_result == THREAD_WAITING);

	/* mark thread started while we still hold the waitq lock */
	thread_lock(thread);
	thread->started = TRUE;
	thread_unlock(thread);

	waitq_unlock(waitq);
	splx(spl);
}
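/*
 * Illustrative sketch (not part of the original file): a creator can park a
 * newly created thread on an event before it ever runs, then release it later
 * with a wakeup.  The event variable and helper name are hypothetical; the
 * caller must hold the thread, task and task_threads mutexes as described
 * above.
 */
#if 0	/* example only, never compiled */
static int example_event;

static void
example_park_new_thread(thread_t new_thread)
{
	/* new_thread was just created and has not been started */
	thread_start_in_assert_wait(new_thread, &example_event,
	                            THREAD_INTERRUPTIBLE);

	/* ... later, from another context ... */
	thread_wakeup(&example_event);	/* lets new_thread begin running */
}
#endif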
/*
 * Internal routine to terminate a thread.
 * Sometimes called with task already locked.
 */
kern_return_t
thread_terminate_internal(
	thread_t			thread)
{
	kern_return_t		result = KERN_SUCCESS;

	thread_mtx_lock(thread);

	if (thread->active) {
		thread->active = FALSE;

		act_abort(thread);

		if (thread->started)
			clear_wait(thread, THREAD_INTERRUPTED);
		else {
			thread_start(thread);
		}
	}
	else
		result = KERN_TERMINATED;

	if (thread->affinity_set != NULL)
		thread_affinity_terminate(thread);

	thread_mtx_unlock(thread);

	if (thread != current_thread() && result == KERN_SUCCESS)
		thread_wait(thread, FALSE);

	return (result);
}
/*
 * Terminate a thread.
 */
kern_return_t
thread_terminate(
	thread_t		thread)
{
	kern_return_t	result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (	thread->task == kernel_task		&&
			thread != current_thread()			)
		return (KERN_FAILURE);

	result = thread_terminate_internal(thread);

	/*
	 * If a kernel thread is terminating itself, force an AST here.
	 * Kernel threads don't normally pass through the AST checking
	 * code - and all threads finish their own termination in thread_apc_ast.
	 */
	if (thread->task == kernel_task) {
		ml_set_interrupts_enabled(FALSE);
		ast_taken(AST_APC, TRUE);
		panic("thread_terminate");
	}

	return (result);
}
/*
 * Suspend execution of the specified thread.
 * This is a recursive-style suspension of the thread, a count of
 * suspends is maintained.
 *
 * Called with thread mutex held.
 */
void
thread_hold(thread_t thread)
{
	if (thread->suspend_count++ == 0) {
		thread_set_apc_ast(thread);
		assert(thread->suspend_parked == FALSE);
	}
}
/*
 * Decrement internal suspension count, setting thread
 * runnable when count falls to zero.
 *
 * Because the wait is abortsafe, we can't be guaranteed that the thread
 * is currently actually waiting even if suspend_parked is set.
 *
 * Called with thread mutex held.
 */
void
thread_release(thread_t thread)
{
	assertf(thread->suspend_count > 0, "thread %p over-resumed", thread);

	/* fail-safe on non-assert builds */
	if (thread->suspend_count == 0)
		return;

	if (--thread->suspend_count == 0) {
		if (!thread->started) {
			thread_start(thread);
		} else if (thread->suspend_parked) {
			thread->suspend_parked = FALSE;
			thread_wakeup_thread(&thread->suspend_count, thread);
		}
	}
}
kern_return_t
thread_suspend(thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread->user_stop_count++ == 0)
			thread_hold(thread);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	if (thread != current_thread() && result == KERN_SUCCESS)
		thread_wait(thread, FALSE);

	return (result);
}
kern_return_t
thread_resume(thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread->user_stop_count > 0) {
			if (--thread->user_stop_count == 0)
				thread_release(thread);
		} else {
			result = KERN_FAILURE;
		}
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return (result);
}
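/*
 * Illustrative sketch (not part of the original file): user-level stops are
 * counted, so suspends and resumes must balance.  Two thread_suspend() calls
 * require two thread_resume() calls before the target runs again, and an
 * unbalanced thread_resume() returns KERN_FAILURE.
 */
#if 0	/* example only, never compiled */
static void
example_counted_suspend(thread_t target)
{
	thread_suspend(target);	/* user_stop_count 0 -> 1, thread held */
	thread_suspend(target);	/* user_stop_count 1 -> 2 */

	thread_resume(target);	/* user_stop_count 2 -> 1, still stopped */
	thread_resume(target);	/* user_stop_count 1 -> 0, thread released */
}
#endif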
/*
 *	thread_depress_abort:
 *
 *	Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort(
	thread_t				thread)
{
	kern_return_t			result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active)
		result = thread_depress_abort_internal(thread);
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
/*
 * Indicate that the thread should run the AST_APC callback
 * to detect an abort condition.
 *
 * Called with thread mutex held.
 */
static void
act_abort(
	thread_t	thread)
{
	spl_t	s = splsched();

	thread_lock(thread);

	if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
		thread->sched_flags |= TH_SFLAG_ABORT;
		thread_set_apc_ast_locked(thread);
	} else {
		thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;
	}

	thread_unlock(thread);
	splx(s);
}
kern_return_t
thread_abort(
	thread_t	thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		act_abort(thread);
		clear_wait(thread, THREAD_INTERRUPTED);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
kern_return_t
thread_abort_safely(
	thread_t		thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		spl_t		s = splsched();

		thread_lock(thread);
		if (!thread->at_safe_point ||
		    clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
			if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
				thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
				thread_set_apc_ast_locked(thread);
			}
		}
		thread_unlock(thread);
		splx(s);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
/*** backward compatibility hacks ***/
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <ipc/ipc_port.h>
kern_return_t
thread_info(
	thread_t				thread,
	thread_flavor_t			flavor,
	thread_info_t			thread_info_out,
	mach_msg_type_number_t	*thread_info_count)
{
	kern_return_t			result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active || thread->inspection)
		result = thread_info_internal(
						thread, flavor, thread_info_out, thread_info_count);
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
kern_return_t
thread_get_state(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,			/* pointer to OUT array */
	mach_msg_type_number_t	*state_count)	/*IN/OUT*/
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, FALSE)) {
				thread_mtx_lock(thread);
				result = machine_thread_get_state(
										thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_get_state(
									thread, flavor, state, state_count);
	}
	else if (thread->inspection) {
		result = machine_thread_get_state(
									thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
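/*
 * Illustrative sketch (not part of the original file): reading another
 * thread's register state.  The flavor, state type and count shown are the
 * x86_64 ones and are assumptions for the example; other architectures use
 * their own thread-state flavors.
 */
#if 0	/* example only, never compiled */
static kern_return_t
example_read_registers(thread_t target)
{
	x86_thread_state64_t	ts;
	mach_msg_type_number_t	count = x86_THREAD_STATE64_COUNT;

	/* stops the target if needed, then samples its saved register file */
	return thread_get_state(target, x86_THREAD_STATE64,
	                        (thread_state_t)&ts, &count);
}
#endif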
/*
 *	Change thread's machine-dependent state.  Called with nothing
 *	locked.  Returns same way.
 */
static kern_return_t
thread_set_state_internal(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	state_count,
	boolean_t				from_user)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_state(
										thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_set_state(
									thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	if ((result == KERN_SUCCESS) && from_user)
		extmod_statistics_incr_thread_set_state(thread);

	thread_mtx_unlock(thread);

	return (result);
}
/* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
kern_return_t
thread_set_state(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	state_count);

kern_return_t
thread_set_state(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	state_count)
{
	return thread_set_state_internal(thread, flavor, state, state_count, FALSE);
}

kern_return_t
thread_set_state_from_user(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	state_count)
{
	return thread_set_state_internal(thread, flavor, state, state_count, TRUE);
}
/*
 * Kernel-internal "thread" interfaces used outside this file:
 */

/* Initialize (or re-initialize) a thread state.  Called from execve
 * with nothing locked, returns same way.
 */
kern_return_t
thread_state_initialize(
	thread_t		thread)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_state_initialize( thread );
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_state_initialize( thread );
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
kern_return_t
thread_dup(
	thread_t	target)
{
	thread_t			self = current_thread();
	kern_return_t		result = KERN_SUCCESS;

	if (target == THREAD_NULL || target == self)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(target);

	if (target->active) {
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(self, target);
			if (self->affinity_set != AFFINITY_SET_NULL)
				thread_affinity_dup(self, target);
			thread_unstop(target);
		}
		else {
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(target);

	return (result);
}
kern_return_t
thread_dup2(
	thread_t	source,
	thread_t	target)
{
	kern_return_t		result = KERN_SUCCESS;
	uint32_t			active = 0;

	if (source == THREAD_NULL || target == THREAD_NULL || target == source)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(source);
	active = source->active;
	thread_mtx_unlock(source);

	if (!active) {
		return KERN_TERMINATED;
	}

	thread_mtx_lock(target);

	if (target->active || target->inspection) {
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(source, target);
			if (source->affinity_set != AFFINITY_SET_NULL)
				thread_affinity_dup(source, target);
			thread_unstop(target);
		}
		else {
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(target);

	return (result);
}
/*
 *	thread_setstatus:
 *
 *	Set the status of the specified thread.
 *	Called with (and returns with) no locks held.
 */
kern_return_t
thread_setstatus(
	thread_t				thread,
	int						flavor,
	thread_state_t			tstate,
	mach_msg_type_number_t	count)
{
	return (thread_set_state(thread, flavor, tstate, count));
}

/*
 *	thread_getstatus:
 *
 *	Get the status of the specified thread.
 */
kern_return_t
thread_getstatus(
	thread_t				thread,
	int						flavor,
	thread_state_t			tstate,
	mach_msg_type_number_t	*count)
{
	return (thread_get_state(thread, flavor, tstate, count));
}
/*
 *	Change thread's machine-dependent userspace TSD base.
 *	Called with nothing locked.  Returns same way.
 */
kern_return_t
thread_set_tsd_base(
	thread_t			thread,
	mach_vm_offset_t	tsd_base)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_tsd_base(thread, tsd_base);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_set_tsd_base(thread, tsd_base);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
/*
 * thread_set_apc_ast:
 *
 * Register the AST_APC callback that handles suspension and
 * termination, if it hasn't been installed already.
 *
 * Called with the thread mutex held.
 */
static void
thread_set_apc_ast(thread_t thread)
{
	spl_t s = splsched();

	thread_lock(thread);
	thread_set_apc_ast_locked(thread);
	thread_unlock(thread);

	splx(s);
}
/*
 * thread_set_apc_ast_locked:
 *
 * Do the work of registering for the AST_APC callback.
 *
 * Called with the thread mutex and scheduling lock held.
 */
static void
thread_set_apc_ast_locked(thread_t thread)
{
	/*
	 * Temporarily undepress, so target has
	 * a chance to do locking required to
	 * block itself in thread_suspended.
	 *
	 * Leaves the depress flag set so we can reinstate when it's blocked.
	 */
	if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)
		thread_recompute_sched_pri(thread, TRUE);

	thread_ast_set(thread, AST_APC);

	if (thread == current_thread()) {
		ast_propagate(thread->ast);
	} else {
		processor_t processor = thread->last_processor;

		if (processor != PROCESSOR_NULL &&
		    processor->state == PROCESSOR_RUNNING &&
		    processor->active_thread == thread) {
			cause_ast_check(processor);
		}
	}
}
/*
 * Activation control support routines internal to this file:
 *
 */

/*
 * thread_suspended
 *
 * Continuation routine for thread suspension.  It checks
 * to see whether there has been any new suspensions.  If so, it
 * installs the AST_APC handler again.  Otherwise, it checks to see
 * if the current depression needs to be re-instated (it may have
 * been temporarily removed in order to get to this point in a hurry).
 */
__attribute__((noreturn))
static void
thread_suspended(__unused void *parameter, wait_result_t result)
{
	thread_t thread = current_thread();

	thread_mtx_lock(thread);

	if (result == THREAD_INTERRUPTED)
		thread->suspend_parked = FALSE;
	else
		assert(thread->suspend_parked == FALSE);

	if (thread->suspend_count > 0) {
		thread_set_apc_ast(thread);
	} else {
		spl_t s = splsched();

		thread_lock(thread);
		if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
			thread->sched_pri = DEPRESSPRI;
			thread->last_processor->current_pri = thread->sched_pri;

			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
			                      (uintptr_t)thread_tid(thread),
			                      thread->base_pri,
			                      thread->sched_pri,
			                      0, /* eventually, 'reason' */
			                      0);
		}
		thread_unlock(thread);
		splx(s);
	}

	thread_mtx_unlock(thread);

	thread_exception_return();
	/*NOTREACHED*/
}
/*
 * thread_apc_ast - handles AST_APC and drives thread suspension and termination.
 * Called with nothing locked.  Returns (if it returns) the same way.
 */
void
thread_apc_ast(thread_t thread)
{
	thread_mtx_lock(thread);

	assert(thread->suspend_parked == FALSE);

	spl_t s = splsched();
	thread_lock(thread);

	/* TH_SFLAG_POLLDEPRESS is OK to have here */
	assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);

	thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
	thread_unlock(thread);
	splx(s);

	if (!thread->active) {
		/* Thread is ready to terminate, time to tear it down */
		thread_mtx_unlock(thread);

		thread_terminate_self();
		/*NOTREACHED*/
	}

	/* If we're suspended, go to sleep and wait for someone to wake us up. */
	if (thread->suspend_count > 0) {
		thread->suspend_parked = TRUE;
		assert_wait(&thread->suspend_count, THREAD_ABORTSAFE);
		thread_mtx_unlock(thread);

		thread_block(thread_suspended);
		/*NOTREACHED*/
	}

	thread_mtx_unlock(thread);
}
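/*
 * Summary of the suspension round trip implemented above (a sketch derived
 * from the routines in this file): thread_suspend() takes the first
 * user_stop_count reference and calls thread_hold(), which installs AST_APC
 * via thread_set_apc_ast().  The target notices the AST, enters
 * thread_apc_ast(), parks on &thread->suspend_count, and blocks with
 * thread_suspended as its continuation.  thread_resume() drops the last
 * reference; thread_release() then wakes the parked thread, and
 * thread_suspended() either re-arms the APC (new suspends arrived),
 * re-instates a pending priority depression, or returns to user mode via
 * thread_exception_return().
 */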
/* Prototype, see justification above */
kern_return_t
act_set_state(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	count);

kern_return_t
act_set_state(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_set_state(thread, flavor, state, count));
}
kern_return_t
act_set_state_from_user(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_set_state_from_user(thread, flavor, state, count));
}
kern_return_t
act_get_state(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	*count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_get_state(thread, flavor, state, count));
}
static void
act_set_ast(
	thread_t	thread,
	ast_t		ast)
{
	spl_t s = splsched();

	if (thread == current_thread()) {
		thread_ast_set(thread, ast);
		ast_propagate(thread->ast);
	} else {
		processor_t processor;

		thread_lock(thread);
		thread_ast_set(thread, ast);
		processor = thread->last_processor;
		if (	processor != PROCESSOR_NULL			&&
				processor->state == PROCESSOR_RUNNING	&&
				processor->active_thread == thread		)
			cause_ast_check(processor);
		thread_unlock(thread);
	}

	splx(s);
}
void
act_set_astbsd(
	thread_t	thread)
{
	act_set_ast( thread, AST_BSD );
}

void
act_set_kperf(
	thread_t	thread)
{
	/* safety check */
	if (thread != current_thread())
		if ( !ml_get_interrupts_enabled() )
			panic("unsafe act_set_kperf operation");

	act_set_ast( thread, AST_KPERF );
}

#if CONFIG_MACF
void
act_set_astmacf(
	thread_t	thread)
{
	act_set_ast( thread, AST_MACF);
}
#endif

void
set_astledger(thread_t thread)
{
	act_set_ast(thread, AST_LEDGER);
}

void
act_set_io_telemetry_ast(thread_t thread)
{
	act_set_ast(thread, AST_TELEMETRY_IO);
}