/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Center for Software Science (CSS).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 *	Author:	Bryan Ford, University of Utah CSS
 *
 *	Thread management routines
 */
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/alert.h>
#include <mach/thread_act_server.h>

#include <kern/kern_types.h>
#include <kern/ast.h>
#include <kern/mach_param.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/exception.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/machine.h>
#include <kern/spl.h>
#include <kern/syscall_subr.h>
#include <kern/sync_lock.h>
#include <kern/processor.h>
#include <kern/timer.h>
#include <kern/affinity.h>
void	act_abort(thread_t);
void	install_special_handler_locked(thread_t);
void	special_handler_continue(void);
/*
 * Internal routine to mark a thread as started.
 * Always called with the thread locked.
 *
 * Note: function intentionally declared with the noinline attribute to
 * prevent multiple declaration of probe symbols in this file; we would
 * prefer "#pragma noinline", but gcc does not support it.
 * PR-6385749 -- the lwp-start probe should fire from within the context
 * of the newly created thread.  Commented out for now, in case we
 * turn it into a dead code probe.
 */
void
thread_start_internal(
	thread_t			thread)
{
	clear_wait(thread, THREAD_AWAKENED);
	thread->started = TRUE;
	// DTRACE_PROC1(lwp__start, thread_t, thread);
}
/*
 * Internal routine to terminate a thread.
 * Sometimes called with task already locked.
 */
kern_return_t
thread_terminate_internal(
	thread_t			thread)
{
	kern_return_t		result = KERN_SUCCESS;

	thread_mtx_lock(thread);

	if (thread->active) {
		thread->active = FALSE;

		act_abort(thread);

		if (thread->started)
			clear_wait(thread, THREAD_INTERRUPTED);
		else {
			thread_start_internal(thread);
		}
	}
	else
		result = KERN_TERMINATED;

	if (thread->affinity_set != NULL)
		thread_affinity_terminate(thread);

	thread_mtx_unlock(thread);

	if (thread != current_thread() && result == KERN_SUCCESS)
		thread_terminate_enqueue(thread);

	return (result);
}
/*
 *	Terminate a thread.
 */
kern_return_t
thread_terminate(
	thread_t		thread)
{
	kern_return_t	result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (	thread->task == kernel_task	&&
			thread != current_thread()		)
		return (KERN_FAILURE);

	result = thread_terminate_internal(thread);

	/*
	 * If a kernel thread is terminating itself, force an AST here.
	 * Kernel threads don't normally pass through the AST checking
	 * code - and all threads finish their own termination in the
	 * special handler APC.
	 */
	if (thread->task == kernel_task) {
		ml_set_interrupts_enabled(FALSE);
		ast_taken(AST_APC, TRUE);
		panic("thread_terminate");
	}

	return (result);
}
/*
 * Suspend execution of the specified thread.
 * This is a recursive-style suspension of the thread, a count of
 * suspends is maintained.
 *
 * Called with thread mutex held.
 */
void
thread_hold(
	register thread_t	thread)
{
	if (thread->suspend_count++ == 0) {
		install_special_handler(thread);
		if (thread->started)
			thread_wakeup_one(&thread->suspend_count);
	}
}
/*
 * Decrement internal suspension count, setting thread
 * runnable when count falls to zero.
 *
 * Called with thread mutex held.
 */
void
thread_release(
	register thread_t	thread)
{
	if (	thread->suspend_count > 0		&&
			--thread->suspend_count == 0	) {
		if (thread->started)
			thread_wakeup_one(&thread->suspend_count);
		else {
			thread_start_internal(thread);
		}
	}
}
kern_return_t
thread_suspend(
	register thread_t	thread)
{
	thread_t			self = current_thread();
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (	thread->user_stop_count++ == 0		&&
				thread->suspend_count++ == 0		) {
			install_special_handler(thread);
			if (thread->started)
				thread_wakeup_one(&thread->suspend_count);
		}
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	if (thread != self && result == KERN_SUCCESS)
		thread_wait(thread);

	return (result);
}
kern_return_t
thread_resume(
	register thread_t	thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread->user_stop_count > 0) {
			if (	--thread->user_stop_count == 0		&&
					--thread->suspend_count == 0		) {
				if (thread->started)
					thread_wakeup_one(&thread->suspend_count);
				else {
					thread_start_internal(thread);
				}
			}
		}
		else
			result = KERN_FAILURE;
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
/*
 *	thread_depress_abort:
 *
 *	Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort(
	register thread_t	thread)
{
	kern_return_t		result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active)
		result = thread_depress_abort_internal(thread);
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
/*
 * Indicate that the activation should run its
 * special handler to detect a condition.
 *
 * Called with thread mutex held.
 */
void
act_abort(
	thread_t	thread)
{
	spl_t		s = splsched();

	thread_lock(thread);

	if (!(thread->sched_mode & TH_MODE_ABORT)) {
		thread->sched_mode |= TH_MODE_ABORT;
		install_special_handler_locked(thread);
	}
	else
		thread->sched_mode &= ~TH_MODE_ABORTSAFELY;

	thread_unlock(thread);
	splx(s);
}
kern_return_t
thread_abort(
	register thread_t	thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		act_abort(thread);
		clear_wait(thread, THREAD_INTERRUPTED);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
kern_return_t
thread_abort_safely(
	thread_t		thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		spl_t		s = splsched();

		thread_lock(thread);
		if (!thread->at_safe_point ||
				clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
			if (!(thread->sched_mode & TH_MODE_ABORT)) {
				thread->sched_mode |= TH_MODE_ISABORTED;
				install_special_handler_locked(thread);
			}
		}
		thread_unlock(thread);
		splx(s);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
/*** backward compatibility hacks ***/
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <ipc/ipc_port.h>
kern_return_t
thread_info(
	thread_t				thread,
	thread_flavor_t			flavor,
	thread_info_t			thread_info_out,
	mach_msg_type_number_t	*thread_info_count)
{
	kern_return_t			result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active)
		result = thread_info_internal(
						thread, flavor, thread_info_out, thread_info_count);
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
kern_return_t
thread_get_state(
	register thread_t		thread,
	int						flavor,
	thread_state_t			state,			/* pointer to OUT array */
	mach_msg_type_number_t	*state_count)	/*IN/OUT*/
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread)) {
				thread_mtx_lock(thread);
				result = machine_thread_get_state(
										thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_get_state(
									thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
/*
 *	Change thread's machine-dependent state.  Called with nothing
 *	locked.  Returns same way.
 */
kern_return_t
thread_set_state(
	register thread_t		thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	state_count)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_state(
										thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_set_state(
									thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
/*
 * Kernel-internal "thread" interfaces used outside this file:
 */

/* Initialize (or re-initialize) a thread state.  Called from execve
 * with nothing locked, returns same way.
 */
kern_return_t
thread_state_initialize(
	register thread_t		thread)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread)) {
				thread_mtx_lock(thread);
				result = machine_thread_state_initialize( thread );
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_state_initialize( thread );
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
kern_return_t
thread_dup(
	register thread_t	target)
{
	thread_t			self = current_thread();
	kern_return_t		result = KERN_SUCCESS;

	if (target == THREAD_NULL || target == self)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(target);

	if (target->active) {
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(self, target);
			if (self->affinity_set != AFFINITY_SET_NULL)
				thread_affinity_dup(self, target);
			thread_unstop(target);
		}
		else {
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(target);

	return (result);
}
/*
 *	thread_setstatus:
 *
 *	Set the status of the specified thread.
 *	Called with (and returns with) no locks held.
 */
kern_return_t
thread_setstatus(
	register thread_t		thread,
	int						flavor,
	thread_state_t			tstate,
	mach_msg_type_number_t	count)
{
	return (thread_set_state(thread, flavor, tstate, count));
}
/*
 *	thread_getstatus:
 *
 *	Get the status of the specified thread.
 */
kern_return_t
thread_getstatus(
	register thread_t		thread,
	int						flavor,
	thread_state_t			tstate,
	mach_msg_type_number_t	*count)
{
	return (thread_get_state(thread, flavor, tstate, count));
}
/*
 * install_special_handler:
 *
 *	Install the special returnhandler that handles suspension and
 *	termination, if it hasn't been installed already.
 *
 *	Called with the thread mutex held.
 */
void
install_special_handler(
	thread_t		thread)
{
	spl_t		s = splsched();

	thread_lock(thread);
	install_special_handler_locked(thread);
	thread_unlock(thread);

	splx(s);
}
/*
 * install_special_handler_locked:
 *
 *	Do the work of installing the special_handler.
 *
 *	Called with the thread mutex and scheduling lock held.
 */
void
install_special_handler_locked(
	thread_t		thread)
{
	ReturnHandler	**rh;

	/* The work handler must always be the last ReturnHandler on the list,
	   because it can do tricky things like detach the thr_act. */
	for (rh = &thread->handlers; *rh; rh = &(*rh)->next)
		continue;

	if (rh != &thread->special_handler.next)
		*rh = &thread->special_handler;

	/*
	 * Temporarily undepress, so target has
	 * a chance to do locking required to
	 * block itself in special_handler().
	 */
	if (thread->sched_mode & TH_MODE_ISDEPRESSED)
		compute_priority(thread, TRUE);

	thread_ast_set(thread, AST_APC);

	if (thread == current_thread())
		ast_propagate(thread->ast);
	else {
		processor_t		processor = thread->last_processor;

		if (	processor != PROCESSOR_NULL				&&
				processor->state == PROCESSOR_RUNNING	&&
				processor->active_thread == thread		)
			cause_ast_check(processor);
	}
}
/*
 * Activation control support routines internal to this file:
 */

void
act_execute_returnhandlers(void)
{
	thread_t	thread = current_thread();

	thread_ast_clear(thread, AST_APC);
	spllo();

	for (;;) {
		ReturnHandler	*rh;

		thread_mtx_lock(thread);

		(void)splsched();
		thread_lock(thread);

		rh = thread->handlers;
		if (rh != NULL) {
			thread->handlers = rh->next;

			thread_unlock(thread);
			spllo();

			thread_mtx_unlock(thread);

			/* Execute it */
			(*rh->handler)(rh, thread);
		}
		else
			break;
	}

	thread_unlock(thread);
	spllo();

	thread_mtx_unlock(thread);
}
/*
 * special_handler_continue
 *
 * Continuation routine for the special handler blocks.  It checks
 * to see whether there has been any new suspensions.  If so, it
 * installs the special handler again.  Otherwise, it checks to see
 * if the current depression needs to be re-instated (it may have
 * been temporarily removed in order to get to this point in a hurry).
 */
void
special_handler_continue(void)
{
	thread_t		thread = current_thread();

	thread_mtx_lock(thread);

	if (thread->suspend_count > 0)
		install_special_handler(thread);
	else {
		spl_t		s = splsched();

		thread_lock(thread);
		if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
			processor_t		myprocessor = thread->last_processor;

			thread->sched_pri = DEPRESSPRI;
			myprocessor->current_pri = thread->sched_pri;
		}
		thread_unlock(thread);
		splx(s);
	}

	thread_mtx_unlock(thread);

	thread_exception_return();
	/*NOTREACHED*/
}
/*
 * special_handler	- handles suspension, termination.  Called
 * with nothing locked.  Returns (if it returns) the same way.
 */
void
special_handler(
	__unused ReturnHandler	*rh,
	thread_t				thread)
{
	spl_t		s;

	thread_mtx_lock(thread);

	s = splsched();
	thread_lock(thread);
	thread->sched_mode &= ~TH_MODE_ISABORTED;	/* clear any aborts */
	thread_unlock(thread);
	splx(s);

	/*
	 * If we're suspended, go to sleep and wait for someone to wake us up.
	 */
	if (thread->active) {
		if (thread->suspend_count > 0) {
			if (thread->handlers == NULL) {
				assert_wait(&thread->suspend_count, THREAD_ABORTSAFE);
				thread_mtx_unlock(thread);
				thread_block((thread_continue_t)special_handler_continue);
				/*NOTREACHED*/
			}

			thread_mtx_unlock(thread);

			special_handler_continue();
			/*NOTREACHED*/
		}
	}
	else {
		thread_mtx_unlock(thread);

		thread_terminate_self();
		/*NOTREACHED*/
	}

	thread_mtx_unlock(thread);
}
kern_return_t
act_set_state(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_set_state(thread, flavor, state, count));
}
kern_return_t
act_get_state(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	*count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_get_state(thread, flavor, state, count));
}
void
act_set_astbsd(
	thread_t	thread)
{
	spl_t		s = splsched();

	if (thread == current_thread()) {
		thread_ast_set(thread, AST_BSD);
		ast_propagate(thread->ast);
	}
	else {
		processor_t		processor;

		thread_lock(thread);
		thread_ast_set(thread, AST_BSD);
		processor = thread->last_processor;
		if (	processor != PROCESSOR_NULL				&&
				processor->state == PROCESSOR_RUNNING	&&
				processor->active_thread == thread		)
			cause_ast_check(processor);
		thread_unlock(thread);
	}

	splx(s);
}
void
act_set_apc(
	thread_t	thread)
{
	spl_t		s = splsched();

	if (thread == current_thread()) {
		thread_ast_set(thread, AST_APC);
		ast_propagate(thread->ast);
	}
	else {
		processor_t		processor;

		thread_lock(thread);
		thread_ast_set(thread, AST_APC);
		processor = thread->last_processor;
		if (	processor != PROCESSOR_NULL				&&
				processor->state == PROCESSOR_RUNNING	&&
				processor->active_thread == thread		)
			cause_ast_check(processor);
		thread_unlock(thread);
	}

	splx(s);
}