/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Center for Software Science (CSS).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 *	Author:	Bryan Ford, University of Utah CSS
 *
 *	Thread management routines
 */
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/alert.h>
#include <mach_prof.h>
#include <mach/thread_act_server.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/exception.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/profile.h>
#include <kern/machine.h>
#include <kern/syscall_subr.h>
#include <kern/sync_lock.h>
#include <kern/processor.h>
#include <kern/timer.h>
#include <mach_prof.h>
void			act_abort(thread_t);
void			act_set_apc(thread_t);
void			install_special_handler_locked(thread_t);
void			special_handler_continue(void);
/*
 * Internal routine to terminate a thread.
 * Sometimes called with task already locked.
 */
kern_return_t
thread_terminate_internal(
	thread_t			thread)
{
	kern_return_t		result = KERN_SUCCESS;

	thread_mtx_lock(thread);

	if (thread->active) {
		thread->active = FALSE;

		act_abort(thread);

		if (thread->started)
			clear_wait(thread, THREAD_INTERRUPTED);
		else {
			clear_wait(thread, THREAD_AWAKENED);
			thread->started = TRUE;
		}
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	if (thread != current_thread() && result == KERN_SUCCESS)
		thread_wait(thread);

	return (result);
}
/*
 *	Terminate a thread.
 */
kern_return_t
thread_terminate(
	register thread_t	thread)
{
	kern_return_t	result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (	thread->task == kernel_task		&&
			thread != current_thread()			)
		return (KERN_FAILURE);

	result = thread_terminate_internal(thread);

	/*
	 * If a kernel thread is terminating itself, force an AST here.
	 * Kernel threads don't normally pass through the AST checking
	 * code - and all threads finish their own termination in the
	 * special handler APC.
	 */
	if (thread->task == kernel_task) {
		ml_set_interrupts_enabled(FALSE);
		ast_taken(AST_APC, TRUE);
		panic("thread_terminate");
	}

	return (result);
}
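/*
 * Illustrative sketch (not part of the original file): how a kernel-task
 * thread would normally exit through the path above.  A kernel thread calls
 * thread_terminate() on itself; the forced AST then finishes the termination
 * in the special handler APC, so control never returns.  The worker body and
 * its exit predicate below are hypothetical.
 */
#if 0	/* example only, not compiled */
static void
example_kernel_worker(void)
{
	for (;;) {
		/* ... do some hypothetical work until told to stop ... */
		if (example_should_exit())		/* hypothetical predicate */
			break;
	}

	/* Self-termination: does not return. */
	(void) thread_terminate(current_thread());
	/*NOTREACHED*/
}
#endif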
/*
 *	Suspend execution of the specified thread.
 *	This is a recursive-style suspension of the thread, a count of
 *	suspends is maintained.
 *
 *	Called with thread mutex held.
 */
void
thread_hold(
	register thread_t	thread)
{
	if (thread->suspend_count++ == 0) {
		install_special_handler(thread);
		if (thread->started)
			thread_wakeup_one(&thread->suspend_count);
	}
}
/*
 *	Decrement internal suspension count, setting thread
 *	runnable when count falls to zero.
 *
 *	Called with thread mutex held.
 */
void
thread_release(
	register thread_t	thread)
{
	if (	thread->suspend_count > 0		&&
			--thread->suspend_count == 0	) {
		if (thread->started)
			thread_wakeup_one(&thread->suspend_count);
		else {
			clear_wait(thread, THREAD_AWAKENED);
			thread->started = TRUE;
		}
	}
}
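/*
 * Illustrative sketch (not part of the original file): the internal
 * hold/release pair is counted, and both routines expect the caller to hold
 * the thread mutex.  The helper name below is hypothetical.
 */
#if 0	/* example only, not compiled */
static void
example_hold_then_release(thread_t thread)
{
	thread_mtx_lock(thread);
	thread_hold(thread);		/* count 0 -> 1: special handler installed */
	thread_hold(thread);		/* count 1 -> 2: no additional work */
	thread_mtx_unlock(thread);

	/* ... later ... */

	thread_mtx_lock(thread);
	thread_release(thread);		/* count 2 -> 1: thread stays held */
	thread_release(thread);		/* count 1 -> 0: thread made runnable */
	thread_mtx_unlock(thread);
}
#endif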
kern_return_t
thread_suspend(
	register thread_t	thread)
{
	thread_t			self = current_thread();
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (	thread->user_stop_count++ == 0		&&
				thread->suspend_count++ == 0		) {
			install_special_handler(thread);
			if (thread != self)
				thread_wakeup_one(&thread->suspend_count);
		}
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	if (thread != self && result == KERN_SUCCESS)
		thread_wait(thread);

	return (result);
}
kern_return_t
thread_resume(
	register thread_t	thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread->user_stop_count > 0) {
			if (	--thread->user_stop_count == 0		&&
					--thread->suspend_count == 0		) {
				if (thread->started)
					thread_wakeup_one(&thread->suspend_count);
				else {
					clear_wait(thread, THREAD_AWAKENED);
					thread->started = TRUE;
				}
			}
		}
		else
			result = KERN_FAILURE;
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
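/*
 * Illustrative sketch (not part of the original file): the user-visible
 * suspend/resume pair is also counted, so nested suspends must be matched by
 * the same number of resumes before the target runs again.  The caller below
 * is hypothetical.
 */
#if 0	/* example only, not compiled */
static void
example_suspend_resume(thread_t target)
{
	if (thread_suspend(target) != KERN_SUCCESS)	/* stop count 0 -> 1 */
		return;
	(void) thread_suspend(target);				/* stop count 1 -> 2 */

	/* ... inspect or manipulate the stopped thread ... */

	(void) thread_resume(target);				/* stop count 2 -> 1 */
	(void) thread_resume(target);				/* stop count 1 -> 0: runnable */
}
#endif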
/*
 *	thread_depress_abort:
 *
 *	Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort(
	register thread_t	thread)
{
	kern_return_t		result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active)
		result = thread_depress_abort_internal(thread);
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
/*
 * Indicate that the activation should run its
 * special handler to detect a condition.
 *
 * Called with thread mutex held.
 */
void
act_abort(
	thread_t	thread)
{
	spl_t		s = splsched();

	thread_lock(thread);

	if (!(thread->state & TH_ABORT)) {
		thread->state |= TH_ABORT;
		install_special_handler_locked(thread);
	}
	else
		thread->state &= ~TH_ABORT_SAFELY;

	thread_unlock(thread);
	splx(s);
}
kern_return_t
thread_abort(
	register thread_t	thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		act_abort(thread);
		clear_wait(thread, THREAD_INTERRUPTED);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
kern_return_t
thread_abort_safely(
	thread_t		thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		spl_t		s = splsched();

		thread_lock(thread);
		if (!thread->at_safe_point ||
				clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
			if (!(thread->state & TH_ABORT)) {
				thread->state |= (TH_ABORT|TH_ABORT_SAFELY);
				install_special_handler_locked(thread);
			}
		}
		thread_unlock(thread);
		splx(s);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
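/*
 * Illustrative sketch (not part of the original file): thread_abort() always
 * interrupts the target's current wait, while thread_abort_safely() only
 * does so when the target is waiting at a safe point, and otherwise defers
 * the abort to the special handler.  The caller below is hypothetical.
 */
#if 0	/* example only, not compiled */
static void
example_abort(thread_t target, boolean_t force)
{
	if (force)
		(void) thread_abort(target);		/* unconditional interrupt */
	else
		(void) thread_abort_safely(target);	/* deferred unless at a safe point */
}
#endif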
/*** backward compatibility hacks ***/
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <ipc/ipc_port.h>

kern_return_t
thread_info(
	thread_t				thread,
	thread_flavor_t			flavor,
	thread_info_t			thread_info_out,
	mach_msg_type_number_t	*thread_info_count)
{
	kern_return_t			result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active)
		result = thread_info_internal(
						thread, flavor, thread_info_out, thread_info_count);
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
kern_return_t
thread_get_state(
	register thread_t		thread,
	int						flavor,
	thread_state_t			state,			/* pointer to OUT array */
	mach_msg_type_number_t	*state_count)	/*IN/OUT*/
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread)) {
				thread_mtx_lock(thread);
				result = machine_thread_get_state(
										thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_get_state(
									thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
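/*
 * Illustrative sketch (not part of the original file): fetching another
 * thread's register state through the routine above.  MACHINE_THREAD_STATE,
 * MACHINE_THREAD_STATE_COUNT and THREAD_STATE_MAX come from the Mach
 * thread_status headers; the caller and its error handling are hypothetical.
 */
#if 0	/* example only, not compiled */
static kern_return_t
example_sample_registers(thread_t target)
{
	natural_t				regs[THREAD_STATE_MAX];
	mach_msg_type_number_t	count = MACHINE_THREAD_STATE_COUNT;

	/* Stops the target, copies out its state, then lets it run again. */
	return (thread_get_state(target, MACHINE_THREAD_STATE,
								(thread_state_t)regs, &count));
}
#endif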
/*
 *	Change thread's machine-dependent state.  Called with
 *	nothing locked.  Returns same way.
 */
kern_return_t
thread_set_state(
	register thread_t		thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	state_count)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_state(
										thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_set_state(
									thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
/*
 * Kernel-internal "thread" interfaces used outside this file:
 */

/* Initialize (or re-initialize) a thread state.  Called from execve
 * with nothing locked, returns same way.
 */
kern_return_t
thread_state_initialize(
	register thread_t	thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread)) {
				thread_mtx_lock(thread);
				result = machine_thread_state_initialize( thread );
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_state_initialize( thread );
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
kern_return_t
thread_dup(
	register thread_t	target)
{
	thread_t			self = current_thread();
	kern_return_t		result = KERN_SUCCESS;

	if (target == THREAD_NULL || target == self)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(target);

	if (target->active) {
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(self, target);
			thread_unstop(target);
		}
		else {
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(target);

	return (result);
}
/*
 *	thread_setstatus:
 *
 *	Set the status of the specified thread.
 *	Called with (and returns with) no locks held.
 */
kern_return_t
thread_setstatus(
	register thread_t		thread,
	int						flavor,
	thread_state_t			tstate,
	mach_msg_type_number_t	count)
{
	return (thread_set_state(thread, flavor, tstate, count));
}
/*
 *	thread_getstatus:
 *
 *	Get the status of the specified thread.
 */
kern_return_t
thread_getstatus(
	register thread_t		thread,
	int						flavor,
	thread_state_t			tstate,
	mach_msg_type_number_t	*count)
{
	return (thread_get_state(thread, flavor, tstate, count));
}
/*
 * install_special_handler:
 *
 *	Install the special returnhandler that handles suspension and
 *	termination, if it hasn't been installed already.
 *
 *	Called with the thread mutex held.
 */
void
install_special_handler(
	thread_t		thread)
{
	spl_t		s = splsched();

	thread_lock(thread);
	install_special_handler_locked(thread);
	thread_unlock(thread);
	splx(s);
}
/*
 * install_special_handler_locked:
 *
 *	Do the work of installing the special_handler.
 *
 *	Called with the thread mutex and scheduling lock held.
 */
void
install_special_handler_locked(
	thread_t				thread)
{
	ReturnHandler	**rh;

	/* The work handler must always be the last ReturnHandler on the list,
	   because it can do tricky things like detach the thr_act. */
	for (rh = &thread->handlers; *rh; rh = &(*rh)->next)
		continue;

	if (rh != &thread->special_handler.next)
		*rh = &thread->special_handler;

	/*
	 * Temporarily undepress, so target has
	 * a chance to do locking required to
	 * block itself in special_handler().
	 */
	if (thread->sched_mode & TH_MODE_ISDEPRESSED)
		compute_priority(thread, TRUE);

	thread_ast_set(thread, AST_APC);

	if (thread == current_thread())
		ast_propagate(thread->ast);
	else {
		processor_t		processor = thread->last_processor;

		if (	processor != PROCESSOR_NULL					&&
				processor->state == PROCESSOR_RUNNING		&&
				processor->active_thread == thread			)
			cause_ast_check(processor);
	}
}
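/*
 * Illustrative sketch (not part of the original file): the list walk above
 * keeps thread->special_handler at the tail, so any other ReturnHandler
 * queued on thread->handlers runs first when the APC is taken.  The handler
 * record and queuing helper below are hypothetical.
 */
#if 0	/* example only, not compiled */
static void
example_queue_handler(thread_t thread, ReturnHandler *example_rh)
{
	/* Called with the thread mutex and scheduling lock held. */
	example_rh->next = thread->handlers;
	thread->handlers = example_rh;		/* runs before the special handler */

	/* Ensure special_handler stays last and AST_APC is requested. */
	install_special_handler_locked(thread);
}
#endif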
/*
 * Activation control support routines internal to this file:
 */

void
act_execute_returnhandlers(void)
{
	thread_t	thread = current_thread();

	thread_ast_clear(thread, AST_APC);
	spllo();

	for (;;) {
		ReturnHandler	*rh;

		thread_mtx_lock(thread);

		(void)splsched();
		thread_lock(thread);

		rh = thread->handlers;
		if (rh != NULL) {
			thread->handlers = rh->next;

			thread_unlock(thread);
			spllo();

			thread_mtx_unlock(thread);

			/* Execute it */
			(*rh->handler)(rh, thread);
		}
		else
			break;
	}

	thread_unlock(thread);
	spllo();

	thread_mtx_unlock(thread);
}
/*
 * special_handler_continue
 *
 *	Continuation routine for the special handler blocks.  It checks
 *	to see whether there have been any new suspensions.  If so, it
 *	installs the special handler again.  Otherwise, it checks to see
 *	if the current depression needs to be re-instated (it may have
 *	been temporarily removed in order to get to this point in a hurry).
 */
void
special_handler_continue(void)
{
	thread_t		thread = current_thread();

	thread_mtx_lock(thread);

	if (thread->suspend_count > 0)
		install_special_handler(thread);
	else {
		spl_t			s = splsched();

		thread_lock(thread);
		if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
			processor_t		myprocessor = thread->last_processor;

			thread->sched_pri = DEPRESSPRI;
			myprocessor->current_pri = thread->sched_pri;
			thread->sched_mode &= ~TH_MODE_PREEMPT;
		}
		thread_unlock(thread);
		splx(s);
	}

	thread_mtx_unlock(thread);

	thread_exception_return();
	/*NOTREACHED*/
}
/*
 * special_handler	- handles suspension, termination.  Called
 * with nothing locked.  Returns (if it returns) the same way.
 */
void
special_handler(
	__unused ReturnHandler	*rh,
	thread_t				thread)
{
	spl_t		s;

	thread_mtx_lock(thread);

	s = splsched();
	thread_lock(thread);
	thread->state &= ~(TH_ABORT|TH_ABORT_SAFELY);	/* clear any aborts */
	thread_unlock(thread);
	splx(s);

	/*
	 * If we're suspended, go to sleep and wait for someone to wake us up.
	 */
	if (thread->active) {
		if (thread->suspend_count > 0) {
			if (thread->handlers == NULL) {
				assert_wait(&thread->suspend_count, THREAD_ABORTSAFE);
				thread_mtx_unlock(thread);
				thread_block((thread_continue_t)special_handler_continue);
				/*NOTREACHED*/
			}

			thread_mtx_unlock(thread);

			special_handler_continue();
			/*NOTREACHED*/
		}
	}
	else {
		thread_mtx_unlock(thread);

		thread_terminate_self();
		/*NOTREACHED*/
	}

	thread_mtx_unlock(thread);
}
kern_return_t
act_set_state(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_set_state(thread, flavor, state, count));
}
kern_return_t
act_get_state(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	*count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_get_state(thread, flavor, state, count));
}
void
act_set_astbsd(
	thread_t	thread)
{
	spl_t		s = splsched();

	if (thread == current_thread()) {
		thread_ast_set(thread, AST_BSD);
		ast_propagate(thread->ast);
	}
	else {
		processor_t		processor;

		thread_lock(thread);
		thread_ast_set(thread, AST_BSD);
		processor = thread->last_processor;
		if (	processor != PROCESSOR_NULL				&&
				processor->state == PROCESSOR_RUNNING	&&
				processor->active_thread == thread		)
			cause_ast_check(processor);
		thread_unlock(thread);
	}

	splx(s);
}
void
act_set_apc(
	thread_t	thread)
{
	spl_t		s = splsched();

	if (thread == current_thread()) {
		thread_ast_set(thread, AST_APC);
		ast_propagate(thread->ast);
	}
	else {
		processor_t		processor;

		thread_lock(thread);
		thread_ast_set(thread, AST_APC);
		processor = thread->last_processor;
		if (	processor != PROCESSOR_NULL				&&
				processor->state == PROCESSOR_RUNNING	&&
				processor->active_thread == thread		)
			cause_ast_check(processor);
		thread_unlock(thread);
	}

	splx(s);
}
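/*
 * Illustrative sketch (not part of the original file): both routines above
 * follow the same pattern - set the AST bit, propagate it locally when the
 * target is the current thread, otherwise poke the processor the target was
 * last seen running on.  A hypothetical signal-delivery path would simply
 * call the BSD variant on its target.
 */
#if 0	/* example only, not compiled */
static void
example_post_bsd_ast(thread_t target)
{
	/* Request that the target run its BSD AST handler as soon as possible. */
	act_set_astbsd(target);
}
#endif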