/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Center for Software Science (CSS).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 *	Author:	Bryan Ford, University of Utah CSS
 *
 *	Thread management routines
 */
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/alert.h>
#include <mach/thread_act_server.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/zalloc.h>
#include <kern/extmod_statistics.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/exception.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/machine.h>
#include <kern/syscall_subr.h>
#include <kern/sync_lock.h>
#include <kern/processor.h>
#include <kern/timer.h>
#include <kern/affinity.h>

#include <security/mac_mach_internal.h>
void	act_abort(thread_t);
void	install_special_handler_locked(thread_t);
void	special_handler_continue(void);
/*
 * Internal routine to mark a thread as started.
 * Always called with the thread locked.
 *
 * Note: function intentionally declared with the noinline attribute to
 * prevent multiple declaration of probe symbols in this file; we would
 * prefer "#pragma noinline", but gcc does not support it.
 * PR-6385749 -- the lwp-start probe should fire from within the context
 * of the newly created thread.  Commented out for now, in case we
 * turn it into a dead code probe.
 */
void
thread_start_internal(
	thread_t			thread)
{
	clear_wait(thread, THREAD_AWAKENED);
	thread->started = TRUE;
	// DTRACE_PROC1(lwp__start, thread_t, thread);
}
/*
 * Internal routine to terminate a thread.
 * Sometimes called with task already locked.
 */
kern_return_t
thread_terminate_internal(
	thread_t			thread)
{
	kern_return_t		result = KERN_SUCCESS;

	thread_mtx_lock(thread);

	if (thread->active) {
		thread->active = FALSE;

		act_abort(thread);

		if (thread->started)
			clear_wait(thread, THREAD_INTERRUPTED);
		else {
			thread_start_internal(thread);
		}
	}
	else
		result = KERN_TERMINATED;

	if (thread->affinity_set != NULL)
		thread_affinity_terminate(thread);

	thread_mtx_unlock(thread);

	if (thread != current_thread() && result == KERN_SUCCESS)
		thread_wait(thread, FALSE);

	return (result);
}
/*
 * Terminate a thread.
 */
kern_return_t
thread_terminate(
	register thread_t		thread)
{
	kern_return_t		result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (	thread->task == kernel_task		&&
			thread != current_thread()			)
		return (KERN_FAILURE);

	result = thread_terminate_internal(thread);

	/*
	 * If a kernel thread is terminating itself, force an AST here.
	 * Kernel threads don't normally pass through the AST checking
	 * code - and all threads finish their own termination in the
	 * special handler APC.
	 */
	if (thread->task == kernel_task) {
		ml_set_interrupts_enabled(FALSE);
		ast_taken(AST_APC, TRUE);

		panic("thread_terminate");
	}

	return (result);
}
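
/*
 * Illustrative sketch (not part of the original source; the helper name
 * below is hypothetical): a kernel thread that terminates itself never
 * returns from thread_terminate() -- the forced AST above runs the
 * special handler APC, which completes the termination, so control
 * should never reach the panic().
 */
#if 0	/* example only */
static void
example_kernel_thread_exit(void)
{
	(void) thread_terminate(current_thread());	/* does not return */
	/* NOTREACHED */
}
#endif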
/*
 * Suspend execution of the specified thread.
 * This is a recursive-style suspension of the thread, a count of
 * suspends is maintained.
 *
 * Called with thread mutex held.
 */
void
thread_hold(
	register thread_t	thread)
{
	if (thread->suspend_count++ == 0) {
		install_special_handler(thread);
		if (thread->started)
			thread_wakeup_one(&thread->suspend_count);
	}
}
/*
 * Decrement internal suspension count, setting thread
 * runnable when count falls to zero.
 *
 * Called with thread mutex held.
 */
void
thread_release(
	register thread_t	thread)
{
	if (	thread->suspend_count > 0		&&
			--thread->suspend_count == 0	) {
		if (thread->started)
			thread_wakeup_one(&thread->suspend_count);
		else {
			thread_start_internal(thread);
		}
	}
}
kern_return_t
thread_suspend(
	register thread_t	thread)
{
	thread_t			self = current_thread();
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (	thread->user_stop_count++ == 0		&&
				thread->suspend_count++ == 0		) {
			install_special_handler(thread);
			if (thread->started)
				thread_wakeup_one(&thread->suspend_count);
		}
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	if (thread != self && result == KERN_SUCCESS)
		thread_wait(thread, TRUE);

	return (result);
}
kern_return_t
thread_resume(
	register thread_t	thread)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread->user_stop_count > 0) {
			if (	--thread->user_stop_count == 0		&&
					--thread->suspend_count == 0		) {
				if (thread->started)
					thread_wakeup_one(&thread->suspend_count);
				else {
					thread_start_internal(thread);
				}
			}
		}
		else
			result = KERN_FAILURE;
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
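
/*
 * Illustrative sketch (not part of the original source; the helper name
 * is hypothetical): thread_suspend()/thread_resume() are recursive --
 * only the first suspend installs the special handler, and only the
 * balancing last resume makes the thread runnable again, tracked by
 * user_stop_count and suspend_count.
 */
#if 0	/* example only */
static kern_return_t
example_suspend_then_resume(thread_t thread)
{
	kern_return_t kr;

	kr = thread_suspend(thread);		/* first call stops the thread */
	if (kr != KERN_SUCCESS)
		return (kr);

	/* ... examine the stopped thread here ... */

	return (thread_resume(thread));		/* balancing call restarts it */
}
#endif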
/*
 * thread_depress_abort:
 *
 * Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort(
	register thread_t	thread)
{
	kern_return_t		result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active)
		result = thread_depress_abort_internal(thread);
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
/*
 * Indicate that the activation should run its
 * special handler to detect a condition.
 *
 * Called with thread mutex held.
 */
void
act_abort(
	thread_t	thread)
{
	spl_t		s = splsched();

	thread_lock(thread);

	if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
		thread->sched_flags |= TH_SFLAG_ABORT;
		install_special_handler_locked(thread);
	}
	else
		thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;

	thread_unlock(thread);
	splx(s);
}
kern_return_t
thread_abort(
	register thread_t	thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		act_abort(thread);
		clear_wait(thread, THREAD_INTERRUPTED);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
kern_return_t
thread_abort_safely(
	thread_t		thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		spl_t		s = splsched();

		thread_lock(thread);
		if (!thread->at_safe_point ||
				clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
			if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
				thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
				install_special_handler_locked(thread);
			}
		}
		thread_unlock(thread);
		splx(s);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
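
/*
 * Illustrative sketch (not part of the original source; the helper name
 * is hypothetical): thread_abort() interrupts the target unconditionally,
 * while thread_abort_safely() only interrupts a wait at a safe point and
 * otherwise defers to the special handler via TH_SFLAG_ABORTED_MASK.
 */
#if 0	/* example only */
static kern_return_t
example_interrupt_thread(thread_t thread, boolean_t force)
{
	return (force ? thread_abort(thread) : thread_abort_safely(thread));
}
#endif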
/*** backward compatibility hacks ***/
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <ipc/ipc_port.h>

kern_return_t
thread_info(
	thread_t				thread,
	thread_flavor_t			flavor,
	thread_info_t			thread_info_out,
	mach_msg_type_number_t	*thread_info_count)
{
	kern_return_t			result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active)
		result = thread_info_internal(
						thread, flavor, thread_info_out, thread_info_count);
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
kern_return_t
thread_get_state(
	register thread_t		thread,
	int						flavor,
	thread_state_t			state,			/* pointer to OUT array */
	mach_msg_type_number_t	*state_count)	/*IN/OUT*/
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread)) {
				thread_mtx_lock(thread);
				result = machine_thread_get_state(
										thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_get_state(
								thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
/*
 * Change thread's machine-dependent state.  Called with
 * nothing locked.  Returns same way.
 */
static kern_return_t
thread_set_state_internal(
	register thread_t		thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	state_count,
	boolean_t				from_user)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_state(
										thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_set_state(
								thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	if ((result == KERN_SUCCESS) && from_user)
		extmod_statistics_incr_thread_set_state(thread);

	thread_mtx_unlock(thread);

	return (result);
}
/* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
kern_return_t
thread_set_state(
	register thread_t		thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	state_count);

kern_return_t
thread_set_state(
	register thread_t		thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	state_count)
{
	return thread_set_state_internal(thread, flavor, state, state_count, FALSE);
}

kern_return_t
thread_set_state_from_user(
	register thread_t		thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	state_count)
{
	return thread_set_state_internal(thread, flavor, state, state_count, TRUE);
}
/*
 * Kernel-internal "thread" interfaces used outside this file:
 */

/* Initialize (or re-initialize) a thread state.  Called from execve
 * with nothing locked, returns same way.
 */
kern_return_t
thread_state_initialize(
	register thread_t	thread)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread)) {
				thread_mtx_lock(thread);
				result = machine_thread_state_initialize( thread );
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_state_initialize( thread );
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
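
/*
 * Illustrative sketch (not part of the original source; the helper and
 * its callback parameter are hypothetical): the pattern shared by
 * thread_get_state(), thread_set_state_internal(),
 * thread_state_initialize() and thread_dup() when operating on another
 * thread's machine state -- hold and stop the target, perform the
 * machine-dependent operation, then unstop and release it.
 */
#if 0	/* example only */
static kern_return_t
example_with_thread_stopped(thread_t thread, kern_return_t (*op)(thread_t))
{
	kern_return_t	result;

	thread_mtx_lock(thread);
	if (!thread->active) {
		thread_mtx_unlock(thread);
		return (KERN_TERMINATED);
	}

	thread_hold(thread);			/* keep it suspended while we work */
	thread_mtx_unlock(thread);

	if (thread_stop(thread)) {
		thread_mtx_lock(thread);
		result = (*op)(thread);		/* e.g. a machine_thread_* routine */
		thread_unstop(thread);
	}
	else {
		thread_mtx_lock(thread);
		result = KERN_ABORTED;
	}

	thread_release(thread);
	thread_mtx_unlock(thread);

	return (result);
}
#endif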
kern_return_t
thread_dup(
	register thread_t	target)
{
	thread_t			self = current_thread();
	kern_return_t		result = KERN_SUCCESS;

	if (target == THREAD_NULL || target == self)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(target);

	if (target->active) {
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(self, target);
			if (self->affinity_set != AFFINITY_SET_NULL)
				thread_affinity_dup(self, target);
			thread_unstop(target);
		}
		else {
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(target);

	return (result);
}
/*
 * thread_setstatus:
 *
 * Set the status of the specified thread.
 * Called with (and returns with) no locks held.
 */
kern_return_t
thread_setstatus(
	register thread_t		thread,
	int						flavor,
	thread_state_t			tstate,
	mach_msg_type_number_t	count)
{
	return (thread_set_state(thread, flavor, tstate, count));
}
/*
 * thread_getstatus:
 *
 * Get the status of the specified thread.
 */
kern_return_t
thread_getstatus(
	register thread_t		thread,
	int						flavor,
	thread_state_t			tstate,
	mach_msg_type_number_t	*count)
{
	return (thread_get_state(thread, flavor, tstate, count));
}
/*
 * install_special_handler:
 *
 * Install the special returnhandler that handles suspension and
 * termination, if it hasn't been installed already.
 *
 * Called with the thread mutex held.
 */
void
install_special_handler(
	thread_t		thread)
{
	spl_t		s = splsched();

	thread_lock(thread);
	install_special_handler_locked(thread);
	thread_unlock(thread);

	splx(s);
}
/*
 * install_special_handler_locked:
 *
 * Do the work of installing the special_handler.
 *
 * Called with the thread mutex and scheduling lock held.
 */
void
install_special_handler_locked(
	thread_t			thread)
{
	ReturnHandler	**rh;

	/* The work handler must always be the last ReturnHandler on the list,
	   because it can do tricky things like detach the thr_act. */
	for (rh = &thread->handlers; *rh; rh = &(*rh)->next)
		continue;

	if (rh != &thread->special_handler.next)
		*rh = &thread->special_handler;

	/*
	 * Temporarily undepress, so target has
	 * a chance to do locking required to
	 * block itself in special_handler().
	 */
	if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)
		SCHED(compute_priority)(thread, TRUE);

	thread_ast_set(thread, AST_APC);

	if (thread == current_thread())
		ast_propagate(thread->ast);
	else {
		processor_t		processor = thread->last_processor;

		if (	processor != PROCESSOR_NULL					&&
				processor->state == PROCESSOR_RUNNING		&&
				processor->active_thread == thread			)
			cause_ast_check(processor);
	}
}
/*
 * Activation control support routines internal to this file:
 *
 */

void
act_execute_returnhandlers(void)
{
	thread_t	thread = current_thread();

	thread_ast_clear(thread, AST_APC);
	spllo();

	for (;;) {
		ReturnHandler	*rh;

		thread_mtx_lock(thread);

		(void)splsched();
		thread_lock(thread);

		rh = thread->handlers;
		if (rh != NULL) {
			thread->handlers = rh->next;

			thread_unlock(thread);
			spllo();

			thread_mtx_unlock(thread);

			/* Execute it */
			(*rh->handler)(rh, thread);
		}
		else
			break;
	}

	thread_unlock(thread);
	spllo();

	thread_mtx_unlock(thread);
}
/*
 * special_handler_continue
 *
 * Continuation routine for the special handler blocks.  It checks
 * to see whether there have been any new suspensions.  If so, it
 * installs the special handler again.  Otherwise, it checks to see
 * if the current depression needs to be re-instated (it may have
 * been temporarily removed in order to get to this point in a hurry).
 */
void
special_handler_continue(void)
{
	thread_t		thread = current_thread();

	thread_mtx_lock(thread);

	if (thread->suspend_count > 0)
		install_special_handler(thread);
	else {
		spl_t			s = splsched();

		thread_lock(thread);
		if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
			processor_t		myprocessor = thread->last_processor;

			thread->sched_pri = DEPRESSPRI;
			myprocessor->current_pri = thread->sched_pri;
		}
		thread_unlock(thread);
		splx(s);
	}

	thread_mtx_unlock(thread);

	thread_exception_return();
	/*NOTREACHED*/
}
/*
 * special_handler - handles suspension, termination.  Called
 * with nothing locked.  Returns (if it returns) the same way.
 */
void
special_handler(
	__unused ReturnHandler	*rh,
	thread_t				thread)
{
	spl_t		s;

	thread_mtx_lock(thread);

	s = splsched();
	thread_lock(thread);
	thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
	thread_unlock(thread);
	splx(s);

	/*
	 * If we're suspended, go to sleep and wait for someone to wake us up.
	 */
	if (thread->active) {
		if (thread->suspend_count > 0) {
			if (thread->handlers == NULL) {
				assert_wait(&thread->suspend_count, THREAD_ABORTSAFE);
				thread_mtx_unlock(thread);
				thread_block((thread_continue_t)special_handler_continue);
				/*NOTREACHED*/
			}

			thread_mtx_unlock(thread);

			special_handler_continue();
			/*NOTREACHED*/
		}
	}
	else {
		thread_mtx_unlock(thread);

		thread_terminate_self();
		/*NOTREACHED*/
	}

	thread_mtx_unlock(thread);
}
/* Prototype, see justification above */
kern_return_t
act_set_state(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	count);

kern_return_t
act_set_state(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_set_state(thread, flavor, state, count));
}

kern_return_t
act_set_state_from_user(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_set_state_from_user(thread, flavor, state, count));
}

kern_return_t
act_get_state(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	*count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_get_state(thread, flavor, state, count));
}
void
act_set_ast(
	thread_t	thread,
	ast_t		ast)
{
	spl_t		s = splsched();

	if (thread == current_thread()) {
		thread_ast_set(thread, ast);
		ast_propagate(thread->ast);
	}
	else {
		processor_t		processor;

		thread_lock(thread);
		thread_ast_set(thread, ast);
		processor = thread->last_processor;
		if (	processor != PROCESSOR_NULL				&&
				processor->state == PROCESSOR_RUNNING	&&
				processor->active_thread == thread		)
			cause_ast_check(processor);
		thread_unlock(thread);
	}

	splx(s);
}

void
act_set_astbsd(
	thread_t	thread)
{
	act_set_ast( thread, AST_BSD );
}

void
act_set_apc(
	thread_t	thread)
{
	act_set_ast( thread, AST_APC );
}

void
act_set_kperf(
	thread_t	thread)
{
	/* safety check */
	if (thread != current_thread())
		if( !ml_get_interrupts_enabled() )
			panic("unsafe act_set_kperf operation");

	act_set_ast( thread, AST_KPERF );
}

#if CONFIG_MACF
void
act_set_astmacf(
	thread_t	thread)
{
	act_set_ast( thread, AST_MACF);
}
#endif