2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
23 * @OSF_FREE_COPYRIGHT@
26 * Copyright (c) 1993 The University of Utah and
27 * the Center for Software Science (CSS). All rights reserved.
29 * Permission to use, copy, modify and distribute this software and its
30 * documentation is hereby granted, provided that both the copyright
31 * notice and this permission notice appear in all copies of the
32 * software, derivative works or modified versions, and any portions
33 * thereof, and that both notices appear in supporting documentation.
35 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
36 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
37 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 * CSS requests users of this software to return to css-dist@cs.utah.edu any
40 * improvements that they make and grant CSS redistribution rights.
42 * Author: Bryan Ford, University of Utah CSS
44 * Thread_Activation management routines
48 #include <task_swapper.h>
49 #include <mach/kern_return.h>
50 #include <mach/alert.h>
51 #include <kern/etap_macros.h>
52 #include <kern/mach_param.h>
53 #include <kern/zalloc.h>
54 #include <kern/thread.h>
55 #include <kern/thread_swap.h>
56 #include <kern/task.h>
57 #include <kern/task_swap.h>
58 #include <kern/thread_act.h>
59 #include <kern/sched_prim.h>
60 #include <kern/misc_protos.h>
61 #include <kern/assert.h>
62 #include <kern/exception.h>
63 #include <kern/ipc_mig.h>
64 #include <kern/ipc_tt.h>
65 #include <kern/profile.h>
66 #include <kern/machine.h>
68 #include <kern/syscall_subr.h>
69 #include <kern/sync_lock.h>
70 #include <kern/mk_sp.h> /*** ??? fix so this can be removed ***/
71 #include <kern/processor.h>
72 #include <mach_prof.h>
76 * Debugging printf control
79 unsigned int watchacts
= 0 /* WA_ALL */
80 ; /* Do-it-yourself & patchable */
84 * Track the number of times we need to swapin a thread to deallocate it.
86 int act_free_swapin
= 0;
90 * Forward declarations for functions local to this file.
92 kern_return_t
act_abort( thread_act_t
, boolean_t
);
93 void special_handler(ReturnHandler
*, thread_act_t
);
94 kern_return_t
act_set_state_locked(thread_act_t
, int,
96 mach_msg_type_number_t
);
97 kern_return_t
act_get_state_locked(thread_act_t
, int,
99 mach_msg_type_number_t
*);
100 void act_set_astbsd(thread_act_t
);
101 void act_set_apc(thread_act_t
);
102 void act_user_to_kernel(thread_act_t
);
103 void act_ulock_release_all(thread_act_t thr_act
);
105 void install_special_handler_locked(thread_act_t
);
107 static void act_disable(thread_act_t
);
109 struct thread_activation pageout_act
;
111 static zone_t thr_act_zone
;
114 * Thread interfaces accessed via a thread_activation:
119 * Internal routine to terminate a thread.
120 * Sometimes called with task already locked.
123 thread_terminate_internal(
124 register thread_act_t act
)
126 kern_return_t result
;
129 thread
= act_lock_thread(act
);
132 act_unlock_thread(act
);
133 return (KERN_TERMINATED
);
137 result
= act_abort(act
, FALSE
);
140 * Make sure this thread enters the kernel
141 * Must unlock the act, but leave the shuttle
142 * captured in this act.
144 if (thread
!= current_thread()) {
147 if (thread_stop(thread
))
148 thread_unstop(thread
);
150 result
= KERN_ABORTED
;
155 clear_wait(thread
, act
->inited
? THREAD_INTERRUPTED
: THREAD_AWAKENED
);
156 act_unlock_thread(act
);
162 * Terminate a thread.
166 register thread_act_t act
)
168 kern_return_t result
;
170 if (act
== THR_ACT_NULL
)
171 return (KERN_INVALID_ARGUMENT
);
173 if ( (act
->task
== kernel_task
||
174 act
->kernel_loaded
) &&
175 act
!= current_act() )
176 return (KERN_FAILURE
);
178 result
= thread_terminate_internal(act
);
181 * If a kernel thread is terminating itself, force an AST here.
182 * Kernel threads don't normally pass through the AST checking
183 * code - and all threads finish their own termination in the
184 * special handler APC.
186 if ( act
->task
== kernel_task
||
187 act
->kernel_loaded
) {
188 assert(act
== current_act());
189 ast_taken(AST_APC
, FALSE
);
190 panic("thread_terminate");
197 * Suspend execution of the specified thread.
198 * This is a recursive-style suspension of the thread, a count of
199 * suspends is maintained.
201 * Called with act_lock held.
205 register thread_act_t act
)
207 thread_t thread
= act
->thread
;
209 if (act
->suspend_count
++ == 0) {
210 install_special_handler(act
);
212 thread
!= THREAD_NULL
&&
213 thread
->top_act
== act
)
214 thread_wakeup_one(&act
->suspend_count
);
219 * Decrement internal suspension count for thr_act, setting thread
220 * runnable when count falls to zero.
222 * Called with act_lock held.
226 register thread_act_t act
)
228 thread_t thread
= act
->thread
;
230 if ( act
->suspend_count
> 0 &&
231 --act
->suspend_count
== 0 &&
232 thread
!= THREAD_NULL
&&
233 thread
->top_act
== act
) {
235 clear_wait(thread
, THREAD_AWAKENED
);
239 thread_wakeup_one(&act
->suspend_count
);
245 register thread_act_t act
)
249 if (act
== THR_ACT_NULL
)
250 return (KERN_INVALID_ARGUMENT
);
252 thread
= act_lock_thread(act
);
255 act_unlock_thread(act
);
256 return (KERN_TERMINATED
);
259 if ( act
->user_stop_count
++ == 0 &&
260 act
->suspend_count
++ == 0 ) {
261 install_special_handler(act
);
262 if ( thread
!= current_thread() &&
263 thread
!= THREAD_NULL
&&
264 thread
->top_act
== act
) {
266 thread_wakeup_one(&act
->suspend_count
);
267 act_unlock_thread(act
);
272 act_unlock_thread(act
);
275 act_unlock_thread(act
);
277 return (KERN_SUCCESS
);
282 register thread_act_t act
)
284 kern_return_t result
= KERN_SUCCESS
;
287 if (act
== THR_ACT_NULL
)
288 return (KERN_INVALID_ARGUMENT
);
290 thread
= act_lock_thread(act
);
293 if (act
->user_stop_count
> 0) {
294 if ( --act
->user_stop_count
== 0 &&
295 --act
->suspend_count
== 0 &&
296 thread
!= THREAD_NULL
&&
297 thread
->top_act
== act
) {
299 clear_wait(thread
, THREAD_AWAKENED
);
303 thread_wakeup_one(&act
->suspend_count
);
307 result
= KERN_FAILURE
;
310 result
= KERN_TERMINATED
;
312 act_unlock_thread(act
);
318 * This routine walks toward the head of an RPC chain starting at
319 * a specified thread activation. An alert bit is set and a special
320 * handler is installed for each thread it encounters.
322 * The target thread act and thread shuttle are already locked.
326 register thread_act_t act
,
333 * thread_depress_abort:
335 * Prematurely abort priority depression if there is one.
338 thread_depress_abort(
339 register thread_act_t thr_act
)
341 register thread_t thread
;
342 kern_return_t result
;
344 if (thr_act
== THR_ACT_NULL
)
345 return (KERN_INVALID_ARGUMENT
);
347 thread
= act_lock_thread(thr_act
);
348 /* if activation is terminating, this operation is not meaningful */
349 if (!thr_act
->active
) {
350 act_unlock_thread(thr_act
);
352 return (KERN_TERMINATED
);
355 result
= _mk_sp_thread_depress_abort(thread
, FALSE
);
357 act_unlock_thread(thr_act
);
364 * Indicate that the activation should run its
365 * special handler to detect the condition.
367 * Called with act_lock held.
372 boolean_t chain_break
)
374 thread_t thread
= act
->thread
;
375 spl_t s
= splsched();
377 assert(thread
->top_act
== act
);
380 if (!(thread
->state
& TH_ABORT
)) {
381 thread
->state
|= TH_ABORT
;
382 install_special_handler_locked(act
);
384 thread
->state
&= ~TH_ABORT_SAFELY
;
386 thread_unlock(thread
);
389 return (KERN_SUCCESS
);
394 register thread_act_t act
)
396 kern_return_t result
;
399 if (act
== THR_ACT_NULL
)
400 return (KERN_INVALID_ARGUMENT
);
402 thread
= act_lock_thread(act
);
405 act_unlock_thread(act
);
406 return (KERN_TERMINATED
);
409 result
= act_abort(act
, FALSE
);
410 clear_wait(thread
, THREAD_INTERRUPTED
);
411 act_unlock_thread(act
);
424 if ( act
== THR_ACT_NULL
)
425 return (KERN_INVALID_ARGUMENT
);
427 thread
= act_lock_thread(act
);
430 act_unlock_thread(act
);
431 return (KERN_TERMINATED
);
436 if (!thread
->at_safe_point
||
437 clear_wait_internal(thread
, THREAD_INTERRUPTED
) != KERN_SUCCESS
) {
438 if (!(thread
->state
& TH_ABORT
)) {
439 thread
->state
|= (TH_ABORT
|TH_ABORT_SAFELY
);
440 install_special_handler_locked(act
);
443 thread_unlock(thread
);
446 act_unlock_thread(act
);
448 return (KERN_SUCCESS
);
451 /*** backward compatibility hacks ***/
452 #include <mach/thread_info.h>
453 #include <mach/thread_special_ports.h>
454 #include <ipc/ipc_port.h>
455 #include <mach/thread_act_server.h>
459 thread_act_t thr_act
,
460 thread_flavor_t flavor
,
461 thread_info_t thread_info_out
,
462 mach_msg_type_number_t
*thread_info_count
)
464 register thread_t thread
;
465 kern_return_t result
;
467 if (thr_act
== THR_ACT_NULL
)
468 return (KERN_INVALID_ARGUMENT
);
470 thread
= act_lock_thread(thr_act
);
471 if (!thr_act
->active
) {
472 act_unlock_thread(thr_act
);
474 return (KERN_TERMINATED
);
477 result
= thread_info_shuttle(thr_act
, flavor
,
478 thread_info_out
, thread_info_count
);
480 act_unlock_thread(thr_act
);
486 * Routine: thread_get_special_port [kernel call]
488 * Clones a send right for one of the thread's
493 * KERN_SUCCESS Extracted a send right.
494 * KERN_INVALID_ARGUMENT The thread is null.
495 * KERN_FAILURE The thread is dead.
496 * KERN_INVALID_ARGUMENT Invalid special port.
500 thread_get_special_port(
501 thread_act_t thr_act
,
510 if (watchacts
& WA_PORT
)
511 printf("thread_get_special_port(thr_act=%x, which=%x port@%x=%x\n",
512 thr_act
, which
, portp
, (portp
? *portp
: 0));
513 #endif /* MACH_ASSERT */
516 return KERN_INVALID_ARGUMENT
;
517 thread
= act_lock_thread(thr_act
);
519 case THREAD_KERNEL_PORT
:
520 whichp
= &thr_act
->ith_sself
;
524 act_unlock_thread(thr_act
);
525 return KERN_INVALID_ARGUMENT
;
528 if (!thr_act
->active
) {
529 act_unlock_thread(thr_act
);
533 port
= ipc_port_copy_send(*whichp
);
534 act_unlock_thread(thr_act
);
541 * Routine: thread_set_special_port [kernel call]
543 * Changes one of the thread's special ports,
544 * setting it to the supplied send right.
546 * Nothing locked. If successful, consumes
547 * the supplied send right.
549 * KERN_SUCCESS Changed the special port.
550 * KERN_INVALID_ARGUMENT The thread is null.
551 * KERN_FAILURE The thread is dead.
552 * KERN_INVALID_ARGUMENT Invalid special port.
556 thread_set_special_port(
557 thread_act_t thr_act
,
566 if (watchacts
& WA_PORT
)
567 printf("thread_set_special_port(thr_act=%x,which=%x,port=%x\n",
568 thr_act
, which
, port
);
569 #endif /* MACH_ASSERT */
572 return KERN_INVALID_ARGUMENT
;
574 thread
= act_lock_thread(thr_act
);
576 case THREAD_KERNEL_PORT
:
577 whichp
= &thr_act
->ith_self
;
581 act_unlock_thread(thr_act
);
582 return KERN_INVALID_ARGUMENT
;
585 if (!thr_act
->active
) {
586 act_unlock_thread(thr_act
);
592 act_unlock_thread(thr_act
);
595 ipc_port_release_send(old
);
600 * thread state should always be accessible by locking the thread
601 * and copying it. The activation messes things up so for right
602 * now if it's not the top of the chain, use a special handler to
603 * get the information when the shuttle returns to the activation.
607 register thread_act_t act
,
609 thread_state_t state
, /* pointer to OUT array */
610 mach_msg_type_number_t
*state_count
) /*IN/OUT*/
612 kern_return_t result
= KERN_SUCCESS
;
615 if (act
== THR_ACT_NULL
|| act
== current_act())
616 return (KERN_INVALID_ARGUMENT
);
618 thread
= act_lock_thread(act
);
621 act_unlock_thread(act
);
622 return (KERN_TERMINATED
);
630 if ( thread
== THREAD_NULL
||
631 thread
->top_act
!= act
)
633 act_unlock_thread(act
);
635 if (!thread_stop(thread
)) {
636 result
= KERN_ABORTED
;
637 (void)act_lock_thread(act
);
638 thread
= THREAD_NULL
;
642 thread1
= act_lock_thread(act
);
643 if (thread1
== thread
)
646 thread_unstop(thread
);
650 if (result
== KERN_SUCCESS
)
651 result
= act_machine_get_state(act
, flavor
, state
, state_count
);
653 if ( thread
!= THREAD_NULL
&&
654 thread
->top_act
== act
)
655 thread_unstop(thread
);
658 act_unlock_thread(act
);
664 * Change thread's machine-dependent state. Called with nothing
665 * locked. Returns same way.
669 register thread_act_t act
,
671 thread_state_t state
,
672 mach_msg_type_number_t state_count
)
674 kern_return_t result
= KERN_SUCCESS
;
677 if (act
== THR_ACT_NULL
|| act
== current_act())
678 return (KERN_INVALID_ARGUMENT
);
680 thread
= act_lock_thread(act
);
683 act_unlock_thread(act
);
684 return (KERN_TERMINATED
);
692 if ( thread
== THREAD_NULL
||
693 thread
->top_act
!= act
)
695 act_unlock_thread(act
);
697 if (!thread_stop(thread
)) {
698 result
= KERN_ABORTED
;
699 (void)act_lock_thread(act
);
700 thread
= THREAD_NULL
;
704 thread1
= act_lock_thread(act
);
705 if (thread1
== thread
)
708 thread_unstop(thread
);
712 if (result
== KERN_SUCCESS
)
713 result
= act_machine_set_state(act
, flavor
, state
, state_count
);
715 if ( thread
!= THREAD_NULL
&&
716 thread
->top_act
== act
)
717 thread_unstop(thread
);
720 act_unlock_thread(act
);
726 * Kernel-internal "thread" interfaces used outside this file:
731 register thread_act_t target
)
733 kern_return_t result
= KERN_SUCCESS
;
734 thread_act_t self
= current_act();
737 if (target
== THR_ACT_NULL
|| target
== self
)
738 return (KERN_INVALID_ARGUMENT
);
740 thread
= act_lock_thread(target
);
742 if (!target
->active
) {
743 act_unlock_thread(target
);
744 return (KERN_TERMINATED
);
752 if ( thread
== THREAD_NULL
||
753 thread
->top_act
!= target
)
755 act_unlock_thread(target
);
757 if (!thread_stop(thread
)) {
758 result
= KERN_ABORTED
;
759 (void)act_lock_thread(target
);
760 thread
= THREAD_NULL
;
764 thread1
= act_lock_thread(target
);
765 if (thread1
== thread
)
768 thread_unstop(thread
);
772 if (result
== KERN_SUCCESS
)
773 result
= act_thread_dup(self
, target
);
775 if ( thread
!= THREAD_NULL
&&
776 thread
->top_act
== target
)
777 thread_unstop(thread
);
779 thread_release(target
);
780 act_unlock_thread(target
);
789 * Set the status of the specified thread.
790 * Called with (and returns with) no locks held.
794 register thread_act_t act
,
796 thread_state_t tstate
,
797 mach_msg_type_number_t count
)
799 kern_return_t result
= KERN_SUCCESS
;
802 thread
= act_lock_thread(act
);
804 if ( act
!= current_act() &&
805 (act
->suspend_count
== 0 ||
806 thread
== THREAD_NULL
||
807 (thread
->state
& TH_RUN
) ||
808 thread
->top_act
!= act
) )
809 result
= KERN_FAILURE
;
811 if (result
== KERN_SUCCESS
)
812 result
= act_machine_set_state(act
, flavor
, tstate
, count
);
814 act_unlock_thread(act
);
822 * Get the status of the specified thread.
826 register thread_act_t act
,
828 thread_state_t tstate
,
829 mach_msg_type_number_t
*count
)
831 kern_return_t result
= KERN_SUCCESS
;
834 thread
= act_lock_thread(act
);
836 if ( act
!= current_act() &&
837 (act
->suspend_count
== 0 ||
838 thread
== THREAD_NULL
||
839 (thread
->state
& TH_RUN
) ||
840 thread
->top_act
!= act
) )
841 result
= KERN_FAILURE
;
843 if (result
== KERN_SUCCESS
)
844 result
= act_machine_get_state(act
, flavor
, tstate
, count
);
846 act_unlock_thread(act
);
852 * Kernel-internal thread_activation interfaces used outside this file:
856 * act_init() - Initialize activation handling code
861 thr_act_zone
= zinit(
862 sizeof(struct thread_activation
),
863 ACT_MAX
* sizeof(struct thread_activation
), /* XXX */
864 ACT_CHUNK
* sizeof(struct thread_activation
),
872 * act_create - Create a new activation in a specific task.
875 act_create(task_t task
,
876 thread_act_t
*new_act
)
878 thread_act_t thr_act
;
883 thr_act
= &pageout_act
;
886 thr_act
= (thread_act_t
)zalloc(thr_act_zone
);
888 return(KERN_RESOURCE_SHORTAGE
);
891 if (watchacts
& WA_ACT_LNK
)
892 printf("act_create(task=%x,thr_act@%x=%x)\n",
893 task
, new_act
, thr_act
);
894 #endif /* MACH_ASSERT */
896 /* Start by zeroing everything; then init non-zero items only */
897 bzero((char *)thr_act
, sizeof(*thr_act
));
899 if (thr_act
== &pageout_act
)
900 thr_act
->thread
= &pageout_thread
;
905 * Take care of the uthread allocation
906 * do it early in order to make KERN_RESOURCE_SHORTAGE
908 * uthread_alloc() will bzero the storage allocated.
910 extern void *uthread_alloc(task_t
, thread_act_t
);
912 thr_act
->uthread
= uthread_alloc(task
, thr_act
);
913 if(thr_act
->uthread
== 0) {
914 /* Put the thr_act back on the thr_act zone */
915 zfree(thr_act_zone
, (vm_offset_t
)thr_act
);
916 return(KERN_RESOURCE_SHORTAGE
);
919 #endif /* MACH_BSD */
922 * Start with one reference for the caller and one for the
925 act_lock_init(thr_act
);
926 thr_act
->ref_count
= 2;
928 /* Latch onto the task. */
929 thr_act
->task
= task
;
930 task_reference(task
);
932 /* special_handler will always be last on the returnhandlers list. */
933 thr_act
->special_handler
.next
= 0;
934 thr_act
->special_handler
.handler
= special_handler
;
937 thr_act
->act_profiled
= FALSE
;
938 thr_act
->act_profiled_own
= FALSE
;
939 thr_act
->profil_buffer
= NULLPROFDATA
;
942 /* Initialize the held_ulocks queue as empty */
943 queue_init(&thr_act
->held_ulocks
);
945 /* Inherit the profiling status of the parent task */
946 act_prof_init(thr_act
, task
);
948 ipc_thr_act_init(task
, thr_act
);
949 act_machine_create(task
, thr_act
);
952 * If thr_act created in kernel-loaded task, alter its saved
953 * state to so indicate
955 if (task
->kernel_loaded
) {
956 act_user_to_kernel(thr_act
);
959 /* Cache the task's map and take a reference to it */
963 /* Inline vm_map_reference cause we don't want to increment res_count */
964 mutex_lock(&map
->s_lock
);
966 mutex_unlock(&map
->s_lock
);
973 * act_free - called when an thr_act's ref_count drops to zero.
975 * This can only happen after the activation has been reaped, and
976 * all other references to it have gone away. We can now release
977 * the last critical resources, unlink the activation from the
978 * task, and release the reference on the thread shuttle itself.
980 * Called with activation locked.
983 int dangerous_bzero
= 1; /* paranoia & safety */
987 act_free(thread_act_t thr_act
)
996 if (watchacts
& WA_EXIT
)
997 printf("act_free(%x(%d)) thr=%x tsk=%x(%d) %sactive\n",
998 thr_act
, thr_act
->ref_count
, thr_act
->thread
,
1000 thr_act
->task
? thr_act
->task
->ref_count
: 0,
1001 thr_act
->active
? " " : " !");
1002 #endif /* MACH_ASSERT */
1004 assert(!thr_act
->active
);
1006 task
= thr_act
->task
;
1009 task_proc
= task
->bsd_info
;
1010 if (thr
= thr_act
->thread
) {
1011 time_value_t user_time
, system_time
;
1013 thread_read_times(thr
, &user_time
, &system_time
);
1014 time_value_add(&task
->total_user_time
, &user_time
);
1015 time_value_add(&task
->total_system_time
, &system_time
);
1017 /* Unlink the thr_act from the task's thr_act list,
1018 * so it doesn't appear in calls to task_threads and such.
1019 * The thr_act still keeps its ref on the task, however.
1021 queue_remove(&task
->thr_acts
, thr_act
, thread_act_t
, thr_acts
);
1022 thr_act
->thr_acts
.next
= NULL
;
1023 task
->thr_act_count
--;
1024 task
->res_act_count
--;
1026 task_deallocate(task
);
1027 thread_deallocate(thr
);
1028 act_machine_destroy(thr_act
);
1031 * Must have never really gotten started
1032 * no unlinking from the task and no need
1033 * to free the shuttle.
1036 task_deallocate(task
);
1039 act_prof_deallocate(thr_act
);
1040 ipc_thr_act_terminate(thr_act
);
1043 * Drop the cached map reference.
1044 * Inline version of vm_map_deallocate() because we
1045 * don't want to decrement the map's residence count here.
1048 mutex_lock(&map
->s_lock
);
1049 ref
= --map
->ref_count
;
1050 mutex_unlock(&map
->s_lock
);
1052 vm_map_destroy(map
);
1057 * Free uthread BEFORE the bzero.
1058 * Not doing so will result in a leak.
1060 extern void uthread_free(task_t
, void *, void *);
1062 void *ut
= thr_act
->uthread
;
1063 thr_act
->uthread
= 0;
1064 uthread_free(task
, ut
, task_proc
);
1066 #endif /* MACH_BSD */
1069 if (dangerous_bzero
) /* dangerous if we're still using it! */
1070 bzero((char *)thr_act
, sizeof(*thr_act
));
1071 #endif /* MACH_ASSERT */
1072 /* Put the thr_act back on the thr_act zone */
1073 zfree(thr_act_zone
, (vm_offset_t
)thr_act
);
1078 * act_attach - Attach an thr_act to the top of a thread ("push the stack").
1080 * The thread_shuttle must be either the current one or a brand-new one.
1081 * Assumes the thr_act is active but not in use.
1083 * Already locked: thr_act plus "appropriate" thread-related locks
1084 * (see act_lock_thread()).
1088 thread_act_t thr_act
,
1090 unsigned init_alert_mask
)
1095 assert(thread
== current_thread() || thread
->top_act
== THR_ACT_NULL
);
1096 if (watchacts
& WA_ACT_LNK
)
1097 printf("act_attach(thr_act %x(%d) thread %x(%d) mask %d)\n",
1098 thr_act
, thr_act
->ref_count
, thread
, thread
->ref_count
,
1100 #endif /* MACH_ASSERT */
1103 * Chain the thr_act onto the thread's thr_act stack.
1104 * Set mask and auto-propagate alerts from below.
1106 thr_act
->ref_count
++;
1107 thr_act
->thread
= thread
;
1108 thr_act
->higher
= THR_ACT_NULL
; /*safety*/
1109 thr_act
->alerts
= 0;
1110 thr_act
->alert_mask
= init_alert_mask
;
1111 lower
= thr_act
->lower
= thread
->top_act
;
1113 if (lower
!= THR_ACT_NULL
) {
1114 lower
->higher
= thr_act
;
1115 thr_act
->alerts
= (lower
->alerts
& init_alert_mask
);
1118 thread
->top_act
= thr_act
;
1124 * Remove the current thr_act from the top of the current thread, i.e.
1125 * "pop the stack". Assumes already locked: thr_act plus "appropriate"
1126 * thread-related locks (see act_lock_thread).
1130 thread_act_t cur_act
)
1132 thread_t cur_thread
= cur_act
->thread
;
1135 if (watchacts
& (WA_EXIT
|WA_ACT_LNK
))
1136 printf("act_detach: thr_act %x(%d), thrd %x(%d) task=%x(%d)\n",
1137 cur_act
, cur_act
->ref_count
,
1138 cur_thread
, cur_thread
->ref_count
,
1140 cur_act
->task
? cur_act
->task
->ref_count
: 0);
1141 #endif /* MACH_ASSERT */
1143 /* Unlink the thr_act from the thread's thr_act stack */
1144 cur_thread
->top_act
= cur_act
->lower
;
1145 cur_act
->thread
= 0;
1146 cur_act
->ref_count
--;
1147 assert(cur_act
->ref_count
> 0);
1150 cur_act
->lower
= cur_act
->higher
= THR_ACT_NULL
;
1151 if (cur_thread
->top_act
)
1152 cur_thread
->top_act
->higher
= THR_ACT_NULL
;
1153 #endif /* MACH_ASSERT */
1160 * Synchronize a thread operation with migration.
1161 * Called with nothing locked.
1162 * Returns with thr_act locked.
1166 thread_act_t thr_act
)
1170 * JMM - We have moved away from explicit RPC locks
1171 * and towards a generic migration approach. The wait
1172 * queue lock will be the point of synchronization for
1173 * the shuttle linkage when this is rolled out. Until
1174 * then, just lock the act.
1177 return (thr_act
->thread
);
1181 * Unsynchronize with migration (i.e., undo an act_lock_thread() call).
1182 * Called with thr_act locked, plus thread locks held that are
1183 * "correct" for thr_act's state. Returns with nothing locked.
1186 act_unlock_thread(thread_act_t thr_act
)
1188 act_unlock(thr_act
);
1192 * Synchronize with migration given a pointer to a shuttle (instead of an
1193 * activation). Called with nothing locked; returns with all
1194 * "appropriate" thread-related locks held (see act_lock_thread()).
1200 thread_act_t thr_act
;
1203 thr_act
= thread
->top_act
;
1206 if (!act_lock_try(thr_act
)) {
1216 * Unsynchronize with an activation starting from a pointer to
1223 thread_act_t thr_act
;
1225 if (thr_act
= thread
->top_act
) {
1226 act_unlock(thr_act
);
1233 * If a new activation is given, switch to it. If not,
1234 * switch to the lower activation (pop). Returns the old
1235 * activation. This is for migration support.
1242 thread_act_t old
, new;
1247 disable_preemption();
1250 thread
= current_thread();
1253 * Find the old and new activation for switch.
1255 old
= thread
->top_act
;
1259 new->thread
= thread
;
1265 assert(new != THR_ACT_NULL
);
1266 assert(cpu_to_processor(cpu
)->cpu_data
->active_thread
== thread
);
1267 active_kloaded
[cpu
] = (new->kernel_loaded
) ? new : 0;
1269 /* This is where all the work happens */
1270 machine_switch_act(thread
, old
, new, cpu
);
1273 * Push or pop an activation on the chain.
1276 act_attach(new, thread
, 0);
1282 enable_preemption();
1288 * install_special_handler
1289 * Install the special returnhandler that handles suspension and
1290 * termination, if it hasn't been installed already.
1292 * Already locked: RPC-related locks for thr_act, but not
1293 * scheduling lock (thread_lock()) of the associated thread.
1296 install_special_handler(
1297 thread_act_t thr_act
)
1300 thread_t thread
= thr_act
->thread
;
1303 if (watchacts
& WA_ACT_HDLR
)
1304 printf("act_%x: install_special_hdlr(%x)\n",current_act(),thr_act
);
1305 #endif /* MACH_ASSERT */
1308 thread_lock(thread
);
1309 install_special_handler_locked(thr_act
);
1310 thread_unlock(thread
);
1315 * install_special_handler_locked
1316 * Do the work of installing the special_handler.
1318 * Already locked: RPC-related locks for thr_act, plus the
1319 * scheduling lock (thread_lock()) of the associated thread.
1322 install_special_handler_locked(
1325 thread_t thread
= act
->thread
;
1328 /* The work handler must always be the last ReturnHandler on the list,
1329 because it can do tricky things like detach the thr_act. */
1330 for (rh
= &act
->handlers
; *rh
; rh
= &(*rh
)->next
)
1332 if (rh
!= &act
->special_handler
.next
)
1333 *rh
= &act
->special_handler
;
1335 if (act
== thread
->top_act
) {
1337 * Temporarily undepress, so target has
1338 * a chance to do locking required to
1339 * block itself in special_handler().
1341 if (thread
->sched_mode
& TH_MODE_ISDEPRESSED
)
1342 compute_priority(thread
, TRUE
);
1345 thread_ast_set(act
, AST_APC
);
1346 if (act
== current_act())
1347 ast_propagate(act
->ast
);
1349 processor_t processor
= thread
->last_processor
;
1351 if ( processor
!= PROCESSOR_NULL
&&
1352 processor
->state
== PROCESSOR_RUNNING
&&
1353 processor
->cpu_data
->active_thread
== thread
)
1354 cause_ast_check(processor
);
1361 thread_apc_handler_t apc
)
1363 extern thread_apc_handler_t bsd_ast
;
1365 assert(apc
== bsd_ast
);
1366 return (KERN_FAILURE
);
1372 thread_apc_handler_t apc
)
1374 extern thread_apc_handler_t bsd_ast
;
1376 assert(apc
== bsd_ast
);
1377 return (KERN_FAILURE
);
1381 * Activation control support routines internal to this file:
1385 * act_execute_returnhandlers() - does just what the name says
1387 * This is called by system-dependent code when it detects that
1388 * thr_act->handlers is non-null while returning into user mode.
1391 act_execute_returnhandlers(void)
1393 thread_act_t act
= current_act();
1396 if (watchacts
& WA_ACT_HDLR
)
1397 printf("execute_rtn_hdlrs: act=%x\n", act
);
1398 #endif /* MACH_ASSERT */
1400 thread_ast_clear(act
, AST_APC
);
1405 thread_t thread
= act_lock_thread(act
);
1408 thread_lock(thread
);
1411 thread_unlock(thread
);
1413 act_unlock_thread(act
);
1416 act
->handlers
= rh
->next
;
1417 thread_unlock(thread
);
1419 act_unlock_thread(act
);
1422 if (watchacts
& WA_ACT_HDLR
)
1423 printf( (rh
== &act
->special_handler
) ?
1424 "\tspecial_handler\n" : "\thandler=%x\n", rh
->handler
);
1425 #endif /* MACH_ASSERT */
1428 (*rh
->handler
)(rh
, act
);
1433 * special_handler_continue
1435 * Continuation routine for the special handler blocks. It checks
1436 * to see whether there has been any new suspensions. If so, it
1437 * installs the special handler again. Otherwise, it checks to see
1438 * if the current depression needs to be re-instated (it may have
1439 * been temporarily removed in order to get to this point in a hurry).
1442 special_handler_continue(void)
1444 thread_act_t self
= current_act();
1446 if (self
->suspend_count
> 0)
1447 install_special_handler(self
);
1449 thread_t thread
= self
->thread
;
1450 spl_t s
= splsched();
1452 thread_lock(thread
);
1453 if (thread
->sched_mode
& TH_MODE_ISDEPRESSED
) {
1454 processor_t myprocessor
= thread
->last_processor
;
1456 thread
->sched_pri
= DEPRESSPRI
;
1457 myprocessor
->current_pri
= thread
->sched_pri
;
1458 thread
->sched_mode
&= ~TH_MODE_PREEMPT
;
1460 thread_unlock(thread
);
1464 thread_exception_return();
1469 * special_handler - handles suspension, termination. Called
1470 * with nothing locked. Returns (if it returns) the same way.
1477 thread_t thread
= act_lock_thread(self
);
1480 assert(thread
!= THREAD_NULL
);
1483 thread_lock(thread
);
1484 thread
->state
&= ~(TH_ABORT
|TH_ABORT_SAFELY
); /* clear any aborts */
1485 thread_unlock(thread
);
1489 * If someone has killed this invocation,
1490 * invoke the return path with a terminated exception.
1492 if (!self
->active
) {
1493 act_unlock_thread(self
);
1494 act_machine_return(KERN_TERMINATED
);
1498 * If we're suspended, go to sleep and wait for someone to wake us up.
1500 if (self
->suspend_count
> 0) {
1501 if (self
->handlers
== NULL
) {
1502 assert_wait(&self
->suspend_count
, THREAD_ABORTSAFE
);
1503 act_unlock_thread(self
);
1504 thread_block(special_handler_continue
);
1508 act_unlock_thread(self
);
1510 special_handler_continue();
1514 act_unlock_thread(self
);
1518 * Update activation that belongs to a task created via kernel_task_create().
1522 thread_act_t thr_act
)
1524 pcb_user_to_kernel(thr_act
);
1525 thr_act
->kernel_loading
= TRUE
;
1529 * Already locked: activation (shuttle frozen within)
1531 * Mark an activation inactive, and prepare it to terminate
1536 thread_act_t thr_act
)
1540 if (watchacts
& WA_EXIT
) {
1541 printf("act_%x: act_disable_tl(thr_act=%x(%d))%sactive",
1542 current_act(), thr_act
, thr_act
->ref_count
,
1543 (thr_act
->active
? " " : " !"));
1545 (void) dump_act(thr_act
);
1547 #endif /* MACH_ASSERT */
1549 thr_act
->active
= 0;
1551 /* Drop the thr_act reference taken for being active.
1552 * (There is still at least one reference left:
1553 * the one we were passed.)
1554 * Inline the deallocate because thr_act is locked.
1556 act_locked_act_deallocate(thr_act
);
1560 * act_alert - Register an alert from this activation.
1562 * Each set bit is propagated upward from (but not including) this activation,
1563 * until the top of the chain is reached or the bit is masked.
1566 act_alert(thread_act_t thr_act
, unsigned alerts
)
1568 thread_t thread
= act_lock_thread(thr_act
);
1571 if (watchacts
& WA_ACT_LNK
)
1572 printf("act_alert %x: %x\n", thr_act
, alerts
);
1573 #endif /* MACH_ASSERT */
1576 thread_act_t act_up
= thr_act
;
1577 while ((alerts
) && (act_up
!= thread
->top_act
)) {
1578 act_up
= act_up
->higher
;
1579 alerts
&= act_up
->alert_mask
;
1580 act_up
->alerts
|= alerts
;
1583 * XXXX If we reach the top, and it is blocked in glue
1584 * code, do something to kick it. XXXX
1587 act_unlock_thread(thr_act
);
1589 return KERN_SUCCESS
;
1592 kern_return_t
act_alert_mask(thread_act_t thr_act
, unsigned alert_mask
)
1594 panic("act_alert_mask NOT YET IMPLEMENTED\n");
1595 return KERN_SUCCESS
;
1598 typedef struct GetSetState
{
1599 struct ReturnHandler rh
;
1606 /* Local Forward decls */
1607 kern_return_t
get_set_state(
1608 thread_act_t thr_act
, int flavor
,
1609 thread_state_t state
, int *pcount
,
1610 void (*handler
)(ReturnHandler
*rh
, thread_act_t thr_act
));
1611 void get_state_handler(ReturnHandler
*rh
, thread_act_t thr_act
);
1612 void set_state_handler(ReturnHandler
*rh
, thread_act_t thr_act
);
/*
 * get_set_state(thr_act ...)
 *
 * General code to install g/set_state handler.
 * Called with thr_act's act_lock() and "appropriate"
 * thread-related locks held.  (See act_lock_thread().)
 *
 * Queues a GetSetState request (built on this stack) onto the target
 * activation's return-handler list, then blocks until the handler has
 * run on the target and posted its result.  Returns the handler's
 * kern_return_t result.  Returns with the act/thread lock dropped.
 *
 * NOTE(review): the signature lines, the act_set_apc() poke and the
 * retry-loop skeleton were lost in this copy and are reconstructed --
 * confirm against the original before relying on them.
 */
kern_return_t
get_set_state(
	thread_act_t act, int flavor,
	thread_state_t state, int *pcount,
	void (*handler)(ReturnHandler *rh, thread_act_t thr_act))
{
	GetSetState gss;

	/* Initialize a small parameter structure */
	gss.rh.handler = handler;
	gss.flavor = flavor;
	gss.state = state;
	gss.pcount = pcount;
	gss.result = KERN_ABORTED;	/* iff wait below is interrupted */

	/* Add it to the thr_act's return handler list */
	gss.rh.next = act->handlers;
	act->handlers = &gss.rh;

	/* Make the target notice its pending return handlers. */
	act_set_apc(act);

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR) {
		printf("act_%x: get_set_state(act=%x flv=%x state=%x ptr@%x=%x)",
		       current_act(), act, flavor, state,
		       pcount, (pcount ? *pcount : 0));
		printf((handler == get_state_handler ? "get_state_hdlr\n" :
		       (handler == set_state_handler ? "set_state_hdlr\n" :
			"hndler=%x\n")), handler);
	}
#endif	/* MACH_ASSERT */

	assert(act->thread);
	assert(act != current_act());	/* would deadlock on ourselves */

	for (;;) {
		wait_result_t result;

		/* If the target is the top act of its thread, kick any
		 * suspend-count waiter so the handler can get to run. */
		if (act->inited &&
		    act->thread->top_act == act)
			thread_wakeup_one(&act->suspend_count);

		/*
		 * Wait must be interruptible to avoid deadlock (e.g.) with
		 * task_suspend() when caller and target of get_set_state()
		 * are in same task.
		 */
		result = assert_wait(&gss, THREAD_ABORTSAFE);
		act_unlock_thread(act);

		if (result == THREAD_WAITING)
			result = thread_block(THREAD_CONTINUE_NULL);
		assert(result != THREAD_WAITING);

		/* Handler ran: gss.result is no longer the sentinel. */
		if (gss.result != KERN_ABORTED) {
			assert(result != THREAD_INTERRUPTED);
			break;
		}

		/* JMM - What about other aborts (like BSD signals)? */
		if (current_act()->handlers)
			act_execute_returnhandlers();

		act_lock_thread(act);
	}

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("act_%x: get_set_state returns %x\n",
		       current_act(), gss.result);
#endif	/* MACH_ASSERT */

	return (gss.result);
}
1702 set_state_handler(ReturnHandler
*rh
, thread_act_t thr_act
)
1704 GetSetState
*gss
= (GetSetState
*)rh
;
1707 if (watchacts
& WA_ACT_HDLR
)
1708 printf("act_%x: set_state_handler(rh=%x,thr_act=%x)\n",
1709 current_act(), rh
, thr_act
);
1710 #endif /* MACH_ASSERT */
1712 gss
->result
= act_machine_set_state(thr_act
, gss
->flavor
,
1713 gss
->state
, *gss
->pcount
);
1714 thread_wakeup((event_t
)gss
);
1718 get_state_handler(ReturnHandler
*rh
, thread_act_t thr_act
)
1720 GetSetState
*gss
= (GetSetState
*)rh
;
1723 if (watchacts
& WA_ACT_HDLR
)
1724 printf("act_%x: get_state_handler(rh=%x,thr_act=%x)\n",
1725 current_act(), rh
, thr_act
);
1726 #endif /* MACH_ASSERT */
1728 gss
->result
= act_machine_get_state(thr_act
, gss
->flavor
,
1730 (mach_msg_type_number_t
*) gss
->pcount
);
1731 thread_wakeup((event_t
)gss
);
1735 act_get_state_locked(thread_act_t thr_act
, int flavor
, thread_state_t state
,
1736 mach_msg_type_number_t
*pcount
)
1739 if (watchacts
& WA_ACT_HDLR
)
1740 printf("act_%x: act_get_state_L(thr_act=%x,flav=%x,st=%x,pcnt@%x=%x)\n",
1741 current_act(), thr_act
, flavor
, state
, pcount
,
1742 (pcount
? *pcount
: 0));
1743 #endif /* MACH_ASSERT */
1745 return(get_set_state(thr_act
, flavor
, state
, (int*)pcount
, get_state_handler
));
1749 act_set_state_locked(thread_act_t thr_act
, int flavor
, thread_state_t state
,
1750 mach_msg_type_number_t count
)
1753 if (watchacts
& WA_ACT_HDLR
)
1754 printf("act_%x: act_set_state_L(thr_act=%x,flav=%x,st=%x,pcnt@%x=%x)\n",
1755 current_act(), thr_act
, flavor
, state
, count
, count
);
1756 #endif /* MACH_ASSERT */
1758 return(get_set_state(thr_act
, flavor
, state
, (int*)&count
, set_state_handler
));
1762 act_set_state(thread_act_t thr_act
, int flavor
, thread_state_t state
,
1763 mach_msg_type_number_t count
)
1765 if (thr_act
== THR_ACT_NULL
|| thr_act
== current_act())
1766 return(KERN_INVALID_ARGUMENT
);
1768 act_lock_thread(thr_act
);
1769 return(act_set_state_locked(thr_act
, flavor
, state
, count
));
1774 act_get_state(thread_act_t thr_act
, int flavor
, thread_state_t state
,
1775 mach_msg_type_number_t
*pcount
)
1777 if (thr_act
== THR_ACT_NULL
|| thr_act
== current_act())
1778 return(KERN_INVALID_ARGUMENT
);
1780 act_lock_thread(thr_act
);
1781 return(act_get_state_locked(thr_act
, flavor
, state
, pcount
));
/*
 * act_set_astbsd - post an AST_BSD asynchronous system trap against act.
 *
 * For the caller's own activation the AST is simply set and propagated.
 * Otherwise the target's thread is locked, the AST bit set, and -- when
 * the thread is currently running on another processor -- that processor
 * is poked with cause_ast_check() so it notices the AST promptly.
 *
 * Runs at splsched() so the thread lock is interrupt-safe.
 *
 * NOTE(review): the function header was lost in this copy; the name and
 * signature are reconstructed from the AST_BSD usage -- confirm.
 */
void
act_set_astbsd(
	thread_act_t	act)
{
	spl_t	s = splsched();

	if (act == current_act()) {
		thread_ast_set(act, AST_BSD);
		ast_propagate(act->ast);
	}
	else {
		thread_t	thread = act->thread;
		processor_t	processor;

		thread_lock(thread);
		thread_ast_set(act, AST_BSD);
		processor = thread->last_processor;
		if ( processor != PROCESSOR_NULL		&&
		     processor->state == PROCESSOR_RUNNING	&&
		     processor->cpu_data->active_thread == thread )
			cause_ast_check(processor);
		thread_unlock(thread);
	}

	splx(s);
}
/*
 * act_set_apc - post an AST_APC (asynchronous procedure call) against
 * act, used e.g. to make a target run its queued return handlers.
 *
 * Mirrors act_set_astbsd(): set-and-propagate for the current
 * activation; otherwise lock the target's thread, set the AST bit, and
 * poke the processor the thread is running on via cause_ast_check().
 *
 * Runs at splsched() so the thread lock is interrupt-safe.
 *
 * NOTE(review): the function header was lost in this copy; the name and
 * signature are reconstructed from the AST_APC usage -- confirm.
 */
void
act_set_apc(
	thread_act_t	act)
{
	spl_t	s = splsched();

	if (act == current_act()) {
		thread_ast_set(act, AST_APC);
		ast_propagate(act->ast);
	}
	else {
		thread_t	thread = act->thread;
		processor_t	processor;

		thread_lock(thread);
		thread_ast_set(act, AST_APC);
		processor = thread->last_processor;
		if ( processor != PROCESSOR_NULL		&&
		     processor->state == PROCESSOR_RUNNING	&&
		     processor->cpu_data->active_thread == thread )
			cause_ast_check(processor);
		thread_unlock(thread);
	}

	splx(s);
}
/*
 * act_ulock_release_all - release every user-level lock (ulock) still
 * held by thr_act, typically as part of activation teardown.
 *
 * Each ulock is first marked unstable (lock_make_unstable) and then
 * released on thr_act's behalf (lock_release_internal); both return
 * values are deliberately ignored.
 *
 * NOTE(review): loop termination assumes lock_release_internal()
 * removes the ulock from thr_act->held_ulocks -- confirm in the
 * ulock implementation.
 */
void
act_ulock_release_all(thread_act_t thr_act)
{
	ulock_t ulock;

	while (!queue_empty(&thr_act->held_ulocks)) {
		ulock = (ulock_t) queue_first(&thr_act->held_ulocks);
		(void) lock_make_unstable(ulock, thr_act);
		(void) lock_release_internal(ulock, thr_act);
	}
}
/*
 * Provide routines (for export to other components) of things that
 * are implemented as macros internally.
 */
/*
 * thread_self - return the caller's own activation with an extra
 * reference taken via act_reference(); the caller is expected to
 * release it.  Out-of-line counterpart of the current_act_fast() macro.
 *
 * NOTE(review): the name line was lost in this copy; "thread_self" is
 * inferred from the mach_thread_self() twin that follows -- confirm.
 */
thread_act_t
thread_self(void)
{
	thread_act_t self = current_act_fast();

	act_reference(self);
	return self;
}
/*
 * mach_thread_self - return the caller's own activation with an extra
 * reference taken via act_reference(); the caller is expected to
 * release it.  Exported counterpart of the current_act_fast() macro.
 */
thread_act_t
mach_thread_self(void)
{
	thread_act_t self = current_act_fast();

	act_reference(self);
	return self;
}
#undef act_reference
/*
 * act_reference - out-of-line, exported form of the act_reference()
 * macro (the #undef above removes the macro so a real function can be
 * defined here).  Takes one additional reference on thr_act via
 * act_reference_fast().
 */
void
act_reference(
	thread_act_t	thr_act)
{
	act_reference_fast(thr_act);
}
#undef act_deallocate
/*
 * act_deallocate - out-of-line, exported form of the act_deallocate()
 * macro (the #undef above removes the macro so a real function can be
 * defined here).  Drops one reference on thr_act via
 * act_deallocate_fast().
 */
void
act_deallocate(
	thread_act_t	thr_act)
{
	act_deallocate_fast(thr_act);
}