2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
23 * @OSF_FREE_COPYRIGHT@
26 * Copyright (c) 1993 The University of Utah and
27 * the Center for Software Science (CSS). All rights reserved.
29 * Permission to use, copy, modify and distribute this software and its
30 * documentation is hereby granted, provided that both the copyright
31 * notice and this permission notice appear in all copies of the
32 * software, derivative works or modified versions, and any portions
33 * thereof, and that both notices appear in supporting documentation.
35 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
36 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
37 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 * CSS requests users of this software to return to css-dist@cs.utah.edu any
40 * improvements that they make and grant CSS redistribution rights.
42 * Author: Bryan Ford, University of Utah CSS
44 * Thread_Activation management routines
48 #include <task_swapper.h>
49 #include <mach/kern_return.h>
50 #include <mach/alert.h>
51 #include <kern/etap_macros.h>
52 #include <kern/mach_param.h>
53 #include <kern/zalloc.h>
54 #include <kern/thread.h>
55 #include <kern/thread_swap.h>
56 #include <kern/task.h>
57 #include <kern/task_swap.h>
58 #include <kern/thread_act.h>
59 #include <kern/thread_pool.h>
60 #include <kern/sched_prim.h>
61 #include <kern/misc_protos.h>
62 #include <kern/assert.h>
63 #include <kern/exception.h>
64 #include <kern/ipc_mig.h>
65 #include <kern/ipc_tt.h>
66 #include <kern/profile.h>
67 #include <kern/machine.h>
69 #include <kern/syscall_subr.h>
70 #include <kern/sync_lock.h>
72 #include <kern/mk_sp.h> /*** ??? fix so this can be removed ***/
73 #include <mach_prof.h>
77 * Debugging printf control
80 unsigned int watchacts
= 0 /* WA_ALL */
81 ; /* Do-it-yourself & patchable */
85 * Track the number of times we need to swapin a thread to deallocate it.
87 int act_free_swapin
= 0;
90 * Forward declarations for functions local to this file.
92 kern_return_t
act_abort( thread_act_t
, int);
93 void special_handler(ReturnHandler
*, thread_act_t
);
94 void nudge(thread_act_t
);
95 kern_return_t
act_set_state_locked(thread_act_t
, int,
97 mach_msg_type_number_t
);
98 kern_return_t
act_get_state_locked(thread_act_t
, int,
100 mach_msg_type_number_t
*);
101 void act_set_apc(thread_act_t
);
102 void act_clr_apc(thread_act_t
);
103 void act_user_to_kernel(thread_act_t
);
104 void act_ulock_release_all(thread_act_t thr_act
);
106 void install_special_handler_locked(thread_act_t
);
108 static zone_t thr_act_zone
;
111 * Thread interfaces accessed via a thread_activation:
116 * Internal routine to terminate a thread.
117 * Called with task locked.
120 thread_terminate_internal(
121 register thread_act_t thr_act
)
125 struct ipc_port
*iplock
;
129 #endif /* NCPUS > 1 */
132 thread_swap_disable(thr_act
);
133 #endif /* THREAD_SWAPPER */
135 thread
= act_lock_thread(thr_act
);
136 if (!thr_act
->active
) {
137 act_unlock_thread(thr_act
);
138 return(KERN_TERMINATED
);
143 * Make sure this thread enters the kernel
145 if (thread
!= current_thread()) {
146 thread_hold(thr_act
);
147 act_unlock_thread(thr_act
);
149 if (!thread_stop_wait(thread
)) {
151 (void)act_lock_thread(thr_act
);
152 thread_release(thr_act
);
153 act_unlock_thread(thr_act
);
158 (void)act_lock_thread(thr_act
);
162 #endif /* NCPUS > 1 */
164 assert(thr_act
->active
);
165 act_disable_task_locked(thr_act
);
166 ret
= act_abort(thr_act
,FALSE
);
167 act_unlock_thread(thr_act
);
171 thread_unstop(thread
);
172 (void)act_lock_thread(thr_act
);
173 thread_release(thr_act
);
174 act_unlock_thread(thr_act
);
176 #endif /* NCPUS > 1 */
181 * Terminate a thread. Called with nothing locked.
186 register thread_act_t thr_act
)
191 if (thr_act
== THR_ACT_NULL
)
192 return KERN_INVALID_ARGUMENT
;
194 task
= thr_act
->task
;
195 if (((task
== kernel_task
) || (thr_act
->kernel_loaded
== TRUE
))
196 && (current_act() != thr_act
)) {
197 return(KERN_FAILURE
);
201 * Take the task lock and then call the internal routine
202 * that terminates a thread (it needs the task locked).
205 ret
= thread_terminate_internal(thr_act
);
209 * If a kernel thread is terminating itself, force an AST here.
210 * Kernel threads don't normally pass through the AST checking
211 * code - and all threads finish their own termination in the
212 * special handler APC.
214 if (((thr_act
->task
== kernel_task
) || (thr_act
->kernel_loaded
== TRUE
))
215 && (current_act() == thr_act
)) {
216 ast_taken(FALSE
, AST_APC
, 0);
217 panic("thread_terminate(): returning from ast_taken() for %x kernel activation\n", thr_act
);
226 * Suspend execution of the specified thread.
227 * This is a recursive-style suspension of the thread, a count of
228 * suspends is maintained.
230 * Called with thr_act locked "appropriately" for synchrony with
231 * RPC (see act_lock_thread()). Returns same way.
235 register thread_act_t thr_act
)
237 if (thr_act
->suspend_count
++ == 0) {
238 install_special_handler(thr_act
);
244 * Decrement internal suspension count for thr_act, setting thread
245 * runnable when count falls to zero.
247 * Called with thr_act locked "appropriately" for synchrony
248 * with RPC (see act_lock_thread()).
252 register thread_act_t thr_act
)
254 if( thr_act
->suspend_count
&&
255 (--thr_act
->suspend_count
== 0) )
261 register thread_act_t thr_act
)
265 if (thr_act
== THR_ACT_NULL
) {
266 return(KERN_INVALID_ARGUMENT
);
268 thread
= act_lock_thread(thr_act
);
269 if (!thr_act
->active
) {
270 act_unlock_thread(thr_act
);
271 return(KERN_TERMINATED
);
273 if (thr_act
->user_stop_count
++ == 0 &&
274 thr_act
->suspend_count
++ == 0 ) {
275 install_special_handler(thr_act
);
277 thr_act
== thread
->top_act
&& thread
!= current_thread()) {
279 act_unlock_thread(thr_act
);
280 (void)thread_wait(thread
);
284 * No need to wait for target thread
286 act_unlock_thread(thr_act
);
291 * Thread is already suspended
293 act_unlock_thread(thr_act
);
295 return(KERN_SUCCESS
);
300 register thread_act_t thr_act
)
302 register kern_return_t ret
;
306 if (thr_act
== THR_ACT_NULL
)
307 return(KERN_INVALID_ARGUMENT
);
308 thread
= act_lock_thread(thr_act
);
311 if (thr_act
->active
) {
312 if (thr_act
->user_stop_count
> 0) {
313 if( --thr_act
->user_stop_count
== 0 ) {
314 --thr_act
->suspend_count
;
322 ret
= KERN_TERMINATED
;
323 act_unlock_thread( thr_act
);
328 * This routine walks toward the head of an RPC chain starting at
329 * a specified thread activation. An alert bit is set and a special
330 * handler is installed for each thread it encounters.
332 * The target thread act and thread shuttle are already locked.
336 register thread_act_t thr_act
,
337 unsigned alert_bits
)
343 * Chase the chain, setting alert bits and installing
344 * special handlers for each thread act.
346 /*** Not yet SMP safe ***/
347 /*** Worse, where's the activation locking as the chain is walked? ***/
348 for (next
= thr_act
; next
!= THR_ACT_NULL
; next
= next
->higher
) {
349 next
->alerts
|= alert_bits
;
350 install_special_handler_locked(next
);
353 return(KERN_SUCCESS
);
357 * thread_depress_abort:
359 * Prematurely abort priority depression if there is one.
362 thread_depress_abort(
363 register thread_act_t thr_act
)
365 register thread_t thread
;
366 kern_return_t result
;
367 sched_policy_t
*policy
;
370 if (thr_act
== THR_ACT_NULL
)
371 return (KERN_INVALID_ARGUMENT
);
373 thread
= act_lock_thread(thr_act
);
374 /* if activation is terminating, this operation is not meaningful */
375 if (!thr_act
->active
) {
376 act_unlock_thread(thr_act
);
378 return (KERN_TERMINATED
);
383 policy
= &sched_policy
[thread
->policy
];
384 thread_unlock(thread
);
387 result
= policy
->sp_ops
.sp_thread_depress_abort(policy
, thread
);
389 act_unlock_thread(thr_act
);
396 * Already locked: all RPC-related locks for thr_act (see
397 * act_lock_thread()).
400 act_abort( thread_act_t thr_act
, int chain_break
)
404 struct ipc_port
*iplock
= thr_act
->pool_port
;
407 etap_data_t probe_data
;
409 ETAP_DATA_LOAD(probe_data
[0], thr_act
);
410 ETAP_DATA_LOAD(probe_data
[1], thr_act
->thread
);
411 ETAP_PROBE_DATA(ETAP_P_ACT_ABORT
,
418 * If the target thread activation is not the head...
420 if ( thr_act
->thread
->top_act
!= thr_act
) {
422 * mark the activation for abort,
423 * update the suspend count,
424 * always install the special handler
426 install_special_handler(thr_act
);
428 #ifdef AGRESSIVE_ABORT
429 /* release state buffer for target's outstanding invocation */
430 if (unwind_invoke_state(thr_act
) != KERN_SUCCESS
) {
431 panic("unwind_invoke_state failure");
434 /* release state buffer for target's incoming invocation */
435 if (thr_act
->lower
!= THR_ACT_NULL
) {
436 if (unwind_invoke_state(thr_act
->lower
)
438 panic("unwind_invoke_state failure");
442 /* unlink target thread activation from shuttle chain */
443 if ( thr_act
->lower
== THR_ACT_NULL
) {
445 * This is the root thread activation of the chain.
446 * Unlink the root thread act from the bottom of
449 thr_act
->higher
->lower
= THR_ACT_NULL
;
452 * This thread act is in the middle of the chain.
453 * Unlink the thread act from the middle of the chain.
455 thr_act
->higher
->lower
= thr_act
->lower
;
456 thr_act
->lower
->higher
= thr_act
->higher
;
458 /* set the terminated bit for RPC return processing */
459 thr_act
->lower
->alerts
|= SERVER_TERMINATED
;
462 orphan
= thr_act
->higher
;
464 /* remove the activation from its thread pool */
465 /* (note: this is okay for "rooted threads," too) */
466 act_locked_act_set_thread_pool(thr_act
, IP_NULL
);
468 /* (just to be thorough) release the IP lock */
469 if (iplock
!= IP_NULL
) ip_unlock(iplock
);
471 /* release one more reference for a rooted thread */
472 if (iplock
== IP_NULL
) act_locked_act_deallocate(thr_act
);
474 /* Presumably, the only reference to this activation is
475 * now held by the caller of this routine. */
476 assert(thr_act
->ref_count
== 1);
477 #else /*AGRESSIVE_ABORT*/
478 /* If there is a lower activation in the RPC chain... */
479 if (thr_act
->lower
!= THR_ACT_NULL
) {
480 /* ...indicate the server activation was terminated */
481 thr_act
->lower
->alerts
|= SERVER_TERMINATED
;
483 /* Mark (and process) any orphaned activations */
484 orphan
= thr_act
->higher
;
485 #endif /*AGRESSIVE_ABORT*/
487 /* indicate client of orphaned chain has been terminated */
488 orphan
->alerts
|= CLIENT_TERMINATED
;
491 * Set up posting of alert to headward portion of
494 /*** fix me -- orphan act is not locked ***/
495 post_alert(orphan
, ORPHANED
);
498 * Get attention of head of RPC chain.
500 nudge(thr_act
->thread
->top_act
);
501 return (KERN_SUCCESS
);
505 * If the target thread is the end of the chain, the thread
506 * has to be marked for abort and rip it out of any wait.
509 thread_lock(thr_act
->thread
);
510 if (thr_act
->thread
->top_act
== thr_act
) {
511 thr_act
->thread
->state
|= TH_ABORT
;
512 if (thr_act
->thread
->state
& TH_ABORT
)
513 clear_wait_internal(thr_act
->thread
, THREAD_INTERRUPTED
);
514 thread_unlock(thr_act
->thread
);
516 install_special_handler(thr_act
);
524 register thread_act_t thr_act
)
529 if (thr_act
== THR_ACT_NULL
|| thr_act
== current_act())
530 return (KERN_INVALID_ARGUMENT
);
532 * Lock the target thread and the current thread now,
533 * in case thread_halt() ends up being called below.
535 thread
= act_lock_thread(thr_act
);
536 if (!thr_act
->active
) {
537 act_unlock_thread(thr_act
);
538 return(KERN_TERMINATED
);
541 ret
= act_abort( thr_act
, FALSE
);
542 act_unlock_thread( thr_act
);
548 register thread_act_t thr_act
)
553 if (thr_act
== THR_ACT_NULL
|| thr_act
== current_act())
554 return(KERN_INVALID_ARGUMENT
);
556 thread
= act_lock_thread(thr_act
);
557 if (!thr_act
->active
) {
558 act_unlock_thread(thr_act
);
559 return(KERN_TERMINATED
);
561 if (thread
->top_act
!= thr_act
) {
562 act_unlock_thread(thr_act
);
563 return(KERN_FAILURE
);
568 if ( thread
->at_safe_point
) {
570 * It's an abortable wait, clear it, then
571 * let the thread go and return successfully.
573 clear_wait_internal(thread
, THREAD_INTERRUPTED
);
574 thread_unlock(thread
);
575 act_unlock_thread(thr_act
);
581 * if not stopped at a safepoint, just let it go and return failure.
583 thread_unlock(thread
);
584 act_unlock_thread(thr_act
);
589 /*** backward compatibility hacks ***/
590 #include <mach/thread_info.h>
591 #include <mach/thread_special_ports.h>
592 #include <ipc/ipc_port.h>
593 #include <mach/thread_act_server.h>
597 thread_act_t thr_act
,
598 thread_flavor_t flavor
,
599 thread_info_t thread_info_out
,
600 mach_msg_type_number_t
*thread_info_count
)
602 register thread_t thread
;
603 kern_return_t result
;
605 if (thr_act
== THR_ACT_NULL
)
606 return (KERN_INVALID_ARGUMENT
);
608 thread
= act_lock_thread(thr_act
);
609 if (!thr_act
->active
) {
610 act_unlock_thread(thr_act
);
612 return (KERN_TERMINATED
);
615 result
= thread_info_shuttle(thr_act
, flavor
,
616 thread_info_out
, thread_info_count
);
618 act_unlock_thread(thr_act
);
624 * Routine: thread_get_special_port [kernel call]
626 * Clones a send right for one of the thread's
631 * KERN_SUCCESS Extracted a send right.
632 * KERN_INVALID_ARGUMENT The thread is null.
633 * KERN_FAILURE The thread is dead.
634 * KERN_INVALID_ARGUMENT Invalid special port.
638 thread_get_special_port(
639 thread_act_t thr_act
,
648 if (watchacts
& WA_PORT
)
649 printf("thread_get_special_port(thr_act=%x, which=%x port@%x=%x\n",
650 thr_act
, which
, portp
, (portp
? *portp
: 0));
651 #endif /* MACH_ASSERT */
654 return KERN_INVALID_ARGUMENT
;
655 thread
= act_lock_thread(thr_act
);
657 case THREAD_KERNEL_PORT
:
658 whichp
= &thr_act
->ith_sself
;
662 act_unlock_thread(thr_act
);
663 return KERN_INVALID_ARGUMENT
;
666 if (!thr_act
->active
) {
667 act_unlock_thread(thr_act
);
671 port
= ipc_port_copy_send(*whichp
);
672 act_unlock_thread(thr_act
);
679 * Routine: thread_set_special_port [kernel call]
681 * Changes one of the thread's special ports,
682 * setting it to the supplied send right.
684 * Nothing locked. If successful, consumes
685 * the supplied send right.
687 * KERN_SUCCESS Changed the special port.
688 * KERN_INVALID_ARGUMENT The thread is null.
689 * KERN_FAILURE The thread is dead.
690 * KERN_INVALID_ARGUMENT Invalid special port.
694 thread_set_special_port(
695 thread_act_t thr_act
,
704 if (watchacts
& WA_PORT
)
705 printf("thread_set_special_port(thr_act=%x,which=%x,port=%x\n",
706 thr_act
, which
, port
);
707 #endif /* MACH_ASSERT */
710 return KERN_INVALID_ARGUMENT
;
712 thread
= act_lock_thread(thr_act
);
714 case THREAD_KERNEL_PORT
:
715 whichp
= &thr_act
->ith_self
;
719 act_unlock_thread(thr_act
);
720 return KERN_INVALID_ARGUMENT
;
723 if (!thr_act
->active
) {
724 act_unlock_thread(thr_act
);
730 act_unlock_thread(thr_act
);
733 ipc_port_release_send(old
);
738 * thread state should always be accessible by locking the thread
739 * and copying it. The activation messes things up so for right
740 * now if it's not the top of the chain, use a special handler to
741 * get the information when the shuttle returns to the activation.
745 register thread_act_t thr_act
,
747 thread_state_t state
, /* pointer to OUT array */
748 mach_msg_type_number_t
*state_count
) /*IN/OUT*/
751 thread_t thread
, nthread
;
753 #if 0 /* Grenoble - why?? */
754 if (thr_act
== THR_ACT_NULL
|| thr_act
== current_act())
756 if (thr_act
== THR_ACT_NULL
)
758 return (KERN_INVALID_ARGUMENT
);
760 thread
= act_lock_thread(thr_act
);
761 if (!thr_act
->active
) {
762 act_unlock_thread(thr_act
);
763 return(KERN_TERMINATED
);
766 thread_hold(thr_act
);
768 if (!thread
|| thr_act
!= thread
->top_act
)
770 act_unlock_thread(thr_act
);
771 (void)thread_stop_wait(thread
);
772 nthread
= act_lock_thread(thr_act
);
773 if (nthread
== thread
)
775 thread_unstop(thread
);
778 ret
= act_machine_get_state(thr_act
, flavor
,
780 if (thread
&& thr_act
== thread
->top_act
)
781 thread_unstop(thread
);
782 thread_release(thr_act
);
783 act_unlock_thread(thr_act
);
789 * Change thread's machine-dependent state. Called with nothing
790 * locked. Returns same way.
794 register thread_act_t thr_act
,
796 thread_state_t state
,
797 mach_msg_type_number_t state_count
)
800 thread_t thread
, nthread
;
802 #if 0 /* Grenoble - why?? */
803 if (thr_act
== THR_ACT_NULL
|| thr_act
== current_act())
805 if (thr_act
== THR_ACT_NULL
)
807 return (KERN_INVALID_ARGUMENT
);
809 * We have no kernel activations, so Utah's MO fails for signals etc.
811 * If we're blocked in the kernel, use non-blocking method, else
812 * pass locked thr_act+thread in to "normal" act_[gs]et_state().
815 thread
= act_lock_thread(thr_act
);
816 if (!thr_act
->active
) {
817 act_unlock_thread(thr_act
);
818 return(KERN_TERMINATED
);
821 thread_hold(thr_act
);
823 if (!thread
|| thr_act
!= thread
->top_act
)
825 act_unlock_thread(thr_act
);
826 (void)thread_stop_wait(thread
);
827 nthread
= act_lock_thread(thr_act
);
828 if (nthread
== thread
)
830 thread_unstop(thread
);
833 ret
= act_machine_set_state(thr_act
, flavor
,
835 if (thread
&& thr_act
== thread
->top_act
)
836 thread_unstop(thread
);
837 thread_release(thr_act
);
838 act_unlock_thread(thr_act
);
844 * Kernel-internal "thread" interfaces used outside this file:
849 thread_act_t source_thr_act
,
850 thread_act_t target_thr_act
)
853 thread_t thread
, nthread
;
855 if (target_thr_act
== THR_ACT_NULL
|| target_thr_act
== current_act())
856 return (KERN_INVALID_ARGUMENT
);
858 thread
= act_lock_thread(target_thr_act
);
859 if (!target_thr_act
->active
) {
860 act_unlock_thread(target_thr_act
);
861 return(KERN_TERMINATED
);
864 thread_hold(target_thr_act
);
866 if (!thread
|| target_thr_act
!= thread
->top_act
)
868 act_unlock_thread(target_thr_act
);
869 (void)thread_stop_wait(thread
);
870 nthread
= act_lock_thread(target_thr_act
);
871 if (nthread
== thread
)
873 thread_unstop(thread
);
876 ret
= act_thread_dup(source_thr_act
, target_thr_act
);
877 if (thread
&& target_thr_act
== thread
->top_act
)
878 thread_unstop(thread
);
879 thread_release(target_thr_act
);
880 act_unlock_thread(target_thr_act
);
889 * Set the status of the specified thread.
890 * Called with (and returns with) no locks held.
894 thread_act_t thr_act
,
896 thread_state_t tstate
,
897 mach_msg_type_number_t count
)
902 thread
= act_lock_thread(thr_act
);
904 assert(thread
->top_act
== thr_act
);
905 kr
= act_machine_set_state(thr_act
, flavor
, tstate
, count
);
906 act_unlock_thread(thr_act
);
913 * Get the status of the specified thread.
917 thread_act_t thr_act
,
919 thread_state_t tstate
,
920 mach_msg_type_number_t
*count
)
925 thread
= act_lock_thread(thr_act
);
927 assert(thread
->top_act
== thr_act
);
928 kr
= act_machine_get_state(thr_act
, flavor
, tstate
, count
);
929 act_unlock_thread(thr_act
);
934 * Kernel-internal thread_activation interfaces used outside this file:
938 * act_init() - Initialize activation handling code
943 thr_act_zone
= zinit(
944 sizeof(struct thread_activation
),
945 ACT_MAX
* sizeof(struct thread_activation
), /* XXX */
946 ACT_CHUNK
* sizeof(struct thread_activation
),
953 * act_create - Create a new activation in a specific task.
956 act_create(task_t task
,
957 thread_act_t
*new_act
)
959 thread_act_t thr_act
;
963 thr_act
= (thread_act_t
)zalloc(thr_act_zone
);
965 return(KERN_RESOURCE_SHORTAGE
);
968 if (watchacts
& WA_ACT_LNK
)
969 printf("act_create(task=%x,thr_act@%x=%x)\n",
970 task
, new_act
, thr_act
);
971 #endif /* MACH_ASSERT */
973 /* Start by zeroing everything; then init non-zero items only */
974 bzero((char *)thr_act
, sizeof(*thr_act
));
979 * Take care of the uthread allocation
980 * do it early in order to make KERN_RESOURCE_SHORTAGE
982 * uthread_alloc() will bzero the storage allocated.
984 extern void *uthread_alloc(void);
985 thr_act
->uthread
= uthread_alloc();
986 if(thr_act
->uthread
== 0) {
987 /* Put the thr_act back on the thr_act zone */
988 zfree(thr_act_zone
, (vm_offset_t
)thr_act
);
989 return(KERN_RESOURCE_SHORTAGE
);
992 #endif /* MACH_BSD */
995 * Start with one reference for the caller and one for the
998 act_lock_init(thr_act
);
999 thr_act
->ref_count
= 2;
1001 /* Latch onto the task. */
1002 thr_act
->task
= task
;
1003 task_reference(task
);
1005 /* Initialize sigbufp for High-Watermark buffer allocation */
1006 thr_act
->r_sigbufp
= (routine_descriptor_t
) &thr_act
->r_sigbuf
;
1007 thr_act
->r_sigbuf_size
= sizeof(thr_act
->r_sigbuf
);
1010 thr_act
->swap_state
= TH_SW_IN
;
1012 thr_act
->kernel_stack_swapped_in
= TRUE
;
1013 #endif /* MACH_ASSERT */
1014 #endif /* THREAD_SWAPPER */
1016 /* special_handler will always be last on the returnhandlers list. */
1017 thr_act
->special_handler
.next
= 0;
1018 thr_act
->special_handler
.handler
= special_handler
;
1021 thr_act
->act_profiled
= FALSE
;
1022 thr_act
->act_profiled_own
= FALSE
;
1023 thr_act
->profil_buffer
= NULLPROFDATA
;
1026 /* Initialize the held_ulocks queue as empty */
1027 queue_init(&thr_act
->held_ulocks
);
1029 /* Inherit the profiling status of the parent task */
1030 act_prof_init(thr_act
, task
);
1032 ipc_thr_act_init(task
, thr_act
);
1033 act_machine_create(task
, thr_act
);
1036 * If thr_act created in kernel-loaded task, alter its saved
1037 * state to so indicate
1039 if (task
->kernel_loaded
) {
1040 act_user_to_kernel(thr_act
);
1043 /* Cache the task's map and take a reference to it */
1047 /* Inline vm_map_reference cause we don't want to increment res_count */
1048 mutex_lock(&map
->s_lock
);
1050 assert(map
->res_count
> 0);
1051 assert(map
->ref_count
>= map
->res_count
);
1052 #endif /* TASK_SWAPPER */
1054 mutex_unlock(&map
->s_lock
);
1057 return KERN_SUCCESS
;
1061 * act_free - called when an thr_act's ref_count drops to zero.
1063 * This can only happen after the activation has been reaped, and
1064 * all other references to it have gone away. We can now release
1065 * the last critical resources, unlink the activation from the
1066 * task, and release the reference on the thread shuttle itself.
1068 * Called with activation locked.
1071 int dangerous_bzero
= 1; /* paranoia & safety */
1075 act_free(thread_act_t thr_act
)
1083 if (watchacts
& WA_EXIT
)
1084 printf("act_free(%x(%d)) thr=%x tsk=%x(%d) pport=%x%sactive\n",
1085 thr_act
, thr_act
->ref_count
, thr_act
->thread
,
1087 thr_act
->task
? thr_act
->task
->ref_count
: 0,
1089 thr_act
->active
? " " : " !");
1090 #endif /* MACH_ASSERT */
1094 assert(thr_act
->kernel_stack_swapped_in
);
1095 #endif /* THREAD_SWAPPER */
1097 assert(!thr_act
->active
);
1098 assert(!thr_act
->pool_port
);
1100 task
= thr_act
->task
;
1103 if (thr
= thr_act
->thread
) {
1104 time_value_t user_time
, system_time
;
1106 thread_read_times(thr
, &user_time
, &system_time
);
1107 time_value_add(&task
->total_user_time
, &user_time
);
1108 time_value_add(&task
->total_system_time
, &system_time
);
1110 /* Unlink the thr_act from the task's thr_act list,
1111 * so it doesn't appear in calls to task_threads and such.
1112 * The thr_act still keeps its ref on the task, however.
1114 queue_remove(&task
->thr_acts
, thr_act
, thread_act_t
, thr_acts
);
1115 thr_act
->thr_acts
.next
= NULL
;
1116 task
->thr_act_count
--;
1120 * Thread is supposed to be unswappable by now...
1122 assert(thr_act
->swap_state
== TH_SW_UNSWAPPABLE
||
1123 !thread_swap_unwire_stack
);
1124 #endif /* THREAD_SWAPPER */
1126 task
->res_act_count
--;
1128 task_deallocate(task
);
1129 thread_deallocate(thr
);
1130 act_machine_destroy(thr_act
);
1133 * Must have never really gotten started
1134 * no unlinking from the task and no need
1135 * to free the shuttle.
1138 task_deallocate(task
);
1141 sigbuf_dealloc(thr_act
);
1142 act_prof_deallocate(thr_act
);
1143 ipc_thr_act_terminate(thr_act
);
1146 * Drop the cached map reference.
1147 * Inline version of vm_map_deallocate() because we
1148 * don't want to decrement the map's residence count here.
1151 mutex_lock(&map
->s_lock
);
1153 assert(map
->res_count
>= 0);
1154 assert(map
->ref_count
> map
->res_count
);
1155 #endif /* TASK_SWAPPER */
1156 ref
= --map
->ref_count
;
1157 mutex_unlock(&map
->s_lock
);
1159 vm_map_destroy(map
);
1164 * Free uthread BEFORE the bzero.
1165 * Not doing so will result in a leak.
1167 extern void uthread_free(void *);
1168 void *ut
= thr_act
->uthread
;
1169 thr_act
->uthread
= 0;
1172 #endif /* MACH_BSD */
1175 if (dangerous_bzero
) /* dangerous if we're still using it! */
1176 bzero((char *)thr_act
, sizeof(*thr_act
));
1177 #endif /* MACH_ASSERT */
1178 /* Put the thr_act back on the thr_act zone */
1179 zfree(thr_act_zone
, (vm_offset_t
)thr_act
);
1184 * act_attach - Attach an thr_act to the top of a thread ("push the stack").
1186 * The thread_shuttle must be either the current one or a brand-new one.
1187 * Assumes the thr_act is active but not in use, also, that if it is
1188 * attached to an thread_pool (i.e. the thread_pool pointer is nonzero),
1189 * the thr_act has already been taken off the thread_pool's list.
1191 * Already locked: thr_act plus "appropriate" thread-related locks
1192 * (see act_lock_thread()).
1196 thread_act_t thr_act
,
1198 unsigned init_alert_mask
)
1203 assert(thread
== current_thread() || thread
->top_act
== THR_ACT_NULL
);
1204 if (watchacts
& WA_ACT_LNK
)
1205 printf("act_attach(thr_act %x(%d) thread %x(%d) mask %d)\n",
1206 thr_act
, thr_act
->ref_count
, thread
, thread
->ref_count
,
1208 #endif /* MACH_ASSERT */
1211 * Chain the thr_act onto the thread's thr_act stack.
1212 * Set mask and auto-propagate alerts from below.
1214 thr_act
->ref_count
++;
1215 thr_act
->thread
= thread
;
1216 thr_act
->higher
= THR_ACT_NULL
; /*safety*/
1217 thr_act
->alerts
= 0;
1218 thr_act
->alert_mask
= init_alert_mask
;
1219 lower
= thr_act
->lower
= thread
->top_act
;
1221 if (lower
!= THR_ACT_NULL
) {
1222 lower
->higher
= thr_act
;
1223 thr_act
->alerts
= (lower
->alerts
& init_alert_mask
);
1226 thread
->top_act
= thr_act
;
1232 * Remove the current thr_act from the top of the current thread, i.e.
1233 * "pop the stack". Assumes already locked: thr_act plus "appropriate"
1234 * thread-related locks (see act_lock_thread).
1238 thread_act_t cur_act
)
1240 thread_t cur_thread
= cur_act
->thread
;
1243 if (watchacts
& (WA_EXIT
|WA_ACT_LNK
))
1244 printf("act_detach: thr_act %x(%d), thrd %x(%d) task=%x(%d)\n",
1245 cur_act
, cur_act
->ref_count
,
1246 cur_thread
, cur_thread
->ref_count
,
1248 cur_act
->task
? cur_act
->task
->ref_count
: 0);
1249 #endif /* MACH_ASSERT */
1251 /* Unlink the thr_act from the thread's thr_act stack */
1252 cur_thread
->top_act
= cur_act
->lower
;
1253 cur_act
->thread
= 0;
1254 cur_act
->ref_count
--;
1255 assert(cur_act
->ref_count
> 0);
1257 thread_pool_put_act(cur_act
);
1260 cur_act
->lower
= cur_act
->higher
= THR_ACT_NULL
;
1261 if (cur_thread
->top_act
)
1262 cur_thread
->top_act
->higher
= THR_ACT_NULL
;
1263 #endif /* MACH_ASSERT */
1270 * Synchronize a thread operation with RPC. Called with nothing
1271 * locked. Returns with thr_act locked, plus one of four
1272 * combinations of other locks held:
1273 * none - for new activation not yet associated with thread_pool
1275 * rpc_lock(thr_act->thread) only - for base activation (one
1276 * without pool_port)
1277 * ip_lock(thr_act->pool_port) only - for empty activation (one
1278 * with no associated shuttle)
1279 * both locks - for "active" activation (has shuttle, lives
1281 * If thr_act has an associated shuttle, this function returns
1282 * its address. Otherwise it returns zero.
1286 thread_act_t thr_act
)
1291 * Allow the shuttle cloning code (q.v., when it
1292 * exists :-}) to obtain ip_lock()'s while holding
1297 pport
= thr_act
->pool_port
;
1298 if (!pport
|| ip_lock_try(pport
)) {
1299 if (!thr_act
->thread
)
1301 if (rpc_lock_try(thr_act
->thread
))
1306 act_unlock(thr_act
);
1309 return (thr_act
->thread
);
1313 * Unsynchronize with RPC (i.e., undo an act_lock_thread() call).
1314 * Called with thr_act locked, plus thread locks held that are
1315 * "correct" for thr_act's state. Returns with nothing locked.
1318 act_unlock_thread(thread_act_t thr_act
)
1320 if (thr_act
->thread
)
1321 rpc_unlock(thr_act
->thread
);
1322 if (thr_act
->pool_port
)
1323 ip_unlock(thr_act
->pool_port
);
1324 act_unlock(thr_act
);
1328 * Synchronize with RPC given a pointer to a shuttle (instead of an
1329 * activation). Called with nothing locked; returns with all
1330 * "appropriate" thread-related locks held (see act_lock_thread()).
1336 thread_act_t thr_act
;
1340 thr_act
= thread
->top_act
;
1343 if (!act_lock_try(thr_act
)) {
1348 if (thr_act
->pool_port
&&
1349 !ip_lock_try(thr_act
->pool_port
)) {
1351 act_unlock(thr_act
);
1361 * Unsynchronize with RPC starting from a pointer to a shuttle.
1362 * Called with RPC-related locks held that are appropriate to
1363 * shuttle's state; any activation is also locked.
1369 thread_act_t thr_act
;
1371 if (thr_act
= thread
->top_act
) {
1372 if (thr_act
->pool_port
)
1373 ip_unlock(thr_act
->pool_port
);
1374 act_unlock(thr_act
);
1382 * If a new activation is given, switch to it. If not,
1383 * switch to the lower activation (pop). Returns the old
1384 * activation. This is for RPC support.
1391 thread_act_t old
, new;
1396 disable_preemption();
1399 thread
= current_thread();
1402 * Find the old and new activation for switch.
1404 old
= thread
->top_act
;
1408 new->thread
= thread
;
1414 assert(new != THR_ACT_NULL
);
1416 assert(new->swap_state
!= TH_SW_OUT
&&
1417 new->swap_state
!= TH_SW_COMING_IN
);
1418 #endif /* THREAD_SWAPPER */
1420 assert(cpu_data
[cpu
].active_thread
== thread
);
1421 active_kloaded
[cpu
] = (new->kernel_loaded
) ? new : 0;
1423 /* This is where all the work happens */
1424 machine_switch_act(thread
, old
, new, cpu
);
1427 * Push or pop an activation on the chain.
1430 act_attach(new, thread
, 0);
1436 enable_preemption();
1442 * install_special_handler
1443 * Install the special returnhandler that handles suspension and
1444 * termination, if it hasn't been installed already.
1446 * Already locked: RPC-related locks for thr_act, but not
1447 * scheduling lock (thread_lock()) of the associated thread.
1450 install_special_handler(
1451 thread_act_t thr_act
)
1454 thread_t thread
= thr_act
->thread
;
1457 if (watchacts
& WA_ACT_HDLR
)
1458 printf("act_%x: install_special_hdlr(%x)\n",current_act(),thr_act
);
1459 #endif /* MACH_ASSERT */
1463 thread_lock(thread
);
1464 install_special_handler_locked(thr_act
);
1465 act_set_apc(thr_act
);
1467 thread_unlock(thread
);
1472 * install_special_handler_locked
1473 * Do the work of installing the special_handler.
1475 * Already locked: RPC-related locks for thr_act, plus the
1476 * scheduling lock (thread_lock()) of the associated thread.
1479 install_special_handler_locked(
1480 thread_act_t thr_act
)
1483 thread_t thread
= thr_act
->thread
;
1485 /* The work handler must always be the last ReturnHandler on the list,
1486 because it can do tricky things like detach the thr_act. */
1487 for (rh
= &thr_act
->handlers
; *rh
; rh
= &(*rh
)->next
)
1489 if (rh
!= &thr_act
->special_handler
.next
) {
1490 *rh
= &thr_act
->special_handler
;
1492 if (thread
&& thr_act
== thread
->top_act
) {
1494 * Temporarily undepress, so target has
1495 * a chance to do locking required to
1496 * block itself in special_handler().
1498 if (thread
->depress_priority
>= 0) {
1499 thread
->priority
= thread
->depress_priority
;
1502 * Use special value -2 to indicate need
1503 * to redepress priority in special_handler
1506 thread
->depress_priority
= -2;
1507 compute_priority(thread
, FALSE
);
1510 act_set_apc(thr_act
);
1515 * These two routines will be enhanced over time to call the general handler registration
1516 * mechanism used by special handlers and alerts. They are hacked in for now to avoid
1517 * having to export the gory details of ASTs to the BSD code right now.
1519 extern thread_apc_handler_t bsd_ast
;
/* NOTE(review): this function's header (orig. lines 1520-1522) is missing
 * from the extraction; the parameter fragments below are kept byte-identical.
 * Registers the (sole supported) BSD APC on the activation by setting the
 * AST_BSD bit, and propagates the AST immediately when the target is the
 * current activation. */
1523 thread_act_t thr_act
,
1524 thread_apc_handler_t apc
)
/* Only the BSD AST handler is supported by this stopgap interface. */
1526 assert(apc
== bsd_ast
);
1527 thread_ast_set(thr_act
, AST_BSD
);
1528 if (thr_act
== current_act())
1529 ast_propagate(thr_act
->ast
);
1530 return KERN_SUCCESS
;
/* NOTE(review): header (orig. 1531-1534) missing from extraction; this is the
 * companion "clear" routine for the BSD APC — it removes the AST_BSD bit. */
1535 thread_act_t thr_act
,
1536 thread_apc_handler_t apc
)
1538 assert(apc
== bsd_ast
);
1539 thread_ast_clear(thr_act
, AST_BSD
);
1540 if (thr_act
== current_act())
/* NOTE(review): the statement governed by this `if` (orig. line 1541) is
 * missing — presumably it lowers the now-stale pending AST on the current
 * processor; confirm against revision history. */
1542 return KERN_SUCCESS
;
1546 * act_set_thread_pool - Assign an activation to a specific thread_pool.
1547 * Fails if the activation is already assigned to another pool.
1548 * If thread_pool == 0, we remove the thr_act from its thread_pool.
1550 * Called the port containing thread_pool already locked.
1551 * Returns the same way.
1553 kern_return_t
act_set_thread_pool(
1554 thread_act_t thr_act
,
1555 ipc_port_t pool_port
)
1557 thread_pool_t thread_pool
;
1560 if (watchacts
& WA_ACT_LNK
)
1561 printf("act_set_thread_pool: %x(%d) -> %x\n",
1562 thr_act
, thr_act
->ref_count
, thread_pool
);
1563 #endif /* MACH_ASSERT */
1565 if (pool_port
== 0) {
1568 if (thr_act
->pool_port
== 0)
1569 return KERN_SUCCESS
;
1570 thread_pool
= &thr_act
->pool_port
->ip_thread_pool
;
1572 for (lact
= &thread_pool
->thr_acts
; *lact
;
1573 lact
= &((*lact
)->thread_pool_next
)) {
1574 if (thr_act
== *lact
) {
1575 *lact
= thr_act
->thread_pool_next
;
1580 thr_act
->pool_port
= 0;
1581 thr_act
->thread_pool_next
= 0;
1582 act_unlock(thr_act
);
1583 act_deallocate(thr_act
);
1584 return KERN_SUCCESS
;
1586 if (thr_act
->pool_port
!= pool_port
) {
1587 thread_pool
= &pool_port
->ip_thread_pool
;
1588 if (thr_act
->pool_port
!= 0) {
1590 if (watchacts
& WA_ACT_LNK
)
1591 printf("act_set_thread_pool found %x!\n",
1592 thr_act
->pool_port
);
1593 #endif /* MACH_ASSERT */
1594 return(KERN_FAILURE
);
1597 thr_act
->pool_port
= pool_port
;
1599 /* The pool gets a ref to the activation -- have
1600 * to inline operation because thr_act is already
1603 act_locked_act_reference(thr_act
);
1605 /* If it is available,
1606 * add it to the thread_pool's available-activation list.
1608 if ((thr_act
->thread
== 0) && (thr_act
->suspend_count
== 0)) {
1609 thr_act
->thread_pool_next
= thread_pool
->thr_acts
;
1610 pool_port
->ip_thread_pool
.thr_acts
= thr_act
;
1611 if (thread_pool
->waiting
)
1612 thread_pool_wakeup(thread_pool
);
1614 act_unlock(thr_act
);
1617 return KERN_SUCCESS
;
1621 * act_locked_act_set_thread_pool- Assign activation to a specific thread_pool.
1622 * Fails if the activation is already assigned to another pool.
1623 * If thread_pool == 0, we remove the thr_act from its thread_pool.
1625 * Called the port containing thread_pool already locked.
1626 * Also called with the thread activation locked.
1627 * Returns the same way.
1629 * This routine is the same as `act_set_thread_pool()' except that it does
1630 * not call `act_deallocate(),' which unconditionally tries to obtain the
1631 * thread activation lock.
1633 kern_return_t
act_locked_act_set_thread_pool(
1634 thread_act_t thr_act
,
1635 ipc_port_t pool_port
)
1637 thread_pool_t thread_pool
;
1640 if (watchacts
& WA_ACT_LNK
)
1641 printf("act_set_thread_pool: %x(%d) -> %x\n",
1642 thr_act
, thr_act
->ref_count
, thread_pool
);
1643 #endif /* MACH_ASSERT */
1645 if (pool_port
== 0) {
1648 if (thr_act
->pool_port
== 0)
1649 return KERN_SUCCESS
;
1650 thread_pool
= &thr_act
->pool_port
->ip_thread_pool
;
1652 for (lact
= &thread_pool
->thr_acts
; *lact
;
1653 lact
= &((*lact
)->thread_pool_next
)) {
1654 if (thr_act
== *lact
) {
1655 *lact
= thr_act
->thread_pool_next
;
1660 thr_act
->pool_port
= 0;
1661 thr_act
->thread_pool_next
= 0;
1662 act_locked_act_deallocate(thr_act
);
1663 return KERN_SUCCESS
;
1665 if (thr_act
->pool_port
!= pool_port
) {
1666 thread_pool
= &pool_port
->ip_thread_pool
;
1667 if (thr_act
->pool_port
!= 0) {
1669 if (watchacts
& WA_ACT_LNK
)
1670 printf("act_set_thread_pool found %x!\n",
1671 thr_act
->pool_port
);
1672 #endif /* MACH_ASSERT */
1673 return(KERN_FAILURE
);
1675 thr_act
->pool_port
= pool_port
;
1677 /* The pool gets a ref to the activation -- have
1678 * to inline operation because thr_act is already
1681 act_locked_act_reference(thr_act
);
1683 /* If it is available,
1684 * add it to the thread_pool's available-activation list.
1686 if ((thr_act
->thread
== 0) && (thr_act
->suspend_count
== 0)) {
1687 thr_act
->thread_pool_next
= thread_pool
->thr_acts
;
1688 pool_port
->ip_thread_pool
.thr_acts
= thr_act
;
1689 if (thread_pool
->waiting
)
1690 thread_pool_wakeup(thread_pool
);
1694 return KERN_SUCCESS
;
1698 * Activation control support routines internal to this file:
1702 * act_execute_returnhandlers() - does just what the name says
1704 * This is called by system-dependent code when it detects that
1705 * thr_act->handlers is non-null while returning into user mode.
1706 * Activations linked onto an thread_pool always have null thr_act->handlers,
1707 * so RPC entry paths need not check it.
1709 void act_execute_returnhandlers(
1714 thread_act_t thr_act
= current_act();
/* Optional debug trace, compiled only under MACH_ASSERT. */
1717 if (watchacts
& WA_ACT_HDLR
)
1718 printf("execute_rtn_hdlrs: thr_act=%x\n", thr_act
);
1719 #endif /* MACH_ASSERT */
/* Clear the APC indicator before draining; a handler may re-set it. */
1722 act_clr_apc(thr_act
);
/* Loop: pop handlers one at a time under the act + thread locks, then run
 * each with no locks held (a handler may detach the activation).
 * NOTE(review): the extraction is missing several lines here (orig. 1720-1721,
 * 1723-1726, 1729, 1732, 1734, 1736-1737) — presumably the loop header, spl
 * level management, and the empty-list early return; confirm against history. */
1727 /* Grab the next returnhandler */
1728 thread
= act_lock_thread(thr_act
);
1730 thread_lock(thread
);
1731 rh
= thr_act
->handlers
;
/* Empty list: drop locks and return (body lines missing from extraction). */
1733 thread_unlock(thread
);
1735 act_unlock_thread(thr_act
);
/* Detach the head handler from the list before releasing the locks. */
1738 thr_act
->handlers
= rh
->next
;
1739 thread_unlock(thread
);
1741 act_unlock_thread(thr_act
);
1744 if (watchacts
& WA_ACT_HDLR
)
1745 printf( (rh
== &thr_act
->special_handler
) ?
1746 "\tspecial_handler\n" : "\thandler=%x\n",
1748 #endif /* MACH_ASSERT */
/* Invoke the handler with no locks held. */
1751 (*rh
->handler
)(rh
, thr_act
);
1756 * special_handler_continue
1758 * Continuation routine for the special handler blocks. It checks
1759 * to see whether there has been any new suspensions. If so, it
1760 * installs the special handler again. Otherwise, it checks to see
1761 * if the current depression needs to be re-instated (it may have
1762 * been temporarily removed in order to get to this point in a hurry).
1765 special_handler_continue(void)
1767 thread_act_t cur_act
= current_act();
1768 thread_t thread
= cur_act
->thread
;
1771 if (cur_act
->suspend_count
)
1772 install_special_handler(cur_act
);
1775 thread_lock(thread
);
1776 if (thread
->depress_priority
== -2) {
1778 * We were temporarily undepressed by
1779 * install_special_handler; restore priority
1782 thread
->depress_priority
= thread
->priority
;
1783 thread
->priority
= thread
->sched_pri
= DEPRESSPRI
;
1785 thread_unlock(thread
);
1788 thread_exception_return();
1792 * special_handler - handles suspension, termination. Called
1793 * with nothing locked. Returns (if it returns) the same way.
/* NOTE(review): the function header and first parameter (orig. 1794-1797,
 * presumably "void special_handler(ReturnHandler *rh, ...)") are missing
 * from this extraction; fragments below are kept byte-identical. */
1798 thread_act_t cur_act
)
1802 thread_t thread
= act_lock_thread(cur_act
);
1803 unsigned alert_bits
;
1804 exception_data_type_t
1805 codes
[EXCEPTION_CODE_MAX
];
1807 kern_return_t exc_kr
;
1809 assert(thread
!= THREAD_NULL
);
/* Optional debug trace, compiled only under MACH_ASSERT. */
1811 if (watchacts
& WA_ACT_HDLR
)
1812 printf("\t\tspecial_handler(thr_act=%x(%d))\n", cur_act
,
1813 (cur_act
? cur_act
->ref_count
: 0));
1814 #endif /* MACH_ASSERT */
1818 thread_lock(thread
);
1819 thread
->state
&= ~TH_ABORT
; /* clear any aborts */
1820 thread_unlock(thread
);
1824 * If someone has killed this invocation,
1825 * invoke the return path with a terminated exception.
1827 if (!cur_act
->active
) {
1828 act_unlock_thread(cur_act
);
1829 act_machine_return(KERN_TERMINATED
);
/* The entire block below is compiled out (see the JMM note): the callout
 * RPC model is not supported in Darwin. */
1832 #ifdef CALLOUT_RPC_MODEL
1834 * JMM - We don't intend to support this RPC model in Darwin.
1835 * We will support inheritance through chains of activations
1836 * on shuttles, but it will be universal and not just for RPC.
1837 * As such, each activation will always have a base shuttle.
1838 * Our RPC model will probably even support the notion of
1839 * alerts (thrown up the chain of activations to affect the
1840 * work done on our behalf), but the unlinking of the shuttles
1841 * will be completely different because we will never have
1845 /* strip server terminated bit */
1846 alert_bits
= cur_act
->alerts
& (~SERVER_TERMINATED
);
1848 /* clear server terminated bit */
1849 cur_act
->alerts
&= ~SERVER_TERMINATED
;
1853 * currently necessary to coordinate with the exception
1856 act_unlock_thread(cur_act
);
1858 /* upcall exception/alert port */
1859 codes
[0] = alert_bits
;
1862 * Exception makes a lot of assumptions. If there is no
1863 * exception handler or the exception reply is broken, the
1864 * thread will be terminated and exception will not return. If
1865 * we decide we don't like that behavior, we need to check
1866 * for the existence of an exception port before we call
1869 exc_kr
= exception( EXC_RPC_ALERT
, codes
, 1 );
1871 /* clear the orphaned and time constraint indications */
1872 cur_act
->alerts
&= ~(ORPHANED
| TIME_CONSTRAINT_UNSATISFIED
);
1874 /* if this orphaned activation should be terminated... */
1875 if (exc_kr
== KERN_RPC_TERMINATE_ORPHAN
) {
1877 * ... terminate the activation
1879 * This is done in two steps. First, the activation is
1880 * disabled (prepared for termination); second, the
1881 * `special_handler()' is executed again -- this time
1882 * to terminate the activation.
1883 * (`act_disable_task_locked()' arranges for the
1884 * additional execution of the `special_handler().')
1888 thread_swap_disable(cur_act
);
1889 #endif /* THREAD_SWAPPER */
1891 /* acquire appropriate locks */
1892 task_lock(cur_act
->task
);
1893 act_lock_thread(cur_act
);
1895 /* detach the activation from its task */
1896 kr
= act_disable_task_locked(cur_act
);
1897 assert( kr
== KERN_SUCCESS
);
1900 task_unlock(cur_act
->task
);
1903 /* acquire activation lock again (released below) */
1904 act_lock_thread(cur_act
);
1906 thread_lock(thread
);
1907 if (thread
->depress_priority
== -2) {
1909 * We were temporarily undepressed by
1910 * install_special_handler; restore priority
1913 thread
->depress_priority
= thread
->priority
;
1914 thread
->priority
= thread
->sched_pri
= DEPRESSPRI
;
1916 thread_unlock(thread
);
1920 #endif /* CALLOUT_RPC_MODEL */
1923 * If we're suspended, go to sleep and wait for someone to wake us up.
1925 if (cur_act
->suspend_count
) {
/* Only block here when no other returnhandlers are pending; otherwise run
 * the continuation directly so they get a chance to execute first. */
1926 if( cur_act
->handlers
== NULL
) {
1927 assert_wait((event_t
)&cur_act
->suspend_count
,
1929 act_unlock_thread(cur_act
);
1930 thread_block(special_handler_continue
);
/* NOTREACHED: thread_block() resumes in special_handler_continue(). */
1933 special_handler_continue();
1936 act_unlock_thread(cur_act
);
1940 * Try to nudge a thr_act into executing its returnhandler chain.
1941 * Ensures that the activation will execute its returnhandlers
1942 * before it next executes any of its user-level code.
1944 * Called with thr_act's act_lock() and "appropriate" thread-related
1945 * locks held. (See act_lock_thread().) Returns same way.
1948 nudge(thread_act_t thr_act
)
1951 if (watchacts
& WA_ACT_HDLR
)
1952 printf("\tact_%x: nudge(%x)\n", current_act(), thr_act
);
1953 #endif /* MACH_ASSERT */
1956 * Don't need to do anything at all if this thr_act isn't the topmost.
1958 if (thr_act
->thread
&& thr_act
->thread
->top_act
== thr_act
) {
1960 * If it's suspended, wake it up.
1961 * This should nudge it even on another CPU.
1963 thread_wakeup((event_t
)&thr_act
->suspend_count
);
1968 * Update activation that belongs to a task created via kernel_task_create().
1972 thread_act_t thr_act
)
1974 pcb_user_to_kernel(thr_act
);
1975 thr_act
->kernel_loading
= TRUE
;
1979 * Already locked: thr_act->task, RPC-related locks for thr_act
1981 * Detach an activation from its task, and prepare it to terminate
1985 act_disable_task_locked(
1986 thread_act_t thr_act
)
1988 thread_t thread
= thr_act
->thread
;
1989 task_t task
= thr_act
->task
;
1992 if (watchacts
& WA_EXIT
) {
1993 printf("act_%x: act_disable_tl(thr_act=%x(%d))%sactive task=%x(%d)",
1994 current_act(), thr_act
, thr_act
->ref_count
,
1995 (thr_act
->active
? " " : " !"),
1996 thr_act
->task
, thr_act
->task
? thr_act
->task
->ref_count
: 0);
1997 if (thr_act
->pool_port
)
1998 printf(", pool_port %x", thr_act
->pool_port
);
2000 (void) dump_act(thr_act
);
2002 #endif /* MACH_ASSERT */
2004 /* This will allow no more control ops on this thr_act. */
2005 thr_act
->active
= 0;
2006 ipc_thr_act_disable(thr_act
);
2008 /* Clean-up any ulocks that are still owned by the thread
2009 * activation (acquired but not released or handed-off).
2011 act_ulock_release_all(thr_act
);
2013 /* When the special_handler gets executed,
2014 * it will see the terminated condition and exit
2017 install_special_handler(thr_act
);
2020 /* If the target happens to be suspended,
2021 * give it a nudge so it can exit.
2023 if (thr_act
->suspend_count
)
2026 /* Drop the thr_act reference taken for being active.
2027 * (There is still at least one reference left:
2028 * the one we were passed.)
2029 * Inline the deallocate because thr_act is locked.
2031 act_locked_act_deallocate(thr_act
);
2033 return(KERN_SUCCESS
);
2037 * act_alert - Register an alert from this activation.
2039 * Each set bit is propagated upward from (but not including) this activation,
2040 * until the top of the chain is reached or the bit is masked.
2043 act_alert(thread_act_t thr_act
, unsigned alerts
)
2045 thread_t thread
= act_lock_thread(thr_act
);
2048 if (watchacts
& WA_ACT_LNK
)
2049 printf("act_alert %x: %x\n", thr_act
, alerts
);
2050 #endif /* MACH_ASSERT */
2053 thread_act_t act_up
= thr_act
;
2054 while ((alerts
) && (act_up
!= thread
->top_act
)) {
2055 act_up
= act_up
->higher
;
2056 alerts
&= act_up
->alert_mask
;
2057 act_up
->alerts
|= alerts
;
2060 * XXXX If we reach the top, and it is blocked in glue
2061 * code, do something to kick it. XXXX
2064 act_unlock_thread(thr_act
);
2066 return KERN_SUCCESS
;
2069 kern_return_t
act_alert_mask(thread_act_t thr_act
, unsigned alert_mask
)
2071 panic("act_alert_mask NOT YET IMPLEMENTED\n");
2072 return KERN_SUCCESS
;
2075 typedef struct GetSetState
{
2076 struct ReturnHandler rh
;
2083 /* Local Forward decls */
/* get_set_state() queues one of the two handlers below on the target
 * activation and blocks until it reports a result. */
2084 kern_return_t
get_set_state(
2085 thread_act_t thr_act
, int flavor
,
2086 thread_state_t state
, int *pcount
,
2087 void (*handler
)(ReturnHandler
*rh
, thread_act_t thr_act
));
2088 void get_state_handler(ReturnHandler
*rh
, thread_act_t thr_act
);
2089 void set_state_handler(ReturnHandler
*rh
, thread_act_t thr_act
);
2092 * get_set_state(thr_act ...)
2094 * General code to install g/set_state handler.
2095 * Called with thr_act's act_lock() and "appropriate"
2096 * thread-related locks held. (See act_lock_thread().)
2099 get_set_state(thread_act_t thr_act
, int flavor
, thread_state_t state
, int *pcount
,
2100 void (*handler
)(ReturnHandler
*rh
, thread_act_t thr_act
))
2105 /* Initialize a small parameter structure */
2106 gss
.rh
.handler
= handler
;
2107 gss
.flavor
= flavor
;
/* NOTE(review): the line assigning the state buffer (orig. 2108, presumably
 * "gss.state = state;") is missing from this extraction. */
2109 gss
.pcount
= pcount
;
2110 gss
.result
= KERN_ABORTED
; /* iff wait below is interrupted */
2112 /* Add it to the thr_act's return handler list */
2113 gss
.rh
.next
= thr_act
->handlers
;
2114 thr_act
->handlers
= &gss
.rh
;
/* Raise the APC AST so the target runs its handler chain soon.
 * NOTE(review): surrounding lines (orig. 2115-2116, 2118-2120) are missing —
 * presumably splsched()/thread_lock()/nudge() bracketing; confirm. */
2117 act_set_apc(thr_act
);
/* Optional debug trace, compiled only under MACH_ASSERT. */
2121 if (watchacts
& WA_ACT_HDLR
) {
2122 printf("act_%x: get_set_state(thr_act=%x flv=%x state=%x ptr@%x=%x)",
2123 current_act(), thr_act
, flavor
, state
,
2124 pcount
, (pcount
? *pcount
: 0));
2125 printf((handler
== get_state_handler
? "get_state_hdlr\n" :
2126 (handler
== set_state_handler
? "set_state_hdlr\n" :
2127 "hndler=%x\n")), handler
);
2129 #endif /* MACH_ASSERT */
2131 assert(thr_act
->thread
); /* Callers must ensure these */
2132 assert(thr_act
!= current_act());
2136 * Wait must be interruptible to avoid deadlock (e.g.) with
2137 * task_suspend() when caller and target of get_set_state()
/* Block until the handler wakes us with a result; if aborted, drain our
 * own returnhandlers and retry. */
2140 assert_wait((event_t
)&gss
, THREAD_ABORTSAFE
);
2141 act_unlock_thread(thr_act
);
2142 thread_block((void (*)(void))0);
2143 if (gss
.result
!= KERN_ABORTED
)
2145 if (current_act()->handlers
)
2146 act_execute_returnhandlers();
2147 act_lock_thread(thr_act
);
2151 if (watchacts
& WA_ACT_HDLR
)
2152 printf("act_%x: get_set_state returns %x\n",
2153 current_act(), gss
.result
);
2154 #endif /* MACH_ASSERT */
2160 set_state_handler(ReturnHandler
*rh
, thread_act_t thr_act
)
2162 GetSetState
*gss
= (GetSetState
*)rh
;
2165 if (watchacts
& WA_ACT_HDLR
)
2166 printf("act_%x: set_state_handler(rh=%x,thr_act=%x)\n",
2167 current_act(), rh
, thr_act
);
2168 #endif /* MACH_ASSERT */
2170 gss
->result
= act_machine_set_state(thr_act
, gss
->flavor
,
2171 gss
->state
, *gss
->pcount
);
2172 thread_wakeup((event_t
)gss
);
2176 get_state_handler(ReturnHandler
*rh
, thread_act_t thr_act
)
2178 GetSetState
*gss
= (GetSetState
*)rh
;
2181 if (watchacts
& WA_ACT_HDLR
)
2182 printf("act_%x: get_state_handler(rh=%x,thr_act=%x)\n",
2183 current_act(), rh
, thr_act
);
2184 #endif /* MACH_ASSERT */
2186 gss
->result
= act_machine_get_state(thr_act
, gss
->flavor
,
2188 (mach_msg_type_number_t
*) gss
->pcount
);
2189 thread_wakeup((event_t
)gss
);
2193 act_get_state_locked(thread_act_t thr_act
, int flavor
, thread_state_t state
,
2194 mach_msg_type_number_t
*pcount
)
2197 if (watchacts
& WA_ACT_HDLR
)
2198 printf("act_%x: act_get_state_L(thr_act=%x,flav=%x,st=%x,pcnt@%x=%x)\n",
2199 current_act(), thr_act
, flavor
, state
, pcount
,
2200 (pcount
? *pcount
: 0));
2201 #endif /* MACH_ASSERT */
2203 return(get_set_state(thr_act
, flavor
, state
, (int*)pcount
, get_state_handler
));
2207 act_set_state_locked(thread_act_t thr_act
, int flavor
, thread_state_t state
,
2208 mach_msg_type_number_t count
)
2211 if (watchacts
& WA_ACT_HDLR
)
2212 printf("act_%x: act_set_state_L(thr_act=%x,flav=%x,st=%x,pcnt@%x=%x)\n",
2213 current_act(), thr_act
, flavor
, state
, count
, count
);
2214 #endif /* MACH_ASSERT */
2216 return(get_set_state(thr_act
, flavor
, state
, (int*)&count
, set_state_handler
));
2220 act_set_state(thread_act_t thr_act
, int flavor
, thread_state_t state
,
2221 mach_msg_type_number_t count
)
2223 if (thr_act
== THR_ACT_NULL
|| thr_act
== current_act())
2224 return(KERN_INVALID_ARGUMENT
);
2226 act_lock_thread(thr_act
);
2227 return(act_set_state_locked(thr_act
, flavor
, state
, count
));
2232 act_get_state(thread_act_t thr_act
, int flavor
, thread_state_t state
,
2233 mach_msg_type_number_t
*pcount
)
2235 if (thr_act
== THR_ACT_NULL
|| thr_act
== current_act())
2236 return(KERN_INVALID_ARGUMENT
);
2238 act_lock_thread(thr_act
);
2239 return(act_get_state_locked(thr_act
, flavor
, state
, pcount
));
2243 * These two should be called at splsched()
2244 * Set/clear indicator to run APC (layered on ASTs)
2247 act_set_apc(thread_act_t thr_act
)
2249 thread_ast_set(thr_act
, AST_APC
);
2250 if (thr_act
== current_act()) {
2251 mp_disable_preemption();
2252 ast_propagate(thr_act
->ast
);
2253 mp_enable_preemption();
2258 act_clr_apc(thread_act_t thr_act
)
2260 thread_ast_clear(thr_act
, AST_APC
);
2264 act_ulock_release_all(thread_act_t thr_act
)
2268 while (!queue_empty(&thr_act
->held_ulocks
)) {
2269 ulock
= (ulock_t
) queue_first(&thr_act
->held_ulocks
);
2270 (void) lock_make_unstable(ulock
, thr_act
);
2271 (void) lock_release_internal(ulock
, thr_act
);
2276 * Provide routines (for export to other components) of things that
2277 * are implemented as macros internally.
2283 return(current_act_fast());
2289 thread_act_t self
= current_act_fast();
2291 act_reference(self
);
2296 mach_thread_self(void)
2298 thread_act_t self
= current_act_fast();
2300 act_reference(self
);
2304 #undef act_reference
2307 thread_act_t thr_act
)
2309 act_reference_fast(thr_act
);
2312 #undef act_deallocate
2315 thread_act_t thr_act
)
2317 act_deallocate_fast(thr_act
);