2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
23 * @OSF_FREE_COPYRIGHT@
26 * Copyright (c) 1993 The University of Utah and
27 * the Center for Software Science (CSS). All rights reserved.
29 * Permission to use, copy, modify and distribute this software and its
30 * documentation is hereby granted, provided that both the copyright
31 * notice and this permission notice appear in all copies of the
32 * software, derivative works or modified versions, and any portions
33 * thereof, and that both notices appear in supporting documentation.
35 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
36 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
37 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 * CSS requests users of this software to return to css-dist@cs.utah.edu any
40 * improvements that they make and grant CSS redistribution rights.
42 * Author: Bryan Ford, University of Utah CSS
44 * Thread_Activation management routines
48 #include <task_swapper.h>
49 #include <mach/kern_return.h>
50 #include <mach/alert.h>
51 #include <kern/etap_macros.h>
52 #include <kern/mach_param.h>
53 #include <kern/zalloc.h>
54 #include <kern/thread.h>
55 #include <kern/thread_swap.h>
56 #include <kern/task.h>
57 #include <kern/task_swap.h>
58 #include <kern/thread_act.h>
59 #include <kern/thread_pool.h>
60 #include <kern/sched_prim.h>
61 #include <kern/misc_protos.h>
62 #include <kern/assert.h>
63 #include <kern/exception.h>
64 #include <kern/ipc_mig.h>
65 #include <kern/ipc_tt.h>
66 #include <kern/profile.h>
67 #include <kern/machine.h>
69 #include <kern/syscall_subr.h>
70 #include <kern/sync_lock.h>
71 #include <kern/mk_sp.h> /*** ??? fix so this can be removed ***/
72 #include <kern/processor.h>
73 #include <mach_prof.h>
77 * Debugging printf control
80 unsigned int watchacts
= 0 /* WA_ALL */
81 ; /* Do-it-yourself & patchable */
85 * Track the number of times we need to swapin a thread to deallocate it.
87 int act_free_swapin
= 0;
90 * Forward declarations for functions local to this file.
92 kern_return_t
act_abort( thread_act_t
, int);
93 void special_handler(ReturnHandler
*, thread_act_t
);
94 void nudge(thread_act_t
);
95 kern_return_t
act_set_state_locked(thread_act_t
, int,
97 mach_msg_type_number_t
);
98 kern_return_t
act_get_state_locked(thread_act_t
, int,
100 mach_msg_type_number_t
*);
101 void act_set_apc(thread_act_t
);
102 void act_clr_apc(thread_act_t
);
103 void act_user_to_kernel(thread_act_t
);
104 void act_ulock_release_all(thread_act_t thr_act
);
106 void install_special_handler_locked(thread_act_t
);
108 static zone_t thr_act_zone
;
111 * Thread interfaces accessed via a thread_activation:
116 * Internal routine to terminate a thread.
117 * Called with task locked.
/*
 * thread_terminate_internal:
 *
 * Internal routine to terminate a thread; called with the task locked.
 * Marks the activation inactive, aborts it, and (if the target is not
 * the current thread) holds/stops it so it enters the kernel to finish
 * termination in the special handler.
 *
 * NOTE(review): this text is a garbled extraction -- original line
 * numbers are fused into the code and structural lines (braces, local
 * declarations, the final return) are missing.  Do not compile as-is.
 */
120 thread_terminate_internal(
121 register thread_act_t thr_act
)
125 struct ipc_port
*iplock
;
/* Keep the activation resident while we tear it down. */
129 thread_swap_disable(thr_act
);
130 #endif /* THREAD_SWAPPER */
132 thread
= act_lock_thread(thr_act
);
/* Already terminated?  Nothing more to do. */
133 if (!thr_act
->active
) {
134 act_unlock_thread(thr_act
);
135 return(KERN_TERMINATED
);
/* Mark the activation dead while the task lock is still held. */
138 act_disable_task_locked(thr_act
);
139 ret
= act_abort(thr_act
,FALSE
);
143 * Make sure this thread enters the kernel
/*
 * If the target is some other thread, hold it, stop/unstop it so it
 * notices the pending termination, then drop the hold again.
 */
145 if (thread
!= current_thread()) {
146 thread_hold(thr_act
);
147 act_unlock_thread(thr_act
);
149 if (thread_stop_wait(thread
))
150 thread_unstop(thread
);
154 (void)act_lock_thread(thr_act
);
155 thread_release(thr_act
);
157 #endif /* NCPUS > 1 */
159 act_unlock_thread(thr_act
);
/*
 * thread_terminate:
 *
 * Terminate a thread; entered with nothing locked.  Rejects a null
 * activation, refuses to let one thread terminate a *different*
 * kernel(-loaded) thread, and otherwise defers to
 * thread_terminate_internal().
 *
 * NOTE(review): garbled extraction -- the function header line, braces,
 * local declarations and the final return are missing; stray original
 * line numbers are fused into the code.
 */
164 * Terminate a thread. Called with nothing locked.
169 register thread_act_t thr_act
)
/* Null activation is a caller error. */
174 if (thr_act
== THR_ACT_NULL
)
175 return KERN_INVALID_ARGUMENT
;
177 task
= thr_act
->task
;
/*
 * A kernel(-loaded) thread may only terminate itself; any other
 * caller gets KERN_FAILURE.
 */
178 if (((task
== kernel_task
) || (thr_act
->kernel_loaded
== TRUE
))
179 && (current_act() != thr_act
)) {
180 return(KERN_FAILURE
);
184 * Take the task lock and then call the internal routine
185 * that terminates a thread (it needs the task locked).
188 ret
= thread_terminate_internal(thr_act
);
192 * If a kernel thread is terminating itself, force an AST here.
193 * Kernel threads don't normally pass through the AST checking
194 * code - and all threads finish their own termination in the
195 * special handler APC.
197 if ( ( thr_act
->task
== kernel_task
||
198 thr_act
->kernel_loaded
== TRUE
) &&
199 current_act() == thr_act
) {
200 ast_taken(AST_APC
, FALSE
);
/* ast_taken() must not return for a self-terminating kernel act. */
201 panic("thread_terminate(): returning from ast_taken() for %x kernel activation\n", thr_act
);
/*
 * NOTE(review): garbled extraction -- the line naming this routine is
 * missing; from the comment below this appears to be the internal
 * recursive hold (suspend) primitive for an activation -- TODO confirm.
 */
210 * Suspend execution of the specified thread.
211 * This is a recursive-style suspension of the thread, a count of
212 * suspends is maintained.
214 * Called with thr_act locked "appropriately" for synchrony with
215 * RPC (see act_lock_thread()). Returns same way.
219 register thread_act_t thr_act
)
/*
 * Only the first hold (count 0 -> 1) installs the special handler,
 * which stops the thread on its way back toward user space.
 */
221 if (thr_act
->suspend_count
++ == 0) {
222 install_special_handler(thr_act
);
/*
 * NOTE(review): garbled extraction -- the function header line is
 * missing; per the comment below this appears to be the internal
 * release primitive that undoes a hold -- TODO confirm.
 */
228 * Decrement internal suspension count for thr_act, setting thread
229 * runnable when count falls to zero.
231 * Called with thr_act locked "appropriately" for synchrony
232 * with RPC (see act_lock_thread()).
236 register thread_act_t thr_act
)
/* Guard against underflow: only decrement a positive count. */
238 if( thr_act
->suspend_count
&&
239 (--thr_act
->suspend_count
== 0) )
/*
 * thread_suspend (user-visible suspend):
 *
 * Bumps the user stop count and, on the first suspension, installs the
 * special handler; waits for a running target to actually stop unless
 * the target is the caller itself.
 *
 * NOTE(review): garbled extraction -- the function header, braces and
 * parts of the conditional structure are missing; stray original line
 * numbers are fused into the code.
 */
245 register thread_act_t thr_act
)
249 if (thr_act
== THR_ACT_NULL
) {
250 return(KERN_INVALID_ARGUMENT
);
252 thread
= act_lock_thread(thr_act
);
/* A dead activation cannot be suspended. */
253 if (!thr_act
->active
) {
254 act_unlock_thread(thr_act
);
255 return(KERN_TERMINATED
);
/* First user-level stop also takes the first internal suspension. */
257 if (thr_act
->user_stop_count
++ == 0 &&
258 thr_act
->suspend_count
++ == 0 ) {
259 install_special_handler(thr_act
);
/*
 * If the target owns the shuttle and is not the caller, wait for it
 * to come to a stop before returning.
 */
261 thr_act
== thread
->top_act
&& thread
!= current_thread()) {
263 act_unlock_thread(thr_act
);
264 (void)thread_wait(thread
);
268 * No need to wait for target thread
270 act_unlock_thread(thr_act
);
275 * Thread is already suspended
277 act_unlock_thread(thr_act
);
279 return(KERN_SUCCESS
);
/*
 * thread_resume (user-visible resume):
 *
 * Decrements the user stop count; when it reaches zero the matching
 * internal suspension is dropped as well.  Returns KERN_TERMINATED
 * for a dead activation.
 *
 * NOTE(review): garbled extraction -- the function header, braces, the
 * failure path for a zero stop count and the final return are missing.
 */
284 register thread_act_t thr_act
)
286 register kern_return_t ret
;
290 if (thr_act
== THR_ACT_NULL
)
291 return(KERN_INVALID_ARGUMENT
);
292 thread
= act_lock_thread(thr_act
);
295 if (thr_act
->active
) {
296 if (thr_act
->user_stop_count
> 0) {
/* Last user resume releases the paired internal suspension. */
297 if( --thr_act
->user_stop_count
== 0 ) {
298 --thr_act
->suspend_count
;
/* Activation is dead: report KERN_TERMINATED instead. */
306 ret
= KERN_TERMINATED
;
307 act_unlock_thread( thr_act
);
/*
 * post_alert:
 *
 * Walk headward along an RPC chain starting at thr_act, OR-ing
 * alert_bits into each activation's alert set and installing the
 * special handler so each one notices.  Caller already holds the act
 * and shuttle locks.
 *
 * NOTE(review): garbled extraction -- the function header line, braces
 * and declaration of `next' are missing.
 */
312 * This routine walks toward the head of an RPC chain starting at
313 * a specified thread activation. An alert bit is set and a special
314 * handler is installed for each thread it encounters.
316 * The target thread act and thread shuttle are already locked.
320 register thread_act_t thr_act
,
321 unsigned alert_bits
)
327 * Chase the chain, setting alert bits and installing
328 * special handlers for each thread act.
330 /*** Not yet SMP safe ***/
331 /*** Worse, where's the activation locking as the chain is walked? ***/
332 for (next
= thr_act
; next
!= THR_ACT_NULL
; next
= next
->higher
) {
333 next
->alerts
|= alert_bits
;
334 install_special_handler_locked(next
);
337 return(KERN_SUCCESS
);
/*
 * thread_depress_abort:
 *
 * Cancel any priority depression on the target's shuttle by delegating
 * to the scheduling policy (_mk_sp_thread_depress_abort).  Returns
 * KERN_INVALID_ARGUMENT for a null act and KERN_TERMINATED for a dead
 * one.
 *
 * NOTE(review): garbled extraction -- braces and the final return of
 * `result' are missing.
 */
341 * thread_depress_abort:
343 * Prematurely abort priority depression if there is one.
346 thread_depress_abort(
347 register thread_act_t thr_act
)
349 register thread_t thread
;
350 kern_return_t result
;
352 if (thr_act
== THR_ACT_NULL
)
353 return (KERN_INVALID_ARGUMENT
);
355 thread
= act_lock_thread(thr_act
);
356 /* if activation is terminating, this operation is not meaningful */
357 if (!thr_act
->active
) {
358 act_unlock_thread(thr_act
);
360 return (KERN_TERMINATED
);
363 result
= _mk_sp_thread_depress_abort(thread
, FALSE
);
365 act_unlock_thread(thr_act
);
372 * Already locked: all RPC-related locks for thr_act (see
373 * act_lock_thread()).
376 act_abort( thread_act_t thr_act
, int chain_break
)
380 struct ipc_port
*iplock
= thr_act
->pool_port
;
383 etap_data_t probe_data
;
385 ETAP_DATA_LOAD(probe_data
[0], thr_act
);
386 ETAP_DATA_LOAD(probe_data
[1], thr_act
->thread
);
387 ETAP_PROBE_DATA(ETAP_P_ACT_ABORT
,
394 * If the target thread activation is not the head...
396 if ( thr_act
->thread
->top_act
!= thr_act
) {
398 * mark the activation for abort,
399 * update the suspend count,
400 * always install the special handler
402 install_special_handler(thr_act
);
404 #ifdef AGRESSIVE_ABORT
405 /* release state buffer for target's outstanding invocation */
406 if (unwind_invoke_state(thr_act
) != KERN_SUCCESS
) {
407 panic("unwind_invoke_state failure");
410 /* release state buffer for target's incoming invocation */
411 if (thr_act
->lower
!= THR_ACT_NULL
) {
412 if (unwind_invoke_state(thr_act
->lower
)
414 panic("unwind_invoke_state failure");
418 /* unlink target thread activation from shuttle chain */
419 if ( thr_act
->lower
== THR_ACT_NULL
) {
421 * This is the root thread activation of the chain.
422 * Unlink the root thread act from the bottom of
425 thr_act
->higher
->lower
= THR_ACT_NULL
;
428 * This thread act is in the middle of the chain.
429 * Unlink the thread act from the middle of the chain.
431 thr_act
->higher
->lower
= thr_act
->lower
;
432 thr_act
->lower
->higher
= thr_act
->higher
;
434 /* set the terminated bit for RPC return processing */
435 thr_act
->lower
->alerts
|= SERVER_TERMINATED
;
438 orphan
= thr_act
->higher
;
440 /* remove the activation from its thread pool */
441 /* (note: this is okay for "rooted threads," too) */
442 act_locked_act_set_thread_pool(thr_act
, IP_NULL
);
444 /* (just to be thorough) release the IP lock */
445 if (iplock
!= IP_NULL
) ip_unlock(iplock
);
447 /* release one more reference for a rooted thread */
448 if (iplock
== IP_NULL
) act_locked_act_deallocate(thr_act
);
450 /* Presumably, the only reference to this activation is
451 * now held by the caller of this routine. */
452 assert(thr_act
->ref_count
== 1);
453 #else /*AGRESSIVE_ABORT*/
454 /* If there is a lower activation in the RPC chain... */
455 if (thr_act
->lower
!= THR_ACT_NULL
) {
456 /* ...indicate the server activation was terminated */
457 thr_act
->lower
->alerts
|= SERVER_TERMINATED
;
459 /* Mark (and process) any orphaned activations */
460 orphan
= thr_act
->higher
;
461 #endif /*AGRESSIVE_ABORT*/
463 /* indicate client of orphaned chain has been terminated */
464 orphan
->alerts
|= CLIENT_TERMINATED
;
467 * Set up posting of alert to headward portion of
470 /*** fix me -- orphan act is not locked ***/
471 post_alert(orphan
, ORPHANED
);
474 * Get attention of head of RPC chain.
476 nudge(thr_act
->thread
->top_act
);
477 return (KERN_SUCCESS
);
481 * If the target thread is the end of the chain, the thread
482 * has to be marked for abort and rip it out of any wait.
485 thread_lock(thr_act
->thread
);
486 if (thr_act
->thread
->top_act
== thr_act
) {
487 thr_act
->thread
->state
|= TH_ABORT
;
488 clear_wait_internal(thr_act
->thread
, THREAD_INTERRUPTED
);
489 thread_unlock(thr_act
->thread
);
491 install_special_handler(thr_act
);
499 register thread_act_t thr_act
)
504 if (thr_act
== THR_ACT_NULL
|| thr_act
== current_act())
505 return (KERN_INVALID_ARGUMENT
);
507 * Lock the target thread and the current thread now,
508 * in case thread_halt() ends up being called below.
510 thread
= act_lock_thread(thr_act
);
511 if (!thr_act
->active
) {
512 act_unlock_thread(thr_act
);
513 return(KERN_TERMINATED
);
516 ret
= act_abort( thr_act
, FALSE
);
517 act_unlock_thread( thr_act
);
523 register thread_act_t thr_act
)
528 if (thr_act
== THR_ACT_NULL
|| thr_act
== current_act())
529 return(KERN_INVALID_ARGUMENT
);
531 thread
= act_lock_thread(thr_act
);
532 if (!thr_act
->active
) {
533 act_unlock_thread(thr_act
);
534 return(KERN_TERMINATED
);
536 if (thread
->top_act
!= thr_act
) {
537 act_unlock_thread(thr_act
);
538 return(KERN_FAILURE
);
543 if ( thread
->at_safe_point
) {
545 * It's an abortable wait, clear it, then
546 * let the thread go and return successfully.
548 clear_wait_internal(thread
, THREAD_INTERRUPTED
);
549 thread_unlock(thread
);
550 act_unlock_thread(thr_act
);
556 * if not stopped at a safepoint, just let it go and return failure.
558 thread_unlock(thread
);
559 act_unlock_thread(thr_act
);
564 /*** backward compatibility hacks ***/
565 #include <mach/thread_info.h>
566 #include <mach/thread_special_ports.h>
567 #include <ipc/ipc_port.h>
568 #include <mach/thread_act_server.h>
572 thread_act_t thr_act
,
573 thread_flavor_t flavor
,
574 thread_info_t thread_info_out
,
575 mach_msg_type_number_t
*thread_info_count
)
577 register thread_t thread
;
578 kern_return_t result
;
580 if (thr_act
== THR_ACT_NULL
)
581 return (KERN_INVALID_ARGUMENT
);
583 thread
= act_lock_thread(thr_act
);
584 if (!thr_act
->active
) {
585 act_unlock_thread(thr_act
);
587 return (KERN_TERMINATED
);
590 result
= thread_info_shuttle(thr_act
, flavor
,
591 thread_info_out
, thread_info_count
);
593 act_unlock_thread(thr_act
);
599 * Routine: thread_get_special_port [kernel call]
601 * Clones a send right for one of the thread's
606 * KERN_SUCCESS Extracted a send right.
607 * KERN_INVALID_ARGUMENT The thread is null.
608 * KERN_FAILURE The thread is dead.
609 * KERN_INVALID_ARGUMENT Invalid special port.
613 thread_get_special_port(
614 thread_act_t thr_act
,
623 if (watchacts
& WA_PORT
)
624 printf("thread_get_special_port(thr_act=%x, which=%x port@%x=%x\n",
625 thr_act
, which
, portp
, (portp
? *portp
: 0));
626 #endif /* MACH_ASSERT */
629 return KERN_INVALID_ARGUMENT
;
630 thread
= act_lock_thread(thr_act
);
632 case THREAD_KERNEL_PORT
:
633 whichp
= &thr_act
->ith_sself
;
637 act_unlock_thread(thr_act
);
638 return KERN_INVALID_ARGUMENT
;
641 if (!thr_act
->active
) {
642 act_unlock_thread(thr_act
);
646 port
= ipc_port_copy_send(*whichp
);
647 act_unlock_thread(thr_act
);
654 * Routine: thread_set_special_port [kernel call]
656 * Changes one of the thread's special ports,
657 * setting it to the supplied send right.
659 * Nothing locked. If successful, consumes
660 * the supplied send right.
662 * KERN_SUCCESS Changed the special port.
663 * KERN_INVALID_ARGUMENT The thread is null.
664 * KERN_FAILURE The thread is dead.
665 * KERN_INVALID_ARGUMENT Invalid special port.
669 thread_set_special_port(
670 thread_act_t thr_act
,
679 if (watchacts
& WA_PORT
)
680 printf("thread_set_special_port(thr_act=%x,which=%x,port=%x\n",
681 thr_act
, which
, port
);
682 #endif /* MACH_ASSERT */
685 return KERN_INVALID_ARGUMENT
;
687 thread
= act_lock_thread(thr_act
);
689 case THREAD_KERNEL_PORT
:
690 whichp
= &thr_act
->ith_self
;
694 act_unlock_thread(thr_act
);
695 return KERN_INVALID_ARGUMENT
;
698 if (!thr_act
->active
) {
699 act_unlock_thread(thr_act
);
705 act_unlock_thread(thr_act
);
708 ipc_port_release_send(old
);
713 * thread state should always be accessible by locking the thread
714 * and copying it. The activation messes things up so for right
715 * now if it's not the top of the chain, use a special handler to
716 * get the information when the shuttle returns to the activation.
720 register thread_act_t thr_act
,
722 thread_state_t state
, /* pointer to OUT array */
723 mach_msg_type_number_t
*state_count
) /*IN/OUT*/
726 thread_t thread
, nthread
;
728 if (thr_act
== THR_ACT_NULL
|| thr_act
== current_act())
729 return (KERN_INVALID_ARGUMENT
);
731 thread
= act_lock_thread(thr_act
);
732 if (!thr_act
->active
) {
733 act_unlock_thread(thr_act
);
734 return(KERN_TERMINATED
);
737 thread_hold(thr_act
);
739 if (!thread
|| thr_act
!= thread
->top_act
)
741 act_unlock_thread(thr_act
);
742 (void)thread_stop_wait(thread
);
743 nthread
= act_lock_thread(thr_act
);
744 if (nthread
== thread
)
746 thread_unstop(thread
);
749 ret
= act_machine_get_state(thr_act
, flavor
,
751 if (thread
&& thr_act
== thread
->top_act
)
752 thread_unstop(thread
);
753 thread_release(thr_act
);
754 act_unlock_thread(thr_act
);
760 * Change thread's machine-dependent state. Called with nothing
761 * locked. Returns same way.
765 register thread_act_t thr_act
,
767 thread_state_t state
,
768 mach_msg_type_number_t state_count
)
771 thread_t thread
, nthread
;
773 if (thr_act
== THR_ACT_NULL
|| thr_act
== current_act())
774 return (KERN_INVALID_ARGUMENT
);
776 * We have no kernel activations, so Utah's MO fails for signals etc.
778 * If we're blocked in the kernel, use non-blocking method, else
779 * pass locked thr_act+thread in to "normal" act_[gs]et_state().
782 thread
= act_lock_thread(thr_act
);
783 if (!thr_act
->active
) {
784 act_unlock_thread(thr_act
);
785 return(KERN_TERMINATED
);
788 thread_hold(thr_act
);
790 if (!thread
|| thr_act
!= thread
->top_act
)
792 act_unlock_thread(thr_act
);
793 (void)thread_stop_wait(thread
);
794 nthread
= act_lock_thread(thr_act
);
795 if (nthread
== thread
)
797 thread_unstop(thread
);
800 ret
= act_machine_set_state(thr_act
, flavor
,
802 if (thread
&& thr_act
== thread
->top_act
)
803 thread_unstop(thread
);
804 thread_release(thr_act
);
805 act_unlock_thread(thr_act
);
811 * Kernel-internal "thread" interfaces used outside this file:
816 thread_act_t source_thr_act
,
817 thread_act_t target_thr_act
)
820 thread_t thread
, nthread
;
822 if (target_thr_act
== THR_ACT_NULL
|| target_thr_act
== current_act())
823 return (KERN_INVALID_ARGUMENT
);
825 thread
= act_lock_thread(target_thr_act
);
826 if (!target_thr_act
->active
) {
827 act_unlock_thread(target_thr_act
);
828 return(KERN_TERMINATED
);
831 thread_hold(target_thr_act
);
833 if (!thread
|| target_thr_act
!= thread
->top_act
)
835 act_unlock_thread(target_thr_act
);
836 (void)thread_stop_wait(thread
);
837 nthread
= act_lock_thread(target_thr_act
);
838 if (nthread
== thread
)
840 thread_unstop(thread
);
843 ret
= act_thread_dup(source_thr_act
, target_thr_act
);
844 if (thread
&& target_thr_act
== thread
->top_act
)
845 thread_unstop(thread
);
846 thread_release(target_thr_act
);
847 act_unlock_thread(target_thr_act
);
856 * Set the status of the specified thread.
857 * Called with (and returns with) no locks held.
861 thread_act_t thr_act
,
863 thread_state_t tstate
,
864 mach_msg_type_number_t count
)
869 thread
= act_lock_thread(thr_act
);
871 assert(thread
->top_act
== thr_act
);
872 kr
= act_machine_set_state(thr_act
, flavor
, tstate
, count
);
873 act_unlock_thread(thr_act
);
880 * Get the status of the specified thread.
884 thread_act_t thr_act
,
886 thread_state_t tstate
,
887 mach_msg_type_number_t
*count
)
892 thread
= act_lock_thread(thr_act
);
894 assert(thread
->top_act
== thr_act
);
895 kr
= act_machine_get_state(thr_act
, flavor
, tstate
, count
);
896 act_unlock_thread(thr_act
);
/*
 * act_init:
 *
 * One-time initialization: create the zone from which all
 * thread_activation structures are allocated, with max/chunk sizes
 * scaled by ACT_MAX / ACT_CHUNK.
 *
 * NOTE(review): garbled extraction -- the function header and the
 * trailing zinit() arguments/closing are missing.
 */
901 * Kernel-internal thread_activation interfaces used outside this file:
905 * act_init() - Initialize activation handling code
910 thr_act_zone
= zinit(
911 sizeof(struct thread_activation
),
912 ACT_MAX
* sizeof(struct thread_activation
), /* XXX */
913 ACT_CHUNK
* sizeof(struct thread_activation
),
920 * act_create - Create a new activation in a specific task.
923 act_create(task_t task
,
924 thread_act_t
*new_act
)
926 thread_act_t thr_act
;
930 thr_act
= (thread_act_t
)zalloc(thr_act_zone
);
932 return(KERN_RESOURCE_SHORTAGE
);
935 if (watchacts
& WA_ACT_LNK
)
936 printf("act_create(task=%x,thr_act@%x=%x)\n",
937 task
, new_act
, thr_act
);
938 #endif /* MACH_ASSERT */
940 /* Start by zeroing everything; then init non-zero items only */
941 bzero((char *)thr_act
, sizeof(*thr_act
));
946 * Take care of the uthread allocation
947 * do it early in order to make KERN_RESOURCE_SHORTAGE
949 * uthread_alloc() will bzero the storage allocated.
951 extern void *uthread_alloc(void);
952 thr_act
->uthread
= uthread_alloc();
953 if(thr_act
->uthread
== 0) {
954 /* Put the thr_act back on the thr_act zone */
955 zfree(thr_act_zone
, (vm_offset_t
)thr_act
);
956 return(KERN_RESOURCE_SHORTAGE
);
959 #endif /* MACH_BSD */
962 * Start with one reference for the caller and one for the
965 act_lock_init(thr_act
);
966 thr_act
->ref_count
= 2;
968 /* Latch onto the task. */
969 thr_act
->task
= task
;
970 task_reference(task
);
972 /* Initialize sigbufp for High-Watermark buffer allocation */
973 thr_act
->r_sigbufp
= (routine_descriptor_t
) &thr_act
->r_sigbuf
;
974 thr_act
->r_sigbuf_size
= sizeof(thr_act
->r_sigbuf
);
977 thr_act
->swap_state
= TH_SW_IN
;
979 thr_act
->kernel_stack_swapped_in
= TRUE
;
980 #endif /* MACH_ASSERT */
981 #endif /* THREAD_SWAPPER */
983 /* special_handler will always be last on the returnhandlers list. */
984 thr_act
->special_handler
.next
= 0;
985 thr_act
->special_handler
.handler
= special_handler
;
988 thr_act
->act_profiled
= FALSE
;
989 thr_act
->act_profiled_own
= FALSE
;
990 thr_act
->profil_buffer
= NULLPROFDATA
;
993 /* Initialize the held_ulocks queue as empty */
994 queue_init(&thr_act
->held_ulocks
);
996 /* Inherit the profiling status of the parent task */
997 act_prof_init(thr_act
, task
);
999 ipc_thr_act_init(task
, thr_act
);
1000 act_machine_create(task
, thr_act
);
1003 * If thr_act created in kernel-loaded task, alter its saved
1004 * state to so indicate
1006 if (task
->kernel_loaded
) {
1007 act_user_to_kernel(thr_act
);
1010 /* Cache the task's map and take a reference to it */
1014 /* Inline vm_map_reference cause we don't want to increment res_count */
1015 mutex_lock(&map
->s_lock
);
1017 assert(map
->res_count
> 0);
1018 assert(map
->ref_count
>= map
->res_count
);
1019 #endif /* TASK_SWAPPER */
1021 mutex_unlock(&map
->s_lock
);
1024 return KERN_SUCCESS
;
1028 * act_free - called when an thr_act's ref_count drops to zero.
1030 * This can only happen after the activation has been reaped, and
1031 * all other references to it have gone away. We can now release
1032 * the last critical resources, unlink the activation from the
1033 * task, and release the reference on the thread shuttle itself.
1035 * Called with activation locked.
1038 int dangerous_bzero
= 1; /* paranoia & safety */
1042 act_free(thread_act_t thr_act
)
1050 if (watchacts
& WA_EXIT
)
1051 printf("act_free(%x(%d)) thr=%x tsk=%x(%d) pport=%x%sactive\n",
1052 thr_act
, thr_act
->ref_count
, thr_act
->thread
,
1054 thr_act
->task
? thr_act
->task
->ref_count
: 0,
1056 thr_act
->active
? " " : " !");
1057 #endif /* MACH_ASSERT */
1061 assert(thr_act
->kernel_stack_swapped_in
);
1062 #endif /* THREAD_SWAPPER */
1064 assert(!thr_act
->active
);
1065 assert(!thr_act
->pool_port
);
1067 task
= thr_act
->task
;
1070 if (thr
= thr_act
->thread
) {
1071 time_value_t user_time
, system_time
;
1073 thread_read_times(thr
, &user_time
, &system_time
);
1074 time_value_add(&task
->total_user_time
, &user_time
);
1075 time_value_add(&task
->total_system_time
, &system_time
);
1077 /* Unlink the thr_act from the task's thr_act list,
1078 * so it doesn't appear in calls to task_threads and such.
1079 * The thr_act still keeps its ref on the task, however.
1081 queue_remove(&task
->thr_acts
, thr_act
, thread_act_t
, thr_acts
);
1082 thr_act
->thr_acts
.next
= NULL
;
1083 task
->thr_act_count
--;
1087 * Thread is supposed to be unswappable by now...
1089 assert(thr_act
->swap_state
== TH_SW_UNSWAPPABLE
||
1090 !thread_swap_unwire_stack
);
1091 #endif /* THREAD_SWAPPER */
1093 task
->res_act_count
--;
1095 task_deallocate(task
);
1096 thread_deallocate(thr
);
1097 act_machine_destroy(thr_act
);
1100 * Must have never really gotten started
1101 * no unlinking from the task and no need
1102 * to free the shuttle.
1105 task_deallocate(task
);
1108 sigbuf_dealloc(thr_act
);
1109 act_prof_deallocate(thr_act
);
1110 ipc_thr_act_terminate(thr_act
);
1113 * Drop the cached map reference.
1114 * Inline version of vm_map_deallocate() because we
1115 * don't want to decrement the map's residence count here.
1118 mutex_lock(&map
->s_lock
);
1120 assert(map
->res_count
>= 0);
1121 assert(map
->ref_count
> map
->res_count
);
1122 #endif /* TASK_SWAPPER */
1123 ref
= --map
->ref_count
;
1124 mutex_unlock(&map
->s_lock
);
1126 vm_map_destroy(map
);
1131 * Free uthread BEFORE the bzero.
1132 * Not doing so will result in a leak.
1134 extern void uthread_free(void *);
1135 void *ut
= thr_act
->uthread
;
1136 thr_act
->uthread
= 0;
1139 #endif /* MACH_BSD */
1142 if (dangerous_bzero
) /* dangerous if we're still using it! */
1143 bzero((char *)thr_act
, sizeof(*thr_act
));
1144 #endif /* MACH_ASSERT */
1145 /* Put the thr_act back on the thr_act zone */
1146 zfree(thr_act_zone
, (vm_offset_t
)thr_act
);
1151 * act_attach - Attach an thr_act to the top of a thread ("push the stack").
1153 * The thread_shuttle must be either the current one or a brand-new one.
1154 * Assumes the thr_act is active but not in use, also, that if it is
1155 * attached to an thread_pool (i.e. the thread_pool pointer is nonzero),
1156 * the thr_act has already been taken off the thread_pool's list.
1158 * Already locked: thr_act plus "appropriate" thread-related locks
1159 * (see act_lock_thread()).
1163 thread_act_t thr_act
,
1165 unsigned init_alert_mask
)
1170 assert(thread
== current_thread() || thread
->top_act
== THR_ACT_NULL
);
1171 if (watchacts
& WA_ACT_LNK
)
1172 printf("act_attach(thr_act %x(%d) thread %x(%d) mask %d)\n",
1173 thr_act
, thr_act
->ref_count
, thread
, thread
->ref_count
,
1175 #endif /* MACH_ASSERT */
1178 * Chain the thr_act onto the thread's thr_act stack.
1179 * Set mask and auto-propagate alerts from below.
1181 thr_act
->ref_count
++;
1182 thr_act
->thread
= thread
;
1183 thr_act
->higher
= THR_ACT_NULL
; /*safety*/
1184 thr_act
->alerts
= 0;
1185 thr_act
->alert_mask
= init_alert_mask
;
1186 lower
= thr_act
->lower
= thread
->top_act
;
1188 if (lower
!= THR_ACT_NULL
) {
1189 lower
->higher
= thr_act
;
1190 thr_act
->alerts
= (lower
->alerts
& init_alert_mask
);
1193 thread
->top_act
= thr_act
;
1199 * Remove the current thr_act from the top of the current thread, i.e.
1200 * "pop the stack". Assumes already locked: thr_act plus "appropriate"
1201 * thread-related locks (see act_lock_thread).
1205 thread_act_t cur_act
)
1207 thread_t cur_thread
= cur_act
->thread
;
1210 if (watchacts
& (WA_EXIT
|WA_ACT_LNK
))
1211 printf("act_detach: thr_act %x(%d), thrd %x(%d) task=%x(%d)\n",
1212 cur_act
, cur_act
->ref_count
,
1213 cur_thread
, cur_thread
->ref_count
,
1215 cur_act
->task
? cur_act
->task
->ref_count
: 0);
1216 #endif /* MACH_ASSERT */
1218 /* Unlink the thr_act from the thread's thr_act stack */
1219 cur_thread
->top_act
= cur_act
->lower
;
1220 cur_act
->thread
= 0;
1221 cur_act
->ref_count
--;
1222 assert(cur_act
->ref_count
> 0);
1224 thread_pool_put_act(cur_act
);
1227 cur_act
->lower
= cur_act
->higher
= THR_ACT_NULL
;
1228 if (cur_thread
->top_act
)
1229 cur_thread
->top_act
->higher
= THR_ACT_NULL
;
1230 #endif /* MACH_ASSERT */
1237 * Synchronize a thread operation with RPC. Called with nothing
1238 * locked. Returns with thr_act locked, plus one of four
1239 * combinations of other locks held:
1240 * none - for new activation not yet associated with thread_pool
1242 * rpc_lock(thr_act->thread) only - for base activation (one
1243 * without pool_port)
1244 * ip_lock(thr_act->pool_port) only - for empty activation (one
1245 * with no associated shuttle)
1246 * both locks - for "active" activation (has shuttle, lives
1248 * If thr_act has an associated shuttle, this function returns
1249 * its address. Otherwise it returns zero.
1253 thread_act_t thr_act
)
1258 * Allow the shuttle cloning code (q.v., when it
1259 * exists :-}) to obtain ip_lock()'s while holding
1264 pport
= thr_act
->pool_port
;
1265 if (!pport
|| ip_lock_try(pport
)) {
1266 if (!thr_act
->thread
)
1268 if (rpc_lock_try(thr_act
->thread
))
1273 act_unlock(thr_act
);
1276 return (thr_act
->thread
);
/*
 * act_unlock_thread:
 *
 * Undo act_lock_thread(): drop the shuttle RPC lock and the pool-port
 * lock if held for this activation, then the act lock itself.
 *
 * NOTE(review): garbled extraction -- the braces around the body are
 * missing.
 */
1280 * Unsynchronize with RPC (i.e., undo an act_lock_thread() call).
1281 * Called with thr_act locked, plus thread locks held that are
1282 * "correct" for thr_act's state. Returns with nothing locked.
1285 act_unlock_thread(thread_act_t thr_act
)
/* Release in the reverse of act_lock_thread()'s acquisition order. */
1287 if (thr_act
->thread
)
1288 rpc_unlock(thr_act
->thread
);
1289 if (thr_act
->pool_port
)
1290 ip_unlock(thr_act
->pool_port
);
1291 act_unlock(thr_act
);
1295 * Synchronize with RPC given a pointer to a shuttle (instead of an
1296 * activation). Called with nothing locked; returns with all
1297 * "appropriate" thread-related locks held (see act_lock_thread()).
1303 thread_act_t thr_act
;
1307 thr_act
= thread
->top_act
;
1310 if (!act_lock_try(thr_act
)) {
1315 if (thr_act
->pool_port
&&
1316 !ip_lock_try(thr_act
->pool_port
)) {
1318 act_unlock(thr_act
);
1328 * Unsynchronize with RPC starting from a pointer to a shuttle.
1329 * Called with RPC-related locks held that are appropriate to
1330 * shuttle's state; any activation is also locked.
1336 thread_act_t thr_act
;
1338 if (thr_act
= thread
->top_act
) {
1339 if (thr_act
->pool_port
)
1340 ip_unlock(thr_act
->pool_port
);
1341 act_unlock(thr_act
);
1349 * If a new activation is given, switch to it. If not,
1350 * switch to the lower activation (pop). Returns the old
1351 * activation. This is for RPC support.
1358 thread_act_t old
, new;
1363 disable_preemption();
1366 thread
= current_thread();
1369 * Find the old and new activation for switch.
1371 old
= thread
->top_act
;
1375 new->thread
= thread
;
1381 assert(new != THR_ACT_NULL
);
1383 assert(new->swap_state
!= TH_SW_OUT
&&
1384 new->swap_state
!= TH_SW_COMING_IN
);
1385 #endif /* THREAD_SWAPPER */
1387 assert(cpu_data
[cpu
].active_thread
== thread
);
1388 active_kloaded
[cpu
] = (new->kernel_loaded
) ? new : 0;
1390 /* This is where all the work happens */
1391 machine_switch_act(thread
, old
, new, cpu
);
1394 * Push or pop an activation on the chain.
1397 act_attach(new, thread
, 0);
1403 enable_preemption();
/*
 * install_special_handler:
 *
 * Thin wrapper: take the associated shuttle's scheduling lock and call
 * install_special_handler_locked().  Caller already holds the
 * RPC-related locks for thr_act.
 *
 * NOTE(review): garbled extraction -- braces and any interrupt-disable
 * bracketing around thread_lock() are missing.
 */
1409 * install_special_handler
1410 * Install the special returnhandler that handles suspension and
1411 * termination, if it hasn't been installed already.
1413 * Already locked: RPC-related locks for thr_act, but not
1414 * scheduling lock (thread_lock()) of the associated thread.
1417 install_special_handler(
1418 thread_act_t thr_act
)
1421 thread_t thread
= thr_act
->thread
;
/* Optional debugging trace, gated by the watchacts bit mask. */
1424 if (watchacts
& WA_ACT_HDLR
)
1425 printf("act_%x: install_special_hdlr(%x)\n",current_act(),thr_act
);
1426 #endif /* MACH_ASSERT */
1429 thread_lock(thread
);
1430 install_special_handler_locked(thr_act
);
1431 thread_unlock(thread
);
1436 * install_special_handler_locked
1437 * Do the work of installing the special_handler.
1439 * Already locked: RPC-related locks for thr_act, plus the
1440 * scheduling lock (thread_lock()) of the associated thread.
1443 install_special_handler_locked(
1444 thread_act_t thr_act
)
1447 thread_t thread
= thr_act
->thread
;
1449 /* The work handler must always be the last ReturnHandler on the list,
1450 because it can do tricky things like detach the thr_act. */
1451 for (rh
= &thr_act
->handlers
; *rh
; rh
= &(*rh
)->next
)
1453 if (rh
!= &thr_act
->special_handler
.next
) {
1454 *rh
= &thr_act
->special_handler
;
1456 if (thread
&& thr_act
== thread
->top_act
) {
1458 * Temporarily undepress, so target has
1459 * a chance to do locking required to
1460 * block itself in special_handler().
1462 if (thread
->depress_priority
>= 0) {
1463 thread
->priority
= thread
->depress_priority
;
1466 * Use special value -2 to indicate need
1467 * to redepress priority in special_handler
1470 thread
->depress_priority
= -2;
1471 compute_priority(thread
, FALSE
);
1474 act_set_apc(thr_act
);
1479 * These two routines will be enhanced over time to call the general handler registration
1480 * mechanism used by special handlers and alerts. They are hack in for now to avoid
1481 * having to export the gory details of ASTs to the BSD code right now.
1483 extern thread_apc_handler_t bsd_ast
;
1487 thread_act_t thr_act
,
1488 thread_apc_handler_t apc
)
1490 assert(apc
== bsd_ast
);
1491 thread_ast_set(thr_act
, AST_BSD
);
1492 if (thr_act
== current_act())
1493 ast_propagate(thr_act
->ast
);
1494 return KERN_SUCCESS
;
1499 thread_act_t thr_act
,
1500 thread_apc_handler_t apc
)
1502 assert(apc
== bsd_ast
);
1503 thread_ast_clear(thr_act
, AST_BSD
);
1504 if (thr_act
== current_act())
1506 return KERN_SUCCESS
;
1510 * act_set_thread_pool - Assign an activation to a specific thread_pool.
1511 * Fails if the activation is already assigned to another pool.
1512 * If thread_pool == 0, we remove the thr_act from its thread_pool.
1514 * Called the port containing thread_pool already locked.
1515 * Returns the same way.
1517 kern_return_t
act_set_thread_pool(
1518 thread_act_t thr_act
,
1519 ipc_port_t pool_port
)
1521 thread_pool_t thread_pool
;
1524 if (watchacts
& WA_ACT_LNK
)
1525 printf("act_set_thread_pool: %x(%d) -> %x\n",
1526 thr_act
, thr_act
->ref_count
, thread_pool
);
1527 #endif /* MACH_ASSERT */
1529 if (pool_port
== 0) {
1532 if (thr_act
->pool_port
== 0)
1533 return KERN_SUCCESS
;
1534 thread_pool
= &thr_act
->pool_port
->ip_thread_pool
;
1536 for (lact
= &thread_pool
->thr_acts
; *lact
;
1537 lact
= &((*lact
)->thread_pool_next
)) {
1538 if (thr_act
== *lact
) {
1539 *lact
= thr_act
->thread_pool_next
;
1544 thr_act
->pool_port
= 0;
1545 thr_act
->thread_pool_next
= 0;
1546 act_unlock(thr_act
);
1547 act_deallocate(thr_act
);
1548 return KERN_SUCCESS
;
1550 if (thr_act
->pool_port
!= pool_port
) {
1551 thread_pool
= &pool_port
->ip_thread_pool
;
1552 if (thr_act
->pool_port
!= 0) {
1554 if (watchacts
& WA_ACT_LNK
)
1555 printf("act_set_thread_pool found %x!\n",
1556 thr_act
->pool_port
);
1557 #endif /* MACH_ASSERT */
1558 return(KERN_FAILURE
);
1561 thr_act
->pool_port
= pool_port
;
1563 /* The pool gets a ref to the activation -- have
1564 * to inline operation because thr_act is already
1567 act_locked_act_reference(thr_act
);
1569 /* If it is available,
1570 * add it to the thread_pool's available-activation list.
1572 if ((thr_act
->thread
== 0) && (thr_act
->suspend_count
== 0)) {
1573 thr_act
->thread_pool_next
= thread_pool
->thr_acts
;
1574 pool_port
->ip_thread_pool
.thr_acts
= thr_act
;
1575 if (thread_pool
->waiting
)
1576 thread_pool_wakeup(thread_pool
);
1578 act_unlock(thr_act
);
1581 return KERN_SUCCESS
;
1585 * act_locked_act_set_thread_pool- Assign activation to a specific thread_pool.
1586 * Fails if the activation is already assigned to another pool.
1587 * If thread_pool == 0, we remove the thr_act from its thread_pool.
1589 * Called the port containing thread_pool already locked.
1590 * Also called with the thread activation locked.
1591 * Returns the same way.
1593 * This routine is the same as `act_set_thread_pool()' except that it does
1594 * not call `act_deallocate(),' which unconditionally tries to obtain the
1595 * thread activation lock.
1597 kern_return_t
act_locked_act_set_thread_pool(
1598 thread_act_t thr_act
,
1599 ipc_port_t pool_port
)
1601 thread_pool_t thread_pool
;
1604 if (watchacts
& WA_ACT_LNK
)
1605 printf("act_set_thread_pool: %x(%d) -> %x\n",
1606 thr_act
, thr_act
->ref_count
, thread_pool
);
1607 #endif /* MACH_ASSERT */
1609 if (pool_port
== 0) {
1612 if (thr_act
->pool_port
== 0)
1613 return KERN_SUCCESS
;
1614 thread_pool
= &thr_act
->pool_port
->ip_thread_pool
;
1616 for (lact
= &thread_pool
->thr_acts
; *lact
;
1617 lact
= &((*lact
)->thread_pool_next
)) {
1618 if (thr_act
== *lact
) {
1619 *lact
= thr_act
->thread_pool_next
;
1624 thr_act
->pool_port
= 0;
1625 thr_act
->thread_pool_next
= 0;
1626 act_locked_act_deallocate(thr_act
);
1627 return KERN_SUCCESS
;
1629 if (thr_act
->pool_port
!= pool_port
) {
1630 thread_pool
= &pool_port
->ip_thread_pool
;
1631 if (thr_act
->pool_port
!= 0) {
1633 if (watchacts
& WA_ACT_LNK
)
1634 printf("act_set_thread_pool found %x!\n",
1635 thr_act
->pool_port
);
1636 #endif /* MACH_ASSERT */
1637 return(KERN_FAILURE
);
1639 thr_act
->pool_port
= pool_port
;
1641 /* The pool gets a ref to the activation -- have
1642 * to inline operation because thr_act is already
1645 act_locked_act_reference(thr_act
);
1647 /* If it is available,
1648 * add it to the thread_pool's available-activation list.
1650 if ((thr_act
->thread
== 0) && (thr_act
->suspend_count
== 0)) {
1651 thr_act
->thread_pool_next
= thread_pool
->thr_acts
;
1652 pool_port
->ip_thread_pool
.thr_acts
= thr_act
;
1653 if (thread_pool
->waiting
)
1654 thread_pool_wakeup(thread_pool
);
1658 return KERN_SUCCESS
;
1662 * Activation control support routines internal to this file:
1666 * act_execute_returnhandlers() - does just what the name says
1668 * This is called by system-dependent code when it detects that
1669 * thr_act->handlers is non-null while returning into user mode.
1670 * Activations linked onto an thread_pool always have null thr_act->handlers,
1671 * so RPC entry paths need not check it.
1673 void act_execute_returnhandlers(
1678 thread_act_t thr_act
= current_act();
1681 if (watchacts
& WA_ACT_HDLR
)
1682 printf("execute_rtn_hdlrs: thr_act=%x\n", thr_act
);
1683 #endif /* MACH_ASSERT */
1686 act_clr_apc(thr_act
);
1691 /* Grab the next returnhandler */
1692 thread
= act_lock_thread(thr_act
);
1694 thread_lock(thread
);
1695 rh
= thr_act
->handlers
;
1697 thread_unlock(thread
);
1699 act_unlock_thread(thr_act
);
1702 thr_act
->handlers
= rh
->next
;
1703 thread_unlock(thread
);
1705 act_unlock_thread(thr_act
);
1708 if (watchacts
& WA_ACT_HDLR
)
1709 printf( (rh
== &thr_act
->special_handler
) ?
1710 "\tspecial_handler\n" : "\thandler=%x\n",
1712 #endif /* MACH_ASSERT */
1715 (*rh
->handler
)(rh
, thr_act
);
1720 * special_handler_continue
1722 * Continuation routine for the special handler blocks. It checks
1723 * to see whether there has been any new suspensions. If so, it
1724 * installs the special handler again. Otherwise, it checks to see
1725 * if the current depression needs to be re-instated (it may have
1726 * been temporarily removed in order to get to this point in a hurry).
1729 special_handler_continue(void)
1731 thread_act_t cur_act
= current_act();
1732 thread_t thread
= cur_act
->thread
;
1735 if (cur_act
->suspend_count
)
1736 install_special_handler(cur_act
);
1739 thread_lock(thread
);
1740 if (thread
->depress_priority
== -2) {
1742 * We were temporarily undepressed by
1743 * install_special_handler; restore priority
1746 thread
->depress_priority
= thread
->priority
;
1747 thread
->priority
= thread
->sched_pri
= DEPRESSPRI
;
1749 thread_unlock(thread
);
1752 thread_exception_return();
1756 * special_handler - handles suspension, termination. Called
1757 * with nothing locked. Returns (if it returns) the same way.
1762 thread_act_t cur_act
)
1766 thread_t thread
= act_lock_thread(cur_act
);
1767 unsigned alert_bits
;
1768 exception_data_type_t
1769 codes
[EXCEPTION_CODE_MAX
];
1771 kern_return_t exc_kr
;
1773 assert(thread
!= THREAD_NULL
);
1775 if (watchacts
& WA_ACT_HDLR
)
1776 printf("\t\tspecial_handler(thr_act=%x(%d))\n", cur_act
,
1777 (cur_act
? cur_act
->ref_count
: 0));
1778 #endif /* MACH_ASSERT */
1782 thread_lock(thread
);
1783 thread
->state
&= ~TH_ABORT
; /* clear any aborts */
1784 thread_unlock(thread
);
1788 * If someone has killed this invocation,
1789 * invoke the return path with a terminated exception.
1791 if (!cur_act
->active
) {
1792 act_unlock_thread(cur_act
);
1793 act_machine_return(KERN_TERMINATED
);
1796 #ifdef CALLOUT_RPC_MODEL
1798 * JMM - We don't intend to support this RPC model in Darwin.
1799 * We will support inheritance through chains of activations
1800 * on shuttles, but it will be universal and not just for RPC.
1801 * As such, each activation will always have a base shuttle.
1802 * Our RPC model will probably even support the notion of
1803 * alerts (thrown up the chain of activations to affect the
1804 * work done on our behalf), but the unlinking of the shuttles
1805 * will be completely difference because we will never have
1809 /* strip server terminated bit */
1810 alert_bits
= cur_act
->alerts
& (~SERVER_TERMINATED
);
1812 /* clear server terminated bit */
1813 cur_act
->alerts
&= ~SERVER_TERMINATED
;
1817 * currently necessary to coordinate with the exception
1820 act_unlock_thread(cur_act
);
1822 /* upcall exception/alert port */
1823 codes
[0] = alert_bits
;
1826 * Exception makes a lot of assumptions. If there is no
1827 * exception handler or the exception reply is broken, the
1828 * thread will be terminated and exception will not return. If
1829 * we decide we don't like that behavior, we need to check
1830 * for the existence of an exception port before we call
1833 exc_kr
= exception( EXC_RPC_ALERT
, codes
, 1 );
1835 /* clear the orphaned and time constraint indications */
1836 cur_act
->alerts
&= ~(ORPHANED
| TIME_CONSTRAINT_UNSATISFIED
);
1838 /* if this orphaned activation should be terminated... */
1839 if (exc_kr
== KERN_RPC_TERMINATE_ORPHAN
) {
1841 * ... terminate the activation
1843 * This is done in two steps. First, the activation is
1844 * disabled (prepared for termination); second, the
1845 * `special_handler()' is executed again -- this time
1846 * to terminate the activation.
1847 * (`act_disable_task_locked()' arranges for the
1848 * additional execution of the `special_handler().')
1852 thread_swap_disable(cur_act
);
1853 #endif /* THREAD_SWAPPER */
1855 /* acquire appropriate locks */
1856 task_lock(cur_act
->task
);
1857 act_lock_thread(cur_act
);
1859 /* detach the activation from its task */
1860 kr
= act_disable_task_locked(cur_act
);
1861 assert( kr
== KERN_SUCCESS
);
1864 task_unlock(cur_act
->task
);
1867 /* acquire activation lock again (released below) */
1868 act_lock_thread(cur_act
);
1870 thread_lock(thread
);
1871 if (thread
->depress_priority
== -2) {
1873 * We were temporarily undepressed by
1874 * install_special_handler; restore priority
1877 thread
->depress_priority
= thread
->priority
;
1878 thread
->priority
= thread
->sched_pri
= DEPRESSPRI
;
1880 thread_unlock(thread
);
1884 #endif /* CALLOUT_RPC_MODEL */
1887 * If we're suspended, go to sleep and wait for someone to wake us up.
1889 if (cur_act
->suspend_count
) {
1890 if( cur_act
->handlers
== NULL
) {
1891 assert_wait((event_t
)&cur_act
->suspend_count
,
1893 act_unlock_thread(cur_act
);
1894 thread_block(special_handler_continue
);
1897 special_handler_continue();
1900 act_unlock_thread(cur_act
);
1904 * Try to nudge a thr_act into executing its returnhandler chain.
1905 * Ensures that the activation will execute its returnhandlers
1906 * before it next executes any of its user-level code.
1908 * Called with thr_act's act_lock() and "appropriate" thread-related
1909 * locks held. (See act_lock_thread().) Returns same way.
1912 nudge(thread_act_t thr_act
)
1915 if (watchacts
& WA_ACT_HDLR
)
1916 printf("\tact_%x: nudge(%x)\n", current_act(), thr_act
);
1917 #endif /* MACH_ASSERT */
1920 * Don't need to do anything at all if this thr_act isn't the topmost.
1922 if (thr_act
->thread
&& thr_act
->thread
->top_act
== thr_act
) {
1924 * If it's suspended, wake it up.
1925 * This should nudge it even on another CPU.
1927 thread_wakeup((event_t
)&thr_act
->suspend_count
);
1932 * Update activation that belongs to a task created via kernel_task_create().
1936 thread_act_t thr_act
)
1938 pcb_user_to_kernel(thr_act
);
1939 thr_act
->kernel_loading
= TRUE
;
1943 * Already locked: thr_act->task, RPC-related locks for thr_act
1945 * Detach an activation from its task, and prepare it to terminate
1949 act_disable_task_locked(
1950 thread_act_t thr_act
)
1952 thread_t thread
= thr_act
->thread
;
1953 task_t task
= thr_act
->task
;
1956 if (watchacts
& WA_EXIT
) {
1957 printf("act_%x: act_disable_tl(thr_act=%x(%d))%sactive task=%x(%d)",
1958 current_act(), thr_act
, thr_act
->ref_count
,
1959 (thr_act
->active
? " " : " !"),
1960 thr_act
->task
, thr_act
->task
? thr_act
->task
->ref_count
: 0);
1961 if (thr_act
->pool_port
)
1962 printf(", pool_port %x", thr_act
->pool_port
);
1964 (void) dump_act(thr_act
);
1966 #endif /* MACH_ASSERT */
1968 /* This will allow no more control ops on this thr_act. */
1969 thr_act
->active
= 0;
1970 ipc_thr_act_disable(thr_act
);
1972 /* Clean-up any ulocks that are still owned by the thread
1973 * activation (acquired but not released or handed-off).
1975 act_ulock_release_all(thr_act
);
1977 /* When the special_handler gets executed,
1978 * it will see the terminated condition and exit
1981 install_special_handler(thr_act
);
1984 /* If the target happens to be suspended,
1985 * give it a nudge so it can exit.
1987 if (thr_act
->suspend_count
)
1990 /* Drop the thr_act reference taken for being active.
1991 * (There is still at least one reference left:
1992 * the one we were passed.)
1993 * Inline the deallocate because thr_act is locked.
1995 act_locked_act_deallocate(thr_act
);
1997 return(KERN_SUCCESS
);
2001 * act_alert - Register an alert from this activation.
2003 * Each set bit is propagated upward from (but not including) this activation,
2004 * until the top of the chain is reached or the bit is masked.
2007 act_alert(thread_act_t thr_act
, unsigned alerts
)
2009 thread_t thread
= act_lock_thread(thr_act
);
2012 if (watchacts
& WA_ACT_LNK
)
2013 printf("act_alert %x: %x\n", thr_act
, alerts
);
2014 #endif /* MACH_ASSERT */
2017 thread_act_t act_up
= thr_act
;
2018 while ((alerts
) && (act_up
!= thread
->top_act
)) {
2019 act_up
= act_up
->higher
;
2020 alerts
&= act_up
->alert_mask
;
2021 act_up
->alerts
|= alerts
;
2024 * XXXX If we reach the top, and it is blocked in glue
2025 * code, do something to kick it. XXXX
2028 act_unlock_thread(thr_act
);
2030 return KERN_SUCCESS
;
2033 kern_return_t
act_alert_mask(thread_act_t thr_act
, unsigned alert_mask
)
2035 panic("act_alert_mask NOT YET IMPLEMENTED\n");
2036 return KERN_SUCCESS
;
2039 typedef struct GetSetState
{
2040 struct ReturnHandler rh
;
2047 /* Local Forward decls */
2048 kern_return_t
get_set_state(
2049 thread_act_t thr_act
, int flavor
,
2050 thread_state_t state
, int *pcount
,
2051 void (*handler
)(ReturnHandler
*rh
, thread_act_t thr_act
));
2052 void get_state_handler(ReturnHandler
*rh
, thread_act_t thr_act
);
2053 void set_state_handler(ReturnHandler
*rh
, thread_act_t thr_act
);
2056 * get_set_state(thr_act ...)
2058 * General code to install g/set_state handler.
2059 * Called with thr_act's act_lock() and "appropriate"
2060 * thread-related locks held. (See act_lock_thread().)
2063 get_set_state(thread_act_t thr_act
, int flavor
, thread_state_t state
, int *pcount
,
2064 void (*handler
)(ReturnHandler
*rh
, thread_act_t thr_act
))
2069 /* Initialize a small parameter structure */
2070 gss
.rh
.handler
= handler
;
2071 gss
.flavor
= flavor
;
2073 gss
.pcount
= pcount
;
2074 gss
.result
= KERN_ABORTED
; /* iff wait below is interrupted */
2076 /* Add it to the thr_act's return handler list */
2077 gss
.rh
.next
= thr_act
->handlers
;
2078 thr_act
->handlers
= &gss
.rh
;
2081 act_set_apc(thr_act
);
2085 if (watchacts
& WA_ACT_HDLR
) {
2086 printf("act_%x: get_set_state(thr_act=%x flv=%x state=%x ptr@%x=%x)",
2087 current_act(), thr_act
, flavor
, state
,
2088 pcount
, (pcount
? *pcount
: 0));
2089 printf((handler
== get_state_handler
? "get_state_hdlr\n" :
2090 (handler
== set_state_handler
? "set_state_hdlr\n" :
2091 "hndler=%x\n")), handler
);
2093 #endif /* MACH_ASSERT */
2095 assert(thr_act
->thread
); /* Callers must ensure these */
2096 assert(thr_act
!= current_act());
2100 * Wait must be interruptible to avoid deadlock (e.g.) with
2101 * task_suspend() when caller and target of get_set_state()
2104 assert_wait((event_t
)&gss
, THREAD_ABORTSAFE
);
2105 act_unlock_thread(thr_act
);
2106 thread_block((void (*)(void))0);
2107 if (gss
.result
!= KERN_ABORTED
)
2109 if (current_act()->handlers
)
2110 act_execute_returnhandlers();
2111 act_lock_thread(thr_act
);
2115 if (watchacts
& WA_ACT_HDLR
)
2116 printf("act_%x: get_set_state returns %x\n",
2117 current_act(), gss
.result
);
2118 #endif /* MACH_ASSERT */
2124 set_state_handler(ReturnHandler
*rh
, thread_act_t thr_act
)
2126 GetSetState
*gss
= (GetSetState
*)rh
;
2129 if (watchacts
& WA_ACT_HDLR
)
2130 printf("act_%x: set_state_handler(rh=%x,thr_act=%x)\n",
2131 current_act(), rh
, thr_act
);
2132 #endif /* MACH_ASSERT */
2134 gss
->result
= act_machine_set_state(thr_act
, gss
->flavor
,
2135 gss
->state
, *gss
->pcount
);
2136 thread_wakeup((event_t
)gss
);
2140 get_state_handler(ReturnHandler
*rh
, thread_act_t thr_act
)
2142 GetSetState
*gss
= (GetSetState
*)rh
;
2145 if (watchacts
& WA_ACT_HDLR
)
2146 printf("act_%x: get_state_handler(rh=%x,thr_act=%x)\n",
2147 current_act(), rh
, thr_act
);
2148 #endif /* MACH_ASSERT */
2150 gss
->result
= act_machine_get_state(thr_act
, gss
->flavor
,
2152 (mach_msg_type_number_t
*) gss
->pcount
);
2153 thread_wakeup((event_t
)gss
);
2157 act_get_state_locked(thread_act_t thr_act
, int flavor
, thread_state_t state
,
2158 mach_msg_type_number_t
*pcount
)
2161 if (watchacts
& WA_ACT_HDLR
)
2162 printf("act_%x: act_get_state_L(thr_act=%x,flav=%x,st=%x,pcnt@%x=%x)\n",
2163 current_act(), thr_act
, flavor
, state
, pcount
,
2164 (pcount
? *pcount
: 0));
2165 #endif /* MACH_ASSERT */
2167 return(get_set_state(thr_act
, flavor
, state
, (int*)pcount
, get_state_handler
));
2171 act_set_state_locked(thread_act_t thr_act
, int flavor
, thread_state_t state
,
2172 mach_msg_type_number_t count
)
2175 if (watchacts
& WA_ACT_HDLR
)
2176 printf("act_%x: act_set_state_L(thr_act=%x,flav=%x,st=%x,pcnt@%x=%x)\n",
2177 current_act(), thr_act
, flavor
, state
, count
, count
);
2178 #endif /* MACH_ASSERT */
2180 return(get_set_state(thr_act
, flavor
, state
, (int*)&count
, set_state_handler
));
2184 act_set_state(thread_act_t thr_act
, int flavor
, thread_state_t state
,
2185 mach_msg_type_number_t count
)
2187 if (thr_act
== THR_ACT_NULL
|| thr_act
== current_act())
2188 return(KERN_INVALID_ARGUMENT
);
2190 act_lock_thread(thr_act
);
2191 return(act_set_state_locked(thr_act
, flavor
, state
, count
));
2196 act_get_state(thread_act_t thr_act
, int flavor
, thread_state_t state
,
2197 mach_msg_type_number_t
*pcount
)
2199 if (thr_act
== THR_ACT_NULL
|| thr_act
== current_act())
2200 return(KERN_INVALID_ARGUMENT
);
2202 act_lock_thread(thr_act
);
2203 return(act_get_state_locked(thr_act
, flavor
, state
, pcount
));
2207 * These two should be called at splsched()
2208 * Set/clear indicator to run APC (layered on ASTs)
2211 act_set_apc(thread_act_t thr_act
)
2217 mp_disable_preemption();
2219 thread_ast_set(thr_act
, AST_APC
);
2220 if (thr_act
== current_act()) {
2221 ast_propagate(thr_act
->ast
);
2222 mp_enable_preemption();
2223 return; /* If we are current act, we can't be on the other processor so leave now */
2227 * Here we want to make sure that the apc is taken quickly. Therefore, we check
2228 * if, and where, the activation is running. If it is not running, we don't need to do
2229 * anything. If it is, we need to signal the other processor to trigger it to
2230 * check the asts. Note that there is a race here and we may end up sending a signal
2231 * after the thread has been switched off. Hopefully this is no big deal.
2234 thread
= thr_act
->thread
; /* Get the thread for the signaled activation */
2235 prssr
= thread
->last_processor
; /* get the processor it was last on */
2236 if(prssr
&& (cpu_data
[prssr
->slot_num
].active_thread
== thread
)) { /* Is the thread active on its processor? */
2237 cause_ast_check(prssr
); /* Yes, kick it */
2240 mp_enable_preemption();
2244 act_clr_apc(thread_act_t thr_act
)
2246 thread_ast_clear(thr_act
, AST_APC
);
2250 act_ulock_release_all(thread_act_t thr_act
)
2254 while (!queue_empty(&thr_act
->held_ulocks
)) {
2255 ulock
= (ulock_t
) queue_first(&thr_act
->held_ulocks
);
2256 (void) lock_make_unstable(ulock
, thr_act
);
2257 (void) lock_release_internal(ulock
, thr_act
);
2262 * Provide routines (for export to other components) of things that
2263 * are implemented as macros insternally.
2269 return(current_act_fast());
2275 thread_act_t self
= current_act_fast();
2277 act_reference(self
);
2282 mach_thread_self(void)
2284 thread_act_t self
= current_act_fast();
2286 act_reference(self
);
2290 #undef act_reference
2293 thread_act_t thr_act
)
2295 act_reference_fast(thr_act
);
2298 #undef act_deallocate
2301 thread_act_t thr_act
)
2303 act_deallocate_fast(thr_act
);