/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Center for Software Science (CSS).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 * Author: Bryan Ford, University of Utah CSS
 *
 * Thread_Activation management routines
 */

#include <cpus.h>
#include <task_swapper.h>
#include <mach/kern_return.h>
#include <mach/alert.h>
#include <kern/etap_macros.h>
#include <kern/mach_param.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <kern/thread_swap.h>
#include <kern/task.h>
#include <kern/task_swap.h>
#include <kern/thread_act.h>
#include <kern/thread_pool.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/exception.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/profile.h>
#include <kern/machine.h>
#include <kern/spl.h>
#include <kern/syscall_subr.h>
#include <kern/sync_lock.h>
#include <kern/sf.h>
#include <kern/mk_sp.h>	/*** ??? fix so this can be removed ***/
#include <mach_prof.h>
#include <mach/rpc.h>

/*
 * Debugging printf control
 */
#if	MACH_ASSERT
unsigned int	watchacts = 0 /* WA_ALL */
		;	/* Do-it-yourself & patchable */
#endif

/*
 * Track the number of times we need to swap in a thread to deallocate it.
 */
int	act_free_swapin = 0;

/*
 * Forward declarations for functions local to this file.
 */
kern_return_t	act_abort(thread_act_t, int);
void		special_handler(ReturnHandler *, thread_act_t);
void		nudge(thread_act_t);
kern_return_t	act_set_state_locked(thread_act_t, int,
			thread_state_t,
			mach_msg_type_number_t);
kern_return_t	act_get_state_locked(thread_act_t, int,
			thread_state_t,
			mach_msg_type_number_t *);
void		act_set_apc(thread_act_t);
void		act_clr_apc(thread_act_t);
void		act_user_to_kernel(thread_act_t);
void		act_ulock_release_all(thread_act_t thr_act);

void		install_special_handler_locked(thread_act_t);

static zone_t	thr_act_zone;

/*
 * Thread interfaces accessed via a thread_activation:
 */


/*
 * Internal routine to terminate a thread.
 * Called with task locked.
 */
kern_return_t
thread_terminate_internal(
	register thread_act_t	thr_act)
{
	thread_t		thread;
	task_t			task;
	struct ipc_port		*iplock;
	kern_return_t		ret;

#if	THREAD_SWAPPER
	thread_swap_disable(thr_act);
#endif	/* THREAD_SWAPPER */

	thread = act_lock_thread(thr_act);
	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return(KERN_TERMINATED);
	}

	act_disable_task_locked(thr_act);
	ret = act_abort(thr_act, FALSE);

#if	NCPUS > 1
	/*
	 * Make sure this thread enters the kernel
	 */
	if (thread != current_thread()) {
		thread_hold(thr_act);
		act_unlock_thread(thr_act);

		if (thread_stop_wait(thread))
			thread_unstop(thread);
		else
			ret = KERN_ABORTED;

		(void)act_lock_thread(thr_act);
		thread_release(thr_act);
	}
#endif	/* NCPUS > 1 */

	act_unlock_thread(thr_act);
	return(ret);
}

/*
 * Terminate a thread.  Called with nothing locked.
 * Returns same way.
 */
kern_return_t
thread_terminate(
	register thread_act_t	thr_act)
{
	task_t		task;
	kern_return_t	ret;

	if (thr_act == THR_ACT_NULL)
		return KERN_INVALID_ARGUMENT;

	task = thr_act->task;
	if (((task == kernel_task) || (thr_act->kernel_loaded == TRUE))
	    && (current_act() != thr_act)) {
		return(KERN_FAILURE);
	}

	/*
	 * Take the task lock and then call the internal routine
	 * that terminates a thread (it needs the task locked).
	 */
	task_lock(task);
	ret = thread_terminate_internal(thr_act);
	task_unlock(task);

	/*
	 * If a kernel thread is terminating itself, force an AST here.
	 * Kernel threads don't normally pass through the AST checking
	 * code - and all threads finish their own termination in the
	 * special handler APC.
	 */
	if (((thr_act->task == kernel_task) || (thr_act->kernel_loaded == TRUE))
	    && (current_act() == thr_act)) {
		ast_taken(FALSE, AST_APC, 0);
		panic("thread_terminate(): returning from ast_taken() for %x kernel activation\n", thr_act);
	}

	return ret;
}

/*
 * thread_hold:
 *
 * Suspend execution of the specified thread.
 * This is a recursive-style suspension of the thread, a count of
 * suspends is maintained.
 *
 * Called with thr_act locked "appropriately" for synchrony with
 * RPC (see act_lock_thread()).  Returns same way.
 */
void
thread_hold(
	register thread_act_t	thr_act)
{
	if (thr_act->suspend_count++ == 0) {
		install_special_handler(thr_act);
		nudge(thr_act);
	}
}

/*
 * Decrement internal suspension count for thr_act, setting thread
 * runnable when count falls to zero.
 *
 * Called with thr_act locked "appropriately" for synchrony
 * with RPC (see act_lock_thread()).
 */
void
thread_release(
	register thread_act_t	thr_act)
{
	if (thr_act->suspend_count &&
	    (--thr_act->suspend_count == 0))
		nudge(thr_act);
}
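
/*
 * Usage sketch (illustrative only, not a routine in this file): a
 * kernel-internal caller brackets hold/release pairs inside
 * act_lock_thread()/act_unlock_thread(), since both routines assume the
 * activation is locked, and the count makes nested holds safe:
 *
 *	thread_t thread;
 *
 *	thread = act_lock_thread(thr_act); -- lock for synchrony with RPC
 *	thread_hold(thr_act);		-- count 0 -> 1: handler installed
 *	thread_hold(thr_act);		-- count 1 -> 2: no extra work
 *	...inspect or manipulate the activation...
 *	thread_release(thr_act);	-- count 2 -> 1
 *	thread_release(thr_act);	-- count 1 -> 0: thread nudged runnable
 *	act_unlock_thread(thr_act);
 */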

kern_return_t
thread_suspend(
	register thread_act_t	thr_act)
{
	thread_t	thread;

	if (thr_act == THR_ACT_NULL) {
		return(KERN_INVALID_ARGUMENT);
	}
	thread = act_lock_thread(thr_act);
	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return(KERN_TERMINATED);
	}
	if (thr_act->user_stop_count++ == 0 &&
	    thr_act->suspend_count++ == 0) {
		install_special_handler(thr_act);
		if (thread &&
		    thr_act == thread->top_act && thread != current_thread()) {
			nudge(thr_act);
			act_unlock_thread(thr_act);
			(void)thread_wait(thread);
		}
		else {
			/*
			 * No need to wait for target thread
			 */
			act_unlock_thread(thr_act);
		}
	}
	else {
		/*
		 * Thread is already suspended
		 */
		act_unlock_thread(thr_act);
	}
	return(KERN_SUCCESS);
}

kern_return_t
thread_resume(
	register thread_act_t	thr_act)
{
	register kern_return_t	ret;
	spl_t			s;
	thread_t		thread;

	if (thr_act == THR_ACT_NULL)
		return(KERN_INVALID_ARGUMENT);
	thread = act_lock_thread(thr_act);
	ret = KERN_SUCCESS;

	if (thr_act->active) {
		if (thr_act->user_stop_count > 0) {
			if (--thr_act->user_stop_count == 0) {
				--thr_act->suspend_count;
				nudge(thr_act);
			}
		}
		else
			ret = KERN_FAILURE;
	}
	else
		ret = KERN_TERMINATED;
	act_unlock_thread(thr_act);
	return ret;
}
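
/*
 * Usage sketch (illustrative only): thread_suspend() and thread_resume()
 * are counted, so they must balance before the target runs user code
 * again.  A hypothetical caller might do:
 *
 *	kern_return_t kr;
 *
 *	kr = thread_suspend(thr_act);	-- user_stop_count 0 -> 1, waits
 *	kr = thread_suspend(thr_act);	-- user_stop_count 1 -> 2, no wait
 *	...examine the stopped thread...
 *	kr = thread_resume(thr_act);	-- user_stop_count 2 -> 1, still stopped
 *	kr = thread_resume(thr_act);	-- user_stop_count 1 -> 0, runs again
 *
 * A further thread_resume() would return KERN_FAILURE, since the stop
 * count cannot go negative.
 */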

/*
 * This routine walks toward the head of an RPC chain starting at
 * a specified thread activation.  An alert bit is set and a special
 * handler is installed for each thread it encounters.
 *
 * The target thread act and thread shuttle are already locked.
 */
kern_return_t
post_alert(
	register thread_act_t	thr_act,
	unsigned		alert_bits)
{
	thread_act_t	next;
	thread_t	thread;

	/*
	 * Chase the chain, setting alert bits and installing
	 * special handlers for each thread act.
	 */
	/*** Not yet SMP safe ***/
	/*** Worse, where's the activation locking as the chain is walked? ***/
	for (next = thr_act; next != THR_ACT_NULL; next = next->higher) {
		next->alerts |= alert_bits;
		install_special_handler_locked(next);
	}

	return(KERN_SUCCESS);
}

/*
 * thread_depress_abort:
 *
 * Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort(
	register thread_act_t	thr_act)
{
	register thread_t	thread;
	kern_return_t		result;
	sched_policy_t		*policy;
	spl_t			s;

	if (thr_act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	/* if activation is terminating, this operation is not meaningful */
	if (!thr_act->active) {
		act_unlock_thread(thr_act);

		return (KERN_TERMINATED);
	}

	s = splsched();
	thread_lock(thread);
	policy = &sched_policy[thread->policy];
	thread_unlock(thread);
	splx(s);

	result = policy->sp_ops.sp_thread_depress_abort(policy, thread);

	act_unlock_thread(thr_act);

	return (result);
}


/*
 * Already locked: all RPC-related locks for thr_act (see
 * act_lock_thread()).
 */
kern_return_t
act_abort(thread_act_t thr_act, int chain_break)
{
	spl_t		spl;
	thread_t	thread;
	struct ipc_port	*iplock = thr_act->pool_port;
	thread_act_t	orphan;
	kern_return_t	kr;
	etap_data_t	probe_data;

	ETAP_DATA_LOAD(probe_data[0], thr_act);
	ETAP_DATA_LOAD(probe_data[1], thr_act->thread);
	ETAP_PROBE_DATA(ETAP_P_ACT_ABORT,
			0,
			current_thread(),
			&probe_data,
			ETAP_DATA_ENTRY*2);

	/*
	 * If the target thread activation is not the head...
	 */
	if (thr_act->thread->top_act != thr_act) {
		/*
		 * mark the activation for abort,
		 * update the suspend count,
		 * always install the special handler
		 */
		install_special_handler(thr_act);

#ifdef AGRESSIVE_ABORT
		/* release state buffer for target's outstanding invocation */
		if (unwind_invoke_state(thr_act) != KERN_SUCCESS) {
			panic("unwind_invoke_state failure");
		}

		/* release state buffer for target's incoming invocation */
		if (thr_act->lower != THR_ACT_NULL) {
			if (unwind_invoke_state(thr_act->lower)
			    != KERN_SUCCESS) {
				panic("unwind_invoke_state failure");
			}
		}

		/* unlink target thread activation from shuttle chain */
		if (thr_act->lower == THR_ACT_NULL) {
			/*
			 * This is the root thread activation of the chain.
			 * Unlink the root thread act from the bottom of
			 * the chain.
			 */
			thr_act->higher->lower = THR_ACT_NULL;
		} else {
			/*
			 * This thread act is in the middle of the chain.
			 * Unlink the thread act from the middle of the chain.
			 */
			thr_act->higher->lower = thr_act->lower;
			thr_act->lower->higher = thr_act->higher;

			/* set the terminated bit for RPC return processing */
			thr_act->lower->alerts |= SERVER_TERMINATED;
		}

		orphan = thr_act->higher;

		/* remove the activation from its thread pool */
		/* (note: this is okay for "rooted threads," too) */
		act_locked_act_set_thread_pool(thr_act, IP_NULL);

		/* (just to be thorough) release the IP lock */
		if (iplock != IP_NULL) ip_unlock(iplock);

		/* release one more reference for a rooted thread */
		if (iplock == IP_NULL) act_locked_act_deallocate(thr_act);

		/* Presumably, the only reference to this activation is
		 * now held by the caller of this routine. */
		assert(thr_act->ref_count == 1);
#else	/*AGRESSIVE_ABORT*/
		/* If there is a lower activation in the RPC chain... */
		if (thr_act->lower != THR_ACT_NULL) {
			/* ...indicate the server activation was terminated */
			thr_act->lower->alerts |= SERVER_TERMINATED;
		}
		/* Mark (and process) any orphaned activations */
		orphan = thr_act->higher;
#endif	/*AGRESSIVE_ABORT*/

		/* indicate client of orphaned chain has been terminated */
		orphan->alerts |= CLIENT_TERMINATED;

		/*
		 * Set up posting of alert to headward portion of
		 * the RPC chain.
		 */
		/*** fix me -- orphan act is not locked ***/
		post_alert(orphan, ORPHANED);

		/*
		 * Get attention of head of RPC chain.
		 */
		nudge(thr_act->thread->top_act);
		return (KERN_SUCCESS);
	}

	/*
	 * If the target thread is the end of the chain, the thread
	 * has to be marked for abort and ripped out of any wait.
	 */
	spl = splsched();
	thread_lock(thr_act->thread);
	if (thr_act->thread->top_act == thr_act) {
		thr_act->thread->state |= TH_ABORT;
		clear_wait_internal(thr_act->thread, THREAD_INTERRUPTED);
		thread_unlock(thr_act->thread);
		splx(spl);
		install_special_handler(thr_act);
		nudge(thr_act);
	}
	return KERN_SUCCESS;
}

kern_return_t
thread_abort(
	register thread_act_t	thr_act)
{
	int		ret;
	thread_t	thread;

	if (thr_act == THR_ACT_NULL || thr_act == current_act())
		return (KERN_INVALID_ARGUMENT);
	/*
	 * Lock the target thread and the current thread now,
	 * in case thread_halt() ends up being called below.
	 */
	thread = act_lock_thread(thr_act);
	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return(KERN_TERMINATED);
	}

	ret = act_abort(thr_act, FALSE);
	act_unlock_thread(thr_act);
	return ret;
}

kern_return_t
thread_abort_safely(
	register thread_act_t	thr_act)
{
	thread_t	thread;
	spl_t		s;

	if (thr_act == THR_ACT_NULL || thr_act == current_act())
		return(KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return(KERN_TERMINATED);
	}
	if (thread->top_act != thr_act) {
		act_unlock_thread(thr_act);
		return(KERN_FAILURE);
	}
	s = splsched();
	thread_lock(thread);

	if (thread->at_safe_point) {
		/*
		 * It's an abortable wait, clear it, then
		 * let the thread go and return successfully.
		 */
		clear_wait_internal(thread, THREAD_INTERRUPTED);
		thread_unlock(thread);
		act_unlock_thread(thr_act);
		splx(s);
		return KERN_SUCCESS;
	}

	/*
	 * If not stopped at a safe point, just let it go and return failure.
	 */
	thread_unlock(thread);
	act_unlock_thread(thr_act);
	splx(s);
	return KERN_FAILURE;
}

/*** backward compatibility hacks ***/
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <ipc/ipc_port.h>
#include <mach/thread_act_server.h>

kern_return_t
thread_info(
	thread_act_t		thr_act,
	thread_flavor_t		flavor,
	thread_info_t		thread_info_out,
	mach_msg_type_number_t	*thread_info_count)
{
	register thread_t	thread;
	kern_return_t		result;

	if (thr_act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	if (!thr_act->active) {
		act_unlock_thread(thr_act);

		return (KERN_TERMINATED);
	}

	result = thread_info_shuttle(thr_act, flavor,
			thread_info_out, thread_info_count);

	act_unlock_thread(thr_act);

	return (result);
}

/*
 *	Routine:	thread_get_special_port [kernel call]
 *	Purpose:
 *		Clones a send right for one of the thread's
 *		special ports.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Extracted a send right.
 *		KERN_INVALID_ARGUMENT	The thread is null.
 *		KERN_FAILURE		The thread is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 */

kern_return_t
thread_get_special_port(
	thread_act_t	thr_act,
	int		which,
	ipc_port_t	*portp)
{
	ipc_port_t	*whichp;
	ipc_port_t	port;
	thread_t	thread;

#if	MACH_ASSERT
	if (watchacts & WA_PORT)
		printf("thread_get_special_port(thr_act=%x, which=%x port@%x=%x\n",
			thr_act, which, portp, (portp ? *portp : 0));
#endif	/* MACH_ASSERT */

	if (!thr_act)
		return KERN_INVALID_ARGUMENT;
	thread = act_lock_thread(thr_act);
	switch (which) {
	case THREAD_KERNEL_PORT:
		whichp = &thr_act->ith_sself;
		break;

	default:
		act_unlock_thread(thr_act);
		return KERN_INVALID_ARGUMENT;
	}

	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return KERN_FAILURE;
	}

	port = ipc_port_copy_send(*whichp);
	act_unlock_thread(thr_act);

	*portp = port;
	return KERN_SUCCESS;
}
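
/*
 * Usage sketch (illustrative only): this routine is normally reached
 * through the MIG-generated thread_act_server interface.  A hypothetical
 * user-space client asking for its own kernel port would look like:
 *
 *	mach_port_t	kport;
 *	kern_return_t	kr;
 *
 *	kr = thread_get_special_port(mach_thread_self(),
 *				     THREAD_KERNEL_PORT, &kport);
 *	-- on KERN_SUCCESS, kport names a new send right that the
 *	-- caller must eventually deallocate.
 */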

/*
 *	Routine:	thread_set_special_port [kernel call]
 *	Purpose:
 *		Changes one of the thread's special ports,
 *		setting it to the supplied send right.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The thread is null.
 *		KERN_FAILURE		The thread is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 */

kern_return_t
thread_set_special_port(
	thread_act_t	thr_act,
	int		which,
	ipc_port_t	port)
{
	ipc_port_t	*whichp;
	ipc_port_t	old;
	thread_t	thread;

#if	MACH_ASSERT
	if (watchacts & WA_PORT)
		printf("thread_set_special_port(thr_act=%x,which=%x,port=%x\n",
			thr_act, which, port);
#endif	/* MACH_ASSERT */

	if (thr_act == 0)
		return KERN_INVALID_ARGUMENT;

	thread = act_lock_thread(thr_act);
	switch (which) {
	case THREAD_KERNEL_PORT:
		/* the settable self port, the same slot the getter reads */
		whichp = &thr_act->ith_sself;
		break;

	default:
		act_unlock_thread(thr_act);
		return KERN_INVALID_ARGUMENT;
	}

	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return KERN_FAILURE;
	}

	old = *whichp;
	*whichp = port;
	act_unlock_thread(thr_act);

	if (IP_VALID(old))
		ipc_port_release_send(old);
	return KERN_SUCCESS;
}

/*
 * Thread state should always be accessible by locking the thread
 * and copying it.  The activation messes things up, so for right
 * now, if it's not the top of the chain, use a special handler to
 * get the information when the shuttle returns to the activation.
 */
kern_return_t
thread_get_state(
	register thread_act_t	thr_act,
	int			flavor,
	thread_state_t		state,	/* pointer to OUT array */
	mach_msg_type_number_t	*state_count)	/*IN/OUT*/
{
	kern_return_t		ret;
	thread_t		thread, nthread;

#if 0	/* Grenoble - why?? */
	if (thr_act == THR_ACT_NULL || thr_act == current_act())
#else
	if (thr_act == THR_ACT_NULL)
#endif
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return(KERN_TERMINATED);
	}

	thread_hold(thr_act);
	while (1) {
		if (!thread || thr_act != thread->top_act)
			break;
		act_unlock_thread(thr_act);
		(void)thread_stop_wait(thread);
		nthread = act_lock_thread(thr_act);
		if (nthread == thread)
			break;
		thread_unstop(thread);
		thread = nthread;
	}
	ret = act_machine_get_state(thr_act, flavor,
					state, state_count);
	if (thread && thr_act == thread->top_act)
		thread_unstop(thread);
	thread_release(thr_act);
	act_unlock_thread(thr_act);

	return(ret);
}
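
/*
 * Usage sketch (illustrative only): a caller supplies a flavor-sized
 * buffer and a count that is updated on return.  The flavor name and
 * state layout are machine-dependent assumptions here:
 *
 *	natural_t		buf[THREAD_STATE_MAX];
 *	mach_msg_type_number_t	count = THREAD_STATE_MAX;
 *	kern_return_t		kr;
 *
 *	kr = thread_get_state(thr_act, MACHINE_THREAD_STATE,
 *			      (thread_state_t)buf, &count);
 *	-- on KERN_SUCCESS, buf holds 'count' words of register state;
 *	-- the stop/unstop loop above guarantees a consistent snapshot.
 */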

/*
 * Change thread's machine-dependent state.  Called with nothing
 * locked.  Returns same way.
 */
kern_return_t
thread_set_state(
	register thread_act_t	thr_act,
	int			flavor,
	thread_state_t		state,
	mach_msg_type_number_t	state_count)
{
	kern_return_t		ret;
	thread_t		thread, nthread;

#if 0	/* Grenoble - why?? */
	if (thr_act == THR_ACT_NULL || thr_act == current_act())
#else
	if (thr_act == THR_ACT_NULL)
#endif
		return (KERN_INVALID_ARGUMENT);
	/*
	 * We have no kernel activations, so Utah's MO fails for signals etc.
	 *
	 * If we're blocked in the kernel, use non-blocking method, else
	 * pass locked thr_act+thread in to "normal" act_[gs]et_state().
	 */

	thread = act_lock_thread(thr_act);
	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return(KERN_TERMINATED);
	}

	thread_hold(thr_act);
	while (1) {
		if (!thread || thr_act != thread->top_act)
			break;
		act_unlock_thread(thr_act);
		(void)thread_stop_wait(thread);
		nthread = act_lock_thread(thr_act);
		if (nthread == thread)
			break;
		thread_unstop(thread);
		thread = nthread;
	}
	ret = act_machine_set_state(thr_act, flavor,
					state, state_count);
	if (thread && thr_act == thread->top_act)
		thread_unstop(thread);
	thread_release(thr_act);
	act_unlock_thread(thr_act);

	return(ret);
}

/*
 * Kernel-internal "thread" interfaces used outside this file:
 */

kern_return_t
thread_dup(
	thread_act_t	source_thr_act,
	thread_act_t	target_thr_act)
{
	kern_return_t	ret;
	thread_t	thread, nthread;

	if (target_thr_act == THR_ACT_NULL || target_thr_act == current_act())
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(target_thr_act);
	if (!target_thr_act->active) {
		act_unlock_thread(target_thr_act);
		return(KERN_TERMINATED);
	}

	thread_hold(target_thr_act);
	while (1) {
		if (!thread || target_thr_act != thread->top_act)
			break;
		act_unlock_thread(target_thr_act);
		(void)thread_stop_wait(thread);
		nthread = act_lock_thread(target_thr_act);
		if (nthread == thread)
			break;
		thread_unstop(thread);
		thread = nthread;
	}
	ret = act_thread_dup(source_thr_act, target_thr_act);
	if (thread && target_thr_act == thread->top_act)
		thread_unstop(thread);
	thread_release(target_thr_act);
	act_unlock_thread(target_thr_act);

	return(ret);
}


/*
 * thread_setstatus:
 *
 * Set the status of the specified thread.
 * Called with (and returns with) no locks held.
 */
kern_return_t
thread_setstatus(
	thread_act_t		thr_act,
	int			flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	count)
{
	kern_return_t		kr;
	thread_t		thread;

	thread = act_lock_thread(thr_act);
	assert(thread);
	assert(thread->top_act == thr_act);
	kr = act_machine_set_state(thr_act, flavor, tstate, count);
	act_unlock_thread(thr_act);
	return(kr);
}

/*
 * thread_getstatus:
 *
 * Get the status of the specified thread.
 */
kern_return_t
thread_getstatus(
	thread_act_t		thr_act,
	int			flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count)
{
	kern_return_t		kr;
	thread_t		thread;

	thread = act_lock_thread(thr_act);
	assert(thread);
	assert(thread->top_act == thr_act);
	kr = act_machine_get_state(thr_act, flavor, tstate, count);
	act_unlock_thread(thr_act);
	return(kr);
}

/*
 * Kernel-internal thread_activation interfaces used outside this file:
 */

/*
 * act_init()	- Initialize activation handling code
 */
void
act_init()
{
	thr_act_zone = zinit(
			sizeof(struct thread_activation),
			ACT_MAX * sizeof(struct thread_activation), /* XXX */
			ACT_CHUNK * sizeof(struct thread_activation),
			"activations");
	act_machine_init();
}


/*
 * act_create	- Create a new activation in a specific task.
 */
kern_return_t
act_create(task_t task,
	   thread_act_t *new_act)
{
	thread_act_t	thr_act;
	int		rc;
	vm_map_t	map;

	thr_act = (thread_act_t)zalloc(thr_act_zone);
	if (thr_act == 0)
		return(KERN_RESOURCE_SHORTAGE);

#if	MACH_ASSERT
	if (watchacts & WA_ACT_LNK)
		printf("act_create(task=%x,thr_act@%x=%x)\n",
			task, new_act, thr_act);
#endif	/* MACH_ASSERT */

	/* Start by zeroing everything; then init non-zero items only */
	bzero((char *)thr_act, sizeof(*thr_act));

#ifdef	MACH_BSD
	{
		/*
		 * Take care of the uthread allocation.
		 * Do it early in order to make KERN_RESOURCE_SHORTAGE
		 * handling trivial.
		 * uthread_alloc() will bzero the storage allocated.
		 */
		extern void *uthread_alloc(void);
		thr_act->uthread = uthread_alloc();
		if (thr_act->uthread == 0) {
			/* Put the thr_act back on the thr_act zone */
			zfree(thr_act_zone, (vm_offset_t)thr_act);
			return(KERN_RESOURCE_SHORTAGE);
		}
	}
#endif	/* MACH_BSD */

	/*
	 * Start with one reference for the caller and one for the
	 * act being alive.
	 */
	act_lock_init(thr_act);
	thr_act->ref_count = 2;

	/* Latch onto the task. */
	thr_act->task = task;
	task_reference(task);

	/* Initialize sigbufp for High-Watermark buffer allocation */
	thr_act->r_sigbufp = (routine_descriptor_t) &thr_act->r_sigbuf;
	thr_act->r_sigbuf_size = sizeof(thr_act->r_sigbuf);

#if	THREAD_SWAPPER
	thr_act->swap_state = TH_SW_IN;
#if	MACH_ASSERT
	thr_act->kernel_stack_swapped_in = TRUE;
#endif	/* MACH_ASSERT */
#endif	/* THREAD_SWAPPER */

	/* special_handler will always be last on the returnhandlers list. */
	thr_act->special_handler.next = 0;
	thr_act->special_handler.handler = special_handler;

#if	MACH_PROF
	thr_act->act_profiled = FALSE;
	thr_act->act_profiled_own = FALSE;
	thr_act->profil_buffer = NULLPROFDATA;
#endif

	/* Initialize the held_ulocks queue as empty */
	queue_init(&thr_act->held_ulocks);

	/* Inherit the profiling status of the parent task */
	act_prof_init(thr_act, task);

	ipc_thr_act_init(task, thr_act);
	act_machine_create(task, thr_act);

	/*
	 * If thr_act was created in a kernel-loaded task, alter its
	 * saved state to so indicate.
	 */
	if (task->kernel_loaded) {
		act_user_to_kernel(thr_act);
	}

	/* Cache the task's map and take a reference to it */
	map = task->map;
	thr_act->map = map;

	/* Inline vm_map_reference because we don't want to increment res_count */
	mutex_lock(&map->s_lock);
#if	TASK_SWAPPER
	assert(map->res_count > 0);
	assert(map->ref_count >= map->res_count);
#endif	/* TASK_SWAPPER */
	map->ref_count++;
	mutex_unlock(&map->s_lock);

	*new_act = thr_act;
	return KERN_SUCCESS;
}
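
/*
 * Usage sketch (illustrative only): act_create() is the kernel-internal
 * building block for the higher-level thread creation paths.  A
 * hypothetical caller creates the activation and then binds it to a
 * shuttle with act_attach():
 *
 *	thread_act_t	act;
 *	kern_return_t	kr;
 *
 *	kr = act_create(task, &act);	-- act arrives with ref_count == 2
 *	if (kr == KERN_SUCCESS) {
 *		-- create or pick a shuttle, then, with the locks
 *		-- described at act_attach() held:
 *		act_attach(act, thread, 0);
 *	}
 */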

/*
 * act_free	- called when a thr_act's ref_count drops to zero.
 *
 * This can only happen after the activation has been reaped, and
 * all other references to it have gone away.  We can now release
 * the last critical resources, unlink the activation from the
 * task, and release the reference on the thread shuttle itself.
 *
 * Called with activation locked.
 */
#if	MACH_ASSERT
int	dangerous_bzero = 1;	/* paranoia & safety */
#endif

void
act_free(thread_act_t thr_act)
{
	task_t		task;
	thread_t	thr;
	vm_map_t	map;
	unsigned int	ref;

#if	MACH_ASSERT
	if (watchacts & WA_EXIT)
		printf("act_free(%x(%d)) thr=%x tsk=%x(%d) pport=%x%sactive\n",
			thr_act, thr_act->ref_count, thr_act->thread,
			thr_act->task,
			thr_act->task ? thr_act->task->ref_count : 0,
			thr_act->pool_port,
			thr_act->active ? " " : " !");
#endif	/* MACH_ASSERT */


#if	THREAD_SWAPPER
	assert(thr_act->kernel_stack_swapped_in);
#endif	/* THREAD_SWAPPER */

	assert(!thr_act->active);
	assert(!thr_act->pool_port);

	task = thr_act->task;
	task_lock(task);

	if (thr = thr_act->thread) {
		time_value_t	user_time, system_time;

		thread_read_times(thr, &user_time, &system_time);
		time_value_add(&task->total_user_time, &user_time);
		time_value_add(&task->total_system_time, &system_time);

		/* Unlink the thr_act from the task's thr_act list,
		 * so it doesn't appear in calls to task_threads and such.
		 * The thr_act still keeps its ref on the task, however.
		 */
		queue_remove(&task->thr_acts, thr_act, thread_act_t, thr_acts);
		thr_act->thr_acts.next = NULL;
		task->thr_act_count--;

#if	THREAD_SWAPPER
		/*
		 * Thread is supposed to be unswappable by now...
		 */
		assert(thr_act->swap_state == TH_SW_UNSWAPPABLE ||
		       !thread_swap_unwire_stack);
#endif	/* THREAD_SWAPPER */

		task->res_act_count--;
		task_unlock(task);
		task_deallocate(task);
		thread_deallocate(thr);
		act_machine_destroy(thr_act);
	} else {
		/*
		 * Must have never really gotten started; there is no
		 * unlinking from the task and no need to free the shuttle.
		 */
		task_unlock(task);
		task_deallocate(task);
	}

	sigbuf_dealloc(thr_act);
	act_prof_deallocate(thr_act);
	ipc_thr_act_terminate(thr_act);

	/*
	 * Drop the cached map reference.
	 * Inline version of vm_map_deallocate() because we
	 * don't want to decrement the map's residence count here.
	 */
	map = thr_act->map;
	mutex_lock(&map->s_lock);
#if	TASK_SWAPPER
	assert(map->res_count >= 0);
	assert(map->ref_count > map->res_count);
#endif	/* TASK_SWAPPER */
	ref = --map->ref_count;
	mutex_unlock(&map->s_lock);
	if (ref == 0)
		vm_map_destroy(map);

#ifdef MACH_BSD
	{
		/*
		 * Free uthread BEFORE the bzero.
		 * Not doing so will result in a leak.
		 */
		extern void uthread_free(void *);
		void *ut = thr_act->uthread;
		thr_act->uthread = 0;
		uthread_free(ut);
	}
#endif	/* MACH_BSD */

#if	MACH_ASSERT
	if (dangerous_bzero)	/* dangerous if we're still using it! */
		bzero((char *)thr_act, sizeof(*thr_act));
#endif	/* MACH_ASSERT */
	/* Put the thr_act back on the thr_act zone */
	zfree(thr_act_zone, (vm_offset_t)thr_act);
}


/*
 * act_attach	- Attach a thr_act to the top of a thread ("push the stack").
 *
 * The thread_shuttle must be either the current one or a brand-new one.
 * Assumes the thr_act is active but not in use.  Also assumes that if it
 * is attached to a thread_pool (i.e. the thread_pool pointer is nonzero),
 * the thr_act has already been taken off the thread_pool's list.
 *
 * Already locked: thr_act plus "appropriate" thread-related locks
 * (see act_lock_thread()).
 */
void
act_attach(
	thread_act_t	thr_act,
	thread_t	thread,
	unsigned	init_alert_mask)
{
	thread_act_t	lower;

#if	MACH_ASSERT
	assert(thread == current_thread() || thread->top_act == THR_ACT_NULL);
	if (watchacts & WA_ACT_LNK)
		printf("act_attach(thr_act %x(%d) thread %x(%d) mask %d)\n",
		       thr_act, thr_act->ref_count, thread, thread->ref_count,
		       init_alert_mask);
#endif	/* MACH_ASSERT */

	/*
	 * Chain the thr_act onto the thread's thr_act stack.
	 * Set mask and auto-propagate alerts from below.
	 */
	thr_act->ref_count++;
	thr_act->thread = thread;
	thr_act->higher = THR_ACT_NULL;	/*safety*/
	thr_act->alerts = 0;
	thr_act->alert_mask = init_alert_mask;
	lower = thr_act->lower = thread->top_act;

	if (lower != THR_ACT_NULL) {
		lower->higher = thr_act;
		thr_act->alerts = (lower->alerts & init_alert_mask);
	}

	thread->top_act = thr_act;
}

/*
 * act_detach
 *
 * Remove the current thr_act from the top of the current thread, i.e.
 * "pop the stack".  Assumes already locked: thr_act plus "appropriate"
 * thread-related locks (see act_lock_thread).
 */
void
act_detach(
	thread_act_t	cur_act)
{
	thread_t	cur_thread = cur_act->thread;

#if	MACH_ASSERT
	if (watchacts & (WA_EXIT|WA_ACT_LNK))
		printf("act_detach: thr_act %x(%d), thrd %x(%d) task=%x(%d)\n",
		       cur_act, cur_act->ref_count,
		       cur_thread, cur_thread->ref_count,
		       cur_act->task,
		       cur_act->task ? cur_act->task->ref_count : 0);
#endif	/* MACH_ASSERT */

	/* Unlink the thr_act from the thread's thr_act stack */
	cur_thread->top_act = cur_act->lower;
	cur_act->thread = 0;
	cur_act->ref_count--;
	assert(cur_act->ref_count > 0);

	thread_pool_put_act(cur_act);

#if	MACH_ASSERT
	cur_act->lower = cur_act->higher = THR_ACT_NULL;
	if (cur_thread->top_act)
		cur_thread->top_act->higher = THR_ACT_NULL;
#endif	/* MACH_ASSERT */

	return;
}
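
/*
 * Usage sketch (illustrative only): act_attach() and act_detach()
 * maintain a strict LIFO stack of activations on a shuttle, which is
 * what switch_act() below relies on for RPC.  With hypothetical
 * client_act/server_act names for the two ends of an RPC link, and the
 * locks named above held:
 *
 *	-- push: server activation becomes the new top
 *	act_attach(server_act, thread, 0);
 *	assert(thread->top_act == server_act);
 *	assert(server_act->lower == client_act);
 *
 *	-- pop: server returns, client activation is the top again
 *	act_detach(server_act);
 *	assert(thread->top_act == client_act);
 */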


/*
 * Synchronize a thread operation with RPC.  Called with nothing
 * locked.  Returns with thr_act locked, plus one of four
 * combinations of other locks held:
 *	none - for new activation not yet associated with thread_pool
 *		or shuttle
 *	rpc_lock(thr_act->thread) only - for base activation (one
 *		without pool_port)
 *	ip_lock(thr_act->pool_port) only - for empty activation (one
 *		with no associated shuttle)
 *	both locks - for "active" activation (has shuttle, lives
 *		on thread_pool)
 * If thr_act has an associated shuttle, this function returns
 * its address.  Otherwise it returns zero.
 */
thread_t
act_lock_thread(
	thread_act_t	thr_act)
{
	ipc_port_t	pport;

	/*
	 * Allow the shuttle cloning code (q.v., when it
	 * exists :-}) to obtain ip_lock()'s while holding
	 * an rpc_lock().
	 */
	while (1) {
		act_lock(thr_act);
		pport = thr_act->pool_port;
		if (!pport || ip_lock_try(pport)) {
			if (!thr_act->thread)
				break;
			if (rpc_lock_try(thr_act->thread))
				break;
			if (pport)
				ip_unlock(pport);
		}
		act_unlock(thr_act);
		mutex_pause();
	}
	return (thr_act->thread);
}

/*
 * Unsynchronize with RPC (i.e., undo an act_lock_thread() call).
 * Called with thr_act locked, plus thread locks held that are
 * "correct" for thr_act's state.  Returns with nothing locked.
 */
void
act_unlock_thread(thread_act_t	thr_act)
{
	if (thr_act->thread)
		rpc_unlock(thr_act->thread);
	if (thr_act->pool_port)
		ip_unlock(thr_act->pool_port);
	act_unlock(thr_act);
}
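
/*
 * Usage sketch (illustrative only): nearly every control operation in
 * this file follows the same bracket -- take all RPC-related locks,
 * re-check liveness, then undo whatever act_lock_thread() took:
 *
 *	thread_t thread;
 *
 *	thread = act_lock_thread(thr_act);	-- may return zero (no shuttle)
 *	if (!thr_act->active) {
 *		act_unlock_thread(thr_act);
 *		return(KERN_TERMINATED);
 *	}
 *	...operate on thr_act (and thread, if non-null)...
 *	act_unlock_thread(thr_act);	-- releases the same set of locks
 */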

/*
 * Synchronize with RPC given a pointer to a shuttle (instead of an
 * activation).  Called with nothing locked; returns with all
 * "appropriate" thread-related locks held (see act_lock_thread()).
 */
thread_act_t
thread_lock_act(
	thread_t	thread)
{
	thread_act_t	thr_act;

	while (1) {
		rpc_lock(thread);
		thr_act = thread->top_act;
		if (!thr_act)
			break;
		if (!act_lock_try(thr_act)) {
			rpc_unlock(thread);
			mutex_pause();
			continue;
		}
		if (thr_act->pool_port &&
		    !ip_lock_try(thr_act->pool_port)) {
			rpc_unlock(thread);
			act_unlock(thr_act);
			mutex_pause();
			continue;
		}
		break;
	}
	return (thr_act);
}

/*
 * Unsynchronize with RPC starting from a pointer to a shuttle.
 * Called with RPC-related locks held that are appropriate to
 * shuttle's state; any activation is also locked.
 */
void
thread_unlock_act(
	thread_t	thread)
{
	thread_act_t	thr_act;

	if (thr_act = thread->top_act) {
		if (thr_act->pool_port)
			ip_unlock(thr_act->pool_port);
		act_unlock(thr_act);
	}
	rpc_unlock(thread);
}

/*
 * switch_act
 *
 * If a new activation is given, switch to it.  If not,
 * switch to the lower activation (pop).  Returns the old
 * activation.  This is for RPC support.
 */
thread_act_t
switch_act(
	thread_act_t	act)
{
	thread_t	thread;
	thread_act_t	old, new;
	unsigned	cpu;
	spl_t		spl;


	disable_preemption();

	cpu = cpu_number();
	thread = current_thread();

	/*
	 * Find the old and new activation for switch.
	 */
	old = thread->top_act;

	if (act) {
		new = act;
		new->thread = thread;
	}
	else {
		new = old->lower;
	}

	assert(new != THR_ACT_NULL);
#if	THREAD_SWAPPER
	assert(new->swap_state != TH_SW_OUT &&
	       new->swap_state != TH_SW_COMING_IN);
#endif	/* THREAD_SWAPPER */

	assert(cpu_data[cpu].active_thread == thread);
	active_kloaded[cpu] = (new->kernel_loaded) ? new : 0;

	/* This is where all the work happens */
	machine_switch_act(thread, old, new, cpu);

	/*
	 * Push or pop an activation on the chain.
	 */
	if (act) {
		act_attach(new, thread, 0);
	}
	else {
		act_detach(old);
	}

	enable_preemption();

	return(old);
}

/*
 * install_special_handler
 *	Install the special returnhandler that handles suspension and
 *	termination, if it hasn't been installed already.
 *
 * Already locked: RPC-related locks for thr_act, but not
 * scheduling lock (thread_lock()) of the associated thread.
 */
void
install_special_handler(
	thread_act_t	thr_act)
{
	spl_t		spl;
	thread_t	thread = thr_act->thread;

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("act_%x: install_special_hdlr(%x)\n", current_act(), thr_act);
#endif	/* MACH_ASSERT */

	spl = splsched();
	thread_lock(thread);
	install_special_handler_locked(thr_act);
	thread_unlock(thread);
	splx(spl);
}

/*
 * install_special_handler_locked
 *	Do the work of installing the special_handler.
 *
 * Already locked: RPC-related locks for thr_act, plus the
 * scheduling lock (thread_lock()) of the associated thread.
 */
void
install_special_handler_locked(
	thread_act_t	thr_act)
{
	ReturnHandler	**rh;
	thread_t	thread = thr_act->thread;

	/* The special handler must always be the last ReturnHandler on the
	   list, because it can do tricky things like detach the thr_act. */
	for (rh = &thr_act->handlers; *rh; rh = &(*rh)->next)
		/* */ ;
	if (rh != &thr_act->special_handler.next) {
		*rh = &thr_act->special_handler;
	}
	if (thread && thr_act == thread->top_act) {
		/*
		 * Temporarily undepress, so target has
		 * a chance to do locking required to
		 * block itself in special_handler().
		 */
		if (thread->depress_priority >= 0) {
			thread->priority = thread->depress_priority;

			/*
			 * Use special value -2 to indicate need
			 * to redepress priority in special_handler
			 * as thread blocks
			 */
			thread->depress_priority = -2;
			compute_priority(thread, FALSE);
		}
	}
	act_set_apc(thr_act);
}
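
/*
 * Usage sketch (illustrative only): other code queues its own
 * ReturnHandler ahead of the special handler by prepending to
 * thr_act->handlers and raising the APC AST; get_set_state() below is
 * the in-file instance of this pattern.  Schematically, with a
 * hypothetical handler record my_rh and function my_handler_fn:
 *
 *	ReturnHandler	my_rh;
 *	spl_t		s;
 *
 *	-- with act_lock_thread(thr_act) already held:
 *	my_rh.handler = my_handler_fn;
 *	my_rh.next = thr_act->handlers;	-- prepend; special_handler stays last
 *	thr_act->handlers = &my_rh;
 *	s = splsched();
 *	act_set_apc(thr_act);		-- make the target run its handlers
 *	splx(s);
 *	nudge(thr_act);			-- wake it if it was suspended
 */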

/*
 * JMM -
 * These two routines will be enhanced over time to call the general handler
 * registration mechanism used by special handlers and alerts.  They are
 * hacked in for now to avoid having to export the gory details of ASTs to
 * the BSD code right now.
 */
extern thread_apc_handler_t	bsd_ast;

kern_return_t
thread_apc_set(
	thread_act_t		thr_act,
	thread_apc_handler_t	apc)
{
	assert(apc == bsd_ast);
	thread_ast_set(thr_act, AST_BSD);
	if (thr_act == current_act())
		ast_propagate(thr_act->ast);
	return KERN_SUCCESS;
}

kern_return_t
thread_apc_clear(
	thread_act_t		thr_act,
	thread_apc_handler_t	apc)
{
	assert(apc == bsd_ast);
	thread_ast_clear(thr_act, AST_BSD);
	if (thr_act == current_act())
		ast_off(AST_BSD);
	return KERN_SUCCESS;
}

/*
 * act_set_thread_pool	- Assign an activation to a specific thread_pool.
 *	Fails if the activation is already assigned to another pool.
 *	If thread_pool == 0, we remove the thr_act from its thread_pool.
 *
 * Called with the port containing thread_pool already locked.
 * Returns the same way.
 */
kern_return_t act_set_thread_pool(
	thread_act_t	thr_act,
	ipc_port_t	pool_port)
{
	thread_pool_t	thread_pool;

#if	MACH_ASSERT
	if (watchacts & WA_ACT_LNK)
		printf("act_set_thread_pool: %x(%d) -> %x\n",
		       thr_act, thr_act->ref_count, pool_port);
#endif	/* MACH_ASSERT */

	if (pool_port == 0) {
		thread_act_t	*lact;

		if (thr_act->pool_port == 0)
			return KERN_SUCCESS;
		thread_pool = &thr_act->pool_port->ip_thread_pool;

		for (lact = &thread_pool->thr_acts; *lact;
		     lact = &((*lact)->thread_pool_next)) {
			if (thr_act == *lact) {
				*lact = thr_act->thread_pool_next;
				break;
			}
		}
		act_lock(thr_act);
		thr_act->pool_port = 0;
		thr_act->thread_pool_next = 0;
		act_unlock(thr_act);
		act_deallocate(thr_act);
		return KERN_SUCCESS;
	}
	if (thr_act->pool_port != pool_port) {
		thread_pool = &pool_port->ip_thread_pool;
		if (thr_act->pool_port != 0) {
#if	MACH_ASSERT
			if (watchacts & WA_ACT_LNK)
				printf("act_set_thread_pool found %x!\n",
				       thr_act->pool_port);
#endif	/* MACH_ASSERT */
			return(KERN_FAILURE);
		}
		act_lock(thr_act);
		thr_act->pool_port = pool_port;

		/* The pool gets a ref to the activation -- have
		 * to inline operation because thr_act is already
		 * locked.
		 */
		act_locked_act_reference(thr_act);

		/* If it is available,
		 * add it to the thread_pool's available-activation list.
		 */
		if ((thr_act->thread == 0) && (thr_act->suspend_count == 0)) {
			thr_act->thread_pool_next = thread_pool->thr_acts;
			pool_port->ip_thread_pool.thr_acts = thr_act;
			if (thread_pool->waiting)
				thread_pool_wakeup(thread_pool);
		}
		act_unlock(thr_act);
	}

	return KERN_SUCCESS;
}

/*
 * act_locked_act_set_thread_pool - Assign activation to a specific thread_pool.
 *	Fails if the activation is already assigned to another pool.
 *	If thread_pool == 0, we remove the thr_act from its thread_pool.
 *
 * Called with the port containing thread_pool already locked.
 * Also called with the thread activation locked.
 * Returns the same way.
 *
 * This routine is the same as `act_set_thread_pool()' except that it does
 * not call `act_deallocate(),' which unconditionally tries to obtain the
 * thread activation lock.
 */
kern_return_t act_locked_act_set_thread_pool(
	thread_act_t	thr_act,
	ipc_port_t	pool_port)
{
	thread_pool_t	thread_pool;

#if	MACH_ASSERT
	if (watchacts & WA_ACT_LNK)
		printf("act_locked_act_set_thread_pool: %x(%d) -> %x\n",
		       thr_act, thr_act->ref_count, pool_port);
#endif	/* MACH_ASSERT */

	if (pool_port == 0) {
		thread_act_t	*lact;

		if (thr_act->pool_port == 0)
			return KERN_SUCCESS;
		thread_pool = &thr_act->pool_port->ip_thread_pool;

		for (lact = &thread_pool->thr_acts; *lact;
		     lact = &((*lact)->thread_pool_next)) {
			if (thr_act == *lact) {
				*lact = thr_act->thread_pool_next;
				break;
			}
		}

		thr_act->pool_port = 0;
		thr_act->thread_pool_next = 0;
		act_locked_act_deallocate(thr_act);
		return KERN_SUCCESS;
	}
	if (thr_act->pool_port != pool_port) {
		thread_pool = &pool_port->ip_thread_pool;
		if (thr_act->pool_port != 0) {
#if	MACH_ASSERT
			if (watchacts & WA_ACT_LNK)
				printf("act_locked_act_set_thread_pool found %x!\n",
				       thr_act->pool_port);
#endif	/* MACH_ASSERT */
			return(KERN_FAILURE);
		}
		thr_act->pool_port = pool_port;

		/* The pool gets a ref to the activation -- have
		 * to inline operation because thr_act is already
		 * locked.
		 */
		act_locked_act_reference(thr_act);

		/* If it is available,
		 * add it to the thread_pool's available-activation list.
		 */
		if ((thr_act->thread == 0) && (thr_act->suspend_count == 0)) {
			thr_act->thread_pool_next = thread_pool->thr_acts;
			pool_port->ip_thread_pool.thr_acts = thr_act;
			if (thread_pool->waiting)
				thread_pool_wakeup(thread_pool);
		}
	}

	return KERN_SUCCESS;
}

/*
 * Activation control support routines internal to this file:
 */

/*
 * act_execute_returnhandlers()	- does just what the name says
 *
 * This is called by system-dependent code when it detects that
 * thr_act->handlers is non-null while returning into user mode.
 * Activations linked onto a thread_pool always have null thr_act->handlers,
 * so RPC entry paths need not check it.
 */
void act_execute_returnhandlers(void)
{
	spl_t		s;
	thread_t	thread;
	thread_act_t	thr_act = current_act();

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("execute_rtn_hdlrs: thr_act=%x\n", thr_act);
#endif	/* MACH_ASSERT */

	s = splsched();
	act_clr_apc(thr_act);
	spllo();
	while (1) {
		ReturnHandler	*rh;

		/* Grab the next returnhandler */
		thread = act_lock_thread(thr_act);
		(void)splsched();
		thread_lock(thread);
		rh = thr_act->handlers;
		if (!rh) {
			thread_unlock(thread);
			splx(s);
			act_unlock_thread(thr_act);
			return;
		}
		thr_act->handlers = rh->next;
		thread_unlock(thread);
		spllo();
		act_unlock_thread(thr_act);

#if	MACH_ASSERT
		if (watchacts & WA_ACT_HDLR)
			printf((rh == &thr_act->special_handler) ?
				"\tspecial_handler\n" : "\thandler=%x\n",
				rh->handler);
#endif	/* MACH_ASSERT */

		/* Execute it */
		(*rh->handler)(rh, thr_act);
	}
}

/*
 * special_handler_continue
 *
 * Continuation routine for the special handler blocks.  It checks
 * to see whether there have been any new suspensions.  If so, it
 * installs the special handler again.  Otherwise, it checks to see
 * if the current depression needs to be re-instated (it may have
 * been temporarily removed in order to get to this point in a hurry).
 */
void
special_handler_continue(void)
{
	thread_act_t	cur_act = current_act();
	thread_t	thread = cur_act->thread;
	spl_t		s;

	if (cur_act->suspend_count)
		install_special_handler(cur_act);
	else {
		s = splsched();
		thread_lock(thread);
		if (thread->depress_priority == -2) {
			/*
			 * We were temporarily undepressed by
			 * install_special_handler; restore priority
			 * depression.
			 */
			thread->depress_priority = thread->priority;
			thread->priority = thread->sched_pri = DEPRESSPRI;
		}
		thread_unlock(thread);
		splx(s);
	}
	thread_exception_return();
}

/*
 * special_handler	- handles suspension, termination.  Called
 * with nothing locked.  Returns (if it returns) the same way.
 */
void
special_handler(
	ReturnHandler	*rh,
	thread_act_t	cur_act)
{
	spl_t		s;
	thread_t	lthread;
	thread_t	thread = act_lock_thread(cur_act);
	unsigned	alert_bits;
	exception_data_type_t
			codes[EXCEPTION_CODE_MAX];
	kern_return_t	kr;
	kern_return_t	exc_kr;

	assert(thread != THREAD_NULL);
#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("\t\tspecial_handler(thr_act=%x(%d))\n", cur_act,
		       (cur_act ? cur_act->ref_count : 0));
#endif	/* MACH_ASSERT */

	s = splsched();

	thread_lock(thread);
	thread->state &= ~TH_ABORT;	/* clear any aborts */
	thread_unlock(thread);
	splx(s);

	/*
	 * If someone has killed this invocation,
	 * invoke the return path with a terminated exception.
	 */
	if (!cur_act->active) {
		act_unlock_thread(cur_act);
		act_machine_return(KERN_TERMINATED);
	}

#ifdef	CALLOUT_RPC_MODEL
	/*
	 * JMM - We don't intend to support this RPC model in Darwin.
	 * We will support inheritance through chains of activations
	 * on shuttles, but it will be universal and not just for RPC.
	 * As such, each activation will always have a base shuttle.
	 * Our RPC model will probably even support the notion of
	 * alerts (thrown up the chain of activations to affect the
	 * work done on our behalf), but the unlinking of the shuttles
	 * will be completely different because we will never have
	 * to clone them.
	 */

	/* strip server terminated bit */
	alert_bits = cur_act->alerts & (~SERVER_TERMINATED);

	/* clear server terminated bit */
	cur_act->alerts &= ~SERVER_TERMINATED;

	if (alert_bits) {
		/*
		 * currently necessary to coordinate with the exception
		 * code -fdr
		 */
		act_unlock_thread(cur_act);

		/* upcall exception/alert port */
		codes[0] = alert_bits;

		/*
		 * Exception makes a lot of assumptions.  If there is no
		 * exception handler or the exception reply is broken, the
		 * thread will be terminated and exception will not return.  If
		 * we decide we don't like that behavior, we need to check
		 * for the existence of an exception port before we call
		 * exception.
		 */
		exc_kr = exception(EXC_RPC_ALERT, codes, 1);

		/* clear the orphaned and time constraint indications */
		cur_act->alerts &= ~(ORPHANED | TIME_CONSTRAINT_UNSATISFIED);

		/* if this orphaned activation should be terminated... */
		if (exc_kr == KERN_RPC_TERMINATE_ORPHAN) {
			/*
			 * ... terminate the activation
			 *
			 * This is done in two steps.  First, the activation is
			 * disabled (prepared for termination); second, the
			 * `special_handler()' is executed again -- this time
			 * to terminate the activation.
			 * (`act_disable_task_locked()' arranges for the
			 * additional execution of the `special_handler().')
			 */

#if	THREAD_SWAPPER
			thread_swap_disable(cur_act);
#endif	/* THREAD_SWAPPER */

			/* acquire appropriate locks */
			task_lock(cur_act->task);
			act_lock_thread(cur_act);

			/* detach the activation from its task */
			kr = act_disable_task_locked(cur_act);
			assert(kr == KERN_SUCCESS);

			/* release locks */
			task_unlock(cur_act->task);
		}
		else {
			/* acquire activation lock again (released below) */
			act_lock_thread(cur_act);
			s = splsched();
			thread_lock(thread);
			if (thread->depress_priority == -2) {
				/*
				 * We were temporarily undepressed by
				 * install_special_handler; restore priority
				 * depression.
				 */
				thread->depress_priority = thread->priority;
				thread->priority = thread->sched_pri = DEPRESSPRI;
			}
			thread_unlock(thread);
			splx(s);
		}
	}
#endif	/* CALLOUT_RPC_MODEL */

	/*
	 * If we're suspended, go to sleep and wait for someone to wake us up.
	 */
	if (cur_act->suspend_count) {
		if (cur_act->handlers == NULL) {
			assert_wait((event_t)&cur_act->suspend_count,
				    THREAD_ABORTSAFE);
			act_unlock_thread(cur_act);
			thread_block(special_handler_continue);
			/* NOTREACHED */
		}
		special_handler_continue();
	}

	act_unlock_thread(cur_act);
}

/*
 * Try to nudge a thr_act into executing its returnhandler chain.
 * Ensures that the activation will execute its returnhandlers
 * before it next executes any of its user-level code.
 *
 * Called with thr_act's act_lock() and "appropriate" thread-related
 * locks held.  (See act_lock_thread().)  Returns same way.
 */
void
nudge(thread_act_t	thr_act)
{
#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("\tact_%x: nudge(%x)\n", current_act(), thr_act);
#endif	/* MACH_ASSERT */

	/*
	 * Don't need to do anything at all if this thr_act isn't the topmost.
	 */
	if (thr_act->thread && thr_act->thread->top_act == thr_act) {
		/*
		 * If it's suspended, wake it up.
		 * This should nudge it even on another CPU.
		 */
		thread_wakeup((event_t)&thr_act->suspend_count);
	}
}

/*
 * Update activation that belongs to a task created via kernel_task_create().
 */
void
act_user_to_kernel(
	thread_act_t	thr_act)
{
	pcb_user_to_kernel(thr_act);
	thr_act->kernel_loading = TRUE;
}

/*
 * Already locked: thr_act->task, RPC-related locks for thr_act
 *
 * Detach an activation from its task, and prepare it to terminate
 * itself.
 */
kern_return_t
act_disable_task_locked(
	thread_act_t	thr_act)
{
	thread_t	thread = thr_act->thread;
	task_t		task = thr_act->task;

#if	MACH_ASSERT
	if (watchacts & WA_EXIT) {
		printf("act_%x: act_disable_tl(thr_act=%x(%d))%sactive task=%x(%d)",
		       current_act(), thr_act, thr_act->ref_count,
		       (thr_act->active ? " " : " !"),
		       thr_act->task, thr_act->task ? thr_act->task->ref_count : 0);
		if (thr_act->pool_port)
			printf(", pool_port %x", thr_act->pool_port);
		printf("\n");
		(void) dump_act(thr_act);
	}
#endif	/* MACH_ASSERT */

	/* This will allow no more control ops on this thr_act. */
	thr_act->active = 0;
	ipc_thr_act_disable(thr_act);

	/* Clean-up any ulocks that are still owned by the thread
	 * activation (acquired but not released or handed-off).
	 */
	act_ulock_release_all(thr_act);

	/* When the special_handler gets executed,
	 * it will see the terminated condition and exit
	 * immediately.
	 */
	install_special_handler(thr_act);

	/* If the target happens to be suspended,
	 * give it a nudge so it can exit.
	 */
	if (thr_act->suspend_count)
		nudge(thr_act);

	/* Drop the thr_act reference taken for being active.
	 * (There is still at least one reference left:
	 * the one we were passed.)
	 * Inline the deallocate because thr_act is locked.
	 */
	act_locked_act_deallocate(thr_act);

	return(KERN_SUCCESS);
}

/*
 * act_alert	- Register an alert from this activation.
 *
 * Each set bit is propagated upward from (but not including) this activation,
 * until the top of the chain is reached or the bit is masked.
 */
kern_return_t
act_alert(thread_act_t thr_act, unsigned alerts)
{
	thread_t	thread = act_lock_thread(thr_act);

#if	MACH_ASSERT
	if (watchacts & WA_ACT_LNK)
		printf("act_alert %x: %x\n", thr_act, alerts);
#endif	/* MACH_ASSERT */

	if (thread) {
		thread_act_t	act_up = thr_act;
		while ((alerts) && (act_up != thread->top_act)) {
			act_up = act_up->higher;
			alerts &= act_up->alert_mask;
			act_up->alerts |= alerts;
		}
		/*
		 * XXXX If we reach the top, and it is blocked in glue
		 * code, do something to kick it.  XXXX
		 */
	}
	act_unlock_thread(thr_act);

	return KERN_SUCCESS;
}
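
/*
 * Usage sketch (illustrative only): the propagation loop above ANDs the
 * pending bits with each alert_mask on the way up, so a zero bit in any
 * intermediate activation's mask stops that alert there.  With a
 * hypothetical three-deep chain client -> mid -> top (top == top_act):
 *
 *	mid->alert_mask = ORPHANED;	-- mid passes only ORPHANED through
 *	top->alert_mask = ~0;		-- top accepts everything
 *
 *	act_alert(client, ORPHANED);	-- sets ORPHANED in mid, then top
 *	act_alert(client, SERVER_TERMINATED);
 *					-- masked off at mid; top never sees it
 */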

kern_return_t act_alert_mask(thread_act_t thr_act, unsigned alert_mask)
{
	panic("act_alert_mask NOT YET IMPLEMENTED\n");
	return KERN_SUCCESS;
}

typedef struct GetSetState {
	struct ReturnHandler	rh;
	int			flavor;
	void			*state;
	int			*pcount;
	int			result;
} GetSetState;

/* Local Forward decls */
kern_return_t get_set_state(
		thread_act_t thr_act, int flavor,
		thread_state_t state, int *pcount,
		void (*handler)(ReturnHandler *rh, thread_act_t thr_act));
void get_state_handler(ReturnHandler *rh, thread_act_t thr_act);
void set_state_handler(ReturnHandler *rh, thread_act_t thr_act);

/*
 * get_set_state(thr_act ...)
 *
 * General code to install g/set_state handler.
 * Called with thr_act's act_lock() and "appropriate"
 * thread-related locks held.  (See act_lock_thread().)
 */
kern_return_t
get_set_state(thread_act_t thr_act, int flavor, thread_state_t state, int *pcount,
	      void (*handler)(ReturnHandler *rh, thread_act_t thr_act))
{
	GetSetState	gss;
	spl_t		s;

	/* Initialize a small parameter structure */
	gss.rh.handler = handler;
	gss.flavor = flavor;
	gss.state = state;
	gss.pcount = pcount;
	gss.result = KERN_ABORTED;	/* iff wait below is interrupted */

	/* Add it to the thr_act's return handler list */
	gss.rh.next = thr_act->handlers;
	thr_act->handlers = &gss.rh;

	s = splsched();
	act_set_apc(thr_act);
	splx(s);

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR) {
		printf("act_%x: get_set_state(thr_act=%x flv=%x state=%x ptr@%x=%x)",
		       current_act(), thr_act, flavor, state,
		       pcount, (pcount ? *pcount : 0));
		printf((handler == get_state_handler ? "get_state_hdlr\n" :
		       (handler == set_state_handler ? "set_state_hdlr\n" :
			"hndler=%x\n")), handler);
	}
#endif	/* MACH_ASSERT */

	assert(thr_act->thread);	/* Callers must ensure these */
	assert(thr_act != current_act());
	for (;;) {
		nudge(thr_act);
		/*
		 * Wait must be interruptible to avoid deadlock (e.g.) with
		 * task_suspend() when caller and target of get_set_state()
		 * are in same task.
		 */
		assert_wait((event_t)&gss, THREAD_ABORTSAFE);
		act_unlock_thread(thr_act);
		thread_block((void (*)(void))0);
		if (gss.result != KERN_ABORTED)
			break;
		if (current_act()->handlers)
			act_execute_returnhandlers();
		act_lock_thread(thr_act);
	}

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("act_%x: get_set_state returns %x\n",
		       current_act(), gss.result);
#endif	/* MACH_ASSERT */

	return gss.result;
}

void
set_state_handler(ReturnHandler *rh, thread_act_t thr_act)
{
	GetSetState	*gss = (GetSetState*)rh;

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("act_%x: set_state_handler(rh=%x,thr_act=%x)\n",
		       current_act(), rh, thr_act);
#endif	/* MACH_ASSERT */

	gss->result = act_machine_set_state(thr_act, gss->flavor,
					    gss->state, *gss->pcount);
	thread_wakeup((event_t)gss);
}

void
get_state_handler(ReturnHandler *rh, thread_act_t thr_act)
{
	GetSetState	*gss = (GetSetState*)rh;

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("act_%x: get_state_handler(rh=%x,thr_act=%x)\n",
		       current_act(), rh, thr_act);
#endif	/* MACH_ASSERT */

	gss->result = act_machine_get_state(thr_act, gss->flavor,
					    gss->state,
					    (mach_msg_type_number_t *)gss->pcount);
	thread_wakeup((event_t)gss);
}

kern_return_t
act_get_state_locked(thread_act_t thr_act, int flavor, thread_state_t state,
		     mach_msg_type_number_t *pcount)
{
#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("act_%x: act_get_state_L(thr_act=%x,flav=%x,st=%x,pcnt@%x=%x)\n",
		       current_act(), thr_act, flavor, state, pcount,
		       (pcount ? *pcount : 0));
#endif	/* MACH_ASSERT */

	return(get_set_state(thr_act, flavor, state, (int*)pcount, get_state_handler));
}

kern_return_t
act_set_state_locked(thread_act_t thr_act, int flavor, thread_state_t state,
		     mach_msg_type_number_t count)
{
#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("act_%x: act_set_state_L(thr_act=%x,flav=%x,st=%x,cnt=%x)\n",
		       current_act(), thr_act, flavor, state, count);
#endif	/* MACH_ASSERT */

	return(get_set_state(thr_act, flavor, state, (int*)&count, set_state_handler));
}

kern_return_t
act_set_state(thread_act_t thr_act, int flavor, thread_state_t state,
	      mach_msg_type_number_t count)
{
	if (thr_act == THR_ACT_NULL || thr_act == current_act())
		return(KERN_INVALID_ARGUMENT);

	act_lock_thread(thr_act);
	return(act_set_state_locked(thr_act, flavor, state, count));
}

kern_return_t
act_get_state(thread_act_t thr_act, int flavor, thread_state_t state,
	      mach_msg_type_number_t *pcount)
{
	if (thr_act == THR_ACT_NULL || thr_act == current_act())
		return(KERN_INVALID_ARGUMENT);

	act_lock_thread(thr_act);
	return(act_get_state_locked(thr_act, flavor, state, pcount));
}

/*
 * These two should be called at splsched()
 * Set/clear indicator to run APC (layered on ASTs)
 */
void
act_set_apc(thread_act_t thr_act)
{
	thread_ast_set(thr_act, AST_APC);
	if (thr_act == current_act()) {
		mp_disable_preemption();
		ast_propagate(thr_act->ast);
		mp_enable_preemption();
	}
}

void
act_clr_apc(thread_act_t thr_act)
{
	thread_ast_clear(thr_act, AST_APC);
}
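
/*
 * Usage sketch (illustrative only): because both routines assume
 * splsched(), callers bracket them the way get_set_state() and
 * install_special_handler_locked() do:
 *
 *	spl_t s;
 *
 *	s = splsched();		-- block clock/scheduler interrupts
 *	act_set_apc(thr_act);	-- request APC via the AST machinery
 *	splx(s);		-- restore previous interrupt level
 */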

void
act_ulock_release_all(thread_act_t thr_act)
{
	ulock_t	ulock;

	while (!queue_empty(&thr_act->held_ulocks)) {
		ulock = (ulock_t) queue_first(&thr_act->held_ulocks);
		(void) lock_make_unstable(ulock, thr_act);
		(void) lock_release_internal(ulock, thr_act);
	}
}

/*
 * Provide routines (for export to other components) of things that
 * are implemented as macros internally.
 */
#undef current_act
thread_act_t
current_act(void)
{
	return(current_act_fast());
}

thread_act_t
thread_self(void)
{
	thread_act_t self = current_act_fast();

	act_reference(self);
	return self;
}

thread_act_t
mach_thread_self(void)
{
	thread_act_t self = current_act_fast();

	act_reference(self);
	return self;
}

#undef act_reference
void
act_reference(
	thread_act_t	thr_act)
{
	act_reference_fast(thr_act);
}

#undef act_deallocate
void
act_deallocate(
	thread_act_t	thr_act)
{
	act_deallocate_fast(thr_act);
}