/* [apple/xnu.git] osfmk/kern/thread_act.c (xnu-201.42.3) */
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Center for Software Science (CSS).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 * Author:	Bryan Ford, University of Utah CSS
 *
 * Thread_Activation management routines
 */

#include <cpus.h>
#include <task_swapper.h>
#include <mach/kern_return.h>
#include <mach/alert.h>
#include <kern/etap_macros.h>
#include <kern/mach_param.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <kern/thread_swap.h>
#include <kern/task.h>
#include <kern/task_swap.h>
#include <kern/thread_act.h>
#include <kern/thread_pool.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/exception.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/profile.h>
#include <kern/machine.h>
#include <kern/spl.h>
#include <kern/syscall_subr.h>
#include <kern/sync_lock.h>
#include <kern/mk_sp.h>	/*** ??? fix so this can be removed ***/
#include <kern/processor.h>
#include <mach_prof.h>
#include <mach/rpc.h>

/*
 * Debugging printf control
 */
#if	MACH_ASSERT
unsigned int	watchacts =	  0 /* WA_ALL */
				    ;	/* Do-it-yourself & patchable */
#endif

/*
 * Track the number of times we need to swap in a thread to deallocate it.
 */
int act_free_swapin = 0;

/*
 * Forward declarations for functions local to this file.
 */
kern_return_t	act_abort( thread_act_t, int);
void		special_handler(ReturnHandler *, thread_act_t);
void		nudge(thread_act_t);
kern_return_t	act_set_state_locked(thread_act_t, int,
			thread_state_t,
			mach_msg_type_number_t);
kern_return_t	act_get_state_locked(thread_act_t, int,
			thread_state_t,
			mach_msg_type_number_t *);
void		act_set_apc(thread_act_t);
void		act_clr_apc(thread_act_t);
void		act_user_to_kernel(thread_act_t);
void		act_ulock_release_all(thread_act_t thr_act);

void		install_special_handler_locked(thread_act_t);

static zone_t	thr_act_zone;

/*
 * Thread interfaces accessed via a thread_activation:
 */


/*
 * Internal routine to terminate a thread.
 * Called with task locked.
 */
kern_return_t
thread_terminate_internal(
	register thread_act_t	thr_act)
{
	thread_t	thread;
	task_t		task;
	struct ipc_port	*iplock;
	kern_return_t	ret;

#if	THREAD_SWAPPER
	thread_swap_disable(thr_act);
#endif	/* THREAD_SWAPPER */

	thread = act_lock_thread(thr_act);
	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return(KERN_TERMINATED);
	}

	act_disable_task_locked(thr_act);
	ret = act_abort(thr_act,FALSE);

#if	NCPUS > 1
	/*
	 * Make sure this thread enters the kernel
	 */
	if (thread != current_thread()) {
		thread_hold(thr_act);
		act_unlock_thread(thr_act);

		if (thread_stop_wait(thread))
			thread_unstop(thread);
		else
			ret = KERN_ABORTED;

		(void)act_lock_thread(thr_act);
		thread_release(thr_act);
	}
#endif	/* NCPUS > 1 */

	act_unlock_thread(thr_act);
	return(ret);
}

/*
 * Terminate a thread.  Called with nothing locked.
 * Returns same way.
 */
kern_return_t
thread_terminate(
	register thread_act_t	thr_act)
{
	task_t		task;
	kern_return_t	ret;

	if (thr_act == THR_ACT_NULL)
		return KERN_INVALID_ARGUMENT;

	task = thr_act->task;
	if (((task == kernel_task) || (thr_act->kernel_loaded == TRUE))
	    && (current_act() != thr_act)) {
		return(KERN_FAILURE);
	}

	/*
	 * Take the task lock and then call the internal routine
	 * that terminates a thread (it needs the task locked).
	 */
	task_lock(task);
	ret = thread_terminate_internal(thr_act);
	task_unlock(task);

	/*
	 * If a kernel thread is terminating itself, force an AST here.
	 * Kernel threads don't normally pass through the AST checking
	 * code - and all threads finish their own termination in the
	 * special handler APC.
	 */
	if ( ( thr_act->task == kernel_task	||
	       thr_act->kernel_loaded == TRUE )	&&
	     current_act() == thr_act ) {
		ast_taken(AST_APC, FALSE);
		panic("thread_terminate(): returning from ast_taken() for %x kernel activation\n", thr_act);
	}

	return ret;
}

/*
 * thread_hold:
 *
 * Suspend execution of the specified thread.
 * This is a recursive-style suspension of the thread; a count of
 * suspends is maintained.
 *
 * Called with thr_act locked "appropriately" for synchrony with
 * RPC (see act_lock_thread()).  Returns same way.
 */
void
thread_hold(
	register thread_act_t	thr_act)
{
	if (thr_act->suspend_count++ == 0) {
		install_special_handler(thr_act);
		nudge(thr_act);
	}
}

/*
 * Decrement internal suspension count for thr_act, setting thread
 * runnable when count falls to zero.
 *
 * Called with thr_act locked "appropriately" for synchrony
 * with RPC (see act_lock_thread()).
 */
void
thread_release(
	register thread_act_t	thr_act)
{
	if( thr_act->suspend_count &&
	    (--thr_act->suspend_count == 0) )
		nudge( thr_act );
}
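
/*
 * Illustrative sketch (not part of the original source): thread_hold()
 * and thread_release() form a counted pair, so nested holds are safe.
 * Only the first hold installs the special handler, and only the
 * release that drops the count back to zero nudges the activation.
 *
 *	thread = act_lock_thread(thr_act);
 *	thread_hold(thr_act);		suspend_count 0 -> 1, handler installed
 *	thread_hold(thr_act);		suspend_count 1 -> 2, otherwise a no-op
 *	...
 *	thread_release(thr_act);	suspend_count 2 -> 1
 *	thread_release(thr_act);	suspend_count 1 -> 0, nudge(thr_act)
 *	act_unlock_thread(thr_act);
 */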

kern_return_t
thread_suspend(
	register thread_act_t	thr_act)
{
	thread_t	thread;

	if (thr_act == THR_ACT_NULL) {
		return(KERN_INVALID_ARGUMENT);
	}
	thread = act_lock_thread(thr_act);
	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return(KERN_TERMINATED);
	}
	if (thr_act->user_stop_count++ == 0 &&
	    thr_act->suspend_count++ == 0 ) {
		install_special_handler(thr_act);
		if (thread &&
		    thr_act == thread->top_act && thread != current_thread()) {
			nudge(thr_act);
			act_unlock_thread(thr_act);
			(void)thread_wait(thread);
		}
		else {
			/*
			 * No need to wait for target thread
			 */
			act_unlock_thread(thr_act);
		}
	}
	else {
		/*
		 * Thread is already suspended
		 */
		act_unlock_thread(thr_act);
	}
	return(KERN_SUCCESS);
}

kern_return_t
thread_resume(
	register thread_act_t	thr_act)
{
	register kern_return_t	ret;
	spl_t			s;
	thread_t		thread;

	if (thr_act == THR_ACT_NULL)
		return(KERN_INVALID_ARGUMENT);
	thread = act_lock_thread(thr_act);
	ret = KERN_SUCCESS;

	if (thr_act->active) {
		if (thr_act->user_stop_count > 0) {
			if( --thr_act->user_stop_count == 0 ) {
				--thr_act->suspend_count;
				nudge( thr_act );
			}
		}
		else
			ret = KERN_FAILURE;
	}
	else
		ret = KERN_TERMINATED;
	act_unlock_thread( thr_act );
	return ret;
}
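
/*
 * Illustrative sketch (not part of the original source): the exported
 * suspend/resume pair keeps its own user_stop_count above the internal
 * suspend_count, so user-level suspensions nest and must be balanced.
 *
 *	kern_return_t kr;
 *
 *	kr = thread_suspend(thr_act);	first call stops the thread
 *	kr = thread_suspend(thr_act);	nested; just bumps the count
 *	kr = thread_resume(thr_act);	count 2 -> 1, still stopped
 *	kr = thread_resume(thr_act);	count 1 -> 0, thread runs again
 *	kr = thread_resume(thr_act);	returns KERN_FAILURE (not suspended)
 */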

/*
 * This routine walks toward the head of an RPC chain starting at
 * a specified thread activation.  An alert bit is set and a special
 * handler is installed for each thread it encounters.
 *
 * The target thread act and thread shuttle are already locked.
 */
kern_return_t
post_alert(
	register thread_act_t	thr_act,
	unsigned		alert_bits )
{
	thread_act_t	next;
	thread_t	thread;

	/*
	 * Chase the chain, setting alert bits and installing
	 * special handlers for each thread act.
	 */
	/*** Not yet SMP safe ***/
	/*** Worse, where's the activation locking as the chain is walked? ***/
	for (next = thr_act; next != THR_ACT_NULL; next = next->higher) {
		next->alerts |= alert_bits;
		install_special_handler_locked(next);
	}

	return(KERN_SUCCESS);
}

/*
 * thread_depress_abort:
 *
 * Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort(
	register thread_act_t	thr_act)
{
	register thread_t	thread;
	kern_return_t		result;

	if (thr_act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	/* if activation is terminating, this operation is not meaningful */
	if (!thr_act->active) {
		act_unlock_thread(thr_act);

		return (KERN_TERMINATED);
	}

	result = _mk_sp_thread_depress_abort(thread, FALSE);

	act_unlock_thread(thr_act);

	return (result);
}


/*
 * Already locked: all RPC-related locks for thr_act (see
 * act_lock_thread()).
 */
kern_return_t
act_abort( thread_act_t thr_act, int chain_break )
{
	spl_t		spl;
	thread_t	thread;
	struct ipc_port	*iplock = thr_act->pool_port;
	thread_act_t	orphan;
	kern_return_t	kr;
	etap_data_t	probe_data;

	ETAP_DATA_LOAD(probe_data[0], thr_act);
	ETAP_DATA_LOAD(probe_data[1], thr_act->thread);
	ETAP_PROBE_DATA(ETAP_P_ACT_ABORT,
			0,
			current_thread(),
			&probe_data,
			ETAP_DATA_ENTRY*2);

	/*
	 * If the target thread activation is not the head...
	 */
	if ( thr_act->thread->top_act != thr_act ) {
		/*
		 * mark the activation for abort,
		 * update the suspend count,
		 * always install the special handler
		 */
		install_special_handler(thr_act);

#ifdef AGRESSIVE_ABORT
		/* release state buffer for target's outstanding invocation */
		if (unwind_invoke_state(thr_act) != KERN_SUCCESS) {
			panic("unwind_invoke_state failure");
		}

		/* release state buffer for target's incoming invocation */
		if (thr_act->lower != THR_ACT_NULL) {
			if (unwind_invoke_state(thr_act->lower)
			    != KERN_SUCCESS) {
				panic("unwind_invoke_state failure");
			}
		}

		/* unlink target thread activation from shuttle chain */
		if ( thr_act->lower == THR_ACT_NULL ) {
			/*
			 * This is the root thread activation of the chain.
			 * Unlink the root thread act from the bottom of
			 * the chain.
			 */
			thr_act->higher->lower = THR_ACT_NULL;
		} else {
			/*
			 * This thread act is in the middle of the chain.
			 * Unlink the thread act from the middle of the chain.
			 */
			thr_act->higher->lower = thr_act->lower;
			thr_act->lower->higher = thr_act->higher;

			/* set the terminated bit for RPC return processing */
			thr_act->lower->alerts |= SERVER_TERMINATED;
		}

		orphan = thr_act->higher;

		/* remove the activation from its thread pool */
		/* (note: this is okay for "rooted threads," too) */
		act_locked_act_set_thread_pool(thr_act, IP_NULL);

		/* (just to be thorough) release the IP lock */
		if (iplock != IP_NULL) ip_unlock(iplock);

		/* release one more reference for a rooted thread */
		if (iplock == IP_NULL) act_locked_act_deallocate(thr_act);

		/* Presumably, the only reference to this activation is
		 * now held by the caller of this routine. */
		assert(thr_act->ref_count == 1);
#else	/*AGRESSIVE_ABORT*/
		/* If there is a lower activation in the RPC chain... */
		if (thr_act->lower != THR_ACT_NULL) {
			/* ...indicate the server activation was terminated */
			thr_act->lower->alerts |= SERVER_TERMINATED;
		}
		/* Mark (and process) any orphaned activations */
		orphan = thr_act->higher;
#endif	/*AGRESSIVE_ABORT*/

		/* indicate client of orphaned chain has been terminated */
		orphan->alerts |= CLIENT_TERMINATED;

		/*
		 * Set up posting of alert to headward portion of
		 * the RPC chain.
		 */
		/*** fix me -- orphan act is not locked ***/
		post_alert(orphan, ORPHANED);

		/*
		 * Get attention of head of RPC chain.
		 */
		nudge(thr_act->thread->top_act);
		return (KERN_SUCCESS);
	}

	/*
	 * If the target thread is the end of the chain, the thread
	 * has to be marked for abort and ripped out of any wait.
	 */
	spl = splsched();
	thread_lock(thr_act->thread);
	if (thr_act->thread->top_act == thr_act) {
		thr_act->thread->state |= TH_ABORT;
		clear_wait_internal(thr_act->thread, THREAD_INTERRUPTED);
		thread_unlock(thr_act->thread);
		splx(spl);
		install_special_handler(thr_act);
		nudge( thr_act );
	}
	return KERN_SUCCESS;
}

kern_return_t
thread_abort(
	register thread_act_t	thr_act)
{
	int		ret;
	thread_t	thread;

	if (thr_act == THR_ACT_NULL || thr_act == current_act())
		return (KERN_INVALID_ARGUMENT);
	/*
	 * Lock the target thread and the current thread now,
	 * in case thread_halt() ends up being called below.
	 */
	thread = act_lock_thread(thr_act);
	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return(KERN_TERMINATED);
	}

	ret = act_abort( thr_act, FALSE );
	act_unlock_thread( thr_act );
	return ret;
}

kern_return_t
thread_abort_safely(
	register thread_act_t	thr_act)
{
	thread_t	thread;
	spl_t		s;

	if (thr_act == THR_ACT_NULL || thr_act == current_act())
		return(KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return(KERN_TERMINATED);
	}
	if (thread->top_act != thr_act) {
		act_unlock_thread(thr_act);
		return(KERN_FAILURE);
	}
	s = splsched();
	thread_lock(thread);

	if ( thread->at_safe_point ) {
		/*
		 * It's an abortable wait, clear it, then
		 * let the thread go and return successfully.
		 */
		clear_wait_internal(thread, THREAD_INTERRUPTED);
		thread_unlock(thread);
		act_unlock_thread(thr_act);
		splx(s);
		return KERN_SUCCESS;
	}

	/*
	 * If not stopped at a safe point, just let it go and return failure.
	 */
	thread_unlock(thread);
	act_unlock_thread(thr_act);
	splx(s);
	return KERN_FAILURE;
}
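
/*
 * Illustrative sketch (not part of the original source): a caller that
 * prefers the restartable abort can try thread_abort_safely() first and
 * fall back to the forceful thread_abort() only when the target was not
 * waiting at a safe point.  (Assumes the caller can tolerate the
 * stronger abort semantics.)
 *
 *	if (thread_abort_safely(thr_act) != KERN_SUCCESS)
 *		(void) thread_abort(thr_act);
 */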

/*** backward compatibility hacks ***/
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <ipc/ipc_port.h>
#include <mach/thread_act_server.h>

kern_return_t
thread_info(
	thread_act_t			thr_act,
	thread_flavor_t			flavor,
	thread_info_t			thread_info_out,
	mach_msg_type_number_t		*thread_info_count)
{
	register thread_t	thread;
	kern_return_t		result;

	if (thr_act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	if (!thr_act->active) {
		act_unlock_thread(thr_act);

		return (KERN_TERMINATED);
	}

	result = thread_info_shuttle(thr_act, flavor,
					thread_info_out, thread_info_count);

	act_unlock_thread(thr_act);

	return (result);
}

/*
 *	Routine:	thread_get_special_port [kernel call]
 *	Purpose:
 *		Clones a send right for one of the thread's
 *		special ports.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Extracted a send right.
 *		KERN_INVALID_ARGUMENT	The thread is null.
 *		KERN_FAILURE		The thread is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 */

kern_return_t
thread_get_special_port(
	thread_act_t	thr_act,
	int		which,
	ipc_port_t	*portp)
{
	ipc_port_t	*whichp;
	ipc_port_t	port;
	thread_t	thread;

#if	MACH_ASSERT
	if (watchacts & WA_PORT)
		printf("thread_get_special_port(thr_act=%x, which=%x port@%x=%x)\n",
			thr_act, which, portp, (portp ? *portp : 0));
#endif	/* MACH_ASSERT */

	if (!thr_act)
		return KERN_INVALID_ARGUMENT;
	thread = act_lock_thread(thr_act);
	switch (which) {
	    case THREAD_KERNEL_PORT:
		whichp = &thr_act->ith_sself;
		break;

	    default:
		act_unlock_thread(thr_act);
		return KERN_INVALID_ARGUMENT;
	}

	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return KERN_FAILURE;
	}

	port = ipc_port_copy_send(*whichp);
	act_unlock_thread(thr_act);

	*portp = port;
	return KERN_SUCCESS;
}

/*
 *	Routine:	thread_set_special_port [kernel call]
 *	Purpose:
 *		Changes one of the thread's special ports,
 *		setting it to the supplied send right.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The thread is null.
 *		KERN_FAILURE		The thread is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 */

kern_return_t
thread_set_special_port(
	thread_act_t	thr_act,
	int		which,
	ipc_port_t	port)
{
	ipc_port_t	*whichp;
	ipc_port_t	old;
	thread_t	thread;

#if	MACH_ASSERT
	if (watchacts & WA_PORT)
		printf("thread_set_special_port(thr_act=%x,which=%x,port=%x)\n",
			thr_act, which, port);
#endif	/* MACH_ASSERT */

	if (thr_act == 0)
		return KERN_INVALID_ARGUMENT;

	thread = act_lock_thread(thr_act);
	switch (which) {
	    case THREAD_KERNEL_PORT:
		whichp = &thr_act->ith_self;
		break;

	    default:
		act_unlock_thread(thr_act);
		return KERN_INVALID_ARGUMENT;
	}

	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return KERN_FAILURE;
	}

	old = *whichp;
	*whichp = port;
	act_unlock_thread(thr_act);

	if (IP_VALID(old))
		ipc_port_release_send(old);
	return KERN_SUCCESS;
}

/*
 * Thread state should always be accessible by locking the thread
 * and copying it.  The activation makes that messy, so for now, if
 * the target is not the top of the chain, a special handler is used
 * to get the information when the shuttle returns to the activation.
 */
kern_return_t
thread_get_state(
	register thread_act_t	thr_act,
	int			flavor,
	thread_state_t		state,	/* pointer to OUT array */
	mach_msg_type_number_t	*state_count)	/*IN/OUT*/
{
	kern_return_t		ret;
	thread_t		thread, nthread;

	if (thr_act == THR_ACT_NULL || thr_act == current_act())
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return(KERN_TERMINATED);
	}

	thread_hold(thr_act);
	while (1) {
		if (!thread || thr_act != thread->top_act)
			break;
		act_unlock_thread(thr_act);
		(void)thread_stop_wait(thread);
		nthread = act_lock_thread(thr_act);
		if (nthread == thread)
			break;
		thread_unstop(thread);
		thread = nthread;
	}
	ret = act_machine_get_state(thr_act, flavor,
					state, state_count);
	if (thread && thr_act == thread->top_act)
		thread_unstop(thread);
	thread_release(thr_act);
	act_unlock_thread(thr_act);

	return(ret);
}
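
/*
 * Illustrative sketch (not part of the original source): a typical
 * caller passes a flavor-sized buffer and a count that is updated on
 * return.  MY_FLAVOR and MY_STATE_COUNT stand in for one of the
 * machine-dependent flavors and its count constant; they are
 * placeholders, not real names.
 *
 *	thread_state_data_t	state;
 *	mach_msg_type_number_t	count = MY_STATE_COUNT;
 *
 *	kr = thread_get_state(thr_act, MY_FLAVOR,
 *			      (thread_state_t)&state, &count);
 */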

/*
 * Change thread's machine-dependent state.  Called with nothing
 * locked.  Returns same way.
 */
kern_return_t
thread_set_state(
	register thread_act_t	thr_act,
	int			flavor,
	thread_state_t		state,
	mach_msg_type_number_t	state_count)
{
	kern_return_t		ret;
	thread_t		thread, nthread;

	if (thr_act == THR_ACT_NULL || thr_act == current_act())
		return (KERN_INVALID_ARGUMENT);
	/*
	 * We have no kernel activations, so Utah's MO fails for signals etc.
	 *
	 * If we're blocked in the kernel, use non-blocking method, else
	 * pass locked thr_act+thread in to "normal" act_[gs]et_state().
	 */

	thread = act_lock_thread(thr_act);
	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return(KERN_TERMINATED);
	}

	thread_hold(thr_act);
	while (1) {
		if (!thread || thr_act != thread->top_act)
			break;
		act_unlock_thread(thr_act);
		(void)thread_stop_wait(thread);
		nthread = act_lock_thread(thr_act);
		if (nthread == thread)
			break;
		thread_unstop(thread);
		thread = nthread;
	}
	ret = act_machine_set_state(thr_act, flavor,
					state, state_count);
	if (thread && thr_act == thread->top_act)
		thread_unstop(thread);
	thread_release(thr_act);
	act_unlock_thread(thr_act);

	return(ret);
}

/*
 * Kernel-internal "thread" interfaces used outside this file:
 */

kern_return_t
thread_dup(
	thread_act_t	source_thr_act,
	thread_act_t	target_thr_act)
{
	kern_return_t	ret;
	thread_t	thread, nthread;

	if (target_thr_act == THR_ACT_NULL || target_thr_act == current_act())
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(target_thr_act);
	if (!target_thr_act->active) {
		act_unlock_thread(target_thr_act);
		return(KERN_TERMINATED);
	}

	thread_hold(target_thr_act);
	while (1) {
		if (!thread || target_thr_act != thread->top_act)
			break;
		act_unlock_thread(target_thr_act);
		(void)thread_stop_wait(thread);
		nthread = act_lock_thread(target_thr_act);
		if (nthread == thread)
			break;
		thread_unstop(thread);
		thread = nthread;
	}
	ret = act_thread_dup(source_thr_act, target_thr_act);
	if (thread && target_thr_act == thread->top_act)
		thread_unstop(thread);
	thread_release(target_thr_act);
	act_unlock_thread(target_thr_act);

	return(ret);
}

/*
 * thread_setstatus:
 *
 * Set the status of the specified thread.
 * Called with (and returns with) no locks held.
 */
kern_return_t
thread_setstatus(
	thread_act_t		thr_act,
	int			flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	count)
{
	kern_return_t	kr;
	thread_t	thread;

	thread = act_lock_thread(thr_act);
	assert(thread);
	assert(thread->top_act == thr_act);
	kr = act_machine_set_state(thr_act, flavor, tstate, count);
	act_unlock_thread(thr_act);
	return(kr);
}

/*
 * thread_getstatus:
 *
 * Get the status of the specified thread.
 */
kern_return_t
thread_getstatus(
	thread_act_t		thr_act,
	int			flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count)
{
	kern_return_t	kr;
	thread_t	thread;

	thread = act_lock_thread(thr_act);
	assert(thread);
	assert(thread->top_act == thr_act);
	kr = act_machine_get_state(thr_act, flavor, tstate, count);
	act_unlock_thread(thr_act);
	return(kr);
}

/*
 * Kernel-internal thread_activation interfaces used outside this file:
 */

/*
 * act_init()	- Initialize activation handling code
 */
void
act_init()
{
	thr_act_zone = zinit(
			sizeof(struct thread_activation),
			ACT_MAX * sizeof(struct thread_activation), /* XXX */
			ACT_CHUNK * sizeof(struct thread_activation),
			"activations");
	act_machine_init();
}

/*
 * act_create	- Create a new activation in a specific task.
 */
kern_return_t
act_create(task_t task,
	   thread_act_t *new_act)
{
	thread_act_t	thr_act;
	int		rc;
	vm_map_t	map;

	thr_act = (thread_act_t)zalloc(thr_act_zone);
	if (thr_act == 0)
		return(KERN_RESOURCE_SHORTAGE);

#if	MACH_ASSERT
	if (watchacts & WA_ACT_LNK)
		printf("act_create(task=%x,thr_act@%x=%x)\n",
			task, new_act, thr_act);
#endif	/* MACH_ASSERT */

	/* Start by zeroing everything; then init non-zero items only */
	bzero((char *)thr_act, sizeof(*thr_act));

#ifdef MACH_BSD
	{
		/*
		 * Take care of the uthread allocation
		 * do it early in order to make KERN_RESOURCE_SHORTAGE
		 * handling trivial
		 * uthread_alloc() will bzero the storage allocated.
		 */
		extern void *uthread_alloc(void);
		thr_act->uthread = uthread_alloc();
		if(thr_act->uthread == 0) {
			/* Put the thr_act back on the thr_act zone */
			zfree(thr_act_zone, (vm_offset_t)thr_act);
			return(KERN_RESOURCE_SHORTAGE);
		}
	}
#endif	/* MACH_BSD */

	/*
	 * Start with one reference for the caller and one for the
	 * act being alive.
	 */
	act_lock_init(thr_act);
	thr_act->ref_count = 2;

	/* Latch onto the task. */
	thr_act->task = task;
	task_reference(task);

	/* Initialize sigbufp for High-Watermark buffer allocation */
	thr_act->r_sigbufp = (routine_descriptor_t) &thr_act->r_sigbuf;
	thr_act->r_sigbuf_size = sizeof(thr_act->r_sigbuf);

#if	THREAD_SWAPPER
	thr_act->swap_state = TH_SW_IN;
#if	MACH_ASSERT
	thr_act->kernel_stack_swapped_in = TRUE;
#endif	/* MACH_ASSERT */
#endif	/* THREAD_SWAPPER */

	/* special_handler will always be last on the returnhandlers list. */
	thr_act->special_handler.next = 0;
	thr_act->special_handler.handler = special_handler;

#if	MACH_PROF
	thr_act->act_profiled = FALSE;
	thr_act->act_profiled_own = FALSE;
	thr_act->profil_buffer = NULLPROFDATA;
#endif

	/* Initialize the held_ulocks queue as empty */
	queue_init(&thr_act->held_ulocks);

	/* Inherit the profiling status of the parent task */
	act_prof_init(thr_act, task);

	ipc_thr_act_init(task, thr_act);
	act_machine_create(task, thr_act);

	/*
	 * If thr_act was created in a kernel-loaded task, alter its
	 * saved state to indicate that
	 */
	if (task->kernel_loaded) {
		act_user_to_kernel(thr_act);
	}

	/* Cache the task's map and take a reference to it */
	map = task->map;
	thr_act->map = map;

	/* Inline vm_map_reference because we don't want to increment res_count */
	mutex_lock(&map->s_lock);
#if	TASK_SWAPPER
	assert(map->res_count > 0);
	assert(map->ref_count >= map->res_count);
#endif	/* TASK_SWAPPER */
	map->ref_count++;
	mutex_unlock(&map->s_lock);

	*new_act = thr_act;
	return KERN_SUCCESS;
}
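
/*
 * Illustrative sketch (not part of the original source): act_create()
 * hands back an activation holding two references -- one for the caller
 * and one for the act being alive -- so a creator that is done with its
 * handle drops only its own reference.
 *
 *	thread_act_t new_act;
 *
 *	if (act_create(task, &new_act) == KERN_SUCCESS) {
 *		...attach it to a shuttle or thread_pool...
 *		act_deallocate(new_act);	drop the creator's ref
 *	}
 */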

/*
 * act_free	- called when a thr_act's ref_count drops to zero.
 *
 * This can only happen after the activation has been reaped, and
 * all other references to it have gone away.  We can now release
 * the last critical resources, unlink the activation from the
 * task, and release the reference on the thread shuttle itself.
 *
 * Called with activation locked.
 */
#if	MACH_ASSERT
int	dangerous_bzero = 1;	/* paranoia & safety */
#endif

void
act_free(thread_act_t thr_act)
{
	task_t		task;
	thread_t	thr;
	vm_map_t	map;
	unsigned int	ref;

#if	MACH_ASSERT
	if (watchacts & WA_EXIT)
		printf("act_free(%x(%d)) thr=%x tsk=%x(%d) pport=%x%sactive\n",
			thr_act, thr_act->ref_count, thr_act->thread,
			thr_act->task,
			thr_act->task ? thr_act->task->ref_count : 0,
			thr_act->pool_port,
			thr_act->active ? " " : " !");
#endif	/* MACH_ASSERT */


#if	THREAD_SWAPPER
	assert(thr_act->kernel_stack_swapped_in);
#endif	/* THREAD_SWAPPER */

	assert(!thr_act->active);
	assert(!thr_act->pool_port);

	task = thr_act->task;
	task_lock(task);

	if (thr = thr_act->thread) {
		time_value_t	user_time, system_time;

		thread_read_times(thr, &user_time, &system_time);
		time_value_add(&task->total_user_time, &user_time);
		time_value_add(&task->total_system_time, &system_time);

		/* Unlink the thr_act from the task's thr_act list,
		 * so it doesn't appear in calls to task_threads and such.
		 * The thr_act still keeps its ref on the task, however.
		 */
		queue_remove(&task->thr_acts, thr_act, thread_act_t, thr_acts);
		thr_act->thr_acts.next = NULL;
		task->thr_act_count--;

#if	THREAD_SWAPPER
		/*
		 * Thread is supposed to be unswappable by now...
		 */
		assert(thr_act->swap_state == TH_SW_UNSWAPPABLE ||
		       !thread_swap_unwire_stack);
#endif	/* THREAD_SWAPPER */

		task->res_act_count--;
		task_unlock(task);
		task_deallocate(task);
		thread_deallocate(thr);
		act_machine_destroy(thr_act);
	} else {
		/*
		 * Must have never really gotten started; no unlinking
		 * from the task and no need to free the shuttle.
		 */
		task_unlock(task);
		task_deallocate(task);
	}

	sigbuf_dealloc(thr_act);
	act_prof_deallocate(thr_act);
	ipc_thr_act_terminate(thr_act);

	/*
	 * Drop the cached map reference.
	 * Inline version of vm_map_deallocate() because we
	 * don't want to decrement the map's residence count here.
	 */
	map = thr_act->map;
	mutex_lock(&map->s_lock);
#if	TASK_SWAPPER
	assert(map->res_count >= 0);
	assert(map->ref_count > map->res_count);
#endif	/* TASK_SWAPPER */
	ref = --map->ref_count;
	mutex_unlock(&map->s_lock);
	if (ref == 0)
		vm_map_destroy(map);

#ifdef MACH_BSD
	{
		/*
		 * Free uthread BEFORE the bzero.
		 * Not doing so will result in a leak.
		 */
		extern void uthread_free(void *);
		void *ut = thr_act->uthread;
		thr_act->uthread = 0;
		uthread_free(ut);
	}
#endif	/* MACH_BSD */

#if	MACH_ASSERT
	if (dangerous_bzero)	/* dangerous if we're still using it! */
		bzero((char *)thr_act, sizeof(*thr_act));
#endif	/* MACH_ASSERT */
	/* Put the thr_act back on the thr_act zone */
	zfree(thr_act_zone, (vm_offset_t)thr_act);
}


/*
 * act_attach	- Attach a thr_act to the top of a thread ("push the stack").
 *
 * The thread_shuttle must be either the current one or a brand-new one.
 * Assumes the thr_act is active but not in use; also, that if it is
 * attached to a thread_pool (i.e. the thread_pool pointer is nonzero),
 * the thr_act has already been taken off the thread_pool's list.
 *
 * Already locked: thr_act plus "appropriate" thread-related locks
 * (see act_lock_thread()).
 */
void
act_attach(
	thread_act_t	thr_act,
	thread_t	thread,
	unsigned	init_alert_mask)
{
	thread_act_t	lower;

#if	MACH_ASSERT
	assert(thread == current_thread() || thread->top_act == THR_ACT_NULL);
	if (watchacts & WA_ACT_LNK)
		printf("act_attach(thr_act %x(%d) thread %x(%d) mask %d)\n",
		       thr_act, thr_act->ref_count, thread, thread->ref_count,
		       init_alert_mask);
#endif	/* MACH_ASSERT */

	/*
	 * Chain the thr_act onto the thread's thr_act stack.
	 * Set mask and auto-propagate alerts from below.
	 */
	thr_act->ref_count++;
	thr_act->thread = thread;
	thr_act->higher = THR_ACT_NULL;	 /*safety*/
	thr_act->alerts = 0;
	thr_act->alert_mask = init_alert_mask;
	lower = thr_act->lower = thread->top_act;

	if (lower != THR_ACT_NULL) {
		lower->higher = thr_act;
		thr_act->alerts = (lower->alerts & init_alert_mask);
	}

	thread->top_act = thr_act;
}

/*
 * act_detach
 *
 * Remove the current thr_act from the top of the current thread, i.e.
 * "pop the stack".  Assumes already locked: thr_act plus "appropriate"
 * thread-related locks (see act_lock_thread).
 */
void
act_detach(
	thread_act_t	cur_act)
{
	thread_t	cur_thread = cur_act->thread;

#if	MACH_ASSERT
	if (watchacts & (WA_EXIT|WA_ACT_LNK))
		printf("act_detach: thr_act %x(%d), thrd %x(%d) task=%x(%d)\n",
		       cur_act, cur_act->ref_count,
		       cur_thread, cur_thread->ref_count,
		       cur_act->task,
		       cur_act->task ? cur_act->task->ref_count : 0);
#endif	/* MACH_ASSERT */

	/* Unlink the thr_act from the thread's thr_act stack */
	cur_thread->top_act = cur_act->lower;
	cur_act->thread = 0;
	cur_act->ref_count--;
	assert(cur_act->ref_count > 0);

	thread_pool_put_act(cur_act);

#if	MACH_ASSERT
	cur_act->lower = cur_act->higher = THR_ACT_NULL;
	if (cur_thread->top_act)
		cur_thread->top_act->higher = THR_ACT_NULL;
#endif	/* MACH_ASSERT */

	return;
}
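
/*
 * Illustrative sketch (not part of the original source): act_attach()
 * and act_detach() treat thread->top_act as a stack, which is how RPC
 * pushes a server activation over the client's and later pops it.
 *
 *	act_attach(new_act, thread, 0);	push: new_act->lower = old top_act
 *	...run in new_act...
 *	act_detach(new_act);		pop: thread->top_act = old top_act
 */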

/*
 * Synchronize a thread operation with RPC.  Called with nothing
 * locked.  Returns with thr_act locked, plus one of four
 * combinations of other locks held:
 *	none				- for new activation not yet
 *					  associated with thread_pool or shuttle
 *	rpc_lock(thr_act->thread) only	- for base activation (one
 *					  without pool_port)
 *	ip_lock(thr_act->pool_port) only - for empty activation (one
 *					  with no associated shuttle)
 *	both locks			- for "active" activation (has
 *					  shuttle, lives on thread_pool)
 * If thr_act has an associated shuttle, this function returns
 * its address.  Otherwise it returns zero.
 */
thread_t
act_lock_thread(
	thread_act_t	thr_act)
{
	ipc_port_t	pport;

	/*
	 * Allow the shuttle cloning code (q.v., when it
	 * exists :-}) to obtain ip_lock()'s while holding
	 * an rpc_lock().
	 */
	while (1) {
		act_lock(thr_act);
		pport = thr_act->pool_port;
		if (!pport || ip_lock_try(pport)) {
			if (!thr_act->thread)
				break;
			if (rpc_lock_try(thr_act->thread))
				break;
			if (pport)
				ip_unlock(pport);
		}
		act_unlock(thr_act);
		mutex_pause();
	}
	return (thr_act->thread);
}

/*
 * Unsynchronize with RPC (i.e., undo an act_lock_thread() call).
 * Called with thr_act locked, plus thread locks held that are
 * "correct" for thr_act's state.  Returns with nothing locked.
 */
void
act_unlock_thread(thread_act_t	thr_act)
{
	if (thr_act->thread)
		rpc_unlock(thr_act->thread);
	if (thr_act->pool_port)
		ip_unlock(thr_act->pool_port);
	act_unlock(thr_act);
}
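
/*
 * Illustrative sketch (not part of the original source): every control
 * operation in this file brackets its work with this pair, letting
 * act_unlock_thread() release whichever of the rpc/pool-port locks
 * act_lock_thread() actually took.
 *
 *	thread = act_lock_thread(thr_act);
 *	if (!thr_act->active) {
 *		act_unlock_thread(thr_act);
 *		return (KERN_TERMINATED);
 *	}
 *	...operate on thr_act and thread...
 *	act_unlock_thread(thr_act);
 */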

/*
 * Synchronize with RPC given a pointer to a shuttle (instead of an
 * activation).  Called with nothing locked; returns with all
 * "appropriate" thread-related locks held (see act_lock_thread()).
 */
thread_act_t
thread_lock_act(
	thread_t	thread)
{
	thread_act_t	thr_act;

	while (1) {
		rpc_lock(thread);
		thr_act = thread->top_act;
		if (!thr_act)
			break;
		if (!act_lock_try(thr_act)) {
			rpc_unlock(thread);
			mutex_pause();
			continue;
		}
		if (thr_act->pool_port &&
			!ip_lock_try(thr_act->pool_port)) {
			rpc_unlock(thread);
			act_unlock(thr_act);
			mutex_pause();
			continue;
		}
		break;
	}
	return (thr_act);
}

/*
 * Unsynchronize with RPC starting from a pointer to a shuttle.
 * Called with RPC-related locks held that are appropriate to
 * shuttle's state; any activation is also locked.
 */
void
thread_unlock_act(
	thread_t	thread)
{
	thread_act_t	thr_act;

	if (thr_act = thread->top_act) {
		if (thr_act->pool_port)
			ip_unlock(thr_act->pool_port);
		act_unlock(thr_act);
	}
	rpc_unlock(thread);
}

/*
 * switch_act
 *
 * If a new activation is given, switch to it.  If not,
 * switch to the lower activation (pop).  Returns the old
 * activation.  This is for RPC support.
 */
thread_act_t
switch_act(
	thread_act_t	act)
{
	thread_t	thread;
	thread_act_t	old, new;
	unsigned	cpu;
	spl_t		spl;


	disable_preemption();

	cpu = cpu_number();
	thread = current_thread();

	/*
	 * Find the old and new activation for switch.
	 */
	old = thread->top_act;

	if (act) {
		new = act;
		new->thread = thread;
	}
	else {
		new = old->lower;
	}

	assert(new != THR_ACT_NULL);
#if	THREAD_SWAPPER
	assert(new->swap_state != TH_SW_OUT &&
	       new->swap_state != TH_SW_COMING_IN);
#endif	/* THREAD_SWAPPER */

	assert(cpu_data[cpu].active_thread == thread);
	active_kloaded[cpu] = (new->kernel_loaded) ? new : 0;

	/* This is where all the work happens */
	machine_switch_act(thread, old, new, cpu);

	/*
	 * Push or pop an activation on the chain.
	 */
	if (act) {
		act_attach(new, thread, 0);
	}
	else {
		act_detach(old);
	}

	enable_preemption();

	return(old);
}

/*
 * install_special_handler
 *	Install the special returnhandler that handles suspension and
 *	termination, if it hasn't been installed already.
 *
 * Already locked: RPC-related locks for thr_act, but not
 * scheduling lock (thread_lock()) of the associated thread.
 */
void
install_special_handler(
	thread_act_t	thr_act)
{
	spl_t		spl;
	thread_t	thread = thr_act->thread;

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
	    printf("act_%x: install_special_hdlr(%x)\n",current_act(),thr_act);
#endif	/* MACH_ASSERT */

	spl = splsched();
	thread_lock(thread);
	install_special_handler_locked(thr_act);
	thread_unlock(thread);
	splx(spl);
}

/*
 * install_special_handler_locked
 *	Do the work of installing the special_handler.
 *
 * Already locked: RPC-related locks for thr_act, plus the
 * scheduling lock (thread_lock()) of the associated thread.
 */
void
install_special_handler_locked(
	thread_act_t	thr_act)
{
	ReturnHandler	**rh;
	thread_t	thread = thr_act->thread;

	/* The work handler must always be the last ReturnHandler on the list,
	   because it can do tricky things like detach the thr_act. */
	for (rh = &thr_act->handlers; *rh; rh = &(*rh)->next)
		/* */ ;
	if (rh != &thr_act->special_handler.next) {
		*rh = &thr_act->special_handler;
	}
	if (thread && thr_act == thread->top_act) {
		/*
		 * Temporarily undepress, so target has
		 * a chance to do locking required to
		 * block itself in special_handler().
		 */
		if (thread->depress_priority >= 0) {
			thread->priority = thread->depress_priority;

			/*
			 * Use special value -2 to indicate need
			 * to redepress priority in special_handler
			 * as thread blocks
			 */
			thread->depress_priority = -2;
			compute_priority(thread, FALSE);
		}
	}
	act_set_apc(thr_act);
}

/*
 * JMM -
 * These two routines will be enhanced over time to call the general handler
 * registration mechanism used by special handlers and alerts.  They are
 * hacked in for now to avoid having to export the gory details of ASTs to
 * the BSD code right now.
 */
extern thread_apc_handler_t	bsd_ast;

kern_return_t
thread_apc_set(
	thread_act_t		thr_act,
	thread_apc_handler_t	apc)
{
	assert(apc == bsd_ast);
	thread_ast_set(thr_act, AST_BSD);
	if (thr_act == current_act())
		ast_propagate(thr_act->ast);
	return KERN_SUCCESS;
}

kern_return_t
thread_apc_clear(
	thread_act_t		thr_act,
	thread_apc_handler_t	apc)
{
	assert(apc == bsd_ast);
	thread_ast_clear(thr_act, AST_BSD);
	if (thr_act == current_act())
		ast_off(AST_BSD);
	return KERN_SUCCESS;
}

/*
 * act_set_thread_pool	- Assign an activation to a specific thread_pool.
 *	Fails if the activation is already assigned to another pool.
 *	If pool_port == 0, we remove the thr_act from its thread_pool.
 *
 * Called with the port containing thread_pool already locked.
 * Returns the same way.
 */
kern_return_t act_set_thread_pool(
	thread_act_t	thr_act,
	ipc_port_t	pool_port)
{
	thread_pool_t	thread_pool;

#if	MACH_ASSERT
	if (watchacts & WA_ACT_LNK)
		printf("act_set_thread_pool: %x(%d) -> %x\n",
		       thr_act, thr_act->ref_count, thread_pool);
#endif	/* MACH_ASSERT */

	if (pool_port == 0) {
		thread_act_t *lact;

		if (thr_act->pool_port == 0)
			return KERN_SUCCESS;
		thread_pool = &thr_act->pool_port->ip_thread_pool;

		for (lact = &thread_pool->thr_acts; *lact;
					lact = &((*lact)->thread_pool_next)) {
			if (thr_act == *lact) {
				*lact = thr_act->thread_pool_next;
				break;
			}
		}
		act_lock(thr_act);
		thr_act->pool_port = 0;
		thr_act->thread_pool_next = 0;
		act_unlock(thr_act);
		act_deallocate(thr_act);
		return KERN_SUCCESS;
	}
	if (thr_act->pool_port != pool_port) {
		thread_pool = &pool_port->ip_thread_pool;
		if (thr_act->pool_port != 0) {
#if	MACH_ASSERT
			if (watchacts & WA_ACT_LNK)
			    printf("act_set_thread_pool found %x!\n",
				   thr_act->pool_port);
#endif	/* MACH_ASSERT */
			return(KERN_FAILURE);
		}
		act_lock(thr_act);
		thr_act->pool_port = pool_port;

		/* The pool gets a ref to the activation -- have
		 * to inline operation because thr_act is already
		 * locked.
		 */
		act_locked_act_reference(thr_act);

		/* If it is available,
		 * add it to the thread_pool's available-activation list.
		 */
		if ((thr_act->thread == 0) && (thr_act->suspend_count == 0)) {
			thr_act->thread_pool_next = thread_pool->thr_acts;
			pool_port->ip_thread_pool.thr_acts = thr_act;
			if (thread_pool->waiting)
				thread_pool_wakeup(thread_pool);
		}
		act_unlock(thr_act);
	}

	return KERN_SUCCESS;
}

/*
 * act_locked_act_set_thread_pool - Assign activation to a specific
 *	thread_pool.  Fails if the activation is already assigned to
 *	another pool.  If pool_port == 0, we remove the thr_act from
 *	its thread_pool.
 *
 * Called with the port containing thread_pool already locked.
 * Also called with the thread activation locked.
 * Returns the same way.
 *
 * This routine is the same as `act_set_thread_pool()' except that it does
 * not call `act_deallocate(),' which unconditionally tries to obtain the
 * thread activation lock.
 */
kern_return_t act_locked_act_set_thread_pool(
	thread_act_t	thr_act,
	ipc_port_t	pool_port)
{
	thread_pool_t	thread_pool;

#if	MACH_ASSERT
	if (watchacts & WA_ACT_LNK)
		printf("act_set_thread_pool: %x(%d) -> %x\n",
		       thr_act, thr_act->ref_count, thread_pool);
#endif	/* MACH_ASSERT */

	if (pool_port == 0) {
		thread_act_t *lact;

		if (thr_act->pool_port == 0)
			return KERN_SUCCESS;
		thread_pool = &thr_act->pool_port->ip_thread_pool;

		for (lact = &thread_pool->thr_acts; *lact;
					lact = &((*lact)->thread_pool_next)) {
			if (thr_act == *lact) {
				*lact = thr_act->thread_pool_next;
				break;
			}
		}

		thr_act->pool_port = 0;
		thr_act->thread_pool_next = 0;
		act_locked_act_deallocate(thr_act);
		return KERN_SUCCESS;
	}
	if (thr_act->pool_port != pool_port) {
		thread_pool = &pool_port->ip_thread_pool;
		if (thr_act->pool_port != 0) {
#if	MACH_ASSERT
			if (watchacts & WA_ACT_LNK)
			    printf("act_set_thread_pool found %x!\n",
				   thr_act->pool_port);
#endif	/* MACH_ASSERT */
			return(KERN_FAILURE);
		}
		thr_act->pool_port = pool_port;

		/* The pool gets a ref to the activation -- have
		 * to inline operation because thr_act is already
		 * locked.
		 */
		act_locked_act_reference(thr_act);

		/* If it is available,
		 * add it to the thread_pool's available-activation list.
		 */
		if ((thr_act->thread == 0) && (thr_act->suspend_count == 0)) {
			thr_act->thread_pool_next = thread_pool->thr_acts;
			pool_port->ip_thread_pool.thr_acts = thr_act;
			if (thread_pool->waiting)
				thread_pool_wakeup(thread_pool);
		}
	}

	return KERN_SUCCESS;
}

/*
 * Activation control support routines internal to this file:
 */

/*
 * act_execute_returnhandlers()	- does just what the name says
 *
 * This is called by system-dependent code when it detects that
 * thr_act->handlers is non-null while returning into user mode.
 * Activations linked onto a thread_pool always have null thr_act->handlers,
 * so RPC entry paths need not check it.
 */
void act_execute_returnhandlers(
	void)
{
	spl_t		s;
	thread_t	thread;
	thread_act_t	thr_act = current_act();

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("execute_rtn_hdlrs: thr_act=%x\n", thr_act);
#endif	/* MACH_ASSERT */

	s = splsched();
	act_clr_apc(thr_act);
	spllo();
	while (1) {
		ReturnHandler	*rh;

		/* Grab the next returnhandler */
		thread = act_lock_thread(thr_act);
		(void)splsched();
		thread_lock(thread);
		rh = thr_act->handlers;
		if (!rh) {
			thread_unlock(thread);
			splx(s);
			act_unlock_thread(thr_act);
			return;
		}
		thr_act->handlers = rh->next;
		thread_unlock(thread);
		spllo();
		act_unlock_thread(thr_act);

#if	MACH_ASSERT
		if (watchacts & WA_ACT_HDLR)
		    printf( (rh == &thr_act->special_handler) ?
			"\tspecial_handler\n" : "\thandler=%x\n",
			rh->handler);
#endif	/* MACH_ASSERT */

		/* Execute it */
		(*rh->handler)(rh, thr_act);
	}
}

/*
 * special_handler_continue
 *
 * Continuation routine for the special handler blocks.  It checks
 * to see whether there have been any new suspensions.  If so, it
 * installs the special handler again.  Otherwise, it checks to see
 * if the current depression needs to be re-instated (it may have
 * been temporarily removed in order to get to this point in a hurry).
 */
void
special_handler_continue(void)
{
	thread_act_t	cur_act = current_act();
	thread_t	thread = cur_act->thread;
	spl_t		s;

	if (cur_act->suspend_count)
		install_special_handler(cur_act);
	else {
		s = splsched();
		thread_lock(thread);
		if (thread->depress_priority == -2) {
			/*
			 * We were temporarily undepressed by
			 * install_special_handler; restore priority
			 * depression.
			 */
			thread->depress_priority = thread->priority;
			thread->priority = thread->sched_pri = DEPRESSPRI;
		}
		thread_unlock(thread);
		splx(s);
	}
	thread_exception_return();
}

/*
 * special_handler	- handles suspension, termination.  Called
 * with nothing locked.  Returns (if it returns) the same way.
 */
void
special_handler(
	ReturnHandler	*rh,
	thread_act_t	cur_act)
{
	spl_t		s;
	thread_t	lthread;
	thread_t	thread = act_lock_thread(cur_act);
	unsigned	alert_bits;
	exception_data_type_t
			codes[EXCEPTION_CODE_MAX];
	kern_return_t	kr;
	kern_return_t	exc_kr;

	assert(thread != THREAD_NULL);
#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
	    printf("\t\tspecial_handler(thr_act=%x(%d))\n", cur_act,
		   (cur_act ? cur_act->ref_count : 0));
#endif	/* MACH_ASSERT */

	s = splsched();

	thread_lock(thread);
	thread->state &= ~TH_ABORT;	/* clear any aborts */
	thread_unlock(thread);
	splx(s);

	/*
	 * If someone has killed this invocation,
	 * invoke the return path with a terminated exception.
	 */
	if (!cur_act->active) {
		act_unlock_thread(cur_act);
		act_machine_return(KERN_TERMINATED);
	}
#ifdef CALLOUT_RPC_MODEL
	/*
	 * JMM - We don't intend to support this RPC model in Darwin.
	 * We will support inheritance through chains of activations
	 * on shuttles, but it will be universal and not just for RPC.
	 * As such, each activation will always have a base shuttle.
	 * Our RPC model will probably even support the notion of
	 * alerts (thrown up the chain of activations to affect the
	 * work done on our behalf), but the unlinking of the shuttles
	 * will be completely different because we will never have
	 * to clone them.
	 */

	/* strip server terminated bit */
	alert_bits = cur_act->alerts & (~SERVER_TERMINATED);

	/* clear server terminated bit */
	cur_act->alerts &= ~SERVER_TERMINATED;

	if ( alert_bits ) {
		/*
		 * currently necessary to coordinate with the exception
		 * code -fdr
		 */
		act_unlock_thread(cur_act);

		/* upcall exception/alert port */
		codes[0] = alert_bits;

		/*
		 * Exception makes a lot of assumptions.  If there is no
		 * exception handler or the exception reply is broken, the
		 * thread will be terminated and exception will not return.
		 * If we decide we don't like that behavior, we need to check
		 * for the existence of an exception port before we call
		 * exception.
		 */
		exc_kr = exception( EXC_RPC_ALERT, codes, 1 );

		/* clear the orphaned and time constraint indications */
		cur_act->alerts &= ~(ORPHANED | TIME_CONSTRAINT_UNSATISFIED);

		/* if this orphaned activation should be terminated... */
		if (exc_kr == KERN_RPC_TERMINATE_ORPHAN) {
			/*
			 * ... terminate the activation
			 *
			 * This is done in two steps.  First, the activation is
			 * disabled (prepared for termination); second, the
			 * `special_handler()' is executed again -- this time
			 * to terminate the activation.
			 * (`act_disable_task_locked()' arranges for the
			 * additional execution of the `special_handler().')
			 */

#if	THREAD_SWAPPER
			thread_swap_disable(cur_act);
#endif	/* THREAD_SWAPPER */

			/* acquire appropriate locks */
			task_lock(cur_act->task);
			act_lock_thread(cur_act);

			/* detach the activation from its task */
			kr = act_disable_task_locked(cur_act);
			assert( kr == KERN_SUCCESS );

			/* release locks */
			task_unlock(cur_act->task);
		}
		else {
			/* acquire activation lock again (released below) */
			act_lock_thread(cur_act);
			s = splsched();
			thread_lock(thread);
			if (thread->depress_priority == -2) {
				/*
				 * We were temporarily undepressed by
				 * install_special_handler; restore priority
				 * depression.
				 */
				thread->depress_priority = thread->priority;
				thread->priority = thread->sched_pri = DEPRESSPRI;
			}
			thread_unlock(thread);
			splx(s);
		}
	}
#endif	/* CALLOUT_RPC_MODEL */

	/*
	 * If we're suspended, go to sleep and wait for someone to wake us up.
	 */
	if (cur_act->suspend_count) {
		if( cur_act->handlers == NULL ) {
			assert_wait((event_t)&cur_act->suspend_count,
					THREAD_ABORTSAFE);
			act_unlock_thread(cur_act);
			thread_block(special_handler_continue);
			/* NOTREACHED */
		}
		special_handler_continue();
	}

	act_unlock_thread(cur_act);
}

/*
 * Try to nudge a thr_act into executing its returnhandler chain.
 * Ensures that the activation will execute its returnhandlers
 * before it next executes any of its user-level code.
 *
 * Called with thr_act's act_lock() and "appropriate" thread-related
 * locks held.  (See act_lock_thread().)  Returns same way.
 */
void
nudge(thread_act_t	thr_act)
{
#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("\tact_%x: nudge(%x)\n", current_act(), thr_act);
#endif	/* MACH_ASSERT */

	/*
	 * Don't need to do anything at all if this thr_act isn't the topmost.
	 */
	if (thr_act->thread && thr_act->thread->top_act == thr_act) {
		/*
		 * If it's suspended, wake it up.
		 * This should nudge it even on another CPU.
		 */
		thread_wakeup((event_t)&thr_act->suspend_count);
	}
}

/*
 * Update activation that belongs to a task created via kernel_task_create().
 */
void
act_user_to_kernel(
	thread_act_t	thr_act)
{
	pcb_user_to_kernel(thr_act);
	thr_act->kernel_loading = TRUE;
}

/*
 * Already locked: thr_act->task, RPC-related locks for thr_act
 *
 * Detach an activation from its task, and prepare it to terminate
 * itself.
 */
kern_return_t
act_disable_task_locked(
	thread_act_t	thr_act)
{
	thread_t	thread = thr_act->thread;
	task_t		task = thr_act->task;

#if	MACH_ASSERT
	if (watchacts & WA_EXIT) {
		printf("act_%x: act_disable_tl(thr_act=%x(%d))%sactive task=%x(%d)",
			       current_act(), thr_act, thr_act->ref_count,
			       (thr_act->active ? " " : " !"),
			       thr_act->task, thr_act->task? thr_act->task->ref_count : 0);
		if (thr_act->pool_port)
			printf(", pool_port %x", thr_act->pool_port);
		printf("\n");
		(void) dump_act(thr_act);
	}
#endif	/* MACH_ASSERT */

	/* This will allow no more control ops on this thr_act. */
	thr_act->active = 0;
	ipc_thr_act_disable(thr_act);

	/* Clean-up any ulocks that are still owned by the thread
	 * activation (acquired but not released or handed-off).
	 */
	act_ulock_release_all(thr_act);

	/* When the special_handler gets executed,
	 * it will see the terminated condition and exit
	 * immediately.
	 */
	install_special_handler(thr_act);


	/* If the target happens to be suspended,
	 * give it a nudge so it can exit.
	 */
	if (thr_act->suspend_count)
		nudge(thr_act);

	/* Drop the thr_act reference taken for being active.
	 * (There is still at least one reference left:
	 * the one we were passed.)
	 * Inline the deallocate because thr_act is locked.
	 */
	act_locked_act_deallocate(thr_act);

	return(KERN_SUCCESS);
}

/*
 * act_alert	- Register an alert from this activation.
 *
 * Each set bit is propagated upward from (but not including) this activation,
 * until the top of the chain is reached or the bit is masked.
 */
kern_return_t
act_alert(thread_act_t thr_act, unsigned alerts)
{
	thread_t thread = act_lock_thread(thr_act);

#if	MACH_ASSERT
	if (watchacts & WA_ACT_LNK)
		printf("act_alert %x: %x\n", thr_act, alerts);
#endif	/* MACH_ASSERT */

	if (thread) {
		thread_act_t act_up = thr_act;
		while ((alerts) && (act_up != thread->top_act)) {
			act_up = act_up->higher;
			alerts &= act_up->alert_mask;
			act_up->alerts |= alerts;
		}
		/*
		 * XXXX If we reach the top, and it is blocked in glue
		 * code, do something to kick it.  XXXX
		 */
	}
	act_unlock_thread(thr_act);

	return KERN_SUCCESS;
}

kern_return_t act_alert_mask(thread_act_t thr_act, unsigned alert_mask)
{
	panic("act_alert_mask NOT YET IMPLEMENTED\n");
	return KERN_SUCCESS;
}

typedef struct GetSetState {
	struct ReturnHandler rh;
	int flavor;
	void *state;
	int *pcount;
	int result;
} GetSetState;

/* Local Forward decls */
kern_return_t get_set_state(
			thread_act_t thr_act, int flavor,
			thread_state_t state, int *pcount,
			void (*handler)(ReturnHandler *rh, thread_act_t thr_act));
void get_state_handler(ReturnHandler *rh, thread_act_t thr_act);
void set_state_handler(ReturnHandler *rh, thread_act_t thr_act);
/*
 * get_set_state(thr_act ...)
 *
 * General code to install g/set_state handler.
 * Called with thr_act's act_lock() and "appropriate"
 * thread-related locks held.  (See act_lock_thread().)
 */
kern_return_t
get_set_state(thread_act_t thr_act, int flavor, thread_state_t state, int *pcount,
		void (*handler)(ReturnHandler *rh, thread_act_t thr_act))
{
	GetSetState	gss;
	spl_t		s;

	/* Initialize a small parameter structure */
	gss.rh.handler = handler;
	gss.flavor = flavor;
	gss.state = state;
	gss.pcount = pcount;
	gss.result = KERN_ABORTED;	/* iff wait below is interrupted */

	/* Add it to the thr_act's return handler list */
	gss.rh.next = thr_act->handlers;
	thr_act->handlers = &gss.rh;

	s = splsched();
	act_set_apc(thr_act);
	splx(s);

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR) {
	    printf("act_%x: get_set_state(thr_act=%x flv=%x state=%x ptr@%x=%x)",
		    current_act(), thr_act, flavor, state,
		    pcount, (pcount ? *pcount : 0));
	    printf((handler == get_state_handler ? "get_state_hdlr\n" :
		    (handler == set_state_handler ? "set_state_hdlr\n" :
		       "hndler=%x\n")), handler);
	}
#endif	/* MACH_ASSERT */

	assert(thr_act->thread);	/* Callers must ensure these */
	assert(thr_act != current_act());
	for (;;) {
		nudge(thr_act);
		/*
		 * Wait must be interruptible to avoid deadlock (e.g.) with
		 * task_suspend() when caller and target of get_set_state()
		 * are in same task.
		 */
		assert_wait((event_t)&gss, THREAD_ABORTSAFE);
		act_unlock_thread(thr_act);
		thread_block((void (*)(void))0);
		if (gss.result != KERN_ABORTED)
			break;
		if (current_act()->handlers)
			act_execute_returnhandlers();
		act_lock_thread(thr_act);
	}

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
	    printf("act_%x: get_set_state returns %x\n",
		    current_act(), gss.result);
#endif	/* MACH_ASSERT */

	return gss.result;
}
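
/*
 * Illustrative sketch (not part of the original source): the wrappers
 * below are the intended clients of get_set_state() -- each queues a
 * GetSetState ReturnHandler on the target, nudges it, and sleeps until
 * the handler runs in the target's own context and wakes the waiter.
 *
 *	(void) act_lock_thread(thr_act);	caller, not the target
 *	kr = act_get_state_locked(thr_act, flavor, state, &count);
 *	get_set_state() has already dropped the locks by the time
 *	it returns
 */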

void
set_state_handler(ReturnHandler *rh, thread_act_t thr_act)
{
	GetSetState *gss = (GetSetState*)rh;

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
	    printf("act_%x: set_state_handler(rh=%x,thr_act=%x)\n",
		    current_act(), rh, thr_act);
#endif	/* MACH_ASSERT */

	gss->result = act_machine_set_state(thr_act, gss->flavor,
						gss->state, *gss->pcount);
	thread_wakeup((event_t)gss);
}

void
get_state_handler(ReturnHandler *rh, thread_act_t thr_act)
{
	GetSetState *gss = (GetSetState*)rh;

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
	    printf("act_%x: get_state_handler(rh=%x,thr_act=%x)\n",
		    current_act(), rh, thr_act);
#endif	/* MACH_ASSERT */

	gss->result = act_machine_get_state(thr_act, gss->flavor,
			gss->state,
			(mach_msg_type_number_t *) gss->pcount);
	thread_wakeup((event_t)gss);
}

kern_return_t
act_get_state_locked(thread_act_t thr_act, int flavor, thread_state_t state,
					mach_msg_type_number_t *pcount)
{
#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
	    printf("act_%x: act_get_state_L(thr_act=%x,flav=%x,st=%x,pcnt@%x=%x)\n",
		    current_act(), thr_act, flavor, state, pcount,
		    (pcount ? *pcount : 0));
#endif	/* MACH_ASSERT */

	return(get_set_state(thr_act, flavor, state, (int*)pcount, get_state_handler));
}

kern_return_t
act_set_state_locked(thread_act_t thr_act, int flavor, thread_state_t state,
					mach_msg_type_number_t count)
{
#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
	    printf("act_%x: act_set_state_L(thr_act=%x,flav=%x,st=%x,pcnt@%x=%x)\n",
		    current_act(), thr_act, flavor, state, count, count);
#endif	/* MACH_ASSERT */

	return(get_set_state(thr_act, flavor, state, (int*)&count, set_state_handler));
}

kern_return_t
act_set_state(thread_act_t thr_act, int flavor, thread_state_t state,
					mach_msg_type_number_t count)
{
	if (thr_act == THR_ACT_NULL || thr_act == current_act())
		return(KERN_INVALID_ARGUMENT);

	act_lock_thread(thr_act);
	return(act_set_state_locked(thr_act, flavor, state, count));

}

kern_return_t
act_get_state(thread_act_t thr_act, int flavor, thread_state_t state,
					mach_msg_type_number_t *pcount)
{
	if (thr_act == THR_ACT_NULL || thr_act == current_act())
		return(KERN_INVALID_ARGUMENT);

	act_lock_thread(thr_act);
	return(act_get_state_locked(thr_act, flavor, state, pcount));
}

/*
 * These two should be called at splsched()
 * Set/clear indicator to run APC (layered on ASTs)
 */
void
act_set_apc(thread_act_t thr_act)
{

	processor_t	prssr;
	thread_t	thread;

	mp_disable_preemption();

	thread_ast_set(thr_act, AST_APC);
	if (thr_act == current_act()) {
		ast_propagate(thr_act->ast);
		mp_enable_preemption();
		return;	/* If we are the current act, we can't be on another processor, so leave now */
	}

/*
 * Here we want to make sure that the apc is taken quickly.  Therefore, we check
 * if, and where, the activation is running.  If it is not running, we don't need to do
 * anything.  If it is, we need to signal the other processor to trigger it to
 * check the asts.  Note that there is a race here and we may end up sending a signal
 * after the thread has been switched off.  Hopefully this is no big deal.
 */

	thread = thr_act->thread;		/* Get the thread for the signaled activation */
	prssr = thread->last_processor;		/* get the processor it was last on */
	if(prssr && (cpu_data[prssr->slot_num].active_thread == thread)) {	/* Is the thread active on its processor? */
		cause_ast_check(prssr);		/* Yes, kick it */
	}

	mp_enable_preemption();
}

void
act_clr_apc(thread_act_t thr_act)
{
	thread_ast_clear(thr_act, AST_APC);
}

void
act_ulock_release_all(thread_act_t thr_act)
{
	ulock_t	ulock;

	while (!queue_empty(&thr_act->held_ulocks)) {
		ulock = (ulock_t) queue_first(&thr_act->held_ulocks);
		(void) lock_make_unstable(ulock, thr_act);
		(void) lock_release_internal(ulock, thr_act);
	}
}

/*
 * Provide routines (for export to other components) of things that
 * are implemented as macros internally.
 */
#undef current_act
thread_act_t
current_act(void)
{
	return(current_act_fast());
}

thread_act_t
thread_self(void)
{
	thread_act_t self = current_act_fast();

	act_reference(self);
	return self;
}

thread_act_t
mach_thread_self(void)
{
	thread_act_t self = current_act_fast();

	act_reference(self);
	return self;
}

#undef act_reference
void
act_reference(
	thread_act_t	thr_act)
{
	act_reference_fast(thr_act);
}

#undef act_deallocate
void
act_deallocate(
	thread_act_t	thr_act)
{
	act_deallocate_fast(thr_act);
}