1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_FREE_COPYRIGHT@
24 */
25 /*
26 * Copyright (c) 1993 The University of Utah and
27 * the Center for Software Science (CSS). All rights reserved.
28 *
29 * Permission to use, copy, modify and distribute this software and its
30 * documentation is hereby granted, provided that both the copyright
31 * notice and this permission notice appear in all copies of the
32 * software, derivative works or modified versions, and any portions
33 * thereof, and that both notices appear in supporting documentation.
34 *
35 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
36 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
37 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
38 *
39 * CSS requests users of this software to return to css-dist@cs.utah.edu any
40 * improvements that they make and grant CSS redistribution rights.
41 *
42 * Author: Bryan Ford, University of Utah CSS
43 *
44 * Thread_Activation management routines
45 */
46
47 #include <cpus.h>
48 #include <task_swapper.h>
49 #include <mach/kern_return.h>
50 #include <mach/alert.h>
51 #include <kern/etap_macros.h>
52 #include <kern/mach_param.h>
53 #include <kern/zalloc.h>
54 #include <kern/thread.h>
55 #include <kern/thread_swap.h>
56 #include <kern/task.h>
57 #include <kern/task_swap.h>
58 #include <kern/thread_act.h>
59 #include <kern/thread_pool.h>
60 #include <kern/sched_prim.h>
61 #include <kern/misc_protos.h>
62 #include <kern/assert.h>
63 #include <kern/exception.h>
64 #include <kern/ipc_mig.h>
65 #include <kern/ipc_tt.h>
66 #include <kern/profile.h>
67 #include <kern/machine.h>
68 #include <kern/spl.h>
69 #include <kern/syscall_subr.h>
70 #include <kern/sync_lock.h>
71 #include <kern/sf.h>
72 #include <kern/mk_sp.h> /*** ??? fix so this can be removed ***/
73 #include <mach_prof.h>
74 #include <mach/rpc.h>
75
76 /*
77 * Debugging printf control
78 */
79 #if MACH_ASSERT
80 unsigned int watchacts = 0; /* WA_ALL -- do-it-yourself & patchable */
81
82 #endif
83
84 /*
85 * Track the number of times we need to swap in a thread to deallocate it.
86 */
87 int act_free_swapin = 0;
88
89 /*
90 * Forward declarations for functions local to this file.
91 */
92 kern_return_t act_abort( thread_act_t, int);
93 void special_handler(ReturnHandler *, thread_act_t);
94 void nudge(thread_act_t);
95 kern_return_t act_set_state_locked(thread_act_t, int,
96 thread_state_t,
97 mach_msg_type_number_t);
98 kern_return_t act_get_state_locked(thread_act_t, int,
99 thread_state_t,
100 mach_msg_type_number_t *);
101 void act_set_apc(thread_act_t);
102 void act_clr_apc(thread_act_t);
103 void act_user_to_kernel(thread_act_t);
104 void act_ulock_release_all(thread_act_t thr_act);
105
106 void install_special_handler_locked(thread_act_t);
107
108 static zone_t thr_act_zone;
109
110 /*
111 * Thread interfaces accessed via a thread_activation:
112 */
113
114
115 /*
116 * Internal routine to terminate a thread.
117 * Called with task locked.
118 */
119 kern_return_t
120 thread_terminate_internal(
121 register thread_act_t thr_act)
122 {
123 thread_t thread;
124 task_t task;
125 struct ipc_port *iplock;
126 kern_return_t ret;
127 #if NCPUS > 1
128 boolean_t held;
129 #endif /* NCPUS > 1 */
130
131 #if THREAD_SWAPPER
132 thread_swap_disable(thr_act);
133 #endif /* THREAD_SWAPPER */
134
135 thread = act_lock_thread(thr_act);
136 if (!thr_act->active) {
137 act_unlock_thread(thr_act);
138 return(KERN_TERMINATED);
139 }
140
141 #if NCPUS > 1
142 /*
143 * Make sure this thread enters the kernel
144 */
145 if (thread != current_thread()) {
146 thread_hold(thr_act);
147 act_unlock_thread(thr_act);
148
149 if (!thread_stop_wait(thread)) {
150 ret = KERN_ABORTED;
151 (void)act_lock_thread(thr_act);
152 thread_release(thr_act);
153 act_unlock_thread(thr_act);
154 return (ret);
155 }
156
157 held = TRUE;
158 (void)act_lock_thread(thr_act);
159 } else {
160 held = FALSE;
161 }
162 #endif /* NCPUS > 1 */
163
164 assert(thr_act->active);
165 act_disable_task_locked(thr_act);
166 ret = act_abort(thr_act,FALSE);
167 act_unlock_thread(thr_act);
168
169 #if NCPUS > 1
170 if (held) {
171 thread_unstop(thread);
172 (void)act_lock_thread(thr_act);
173 thread_release(thr_act);
174 act_unlock_thread(thr_act);
175 }
176 #endif /* NCPUS > 1 */
177 return(ret);
178 }
179
180 /*
181 * Terminate a thread. Called with nothing locked.
182 * Returns same way.
183 */
184 kern_return_t
185 thread_terminate(
186 register thread_act_t thr_act)
187 {
188 task_t task;
189 kern_return_t ret;
190
191 if (thr_act == THR_ACT_NULL)
192 return KERN_INVALID_ARGUMENT;
193
194 task = thr_act->task;
195 if (((task == kernel_task) || (thr_act->kernel_loaded == TRUE))
196 && (current_act() != thr_act)) {
197 return(KERN_FAILURE);
198 }
199
200 /*
201 * Take the task lock and then call the internal routine
202 * that terminates a thread (it needs the task locked).
203 */
204 task_lock(task);
205 ret = thread_terminate_internal(thr_act);
206 task_unlock(task);
207
208 /*
209 * If a kernel thread is terminating itself, force an AST here.
210 * Kernel threads don't normally pass through the AST checking
211 * code - and all threads finish their own termination in the
212 * special handler APC.
213 */
214 if (((thr_act->task == kernel_task) || (thr_act->kernel_loaded == TRUE))
215 && (current_act() == thr_act)) {
216 ast_taken(FALSE, AST_APC, 0);
217 panic("thread_terminate(): returning from ast_taken() for %x kernel activation\n", thr_act);
218 }
219
220 return ret;
221 }
222
223 /*
224 * thread_hold:
225 *
226 * Suspend execution of the specified thread.
227 * This is a recursive-style suspension of the thread, a count of
228 * suspends is maintained.
229 *
230 * Called with thr_act locked "appropriately" for synchrony with
231 * RPC (see act_lock_thread()). Returns same way.
232 */
233 void
234 thread_hold(
235 register thread_act_t thr_act)
236 {
237 if (thr_act->suspend_count++ == 0) {
238 install_special_handler(thr_act);
239 nudge(thr_act);
240 }
241 }
242
243 /*
244 * Decrement internal suspension count for thr_act, setting thread
245 * runnable when count falls to zero.
246 *
247 * Called with thr_act locked "appropriately" for synchrony
248 * with RPC (see act_lock_thread()).
249 */
250 void
251 thread_release(
252 register thread_act_t thr_act)
253 {
254 if( thr_act->suspend_count &&
255 (--thr_act->suspend_count == 0) )
256 nudge( thr_act );
257 }
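/*
 * Usage sketch (illustrative): hold and release are counted and must
 * balance, and both assume the activation is locked via
 * act_lock_thread().  A typical internal caller:
 *
 *	thread_t thread = act_lock_thread(thr_act);
 *	thread_hold(thr_act);		-- suspend_count 0 -> 1, installs
 *					-- the special handler
 *	...operate on the held activation...
 *	thread_release(thr_act);	-- suspend_count 1 -> 0, nudge()s
 *	act_unlock_thread(thr_act);
 */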
258
259 kern_return_t
260 thread_suspend(
261 register thread_act_t thr_act)
262 {
263 thread_t thread;
264
265 if (thr_act == THR_ACT_NULL) {
266 return(KERN_INVALID_ARGUMENT);
267 }
268 thread = act_lock_thread(thr_act);
269 if (!thr_act->active) {
270 act_unlock_thread(thr_act);
271 return(KERN_TERMINATED);
272 }
273 if (thr_act->user_stop_count++ == 0 &&
274 thr_act->suspend_count++ == 0 ) {
275 install_special_handler(thr_act);
276 if (thread &&
277 thr_act == thread->top_act && thread != current_thread()) {
278 nudge(thr_act);
279 act_unlock_thread(thr_act);
280 (void)thread_wait(thread);
281 }
282 else {
283 /*
284 * No need to wait for target thread
285 */
286 act_unlock_thread(thr_act);
287 }
288 }
289 else {
290 /*
291 * Thread is already suspended
292 */
293 act_unlock_thread(thr_act);
294 }
295 return(KERN_SUCCESS);
296 }
297
298 kern_return_t
299 thread_resume(
300 register thread_act_t thr_act)
301 {
302 register kern_return_t ret;
303 spl_t s;
304 thread_t thread;
305
306 if (thr_act == THR_ACT_NULL)
307 return(KERN_INVALID_ARGUMENT);
308 thread = act_lock_thread(thr_act);
309 ret = KERN_SUCCESS;
310
311 if (thr_act->active) {
312 if (thr_act->user_stop_count > 0) {
313 if( --thr_act->user_stop_count == 0 ) {
314 --thr_act->suspend_count;
315 nudge( thr_act );
316 }
317 }
318 else
319 ret = KERN_FAILURE;
320 }
321 else
322 ret = KERN_TERMINATED;
323 act_unlock_thread( thr_act );
324 return ret;
325 }
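/*
 * Usage sketch (illustrative): the exported suspend/resume pair is
 * counted via user_stop_count, so every successful thread_suspend()
 * should eventually be matched by a thread_resume():
 *
 *	kern_return_t kr = thread_suspend(thr_act);
 *	if (kr == KERN_SUCCESS) {
 *		...examine the stopped thread...
 *		(void) thread_resume(thr_act);
 *	}
 *
 * An unbalanced thread_resume() returns KERN_FAILURE rather than
 * driving the counts negative.
 */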
326
327 /*
328 * This routine walks toward the head of an RPC chain starting at
329 * a specified thread activation. An alert bit is set and a special
330 * handler is installed for each thread it encounters.
331 *
332 * The target thread act and thread shuttle are already locked.
333 */
334 kern_return_t
335 post_alert(
336 register thread_act_t thr_act,
337 unsigned alert_bits )
338 {
339 thread_act_t next;
340 thread_t thread;
341
342 /*
343 * Chase the chain, setting alert bits and installing
344 * special handlers for each thread act.
345 */
346 /*** Not yet SMP safe ***/
347 /*** Worse, where's the activation locking as the chain is walked? ***/
348 for (next = thr_act; next != THR_ACT_NULL; next = next->higher) {
349 next->alerts |= alert_bits;
350 install_special_handler_locked(next);
351 }
352
353 return(KERN_SUCCESS);
354 }
355
356 /*
357 * thread_depress_abort:
358 *
359 * Prematurely abort priority depression if there is one.
360 */
361 kern_return_t
362 thread_depress_abort(
363 register thread_act_t thr_act)
364 {
365 register thread_t thread;
366 kern_return_t result;
367 sched_policy_t *policy;
368 spl_t s;
369
370 if (thr_act == THR_ACT_NULL)
371 return (KERN_INVALID_ARGUMENT);
372
373 thread = act_lock_thread(thr_act);
374 /* if activation is terminating, this operation is not meaningful */
375 if (!thr_act->active) {
376 act_unlock_thread(thr_act);
377
378 return (KERN_TERMINATED);
379 }
380
381 s = splsched();
382 thread_lock(thread);
383 policy = &sched_policy[thread->policy];
384 thread_unlock(thread);
385 splx(s);
386
387 result = policy->sp_ops.sp_thread_depress_abort(policy, thread);
388
389 act_unlock_thread(thr_act);
390
391 return (result);
392 }
393
394
395 /*
396 * Already locked: all RPC-related locks for thr_act (see
397 * act_lock_thread()).
398 */
399 kern_return_t
400 act_abort( thread_act_t thr_act, int chain_break )
401 {
402 spl_t spl;
403 thread_t thread;
404 struct ipc_port *iplock = thr_act->pool_port;
405 thread_act_t orphan;
406 kern_return_t kr;
407 etap_data_t probe_data;
408
409 ETAP_DATA_LOAD(probe_data[0], thr_act);
410 ETAP_DATA_LOAD(probe_data[1], thr_act->thread);
411 ETAP_PROBE_DATA(ETAP_P_ACT_ABORT,
412 0,
413 current_thread(),
414 &probe_data,
415 ETAP_DATA_ENTRY*2);
416
417 /*
418 * If the target thread activation is not the head...
419 */
420 if ( thr_act->thread->top_act != thr_act ) {
421 /*
422 * mark the activation for abort,
423 * update the suspend count,
424 * always install the special handler
425 */
426 install_special_handler(thr_act);
427
428 #ifdef AGGRESSIVE_ABORT
429 /* release state buffer for target's outstanding invocation */
430 if (unwind_invoke_state(thr_act) != KERN_SUCCESS) {
431 panic("unwind_invoke_state failure");
432 }
433
434 /* release state buffer for target's incoming invocation */
435 if (thr_act->lower != THR_ACT_NULL) {
436 if (unwind_invoke_state(thr_act->lower)
437 != KERN_SUCCESS) {
438 panic("unwind_invoke_state failure");
439 }
440 }
441
442 /* unlink target thread activation from shuttle chain */
443 if ( thr_act->lower == THR_ACT_NULL ) {
444 /*
445 * This is the root thread activation of the chain.
446 * Unlink the root thread act from the bottom of
447 * the chain.
448 */
449 thr_act->higher->lower = THR_ACT_NULL;
450 } else {
451 /*
452 * This thread act is in the middle of the chain.
453 * Unlink the thread act from the middle of the chain.
454 */
455 thr_act->higher->lower = thr_act->lower;
456 thr_act->lower->higher = thr_act->higher;
457
458 /* set the terminated bit for RPC return processing */
459 thr_act->lower->alerts |= SERVER_TERMINATED;
460 }
461
462 orphan = thr_act->higher;
463
464 /* remove the activation from its thread pool */
465 /* (note: this is okay for "rooted threads," too) */
466 act_locked_act_set_thread_pool(thr_act, IP_NULL);
467
468 /* (just to be thorough) release the IP lock */
469 if (iplock != IP_NULL) ip_unlock(iplock);
470
471 /* release one more reference for a rooted thread */
472 if (iplock == IP_NULL) act_locked_act_deallocate(thr_act);
473
474 /* Presumably, the only reference to this activation is
475 * now held by the caller of this routine. */
476 assert(thr_act->ref_count == 1);
477 #else /*AGGRESSIVE_ABORT*/
478 /* If there is a lower activation in the RPC chain... */
479 if (thr_act->lower != THR_ACT_NULL) {
480 /* ...indicate the server activation was terminated */
481 thr_act->lower->alerts |= SERVER_TERMINATED;
482 }
483 /* Mark (and process) any orphaned activations */
484 orphan = thr_act->higher;
485 #endif /*AGGRESSIVE_ABORT*/
486
487 /* indicate client of orphaned chain has been terminated */
488 orphan->alerts |= CLIENT_TERMINATED;
489
490 /*
491 * Set up posting of alert to headward portion of
492 * the RPC chain.
493 */
494 /*** fix me -- orphan act is not locked ***/
495 post_alert(orphan, ORPHANED);
496
497 /*
498 * Get attention of head of RPC chain.
499 */
500 nudge(thr_act->thread->top_act);
501 return (KERN_SUCCESS);
502 }
503
504 /*
505 * If the target thread act is at the head (top) of the chain, the
506 * thread has to be marked for abort and ripped out of any wait.
507 */
508 spl = splsched();
509 thread_lock(thr_act->thread);
510 if (thr_act->thread->top_act == thr_act) {
511 /* mark for abort, then rip the thread out of any wait */
512 thr_act->thread->state |= TH_ABORT;
513 clear_wait_internal(thr_act->thread, THREAD_INTERRUPTED);
514 thread_unlock(thr_act->thread);
515 splx(spl);
516 install_special_handler(thr_act);
517 nudge( thr_act );
518 }
519 return KERN_SUCCESS;
520 }
521
522 kern_return_t
523 thread_abort(
524 register thread_act_t thr_act)
525 {
526 int ret;
527 thread_t thread;
528
529 if (thr_act == THR_ACT_NULL || thr_act == current_act())
530 return (KERN_INVALID_ARGUMENT);
531 /*
532 * Lock the target thread and the current thread now,
533 * in case thread_halt() ends up being called below.
534 */
535 thread = act_lock_thread(thr_act);
536 if (!thr_act->active) {
537 act_unlock_thread(thr_act);
538 return(KERN_TERMINATED);
539 }
540
541 ret = act_abort( thr_act, FALSE );
542 act_unlock_thread( thr_act );
543 return ret;
544 }
545
546 kern_return_t
547 thread_abort_safely(
548 register thread_act_t thr_act)
549 {
550 thread_t thread;
551 spl_t s;
552
553 if (thr_act == THR_ACT_NULL || thr_act == current_act())
554 return(KERN_INVALID_ARGUMENT);
555
556 thread = act_lock_thread(thr_act);
557 if (!thr_act->active) {
558 act_unlock_thread(thr_act);
559 return(KERN_TERMINATED);
560 }
561 if (thread->top_act != thr_act) {
562 act_unlock_thread(thr_act);
563 return(KERN_FAILURE);
564 }
565 s = splsched();
566 thread_lock(thread);
567
568 if ( thread->at_safe_point ) {
569 /*
570 * It's an abortable wait; clear it, then
571 * let the thread go and return successfully.
572 */
573 clear_wait_internal(thread, THREAD_INTERRUPTED);
574 thread_unlock(thread);
575 act_unlock_thread(thr_act);
576 splx(s);
577 return KERN_SUCCESS;
578 }
579
580 /*
581 * if not stopped at a safepoint, just let it go and return failure.
582 */
583 thread_unlock(thread);
584 act_unlock_thread(thr_act);
585 splx(s);
586 return KERN_FAILURE;
587 }
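/*
 * Usage sketch (illustrative): thread_abort() interrupts the target
 * unconditionally, while thread_abort_safely() succeeds only if the
 * target is waiting at an abort-safe point (thread->at_safe_point).
 * A caller that prefers the gentle form can fall back:
 *
 *	kr = thread_abort_safely(thr_act);
 *	if (kr == KERN_FAILURE)
 *		kr = thread_abort(thr_act);	-- forcible fallback
 */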
588
589 /*** backward compatibility hacks ***/
590 #include <mach/thread_info.h>
591 #include <mach/thread_special_ports.h>
592 #include <ipc/ipc_port.h>
593 #include <mach/thread_act_server.h>
594
595 kern_return_t
596 thread_info(
597 thread_act_t thr_act,
598 thread_flavor_t flavor,
599 thread_info_t thread_info_out,
600 mach_msg_type_number_t *thread_info_count)
601 {
602 register thread_t thread;
603 kern_return_t result;
604
605 if (thr_act == THR_ACT_NULL)
606 return (KERN_INVALID_ARGUMENT);
607
608 thread = act_lock_thread(thr_act);
609 if (!thr_act->active) {
610 act_unlock_thread(thr_act);
611
612 return (KERN_TERMINATED);
613 }
614
615 result = thread_info_shuttle(thr_act, flavor,
616 thread_info_out, thread_info_count);
617
618 act_unlock_thread(thr_act);
619
620 return (result);
621 }
622
623 /*
624 * Routine: thread_get_special_port [kernel call]
625 * Purpose:
626 * Clones a send right for one of the thread's
627 * special ports.
628 * Conditions:
629 * Nothing locked.
630 * Returns:
631 * KERN_SUCCESS Extracted a send right.
632 * KERN_INVALID_ARGUMENT The thread is null.
633 * KERN_FAILURE The thread is dead.
634 * KERN_INVALID_ARGUMENT Invalid special port.
635 */
636
637 kern_return_t
638 thread_get_special_port(
639 thread_act_t thr_act,
640 int which,
641 ipc_port_t *portp)
642 {
643 ipc_port_t *whichp;
644 ipc_port_t port;
645 thread_t thread;
646
647 #if MACH_ASSERT
648 if (watchacts & WA_PORT)
649 printf("thread_get_special_port(thr_act=%x, which=%x port@%x=%x\n",
650 thr_act, which, portp, (portp ? *portp : 0));
651 #endif /* MACH_ASSERT */
652
653 if (!thr_act)
654 return KERN_INVALID_ARGUMENT;
655 thread = act_lock_thread(thr_act);
656 switch (which) {
657 case THREAD_KERNEL_PORT:
658 whichp = &thr_act->ith_sself;
659 break;
660
661 default:
662 act_unlock_thread(thr_act);
663 return KERN_INVALID_ARGUMENT;
664 }
665
666 if (!thr_act->active) {
667 act_unlock_thread(thr_act);
668 return KERN_FAILURE;
669 }
670
671 port = ipc_port_copy_send(*whichp);
672 act_unlock_thread(thr_act);
673
674 *portp = port;
675 return KERN_SUCCESS;
676 }
677
678 /*
679 * Routine: thread_set_special_port [kernel call]
680 * Purpose:
681 * Changes one of the thread's special ports,
682 * setting it to the supplied send right.
683 * Conditions:
684 * Nothing locked. If successful, consumes
685 * the supplied send right.
686 * Returns:
687 * KERN_SUCCESS Changed the special port.
688 * KERN_INVALID_ARGUMENT The thread is null.
689 * KERN_FAILURE The thread is dead.
690 * KERN_INVALID_ARGUMENT Invalid special port.
691 */
692
693 kern_return_t
694 thread_set_special_port(
695 thread_act_t thr_act,
696 int which,
697 ipc_port_t port)
698 {
699 ipc_port_t *whichp;
700 ipc_port_t old;
701 thread_t thread;
702
703 #if MACH_ASSERT
704 if (watchacts & WA_PORT)
705 printf("thread_set_special_port(thr_act=%x,which=%x,port=%x\n",
706 thr_act, which, port);
707 #endif /* MACH_ASSERT */
708
709 if (thr_act == 0)
710 return KERN_INVALID_ARGUMENT;
711
712 thread = act_lock_thread(thr_act);
713 switch (which) {
714 case THREAD_KERNEL_PORT:
715 whichp = &thr_act->ith_sself;
716 break;
717
718 default:
719 act_unlock_thread(thr_act);
720 return KERN_INVALID_ARGUMENT;
721 }
722
723 if (!thr_act->active) {
724 act_unlock_thread(thr_act);
725 return KERN_FAILURE;
726 }
727
728 old = *whichp;
729 *whichp = port;
730 act_unlock_thread(thr_act);
731
732 if (IP_VALID(old))
733 ipc_port_release_send(old);
734 return KERN_SUCCESS;
735 }
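/*
 * Usage sketch (illustrative): the get/set pair traffics in send
 * rights.  thread_get_special_port() produces a new send right that
 * the caller must eventually release, and thread_set_special_port()
 * consumes the right it is handed:
 *
 *	ipc_port_t port;
 *	if (thread_get_special_port(thr_act, THREAD_KERNEL_PORT,
 *				    &port) == KERN_SUCCESS) {
 *		...use port...
 *		ipc_port_release_send(port);
 *	}
 */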
736
737 /*
738 * Thread state should always be accessible by locking the thread
739 * and copying it. Activations complicate this; for now, if the act
740 * is not the top of the chain, use a special handler to get the
741 * information when the shuttle returns to the activation.
742 */
743 kern_return_t
744 thread_get_state(
745 register thread_act_t thr_act,
746 int flavor,
747 thread_state_t state, /* pointer to OUT array */
748 mach_msg_type_number_t *state_count) /*IN/OUT*/
749 {
750 kern_return_t ret;
751 thread_t thread, nthread;
752
753 #if 0 /* Grenoble - why?? */
754 if (thr_act == THR_ACT_NULL || thr_act == current_act())
755 #else
756 if (thr_act == THR_ACT_NULL)
757 #endif
758 return (KERN_INVALID_ARGUMENT);
759
760 thread = act_lock_thread(thr_act);
761 if (!thr_act->active) {
762 act_unlock_thread(thr_act);
763 return(KERN_TERMINATED);
764 }
765
766 thread_hold(thr_act);
767 while (1) {
768 if (!thread || thr_act != thread->top_act)
769 break;
770 act_unlock_thread(thr_act);
771 (void)thread_stop_wait(thread);
772 nthread = act_lock_thread(thr_act);
773 if (nthread == thread)
774 break;
775 thread_unstop(thread);
776 thread = nthread;
777 }
778 ret = act_machine_get_state(thr_act, flavor,
779 state, state_count);
780 if (thread && thr_act == thread->top_act)
781 thread_unstop(thread);
782 thread_release(thr_act);
783 act_unlock_thread(thr_act);
784
785 return(ret);
786 }
787
788 /*
789 * Change thread's machine-dependent state. Called with nothing
790 * locked. Returns same way.
791 */
792 kern_return_t
793 thread_set_state(
794 register thread_act_t thr_act,
795 int flavor,
796 thread_state_t state,
797 mach_msg_type_number_t state_count)
798 {
799 kern_return_t ret;
800 thread_t thread, nthread;
801
802 #if 0 /* Grenoble - why?? */
803 if (thr_act == THR_ACT_NULL || thr_act == current_act())
804 #else
805 if (thr_act == THR_ACT_NULL)
806 #endif
807 return (KERN_INVALID_ARGUMENT);
808 /*
809 * We have no kernel activations, so Utah's MO fails for signals etc.
810 *
811 * If we're blocked in the kernel, use non-blocking method, else
812 * pass locked thr_act+thread in to "normal" act_[gs]et_state().
813 */
814
815 thread = act_lock_thread(thr_act);
816 if (!thr_act->active) {
817 act_unlock_thread(thr_act);
818 return(KERN_TERMINATED);
819 }
820
821 thread_hold(thr_act);
822 while (1) {
823 if (!thread || thr_act != thread->top_act)
824 break;
825 act_unlock_thread(thr_act);
826 (void)thread_stop_wait(thread);
827 nthread = act_lock_thread(thr_act);
828 if (nthread == thread)
829 break;
830 thread_unstop(thread);
831 thread = nthread;
832 }
833 ret = act_machine_set_state(thr_act, flavor,
834 state, state_count);
835 if (thread && thr_act == thread->top_act)
836 thread_unstop(thread);
837 thread_release(thr_act);
838 act_unlock_thread(thr_act);
839
840 return(ret);
841 }
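/*
 * Usage sketch (illustrative; the flavor name is machine-dependent
 * and the buffer sizing hypothetical).  The count is in/out on the
 * get side and in-only on the set side:
 *
 *	natural_t buf[THREAD_STATE_MAX];
 *	mach_msg_type_number_t count = THREAD_STATE_MAX;
 *
 *	kr = thread_get_state(thr_act, MACHINE_THREAD_STATE,
 *			      (thread_state_t)buf, &count);
 *	...modify buf...
 *	if (kr == KERN_SUCCESS)
 *		kr = thread_set_state(thr_act, MACHINE_THREAD_STATE,
 *				      (thread_state_t)buf, count);
 */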
842
843 /*
844 * Kernel-internal "thread" interfaces used outside this file:
845 */
846
847 kern_return_t
848 thread_dup(
849 thread_act_t source_thr_act,
850 thread_act_t target_thr_act)
851 {
852 kern_return_t ret;
853 thread_t thread, nthread;
854
855 if (target_thr_act == THR_ACT_NULL || target_thr_act == current_act())
856 return (KERN_INVALID_ARGUMENT);
857
858 thread = act_lock_thread(target_thr_act);
859 if (!target_thr_act->active) {
860 act_unlock_thread(target_thr_act);
861 return(KERN_TERMINATED);
862 }
863
864 thread_hold(target_thr_act);
865 while (1) {
866 if (!thread || target_thr_act != thread->top_act)
867 break;
868 act_unlock_thread(target_thr_act);
869 (void)thread_stop_wait(thread);
870 nthread = act_lock_thread(target_thr_act);
871 if (nthread == thread)
872 break;
873 thread_unstop(thread);
874 thread = nthread;
875 }
876 ret = act_thread_dup(source_thr_act, target_thr_act);
877 if (thread && target_thr_act == thread->top_act)
878 thread_unstop(thread);
879 thread_release(target_thr_act);
880 act_unlock_thread(target_thr_act);
881
882 return(ret);
883 }
884
885
886 /*
887 * thread_setstatus:
888 *
889 * Set the status of the specified thread.
890 * Called with (and returns with) no locks held.
891 */
892 kern_return_t
893 thread_setstatus(
894 thread_act_t thr_act,
895 int flavor,
896 thread_state_t tstate,
897 mach_msg_type_number_t count)
898 {
899 kern_return_t kr;
900 thread_t thread;
901
902 thread = act_lock_thread(thr_act);
903 assert(thread);
904 assert(thread->top_act == thr_act);
905 kr = act_machine_set_state(thr_act, flavor, tstate, count);
906 act_unlock_thread(thr_act);
907 return(kr);
908 }
909
910 /*
911 * thread_getstatus:
912 *
913 * Get the status of the specified thread.
914 */
915 kern_return_t
916 thread_getstatus(
917 thread_act_t thr_act,
918 int flavor,
919 thread_state_t tstate,
920 mach_msg_type_number_t *count)
921 {
922 kern_return_t kr;
923 thread_t thread;
924
925 thread = act_lock_thread(thr_act);
926 assert(thread);
927 assert(thread->top_act == thr_act);
928 kr = act_machine_get_state(thr_act, flavor, tstate, count);
929 act_unlock_thread(thr_act);
930 return(kr);
931 }
932
933 /*
934 * Kernel-internal thread_activation interfaces used outside this file:
935 */
936
937 /*
938 * act_init() - Initialize activation handling code
939 */
940 void
941 act_init()
942 {
943 thr_act_zone = zinit(
944 sizeof(struct thread_activation),
945 ACT_MAX * sizeof(struct thread_activation), /* XXX */
946 ACT_CHUNK * sizeof(struct thread_activation),
947 "activations");
948 act_machine_init();
949 }
950
951
952 /*
953 * act_create - Create a new activation in a specific task.
954 */
955 kern_return_t
956 act_create(task_t task,
957 thread_act_t *new_act)
958 {
959 thread_act_t thr_act;
960 int rc;
961 vm_map_t map;
962
963 thr_act = (thread_act_t)zalloc(thr_act_zone);
964 if (thr_act == 0)
965 return(KERN_RESOURCE_SHORTAGE);
966
967 #if MACH_ASSERT
968 if (watchacts & WA_ACT_LNK)
969 printf("act_create(task=%x,thr_act@%x=%x)\n",
970 task, new_act, thr_act);
971 #endif /* MACH_ASSERT */
972
973 /* Start by zeroing everything; then init non-zero items only */
974 bzero((char *)thr_act, sizeof(*thr_act));
975
976 #ifdef MACH_BSD
977 {
978 /*
979 * Take care of the uthread allocation;
980 * do it early in order to make KERN_RESOURCE_SHORTAGE
981 * handling trivial.
982 * uthread_alloc() will bzero the storage allocated.
983 */
984 extern void *uthread_alloc(void);
985 thr_act->uthread = uthread_alloc();
986 if(thr_act->uthread == 0) {
987 /* Put the thr_act back on the thr_act zone */
988 zfree(thr_act_zone, (vm_offset_t)thr_act);
989 return(KERN_RESOURCE_SHORTAGE);
990 }
991 }
992 #endif /* MACH_BSD */
993
994 /*
995 * Start with one reference for the caller and one for the
996 * act being alive.
997 */
998 act_lock_init(thr_act);
999 thr_act->ref_count = 2;
1000
1001 /* Latch onto the task. */
1002 thr_act->task = task;
1003 task_reference(task);
1004
1005 /* Initialize sigbufp for High-Watermark buffer allocation */
1006 thr_act->r_sigbufp = (routine_descriptor_t) &thr_act->r_sigbuf;
1007 thr_act->r_sigbuf_size = sizeof(thr_act->r_sigbuf);
1008
1009 #if THREAD_SWAPPER
1010 thr_act->swap_state = TH_SW_IN;
1011 #if MACH_ASSERT
1012 thr_act->kernel_stack_swapped_in = TRUE;
1013 #endif /* MACH_ASSERT */
1014 #endif /* THREAD_SWAPPER */
1015
1016 /* special_handler will always be last on the returnhandlers list. */
1017 thr_act->special_handler.next = 0;
1018 thr_act->special_handler.handler = special_handler;
1019
1020 #if MACH_PROF
1021 thr_act->act_profiled = FALSE;
1022 thr_act->act_profiled_own = FALSE;
1023 thr_act->profil_buffer = NULLPROFDATA;
1024 #endif
1025
1026 /* Initialize the held_ulocks queue as empty */
1027 queue_init(&thr_act->held_ulocks);
1028
1029 /* Inherit the profiling status of the parent task */
1030 act_prof_init(thr_act, task);
1031
1032 ipc_thr_act_init(task, thr_act);
1033 act_machine_create(task, thr_act);
1034
1035 /*
1036 * If the thr_act was created in a kernel-loaded task, alter its
1037 * saved state to indicate that.
1038 */
1039 if (task->kernel_loaded) {
1040 act_user_to_kernel(thr_act);
1041 }
1042
1043 /* Cache the task's map and take a reference to it */
1044 map = task->map;
1045 thr_act->map = map;
1046
1047 /* Inline vm_map_reference because we don't want to increment res_count */
1048 mutex_lock(&map->s_lock);
1049 #if TASK_SWAPPER
1050 assert(map->res_count > 0);
1051 assert(map->ref_count >= map->res_count);
1052 #endif /* TASK_SWAPPER */
1053 map->ref_count++;
1054 mutex_unlock(&map->s_lock);
1055
1056 *new_act = thr_act;
1057 return KERN_SUCCESS;
1058 }
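/*
 * Reference-counting sketch (illustrative): act_create() returns with
 * ref_count == 2 -- one reference for the caller and one for the act
 * being alive.  The "alive" reference is dropped later by
 * act_disable_task_locked(); the caller drops its own explicitly:
 *
 *	thread_act_t new_act;
 *	if (act_create(task, &new_act) == KERN_SUCCESS) {
 *		...attach it to a shuttle or a thread_pool...
 *		act_deallocate(new_act);	-- drop the caller's ref
 *	}
 */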
1059
1060 /*
1061 * act_free - called when a thr_act's ref_count drops to zero.
1062 *
1063 * This can only happen after the activation has been reaped, and
1064 * all other references to it have gone away. We can now release
1065 * the last critical resources, unlink the activation from the
1066 * task, and release the reference on the thread shuttle itself.
1067 *
1068 * Called with activation locked.
1069 */
1070 #if MACH_ASSERT
1071 int dangerous_bzero = 1; /* paranoia & safety */
1072 #endif
1073
1074 void
1075 act_free(thread_act_t thr_act)
1076 {
1077 task_t task;
1078 thread_t thr;
1079 vm_map_t map;
1080 unsigned int ref;
1081
1082 #if MACH_ASSERT
1083 if (watchacts & WA_EXIT)
1084 printf("act_free(%x(%d)) thr=%x tsk=%x(%d) pport=%x%sactive\n",
1085 thr_act, thr_act->ref_count, thr_act->thread,
1086 thr_act->task,
1087 thr_act->task ? thr_act->task->ref_count : 0,
1088 thr_act->pool_port,
1089 thr_act->active ? " " : " !");
1090 #endif /* MACH_ASSERT */
1091
1092
1093 #if THREAD_SWAPPER
1094 assert(thr_act->kernel_stack_swapped_in);
1095 #endif /* THREAD_SWAPPER */
1096
1097 assert(!thr_act->active);
1098 assert(!thr_act->pool_port);
1099
1100 task = thr_act->task;
1101 task_lock(task);
1102
1103 if (thr = thr_act->thread) {
1104 time_value_t user_time, system_time;
1105
1106 thread_read_times(thr, &user_time, &system_time);
1107 time_value_add(&task->total_user_time, &user_time);
1108 time_value_add(&task->total_system_time, &system_time);
1109
1110 /* Unlink the thr_act from the task's thr_act list,
1111 * so it doesn't appear in calls to task_threads and such.
1112 * The thr_act still keeps its ref on the task, however.
1113 */
1114 queue_remove(&task->thr_acts, thr_act, thread_act_t, thr_acts);
1115 thr_act->thr_acts.next = NULL;
1116 task->thr_act_count--;
1117
1118 #if THREAD_SWAPPER
1119 /*
1120 * Thread is supposed to be unswappable by now...
1121 */
1122 assert(thr_act->swap_state == TH_SW_UNSWAPPABLE ||
1123 !thread_swap_unwire_stack);
1124 #endif /* THREAD_SWAPPER */
1125
1126 task->res_act_count--;
1127 task_unlock(task);
1128 task_deallocate(task);
1129 thread_deallocate(thr);
1130 act_machine_destroy(thr_act);
1131 } else {
1132 /*
1133 * Must have never really gotten started --
1134 * no unlinking from the task and no need
1135 * to free the shuttle.
1136 */
1137 task_unlock(task);
1138 task_deallocate(task);
1139 }
1140
1141 sigbuf_dealloc(thr_act);
1142 act_prof_deallocate(thr_act);
1143 ipc_thr_act_terminate(thr_act);
1144
1145 /*
1146 * Drop the cached map reference.
1147 * Inline version of vm_map_deallocate() because we
1148 * don't want to decrement the map's residence count here.
1149 */
1150 map = thr_act->map;
1151 mutex_lock(&map->s_lock);
1152 #if TASK_SWAPPER
1153 assert(map->res_count >= 0);
1154 assert(map->ref_count > map->res_count);
1155 #endif /* TASK_SWAPPER */
1156 ref = --map->ref_count;
1157 mutex_unlock(&map->s_lock);
1158 if (ref == 0)
1159 vm_map_destroy(map);
1160
1161 #ifdef MACH_BSD
1162 {
1163 /*
1164 * Free uthread BEFORE the bzero.
1165 * Not doing so will result in a leak.
1166 */
1167 extern void uthread_free(void *);
1168 void *ut = thr_act->uthread;
1169 thr_act->uthread = 0;
1170 uthread_free(ut);
1171 }
1172 #endif /* MACH_BSD */
1173
1174 #if MACH_ASSERT
1175 if (dangerous_bzero) /* dangerous if we're still using it! */
1176 bzero((char *)thr_act, sizeof(*thr_act));
1177 #endif /* MACH_ASSERT */
1178 /* Put the thr_act back on the thr_act zone */
1179 zfree(thr_act_zone, (vm_offset_t)thr_act);
1180 }
1181
1182
1183 /*
1184 * act_attach - Attach a thr_act to the top of a thread ("push the stack").
1185 *
1186 * The thread_shuttle must be either the current one or a brand-new one.
1187 * Assumes the thr_act is active but not in use; also, that if it is
1188 * attached to a thread_pool (i.e. the thread_pool pointer is nonzero),
1189 * the thr_act has already been taken off the thread_pool's list.
1190 *
1191 * Already locked: thr_act plus "appropriate" thread-related locks
1192 * (see act_lock_thread()).
1193 */
1194 void
1195 act_attach(
1196 thread_act_t thr_act,
1197 thread_t thread,
1198 unsigned init_alert_mask)
1199 {
1200 thread_act_t lower;
1201
1202 #if MACH_ASSERT
1203 assert(thread == current_thread() || thread->top_act == THR_ACT_NULL);
1204 if (watchacts & WA_ACT_LNK)
1205 printf("act_attach(thr_act %x(%d) thread %x(%d) mask %d)\n",
1206 thr_act, thr_act->ref_count, thread, thread->ref_count,
1207 init_alert_mask);
1208 #endif /* MACH_ASSERT */
1209
1210 /*
1211 * Chain the thr_act onto the thread's thr_act stack.
1212 * Set mask and auto-propagate alerts from below.
1213 */
1214 thr_act->ref_count++;
1215 thr_act->thread = thread;
1216 thr_act->higher = THR_ACT_NULL; /*safety*/
1217 thr_act->alerts = 0;
1218 thr_act->alert_mask = init_alert_mask;
1219 lower = thr_act->lower = thread->top_act;
1220
1221 if (lower != THR_ACT_NULL) {
1222 lower->higher = thr_act;
1223 thr_act->alerts = (lower->alerts & init_alert_mask);
1224 }
1225
1226 thread->top_act = thr_act;
1227 }
1228
1229 /*
1230 * act_detach
1231 *
1232 * Remove the current thr_act from the top of the current thread, i.e.
1233 * "pop the stack". Assumes already locked: thr_act plus "appropriate"
1234 * thread-related locks (see act_lock_thread).
1235 */
1236 void
1237 act_detach(
1238 thread_act_t cur_act)
1239 {
1240 thread_t cur_thread = cur_act->thread;
1241
1242 #if MACH_ASSERT
1243 if (watchacts & (WA_EXIT|WA_ACT_LNK))
1244 printf("act_detach: thr_act %x(%d), thrd %x(%d) task=%x(%d)\n",
1245 cur_act, cur_act->ref_count,
1246 cur_thread, cur_thread->ref_count,
1247 cur_act->task,
1248 cur_act->task ? cur_act->task->ref_count : 0);
1249 #endif /* MACH_ASSERT */
1250
1251 /* Unlink the thr_act from the thread's thr_act stack */
1252 cur_thread->top_act = cur_act->lower;
1253 cur_act->thread = 0;
1254 cur_act->ref_count--;
1255 assert(cur_act->ref_count > 0);
1256
1257 thread_pool_put_act(cur_act);
1258
1259 #if MACH_ASSERT
1260 cur_act->lower = cur_act->higher = THR_ACT_NULL;
1261 if (cur_thread->top_act)
1262 cur_thread->top_act->higher = THR_ACT_NULL;
1263 #endif /* MACH_ASSERT */
1264
1265 return;
1266 }
1267
1268
1269 /*
1270 * Synchronize a thread operation with RPC. Called with nothing
1271 * locked. Returns with thr_act locked, plus one of four
1272 * combinations of other locks held:
1273 * none - for new activation not yet associated with thread_pool
1274 * or shuttle
1275 * rpc_lock(thr_act->thread) only - for base activation (one
1276 * without pool_port)
1277 * ip_lock(thr_act->pool_port) only - for empty activation (one
1278 * with no associated shuttle)
1279 * both locks - for "active" activation (has shuttle, lives
1280 * on thread_pool)
1281 * If thr_act has an associated shuttle, this function returns
1282 * its address. Otherwise it returns zero.
1283 */
1284 thread_t
1285 act_lock_thread(
1286 thread_act_t thr_act)
1287 {
1288 ipc_port_t pport;
1289
1290 /*
1291 * Allow the shuttle cloning code (q.v., when it
1292 * exists :-}) to obtain ip_lock()'s while holding
1293 * an rpc_lock().
1294 */
1295 while (1) {
1296 act_lock(thr_act);
1297 pport = thr_act->pool_port;
1298 if (!pport || ip_lock_try(pport)) {
1299 if (!thr_act->thread)
1300 break;
1301 if (rpc_lock_try(thr_act->thread))
1302 break;
1303 if (pport)
1304 ip_unlock(pport);
1305 }
1306 act_unlock(thr_act);
1307 mutex_pause();
1308 }
1309 return (thr_act->thread);
1310 }
1311
1312 /*
1313 * Unsynchronize with RPC (i.e., undo an act_lock_thread() call).
1314 * Called with thr_act locked, plus thread locks held that are
1315 * "correct" for thr_act's state. Returns with nothing locked.
1316 */
1317 void
1318 act_unlock_thread(thread_act_t thr_act)
1319 {
1320 if (thr_act->thread)
1321 rpc_unlock(thr_act->thread);
1322 if (thr_act->pool_port)
1323 ip_unlock(thr_act->pool_port);
1324 act_unlock(thr_act);
1325 }
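/*
 * Usage sketch (illustrative): every act_lock_thread() must be paired
 * with an act_unlock_thread(), which releases whatever combination of
 * rpc/ip locks was actually taken.  The returned shuttle pointer is
 * zero for an empty activation:
 *
 *	thread_t thread = act_lock_thread(thr_act);
 *	if (thr_act->active) {
 *		...thr_act (and thread, if non-zero) are stable here...
 *	}
 *	act_unlock_thread(thr_act);
 */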
1326
1327 /*
1328 * Synchronize with RPC given a pointer to a shuttle (instead of an
1329 * activation). Called with nothing locked; returns with all
1330 * "appropriate" thread-related locks held (see act_lock_thread()).
1331 */
1332 thread_act_t
1333 thread_lock_act(
1334 thread_t thread)
1335 {
1336 thread_act_t thr_act;
1337
1338 while (1) {
1339 rpc_lock(thread);
1340 thr_act = thread->top_act;
1341 if (!thr_act)
1342 break;
1343 if (!act_lock_try(thr_act)) {
1344 rpc_unlock(thread);
1345 mutex_pause();
1346 continue;
1347 }
1348 if (thr_act->pool_port &&
1349 !ip_lock_try(thr_act->pool_port)) {
1350 rpc_unlock(thread);
1351 act_unlock(thr_act);
1352 mutex_pause();
1353 continue;
1354 }
1355 break;
1356 }
1357 return (thr_act);
1358 }
1359
1360 /*
1361 * Unsynchronize with RPC starting from a pointer to a shuttle.
1362 * Called with RPC-related locks held that are appropriate to
1363 * shuttle's state; any activation is also locked.
1364 */
1365 void
1366 thread_unlock_act(
1367 thread_t thread)
1368 {
1369 thread_act_t thr_act;
1370
1371 if (thr_act = thread->top_act) {
1372 if (thr_act->pool_port)
1373 ip_unlock(thr_act->pool_port);
1374 act_unlock(thr_act);
1375 }
1376 rpc_unlock(thread);
1377 }
1378
1379 /*
1380 * switch_act
1381 *
1382 * If a new activation is given, switch to it. If not,
1383 * switch to the lower activation (pop). Returns the old
1384 * activation. This is for RPC support.
1385 */
1386 thread_act_t
1387 switch_act(
1388 thread_act_t act)
1389 {
1390 thread_t thread;
1391 thread_act_t old, new;
1392 unsigned cpu;
1393 spl_t spl;
1394
1395
1396 disable_preemption();
1397
1398 cpu = cpu_number();
1399 thread = current_thread();
1400
1401 /*
1402 * Find the old and new activation for switch.
1403 */
1404 old = thread->top_act;
1405
1406 if (act) {
1407 new = act;
1408 new->thread = thread;
1409 }
1410 else {
1411 new = old->lower;
1412 }
1413
1414 assert(new != THR_ACT_NULL);
1415 #if THREAD_SWAPPER
1416 assert(new->swap_state != TH_SW_OUT &&
1417 new->swap_state != TH_SW_COMING_IN);
1418 #endif /* THREAD_SWAPPER */
1419
1420 assert(cpu_data[cpu].active_thread == thread);
1421 active_kloaded[cpu] = (new->kernel_loaded) ? new : 0;
1422
1423 /* This is where all the work happens */
1424 machine_switch_act(thread, old, new, cpu);
1425
1426 /*
1427 * Push or pop an activation on the chain.
1428 */
1429 if (act) {
1430 act_attach(new, thread, 0);
1431 }
1432 else {
1433 act_detach(old);
1434 }
1435
1436 enable_preemption();
1437
1438 return(old);
1439 }
1440
1441 /*
1442 * install_special_handler
1443 * Install the special returnhandler that handles suspension and
1444 * termination, if it hasn't been installed already.
1445 *
1446 * Already locked: RPC-related locks for thr_act, but not
1447 * scheduling lock (thread_lock()) of the associated thread.
1448 */
1449 void
1450 install_special_handler(
1451 thread_act_t thr_act)
1452 {
1453 spl_t spl;
1454 thread_t thread = thr_act->thread;
1455
1456 #if MACH_ASSERT
1457 if (watchacts & WA_ACT_HDLR)
1458 printf("act_%x: install_special_hdlr(%x)\n",current_act(),thr_act);
1459 #endif /* MACH_ASSERT */
1460
1461 spl = splsched();
1462 if (thread)
1463 thread_lock(thread);
1464 install_special_handler_locked(thr_act);
1465 act_set_apc(thr_act);
1466 if (thread)
1467 thread_unlock(thread);
1468 splx(spl);
1469 }
1470
1471 /*
1472 * install_special_handler_locked
1473 * Do the work of installing the special_handler.
1474 *
1475 * Already locked: RPC-related locks for thr_act, plus the
1476 * scheduling lock (thread_lock()) of the associated thread.
1477 */
1478 void
1479 install_special_handler_locked(
1480 thread_act_t thr_act)
1481 {
1482 ReturnHandler **rh;
1483 thread_t thread = thr_act->thread;
1484
1485 /* The work handler must always be the last ReturnHandler on the list,
1486 because it can do tricky things like detach the thr_act. */
1487 for (rh = &thr_act->handlers; *rh; rh = &(*rh)->next)
1488 /* */ ;
1489 if (rh != &thr_act->special_handler.next) {
1490 *rh = &thr_act->special_handler;
1491 }
1492 if (thread && thr_act == thread->top_act) {
1493 /*
1494 * Temporarily undepress, so target has
1495 * a chance to do locking required to
1496 * block itself in special_handler().
1497 */
1498 if (thread->depress_priority >= 0) {
1499 thread->priority = thread->depress_priority;
1500
1501 /*
1502 * Use special value -2 to indicate need
1503 * to redepress priority in special_handler
1504 * as thread blocks
1505 */
1506 thread->depress_priority = -2;
1507 compute_priority(thread, FALSE);
1508 }
1509 }
1510 act_set_apc(thr_act);
1511 }
1512
1513 /*
1514 * JMM -
1515 * These two routines will be enhanced over time to call the general handler
1516 * registration mechanism used by special handlers and alerts. They are hacked
1517 * in for now to avoid exporting the gory details of ASTs to the BSD code.
1518 */
1519 extern thread_apc_handler_t bsd_ast;
1520
1521 kern_return_t
1522 thread_apc_set(
1523 thread_act_t thr_act,
1524 thread_apc_handler_t apc)
1525 {
1526 assert(apc == bsd_ast);
1527 thread_ast_set(thr_act, AST_BSD);
1528 if (thr_act == current_act())
1529 ast_propagate(thr_act->ast);
1530 return KERN_SUCCESS;
1531 }
1532
1533 kern_return_t
1534 thread_apc_clear(
1535 thread_act_t thr_act,
1536 thread_apc_handler_t apc)
1537 {
1538 assert(apc == bsd_ast);
1539 thread_ast_clear(thr_act, AST_BSD);
1540 if (thr_act == current_act())
1541 ast_off(AST_BSD);
1542 return KERN_SUCCESS;
1543 }
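/*
 * Usage sketch (illustrative): the only APC supported through this
 * interface today is bsd_ast, layered on AST_BSD, so a BSD-side
 * caller arms it roughly as:
 *
 *	(void) thread_apc_set(current_act(), bsd_ast);
 *
 * bsd_ast() then runs via the AST machinery on the way back to user
 * mode and is disarmed with thread_apc_clear(current_act(), bsd_ast).
 */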
1544
1545 /*
1546 * act_set_thread_pool - Assign an activation to a specific thread_pool.
1547 * Fails if the activation is already assigned to another pool.
1548 * If thread_pool == 0, we remove the thr_act from its thread_pool.
1549 *
1550 * Called with the port containing thread_pool already locked.
1551 * Returns the same way.
1552 */
1553 kern_return_t act_set_thread_pool(
1554 thread_act_t thr_act,
1555 ipc_port_t pool_port)
1556 {
1557 thread_pool_t thread_pool;
1558
1559 #if MACH_ASSERT
1560 if (watchacts & WA_ACT_LNK)
1561 printf("act_set_thread_pool: %x(%d) -> %x\n",
1562 thr_act, thr_act->ref_count, pool_port);
1563 #endif /* MACH_ASSERT */
1564
1565 if (pool_port == 0) {
1566 thread_act_t *lact;
1567
1568 if (thr_act->pool_port == 0)
1569 return KERN_SUCCESS;
1570 thread_pool = &thr_act->pool_port->ip_thread_pool;
1571
1572 for (lact = &thread_pool->thr_acts; *lact;
1573 lact = &((*lact)->thread_pool_next)) {
1574 if (thr_act == *lact) {
1575 *lact = thr_act->thread_pool_next;
1576 break;
1577 }
1578 }
1579 act_lock(thr_act);
1580 thr_act->pool_port = 0;
1581 thr_act->thread_pool_next = 0;
1582 act_unlock(thr_act);
1583 act_deallocate(thr_act);
1584 return KERN_SUCCESS;
1585 }
1586 if (thr_act->pool_port != pool_port) {
1587 thread_pool = &pool_port->ip_thread_pool;
1588 if (thr_act->pool_port != 0) {
1589 #if MACH_ASSERT
1590 if (watchacts & WA_ACT_LNK)
1591 printf("act_set_thread_pool found %x!\n",
1592 thr_act->pool_port);
1593 #endif /* MACH_ASSERT */
1594 return(KERN_FAILURE);
1595 }
1596 act_lock(thr_act);
1597 thr_act->pool_port = pool_port;
1598
1599 /* The pool gets a ref to the activation -- have
1600 * to inline operation because thr_act is already
1601 * locked.
1602 */
1603 act_locked_act_reference(thr_act);
1604
1605 /* If it is available,
1606 * add it to the thread_pool's available-activation list.
1607 */
1608 if ((thr_act->thread == 0) && (thr_act->suspend_count == 0)) {
1609 thr_act->thread_pool_next = thread_pool->thr_acts;
1610 pool_port->ip_thread_pool.thr_acts = thr_act;
1611 if (thread_pool->waiting)
1612 thread_pool_wakeup(thread_pool);
1613 }
1614 act_unlock(thr_act);
1615 }
1616
1617 return KERN_SUCCESS;
1618 }
1619
1620 /*
1621 * act_locked_act_set_thread_pool - Assign an activation to a specific thread_pool.
1622 * Fails if the activation is already assigned to another pool.
1623 * If thread_pool == 0, we remove the thr_act from its thread_pool.
1624 *
1625 * Called with the port containing thread_pool already locked.
1626 * Also called with the thread activation locked.
1627 * Returns the same way.
1628 *
1629 * This routine is the same as `act_set_thread_pool()' except that it does
1630 * not call `act_deallocate(),' which unconditionally tries to obtain the
1631 * thread activation lock.
1632 */
1633 kern_return_t act_locked_act_set_thread_pool(
1634 thread_act_t thr_act,
1635 ipc_port_t pool_port)
1636 {
1637 thread_pool_t thread_pool;
1638
1639 #if MACH_ASSERT
1640 if (watchacts & WA_ACT_LNK)
1641 printf("act_set_thread_pool: %x(%d) -> %x\n",
1642 thr_act, thr_act->ref_count, pool_port);
1643 #endif /* MACH_ASSERT */
1644
1645 if (pool_port == 0) {
1646 thread_act_t *lact;
1647
1648 if (thr_act->pool_port == 0)
1649 return KERN_SUCCESS;
1650 thread_pool = &thr_act->pool_port->ip_thread_pool;
1651
1652 for (lact = &thread_pool->thr_acts; *lact;
1653 lact = &((*lact)->thread_pool_next)) {
1654 if (thr_act == *lact) {
1655 *lact = thr_act->thread_pool_next;
1656 break;
1657 }
1658 }
1659
1660 thr_act->pool_port = 0;
1661 thr_act->thread_pool_next = 0;
1662 act_locked_act_deallocate(thr_act);
1663 return KERN_SUCCESS;
1664 }
1665 if (thr_act->pool_port != pool_port) {
1666 thread_pool = &pool_port->ip_thread_pool;
1667 if (thr_act->pool_port != 0) {
1668 #if MACH_ASSERT
1669 if (watchacts & WA_ACT_LNK)
1670 printf("act_set_thread_pool found %x!\n",
1671 thr_act->pool_port);
1672 #endif /* MACH_ASSERT */
1673 return(KERN_FAILURE);
1674 }
1675 thr_act->pool_port = pool_port;
1676
1677 /* The pool gets a ref to the activation -- have
1678 * to inline operation because thr_act is already
1679 * locked.
1680 */
1681 act_locked_act_reference(thr_act);
1682
1683 /* If it is available,
1684 * add it to the thread_pool's available-activation list.
1685 */
1686 if ((thr_act->thread == 0) && (thr_act->suspend_count == 0)) {
1687 thr_act->thread_pool_next = thread_pool->thr_acts;
1688 pool_port->ip_thread_pool.thr_acts = thr_act;
1689 if (thread_pool->waiting)
1690 thread_pool_wakeup(thread_pool);
1691 }
1692 }
1693
1694 return KERN_SUCCESS;
1695 }
1696
1697 /*
1698 * Activation control support routines internal to this file:
1699 */
1700
1701 /*
1702 * act_execute_returnhandlers() - does just what the name says
1703 *
1704 * This is called by system-dependent code when it detects that
1705 * thr_act->handlers is non-null while returning into user mode.
1706 * Activations linked onto a thread_pool always have null thr_act->handlers,
1707 * so RPC entry paths need not check it.
1708 */
1709 void act_execute_returnhandlers(
1710 void)
1711 {
1712 spl_t s;
1713 thread_t thread;
1714 thread_act_t thr_act = current_act();
1715
1716 #if MACH_ASSERT
1717 if (watchacts & WA_ACT_HDLR)
1718 printf("execute_rtn_hdlrs: thr_act=%x\n", thr_act);
1719 #endif /* MACH_ASSERT */
1720
1721 s = splsched();
1722 act_clr_apc(thr_act);
1723 spllo();
1724 while (1) {
1725 ReturnHandler *rh;
1726
1727 /* Grab the next returnhandler */
1728 thread = act_lock_thread(thr_act);
1729 (void)splsched();
1730 thread_lock(thread);
1731 rh = thr_act->handlers;
1732 if (!rh) {
1733 thread_unlock(thread);
1734 splx(s);
1735 act_unlock_thread(thr_act);
1736 return;
1737 }
1738 thr_act->handlers = rh->next;
1739 thread_unlock(thread);
1740 spllo();
1741 act_unlock_thread(thr_act);
1742
1743 #if MACH_ASSERT
1744 if (watchacts & WA_ACT_HDLR)
1745 printf( (rh == &thr_act->special_handler) ?
1746 "\tspecial_handler\n" : "\thandler=%x\n",
1747 rh->handler);
1748 #endif /* MACH_ASSERT */
1749
1750 /* Execute it */
1751 (*rh->handler)(rh, thr_act);
1752 }
1753 }
1754
1755 /*
1756 * special_handler_continue
1757 *
1758 * Continuation routine for the special handler blocks. It checks
1759 * to see whether there have been any new suspensions. If so, it
1760 * installs the special handler again. Otherwise, it checks to see
1761 * if the current depression needs to be re-instated (it may have
1762 * been temporarily removed in order to get to this point in a hurry).
1763 */
1764 void
1765 special_handler_continue(void)
1766 {
1767 thread_act_t cur_act = current_act();
1768 thread_t thread = cur_act->thread;
1769 spl_t s;
1770
1771 if (cur_act->suspend_count)
1772 install_special_handler(cur_act);
1773 else {
1774 s = splsched();
1775 thread_lock(thread);
1776 if (thread->depress_priority == -2) {
1777 /*
1778 * We were temporarily undepressed by
1779 * install_special_handler; restore priority
1780 * depression.
1781 */
1782 thread->depress_priority = thread->priority;
1783 thread->priority = thread->sched_pri = DEPRESSPRI;
1784 }
1785 thread_unlock(thread);
1786 splx(s);
1787 }
1788 thread_exception_return();
1789 }
1790
1791 /*
1792 * special_handler - handles suspension, termination. Called
1793 * with nothing locked. Returns (if it returns) the same way.
1794 */
1795 void
1796 special_handler(
1797 ReturnHandler *rh,
1798 thread_act_t cur_act)
1799 {
1800 spl_t s;
1801 thread_t lthread;
1802 thread_t thread = act_lock_thread(cur_act);
1803 unsigned alert_bits;
1804 exception_data_type_t
1805 codes[EXCEPTION_CODE_MAX];
1806 kern_return_t kr;
1807 kern_return_t exc_kr;
1808
1809 assert(thread != THREAD_NULL);
1810 #if MACH_ASSERT
1811 if (watchacts & WA_ACT_HDLR)
1812 printf("\t\tspecial_handler(thr_act=%x(%d))\n", cur_act,
1813 (cur_act ? cur_act->ref_count : 0));
1814 #endif /* MACH_ASSERT */
1815
1816 s = splsched();
1817
1818 thread_lock(thread);
1819 thread->state &= ~TH_ABORT; /* clear any aborts */
1820 thread_unlock(thread);
1821 splx(s);
1822
1823 /*
1824 * If someone has killed this invocation,
1825 * invoke the return path with a terminated exception.
1826 */
1827 if (!cur_act->active) {
1828 act_unlock_thread(cur_act);
1829 act_machine_return(KERN_TERMINATED);
1830 }
1831
1832 #ifdef CALLOUT_RPC_MODEL
1833 /*
1834 * JMM - We don't intend to support this RPC model in Darwin.
1835 * We will support inheritance through chains of activations
1836 * on shuttles, but it will be universal and not just for RPC.
1837 * As such, each activation will always have a base shuttle.
1838 * Our RPC model will probably even support the notion of
1839 * alerts (thrown up the chain of activations to affect the
1840 * work done on our behalf), but the unlinking of the shuttles
1841 * will be completely different because we will never have
1842 * to clone them.
1843 */
1844
1845 /* strip server terminated bit */
1846 alert_bits = cur_act->alerts & (~SERVER_TERMINATED);
1847
1848 /* clear server terminated bit */
1849 cur_act->alerts &= ~SERVER_TERMINATED;
1850
1851 if ( alert_bits ) {
1852 /*
1853 * currently necessary to coordinate with the exception
1854 * code -fdr
1855 */
1856 act_unlock_thread(cur_act);
1857
1858 /* upcall exception/alert port */
1859 codes[0] = alert_bits;
1860
1861 /*
1862 * Exception makes a lot of assumptions. If there is no
1863 * exception handler or the exception reply is broken, the
1864 * thread will be terminated and exception will not return. If
1865 * we decide we don't like that behavior, we need to check
1866 * for the existence of an exception port before we call
1867 * exception.
1868 */
1869 exc_kr = exception( EXC_RPC_ALERT, codes, 1 );
1870
1871 /* clear the orphaned and time constraint indications */
1872 cur_act->alerts &= ~(ORPHANED | TIME_CONSTRAINT_UNSATISFIED);
1873
1874 /* if this orphaned activation should be terminated... */
1875 if (exc_kr == KERN_RPC_TERMINATE_ORPHAN) {
1876 /*
1877 * ... terminate the activation
1878 *
1879 * This is done in two steps. First, the activation is
1880 * disabled (prepared for termination); second, the
1881 * `special_handler()' is executed again -- this time
1882 * to terminate the activation.
1883 * (`act_disable_task_locked()' arranges for the
1884 * additional execution of the `special_handler().')
1885 */
1886
1887 #if THREAD_SWAPPER
1888 thread_swap_disable(cur_act);
1889 #endif /* THREAD_SWAPPER */
1890
1891 /* acquire appropriate locks */
1892 task_lock(cur_act->task);
1893 act_lock_thread(cur_act);
1894
1895 /* detach the activation from its task */
1896 kr = act_disable_task_locked(cur_act);
1897 assert( kr == KERN_SUCCESS );
1898
1899 /* release locks */
1900 task_unlock(cur_act->task);
1901 }
1902 else {
1903 /* acquire activation lock again (released below) */
1904 act_lock_thread(cur_act);
1905 s = splsched();
1906 thread_lock(thread);
1907 if (thread->depress_priority == -2) {
1908 /*
1909 * We were temporarily undepressed by
1910 * install_special_handler; restore priority
1911 * depression.
1912 */
1913 thread->depress_priority = thread->priority;
1914 thread->priority = thread->sched_pri = DEPRESSPRI;
1915 }
1916 thread_unlock(thread);
1917 splx(s);
1918 }
1919 }
1920 #endif /* CALLOUT_RPC_MODEL */
1921
1922 /*
1923 * If we're suspended, go to sleep and wait for someone to wake us up.
1924 */
1925 if (cur_act->suspend_count) {
1926 if( cur_act->handlers == NULL ) {
1927 assert_wait((event_t)&cur_act->suspend_count,
1928 THREAD_ABORTSAFE);
1929 act_unlock_thread(cur_act);
1930 thread_block(special_handler_continue);
1931 /* NOTREACHED */
1932 }
1933 special_handler_continue();
1934 }
1935
1936 act_unlock_thread(cur_act);
1937 }
1938
1939 /*
1940 * Try to nudge a thr_act into executing its returnhandler chain.
1941 * Ensures that the activation will execute its returnhandlers
1942 * before it next executes any of its user-level code.
1943 *
1944 * Called with thr_act's act_lock() and "appropriate" thread-related
1945 * locks held. (See act_lock_thread().) Returns same way.
1946 */
1947 void
1948 nudge(thread_act_t thr_act)
1949 {
1950 #if MACH_ASSERT
1951 if (watchacts & WA_ACT_HDLR)
1952 printf("\tact_%x: nudge(%x)\n", current_act(), thr_act);
1953 #endif /* MACH_ASSERT */
1954
1955 /*
1956 * Don't need to do anything at all if this thr_act isn't the topmost.
1957 */
1958 if (thr_act->thread && thr_act->thread->top_act == thr_act) {
1959 /*
1960 * If it's suspended, wake it up.
1961 * This should nudge it even on another CPU.
1962 */
1963 thread_wakeup((event_t)&thr_act->suspend_count);
1964 }
1965 }
1966
1967 /*
1968 * Update activation that belongs to a task created via kernel_task_create().
1969 */
1970 void
1971 act_user_to_kernel(
1972 thread_act_t thr_act)
1973 {
1974 pcb_user_to_kernel(thr_act);
1975 thr_act->kernel_loading = TRUE;
1976 }
1977
1978 /*
1979 * Already locked: thr_act->task, RPC-related locks for thr_act
1980 *
1981 * Detach an activation from its task, and prepare it to terminate
1982 * itself.
1983 */
1984 kern_return_t
1985 act_disable_task_locked(
1986 thread_act_t thr_act)
1987 {
1988 thread_t thread = thr_act->thread;
1989 task_t task = thr_act->task;
1990
1991 #if MACH_ASSERT
1992 if (watchacts & WA_EXIT) {
1993 printf("act_%x: act_disable_tl(thr_act=%x(%d))%sactive task=%x(%d)",
1994 current_act(), thr_act, thr_act->ref_count,
1995 (thr_act->active ? " " : " !"),
1996 thr_act->task, thr_act->task? thr_act->task->ref_count : 0);
1997 if (thr_act->pool_port)
1998 printf(", pool_port %x", thr_act->pool_port);
1999 printf("\n");
2000 (void) dump_act(thr_act);
2001 }
2002 #endif /* MACH_ASSERT */
2003
2004 /* This will allow no more control ops on this thr_act. */
2005 thr_act->active = 0;
2006 ipc_thr_act_disable(thr_act);
2007
2008 /* Clean-up any ulocks that are still owned by the thread
2009 * activation (acquired but not released or handed-off).
2010 */
2011 act_ulock_release_all(thr_act);
2012
2013 /* When the special_handler gets executed,
2014 * it will see the terminated condition and exit
2015 * immediately.
2016 */
2017 install_special_handler(thr_act);
2018
2019
2020 /* If the target happens to be suspended,
2021 * give it a nudge so it can exit.
2022 */
2023 if (thr_act->suspend_count)
2024 nudge(thr_act);
2025
2026 /* Drop the thr_act reference taken for being active.
2027 * (There is still at least one reference left:
2028 * the one we were passed.)
2029 * Inline the deallocate because thr_act is locked.
2030 */
2031 act_locked_act_deallocate(thr_act);
2032
2033 return(KERN_SUCCESS);
2034 }
2035
2036 /*
2037 * act_alert - Register an alert from this activation.
2038 *
2039 * Each set bit is propagated upward from (but not including) this activation,
2040 * until the top of the chain is reached or the bit is masked.
2041 */
2042 kern_return_t
2043 act_alert(thread_act_t thr_act, unsigned alerts)
2044 {
2045 thread_t thread = act_lock_thread(thr_act);
2046
2047 #if MACH_ASSERT
2048 if (watchacts & WA_ACT_LNK)
2049 printf("act_alert %x: %x\n", thr_act, alerts);
2050 #endif /* MACH_ASSERT */
2051
2052 if (thread) {
2053 thread_act_t act_up = thr_act;
2054 while ((alerts) && (act_up != thread->top_act)) {
2055 act_up = act_up->higher;
2056 alerts &= act_up->alert_mask;
2057 act_up->alerts |= alerts;
2058 }
2059 /*
2060 * XXXX If we reach the top, and it is blocked in glue
2061 * code, do something to kick it. XXXX
2062 */
2063 }
2064 act_unlock_thread(thr_act);
2065
2066 return KERN_SUCCESS;
2067 }
2068
2069 kern_return_t act_alert_mask(thread_act_t thr_act, unsigned alert_mask)
2070 {
2071 panic("act_alert_mask NOT YET IMPLEMENTED\n");
2072 return KERN_SUCCESS;
2073 }
2074
2075 typedef struct GetSetState {
2076 struct ReturnHandler rh;
2077 int flavor;
2078 void *state;
2079 int *pcount;
2080 int result;
2081 } GetSetState;
2082
2083 /* Local Forward decls */
2084 kern_return_t get_set_state(
2085 thread_act_t thr_act, int flavor,
2086 thread_state_t state, int *pcount,
2087 void (*handler)(ReturnHandler *rh, thread_act_t thr_act));
2088 void get_state_handler(ReturnHandler *rh, thread_act_t thr_act);
2089 void set_state_handler(ReturnHandler *rh, thread_act_t thr_act);
2090
2091 /*
2092 * get_set_state(thr_act ...)
2093 *
2094 * General code to install g/set_state handler.
2095 * Called with thr_act's act_lock() and "appropriate"
2096 * thread-related locks held. (See act_lock_thread().)
2097 */
2098 kern_return_t
2099 get_set_state(thread_act_t thr_act, int flavor, thread_state_t state, int *pcount,
2100 void (*handler)(ReturnHandler *rh, thread_act_t thr_act))
2101 {
2102 GetSetState gss;
2103 spl_t s;
2104
2105 /* Initialize a small parameter structure */
2106 gss.rh.handler = handler;
2107 gss.flavor = flavor;
2108 gss.state = state;
2109 gss.pcount = pcount;
2110 gss.result = KERN_ABORTED; /* iff wait below is interrupted */
2111
2112 /* Add it to the thr_act's return handler list */
2113 gss.rh.next = thr_act->handlers;
2114 thr_act->handlers = &gss.rh;
2115
2116 s = splsched();
2117 act_set_apc(thr_act);
2118 splx(s);
2119
2120 #if MACH_ASSERT
2121 if (watchacts & WA_ACT_HDLR) {
2122 printf("act_%x: get_set_state(thr_act=%x flv=%x state=%x ptr@%x=%x)",
2123 current_act(), thr_act, flavor, state,
2124 pcount, (pcount ? *pcount : 0));
2125 printf((handler == get_state_handler ? "get_state_hdlr\n" :
2126 (handler == set_state_handler ? "set_state_hdlr\n" :
2127 "hndler=%x\n")), handler);
2128 }
2129 #endif /* MACH_ASSERT */
2130
2131 assert(thr_act->thread); /* Callers must ensure these */
2132 assert(thr_act != current_act());
2133 for (;;) {
2134 nudge(thr_act);
2135 /*
2136 * Wait must be interruptible to avoid deadlock (e.g.) with
2137 * task_suspend() when caller and target of get_set_state()
2138 * are in same task.
2139 */
2140 assert_wait((event_t)&gss, THREAD_ABORTSAFE);
2141 act_unlock_thread(thr_act);
2142 thread_block((void (*)(void))0);
2143 if (gss.result != KERN_ABORTED)
2144 break;
2145 if (current_act()->handlers)
2146 act_execute_returnhandlers();
2147 act_lock_thread(thr_act);
2148 }
2149
2150 #if MACH_ASSERT
2151 if (watchacts & WA_ACT_HDLR)
2152 printf("act_%x: get_set_state returns %x\n",
2153 current_act(), gss.result);
2154 #endif /* MACH_ASSERT */
2155
2156 return gss.result;
2157 }
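/*
 * Note on the rendezvous above: get_set_state() queues a GetSetState
 * ReturnHandler on the target and blocks on &gss.  The target, on its
 * way back toward user mode, runs act_execute_returnhandlers(), which
 * invokes get_state_handler()/set_state_handler() below in the
 * target's own context; the handler does the machine-state copy and
 * then thread_wakeup()s the waiting requester with the result in
 * gss.result.
 */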
2158
2159 void
2160 set_state_handler(ReturnHandler *rh, thread_act_t thr_act)
2161 {
2162 GetSetState *gss = (GetSetState*)rh;
2163
2164 #if MACH_ASSERT
2165 if (watchacts & WA_ACT_HDLR)
2166 printf("act_%x: set_state_handler(rh=%x,thr_act=%x)\n",
2167 current_act(), rh, thr_act);
2168 #endif /* MACH_ASSERT */
2169
2170 gss->result = act_machine_set_state(thr_act, gss->flavor,
2171 gss->state, *gss->pcount);
2172 thread_wakeup((event_t)gss);
2173 }
2174
2175 void
2176 get_state_handler(ReturnHandler *rh, thread_act_t thr_act)
2177 {
2178 GetSetState *gss = (GetSetState*)rh;
2179
2180 #if MACH_ASSERT
2181 if (watchacts & WA_ACT_HDLR)
2182 printf("act_%x: get_state_handler(rh=%x,thr_act=%x)\n",
2183 current_act(), rh, thr_act);
2184 #endif /* MACH_ASSERT */
2185
2186 gss->result = act_machine_get_state(thr_act, gss->flavor,
2187 gss->state,
2188 (mach_msg_type_number_t *) gss->pcount);
2189 thread_wakeup((event_t)gss);
2190 }
2191
2192 kern_return_t
2193 act_get_state_locked(thread_act_t thr_act, int flavor, thread_state_t state,
2194 mach_msg_type_number_t *pcount)
2195 {
2196 #if MACH_ASSERT
2197 if (watchacts & WA_ACT_HDLR)
2198 printf("act_%x: act_get_state_L(thr_act=%x,flav=%x,st=%x,pcnt@%x=%x)\n",
2199 current_act(), thr_act, flavor, state, pcount,
2200 (pcount? *pcount : 0));
2201 #endif /* MACH_ASSERT */
2202
2203 return(get_set_state(thr_act, flavor, state, (int*)pcount, get_state_handler));
2204 }
2205
2206 kern_return_t
2207 act_set_state_locked(thread_act_t thr_act, int flavor, thread_state_t state,
2208 mach_msg_type_number_t count)
2209 {
2210 #if MACH_ASSERT
2211 if (watchacts & WA_ACT_HDLR)
2212 printf("act_%x: act_set_state_L(thr_act=%x,flav=%x,st=%x,pcnt@%x=%x)\n",
2213 current_act(), thr_act, flavor, state, count, count);
2214 #endif /* MACH_ASSERT */
2215
2216 return(get_set_state(thr_act, flavor, state, (int*)&count, set_state_handler));
2217 }
2218
2219 kern_return_t
2220 act_set_state(thread_act_t thr_act, int flavor, thread_state_t state,
2221 mach_msg_type_number_t count)
2222 {
2223 if (thr_act == THR_ACT_NULL || thr_act == current_act())
2224 return(KERN_INVALID_ARGUMENT);
2225
2226 act_lock_thread(thr_act);
2227 return(act_set_state_locked(thr_act, flavor, state, count));
2228
2229 }
2230
2231 kern_return_t
2232 act_get_state(thread_act_t thr_act, int flavor, thread_state_t state,
2233 mach_msg_type_number_t *pcount)
2234 {
2235 if (thr_act == THR_ACT_NULL || thr_act == current_act())
2236 return(KERN_INVALID_ARGUMENT);
2237
2238 act_lock_thread(thr_act);
2239 return(act_get_state_locked(thr_act, flavor, state, pcount));
2240 }
2241
2242 /*
2243 * These two should be called at splsched()
2244 * Set/clear indicator to run APC (layered on ASTs)
2245 */
2246 void
2247 act_set_apc(thread_act_t thr_act)
2248 {
2249 thread_ast_set(thr_act, AST_APC);
2250 if (thr_act == current_act()) {
2251 mp_disable_preemption();
2252 ast_propagate(thr_act->ast);
2253 mp_enable_preemption();
2254 }
2255 }
2256
2257 void
2258 act_clr_apc(thread_act_t thr_act)
2259 {
2260 thread_ast_clear(thr_act, AST_APC);
2261 }
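/*
 * Usage sketch (illustrative): callers bracket these with splsched(),
 * as install_special_handler() does:
 *
 *	spl_t s = splsched();
 *	act_set_apc(thr_act);
 *	splx(s);
 */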
2262
2263 void
2264 act_ulock_release_all(thread_act_t thr_act)
2265 {
2266 ulock_t ulock;
2267
2268 while (!queue_empty(&thr_act->held_ulocks)) {
2269 ulock = (ulock_t) queue_first(&thr_act->held_ulocks);
2270 (void) lock_make_unstable(ulock, thr_act);
2271 (void) lock_release_internal(ulock, thr_act);
2272 }
2273 }
2274
2275 /*
2276 * Provide routines (for export to other components) for things that
2277 * are implemented as macros internally.
2278 */
2279 #undef current_act
2280 thread_act_t
2281 current_act(void)
2282 {
2283 return(current_act_fast());
2284 }
2285
2286 thread_act_t
2287 thread_self(void)
2288 {
2289 thread_act_t self = current_act_fast();
2290
2291 act_reference(self);
2292 return self;
2293 }
2294
2295 thread_act_t
2296 mach_thread_self(void)
2297 {
2298 thread_act_t self = current_act_fast();
2299
2300 act_reference(self);
2301 return self;
2302 }
2303
2304 #undef act_reference
2305 void
2306 act_reference(
2307 thread_act_t thr_act)
2308 {
2309 act_reference_fast(thr_act);
2310 }
2311
2312 #undef act_deallocate
2313 void
2314 act_deallocate(
2315 thread_act_t thr_act)
2316 {
2317 act_deallocate_fast(thr_act);
2318 }
2319