]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/thread_act.c
xnu-344.32.tar.gz
[apple/xnu.git] / osfmk / kern / thread_act.c
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_FREE_COPYRIGHT@
24 */
25 /*
26 * Copyright (c) 1993 The University of Utah and
27 * the Center for Software Science (CSS). All rights reserved.
28 *
29 * Permission to use, copy, modify and distribute this software and its
30 * documentation is hereby granted, provided that both the copyright
31 * notice and this permission notice appear in all copies of the
32 * software, derivative works or modified versions, and any portions
33 * thereof, and that both notices appear in supporting documentation.
34 *
35 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
36 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
37 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
38 *
39 * CSS requests users of this software to return to css-dist@cs.utah.edu any
40 * improvements that they make and grant CSS redistribution rights.
41 *
42 * Author: Bryan Ford, University of Utah CSS
43 *
44 * Thread_Activation management routines
45 */
46
47 #include <cpus.h>
48 #include <task_swapper.h>
49 #include <mach/kern_return.h>
50 #include <mach/alert.h>
51 #include <kern/etap_macros.h>
52 #include <kern/mach_param.h>
53 #include <kern/zalloc.h>
54 #include <kern/thread.h>
55 #include <kern/thread_swap.h>
56 #include <kern/task.h>
57 #include <kern/task_swap.h>
58 #include <kern/thread_act.h>
59 #include <kern/sched_prim.h>
60 #include <kern/misc_protos.h>
61 #include <kern/assert.h>
62 #include <kern/exception.h>
63 #include <kern/ipc_mig.h>
64 #include <kern/ipc_tt.h>
65 #include <kern/profile.h>
66 #include <kern/machine.h>
67 #include <kern/spl.h>
68 #include <kern/syscall_subr.h>
69 #include <kern/sync_lock.h>
70 #include <kern/mk_sp.h> /*** ??? fix so this can be removed ***/
71 #include <kern/processor.h>
72 #include <mach_prof.h>
73 #include <mach/rpc.h>
74
75 /*
76 * Debugging printf control
77 */
78 #if MACH_ASSERT
79 unsigned int watchacts = 0 /* WA_ALL */
80 ; /* Do-it-yourself & patchable */
81 #endif
82
83 /*
84 * Track the number of times we need to swapin a thread to deallocate it.
85 */
86 int act_free_swapin = 0;
87 boolean_t first_act;
88
89 /*
90 * Forward declarations for functions local to this file.
91 */
92 kern_return_t act_abort( thread_act_t, boolean_t);
93 void special_handler(ReturnHandler *, thread_act_t);
94 kern_return_t act_set_state_locked(thread_act_t, int,
95 thread_state_t,
96 mach_msg_type_number_t);
97 kern_return_t act_get_state_locked(thread_act_t, int,
98 thread_state_t,
99 mach_msg_type_number_t *);
100 void act_set_astbsd(thread_act_t);
101 void act_set_apc(thread_act_t);
102 void act_user_to_kernel(thread_act_t);
103 void act_ulock_release_all(thread_act_t thr_act);
104
105 void install_special_handler_locked(thread_act_t);
106
107 static void act_disable(thread_act_t);
108
109 struct thread_activation pageout_act;
110
111 static zone_t thr_act_zone;
112
113 /*
114 * Thread interfaces accessed via a thread_activation:
115 */
116
117
118 /*
119 * Internal routine to terminate a thread.
120 * Sometimes called with task already locked.
121 */
kern_return_t
thread_terminate_internal(
	register thread_act_t	act)
{
	kern_return_t	result;
	thread_t		thread;

	thread = act_lock_thread(act);

	/* A dead activation cannot be terminated twice. */
	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	/* Mark the act inactive, then request an abort of any wait. */
	act_disable(act);
	result = act_abort(act, FALSE);

	/*
	 * Make sure this thread enters the kernel
	 * Must unlock the act, but leave the shuttle
	 * captured in this act.
	 */
	if (thread != current_thread()) {
		act_unlock(act);

		/*
		 * Force the shuttle off-CPU so it notices the pending
		 * special handler; if the stop fails, the target could
		 * not be interrupted.
		 */
		if (thread_stop(thread))
			thread_unstop(thread);
		else
			result = KERN_ABORTED;

		act_lock(act);
	}

	/*
	 * Wake the shuttle: an initialized act is interrupted out of its
	 * wait, an uninitialized one is simply made runnable so it can
	 * finish its own termination in the special handler APC.
	 */
	clear_wait(thread, act->inited? THREAD_INTERRUPTED: THREAD_AWAKENED);
	act_unlock_thread(act);

	return (result);
}
160
161 /*
162 * Terminate a thread.
163 */
164 kern_return_t
165 thread_terminate(
166 register thread_act_t act)
167 {
168 kern_return_t result;
169
170 if (act == THR_ACT_NULL)
171 return (KERN_INVALID_ARGUMENT);
172
173 if ( (act->task == kernel_task ||
174 act->kernel_loaded ) &&
175 act != current_act() )
176 return (KERN_FAILURE);
177
178 result = thread_terminate_internal(act);
179
180 /*
181 * If a kernel thread is terminating itself, force an AST here.
182 * Kernel threads don't normally pass through the AST checking
183 * code - and all threads finish their own termination in the
184 * special handler APC.
185 */
186 if ( act->task == kernel_task ||
187 act->kernel_loaded ) {
188 assert(act == current_act());
189 ast_taken(AST_APC, FALSE);
190 panic("thread_terminate");
191 }
192
193 return (result);
194 }
195
196 /*
197 * Suspend execution of the specified thread.
198 * This is a recursive-style suspension of the thread, a count of
199 * suspends is maintained.
200 *
201 * Called with act_lock held.
202 */
void
thread_hold(
	register thread_act_t	act)
{
	thread_t	thread = act->thread;

	/* Only the first hold installs the handler; later holds just
	   bump the count. */
	if (act->suspend_count++ == 0) {
		install_special_handler(act);
		/*
		 * If the act has been initialized and is running on a
		 * shuttle, poke the special handler, which waits on
		 * &act->suspend_count.
		 */
		if (	act->inited				&&
				thread != THREAD_NULL	&&
				thread->top_act == act	)
			thread_wakeup_one(&act->suspend_count);
	}
}
217
218 /*
219 * Decrement internal suspension count for thr_act, setting thread
220 * runnable when count falls to zero.
221 *
222 * Called with act_lock held.
223 */
void
thread_release(
	register thread_act_t	act)
{
	thread_t	thread = act->thread;

	/* Only the transition to zero suspends needs any wakeup work,
	   and only if the act is on top of a shuttle. */
	if (	act->suspend_count > 0		&&
			--act->suspend_count == 0	&&
			thread != THREAD_NULL		&&
			thread->top_act == act		) {
		if (!act->inited) {
			/* Never run yet: make the shuttle runnable for
			   the first time and mark the act initialized. */
			clear_wait(thread, THREAD_AWAKENED);
			act->inited = TRUE;
		}
		else
			/* Wake the special handler out of its suspend wait. */
			thread_wakeup_one(&act->suspend_count);
	}
}
242
kern_return_t
thread_suspend(
	register thread_act_t	act)
{
	thread_t	thread;

	if (act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	/*
	 * Only the first user-level suspend takes effect; it also takes
	 * the matching internal suspend and installs the special
	 * handler that performs the actual stop.
	 */
	if (	act->user_stop_count++ == 0		&&
			act->suspend_count++ == 0		) {
		install_special_handler(act);
		if (	thread != current_thread()	&&
				thread != THREAD_NULL		&&
				thread->top_act == act		) {
			assert(act->inited);
			/* Wake the target so it sees the handler... */
			thread_wakeup_one(&act->suspend_count);
			act_unlock_thread(act);

			/* ...then wait (unlocked) for it to actually stop. */
			thread_wait(thread);
		}
		else
			act_unlock_thread(act);
	}
	else
		act_unlock_thread(act);

	return (KERN_SUCCESS);
}
279
kern_return_t
thread_resume(
	register thread_act_t	act)
{
	kern_return_t	result = KERN_SUCCESS;
	thread_t		thread;

	if (act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (act->active) {
		if (act->user_stop_count > 0) {
			/*
			 * Dropping the last user stop also drops the
			 * matching internal suspend taken in
			 * thread_suspend().
			 */
			if (	--act->user_stop_count == 0		&&
					--act->suspend_count == 0		&&
					thread != THREAD_NULL			&&
					thread->top_act == act			) {
				if (!act->inited) {
					/* First resume: start the new shuttle. */
					clear_wait(thread, THREAD_AWAKENED);
					act->inited = TRUE;
				}
				else
					/* Wake the special handler's suspend wait. */
					thread_wakeup_one(&act->suspend_count);
			}
		}
		else
			/* Resume without a matching suspend. */
			result = KERN_FAILURE;
	}
	else
		result = KERN_TERMINATED;

	act_unlock_thread(act);

	return (result);
}
316
317 /*
318 * This routine walks toward the head of an RPC chain starting at
319 * a specified thread activation. An alert bit is set and a special
320 * handler is installed for each thread it encounters.
321 *
322 * The target thread act and thread shuttle are already locked.
323 */
kern_return_t
post_alert(
	register thread_act_t	act,
	unsigned		alert_bits)
{
	/* RPC-chain alert posting is not implemented; any call is fatal. */
	panic("post_alert");
}
331
332 /*
333 * thread_depress_abort:
334 *
335 * Prematurely abort priority depression if there is one.
336 */
337 kern_return_t
338 thread_depress_abort(
339 register thread_act_t thr_act)
340 {
341 register thread_t thread;
342 kern_return_t result;
343
344 if (thr_act == THR_ACT_NULL)
345 return (KERN_INVALID_ARGUMENT);
346
347 thread = act_lock_thread(thr_act);
348 /* if activation is terminating, this operation is not meaningful */
349 if (!thr_act->active) {
350 act_unlock_thread(thr_act);
351
352 return (KERN_TERMINATED);
353 }
354
355 result = _mk_sp_thread_depress_abort(thread, FALSE);
356
357 act_unlock_thread(thr_act);
358
359 return (result);
360 }
361
362
363 /*
364 * Indicate that the activation should run its
365 * special handler to detect the condition.
366 *
367 * Called with act_lock held.
368 */
kern_return_t
act_abort(
	thread_act_t	act,
	boolean_t		chain_break )
{
	thread_t	thread = act->thread;
	spl_t		s = splsched();

	assert(thread->top_act == act);

	thread_lock(thread);
	if (!(thread->state & TH_ABORT)) {
		/* First abort: flag the thread and arrange for the
		   special handler to run on kernel exit. */
		thread->state |= TH_ABORT;
		install_special_handler_locked(act);
	} else {
		/* An abort is already pending; escalate a safe abort
		   into an unconditional one. */
		thread->state &= ~TH_ABORT_SAFELY;
	}
	thread_unlock(thread);
	splx(s);

	return (KERN_SUCCESS);
}
391
kern_return_t
thread_abort(
	register thread_act_t	act)
{
	kern_return_t	result;
	thread_t		thread;

	if (act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	/* Post an unconditional abort, then knock the shuttle out of
	   any wait so it notices the pending special handler. */
	result = act_abort(act, FALSE);
	clear_wait(thread, THREAD_INTERRUPTED);
	act_unlock_thread(act);

	return (result);
}
415
kern_return_t
thread_abort_safely(
	thread_act_t	act)
{
	thread_t		thread;
	kern_return_t	ret;
	spl_t			s;

	if ( act == THR_ACT_NULL )
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	s = splsched();
	thread_lock(thread);
	/*
	 * If the thread is at a safe point and its wait can be cleared
	 * right now, no deferred abort is needed.  Otherwise mark it
	 * for a safe abort to be delivered later by the special handler.
	 */
	if (!thread->at_safe_point ||
		clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
		if (!(thread->state & TH_ABORT)) {
			thread->state |= (TH_ABORT|TH_ABORT_SAFELY);
			install_special_handler_locked(act);
		}
	}
	thread_unlock(thread);
	splx(s);

	act_unlock_thread(act);

	/* Note: `ret` is unused; always reports success. */
	return (KERN_SUCCESS);
}
450
451 /*** backward compatibility hacks ***/
452 #include <mach/thread_info.h>
453 #include <mach/thread_special_ports.h>
454 #include <ipc/ipc_port.h>
455 #include <mach/thread_act_server.h>
456
457 kern_return_t
458 thread_info(
459 thread_act_t thr_act,
460 thread_flavor_t flavor,
461 thread_info_t thread_info_out,
462 mach_msg_type_number_t *thread_info_count)
463 {
464 register thread_t thread;
465 kern_return_t result;
466
467 if (thr_act == THR_ACT_NULL)
468 return (KERN_INVALID_ARGUMENT);
469
470 thread = act_lock_thread(thr_act);
471 if (!thr_act->active) {
472 act_unlock_thread(thr_act);
473
474 return (KERN_TERMINATED);
475 }
476
477 result = thread_info_shuttle(thr_act, flavor,
478 thread_info_out, thread_info_count);
479
480 act_unlock_thread(thr_act);
481
482 return (result);
483 }
484
485 /*
486 * Routine: thread_get_special_port [kernel call]
487 * Purpose:
488 * Clones a send right for one of the thread's
489 * special ports.
490 * Conditions:
491 * Nothing locked.
492 * Returns:
493 * KERN_SUCCESS Extracted a send right.
494 * KERN_INVALID_ARGUMENT The thread is null.
495 * KERN_FAILURE The thread is dead.
496 * KERN_INVALID_ARGUMENT Invalid special port.
497 */
498
499 kern_return_t
500 thread_get_special_port(
501 thread_act_t thr_act,
502 int which,
503 ipc_port_t *portp)
504 {
505 ipc_port_t *whichp;
506 ipc_port_t port;
507 thread_t thread;
508
509 #if MACH_ASSERT
510 if (watchacts & WA_PORT)
511 printf("thread_get_special_port(thr_act=%x, which=%x port@%x=%x\n",
512 thr_act, which, portp, (portp ? *portp : 0));
513 #endif /* MACH_ASSERT */
514
515 if (!thr_act)
516 return KERN_INVALID_ARGUMENT;
517 thread = act_lock_thread(thr_act);
518 switch (which) {
519 case THREAD_KERNEL_PORT:
520 whichp = &thr_act->ith_sself;
521 break;
522
523 default:
524 act_unlock_thread(thr_act);
525 return KERN_INVALID_ARGUMENT;
526 }
527
528 if (!thr_act->active) {
529 act_unlock_thread(thr_act);
530 return KERN_FAILURE;
531 }
532
533 port = ipc_port_copy_send(*whichp);
534 act_unlock_thread(thr_act);
535
536 *portp = port;
537 return KERN_SUCCESS;
538 }
539
540 /*
541 * Routine: thread_set_special_port [kernel call]
542 * Purpose:
543 * Changes one of the thread's special ports,
544 * setting it to the supplied send right.
545 * Conditions:
546 * Nothing locked. If successful, consumes
547 * the supplied send right.
548 * Returns:
549 * KERN_SUCCESS Changed the special port.
550 * KERN_INVALID_ARGUMENT The thread is null.
551 * KERN_FAILURE The thread is dead.
552 * KERN_INVALID_ARGUMENT Invalid special port.
553 */
554
555 kern_return_t
556 thread_set_special_port(
557 thread_act_t thr_act,
558 int which,
559 ipc_port_t port)
560 {
561 ipc_port_t *whichp;
562 ipc_port_t old;
563 thread_t thread;
564
565 #if MACH_ASSERT
566 if (watchacts & WA_PORT)
567 printf("thread_set_special_port(thr_act=%x,which=%x,port=%x\n",
568 thr_act, which, port);
569 #endif /* MACH_ASSERT */
570
571 if (thr_act == 0)
572 return KERN_INVALID_ARGUMENT;
573
574 thread = act_lock_thread(thr_act);
575 switch (which) {
576 case THREAD_KERNEL_PORT:
577 whichp = &thr_act->ith_self;
578 break;
579
580 default:
581 act_unlock_thread(thr_act);
582 return KERN_INVALID_ARGUMENT;
583 }
584
585 if (!thr_act->active) {
586 act_unlock_thread(thr_act);
587 return KERN_FAILURE;
588 }
589
590 old = *whichp;
591 *whichp = port;
592 act_unlock_thread(thr_act);
593
594 if (IP_VALID(old))
595 ipc_port_release_send(old);
596 return KERN_SUCCESS;
597 }
598
599 /*
600 * thread state should always be accessible by locking the thread
601 * and copying it. The activation messes things up so for right
602 * now if it's not the top of the chain, use a special handler to
603 * get the information when the shuttle returns to the activation.
604 */
kern_return_t
thread_get_state(
	register thread_act_t	act,
	int						flavor,
	thread_state_t			state,			/* pointer to OUT array */
	mach_msg_type_number_t	*state_count)	/*IN/OUT*/
{
	kern_return_t		result = KERN_SUCCESS;
	thread_t			thread;

	if (act == THR_ACT_NULL || act == current_act())
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	/* Keep the act suspended while we stop and examine the shuttle. */
	thread_hold(act);

	/*
	 * Stop the shuttle so its state is stable.  The act must be
	 * unlocked across thread_stop(); if the act's shuttle changed
	 * while unlocked, undo the stop and retry with the new one.
	 */
	for (;;) {
		thread_t			thread1;

		if (	thread == THREAD_NULL		||
				thread->top_act != act		)
			break;
		act_unlock_thread(act);

		if (!thread_stop(thread)) {
			/* Stop failed; give up on the shuttle entirely. */
			result = KERN_ABORTED;
			(void)act_lock_thread(act);
			thread = THREAD_NULL;
			break;
		}

		thread1 = act_lock_thread(act);
		if (thread1 == thread)
			break;

		thread_unstop(thread);
		thread = thread1;
	}

	if (result == KERN_SUCCESS)
		result = act_machine_get_state(act, flavor, state, state_count);

	/* Release the stop taken above, if still in effect. */
	if (	thread != THREAD_NULL	&&
			thread->top_act == act	)
		thread_unstop(thread);

	thread_release(act);
	act_unlock_thread(act);

	return (result);
}
662
663 /*
664 * Change thread's machine-dependent state. Called with nothing
665 * locked. Returns same way.
666 */
kern_return_t
thread_set_state(
	register thread_act_t	act,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	state_count)
{
	kern_return_t		result = KERN_SUCCESS;
	thread_t			thread;

	if (act == THR_ACT_NULL || act == current_act())
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	/* Keep the act suspended while we stop and modify the shuttle. */
	thread_hold(act);

	/*
	 * Stop the shuttle so its state can be written safely.  Same
	 * retry dance as thread_get_state(): the act is unlocked across
	 * thread_stop(), so re-validate the shuttle afterwards.
	 */
	for (;;) {
		thread_t			thread1;

		if (	thread == THREAD_NULL		||
				thread->top_act != act		)
			break;
		act_unlock_thread(act);

		if (!thread_stop(thread)) {
			result = KERN_ABORTED;
			(void)act_lock_thread(act);
			thread = THREAD_NULL;
			break;
		}

		thread1 = act_lock_thread(act);
		if (thread1 == thread)
			break;

		thread_unstop(thread);
		thread = thread1;
	}

	if (result == KERN_SUCCESS)
		result = act_machine_set_state(act, flavor, state, state_count);

	/* Release the stop taken above, if still in effect. */
	if (	thread != THREAD_NULL	&&
			thread->top_act == act	)
		thread_unstop(thread);

	thread_release(act);
	act_unlock_thread(act);

	return (result);
}
724
725 /*
726 * Kernel-internal "thread" interfaces used outside this file:
727 */
728
kern_return_t
thread_dup(
	register thread_act_t	target)
{
	kern_return_t		result = KERN_SUCCESS;
	thread_act_t		self = current_act();
	thread_t			thread;

	if (target == THR_ACT_NULL || target == self)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(target);

	if (!target->active) {
		act_unlock_thread(target);
		return (KERN_TERMINATED);
	}

	/* Keep the target suspended while we stop it and copy into it. */
	thread_hold(target);

	/*
	 * Stop the target's shuttle (same retry dance as
	 * thread_get_state()): the act is unlocked across thread_stop(),
	 * so re-validate the shuttle afterwards.
	 */
	for (;;) {
		thread_t			thread1;

		if (	thread == THREAD_NULL		||
				thread->top_act != target	)
			break;
		act_unlock_thread(target);

		if (!thread_stop(thread)) {
			result = KERN_ABORTED;
			(void)act_lock_thread(target);
			thread = THREAD_NULL;
			break;
		}

		thread1 = act_lock_thread(target);
		if (thread1 == thread)
			break;

		thread_unstop(thread);
		thread = thread1;
	}

	/* Duplicate the caller's machine state into the target. */
	if (result == KERN_SUCCESS)
		result = act_thread_dup(self, target);

	/* Release the stop taken above, if still in effect. */
	if (	thread != THREAD_NULL		&&
			thread->top_act == target	)
		thread_unstop(thread);

	thread_release(target);
	act_unlock_thread(target);

	return (result);
}
784
785
786 /*
787 * thread_setstatus:
788 *
789 * Set the status of the specified thread.
790 * Called with (and returns with) no locks held.
791 */
792 kern_return_t
793 thread_setstatus(
794 register thread_act_t act,
795 int flavor,
796 thread_state_t tstate,
797 mach_msg_type_number_t count)
798 {
799 kern_return_t result = KERN_SUCCESS;
800 thread_t thread;
801
802 thread = act_lock_thread(act);
803
804 if ( act != current_act() &&
805 (act->suspend_count == 0 ||
806 thread == THREAD_NULL ||
807 (thread->state & TH_RUN) ||
808 thread->top_act != act) )
809 result = KERN_FAILURE;
810
811 if (result == KERN_SUCCESS)
812 result = act_machine_set_state(act, flavor, tstate, count);
813
814 act_unlock_thread(act);
815
816 return (result);
817 }
818
819 /*
820 * thread_getstatus:
821 *
822 * Get the status of the specified thread.
823 */
824 kern_return_t
825 thread_getstatus(
826 register thread_act_t act,
827 int flavor,
828 thread_state_t tstate,
829 mach_msg_type_number_t *count)
830 {
831 kern_return_t result = KERN_SUCCESS;
832 thread_t thread;
833
834 thread = act_lock_thread(act);
835
836 if ( act != current_act() &&
837 (act->suspend_count == 0 ||
838 thread == THREAD_NULL ||
839 (thread->state & TH_RUN) ||
840 thread->top_act != act) )
841 result = KERN_FAILURE;
842
843 if (result == KERN_SUCCESS)
844 result = act_machine_get_state(act, flavor, tstate, count);
845
846 act_unlock_thread(act);
847
848 return (result);
849 }
850
851 /*
852 * Kernel-internal thread_activation interfaces used outside this file:
853 */
854
855 /*
856 * act_init() - Initialize activation handling code
857 */
void
act_init()
{
	/* Carve out the zone all activations are allocated from. */
	thr_act_zone = zinit(
			sizeof(struct thread_activation),
			ACT_MAX * sizeof(struct thread_activation), /* XXX */
			ACT_CHUNK * sizeof(struct thread_activation),
			"activations");
	/* The first act created gets the static pageout_act. */
	first_act = TRUE;
	act_machine_init();
}
869
870
871 /*
872 * act_create - Create a new activation in a specific task.
873 */
kern_return_t
act_create(task_t task,
	   thread_act_t *new_act)
{
	thread_act_t thr_act;
	int rc;
	vm_map_t map;

	/* The very first activation uses the statically-allocated
	   pageout act; all others come from the zone. */
	if (first_act) {
		thr_act = &pageout_act;
		first_act = FALSE;
	} else
		thr_act = (thread_act_t)zalloc(thr_act_zone);
	if (thr_act == 0)
		return(KERN_RESOURCE_SHORTAGE);

#if	MACH_ASSERT
	if (watchacts & WA_ACT_LNK)
		printf("act_create(task=%x,thr_act@%x=%x)\n",
			task, new_act, thr_act);
#endif	/* MACH_ASSERT */

	/* Start by zeroing everything; then init non-zero items only */
	bzero((char *)thr_act, sizeof(*thr_act));

	if (thr_act == &pageout_act)
		thr_act->thread = &pageout_thread;

#ifdef MACH_BSD
	{
		/*
		 * Take care of the uthread allocation
		 * do it early in order to make KERN_RESOURCE_SHORTAGE
		 * handling trivial
		 * uthread_alloc() will bzero the storage allocated.
		 */
		extern void *uthread_alloc(task_t, thread_act_t);

		thr_act->uthread = uthread_alloc(task, thr_act);
		if(thr_act->uthread == 0) {
			/* Put the thr_act back on the thr_act zone */
			/* NOTE(review): if thr_act is the static pageout_act,
			   zfree-ing it would be wrong -- presumably the first
			   activation's uthread allocation cannot fail this
			   early in boot; confirm. */
			zfree(thr_act_zone, (vm_offset_t)thr_act);
			return(KERN_RESOURCE_SHORTAGE);
		}
	}
#endif	/* MACH_BSD */

	/*
	 * Start with one reference for the caller and one for the
	 * act being alive.
	 */
	act_lock_init(thr_act);
	thr_act->ref_count = 2;

	/* Latch onto the task. */
	thr_act->task = task;
	task_reference(task);

	/* special_handler will always be last on the returnhandlers list.  */
	thr_act->special_handler.next = 0;
	thr_act->special_handler.handler = special_handler;

#if	MACH_PROF
	thr_act->act_profiled = FALSE;
	thr_act->act_profiled_own = FALSE;
	thr_act->profil_buffer = NULLPROFDATA;
#endif

	/* Initialize the held_ulocks queue as empty */
	queue_init(&thr_act->held_ulocks);

	/* Inherit the profiling status of the parent task */
	act_prof_init(thr_act, task);

	ipc_thr_act_init(task, thr_act);
	act_machine_create(task, thr_act);

	/*
	 * If thr_act created in kernel-loaded task, alter its saved
	 * state to so indicate
	 */
	if (task->kernel_loaded) {
		act_user_to_kernel(thr_act);
	}

	/* Cache the task's map and take a reference to it */
	map = task->map;
	thr_act->map = map;

	/* Inline vm_map_reference cause we don't want to increment res_count */
	mutex_lock(&map->s_lock);
	map->ref_count++;
	mutex_unlock(&map->s_lock);

	*new_act = thr_act;
	return KERN_SUCCESS;
}
971
972 /*
973 * act_free - called when an thr_act's ref_count drops to zero.
974 *
975 * This can only happen after the activation has been reaped, and
976 * all other references to it have gone away. We can now release
977 * the last critical resources, unlink the activation from the
978 * task, and release the reference on the thread shuttle itself.
979 *
980 * Called with activation locked.
981 */
#if	MACH_ASSERT
int	dangerous_bzero = 1;	/* paranoia & safety */
#endif

void
act_free(thread_act_t thr_act)
{
	task_t		task;
	thread_t	thr;
	vm_map_t	map;
	unsigned int	ref;
	void * task_proc;

#if	MACH_ASSERT
	if (watchacts & WA_EXIT)
		printf("act_free(%x(%d)) thr=%x tsk=%x(%d) %sactive\n",
			thr_act, thr_act->ref_count, thr_act->thread,
			thr_act->task,
			thr_act->task ? thr_act->task->ref_count : 0,
			thr_act->active ? " " : " !");
#endif	/* MACH_ASSERT */

	assert(!thr_act->active);

	task = thr_act->task;
	task_lock(task);

	task_proc = task->bsd_info;
	if (thr = thr_act->thread) {
		time_value_t	user_time, system_time;

		/* Credit the act's accumulated CPU time to the task. */
		thread_read_times(thr, &user_time, &system_time);
		time_value_add(&task->total_user_time, &user_time);
		time_value_add(&task->total_system_time, &system_time);

		/* Unlink the thr_act from the task's thr_act list,
		 * so it doesn't appear in calls to task_threads and such.
		 * The thr_act still keeps its ref on the task, however.
		 */
		queue_remove(&task->thr_acts, thr_act, thread_act_t, thr_acts);
		thr_act->thr_acts.next = NULL;
		task->thr_act_count--;
		task->res_act_count--;
		task_unlock(task);
		task_deallocate(task);
		thread_deallocate(thr);
		act_machine_destroy(thr_act);
	} else {
		/*
		 * Must have never really gotten started
		 * no unlinking from the task and no need
		 * to free the shuttle.
		 */
		task_unlock(task);
		task_deallocate(task);
	}

	act_prof_deallocate(thr_act);
	ipc_thr_act_terminate(thr_act);

	/*
	 * Drop the cached map reference.
	 * Inline version of vm_map_deallocate() because we
	 * don't want to decrement the map's residence count here.
	 */
	map = thr_act->map;
	mutex_lock(&map->s_lock);
	ref = --map->ref_count;
	mutex_unlock(&map->s_lock);
	if (ref == 0)
		vm_map_destroy(map);

#ifdef MACH_BSD
	{
		/*
		 * Free uthread BEFORE the bzero.
		 * Not doing so will result in a leak.
		 */
		extern void uthread_free(task_t, void *, void *);

		/* NOTE(review): `task` was task_deallocate()d above but is
		   passed to uthread_free here -- presumably the act's own
		   task reference semantics keep it valid; confirm. */
		void *ut = thr_act->uthread;
		thr_act->uthread = 0;
		uthread_free(task, ut, task_proc);
	}
#endif  /* MACH_BSD */

#if	MACH_ASSERT
	/* Scribble over the act to catch stale use of freed memory. */
	if (dangerous_bzero)	/* dangerous if we're still using it! */
		bzero((char *)thr_act, sizeof(*thr_act));
#endif	/* MACH_ASSERT */
	/* Put the thr_act back on the thr_act zone */
	zfree(thr_act_zone, (vm_offset_t)thr_act);
}
1075
1076
1077 /*
1078 * act_attach - Attach an thr_act to the top of a thread ("push the stack").
1079 *
1080 * The thread_shuttle must be either the current one or a brand-new one.
1081 * Assumes the thr_act is active but not in use.
1082 *
1083 * Already locked: thr_act plus "appropriate" thread-related locks
1084 * (see act_lock_thread()).
1085 */
void
act_attach(
	thread_act_t	thr_act,
	thread_t	thread,
	unsigned	init_alert_mask)
{
	thread_act_t	lower;

#if	MACH_ASSERT
	assert(thread == current_thread() || thread->top_act == THR_ACT_NULL);
	if (watchacts & WA_ACT_LNK)
		printf("act_attach(thr_act %x(%d) thread %x(%d) mask %d)\n",
		       thr_act, thr_act->ref_count, thread, thread->ref_count,
		       init_alert_mask);
#endif	/* MACH_ASSERT */

	/*
	 *	Chain the thr_act onto the thread's thr_act stack.
	 *	Set mask and auto-propagate alerts from below.
	 */
	thr_act->ref_count++;		/* the thread now holds a reference */
	thr_act->thread = thread;
	thr_act->higher = THR_ACT_NULL;  /*safety*/
	thr_act->alerts = 0;
	thr_act->alert_mask = init_alert_mask;
	lower = thr_act->lower = thread->top_act;

	if (lower != THR_ACT_NULL) {
		lower->higher = thr_act;
		/* Inherit pending alerts the new act's mask lets through. */
		thr_act->alerts = (lower->alerts & init_alert_mask);
	}

	thread->top_act = thr_act;
}
1120
1121 /*
1122 * act_detach
1123 *
1124 * Remove the current thr_act from the top of the current thread, i.e.
1125 * "pop the stack". Assumes already locked: thr_act plus "appropriate"
1126 * thread-related locks (see act_lock_thread).
1127 */
void
act_detach(
	thread_act_t	cur_act)
{
	thread_t	cur_thread = cur_act->thread;

#if	MACH_ASSERT
	if (watchacts & (WA_EXIT|WA_ACT_LNK))
		printf("act_detach: thr_act %x(%d), thrd %x(%d) task=%x(%d)\n",
		       cur_act, cur_act->ref_count,
		       cur_thread, cur_thread->ref_count,
		       cur_act->task,
		       cur_act->task ? cur_act->task->ref_count : 0);
#endif	/* MACH_ASSERT */

	/* Unlink the thr_act from the thread's thr_act stack */
	cur_thread->top_act = cur_act->lower;
	cur_act->thread = 0;
	/* Drop the reference taken by act_attach(); some other
	   reference must still be outstanding. */
	cur_act->ref_count--;
	assert(cur_act->ref_count > 0);

#if	MACH_ASSERT
	/* Poison the stale chain links to catch misuse. */
	cur_act->lower = cur_act->higher = THR_ACT_NULL;
	if (cur_thread->top_act)
		cur_thread->top_act->higher = THR_ACT_NULL;
#endif	/* MACH_ASSERT */

	return;
}
1157
1158
1159 /*
1160 * Synchronize a thread operation with migration.
1161 * Called with nothing locked.
1162 * Returns with thr_act locked.
1163 */
thread_t
act_lock_thread(
	thread_act_t	thr_act)
{

	/*
	 * JMM - We have moved away from explicit RPC locks
	 * and towards a generic migration approach.  The wait
	 * queue lock will be the point of synchronization for
	 * the shuttle linkage when this is rolled out.  Until
	 * then, just lock the act.
	 */
	act_lock(thr_act);
	/* Return the shuttle (may be THREAD_NULL) pinned by the lock. */
	return (thr_act->thread);
}
1179
1180 /*
1181 * Unsynchronize with migration (i.e., undo an act_lock_thread() call).
1182 * Called with thr_act locked, plus thread locks held that are
1183 * "correct" for thr_act's state. Returns with nothing locked.
1184 */
void
act_unlock_thread(thread_act_t thr_act)
{
	/* Only the act lock is currently taken by act_lock_thread(). */
	act_unlock(thr_act);
}
1190
1191 /*
1192 * Synchronize with migration given a pointer to a shuttle (instead of an
1193 * activation). Called with nothing locked; returns with all
1194 * "appropriate" thread-related locks held (see act_lock_thread()).
1195 */
1196 thread_act_t
1197 thread_lock_act(
1198 thread_t thread)
1199 {
1200 thread_act_t thr_act;
1201
1202 while (1) {
1203 thr_act = thread->top_act;
1204 if (!thr_act)
1205 break;
1206 if (!act_lock_try(thr_act)) {
1207 mutex_pause();
1208 continue;
1209 }
1210 break;
1211 }
1212 return (thr_act);
1213 }
1214
1215 /*
1216 * Unsynchronize with an activation starting from a pointer to
1217 * a shuttle.
1218 */
1219 void
1220 thread_unlock_act(
1221 thread_t thread)
1222 {
1223 thread_act_t thr_act;
1224
1225 if (thr_act = thread->top_act) {
1226 act_unlock(thr_act);
1227 }
1228 }
1229
1230 /*
1231 * switch_act
1232 *
1233 * If a new activation is given, switch to it. If not,
1234 * switch to the lower activation (pop). Returns the old
1235 * activation. This is for migration support.
1236 */
thread_act_t
switch_act(
	thread_act_t act)
{
	thread_t	thread;
	thread_act_t	old, new;
	unsigned	cpu;
	spl_t		spl;


	/* No rescheduling while the act chain and cpu state change. */
	disable_preemption();

	cpu = cpu_number();
	thread = current_thread();

	/*
	 *	Find the old and new activation for switch.
	 */
	old = thread->top_act;

	if (act) {
		/* Explicit target: it will be pushed on the chain. */
		new = act;
		new->thread = thread;
	}
	else {
		/* No target: pop back to the next-lower activation. */
		new = old->lower;
	}

	assert(new != THR_ACT_NULL);
	assert(cpu_to_processor(cpu)->cpu_data->active_thread == thread);
	/* Track whether a kernel-loaded act is active on this cpu. */
	active_kloaded[cpu] = (new->kernel_loaded) ? new : 0;

	/* This is where all the work happens */
	machine_switch_act(thread, old, new, cpu);

	/*
	 *	Push or pop an activation on the chain.
	 */
	if (act) {
		act_attach(new, thread, 0);
	}
	else {
		act_detach(old);
	}

	enable_preemption();

	return(old);
}
1286
1287 /*
1288 * install_special_handler
1289 * Install the special returnhandler that handles suspension and
1290 * termination, if it hasn't been installed already.
1291 *
1292 * Already locked: RPC-related locks for thr_act, but not
1293 * scheduling lock (thread_lock()) of the associated thread.
1294 */
1295 void
1296 install_special_handler(
1297 thread_act_t thr_act)
1298 {
1299 spl_t spl;
1300 thread_t thread = thr_act->thread;
1301
1302 #if MACH_ASSERT
1303 if (watchacts & WA_ACT_HDLR)
1304 printf("act_%x: install_special_hdlr(%x)\n",current_act(),thr_act);
1305 #endif /* MACH_ASSERT */
1306
1307 spl = splsched();
1308 thread_lock(thread);
1309 install_special_handler_locked(thr_act);
1310 thread_unlock(thread);
1311 splx(spl);
1312 }
1313
/*
 * install_special_handler_locked
 *	Do the work of installing the special_handler.
 *
 *	Already locked: RPC-related locks for thr_act, plus the
 *	scheduling lock (thread_lock()) of the associated thread.
 */
void
install_special_handler_locked(
	thread_act_t	act)
{
	thread_t	thread = act->thread;
	ReturnHandler	**rh;

	/* The work handler must always be the last ReturnHandler on the list,
	   because it can do tricky things like detach the thr_act. */
	for (rh = &act->handlers; *rh; rh = &(*rh)->next)
		continue;
	/* Append only if it is not already the tail of the list. */
	if (rh != &act->special_handler.next)
		*rh = &act->special_handler;

	if (act == thread->top_act) {
		/*
		 * Temporarily undepress, so target has
		 * a chance to do locking required to
		 * block itself in special_handler().
		 */
		if (thread->sched_mode & TH_MODE_ISDEPRESSED)
			compute_priority(thread, TRUE);
	}

	/* Arm the APC AST so the handler list is drained on the way out. */
	thread_ast_set(act, AST_APC);
	if (act == current_act())
		ast_propagate(act->ast);
	else {
		processor_t	processor = thread->last_processor;

		/* If the target is running remotely right now, poke that cpu. */
		if (	processor != PROCESSOR_NULL		&&
			processor->state == PROCESSOR_RUNNING	&&
			processor->cpu_data->active_thread == thread	)
			cause_ast_check(processor);
	}
}
1357
/*
 * thread_apc_set
 *	Stubbed-out APC registration.  The only handler callers may
 *	legally pass is bsd_ast (asserted); the request is always
 *	refused in this configuration.
 */
kern_return_t
thread_apc_set(
	thread_act_t		act,
	thread_apc_handler_t	apc)
{
	extern thread_apc_handler_t	bsd_ast;

	assert(apc == bsd_ast);
	/* APC registration is not supported; callers must handle failure. */
	return (KERN_FAILURE);
}
1368
/*
 * thread_apc_clear
 *	Stubbed-out APC removal; mirrors thread_apc_set().  Only
 *	bsd_ast is legal (asserted) and the request is always refused.
 */
kern_return_t
thread_apc_clear(
	thread_act_t		act,
	thread_apc_handler_t	apc)
{
	extern thread_apc_handler_t	bsd_ast;

	assert(apc == bsd_ast);
	/* APC removal is not supported; callers must handle failure. */
	return (KERN_FAILURE);
}
1379
/*
 * Activation control support routines internal to this file:
 */

/*
 * act_execute_returnhandlers() - does just what the name says
 *
 * This is called by system-dependent code when it detects that
 * thr_act->handlers is non-null while returning into user mode.
 */
void
act_execute_returnhandlers(void)
{
	thread_act_t	act = current_act();

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("execute_rtn_hdlrs: act=%x\n", act);
#endif	/* MACH_ASSERT */

	/* The APC AST is being serviced now; clear it before draining. */
	thread_ast_clear(act, AST_APC);
	spllo();

	/*
	 * Pop and run handlers one at a time.  The list head is
	 * re-read under the thread lock each iteration because a
	 * handler may add or remove entries.
	 */
	for (;;) {
		ReturnHandler	*rh;
		thread_t	thread = act_lock_thread(act);

		(void)splsched();
		thread_lock(thread);
		rh = act->handlers;
		if (!rh) {
			/* List drained: drop all locks and return. */
			thread_unlock(thread);
			spllo();
			act_unlock_thread(act);
			return;
		}
		/* Unlink the head entry, then run it with no locks held. */
		act->handlers = rh->next;
		thread_unlock(thread);
		spllo();
		act_unlock_thread(act);

#if	MACH_ASSERT
		if (watchacts & WA_ACT_HDLR)
			printf( (rh == &act->special_handler) ?
				"\tspecial_handler\n" : "\thandler=%x\n", rh->handler);
#endif	/* MACH_ASSERT */

		/* Execute it */
		(*rh->handler)(rh, act);
	}
}
1431
/*
 * special_handler_continue
 *
 * Continuation routine for the special handler blocks.  It checks
 * to see whether there has been any new suspensions.  If so, it
 * installs the special handler again.  Otherwise, it checks to see
 * if the current depression needs to be re-instated (it may have
 * been temporarily removed in order to get to this point in a hurry).
 */
void
special_handler_continue(void)
{
	thread_act_t	self = current_act();

	if (self->suspend_count > 0)
		/* Still suspended: re-arm the handler so we block again. */
		install_special_handler(self);
	else {
		thread_t	thread = self->thread;
		spl_t		s = splsched();

		thread_lock(thread);
		if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
			processor_t	myprocessor = thread->last_processor;

			/*
			 * Re-instate the priority depression that
			 * install_special_handler_locked() undid.
			 * NOTE(review): dereferences last_processor without a
			 * null check -- presumably valid because we are the
			 * running thread here; confirm.
			 */
			thread->sched_pri = DEPRESSPRI;
			myprocessor->current_pri = thread->sched_pri;
			thread->sched_mode &= ~TH_MODE_PREEMPT;
		}
		thread_unlock(thread);
		splx(s);
	}

	thread_exception_return();
	/*NOTREACHED*/
}
1467
/*
 * special_handler	- handles suspension, termination.  Called
 * with nothing locked.  Returns (if it returns) the same way.
 */
void
special_handler(
	ReturnHandler	*rh,
	thread_act_t	self)
{
	thread_t	thread = act_lock_thread(self);
	spl_t		s;

	assert(thread != THREAD_NULL);

	s = splsched();
	thread_lock(thread);
	thread->state &= ~(TH_ABORT|TH_ABORT_SAFELY);	/* clear any aborts */
	thread_unlock(thread);
	splx(s);

	/*
	 * If someone has killed this invocation,
	 * invoke the return path with a terminated exception.
	 */
	if (!self->active) {
		act_unlock_thread(self);
		act_machine_return(KERN_TERMINATED);
	}

	/*
	 * If we're suspended, go to sleep and wait for someone to wake us up.
	 */
	if (self->suspend_count > 0) {
		if (self->handlers == NULL) {
			/* No other handlers pending: safe to block here. */
			assert_wait(&self->suspend_count, THREAD_ABORTSAFE);
			act_unlock_thread(self);
			thread_block(special_handler_continue);
			/* NOTREACHED */
		}

		/*
		 * Other handlers remain; run the continuation directly so
		 * they execute first and we get re-installed at the tail.
		 */
		act_unlock_thread(self);

		special_handler_continue();
		/*NOTREACHED*/
	}

	act_unlock_thread(self);
}
1516
/*
 * Update activation that belongs to a task created via kernel_task_create().
 *
 * Converts the activation's pcb from user state to kernel state and
 * then marks the activation as loading kernel code.
 */
void
act_user_to_kernel(
	thread_act_t	thr_act)
{
	pcb_user_to_kernel(thr_act);
	thr_act->kernel_loading = TRUE;
}
1527
/*
 * Already locked: activation (shuttle frozen within)
 *
 * Mark an activation inactive, and prepare it to terminate
 * itself.
 */
static void
act_disable(
	thread_act_t	thr_act)
{

#if	MACH_ASSERT
	if (watchacts & WA_EXIT) {
		printf("act_%x: act_disable_tl(thr_act=%x(%d))%sactive",
			current_act(), thr_act, thr_act->ref_count,
			(thr_act->active ? " " : " !"));
		printf("\n");
		(void) dump_act(thr_act);
	}
#endif	/* MACH_ASSERT */

	/* Clearing `active` is what special_handler() keys off to terminate. */
	thr_act->active = 0;

	/* Drop the thr_act reference taken for being active.
	 * (There is still at least one reference left:
	 * the one we were passed.)
	 * Inline the deallocate because thr_act is locked.
	 */
	act_locked_act_deallocate(thr_act);
}
1558
/*
 * act_alert	- Register an alert from this activation.
 *
 * Each set bit is propagated upward from (but not including) this activation,
 * until the top of the chain is reached or the bit is masked.
 */
kern_return_t
act_alert(thread_act_t thr_act, unsigned alerts)
{
	thread_t	thread = act_lock_thread(thr_act);

#if	MACH_ASSERT
	if (watchacts & WA_ACT_LNK)
		printf("act_alert %x: %x\n", thr_act, alerts);
#endif	/* MACH_ASSERT */

	if (thread) {
		thread_act_t	act_up = thr_act;
		/* Walk up the chain; each hop masks bits, then records the rest. */
		while ((alerts) && (act_up != thread->top_act)) {
			act_up = act_up->higher;
			alerts &= act_up->alert_mask;
			act_up->alerts |= alerts;
		}
		/*
		 * XXXX If we reach the top, and it is blocked in glue
		 * code, do something to kick it. XXXX
		 */
	}
	act_unlock_thread(thr_act);

	return KERN_SUCCESS;
}
1591
/*
 * act_alert_mask	- Not yet implemented; any call panics.
 */
kern_return_t act_alert_mask(thread_act_t thr_act, unsigned alert_mask)
{
	panic("act_alert_mask NOT YET IMPLEMENTED\n");
	return KERN_SUCCESS;	/* not reached */
}
1597
/*
 * Parameter block passed from get_set_state() to the get/set state
 * ReturnHandlers.  Its address doubles as the wait event the
 * requester blocks on (the handlers cast the rh pointer back, so
 * rh must remain the first member).
 */
typedef struct GetSetState {
	struct ReturnHandler rh;	/* handler-list linkage; must be first */
	int	flavor;			/* thread state flavor requested */
	void	*state;			/* caller's state buffer (in/out) */
	int	*pcount;		/* state element count (in/out) */
	int	result;			/* kern_return_t set by the handler */
} GetSetState;
1605
/* Local forward declarations for the get/set-state machinery below. */
kern_return_t get_set_state(
		thread_act_t thr_act, int flavor,
		thread_state_t state, int *pcount,
		void (*handler)(ReturnHandler *rh, thread_act_t thr_act));
void get_state_handler(ReturnHandler *rh, thread_act_t thr_act);
void set_state_handler(ReturnHandler *rh, thread_act_t thr_act);
1613
/*
 * get_set_state(thr_act ...)
 *
 * General code to install g/set_state handler.
 * Called with thr_act's act_lock() and "appropriate"
 * thread-related locks held.  (See act_lock_thread().)
 *
 * Queues a ReturnHandler on the target activation, pokes it with an
 * APC AST, then blocks on the parameter block until the handler has
 * run.  Returns the handler's result.
 */
kern_return_t
get_set_state(
	thread_act_t		act,
	int			flavor,
	thread_state_t		state,
	int			*pcount,
	void			(*handler)(
					ReturnHandler	*rh,
					thread_act_t	act))
{
	GetSetState	gss;

	/* Initialize a small parameter structure */
	gss.rh.handler = handler;
	gss.flavor = flavor;
	gss.state = state;
	gss.pcount = pcount;
	gss.result = KERN_ABORTED;	/* iff wait below is interrupted */

	/* Add it to the thr_act's return handler list */
	gss.rh.next = act->handlers;
	act->handlers = &gss.rh;

	act_set_apc(act);

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR) {
		printf("act_%x: get_set_state(act=%x flv=%x state=%x ptr@%x=%x)",
			current_act(), act, flavor, state,
			pcount, (pcount ? *pcount : 0));
		printf((handler == get_state_handler ? "get_state_hdlr\n" :
			(handler == set_state_handler ? "set_state_hdlr\n" :
			"hndler=%x\n")), handler);
	}
#endif	/* MACH_ASSERT */

	assert(act->thread);
	assert(act != current_act());

	for (;;) {
		wait_result_t	result;

		/*
		 * If the target is suspended at its top activation,
		 * wake it so it can notice and run the handler.
		 */
		if (	act->inited		&&
			act->thread->top_act == act	)
			thread_wakeup_one(&act->suspend_count);

		/*
		 * Wait must be interruptible to avoid deadlock (e.g.) with
		 * task_suspend() when caller and target of get_set_state()
		 * are in same task.
		 */
		result = assert_wait(&gss, THREAD_ABORTSAFE);
		act_unlock_thread(act);

		if (result == THREAD_WAITING)
			result = thread_block(THREAD_CONTINUE_NULL);

		assert(result != THREAD_WAITING);

		/* gss.result stays KERN_ABORTED until a handler overwrites it. */
		if (gss.result != KERN_ABORTED) {
			assert(result != THREAD_INTERRUPTED);
			break;
		}

		/* JMM - What about other aborts (like BSD signals)? */
		if (current_act()->handlers)
			act_execute_returnhandlers();

		act_lock_thread(act);
	}

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("act_%x: get_set_state returns %x\n",
			current_act(), gss.result);
#endif	/* MACH_ASSERT */

	return (gss.result);
}
1700
1701 void
1702 set_state_handler(ReturnHandler *rh, thread_act_t thr_act)
1703 {
1704 GetSetState *gss = (GetSetState*)rh;
1705
1706 #if MACH_ASSERT
1707 if (watchacts & WA_ACT_HDLR)
1708 printf("act_%x: set_state_handler(rh=%x,thr_act=%x)\n",
1709 current_act(), rh, thr_act);
1710 #endif /* MACH_ASSERT */
1711
1712 gss->result = act_machine_set_state(thr_act, gss->flavor,
1713 gss->state, *gss->pcount);
1714 thread_wakeup((event_t)gss);
1715 }
1716
1717 void
1718 get_state_handler(ReturnHandler *rh, thread_act_t thr_act)
1719 {
1720 GetSetState *gss = (GetSetState*)rh;
1721
1722 #if MACH_ASSERT
1723 if (watchacts & WA_ACT_HDLR)
1724 printf("act_%x: get_state_handler(rh=%x,thr_act=%x)\n",
1725 current_act(), rh, thr_act);
1726 #endif /* MACH_ASSERT */
1727
1728 gss->result = act_machine_get_state(thr_act, gss->flavor,
1729 gss->state,
1730 (mach_msg_type_number_t *) gss->pcount);
1731 thread_wakeup((event_t)gss);
1732 }
1733
/*
 * act_get_state_locked
 *	Fetch machine state of the given flavor from the target
 *	activation.  Entered with the act locked (act_lock_thread());
 *	the lock is released inside get_set_state().
 */
kern_return_t
act_get_state_locked(thread_act_t thr_act, int flavor, thread_state_t state,
					mach_msg_type_number_t *pcount)
{
#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("act_%x: act_get_state_L(thr_act=%x,flav=%x,st=%x,pcnt@%x=%x)\n",
			current_act(), thr_act, flavor, state, pcount,
			(pcount ? *pcount : 0));
#endif	/* MACH_ASSERT */

	return (get_set_state(thr_act, flavor, state, (int *)pcount, get_state_handler));
}
1747
/*
 * act_set_state_locked
 *	Apply machine state of the given flavor to the target
 *	activation.  Entered with the act locked (act_lock_thread());
 *	the lock is released inside get_set_state().
 */
kern_return_t
act_set_state_locked(thread_act_t thr_act, int flavor, thread_state_t state,
					mach_msg_type_number_t count)
{
#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("act_%x: act_set_state_L(thr_act=%x,flav=%x,st=%x,pcnt@%x=%x)\n",
			current_act(), thr_act, flavor, state, count, count);
#endif	/* MACH_ASSERT */

	/*
	 * Passing &count (a local) down is safe: get_set_state() blocks
	 * until the handler has consumed it.
	 */
	return (get_set_state(thr_act, flavor, state, (int *)&count, set_state_handler));
}
1760
1761 kern_return_t
1762 act_set_state(thread_act_t thr_act, int flavor, thread_state_t state,
1763 mach_msg_type_number_t count)
1764 {
1765 if (thr_act == THR_ACT_NULL || thr_act == current_act())
1766 return(KERN_INVALID_ARGUMENT);
1767
1768 act_lock_thread(thr_act);
1769 return(act_set_state_locked(thr_act, flavor, state, count));
1770
1771 }
1772
1773 kern_return_t
1774 act_get_state(thread_act_t thr_act, int flavor, thread_state_t state,
1775 mach_msg_type_number_t *pcount)
1776 {
1777 if (thr_act == THR_ACT_NULL || thr_act == current_act())
1778 return(KERN_INVALID_ARGUMENT);
1779
1780 act_lock_thread(thr_act);
1781 return(act_get_state_locked(thr_act, flavor, state, pcount));
1782 }
1783
1784 void
1785 act_set_astbsd(
1786 thread_act_t act)
1787 {
1788 spl_t s = splsched();
1789
1790 if (act == current_act()) {
1791 thread_ast_set(act, AST_BSD);
1792 ast_propagate(act->ast);
1793 }
1794 else {
1795 thread_t thread = act->thread;
1796 processor_t processor;
1797
1798 thread_lock(thread);
1799 thread_ast_set(act, AST_BSD);
1800 processor = thread->last_processor;
1801 if ( processor != PROCESSOR_NULL &&
1802 processor->state == PROCESSOR_RUNNING &&
1803 processor->cpu_data->active_thread == thread )
1804 cause_ast_check(processor);
1805 thread_unlock(thread);
1806 }
1807
1808 splx(s);
1809 }
1810
1811 void
1812 act_set_apc(
1813 thread_act_t act)
1814 {
1815 spl_t s = splsched();
1816
1817 if (act == current_act()) {
1818 thread_ast_set(act, AST_APC);
1819 ast_propagate(act->ast);
1820 }
1821 else {
1822 thread_t thread = act->thread;
1823 processor_t processor;
1824
1825 thread_lock(thread);
1826 thread_ast_set(act, AST_APC);
1827 processor = thread->last_processor;
1828 if ( processor != PROCESSOR_NULL &&
1829 processor->state == PROCESSOR_RUNNING &&
1830 processor->cpu_data->active_thread == thread )
1831 cause_ast_check(processor);
1832 thread_unlock(thread);
1833 }
1834
1835 splx(s);
1836 }
1837
/*
 * act_ulock_release_all
 *	Release every user-level lock (ulock) still held by the
 *	activation.  Each lock is marked unstable first, since the
 *	holder is going away without performing a clean release.
 */
void
act_ulock_release_all(thread_act_t thr_act)
{
	ulock_t	ulock;

	while (!queue_empty(&thr_act->held_ulocks)) {
		ulock = (ulock_t) queue_first(&thr_act->held_ulocks);
		(void) lock_make_unstable(ulock, thr_act);
		(void) lock_release_internal(ulock, thr_act);
	}
}
1849
/*
 * Provide real-function versions (for export to other components) of
 * operations that are implemented as macros internally.
 */
1854 thread_act_t
1855 thread_self(void)
1856 {
1857 thread_act_t self = current_act_fast();
1858
1859 act_reference(self);
1860 return self;
1861 }
1862
1863 thread_act_t
1864 mach_thread_self(void)
1865 {
1866 thread_act_t self = current_act_fast();
1867
1868 act_reference(self);
1869 return self;
1870 }
1871
/*
 * act_reference
 *	Exported function version of the act_reference() macro.  The
 *	#undef strips the macro so this definition gets a real symbol
 *	for other components to link against.
 */
#undef act_reference
void
act_reference(
	thread_act_t	thr_act)
{
	act_reference_fast(thr_act);
}
1879
/*
 * act_deallocate
 *	Exported function version of the act_deallocate() macro.  The
 *	#undef strips the macro so this definition gets a real symbol
 *	for other components to link against.
 */
#undef act_deallocate
void
act_deallocate(
	thread_act_t	thr_act)
{
	act_deallocate_fast(thr_act);
}