]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/thread_act.c
xnu-517.3.7.tar.gz
[apple/xnu.git] / osfmk / kern / thread_act.c
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_FREE_COPYRIGHT@
27 */
28 /*
29 * Copyright (c) 1993 The University of Utah and
30 * the Center for Software Science (CSS). All rights reserved.
31 *
32 * Permission to use, copy, modify and distribute this software and its
33 * documentation is hereby granted, provided that both the copyright
34 * notice and this permission notice appear in all copies of the
35 * software, derivative works or modified versions, and any portions
36 * thereof, and that both notices appear in supporting documentation.
37 *
38 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
39 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
40 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
41 *
42 * CSS requests users of this software to return to css-dist@cs.utah.edu any
43 * improvements that they make and grant CSS redistribution rights.
44 *
45 * Author: Bryan Ford, University of Utah CSS
46 *
47 * Thread_Activation management routines
48 */
49
50 #include <cpus.h>
51 #include <task_swapper.h>
52 #include <mach/kern_return.h>
53 #include <mach/alert.h>
54 #include <kern/etap_macros.h>
55 #include <kern/mach_param.h>
56 #include <kern/zalloc.h>
57 #include <kern/thread.h>
58 #include <kern/thread_swap.h>
59 #include <kern/task.h>
60 #include <kern/task_swap.h>
61 #include <kern/thread_act.h>
62 #include <kern/sched_prim.h>
63 #include <kern/misc_protos.h>
64 #include <kern/assert.h>
65 #include <kern/exception.h>
66 #include <kern/ipc_mig.h>
67 #include <kern/ipc_tt.h>
68 #include <kern/profile.h>
69 #include <kern/machine.h>
70 #include <kern/spl.h>
71 #include <kern/syscall_subr.h>
72 #include <kern/sync_lock.h>
73 #include <kern/mk_sp.h> /*** ??? fix so this can be removed ***/
74 #include <kern/processor.h>
75 #include <mach_prof.h>
76 #include <mach/rpc.h>
77
/*
 * Track the number of times we need to swapin a thread to deallocate it.
 */
int act_free_swapin = 0;

/*
 * Forward declarations for functions local to this file.
 */
/* Flag an activation so its special handler runs (abort support). */
kern_return_t	act_abort( thread_act_t, boolean_t);
/* The suspend/terminate ReturnHandler body. */
void		special_handler(ReturnHandler *, thread_act_t);
kern_return_t	act_set_state_locked(thread_act_t, int,
			thread_state_t,
			mach_msg_type_number_t);
kern_return_t	act_get_state_locked(thread_act_t, int,
			thread_state_t,
			mach_msg_type_number_t *);
void		act_set_astbsd(thread_act_t);
void		act_set_apc(thread_act_t);
void		act_ulock_release_all(thread_act_t thr_act);

void		install_special_handler_locked(thread_act_t);

/* Mark an activation inactive; called with the act locked. */
static void		act_disable(thread_act_t);
101
102 /*
103 * Thread interfaces accessed via a thread_activation:
104 */
105
106
/*
 * Internal routine to terminate a thread.
 * Sometimes called with task already locked.
 *
 * Marks the activation inactive, flags an abort, and forces the
 * target through the kernel so its special handler completes the
 * termination.  Returns KERN_TERMINATED if already terminated.
 */
kern_return_t
thread_terminate_internal(
	register thread_act_t	act)
{
	kern_return_t	result;
	thread_t		thread;

	thread = act_lock_thread(act);

	/* Someone else already terminated this activation. */
	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	act_disable(act);
	result = act_abort(act, FALSE);

	/*
	 * Make sure this thread enters the kernel
	 * Must unlock the act, but leave the shuttle
	 * captured in this act.
	 */
	if (thread != current_thread()) {
		act_unlock(act);

		/* stop/unstop cycles the target through the kernel */
		if (thread_stop(thread))
			thread_unstop(thread);
		else
			result = KERN_ABORTED;

		act_lock(act);
	}

	/* Wake the target so it notices the pending special handler. */
	clear_wait(thread, act->started? THREAD_INTERRUPTED: THREAD_AWAKENED);
	act_unlock_thread(act);

	return (result);
}
149
/*
 * Terminate a thread.
 *
 * A kernel-task thread may only terminate itself; the final
 * teardown always happens in the special handler APC.
 */
kern_return_t
thread_terminate(
	register thread_act_t	act)
{
	kern_return_t	result;

	if (act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	/* Reject remote termination of kernel threads. */
	if (	act->task == kernel_task	&&
			act != current_act()		)
		return (KERN_FAILURE);

	result = thread_terminate_internal(act);

	/*
	 * If a kernel thread is terminating itself, force an AST here.
	 * Kernel threads don't normally pass through the AST checking
	 * code - and all threads finish their own termination in the
	 * special handler APC.
	 */
	if (act->task == kernel_task) {
		ml_set_interrupts_enabled(FALSE);
		assert(act == current_act());
		ast_taken(AST_APC, TRUE);
		/* ast_taken() never returns for a self-terminating thread */
		panic("thread_terminate");
	}

	return (result);
}
183
/*
 * Suspend execution of the specified thread.
 * This is a recursive-style suspension of the thread, a count of
 * suspends is maintained.
 *
 * Called with act_lock held.
 */
void
thread_hold(
	register thread_act_t	act)
{
	thread_t		thread = act->thread;

	/* Only the 0 -> 1 transition does any work. */
	if (act->suspend_count++ == 0) {
		install_special_handler(act);
		/*
		 * If the act is live and current on its shuttle, nudge
		 * it so the special handler runs and it parks waiting
		 * on &act->suspend_count.
		 */
		if (	act->started			&&
				thread != THREAD_NULL	&&
				thread->top_act == act	)
			thread_wakeup_one(&act->suspend_count);
	}
}
205
/*
 * Decrement internal suspension count for thr_act, setting thread
 * runnable when count falls to zero.
 *
 * Called with act_lock held.
 */
void
thread_release(
	register thread_act_t	act)
{
	thread_t		thread = act->thread;

	/* Act on the 1 -> 0 transition only, and only when the act
	 * is current on a real shuttle. */
	if (	act->suspend_count > 0		&&
			--act->suspend_count == 0	&&
			thread != THREAD_NULL		&&
			thread->top_act == act		) {
		if (!act->started) {
			/* Never-run act: kick it off for the first time. */
			clear_wait(thread, THREAD_AWAKENED);
			act->started = TRUE;
		}
		else
			/* Wake it from its special-handler suspend park. */
			thread_wakeup_one(&act->suspend_count);
	}
}
230
/*
 * User-level suspend: counts separately from the internal hold
 * count; the first user stop also takes the first internal hold.
 */
kern_return_t
thread_suspend(
	register thread_act_t	act)
{
	thread_t	thread;

	/* Kernel-task threads may not be suspended. */
	if (act == THR_ACT_NULL || act->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	/* Only the very first stop installs the handler. */
	if (	act->user_stop_count++ == 0		&&
			act->suspend_count++ == 0		) {
		install_special_handler(act);
		if (	thread != current_thread()	&&
				thread != THREAD_NULL		&&
				thread->top_act == act		) {
			assert(act->started);
			/* Kick the target, then wait until it actually stops. */
			thread_wakeup_one(&act->suspend_count);
			act_unlock_thread(act);

			thread_wait(thread);
		}
		else
			/* Self-suspend parks later, in the special handler. */
			act_unlock_thread(act);
	}
	else
		act_unlock_thread(act);

	return (KERN_SUCCESS);
}
267
/*
 * User-level resume: undoes one thread_suspend().  Fails if the
 * thread was not user-stopped; KERN_TERMINATED if it is dead.
 */
kern_return_t
thread_resume(
	register thread_act_t	act)
{
	kern_return_t	result = KERN_SUCCESS;
	thread_t		thread;

	if (act == THR_ACT_NULL || act->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (act->active) {
		if (act->user_stop_count > 0) {
			/* Last user stop also drops the internal hold. */
			if (	--act->user_stop_count == 0		&&
					--act->suspend_count == 0		&&
					thread != THREAD_NULL			&&
					thread->top_act == act			) {
				if (!act->started) {
					/* First resume of a brand-new act. */
					clear_wait(thread, THREAD_AWAKENED);
					act->started = TRUE;
				}
				else
					/* Wake from the special-handler park. */
					thread_wakeup_one(&act->suspend_count);
			}
		}
		else
			result = KERN_FAILURE;
	}
	else
		result = KERN_TERMINATED;

	act_unlock_thread(act);

	return (result);
}
304
305 /*
306 * thread_depress_abort:
307 *
308 * Prematurely abort priority depression if there is one.
309 */
310 kern_return_t
311 thread_depress_abort(
312 register thread_act_t thr_act)
313 {
314 register thread_t thread;
315 kern_return_t result;
316
317 if (thr_act == THR_ACT_NULL)
318 return (KERN_INVALID_ARGUMENT);
319
320 thread = act_lock_thread(thr_act);
321 /* if activation is terminating, this operation is not meaningful */
322 if (!thr_act->active) {
323 act_unlock_thread(thr_act);
324
325 return (KERN_TERMINATED);
326 }
327
328 result = _mk_sp_thread_depress_abort(thread, FALSE);
329
330 act_unlock_thread(thr_act);
331
332 return (result);
333 }
334
335
/*
 * Indicate that the activation should run its
 * special handler to detect the condition.
 *
 * Called with act_lock held.  chain_break is currently unused.
 */
kern_return_t
act_abort(
	thread_act_t	act,
	boolean_t		chain_break )
{
	thread_t	thread = act->thread;
	spl_t		s = splsched();

	assert(thread->top_act == act);

	thread_lock(thread);
	if (!(thread->state & TH_ABORT)) {
		/* First abort: mark it and queue the special handler. */
		thread->state |= TH_ABORT;
		install_special_handler_locked(act);
	} else {
		/*
		 * Already aborting: escalate a safe abort to an
		 * unconditional one by clearing TH_ABORT_SAFELY.
		 */
		thread->state &= ~TH_ABORT_SAFELY;
	}
	thread_unlock(thread);
	splx(s);

	return (KERN_SUCCESS);
}
364
365 kern_return_t
366 thread_abort(
367 register thread_act_t act)
368 {
369 kern_return_t result;
370 thread_t thread;
371
372 if (act == THR_ACT_NULL)
373 return (KERN_INVALID_ARGUMENT);
374
375 thread = act_lock_thread(act);
376
377 if (!act->active) {
378 act_unlock_thread(act);
379 return (KERN_TERMINATED);
380 }
381
382 result = act_abort(act, FALSE);
383 clear_wait(thread, THREAD_INTERRUPTED);
384 act_unlock_thread(act);
385
386 return (result);
387 }
388
/*
 * Abort the target only at a "safe point": if the thread cannot be
 * interrupted out of a safe wait right now, arm a deferred safe
 * abort (TH_ABORT|TH_ABORT_SAFELY) handled on kernel exit.
 */
kern_return_t
thread_abort_safely(
	thread_act_t	act)
{
	thread_t		thread;
	kern_return_t	ret;
	spl_t			s;

	if ( act == THR_ACT_NULL )
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	s = splsched();
	thread_lock(thread);
	/* Try to interrupt immediately if at a safe point; otherwise
	 * leave the flags set for later. */
	if (!thread->at_safe_point ||
		clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
		if (!(thread->state & TH_ABORT)) {
			thread->state |= (TH_ABORT|TH_ABORT_SAFELY);
			install_special_handler_locked(act);
		}
	}
	thread_unlock(thread);
	splx(s);

	act_unlock_thread(act);

	return (KERN_SUCCESS);
}
423
424 /*** backward compatibility hacks ***/
425 #include <mach/thread_info.h>
426 #include <mach/thread_special_ports.h>
427 #include <ipc/ipc_port.h>
428 #include <mach/thread_act_server.h>
429
430 kern_return_t
431 thread_info(
432 thread_act_t thr_act,
433 thread_flavor_t flavor,
434 thread_info_t thread_info_out,
435 mach_msg_type_number_t *thread_info_count)
436 {
437 register thread_t thread;
438 kern_return_t result;
439
440 if (thr_act == THR_ACT_NULL)
441 return (KERN_INVALID_ARGUMENT);
442
443 thread = act_lock_thread(thr_act);
444 if (!thr_act->active) {
445 act_unlock_thread(thr_act);
446
447 return (KERN_TERMINATED);
448 }
449
450 result = thread_info_shuttle(thr_act, flavor,
451 thread_info_out, thread_info_count);
452
453 act_unlock_thread(thr_act);
454
455 return (result);
456 }
457
458 /*
459 * Routine: thread_get_special_port [kernel call]
460 * Purpose:
461 * Clones a send right for one of the thread's
462 * special ports.
463 * Conditions:
464 * Nothing locked.
465 * Returns:
466 * KERN_SUCCESS Extracted a send right.
467 * KERN_INVALID_ARGUMENT The thread is null.
468 * KERN_FAILURE The thread is dead.
469 * KERN_INVALID_ARGUMENT Invalid special port.
470 */
471
472 kern_return_t
473 thread_get_special_port(
474 thread_act_t thr_act,
475 int which,
476 ipc_port_t *portp)
477 {
478 ipc_port_t *whichp;
479 ipc_port_t port;
480 thread_t thread;
481
482 if (!thr_act)
483 return KERN_INVALID_ARGUMENT;
484 thread = act_lock_thread(thr_act);
485 switch (which) {
486 case THREAD_KERNEL_PORT:
487 whichp = &thr_act->ith_sself;
488 break;
489
490 default:
491 act_unlock_thread(thr_act);
492 return KERN_INVALID_ARGUMENT;
493 }
494
495 if (!thr_act->active) {
496 act_unlock_thread(thr_act);
497 return KERN_FAILURE;
498 }
499
500 port = ipc_port_copy_send(*whichp);
501 act_unlock_thread(thr_act);
502
503 *portp = port;
504 return KERN_SUCCESS;
505 }
506
507 /*
508 * Routine: thread_set_special_port [kernel call]
509 * Purpose:
510 * Changes one of the thread's special ports,
511 * setting it to the supplied send right.
512 * Conditions:
513 * Nothing locked. If successful, consumes
514 * the supplied send right.
515 * Returns:
516 * KERN_SUCCESS Changed the special port.
517 * KERN_INVALID_ARGUMENT The thread is null.
518 * KERN_FAILURE The thread is dead.
519 * KERN_INVALID_ARGUMENT Invalid special port.
520 */
521
522 kern_return_t
523 thread_set_special_port(
524 thread_act_t thr_act,
525 int which,
526 ipc_port_t port)
527 {
528 ipc_port_t *whichp;
529 ipc_port_t old;
530 thread_t thread;
531
532 if (thr_act == 0)
533 return KERN_INVALID_ARGUMENT;
534
535 thread = act_lock_thread(thr_act);
536 switch (which) {
537 case THREAD_KERNEL_PORT:
538 whichp = &thr_act->ith_self;
539 break;
540
541 default:
542 act_unlock_thread(thr_act);
543 return KERN_INVALID_ARGUMENT;
544 }
545
546 if (!thr_act->active) {
547 act_unlock_thread(thr_act);
548 return KERN_FAILURE;
549 }
550
551 old = *whichp;
552 *whichp = port;
553 act_unlock_thread(thr_act);
554
555 if (IP_VALID(old))
556 ipc_port_release_send(old);
557 return KERN_SUCCESS;
558 }
559
/*
 * thread state should always be accessible by locking the thread
 * and copying it.  The activation messes things up so for right
 * now if it's not the top of the chain, use a special handler to
 * get the information when the shuttle returns to the activation.
 */
kern_return_t
thread_get_state(
	register thread_act_t	act,
	int						flavor,
	thread_state_t			state,			/* pointer to OUT array */
	mach_msg_type_number_t	*state_count)	/*IN/OUT*/
{
	kern_return_t		result = KERN_SUCCESS;
	thread_t			thread;

	/* Reading one's own saved state this way is unsupported. */
	if (act == THR_ACT_NULL || act == current_act())
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	/* Keep the act suspended while we examine its state. */
	thread_hold(act);

	/*
	 * Stop the shuttle.  The act lock must be dropped across
	 * thread_stop(), so loop until the act is still bound to the
	 * same (now stopped) shuttle after relocking.
	 */
	for (;;) {
		thread_t			thread1;

		if (	thread == THREAD_NULL		||
				thread->top_act != act		)
			break;
		act_unlock_thread(act);

		if (!thread_stop(thread)) {
			/* Stop aborted; proceed without a stopped shuttle. */
			result = KERN_ABORTED;
			(void)act_lock_thread(act);
			thread = THREAD_NULL;
			break;
		}

		thread1 = act_lock_thread(act);
		if (thread1 == thread)
			break;

		/* Shuttle changed underneath us; undo and retry. */
		thread_unstop(thread);
		thread = thread1;
	}

	if (result == KERN_SUCCESS)
		result = machine_thread_get_state(act, flavor, state, state_count);

	/* Release the shuttle if we stopped it above. */
	if (	thread != THREAD_NULL		&&
			thread->top_act == act		)
		thread_unstop(thread);

	thread_release(act);
	act_unlock_thread(act);

	return (result);
}
623
/*
 * Change thread's machine-dependent state.  Called with nothing
 * locked.  Returns same way.
 */
kern_return_t
thread_set_state(
	register thread_act_t	act,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	state_count)
{
	kern_return_t		result = KERN_SUCCESS;
	thread_t			thread;

	/* Writing one's own live state this way is unsupported. */
	if (act == THR_ACT_NULL || act == current_act())
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	/* Keep the act suspended while we rewrite its state. */
	thread_hold(act);

	/*
	 * Stop the shuttle.  The act lock must be dropped across
	 * thread_stop(), so loop until the act is still bound to the
	 * same (now stopped) shuttle after relocking.
	 */
	for (;;) {
		thread_t			thread1;

		if (	thread == THREAD_NULL		||
				thread->top_act != act		)
			break;
		act_unlock_thread(act);

		if (!thread_stop(thread)) {
			/* Stop aborted; proceed without a stopped shuttle. */
			result = KERN_ABORTED;
			(void)act_lock_thread(act);
			thread = THREAD_NULL;
			break;
		}

		thread1 = act_lock_thread(act);
		if (thread1 == thread)
			break;

		/* Shuttle changed underneath us; undo and retry. */
		thread_unstop(thread);
		thread = thread1;
	}

	if (result == KERN_SUCCESS)
		result = machine_thread_set_state(act, flavor, state, state_count);

	/* Release the shuttle if we stopped it above. */
	if (	thread != THREAD_NULL		&&
			thread->top_act == act		)
		thread_unstop(thread);

	thread_release(act);
	act_unlock_thread(act);

	return (result);
}
685
686 /*
687 * Kernel-internal "thread" interfaces used outside this file:
688 */
689
/*
 * Copy the current activation's machine state into the target
 * activation (used by fork).  Same stop-the-shuttle dance as
 * thread_get_state()/thread_set_state().
 */
kern_return_t
thread_dup(
	register thread_act_t	target)
{
	kern_return_t		result = KERN_SUCCESS;
	thread_act_t		self = current_act();
	thread_t			thread;

	if (target == THR_ACT_NULL || target == self)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(target);

	if (!target->active) {
		act_unlock_thread(target);
		return (KERN_TERMINATED);
	}

	/* Keep the target suspended for the duration of the copy. */
	thread_hold(target);

	/*
	 * Stop the shuttle; act lock must be dropped across
	 * thread_stop(), so loop until the binding is stable.
	 */
	for (;;) {
		thread_t			thread1;

		if (	thread == THREAD_NULL		||
				thread->top_act != target	)
			break;
		act_unlock_thread(target);

		if (!thread_stop(thread)) {
			/* Stop aborted; proceed without a stopped shuttle. */
			result = KERN_ABORTED;
			(void)act_lock_thread(target);
			thread = THREAD_NULL;
			break;
		}

		thread1 = act_lock_thread(target);
		if (thread1 == thread)
			break;

		/* Shuttle changed underneath us; undo and retry. */
		thread_unstop(thread);
		thread = thread1;
	}

	if (result == KERN_SUCCESS)
		result = machine_thread_dup(self, target);

	/* Release the shuttle if we stopped it above. */
	if (	thread != THREAD_NULL		&&
			thread->top_act == target	)
		thread_unstop(thread);

	thread_release(target);
	act_unlock_thread(target);

	return (result);
}
745
746
747 /*
748 * thread_setstatus:
749 *
750 * Set the status of the specified thread.
751 * Called with (and returns with) no locks held.
752 */
753 kern_return_t
754 thread_setstatus(
755 register thread_act_t act,
756 int flavor,
757 thread_state_t tstate,
758 mach_msg_type_number_t count)
759 {
760 kern_return_t result = KERN_SUCCESS;
761 thread_t thread;
762
763 thread = act_lock_thread(act);
764
765 if ( act != current_act() &&
766 (act->suspend_count == 0 ||
767 thread == THREAD_NULL ||
768 (thread->state & TH_RUN) ||
769 thread->top_act != act) )
770 result = KERN_FAILURE;
771
772 if (result == KERN_SUCCESS)
773 result = machine_thread_set_state(act, flavor, tstate, count);
774
775 act_unlock_thread(act);
776
777 return (result);
778 }
779
780 /*
781 * thread_getstatus:
782 *
783 * Get the status of the specified thread.
784 */
785 kern_return_t
786 thread_getstatus(
787 register thread_act_t act,
788 int flavor,
789 thread_state_t tstate,
790 mach_msg_type_number_t *count)
791 {
792 kern_return_t result = KERN_SUCCESS;
793 thread_t thread;
794
795 thread = act_lock_thread(act);
796
797 if ( act != current_act() &&
798 (act->suspend_count == 0 ||
799 thread == THREAD_NULL ||
800 (thread->state & TH_RUN) ||
801 thread->top_act != act) )
802 result = KERN_FAILURE;
803
804 if (result == KERN_SUCCESS)
805 result = machine_thread_get_state(act, flavor, tstate, count);
806
807 act_unlock_thread(act);
808
809 return (result);
810 }
811
812 /*
813 * Kernel-internal thread_activation interfaces used outside this file:
814 */
815
816 void
817 act_reference(
818 thread_act_t act)
819 {
820 if (act == NULL)
821 return;
822
823 act_lock(act);
824 act_reference_locked(act);
825 act_unlock(act);
826 }
827
/*
 * Drop a reference on an activation; on the last reference, tear
 * the (already-terminated) activation down: roll CPU times into
 * the task, unlink from the task, destroy IPC state, and release
 * the task and shuttle references.
 */
void
act_deallocate(
	thread_act_t	act)
{
	task_t		task;
	thread_t	thread;
	void		*task_proc;

	if (act == NULL)
		return;

	act_lock(act);

	/* Drop one reference; bail if others remain. */
	if (--act->act_ref_count > 0) {
		act_unlock(act);
		return;
	}

	/* Last reference: the act must already be terminated. */
	assert(!act->active);

	thread = act->thread;
	assert(thread != NULL);

	/* Sever the shuttle's pointer to this dead activation. */
	thread->top_act = NULL;

	act_unlock(act);

	task = act->task;
	task_lock(task);

	task_proc = task->bsd_info;

	{
		time_value_t	user_time, system_time;

		/* Fold the thread's CPU usage into the task totals. */
		thread_read_times(thread, &user_time, &system_time);
		time_value_add(&task->total_user_time, &user_time);
		time_value_add(&task->total_system_time, &system_time);

		/* Unlink the act from the task's thread list. */
		queue_remove(&task->threads, act, thread_act_t, task_threads);
		act->task_threads.next = NULL;
		task->thread_count--;
		task->res_thread_count--;
	}

	task_unlock(task);

	act_prof_deallocate(act);
	ipc_thr_act_terminate(act);

#ifdef MACH_BSD
	{
		/* Release the BSD uthread shadow structure, if any. */
		extern void uthread_free(task_t, void *, void *);
		void *ut = act->uthread;

		act->uthread = NULL;
		uthread_free(task, ut, task_proc);
	}
#endif  /* MACH_BSD */

	task_deallocate(task);

	thread_deallocate(thread);
}
892
893
/*
 * act_attach	- Attach an thr_act to the top of a thread ("push the stack").
 *
 * The thread_shuttle must be either the current one or a brand-new one.
 * Assumes the thr_act is active but not in use.
 *
 * Already locked: thr_act plus "appropriate" thread-related locks
 * (see act_lock_thread()).
 */
void
act_attach(
	thread_act_t	act,
	thread_t		thread)
{
	thread_act_t	lower;

	/*
	 * Chain the act onto the thread's act stack.
	 * The shuttle takes a reference on the act here; it is
	 * dropped again in act_detach().
	 */
	act->act_ref_count++;
	act->thread = thread;
	act->higher = THR_ACT_NULL;
	/* Link below the previous top act, if any. */
	lower = act->lower = thread->top_act;
	if (lower != THR_ACT_NULL)
		lower->higher = act;

	thread->top_act = act;
}
922
/*
 * act_detach
 *
 * Remove the current thr_act from the top of the current thread, i.e.
 * "pop the stack".  Assumes already locked: thr_act plus "appropriate"
 * thread-related locks (see act_lock_thread).
 */
void
act_detach(
	thread_act_t	cur_act)
{
	thread_t	cur_thread = cur_act->thread;

	/* Unlink the thr_act from the thread's thr_act stack */
	cur_thread->top_act = cur_act->lower;
	cur_act->thread = 0;
	/* Drop the reference act_attach() took; the caller must still
	 * hold at least one reference of its own. */
	cur_act->act_ref_count--;
	assert(cur_act->act_ref_count > 0);

#if	MACH_ASSERT
	/* Poison the stack links to catch use of a detached act. */
	cur_act->lower = cur_act->higher = THR_ACT_NULL;
	if (cur_thread->top_act)
		cur_thread->top_act->higher = THR_ACT_NULL;
#endif	/* MACH_ASSERT */

	return;
}
950
951
952 /*
953 * Synchronize a thread operation with migration.
954 * Called with nothing locked.
955 * Returns with thr_act locked.
956 */
957 thread_t
958 act_lock_thread(
959 thread_act_t thr_act)
960 {
961
962 /*
963 * JMM - We have moved away from explicit RPC locks
964 * and towards a generic migration approach. The wait
965 * queue lock will be the point of synchronization for
966 * the shuttle linkage when this is rolled out. Until
967 * then, just lock the act.
968 */
969 act_lock(thr_act);
970 return (thr_act->thread);
971 }
972
973 /*
974 * Unsynchronize with migration (i.e., undo an act_lock_thread() call).
975 * Called with thr_act locked, plus thread locks held that are
976 * "correct" for thr_act's state. Returns with nothing locked.
977 */
978 void
979 act_unlock_thread(thread_act_t thr_act)
980 {
981 act_unlock(thr_act);
982 }
983
984 /*
985 * Synchronize with migration given a pointer to a shuttle (instead of an
986 * activation). Called with nothing locked; returns with all
987 * "appropriate" thread-related locks held (see act_lock_thread()).
988 */
989 thread_act_t
990 thread_lock_act(
991 thread_t thread)
992 {
993 thread_act_t thr_act;
994
995 while (1) {
996 thr_act = thread->top_act;
997 if (!thr_act)
998 break;
999 if (!act_lock_try(thr_act)) {
1000 mutex_pause();
1001 continue;
1002 }
1003 break;
1004 }
1005 return (thr_act);
1006 }
1007
1008 /*
1009 * Unsynchronize with an activation starting from a pointer to
1010 * a shuttle.
1011 */
1012 void
1013 thread_unlock_act(
1014 thread_t thread)
1015 {
1016 thread_act_t thr_act;
1017
1018 if (thr_act = thread->top_act) {
1019 act_unlock(thr_act);
1020 }
1021 }
1022
/*
 * switch_act
 *
 * If a new activation is given, switch to it.  If not,
 * switch to the lower activation (pop).  Returns the old
 * activation.  This is for migration support.
 */
thread_act_t
switch_act(
	thread_act_t act)
{
	thread_act_t old, new;
	thread_t thread;

	/* Must not migrate processors while juggling activations. */
	disable_preemption();

	thread = current_thread();

	/*
	 * Find the old and new activation for switch.
	 */
	old = thread->top_act;

	if (act) {
		/* Push: the supplied act becomes the new top. */
		new = act;
		new->thread = thread;
	}
	else {
		/* Pop: fall back to the next-lower act on the stack. */
		new = old->lower;
	}

	assert(new != THR_ACT_NULL);
	assert(current_processor()->active_thread == thread);

	/* This is where all the work happens */
	machine_switch_act(thread, old, new);

	/*
	 * Push or pop an activation on the chain.
	 */
	if (act) {
		act_attach(new, thread);
	}
	else {
		act_detach(old);
	}

	enable_preemption();

	return(old);
}
1074
1075 /*
1076 * install_special_handler
1077 * Install the special returnhandler that handles suspension and
1078 * termination, if it hasn't been installed already.
1079 *
1080 * Already locked: RPC-related locks for thr_act, but not
1081 * scheduling lock (thread_lock()) of the associated thread.
1082 */
1083 void
1084 install_special_handler(
1085 thread_act_t thr_act)
1086 {
1087 spl_t spl;
1088 thread_t thread = thr_act->thread;
1089
1090 spl = splsched();
1091 thread_lock(thread);
1092 install_special_handler_locked(thr_act);
1093 thread_unlock(thread);
1094 splx(spl);
1095 }
1096
/*
 * install_special_handler_locked
 *	Do the work of installing the special_handler.
 *
 *	Already locked: RPC-related locks for thr_act, plus the
 *	scheduling lock (thread_lock()) of the associated thread.
 */
void
install_special_handler_locked(
	thread_act_t	act)
{
	thread_t		thread = act->thread;
	ReturnHandler	**rh;

	/* The work handler must always be the last ReturnHandler on the list,
	   because it can do tricky things like detach the thr_act. */
	for (rh = &act->handlers; *rh; rh = &(*rh)->next)
		continue;
	/* Append only if not already queued (keeps this idempotent). */
	if (rh != &act->special_handler.next)
		*rh = &act->special_handler;

	if (act == thread->top_act) {
		/*
		 * Temporarily undepress, so target has
		 * a chance to do locking required to
		 * block itself in special_handler().
		 */
		if (thread->sched_mode & TH_MODE_ISDEPRESSED)
			compute_priority(thread, TRUE);
	}

	/* Arrange for the APC to be noticed on the next AST check. */
	thread_ast_set(act, AST_APC);
	if (act == current_act())
		ast_propagate(act->ast);
	else {
		processor_t		processor = thread->last_processor;

		/* If running remotely, interrupt that processor so the
		 * pending AST is seen promptly. */
		if (	processor != PROCESSOR_NULL				&&
				processor->state == PROCESSOR_RUNNING	&&
				processor->active_thread == thread		)
			cause_ast_check(processor);
	}
}
1140
1141 kern_return_t
1142 thread_apc_set(
1143 thread_act_t act,
1144 thread_apc_handler_t apc)
1145 {
1146 extern thread_apc_handler_t bsd_ast;
1147
1148 assert(apc == bsd_ast);
1149 return (KERN_FAILURE);
1150 }
1151
1152 kern_return_t
1153 thread_apc_clear(
1154 thread_act_t act,
1155 thread_apc_handler_t apc)
1156 {
1157 extern thread_apc_handler_t bsd_ast;
1158
1159 assert(apc == bsd_ast);
1160 return (KERN_FAILURE);
1161 }
1162
1163 /*
1164 * Activation control support routines internal to this file:
1165 */
1166
/*
 * act_execute_returnhandlers()	- does just what the name says
 *
 * This is called by system-dependent code when it detects that
 * thr_act->handlers is non-null while returning into user mode.
 */
void
act_execute_returnhandlers(void)
{
	thread_act_t	act = current_act();

	/* Consume the APC AST; handlers re-arm it if they need to. */
	thread_ast_clear(act, AST_APC);
	spllo();

	for (;;) {
		ReturnHandler	*rh;
		thread_t		thread = act_lock_thread(act);

		/* Pop the next handler under the scheduling lock. */
		(void)splsched();
		thread_lock(thread);
		rh = act->handlers;
		if (!rh) {
			/* List drained; done. */
			thread_unlock(thread);
			spllo();
			act_unlock_thread(act);
			return;
		}
		act->handlers = rh->next;
		thread_unlock(thread);
		spllo();
		act_unlock_thread(act);

		/* Execute it */
		(*rh->handler)(rh, act);
	}
}
1203
/*
 * special_handler_continue
 *
 * Continuation routine for the special handler blocks.  It checks
 * to see whether there has been any new suspensions.  If so, it
 * installs the special handler again.  Otherwise, it checks to see
 * if the current depression needs to be re-instated (it may have
 * been temporarily removed in order to get to this point in a hurry).
 */
void
special_handler_continue(void)
{
	thread_act_t		self = current_act();

	if (self->suspend_count > 0)
		/* Suspended again while blocked: go around once more. */
		install_special_handler(self);
	else {
		thread_t		thread = self->thread;
		spl_t			s = splsched();

		thread_lock(thread);
		if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
			processor_t		myprocessor = thread->last_processor;

			/* Re-instate the depression that
			 * install_special_handler_locked() lifted. */
			thread->sched_pri = DEPRESSPRI;
			myprocessor->current_pri = thread->sched_pri;
			thread->sched_mode &= ~TH_MODE_PREEMPT;
		}
		thread_unlock(thread);
		splx(s);
	}

	thread_exception_return();
	/*NOTREACHED*/
}
1239
/*
 * special_handler	- handles suspension, termination.  Called
 * with nothing locked.  Returns (if it returns) the same way.
 */
void
special_handler(
	ReturnHandler	*rh,
	thread_act_t	self)
{
	thread_t		thread = act_lock_thread(self);
	spl_t			s;

	assert(thread != THREAD_NULL);

	s = splsched();
	thread_lock(thread);
	thread->state &= ~(TH_ABORT|TH_ABORT_SAFELY);	/* clear any aborts */
	thread_unlock(thread);
	splx(s);

	/* A terminated activation completes its own destruction here. */
	if (!self->active) {
		act_unlock_thread(self);
		thread_terminate_self();
		/*NOTREACHED*/
	}

	/*
	 * If we're suspended, go to sleep and wait for someone to wake us up.
	 */
	if (self->suspend_count > 0) {
		if (self->handlers == NULL) {
			assert_wait(&self->suspend_count, THREAD_ABORTSAFE);
			act_unlock_thread(self);
			thread_block(special_handler_continue);
			/*NOTREACHED*/
		}

		/*
		 * More handlers are queued; let them run first, then
		 * re-check the suspension via the continuation.
		 */
		act_unlock_thread(self);

		special_handler_continue();
		/*NOTREACHED*/
	}

	act_unlock_thread(self);
}
1285
/*
 * Already locked: activation (shuttle frozen within)
 *
 * Mark an activation inactive, and prepare it to terminate
 * itself.
 */
static void
act_disable(
	thread_act_t	thr_act)
{
	thr_act->active = 0;

	/* Drop the thr_act reference taken for being active.
	 * (There is still at least one reference left:
	 * the one we were passed.)
	 * Inline the deallocate because thr_act is locked.
	 */
	act_deallocate_locked(thr_act);
}
1305
/*
 * Parameter block passed between get_set_state() and the get/set
 * state return handlers.  The embedded ReturnHandler must be the
 * first member so a handler can recover the block from its 'rh'
 * argument by a simple cast.
 */
typedef struct GetSetState {
	struct ReturnHandler rh;	/* must be first: queued on act->handlers */
	int flavor;					/* requested thread-state flavor */
	void *state;				/* caller's state buffer (in or out) */
	int *pcount;				/* in/out count of state words */
	int result;					/* handler's result; KERN_ABORTED until the handler runs */
} GetSetState;
1313
/* Local Forward decls */
kern_return_t	get_set_state(
			thread_act_t thr_act, int flavor,
			thread_state_t state, int *pcount,
			void (*handler)(ReturnHandler *rh, thread_act_t thr_act));
/* ReturnHandler bodies that run in the target's context. */
void	get_state_handler(ReturnHandler *rh, thread_act_t thr_act);
void	set_state_handler(ReturnHandler *rh, thread_act_t thr_act);
1321
/*
 * get_set_state(thr_act ...)
 *
 * General code to install g/set_state handler.
 * Called with thr_act's act_lock() and "appropriate"
 * thread-related locks held.  (See act_lock_thread().)
 *
 * Queues a handler on the target act, wakes the target so the
 * handler runs in its context, and waits (interruptibly) for the
 * handler to post its result into the on-stack GetSetState block.
 */
kern_return_t
get_set_state(
	thread_act_t	act,
	int				flavor,
	thread_state_t	state,
	int				*pcount,
	void			(*handler)(
						ReturnHandler	*rh,
						thread_act_t	act))
{
	GetSetState		gss;

	/* Initialize a small parameter structure */
	gss.rh.handler = handler;
	gss.flavor = flavor;
	gss.state = state;
	gss.pcount = pcount;
	gss.result = KERN_ABORTED;	/* iff wait below is interrupted */

	/* Add it to the thr_act's return handler list */
	gss.rh.next = act->handlers;
	act->handlers = &gss.rh;

	act_set_apc(act);

	assert(act->thread);
	assert(act != current_act());

	for (;;) {
		wait_result_t		result;

		/* If the target is parked, kick it so handlers run. */
		if (	act->started &&
				act->thread->top_act == act )
			thread_wakeup_one(&act->suspend_count);

		/*
		 * Wait must be interruptible to avoid deadlock (e.g.) with
		 * task_suspend() when caller and target of get_set_state()
		 * are in same task.
		 */
		result = assert_wait(&gss, THREAD_ABORTSAFE);
		act_unlock_thread(act);

		if (result == THREAD_WAITING)
			result = thread_block(THREAD_CONTINUE_NULL);

		assert(result != THREAD_WAITING);

		/* The handler overwrites KERN_ABORTED when it has run. */
		if (gss.result != KERN_ABORTED) {
			assert(result != THREAD_INTERRUPTED);
			break;
		}

		/* JMM - What about other aborts (like BSD signals)? */
		if (current_act()->handlers)
			act_execute_returnhandlers();

		act_lock_thread(act);
	}

	return (gss.result);
}
1391
1392 void
1393 set_state_handler(ReturnHandler *rh, thread_act_t thr_act)
1394 {
1395 GetSetState *gss = (GetSetState*)rh;
1396
1397 gss->result = machine_thread_set_state(thr_act, gss->flavor,
1398 gss->state, *gss->pcount);
1399 thread_wakeup((event_t)gss);
1400 }
1401
1402 void
1403 get_state_handler(ReturnHandler *rh, thread_act_t thr_act)
1404 {
1405 GetSetState *gss = (GetSetState*)rh;
1406
1407 gss->result = machine_thread_get_state(thr_act, gss->flavor,
1408 gss->state,
1409 (mach_msg_type_number_t *) gss->pcount);
1410 thread_wakeup((event_t)gss);
1411 }
1412
1413 kern_return_t
1414 act_get_state_locked(thread_act_t thr_act, int flavor, thread_state_t state,
1415 mach_msg_type_number_t *pcount)
1416 {
1417 return(get_set_state(thr_act, flavor, state, (int*)pcount, get_state_handler));
1418 }
1419
1420 kern_return_t
1421 act_set_state_locked(thread_act_t thr_act, int flavor, thread_state_t state,
1422 mach_msg_type_number_t count)
1423 {
1424 return(get_set_state(thr_act, flavor, state, (int*)&count, set_state_handler));
1425 }
1426
1427 kern_return_t
1428 act_set_state(thread_act_t thr_act, int flavor, thread_state_t state,
1429 mach_msg_type_number_t count)
1430 {
1431 if (thr_act == THR_ACT_NULL || thr_act == current_act())
1432 return(KERN_INVALID_ARGUMENT);
1433
1434 act_lock_thread(thr_act);
1435 return(act_set_state_locked(thr_act, flavor, state, count));
1436
1437 }
1438
1439 kern_return_t
1440 act_get_state(thread_act_t thr_act, int flavor, thread_state_t state,
1441 mach_msg_type_number_t *pcount)
1442 {
1443 if (thr_act == THR_ACT_NULL || thr_act == current_act())
1444 return(KERN_INVALID_ARGUMENT);
1445
1446 act_lock_thread(thr_act);
1447 return(act_get_state_locked(thr_act, flavor, state, pcount));
1448 }
1449
1450 void
1451 act_set_astbsd(
1452 thread_act_t act)
1453 {
1454 spl_t s = splsched();
1455
1456 if (act == current_act()) {
1457 thread_ast_set(act, AST_BSD);
1458 ast_propagate(act->ast);
1459 }
1460 else {
1461 thread_t thread = act->thread;
1462 processor_t processor;
1463
1464 thread_lock(thread);
1465 thread_ast_set(act, AST_BSD);
1466 processor = thread->last_processor;
1467 if ( processor != PROCESSOR_NULL &&
1468 processor->state == PROCESSOR_RUNNING &&
1469 processor->active_thread == thread )
1470 cause_ast_check(processor);
1471 thread_unlock(thread);
1472 }
1473
1474 splx(s);
1475 }
1476
1477 void
1478 act_set_apc(
1479 thread_act_t act)
1480 {
1481 spl_t s = splsched();
1482
1483 if (act == current_act()) {
1484 thread_ast_set(act, AST_APC);
1485 ast_propagate(act->ast);
1486 }
1487 else {
1488 thread_t thread = act->thread;
1489 processor_t processor;
1490
1491 thread_lock(thread);
1492 thread_ast_set(act, AST_APC);
1493 processor = thread->last_processor;
1494 if ( processor != PROCESSOR_NULL &&
1495 processor->state == PROCESSOR_RUNNING &&
1496 processor->active_thread == thread )
1497 cause_ast_check(processor);
1498 thread_unlock(thread);
1499 }
1500
1501 splx(s);
1502 }
1503
1504 void
1505 act_ulock_release_all(thread_act_t thr_act)
1506 {
1507 ulock_t ulock;
1508
1509 while (!queue_empty(&thr_act->held_ulocks)) {
1510 ulock = (ulock_t) queue_first(&thr_act->held_ulocks);
1511 (void) lock_make_unstable(ulock, thr_act);
1512 (void) lock_release_internal(ulock, thr_act);
1513 }
1514 }
1515
/*
 * Provide out-of-line routines (for export to other components)
 * for operations that are implemented as macros internally.
 */
1520 thread_act_t
1521 thread_self(void)
1522 {
1523 thread_act_t self = current_act_fast();
1524
1525 act_reference(self);
1526 return self;
1527 }
1528
1529 thread_act_t
1530 mach_thread_self(void)
1531 {
1532 thread_act_t self = current_act_fast();
1533
1534 act_reference(self);
1535 return self;
1536 }