]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/thread_act.c
xnu-517.9.4.tar.gz
[apple/xnu.git] / osfmk / kern / thread_act.c
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_FREE_COPYRIGHT@
24 */
25 /*
26 * Copyright (c) 1993 The University of Utah and
27 * the Center for Software Science (CSS). All rights reserved.
28 *
29 * Permission to use, copy, modify and distribute this software and its
30 * documentation is hereby granted, provided that both the copyright
31 * notice and this permission notice appear in all copies of the
32 * software, derivative works or modified versions, and any portions
33 * thereof, and that both notices appear in supporting documentation.
34 *
35 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
36 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
37 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
38 *
39 * CSS requests users of this software to return to css-dist@cs.utah.edu any
40 * improvements that they make and grant CSS redistribution rights.
41 *
42 * Author: Bryan Ford, University of Utah CSS
43 *
44 * Thread_Activation management routines
45 */
46
47 #include <cpus.h>
48 #include <task_swapper.h>
49 #include <mach/kern_return.h>
50 #include <mach/alert.h>
51 #include <kern/etap_macros.h>
52 #include <kern/mach_param.h>
53 #include <kern/zalloc.h>
54 #include <kern/thread.h>
55 #include <kern/thread_swap.h>
56 #include <kern/task.h>
57 #include <kern/task_swap.h>
58 #include <kern/thread_act.h>
59 #include <kern/sched_prim.h>
60 #include <kern/misc_protos.h>
61 #include <kern/assert.h>
62 #include <kern/exception.h>
63 #include <kern/ipc_mig.h>
64 #include <kern/ipc_tt.h>
65 #include <kern/profile.h>
66 #include <kern/machine.h>
67 #include <kern/spl.h>
68 #include <kern/syscall_subr.h>
69 #include <kern/sync_lock.h>
70 #include <kern/mk_sp.h> /*** ??? fix so this can be removed ***/
71 #include <kern/processor.h>
72 #include <mach_prof.h>
73 #include <mach/rpc.h>
74
75 /*
76 * Track the number of times we need to swapin a thread to deallocate it.
77 */
78 int act_free_swapin = 0;
79
80 /*
81 * Forward declarations for functions local to this file.
82 */
83 kern_return_t act_abort( thread_act_t, boolean_t);
84 void special_handler(ReturnHandler *, thread_act_t);
85 kern_return_t act_set_state_locked(thread_act_t, int,
86 thread_state_t,
87 mach_msg_type_number_t);
88 kern_return_t act_get_state_locked(thread_act_t, int,
89 thread_state_t,
90 mach_msg_type_number_t *);
91 void act_set_astbsd(thread_act_t);
92 void act_set_apc(thread_act_t);
93 void act_ulock_release_all(thread_act_t thr_act);
94
95 void install_special_handler_locked(thread_act_t);
96
97 static void act_disable(thread_act_t);
98
99 /*
100 * Thread interfaces accessed via a thread_activation:
101 */
102
103
/*
 *	Internal routine to terminate a thread.
 *	Sometimes called with task already locked.
 *
 *	Marks the activation inactive, posts an abort, and forces the
 *	shuttle through the kernel so the special handler can complete
 *	the termination.  Returns KERN_TERMINATED if the activation was
 *	already dead, KERN_ABORTED if the target could not be stopped,
 *	otherwise the result of act_abort().
 */
kern_return_t
thread_terminate_internal(
	register thread_act_t	act)
{
	kern_return_t	result;
	thread_t		thread;

	thread = act_lock_thread(act);

	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	/* Mark inactive and post an (unsafe) abort. */
	act_disable(act);
	result = act_abort(act, FALSE);

	/*
	 * Make sure this thread enters the kernel
	 * Must unlock the act, but leave the shuttle
	 * captured in this act.
	 */
	if (thread != current_thread()) {
		act_unlock(act);

		/* Stop/unstop cycle forces the target through the kernel. */
		if (thread_stop(thread))
			thread_unstop(thread);
		else
			result = KERN_ABORTED;

		act_lock(act);
	}

	/*
	 * A started activation is interrupted out of any wait; one that
	 * never ran is simply awakened so it can terminate itself.
	 */
	clear_wait(thread, act->started? THREAD_INTERRUPTED: THREAD_AWAKENED);
	act_unlock_thread(act);

	return (result);
}
146
/*
 *	Terminate a thread.
 *
 *	Kernel threads may only terminate themselves; for them the APC
 *	AST is forced here because they never pass through the
 *	user-return AST checks where termination normally completes.
 */
kern_return_t
thread_terminate(
	register thread_act_t	act)
{
	kern_return_t	result;

	if (act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	/* A kernel thread can be terminated only by itself. */
	if (	act->task == kernel_task	&&
			act != current_act()		)
		return (KERN_FAILURE);

	result = thread_terminate_internal(act);

	/*
	 * If a kernel thread is terminating itself, force an AST here.
	 * Kernel threads don't normally pass through the AST checking
	 * code - and all threads finish their own termination in the
	 * special handler APC.
	 */
	if (act->task == kernel_task) {
		ml_set_interrupts_enabled(FALSE);
		assert(act == current_act());
		ast_taken(AST_APC, TRUE);
		panic("thread_terminate");	/* ast_taken() must not return */
	}

	return (result);
}
180
181 /*
182 * Suspend execution of the specified thread.
183 * This is a recursive-style suspension of the thread, a count of
184 * suspends is maintained.
185 *
186 * Called with act_lock held.
187 */
188 void
189 thread_hold(
190 register thread_act_t act)
191 {
192 thread_t thread = act->thread;
193
194 if (act->suspend_count++ == 0) {
195 install_special_handler(act);
196 if ( act->started &&
197 thread != THREAD_NULL &&
198 thread->top_act == act )
199 thread_wakeup_one(&act->suspend_count);
200 }
201 }
202
203 /*
204 * Decrement internal suspension count for thr_act, setting thread
205 * runnable when count falls to zero.
206 *
207 * Called with act_lock held.
208 */
209 void
210 thread_release(
211 register thread_act_t act)
212 {
213 thread_t thread = act->thread;
214
215 if ( act->suspend_count > 0 &&
216 --act->suspend_count == 0 &&
217 thread != THREAD_NULL &&
218 thread->top_act == act ) {
219 if (!act->started) {
220 clear_wait(thread, THREAD_AWAKENED);
221 act->started = TRUE;
222 }
223 else
224 thread_wakeup_one(&act->suspend_count);
225 }
226 }
227
/*
 *	User-level suspension of a thread.  The first user stop also
 *	takes the first internal hold; the caller then waits for the
 *	target to actually stop running.
 */
kern_return_t
thread_suspend(
	register thread_act_t	act)
{
	thread_t	thread;

	if (act == THR_ACT_NULL || act->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	/*
	 * Only the first user stop does real work; note that the
	 * internal suspend_count is bumped alongside it (short-circuit:
	 * suspend_count is incremented only when user_stop_count was 0).
	 */
	if (	act->user_stop_count++ == 0		&&
			act->suspend_count++ == 0		) {
		install_special_handler(act);
		if (	thread != current_thread()	&&
				thread != THREAD_NULL		&&
				thread->top_act == act		) {
			assert(act->started);
			thread_wakeup_one(&act->suspend_count);
			act_unlock_thread(act);

			/* Wait for the target to actually stop. */
			thread_wait(thread);
		}
		else
			act_unlock_thread(act);
	}
	else
		act_unlock_thread(act);

	return (KERN_SUCCESS);
}
264
265 kern_return_t
266 thread_resume(
267 register thread_act_t act)
268 {
269 kern_return_t result = KERN_SUCCESS;
270 thread_t thread;
271
272 if (act == THR_ACT_NULL || act->task == kernel_task)
273 return (KERN_INVALID_ARGUMENT);
274
275 thread = act_lock_thread(act);
276
277 if (act->active) {
278 if (act->user_stop_count > 0) {
279 if ( --act->user_stop_count == 0 &&
280 --act->suspend_count == 0 &&
281 thread != THREAD_NULL &&
282 thread->top_act == act ) {
283 if (!act->started) {
284 clear_wait(thread, THREAD_AWAKENED);
285 act->started = TRUE;
286 }
287 else
288 thread_wakeup_one(&act->suspend_count);
289 }
290 }
291 else
292 result = KERN_FAILURE;
293 }
294 else
295 result = KERN_TERMINATED;
296
297 act_unlock_thread(act);
298
299 return (result);
300 }
301
302 /*
303 * thread_depress_abort:
304 *
305 * Prematurely abort priority depression if there is one.
306 */
307 kern_return_t
308 thread_depress_abort(
309 register thread_act_t thr_act)
310 {
311 register thread_t thread;
312 kern_return_t result;
313
314 if (thr_act == THR_ACT_NULL)
315 return (KERN_INVALID_ARGUMENT);
316
317 thread = act_lock_thread(thr_act);
318 /* if activation is terminating, this operation is not meaningful */
319 if (!thr_act->active) {
320 act_unlock_thread(thr_act);
321
322 return (KERN_TERMINATED);
323 }
324
325 result = _mk_sp_thread_depress_abort(thread, FALSE);
326
327 act_unlock_thread(thr_act);
328
329 return (result);
330 }
331
332
333 /*
334 * Indicate that the activation should run its
335 * special handler to detect the condition.
336 *
337 * Called with act_lock held.
338 */
339 kern_return_t
340 act_abort(
341 thread_act_t act,
342 boolean_t chain_break )
343 {
344 thread_t thread = act->thread;
345 spl_t s = splsched();
346
347 assert(thread->top_act == act);
348
349 thread_lock(thread);
350 if (!(thread->state & TH_ABORT)) {
351 thread->state |= TH_ABORT;
352 install_special_handler_locked(act);
353 } else {
354 thread->state &= ~TH_ABORT_SAFELY;
355 }
356 thread_unlock(thread);
357 splx(s);
358
359 return (KERN_SUCCESS);
360 }
361
362 kern_return_t
363 thread_abort(
364 register thread_act_t act)
365 {
366 kern_return_t result;
367 thread_t thread;
368
369 if (act == THR_ACT_NULL)
370 return (KERN_INVALID_ARGUMENT);
371
372 thread = act_lock_thread(act);
373
374 if (!act->active) {
375 act_unlock_thread(act);
376 return (KERN_TERMINATED);
377 }
378
379 result = act_abort(act, FALSE);
380 clear_wait(thread, THREAD_INTERRUPTED);
381 act_unlock_thread(act);
382
383 return (result);
384 }
385
/*
 *	Abort the target thread only at a safe point: if it is waiting
 *	at one now, interrupt the wait immediately; otherwise post a
 *	pending safe abort (TH_ABORT|TH_ABORT_SAFELY) to be delivered
 *	when the thread next reaches a safe point.
 */
kern_return_t
thread_abort_safely(
	thread_act_t	act)
{
	thread_t		thread;
	kern_return_t	ret;
	spl_t			s;

	if ( act == THR_ACT_NULL )
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	s = splsched();
	thread_lock(thread);
	/* Try to interrupt a safe-point wait directly. */
	if (!thread->at_safe_point ||
		clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
		/* Not interruptible now: mark a pending safe abort instead. */
		if (!(thread->state & TH_ABORT)) {
			thread->state |= (TH_ABORT|TH_ABORT_SAFELY);
			install_special_handler_locked(act);
		}
	}
	thread_unlock(thread);
	splx(s);

	act_unlock_thread(act);

	return (KERN_SUCCESS);
}
420
421 /*** backward compatibility hacks ***/
422 #include <mach/thread_info.h>
423 #include <mach/thread_special_ports.h>
424 #include <ipc/ipc_port.h>
425 #include <mach/thread_act_server.h>
426
427 kern_return_t
428 thread_info(
429 thread_act_t thr_act,
430 thread_flavor_t flavor,
431 thread_info_t thread_info_out,
432 mach_msg_type_number_t *thread_info_count)
433 {
434 register thread_t thread;
435 kern_return_t result;
436
437 if (thr_act == THR_ACT_NULL)
438 return (KERN_INVALID_ARGUMENT);
439
440 thread = act_lock_thread(thr_act);
441 if (!thr_act->active) {
442 act_unlock_thread(thr_act);
443
444 return (KERN_TERMINATED);
445 }
446
447 result = thread_info_shuttle(thr_act, flavor,
448 thread_info_out, thread_info_count);
449
450 act_unlock_thread(thr_act);
451
452 return (result);
453 }
454
455 /*
456 * Routine: thread_get_special_port [kernel call]
457 * Purpose:
458 * Clones a send right for one of the thread's
459 * special ports.
460 * Conditions:
461 * Nothing locked.
462 * Returns:
463 * KERN_SUCCESS Extracted a send right.
464 * KERN_INVALID_ARGUMENT The thread is null.
465 * KERN_FAILURE The thread is dead.
466 * KERN_INVALID_ARGUMENT Invalid special port.
467 */
468
469 kern_return_t
470 thread_get_special_port(
471 thread_act_t thr_act,
472 int which,
473 ipc_port_t *portp)
474 {
475 ipc_port_t *whichp;
476 ipc_port_t port;
477 thread_t thread;
478
479 if (!thr_act)
480 return KERN_INVALID_ARGUMENT;
481 thread = act_lock_thread(thr_act);
482 switch (which) {
483 case THREAD_KERNEL_PORT:
484 whichp = &thr_act->ith_sself;
485 break;
486
487 default:
488 act_unlock_thread(thr_act);
489 return KERN_INVALID_ARGUMENT;
490 }
491
492 if (!thr_act->active) {
493 act_unlock_thread(thr_act);
494 return KERN_FAILURE;
495 }
496
497 port = ipc_port_copy_send(*whichp);
498 act_unlock_thread(thr_act);
499
500 *portp = port;
501 return KERN_SUCCESS;
502 }
503
504 /*
505 * Routine: thread_set_special_port [kernel call]
506 * Purpose:
507 * Changes one of the thread's special ports,
508 * setting it to the supplied send right.
509 * Conditions:
510 * Nothing locked. If successful, consumes
511 * the supplied send right.
512 * Returns:
513 * KERN_SUCCESS Changed the special port.
514 * KERN_INVALID_ARGUMENT The thread is null.
515 * KERN_FAILURE The thread is dead.
516 * KERN_INVALID_ARGUMENT Invalid special port.
517 */
518
519 kern_return_t
520 thread_set_special_port(
521 thread_act_t thr_act,
522 int which,
523 ipc_port_t port)
524 {
525 ipc_port_t *whichp;
526 ipc_port_t old;
527 thread_t thread;
528
529 if (thr_act == 0)
530 return KERN_INVALID_ARGUMENT;
531
532 thread = act_lock_thread(thr_act);
533 switch (which) {
534 case THREAD_KERNEL_PORT:
535 whichp = &thr_act->ith_self;
536 break;
537
538 default:
539 act_unlock_thread(thr_act);
540 return KERN_INVALID_ARGUMENT;
541 }
542
543 if (!thr_act->active) {
544 act_unlock_thread(thr_act);
545 return KERN_FAILURE;
546 }
547
548 old = *whichp;
549 *whichp = port;
550 act_unlock_thread(thr_act);
551
552 if (IP_VALID(old))
553 ipc_port_release_send(old);
554 return KERN_SUCCESS;
555 }
556
/*
 * thread state should always be accessible by locking the thread
 * and copying it.  The activation messes things up so for right
 * now if it's not the top of the chain, use a special handler to
 * get the information when the shuttle returns to the activation.
 */
kern_return_t
thread_get_state(
	register thread_act_t	act,
	int						flavor,
	thread_state_t			state,			/* pointer to OUT array */
	mach_msg_type_number_t	*state_count)	/*IN/OUT*/
{
	kern_return_t		result = KERN_SUCCESS;
	thread_t			thread;

	if (act == THR_ACT_NULL || act == current_act())
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	/* Take an internal hold so the target cannot resume under us. */
	thread_hold(act);

	/*
	 * Stop the shuttle.  The act must be unlocked across
	 * thread_stop(), so loop until the shuttle we stopped is still
	 * the one bound to this activation after relocking.
	 */
	for (;;) {
		thread_t			thread1;

		if (	thread == THREAD_NULL		||
				thread->top_act != act		)
			break;
		act_unlock_thread(act);

		if (!thread_stop(thread)) {
			result = KERN_ABORTED;
			(void)act_lock_thread(act);
			thread = THREAD_NULL;		/* nothing to unstop below */
			break;
		}

		thread1 = act_lock_thread(act);
		if (thread1 == thread)
			break;

		/* Shuttle changed while unlocked; undo and retry. */
		thread_unstop(thread);
		thread = thread1;
	}

	if (result == KERN_SUCCESS)
		result = machine_thread_get_state(act, flavor, state, state_count);

	if (	thread != THREAD_NULL		&&
			thread->top_act == act		)
		thread_unstop(thread);

	thread_release(act);
	act_unlock_thread(act);

	return (result);
}
620
/*
 * Change thread's machine-dependent state.  Called with nothing
 * locked.  Returns same way.
 */
kern_return_t
thread_set_state(
	register thread_act_t	act,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	state_count)
{
	kern_return_t		result = KERN_SUCCESS;
	thread_t			thread;

	if (act == THR_ACT_NULL || act == current_act())
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	/* Take an internal hold so the target cannot resume under us. */
	thread_hold(act);

	/*
	 * Stop the shuttle; same retry dance as thread_get_state(),
	 * since the act must be unlocked across thread_stop().
	 */
	for (;;) {
		thread_t			thread1;

		if (	thread == THREAD_NULL		||
				thread->top_act != act		)
			break;
		act_unlock_thread(act);

		if (!thread_stop(thread)) {
			result = KERN_ABORTED;
			(void)act_lock_thread(act);
			thread = THREAD_NULL;		/* nothing to unstop below */
			break;
		}

		thread1 = act_lock_thread(act);
		if (thread1 == thread)
			break;

		/* Shuttle changed while unlocked; undo and retry. */
		thread_unstop(thread);
		thread = thread1;
	}

	if (result == KERN_SUCCESS)
		result = machine_thread_set_state(act, flavor, state, state_count);

	if (	thread != THREAD_NULL		&&
			thread->top_act == act		)
		thread_unstop(thread);

	thread_release(act);
	act_unlock_thread(act);

	return (result);
}
682
683 /*
684 * Kernel-internal "thread" interfaces used outside this file:
685 */
686
/*
 *	Duplicate the current activation's machine state into the
 *	target activation (fork support).  The target is held and its
 *	shuttle stopped for the duration of the copy.
 */
kern_return_t
thread_dup(
	register thread_act_t	target)
{
	kern_return_t	result = KERN_SUCCESS;
	thread_act_t	self = current_act();
	thread_t		thread;

	if (target == THR_ACT_NULL || target == self)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(target);

	if (!target->active) {
		act_unlock_thread(target);
		return (KERN_TERMINATED);
	}

	/* Keep the target from resuming while we copy into it. */
	thread_hold(target);

	/*
	 * Stop the target's shuttle; same retry dance as
	 * thread_get_state(), since the act must be unlocked across
	 * thread_stop().
	 */
	for (;;) {
		thread_t		thread1;

		if (	thread == THREAD_NULL			||
				thread->top_act != target		)
			break;
		act_unlock_thread(target);

		if (!thread_stop(thread)) {
			result = KERN_ABORTED;
			(void)act_lock_thread(target);
			thread = THREAD_NULL;		/* nothing to unstop below */
			break;
		}

		thread1 = act_lock_thread(target);
		if (thread1 == thread)
			break;

		/* Shuttle changed while unlocked; undo and retry. */
		thread_unstop(thread);
		thread = thread1;
	}

	if (result == KERN_SUCCESS)
		result = machine_thread_dup(self, target);

	if (	thread != THREAD_NULL			&&
			thread->top_act == target		)
		thread_unstop(thread);

	thread_release(target);
	act_unlock_thread(target);

	return (result);
}
742
743
744 /*
745 * thread_setstatus:
746 *
747 * Set the status of the specified thread.
748 * Called with (and returns with) no locks held.
749 */
750 kern_return_t
751 thread_setstatus(
752 register thread_act_t act,
753 int flavor,
754 thread_state_t tstate,
755 mach_msg_type_number_t count)
756 {
757 kern_return_t result = KERN_SUCCESS;
758 thread_t thread;
759
760 thread = act_lock_thread(act);
761
762 if ( act != current_act() &&
763 (act->suspend_count == 0 ||
764 thread == THREAD_NULL ||
765 (thread->state & TH_RUN) ||
766 thread->top_act != act) )
767 result = KERN_FAILURE;
768
769 if (result == KERN_SUCCESS)
770 result = machine_thread_set_state(act, flavor, tstate, count);
771
772 act_unlock_thread(act);
773
774 return (result);
775 }
776
777 /*
778 * thread_getstatus:
779 *
780 * Get the status of the specified thread.
781 */
782 kern_return_t
783 thread_getstatus(
784 register thread_act_t act,
785 int flavor,
786 thread_state_t tstate,
787 mach_msg_type_number_t *count)
788 {
789 kern_return_t result = KERN_SUCCESS;
790 thread_t thread;
791
792 thread = act_lock_thread(act);
793
794 if ( act != current_act() &&
795 (act->suspend_count == 0 ||
796 thread == THREAD_NULL ||
797 (thread->state & TH_RUN) ||
798 thread->top_act != act) )
799 result = KERN_FAILURE;
800
801 if (result == KERN_SUCCESS)
802 result = machine_thread_get_state(act, flavor, tstate, count);
803
804 act_unlock_thread(act);
805
806 return (result);
807 }
808
809 /*
810 * Kernel-internal thread_activation interfaces used outside this file:
811 */
812
813 void
814 act_reference(
815 thread_act_t act)
816 {
817 if (act == NULL)
818 return;
819
820 act_lock(act);
821 act_reference_locked(act);
822 act_unlock(act);
823 }
824
/*
 *	Drop a reference on an activation.  On release of the last
 *	reference, tear the activation down: detach it from its shuttle,
 *	fold its CPU times into the owning task, unlink it from the
 *	task's thread list, and release its IPC and BSD state.
 */
void
act_deallocate(
	thread_act_t	act)
{
	task_t		task;
	thread_t	thread;
	void		*task_proc;

	if (act == NULL)
		return;

	act_lock(act);

	if (--act->act_ref_count > 0) {
		act_unlock(act);
		return;
	}

	/* Last reference gone; the act must already be dead. */
	assert(!act->active);

	thread = act->thread;
	assert(thread != NULL);

	thread->top_act = NULL;

	act_unlock(act);

	/* Credit the act's accumulated CPU time to the owning task. */
	task = act->task;
	task_lock(task);

	task_proc = task->bsd_info;

	{
		time_value_t	user_time, system_time;

		thread_read_times(thread, &user_time, &system_time);
		time_value_add(&task->total_user_time, &user_time);
		time_value_add(&task->total_system_time, &system_time);

		/* Unlink from the task's list of activations. */
		queue_remove(&task->threads, act, thread_act_t, task_threads);
		act->task_threads.next = NULL;
		task->thread_count--;
		task->res_thread_count--;
	}

	task_unlock(task);

	act_prof_deallocate(act);
	ipc_thr_act_terminate(act);

#ifdef MACH_BSD
	{
		/* Free the BSD uthread bound to this activation. */
		extern void uthread_free(task_t, void *, void *, void *);
		void *ut = act->uthread;

		uthread_free(task, act, ut, task_proc);
		act->uthread = NULL;
	}
#endif  /* MACH_BSD */

	task_deallocate(task);

	thread_deallocate(thread);
}
889
890
891 /*
892 * act_attach - Attach an thr_act to the top of a thread ("push the stack").
893 *
894 * The thread_shuttle must be either the current one or a brand-new one.
895 * Assumes the thr_act is active but not in use.
896 *
897 * Already locked: thr_act plus "appropriate" thread-related locks
898 * (see act_lock_thread()).
899 */
900 void
901 act_attach(
902 thread_act_t act,
903 thread_t thread)
904 {
905 thread_act_t lower;
906
907 /*
908 * Chain the act onto the thread's act stack.
909 */
910 act->act_ref_count++;
911 act->thread = thread;
912 act->higher = THR_ACT_NULL;
913 lower = act->lower = thread->top_act;
914 if (lower != THR_ACT_NULL)
915 lower->higher = act;
916
917 thread->top_act = act;
918 }
919
920 /*
921 * act_detach
922 *
923 * Remove the current thr_act from the top of the current thread, i.e.
924 * "pop the stack". Assumes already locked: thr_act plus "appropriate"
925 * thread-related locks (see act_lock_thread).
926 */
927 void
928 act_detach(
929 thread_act_t cur_act)
930 {
931 thread_t cur_thread = cur_act->thread;
932
933 /* Unlink the thr_act from the thread's thr_act stack */
934 cur_thread->top_act = cur_act->lower;
935 cur_act->thread = 0;
936 cur_act->act_ref_count--;
937 assert(cur_act->act_ref_count > 0);
938
939 #if MACH_ASSERT
940 cur_act->lower = cur_act->higher = THR_ACT_NULL;
941 if (cur_thread->top_act)
942 cur_thread->top_act->higher = THR_ACT_NULL;
943 #endif /* MACH_ASSERT */
944
945 return;
946 }
947
948
/*
 * Synchronize a thread operation with migration.
 * Called with nothing locked.
 * Returns with thr_act locked; the returned shuttle pointer is the
 * one bound to the act at the time the lock was taken.
 */
thread_t
act_lock_thread(
	thread_act_t	thr_act)
{

	/*
	 * JMM - We have moved away from explicit RPC locks
	 * and towards a generic migration approach.  The wait
	 * queue lock will be the point of synchronization for
	 * the shuttle linkage when this is rolled out.  Until
	 * then, just lock the act.
	 */
	act_lock(thr_act);
	return (thr_act->thread);
}
969
/*
 * Unsynchronize with migration (i.e., undo an act_lock_thread() call).
 * Called with thr_act locked, plus thread locks held that are
 * "correct" for thr_act's state.  Returns with nothing locked.
 */
void
act_unlock_thread(thread_act_t thr_act)
{
	/* Currently only the act lock is held; see act_lock_thread(). */
	act_unlock(thr_act);
}
980
981 /*
982 * Synchronize with migration given a pointer to a shuttle (instead of an
983 * activation). Called with nothing locked; returns with all
984 * "appropriate" thread-related locks held (see act_lock_thread()).
985 */
986 thread_act_t
987 thread_lock_act(
988 thread_t thread)
989 {
990 thread_act_t thr_act;
991
992 while (1) {
993 thr_act = thread->top_act;
994 if (!thr_act)
995 break;
996 if (!act_lock_try(thr_act)) {
997 mutex_pause();
998 continue;
999 }
1000 break;
1001 }
1002 return (thr_act);
1003 }
1004
1005 /*
1006 * Unsynchronize with an activation starting from a pointer to
1007 * a shuttle.
1008 */
1009 void
1010 thread_unlock_act(
1011 thread_t thread)
1012 {
1013 thread_act_t thr_act;
1014
1015 if (thr_act = thread->top_act) {
1016 act_unlock(thr_act);
1017 }
1018 }
1019
/*
 * switch_act
 *
 * If a new activation is given, switch to it.  If not,
 * switch to the lower activation (pop).  Returns the old
 * activation.  This is for migration support.
 */
thread_act_t
switch_act(
	thread_act_t	act)
{
	thread_act_t	old, new;
	thread_t		thread;

	disable_preemption();

	thread = current_thread();

	/*
	 *	Find the old and new activation for switch.
	 */
	old = thread->top_act;

	if (act) {
		/* Push: switch to the supplied activation. */
		new = act;
		new->thread = thread;
	}
	else {
		/* Pop: fall back to the activation below the top. */
		new = old->lower;
	}

	assert(new != THR_ACT_NULL);
	assert(current_processor()->active_thread == thread);

	/* This is where all the work happens: machine-level switch. */
	machine_switch_act(thread, old, new);

	/*
	 *	Push or pop an activation on the chain.
	 */
	if (act) {
		act_attach(new, thread);
	}
	else {
		act_detach(old);
	}

	enable_preemption();

	return(old);
}
1071
1072 /*
1073 * install_special_handler
1074 * Install the special returnhandler that handles suspension and
1075 * termination, if it hasn't been installed already.
1076 *
1077 * Already locked: RPC-related locks for thr_act, but not
1078 * scheduling lock (thread_lock()) of the associated thread.
1079 */
1080 void
1081 install_special_handler(
1082 thread_act_t thr_act)
1083 {
1084 spl_t spl;
1085 thread_t thread = thr_act->thread;
1086
1087 spl = splsched();
1088 thread_lock(thread);
1089 install_special_handler_locked(thr_act);
1090 thread_unlock(thread);
1091 splx(spl);
1092 }
1093
/*
 * install_special_handler_locked
 *	Do the work of installing the special_handler.
 *
 *	Already locked: RPC-related locks for thr_act, plus the
 *	scheduling lock (thread_lock()) of the associated thread.
 */
void
install_special_handler_locked(
	thread_act_t	act)
{
	thread_t		thread = act->thread;
	ReturnHandler	**rh;

	/* The work handler must always be the last ReturnHandler on the list,
	   because it can do tricky things like detach the thr_act. */
	for (rh = &act->handlers; *rh; rh = &(*rh)->next)
		continue;
	/* Append only if not already on the list. */
	if (rh != &act->special_handler.next)
		*rh = &act->special_handler;

	if (act == thread->top_act) {
		/*
		 * Temporarily undepress, so target has
		 * a chance to do locking required to
		 * block itself in special_handler().
		 */
		if (thread->sched_mode & TH_MODE_ISDEPRESSED)
			compute_priority(thread, TRUE);
	}

	/* Arm the APC AST so the handler runs on the next user return. */
	thread_ast_set(act, AST_APC);
	if (act == current_act())
		ast_propagate(act->ast);
	else {
		processor_t		processor = thread->last_processor;

		/* Interrupt the target if it is running right now. */
		if (	processor != PROCESSOR_NULL				&&
				processor->state == PROCESSOR_RUNNING	&&
				processor->active_thread == thread		)
			cause_ast_check(processor);
	}
}
1137
/*
 *	Stub: installing a per-thread APC handler is not supported in
 *	this configuration.  Only the BSD AST handler is ever requested
 *	(asserted), and the request is refused.
 */
kern_return_t
thread_apc_set(
	thread_act_t			act,
	thread_apc_handler_t	apc)
{
	extern thread_apc_handler_t	bsd_ast;

	assert(apc == bsd_ast);
	return (KERN_FAILURE);
}
1148
/*
 *	Stub: removing a per-thread APC handler is not supported in
 *	this configuration.  Only the BSD AST handler is ever requested
 *	(asserted), and the request is refused.
 */
kern_return_t
thread_apc_clear(
	thread_act_t			act,
	thread_apc_handler_t	apc)
{
	extern thread_apc_handler_t	bsd_ast;

	assert(apc == bsd_ast);
	return (KERN_FAILURE);
}
1159
1160 /*
1161 * Activation control support routines internal to this file:
1162 */
1163
/*
 * act_execute_returnhandlers()	- does just what the name says
 *
 * This is called by system-dependent code when it detects that
 * thr_act->handlers is non-null while returning into user mode.
 */
void
act_execute_returnhandlers(void)
{
	thread_act_t	act = current_act();

	thread_ast_clear(act, AST_APC);
	spllo();

	/* Pop and run handlers one at a time until the list drains. */
	for (;;) {
		ReturnHandler	*rh;
		thread_t		thread = act_lock_thread(act);

		(void)splsched();
		thread_lock(thread);
		rh = act->handlers;
		if (!rh) {
			thread_unlock(thread);
			spllo();
			act_unlock_thread(act);
			return;
		}
		/* Unlink under the thread lock; run with no locks held. */
		act->handlers = rh->next;
		thread_unlock(thread);
		spllo();
		act_unlock_thread(act);

		/* Execute it */
		(*rh->handler)(rh, act);
	}
}
1200
/*
 * special_handler_continue
 *
 * Continuation routine for the special handler blocks.  It checks
 * to see whether there has been any new suspensions.  If so, it
 * installs the special handler again.  Otherwise, it checks to see
 * if the current depression needs to be re-instated (it may have
 * been temporarily removed in order to get to this point in a hurry).
 */
void
special_handler_continue(void)
{
	thread_act_t		self = current_act();

	if (self->suspend_count > 0)
		install_special_handler(self);
	else {
		thread_t		thread = self->thread;
		spl_t			s = splsched();

		thread_lock(thread);
		if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
			processor_t		myprocessor = thread->last_processor;

			/* Re-instate the depressed priority. */
			thread->sched_pri = DEPRESSPRI;
			myprocessor->current_pri = thread->sched_pri;
			thread->sched_mode &= ~TH_MODE_PREEMPT;
		}
		thread_unlock(thread);
		splx(s);
	}

	thread_exception_return();
	/*NOTREACHED*/
}
1236
/*
 * special_handler	- handles suspension, termination.  Called
 * with nothing locked.  Returns (if it returns) the same way.
 */
void
special_handler(
	ReturnHandler	*rh,
	thread_act_t	self)
{
	thread_t		thread = act_lock_thread(self);
	spl_t			s;

	assert(thread != THREAD_NULL);

	s = splsched();
	thread_lock(thread);
	thread->state &= ~(TH_ABORT|TH_ABORT_SAFELY);	/* clear any aborts */
	thread_unlock(thread);
	splx(s);

	/* A dead activation terminates itself here and never returns. */
	if (!self->active) {
		act_unlock_thread(self);
		thread_terminate_self();
		/*NOTREACHED*/
	}

	/*
	 * If we're suspended, go to sleep and wait for someone to wake us up.
	 */
	if (self->suspend_count > 0) {
		if (self->handlers == NULL) {
			assert_wait(&self->suspend_count, THREAD_ABORTSAFE);
			act_unlock_thread(self);
			thread_block(special_handler_continue);
			/*NOTREACHED*/
		}

		/* Other handlers pending: let the continuation re-check. */
		act_unlock_thread(self);

		special_handler_continue();
		/*NOTREACHED*/
	}

	act_unlock_thread(self);
}
1282
1283 /*
1284 * Already locked: activation (shuttle frozen within)
1285 *
1286 * Mark an activation inactive, and prepare it to terminate
1287 * itself.
1288 */
1289 static void
1290 act_disable(
1291 thread_act_t thr_act)
1292 {
1293 thr_act->active = 0;
1294
1295 /* Drop the thr_act reference taken for being active.
1296 * (There is still at least one reference left:
1297 * the one we were passed.)
1298 * Inline the deallocate because thr_act is locked.
1299 */
1300 act_deallocate_locked(thr_act);
1301 }
1302
/*
 * Parameter block passed from get_set_state() to the get/set state
 * handlers via the activation's ReturnHandler list.
 */
typedef struct GetSetState {
	struct ReturnHandler	rh;		/* must be first: list linkage */
	int		flavor;				/* requested state flavor */
	void	*state;				/* caller's state buffer */
	int		*pcount;			/* in/out element count */
	int		result;				/* handler's kern_return_t */
} GetSetState;
1310
1311 /* Local Forward decls */
1312 kern_return_t get_set_state(
1313 thread_act_t thr_act, int flavor,
1314 thread_state_t state, int *pcount,
1315 void (*handler)(ReturnHandler *rh, thread_act_t thr_act));
1316 void get_state_handler(ReturnHandler *rh, thread_act_t thr_act);
1317 void set_state_handler(ReturnHandler *rh, thread_act_t thr_act);
1318
/*
 * get_set_state(thr_act ...)
 *
 * General code to install g/set_state handler.
 * Called with thr_act's act_lock() and "appropriate"
 * thread-related locks held.  (See act_lock_thread().)
 *
 * Queues a GetSetState handler on the target activation, then waits
 * (interruptibly) for the target to run it and post a result.
 */
kern_return_t
get_set_state(
	thread_act_t		act,
	int					flavor,
	thread_state_t		state,
	int					*pcount,
	void				(*handler)(
							ReturnHandler	*rh,
							thread_act_t	act))
{
	GetSetState			gss;

	/* Initialize a small parameter structure */
	gss.rh.handler = handler;
	gss.flavor = flavor;
	gss.state = state;
	gss.pcount = pcount;
	gss.result = KERN_ABORTED;	/* iff wait below is interrupted */

	/* Add it to the thr_act's return handler list */
	gss.rh.next = act->handlers;
	act->handlers = &gss.rh;

	act_set_apc(act);

	assert(act->thread);
	assert(act != current_act());

	/* Wait for the target to run the handler and post a result. */
	for (;;) {
		wait_result_t		result;

		/* Nudge a suspended target so it runs its handlers. */
		if (	act->started					&&
				act->thread->top_act == act		)
			thread_wakeup_one(&act->suspend_count);

		/*
		 * Wait must be interruptible to avoid deadlock (e.g.) with
		 * task_suspend() when caller and target of get_set_state()
		 * are in same task.
		 */
		result = assert_wait(&gss, THREAD_ABORTSAFE);
		act_unlock_thread(act);

		if (result == THREAD_WAITING)
			result = thread_block(THREAD_CONTINUE_NULL);

		assert(result != THREAD_WAITING);

		/* Handler ran: gss.result carries its return code. */
		if (gss.result != KERN_ABORTED) {
			assert(result != THREAD_INTERRUPTED);
			break;
		}

		/* JMM - What about other aborts (like BSD signals)? */
		if (current_act()->handlers)
			act_execute_returnhandlers();

		act_lock_thread(act);
	}

	return (gss.result);
}
1388
1389 void
1390 set_state_handler(ReturnHandler *rh, thread_act_t thr_act)
1391 {
1392 GetSetState *gss = (GetSetState*)rh;
1393
1394 gss->result = machine_thread_set_state(thr_act, gss->flavor,
1395 gss->state, *gss->pcount);
1396 thread_wakeup((event_t)gss);
1397 }
1398
1399 void
1400 get_state_handler(ReturnHandler *rh, thread_act_t thr_act)
1401 {
1402 GetSetState *gss = (GetSetState*)rh;
1403
1404 gss->result = machine_thread_get_state(thr_act, gss->flavor,
1405 gss->state,
1406 (mach_msg_type_number_t *) gss->pcount);
1407 thread_wakeup((event_t)gss);
1408 }
1409
1410 kern_return_t
1411 act_get_state_locked(thread_act_t thr_act, int flavor, thread_state_t state,
1412 mach_msg_type_number_t *pcount)
1413 {
1414 return(get_set_state(thr_act, flavor, state, (int*)pcount, get_state_handler));
1415 }
1416
1417 kern_return_t
1418 act_set_state_locked(thread_act_t thr_act, int flavor, thread_state_t state,
1419 mach_msg_type_number_t count)
1420 {
1421 return(get_set_state(thr_act, flavor, state, (int*)&count, set_state_handler));
1422 }
1423
1424 kern_return_t
1425 act_set_state(thread_act_t thr_act, int flavor, thread_state_t state,
1426 mach_msg_type_number_t count)
1427 {
1428 if (thr_act == THR_ACT_NULL || thr_act == current_act())
1429 return(KERN_INVALID_ARGUMENT);
1430
1431 act_lock_thread(thr_act);
1432 return(act_set_state_locked(thr_act, flavor, state, count));
1433
1434 }
1435
1436 kern_return_t
1437 act_get_state(thread_act_t thr_act, int flavor, thread_state_t state,
1438 mach_msg_type_number_t *pcount)
1439 {
1440 if (thr_act == THR_ACT_NULL || thr_act == current_act())
1441 return(KERN_INVALID_ARGUMENT);
1442
1443 act_lock_thread(thr_act);
1444 return(act_get_state_locked(thr_act, flavor, state, pcount));
1445 }
1446
/*
 * act_set_astbsd:
 *
 * Post an AST_BSD against the given activation.  For the current
 * activation the AST is set and propagated directly; otherwise the
 * target's thread is locked, the AST set, and if the thread is
 * currently running on a processor, that processor is prodded
 * (cause_ast_check) so it notices the AST promptly.
 *
 * Runs at splsched() for the duration.
 */
void
act_set_astbsd(
	thread_act_t	act)
{
	spl_t	s = splsched();

	if (act == current_act()) {
		thread_ast_set(act, AST_BSD);
		ast_propagate(act->ast);
	}
	else {
		thread_t	thread = act->thread;
		processor_t	processor;

		thread_lock(thread);
		thread_ast_set(act, AST_BSD);
		processor = thread->last_processor;
		/* Only poke the processor if it is still running this thread. */
		if (	processor != PROCESSOR_NULL			&&
				processor->state == PROCESSOR_RUNNING	&&
				processor->active_thread == thread		)
			cause_ast_check(processor);
		thread_unlock(thread);
	}

	splx(s);
}
1473
/*
 * act_set_apc:
 *
 * Post an AST_APC against the given activation (used, e.g., by
 * get_set_state() to force the target to run its return handlers).
 * For the current activation the AST is set and propagated
 * directly; otherwise the target's thread is locked, the AST set,
 * and if the thread is currently running on a processor, that
 * processor is prodded (cause_ast_check) so it notices the AST
 * promptly.
 *
 * Runs at splsched() for the duration.
 */
void
act_set_apc(
	thread_act_t	act)
{
	spl_t	s = splsched();

	if (act == current_act()) {
		thread_ast_set(act, AST_APC);
		ast_propagate(act->ast);
	}
	else {
		thread_t	thread = act->thread;
		processor_t	processor;

		thread_lock(thread);
		thread_ast_set(act, AST_APC);
		processor = thread->last_processor;
		/* Only poke the processor if it is still running this thread. */
		if (	processor != PROCESSOR_NULL			&&
				processor->state == PROCESSOR_RUNNING	&&
				processor->active_thread == thread		)
			cause_ast_check(processor);
		thread_unlock(thread);
	}

	splx(s);
}
1500
/*
 * act_ulock_release_all:
 *
 * Release every ulock still held by the given activation, first
 * marking each one unstable.  NOTE(review): presumably the
 * unstable marking tells waiters the lock-protected data may be
 * inconsistent because the holder is going away — confirm against
 * the ulock implementation.  Return values of both calls are
 * deliberately ignored (best effort during teardown).
 */
void
act_ulock_release_all(thread_act_t thr_act)
{
	ulock_t	ulock;

	while (!queue_empty(&thr_act->held_ulocks)) {
		ulock = (ulock_t) queue_first(&thr_act->held_ulocks);
		(void) lock_make_unstable(ulock, thr_act);
		(void) lock_release_internal(ulock, thr_act);
	}
}
1512
/*
 * Provide real routines (for export to other components) for
 * operations that are implemented as macros internally.
 */
1517 thread_act_t
1518 thread_self(void)
1519 {
1520 thread_act_t self = current_act_fast();
1521
1522 act_reference(self);
1523 return self;
1524 }
1525
1526 thread_act_t
1527 mach_thread_self(void)
1528 {
1529 thread_act_t self = current_act_fast();
1530
1531 act_reference(self);
1532 return self;
1533 }