[apple/xnu.git] / osfmk / kern / thread_act.c
/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Center for Software Science (CSS). All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 * Author: Bryan Ford, University of Utah CSS
 *
 * Thread management routines
 */
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/alert.h>
#include <mach_prof.h>
#include <mach/rpc.h>
#include <mach/thread_act_server.h>

#include <kern/kern_types.h>
#include <kern/ast.h>
#include <kern/mach_param.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/exception.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/profile.h>
#include <kern/machine.h>
#include <kern/spl.h>
#include <kern/syscall_subr.h>
#include <kern/sync_lock.h>
#include <kern/processor.h>
#include <kern/timer.h>
#include <mach_prof.h>
#include <mach/rpc.h>

void    act_abort(thread_t);
void    act_set_apc(thread_t);
void    install_special_handler_locked(thread_t);
void    special_handler_continue(void);

/*
 * Internal routine to terminate a thread.
 * Sometimes called with task already locked.
 */
kern_return_t
thread_terminate_internal(
    thread_t        thread)
{
    kern_return_t   result = KERN_SUCCESS;

    thread_mtx_lock(thread);

    if (thread->active) {
        thread->active = FALSE;

        act_abort(thread);

        if (thread->started)
            clear_wait(thread, THREAD_INTERRUPTED);
        else {
            clear_wait(thread, THREAD_AWAKENED);
            thread->started = TRUE;
        }
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    if (thread != current_thread() && result == KERN_SUCCESS)
        thread_wait(thread);

    return (result);
}

/*
 * Terminate a thread.
 */
kern_return_t
thread_terminate(
    thread_t        thread)
{
    kern_return_t   result;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (thread->task == kernel_task &&
        thread != current_thread())
        return (KERN_FAILURE);

    result = thread_terminate_internal(thread);

    /*
     * If a kernel thread is terminating itself, force an AST here.
     * Kernel threads don't normally pass through the AST checking
     * code - and all threads finish their own termination in the
     * special handler APC.
     */
    if (thread->task == kernel_task) {
        ml_set_interrupts_enabled(FALSE);
        ast_taken(AST_APC, TRUE);
        panic("thread_terminate");
    }

    return (result);
}

/*
 * Suspend execution of the specified thread.
 * This is a recursive-style suspension of the thread; a count of
 * suspends is maintained.
 *
 * Called with thread mutex held.
 */
void
thread_hold(
    register thread_t   thread)
{
    if (thread->suspend_count++ == 0) {
        install_special_handler(thread);
        if (thread->started)
            thread_wakeup_one(&thread->suspend_count);
    }
}

/*
 * Decrement internal suspension count, setting thread
 * runnable when count falls to zero.
 *
 * Called with thread mutex held.
 */
void
thread_release(
    register thread_t   thread)
{
    if (thread->suspend_count > 0 &&
        --thread->suspend_count == 0) {
        if (thread->started)
            thread_wakeup_one(&thread->suspend_count);
        else {
            clear_wait(thread, THREAD_AWAKENED);
            thread->started = TRUE;
        }
    }
}

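/*
 * thread_suspend:
 *
 * User-level suspension of a thread.  The first suspend bumps both
 * the user stop count and the internal suspend count and installs
 * the special handler so the target parks itself; unless it is
 * suspending itself, the caller then waits (thread_wait) for the
 * target to stop.  Threads of the kernel task may not be suspended.
 */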
kern_return_t
thread_suspend(
    register thread_t   thread)
{
    thread_t        self = current_thread();
    kern_return_t   result = KERN_SUCCESS;

    if (thread == THREAD_NULL || thread->task == kernel_task)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread->user_stop_count++ == 0 &&
            thread->suspend_count++ == 0) {
            install_special_handler(thread);
            if (thread != self)
                thread_wakeup_one(&thread->suspend_count);
        }
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    if (thread != self && result == KERN_SUCCESS)
        thread_wait(thread);

    return (result);
}

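/*
 * thread_resume:
 *
 * Undo one thread_suspend.  When the last user stop is removed and
 * the internal suspend count drops to zero, the target is made
 * runnable again (or started, if it had never started).  Returns
 * KERN_FAILURE if the thread was not suspended.
 */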
kern_return_t
thread_resume(
    register thread_t   thread)
{
    kern_return_t   result = KERN_SUCCESS;

    if (thread == THREAD_NULL || thread->task == kernel_task)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread->user_stop_count > 0) {
            if (--thread->user_stop_count == 0 &&
                --thread->suspend_count == 0) {
                if (thread->started)
                    thread_wakeup_one(&thread->suspend_count);
                else {
                    clear_wait(thread, THREAD_AWAKENED);
                    thread->started = TRUE;
                }
            }
        }
        else
            result = KERN_FAILURE;
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    return (result);
}

/*
 * thread_depress_abort:
 *
 * Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort(
    register thread_t   thread)
{
    kern_return_t   result;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active)
        result = thread_depress_abort_internal(thread);
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    return (result);
}


/*
 * Indicate that the activation should run its
 * special handler to detect a condition.
 *
 * Called with thread mutex held.
 */
void
act_abort(
    thread_t    thread)
{
    spl_t   s = splsched();

    thread_lock(thread);

    if (!(thread->state & TH_ABORT)) {
        thread->state |= TH_ABORT;
        install_special_handler_locked(thread);
    }
    else
        thread->state &= ~TH_ABORT_SAFELY;

    thread_unlock(thread);
    splx(s);
}

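/*
 * thread_abort:
 *
 * Unconditionally abort the thread: mark it aborted via act_abort
 * and interrupt it out of any wait, forcing it through its special
 * handler.  Unlike thread_abort_safely, this does not wait for a
 * safe point.
 */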
kern_return_t
thread_abort(
    register thread_t   thread)
{
    kern_return_t   result = KERN_SUCCESS;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active) {
        act_abort(thread);
        clear_wait(thread, THREAD_INTERRUPTED);
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    return (result);
}

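/*
 * thread_abort_safely:
 *
 * Abort the thread only at a safe point.  If the target is not at a
 * safe point, or cannot be interrupted out of its wait, the abort is
 * deferred: TH_ABORT and TH_ABORT_SAFELY are set and the special
 * handler is installed to field it later.
 */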
kern_return_t
thread_abort_safely(
    thread_t        thread)
{
    kern_return_t   result = KERN_SUCCESS;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active) {
        spl_t   s = splsched();

        thread_lock(thread);
        if (!thread->at_safe_point ||
            clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
            if (!(thread->state & TH_ABORT)) {
                thread->state |= (TH_ABORT|TH_ABORT_SAFELY);
                install_special_handler_locked(thread);
            }
        }
        thread_unlock(thread);
        splx(s);
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    return (result);
}

/*** backward compatibility hacks ***/
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <ipc/ipc_port.h>

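/*
 * thread_info:
 *
 * Return information about a thread.  Validates the thread, takes
 * the thread mutex and calls thread_info_internal for the requested
 * flavor.
 */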
kern_return_t
thread_info(
    thread_t                thread,
    thread_flavor_t         flavor,
    thread_info_t           thread_info_out,
    mach_msg_type_number_t  *thread_info_count)
{
    kern_return_t   result;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active)
        result = thread_info_internal(
                    thread, flavor, thread_info_out, thread_info_count);
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    return (result);
}

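/*
 * thread_get_state:
 *
 * Return the machine-dependent state of a thread.  Another thread is
 * held and stopped first so a consistent snapshot of its register
 * state can be taken; the calling thread may read its own state
 * directly.
 */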
kern_return_t
thread_get_state(
    register thread_t       thread,
    int                     flavor,
    thread_state_t          state,          /* pointer to OUT array */
    mach_msg_type_number_t  *state_count)   /* IN/OUT */
{
    kern_return_t   result = KERN_SUCCESS;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread != current_thread()) {
            thread_hold(thread);

            thread_mtx_unlock(thread);

            if (thread_stop(thread)) {
                thread_mtx_lock(thread);
                result = machine_thread_get_state(
                            thread, flavor, state, state_count);
                thread_unstop(thread);
            }
            else {
                thread_mtx_lock(thread);
                result = KERN_ABORTED;
            }

            thread_release(thread);
        }
        else
            result = machine_thread_get_state(
                        thread, flavor, state, state_count);
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    return (result);
}

/*
 * Change thread's machine-dependent state.  Called with nothing
 * locked.  Returns same way.
 */
kern_return_t
thread_set_state(
    register thread_t       thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  state_count)
{
    kern_return_t   result = KERN_SUCCESS;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread != current_thread()) {
            thread_hold(thread);

            thread_mtx_unlock(thread);

            if (thread_stop(thread)) {
                thread_mtx_lock(thread);
                result = machine_thread_set_state(
                            thread, flavor, state, state_count);
                thread_unstop(thread);
            }
            else {
                thread_mtx_lock(thread);
                result = KERN_ABORTED;
            }

            thread_release(thread);
        }
        else
            result = machine_thread_set_state(
                        thread, flavor, state, state_count);
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    return (result);
}


/*
 * Kernel-internal "thread" interfaces used outside this file:
 */

/* Initialize (or re-initialize) a thread's state.  Called from execve
 * with nothing locked; returns the same way.
 */
kern_return_t
thread_state_initialize(
    register thread_t   thread)
{
    kern_return_t   result = KERN_SUCCESS;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread != current_thread()) {
            thread_hold(thread);

            thread_mtx_unlock(thread);

            if (thread_stop(thread)) {
                thread_mtx_lock(thread);
                result = machine_thread_state_initialize(thread);
                thread_unstop(thread);
            }
            else {
                thread_mtx_lock(thread);
                result = KERN_ABORTED;
            }

            thread_release(thread);
        }
        else
            result = machine_thread_state_initialize(thread);
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    return (result);
}


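/*
 * thread_dup:
 *
 * Duplicate the machine-dependent state of the calling thread into
 * the target thread.  The target is held and stopped while
 * machine_thread_dup copies the state across.
 */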
kern_return_t
thread_dup(
    register thread_t   target)
{
    thread_t        self = current_thread();
    kern_return_t   result = KERN_SUCCESS;

    if (target == THREAD_NULL || target == self)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(target);

    if (target->active) {
        thread_hold(target);

        thread_mtx_unlock(target);

        if (thread_stop(target)) {
            thread_mtx_lock(target);
            result = machine_thread_dup(self, target);
            thread_unstop(target);
        }
        else {
            thread_mtx_lock(target);
            result = KERN_ABORTED;
        }

        thread_release(target);
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(target);

    return (result);
}


/*
 * thread_setstatus:
 *
 * Set the status of the specified thread.
 * Called with (and returns with) no locks held.
 */
kern_return_t
thread_setstatus(
    register thread_t       thread,
    int                     flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  count)
{
    return (thread_set_state(thread, flavor, tstate, count));
}

/*
 * thread_getstatus:
 *
 * Get the status of the specified thread.
 */
kern_return_t
thread_getstatus(
    register thread_t       thread,
    int                     flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  *count)
{
    return (thread_get_state(thread, flavor, tstate, count));
}

/*
 * install_special_handler:
 *
 * Install the special returnhandler that handles suspension and
 * termination, if it hasn't been installed already.
 *
 * Called with the thread mutex held.
 */
void
install_special_handler(
    thread_t        thread)
{
    spl_t   s = splsched();

    thread_lock(thread);
    install_special_handler_locked(thread);
    thread_unlock(thread);
    splx(s);
}

/*
 * install_special_handler_locked:
 *
 * Do the work of installing the special_handler.
 *
 * Called with the thread mutex and scheduling lock held.
 */
void
install_special_handler_locked(
    thread_t        thread)
{
    ReturnHandler   **rh;

    /* The work handler must always be the last ReturnHandler on the list,
       because it can do tricky things like detach the thr_act. */
    for (rh = &thread->handlers; *rh; rh = &(*rh)->next)
        continue;

    if (rh != &thread->special_handler.next)
        *rh = &thread->special_handler;

    /*
     * Temporarily undepress, so target has
     * a chance to do locking required to
     * block itself in special_handler().
     */
    if (thread->sched_mode & TH_MODE_ISDEPRESSED)
        compute_priority(thread, TRUE);

    thread_ast_set(thread, AST_APC);

    if (thread == current_thread())
        ast_propagate(thread->ast);
    else {
        processor_t     processor = thread->last_processor;

        if (processor != PROCESSOR_NULL &&
            processor->state == PROCESSOR_RUNNING &&
            processor->active_thread == thread)
            cause_ast_check(processor);
    }
}

/*
 * Activation control support routines internal to this file:
 */

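/*
 * act_execute_returnhandlers:
 *
 * Run the queued ReturnHandlers for the current thread, invoked to
 * field an AST_APC.  Handlers are popped one at a time, with all
 * locks dropped while each handler runs.
 */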
void
act_execute_returnhandlers(void)
{
    thread_t    thread = current_thread();

    thread_ast_clear(thread, AST_APC);
    spllo();

    for (;;) {
        ReturnHandler   *rh;

        thread_mtx_lock(thread);

        (void)splsched();
        thread_lock(thread);

        rh = thread->handlers;
        if (rh != NULL) {
            thread->handlers = rh->next;

            thread_unlock(thread);
            spllo();

            thread_mtx_unlock(thread);

            /* Execute it */
            (*rh->handler)(rh, thread);
        }
        else
            break;
    }

    thread_unlock(thread);
    spllo();

    thread_mtx_unlock(thread);
}

/*
 * special_handler_continue
 *
 * Continuation routine for the special handler blocks.  It checks
 * to see whether there have been any new suspensions.  If so, it
 * installs the special handler again.  Otherwise, it checks to see
 * if the current depression needs to be re-instated (it may have
 * been temporarily removed in order to get to this point in a hurry).
 */
void
special_handler_continue(void)
{
    thread_t        thread = current_thread();

    thread_mtx_lock(thread);

    if (thread->suspend_count > 0)
        install_special_handler(thread);
    else {
        spl_t   s = splsched();

        thread_lock(thread);
        if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
            processor_t     myprocessor = thread->last_processor;

            thread->sched_pri = DEPRESSPRI;
            myprocessor->current_pri = thread->sched_pri;
            thread->sched_mode &= ~TH_MODE_PREEMPT;
        }
        thread_unlock(thread);
        splx(s);
    }

    thread_mtx_unlock(thread);

    thread_exception_return();
    /*NOTREACHED*/
}

/*
 * special_handler - handles suspension, termination.  Called
 * with nothing locked.  Returns (if it returns) the same way.
 */
void
special_handler(
    __unused ReturnHandler  *rh,
    thread_t                thread)
{
    spl_t   s;

    thread_mtx_lock(thread);

    s = splsched();
    thread_lock(thread);
    thread->state &= ~(TH_ABORT|TH_ABORT_SAFELY);   /* clear any aborts */
    thread_unlock(thread);
    splx(s);

    /*
     * If we're suspended, go to sleep and wait for someone to wake us up.
     */
    if (thread->active) {
        if (thread->suspend_count > 0) {
            if (thread->handlers == NULL) {
                assert_wait(&thread->suspend_count, THREAD_ABORTSAFE);
                thread_mtx_unlock(thread);
                thread_block((thread_continue_t)special_handler_continue);
                /*NOTREACHED*/
            }

            thread_mtx_unlock(thread);

            special_handler_continue();
            /*NOTREACHED*/
        }
    }
    else {
        thread_mtx_unlock(thread);

        thread_terminate_self();
        /*NOTREACHED*/
    }

    thread_mtx_unlock(thread);
}

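/*
 * act_set_state:
 *
 * Variant of thread_set_state that refuses to operate on the
 * calling thread and otherwise defers to thread_set_state.
 */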
kern_return_t
act_set_state(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  count)
{
    if (thread == current_thread())
        return (KERN_INVALID_ARGUMENT);

    return (thread_set_state(thread, flavor, state, count));
}

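/*
 * act_get_state:
 *
 * Variant of thread_get_state that refuses to operate on the
 * calling thread and otherwise defers to thread_get_state.
 */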
kern_return_t
act_get_state(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  *count)
{
    if (thread == current_thread())
        return (KERN_INVALID_ARGUMENT);

    return (thread_get_state(thread, flavor, state, count));
}

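/*
 * act_set_astbsd:
 *
 * Post an AST_BSD to the thread.  For the current thread the AST is
 * propagated directly; otherwise the processor the thread last ran
 * on is prodded with cause_ast_check if the thread is still running
 * there.
 */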
void
act_set_astbsd(
    thread_t    thread)
{
    spl_t   s = splsched();

    if (thread == current_thread()) {
        thread_ast_set(thread, AST_BSD);
        ast_propagate(thread->ast);
    }
    else {
        processor_t     processor;

        thread_lock(thread);
        thread_ast_set(thread, AST_BSD);
        processor = thread->last_processor;
        if (processor != PROCESSOR_NULL &&
            processor->state == PROCESSOR_RUNNING &&
            processor->active_thread == thread)
            cause_ast_check(processor);
        thread_unlock(thread);
    }

    splx(s);
}

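/*
 * act_set_apc:
 *
 * Post an AST_APC to the thread so that its pending ReturnHandlers
 * (including the special handler) will run; delivery mirrors
 * act_set_astbsd.
 */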
void
act_set_apc(
    thread_t    thread)
{
    spl_t   s = splsched();

    if (thread == current_thread()) {
        thread_ast_set(thread, AST_APC);
        ast_propagate(thread->ast);
    }
    else {
        processor_t     processor;

        thread_lock(thread);
        thread_ast_set(thread, AST_APC);
        processor = thread->last_processor;
        if (processor != PROCESSOR_NULL &&
            processor->state == PROCESSOR_RUNNING &&
            processor->active_thread == thread)
            cause_ast_check(processor);
        thread_unlock(thread);
    }

    splx(s);
}