/*
 * Source: apple/xnu.git — osfmk/kern/thread_act.c (release xnu-1504.7.4).
 * (Gitweb blame-page scraping residue removed from this header.)
 */
1/*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_FREE_COPYRIGHT@
30 */
31/*
32 * Copyright (c) 1993 The University of Utah and
33 * the Center for Software Science (CSS). All rights reserved.
34 *
35 * Permission to use, copy, modify and distribute this software and its
36 * documentation is hereby granted, provided that both the copyright
37 * notice and this permission notice appear in all copies of the
38 * software, derivative works or modified versions, and any portions
39 * thereof, and that both notices appear in supporting documentation.
40 *
41 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
42 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
43 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
44 *
45 * CSS requests users of this software to return to css-dist@cs.utah.edu any
46 * improvements that they make and grant CSS redistribution rights.
47 *
48 * Author: Bryan Ford, University of Utah CSS
49 *
50 * Thread management routines
51 */
52#include <mach/mach_types.h>
53#include <mach/kern_return.h>
54#include <mach/alert.h>
55#include <mach/rpc.h>
56#include <mach/thread_act_server.h>
57
58#include <kern/kern_types.h>
59#include <kern/ast.h>
60#include <kern/mach_param.h>
61#include <kern/zalloc.h>
62#include <kern/thread.h>
63#include <kern/task.h>
64#include <kern/sched_prim.h>
65#include <kern/misc_protos.h>
66#include <kern/assert.h>
67#include <kern/exception.h>
68#include <kern/ipc_mig.h>
69#include <kern/ipc_tt.h>
70#include <kern/machine.h>
71#include <kern/spl.h>
72#include <kern/syscall_subr.h>
73#include <kern/sync_lock.h>
74#include <kern/processor.h>
75#include <kern/timer.h>
76#include <kern/affinity.h>
77
78#include <mach/rpc.h>
79
80void act_abort(thread_t);
81void install_special_handler_locked(thread_t);
82void special_handler_continue(void);
83
/*
 * Internal routine to mark a thread as started.
 * Always called with the thread locked.
 *
 * Note: function intentionally declared with the noinline attribute to
 * prevent multiple declaration of probe symbols in this file; we would
 * prefer "#pragma noinline", but gcc does not support it.
 * PR-6385749 -- the lwp-start probe should fire from within the context
 * of the newly created thread.  Commented out for now, in case we
 * turn it into a dead code probe.
 */
void
thread_start_internal(
	thread_t			thread)
{
	/* Release the thread from the wait it was parked on at creation. */
	clear_wait(thread, THREAD_AWAKENED);
	thread->started = TRUE;
	// DTRACE_PROC1(lwp__start, thread_t, thread);
}
103
/*
 * Internal routine to terminate a thread.
 * Sometimes called with task already locked.
 */
kern_return_t
thread_terminate_internal(
	thread_t			thread)
{
	kern_return_t		result = KERN_SUCCESS;

	thread_mtx_lock(thread);

	if (thread->active) {
		thread->active = FALSE;

		/* Flag the abort and queue the special handler. */
		act_abort(thread);

		if (thread->started)
			clear_wait(thread, THREAD_INTERRUPTED);
		else {
			/*
			 * Never ran: start it now so it can reach the
			 * special handler and finish its own termination.
			 */
			thread_start_internal(thread);
		}
	}
	else
		result = KERN_TERMINATED;

	/* Drop any processor-affinity binding before the thread exits. */
	if (thread->affinity_set != NULL)
		thread_affinity_terminate(thread);

	thread_mtx_unlock(thread);

	/*
	 * Wait for the target to stop running; the thread completes its
	 * own termination in the special handler APC.
	 */
	if (thread != current_thread() && result == KERN_SUCCESS)
		thread_wait(thread);

	return (result);
}
140
/*
 * Terminate a thread.
 *
 * Returns KERN_INVALID_ARGUMENT for THREAD_NULL, KERN_FAILURE when a
 * kernel thread is targeted by another thread, otherwise the result
 * of thread_terminate_internal().
 */
kern_return_t
thread_terminate(
	thread_t		thread)
{
	kern_return_t	result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	/* Only a kernel thread itself may terminate a kernel thread. */
	if (	thread->task == kernel_task		&&
			thread != current_thread()			)
		return (KERN_FAILURE);

	result = thread_terminate_internal(thread);

	/*
	 * If a kernel thread is terminating itself, force an AST here.
	 * Kernel threads don't normally pass through the AST checking
	 * code - and all threads finish their own termination in the
	 * special handler APC.
	 */
	if (thread->task == kernel_task) {
		ml_set_interrupts_enabled(FALSE);
		ast_taken(AST_APC, TRUE);
		/* ast_taken() must not return on this path. */
		panic("thread_terminate");
	}

	return (result);
}
173
174/*
175 * Suspend execution of the specified thread.
176 * This is a recursive-style suspension of the thread, a count of
177 * suspends is maintained.
178 *
179 * Called with thread mutex held.
180 */
181void
182thread_hold(
183 register thread_t thread)
184{
185 if (thread->suspend_count++ == 0) {
186 install_special_handler(thread);
187 if (thread->started)
188 thread_wakeup_one(&thread->suspend_count);
189 }
190}
191
192/*
193 * Decrement internal suspension count, setting thread
194 * runnable when count falls to zero.
195 *
196 * Called with thread mutex held.
197 */
198void
199thread_release(
200 register thread_t thread)
201{
202 if ( thread->suspend_count > 0 &&
203 --thread->suspend_count == 0 ) {
204 if (thread->started)
205 thread_wakeup_one(&thread->suspend_count);
206 else {
207 thread_start_internal(thread);
208 }
209 }
210}
211
/*
 * Suspend a user thread on behalf of user-level callers; suspensions
 * are counted via user_stop_count.  Kernel threads are rejected.
 */
kern_return_t
thread_suspend(
	register thread_t	thread)
{
	thread_t			self = current_thread();
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		/*
		 * The first user-level stop also takes the first internal
		 * suspend: install the special handler so the target parks
		 * itself, and wake it in case it is currently waiting.
		 */
		if (	thread->user_stop_count++ == 0		&&
				thread->suspend_count++ == 0			) {
			install_special_handler(thread);
			if (thread != self)
				thread_wakeup_one(&thread->suspend_count);
		}
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	/*
	 * Don't return until the target has actually stopped running;
	 * suspending oneself instead takes effect at the next AST.
	 */
	if (thread != self && result == KERN_SUCCESS)
		thread_wait(thread);

	return (result);
}
242
/*
 * Undo one user-level suspension; when both the user and internal
 * counts drain to zero the thread becomes runnable (or is started
 * for the first time).  Resuming a never-suspended thread fails.
 */
kern_return_t
thread_resume(
	register thread_t	thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread->user_stop_count > 0) {
			/* Last user stop also releases the internal suspend. */
			if (	--thread->user_stop_count == 0		&&
					--thread->suspend_count == 0		) {
				if (thread->started)
					thread_wakeup_one(&thread->suspend_count);
				else {
					thread_start_internal(thread);
				}
			}
		}
		else
			result = KERN_FAILURE;
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
275
276/*
277 * thread_depress_abort:
278 *
279 * Prematurely abort priority depression if there is one.
280 */
281kern_return_t
282thread_depress_abort(
283 register thread_t thread)
284{
285 kern_return_t result;
286
287 if (thread == THREAD_NULL)
288 return (KERN_INVALID_ARGUMENT);
289
290 thread_mtx_lock(thread);
291
292 if (thread->active)
293 result = thread_depress_abort_internal(thread);
294 else
295 result = KERN_TERMINATED;
296
297 thread_mtx_unlock(thread);
298
299 return (result);
300}
301
302
/*
 * Indicate that the activation should run its
 * special handler to detect a condition.
 *
 * Called with thread mutex held.
 */
void
act_abort(
	thread_t	thread)
{
	spl_t		s = splsched();

	thread_lock(thread);

	if (!(thread->sched_mode & TH_MODE_ABORT)) {
		/* First abort: flag it and queue the special handler. */
		thread->sched_mode |= TH_MODE_ABORT;
		install_special_handler_locked(thread);
	}
	else
		/*
		 * Already aborting: strip the "safely" qualifier so a
		 * pending safe abort is upgraded to an unconditional one.
		 */
		thread->sched_mode &= ~TH_MODE_ABORTSAFELY;

	thread_unlock(thread);
	splx(s);
}
327
328kern_return_t
329thread_abort(
330 register thread_t thread)
331{
332 kern_return_t result = KERN_SUCCESS;
333
334 if (thread == THREAD_NULL)
335 return (KERN_INVALID_ARGUMENT);
336
337 thread_mtx_lock(thread);
338
339 if (thread->active) {
340 act_abort(thread);
341 clear_wait(thread, THREAD_INTERRUPTED);
342 }
343 else
344 result = KERN_TERMINATED;
345
346 thread_mtx_unlock(thread);
347
348 return (result);
349}
350
/*
 * Abort the thread only at an abort-safe point; otherwise arrange
 * for the special handler to deliver the abort at the next one.
 */
kern_return_t
thread_abort_safely(
	thread_t		thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		spl_t		s = splsched();

		thread_lock(thread);
		/*
		 * If the thread is at a safe point and its wait can be
		 * cleared, the interruption is delivered immediately.
		 * Otherwise mark it safely-aborted and queue the special
		 * handler (unless an unconditional abort is already set).
		 */
		if (!thread->at_safe_point				||
				clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
			if (!(thread->sched_mode & TH_MODE_ABORT)) {
				thread->sched_mode |= TH_MODE_ISABORTED;
				install_special_handler_locked(thread);
			}
		}
		thread_unlock(thread);
		splx(s);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
383
384/*** backward compatibility hacks ***/
385#include <mach/thread_info.h>
386#include <mach/thread_special_ports.h>
387#include <ipc/ipc_port.h>
388
389kern_return_t
390thread_info(
391 thread_t thread,
392 thread_flavor_t flavor,
393 thread_info_t thread_info_out,
394 mach_msg_type_number_t *thread_info_count)
395{
396 kern_return_t result;
397
398 if (thread == THREAD_NULL)
399 return (KERN_INVALID_ARGUMENT);
400
401 thread_mtx_lock(thread);
402
403 if (thread->active)
404 result = thread_info_internal(
405 thread, flavor, thread_info_out, thread_info_count);
406 else
407 result = KERN_TERMINATED;
408
409 thread_mtx_unlock(thread);
410
411 return (result);
412}
413
/*
 * Fetch the machine-dependent state of a thread.  A remote target is
 * held and stopped so a consistent snapshot can be taken; the current
 * thread reads its own state directly.
 */
kern_return_t
thread_get_state(
	register thread_t		thread,
	int						flavor,
	thread_state_t			state,			/* pointer to OUT array */
	mach_msg_type_number_t	*state_count)	/*IN/OUT*/
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			/* Drop the mutex: thread_stop() may block. */
			thread_mtx_unlock(thread);

			if (thread_stop(thread)) {
				thread_mtx_lock(thread);
				result = machine_thread_get_state(
										thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				/* Stop was interrupted; no state was read. */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_get_state(
									thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
458
/*
 * Change thread's machine-dependent state.  Called with nothing
 * locked.  Returns same way.  A remote target is held and stopped
 * before its state is written; the current thread writes directly.
 */
kern_return_t
thread_set_state(
	register thread_t		thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	state_count)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			/* Drop the mutex: thread_stop() may block. */
			thread_mtx_unlock(thread);

			if (thread_stop(thread)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_state(
										thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				/* Stop was interrupted; no state was written. */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_set_state(
									thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
507
508
509/*
510 * Kernel-internal "thread" interfaces used outside this file:
511 */
512
/* Initialize (or re-initialize) a thread state.  Called from execve
 * with nothing locked, returns same way.  Follows the same
 * hold/stop protocol as thread_set_state() for a remote target.
 */
kern_return_t
thread_state_initialize(
	register thread_t	thread)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			/* Drop the mutex: thread_stop() may block. */
			thread_mtx_unlock(thread);

			if (thread_stop(thread)) {
				thread_mtx_lock(thread);
				result = machine_thread_state_initialize( thread );
				thread_unstop(thread);
			}
			else {
				/* Stop was interrupted; state untouched. */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_state_initialize( thread );
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
555
556
/*
 * Duplicate the current thread's machine state (and, if present, its
 * affinity) into the stopped target.  Used by fork-style paths.
 * Called with nothing locked; the target may not be the caller.
 */
kern_return_t
thread_dup(
	register thread_t	target)
{
	thread_t			self = current_thread();
	kern_return_t		result = KERN_SUCCESS;

	if (target == THREAD_NULL || target == self)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(target);

	if (target->active) {
		thread_hold(target);

		/* Drop the mutex: thread_stop() may block. */
		thread_mtx_unlock(target);

		if (thread_stop(target)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(self, target);
			/* Propagate the caller's affinity binding, if any. */
			if (self->affinity_set != AFFINITY_SET_NULL)
				thread_affinity_dup(self, target);
			thread_unstop(target);
		}
		else {
			/* Stop was interrupted; nothing was copied. */
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(target);

	return (result);
}
595
596
597/*
598 * thread_setstatus:
599 *
600 * Set the status of the specified thread.
601 * Called with (and returns with) no locks held.
602 */
603kern_return_t
604thread_setstatus(
605 register thread_t thread,
606 int flavor,
607 thread_state_t tstate,
608 mach_msg_type_number_t count)
609{
610
611 return (thread_set_state(thread, flavor, tstate, count));
612}
613
614/*
615 * thread_getstatus:
616 *
617 * Get the status of the specified thread.
618 */
619kern_return_t
620thread_getstatus(
621 register thread_t thread,
622 int flavor,
623 thread_state_t tstate,
624 mach_msg_type_number_t *count)
625{
626 return (thread_get_state(thread, flavor, tstate, count));
627}
628
629/*
630 * install_special_handler:
631 *
632 * Install the special returnhandler that handles suspension and
633 * termination, if it hasn't been installed already.
634 *
635 * Called with the thread mutex held.
636 */
637void
638install_special_handler(
639 thread_t thread)
640{
641 spl_t s = splsched();
642
643 thread_lock(thread);
644 install_special_handler_locked(thread);
645 thread_unlock(thread);
646 splx(s);
647}
648
/*
 * install_special_handler_locked:
 *
 * Do the work of installing the special_handler.
 *
 * Called with the thread mutex and scheduling lock held.
 */
void
install_special_handler_locked(
	thread_t				thread)
{
	ReturnHandler		**rh;

	/* The work handler must always be the last ReturnHandler on the list,
		because it can do tricky things like detach the thr_act. */
	for (rh = &thread->handlers; *rh; rh = &(*rh)->next)
		continue;

	/* Append only if not already queued at the tail. */
	if (rh != &thread->special_handler.next)
		*rh = &thread->special_handler;

	/*
	 * Temporarily undepress, so target has
	 * a chance to do locking required to
	 * block itself in special_handler().
	 */
	if (thread->sched_mode & TH_MODE_ISDEPRESSED)
		compute_priority(thread, TRUE);

	/* Request the APC; deliver it to the right place. */
	thread_ast_set(thread, AST_APC);

	if (thread == current_thread())
		ast_propagate(thread->ast);
	else {
		processor_t		processor = thread->last_processor;

		/* Running remotely: nudge that processor to notice the AST. */
		if (	processor != PROCESSOR_NULL					&&
				processor->state == PROCESSOR_RUNNING		&&
				processor->active_thread == thread			)
			cause_ast_check(processor);
	}
}
691
692/*
693 * Activation control support routines internal to this file:
694 */
695
/*
 * Drain and run the current thread's queued ReturnHandlers, one at a
 * time, with no locks held while each handler executes.
 */
void
act_execute_returnhandlers(void)
{
	thread_t	thread = current_thread();

	thread_ast_clear(thread, AST_APC);
	spllo();

	for (;;) {
		ReturnHandler	*rh;

		thread_mtx_lock(thread);

		(void)splsched();
		thread_lock(thread);

		rh = thread->handlers;
		if (rh != NULL) {
			/* Unlink before dropping locks; the handler may block. */
			thread->handlers = rh->next;

			thread_unlock(thread);
			spllo();

			thread_mtx_unlock(thread);

			/* Execute it */
			(*rh->handler)(rh, thread);
		}
		else
			break;
	}

	/* List empty: release the locks taken by the final iteration. */
	thread_unlock(thread);
	spllo();

	thread_mtx_unlock(thread);
}
733
/*
 * special_handler_continue
 *
 * Continuation routine for the special handler blocks.  It checks
 * to see whether there has been any new suspensions.  If so, it
 * installs the special handler again.  Otherwise, it checks to see
 * if the current depression needs to be re-instated (it may have
 * been temporarily removed in order to get to this point in a hurry).
 */
void
special_handler_continue(void)
{
	thread_t		thread = current_thread();

	thread_mtx_lock(thread);

	if (thread->suspend_count > 0)
		install_special_handler(thread);
	else {
		spl_t			s = splsched();

		thread_lock(thread);
		if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
			/*
			 * NOTE(review): last_processor is dereferenced without a
			 * null check; presumably valid because this thread is
			 * currently running -- confirm against scheduler code.
			 */
			processor_t		myprocessor = thread->last_processor;

			/* Re-instate the depressed priority undone earlier. */
			thread->sched_pri = DEPRESSPRI;
			myprocessor->current_pri = thread->sched_pri;
		}
		thread_unlock(thread);
		splx(s);
	}

	thread_mtx_unlock(thread);

	thread_exception_return();
	/*NOTREACHED*/
}
771
/*
 * special_handler	- handles suspension, termination.  Called
 * with nothing locked.  Returns (if it returns) the same way.
 */
void
special_handler(
	__unused ReturnHandler	*rh,
	thread_t				thread)
{
	spl_t			s;

	thread_mtx_lock(thread);

	/* Acknowledge any pending abort flags. */
	s = splsched();
	thread_lock(thread);
	thread->sched_mode &= ~TH_MODE_ISABORTED;
	thread_unlock(thread);
	splx(s);

	/*
	 * If we're suspended, go to sleep and wait for someone to wake us up.
	 */
	if (thread->active) {
		if (thread->suspend_count > 0) {
			if (thread->handlers == NULL) {
				/* Park until resumed; resume runs the continuation. */
				assert_wait(&thread->suspend_count, THREAD_ABORTSAFE);
				thread_mtx_unlock(thread);
				thread_block((thread_continue_t)special_handler_continue);
				/*NOTREACHED*/
			}

			/* More handlers queued: re-check suspension afterwards. */
			thread_mtx_unlock(thread);

			special_handler_continue();
			/*NOTREACHED*/
		}
	}
	else {
		/* Thread is terminating: finish it off here. */
		thread_mtx_unlock(thread);

		thread_terminate_self();
		/*NOTREACHED*/
	}

	thread_mtx_unlock(thread);
}
818
819kern_return_t
820act_set_state(
821 thread_t thread,
822 int flavor,
823 thread_state_t state,
824 mach_msg_type_number_t count)
825{
826 if (thread == current_thread())
827 return (KERN_INVALID_ARGUMENT);
828
829 return (thread_set_state(thread, flavor, state, count));
830
831}
832
833kern_return_t
834act_get_state(
835 thread_t thread,
836 int flavor,
837 thread_state_t state,
838 mach_msg_type_number_t *count)
839{
840 if (thread == current_thread())
841 return (KERN_INVALID_ARGUMENT);
842
843 return (thread_get_state(thread, flavor, state, count));
844}
845
/*
 * Post an AST_BSD to the thread: propagate locally for the current
 * thread, otherwise flag it under the thread lock and kick the
 * processor it is running on, if any.
 */
void
act_set_astbsd(
	thread_t	thread)
{
	spl_t		s = splsched();

	if (thread == current_thread()) {
		thread_ast_set(thread, AST_BSD);
		ast_propagate(thread->ast);
	}
	else {
		processor_t		processor;

		thread_lock(thread);
		thread_ast_set(thread, AST_BSD);
		processor = thread->last_processor;
		/* Running remotely: nudge that processor to notice the AST. */
		if (	processor != PROCESSOR_NULL					&&
				processor->state == PROCESSOR_RUNNING		&&
				processor->active_thread == thread			)
			cause_ast_check(processor);
		thread_unlock(thread);
	}

	splx(s);
}
871
/*
 * Post an AST_APC to the thread: propagate locally for the current
 * thread, otherwise flag it under the thread lock and kick the
 * processor it is running on, if any.
 */
void
act_set_apc(
	thread_t	thread)
{
	spl_t		s = splsched();

	if (thread == current_thread()) {
		thread_ast_set(thread, AST_APC);
		ast_propagate(thread->ast);
	}
	else {
		processor_t		processor;

		thread_lock(thread);
		thread_ast_set(thread, AST_APC);
		processor = thread->last_processor;
		/* Running remotely: nudge that processor to notice the AST. */
		if (	processor != PROCESSOR_NULL					&&
				processor->state == PROCESSOR_RUNNING		&&
				processor->active_thread == thread			)
			cause_ast_check(processor);
		thread_unlock(thread);
	}

	splx(s);
}