]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/thread_act.c
xnu-3248.50.21.tar.gz
[apple/xnu.git] / osfmk / kern / thread_act.c
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Copyright (c) 1993 The University of Utah and
33 * the Center for Software Science (CSS). All rights reserved.
34 *
35 * Permission to use, copy, modify and distribute this software and its
36 * documentation is hereby granted, provided that both the copyright
37 * notice and this permission notice appear in all copies of the
38 * software, derivative works or modified versions, and any portions
39 * thereof, and that both notices appear in supporting documentation.
40 *
41 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
42 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
43 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
44 *
45 * CSS requests users of this software to return to css-dist@cs.utah.edu any
46 * improvements that they make and grant CSS redistribution rights.
47 *
48 * Author: Bryan Ford, University of Utah CSS
49 *
50 * Thread management routines
51 */
52 #include <mach/mach_types.h>
53 #include <mach/kern_return.h>
54 #include <mach/alert.h>
55 #include <mach/rpc.h>
56 #include <mach/thread_act_server.h>
57
58 #include <kern/kern_types.h>
59 #include <kern/ast.h>
60 #include <kern/mach_param.h>
61 #include <kern/zalloc.h>
62 #include <kern/extmod_statistics.h>
63 #include <kern/thread.h>
64 #include <kern/task.h>
65 #include <kern/sched_prim.h>
66 #include <kern/misc_protos.h>
67 #include <kern/assert.h>
68 #include <kern/exception.h>
69 #include <kern/ipc_mig.h>
70 #include <kern/ipc_tt.h>
71 #include <kern/machine.h>
72 #include <kern/spl.h>
73 #include <kern/syscall_subr.h>
74 #include <kern/sync_lock.h>
75 #include <kern/processor.h>
76 #include <kern/timer.h>
77 #include <kern/affinity.h>
78
79 #include <mach/rpc.h>
80
81 #include <security/mac_mach_internal.h>
82
/* Forward declarations for routines defined later in this file. */
void			act_abort(thread_t);
void			install_special_handler_locked(thread_t);
void			special_handler_continue(void);
86
87 /*
88 * Internal routine to mark a thread as started.
89 * Always called with the thread mutex locked.
90 *
91 * Note: function intentionally declared with the noinline attribute to
92 * prevent multiple declaration of probe symbols in this file; we would
93 * prefer "#pragma noinline", but gcc does not support it.
94 * PR-6385749 -- the lwp-start probe should fire from within the context
95 * of the newly created thread. Commented out for now, in case we
96 * turn it into a dead code probe.
97 */
void
thread_start_internal(
	thread_t			thread)
{
	/* Make the thread runnable; new threads are created waiting. */
	clear_wait(thread, THREAD_AWAKENED);
	/* Mark started only after the wakeup so thread_hold/thread_release
	 * observe a consistent state under the thread mutex. */
	thread->started = TRUE;
	// DTRACE_PROC1(lwp__start, thread_t, thread);
}
106
107 /*
108 * Internal routine to terminate a thread.
109 * Sometimes called with task already locked.
110 */
kern_return_t
thread_terminate_internal(
	thread_t			thread)
{
	kern_return_t		result = KERN_SUCCESS;

	thread_mtx_lock(thread);

	if (thread->active) {
		thread->active = FALSE;

		/* Post an abort so the thread runs its special handler
		 * and completes termination there. */
		act_abort(thread);

		if (thread->started)
			clear_wait(thread, THREAD_INTERRUPTED);
		else {
			/* Never-started thread: start it so it can terminate. */
			thread_start_internal(thread);
		}
	}
	else
		result = KERN_TERMINATED;	/* already being terminated */

	if (thread->affinity_set != NULL)
		thread_affinity_terminate(thread);

	thread_mtx_unlock(thread);

	/* Wait for the target to stop running before returning, unless
	 * we are terminating ourselves (we'd never wake up). */
	if (thread != current_thread() && result == KERN_SUCCESS)
		thread_wait(thread, FALSE);

	return (result);
}
143
144 /*
145 * Terminate a thread.
146 */
kern_return_t
thread_terminate(
	thread_t			thread)
{
	kern_return_t		result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	/* Kernel threads may only terminate themselves. */
	if (	thread->task == kernel_task		&&
			thread != current_thread()		)
		return (KERN_FAILURE);

	result = thread_terminate_internal(thread);

	/*
	 * If a kernel thread is terminating itself, force an AST here.
	 * Kernel threads don't normally pass through the AST checking
	 * code - and all threads finish their own termination in the
	 * special handler APC.
	 */
	if (thread->task == kernel_task) {
		ml_set_interrupts_enabled(FALSE);
		ast_taken(AST_APC, TRUE);
		/* Termination completes inside the AST; ast_taken is not
		 * expected to return here. */
		panic("thread_terminate");
	}

	return (result);
}
176
177 /*
178 * Suspend execution of the specified thread.
179 * This is a recursive-style suspension of the thread, a count of
180 * suspends is maintained.
181 *
182 * Called with thread mutex held.
183 */
void
thread_hold(
	register thread_t	thread)
{
	/* Only the 0 -> 1 transition needs to arm the special handler;
	 * further holds just bump the count. */
	if (thread->suspend_count++ == 0) {
		install_special_handler(thread);
		/* Kick the target in case it is blocked ABORTSAFE waiting
		 * on its own suspend_count (see special_handler). */
		if (thread->started)
			thread_wakeup_one(&thread->suspend_count);
	}
}
194
195 /*
196 * Decrement internal suspension count, setting thread
197 * runnable when count falls to zero.
198 *
199 * Called with thread mutex held.
200 */
void
thread_release(
	register thread_t	thread)
{
	/* Guarded decrement: only act when the count actually reaches 0. */
	if (	thread->suspend_count > 0		&&
			--thread->suspend_count == 0	) {
		if (thread->started)
			thread_wakeup_one(&thread->suspend_count);
		else {
			/* Never-started thread becomes runnable now. */
			thread_start_internal(thread);
		}
	}
}
214
/*
 * thread_suspend:
 *
 * User-level (Mach API) thread suspension.  Maintains a separate
 * user_stop_count on top of the internal suspend_count; only the
 * first user stop installs the special handler.
 */
kern_return_t
thread_suspend(
	register thread_t	thread)
{
	thread_t			self = current_thread();
	kern_return_t		result = KERN_SUCCESS;

	/* Kernel-task threads may not be suspended through this interface. */
	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (	thread->user_stop_count++ == 0		&&
				thread->suspend_count++ == 0		) {
			install_special_handler(thread);
			/* No wakeup when suspending self: we stop when we hit
			 * the special handler on the way out. */
			if (thread != self)
				thread_wakeup_one(&thread->suspend_count);
		}
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	/* Wait until the target has actually stopped executing. */
	if (thread != self && result == KERN_SUCCESS)
		thread_wait(thread, FALSE);

	return (result);
}
245
/*
 * thread_resume:
 *
 * Undo one user-level suspension; the thread runs again only when
 * both the user stop count and the internal suspend count drain to 0.
 * Returns KERN_FAILURE if the thread was not user-suspended.
 */
kern_return_t
thread_resume(
	register thread_t	thread)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread->user_stop_count > 0) {
			if (	--thread->user_stop_count == 0		&&
					--thread->suspend_count == 0		) {
				if (thread->started)
					thread_wakeup_one(&thread->suspend_count);
				else {
					/* Never-started thread: make it runnable now. */
					thread_start_internal(thread);
				}
			}
		}
		else
			result = KERN_FAILURE;		/* not suspended */
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
278
279 /*
280 * thread_depress_abort:
281 *
282 * Prematurely abort priority depression if there is one.
283 */
284 kern_return_t
285 thread_depress_abort(
286 register thread_t thread)
287 {
288 kern_return_t result;
289
290 if (thread == THREAD_NULL)
291 return (KERN_INVALID_ARGUMENT);
292
293 thread_mtx_lock(thread);
294
295 if (thread->active)
296 result = thread_depress_abort_internal(thread);
297 else
298 result = KERN_TERMINATED;
299
300 thread_mtx_unlock(thread);
301
302 return (result);
303 }
304
305
306 /*
307 * Indicate that the activation should run its
308 * special handler to detect a condition.
309 *
310 * Called with thread mutex held.
311 */
void
act_abort(
	thread_t	thread)
{
	spl_t		s = splsched();

	thread_lock(thread);

	if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
		/* First abort: mark it and arm the special handler. */
		thread->sched_flags |= TH_SFLAG_ABORT;
		install_special_handler_locked(thread);
	}
	else
		/* Already aborting: upgrade a safely-deferred abort to an
		 * immediate one by clearing the ABORTSAFELY bit. */
		thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;

	thread_unlock(thread);
	splx(s);
}
330
331 kern_return_t
332 thread_abort(
333 register thread_t thread)
334 {
335 kern_return_t result = KERN_SUCCESS;
336
337 if (thread == THREAD_NULL)
338 return (KERN_INVALID_ARGUMENT);
339
340 thread_mtx_lock(thread);
341
342 if (thread->active) {
343 act_abort(thread);
344 clear_wait(thread, THREAD_INTERRUPTED);
345 }
346 else
347 result = KERN_TERMINATED;
348
349 thread_mtx_unlock(thread);
350
351 return (result);
352 }
353
/*
 * thread_abort_safely:
 *
 * Like thread_abort, but only interrupts the thread immediately if
 * it is waiting at a safe point; otherwise the abort is deferred to
 * the special handler.
 */
kern_return_t
thread_abort_safely(
	thread_t		thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		spl_t		s = splsched();

		thread_lock(thread);
		/* If the thread is not at a safe point, or the wait could not
		 * be cleared, fall back to a deferred (flag-based) abort. */
		if (!thread->at_safe_point ||
				clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
			if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
				thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
				install_special_handler_locked(thread);
			}
		}
		thread_unlock(thread);
		splx(s);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
386
387 /*** backward compatibility hacks ***/
388 #include <mach/thread_info.h>
389 #include <mach/thread_special_ports.h>
390 #include <ipc/ipc_port.h>
391
392 kern_return_t
393 thread_info(
394 thread_t thread,
395 thread_flavor_t flavor,
396 thread_info_t thread_info_out,
397 mach_msg_type_number_t *thread_info_count)
398 {
399 kern_return_t result;
400
401 if (thread == THREAD_NULL)
402 return (KERN_INVALID_ARGUMENT);
403
404 thread_mtx_lock(thread);
405
406 if (thread->active || thread->inspection)
407 result = thread_info_internal(
408 thread, flavor, thread_info_out, thread_info_count);
409 else
410 result = KERN_TERMINATED;
411
412 thread_mtx_unlock(thread);
413
414 return (result);
415 }
416
/*
 * thread_get_state:
 *
 * Fetch the machine-dependent state of the thread.  A remote thread
 * is held and stopped first so its state is stable; the mutex is
 * dropped across thread_stop to avoid deadlocking with the target.
 */
kern_return_t
thread_get_state(
	register thread_t		thread,
	int						flavor,
	thread_state_t			state,			/* pointer to OUT array */
	mach_msg_type_number_t	*state_count)	/*IN/OUT*/
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			/* Drop the mutex: thread_stop may block waiting for the
			 * target, which may need the mutex to get off-CPU. */
			thread_mtx_unlock(thread);

			if (thread_stop(thread, FALSE)) {
				thread_mtx_lock(thread);
				result = machine_thread_get_state(
										thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				/* Stop was aborted (e.g. target terminated). */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			/* Own state can be read directly. */
			result = machine_thread_get_state(
									thread, flavor, state, state_count);
	}
	else if (thread->inspection)
	{
		/* Inactive but inspectable (corpse): state is frozen. */
		result = machine_thread_get_state(
								thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
466
467 /*
468 * Change thread's machine-dependent state. Called with nothing
469 * locked. Returns same way.
470 */
static kern_return_t
thread_set_state_internal(
	register thread_t		thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	state_count,
	boolean_t				from_user)	/* TRUE: count as external modification */
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			/* Drop the mutex across thread_stop (see thread_get_state). */
			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_state(
										thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_set_state(
									thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	/* Record user-initiated modifications for external-modification
	 * statistics (still under the mutex). */
	if ((result == KERN_SUCCESS) && from_user)
		extmod_statistics_incr_thread_set_state(thread);

	thread_mtx_unlock(thread);

	return (result);
}
519
520 /* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
521 kern_return_t
522 thread_set_state(
523 register thread_t thread,
524 int flavor,
525 thread_state_t state,
526 mach_msg_type_number_t state_count);
527
528 kern_return_t
529 thread_set_state(
530 register thread_t thread,
531 int flavor,
532 thread_state_t state,
533 mach_msg_type_number_t state_count)
534 {
535 return thread_set_state_internal(thread, flavor, state, state_count, FALSE);
536 }
537
538 kern_return_t
539 thread_set_state_from_user(
540 register thread_t thread,
541 int flavor,
542 thread_state_t state,
543 mach_msg_type_number_t state_count)
544 {
545 return thread_set_state_internal(thread, flavor, state, state_count, TRUE);
546 }
547
548 /*
549 * Kernel-internal "thread" interfaces used outside this file:
550 */
551
552 /* Initialize (or re-initialize) a thread state. Called from execve
553 * with nothing locked, returns same way.
554 */
kern_return_t
thread_state_initialize(
	register thread_t	thread)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			/* Drop the mutex across thread_stop (see thread_get_state). */
			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_state_initialize( thread );
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_state_initialize( thread );
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
594
595
/*
 * thread_dup:
 *
 * Copy the calling thread's machine state (and affinity, if any)
 * into the target thread.  Used by fork-style duplication.
 */
kern_return_t
thread_dup(
	register thread_t	target)
{
	thread_t			self = current_thread();
	kern_return_t		result = KERN_SUCCESS;

	if (target == THREAD_NULL || target == self)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(target);

	if (target->active) {
		thread_hold(target);

		/* Drop the mutex across thread_stop (see thread_get_state). */
		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(self, target);
			/* Propagate our affinity set to the duplicate as well. */
			if (self->affinity_set != AFFINITY_SET_NULL)
				thread_affinity_dup(self, target);
			thread_unstop(target);
		}
		else {
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(target);

	return (result);
}
634
635
636 /*
637 * thread_setstatus:
638 *
639 * Set the status of the specified thread.
640 * Called with (and returns with) no locks held.
641 */
642 kern_return_t
643 thread_setstatus(
644 register thread_t thread,
645 int flavor,
646 thread_state_t tstate,
647 mach_msg_type_number_t count)
648 {
649
650 return (thread_set_state(thread, flavor, tstate, count));
651 }
652
653 /*
654 * thread_getstatus:
655 *
656 * Get the status of the specified thread.
657 */
658 kern_return_t
659 thread_getstatus(
660 register thread_t thread,
661 int flavor,
662 thread_state_t tstate,
663 mach_msg_type_number_t *count)
664 {
665 return (thread_get_state(thread, flavor, tstate, count));
666 }
667
668 /*
669 * Change thread's machine-dependent userspace TSD base.
670 * Called with nothing locked. Returns same way.
671 */
kern_return_t
thread_set_tsd_base(
	thread_t			thread,
	mach_vm_offset_t	tsd_base)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			/* Drop the mutex across thread_stop (see thread_get_state). */
			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_tsd_base(thread, tsd_base);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_set_tsd_base(thread, tsd_base);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
712
713 /*
714 * install_special_handler:
715 *
716 * Install the special returnhandler that handles suspension and
717 * termination, if it hasn't been installed already.
718 *
719 * Called with the thread mutex held.
720 */
721 void
722 install_special_handler(
723 thread_t thread)
724 {
725 spl_t s = splsched();
726
727 thread_lock(thread);
728 install_special_handler_locked(thread);
729 thread_unlock(thread);
730 splx(s);
731 }
732
733 /*
734 * install_special_handler_locked:
735 *
736 * Do the work of installing the special_handler.
737 *
738 * Called with the thread mutex and scheduling lock held.
739 */
void
install_special_handler_locked(
	thread_t				thread)
{

	/*
	 * Temporarily undepress, so target has
	 * a chance to do locking required to
	 * block itself in special_handler().
	 */
	if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)
		thread_recompute_sched_pri(thread, TRUE);

	/* Request the APC AST; the special handler runs from it. */
	thread_ast_set(thread, AST_APC);

	if (thread == current_thread())
		/* Self: propagate so the AST fires on the next boundary. */
		ast_propagate(thread->ast);
	else {
		processor_t		processor = thread->last_processor;

		/* If the target is currently on a CPU, poke that CPU so it
		 * notices the pending AST promptly. */
		if (	processor != PROCESSOR_NULL			&&
				processor->state == PROCESSOR_RUNNING	&&
				processor->active_thread == thread		)
			cause_ast_check(processor);
	}
}
766
767 /*
768 * Activation control support routines internal to this file:
769 *
770 */
771
772 /*
773 * special_handler_continue
774 *
775 * Continuation routine for the special handler blocks. It checks
776 * to see whether there has been any new suspensions. If so, it
777 * installs the special handler again. Otherwise, it checks to see
778 * if the current depression needs to be re-instated (it may have
779 * been temporarily removed in order to get to this point in a hurry).
780 */
void
special_handler_continue(void)
{
	thread_t		thread = current_thread();

	thread_mtx_lock(thread);

	if (thread->suspend_count > 0)
		/* New suspensions arrived while blocked: go around again. */
		install_special_handler(thread);
	else {
		spl_t			s = splsched();

		thread_lock(thread);
		if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
			/* Re-instate the depression that was temporarily removed
			 * by install_special_handler_locked(). */
			processor_t		myprocessor = thread->last_processor;

			thread->sched_pri = DEPRESSPRI;

			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
			                      (uintptr_t)thread_tid(thread),
			                      thread->base_pri,
			                      thread->sched_pri,
			                      0, /* eventually, 'reason' */
			                      0);

			myprocessor->current_pri = thread->sched_pri;
		}
		thread_unlock(thread);
		splx(s);
	}

	thread_mtx_unlock(thread);

	thread_exception_return();
	/*NOTREACHED*/
}
817
818 /*
819 * special_handler - handles suspension, termination. Called
820 * with nothing locked. Returns (if it returns) the same way.
821 */
void
special_handler(
	thread_t		thread)
{
	spl_t		s;

	thread_mtx_lock(thread);

	/* The abort that got us here has been delivered: clear the flags. */
	s = splsched();
	thread_lock(thread);
	thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
	thread_unlock(thread);
	splx(s);

	/*
	 * If we're suspended, go to sleep and wait for someone to wake us up.
	 */
	if (thread->active) {
		if (thread->suspend_count > 0) {
			/* Block on our own suspend_count; thread_release /
			 * thread_hold wake this event. */
			assert_wait(&thread->suspend_count, THREAD_ABORTSAFE);
			thread_mtx_unlock(thread);
			thread_block((thread_continue_t)special_handler_continue);
			/*NOTREACHED*/
		}
	}
	else {
		/* Thread has been terminated: finish our own termination. */
		thread_mtx_unlock(thread);

		thread_terminate_self();
		/*NOTREACHED*/
	}

	thread_mtx_unlock(thread);
}
856
857 /* Prototype, see justification above */
858 kern_return_t
859 act_set_state(
860 thread_t thread,
861 int flavor,
862 thread_state_t state,
863 mach_msg_type_number_t count);
864
865 kern_return_t
866 act_set_state(
867 thread_t thread,
868 int flavor,
869 thread_state_t state,
870 mach_msg_type_number_t count)
871 {
872 if (thread == current_thread())
873 return (KERN_INVALID_ARGUMENT);
874
875 return (thread_set_state(thread, flavor, state, count));
876
877 }
878
879 kern_return_t
880 act_set_state_from_user(
881 thread_t thread,
882 int flavor,
883 thread_state_t state,
884 mach_msg_type_number_t count)
885 {
886 if (thread == current_thread())
887 return (KERN_INVALID_ARGUMENT);
888
889 return (thread_set_state_from_user(thread, flavor, state, count));
890
891 }
892
893 kern_return_t
894 act_get_state(
895 thread_t thread,
896 int flavor,
897 thread_state_t state,
898 mach_msg_type_number_t *count)
899 {
900 if (thread == current_thread())
901 return (KERN_INVALID_ARGUMENT);
902
903 return (thread_get_state(thread, flavor, state, count));
904 }
905
/*
 * act_set_ast:
 *
 * Post an AST to the thread.  For the current thread the AST is
 * propagated directly; for a remote thread the thread lock is taken
 * and its CPU is poked if it is running.
 */
static void
act_set_ast(
	    thread_t	thread,
	    ast_t ast)
{
	spl_t	s = splsched();

	if (thread == current_thread()) {
		thread_ast_set(thread, ast);
		ast_propagate(thread->ast);
	} else {
		processor_t	processor;

		thread_lock(thread);
		thread_ast_set(thread, ast);
		processor = thread->last_processor;
		/* Only a thread currently on a CPU needs an explicit check;
		 * off-CPU threads see the AST on their next dispatch. */
		if ( processor != PROCESSOR_NULL &&
		     processor->state == PROCESSOR_RUNNING &&
		     processor->active_thread == thread )
			cause_ast_check(processor);
		thread_unlock(thread);
	}

	splx(s);
}
931
932 void
933 act_set_astbsd(
934 thread_t thread)
935 {
936 act_set_ast( thread, AST_BSD );
937 }
938
939 void
940 act_set_kperf(
941 thread_t thread)
942 {
943 /* safety check */
944 if (thread != current_thread())
945 if( !ml_get_interrupts_enabled() )
946 panic("unsafe act_set_kperf operation");
947
948 act_set_ast( thread, AST_KPERF );
949 }
950
951 #if CONFIG_MACF
952 void
953 act_set_astmacf(
954 thread_t thread)
955 {
956 act_set_ast( thread, AST_MACF);
957 }
958 #endif
959
960 void
961 set_astledger(thread_t thread)
962 {
963 act_set_ast(thread, AST_LEDGER);
964 }
965
966