/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Center for Software Science (CSS).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 *	Author: Bryan Ford, University of Utah CSS
 *
 *	Thread management routines
 */
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/alert.h>
#include <mach/rpc.h>
#include <mach/thread_act_server.h>

#include <kern/kern_types.h>
#include <kern/ast.h>
#include <kern/mach_param.h>
#include <kern/zalloc.h>
#include <kern/extmod_statistics.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/exception.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/machine.h>
#include <kern/spl.h>
#include <kern/syscall_subr.h>
#include <kern/sync_lock.h>
#include <kern/processor.h>
#include <kern/timer.h>
#include <kern/affinity.h>

#include <mach/rpc.h>

#include <security/mac_mach_internal.h>

void act_abort(thread_t);
void install_special_handler_locked(thread_t);
void special_handler_continue(void);

/*
 * Internal routine to mark a thread as started.
 * Always called with the thread locked.
 *
 * Note: function intentionally declared with the noinline attribute to
 * prevent multiple declaration of probe symbols in this file; we would
 * prefer "#pragma noinline", but gcc does not support it.
 * PR-6385749 -- the lwp-start probe should fire from within the context
 * of the newly created thread.  Commented out for now, in case we
 * turn it into a dead code probe.
 */
void
thread_start_internal(
    thread_t            thread)
{
    clear_wait(thread, THREAD_AWAKENED);
    thread->started = TRUE;
    // DTRACE_PROC1(lwp__start, thread_t, thread);
}

/*
 * Internal routine to terminate a thread.
 * Sometimes called with task already locked.
 */
kern_return_t
thread_terminate_internal(
    thread_t            thread)
{
    kern_return_t       result = KERN_SUCCESS;

    thread_mtx_lock(thread);

    if (thread->active) {
        thread->active = FALSE;

        act_abort(thread);

        if (thread->started)
            clear_wait(thread, THREAD_INTERRUPTED);
        else {
            thread_start_internal(thread);
        }
    }
    else
        result = KERN_TERMINATED;

    if (thread->affinity_set != NULL)
        thread_affinity_terminate(thread);

    thread_mtx_unlock(thread);

    if (thread != current_thread() && result == KERN_SUCCESS)
        thread_wait(thread, FALSE);

    return (result);
}

/*
 * Terminate a thread.
 */
kern_return_t
thread_terminate(
    thread_t            thread)
{
    kern_return_t       result;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (thread->task == kernel_task &&
        thread != current_thread())
        return (KERN_FAILURE);

    result = thread_terminate_internal(thread);

    /*
     * If a kernel thread is terminating itself, force an AST here.
     * Kernel threads don't normally pass through the AST checking
     * code - and all threads finish their own termination in the
     * special handler APC.
     */
    if (thread->task == kernel_task) {
        ml_set_interrupts_enabled(FALSE);
        ast_taken(AST_APC, TRUE);
        panic("thread_terminate");
    }

    return (result);
}

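/*
 * Illustrative sketch (not part of this file's build): how an in-kernel
 * client typically drives the termination path above.  A kernel_task thread
 * cannot be terminated from another thread (thread_terminate() returns
 * KERN_FAILURE for that case), so it ends itself; the forced AST_APC then
 * runs the special handler, which calls thread_terminate_self() and never
 * returns.  The helper names example_worker and example_spawn_worker are
 * hypothetical.
 *
 *	#include <kern/thread.h>
 *
 *	static void
 *	example_worker(void *parameter __unused, wait_result_t wr __unused)
 *	{
 *		// ... do the work this thread was created for ...
 *
 *		// Self-termination: forces the AST and does not return.
 *		thread_terminate(current_thread());
 *		// NOTREACHED
 *	}
 *
 *	static kern_return_t
 *	example_spawn_worker(void)
 *	{
 *		thread_t	thread;
 *		kern_return_t	kr;
 *
 *		kr = kernel_thread_start(example_worker, NULL, &thread);
 *		if (kr != KERN_SUCCESS)
 *			return (kr);
 *
 *		// kernel_thread_start() hands back a reference; drop it.
 *		// The thread keeps running until it terminates itself.
 *		thread_deallocate(thread);
 *		return (KERN_SUCCESS);
 *	}
 */
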
/*
 * Suspend execution of the specified thread.
 * This is a recursive-style suspension of the thread, a count of
 * suspends is maintained.
 *
 * Called with thread mutex held.
 */
void
thread_hold(
    register thread_t   thread)
{
    if (thread->suspend_count++ == 0) {
        install_special_handler(thread);
        if (thread->started)
            thread_wakeup_one(&thread->suspend_count);
    }
}

/*
 * Decrement internal suspension count, setting thread
 * runnable when count falls to zero.
 *
 * Called with thread mutex held.
 */
void
thread_release(
    register thread_t   thread)
{
    if (thread->suspend_count > 0 &&
        --thread->suspend_count == 0) {
        if (thread->started)
            thread_wakeup_one(&thread->suspend_count);
        else {
            thread_start_internal(thread);
        }
    }
}

kern_return_t
thread_suspend(
    register thread_t   thread)
{
    thread_t            self = current_thread();
    kern_return_t       result = KERN_SUCCESS;

    if (thread == THREAD_NULL || thread->task == kernel_task)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread->user_stop_count++ == 0 &&
            thread->suspend_count++ == 0) {
            install_special_handler(thread);
            if (thread != self)
                thread_wakeup_one(&thread->suspend_count);
        }
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    if (thread != self && result == KERN_SUCCESS)
        thread_wait(thread, TRUE);

    return (result);
}

kern_return_t
thread_resume(
    register thread_t   thread)
{
    kern_return_t       result = KERN_SUCCESS;

    if (thread == THREAD_NULL || thread->task == kernel_task)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread->user_stop_count > 0) {
            if (--thread->user_stop_count == 0 &&
                --thread->suspend_count == 0) {
                if (thread->started)
                    thread_wakeup_one(&thread->suspend_count);
                else {
                    thread_start_internal(thread);
                }
            }
        }
        else
            result = KERN_FAILURE;
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    return (result);
}

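/*
 * Illustrative sketch (not part of this file's build): thread_suspend() and
 * thread_resume() as seen by a user-space caller of the MIG-generated
 * routines that land here.  Suspensions are counted, so every
 * thread_suspend() must be balanced by a thread_resume() before the target
 * runs again.  The helper name example_pause_other_threads is hypothetical.
 *
 *	#include <mach/mach.h>
 *
 *	static kern_return_t
 *	example_pause_other_threads(void)
 *	{
 *		thread_act_array_t	threads;
 *		mach_msg_type_number_t	count, i;
 *		thread_t		self = mach_thread_self();
 *		kern_return_t		kr;
 *
 *		kr = task_threads(mach_task_self(), &threads, &count);
 *		if (kr != KERN_SUCCESS)
 *			return (kr);
 *
 *		for (i = 0; i < count; i++)
 *			if (threads[i] != self)
 *				thread_suspend(threads[i]);	// user_stop_count 0 -> 1
 *
 *		// ... inspect the stopped threads here ...
 *
 *		for (i = 0; i < count; i++) {
 *			if (threads[i] != self)
 *				thread_resume(threads[i]);	// user_stop_count 1 -> 0
 *			mach_port_deallocate(mach_task_self(), threads[i]);
 *		}
 *		vm_deallocate(mach_task_self(), (vm_address_t)threads,
 *		    count * sizeof(thread_act_t));
 *		mach_port_deallocate(mach_task_self(), self);
 *		return (KERN_SUCCESS);
 *	}
 */
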
/*
 *	thread_depress_abort:
 *
 *	Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort(
    register thread_t   thread)
{
    kern_return_t       result;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active)
        result = thread_depress_abort_internal(thread);
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    return (result);
}

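/*
 * Illustrative sketch (not part of this file's build): priority depression
 * is normally entered from user space via thread_switch() with
 * SWITCH_OPTION_DEPRESS, and thread_depress_abort() lets another thread cut
 * the depression short once whatever the donor was waiting for is ready.
 * A minimal user-space sequence, assuming a send right to the depressed
 * thread is at hand; example_wake_donor is a hypothetical helper name.
 *
 *	#include <mach/mach.h>
 *	#include <mach/thread_switch.h>
 *
 *	// Depressed side: yield and run at depressed priority for up to 10 ms.
 *	thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DEPRESS, 10);
 *
 *	// Another thread: end the depression early.
 *	static kern_return_t
 *	example_wake_donor(thread_act_t donor)
 *	{
 *		return (thread_depress_abort(donor));
 *	}
 */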

/*
 * Indicate that the activation should run its
 * special handler to detect a condition.
 *
 * Called with thread mutex held.
 */
void
act_abort(
    thread_t    thread)
{
    spl_t   s = splsched();

    thread_lock(thread);

    if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
        thread->sched_flags |= TH_SFLAG_ABORT;
        install_special_handler_locked(thread);
    }
    else
        thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;

    thread_unlock(thread);
    splx(s);
}

kern_return_t
thread_abort(
    register thread_t   thread)
{
    kern_return_t       result = KERN_SUCCESS;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active) {
        act_abort(thread);
        clear_wait(thread, THREAD_INTERRUPTED);
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    return (result);
}

kern_return_t
thread_abort_safely(
    thread_t            thread)
{
    kern_return_t       result = KERN_SUCCESS;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active) {
        spl_t   s = splsched();

        thread_lock(thread);
        if (!thread->at_safe_point ||
            clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
            if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
                thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
                install_special_handler_locked(thread);
            }
        }
        thread_unlock(thread);
        splx(s);
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    return (result);
}

/*** backward compatibility hacks ***/
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <ipc/ipc_port.h>

kern_return_t
thread_info(
    thread_t                thread,
    thread_flavor_t         flavor,
    thread_info_t           thread_info_out,
    mach_msg_type_number_t  *thread_info_count)
{
    kern_return_t           result;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active)
        result = thread_info_internal(
                        thread, flavor, thread_info_out, thread_info_count);
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    return (result);
}

kern_return_t
thread_get_state(
    register thread_t       thread,
    int                     flavor,
    thread_state_t          state,          /* pointer to OUT array */
    mach_msg_type_number_t  *state_count)   /* IN/OUT */
{
    kern_return_t           result = KERN_SUCCESS;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread != current_thread()) {
            thread_hold(thread);

            thread_mtx_unlock(thread);

            if (thread_stop(thread)) {
                thread_mtx_lock(thread);
                result = machine_thread_get_state(
                                    thread, flavor, state, state_count);
                thread_unstop(thread);
            }
            else {
                thread_mtx_lock(thread);
                result = KERN_ABORTED;
            }

            thread_release(thread);
        }
        else
            result = machine_thread_get_state(
                                thread, flavor, state, state_count);
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    return (result);
}

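/*
 * Illustrative sketch (not part of this file's build): fetching register
 * state from user space goes through the MIG routine that lands in
 * thread_get_state() above; the target is held and stopped around the
 * machine_thread_get_state() call, so the caller sees a consistent snapshot.
 * The flavor shown is the x86-64 one; other architectures use their own
 * flavors and state structures.  example_read_pc is a hypothetical helper
 * name.
 *
 *	#include <stdint.h>
 *	#include <mach/mach.h>
 *
 *	static kern_return_t
 *	example_read_pc(thread_act_t target, uint64_t *pc)
 *	{
 *		x86_thread_state64_t	state;
 *		mach_msg_type_number_t	count = x86_THREAD_STATE64_COUNT;
 *		kern_return_t		kr;
 *
 *		kr = thread_get_state(target, x86_THREAD_STATE64,
 *		    (thread_state_t)&state, &count);
 *		if (kr == KERN_SUCCESS)
 *			*pc = state.__rip;
 *		return (kr);
 *	}
 */
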
/*
 * Change thread's machine-dependent state.  Called with nothing
 * locked.  Returns same way.
 */
static kern_return_t
thread_set_state_internal(
    register thread_t       thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  state_count,
    boolean_t               from_user)
{
    kern_return_t           result = KERN_SUCCESS;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread != current_thread()) {
            thread_hold(thread);

            thread_mtx_unlock(thread);

            if (thread_stop(thread)) {
                thread_mtx_lock(thread);
                result = machine_thread_set_state(
                                    thread, flavor, state, state_count);
                thread_unstop(thread);
            }
            else {
                thread_mtx_lock(thread);
                result = KERN_ABORTED;
            }

            thread_release(thread);
        }
        else
            result = machine_thread_set_state(
                                thread, flavor, state, state_count);
    }
    else
        result = KERN_TERMINATED;

    if ((result == KERN_SUCCESS) && from_user)
        extmod_statistics_incr_thread_set_state(thread);

    thread_mtx_unlock(thread);

    return (result);
}

/* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
kern_return_t
thread_set_state(
    register thread_t       thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  state_count);

kern_return_t
thread_set_state(
    register thread_t       thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  state_count)
{
    return thread_set_state_internal(thread, flavor, state, state_count, FALSE);
}

kern_return_t
thread_set_state_from_user(
    register thread_t       thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  state_count)
{
    return thread_set_state_internal(thread, flavor, state, state_count, TRUE);
}

/*
 * Kernel-internal "thread" interfaces used outside this file:
 */

/* Initialize (or re-initialize) a thread state.  Called from execve
 * with nothing locked, returns same way.
 */
kern_return_t
thread_state_initialize(
    register thread_t       thread)
{
    kern_return_t           result = KERN_SUCCESS;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread != current_thread()) {
            thread_hold(thread);

            thread_mtx_unlock(thread);

            if (thread_stop(thread)) {
                thread_mtx_lock(thread);
                result = machine_thread_state_initialize( thread );
                thread_unstop(thread);
            }
            else {
                thread_mtx_lock(thread);
                result = KERN_ABORTED;
            }

            thread_release(thread);
        }
        else
            result = machine_thread_state_initialize( thread );
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    return (result);
}


kern_return_t
thread_dup(
    register thread_t   target)
{
    thread_t            self = current_thread();
    kern_return_t       result = KERN_SUCCESS;

    if (target == THREAD_NULL || target == self)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(target);

    if (target->active) {
        thread_hold(target);

        thread_mtx_unlock(target);

        if (thread_stop(target)) {
            thread_mtx_lock(target);
            result = machine_thread_dup(self, target);
            if (self->affinity_set != AFFINITY_SET_NULL)
                thread_affinity_dup(self, target);
            thread_unstop(target);
        }
        else {
            thread_mtx_lock(target);
            result = KERN_ABORTED;
        }

        thread_release(target);
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(target);

    return (result);
}


/*
 *	thread_setstatus:
 *
 *	Set the status of the specified thread.
 *	Called with (and returns with) no locks held.
 */
kern_return_t
thread_setstatus(
    register thread_t       thread,
    int                     flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  count)
{

    return (thread_set_state(thread, flavor, tstate, count));
}

/*
 *	thread_getstatus:
 *
 *	Get the status of the specified thread.
 */
kern_return_t
thread_getstatus(
    register thread_t       thread,
    int                     flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  *count)
{
    return (thread_get_state(thread, flavor, tstate, count));
}

/*
 * install_special_handler:
 *
 * Install the special returnhandler that handles suspension and
 * termination, if it hasn't been installed already.
 *
 * Called with the thread mutex held.
 */
void
install_special_handler(
    thread_t        thread)
{
    spl_t       s = splsched();

    thread_lock(thread);
    install_special_handler_locked(thread);
    thread_unlock(thread);
    splx(s);
}

/*
 * install_special_handler_locked:
 *
 * Do the work of installing the special_handler.
 *
 * Called with the thread mutex and scheduling lock held.
 */
void
install_special_handler_locked(
    thread_t            thread)
{
    ReturnHandler       **rh;

    /* The work handler must always be the last ReturnHandler on the list,
       because it can do tricky things like detach the thr_act. */
    for (rh = &thread->handlers; *rh; rh = &(*rh)->next)
        continue;

    if (rh != &thread->special_handler.next)
        *rh = &thread->special_handler;

    /*
     * Temporarily undepress, so target has
     * a chance to do locking required to
     * block itself in special_handler().
     */
    if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)
        SCHED(compute_priority)(thread, TRUE);

    thread_ast_set(thread, AST_APC);

    if (thread == current_thread())
        ast_propagate(thread->ast);
    else {
        processor_t     processor = thread->last_processor;

        if (processor != PROCESSOR_NULL &&
            processor->state == PROCESSOR_RUNNING &&
            processor->active_thread == thread)
            cause_ast_check(processor);
    }
}

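/*
 * Illustrative sketch (not part of this file's build): the handler list
 * walked above is a singly linked list of ReturnHandler nodes hanging off
 * the thread.  The shape below is a paraphrase of the declaration in
 * osfmk/kern/thread.h of this era, shown only to make the list-append in
 * install_special_handler_locked() easier to follow; consult thread.h for
 * the authoritative definition.
 *
 *	struct ReturnHandler {
 *		struct ReturnHandler	*next;
 *		void			(*handler)(
 *					    struct ReturnHandler	*rh,
 *					    struct thread		*thread);
 *	};
 *
 * install_special_handler_locked() walks &thread->handlers to the end and,
 * if the thread's built-in special_handler node is not already last, appends
 * it; act_execute_returnhandlers(), below, pops nodes one at a time and
 * calls (*rh->handler)(rh, thread).
 */
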
/*
 * Activation control support routines internal to this file:
 */

void
act_execute_returnhandlers(void)
{
    thread_t    thread = current_thread();

    thread_ast_clear(thread, AST_APC);
    spllo();

    for (;;) {
        ReturnHandler   *rh;

        thread_mtx_lock(thread);

        (void)splsched();
        thread_lock(thread);

        rh = thread->handlers;
        if (rh != NULL) {
            thread->handlers = rh->next;

            thread_unlock(thread);
            spllo();

            thread_mtx_unlock(thread);

            /* Execute it */
            (*rh->handler)(rh, thread);
        }
        else
            break;
    }

    thread_unlock(thread);
    spllo();

    thread_mtx_unlock(thread);
}

/*
 * special_handler_continue
 *
 * Continuation routine for the special handler blocks.  It checks
 * to see whether there have been any new suspensions.  If so, it
 * installs the special handler again.  Otherwise, it checks to see
 * if the current depression needs to be re-instated (it may have
 * been temporarily removed in order to get to this point in a hurry).
 */
void
special_handler_continue(void)
{
    thread_t        thread = current_thread();

    thread_mtx_lock(thread);

    if (thread->suspend_count > 0)
        install_special_handler(thread);
    else {
        spl_t           s = splsched();

        thread_lock(thread);
        if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
            processor_t     myprocessor = thread->last_processor;

            thread->sched_pri = DEPRESSPRI;
            myprocessor->current_pri = thread->sched_pri;
        }
        thread_unlock(thread);
        splx(s);
    }

    thread_mtx_unlock(thread);

    thread_exception_return();
    /*NOTREACHED*/
}

/*
 * special_handler	- handles suspension, termination.  Called
 * with nothing locked.  Returns (if it returns) the same way.
 */
void
special_handler(
    __unused ReturnHandler  *rh,
    thread_t                thread)
{
    spl_t       s;

    thread_mtx_lock(thread);

    s = splsched();
    thread_lock(thread);
    thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
    thread_unlock(thread);
    splx(s);

    /*
     * If we're suspended, go to sleep and wait for someone to wake us up.
     */
    if (thread->active) {
        if (thread->suspend_count > 0) {
            if (thread->handlers == NULL) {
                assert_wait(&thread->suspend_count, THREAD_ABORTSAFE);
                thread_mtx_unlock(thread);
                thread_block((thread_continue_t)special_handler_continue);
                /*NOTREACHED*/
            }

            thread_mtx_unlock(thread);

            special_handler_continue();
            /*NOTREACHED*/
        }
    }
    else {
        thread_mtx_unlock(thread);

        thread_terminate_self();
        /*NOTREACHED*/
    }

    thread_mtx_unlock(thread);
}

/* Prototype, see justification above */
kern_return_t
act_set_state(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  count);

kern_return_t
act_set_state(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  count)
{
    if (thread == current_thread())
        return (KERN_INVALID_ARGUMENT);

    return (thread_set_state(thread, flavor, state, count));
}

kern_return_t
act_set_state_from_user(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  count)
{
    if (thread == current_thread())
        return (KERN_INVALID_ARGUMENT);

    return (thread_set_state_from_user(thread, flavor, state, count));
}

kern_return_t
act_get_state(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  *count)
{
    if (thread == current_thread())
        return (KERN_INVALID_ARGUMENT);

    return (thread_get_state(thread, flavor, state, count));
}

static void
act_set_ast(
    thread_t    thread,
    ast_t       ast)
{
    spl_t       s = splsched();

    if (thread == current_thread()) {
        thread_ast_set(thread, ast);
        ast_propagate(thread->ast);
    }
    else {
        processor_t     processor;

        thread_lock(thread);
        thread_ast_set(thread, ast);
        processor = thread->last_processor;
        if (processor != PROCESSOR_NULL &&
            processor->state == PROCESSOR_RUNNING &&
            processor->active_thread == thread)
            cause_ast_check(processor);
        thread_unlock(thread);
    }

    splx(s);
}

void
act_set_astbsd(
    thread_t    thread)
{
    act_set_ast( thread, AST_BSD );
}

void
act_set_apc(
    thread_t    thread)
{
    act_set_ast( thread, AST_APC );
}

void
act_set_kperf(
    thread_t    thread)
{
    /* safety check */
    if (thread != current_thread())
        if (!ml_get_interrupts_enabled())
            panic("unsafe act_set_kperf operation");

    act_set_ast( thread, AST_KPERF );
}

#if CONFIG_MACF
void
act_set_astmacf(
    thread_t    thread)
{
    act_set_ast( thread, AST_MACF);
}
#endif