]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/thread_act.c
455a0fb016cb13a1c84dce2dcbb3787d5a9155ef
[apple/xnu.git] / osfmk / kern / thread_act.c
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Copyright (c) 1993 The University of Utah and
33 * the Center for Software Science (CSS). All rights reserved.
34 *
35 * Permission to use, copy, modify and distribute this software and its
36 * documentation is hereby granted, provided that both the copyright
37 * notice and this permission notice appear in all copies of the
38 * software, derivative works or modified versions, and any portions
39 * thereof, and that both notices appear in supporting documentation.
40 *
41 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
42 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
43 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
44 *
45 * CSS requests users of this software to return to css-dist@cs.utah.edu any
46 * improvements that they make and grant CSS redistribution rights.
47 *
48 * Author: Bryan Ford, University of Utah CSS
49 *
50 * Thread management routines
51 */
52 #include <mach/mach_types.h>
53 #include <mach/kern_return.h>
54 #include <mach/alert.h>
55 #include <mach/rpc.h>
56 #include <mach/thread_act_server.h>
57
58 #include <kern/kern_types.h>
59 #include <kern/ast.h>
60 #include <kern/mach_param.h>
61 #include <kern/zalloc.h>
62 #include <kern/extmod_statistics.h>
63 #include <kern/thread.h>
64 #include <kern/task.h>
65 #include <kern/sched_prim.h>
66 #include <kern/misc_protos.h>
67 #include <kern/assert.h>
68 #include <kern/exception.h>
69 #include <kern/ipc_mig.h>
70 #include <kern/ipc_tt.h>
71 #include <kern/machine.h>
72 #include <kern/spl.h>
73 #include <kern/syscall_subr.h>
74 #include <kern/sync_lock.h>
75 #include <kern/processor.h>
76 #include <kern/timer.h>
77 #include <kern/affinity.h>
78
79 #include <mach/rpc.h>
80
81 void act_abort(thread_t);
82 void install_special_handler_locked(thread_t);
83 void special_handler_continue(void);
84
85 /*
86 * Internal routine to mark a thread as started.
87 * Always called with the thread locked.
88 *
89 * Note: function intentionally declared with the noinline attribute to
90 * prevent multiple declaration of probe symbols in this file; we would
91 * prefer "#pragma noinline", but gcc does not support it.
92 * PR-6385749 -- the lwp-start probe should fire from within the context
93 * of the newly created thread. Commented out for now, in case we
94 * turn it into a dead code probe.
95 */
void
thread_start_internal(
	thread_t			thread)
{
	/* Make the thread runnable and mark it started; callers rely on
	 * the wait being cleared before the started flag is observed set. */
	clear_wait(thread, THREAD_AWAKENED);
	thread->started = TRUE;
	// DTRACE_PROC1(lwp__start, thread_t, thread);
}
104
105 /*
106 * Internal routine to terminate a thread.
107 * Sometimes called with task already locked.
108 */
kern_return_t
thread_terminate_internal(
	thread_t			thread)
{
	kern_return_t		result = KERN_SUCCESS;

	thread_mtx_lock(thread);

	if (thread->active) {
		thread->active = FALSE;

		/* Flag the abort and queue the special handler. */
		act_abort(thread);

		if (thread->started)
			clear_wait(thread, THREAD_INTERRUPTED);
		else {
			/* Never-started thread: start it so it can run the
			 * special handler and finish its own termination. */
			thread_start_internal(thread);
		}
	}
	else
		result = KERN_TERMINATED;	/* termination already underway */

	/* Detach from any affinity set, regardless of prior state. */
	if (thread->affinity_set != NULL)
		thread_affinity_terminate(thread);

	thread_mtx_unlock(thread);

	/* Wait for the target to stop running, unless it is the caller
	 * itself or termination was already in progress. */
	if (thread != current_thread() && result == KERN_SUCCESS)
		thread_wait(thread);

	return (result);
}
141
142 /*
143 * Terminate a thread.
144 */
kern_return_t
thread_terminate(
	thread_t		thread)
{
	kern_return_t	result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	/* Kernel threads may only terminate themselves. */
	if (	thread->task == kernel_task		&&
			thread != current_thread()			)
		return (KERN_FAILURE);

	result = thread_terminate_internal(thread);

	/*
	 * If a kernel thread is terminating itself, force an AST here.
	 * Kernel threads don't normally pass through the AST checking
	 * code - and all threads finish their own termination in the
	 * special handler APC.
	 */
	if (thread->task == kernel_task) {
		ml_set_interrupts_enabled(FALSE);
		ast_taken(AST_APC, TRUE);
		panic("thread_terminate");	/* the APC never returns here */
	}

	return (result);
}
174
175 /*
176 * Suspend execution of the specified thread.
177 * This is a recursive-style suspension of the thread, a count of
178 * suspends is maintained.
179 *
180 * Called with thread mutex held.
181 */
void
thread_hold(
	register thread_t	thread)
{
	if (thread->suspend_count++ == 0) {
		/* First suspension: arrange for the thread to park itself
		 * in special_handler(), and wake it if it is already
		 * blocked there waiting on suspend_count. */
		install_special_handler(thread);
		if (thread->started)
			thread_wakeup_one(&thread->suspend_count);
	}
}
192
193 /*
194 * Decrement internal suspension count, setting thread
195 * runnable when count falls to zero.
196 *
197 * Called with thread mutex held.
198 */
void
thread_release(
	register thread_t	thread)
{
	if (	thread->suspend_count > 0		&&
			--thread->suspend_count == 0	) {
		if (thread->started)
			/* Wake the thread blocked in the special handler. */
			thread_wakeup_one(&thread->suspend_count);
		else {
			/* Not yet started: make it runnable for the first time. */
			thread_start_internal(thread);
		}
	}
}
212
kern_return_t
thread_suspend(
	register thread_t	thread)
{
	thread_t			self = current_thread();
	kern_return_t		result = KERN_SUCCESS;

	/* Kernel-task threads cannot be suspended via this interface. */
	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		/*
		 * First user-level suspension also takes the first internal
		 * suspension.  Note the short-circuit: suspend_count is only
		 * bumped when user_stop_count was previously zero.
		 */
		if (	thread->user_stop_count++ == 0		&&
				thread->suspend_count++ == 0		) {
			install_special_handler(thread);
			if (thread != self)
				thread_wakeup_one(&thread->suspend_count);
		}
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	/* Wait until the target has actually stopped (unless it is us). */
	if (thread != self && result == KERN_SUCCESS)
		thread_wait(thread);

	return (result);
}
243
kern_return_t
thread_resume(
	register thread_t	thread)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread->user_stop_count > 0) {
			/*
			 * Last user-level resume also drops the matching internal
			 * suspension; the short-circuit mirrors thread_suspend().
			 */
			if (	--thread->user_stop_count == 0		&&
					--thread->suspend_count == 0		) {
				if (thread->started)
					thread_wakeup_one(&thread->suspend_count);
				else {
					thread_start_internal(thread);
				}
			}
		}
		else
			/* Resume without a matching suspend. */
			result = KERN_FAILURE;
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
276
277 /*
278 * thread_depress_abort:
279 *
280 * Prematurely abort priority depression if there is one.
281 */
282 kern_return_t
283 thread_depress_abort(
284 register thread_t thread)
285 {
286 kern_return_t result;
287
288 if (thread == THREAD_NULL)
289 return (KERN_INVALID_ARGUMENT);
290
291 thread_mtx_lock(thread);
292
293 if (thread->active)
294 result = thread_depress_abort_internal(thread);
295 else
296 result = KERN_TERMINATED;
297
298 thread_mtx_unlock(thread);
299
300 return (result);
301 }
302
303
304 /*
305 * Indicate that the activation should run its
306 * special handler to detect a condition.
307 *
308 * Called with thread mutex held.
309 */
void
act_abort(
	thread_t	thread)
{
	spl_t		s = splsched();

	thread_lock(thread);

	if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
		/* First abort request: flag it and queue the special handler. */
		thread->sched_flags |= TH_SFLAG_ABORT;
		install_special_handler_locked(thread);
	}
	else
		/* Already aborting: clear the "safely" bit so a pending safe
		 * abort is upgraded to an unconditional one. */
		thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;

	thread_unlock(thread);
	splx(s);
}
328
329 kern_return_t
330 thread_abort(
331 register thread_t thread)
332 {
333 kern_return_t result = KERN_SUCCESS;
334
335 if (thread == THREAD_NULL)
336 return (KERN_INVALID_ARGUMENT);
337
338 thread_mtx_lock(thread);
339
340 if (thread->active) {
341 act_abort(thread);
342 clear_wait(thread, THREAD_INTERRUPTED);
343 }
344 else
345 result = KERN_TERMINATED;
346
347 thread_mtx_unlock(thread);
348
349 return (result);
350 }
351
kern_return_t
thread_abort_safely(
	thread_t		thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		spl_t		s = splsched();

		thread_lock(thread);
		/*
		 * If the thread is at an abort-safe point, interrupt its wait
		 * immediately.  Otherwise mark a deferred (safe) abort to be
		 * delivered by the special handler when it next becomes safe.
		 */
		if (!thread->at_safe_point ||
				clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
			if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
				thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
				install_special_handler_locked(thread);
			}
		}
		thread_unlock(thread);
		splx(s);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
384
385 /*** backward compatibility hacks ***/
386 #include <mach/thread_info.h>
387 #include <mach/thread_special_ports.h>
388 #include <ipc/ipc_port.h>
389
390 kern_return_t
391 thread_info(
392 thread_t thread,
393 thread_flavor_t flavor,
394 thread_info_t thread_info_out,
395 mach_msg_type_number_t *thread_info_count)
396 {
397 kern_return_t result;
398
399 if (thread == THREAD_NULL)
400 return (KERN_INVALID_ARGUMENT);
401
402 thread_mtx_lock(thread);
403
404 if (thread->active)
405 result = thread_info_internal(
406 thread, flavor, thread_info_out, thread_info_count);
407 else
408 result = KERN_TERMINATED;
409
410 thread_mtx_unlock(thread);
411
412 return (result);
413 }
414
kern_return_t
thread_get_state(
	register thread_t		thread,
	int						flavor,
	thread_state_t			state,			/* pointer to OUT array */
	mach_msg_type_number_t	*state_count)	/*IN/OUT*/
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			/* Suspend the target so its machine state is stable. */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread)) {
				thread_mtx_lock(thread);
				result = machine_thread_get_state(
										thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				/* The stop was aborted before the thread halted. */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			/* Reading our own state requires no stop protocol. */
			result = machine_thread_get_state(
									thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
459
460 /*
461 * Change thread's machine-dependent state. Called with nothing
462 * locked. Returns same way.
463 */
/*
 * Change thread's machine-dependent state.  Called with nothing
 * locked.  Returns same way.  from_user distinguishes external
 * (MIG) callers for extmod statistics accounting.
 */
static kern_return_t
thread_set_state_internal(
	register thread_t		thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	state_count,
	boolean_t				from_user)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			/* Suspend and stop the target before overwriting its
			 * machine state. */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_state(
										thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				/* The stop was aborted before the thread halted. */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_set_state(
									thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	/* Account external (user-initiated) modifications. */
	if ((result == KERN_SUCCESS) && from_user)
		extmod_statistics_incr_thread_set_state(thread);

	thread_mtx_unlock(thread);

	return (result);
}
512
513 /* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
514 kern_return_t
515 thread_set_state(
516 register thread_t thread,
517 int flavor,
518 thread_state_t state,
519 mach_msg_type_number_t state_count);
520
521 kern_return_t
522 thread_set_state(
523 register thread_t thread,
524 int flavor,
525 thread_state_t state,
526 mach_msg_type_number_t state_count)
527 {
528 return thread_set_state_internal(thread, flavor, state, state_count, FALSE);
529 }
530
531 kern_return_t
532 thread_set_state_from_user(
533 register thread_t thread,
534 int flavor,
535 thread_state_t state,
536 mach_msg_type_number_t state_count)
537 {
538 return thread_set_state_internal(thread, flavor, state, state_count, TRUE);
539 }
540
541 /*
542 * Kernel-internal "thread" interfaces used outside this file:
543 */
544
545 /* Initialize (or re-initialize) a thread state. Called from execve
546 * with nothing locked, returns same way.
547 */
kern_return_t
thread_state_initialize(
	register thread_t		thread)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			/* Suspend and stop the target before resetting its
			 * machine state. */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread)) {
				thread_mtx_lock(thread);
				result = machine_thread_state_initialize( thread );
				thread_unstop(thread);
			}
			else {
				/* The stop was aborted before the thread halted. */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_state_initialize( thread );
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
587
588
/*
 * Copy the caller's machine state (and affinity, if any) into the
 * target thread.  Called with nothing locked; returns same way.
 */
kern_return_t
thread_dup(
	register thread_t	target)
{
	thread_t			self = current_thread();
	kern_return_t		result = KERN_SUCCESS;

	if (target == THREAD_NULL || target == self)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(target);

	if (target->active) {
		/* Suspend and stop the target so its machine state can be
		 * overwritten safely. */
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(self, target);
			/* Propagate the caller's affinity set as well. */
			if (self->affinity_set != AFFINITY_SET_NULL)
				thread_affinity_dup(self, target);
			thread_unstop(target);
		}
		else {
			/* The stop was aborted before the target halted. */
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(target);

	return (result);
}
627
628
629 /*
630 * thread_setstatus:
631 *
632 * Set the status of the specified thread.
633 * Called with (and returns with) no locks held.
634 */
635 kern_return_t
636 thread_setstatus(
637 register thread_t thread,
638 int flavor,
639 thread_state_t tstate,
640 mach_msg_type_number_t count)
641 {
642
643 return (thread_set_state(thread, flavor, tstate, count));
644 }
645
646 /*
647 * thread_getstatus:
648 *
649 * Get the status of the specified thread.
650 */
651 kern_return_t
652 thread_getstatus(
653 register thread_t thread,
654 int flavor,
655 thread_state_t tstate,
656 mach_msg_type_number_t *count)
657 {
658 return (thread_get_state(thread, flavor, tstate, count));
659 }
660
661 /*
662 * install_special_handler:
663 *
664 * Install the special returnhandler that handles suspension and
665 * termination, if it hasn't been installed already.
666 *
667 * Called with the thread mutex held.
668 */
void
install_special_handler(
	thread_t		thread)
{
	spl_t		s = splsched();

	/* Take the scheduling lock (at splsched) around the locked form. */
	thread_lock(thread);
	install_special_handler_locked(thread);
	thread_unlock(thread);
	splx(s);
}
680
681 /*
682 * install_special_handler_locked:
683 *
684 * Do the work of installing the special_handler.
685 *
686 * Called with the thread mutex and scheduling lock held.
687 */
void
install_special_handler_locked(
	thread_t				thread)
{
	ReturnHandler	**rh;

	/* The work handler must always be the last ReturnHandler on the list,
		because it can do tricky things like detach the thr_act. */
	for (rh = &thread->handlers; *rh; rh = &(*rh)->next)
		continue;

	/* Append only if the special handler is not already queued. */
	if (rh != &thread->special_handler.next)
		*rh = &thread->special_handler;

	/*
	 * Temporarily undepress, so target has
	 * a chance to do locking required to
	 * block itself in special_handler().
	 */
	if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)
		SCHED(compute_priority)(thread, TRUE);

	thread_ast_set(thread, AST_APC);

	/* Ensure the AST is noticed: propagate locally, or poke the
	 * processor the target is currently running on. */
	if (thread == current_thread())
		ast_propagate(thread->ast);
	else {
		processor_t		processor = thread->last_processor;

		if (	processor != PROCESSOR_NULL					&&
				processor->state == PROCESSOR_RUNNING		&&
				processor->active_thread == thread			)
			cause_ast_check(processor);
	}
}
723
724 /*
725 * Activation control support routines internal to this file:
726 */
727
/*
 * Run all queued ReturnHandlers for the current thread, draining
 * the list one entry at a time with both locks dropped around each
 * handler invocation.
 */
void
act_execute_returnhandlers(void)
{
	thread_t	thread = current_thread();

	thread_ast_clear(thread, AST_APC);
	spllo();

	for (;;) {
		ReturnHandler	*rh;

		/* Mutex first, then scheduling lock, to pop one handler. */
		thread_mtx_lock(thread);

		(void)splsched();
		thread_lock(thread);

		rh = thread->handlers;
		if (rh != NULL) {
			thread->handlers = rh->next;

			thread_unlock(thread);
			spllo();

			thread_mtx_unlock(thread);

			/* Execute it */
			(*rh->handler)(rh, thread);
		}
		else
			break;
	}

	/* Loop exits with both locks held; release them here. */
	thread_unlock(thread);
	spllo();

	thread_mtx_unlock(thread);
}
765
766 /*
767 * special_handler_continue
768 *
769 * Continuation routine for the special handler blocks. It checks
770 * to see whether there has been any new suspensions. If so, it
771 * installs the special handler again. Otherwise, it checks to see
772 * if the current depression needs to be re-instated (it may have
773 * been temporarily removed in order to get to this point in a hurry).
774 */
void
special_handler_continue(void)
{
	thread_t		thread = current_thread();

	thread_mtx_lock(thread);

	if (thread->suspend_count > 0)
		/* New (or still pending) suspension: queue the special
		 * handler again so we park in special_handler(). */
		install_special_handler(thread);
	else {
		spl_t			s = splsched();

		thread_lock(thread);
		if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
			/* Re-instate the priority depression that was lifted
			 * temporarily by install_special_handler_locked(). */
			processor_t		myprocessor = thread->last_processor;

			thread->sched_pri = DEPRESSPRI;
			myprocessor->current_pri = thread->sched_pri;
		}
		thread_unlock(thread);
		splx(s);
	}

	thread_mtx_unlock(thread);

	thread_exception_return();
	/*NOTREACHED*/
}
803
804 /*
805 * special_handler - handles suspension, termination. Called
806 * with nothing locked. Returns (if it returns) the same way.
807 */
void
special_handler(
	__unused ReturnHandler	*rh,
	thread_t				thread)
{
	spl_t		s;

	thread_mtx_lock(thread);

	/* Clear pending abort state; the abort has now been delivered. */
	s = splsched();
	thread_lock(thread);
	thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
	thread_unlock(thread);
	splx(s);

	/*
	 * If we're suspended, go to sleep and wait for someone to wake us up.
	 */
	if (thread->active) {
		if (thread->suspend_count > 0) {
			if (thread->handlers == NULL) {
				assert_wait(&thread->suspend_count, THREAD_ABORTSAFE);
				thread_mtx_unlock(thread);
				thread_block((thread_continue_t)special_handler_continue);
				/*NOTREACHED*/
			}

			/* More handlers queued: let them run first, then
			 * re-check suspension via the continuation. */
			thread_mtx_unlock(thread);

			special_handler_continue();
			/*NOTREACHED*/
		}
	}
	else {
		/* Thread has been terminated: finish our own destruction. */
		thread_mtx_unlock(thread);

		thread_terminate_self();
		/*NOTREACHED*/
	}

	thread_mtx_unlock(thread);
}
850
851 /* Prototype, see justification above */
852 kern_return_t
853 act_set_state(
854 thread_t thread,
855 int flavor,
856 thread_state_t state,
857 mach_msg_type_number_t count);
858
859 kern_return_t
860 act_set_state(
861 thread_t thread,
862 int flavor,
863 thread_state_t state,
864 mach_msg_type_number_t count)
865 {
866 if (thread == current_thread())
867 return (KERN_INVALID_ARGUMENT);
868
869 return (thread_set_state(thread, flavor, state, count));
870
871 }
872
873 kern_return_t
874 act_set_state_from_user(
875 thread_t thread,
876 int flavor,
877 thread_state_t state,
878 mach_msg_type_number_t count)
879 {
880 if (thread == current_thread())
881 return (KERN_INVALID_ARGUMENT);
882
883 return (thread_set_state_from_user(thread, flavor, state, count));
884
885 }
886
887 kern_return_t
888 act_get_state(
889 thread_t thread,
890 int flavor,
891 thread_state_t state,
892 mach_msg_type_number_t *count)
893 {
894 if (thread == current_thread())
895 return (KERN_INVALID_ARGUMENT);
896
897 return (thread_get_state(thread, flavor, state, count));
898 }
899
/*
 * Post an AST_BSD to the given thread, poking its processor if it
 * is currently running elsewhere so the AST is noticed promptly.
 */
void
act_set_astbsd(
	thread_t	thread)
{
	spl_t		s = splsched();

	if (thread == current_thread()) {
		/* Local case: set and propagate the AST directly. */
		thread_ast_set(thread, AST_BSD);
		ast_propagate(thread->ast);
	}
	else {
		processor_t		processor;

		thread_lock(thread);
		thread_ast_set(thread, AST_BSD);
		processor = thread->last_processor;
		if (	processor != PROCESSOR_NULL			&&
				processor->state == PROCESSOR_RUNNING	&&
				processor->active_thread == thread		)
			cause_ast_check(processor);
		thread_unlock(thread);
	}

	splx(s);
}
925
/*
 * Post an AST_APC to the given thread, poking its processor if it
 * is currently running elsewhere so the AST is noticed promptly.
 */
void
act_set_apc(
	thread_t	thread)
{
	spl_t		s = splsched();

	if (thread == current_thread()) {
		/* Local case: set and propagate the AST directly. */
		thread_ast_set(thread, AST_APC);
		ast_propagate(thread->ast);
	}
	else {
		processor_t		processor;

		thread_lock(thread);
		thread_ast_set(thread, AST_APC);
		processor = thread->last_processor;
		if (	processor != PROCESSOR_NULL			&&
				processor->state == PROCESSOR_RUNNING	&&
				processor->active_thread == thread		)
			cause_ast_check(processor);
		thread_unlock(thread);
	}

	splx(s);
}