apple/xnu (xnu-3789.21.4): osfmk/kern/thread_act.c
/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Center for Software Science (CSS).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 *	Author: Bryan Ford, University of Utah CSS
 *
 *	Thread management routines
 */
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/thread_act_server.h>

#include <kern/kern_types.h>
#include <kern/ast.h>
#include <kern/mach_param.h>
#include <kern/zalloc.h>
#include <kern/extmod_statistics.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/exception.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/machine.h>
#include <kern/spl.h>
#include <kern/syscall_subr.h>
#include <kern/sync_lock.h>
#include <kern/processor.h>
#include <kern/timer.h>
#include <kern/affinity.h>

#include <security/mac_mach_internal.h>

static void act_abort(thread_t thread);

static void thread_suspended(void *arg, wait_result_t result);
static void thread_set_apc_ast(thread_t thread);
static void thread_set_apc_ast_locked(thread_t thread);

/*
 * Internal routine to mark a thread as started.
 * Always called with the thread mutex locked.
 */
void
thread_start(
	thread_t	thread)
{
	clear_wait(thread, THREAD_AWAKENED);
	thread->started = TRUE;
}

/*
 * Internal routine to mark a thread as waiting
 * right after it has been created.  The caller
 * is responsible for calling wakeup()/thread_wakeup()
 * or thread_terminate() to get it going.
 *
 * Always called with the thread mutex locked.
 *
 * The task and task_threads mutexes are also held
 * (so nobody can set the thread running before
 * this point).
 *
 * Converts the TH_UNINT wait to an interruptible wait
 * to allow termination from this point forward.
 */
void
thread_start_in_assert_wait(
	thread_t		thread,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	struct waitq *waitq = assert_wait_queue(event);
	wait_result_t wait_result;
	spl_t spl;

	spl = splsched();
	waitq_lock(waitq);

	/* clear out startup condition (safe because thread not started yet) */
	thread_lock(thread);
	assert(!thread->started);
	assert((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT));
	thread->state &= ~(TH_WAIT | TH_UNINT);
	thread_unlock(thread);

	/* assert wait interruptibly forever */
	wait_result = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
	                                         interruptible,
	                                         TIMEOUT_URGENCY_SYS_NORMAL,
	                                         TIMEOUT_WAIT_FOREVER,
	                                         TIMEOUT_NO_LEEWAY,
	                                         thread);
	assert(wait_result == THREAD_WAITING);

	/* mark thread started while we still hold the waitq lock */
	thread_lock(thread);
	thread->started = TRUE;
	thread_unlock(thread);

	waitq_unlock(waitq);
	splx(spl);
}
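
/*
 * Example (illustrative sketch, not part of this file): a typical
 * creation path parks the new thread on a private event and wakes it
 * once setup is complete.  `my_startup_event` is a hypothetical name;
 * per the comment above, the caller holds the thread, task and
 * task_threads mutexes when parking the thread.
 *
 *	thread_start_in_assert_wait(new_thread,
 *	    (event_t)&my_startup_event, THREAD_INTERRUPTIBLE);
 *	... finish setup, drop locks ...
 *	thread_wakeup((event_t)&my_startup_event);	// or thread_terminate()
 */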

/*
 * Internal routine to terminate a thread.
 * Sometimes called with task already locked.
 */
kern_return_t
thread_terminate_internal(
	thread_t	thread)
{
	kern_return_t	result = KERN_SUCCESS;

	thread_mtx_lock(thread);

	if (thread->active) {
		thread->active = FALSE;

		act_abort(thread);

		if (thread->started)
			clear_wait(thread, THREAD_INTERRUPTED);
		else {
			thread_start(thread);
		}
	}
	else
		result = KERN_TERMINATED;

	if (thread->affinity_set != NULL)
		thread_affinity_terminate(thread);

	thread_mtx_unlock(thread);

	if (thread != current_thread() && result == KERN_SUCCESS)
		thread_wait(thread, FALSE);

	return (result);
}

/*
 * Terminate a thread.
 */
kern_return_t
thread_terminate(
	thread_t	thread)
{
	kern_return_t	result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (thread->task == kernel_task &&
	    thread != current_thread())
		return (KERN_FAILURE);

	result = thread_terminate_internal(thread);

	/*
	 * If a kernel thread is terminating itself, force an AST here.
	 * Kernel threads don't normally pass through the AST checking
	 * code - and all threads finish their own termination in
	 * thread_apc_ast.
	 */
	if (thread->task == kernel_task) {
		ml_set_interrupts_enabled(FALSE);
		ast_taken(AST_APC, TRUE);
		panic("thread_terminate");
	}

	return (result);
}
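
/*
 * Example (illustrative sketch, not part of this file): a kernel
 * thread exits by terminating itself; because of the forced AST
 * above, the call does not return in that case.
 *
 *	static void
 *	my_kernel_thread_main(void *arg, wait_result_t wr)	// hypothetical
 *	{
 *		... do work ...
 *		thread_terminate(current_thread());	// never returns
 *	}
 */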

/*
 * Suspend execution of the specified thread.
 * This is a recursive-style suspension of the thread; a count of
 * suspensions is maintained.
 *
 * Called with thread mutex held.
 */
void
thread_hold(thread_t thread)
{
	if (thread->suspend_count++ == 0) {
		thread_set_apc_ast(thread);
		assert(thread->suspend_parked == FALSE);
	}
}

/*
 * Decrement internal suspension count, setting thread
 * runnable when count falls to zero.
 *
 * Because the wait is abortsafe, we can't be guaranteed that the thread
 * is currently actually waiting even if suspend_parked is set.
 *
 * Called with thread mutex held.
 */
void
thread_release(thread_t thread)
{
	assertf(thread->suspend_count > 0, "thread %p over-resumed", thread);

	/* fail-safe on non-assert builds */
	if (thread->suspend_count == 0)
		return;

	if (--thread->suspend_count == 0) {
		if (!thread->started) {
			thread_start(thread);
		} else if (thread->suspend_parked) {
			thread->suspend_parked = FALSE;
			thread_wakeup_thread(&thread->suspend_count, thread);
		}
	}
}

kern_return_t
thread_suspend(thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread->user_stop_count++ == 0)
			thread_hold(thread);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	if (thread != current_thread() && result == KERN_SUCCESS)
		thread_wait(thread, FALSE);

	return (result);
}

kern_return_t
thread_resume(thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread->user_stop_count > 0) {
			if (--thread->user_stop_count == 0)
				thread_release(thread);
		} else {
			result = KERN_FAILURE;
		}
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return (result);
}
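
/*
 * Example (illustrative sketch, not part of this file): user-level
 * suspends nest, so each thread_suspend() must be balanced by a
 * thread_resume() before the target runs again.
 *
 *	thread_suspend(t);	// user_stop_count 0 -> 1, thread_hold()
 *	thread_suspend(t);	// user_stop_count 1 -> 2
 *	thread_resume(t);	// user_stop_count 2 -> 1, still suspended
 *	thread_resume(t);	// user_stop_count 1 -> 0, thread_release()
 *	thread_resume(t);	// KERN_FAILURE: count already zero
 */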

/*
 * thread_depress_abort:
 *
 * Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort(
	thread_t	thread)
{
	kern_return_t	result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active)
		result = thread_depress_abort_internal(thread);
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}


/*
 * Indicate that the thread should run the AST_APC callback
 * to detect an abort condition.
 *
 * Called with thread mutex held.
 */
static void
act_abort(
	thread_t	thread)
{
	spl_t	s = splsched();

	thread_lock(thread);

	if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
		thread->sched_flags |= TH_SFLAG_ABORT;
		thread_set_apc_ast_locked(thread);
	} else {
		thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;
	}

	thread_unlock(thread);
	splx(s);
}

kern_return_t
thread_abort(
	thread_t	thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		act_abort(thread);
		clear_wait(thread, THREAD_INTERRUPTED);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}

kern_return_t
thread_abort_safely(
	thread_t	thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		spl_t	s = splsched();

		thread_lock(thread);
		if (!thread->at_safe_point ||
		    clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
			if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
				thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
				thread_set_apc_ast_locked(thread);
			}
		}
		thread_unlock(thread);
		splx(s);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return (result);
}
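
/*
 * Example (illustrative sketch, not part of this file): the two abort
 * flavors differ in when the victim's wait is interrupted.
 *
 *	thread_abort(t);	// interrupt any wait immediately; the
 *				// victim wakes with THREAD_INTERRUPTED
 *	thread_abort_safely(t);	// only interrupt if t is at a safe
 *				// point; otherwise arm
 *				// TH_SFLAG_ABORTED_MASK and AST_APC
 */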

/*** backward compatibility hacks ***/
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <ipc/ipc_port.h>

kern_return_t
thread_info(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_info_t		thread_info_out,
	mach_msg_type_number_t	*thread_info_count)
{
	kern_return_t	result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active || thread->inspection)
		result = thread_info_internal(
				thread, flavor, thread_info_out, thread_info_count);
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}

kern_return_t
thread_get_state(
	thread_t		thread,
	int			flavor,
	thread_state_t		state,		/* pointer to OUT array */
	mach_msg_type_number_t	*state_count)	/*IN/OUT*/
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, FALSE)) {
				thread_mtx_lock(thread);
				result = machine_thread_get_state(
						thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_get_state(
					thread, flavor, state, state_count);
	}
	else if (thread->inspection)
	{
		result = machine_thread_get_state(
				thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
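
/*
 * Sketch (not part of this file) of the hold/stop protocol used by
 * thread_get_state() above and repeated by the other machine-state
 * operations below:
 *
 *	thread_hold(t);			// pin the suspend count
 *	thread_mtx_unlock(t);		// drop the mutex before blocking
 *	if (thread_stop(t, ...)) {	// wait until t is off the CPU
 *		thread_mtx_lock(t);
 *		machine_thread_...(t, ...);	// safe: t is stopped
 *		thread_unstop(t);
 *	} else {			// the stop was aborted
 *		thread_mtx_lock(t);
 *		result = KERN_ABORTED;
 *	}
 *	thread_release(t);		// undo the hold
 */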

/*
 * Change thread's machine-dependent state.  Called with nothing
 * locked.  Returns same way.
 */
static kern_return_t
thread_set_state_internal(
	thread_t		thread,
	int			flavor,
	thread_state_t		state,
	mach_msg_type_number_t	state_count,
	boolean_t		from_user)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_state(
						thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_set_state(
					thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	if ((result == KERN_SUCCESS) && from_user)
		extmod_statistics_incr_thread_set_state(thread);

	thread_mtx_unlock(thread);

	return (result);
}

/* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
kern_return_t
thread_set_state(
	thread_t		thread,
	int			flavor,
	thread_state_t		state,
	mach_msg_type_number_t	state_count);

kern_return_t
thread_set_state(
	thread_t		thread,
	int			flavor,
	thread_state_t		state,
	mach_msg_type_number_t	state_count)
{
	return thread_set_state_internal(thread, flavor, state, state_count, FALSE);
}

kern_return_t
thread_set_state_from_user(
	thread_t		thread,
	int			flavor,
	thread_state_t		state,
	mach_msg_type_number_t	state_count)
{
	return thread_set_state_internal(thread, flavor, state, state_count, TRUE);
}

/*
 * Kernel-internal "thread" interfaces used outside this file:
 */

/* Initialize (or re-initialize) a thread state.  Called from execve
 * with nothing locked, returns same way.
 */
kern_return_t
thread_state_initialize(
	thread_t	thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_state_initialize(thread);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_state_initialize(thread);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}


kern_return_t
thread_dup(
	thread_t	target)
{
	thread_t	self = current_thread();
	kern_return_t	result = KERN_SUCCESS;

	if (target == THREAD_NULL || target == self)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(target);

	if (target->active) {
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(self, target);
			if (self->affinity_set != AFFINITY_SET_NULL)
				thread_affinity_dup(self, target);
			thread_unstop(target);
		}
		else {
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(target);

	return (result);
}


kern_return_t
thread_dup2(
	thread_t	source,
	thread_t	target)
{
	kern_return_t	result = KERN_SUCCESS;
	uint32_t	active = 0;

	if (source == THREAD_NULL || target == THREAD_NULL || target == source)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(source);
	active = source->active;
	thread_mtx_unlock(source);

	if (!active) {
		return KERN_TERMINATED;
	}

	thread_mtx_lock(target);

	if (target->active || target->inspection) {
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(source, target);
			if (source->affinity_set != AFFINITY_SET_NULL)
				thread_affinity_dup(source, target);
			thread_unstop(target);
		}
		else {
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(target);

	return (result);
}

/*
 * thread_setstatus:
 *
 * Set the status of the specified thread.
 * Called with (and returns with) no locks held.
 */
kern_return_t
thread_setstatus(
	thread_t		thread,
	int			flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	count)
{

	return (thread_set_state(thread, flavor, tstate, count));
}

/*
 * thread_getstatus:
 *
 * Get the status of the specified thread.
 */
kern_return_t
thread_getstatus(
	thread_t		thread,
	int			flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count)
{
	return (thread_get_state(thread, flavor, tstate, count));
}

/*
 * Change thread's machine-dependent userspace TSD base.
 * Called with nothing locked.  Returns same way.
 */
kern_return_t
thread_set_tsd_base(
	thread_t		thread,
	mach_vm_offset_t	tsd_base)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_tsd_base(thread, tsd_base);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_set_tsd_base(thread, tsd_base);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}

/*
 * thread_set_apc_ast:
 *
 * Register the AST_APC callback that handles suspension and
 * termination, if it hasn't been installed already.
 *
 * Called with the thread mutex held.
 */
static void
thread_set_apc_ast(thread_t thread)
{
	spl_t s = splsched();

	thread_lock(thread);
	thread_set_apc_ast_locked(thread);
	thread_unlock(thread);

	splx(s);
}

/*
 * thread_set_apc_ast_locked:
 *
 * Do the work of registering for the AST_APC callback.
 *
 * Called with the thread mutex and scheduling lock held.
 */
static void
thread_set_apc_ast_locked(thread_t thread)
{
	/*
	 * Temporarily undepress, so target has
	 * a chance to do locking required to
	 * block itself in thread_suspended.
	 *
	 * Leaves the depress flag set so we can reinstate when it's blocked.
	 */
	if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)
		thread_recompute_sched_pri(thread, TRUE);

	thread_ast_set(thread, AST_APC);

	if (thread == current_thread()) {
		ast_propagate(thread->ast);
	} else {
		processor_t processor = thread->last_processor;

		if (processor != PROCESSOR_NULL &&
		    processor->state == PROCESSOR_RUNNING &&
		    processor->active_thread == thread) {
			cause_ast_check(processor);
		}
	}
}

/*
 * Activation control support routines internal to this file:
 *
 */

/*
 * thread_suspended
 *
 * Continuation routine for thread suspension.  It checks
 * to see whether there have been any new suspensions.  If so, it
 * installs the AST_APC handler again.  Otherwise, it checks to see
 * if the current depression needs to be reinstated (it may have
 * been temporarily removed in order to get to this point in a hurry).
 */
__attribute__((noreturn))
static void
thread_suspended(__unused void *parameter, wait_result_t result)
{
	thread_t thread = current_thread();

	thread_mtx_lock(thread);

	if (result == THREAD_INTERRUPTED)
		thread->suspend_parked = FALSE;
	else
		assert(thread->suspend_parked == FALSE);

	if (thread->suspend_count > 0) {
		thread_set_apc_ast(thread);
	} else {
		spl_t s = splsched();

		thread_lock(thread);
		if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
			thread->sched_pri = DEPRESSPRI;
			thread->last_processor->current_pri = thread->sched_pri;

			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
			                      (uintptr_t)thread_tid(thread),
			                      thread->base_pri,
			                      thread->sched_pri,
			                      0, /* eventually, 'reason' */
			                      0);
		}
		thread_unlock(thread);
		splx(s);
	}

	thread_mtx_unlock(thread);

	thread_exception_return();
	/*NOTREACHED*/
}

/*
 * thread_apc_ast - handles AST_APC and drives thread suspension and termination.
 * Called with nothing locked.  Returns (if it returns) the same way.
 */
void
thread_apc_ast(thread_t thread)
{
	thread_mtx_lock(thread);

	assert(thread->suspend_parked == FALSE);

	spl_t s = splsched();
	thread_lock(thread);

	/* TH_SFLAG_POLLDEPRESS is OK to have here */
	assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);

	thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
	thread_unlock(thread);
	splx(s);

	if (!thread->active) {
		/* Thread is ready to terminate, time to tear it down */
		thread_mtx_unlock(thread);

		thread_terminate_self();
		/*NOTREACHED*/
	}

	/* If we're suspended, go to sleep and wait for someone to wake us up. */
	if (thread->suspend_count > 0) {
		thread->suspend_parked = TRUE;
		assert_wait(&thread->suspend_count, THREAD_ABORTSAFE);
		thread_mtx_unlock(thread);

		thread_block(thread_suspended);
		/*NOTREACHED*/
	}

	thread_mtx_unlock(thread);
}
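
/*
 * Sketch (not part of this file) of a full suspend round trip driven
 * by the two routines above:
 *
 *	thread_suspend(t)	// suspend_count 0 -> 1, AST_APC armed
 *	  t takes the AST on its way back to user mode:
 *	  thread_apc_ast(t)	// suspend_parked = TRUE
 *	  thread_block(thread_suspended)  // parks on &t->suspend_count
 *	thread_resume(t)	// count 1 -> 0, thread_wakeup_thread()
 *	  thread_suspended()	// re-checks the count, reinstates any
 *				// depression, then
 *	  thread_exception_return()	// back to user mode
 */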

/* Prototype, see justification above */
kern_return_t
act_set_state(
	thread_t		thread,
	int			flavor,
	thread_state_t		state,
	mach_msg_type_number_t	count);

kern_return_t
act_set_state(
	thread_t		thread,
	int			flavor,
	thread_state_t		state,
	mach_msg_type_number_t	count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_set_state(thread, flavor, state, count));

}

kern_return_t
act_set_state_from_user(
	thread_t		thread,
	int			flavor,
	thread_state_t		state,
	mach_msg_type_number_t	count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_set_state_from_user(thread, flavor, state, count));

}

kern_return_t
act_get_state(
	thread_t		thread,
	int			flavor,
	thread_state_t		state,
	mach_msg_type_number_t	*count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_get_state(thread, flavor, state, count));
}

static void
act_set_ast(
	thread_t	thread,
	ast_t		ast)
{
	spl_t	s = splsched();

	if (thread == current_thread()) {
		thread_ast_set(thread, ast);
		ast_propagate(thread->ast);
	} else {
		processor_t	processor;

		thread_lock(thread);
		thread_ast_set(thread, ast);
		processor = thread->last_processor;
		if (processor != PROCESSOR_NULL &&
		    processor->state == PROCESSOR_RUNNING &&
		    processor->active_thread == thread)
			cause_ast_check(processor);
		thread_unlock(thread);
	}

	splx(s);
}

void
act_set_astbsd(
	thread_t	thread)
{
	act_set_ast(thread, AST_BSD);
}

void
act_set_kperf(
	thread_t	thread)
{
	/* safety check */
	if (thread != current_thread())
		if (!ml_get_interrupts_enabled())
			panic("unsafe act_set_kperf operation");

	act_set_ast(thread, AST_KPERF);
}

#if CONFIG_MACF
void
act_set_astmacf(
	thread_t	thread)
{
	act_set_ast(thread, AST_MACF);
}
#endif

void
set_astledger(thread_t thread)
{
	act_set_ast(thread, AST_LEDGER);
}

void
act_set_io_telemetry_ast(thread_t thread)
{
	act_set_ast(thread, AST_TELEMETRY_IO);
}