/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Center for Software Science (CSS).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 * Author: Bryan Ford, University of Utah CSS
 *
 * Thread management routines
 */
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/thread_act_server.h>

#include <kern/kern_types.h>
#include <kern/ast.h>
#include <kern/mach_param.h>
#include <kern/zalloc.h>
#include <kern/extmod_statistics.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/exception.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/machine.h>
#include <kern/spl.h>
#include <kern/syscall_subr.h>
#include <kern/sync_lock.h>
#include <kern/processor.h>
#include <kern/timer.h>
#include <kern/affinity.h>

#include <stdatomic.h>

#include <security/mac_mach_internal.h>

static void act_abort(thread_t thread);

static void thread_suspended(void *arg, wait_result_t result);
static void thread_set_apc_ast(thread_t thread);
static void thread_set_apc_ast_locked(thread_t thread);

/*
 * Internal routine to mark a thread as started.
 * Always called with the thread mutex locked.
 */
void
thread_start(
    thread_t    thread)
{
    clear_wait(thread, THREAD_AWAKENED);
    thread->started = TRUE;
}

/*
 * Internal routine to mark a thread as waiting
 * right after it has been created.  The caller
 * is responsible for calling wakeup()/thread_wakeup()
 * or thread_terminate() to get it going.
 *
 * Always called with the thread mutex locked.
 *
 * The task and task_threads mutexes are also held
 * (so nobody can set the thread running before
 * this point).
 *
 * Converts the TH_UNINT wait to THREAD_INTERRUPTIBLE
 * to allow termination from this point forward.
 */
void
thread_start_in_assert_wait(
    thread_t            thread,
    event_t             event,
    wait_interrupt_t    interruptible)
{
    struct waitq *waitq = assert_wait_queue(event);
    wait_result_t wait_result;
    spl_t spl;

    spl = splsched();
    waitq_lock(waitq);

    /* clear out startup condition (safe because thread not started yet) */
    thread_lock(thread);
    assert(!thread->started);
    assert((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT));
    thread->state &= ~(TH_WAIT | TH_UNINT);
    thread_unlock(thread);

    /* assert wait interruptibly forever */
    wait_result = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
                                             interruptible,
                                             TIMEOUT_URGENCY_SYS_NORMAL,
                                             TIMEOUT_WAIT_FOREVER,
                                             TIMEOUT_NO_LEEWAY,
                                             thread);
    assert(wait_result == THREAD_WAITING);

    /* mark thread started while we still hold the waitq lock */
    thread_lock(thread);
    thread->started = TRUE;
    thread_unlock(thread);

    waitq_unlock(waitq);
    splx(spl);
}
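
/*
 * Illustrative caller sketch (hypothetical; `new_thread` and `event`
 * are assumptions, not names taken from this file).  The creator parks
 * the freshly created thread while still holding the task and
 * task_threads mutexes, then later either wakes or terminates it:
 *
 *	thread_mtx_lock(new_thread);
 *	thread_start_in_assert_wait(new_thread, event, THREAD_INTERRUPTIBLE);
 *	thread_mtx_unlock(new_thread);
 *	...
 *	thread_wakeup(event);	/- or thread_terminate(new_thread) -/
 */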

/*
 * Internal routine to terminate a thread.
 * Sometimes called with task already locked.
 */
kern_return_t
thread_terminate_internal(
    thread_t    thread)
{
    kern_return_t result = KERN_SUCCESS;

    thread_mtx_lock(thread);

    if (thread->active) {
        thread->active = FALSE;

        act_abort(thread);

        if (thread->started)
            clear_wait(thread, THREAD_INTERRUPTED);
        else {
            thread_start(thread);
        }
    }
    else
        result = KERN_TERMINATED;

    if (thread->affinity_set != NULL)
        thread_affinity_terminate(thread);

    thread_mtx_unlock(thread);

    if (thread != current_thread() && result == KERN_SUCCESS)
        thread_wait(thread, FALSE);

    return (result);
}

/*
 * Terminate a thread.
 */
kern_return_t
thread_terminate(
    thread_t    thread)
{
    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    /* Kernel threads can't be terminated without their own cooperation */
    if (thread->task == kernel_task && thread != current_thread())
        return (KERN_FAILURE);

    kern_return_t result = thread_terminate_internal(thread);

    /*
     * If a kernel thread is terminating itself, force handle the AST_APC here.
     * Kernel threads don't pass through the return-to-user AST checking code,
     * but all threads must finish their own termination in thread_apc_ast.
     */
    if (thread->task == kernel_task) {
        assert(thread->active == FALSE);
        thread_ast_clear(thread, AST_APC);
        thread_apc_ast(thread);

        panic("thread_terminate");
        /* NOTREACHED */
    }

    return (result);
}

/*
 * Suspend execution of the specified thread.
 * This is a recursive-style suspension of the thread;
 * a count of suspends is maintained.
 *
 * Called with the thread mutex held.
 */
void
thread_hold(thread_t thread)
{
    if (thread->suspend_count++ == 0) {
        thread_set_apc_ast(thread);
        assert(thread->suspend_parked == FALSE);
    }
}

/*
 * Decrement internal suspension count, setting thread
 * runnable when count falls to zero.
 *
 * Because the wait is abortsafe, we can't be guaranteed that the
 * thread is currently actually waiting even if suspend_parked is set.
 *
 * Called with the thread mutex held.
 */
void
thread_release(thread_t thread)
{
    assertf(thread->suspend_count > 0, "thread %p over-resumed", thread);

    /* fail-safe on non-assert builds */
    if (thread->suspend_count == 0)
        return;

    if (--thread->suspend_count == 0) {
        if (!thread->started) {
            thread_start(thread);
        } else if (thread->suspend_parked) {
            thread->suspend_parked = FALSE;
            thread_wakeup_thread(&thread->suspend_count, thread);
        }
    }
}

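/*
 * thread_suspend:
 *
 * Mach user-level suspension of the specified thread.  A separate
 * user_stop_count is kept on top of the internal suspend count; the
 * first user suspend takes the internal hold.  Kernel threads may
 * not be suspended this way.
 *
 * Called with nothing locked.  If the target is another thread,
 * waits for it to actually stop before returning.
 */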
kern_return_t
thread_suspend(thread_t thread)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL || thread->task == kernel_task)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread->user_stop_count++ == 0)
            thread_hold(thread);
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    if (thread != current_thread() && result == KERN_SUCCESS)
        thread_wait(thread, FALSE);

    return (result);
}

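/*
 * thread_resume:
 *
 * Undo one thread_suspend.  Drops the internal hold when the user
 * stop count returns to zero; fails if the thread was not suspended.
 */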
kern_return_t
thread_resume(thread_t thread)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL || thread->task == kernel_task)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread->user_stop_count > 0) {
            if (--thread->user_stop_count == 0)
                thread_release(thread);
        } else {
            result = KERN_FAILURE;
        }
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return (result);
}

/*
 * thread_depress_abort:
 *
 * Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort(
    thread_t    thread)
{
    kern_return_t result;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active)
        result = thread_depress_abort_internal(thread);
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    return (result);
}


/*
 * Indicate that the thread should run the AST_APC callback
 * to detect an abort condition.
 *
 * Called with the thread mutex held.
 */
static void
act_abort(
    thread_t    thread)
{
    spl_t s = splsched();

    thread_lock(thread);

    if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
        thread->sched_flags |= TH_SFLAG_ABORT;
        thread_set_apc_ast_locked(thread);
    } else {
        thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;
    }

    thread_unlock(thread);
    splx(s);
}

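/*
 * thread_abort:
 *
 * Unconditionally abort the target thread: interrupt any wait it is
 * in and arrange for the AST_APC callback to observe the abort
 * condition.
 */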
kern_return_t
thread_abort(
    thread_t    thread)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active) {
        act_abort(thread);
        clear_wait(thread, THREAD_INTERRUPTED);
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    return (result);
}

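/*
 * thread_abort_safely:
 *
 * Like thread_abort, but only interrupts a wait if the thread is at
 * a point marked safe; otherwise the abort is deferred by marking
 * the thread aborted-safely and registering the AST_APC callback.
 */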
kern_return_t
thread_abort_safely(
    thread_t    thread)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active) {
        spl_t s = splsched();

        thread_lock(thread);
        if (!thread->at_safe_point ||
            clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
            if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
                thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
                thread_set_apc_ast_locked(thread);
            }
        }
        thread_unlock(thread);
        splx(s);
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return (result);
}

/*** backward compatibility hacks ***/
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <ipc/ipc_port.h>

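/*
 * thread_info:
 *
 * Return information about the specified thread.  Also permitted on
 * inactive threads that are still open for inspection.
 */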
kern_return_t
thread_info(
    thread_t                thread,
    thread_flavor_t         flavor,
    thread_info_t           thread_info_out,
    mach_msg_type_number_t  *thread_info_count)
{
    kern_return_t result;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active || thread->inspection)
        result = thread_info_internal(
                     thread, flavor, thread_info_out, thread_info_count);
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    return (result);
}

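/*
 * thread_get_state:
 *
 * Fetch the machine-dependent state of the target thread.  Another
 * thread is held and stopped first so a consistent snapshot is read;
 * if the stop is interrupted, KERN_ABORTED is returned.
 */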
kern_return_t
thread_get_state(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,          /* pointer to OUT array */
    mach_msg_type_number_t  *state_count)   /* IN/OUT */
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread != current_thread()) {
            thread_hold(thread);

            thread_mtx_unlock(thread);

            if (thread_stop(thread, FALSE)) {
                thread_mtx_lock(thread);
                result = machine_thread_get_state(
                             thread, flavor, state, state_count);
                thread_unstop(thread);
            }
            else {
                thread_mtx_lock(thread);
                result = KERN_ABORTED;
            }

            thread_release(thread);
        }
        else
            result = machine_thread_get_state(
                         thread, flavor, state, state_count);
    }
    else if (thread->inspection)
    {
        result = machine_thread_get_state(
                     thread, flavor, state, state_count);
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    return (result);
}

/*
 * Change thread's machine-dependent state.  Called with nothing
 * locked.  Returns same way.
 */
static kern_return_t
thread_set_state_internal(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  state_count,
    boolean_t               from_user)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread != current_thread()) {
            thread_hold(thread);

            thread_mtx_unlock(thread);

            if (thread_stop(thread, TRUE)) {
                thread_mtx_lock(thread);
                result = machine_thread_set_state(
                             thread, flavor, state, state_count);
                thread_unstop(thread);
            }
            else {
                thread_mtx_lock(thread);
                result = KERN_ABORTED;
            }

            thread_release(thread);
        }
        else
            result = machine_thread_set_state(
                         thread, flavor, state, state_count);
    }
    else
        result = KERN_TERMINATED;

    if ((result == KERN_SUCCESS) && from_user)
        extmod_statistics_incr_thread_set_state(thread);

    thread_mtx_unlock(thread);

    return (result);
}

/* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
kern_return_t
thread_set_state(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  state_count);

kern_return_t
thread_set_state(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  state_count)
{
    return thread_set_state_internal(thread, flavor, state, state_count, FALSE);
}

kern_return_t
thread_set_state_from_user(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  state_count)
{
    return thread_set_state_internal(thread, flavor, state, state_count, TRUE);
}

/*
 * Kernel-internal "thread" interfaces used outside this file:
 */

/*
 * Initialize (or re-initialize) a thread's state.  Called from execve
 * with nothing locked; returns the same way.
 */
kern_return_t
thread_state_initialize(
    thread_t    thread)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread != current_thread()) {
            thread_hold(thread);

            thread_mtx_unlock(thread);

            if (thread_stop(thread, TRUE)) {
                thread_mtx_lock(thread);
                result = machine_thread_state_initialize(thread);
                thread_unstop(thread);
            }
            else {
                thread_mtx_lock(thread);
                result = KERN_ABORTED;
            }

            thread_release(thread);
        }
        else
            result = machine_thread_state_initialize(thread);
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    return (result);
}

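/*
 * thread_dup:
 *
 * Copy the current thread's machine state (and processor affinity,
 * if any) onto the stopped target thread.
 */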
kern_return_t
thread_dup(
    thread_t    target)
{
    thread_t self = current_thread();
    kern_return_t result = KERN_SUCCESS;

    if (target == THREAD_NULL || target == self)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(target);

    if (target->active) {
        thread_hold(target);

        thread_mtx_unlock(target);

        if (thread_stop(target, TRUE)) {
            thread_mtx_lock(target);
            result = machine_thread_dup(self, target);
            if (self->affinity_set != AFFINITY_SET_NULL)
                thread_affinity_dup(self, target);
            thread_unstop(target);
        }
        else {
            thread_mtx_lock(target);
            result = KERN_ABORTED;
        }

        thread_release(target);
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(target);

    return (result);
}

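/*
 * thread_dup2:
 *
 * Like thread_dup, but copies machine state from an explicit source
 * thread rather than from the caller.
 */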
kern_return_t
thread_dup2(
    thread_t    source,
    thread_t    target)
{
    kern_return_t result = KERN_SUCCESS;
    uint32_t active = 0;

    if (source == THREAD_NULL || target == THREAD_NULL || target == source)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(source);
    active = source->active;
    thread_mtx_unlock(source);

    if (!active) {
        return KERN_TERMINATED;
    }

    thread_mtx_lock(target);

    if (target->active || target->inspection) {
        thread_hold(target);

        thread_mtx_unlock(target);

        if (thread_stop(target, TRUE)) {
            thread_mtx_lock(target);
            result = machine_thread_dup(source, target);
            if (source->affinity_set != AFFINITY_SET_NULL)
                thread_affinity_dup(source, target);
            thread_unstop(target);
        }
        else {
            thread_mtx_lock(target);
            result = KERN_ABORTED;
        }

        thread_release(target);
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(target);

    return (result);
}

/*
 * thread_setstatus:
 *
 * Set the status of the specified thread.
 * Called with (and returns with) no locks held.
 */
kern_return_t
thread_setstatus(
    thread_t                thread,
    int                     flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  count)
{
    return (thread_set_state(thread, flavor, tstate, count));
}

/*
 * thread_getstatus:
 *
 * Get the status of the specified thread.
 */
kern_return_t
thread_getstatus(
    thread_t                thread,
    int                     flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  *count)
{
    return (thread_get_state(thread, flavor, tstate, count));
}

/*
 * Change thread's machine-dependent userspace TSD base.
 * Called with nothing locked.  Returns same way.
 */
kern_return_t
thread_set_tsd_base(
    thread_t            thread,
    mach_vm_offset_t    tsd_base)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread != current_thread()) {
            thread_hold(thread);

            thread_mtx_unlock(thread);

            if (thread_stop(thread, TRUE)) {
                thread_mtx_lock(thread);
                result = machine_thread_set_tsd_base(thread, tsd_base);
                thread_unstop(thread);
            }
            else {
                thread_mtx_lock(thread);
                result = KERN_ABORTED;
            }

            thread_release(thread);
        }
        else
            result = machine_thread_set_tsd_base(thread, tsd_base);
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    return (result);
}

/*
 * thread_set_apc_ast:
 *
 * Register the AST_APC callback that handles suspension and
 * termination, if it hasn't been installed already.
 *
 * Called with the thread mutex held.
 */
static void
thread_set_apc_ast(thread_t thread)
{
    spl_t s = splsched();

    thread_lock(thread);
    thread_set_apc_ast_locked(thread);
    thread_unlock(thread);

    splx(s);
}

/*
 * thread_set_apc_ast_locked:
 *
 * Do the work of registering for the AST_APC callback.
 *
 * Called with the thread mutex and scheduling lock held.
 */
static void
thread_set_apc_ast_locked(thread_t thread)
{
    /*
     * Temporarily undepress, so target has
     * a chance to do locking required to
     * block itself in thread_suspended.
     *
     * Leaves the depress flag set so we can reinstate when it's blocked.
     */
    if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)
        thread_recompute_sched_pri(thread, TRUE);

    thread_ast_set(thread, AST_APC);

    if (thread == current_thread()) {
        ast_propagate(thread);
    } else {
        processor_t processor = thread->last_processor;

        if (processor != PROCESSOR_NULL &&
            processor->state == PROCESSOR_RUNNING &&
            processor->active_thread == thread) {
            cause_ast_check(processor);
        }
    }
}

/*
 * Activation control support routines internal to this file:
 */

/*
 * thread_suspended
 *
 * Continuation routine for thread suspension.  It checks
 * to see whether there have been any new suspensions.  If so, it
 * installs the AST_APC handler again.  Otherwise, it checks to see
 * if the current depression needs to be reinstated (it may have
 * been temporarily removed in order to get to this point in a hurry).
 */
__attribute__((noreturn))
static void
thread_suspended(__unused void *parameter, wait_result_t result)
{
    thread_t thread = current_thread();

    thread_mtx_lock(thread);

    if (result == THREAD_INTERRUPTED)
        thread->suspend_parked = FALSE;
    else
        assert(thread->suspend_parked == FALSE);

    if (thread->suspend_count > 0) {
        thread_set_apc_ast(thread);
    } else {
        spl_t s = splsched();

        thread_lock(thread);
        if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
            thread->sched_pri = DEPRESSPRI;
            thread->last_processor->current_pri = thread->sched_pri;
            thread->last_processor->current_perfctl_class = thread_get_perfcontrol_class(thread);

            KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
                                  (uintptr_t)thread_tid(thread),
                                  thread->base_pri,
                                  thread->sched_pri,
                                  0, /* eventually, 'reason' */
                                  0);
        }
        thread_unlock(thread);
        splx(s);
    }

    thread_mtx_unlock(thread);

    thread_exception_return();
    /*NOTREACHED*/
}

/*
 * thread_apc_ast - handles AST_APC and drives thread suspension and termination.
 * Called with nothing locked.  Returns (if it returns) the same way.
 */
void
thread_apc_ast(thread_t thread)
{
    thread_mtx_lock(thread);

    assert(thread->suspend_parked == FALSE);

    spl_t s = splsched();
    thread_lock(thread);

    /* TH_SFLAG_POLLDEPRESS is OK to have here */
    assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);

    thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
    thread_unlock(thread);
    splx(s);

    if (!thread->active) {
        /* Thread is ready to terminate, time to tear it down */
        thread_mtx_unlock(thread);

        thread_terminate_self();
        /*NOTREACHED*/
    }

    /* If we're suspended, go to sleep and wait for someone to wake us up. */
    if (thread->suspend_count > 0) {
        thread->suspend_parked = TRUE;
        assert_wait(&thread->suspend_count, THREAD_ABORTSAFE);
        thread_mtx_unlock(thread);

        thread_block(thread_suspended);
        /*NOTREACHED*/
    }

    thread_mtx_unlock(thread);
}

/* Prototype, see justification above */
kern_return_t
act_set_state(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  count);

kern_return_t
act_set_state(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  count)
{
    if (thread == current_thread())
        return (KERN_INVALID_ARGUMENT);

    return (thread_set_state(thread, flavor, state, count));
}

kern_return_t
act_set_state_from_user(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  count)
{
    if (thread == current_thread())
        return (KERN_INVALID_ARGUMENT);

    return (thread_set_state_from_user(thread, flavor, state, count));
}

kern_return_t
act_get_state(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  *count)
{
    if (thread == current_thread())
        return (KERN_INVALID_ARGUMENT);

    return (thread_get_state(thread, flavor, state, count));
}

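/*
 * act_set_ast:
 *
 * Set an AST on the target thread and make sure it is noticed: the
 * AST is propagated immediately on the current thread; otherwise an
 * AST check is forced on the processor the target is running on.
 */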
static void
act_set_ast(
    thread_t    thread,
    ast_t       ast)
{
    spl_t s = splsched();

    if (thread == current_thread()) {
        thread_ast_set(thread, ast);
        ast_propagate(thread);
    } else {
        processor_t processor;

        thread_lock(thread);
        thread_ast_set(thread, ast);
        processor = thread->last_processor;
        if (processor != PROCESSOR_NULL &&
            processor->state == PROCESSOR_RUNNING &&
            processor->active_thread == thread)
            cause_ast_check(processor);
        thread_unlock(thread);
    }

    splx(s);
}

/*
 * Set an AST on a thread without causing an AST check
 * and without taking the thread lock.
 *
 * If the thread is not the current thread, it may take
 * up until the next context switch or quantum expiration
 * on that thread for it to notice the AST.
 */
static void
act_set_ast_async(thread_t thread,
                  ast_t ast)
{
    thread_ast_set(thread, ast);

    if (thread == current_thread()) {
        spl_t s = splsched();
        ast_propagate(thread);
        splx(s);
    }
}

void
act_set_astbsd(
    thread_t    thread)
{
    act_set_ast(thread, AST_BSD);
}

void
act_set_astkevent(thread_t thread, uint16_t bits)
{
    atomic_fetch_or(&thread->kevent_ast_bits, bits);

    /* kevent AST shouldn't send immediate IPIs */
    act_set_ast_async(thread, AST_KEVENT);
}

void
act_set_kperf(
    thread_t    thread)
{
    /* safety check */
    if (thread != current_thread())
        if (!ml_get_interrupts_enabled())
            panic("unsafe act_set_kperf operation");

    act_set_ast(thread, AST_KPERF);
}

#if CONFIG_MACF
void
act_set_astmacf(
    thread_t    thread)
{
    act_set_ast(thread, AST_MACF);
}
#endif

void
act_set_astledger(thread_t thread)
{
    act_set_ast(thread, AST_LEDGER);
}

/*
 * The ledger AST may need to be set while already holding
 * the thread lock.  This routine skips sending the IPI,
 * allowing us to avoid the lock hold.
 *
 * However, it means the targeted thread must context switch
 * to recognize the ledger AST.
 */
void
act_set_astledger_async(thread_t thread)
{
    act_set_ast_async(thread, AST_LEDGER);
}

void
act_set_io_telemetry_ast(thread_t thread)
{
    act_set_ast(thread, AST_TELEMETRY_IO);
}
