/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Center for Software Science (CSS). All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 *      Author: Bryan Ford, University of Utah CSS
 *
 *      Thread management routines
 */

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/thread_act_server.h>

#include <kern/kern_types.h>
#include <kern/ast.h>
#include <kern/mach_param.h>
#include <kern/zalloc.h>
#include <kern/extmod_statistics.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/exception.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/machine.h>
#include <kern/spl.h>
#include <kern/syscall_subr.h>
#include <kern/sync_lock.h>
#include <kern/processor.h>
#include <kern/timer.h>
#include <kern/affinity.h>
#include <kern/host.h>

#include <stdatomic.h>

#include <security/mac_mach_internal.h>

static void act_abort(thread_t thread);

static void thread_suspended(void *arg, wait_result_t result);
static void thread_set_apc_ast(thread_t thread);
static void thread_set_apc_ast_locked(thread_t thread);

/*
 * Internal routine to mark a thread as started.
 * Always called with the thread mutex locked.
 */
void
thread_start(
        thread_t                thread)
{
        clear_wait(thread, THREAD_AWAKENED);
        thread->started = TRUE;
}

/*
 * Internal routine to mark a thread as waiting
 * right after it has been created. The caller
 * is responsible for calling wakeup()/thread_wakeup()
 * or thread_terminate() to get it going.
 *
 * Always called with the thread mutex locked.
 *
 * The task and task_threads mutexes are also held
 * (so nobody can set the thread running before
 * this point).
 *
 * Converts the TH_UNINT wait to THREAD_INTERRUPTIBLE
 * to allow termination from this point forward.
 */
void
thread_start_in_assert_wait(
        thread_t                thread,
        event_t                 event,
        wait_interrupt_t        interruptible)
{
        struct waitq *waitq = assert_wait_queue(event);
        wait_result_t wait_result;
        spl_t spl;

        spl = splsched();
        waitq_lock(waitq);

        /* clear out startup condition (safe because thread not started yet) */
        thread_lock(thread);
        assert(!thread->started);
        assert((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT));
        thread->state &= ~(TH_WAIT | TH_UNINT);
        thread_unlock(thread);

        /* assert wait interruptibly forever */
        wait_result = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
            interruptible,
            TIMEOUT_URGENCY_SYS_NORMAL,
            TIMEOUT_WAIT_FOREVER,
            TIMEOUT_NO_LEEWAY,
            thread);
        assert(wait_result == THREAD_WAITING);

        /* mark thread started while we still hold the waitq lock */
        thread_lock(thread);
        thread->started = TRUE;
        thread_unlock(thread);

        waitq_unlock(waitq);
        splx(spl);
}
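
/*
 * Illustrative sketch (not compiled): one plausible calling sequence for
 * thread_start_in_assert_wait(). `example_park_new_thread` and `my_event`
 * are hypothetical names, and the thread must already be in the unstarted
 * TH_WAIT | TH_UNINT state that the asserts above require; the
 * task/task_threads locking noted above remains the caller's job.
 */
#if 0
static void
example_park_new_thread(thread_t parked, event_t my_event)
{
        /* park it interruptibly on my_event; it cannot run until woken */
        thread_start_in_assert_wait(parked, my_event, THREAD_INTERRUPTIBLE);

        /*
         * later: thread_wakeup(my_event) gets it going,
         * or thread_terminate(parked) tears it down
         */
}
#endif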

/*
 * Internal routine to terminate a thread.
 * Sometimes called with task already locked.
 */
kern_return_t
thread_terminate_internal(
        thread_t                thread)
{
        kern_return_t result = KERN_SUCCESS;

        thread_mtx_lock(thread);

        if (thread->active) {
                thread->active = FALSE;

                act_abort(thread);

                if (thread->started) {
                        clear_wait(thread, THREAD_INTERRUPTED);
                } else {
                        thread_start(thread);
                }
        } else {
                result = KERN_TERMINATED;
        }

        if (thread->affinity_set != NULL) {
                thread_affinity_terminate(thread);
        }

        thread_mtx_unlock(thread);

        if (thread != current_thread() && result == KERN_SUCCESS) {
                thread_wait(thread, FALSE);
        }

        return result;
}

/*
 * Terminate a thread.
 */
kern_return_t
thread_terminate(
        thread_t                thread)
{
        if (thread == THREAD_NULL) {
                return KERN_INVALID_ARGUMENT;
        }

        /* Kernel threads can't be terminated without their own cooperation */
        if (thread->task == kernel_task && thread != current_thread()) {
                return KERN_FAILURE;
        }

        kern_return_t result = thread_terminate_internal(thread);

        /*
         * If a kernel thread is terminating itself, force handle the AST_APC here.
         * Kernel threads don't pass through the return-to-user AST checking code,
         * but all threads must finish their own termination in thread_apc_ast.
         */
        if (thread->task == kernel_task) {
                assert(thread->active == FALSE);
                thread_ast_clear(thread, AST_APC);
                thread_apc_ast(thread);

                panic("thread_terminate");
                /* NOTREACHED */
        }

        return result;
}
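
/*
 * Illustrative sketch (not compiled): a kernel thread never crosses a
 * return-to-user boundary, so it must end its own life by calling
 * thread_terminate() on itself, which does not return (see above).
 * `example_kernel_thread_main` is a hypothetical name.
 */
#if 0
static void
example_kernel_thread_main(void *arg, wait_result_t wr)
{
        /* ... do the thread's work ... */

        /* tears the thread down via thread_apc_ast(); does not return */
        thread_terminate(current_thread());
        /* NOTREACHED */
}
#endif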

/*
 * Suspend execution of the specified thread.
 * This is a recursive-style suspension of the thread;
 * a count of suspends is maintained.
 *
 * Called with thread mutex held.
 */
void
thread_hold(thread_t thread)
{
        if (thread->suspend_count++ == 0) {
                thread_set_apc_ast(thread);
                assert(thread->suspend_parked == FALSE);
        }
}

/*
 * Decrement the internal suspension count, setting the thread
 * runnable when the count falls to zero.
 *
 * Because the wait is abortsafe, the thread is not guaranteed
 * to actually be waiting even if suspend_parked is set.
 *
 * Called with thread mutex held.
 */
void
thread_release(thread_t thread)
{
        assertf(thread->suspend_count > 0, "thread %p over-resumed", thread);

        /* fail-safe on non-assert builds */
        if (thread->suspend_count == 0) {
                return;
        }

        if (--thread->suspend_count == 0) {
                if (!thread->started) {
                        thread_start(thread);
                } else if (thread->suspend_parked) {
                        thread->suspend_parked = FALSE;
                        thread_wakeup_thread(&thread->suspend_count, thread);
                }
        }
}
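
/*
 * Illustrative sketch (not compiled): thread_hold() and thread_release()
 * nest and must be balanced, and both require the thread mutex. This is
 * the same pattern the state-manipulation routines below use; the name
 * `example_hold_release` and the elided work are hypothetical.
 */
#if 0
static void
example_hold_release(thread_t target)
{
        thread_mtx_lock(target);
        thread_hold(target);       /* suspend_count 0 -> 1, posts AST_APC */
        thread_mtx_unlock(target);

        /* ... e.g. thread_stop()/inspect/thread_unstop() ... */

        thread_mtx_lock(target);
        thread_release(target);    /* suspend_count 1 -> 0, wakes if parked */
        thread_mtx_unlock(target);
}
#endif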

kern_return_t
thread_suspend(thread_t thread)
{
        kern_return_t result = KERN_SUCCESS;

        if (thread == THREAD_NULL || thread->task == kernel_task) {
                return KERN_INVALID_ARGUMENT;
        }

        thread_mtx_lock(thread);

        if (thread->active) {
                if (thread->user_stop_count++ == 0) {
                        thread_hold(thread);
                }
        } else {
                result = KERN_TERMINATED;
        }

        thread_mtx_unlock(thread);

        if (thread != current_thread() && result == KERN_SUCCESS) {
                thread_wait(thread, FALSE);
        }

        return result;
}

kern_return_t
thread_resume(thread_t thread)
{
        kern_return_t result = KERN_SUCCESS;

        if (thread == THREAD_NULL || thread->task == kernel_task) {
                return KERN_INVALID_ARGUMENT;
        }

        thread_mtx_lock(thread);

        if (thread->active) {
                if (thread->user_stop_count > 0) {
                        if (--thread->user_stop_count == 0) {
                                thread_release(thread);
                        }
                } else {
                        result = KERN_FAILURE;
                }
        } else {
                result = KERN_TERMINATED;
        }

        thread_mtx_unlock(thread);

        return result;
}
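
/*
 * Illustrative sketch (not compiled): thread_suspend()/thread_resume()
 * balance against user_stop_count, so nested suspends require matching
 * resumes. `t` stands for any thread in a non-kernel task.
 */
#if 0
static void
example_nested_suspend(thread_t t)
{
        thread_suspend(t);      /* user_stop_count 0 -> 1: thread held */
        thread_suspend(t);      /* user_stop_count 1 -> 2: still held  */
        thread_resume(t);       /* user_stop_count 2 -> 1: still held  */
        thread_resume(t);       /* user_stop_count 1 -> 0: runs again  */
        /* one more thread_resume(t) would return KERN_FAILURE */
}
#endif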

/*
 * thread_depress_abort_from_user:
 *
 * Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort_from_user(thread_t thread)
{
        kern_return_t result;

        if (thread == THREAD_NULL) {
                return KERN_INVALID_ARGUMENT;
        }

        thread_mtx_lock(thread);

        if (thread->active) {
                result = thread_depress_abort(thread);
        } else {
                result = KERN_TERMINATED;
        }

        thread_mtx_unlock(thread);

        return result;
}


/*
 * Indicate that the thread should run the AST_APC callback
 * to detect an abort condition.
 *
 * Called with thread mutex held.
 */
static void
act_abort(
        thread_t                thread)
{
        spl_t s = splsched();

        thread_lock(thread);

        if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
                thread->sched_flags |= TH_SFLAG_ABORT;
                thread_set_apc_ast_locked(thread);
                thread_depress_abort_locked(thread);
        } else {
                thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;
        }

        thread_unlock(thread);
        splx(s);
}

kern_return_t
thread_abort(
        thread_t                thread)
{
        kern_return_t result = KERN_SUCCESS;

        if (thread == THREAD_NULL) {
                return KERN_INVALID_ARGUMENT;
        }

        thread_mtx_lock(thread);

        if (thread->active) {
                act_abort(thread);
                clear_wait(thread, THREAD_INTERRUPTED);
        } else {
                result = KERN_TERMINATED;
        }

        thread_mtx_unlock(thread);

        return result;
}

kern_return_t
thread_abort_safely(
        thread_t                thread)
{
        kern_return_t result = KERN_SUCCESS;

        if (thread == THREAD_NULL) {
                return KERN_INVALID_ARGUMENT;
        }

        thread_mtx_lock(thread);

        if (thread->active) {
                spl_t s = splsched();

                thread_lock(thread);
                if (!thread->at_safe_point ||
                    clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
                        if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
                                thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
                                thread_set_apc_ast_locked(thread);
                                thread_depress_abort_locked(thread);
                        }
                }
                thread_unlock(thread);
                splx(s);
        } else {
                result = KERN_TERMINATED;
        }

        thread_mtx_unlock(thread);

        return result;
}

/*** backward compatibility hacks ***/
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <ipc/ipc_port.h>

kern_return_t
thread_info(
        thread_t                thread,
        thread_flavor_t         flavor,
        thread_info_t           thread_info_out,
        mach_msg_type_number_t  *thread_info_count)
{
        kern_return_t result;

        if (thread == THREAD_NULL) {
                return KERN_INVALID_ARGUMENT;
        }

        thread_mtx_lock(thread);

        if (thread->active || thread->inspection) {
                result = thread_info_internal(
                        thread, flavor, thread_info_out, thread_info_count);
        } else {
                result = KERN_TERMINATED;
        }

        thread_mtx_unlock(thread);

        return result;
}

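/*
 * thread_get_state_internal() and the other remote-manipulation routines
 * below (set_state, state_initialize, dup, set_tsd_base) share one pattern
 * for touching another thread's machine state:
 *
 *      thread_hold()      take a suspend count so the target stays held
 *      thread_stop()      wait (thread mutex dropped) until it has stopped
 *      machine_...()      operate on the now-quiescent machine state
 *      thread_unstop()    let it run again
 *      thread_release()   drop the suspend count
 *
 * If thread_stop() is interrupted, the operation fails with KERN_ABORTED.
 * The current thread may always operate on its own state directly.
 */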
static inline kern_return_t
thread_get_state_internal(
        thread_t                thread,
        int                     flavor,
        thread_state_t          state,          /* pointer to OUT array */
        mach_msg_type_number_t  *state_count,   /* IN/OUT */
        boolean_t               to_user)
{
        kern_return_t result = KERN_SUCCESS;

        if (thread == THREAD_NULL) {
                return KERN_INVALID_ARGUMENT;
        }

        thread_mtx_lock(thread);

        if (thread->active) {
                if (thread != current_thread()) {
                        thread_hold(thread);

                        thread_mtx_unlock(thread);

                        if (thread_stop(thread, FALSE)) {
                                thread_mtx_lock(thread);
                                result = machine_thread_get_state(
                                        thread, flavor, state, state_count);
                                thread_unstop(thread);
                        } else {
                                thread_mtx_lock(thread);
                                result = KERN_ABORTED;
                        }

                        thread_release(thread);
                } else {
                        result = machine_thread_get_state(
                                thread, flavor, state, state_count);
                }
        } else if (thread->inspection) {
                result = machine_thread_get_state(
                        thread, flavor, state, state_count);
        } else {
                result = KERN_TERMINATED;
        }

        if (to_user && result == KERN_SUCCESS) {
                result = machine_thread_state_convert_to_user(thread, flavor, state,
                    state_count);
        }

        thread_mtx_unlock(thread);

        return result;
}

/* No prototype, since thread_act_server.h has the _to_user version if KERNEL_SERVER */

kern_return_t
thread_get_state(
        thread_t                thread,
        int                     flavor,
        thread_state_t          state,
        mach_msg_type_number_t  *state_count);

kern_return_t
thread_get_state(
        thread_t                thread,
        int                     flavor,
        thread_state_t          state,          /* pointer to OUT array */
        mach_msg_type_number_t  *state_count)   /* IN/OUT */
{
        return thread_get_state_internal(thread, flavor, state, state_count, FALSE);
}

kern_return_t
thread_get_state_to_user(
        thread_t                thread,
        int                     flavor,
        thread_state_t          state,          /* pointer to OUT array */
        mach_msg_type_number_t  *state_count)   /* IN/OUT */
{
        return thread_get_state_internal(thread, flavor, state, state_count, TRUE);
}

/*
 * Change thread's machine-dependent state. Called with nothing
 * locked. Returns same way.
 */
static inline kern_return_t
thread_set_state_internal(
        thread_t                thread,
        int                     flavor,
        thread_state_t          state,
        mach_msg_type_number_t  state_count,
        boolean_t               from_user)
{
        kern_return_t result = KERN_SUCCESS;

        if (thread == THREAD_NULL) {
                return KERN_INVALID_ARGUMENT;
        }

        thread_mtx_lock(thread);

        if (thread->active) {
                if (from_user) {
                        result = machine_thread_state_convert_from_user(thread, flavor,
                            state, state_count);
                        if (result != KERN_SUCCESS) {
                                goto out;
                        }
                }
                if (thread != current_thread()) {
                        thread_hold(thread);

                        thread_mtx_unlock(thread);

                        if (thread_stop(thread, TRUE)) {
                                thread_mtx_lock(thread);
                                result = machine_thread_set_state(
                                        thread, flavor, state, state_count);
                                thread_unstop(thread);
                        } else {
                                thread_mtx_lock(thread);
                                result = KERN_ABORTED;
                        }

                        thread_release(thread);
                } else {
                        result = machine_thread_set_state(
                                thread, flavor, state, state_count);
                }
        } else {
                result = KERN_TERMINATED;
        }

        if ((result == KERN_SUCCESS) && from_user) {
                extmod_statistics_incr_thread_set_state(thread);
        }

out:
        thread_mtx_unlock(thread);

        return result;
}

/* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
kern_return_t
thread_set_state(
        thread_t                thread,
        int                     flavor,
        thread_state_t          state,
        mach_msg_type_number_t  state_count);

kern_return_t
thread_set_state(
        thread_t                thread,
        int                     flavor,
        thread_state_t          state,
        mach_msg_type_number_t  state_count)
{
        return thread_set_state_internal(thread, flavor, state, state_count, FALSE);
}

kern_return_t
thread_set_state_from_user(
        thread_t                thread,
        int                     flavor,
        thread_state_t          state,
        mach_msg_type_number_t  state_count)
{
        return thread_set_state_internal(thread, flavor, state, state_count, TRUE);
}

kern_return_t
thread_convert_thread_state(
        thread_t                thread,
        int                     direction,
        thread_state_flavor_t   flavor,
        thread_state_t          in_state,       /* pointer to IN array */
        mach_msg_type_number_t  in_state_count,
        thread_state_t          out_state,      /* pointer to OUT array */
        mach_msg_type_number_t  *out_state_count) /* IN/OUT */
{
        kern_return_t kr;
        thread_t to_thread = THREAD_NULL;
        thread_t from_thread = THREAD_NULL;
        mach_msg_type_number_t state_count = in_state_count;

        if (direction != THREAD_CONVERT_THREAD_STATE_TO_SELF &&
            direction != THREAD_CONVERT_THREAD_STATE_FROM_SELF) {
                return KERN_INVALID_ARGUMENT;
        }

        if (thread == THREAD_NULL) {
                return KERN_INVALID_ARGUMENT;
        }

        if (state_count > *out_state_count) {
                return KERN_INSUFFICIENT_BUFFER_SIZE;
        }

        if (direction == THREAD_CONVERT_THREAD_STATE_FROM_SELF) {
                to_thread = thread;
                from_thread = current_thread();
        } else {
                to_thread = current_thread();
                from_thread = thread;
        }

        /* Authenticate and convert thread state to kernel representation */
        kr = machine_thread_state_convert_from_user(from_thread, flavor,
            in_state, state_count);

        /* Return early if one of the threads was JOP-disabled while the other wasn't */
        if (kr != KERN_SUCCESS) {
                return kr;
        }

        /* Convert thread state to target thread user representation */
        kr = machine_thread_state_convert_to_user(to_thread, flavor,
            in_state, &state_count);

        if (kr == KERN_SUCCESS) {
                if (state_count <= *out_state_count) {
                        memcpy(out_state, in_state, state_count * sizeof(uint32_t));
                        *out_state_count = state_count;
                } else {
                        kr = KERN_INSUFFICIENT_BUFFER_SIZE;
                }
        }

        return kr;
}
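
/*
 * Illustrative sketch (not compiled): converting thread state captured
 * from another thread into the current thread's user representation, as
 * a debugger might before handing arm64e register state to userspace.
 * The arm64 flavor and the in-place in/out buffer are assumptions here.
 */
#if 0
static kern_return_t
example_convert_to_self(thread_t remote, arm_thread_state64_t *ts)
{
        mach_msg_type_number_t count = ARM_THREAD_STATE64_COUNT;

        /* TO_SELF: from_thread = remote, to_thread = current_thread() */
        return thread_convert_thread_state(remote,
            THREAD_CONVERT_THREAD_STATE_TO_SELF,
            ARM_THREAD_STATE64,
            (thread_state_t)ts, ARM_THREAD_STATE64_COUNT,
            (thread_state_t)ts, &count);
}
#endif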

/*
 * Kernel-internal "thread" interfaces used outside this file:
 */

/*
 * Initialize (or re-initialize) a thread state. Called from execve
 * with nothing locked, returns same way.
 */
kern_return_t
thread_state_initialize(
        thread_t                thread)
{
        kern_return_t result = KERN_SUCCESS;

        if (thread == THREAD_NULL) {
                return KERN_INVALID_ARGUMENT;
        }

        thread_mtx_lock(thread);

        if (thread->active) {
                if (thread != current_thread()) {
                        thread_hold(thread);

                        thread_mtx_unlock(thread);

                        if (thread_stop(thread, TRUE)) {
                                thread_mtx_lock(thread);
                                result = machine_thread_state_initialize(thread);
                                thread_unstop(thread);
                        } else {
                                thread_mtx_lock(thread);
                                result = KERN_ABORTED;
                        }

                        thread_release(thread);
                } else {
                        result = machine_thread_state_initialize(thread);
                }
        } else {
                result = KERN_TERMINATED;
        }

        thread_mtx_unlock(thread);

        return result;
}

kern_return_t
thread_dup(
        thread_t                target)
{
        thread_t self = current_thread();
        kern_return_t result = KERN_SUCCESS;

        if (target == THREAD_NULL || target == self) {
                return KERN_INVALID_ARGUMENT;
        }

        thread_mtx_lock(target);

        if (target->active) {
                thread_hold(target);

                thread_mtx_unlock(target);

                if (thread_stop(target, TRUE)) {
                        thread_mtx_lock(target);
                        result = machine_thread_dup(self, target, FALSE);

                        if (self->affinity_set != AFFINITY_SET_NULL) {
                                thread_affinity_dup(self, target);
                        }
                        thread_unstop(target);
                } else {
                        thread_mtx_lock(target);
                        result = KERN_ABORTED;
                }

                thread_release(target);
        } else {
                result = KERN_TERMINATED;
        }

        thread_mtx_unlock(target);

        return result;
}


kern_return_t
thread_dup2(
        thread_t                source,
        thread_t                target)
{
        kern_return_t result = KERN_SUCCESS;
        uint32_t active = 0;

        if (source == THREAD_NULL || target == THREAD_NULL || target == source) {
                return KERN_INVALID_ARGUMENT;
        }

        thread_mtx_lock(source);
        active = source->active;
        thread_mtx_unlock(source);

        if (!active) {
                return KERN_TERMINATED;
        }

        thread_mtx_lock(target);

        if (target->active || target->inspection) {
                thread_hold(target);

                thread_mtx_unlock(target);

                if (thread_stop(target, TRUE)) {
                        thread_mtx_lock(target);
                        result = machine_thread_dup(source, target, TRUE);
                        if (source->affinity_set != AFFINITY_SET_NULL) {
                                thread_affinity_dup(source, target);
                        }
                        thread_unstop(target);
                } else {
                        thread_mtx_lock(target);
                        result = KERN_ABORTED;
                }

                thread_release(target);
        } else {
                result = KERN_TERMINATED;
        }

        thread_mtx_unlock(target);

        return result;
}

/*
 * thread_setstatus:
 *
 * Set the status of the specified thread.
 * Called with (and returns with) no locks held.
 */
kern_return_t
thread_setstatus(
        thread_t                thread,
        int                     flavor,
        thread_state_t          tstate,
        mach_msg_type_number_t  count)
{
        return thread_set_state(thread, flavor, tstate, count);
}

kern_return_t
thread_setstatus_from_user(
        thread_t                thread,
        int                     flavor,
        thread_state_t          tstate,
        mach_msg_type_number_t  count)
{
        return thread_set_state_from_user(thread, flavor, tstate, count);
}

/*
 * thread_getstatus:
 *
 * Get the status of the specified thread.
 */
kern_return_t
thread_getstatus(
        thread_t                thread,
        int                     flavor,
        thread_state_t          tstate,
        mach_msg_type_number_t  *count)
{
        return thread_get_state(thread, flavor, tstate, count);
}

kern_return_t
thread_getstatus_to_user(
        thread_t                thread,
        int                     flavor,
        thread_state_t          tstate,
        mach_msg_type_number_t  *count)
{
        return thread_get_state_to_user(thread, flavor, tstate, count);
}

/*
 * Change thread's machine-dependent userspace TSD base.
 * Called with nothing locked. Returns same way.
 */
kern_return_t
thread_set_tsd_base(
        thread_t                thread,
        mach_vm_offset_t        tsd_base)
{
        kern_return_t result = KERN_SUCCESS;

        if (thread == THREAD_NULL) {
                return KERN_INVALID_ARGUMENT;
        }

        thread_mtx_lock(thread);

        if (thread->active) {
                if (thread != current_thread()) {
                        thread_hold(thread);

                        thread_mtx_unlock(thread);

                        if (thread_stop(thread, TRUE)) {
                                thread_mtx_lock(thread);
                                result = machine_thread_set_tsd_base(thread, tsd_base);
                                thread_unstop(thread);
                        } else {
                                thread_mtx_lock(thread);
                                result = KERN_ABORTED;
                        }

                        thread_release(thread);
                } else {
                        result = machine_thread_set_tsd_base(thread, tsd_base);
                }
        } else {
                result = KERN_TERMINATED;
        }

        thread_mtx_unlock(thread);

        return result;
}

/*
 * thread_set_apc_ast:
 *
 * Register the AST_APC callback that handles suspension and
 * termination, if it hasn't been installed already.
 *
 * Called with the thread mutex held.
 */
static void
thread_set_apc_ast(thread_t thread)
{
        spl_t s = splsched();

        thread_lock(thread);
        thread_set_apc_ast_locked(thread);
        thread_unlock(thread);

        splx(s);
}

/*
 * thread_set_apc_ast_locked:
 *
 * Do the work of registering for the AST_APC callback.
 *
 * Called with the thread mutex and scheduling lock held.
 */
static void
thread_set_apc_ast_locked(thread_t thread)
{
        thread_ast_set(thread, AST_APC);

        if (thread == current_thread()) {
                ast_propagate(thread);
        } else {
                processor_t processor = thread->last_processor;

                if (processor != PROCESSOR_NULL &&
                    processor->state == PROCESSOR_RUNNING &&
                    processor->active_thread == thread) {
                        cause_ast_check(processor);
                }
        }
}

/*
 * Activation control support routines internal to this file:
 */

/*
 * thread_suspended
 *
 * Continuation routine for thread suspension. It checks
 * whether there have been any new suspensions. If so, it
 * installs the AST_APC handler again.
 */
__attribute__((noreturn))
static void
thread_suspended(__unused void *parameter, wait_result_t result)
{
        thread_t thread = current_thread();

        thread_mtx_lock(thread);

        if (result == THREAD_INTERRUPTED) {
                thread->suspend_parked = FALSE;
        } else {
                assert(thread->suspend_parked == FALSE);
        }

        if (thread->suspend_count > 0) {
                thread_set_apc_ast(thread);
        }

        thread_mtx_unlock(thread);

        thread_exception_return();
        /*NOTREACHED*/
}

/*
 * thread_apc_ast - handles AST_APC and drives thread suspension and termination.
 * Called with nothing locked. Returns (if it returns) the same way.
 */
void
thread_apc_ast(thread_t thread)
{
        thread_mtx_lock(thread);

        assert(thread->suspend_parked == FALSE);

        spl_t s = splsched();
        thread_lock(thread);

        /* TH_SFLAG_POLLDEPRESS is OK to have here */
        assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);

        thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
        thread_unlock(thread);
        splx(s);

        if (!thread->active) {
                /* Thread is ready to terminate, time to tear it down */
                thread_mtx_unlock(thread);

                thread_terminate_self();
                /*NOTREACHED*/
        }

        /* If we're suspended, go to sleep and wait for someone to wake us up. */
        if (thread->suspend_count > 0) {
                thread->suspend_parked = TRUE;
                assert_wait(&thread->suspend_count,
                    THREAD_ABORTSAFE | THREAD_WAIT_NOREPORT_USER);
                thread_mtx_unlock(thread);

                thread_block(thread_suspended);
                /*NOTREACHED*/
        }

        thread_mtx_unlock(thread);
}
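
/*
 * Putting the pieces above together, the suspension life cycle is:
 *
 *      thread_hold()       posts AST_APC against the target
 *      thread_apc_ast()    runs at the target's next AST boundary; with
 *                          suspend_count > 0 it parks the thread on
 *                          &thread->suspend_count
 *      thread_release()    wakes the parked thread, which resumes in the
 *                          thread_suspended() continuation and either
 *                          re-arms the AST (new suspends arrived) or
 *                          returns to user via thread_exception_return()
 *
 * Termination rides the same AST: once ->active is cleared, the next
 * thread_apc_ast() calls thread_terminate_self() instead of parking.
 */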


/* Prototype, see justification above */
kern_return_t
act_set_state(
        thread_t                thread,
        int                     flavor,
        thread_state_t          state,
        mach_msg_type_number_t  count);

kern_return_t
act_set_state(
        thread_t                thread,
        int                     flavor,
        thread_state_t          state,
        mach_msg_type_number_t  count)
{
        if (thread == current_thread()) {
                return KERN_INVALID_ARGUMENT;
        }

        return thread_set_state(thread, flavor, state, count);
}

kern_return_t
act_set_state_from_user(
        thread_t                thread,
        int                     flavor,
        thread_state_t          state,
        mach_msg_type_number_t  count)
{
        if (thread == current_thread()) {
                return KERN_INVALID_ARGUMENT;
        }

        return thread_set_state_from_user(thread, flavor, state, count);
}

/* Prototype, see justification above */
kern_return_t
act_get_state(
        thread_t                thread,
        int                     flavor,
        thread_state_t          state,
        mach_msg_type_number_t  *count);

kern_return_t
act_get_state(
        thread_t                thread,
        int                     flavor,
        thread_state_t          state,
        mach_msg_type_number_t  *count)
{
        if (thread == current_thread()) {
                return KERN_INVALID_ARGUMENT;
        }

        return thread_get_state(thread, flavor, state, count);
}

kern_return_t
act_get_state_to_user(
        thread_t                thread,
        int                     flavor,
        thread_state_t          state,
        mach_msg_type_number_t  *count)
{
        if (thread == current_thread()) {
                return KERN_INVALID_ARGUMENT;
        }

        return thread_get_state_to_user(thread, flavor, state, count);
}

static void
act_set_ast(
        thread_t                thread,
        ast_t                   ast)
{
        spl_t s = splsched();

        if (thread == current_thread()) {
                thread_ast_set(thread, ast);
                ast_propagate(thread);
        } else {
                processor_t processor;

                thread_lock(thread);
                thread_ast_set(thread, ast);
                processor = thread->last_processor;
                if (processor != PROCESSOR_NULL &&
                    processor->state == PROCESSOR_RUNNING &&
                    processor->active_thread == thread) {
                        cause_ast_check(processor);
                }
                thread_unlock(thread);
        }

        splx(s);
}

/*
 * set AST on thread without causing an AST check
 * and without taking the thread lock
 *
 * If thread is not the current thread, then it may take
 * up until the next context switch or quantum expiration
 * on that thread for it to notice the AST.
 */
static void
act_set_ast_async(thread_t thread,
    ast_t ast)
{
        thread_ast_set(thread, ast);

        if (thread == current_thread()) {
                spl_t s = splsched();
                ast_propagate(thread);
                splx(s);
        }
}

void
act_set_astbsd(
        thread_t                thread)
{
        act_set_ast(thread, AST_BSD);
}

void
act_set_astkevent(thread_t thread, uint16_t bits)
{
        os_atomic_or(&thread->kevent_ast_bits, bits, relaxed);

        /* kevent AST shouldn't send immediate IPIs */
        act_set_ast_async(thread, AST_KEVENT);
}

uint16_t
act_clear_astkevent(thread_t thread, uint16_t bits)
{
        /*
         * avoid the atomic operation if none of the bits is set,
         * which will be the common case.
         */
        uint16_t cur = os_atomic_load(&thread->kevent_ast_bits, relaxed);
        if (cur & bits) {
                cur = os_atomic_andnot_orig(&thread->kevent_ast_bits, bits, relaxed);
        }
        return cur & bits;
}

void
act_set_ast_reset_pcs(thread_t thread)
{
        act_set_ast(thread, AST_RESET_PCS);
}

void
act_set_kperf(
        thread_t                thread)
{
        /* safety check */
        if (thread != current_thread()) {
                if (!ml_get_interrupts_enabled()) {
                        panic("unsafe act_set_kperf operation");
                }
        }

        act_set_ast(thread, AST_KPERF);
}

#if CONFIG_MACF
void
act_set_astmacf(
        thread_t                thread)
{
        act_set_ast(thread, AST_MACF);
}
#endif

void
act_set_astledger(thread_t thread)
{
        act_set_ast(thread, AST_LEDGER);
}

/*
 * The ledger AST may need to be set while already holding
 * the thread lock. This routine skips sending the IPI,
 * allowing us to avoid taking the thread lock.
 *
 * However, it means the targeted thread must context switch
 * to recognize the ledger AST.
 */
void
act_set_astledger_async(thread_t thread)
{
        act_set_ast_async(thread, AST_LEDGER);
}

void
act_set_io_telemetry_ast(thread_t thread)
{
        act_set_ast(thread, AST_TELEMETRY_IO);
}