/* osfmk/kern/thread_act.c (xnu-7195.101.1) */
/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Center for Software Science (CSS). All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 * Author: Bryan Ford, University of Utah CSS
 *
 * Thread management routines
 */

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/thread_act_server.h>

#include <kern/kern_types.h>
#include <kern/ast.h>
#include <kern/mach_param.h>
#include <kern/zalloc.h>
#include <kern/extmod_statistics.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/exception.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/machine.h>
#include <kern/spl.h>
#include <kern/syscall_subr.h>
#include <kern/sync_lock.h>
#include <kern/processor.h>
#include <kern/timer.h>
#include <kern/affinity.h>
#include <kern/host.h>

#include <stdatomic.h>

#include <security/mac_mach_internal.h>

static void act_abort(thread_t thread);

static void thread_suspended(void *arg, wait_result_t result);
static void thread_set_apc_ast(thread_t thread);
static void thread_set_apc_ast_locked(thread_t thread);
/*
 * Internal routine to mark a thread as started.
 * Always called with the thread mutex locked.
 */
void
thread_start(
    thread_t thread)
{
    clear_wait(thread, THREAD_AWAKENED);
    thread->started = TRUE;
}

/*
 * Internal routine to mark a thread as waiting
 * right after it has been created. The caller
 * is responsible for calling wakeup()/thread_wakeup()
 * or thread_terminate() to get it going.
 *
 * Always called with the thread mutex locked.
 *
 * The task and task_threads mutexes are also held
 * (so nobody can set the thread running before
 * this point).
 *
 * Converts the TH_UNINT wait to THREAD_INTERRUPTIBLE
 * to allow termination from this point forward.
 */
void
thread_start_in_assert_wait(
    thread_t thread,
    event_t event,
    wait_interrupt_t interruptible)
{
    struct waitq *waitq = assert_wait_queue(event);
    wait_result_t wait_result;
    spl_t spl;

    spl = splsched();
    waitq_lock(waitq);

    /* clear out startup condition (safe because thread not started yet) */
    thread_lock(thread);
    assert(!thread->started);
    assert((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT));
    thread->state &= ~(TH_WAIT | TH_UNINT);
    thread_unlock(thread);

    /* assert wait interruptibly forever */
    wait_result = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
        interruptible,
        TIMEOUT_URGENCY_SYS_NORMAL,
        TIMEOUT_WAIT_FOREVER,
        TIMEOUT_NO_LEEWAY,
        thread);
    assert(wait_result == THREAD_WAITING);

    /* mark thread started while we still hold the waitq lock */
    thread_lock(thread);
    thread->started = TRUE;
    thread_unlock(thread);

    waitq_unlock(waitq);
    splx(spl);
}
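
/*
 * Usage sketch (illustrative only; `event` and the creation path are
 * assumptions, not part of this file): a creator typically parks a newly
 * created thread on an event before publishing it, then wakes it once
 * setup is complete:
 *
 *      thread_start_in_assert_wait(new_thread, event, THREAD_INTERRUPTIBLE);
 *      // ... publish new_thread ...
 *      thread_wakeup(event);   // or thread_terminate(new_thread) on failure
 */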

/*
 * Internal routine to terminate a thread.
 * Sometimes called with the task already locked.
 */
kern_return_t
thread_terminate_internal(
    thread_t thread,
    thread_terminate_options_t options)
{
    kern_return_t result = KERN_SUCCESS;
    boolean_t test_pin_bit = false;

    thread_mtx_lock(thread);

    if (thread->active) {
        thread->active = FALSE;

        act_abort(thread);

        if (thread->started) {
            clear_wait(thread, THREAD_INTERRUPTED);
        } else {
            thread_start(thread);
        }
        /* This bit can be reliably tested only if the thread is still active */
        test_pin_bit = (options == TH_TERMINATE_OPTION_UNPIN);
    } else {
        result = KERN_TERMINATED;
    }

    if (thread->affinity_set != NULL) {
        thread_affinity_terminate(thread);
    }

    /*
     * <rdar://problem/53562036> thread_terminate shouldn't be allowed on pthread
     * Until thread_terminate is disallowed for pthreads, always unpin the pinned
     * port when the thread is being terminated.
     */
    ipc_thread_port_unpin(thread->ith_self, test_pin_bit);

    thread_mtx_unlock(thread);

    if (thread != current_thread() && result == KERN_SUCCESS) {
        thread_wait(thread, FALSE);
    }

    return result;
}

/*
 * Terminate a thread.
 */
kern_return_t
thread_terminate(
    thread_t thread)
{
    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    /* Kernel threads can't be terminated without their own cooperation */
    if (thread->task == kernel_task && thread != current_thread()) {
        return KERN_FAILURE;
    }

    kern_return_t result = thread_terminate_internal(thread, TH_TERMINATE_OPTION_NONE);

    /*
     * If a kernel thread is terminating itself, force handle the APC_AST here.
     * Kernel threads don't pass through the return-to-user AST checking code,
     * but all threads must finish their own termination in thread_apc_ast.
     */
    if (thread->task == kernel_task) {
        assert(thread->active == FALSE);
        thread_ast_clear(thread, AST_APC);
        thread_apc_ast(thread);

        panic("thread_terminate");
        /* NOTREACHED */
    }

    return result;
}
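
/*
 * Usage sketch (illustrative only; the worker function is an assumption,
 * not part of this file): a kernel thread that has finished its work tears
 * itself down by terminating the current thread. The call does not return,
 * because thread_apc_ast() ends in thread_terminate_self().
 *
 *      static void
 *      worker_continuation(void *param, wait_result_t wr)
 *      {
 *          // ... do work ...
 *          thread_terminate(current_thread());
 *          // NOTREACHED
 *      }
 */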

kern_return_t
thread_terminate_pinned(
    thread_t thread)
{
    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    assert(thread->task != kernel_task);

    kern_return_t result = thread_terminate_internal(thread, TH_TERMINATE_OPTION_UNPIN);
    return result;
}

/*
 * Suspend execution of the specified thread.
 * This is a recursive-style suspension of the thread; a count of
 * suspends is maintained.
 *
 * Called with the thread mutex held.
 */
void
thread_hold(thread_t thread)
{
    if (thread->suspend_count++ == 0) {
        thread_set_apc_ast(thread);
        assert(thread->suspend_parked == FALSE);
    }
}

/*
 * Decrement the internal suspension count, setting the thread
 * runnable when the count falls to zero.
 *
 * Because the wait is abortsafe, we can't be guaranteed that the thread
 * is currently actually waiting even if suspend_parked is set.
 *
 * Called with the thread mutex held.
 */
void
thread_release(thread_t thread)
{
    assertf(thread->suspend_count > 0, "thread %p over-resumed", thread);

    /* fail-safe on non-assert builds */
    if (thread->suspend_count == 0) {
        return;
    }

    if (--thread->suspend_count == 0) {
        if (!thread->started) {
            thread_start(thread);
        } else if (thread->suspend_parked) {
            thread->suspend_parked = FALSE;
            thread_wakeup_thread(&thread->suspend_count, thread);
        }
    }
}
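
/*
 * Usage sketch (illustrative only): thread_hold()/thread_release() are
 * always paired under the thread mutex, typically to keep a thread out of
 * user mode while some other operation is in flight. This is exactly the
 * shape used by the state-mutation routines later in this file:
 *
 *      thread_mtx_lock(thread);
 *      thread_hold(thread);
 *      thread_mtx_unlock(thread);
 *      // ... operate on the held thread (e.g. thread_stop()/thread_unstop()) ...
 *      thread_mtx_lock(thread);
 *      thread_release(thread);
 *      thread_mtx_unlock(thread);
 */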

kern_return_t
thread_suspend(thread_t thread)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL || thread->task == kernel_task) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread->user_stop_count++ == 0) {
            thread_hold(thread);
        }
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    if (thread != current_thread() && result == KERN_SUCCESS) {
        thread_wait(thread, FALSE);
    }

    return result;
}

kern_return_t
thread_resume(thread_t thread)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL || thread->task == kernel_task) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread->user_stop_count > 0) {
            if (--thread->user_stop_count == 0) {
                thread_release(thread);
            }
        } else {
            result = KERN_FAILURE;
        }
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return result;
}
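
/*
 * Usage sketch (illustrative only; this is the user-space view of the MIG
 * routines backed by the two functions above, not code from this file):
 * a debugger-style client balances every suspend with a resume, since
 * user_stop_count is a true counter.
 *
 *      thread_act_t port;      // thread port obtained via task_threads()
 *      kern_return_t kr;
 *
 *      kr = thread_suspend(port);      // stop count 0 -> 1, thread held
 *      // ... inspect the thread ...
 *      kr = thread_resume(port);       // stop count 1 -> 0, thread released
 */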

/*
 * thread_depress_abort_from_user:
 *
 * Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort_from_user(thread_t thread)
{
    kern_return_t result;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        result = thread_depress_abort(thread);
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return result;
}


/*
 * Indicate that the thread should run the AST_APC callback
 * to detect an abort condition.
 *
 * Called with the thread mutex held.
 */
static void
act_abort(
    thread_t thread)
{
    spl_t s = splsched();

    thread_lock(thread);

    if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
        thread->sched_flags |= TH_SFLAG_ABORT;
        thread_set_apc_ast_locked(thread);
        thread_depress_abort_locked(thread);
    } else {
        thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;
    }

    thread_unlock(thread);
    splx(s);
}

kern_return_t
thread_abort(
    thread_t thread)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        act_abort(thread);
        clear_wait(thread, THREAD_INTERRUPTED);
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return result;
}

kern_return_t
thread_abort_safely(
    thread_t thread)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        spl_t s = splsched();

        thread_lock(thread);
        if (!thread->at_safe_point ||
            clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
            if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
                thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
                thread_set_apc_ast_locked(thread);
                thread_depress_abort_locked(thread);
            }
        }
        thread_unlock(thread);
        splx(s);
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return result;
}
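
/*
 * Note (descriptive, not from the original source): thread_abort()
 * interrupts a wait unconditionally, while thread_abort_safely() only
 * interrupts a wait that is marked as being at a safe point; otherwise it
 * defers the abort to the AST_APC handler. A typical user-space caller
 * aborts a thread it has already suspended:
 *
 *      thread_suspend(port);
 *      thread_abort_safely(port);      // nudge it out of any syscall wait
 *      // ... adjust state with thread_set_state() ...
 *      thread_resume(port);
 */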

/*** backward compatibility hacks ***/
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <ipc/ipc_port.h>

kern_return_t
thread_info(
    thread_t thread,
    thread_flavor_t flavor,
    thread_info_t thread_info_out,
    mach_msg_type_number_t *thread_info_count)
{
    kern_return_t result;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active || thread->inspection) {
        result = thread_info_internal(
            thread, flavor, thread_info_out, thread_info_count);
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return result;
}

static inline kern_return_t
thread_get_state_internal(
    thread_t thread,
    int flavor,
    thread_state_t state,                   /* pointer to OUT array */
    mach_msg_type_number_t *state_count,    /* IN/OUT */
    boolean_t to_user)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread != current_thread()) {
            thread_hold(thread);

            thread_mtx_unlock(thread);

            if (thread_stop(thread, FALSE)) {
                thread_mtx_lock(thread);
                result = machine_thread_get_state(
                    thread, flavor, state, state_count);
                thread_unstop(thread);
            } else {
                thread_mtx_lock(thread);
                result = KERN_ABORTED;
            }

            thread_release(thread);
        } else {
            result = machine_thread_get_state(
                thread, flavor, state, state_count);
        }
    } else if (thread->inspection) {
        result = machine_thread_get_state(
            thread, flavor, state, state_count);
    } else {
        result = KERN_TERMINATED;
    }

    if (to_user && result == KERN_SUCCESS) {
        result = machine_thread_state_convert_to_user(thread, flavor, state,
            state_count);
    }

    thread_mtx_unlock(thread);

    return result;
}

/* No prototype, since thread_act_server.h has the _to_user version if KERNEL_SERVER */

kern_return_t
thread_get_state(
    thread_t thread,
    int flavor,
    thread_state_t state,
    mach_msg_type_number_t *state_count);

kern_return_t
thread_get_state(
    thread_t thread,
    int flavor,
    thread_state_t state,                   /* pointer to OUT array */
    mach_msg_type_number_t *state_count)    /* IN/OUT */
{
    return thread_get_state_internal(thread, flavor, state, state_count, FALSE);
}

kern_return_t
thread_get_state_to_user(
    thread_t thread,
    int flavor,
    thread_state_t state,                   /* pointer to OUT array */
    mach_msg_type_number_t *state_count)    /* IN/OUT */
{
    return thread_get_state_internal(thread, flavor, state, state_count, TRUE);
}
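
/*
 * Usage sketch (illustrative only; this is the user-space caller of the
 * MIG routine backed by thread_get_state_to_user(), shown for x86_64 and
 * not part of this file):
 *
 *      x86_thread_state64_t ts;
 *      mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
 *      kern_return_t kr;
 *
 *      kr = thread_get_state(port, x86_THREAD_STATE64,
 *          (thread_state_t)&ts, &count);
 *      if (kr == KERN_SUCCESS) {
 *          // ts.__rip, ts.__rsp, etc. now hold the thread's registers
 *      }
 */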

/*
 * Change thread's machine-dependent state. Called with nothing
 * locked. Returns same way.
 */
static inline kern_return_t
thread_set_state_internal(
    thread_t thread,
    int flavor,
    thread_state_t state,
    mach_msg_type_number_t state_count,
    boolean_t from_user)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        if (from_user) {
            result = machine_thread_state_convert_from_user(thread, flavor,
                state, state_count);
            if (result != KERN_SUCCESS) {
                goto out;
            }
        }
        if (thread != current_thread()) {
            thread_hold(thread);

            thread_mtx_unlock(thread);

            if (thread_stop(thread, TRUE)) {
                thread_mtx_lock(thread);
                result = machine_thread_set_state(
                    thread, flavor, state, state_count);
                thread_unstop(thread);
            } else {
                thread_mtx_lock(thread);
                result = KERN_ABORTED;
            }

            thread_release(thread);
        } else {
            result = machine_thread_set_state(
                thread, flavor, state, state_count);
        }
    } else {
        result = KERN_TERMINATED;
    }

    if ((result == KERN_SUCCESS) && from_user) {
        extmod_statistics_incr_thread_set_state(thread);
    }

out:
    thread_mtx_unlock(thread);

    return result;
}

/* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
kern_return_t
thread_set_state(
    thread_t thread,
    int flavor,
    thread_state_t state,
    mach_msg_type_number_t state_count);

kern_return_t
thread_set_state(
    thread_t thread,
    int flavor,
    thread_state_t state,
    mach_msg_type_number_t state_count)
{
    return thread_set_state_internal(thread, flavor, state, state_count, FALSE);
}

kern_return_t
thread_set_state_from_user(
    thread_t thread,
    int flavor,
    thread_state_t state,
    mach_msg_type_number_t state_count)
{
    return thread_set_state_internal(thread, flavor, state, state_count, TRUE);
}

kern_return_t
thread_convert_thread_state(
    thread_t thread,
    int direction,
    thread_state_flavor_t flavor,
    thread_state_t in_state,                /* pointer to IN array */
    mach_msg_type_number_t in_state_count,
    thread_state_t out_state,               /* pointer to OUT array */
    mach_msg_type_number_t *out_state_count) /* IN/OUT */
{
    kern_return_t kr;
    thread_t to_thread = THREAD_NULL;
    thread_t from_thread = THREAD_NULL;
    mach_msg_type_number_t state_count = in_state_count;

    if (direction != THREAD_CONVERT_THREAD_STATE_TO_SELF &&
        direction != THREAD_CONVERT_THREAD_STATE_FROM_SELF) {
        return KERN_INVALID_ARGUMENT;
    }

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (state_count > *out_state_count) {
        return KERN_INSUFFICIENT_BUFFER_SIZE;
    }

    if (direction == THREAD_CONVERT_THREAD_STATE_FROM_SELF) {
        to_thread = thread;
        from_thread = current_thread();
    } else {
        to_thread = current_thread();
        from_thread = thread;
    }

    /* Authenticate and convert thread state to kernel representation */
    kr = machine_thread_state_convert_from_user(from_thread, flavor,
        in_state, state_count);

    /* Return early if one of the threads was JOP-disabled while the other wasn't */
    if (kr != KERN_SUCCESS) {
        return kr;
    }

    /* Convert thread state to target thread user representation */
    kr = machine_thread_state_convert_to_user(to_thread, flavor,
        in_state, &state_count);

    if (kr == KERN_SUCCESS) {
        if (state_count <= *out_state_count) {
            memcpy(out_state, in_state, state_count * sizeof(uint32_t));
            *out_state_count = state_count;
        } else {
            kr = KERN_INSUFFICIENT_BUFFER_SIZE;
        }
    }

    return kr;
}
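
/*
 * Note (descriptive, not from the original source): on arm64e, thread
 * state captured from one thread cannot simply be handed to another,
 * because pointer-authentication (JOP) signatures in the register state
 * are diversified per thread. thread_convert_thread_state() re-signs the
 * state: FROM_SELF converts state captured on the current thread into
 * `thread`'s user representation, and TO_SELF does the reverse.
 */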

/*
 * Kernel-internal "thread" interfaces used outside this file:
 */

/* Initialize (or re-initialize) a thread's state. Called from execve
 * with nothing locked; returns the same way.
 */
kern_return_t
thread_state_initialize(
    thread_t thread)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread != current_thread()) {
            thread_hold(thread);

            thread_mtx_unlock(thread);

            if (thread_stop(thread, TRUE)) {
                thread_mtx_lock(thread);
                result = machine_thread_state_initialize(thread);
                thread_unstop(thread);
            } else {
                thread_mtx_lock(thread);
                result = KERN_ABORTED;
            }

            thread_release(thread);
        } else {
            result = machine_thread_state_initialize(thread);
        }
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return result;
}

kern_return_t
thread_dup(
    thread_t target)
{
    thread_t self = current_thread();
    kern_return_t result = KERN_SUCCESS;

    if (target == THREAD_NULL || target == self) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(target);

    if (target->active) {
        thread_hold(target);

        thread_mtx_unlock(target);

        if (thread_stop(target, TRUE)) {
            thread_mtx_lock(target);
            result = machine_thread_dup(self, target, FALSE);

            if (self->affinity_set != AFFINITY_SET_NULL) {
                thread_affinity_dup(self, target);
            }
            thread_unstop(target);
        } else {
            thread_mtx_lock(target);
            result = KERN_ABORTED;
        }

        thread_release(target);
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(target);

    return result;
}


kern_return_t
thread_dup2(
    thread_t source,
    thread_t target)
{
    kern_return_t result = KERN_SUCCESS;
    uint32_t active = 0;

    if (source == THREAD_NULL || target == THREAD_NULL || target == source) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(source);
    active = source->active;
    thread_mtx_unlock(source);

    if (!active) {
        return KERN_TERMINATED;
    }

    thread_mtx_lock(target);

    if (target->active || target->inspection) {
        thread_hold(target);

        thread_mtx_unlock(target);

        if (thread_stop(target, TRUE)) {
            thread_mtx_lock(target);
            result = machine_thread_dup(source, target, TRUE);
            if (source->affinity_set != AFFINITY_SET_NULL) {
                thread_affinity_dup(source, target);
            }
            thread_unstop(target);
        } else {
            thread_mtx_lock(target);
            result = KERN_ABORTED;
        }

        thread_release(target);
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(target);

    return result;
}

/*
 * thread_setstatus:
 *
 * Set the status of the specified thread.
 * Called with (and returns with) no locks held.
 */
kern_return_t
thread_setstatus(
    thread_t thread,
    int flavor,
    thread_state_t tstate,
    mach_msg_type_number_t count)
{
    return thread_set_state(thread, flavor, tstate, count);
}

kern_return_t
thread_setstatus_from_user(
    thread_t thread,
    int flavor,
    thread_state_t tstate,
    mach_msg_type_number_t count)
{
    return thread_set_state_from_user(thread, flavor, tstate, count);
}

/*
 * thread_getstatus:
 *
 * Get the status of the specified thread.
 */
kern_return_t
thread_getstatus(
    thread_t thread,
    int flavor,
    thread_state_t tstate,
    mach_msg_type_number_t *count)
{
    return thread_get_state(thread, flavor, tstate, count);
}

kern_return_t
thread_getstatus_to_user(
    thread_t thread,
    int flavor,
    thread_state_t tstate,
    mach_msg_type_number_t *count)
{
    return thread_get_state_to_user(thread, flavor, tstate, count);
}

/*
 * Change thread's machine-dependent userspace TSD base.
 * Called with nothing locked. Returns same way.
 */
kern_return_t
thread_set_tsd_base(
    thread_t thread,
    mach_vm_offset_t tsd_base)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread != current_thread()) {
            thread_hold(thread);

            thread_mtx_unlock(thread);

            if (thread_stop(thread, TRUE)) {
                thread_mtx_lock(thread);
                result = machine_thread_set_tsd_base(thread, tsd_base);
                thread_unstop(thread);
            } else {
                thread_mtx_lock(thread);
                result = KERN_ABORTED;
            }

            thread_release(thread);
        } else {
            result = machine_thread_set_tsd_base(thread, tsd_base);
        }
    } else {
        result = KERN_TERMINATED;
    }

    thread_mtx_unlock(thread);

    return result;
}
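
/*
 * Pattern note (descriptive, not from the original source): the routines
 * above all follow the same hold/stop/mutate/unstop/release shape; only
 * the mutation step differs (`mutate` below is an assumed callback, not
 * an API in this file):
 *
 *      thread_mtx_lock(thread);
 *      if (thread->active) {
 *          if (thread != current_thread()) {
 *              thread_hold(thread);            // block user-mode resumption
 *              thread_mtx_unlock(thread);
 *              if (thread_stop(thread, TRUE)) { // wait for it to stop
 *                  thread_mtx_lock(thread);
 *                  result = mutate(thread);
 *                  thread_unstop(thread);
 *              } else {
 *                  thread_mtx_lock(thread);
 *                  result = KERN_ABORTED;      // the stop was interrupted
 *              }
 *              thread_release(thread);
 *          } else {
 *              result = mutate(thread);        // safe to do on self directly
 *          }
 *      } else {
 *          result = KERN_TERMINATED;
 *      }
 *      thread_mtx_unlock(thread);
 */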

/*
 * thread_set_apc_ast:
 *
 * Register the AST_APC callback that handles suspension and
 * termination, if it hasn't been installed already.
 *
 * Called with the thread mutex held.
 */
static void
thread_set_apc_ast(thread_t thread)
{
    spl_t s = splsched();

    thread_lock(thread);
    thread_set_apc_ast_locked(thread);
    thread_unlock(thread);

    splx(s);
}

/*
 * thread_set_apc_ast_locked:
 *
 * Do the work of registering for the AST_APC callback.
 *
 * Called with the thread mutex and scheduling lock held.
 */
static void
thread_set_apc_ast_locked(thread_t thread)
{
    thread_ast_set(thread, AST_APC);

    if (thread == current_thread()) {
        ast_propagate(thread);
    } else {
        processor_t processor = thread->last_processor;

        if (processor != PROCESSOR_NULL &&
            processor->state == PROCESSOR_RUNNING &&
            processor->active_thread == thread) {
            cause_ast_check(processor);
        }
    }
}

/*
 * Activation control support routines internal to this file:
 */

/*
 * thread_suspended
 *
 * Continuation routine for thread suspension. It checks
 * to see whether there have been any new suspensions. If so, it
 * installs the AST_APC handler again.
 */
__attribute__((noreturn))
static void
thread_suspended(__unused void *parameter, wait_result_t result)
{
    thread_t thread = current_thread();

    thread_mtx_lock(thread);

    if (result == THREAD_INTERRUPTED) {
        thread->suspend_parked = FALSE;
    } else {
        assert(thread->suspend_parked == FALSE);
    }

    if (thread->suspend_count > 0) {
        thread_set_apc_ast(thread);
    }

    thread_mtx_unlock(thread);

    thread_exception_return();
    /*NOTREACHED*/
}

/*
 * thread_apc_ast - handles AST_APC and drives thread suspension and termination.
 * Called with nothing locked. Returns (if it returns) the same way.
 */
void
thread_apc_ast(thread_t thread)
{
    thread_mtx_lock(thread);

    assert(thread->suspend_parked == FALSE);

    spl_t s = splsched();
    thread_lock(thread);

    /* TH_SFLAG_POLLDEPRESS is OK to have here */
    assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);

    thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
    thread_unlock(thread);
    splx(s);

    if (!thread->active) {
        /* Thread is ready to terminate, time to tear it down */
        thread_mtx_unlock(thread);

        thread_terminate_self();
        /*NOTREACHED*/
    }

    /* If we're suspended, go to sleep and wait for someone to wake us up. */
    if (thread->suspend_count > 0) {
        thread->suspend_parked = TRUE;
        assert_wait(&thread->suspend_count,
            THREAD_ABORTSAFE | THREAD_WAIT_NOREPORT_USER);
        thread_mtx_unlock(thread);

        thread_block(thread_suspended);
        /*NOTREACHED*/
    }

    thread_mtx_unlock(thread);
}
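
/*
 * Lifecycle note (descriptive, not from the original source): the
 * suspension machinery ties the pieces above together as follows:
 *
 *      thread_suspend() -> thread_hold() -> thread_set_apc_ast()
 *              sets AST_APC on the target thread
 *      target takes the AST -> thread_apc_ast()
 *              parks on &thread->suspend_count
 *              (continuation: thread_suspended)
 *      thread_resume() -> thread_release() -> thread_wakeup_thread()
 *              unparks the target, which re-checks the suspend
 *              count before returning to user mode
 */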


/* Prototype, see justification above */
kern_return_t
act_set_state(
    thread_t thread,
    int flavor,
    thread_state_t state,
    mach_msg_type_number_t count);

kern_return_t
act_set_state(
    thread_t thread,
    int flavor,
    thread_state_t state,
    mach_msg_type_number_t count)
{
    if (thread == current_thread()) {
        return KERN_INVALID_ARGUMENT;
    }

    return thread_set_state(thread, flavor, state, count);
}

kern_return_t
act_set_state_from_user(
    thread_t thread,
    int flavor,
    thread_state_t state,
    mach_msg_type_number_t count)
{
    if (thread == current_thread()) {
        return KERN_INVALID_ARGUMENT;
    }

    return thread_set_state_from_user(thread, flavor, state, count);
}

/* Prototype, see justification above */
kern_return_t
act_get_state(
    thread_t thread,
    int flavor,
    thread_state_t state,
    mach_msg_type_number_t *count);

kern_return_t
act_get_state(
    thread_t thread,
    int flavor,
    thread_state_t state,
    mach_msg_type_number_t *count)
{
    if (thread == current_thread()) {
        return KERN_INVALID_ARGUMENT;
    }

    return thread_get_state(thread, flavor, state, count);
}

kern_return_t
act_get_state_to_user(
    thread_t thread,
    int flavor,
    thread_state_t state,
    mach_msg_type_number_t *count)
{
    if (thread == current_thread()) {
        return KERN_INVALID_ARGUMENT;
    }

    return thread_get_state_to_user(thread, flavor, state, count);
}

static void
act_set_ast(
    thread_t thread,
    ast_t ast)
{
    spl_t s = splsched();

    if (thread == current_thread()) {
        thread_ast_set(thread, ast);
        ast_propagate(thread);
    } else {
        processor_t processor;

        thread_lock(thread);
        thread_ast_set(thread, ast);
        processor = thread->last_processor;
        if (processor != PROCESSOR_NULL &&
            processor->state == PROCESSOR_RUNNING &&
            processor->active_thread == thread) {
            cause_ast_check(processor);
        }
        thread_unlock(thread);
    }

    splx(s);
}

/*
 * Set an AST on a thread without causing an AST check
 * and without taking the thread lock.
 *
 * If the thread is not the current thread, it may take
 * until the next context switch or quantum expiration
 * on that thread for it to notice the AST.
 */
static void
act_set_ast_async(thread_t thread,
    ast_t ast)
{
    thread_ast_set(thread, ast);

    if (thread == current_thread()) {
        spl_t s = splsched();
        ast_propagate(thread);
        splx(s);
    }
}

void
act_set_astbsd(
    thread_t thread)
{
    act_set_ast(thread, AST_BSD);
}

void
act_set_astkevent(thread_t thread, uint16_t bits)
{
    os_atomic_or(&thread->kevent_ast_bits, bits, relaxed);

    /* kevent AST shouldn't send immediate IPIs */
    act_set_ast_async(thread, AST_KEVENT);
}

uint16_t
act_clear_astkevent(thread_t thread, uint16_t bits)
{
    /*
     * avoid the atomic operation if none of the bits is set,
     * which will be the common case.
     */
    uint16_t cur = os_atomic_load(&thread->kevent_ast_bits, relaxed);
    if (cur & bits) {
        cur = os_atomic_andnot_orig(&thread->kevent_ast_bits, bits, relaxed);
    }
    return cur & bits;
}

void
act_set_ast_reset_pcs(thread_t thread)
{
    act_set_ast(thread, AST_RESET_PCS);
}

void
act_set_kperf(
    thread_t thread)
{
    /* safety check */
    if (thread != current_thread()) {
        if (!ml_get_interrupts_enabled()) {
            panic("unsafe act_set_kperf operation");
        }
    }

    act_set_ast(thread, AST_KPERF);
}

#if CONFIG_MACF
void
act_set_astmacf(
    thread_t thread)
{
    act_set_ast(thread, AST_MACF);
}
#endif

void
act_set_astledger(thread_t thread)
{
    act_set_ast(thread, AST_LEDGER);
}

/*
 * The ledger AST may need to be set while already holding
 * the thread lock. This routine skips sending the IPI,
 * allowing us to avoid the lock hold.
 *
 * However, it means the targeted thread must context switch
 * to recognize the ledger AST.
 */
void
act_set_astledger_async(thread_t thread)
{
    act_set_ast_async(thread, AST_LEDGER);
}

void
act_set_io_telemetry_ast(thread_t thread)
{
    act_set_ast(thread, AST_TELEMETRY_IO);
}