]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/thread_act.c
xnu-4570.41.2.tar.gz
[apple/xnu.git] / osfmk / kern / thread_act.c
CommitLineData
1c79356b 1/*
39037602 2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_FREE_COPYRIGHT@
30 */
31/*
32 * Copyright (c) 1993 The University of Utah and
33 * the Center for Software Science (CSS). All rights reserved.
34 *
35 * Permission to use, copy, modify and distribute this software and its
36 * documentation is hereby granted, provided that both the copyright
37 * notice and this permission notice appear in all copies of the
38 * software, derivative works or modified versions, and any portions
39 * thereof, and that both notices appear in supporting documentation.
40 *
41 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
42 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
43 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
44 *
45 * CSS requests users of this software to return to css-dist@cs.utah.edu any
46 * improvements that they make and grant CSS redistribution rights.
47 *
48 * Author: Bryan Ford, University of Utah CSS
49 *
91447636 50 * Thread management routines
1c79356b 51 */
91447636 52#include <mach/mach_types.h>
1c79356b 53#include <mach/kern_return.h>
91447636
A
54#include <mach/thread_act_server.h>
55
56#include <kern/kern_types.h>
57#include <kern/ast.h>
1c79356b
A
58#include <kern/mach_param.h>
59#include <kern/zalloc.h>
6d2010ae 60#include <kern/extmod_statistics.h>
1c79356b 61#include <kern/thread.h>
1c79356b 62#include <kern/task.h>
1c79356b
A
63#include <kern/sched_prim.h>
64#include <kern/misc_protos.h>
65#include <kern/assert.h>
66#include <kern/exception.h>
67#include <kern/ipc_mig.h>
68#include <kern/ipc_tt.h>
1c79356b
A
69#include <kern/machine.h>
70#include <kern/spl.h>
71#include <kern/syscall_subr.h>
72#include <kern/sync_lock.h>
0b4e3aa0 73#include <kern/processor.h>
91447636 74#include <kern/timer.h>
2d21ac55
A
75#include <kern/affinity.h>
76
5ba3f43e
A
77#include <stdatomic.h>
78
316670eb
A
79#include <security/mac_mach_internal.h>
80
39037602
A
81static void act_abort(thread_t thread);
82
83static void thread_suspended(void *arg, wait_result_t result);
84static void thread_set_apc_ast(thread_t thread);
85static void thread_set_apc_ast_locked(thread_t thread);
1c79356b 86
2d21ac55
A
/*
 * Internal routine to mark a thread as started.
 * Always called with the thread mutex locked.
 */
void
thread_start(
	thread_t			thread)
{
	/* Wake the thread from its creation wait and mark it started. */
	clear_wait(thread, THREAD_AWAKENED);
	thread->started = TRUE;
}
98
99/*
100 * Internal routine to mark a thread as waiting
101 * right after it has been created. The caller
102 * is responsible to call wakeup()/thread_wakeup()
103 * or thread_terminate() to get it going.
104 *
105 * Always called with the thread mutex locked.
106 *
107 * Task and task_threads mutexes also held
108 * (so nobody can set the thread running before
109 * this point)
110 *
111 * Converts TH_UNINT wait to THREAD_INTERRUPTIBLE
112 * to allow termination from this point forward.
113 */
/*
 * Place a newly created thread directly onto a wait queue and then
 * mark it started, all under the waitq lock so nothing can run it
 * in between.  See the block comment above for caller requirements
 * (thread mutex, task and task_threads mutexes held).
 */
void
thread_start_in_assert_wait(
	thread_t			thread,
	event_t				event,
	wait_interrupt_t	interruptible)
{
	struct waitq *waitq = assert_wait_queue(event);
	wait_result_t wait_result;
	spl_t spl;

	spl = splsched();
	waitq_lock(waitq);

	/* clear out startup condition (safe because thread not started yet) */
	thread_lock(thread);
	assert(!thread->started);
	assert((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT));
	thread->state &= ~(TH_WAIT | TH_UNINT);
	thread_unlock(thread);

	/* assert wait interruptibly forever */
	wait_result = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
	                                         interruptible,
	                                         TIMEOUT_URGENCY_SYS_NORMAL,
	                                         TIMEOUT_WAIT_FOREVER,
	                                         TIMEOUT_NO_LEEWAY,
	                                         thread);
	assert (wait_result == THREAD_WAITING);

	/* mark thread started while we still hold the waitq lock */
	thread_lock(thread);
	thread->started = TRUE;
	thread_unlock(thread);

	waitq_unlock(waitq);
	splx(spl);
}
151
1c79356b
A
/*
 * Internal routine to terminate a thread.
 * Sometimes called with task already locked.
 *
 * Returns KERN_SUCCESS if this call performed the deactivation,
 * KERN_TERMINATED if the thread was already inactive.
 */
kern_return_t
thread_terminate_internal(
	thread_t			thread)
{
	kern_return_t		result = KERN_SUCCESS;

	thread_mtx_lock(thread);

	if (thread->active) {
		thread->active = FALSE;

		/* Request the AST_APC abort so the thread tears itself down. */
		act_abort(thread);

		if (thread->started)
			clear_wait(thread, THREAD_INTERRUPTED);
		else {
			/* Never-started thread: kick it into life so it can exit. */
			thread_start(thread);
		}
	}
	else
		result = KERN_TERMINATED;

	if (thread->affinity_set != NULL)
		thread_affinity_terminate(thread);

	thread_mtx_unlock(thread);

	/* Wait for a remote target to get off CPU before returning. */
	if (thread != current_thread() && result == KERN_SUCCESS)
		thread_wait(thread, FALSE);

	return (result);
}
188
/*
 * Terminate a thread.
 *
 * For kernel threads this only succeeds when called on the current
 * thread; termination then completes inline and never returns.
 */
kern_return_t
thread_terminate(
	thread_t		thread)
{
	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	/* Kernel threads can't be terminated without their own cooperation */
	if (thread->task == kernel_task && thread != current_thread())
		return (KERN_FAILURE);

	kern_return_t result = thread_terminate_internal(thread);

	/*
	 * If a kernel thread is terminating itself, force handle the APC_AST here.
	 * Kernel threads don't pass through the return-to-user AST checking code,
	 * but all threads must finish their own termination in thread_apc_ast.
	 */
	if (thread->task == kernel_task) {
		assert(thread->active == FALSE);
		thread_ast_clear(thread, AST_APC);
		thread_apc_ast(thread);

		/* thread_apc_ast() must not return for an inactive thread. */
		panic("thread_terminate");
		/* NOTREACHED */
	}

	return (result);
}
221
/*
 * Suspend execution of the specified thread.
 * This is a recursive-style suspension of the thread, a count of
 * suspends is maintained.
 *
 * Called with thread mutex held.
 */
void
thread_hold(thread_t thread)
{
	/* Only the first hold installs the AST_APC suspension callback. */
	if (thread->suspend_count++ == 0) {
		thread_set_apc_ast(thread);
		assert(thread->suspend_parked == FALSE);
	}
}
237
/*
 * Decrement internal suspension count, setting thread
 * runnable when count falls to zero.
 *
 * Because the wait is abortsafe, we can't be guaranteed that the thread
 * is currently actually waiting even if suspend_parked is set.
 *
 * Called with thread mutex held.
 */
void
thread_release(thread_t thread)
{
	assertf(thread->suspend_count > 0, "thread %p over-resumed", thread);

	/* fail-safe on non-assert builds */
	if (thread->suspend_count == 0)
		return;

	if (--thread->suspend_count == 0) {
		if (!thread->started) {
			/* Never-started thread: start it instead of waking it. */
			thread_start(thread);
		} else if (thread->suspend_parked) {
			/* Wake the thread parked in thread_apc_ast/thread_suspended. */
			thread->suspend_parked = FALSE;
			thread_wakeup_thread(&thread->suspend_count, thread);
		}
	}
}
265
/*
 * User-visible suspend: maintains a separate user_stop_count and
 * converts the first user stop into an internal thread_hold().
 * Kernel-task threads may not be suspended this way.
 */
kern_return_t
thread_suspend(thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread->user_stop_count++ == 0)
			thread_hold(thread);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	/* Don't return until a remote target has actually stopped running. */
	if (thread != current_thread() && result == KERN_SUCCESS)
		thread_wait(thread, FALSE);

	return (result);
}
290
/*
 * User-visible resume: undoes one thread_suspend().  Returns
 * KERN_FAILURE if the thread was not user-suspended,
 * KERN_TERMINATED if it is no longer active.
 */
kern_return_t
thread_resume(thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread->user_stop_count > 0) {
			/* Last user stop released: drop the internal hold too. */
			if (--thread->user_stop_count == 0)
				thread_release(thread);
		} else {
			result = KERN_FAILURE;
		}
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return (result);
}
316
1c79356b
A
/*
 * thread_depress_abort:
 *
 * Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort(
	thread_t				thread)
{
	kern_return_t			result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active)
		result = thread_depress_abort_internal(thread);
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
342
343
/*
 * Indicate that the thread should run the AST_APC callback
 * to detect an abort condition.
 *
 * Called with thread mutex held.
 */
static void
act_abort(
	thread_t	thread)
{
	spl_t	s = splsched();

	thread_lock(thread);

	if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
		thread->sched_flags |= TH_SFLAG_ABORT;
		thread_set_apc_ast_locked(thread);
	} else {
		/* Already aborting: escalate a safely-abort to an unconditional one. */
		thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;
	}

	thread_unlock(thread);
	splx(s);
}
39037602 368
1c79356b
A
/*
 * Unconditionally abort the target thread: flag the abort and
 * interrupt any wait it is currently in.
 */
kern_return_t
thread_abort(
	thread_t	thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		act_abort(thread);
		clear_wait(thread, THREAD_INTERRUPTED);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
391
/*
 * Abort the target thread only at a safe point.  If the thread is not
 * at a safe point (or its wait cannot be cleared), arm a deferred
 * abort via the AST_APC mechanism instead.
 */
kern_return_t
thread_abort_safely(
	thread_t		thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		spl_t s = splsched();

		thread_lock(thread);
		if (!thread->at_safe_point ||
			clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
			if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
				/* Defer: mark abort+abortsafely and request the APC AST. */
				thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
				thread_set_apc_ast_locked(thread);
			}
		}
		thread_unlock(thread);
		splx(s);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return (result);
}
424
425/*** backward compatibility hacks ***/
426#include <mach/thread_info.h>
427#include <mach/thread_special_ports.h>
428#include <ipc/ipc_port.h>
1c79356b
A
429
/*
 * Return information about the target thread.  Also permitted on
 * inactive threads that are under inspection (e.g. corpses).
 */
kern_return_t
thread_info(
	thread_t				thread,
	thread_flavor_t			flavor,
	thread_info_t			thread_info_out,
	mach_msg_type_number_t	*thread_info_count)
{
	kern_return_t			result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active || thread->inspection)
		result = thread_info_internal(
						thread, flavor, thread_info_out, thread_info_count);
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
454
/*
 * Fetch the target thread's machine-dependent state.
 *
 * A remote target is held and stopped first so the machine state is
 * stable while it is read; the current thread can read its own state
 * directly.  Inspection-only (inactive) threads are also readable.
 */
kern_return_t
thread_get_state(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,			/* pointer to OUT array */
	mach_msg_type_number_t	*state_count)	/*IN/OUT*/
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			/* Drop the mutex: thread_stop() may block waiting for the target. */
			thread_mtx_unlock(thread);

			if (thread_stop(thread, FALSE)) {
				thread_mtx_lock(thread);
				result = machine_thread_get_state(
										thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				/* Stop was interrupted — report abort, don't unstop. */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_get_state(
									thread, flavor, state, state_count);
	}
	else if (thread->inspection)
	{
		result = machine_thread_get_state(
									thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
504
/*
 * Change thread's machine-dependent state.  Called with nothing
 * locked.  Returns same way.
 *
 * from_user distinguishes userspace-originated calls so that
 * external-modification statistics are recorded for them.
 */
static kern_return_t
thread_set_state_internal(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	state_count,
	boolean_t				from_user)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			/* Hold and stop a remote target so its state can be replaced. */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_state(
										thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_set_state(
									thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	/* Account successful user-originated modifications of another thread. */
	if ((result == KERN_SUCCESS) && from_user)
		extmod_statistics_incr_thread_set_state(thread);

	thread_mtx_unlock(thread);

	return (result);
}
6d2010ae
A
557
558/* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
559kern_return_t
560thread_set_state(
39037602 561 thread_t thread,
6d2010ae
A
562 int flavor,
563 thread_state_t state,
564 mach_msg_type_number_t state_count);
565
566kern_return_t
567thread_set_state(
39037602 568 thread_t thread,
6d2010ae
A
569 int flavor,
570 thread_state_t state,
571 mach_msg_type_number_t state_count)
572{
573 return thread_set_state_internal(thread, flavor, state, state_count, FALSE);
574}
91447636 575
6d2010ae
A
576kern_return_t
577thread_set_state_from_user(
39037602 578 thread_t thread,
6d2010ae
A
579 int flavor,
580 thread_state_t state,
581 mach_msg_type_number_t state_count)
582{
583 return thread_set_state_internal(thread, flavor, state, state_count, TRUE);
584}
91447636 585
/*
 * Kernel-internal "thread" interfaces used outside this file:
 */

/* Initialize (or re-initialize) a thread state.  Called from execve
 * with nothing locked, returns same way.
 */
kern_return_t
thread_state_initialize(
	thread_t				thread)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			/* Hold and stop a remote target before resetting its state. */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_state_initialize( thread );
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_state_initialize( thread );
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
632
1c79356b
A
633
/*
 * Copy the current thread's machine state into the target thread,
 * along with the caller's affinity set if one exists.
 * Called with nothing locked.
 */
kern_return_t
thread_dup(
	thread_t	target)
{
	thread_t	self = current_thread();
	kern_return_t	result = KERN_SUCCESS;

	if (target == THREAD_NULL || target == self)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(target);

	if (target->active) {
		/* Hold and stop the target so the copy sees a stable state. */
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(self, target);

			if (self->affinity_set != AFFINITY_SET_NULL)
				thread_affinity_dup(self, target);
			thread_unstop(target);
		}
		else {
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(target);

	return (result);
}
672
673
39037602
A
/*
 * Copy machine state from an arbitrary source thread into the target.
 * Unlike thread_dup(), the source need not be the current thread, and
 * the target may be under inspection rather than active.
 */
kern_return_t
thread_dup2(
	thread_t	source,
	thread_t	target)
{
	kern_return_t	result = KERN_SUCCESS;
	uint32_t	active = 0;

	if (source == THREAD_NULL || target == THREAD_NULL || target == source)
		return (KERN_INVALID_ARGUMENT);

	/* Snapshot the source's liveness; we only hold its mutex briefly. */
	thread_mtx_lock(source);
	active = source->active;
	thread_mtx_unlock(source);

	if (!active) {
		return KERN_TERMINATED;
	}

	thread_mtx_lock(target);

	if (target->active || target->inspection) {
		/* Hold and stop the target so the copy sees a stable state. */
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(source, target);
			if (source->affinity_set != AFFINITY_SET_NULL)
				thread_affinity_dup(source, target);
			thread_unstop(target);
		}
		else {
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(target);

	return (result);
}
721
1c79356b
A
722/*
723 * thread_setstatus:
724 *
725 * Set the status of the specified thread.
726 * Called with (and returns with) no locks held.
727 */
728kern_return_t
729thread_setstatus(
39037602 730 thread_t thread,
9bccf70c
A
731 int flavor,
732 thread_state_t tstate,
1c79356b
A
733 mach_msg_type_number_t count)
734{
9bccf70c 735
91447636 736 return (thread_set_state(thread, flavor, tstate, count));
1c79356b
A
737}
738
739/*
740 * thread_getstatus:
741 *
742 * Get the status of the specified thread.
743 */
744kern_return_t
745thread_getstatus(
39037602 746 thread_t thread,
9bccf70c
A
747 int flavor,
748 thread_state_t tstate,
1c79356b
A
749 mach_msg_type_number_t *count)
750{
91447636 751 return (thread_get_state(thread, flavor, tstate, count));
1c79356b
A
752}
753
fe8ab488
A
/*
 * Change thread's machine-dependent userspace TSD base.
 * Called with nothing locked.  Returns same way.
 */
kern_return_t
thread_set_tsd_base(
	thread_t			thread,
	mach_vm_offset_t	tsd_base)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			/* Hold and stop a remote target before touching its TSD base. */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_tsd_base(thread, tsd_base);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_set_tsd_base(thread, tsd_base);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
798
/*
 * thread_set_apc_ast:
 *
 * Register the AST_APC callback that handles suspension and
 * termination, if it hasn't been installed already.
 *
 * Called with the thread mutex held.
 */
static void
thread_set_apc_ast(thread_t thread)
{
	/* Take the scheduling lock, then defer to the _locked variant. */
	spl_t s = splsched();

	thread_lock(thread);
	thread_set_apc_ast_locked(thread);
	thread_unlock(thread);

	splx(s);
}
818
/*
 * thread_set_apc_ast_locked:
 *
 * Do the work of registering for the AST_APC callback.
 *
 * Called with the thread mutex and scheduling lock held.
 */
static void
thread_set_apc_ast_locked(thread_t thread)
{
	/*
	 * Temporarily undepress, so target has
	 * a chance to do locking required to
	 * block itself in thread_suspended.
	 *
	 * Leaves the depress flag set so we can reinstate when it's blocked.
	 */
	if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)
		thread_recompute_sched_pri(thread, TRUE);

	thread_ast_set(thread, AST_APC);

	if (thread == current_thread()) {
		/* Local thread: just propagate the pending AST. */
		ast_propagate(thread);
	} else {
		/* Remote thread: IPI its processor if it is running right now. */
		processor_t processor = thread->last_processor;

		if (processor != PROCESSOR_NULL &&
		    processor->state == PROCESSOR_RUNNING &&
		    processor->active_thread == thread) {
			cause_ast_check(processor);
		}
	}
}
1c79356b 853
1c79356b
A
854/*
855 * Activation control support routines internal to this file:
3e170ce0 856 *
1c79356b
A
857 */
858
/*
 * thread_suspended
 *
 * Continuation routine for thread suspension. It checks
 * to see whether there has been any new suspensions. If so, it
 * installs the AST_APC handler again. Otherwise, it checks to see
 * if the current depression needs to be re-instated (it may have
 * been temporarily removed in order to get to this point in a hurry).
 */
__attribute__((noreturn))
static void
thread_suspended(__unused void *parameter, wait_result_t result)
{
	thread_t thread = current_thread();

	thread_mtx_lock(thread);

	if (result == THREAD_INTERRUPTED)
		thread->suspend_parked = FALSE;
	else
		assert(thread->suspend_parked == FALSE);

	if (thread->suspend_count > 0) {
		/* Still suspended: re-arm the APC AST and park again. */
		thread_set_apc_ast(thread);
	} else {
		spl_t s = splsched();

		thread_lock(thread);
		if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
			/* Re-instate the depression that was lifted to get us here. */
			thread->sched_pri = DEPRESSPRI;
			thread->last_processor->current_pri = thread->sched_pri;
			thread->last_processor->current_perfctl_class = thread_get_perfcontrol_class(thread);

			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
			                      (uintptr_t)thread_tid(thread),
			                      thread->base_pri,
			                      thread->sched_pri,
			                      0, /* eventually, 'reason' */
			                      0);
		}
		thread_unlock(thread);
		splx(s);
	}

	thread_mtx_unlock(thread);

	thread_exception_return();
	/*NOTREACHED*/
}
908
/*
 * thread_apc_ast - handles AST_APC and drives thread suspension and termination.
 * Called with nothing locked.  Returns (if it returns) the same way.
 */
void
thread_apc_ast(thread_t thread)
{
	thread_mtx_lock(thread);

	assert(thread->suspend_parked == FALSE);

	spl_t s = splsched();
	thread_lock(thread);

	/* TH_SFLAG_POLLDEPRESS is OK to have here */
	assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);

	/* Consume any pending abort flags before deciding what to do. */
	thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
	thread_unlock(thread);
	splx(s);

	if (!thread->active) {
		/* Thread is ready to terminate, time to tear it down */
		thread_mtx_unlock(thread);

		thread_terminate_self();
		/*NOTREACHED*/
	}

	/* If we're suspended, go to sleep and wait for someone to wake us up. */
	if (thread->suspend_count > 0) {
		thread->suspend_parked = TRUE;
		assert_wait(&thread->suspend_count, THREAD_ABORTSAFE);
		thread_mtx_unlock(thread);

		/* Block with thread_suspended as the continuation. */
		thread_block(thread_suspended);
		/*NOTREACHED*/
	}

	thread_mtx_unlock(thread);
}
950
6d2010ae
A
951/* Prototype, see justification above */
952kern_return_t
953act_set_state(
954 thread_t thread,
955 int flavor,
956 thread_state_t state,
957 mach_msg_type_number_t count);
958
1c79356b 959kern_return_t
91447636
A
960act_set_state(
961 thread_t thread,
962 int flavor,
963 thread_state_t state,
964 mach_msg_type_number_t count)
1c79356b 965{
91447636
A
966 if (thread == current_thread())
967 return (KERN_INVALID_ARGUMENT);
1c79356b 968
91447636 969 return (thread_set_state(thread, flavor, state, count));
1c79356b
A
970
971}
972
6d2010ae
A
973kern_return_t
974act_set_state_from_user(
975 thread_t thread,
976 int flavor,
977 thread_state_t state,
978 mach_msg_type_number_t count)
979{
980 if (thread == current_thread())
981 return (KERN_INVALID_ARGUMENT);
982
983 return (thread_set_state_from_user(thread, flavor, state, count));
984
985}
986
1c79356b 987kern_return_t
91447636
A
988act_get_state(
989 thread_t thread,
990 int flavor,
991 thread_state_t state,
992 mach_msg_type_number_t *count)
1c79356b 993{
91447636
A
994 if (thread == current_thread())
995 return (KERN_INVALID_ARGUMENT);
1c79356b 996
91447636 997 return (thread_get_state(thread, flavor, state, count));
1c79356b
A
998}
999
316670eb
A
/*
 * Set the given AST bit(s) on the target thread and make sure the
 * target notices: propagate locally, or IPI the processor a remote
 * target is currently running on.
 */
static void
act_set_ast(
	    thread_t	thread,
	    ast_t		ast)
{
	spl_t s = splsched();

	if (thread == current_thread()) {
		thread_ast_set(thread, ast);
		ast_propagate(thread);
	} else {
		processor_t	processor;

		/* Remote target: set the AST under the thread lock, then kick it. */
		thread_lock(thread);
		thread_ast_set(thread, ast);
		processor = thread->last_processor;
		if ( processor != PROCESSOR_NULL			&&
		     processor->state == PROCESSOR_RUNNING	&&
		     processor->active_thread == thread		)
			cause_ast_check(processor);
		thread_unlock(thread);
	}

	splx(s);
}
1025
5c9f4661
A
/*
 * set AST on thread without causing an AST check
 * and without taking the thread lock
 *
 * If thread is not the current thread, then it may take
 * up until the next context switch or quantum expiration
 * on that thread for it to notice the AST.
 */
static void
act_set_ast_async(thread_t  thread,
                  ast_t     ast)
{
	thread_ast_set(thread, ast);

	/* Only the current thread can be propagated to without the lock. */
	if (thread == current_thread()) {
		spl_t s = splsched();
		ast_propagate(thread);
		splx(s);
	}
}
1046
316670eb
A
1047void
1048act_set_astbsd(
1049 thread_t thread)
1050{
1051 act_set_ast( thread, AST_BSD );
1052}
1053
5ba3f43e
A
/*
 * Record pending kevent bits on the thread and arm AST_KEVENT.
 * The bit set is atomic, so callers need not hold the thread lock.
 */
void
act_set_astkevent(thread_t thread, uint16_t bits)
{
	atomic_fetch_or(&thread->kevent_ast_bits, bits);

	/* kevent AST shouldn't send immediate IPIs */
	act_set_ast_async(thread, AST_KEVENT);
}
1062
316670eb
A
/*
 * Request the kperf sampling AST on the target thread.
 */
void
act_set_kperf(
	thread_t	thread)
{
	/* safety check: a remote target requires interrupts enabled,
	 * presumably because act_set_ast() may IPI it — panic otherwise. */
	if (thread != current_thread())
		if( !ml_get_interrupts_enabled() )
			panic("unsafe act_set_kperf operation");

	act_set_ast( thread, AST_KPERF );
}
1074
#if CONFIG_MACF
/*
 * Request the MAC framework AST on the target thread.
 */
void
act_set_astmacf(
	thread_t	thread)
{
	act_set_ast(thread, AST_MACF);
}
#endif
3e170ce0
A
1083
1084void
5c9f4661 1085act_set_astledger(thread_t thread)
3e170ce0
A
1086{
1087 act_set_ast(thread, AST_LEDGER);
1088}
1089
5c9f4661
A
1090/*
1091 * The ledger AST may need to be set while already holding
1092 * the thread lock. This routine skips sending the IPI,
1093 * allowing us to avoid the lock hold.
1094 *
1095 * However, it means the targeted thread must context switch
1096 * to recognize the ledger AST.
1097 */
1098void
1099act_set_astledger_async(thread_t thread)
1100{
1101 act_set_ast_async(thread, AST_LEDGER);
1102}
1103
39037602
A
1104void
1105act_set_io_telemetry_ast(thread_t thread)
1106{
1107 act_set_ast(thread, AST_TELEMETRY_IO);
1108}
3e170ce0 1109