]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/thread_act.c
xnu-4903.221.2.tar.gz
[apple/xnu.git] / osfmk / kern / thread_act.c
CommitLineData
1c79356b 1/*
39037602 2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_FREE_COPYRIGHT@
30 */
31/*
32 * Copyright (c) 1993 The University of Utah and
33 * the Center for Software Science (CSS). All rights reserved.
34 *
35 * Permission to use, copy, modify and distribute this software and its
36 * documentation is hereby granted, provided that both the copyright
37 * notice and this permission notice appear in all copies of the
38 * software, derivative works or modified versions, and any portions
39 * thereof, and that both notices appear in supporting documentation.
40 *
41 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
42 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
43 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
44 *
45 * CSS requests users of this software to return to css-dist@cs.utah.edu any
46 * improvements that they make and grant CSS redistribution rights.
47 *
48 * Author: Bryan Ford, University of Utah CSS
49 *
91447636 50 * Thread management routines
1c79356b 51 */
d9a64523 52
91447636 53#include <mach/mach_types.h>
1c79356b 54#include <mach/kern_return.h>
91447636
A
55#include <mach/thread_act_server.h>
56
57#include <kern/kern_types.h>
58#include <kern/ast.h>
1c79356b
A
59#include <kern/mach_param.h>
60#include <kern/zalloc.h>
6d2010ae 61#include <kern/extmod_statistics.h>
1c79356b 62#include <kern/thread.h>
1c79356b 63#include <kern/task.h>
1c79356b
A
64#include <kern/sched_prim.h>
65#include <kern/misc_protos.h>
66#include <kern/assert.h>
67#include <kern/exception.h>
68#include <kern/ipc_mig.h>
69#include <kern/ipc_tt.h>
1c79356b
A
70#include <kern/machine.h>
71#include <kern/spl.h>
72#include <kern/syscall_subr.h>
73#include <kern/sync_lock.h>
0b4e3aa0 74#include <kern/processor.h>
91447636 75#include <kern/timer.h>
2d21ac55
A
76#include <kern/affinity.h>
77
5ba3f43e
A
78#include <stdatomic.h>
79
316670eb
A
80#include <security/mac_mach_internal.h>
81
39037602
A
82static void act_abort(thread_t thread);
83
84static void thread_suspended(void *arg, wait_result_t result);
85static void thread_set_apc_ast(thread_t thread);
86static void thread_set_apc_ast_locked(thread_t thread);
1c79356b 87
2d21ac55
A
88/*
89 * Internal routine to mark a thread as started.
3e170ce0 90 * Always called with the thread mutex locked.
2d21ac55
A
91 */
void
thread_start(
	thread_t			thread)
{
	/* Wake the thread out of its creation wait and mark it started. */
	clear_wait(thread, THREAD_AWAKENED);
	thread->started = TRUE;
}
99
100/*
101 * Internal routine to mark a thread as waiting
102 * right after it has been created. The caller
103 * is responsible to call wakeup()/thread_wakeup()
104 * or thread_terminate() to get it going.
105 *
106 * Always called with the thread mutex locked.
107 *
108 * Task and task_threads mutexes also held
109 * (so nobody can set the thread running before
110 * this point)
111 *
112 * Converts TH_UNINT wait to THREAD_INTERRUPTIBLE
113 * to allow termination from this point forward.
114 */
void
thread_start_in_assert_wait(
	thread_t			thread,
	event_t				event,
	wait_interrupt_t	interruptible)
{
	struct waitq *waitq = assert_wait_queue(event);
	wait_result_t wait_result;
	spl_t spl;

	/* Take the waitq lock at splsched so the wait assert and the
	 * 'started' flip below are one atomic step w.r.t. wakeups. */
	spl = splsched();
	waitq_lock(waitq);

	/* clear out startup condition (safe because thread not started yet) */
	thread_lock(thread);
	assert(!thread->started);
	assert((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT));
	thread->state &= ~(TH_WAIT | TH_UNINT);
	thread_unlock(thread);

	/* assert wait interruptibly forever */
	wait_result = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
	                                         interruptible,
	                                         TIMEOUT_URGENCY_SYS_NORMAL,
	                                         TIMEOUT_WAIT_FOREVER,
	                                         TIMEOUT_NO_LEEWAY,
	                                         thread);
	assert (wait_result == THREAD_WAITING);

	/* mark thread started while we still hold the waitq lock */
	thread_lock(thread);
	thread->started = TRUE;
	thread_unlock(thread);

	waitq_unlock(waitq);
	splx(spl);
}
152
1c79356b
A
153/*
154 * Internal routine to terminate a thread.
9bccf70c 155 * Sometimes called with task already locked.
1c79356b
A
156 */
kern_return_t
thread_terminate_internal(
	thread_t			thread)
{
	kern_return_t		result = KERN_SUCCESS;

	thread_mtx_lock(thread);

	if (thread->active) {
		thread->active = FALSE;

		/* Arrange for the thread to notice termination via AST_APC. */
		act_abort(thread);

		if (thread->started)
			clear_wait(thread, THREAD_INTERRUPTED);
		else {
			/* Never ran: start it so it can run to its own teardown. */
			thread_start(thread);
		}
	}
	else
		result = KERN_TERMINATED;

	if (thread->affinity_set != NULL)
		thread_affinity_terminate(thread);

	thread_mtx_unlock(thread);

	/* Wait (without depressing priority) for a remote thread to stop running. */
	if (thread != current_thread() && result == KERN_SUCCESS)
		thread_wait(thread, FALSE);

	return (result);
}
189
190/*
9bccf70c 191 * Terminate a thread.
1c79356b
A
192 */
kern_return_t
thread_terminate(
	thread_t		thread)
{
	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	/* Kernel threads can't be terminated without their own cooperation */
	if (thread->task == kernel_task && thread != current_thread())
		return (KERN_FAILURE);

	kern_return_t result = thread_terminate_internal(thread);

	/*
	 * If a kernel thread is terminating itself, force handle the APC_AST here.
	 * Kernel threads don't pass through the return-to-user AST checking code,
	 * but all threads must finish their own termination in thread_apc_ast.
	 */
	if (thread->task == kernel_task) {
		assert(thread->active == FALSE);
		thread_ast_clear(thread, AST_APC);
		thread_apc_ast(thread);

		/* thread_apc_ast() must not return for an inactive thread. */
		panic("thread_terminate");
		/* NOTREACHED */
	}

	return (result);
}
222
223/*
9bccf70c
A
224 * Suspend execution of the specified thread.
225 * This is a recursive-style suspension of the thread, a count of
226 * suspends is maintained.
1c79356b 227 *
91447636 228 * Called with thread mutex held.
1c79356b
A
229 */
void
thread_hold(thread_t thread)
{
	/* Only the first hold needs to install the AST_APC handler. */
	if (thread->suspend_count++ == 0) {
		thread_set_apc_ast(thread);
		assert(thread->suspend_parked == FALSE);
	}
}
238
239/*
91447636 240 * Decrement internal suspension count, setting thread
1c79356b
A
241 * runnable when count falls to zero.
242 *
39037602
A
243 * Because the wait is abortsafe, we can't be guaranteed that the thread
244 * is currently actually waiting even if suspend_parked is set.
245 *
91447636 246 * Called with thread mutex held.
1c79356b
A
247 */
void
thread_release(thread_t thread)
{
	assertf(thread->suspend_count > 0, "thread %p over-resumed", thread);

	/* fail-safe on non-assert builds */
	if (thread->suspend_count == 0)
		return;

	if (--thread->suspend_count == 0) {
		if (!thread->started) {
			/* Never-started thread: kick it off instead of waking it. */
			thread_start(thread);
		} else if (thread->suspend_parked) {
			/* Wake the thread parked in thread_suspended(). */
			thread->suspend_parked = FALSE;
			thread_wakeup_thread(&thread->suspend_count, thread);
		}
	}
}
266
267kern_return_t
39037602 268thread_suspend(thread_t thread)
1c79356b 269{
39037602 270 kern_return_t result = KERN_SUCCESS;
1c79356b 271
91447636 272 if (thread == THREAD_NULL || thread->task == kernel_task)
9bccf70c
A
273 return (KERN_INVALID_ARGUMENT);
274
91447636 275 thread_mtx_lock(thread);
9bccf70c 276
91447636 277 if (thread->active) {
39037602
A
278 if (thread->user_stop_count++ == 0)
279 thread_hold(thread);
280 } else {
91447636 281 result = KERN_TERMINATED;
39037602 282 }
91447636
A
283
284 thread_mtx_unlock(thread);
285
39037602 286 if (thread != current_thread() && result == KERN_SUCCESS)
39236c6e 287 thread_wait(thread, FALSE);
9bccf70c 288
91447636 289 return (result);
1c79356b
A
290}
291
292kern_return_t
39037602 293thread_resume(thread_t thread)
1c79356b 294{
39037602 295 kern_return_t result = KERN_SUCCESS;
1c79356b 296
91447636 297 if (thread == THREAD_NULL || thread->task == kernel_task)
9bccf70c 298 return (KERN_INVALID_ARGUMENT);
1c79356b 299
91447636 300 thread_mtx_lock(thread);
9bccf70c 301
91447636
A
302 if (thread->active) {
303 if (thread->user_stop_count > 0) {
39037602
A
304 if (--thread->user_stop_count == 0)
305 thread_release(thread);
306 } else {
9bccf70c 307 result = KERN_FAILURE;
39037602
A
308 }
309 } else {
9bccf70c 310 result = KERN_TERMINATED;
39037602 311 }
9bccf70c 312
91447636 313 thread_mtx_unlock(thread);
9bccf70c
A
314
315 return (result);
1c79356b
A
316}
317
1c79356b 318/*
d9a64523 319 * thread_depress_abort_from_user:
1c79356b
A
320 *
321 * Prematurely abort priority depression if there is one.
322 */
323kern_return_t
d9a64523 324thread_depress_abort_from_user(thread_t thread)
1c79356b 325{
d9a64523 326 kern_return_t result;
1c79356b 327
d9a64523 328 if (thread == THREAD_NULL)
1c79356b
A
329 return (KERN_INVALID_ARGUMENT);
330
d9a64523 331 thread_mtx_lock(thread);
1c79356b 332
91447636 333 if (thread->active)
d9a64523 334 result = thread_depress_abort(thread);
91447636
A
335 else
336 result = KERN_TERMINATED;
1c79356b 337
d9a64523 338 thread_mtx_unlock(thread);
1c79356b
A
339
340 return (result);
341}
342
343
344/*
39037602
A
345 * Indicate that the thread should run the AST_APC callback
346 * to detect an abort condition.
9bccf70c 347 *
91447636 348 * Called with thread mutex held.
1c79356b 349 */
static void
act_abort(
	thread_t	thread)
{
	spl_t	s = splsched();

	thread_lock(thread);

	if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
		/* First abort: flag it, arm AST_APC, and break any depression. */
		thread->sched_flags |= TH_SFLAG_ABORT;
		thread_set_apc_ast_locked(thread);
		thread_depress_abort_locked(thread);
	} else {
		/* Already aborting: upgrade a safe abort to an unconditional one. */
		thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;
	}

	thread_unlock(thread);
	splx(s);
}
39037602 369
1c79356b
A
370kern_return_t
371thread_abort(
39037602 372 thread_t thread)
1c79356b 373{
91447636 374 kern_return_t result = KERN_SUCCESS;
1c79356b 375
91447636 376 if (thread == THREAD_NULL)
1c79356b 377 return (KERN_INVALID_ARGUMENT);
9bccf70c 378
91447636 379 thread_mtx_lock(thread);
9bccf70c 380
91447636
A
381 if (thread->active) {
382 act_abort(thread);
383 clear_wait(thread, THREAD_INTERRUPTED);
1c79356b 384 }
91447636
A
385 else
386 result = KERN_TERMINATED;
1c79356b 387
91447636 388 thread_mtx_unlock(thread);
9bccf70c
A
389
390 return (result);
1c79356b
A
391}
392
kern_return_t
thread_abort_safely(
	thread_t		thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		spl_t	s = splsched();

		thread_lock(thread);
		/* If the thread isn't at a safe point (or we couldn't interrupt its
		 * wait), defer the abort: mark it and arm AST_APC instead. */
		if (!thread->at_safe_point ||
			clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
			if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
				thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
				thread_set_apc_ast_locked(thread);
				thread_depress_abort_locked(thread);
			}
		}
		thread_unlock(thread);
		splx(s);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return (result);
}
426
427/*** backward compatibility hacks ***/
428#include <mach/thread_info.h>
429#include <mach/thread_special_ports.h>
430#include <ipc/ipc_port.h>
1c79356b
A
431
432kern_return_t
433thread_info(
3e170ce0 434 thread_t thread,
1c79356b
A
435 thread_flavor_t flavor,
436 thread_info_t thread_info_out,
437 mach_msg_type_number_t *thread_info_count)
438{
1c79356b
A
439 kern_return_t result;
440
91447636 441 if (thread == THREAD_NULL)
1c79356b
A
442 return (KERN_INVALID_ARGUMENT);
443
91447636 444 thread_mtx_lock(thread);
1c79356b 445
3e170ce0 446 if (thread->active || thread->inspection)
91447636
A
447 result = thread_info_internal(
448 thread, flavor, thread_info_out, thread_info_count);
449 else
450 result = KERN_TERMINATED;
1c79356b 451
91447636 452 thread_mtx_unlock(thread);
1c79356b
A
453
454 return (result);
455}
456
d9a64523
A
static inline kern_return_t
thread_get_state_internal(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,			/* pointer to OUT array */
	mach_msg_type_number_t	*state_count,	/*IN/OUT*/
	boolean_t				to_user)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			/* Hold/stop the target so its machine state is stable,
			 * dropping the mutex across the (possibly blocking) stop. */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, FALSE)) {
				thread_mtx_lock(thread);
				result = machine_thread_get_state(
										thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				/* Stop was aborted; nothing to unstop. */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			/* Our own state is trivially stable. */
			result = machine_thread_get_state(
									thread, flavor, state, state_count);
	}
	else if (thread->inspection)
	{
		result = machine_thread_get_state(
									thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	/* Optionally translate kernel representation for userspace consumers. */
	if (to_user && result == KERN_SUCCESS) {
		result = machine_thread_state_convert_to_user(thread, flavor, state,
				state_count);
	}

	thread_mtx_unlock(thread);

	return (result);
}
512
d9a64523
A
/* No prototype, since thread_act_server.h has the _to_user version if KERNEL_SERVER */

kern_return_t
thread_get_state(
	thread_t		thread,
	int				flavor,
	thread_state_t	state,
	mach_msg_type_number_t	*state_count);

/* Kernel-internal variant: returns raw (non-user-converted) machine state. */
kern_return_t
thread_get_state(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,			/* pointer to OUT array */
	mach_msg_type_number_t	*state_count)	/*IN/OUT*/
{
	return thread_get_state_internal(thread, flavor, state, state_count, FALSE);
}
531
/* MIG-facing variant: converts the machine state for userspace delivery. */
kern_return_t
thread_get_state_to_user(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,			/* pointer to OUT array */
	mach_msg_type_number_t	*state_count)	/*IN/OUT*/
{
	return thread_get_state_internal(thread, flavor, state, state_count, TRUE);
}
541
1c79356b 542/*
91447636
A
543 * Change thread's machine-dependent state. Called with nothing
544 * locked. Returns same way.
1c79356b 545 */
d9a64523 546static inline kern_return_t
6d2010ae 547thread_set_state_internal(
39037602 548 thread_t thread,
9bccf70c 549 int flavor,
91447636 550 thread_state_t state,
6d2010ae
A
551 mach_msg_type_number_t state_count,
552 boolean_t from_user)
1c79356b 553{
9bccf70c 554 kern_return_t result = KERN_SUCCESS;
1c79356b 555
91447636 556 if (thread == THREAD_NULL)
1c79356b
A
557 return (KERN_INVALID_ARGUMENT);
558
91447636 559 thread_mtx_lock(thread);
9bccf70c 560
91447636 561 if (thread->active) {
d9a64523
A
562 if (from_user) {
563 result = machine_thread_state_convert_from_user(thread, flavor,
564 state, state_count);
565 if (result != KERN_SUCCESS) {
566 goto out;
567 }
568 }
91447636
A
569 if (thread != current_thread()) {
570 thread_hold(thread);
1c79356b 571
91447636 572 thread_mtx_unlock(thread);
9bccf70c 573
fe8ab488 574 if (thread_stop(thread, TRUE)) {
91447636
A
575 thread_mtx_lock(thread);
576 result = machine_thread_set_state(
577 thread, flavor, state, state_count);
578 thread_unstop(thread);
579 }
580 else {
581 thread_mtx_lock(thread);
582 result = KERN_ABORTED;
583 }
9bccf70c 584
91447636 585 thread_release(thread);
9bccf70c 586 }
91447636
A
587 else
588 result = machine_thread_set_state(
589 thread, flavor, state, state_count);
1c79356b 590 }
91447636
A
591 else
592 result = KERN_TERMINATED;
1c79356b 593
6d2010ae
A
594 if ((result == KERN_SUCCESS) && from_user)
595 extmod_statistics_incr_thread_set_state(thread);
596
d9a64523 597out:
91447636 598 thread_mtx_unlock(thread);
9bccf70c
A
599
600 return (result);
1c79356b 601}
6d2010ae
A
602
/* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
kern_return_t
thread_set_state(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	state_count);

/* Kernel-internal variant: applies state without user conversion. */
kern_return_t
thread_set_state(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	state_count)
{
	return thread_set_state_internal(thread, flavor, state, state_count, FALSE);
}
91447636 620
6d2010ae
A
/* MIG-facing variant: state is converted/validated as user-supplied. */
kern_return_t
thread_set_state_from_user(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	state_count)
{
	return thread_set_state_internal(thread, flavor, state, state_count, TRUE);
}
91447636 630
1c79356b 631/*
91447636
A
632 * Kernel-internal "thread" interfaces used outside this file:
633 */
634
635/* Initialize (or re-initialize) a thread state. Called from execve
636 * with nothing locked, returns same way.
1c79356b
A
637 */
kern_return_t
thread_state_initialize(
	thread_t				thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			/* Hold/stop the target so the reset can't race its execution. */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_state_initialize( thread );
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_state_initialize( thread );
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
677
1c79356b
A
678
/*
 * Copy the current thread's machine state (and affinity, if any)
 * into 'target'.  Used on the fork path.
 */
kern_return_t
thread_dup(
	thread_t	target)
{
	thread_t	self = current_thread();
	kern_return_t	result = KERN_SUCCESS;

	if (target == THREAD_NULL || target == self)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(target);

	if (target->active) {
		/* Stop the target so its state can be overwritten safely. */
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(self, target, FALSE);

			if (self->affinity_set != AFFINITY_SET_NULL)
				thread_affinity_dup(self, target);
			thread_unstop(target);
		}
		else {
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(target);

	return (result);
}
718
719
39037602
A
/*
 * Like thread_dup(), but copies from an arbitrary 'source' thread
 * rather than the caller.  Source need only be active at the time
 * of the snapshot check; target may also be under inspection.
 */
kern_return_t
thread_dup2(
	thread_t	source,
	thread_t	target)
{
	kern_return_t	result = KERN_SUCCESS;
	uint32_t	active = 0;

	if (source == THREAD_NULL || target == THREAD_NULL || target == source)
		return (KERN_INVALID_ARGUMENT);

	/* Snapshot source liveness; only target is held/stopped below. */
	thread_mtx_lock(source);
	active = source->active;
	thread_mtx_unlock(source);

	if (!active) {
		return KERN_TERMINATED;
	}

	thread_mtx_lock(target);

	if (target->active || target->inspection) {
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(source, target, TRUE);
			if (source->affinity_set != AFFINITY_SET_NULL)
				thread_affinity_dup(source, target);
			thread_unstop(target);
		}
		else {
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(target);

	return (result);
}
767
1c79356b
A
768/*
769 * thread_setstatus:
770 *
771 * Set the status of the specified thread.
772 * Called with (and returns with) no locks held.
773 */
/* Thin compatibility wrapper over thread_set_state(). */
kern_return_t
thread_setstatus(
	thread_t		thread,
	int				flavor,
	thread_state_t	tstate,
	mach_msg_type_number_t	count)
{

	return (thread_set_state(thread, flavor, tstate, count));
}
784
d9a64523
A
/* Compatibility wrapper; input is treated as user-supplied state. */
kern_return_t
thread_setstatus_from_user(
	thread_t		thread,
	int				flavor,
	thread_state_t	tstate,
	mach_msg_type_number_t	count)
{

	return (thread_set_state_from_user(thread, flavor, tstate, count));
}
795
1c79356b
A
796/*
797 * thread_getstatus:
798 *
799 * Get the status of the specified thread.
800 */
/* Thin compatibility wrapper over thread_get_state(). */
kern_return_t
thread_getstatus(
	thread_t		thread,
	int				flavor,
	thread_state_t	tstate,
	mach_msg_type_number_t	*count)
{
	return (thread_get_state(thread, flavor, tstate, count));
}
810
d9a64523
A
/* Compatibility wrapper; output is converted for userspace delivery. */
kern_return_t
thread_getstatus_to_user(
	thread_t		thread,
	int				flavor,
	thread_state_t	tstate,
	mach_msg_type_number_t	*count)
{
	return (thread_get_state_to_user(thread, flavor, tstate, count));
}
820
fe8ab488
A
821/*
822 * Change thread's machine-dependent userspace TSD base.
823 * Called with nothing locked. Returns same way.
824 */
kern_return_t
thread_set_tsd_base(
	thread_t			thread,
	mach_vm_offset_t	tsd_base)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			/* Stop a remote target before touching its TSD base. */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_tsd_base(thread, tsd_base);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_set_tsd_base(thread, tsd_base);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
865
1c79356b 866/*
39037602 867 * thread_set_apc_ast:
1c79356b 868 *
39037602
A
869 * Register the AST_APC callback that handles suspension and
870 * termination, if it hasn't been installed already.
1c79356b 871 *
39037602 872 * Called with the thread mutex held.
1c79356b 873 */
39037602
A
static void
thread_set_apc_ast(thread_t thread)
{
	/* Take the scheduling lock at splsched for the locked variant. */
	spl_t s = splsched();

	thread_lock(thread);
	thread_set_apc_ast_locked(thread);
	thread_unlock(thread);

	splx(s);
}
885
886/*
39037602 887 * thread_set_apc_ast_locked:
91447636 888 *
39037602 889 * Do the work of registering for the AST_APC callback.
1c79356b 890 *
39037602 891 * Called with the thread mutex and scheduling lock held.
1c79356b 892 */
39037602
A
static void
thread_set_apc_ast_locked(thread_t thread)
{
	thread_ast_set(thread, AST_APC);

	if (thread == current_thread()) {
		/* We'll notice the AST on our own way out. */
		ast_propagate(thread);
	} else {
		processor_t processor = thread->last_processor;

		/* Poke the processor only if the target is actually on core. */
		if (processor != PROCESSOR_NULL &&
		    processor->state == PROCESSOR_RUNNING &&
		    processor->active_thread == thread) {
			cause_ast_check(processor);
		}
	}
}
1c79356b 910
1c79356b
A
911/*
912 * Activation control support routines internal to this file:
3e170ce0 913 *
1c79356b
A
914 */
915
1c79356b 916/*
39037602 917 * thread_suspended
1c79356b 918 *
39037602 919 * Continuation routine for thread suspension. It checks
1c79356b 920 * to see whether there has been any new suspensions. If so, it
d9a64523 921 * installs the AST_APC handler again.
1c79356b 922 */
39037602
A
__attribute__((noreturn))
static void
thread_suspended(__unused void *parameter, wait_result_t result)
{
	thread_t thread = current_thread();

	thread_mtx_lock(thread);

	/* An interrupted (abortsafe) wait means we were unparked forcibly. */
	if (result == THREAD_INTERRUPTED)
		thread->suspend_parked = FALSE;
	else
		assert(thread->suspend_parked == FALSE);

	/* Still suspended: re-arm AST_APC so thread_apc_ast parks us again. */
	if (thread->suspend_count > 0)
		thread_set_apc_ast(thread);

	thread_mtx_unlock(thread);

	thread_exception_return();
	/*NOTREACHED*/
}
944
945/*
39037602
A
946 * thread_apc_ast - handles AST_APC and drives thread suspension and termination.
947 * Called with nothing locked. Returns (if it returns) the same way.
1c79356b
A
948 */
void
thread_apc_ast(thread_t thread)
{
	thread_mtx_lock(thread);

	assert(thread->suspend_parked == FALSE);

	spl_t s = splsched();
	thread_lock(thread);

	/* TH_SFLAG_POLLDEPRESS is OK to have here */
	assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);

	/* Consume any pending abort flags before deciding what to do. */
	thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
	thread_unlock(thread);
	splx(s);

	if (!thread->active) {
		/* Thread is ready to terminate, time to tear it down */
		thread_mtx_unlock(thread);

		thread_terminate_self();
		/*NOTREACHED*/
	}

	/* If we're suspended, go to sleep and wait for someone to wake us up. */
	if (thread->suspend_count > 0) {
		thread->suspend_parked = TRUE;
		assert_wait(&thread->suspend_count,
				THREAD_ABORTSAFE | THREAD_WAIT_NOREPORT_USER);
		thread_mtx_unlock(thread);

		thread_block(thread_suspended);
		/*NOTREACHED*/
	}

	thread_mtx_unlock(thread);
}
987
6d2010ae
A
/* Prototype, see justification above */
kern_return_t
act_set_state(
	thread_t  thread,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t count);

/* MIG entry: set another thread's state (self is disallowed). */
kern_return_t
act_set_state(
	thread_t thread,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_set_state(thread, flavor, state, count));

}
1009
6d2010ae
A
/* MIG entry: as act_set_state, but state is user-supplied and converted. */
kern_return_t
act_set_state_from_user(
	thread_t thread,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_set_state_from_user(thread, flavor, state, count));

}
1023
d9a64523
A
/* Prototype, see justification above */
kern_return_t
act_get_state(
	thread_t  thread,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t *count);

/* MIG entry: get another thread's state (self is disallowed). */
kern_return_t
act_get_state(
	thread_t thread,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t *count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_get_state(thread, flavor, state, count));
}
1044
d9a64523
A
/* MIG entry: as act_get_state, with user-facing state conversion. */
kern_return_t
act_get_state_to_user(
	thread_t thread,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t *count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_get_state_to_user(thread, flavor, state, count));
}
1057
316670eb
A
/* Set 'ast' on 'thread' and make sure it is noticed promptly. */
static void
act_set_ast(
	    thread_t	thread,
	    ast_t	ast)
{
	spl_t s = splsched();

	if (thread == current_thread()) {
		thread_ast_set(thread, ast);
		ast_propagate(thread);
	} else {
		processor_t processor;

		/* IPI the target's processor only if it's running there now. */
		thread_lock(thread);
		thread_ast_set(thread, ast);
		processor = thread->last_processor;
		if ( processor != PROCESSOR_NULL            &&
		     processor->state == PROCESSOR_RUNNING  &&
		     processor->active_thread == thread     )
			cause_ast_check(processor);
		thread_unlock(thread);
	}

	splx(s);
}
1083
5c9f4661
A
1084/*
1085 * set AST on thread without causing an AST check
1086 * and without taking the thread lock
1087 *
1088 * If thread is not the current thread, then it may take
1089 * up until the next context switch or quantum expiration
1090 * on that thread for it to notice the AST.
1091 */
static void
act_set_ast_async(thread_t thread,
		  ast_t  ast)
{
	thread_ast_set(thread, ast);

	/* Only self-targeted ASTs can be propagated without the thread lock. */
	if (thread == current_thread()) {
		spl_t s = splsched();
		ast_propagate(thread);
		splx(s);
	}
}
1104
316670eb
A
/* Request the BSD AST callout on 'thread'. */
void
act_set_astbsd(
	thread_t	thread)
{
	act_set_ast( thread, AST_BSD );
}
1111
5ba3f43e
A
void
act_set_astkevent(thread_t thread, uint16_t bits)
{
	/* Accumulate kevent bits atomically; no lock is taken here. */
	atomic_fetch_or(&thread->kevent_ast_bits, bits);

	/* kevent AST shouldn't send immediate IPIs */
	act_set_ast_async(thread, AST_KEVENT);
}
1120
316670eb
A
1121void
1122act_set_kperf(
1123 thread_t thread)
1124{
1125 /* safety check */
1126 if (thread != current_thread())
1127 if( !ml_get_interrupts_enabled() )
1128 panic("unsafe act_set_kperf operation");
1129
1130 act_set_ast( thread, AST_KPERF );
1131}
1132
#if CONFIG_MACF
/* Request the MAC framework AST on 'thread'. */
void
act_set_astmacf(
	thread_t	thread)
{
	act_set_ast( thread, AST_MACF);
}
#endif
3e170ce0
A
1141
/* Request the ledger AST on 'thread' (may IPI the target). */
void
act_set_astledger(thread_t thread)
{
	act_set_ast(thread, AST_LEDGER);
}
1147
5c9f4661
A
1148/*
1149 * The ledger AST may need to be set while already holding
1150 * the thread lock. This routine skips sending the IPI,
1151 * allowing us to avoid the lock hold.
1152 *
1153 * However, it means the targeted thread must context switch
1154 * to recognize the ledger AST.
1155 */
void
act_set_astledger_async(thread_t thread)
{
	/* Lock-free variant; see block comment above for the tradeoff. */
	act_set_ast_async(thread, AST_LEDGER);
}
1161
39037602
A
/* Request the I/O telemetry AST on 'thread'. */
void
act_set_io_telemetry_ast(thread_t thread)
{
	act_set_ast(thread, AST_TELEMETRY_IO);
}
3e170ce0 1167