]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/thread_act.c
xnu-6153.121.1.tar.gz
[apple/xnu.git] / osfmk / kern / thread_act.c
CommitLineData
1c79356b 1/*
39037602 2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0a7de745 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
0a7de745 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
0a7de745 17 *
2d21ac55
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
0a7de745 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_FREE_COPYRIGHT@
30 */
31/*
32 * Copyright (c) 1993 The University of Utah and
33 * the Center for Software Science (CSS). All rights reserved.
34 *
35 * Permission to use, copy, modify and distribute this software and its
36 * documentation is hereby granted, provided that both the copyright
37 * notice and this permission notice appear in all copies of the
38 * software, derivative works or modified versions, and any portions
39 * thereof, and that both notices appear in supporting documentation.
40 *
41 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
42 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
43 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
44 *
45 * CSS requests users of this software to return to css-dist@cs.utah.edu any
46 * improvements that they make and grant CSS redistribution rights.
47 *
48 * Author: Bryan Ford, University of Utah CSS
49 *
91447636 50 * Thread management routines
1c79356b 51 */
d9a64523 52
91447636 53#include <mach/mach_types.h>
1c79356b 54#include <mach/kern_return.h>
91447636
A
55#include <mach/thread_act_server.h>
56
57#include <kern/kern_types.h>
58#include <kern/ast.h>
1c79356b
A
59#include <kern/mach_param.h>
60#include <kern/zalloc.h>
6d2010ae 61#include <kern/extmod_statistics.h>
1c79356b 62#include <kern/thread.h>
1c79356b 63#include <kern/task.h>
1c79356b
A
64#include <kern/sched_prim.h>
65#include <kern/misc_protos.h>
66#include <kern/assert.h>
67#include <kern/exception.h>
68#include <kern/ipc_mig.h>
69#include <kern/ipc_tt.h>
1c79356b
A
70#include <kern/machine.h>
71#include <kern/spl.h>
72#include <kern/syscall_subr.h>
73#include <kern/sync_lock.h>
0b4e3aa0 74#include <kern/processor.h>
91447636 75#include <kern/timer.h>
2d21ac55
A
76#include <kern/affinity.h>
77
5ba3f43e
A
78#include <stdatomic.h>
79
316670eb
A
80#include <security/mac_mach_internal.h>
81
39037602
A
82static void act_abort(thread_t thread);
83
84static void thread_suspended(void *arg, wait_result_t result);
85static void thread_set_apc_ast(thread_t thread);
86static void thread_set_apc_ast_locked(thread_t thread);
1c79356b 87
2d21ac55
A
/*
 * Internal routine to mark a thread as started.
 * Always called with the thread mutex locked.
 */
void
thread_start(
	thread_t                thread)
{
	/* Wake the thread out of its creation wait and flag it runnable. */
	clear_wait(thread, THREAD_AWAKENED);
	thread->started = TRUE;
}
99
/*
 * Internal routine to mark a thread as waiting
 * right after it has been created.  The caller
 * is responsible to call wakeup()/thread_wakeup()
 * or thread_terminate() to get it going.
 *
 * Always called with the thread mutex locked.
 *
 * Task and task_threads mutexes also held
 * (so nobody can set the thread running before
 * this point)
 *
 * Converts TH_UNINT wait to THREAD_INTERRUPTIBLE
 * to allow termination from this point forward.
 */
void
thread_start_in_assert_wait(
	thread_t                thread,
	event_t                 event,
	wait_interrupt_t        interruptible)
{
	struct waitq *waitq = assert_wait_queue(event);
	wait_result_t wait_result;
	spl_t spl;

	spl = splsched();
	waitq_lock(waitq);

	/* clear out startup condition (safe because thread not started yet) */
	thread_lock(thread);
	assert(!thread->started);
	assert((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT));
	thread->state &= ~(TH_WAIT | TH_UNINT);
	thread_unlock(thread);

	/* assert wait interruptibly forever */
	wait_result = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
	    interruptible,
	    TIMEOUT_URGENCY_SYS_NORMAL,
	    TIMEOUT_WAIT_FOREVER,
	    TIMEOUT_NO_LEEWAY,
	    thread);
	assert(wait_result == THREAD_WAITING);

	/* mark thread started while we still hold the waitq lock */
	thread_lock(thread);
	thread->started = TRUE;
	thread_unlock(thread);

	waitq_unlock(waitq);
	splx(spl);
}
152
1c79356b
A
/*
 * Internal routine to terminate a thread.
 * Sometimes called with task already locked.
 */
kern_return_t
thread_terminate_internal(
	thread_t                thread)
{
	kern_return_t           result = KERN_SUCCESS;

	thread_mtx_lock(thread);

	if (thread->active) {
		thread->active = FALSE;

		/* Arm the abort so the thread notices at its next AST check. */
		act_abort(thread);

		if (thread->started) {
			clear_wait(thread, THREAD_INTERRUPTED);
		} else {
			/* Never-started thread: kick it so it can run to termination. */
			thread_start(thread);
		}
	} else {
		result = KERN_TERMINATED;
	}

	if (thread->affinity_set != NULL) {
		thread_affinity_terminate(thread);
	}

	thread_mtx_unlock(thread);

	/* Wait for the victim to actually stop running before returning. */
	if (thread != current_thread() && result == KERN_SUCCESS) {
		thread_wait(thread, FALSE);
	}

	return result;
}
191
/*
 * Terminate a thread.
 */
kern_return_t
thread_terminate(
	thread_t                thread)
{
	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Kernel threads can't be terminated without their own cooperation */
	if (thread->task == kernel_task && thread != current_thread()) {
		return KERN_FAILURE;
	}

	kern_return_t result = thread_terminate_internal(thread);

	/*
	 * If a kernel thread is terminating itself, force handle the APC_AST here.
	 * Kernel threads don't pass through the return-to-user AST checking code,
	 * but all threads must finish their own termination in thread_apc_ast.
	 */
	if (thread->task == kernel_task) {
		assert(thread->active == FALSE);
		thread_ast_clear(thread, AST_APC);
		thread_apc_ast(thread);

		/* thread_apc_ast() must not return for a self-terminating thread */
		panic("thread_terminate");
		/* NOTREACHED */
	}

	return result;
}
226
/*
 * Suspend execution of the specified thread.
 * This is a recursive-style suspension of the thread, a count of
 * suspends is maintained.
 *
 * Called with thread mutex held.
 */
void
thread_hold(thread_t thread)
{
	/* Only the first suspender installs the AST_APC handler. */
	if (thread->suspend_count++ == 0) {
		thread_set_apc_ast(thread);
		assert(thread->suspend_parked == FALSE);
	}
}
242
/*
 * Decrement internal suspension count, setting thread
 * runnable when count falls to zero.
 *
 * Because the wait is abortsafe, we can't be guaranteed that the thread
 * is currently actually waiting even if suspend_parked is set.
 *
 * Called with thread mutex held.
 */
void
thread_release(thread_t thread)
{
	assertf(thread->suspend_count > 0, "thread %p over-resumed", thread);

	/* fail-safe on non-assert builds */
	if (thread->suspend_count == 0) {
		return;
	}

	if (--thread->suspend_count == 0) {
		if (!thread->started) {
			/* Never-started thread: releasing it means starting it. */
			thread_start(thread);
		} else if (thread->suspend_parked) {
			/* Wake the thread parked in thread_suspended(). */
			thread->suspend_parked = FALSE;
			thread_wakeup_thread(&thread->suspend_count, thread);
		}
	}
}
271
/*
 * User-level (recursive) suspend of a thread.
 * Kernel-task threads may not be suspended this way.
 */
kern_return_t
thread_suspend(thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		/* First user-level suspend takes the internal hold. */
		if (thread->user_stop_count++ == 0) {
			thread_hold(thread);
		}
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	/* Don't wait on ourselves — we could never stop while running here. */
	if (thread != current_thread() && result == KERN_SUCCESS) {
		thread_wait(thread, FALSE);
	}

	return result;
}
299
/*
 * User-level resume of a thread; pairs with thread_suspend().
 */
kern_return_t
thread_resume(thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread->user_stop_count > 0) {
			/* Last user-level resume drops the internal hold. */
			if (--thread->user_stop_count == 0) {
				thread_release(thread);
			}
		} else {
			/* Resume without a matching suspend. */
			result = KERN_FAILURE;
		}
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
327
/*
 * thread_depress_abort_from_user:
 *
 * Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort_from_user(thread_t thread)
{
	kern_return_t result;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		result = thread_depress_abort(thread);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
354
355
/*
 * Indicate that the thread should run the AST_APC callback
 * to detect an abort condition.
 *
 * Called with thread mutex held.
 */
static void
act_abort(
	thread_t        thread)
{
	spl_t s = splsched();

	thread_lock(thread);

	if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
		thread->sched_flags |= TH_SFLAG_ABORT;
		thread_set_apc_ast_locked(thread);
		thread_depress_abort_locked(thread);
	} else {
		/* Already aborting: upgrade a safe abort to an unconditional one. */
		thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;
	}

	thread_unlock(thread);
	splx(s);
}
39037602 381
1c79356b
A
/*
 * Unconditionally abort the thread: interrupt any wait even if the
 * thread is not at an abort-safe point.
 */
kern_return_t
thread_abort(
	thread_t        thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		act_abort(thread);
		clear_wait(thread, THREAD_INTERRUPTED);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
405
/*
 * Abort the thread only at an abort-safe point; otherwise defer the
 * abort (via TH_SFLAG_ABORTED_MASK + AST_APC) to the next safe point.
 */
kern_return_t
thread_abort_safely(
	thread_t        thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		spl_t s = splsched();

		thread_lock(thread);
		/*
		 * Interrupt the wait now only if the thread is at a safe point
		 * and the wait can actually be cleared; otherwise mark the
		 * abort for later delivery.
		 */
		if (!thread->at_safe_point ||
		    clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
			if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
				thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
				thread_set_apc_ast_locked(thread);
				thread_depress_abort_locked(thread);
			}
		}
		thread_unlock(thread);
		splx(s);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
440
441/*** backward compatibility hacks ***/
442#include <mach/thread_info.h>
443#include <mach/thread_special_ports.h>
444#include <ipc/ipc_port.h>
1c79356b
A
445
/*
 * Return information about the thread for the given flavor.
 */
kern_return_t
thread_info(
	thread_t                thread,
	thread_flavor_t         flavor,
	thread_info_t           thread_info_out,
	mach_msg_type_number_t  *thread_info_count)
{
	kern_return_t result;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	/* Inspection-only threads (e.g. in a corpse) may still be queried. */
	if (thread->active || thread->inspection) {
		result = thread_info_internal(
			thread, flavor, thread_info_out, thread_info_count);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
472
d9a64523
A
/*
 * Fetch machine-dependent state for the thread.  When to_user is set,
 * convert the result to the user-visible representation on success.
 * Called with nothing locked; returns the same way.
 */
static inline kern_return_t
thread_get_state_internal(
	thread_t                thread,
	int                     flavor,
	thread_state_t          state,          /* pointer to OUT array */
	mach_msg_type_number_t  *state_count,   /*IN/OUT*/
	boolean_t               to_user)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			/* Hold and stop the target so its state is stable while read. */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, FALSE)) {
				thread_mtx_lock(thread);
				result = machine_thread_get_state(
					thread, flavor, state, state_count);
				thread_unstop(thread);
			} else {
				/* Stop was aborted; no stable state to return. */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			/* Our own state is trivially stable. */
			result = machine_thread_get_state(
				thread, flavor, state, state_count);
		}
	} else if (thread->inspection) {
		result = machine_thread_get_state(
			thread, flavor, state, state_count);
	} else {
		result = KERN_TERMINATED;
	}

	/* Optionally convert kernel-format state to the user representation. */
	if (to_user && result == KERN_SUCCESS) {
		result = machine_thread_state_convert_to_user(thread, flavor, state,
		    state_count);
	}

	thread_mtx_unlock(thread);

	return result;
}
526
d9a64523
A
527/* No prototype, since thread_act_server.h has the _to_user version if KERNEL_SERVER */
528
529kern_return_t
530thread_get_state(
0a7de745
A
531 thread_t thread,
532 int flavor,
533 thread_state_t state,
534 mach_msg_type_number_t *state_count);
d9a64523
A
535
536kern_return_t
537thread_get_state(
0a7de745
A
538 thread_t thread,
539 int flavor,
540 thread_state_t state, /* pointer to OUT array */
541 mach_msg_type_number_t *state_count) /*IN/OUT*/
d9a64523
A
542{
543 return thread_get_state_internal(thread, flavor, state, state_count, FALSE);
544}
545
546kern_return_t
547thread_get_state_to_user(
0a7de745
A
548 thread_t thread,
549 int flavor,
550 thread_state_t state, /* pointer to OUT array */
551 mach_msg_type_number_t *state_count) /*IN/OUT*/
d9a64523
A
552{
553 return thread_get_state_internal(thread, flavor, state, state_count, TRUE);
554}
555
/*
 * Change thread's machine-dependent state.  Called with nothing
 * locked.  Returns same way.
 */
static inline kern_return_t
thread_set_state_internal(
	thread_t                thread,
	int                     flavor,
	thread_state_t          state,
	mach_msg_type_number_t  state_count,
	boolean_t               from_user)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (from_user) {
			/* Translate user-format state before applying it. */
			result = machine_thread_state_convert_from_user(thread, flavor,
			    state, state_count);
			if (result != KERN_SUCCESS) {
				goto out;
			}
		}
		if (thread != current_thread()) {
			/* Hold and stop the target so the update is applied atomically. */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_state(
					thread, flavor, state, state_count);
				thread_unstop(thread);
			} else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			result = machine_thread_set_state(
				thread, flavor, state, state_count);
		}
	} else {
		result = KERN_TERMINATED;
	}

	/* Account external modification of a thread's state. */
	if ((result == KERN_SUCCESS) && from_user) {
		extmod_statistics_incr_thread_set_state(thread);
	}

out:
	thread_mtx_unlock(thread);

	return result;
}
6d2010ae 617
0a7de745 618/* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
6d2010ae
A
619kern_return_t
620thread_set_state(
0a7de745
A
621 thread_t thread,
622 int flavor,
623 thread_state_t state,
624 mach_msg_type_number_t state_count);
6d2010ae
A
625
626kern_return_t
627thread_set_state(
0a7de745
A
628 thread_t thread,
629 int flavor,
630 thread_state_t state,
631 mach_msg_type_number_t state_count)
6d2010ae
A
632{
633 return thread_set_state_internal(thread, flavor, state, state_count, FALSE);
634}
0a7de745 635
6d2010ae
A
636kern_return_t
637thread_set_state_from_user(
0a7de745
A
638 thread_t thread,
639 int flavor,
640 thread_state_t state,
641 mach_msg_type_number_t state_count)
6d2010ae
A
642{
643 return thread_set_state_internal(thread, flavor, state, state_count, TRUE);
644}
0a7de745 645
/*
 * Kernel-internal "thread" interfaces used outside this file:
 */

/* Initialize (or re-initialize) a thread state.  Called from execve
 * with nothing locked, returns same way.
 */
kern_return_t
thread_state_initialize(
	thread_t        thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			/* Hold and stop the target before resetting its state. */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_state_initialize( thread );
				thread_unstop(thread);
			} else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			result = machine_thread_state_initialize( thread );
		}
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
692
1c79356b
A
693
/*
 * Duplicate the current thread's machine state into the target thread
 * (fork-style), including its affinity set if one is in effect.
 */
kern_return_t
thread_dup(
	thread_t        target)
{
	thread_t        self = current_thread();
	kern_return_t   result = KERN_SUCCESS;

	if (target == THREAD_NULL || target == self) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(target);

	if (target->active) {
		/* Hold and stop the target, then copy our state into it. */
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(self, target, FALSE);

			if (self->affinity_set != AFFINITY_SET_NULL) {
				thread_affinity_dup(self, target);
			}
			thread_unstop(target);
		} else {
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(target);

	return result;
}
734
735
39037602
A
/*
 * Duplicate an arbitrary source thread's machine state into the target
 * thread.  Unlike thread_dup(), the source need not be the caller.
 */
kern_return_t
thread_dup2(
	thread_t        source,
	thread_t        target)
{
	kern_return_t result = KERN_SUCCESS;
	uint32_t active = 0;

	if (source == THREAD_NULL || target == THREAD_NULL || target == source) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Snapshot source liveness; bail early if it is already gone. */
	thread_mtx_lock(source);
	active = source->active;
	thread_mtx_unlock(source);

	if (!active) {
		return KERN_TERMINATED;
	}

	thread_mtx_lock(target);

	if (target->active || target->inspection) {
		/* Hold and stop the target, then copy the source's state into it. */
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(source, target, TRUE);
			if (source->affinity_set != AFFINITY_SET_NULL) {
				thread_affinity_dup(source, target);
			}
			thread_unstop(target);
		} else {
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(target);

	return result;
}
784
1c79356b
A
785/*
786 * thread_setstatus:
787 *
788 * Set the status of the specified thread.
789 * Called with (and returns with) no locks held.
790 */
791kern_return_t
792thread_setstatus(
0a7de745
A
793 thread_t thread,
794 int flavor,
795 thread_state_t tstate,
796 mach_msg_type_number_t count)
1c79356b 797{
0a7de745 798 return thread_set_state(thread, flavor, tstate, count);
1c79356b
A
799}
800
d9a64523
A
801kern_return_t
802thread_setstatus_from_user(
0a7de745
A
803 thread_t thread,
804 int flavor,
805 thread_state_t tstate,
806 mach_msg_type_number_t count)
d9a64523 807{
0a7de745 808 return thread_set_state_from_user(thread, flavor, tstate, count);
d9a64523
A
809}
810
1c79356b
A
811/*
812 * thread_getstatus:
813 *
814 * Get the status of the specified thread.
815 */
816kern_return_t
817thread_getstatus(
0a7de745
A
818 thread_t thread,
819 int flavor,
820 thread_state_t tstate,
821 mach_msg_type_number_t *count)
1c79356b 822{
0a7de745 823 return thread_get_state(thread, flavor, tstate, count);
1c79356b
A
824}
825
d9a64523
A
826kern_return_t
827thread_getstatus_to_user(
0a7de745
A
828 thread_t thread,
829 int flavor,
830 thread_state_t tstate,
831 mach_msg_type_number_t *count)
d9a64523 832{
0a7de745 833 return thread_get_state_to_user(thread, flavor, tstate, count);
d9a64523
A
834}
835
fe8ab488
A
/*
 * Change thread's machine-dependent userspace TSD base.
 * Called with nothing locked.  Returns same way.
 */
kern_return_t
thread_set_tsd_base(
	thread_t            thread,
	mach_vm_offset_t    tsd_base)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			/* Hold and stop the target so the update is applied atomically. */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_tsd_base(thread, tsd_base);
				thread_unstop(thread);
			} else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			result = machine_thread_set_tsd_base(thread, tsd_base);
		}
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
880
/*
 * thread_set_apc_ast:
 *
 * Register the AST_APC callback that handles suspension and
 * termination, if it hasn't been installed already.
 *
 * Called with the thread mutex held.
 */
static void
thread_set_apc_ast(thread_t thread)
{
	spl_t s = splsched();

	thread_lock(thread);
	thread_set_apc_ast_locked(thread);
	thread_unlock(thread);

	splx(s);
}
900
/*
 * thread_set_apc_ast_locked:
 *
 * Do the work of registering for the AST_APC callback.
 *
 * Called with the thread mutex and scheduling lock held.
 */
static void
thread_set_apc_ast_locked(thread_t thread)
{
	thread_ast_set(thread, AST_APC);

	if (thread == current_thread()) {
		/* We'll notice at our own next AST check. */
		ast_propagate(thread);
	} else {
		processor_t processor = thread->last_processor;

		/* IPI the remote processor only if the thread is running there now. */
		if (processor != PROCESSOR_NULL &&
		    processor->state == PROCESSOR_RUNNING &&
		    processor->active_thread == thread) {
			cause_ast_check(processor);
		}
	}
}
1c79356b 925
1c79356b
A
926/*
927 * Activation control support routines internal to this file:
3e170ce0 928 *
1c79356b
A
929 */
930
/*
 * thread_suspended
 *
 * Continuation routine for thread suspension.  It checks
 * to see whether there has been any new suspensions.  If so, it
 * installs the AST_APC handler again.
 */
__attribute__((noreturn))
static void
thread_suspended(__unused void *parameter, wait_result_t result)
{
	thread_t thread = current_thread();

	thread_mtx_lock(thread);

	if (result == THREAD_INTERRUPTED) {
		/* The abortsafe wait was interrupted; we are no longer parked. */
		thread->suspend_parked = FALSE;
	} else {
		assert(thread->suspend_parked == FALSE);
	}

	/* Still suspended: re-arm the APC so we park again. */
	if (thread->suspend_count > 0) {
		thread_set_apc_ast(thread);
	}

	thread_mtx_unlock(thread);

	thread_exception_return();
	/*NOTREACHED*/
}
961
/*
 * thread_apc_ast - handles AST_APC and drives thread suspension and termination.
 * Called with nothing locked.  Returns (if it returns) the same way.
 */
void
thread_apc_ast(thread_t thread)
{
	thread_mtx_lock(thread);

	assert(thread->suspend_parked == FALSE);

	spl_t s = splsched();
	thread_lock(thread);

	/* TH_SFLAG_POLLDEPRESS is OK to have here */
	assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);

	/* Any pending abort is being delivered now; clear the flags. */
	thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
	thread_unlock(thread);
	splx(s);

	if (!thread->active) {
		/* Thread is ready to terminate, time to tear it down */
		thread_mtx_unlock(thread);

		thread_terminate_self();
		/*NOTREACHED*/
	}

	/* If we're suspended, go to sleep and wait for someone to wake us up. */
	if (thread->suspend_count > 0) {
		thread->suspend_parked = TRUE;
		assert_wait(&thread->suspend_count,
		    THREAD_ABORTSAFE | THREAD_WAIT_NOREPORT_USER);
		thread_mtx_unlock(thread);

		thread_block(thread_suspended);
		/*NOTREACHED*/
	}

	thread_mtx_unlock(thread);
}
1004
6d2010ae
A
1005/* Prototype, see justification above */
1006kern_return_t
1007act_set_state(
0a7de745
A
1008 thread_t thread,
1009 int flavor,
1010 thread_state_t state,
1011 mach_msg_type_number_t count);
6d2010ae 1012
1c79356b 1013kern_return_t
91447636 1014act_set_state(
0a7de745
A
1015 thread_t thread,
1016 int flavor,
1017 thread_state_t state,
1018 mach_msg_type_number_t count)
1c79356b 1019{
0a7de745
A
1020 if (thread == current_thread()) {
1021 return KERN_INVALID_ARGUMENT;
1022 }
1c79356b 1023
0a7de745 1024 return thread_set_state(thread, flavor, state, count);
1c79356b
A
1025}
1026
6d2010ae
A
1027kern_return_t
1028act_set_state_from_user(
0a7de745
A
1029 thread_t thread,
1030 int flavor,
1031 thread_state_t state,
1032 mach_msg_type_number_t count)
6d2010ae 1033{
0a7de745
A
1034 if (thread == current_thread()) {
1035 return KERN_INVALID_ARGUMENT;
1036 }
6d2010ae 1037
0a7de745 1038 return thread_set_state_from_user(thread, flavor, state, count);
6d2010ae
A
1039}
1040
d9a64523
A
1041/* Prototype, see justification above */
1042kern_return_t
1043act_get_state(
0a7de745
A
1044 thread_t thread,
1045 int flavor,
1046 thread_state_t state,
1047 mach_msg_type_number_t *count);
d9a64523 1048
1c79356b 1049kern_return_t
91447636 1050act_get_state(
0a7de745
A
1051 thread_t thread,
1052 int flavor,
1053 thread_state_t state,
1054 mach_msg_type_number_t *count)
1c79356b 1055{
0a7de745
A
1056 if (thread == current_thread()) {
1057 return KERN_INVALID_ARGUMENT;
1058 }
1c79356b 1059
0a7de745 1060 return thread_get_state(thread, flavor, state, count);
1c79356b
A
1061}
1062
d9a64523
A
1063kern_return_t
1064act_get_state_to_user(
0a7de745
A
1065 thread_t thread,
1066 int flavor,
1067 thread_state_t state,
1068 mach_msg_type_number_t *count)
d9a64523 1069{
0a7de745
A
1070 if (thread == current_thread()) {
1071 return KERN_INVALID_ARGUMENT;
1072 }
d9a64523 1073
0a7de745 1074 return thread_get_state_to_user(thread, flavor, state, count);
d9a64523
A
1075}
1076
316670eb
A
/*
 * Set the given AST on the thread and make sure it is noticed:
 * propagate immediately for the current thread, or IPI the remote
 * processor if the target is running there right now.
 */
static void
act_set_ast(
	thread_t        thread,
	ast_t           ast)
{
	spl_t s = splsched();

	if (thread == current_thread()) {
		thread_ast_set(thread, ast);
		ast_propagate(thread);
	} else {
		processor_t processor;

		thread_lock(thread);
		thread_ast_set(thread, ast);
		processor = thread->last_processor;
		/* IPI the processor only if the thread is running on it now. */
		if (processor != PROCESSOR_NULL &&
		    processor->state == PROCESSOR_RUNNING &&
		    processor->active_thread == thread) {
			cause_ast_check(processor);
		}
		thread_unlock(thread);
	}

	splx(s);
}
1103
5c9f4661
A
1104/*
1105 * set AST on thread without causing an AST check
1106 * and without taking the thread lock
1107 *
1108 * If thread is not the current thread, then it may take
1109 * up until the next context switch or quantum expiration
1110 * on that thread for it to notice the AST.
1111 */
1112static void
1113act_set_ast_async(thread_t thread,
0a7de745 1114 ast_t ast)
5c9f4661
A
1115{
1116 thread_ast_set(thread, ast);
1117
1118 if (thread == current_thread()) {
1119 spl_t s = splsched();
1120 ast_propagate(thread);
1121 splx(s);
1122 }
1123}
1124
316670eb
A
1125void
1126act_set_astbsd(
0a7de745 1127 thread_t thread)
316670eb
A
1128{
1129 act_set_ast( thread, AST_BSD );
1130}
1131
5ba3f43e
A
1132void
1133act_set_astkevent(thread_t thread, uint16_t bits)
1134{
cb323159 1135 os_atomic_or(&thread->kevent_ast_bits, bits, relaxed);
5ba3f43e 1136
5c9f4661
A
1137 /* kevent AST shouldn't send immediate IPIs */
1138 act_set_ast_async(thread, AST_KEVENT);
5ba3f43e
A
1139}
1140
cb323159
A
1141uint16_t
1142act_clear_astkevent(thread_t thread, uint16_t bits)
1143{
1144 /*
1145 * avoid the atomic operation if none of the bits is set,
1146 * which will be the common case.
1147 */
1148 uint16_t cur = os_atomic_load(&thread->kevent_ast_bits, relaxed);
1149 if (cur & bits) {
1150 cur = os_atomic_andnot_orig(&thread->kevent_ast_bits, bits, relaxed);
1151 }
1152 return cur & bits;
1153}
1154
1155void
1156act_set_ast_reset_pcs(thread_t thread)
1157{
1158 act_set_ast(thread, AST_RESET_PCS);
1159}
1160
316670eb
A
1161void
1162act_set_kperf(
0a7de745 1163 thread_t thread)
316670eb
A
1164{
1165 /* safety check */
0a7de745
A
1166 if (thread != current_thread()) {
1167 if (!ml_get_interrupts_enabled()) {
316670eb 1168 panic("unsafe act_set_kperf operation");
0a7de745
A
1169 }
1170 }
316670eb
A
1171
1172 act_set_ast( thread, AST_KPERF );
1173}
1174
#if CONFIG_MACF
void
act_set_astmacf(
	thread_t        thread)
{
	/* Request the MAC framework AST on the target thread. */
	act_set_ast(thread, AST_MACF);
}
#endif
3e170ce0
A
1183
1184void
5c9f4661 1185act_set_astledger(thread_t thread)
3e170ce0
A
1186{
1187 act_set_ast(thread, AST_LEDGER);
1188}
1189
5c9f4661
A
1190/*
1191 * The ledger AST may need to be set while already holding
1192 * the thread lock. This routine skips sending the IPI,
1193 * allowing us to avoid the lock hold.
1194 *
1195 * However, it means the targeted thread must context switch
1196 * to recognize the ledger AST.
1197 */
1198void
1199act_set_astledger_async(thread_t thread)
1200{
1201 act_set_ast_async(thread, AST_LEDGER);
1202}
1203
39037602
A
1204void
1205act_set_io_telemetry_ast(thread_t thread)
1206{
1207 act_set_ast(thread, AST_TELEMETRY_IO);
1208}