1c79356b 1/*
0a7de745 2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0a7de745 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
0a7de745 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
0a7de745 17 *
2d21ac55
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
0a7de745 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_FREE_COPYRIGHT@
30 */
0a7de745 31/*
1c79356b
A
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
0a7de745 35 *
1c79356b
A
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
0a7de745 41 *
1c79356b
A
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
0a7de745 45 *
1c79356b 46 * Carnegie Mellon requests users of this software to return to
0a7de745 47 *
1c79356b
A
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
0a7de745 52 *
1c79356b
A
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58/*
59 * File: thread.h
60 * Author: Avadis Tevanian, Jr.
61 *
62 * This file contains the structure definitions for threads.
63 *
64 */
65/*
66 * Copyright (c) 1993 The University of Utah and
67 * the Computer Systems Laboratory (CSL). All rights reserved.
68 *
69 * Permission to use, copy, modify and distribute this software and its
70 * documentation is hereby granted, provided that both the copyright
71 * notice and this permission notice appear in all copies of the
72 * software, derivative works or modified versions, and any portions
73 * thereof, and that both notices appear in supporting documentation.
74 *
75 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
76 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
77 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
78 *
79 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
80 * improvements that they make and grant CSL redistribution rights.
81 *
82 */
83
0a7de745 84#ifndef _KERN_THREAD_H_
1c79356b
A
85#define _KERN_THREAD_H_
86
87#include <mach/kern_return.h>
88#include <mach/mach_types.h>
89#include <mach/message.h>
90#include <mach/boolean.h>
55e303ae 91#include <mach/vm_param.h>
1c79356b
A
92#include <mach/thread_info.h>
93#include <mach/thread_status.h>
55e303ae 94#include <mach/exception_types.h>
9bccf70c 95
1c79356b 96#include <kern/kern_types.h>
5ba3f43e 97#include <vm/vm_kern.h>
1c79356b 98
91447636 99#include <sys/cdefs.h>
1c79356b 100
0a7de745 101#ifdef MACH_KERNEL_PRIVATE
1c79356b 102
55e303ae 103#include <mach_assert.h>
1c79356b
A
104#include <mach_ldebug.h>
105
91447636
A
106#include <ipc/ipc_types.h>
107
1c79356b 108#include <mach/port.h>
1c79356b 109#include <kern/cpu_number.h>
3e170ce0 110#include <kern/smp.h>
1c79356b 111#include <kern/queue.h>
d9a64523 112
1c79356b 113#include <kern/timer.h>
fe8ab488 114#include <kern/simple_lock.h>
91447636 115#include <kern/locks.h>
1c79356b
A
116#include <kern/sched.h>
117#include <kern/sched_prim.h>
fe8ab488 118#include <mach/sfi_class.h>
1c79356b 119#include <kern/thread_call.h>
5ba3f43e 120#include <kern/thread_group.h>
1c79356b
A
121#include <kern/timer_call.h>
122#include <kern/task.h>
55e303ae 123#include <kern/exception.h>
2d21ac55 124#include <kern/affinity.h>
813fb2f6
A
125#include <kern/debug.h>
126#include <kern/block_hint.h>
d9a64523 127#include <kern/turnstile.h>
cb323159 128#include <kern/mpsc_queue.h>
91447636 129
3e170ce0 130#include <kern/waitq.h>
5ba3f43e 131#include <san/kasan.h>
e8c3f781 132#include <os/refcnt.h>
3e170ce0 133
1c79356b 134#include <ipc/ipc_kmsg.h>
55e303ae 135
cb323159 136#include <machine/atomic.h>
91447636 137#include <machine/cpu_data.h>
1c79356b
A
138#include <machine/thread.h>
139
d9a64523
A
140#ifdef XNU_KERNEL_PRIVATE
141/* priority queue static asserts fail for __ARM64_ARCH_8_32__ kext builds */
142#include <kern/priority_queue.h>
143#endif /* XNU_KERNEL_PRIVATE */
144
5ba3f43e
A
145#if MONOTONIC
146#include <stdatomic.h>
147#include <machine/monotonic.h>
148#endif /* MONOTONIC */
149
150#if CONFIG_EMBEDDED
151/* Taskwatch related. TODO: find this a better home */
152typedef struct task_watcher task_watch_t;
153#endif /* CONFIG_EMBEDDED */
39037602 154
55e303ae 155struct thread {
39037602
A
156#if MACH_ASSERT
157#define THREAD_MAGIC 0x1234ABCDDCBA4321ULL
158 /* Ensure nothing uses &thread as a queue entry */
159 uint64_t thread_magic;
160#endif /* MACH_ASSERT */
161
1c79356b 162 /*
9bccf70c 163 * NOTE: The runq field in the thread structure has an unusual
2d21ac55 164 * locking protocol. If its value is PROCESSOR_NULL, then it is
9bccf70c 165 * locked by the thread_lock, but if its value is something else
fe8ab488
A
166 * then it is locked by the associated run queue lock. It is
167 * set to PROCESSOR_NULL without holding the thread lock, but the
168 * transition from PROCESSOR_NULL to non-null must be done
169 * under the thread lock and the run queue lock.
9bccf70c 170 *
3e170ce0
A
171 * New waitq APIs allow the 'links' and 'runq' fields to be
172 * anywhere in the thread structure.
1c79356b 173 */
39037602 174 union {
0a7de745
A
175 queue_chain_t runq_links; /* run queue links */
176 queue_chain_t wait_links; /* wait queue links */
cb323159 177 struct mpsc_queue_chain mpsc_links; /* thread daemon mpsc links */
0a7de745 178 struct priority_queue_entry wait_prioq_links; /* priority ordered waitq links */
39037602
A
179 };
180
39037602 181 event64_t wait_event; /* wait queue event */
94ff46dc 182 processor_t runq; /* run queue assignment */
39037602 183 struct waitq *waitq; /* wait queue this thread is enqueued on */
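	/*
	 * Illustrative sketch (not part of this header) of honoring the runq
	 * locking protocol described above; splsched()/splx() come from the
	 * surrounding kernel:
	 *
	 *	s = splsched();
	 *	thread_lock(thread);
	 *	if (thread->runq == PROCESSOR_NULL) {
	 *		-- field is stable while the thread lock is held
	 *	} else {
	 *		-- field is owned by thread->runq's run queue lock;
	 *		-- take that lock before clearing it back to PROCESSOR_NULL
	 *	}
	 *	thread_unlock(thread);
	 *	splx(s);
	 */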
d9a64523
A
184 struct turnstile *turnstile; /* thread's turnstile, protected by primitives interlock */
185 void *inheritor; /* inheritor of the primitive the thread will block on */
cb323159
A
186 struct priority_queue sched_inheritor_queue; /* Inheritor queue for kernel promotion */
187 struct priority_queue base_inheritor_queue; /* Inheritor queue for user promotion */
188
189#if CONFIG_SCHED_CLUTCH
190 /*
191 * In the clutch scheduler, the threads are maintained in runqs at the clutch_bucket
192 * level (clutch_bucket defines a unique thread group and scheduling bucket pair). In
193 * order to determine the priority of the clutch bucket as a whole, it is necessary to
194 * find the highest thread in it. The thread could be present in the clutch bucket due
195 * to its base_pri or its promoted pri. This link is used to maintain that queue.
196 */
197 struct priority_queue_entry sched_clutchpri_link;
198
199#endif /* CONFIG_SCHED_CLUTCH */
39037602 200
9bccf70c 201 /* Data updated during assert_wait/thread_wakeup */
3e170ce0 202#if __SMP__
cb323159
A
203 decl_simple_lock_data(, sched_lock); /* scheduling lock (thread_lock()) */
204 decl_simple_lock_data(, wake_lock); /* for thread stop / wait (wake_lock()) */
3e170ce0 205#endif
94ff46dc 206 uint16_t options; /* options set by thread itself */
0a7de745
A
207#define TH_OPT_INTMASK 0x0003 /* interrupt / abort level */
208#define TH_OPT_VMPRIV 0x0004 /* may allocate reserved memory */
0a7de745
A
209#define TH_OPT_SYSTEM_CRITICAL 0x0010 /* Thread must always be allowed to run - even under heavy load */
210#define TH_OPT_PROC_CPULIMIT 0x0020 /* Thread has a task-wide CPU limit applied to it */
211#define TH_OPT_PRVT_CPULIMIT 0x0040 /* Thread has a thread-private CPU limit applied to it */
212#define TH_OPT_IDLE_THREAD 0x0080 /* Thread is a per-processor idle thread */
213#define TH_OPT_GLOBAL_FORCED_IDLE 0x0100 /* Thread performs forced idle for thermal control */
214#define TH_OPT_SCHED_VM_GROUP 0x0200 /* Thread belongs to special scheduler VM group */
215#define TH_OPT_HONOR_QLIMIT 0x0400 /* Thread will honor qlimit while sending mach_msg, regardless of MACH_SEND_ALWAYS */
216#define TH_OPT_SEND_IMPORTANCE 0x0800 /* Thread will allow importance donation from kernel rpc */
217#define TH_OPT_ZONE_GC 0x1000 /* zone_gc() called on this thread */
218
94ff46dc
A
219 bool wake_active; /* wake event on stop */
220 bool at_safe_point; /* thread_abort_safely allowed */
0a7de745
A
221 ast_t reason; /* why we blocked */
222 uint32_t quantum_remaining;
223 wait_result_t wait_result; /* outcome of wait -
224 * may be examined by this thread
225 * WITHOUT locking */
226 thread_continue_t continuation; /* continue here next dispatch */
227 void *parameter; /* continuation parameter */
0b4e3aa0 228
9bccf70c 229 /* Data updated/used in thread_invoke */
0a7de745
A
230 vm_offset_t kernel_stack; /* current kernel stack */
231 vm_offset_t reserved_stack; /* reserved kernel stack */
0b4e3aa0 232
5ba3f43e
A
233#if KASAN
234 struct kasan_thread_data kasan_data;
235#endif
236
cb323159
A
237#if CONFIG_KSANCOV
238 void *ksancov_data;
239#endif
240
0b4e3aa0 241 /* Thread state: */
0a7de745 242 int state;
1c79356b
A
243/*
244 * Thread states [bits or'ed]
245 */
0a7de745
A
246#define TH_WAIT 0x01 /* queued for waiting */
247#define TH_SUSP 0x02 /* stopped or requested to stop */
248#define TH_RUN 0x04 /* running or on runq */
249#define TH_UNINT 0x08 /* waiting uninterruptibly */
250#define TH_TERMINATE 0x10 /* halted at termination */
251#define TH_TERMINATE2 0x20 /* added to termination queue */
252#define TH_WAIT_REPORT 0x40 /* the wait is using the sched_call,
253 * only set if TH_WAIT is also set */
254#define TH_IDLE 0x80 /* idling processor */
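/*
 * Illustrative example (not part of this header): state is a bitmask, so
 * membership tests use bitwise AND, e.g. a thread blocked in an
 * uninterruptible wait has both TH_WAIT and TH_UNINT set:
 *
 *	if ((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT)) {
 *		-- the wait cannot be aborted at this point
 *	}
 */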
1c79356b 255
9bccf70c 256 /* Scheduling information */
0a7de745
A
257 sched_mode_t sched_mode; /* scheduling mode */
258 sched_mode_t saved_mode; /* saved mode during forced mode demotion */
fe8ab488 259
39037602
A
260 /* This thread's contribution to global sched counters */
261 sched_bucket_t th_sched_bucket;
262
0a7de745
A
263 sfi_class_id_t sfi_class; /* SFI class (XXX Updated on CSW/QE/AST) */
264 sfi_class_id_t sfi_wait_class; /* Currently in SFI wait for this class, protected by sfi_lock */
39037602
A
265
266
0a7de745
A
267 uint32_t sched_flags; /* current flag bits */
268#define TH_SFLAG_NO_SMT 0x0001 /* On an SMT CPU, this thread must be scheduled alone */
269#define TH_SFLAG_FAILSAFE 0x0002 /* fail-safe has tripped */
270#define TH_SFLAG_THROTTLED 0x0004 /* throttled thread forced to timeshare mode (may be applied in addition to failsafe) */
271#define TH_SFLAG_DEMOTED_MASK (TH_SFLAG_THROTTLED | TH_SFLAG_FAILSAFE) /* saved_mode contains previous sched_mode */
6d2010ae 272
0a7de745
A
273#define TH_SFLAG_PROMOTED 0x0008 /* sched pri has been promoted by kernel mutex priority promotion */
274#define TH_SFLAG_ABORT 0x0010 /* abort interruptible waits */
275#define TH_SFLAG_ABORTSAFELY 0x0020 /* ... but only those at safe point */
276#define TH_SFLAG_ABORTED_MASK (TH_SFLAG_ABORT | TH_SFLAG_ABORTSAFELY)
277#define TH_SFLAG_DEPRESS 0x0040 /* normal depress yield */
278#define TH_SFLAG_POLLDEPRESS 0x0080 /* polled depress yield */
279#define TH_SFLAG_DEPRESSED_MASK (TH_SFLAG_DEPRESS | TH_SFLAG_POLLDEPRESS)
39037602 280/* unused TH_SFLAG_PRI_UPDATE 0x0100 */
0a7de745
A
281#define TH_SFLAG_EAGERPREEMPT 0x0200 /* Any preemption of this thread should be treated as if AST_URGENT applied */
282#define TH_SFLAG_RW_PROMOTED 0x0400 /* promote reason: blocking with RW lock held */
cb323159 283#define TH_SFLAG_BASE_PRI_FROZEN 0x0800 /* (effective) base_pri is frozen */
0a7de745 284#define TH_SFLAG_WAITQ_PROMOTED 0x1000 /* promote reason: waitq wakeup (generally for IPC receive) */
5ba3f43e
A
285
286
0a7de745 287#define TH_SFLAG_EXEC_PROMOTED 0x8000 /* promote reason: thread is in an exec */
d9a64523
A
288
289/* 'promote reasons' that request a priority floor only, not a custom priority */
290#define TH_SFLAG_PROMOTE_REASON_MASK (TH_SFLAG_RW_PROMOTED | TH_SFLAG_WAITQ_PROMOTED | TH_SFLAG_EXEC_PROMOTED)
39236c6e 291
0a7de745 292#define TH_SFLAG_RW_PROMOTED_BIT (10) /* 0x400 */
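/*
 * Illustrative example (not part of this header): any floor-style promotion
 * currently applied to a thread can be detected with the mask above:
 *
 *	if (thread->sched_flags & TH_SFLAG_PROMOTE_REASON_MASK) {
 *		-- thread holds at least one priority-floor promotion
 *	}
 */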
6d2010ae 293
3e170ce0 294 int16_t sched_pri; /* scheduled (current) priority */
cb323159
A
295 int16_t base_pri; /* effective base priority (equal to req_base_pri unless TH_SFLAG_BASE_PRI_FROZEN) */
296 int16_t req_base_pri; /* requested base priority */
3e170ce0
A
297 int16_t max_priority; /* copy of max base priority */
298 int16_t task_priority; /* copy of task base priority */
d9a64523 299 int16_t promotion_priority; /* priority thread is currently promoted to */
3e170ce0 300
6d2010ae
A
301#if defined(CONFIG_SCHED_GRRR)
302#if 0
0a7de745 303 uint16_t grrr_deficit; /* fixed point (1/1000th quantum) fractional deficit */
6d2010ae
A
304#endif
305#endif
d9a64523 306
0a7de745 307 int iotier_override; /* atomic operations to set, cleared on ret to user */
cb323159 308 os_refcnt_t ref_count; /* number of references to me */
d9a64523
A
309
310 lck_mtx_t* waiting_for_mutex; /* points to mutex we're waiting for until we acquire it */
1c79356b 311
0a7de745 312 uint32_t rwlock_count; /* Number of lck_rw_t locks held by thread */
1c79356b 313
0a7de745 314 integer_t importance; /* task-relative importance */
fe8ab488 315
39236c6e 316 /* Priority depression expiration */
0a7de745
A
317 integer_t depress_timer_active;
318 timer_call_data_t depress_timer;
319 /* real-time parameters */
320 struct { /* see mach/thread_policy.h */
321 uint32_t period;
322 uint32_t computation;
323 uint32_t constraint;
324 boolean_t preemptible;
325 uint64_t deadline;
326 } realtime;
327
328 uint64_t last_run_time; /* time when thread was switched away from */
329 uint64_t last_made_runnable_time; /* time when thread was unblocked or preempted */
330 uint64_t last_basepri_change_time; /* time when thread was last changed in basepri while runnable */
331 uint64_t same_pri_latency;
5ba3f43e
A
332#define THREAD_NOT_RUNNABLE (~0ULL)
333
fe8ab488
A
334
335#if defined(CONFIG_SCHED_MULTIQ)
0a7de745 336 sched_group_t sched_group;
fe8ab488 337#endif /* defined(CONFIG_SCHED_MULTIQ) */
0b4e3aa0 338
0a7de745
A
339 /* Data used during setrun/dispatch */
340 timer_data_t system_timer; /* system mode timer */
341 processor_t bound_processor; /* bound to a processor? */
342 processor_t last_processor; /* processor last dispatched on */
343 processor_t chosen_processor; /* Where we want to run this thread */
9bccf70c 344
0b4e3aa0 345 /* Fail-safe computation since last unblock or qualifying yield */
0a7de745
A
346 uint64_t computation_metered;
347 uint64_t computation_epoch;
348 uint64_t safe_release; /* when to release fail-safe */
1c79356b 349
2d21ac55 350 /* Call out from scheduler */
94ff46dc
A
351 void (*sched_call)(int type, thread_t thread);
352
6d2010ae 353#if defined(CONFIG_SCHED_PROTO)
0a7de745 354 uint32_t runqueue_generation; /* last time runqueue was drained */
6d2010ae 355#endif
d9a64523 356
55e303ae 357 /* Statistics and timesharing calculations */
fe8ab488 358#if defined(CONFIG_SCHED_TIMESHARE_CORE)
0a7de745
A
359 natural_t sched_stamp; /* last scheduler tick */
360 natural_t sched_usage; /* timesharing cpu usage [sched] */
361 natural_t pri_shift; /* usage -> priority from pset */
362 natural_t cpu_usage; /* instrumented cpu usage [%cpu] */
363 natural_t cpu_delta; /* accumulated cpu_usage delta */
fe8ab488
A
364#endif /* CONFIG_SCHED_TIMESHARE_CORE */
365
0a7de745
A
366 uint32_t c_switch; /* total context switches */
367 uint32_t p_switch; /* total processor switches */
368 uint32_t ps_switch; /* total pset switches */
1c79356b 369
3e170ce0 370 integer_t mutex_count; /* total count of locks held */
9bccf70c 371 /* Timing data structures */
0a7de745
A
372 int precise_user_kernel_time; /* precise user/kernel enabled for this thread */
373 timer_data_t user_timer; /* user mode timer */
374 uint64_t user_timer_save; /* saved user timer value */
375 uint64_t system_timer_save; /* saved system timer value */
376 uint64_t vtimer_user_save; /* saved values for vtimers */
377 uint64_t vtimer_prof_save;
378 uint64_t vtimer_rlim_save;
379 uint64_t vtimer_qos_save;
380
381 timer_data_t ptime; /* time executing in P mode */
382 timer_data_t runnable_timer; /* time the thread is runnable (including running) */
5ba3f43e 383
3e170ce0 384#if CONFIG_SCHED_SFI
fe8ab488 385 /* Timing for wait state */
0a7de745 386 uint64_t wait_sfi_begin_time; /* start time for thread waiting in SFI */
3e170ce0 387#endif
fe8ab488 388
2d21ac55
A
389 /*
390 * Processor/cache affinity
391 * - affinity_threads links task threads with the same affinity set
392 */
0a7de745 393 queue_chain_t affinity_threads;
94ff46dc
A
394 affinity_set_t affinity_set;
395
396#if CONFIG_EMBEDDED
397 task_watch_t * taskwatch; /* task watch */
398#endif /* CONFIG_EMBEDDED */
2d21ac55 399
5ba3f43e 400 /* Various bits of state to stash across a continuation, exclusive to the current thread block point */
1c79356b
A
401 union {
402 struct {
0a7de745
A
403 mach_msg_return_t state; /* receive state */
404 mach_port_seqno_t seqno; /* seqno of recvd message */
405 ipc_object_t object; /* object received on */
94ff46dc 406 vm_address_t msg_addr; /* receive buffer pointer */
0a7de745
A
407 mach_msg_size_t rsize; /* max size for recvd msg */
408 mach_msg_size_t msize; /* actual size for recvd msg */
409 mach_msg_option_t option; /* options for receive */
410 mach_port_name_t receiver_name; /* the receive port name */
411 struct knote *knote; /* knote fired for rcv */
39037602 412 union {
0a7de745
A
413 struct ipc_kmsg *kmsg; /* received message */
414 struct ipc_mqueue *peekq; /* mqueue to peek at */
39037602 415 struct {
0a7de745
A
416 mach_msg_priority_t qos; /* received message qos */
417 mach_msg_priority_t oqos; /* override qos for message */
39037602
A
418 } received_qos;
419 };
0a7de745 420 mach_msg_continue_t continuation;
9bccf70c 421 } receive;
1c79356b 422 struct {
0a7de745
A
423 struct semaphore *waitsemaphore; /* semaphore ref */
424 struct semaphore *signalsemaphore; /* semaphore ref */
425 int options; /* semaphore options */
426 kern_return_t result; /* primary result */
9bccf70c
A
427 mach_msg_continue_t continuation;
428 } sema;
cb323159
A
429 struct {
430#define THREAD_SAVE_IOKIT_TLS_COUNT 8
431 void *tls[THREAD_SAVE_IOKIT_TLS_COUNT];
432 } iokit;
9bccf70c 433 } saved;
1c79356b 434
5ba3f43e
A
435 /* Only user threads can cause guard exceptions, only kernel threads can be thread call threads */
436 union {
437 /* Group and call this thread is working on behalf of */
438 struct {
439 struct thread_call_group * thc_group;
440 struct thread_call * thc_call; /* debug only, may be deallocated */
441 } thc_state;
442
443 /* Structure to save information about guard exception */
444 struct {
445 mach_exception_code_t code;
446 mach_exception_subcode_t subcode;
447 } guard_exc_info;
448 };
39236c6e 449
3e170ce0
A
450 /* Kernel holds on this thread */
451 int16_t suspend_count;
452 /* User level suspensions */
453 int16_t user_stop_count;
39236c6e 454
9bccf70c 455 /* IPC data structures */
39236c6e 456#if IMPORTANCE_INHERITANCE
0a7de745 457 natural_t ith_assertions; /* assertions pending drop */
39236c6e 458#endif
0a7de745
A
459 struct ipc_kmsg_queue ith_messages; /* messages to reap */
460 mach_port_t ith_rpc_reply; /* reply port for kernel RPCs */
1c79356b 461
94ff46dc
A
462 /* Pending thread ast(s) */
463 ast_t ast;
464
1c79356b 465 /* Ast/Halt data structures */
94ff46dc 466 vm_offset_t recover; /* page fault recovery (copyin/out) */
1c79356b 467
0a7de745 468 queue_chain_t threads; /* global list of all threads */
1c79356b 469
55e303ae 470 /* Activation */
94ff46dc 471 queue_chain_t task_threads;
55e303ae 472
0a7de745
A
473 /* Task membership */
474 struct task *task;
475 vm_map_t map;
d9a64523 476#if DEVELOPMENT || DEBUG
94ff46dc 477 bool pmap_footprint_suspended;
d9a64523 478#endif /* DEVELOPMENT || DEBUG */
55e303ae 479
94ff46dc
A
480 /* Timed wait expiration */
481 timer_call_data_t wait_timer;
482 uint16_t wait_timer_active;
483 bool wait_timer_is_set;
55e303ae 484
0a7de745
A
485 /* Miscellaneous bits guarded by mutex */
486 uint32_t
487 active:1, /* Thread is active and has not been terminated */
488 started:1, /* Thread has been started after creation */
489 static_param:1, /* Disallow policy parameter changes */
490 inspection:1, /* TRUE when task is being inspected by crash reporter */
491 policy_reset:1, /* Disallow policy parameter changes on terminating threads */
492 suspend_parked:1, /* thread parked in thread_suspended */
493 corpse_dup:1, /* TRUE when thread is an inactive duplicate in a corpse */
494 :0;
39037602 495
94ff46dc
A
496 decl_lck_mtx_data(, mutex);
497
0a7de745
A
498 /* Ports associated with this thread */
499 struct ipc_port *ith_self; /* not a right, doesn't hold ref */
500 struct ipc_port *ith_sself; /* a send right */
501 struct ipc_port *ith_special_reply_port; /* ref to special reply port */
502 struct exception_action *exc_actions;
55e303ae 503
0a7de745
A
504#ifdef MACH_BSD
505 void *uthread;
55e303ae 506#endif
2d21ac55
A
507
508#if CONFIG_DTRACE
cb323159 509 uint16_t t_dtrace_flags; /* DTrace thread states */
0a7de745 510#define TH_DTRACE_EXECSUCCESS 0x01
cb323159 511 uint16_t t_dtrace_inprobe; /* Executing under dtrace_probe */
0a7de745
A
512 uint32_t t_dtrace_predcache; /* DTrace per thread predicate value hint */
513 int64_t t_dtrace_tracing; /* Thread time under dtrace_probe() */
514 int64_t t_dtrace_vtime;
2d21ac55 515#endif
b0d623f7 516
0a7de745
A
517 clock_sec_t t_page_creation_time;
518 uint32_t t_page_creation_count;
519 uint32_t t_page_creation_throttled;
04b8595b 520#if (DEVELOPMENT || DEBUG)
0a7de745
A
521 uint64_t t_page_creation_throttled_hard;
522 uint64_t t_page_creation_throttled_soft;
04b8595b 523#endif /* DEVELOPMENT || DEBUG */
cb323159 524 int t_pagein_error; /* for vm_fault(), holds error from vnop_pagein() */
04b8595b 525
39037602 526#ifdef KPERF
cb323159
A
527/* The high 8 bits are the number of frames to sample from a user callstack. */
528#define T_KPERF_CALLSTACK_DEPTH_OFFSET (24)
39037602
A
529#define T_KPERF_SET_CALLSTACK_DEPTH(DEPTH) (((uint32_t)(DEPTH)) << T_KPERF_CALLSTACK_DEPTH_OFFSET)
530#define T_KPERF_GET_CALLSTACK_DEPTH(FLAGS) ((FLAGS) >> T_KPERF_CALLSTACK_DEPTH_OFFSET)
94ff46dc
A
531#define T_KPERF_ACTIONID_OFFSET (18)
532#define T_KPERF_SET_ACTIONID(AID) (((uint32_t)(AID)) << T_KPERF_ACTIONID_OFFSET)
533#define T_KPERF_GET_ACTIONID(FLAGS) ((FLAGS) >> T_KPERF_ACTIONID_OFFSET)
39037602
A
534#endif
535
94ff46dc
A
536#define T_KPERF_AST_CALLSTACK 0x1 /* dump a callstack on thread's next AST */
537#define T_KPERF_AST_DISPATCH 0x2 /* dump a name on thread's next AST */
538#define T_KPC_ALLOC 0x4 /* thread needs a kpc_buf allocated */
539
540#define T_KPERF_AST_ALL \
541 (T_KPERF_AST_CALLSTACK | T_KPERF_AST_DISPATCH | T_KPC_ALLOC)
542/* only go up to T_KPERF_ACTIONID_OFFSET - 1 */
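/*
 * Illustrative example (not part of this header, values are arbitrary):
 * kperf_ast packs AST request bits, an action ID and a callstack depth
 * into one 32-bit word with the macros above:
 *
 *	uint32_t kperf_flags = T_KPERF_AST_CALLSTACK
 *	    | T_KPERF_SET_ACTIONID(3)
 *	    | T_KPERF_SET_CALLSTACK_DEPTH(16);
 *	assert(T_KPERF_GET_CALLSTACK_DEPTH(kperf_flags) == 16);
 */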
39037602
A
543
544#ifdef KPERF
94ff46dc 545 uint32_t kperf_ast;
39037602
A
546 uint32_t kperf_pet_gen; /* last generation of PET that sampled this thread */
547 uint32_t kperf_c_switch; /* last dispatch detection */
548 uint32_t kperf_pet_cnt; /* how many times a thread has been sampled by PET */
549#endif
6d2010ae 550
39236c6e
A
551#ifdef KPC
552 /* accumulated performance counters for this thread */
553 uint64_t *kpc_buf;
554#endif
555
fe8ab488
A
556#if HYPERVISOR
557 /* hypervisor virtual CPU object associated with this thread */
558 void *hv_thread_target;
559#endif /* HYPERVISOR */
560
6d2010ae 561 /* Statistics accumulated per-thread and aggregated per-task */
0a7de745
A
562 uint32_t syscalls_unix;
563 uint32_t syscalls_mach;
564 ledger_t t_ledger;
565 ledger_t t_threadledger; /* per thread ledger */
566 ledger_t t_bankledger; /* ledger to charge someone */
567 uint64_t t_deduct_bank_ledger_time; /* cpu time to be deducted from bank ledger */
568 uint64_t t_deduct_bank_ledger_energy; /* energy to be deducted from bank ledger */
5ba3f43e 569
94ff46dc
A
570 uint64_t thread_id; /* system-wide unique thread id */
571
5ba3f43e
A
572#if MONOTONIC
573 struct mt_thread t_monotonic;
574#endif /* MONOTONIC */
575
576 /*** Machine-dependent state ***/
577 struct machine_thread machine;
39236c6e 578
39037602
A
579 /* policy is protected by the thread mutex */
580 struct thread_requested_policy requested_policy;
581 struct thread_effective_policy effective_policy;
39236c6e 582
fe8ab488 583 /* usynch override is protected by the task lock, eventually will be thread mutex */
a1c7dba1 584 struct thread_qos_override {
0a7de745
A
585 struct thread_qos_override *override_next;
586 uint32_t override_contended_resource_count;
587 int16_t override_qos;
588 int16_t override_resource_type;
589 user_addr_t override_resource;
a1c7dba1 590 } *overrides;
fe8ab488 591
cb323159 592 uint32_t kevent_overrides;
94ff46dc
A
593 uint8_t user_promotion_basepri;
594 uint8_t kern_promotion_schedpri;
5ba3f43e 595 _Atomic uint16_t kevent_ast_bits;
39037602 596
0a7de745 597 io_stat_info_t thread_io_stats; /* per-thread I/O statistics */
39236c6e 598
0a7de745
A
599 uint32_t thread_callout_interrupt_wakeups;
600 uint32_t thread_callout_platform_idle_wakeups;
601 uint32_t thread_timer_wakeups_bin_1;
602 uint32_t thread_timer_wakeups_bin_2;
603 uint16_t thread_tag;
cb323159
A
604 /*
605 * callout_* fields are only set for thread call threads whereas guard_exc_fatal is set
606 * by user threads on themselves while taking a guard exception. So it's okay for them to
607 * share this bitfield.
608 */
0a7de745
A
609 uint16_t callout_woken_from_icontext:1,
610 callout_woken_from_platform_idle:1,
611 callout_woke_thread:1,
cb323159
A
612 guard_exc_fatal:1,
613 thread_bitfield_unused:12;
0a7de745
A
614
615 mach_port_name_t ith_voucher_name;
616 ipc_voucher_t ith_voucher;
fe8ab488 617#if CONFIG_IOSCHED
0a7de745 618 void *decmp_upl;
fe8ab488 619#endif /* CONFIG_IOSCHED */
3e170ce0 620
5ba3f43e
A
621 /* work interval (if any) associated with the thread. Uses thread mutex */
622 struct work_interval *th_work_interval;
39037602 623
0a7de745
A
624#if SCHED_TRACE_THREAD_WAKEUPS
625 uintptr_t thread_wakeup_bt[64];
39037602 626#endif
d9a64523
A
627 turnstile_update_flags_t inheritor_flags; /* inheritor flags for inheritor field */
628 block_hint_t pending_block_hint;
629 block_hint_t block_hint; /* What type of primitive last caused us to block. */
cb323159 630 integer_t decompressions; /* Per-thread decompressions counter to be added to per-task decompressions counter */
0b4e3aa0 631};
1c79356b 632
39037602
A
633#define ith_state saved.receive.state
634#define ith_object saved.receive.object
635#define ith_msg_addr saved.receive.msg_addr
636#define ith_rsize saved.receive.rsize
637#define ith_msize saved.receive.msize
0a7de745 638#define ith_option saved.receive.option
39037602
A
639#define ith_receiver_name saved.receive.receiver_name
640#define ith_continuation saved.receive.continuation
641#define ith_kmsg saved.receive.kmsg
642#define ith_peekq saved.receive.peekq
5ba3f43e 643#define ith_knote saved.receive.knote
39037602
A
644#define ith_qos saved.receive.received_qos.qos
645#define ith_qos_override saved.receive.received_qos.oqos
646#define ith_seqno saved.receive.seqno
647
648#define sth_waitsemaphore saved.sema.waitsemaphore
649#define sth_signalsemaphore saved.sema.signalsemaphore
650#define sth_options saved.sema.options
651#define sth_result saved.sema.result
652#define sth_continuation saved.sema.continuation
653
5ba3f43e
A
654#define ITH_KNOTE_NULL ((void *)NULL)
655#define ITH_KNOTE_PSEUDO ((void *)0xdeadbeef)
d9a64523
A
656/*
657 * The ith_knote is used during message delivery, and can safely be interpreted
658 * only on one of these codepaths; that is what the test for msgt_name being
659 * RECEIVE or SEND_ONCE is checking.
660 */
661#define ITH_KNOTE_VALID(kn, msgt_name) \
0a7de745
A
662 (((kn) != ITH_KNOTE_NULL && (kn) != ITH_KNOTE_PSEUDO) && \
663 ((msgt_name) == MACH_MSG_TYPE_PORT_RECEIVE || \
664 (msgt_name) == MACH_MSG_TYPE_PORT_SEND_ONCE))
5ba3f43e 665
39037602
A
666#if MACH_ASSERT
667#define assert_thread_magic(thread) assertf((thread)->thread_magic == THREAD_MAGIC, \
0a7de745
A
668 "bad thread magic 0x%llx for thread %p, expected 0x%llx", \
669 (thread)->thread_magic, (thread), THREAD_MAGIC)
39037602
A
670#else
671#define assert_thread_magic(thread) do { (void)(thread); } while (0)
672#endif
1c79356b 673
0a7de745 674extern void thread_bootstrap(void);
1c79356b 675
0a7de745 676extern void thread_init(void);
9bccf70c 677
0a7de745 678extern void thread_daemon_init(void);
1c79356b 679
0a7de745
A
680#define thread_reference_internal(thread) \
681 os_ref_retain(&(thread)->ref_count);
91447636 682
0a7de745
A
683#define thread_reference(thread) \
684MACRO_BEGIN \
685 if ((thread) != THREAD_NULL) \
686 thread_reference_internal(thread); \
91447636 687MACRO_END
1c79356b 688
0a7de745
A
689extern void thread_deallocate(
690 thread_t thread);
691
0a7de745
A
692extern void thread_inspect_deallocate(
693 thread_inspect_t thread);
3e170ce0 694
0a7de745 695extern void thread_terminate_self(void);
813fb2f6 696
0a7de745
A
697extern kern_return_t thread_terminate_internal(
698 thread_t thread);
1c79356b 699
0a7de745
A
700extern void thread_start(
701 thread_t thread) __attribute__ ((noinline));
91447636 702
0a7de745
A
703extern void thread_start_in_assert_wait(
704 thread_t thread,
705 event_t event,
706 wait_interrupt_t interruptible) __attribute__ ((noinline));
2d21ac55 707
0a7de745
A
708extern void thread_terminate_enqueue(
709 thread_t thread);
39037602 710
0a7de745
A
711extern void thread_exception_enqueue(
712 task_t task,
713 thread_t thread,
714 exception_type_t etype);
91447636 715
0a7de745
A
716extern void thread_copy_resource_info(
717 thread_t dst_thread,
718 thread_t src_thread);
39037602 719
0a7de745 720extern void thread_terminate_crashed_threads(void);
39037602 721
0a7de745
A
722extern void thread_stack_enqueue(
723 thread_t thread);
d9a64523 724
0a7de745
A
725extern void thread_hold(
726 thread_t thread);
91447636 727
0a7de745
A
728extern void thread_release(
729 thread_t thread);
1c79356b 730
cb323159 731extern void thread_corpse_continue(void) __dead2;
1c79356b 732
0a7de745 733extern boolean_t thread_is_active(thread_t thread);
39037602 734
0a7de745 735extern lck_grp_t thread_lck_grp;
743345f9 736
3e170ce0
A
737/* Locking for scheduler state, always acquired with interrupts disabled (splsched()) */
738#if __SMP__
0a7de745
A
739#define thread_lock_init(th) simple_lock_init(&(th)->sched_lock, 0)
740#define thread_lock(th) simple_lock(&(th)->sched_lock, &thread_lck_grp)
741#define thread_unlock(th) simple_unlock(&(th)->sched_lock)
1c79356b 742
0a7de745
A
743#define wake_lock_init(th) simple_lock_init(&(th)->wake_lock, 0)
744#define wake_lock(th) simple_lock(&(th)->wake_lock, &thread_lck_grp)
745#define wake_unlock(th) simple_unlock(&(th)->wake_lock)
3e170ce0 746#else
0a7de745
A
747#define thread_lock_init(th) do { (void)th; } while(0)
748#define thread_lock(th) do { (void)th; } while(0)
749#define thread_unlock(th) do { (void)th; } while(0)
3e170ce0 750
0a7de745
A
751#define wake_lock_init(th) do { (void)th; } while(0)
752#define wake_lock(th) do { (void)th; } while(0)
753#define wake_unlock(th) do { (void)th; } while(0)
3e170ce0 754#endif
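/*
 * Illustrative sketch (not part of this header) of the canonical usage noted
 * above; the scheduler locks are only taken with interrupts disabled:
 *
 *	spl_t s = splsched();
 *	thread_lock(thread);
 *	-- inspect or update scheduling state
 *	thread_unlock(thread);
 *	splx(s);
 */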
2d21ac55 755
0a7de745 756#define thread_should_halt_fast(thread) (!(thread)->active)
1c79356b 757
0a7de745
A
758extern void stack_alloc(
759 thread_t thread);
1c79356b 760
0a7de745
A
761extern void stack_handoff(
762 thread_t from,
763 thread_t to);
6d2010ae 764
0a7de745
A
765extern void stack_free(
766 thread_t thread);
1c79356b 767
0a7de745
A
768extern void stack_free_reserved(
769 thread_t thread);
1c79356b 770
0a7de745
A
771extern boolean_t stack_alloc_try(
772 thread_t thread);
91447636 773
0a7de745 774extern void stack_collect(void);
1c79356b 775
0a7de745 776extern void stack_init(void);
91447636 777
6d2010ae 778
0a7de745
A
779extern kern_return_t thread_info_internal(
780 thread_t thread,
781 thread_flavor_t flavor,
782 thread_info_t thread_info_out,
783 mach_msg_type_number_t *thread_info_count);
1c79356b 784
1c79356b 785
1c79356b 786
0a7de745
A
787extern kern_return_t kernel_thread_create(
788 thread_continue_t continuation,
789 void *parameter,
790 integer_t priority,
791 thread_t *new_thread);
1c79356b 792
0a7de745
A
793extern kern_return_t kernel_thread_start_priority(
794 thread_continue_t continuation,
795 void *parameter,
796 integer_t priority,
797 thread_t *new_thread);
1c79356b 798
0a7de745
A
799extern void machine_stack_attach(
800 thread_t thread,
801 vm_offset_t stack);
1c79356b 802
0a7de745
A
803extern vm_offset_t machine_stack_detach(
804 thread_t thread);
55e303ae 805
0a7de745
A
806extern void machine_stack_handoff(
807 thread_t old,
808 thread_t new);
55e303ae 809
0a7de745
A
810extern thread_t machine_switch_context(
811 thread_t old_thread,
812 thread_continue_t continuation,
813 thread_t new_thread);
55e303ae 814
0a7de745
A
815extern void machine_load_context(
816 thread_t thread) __attribute__((noreturn));
5ba3f43e 817
0a7de745
A
818extern kern_return_t machine_thread_state_initialize(
819 thread_t thread);
9bccf70c 820
0a7de745
A
821extern kern_return_t machine_thread_set_state(
822 thread_t thread,
823 thread_flavor_t flavor,
824 thread_state_t state,
825 mach_msg_type_number_t count);
55e303ae 826
cb323159
A
827extern mach_vm_address_t machine_thread_pc(
828 thread_t thread);
829
830extern void machine_thread_reset_pc(
831 thread_t thread,
832 mach_vm_address_t pc);
833
834extern boolean_t machine_thread_on_core(
835 thread_t thread);
836
0a7de745
A
837extern kern_return_t machine_thread_get_state(
838 thread_t thread,
839 thread_flavor_t flavor,
840 thread_state_t state,
841 mach_msg_type_number_t *count);
55e303ae 842
0a7de745
A
843extern kern_return_t machine_thread_state_convert_from_user(
844 thread_t thread,
845 thread_flavor_t flavor,
846 thread_state_t tstate,
847 mach_msg_type_number_t count);
d9a64523 848
0a7de745
A
849extern kern_return_t machine_thread_state_convert_to_user(
850 thread_t thread,
851 thread_flavor_t flavor,
852 thread_state_t tstate,
853 mach_msg_type_number_t *count);
d9a64523 854
0a7de745
A
855extern kern_return_t machine_thread_dup(
856 thread_t self,
857 thread_t target,
858 boolean_t is_corpse);
55e303ae 859
0a7de745 860extern void machine_thread_init(void);
55e303ae 861
0a7de745
A
862extern kern_return_t machine_thread_create(
863 thread_t thread,
864 task_t task);
865extern void machine_thread_switch_addrmode(
866 thread_t thread);
55e303ae 867
0a7de745
A
868extern void machine_thread_destroy(
869 thread_t thread);
55e303ae 870
0a7de745
A
871extern void machine_set_current_thread(
872 thread_t thread);
55e303ae 873
0a7de745
A
874extern kern_return_t machine_thread_get_kern_state(
875 thread_t thread,
876 thread_flavor_t flavor,
877 thread_state_t tstate,
878 mach_msg_type_number_t *count);
91447636 879
0a7de745
A
880extern kern_return_t machine_thread_inherit_taskwide(
881 thread_t thread,
882 task_t parent_task);
91447636 883
0a7de745
A
884extern kern_return_t machine_thread_set_tsd_base(
885 thread_t thread,
886 mach_vm_offset_t tsd_base);
55e303ae 887
0a7de745
A
888#define thread_mtx_lock(thread) lck_mtx_lock(&(thread)->mutex)
889#define thread_mtx_try(thread) lck_mtx_try_lock(&(thread)->mutex)
890#define thread_mtx_unlock(thread) lck_mtx_unlock(&(thread)->mutex)
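/*
 * Illustrative sketch (not part of this header): fields of struct thread
 * documented as guarded by the thread mutex are accessed under this lock:
 *
 *	thread_mtx_lock(thread);
 *	-- read or update mutex-protected thread state
 *	thread_mtx_unlock(thread);
 */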
55e303ae 891
39037602 892extern void thread_apc_ast(thread_t thread);
55e303ae 893
39037602 894extern void thread_update_qos_cpu_time(thread_t thread);
fe8ab488 895
2d21ac55
A
896void act_machine_sv_free(thread_t, int);
897
0a7de745
A
898vm_offset_t min_valid_stack_address(void);
899vm_offset_t max_valid_stack_address(void);
b0d623f7 900
0a7de745
A
901static inline uint16_t
902thread_set_tag_internal(thread_t thread, uint16_t tag)
903{
cb323159 904 return os_atomic_or_orig(&thread->thread_tag, tag, relaxed);
4b17d6b6 905}
39236c6e 906
0a7de745
A
907static inline uint16_t
908thread_get_tag_internal(thread_t thread)
909{
4b17d6b6
A
910 return thread->thread_tag;
911}
912
0a7de745
A
913extern bool thread_no_smt(thread_t thread);
914extern bool processor_active_thread_no_smt(processor_t processor);
fe8ab488 915
3e170ce0
A
916extern void thread_set_options(uint32_t thopt);
917
5ba3f43e 918
0a7de745 919#else /* MACH_KERNEL_PRIVATE */
1c79356b 920
91447636 921__BEGIN_DECLS
1c79356b 922
d9a64523
A
923extern void thread_mtx_lock(thread_t thread);
924
925extern void thread_mtx_unlock(thread_t thread);
926
cb323159 927extern thread_t current_thread(void) __attribute__((const));
91447636 928
0a7de745
A
929extern void thread_reference(
930 thread_t thread);
55e303ae 931
0a7de745
A
932extern void thread_deallocate(
933 thread_t thread);
55e303ae 934
cb323159
A
935#if BSD_KERNEL_PRIVATE
936/* Duplicated from osfmk/kern/ipc_tt.h */
937__options_decl(port_to_thread_options_t, uint32_t, {
938 PORT_TO_THREAD_NONE = 0x0000,
939 PORT_TO_THREAD_IN_CURRENT_TASK = 0x0001,
940 PORT_TO_THREAD_NOT_CURRENT_THREAD = 0x0002,
941});
942
943extern thread_t port_name_to_thread(
944 mach_port_name_t port_name,
945 port_to_thread_options_t options);
946#endif /* BSD_KERNEL_PRIVATE */
947
91447636 948__END_DECLS
1c79356b 949
0a7de745 950#endif /* MACH_KERNEL_PRIVATE */
1c79356b 951
0a7de745 952#ifdef KERNEL_PRIVATE
1c79356b 953
91447636 954__BEGIN_DECLS
55e303ae 955
cb323159 956extern void thread_deallocate_safe(
0a7de745 957 thread_t thread);
5ba3f43e 958
0a7de745
A
959extern uint64_t thread_dispatchqaddr(
960 thread_t thread);
5ba3f43e 961
0a7de745
A
962extern uint64_t thread_rettokern_addr(
963 thread_t thread);
b0d623f7 964
cb323159
A
965extern integer_t thread_kern_get_pri(thread_t thr) __attribute__((const));
966
967extern void thread_kern_set_pri(thread_t thr, integer_t pri);
968
969extern integer_t thread_kern_get_kernel_maxpri(void) __attribute__((const));
970
91447636 971__END_DECLS
55e303ae 972
0a7de745 973#endif /* KERNEL_PRIVATE */
55e303ae 974
fe8ab488
A
975#ifdef KERNEL
976__BEGIN_DECLS
977
0a7de745 978extern uint64_t thread_tid(thread_t thread);
fe8ab488
A
979
980__END_DECLS
981
982#endif /* KERNEL */
983
91447636 984__BEGIN_DECLS
55e303ae 985
0a7de745 986#ifdef XNU_KERNEL_PRIVATE
9bccf70c 987
4b17d6b6
A
988/*
989 * Thread tags; for easy identification.
990 */
0a7de745
A
991#define THREAD_TAG_MAINTHREAD 0x1
992#define THREAD_TAG_CALLOUT 0x2
993#define THREAD_TAG_IOWORKLOOP 0x4
4b17d6b6 994
0a7de745
A
995#define THREAD_TAG_PTHREAD 0x10
996#define THREAD_TAG_WORKQUEUE 0x20
39037602 997
0a7de745
A
998uint16_t thread_set_tag(thread_t, uint16_t);
999uint16_t thread_get_tag(thread_t);
1000uint64_t thread_last_run_time(thread_t);
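/*
 * Illustrative example (not part of this header): tag the current thread as
 * an IOWorkLoop thread; the setter is assumed to return the previous tag bits:
 *
 *	(void)thread_set_tag(current_thread(), THREAD_TAG_IOWORKLOOP);
 *	assert(thread_get_tag(current_thread()) & THREAD_TAG_IOWORKLOOP);
 */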
4b17d6b6 1001
316670eb 1002extern kern_return_t thread_state_initialize(
0a7de745
A
1003 thread_t thread);
1004
1005extern kern_return_t thread_setstatus(
1006 thread_t thread,
1007 int flavor,
1008 thread_state_t tstate,
1009 mach_msg_type_number_t count);
1010
1011extern kern_return_t thread_setstatus_from_user(
1012 thread_t thread,
1013 int flavor,
1014 thread_state_t tstate,
1015 mach_msg_type_number_t count);
1016
1017extern kern_return_t thread_getstatus(
1018 thread_t thread,
1019 int flavor,
1020 thread_state_t tstate,
1021 mach_msg_type_number_t *count);
1022
1023extern kern_return_t thread_getstatus_to_user(
1024 thread_t thread,
1025 int flavor,
1026 thread_state_t tstate,
1027 mach_msg_type_number_t *count);
1028
1029extern kern_return_t thread_create_with_continuation(
1030 task_t task,
1031 thread_t *new_thread,
1032 thread_continue_t continuation);
3e170ce0 1033
743345f9 1034extern kern_return_t thread_create_waiting(task_t task,
0a7de745
A
1035 thread_continue_t continuation,
1036 event_t event,
1037 thread_t *new_thread);
743345f9 1038
0a7de745
A
1039extern kern_return_t thread_create_workq_waiting(
1040 task_t task,
1041 thread_continue_t thread_return,
1042 thread_t *new_thread);
39037602 1043
0a7de745
A
1044extern void thread_yield_internal(
1045 mach_msg_timeout_t interval);
2d21ac55 1046
0a7de745 1047extern void thread_yield_to_preemption(void);
39037602 1048
316670eb
A
1049/*
1050 * Thread-private CPU limits: apply a private CPU limit to this thread only. Available actions are:
0a7de745 1051 *
316670eb
A
1052 * 1) Block. Prevent CPU consumption of the thread from exceeding the limit.
1053 * 2) Exception. Generate a resource consumption exception when the limit is exceeded.
39236c6e 1054 * 3) Disable. Remove any existing CPU limit.
316670eb 1055 */
0a7de745
A
1056#define THREAD_CPULIMIT_BLOCK 0x1
1057#define THREAD_CPULIMIT_EXCEPTION 0x2
1058#define THREAD_CPULIMIT_DISABLE 0x3
316670eb
A
1059
1060struct _thread_ledger_indices {
1061 int cpu_time;
1062};
1063
1064extern struct _thread_ledger_indices thread_ledgers;
1065
39236c6e 1066extern int thread_get_cpulimit(int *action, uint8_t *percentage, uint64_t *interval_ns);
316670eb
A
1067extern int thread_set_cpulimit(int action, uint8_t percentage, uint64_t interval_ns);
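/*
 * Illustrative example (not part of this header, values are arbitrary):
 * limit the calling thread to 50% of a CPU over a one-second interval,
 * blocking it whenever the limit is exceeded:
 *
 *	int err = thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, 50, NSEC_PER_SEC);
 */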
1068
0a7de745
A
1069extern void thread_read_times(
1070 thread_t thread,
1071 time_value_t *user_time,
1072 time_value_t *system_time,
1073 time_value_t *runnable_time);
1c79356b 1074
0a7de745 1075extern uint64_t thread_get_runtime_self(void);
fe8ab488 1076
0a7de745
A
1077extern void thread_setuserstack(
1078 thread_t thread,
1079 mach_vm_offset_t user_stack);
1c79356b 1080
0a7de745
A
1081extern uint64_t thread_adjuserstack(
1082 thread_t thread,
1083 int adjust);
55e303ae 1084
0a7de745
A
1085extern void thread_setentrypoint(
1086 thread_t thread,
1087 mach_vm_offset_t entry);
91447636 1088
0a7de745
A
1089extern kern_return_t thread_set_tsd_base(
1090 thread_t thread,
1091 mach_vm_offset_t tsd_base);
fe8ab488 1092
0a7de745
A
1093extern kern_return_t thread_setsinglestep(
1094 thread_t thread,
1095 int on);
0c530ab8 1096
0a7de745
A
1097extern kern_return_t thread_userstack(
1098 thread_t,
1099 int,
1100 thread_state_t,
1101 unsigned int,
1102 mach_vm_offset_t *,
1103 int *,
1104 boolean_t);
6d2010ae 1105
0a7de745
A
1106extern kern_return_t thread_entrypoint(
1107 thread_t,
1108 int,
1109 thread_state_t,
1110 unsigned int,
1111 mach_vm_offset_t *);
6d2010ae 1112
0a7de745
A
1113extern kern_return_t thread_userstackdefault(
1114 mach_vm_offset_t *,
1115 boolean_t);
6d2010ae 1116
0a7de745
A
1117extern kern_return_t thread_wire_internal(
1118 host_priv_t host_priv,
1119 thread_t thread,
1120 boolean_t wired,
1121 boolean_t *prev_state);
91447636 1122
fe8ab488 1123
0a7de745 1124extern kern_return_t thread_dup(thread_t);
91447636 1125
39037602
A
1126extern kern_return_t thread_dup2(thread_t, thread_t);
1127
5ba3f43e
A
1128#if !defined(_SCHED_CALL_T_DEFINED)
1129#define _SCHED_CALL_T_DEFINED
0a7de745
A
1130typedef void (*sched_call_t)(
1131 int type,
1132 thread_t thread);
5ba3f43e 1133#endif
2d21ac55 1134
0a7de745
A
1135#define SCHED_CALL_BLOCK 0x1
1136#define SCHED_CALL_UNBLOCK 0x2
2d21ac55 1137
0a7de745
A
1138extern void thread_sched_call(
1139 thread_t thread,
1140 sched_call_t call);
2d21ac55 1141
0a7de745
A
1142extern boolean_t thread_is_static_param(
1143 thread_t thread);
fe8ab488 1144
0a7de745 1145extern task_t get_threadtask(thread_t);
0c530ab8 1146
d9a64523
A
1147/*
1148 * Thread is running within a 64-bit address space.
1149 */
0a7de745 1150#define thread_is_64bit_addr(thd) \
d9a64523
A
1151 task_has_64Bit_addr(get_threadtask(thd))
1152
1153/*
1154 * Thread is using 64-bit machine state.
1155 */
0a7de745 1156#define thread_is_64bit_data(thd) \
d9a64523 1157 task_has_64Bit_data(get_threadtask(thd))
91447636 1158
0a7de745
A
1159#if defined(__x86_64__)
1160extern int thread_task_has_ldt(thread_t);
1161#endif
1162extern void *get_bsdthread_info(thread_t);
1163extern void set_bsdthread_info(thread_t, void *);
cb323159 1164extern void set_thread_pagein_error(thread_t, int);
0a7de745
A
1165extern void *uthread_alloc(task_t, thread_t, int);
1166extern event_t workq_thread_init_and_wq_lock(task_t, thread_t); // bsd/pthread/
1167extern void uthread_cleanup_name(void *uthread);
1168extern void uthread_cleanup(task_t, void *, void *);
1169extern void uthread_zone_free(void *);
1170extern void uthread_cred_free(void *);
1171
1172extern void uthread_reset_proc_refcount(void *);
3e170ce0 1173#if PROC_REF_DEBUG
0a7de745
A
1174extern int uthread_get_proc_refcount(void *);
1175extern int proc_ref_tracking_disabled;
3e170ce0 1176#endif
91447636 1177
0a7de745
A
1178extern boolean_t thread_should_halt(
1179 thread_t thread);
91447636 1180
0a7de745
A
1181extern boolean_t thread_should_abort(
1182 thread_t);
316670eb 1183
2d21ac55
A
1184extern int is_64signalregset(void);
1185
3e170ce0 1186extern void act_set_kperf(thread_t);
5c9f4661
A
1187extern void act_set_astledger(thread_t thread);
1188extern void act_set_astledger_async(thread_t thread);
39037602 1189extern void act_set_io_telemetry_ast(thread_t);
2d21ac55
A
1190
1191extern uint32_t dtrace_get_thread_predcache(thread_t);
1192extern int64_t dtrace_get_thread_vtime(thread_t);
1193extern int64_t dtrace_get_thread_tracing(thread_t);
cb323159 1194extern uint16_t dtrace_get_thread_inprobe(thread_t);
3e170ce0 1195extern int dtrace_get_thread_last_cpu_id(thread_t);
2d21ac55
A
1196extern vm_offset_t dtrace_get_kernel_stack(thread_t);
1197extern void dtrace_set_thread_predcache(thread_t, uint32_t);
1198extern void dtrace_set_thread_vtime(thread_t, int64_t);
1199extern void dtrace_set_thread_tracing(thread_t, int64_t);
cb323159 1200extern void dtrace_set_thread_inprobe(thread_t, uint16_t);
2d21ac55 1201extern vm_offset_t dtrace_set_thread_recover(thread_t, vm_offset_t);
0a7de745 1202extern vm_offset_t dtrace_sign_and_set_thread_recover(thread_t, vm_offset_t);
b0d623f7 1203extern void dtrace_thread_bootstrap(void);
39236c6e 1204extern void dtrace_thread_didexec(thread_t);
2d21ac55
A
1205
1206extern int64_t dtrace_calc_thread_recent_vtime(thread_t);
1207
1208
0a7de745
A
1209extern kern_return_t thread_set_wq_state32(
1210 thread_t thread,
1211 thread_state_t tstate);
2d21ac55 1212
0a7de745
A
1213extern kern_return_t thread_set_wq_state64(
1214 thread_t thread,
1215 thread_state_t tstate);
2d21ac55 1216
0a7de745
A
1217extern vm_offset_t kernel_stack_mask;
1218extern vm_offset_t kernel_stack_size;
1219extern vm_offset_t kernel_stack_depth_max;
b0d623f7 1220
5ba3f43e
A
1221extern void guard_ast(thread_t);
1222extern void fd_guard_ast(thread_t,
0a7de745 1223 mach_exception_code_t, mach_exception_subcode_t);
5ba3f43e
A
1224#if CONFIG_VNGUARD
1225extern void vn_guard_ast(thread_t,
0a7de745 1226 mach_exception_code_t, mach_exception_subcode_t);
5ba3f43e
A
1227#endif
1228extern void mach_port_guard_ast(thread_t,
0a7de745 1229 mach_exception_code_t, mach_exception_subcode_t);
d9a64523 1230extern void virt_memory_guard_ast(thread_t,
0a7de745 1231 mach_exception_code_t, mach_exception_subcode_t);
5ba3f43e 1232extern void thread_guard_violation(thread_t,
cb323159 1233 mach_exception_code_t, mach_exception_subcode_t, boolean_t);
5ba3f43e 1234extern void thread_update_io_stats(thread_t, int size, int io_flags);
fe8ab488 1235
0a7de745 1236extern kern_return_t thread_set_voucher_name(mach_port_name_t name);
fe8ab488 1237extern kern_return_t thread_get_current_voucher_origin_pid(int32_t *pid);
39236c6e 1238
ecc0ceb4
A
1239extern void set_thread_rwlock_boost(void);
1240extern void clear_thread_rwlock_boost(void);
1241
490019cf
A
1242extern void thread_enable_send_importance(thread_t thread, boolean_t enable);
1243
d9a64523
A
1244/*
1245 * Translate signal context data pointer to userspace representation
1246 */
1247
0a7de745
A
1248extern kern_return_t machine_thread_siguctx_pointer_convert_to_user(
1249 thread_t thread,
1250 user_addr_t *uctxp);
1251
1252extern void machine_tecs(thread_t thr);
1253
1254typedef enum cpuvn {
1255 CPUVN_CI = 1
1256} cpuvn_e;
1257
1258extern int machine_csv(cpuvn_e cve);
d9a64523
A
1259
1260/*
1261 * Translate array of function pointer syscall arguments from userspace representation
1262 */
1263
0a7de745
A
1264extern kern_return_t machine_thread_function_pointers_convert_from_user(
1265 thread_t thread,
1266 user_addr_t *fptrs,
1267 uint32_t count);
d9a64523 1268
39037602
A
1269/* Get a backtrace for a thread's kernel or user stack (user_p), with pc and optionally
1270 * frame pointer (getfp). Returns bytes added to buffer, and kThreadTruncatedBT in
1271 * thread_trace_flags if a user page is not present after kdp_lightweight_fault() is
1272 * called.
1273 */
1274
0a7de745
A
1275extern int machine_trace_thread(
1276 thread_t thread,
1277 char *tracepos,
1278 char *tracebound,
1279 int nframes,
1280 boolean_t user_p,
1281 boolean_t getfp,
1282 uint32_t *thread_trace_flags);
1283
1284extern int machine_trace_thread64(thread_t thread,
1285 char *tracepos,
1286 char *tracebound,
1287 int nframes,
1288 boolean_t user_p,
1289 boolean_t getfp,
1290 uint32_t *thread_trace_flags,
1291 uint64_t *sp);
d9a64523
A
1292
1293/*
1294 * Get the duration of the given thread's last wait.
1295 */
1296uint64_t thread_get_last_wait_duration(thread_t thread);
39037602 1297
0a7de745
A
1298extern void thread_set_no_smt(bool set);
1299extern bool thread_get_no_smt(void);
1300
1301#endif /* XNU_KERNEL_PRIVATE */
91447636 1302
cb323159
A
1303/*! @function thread_has_thread_name
1304 * @abstract Checks if a thread has a name.
1305 * @discussion This function takes one input, a thread, and returns a boolean value indicating if that thread already has a name associated with it.
1306 * @param th The thread to inspect.
1307 * @result TRUE if the thread has a name, FALSE otherwise.
1308 */
1309extern boolean_t thread_has_thread_name(thread_t th);
1310
1311/*! @function thread_set_thread_name
1312 * @abstract Set a thread's name.
1313 * @discussion This function takes two input parameters: a thread to name, and the name to apply to the thread. The name will be copied over to the thread in order to better identify the thread. If the name is longer than MAXTHREADNAMESIZE - 1, it will be truncated.
1314 * @param th The thread to be named.
1315 * @param name The name to apply to the thread.
1316 */
1317extern void thread_set_thread_name(thread_t th, const char* name);
fe8ab488 1318
b0d623f7 1319/*! @function kernel_thread_start
0a7de745
A
1320 * @abstract Create a kernel thread.
1321 * @discussion This function takes three input parameters: a reference to the function the thread should execute, caller-specified data, and a reference used to return the newly created kernel thread. The function returns KERN_SUCCESS on success or an appropriate kern_return_t error code. Note that the caller is responsible for explicitly releasing the reference to the created thread when it is no longer needed, by calling thread_deallocate(new_thread).
1322 * @param continuation A C-function pointer where the thread will begin execution.
1323 * @param parameter Caller specified data to be passed to the new thread.
1324 * @param new_thread Reference to the new thread is returned in this parameter.
1325 * @result Returns KERN_SUCCESS on success or an appropriate kernel code type.
1326 */
1327
1328extern kern_return_t kernel_thread_start(
1329 thread_continue_t continuation,
1330 void *parameter,
1331 thread_t *new_thread);
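/*
 * Illustrative sketch (not part of this header): my_worker and its argument
 * are hypothetical. The creation reference is dropped once the caller no
 * longer needs the thread_t:
 *
 *	static void
 *	my_worker(void *arg, wait_result_t wr)
 *	{
 *		thread_set_thread_name(current_thread(), "my_worker");
 *		-- do work, then terminate
 *	}
 *
 *	thread_t t;
 *	if (kernel_thread_start(my_worker, NULL, &t) == KERN_SUCCESS) {
 *		thread_deallocate(t);
 *	}
 */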
5ba3f43e 1332
6d2010ae
A
1333#ifdef KERNEL_PRIVATE
1334void thread_set_eager_preempt(thread_t thread);
1335void thread_clear_eager_preempt(thread_t thread);
a39ff7e2
A
1336void thread_set_honor_qlimit(thread_t thread);
1337void thread_clear_honor_qlimit(thread_t thread);
316670eb 1338extern ipc_port_t convert_thread_to_port(thread_t);
813fb2f6 1339extern ipc_port_t convert_thread_inspect_to_port(thread_inspect_t);
39037602 1340extern boolean_t is_vm_privileged(void);
fe8ab488 1341extern boolean_t set_vm_privilege(boolean_t);
5ba3f43e 1342extern kern_allocation_name_t thread_set_allocation_name(kern_allocation_name_t new_name);
cb323159
A
1343extern void *thread_iokit_tls_get(uint32_t index);
1344extern void thread_iokit_tls_set(uint32_t index, void * data);
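/*
 * Illustrative example (not part of this header): stash and retrieve a
 * per-thread pointer in one of the THREAD_SAVE_IOKIT_TLS_COUNT slots;
 * slot 0 and my_state are hypothetical:
 *
 *	thread_iokit_tls_set(0, my_state);
 *	...
 *	struct my_state *s = (struct my_state *)thread_iokit_tls_get(0);
 */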
6d2010ae 1345#endif /* KERNEL_PRIVATE */
55e303ae 1346
91447636 1347__END_DECLS
55e303ae 1348
0a7de745 1349#endif /* _KERN_THREAD_H_ */