[apple/xnu.git] / osfmk / kern / thread.h (xnu-7195.101.1)
1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: thread.h
60 * Author: Avadis Tevanian, Jr.
61 *
62 * This file contains the structure definitions for threads.
63 *
64 */
65 /*
66 * Copyright (c) 1993 The University of Utah and
67 * the Computer Systems Laboratory (CSL). All rights reserved.
68 *
69 * Permission to use, copy, modify and distribute this software and its
70 * documentation is hereby granted, provided that both the copyright
71 * notice and this permission notice appear in all copies of the
72 * software, derivative works or modified versions, and any portions
73 * thereof, and that both notices appear in supporting documentation.
74 *
75 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
76 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
77 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
78 *
79 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
80 * improvements that they make and grant CSL redistribution rights.
81 *
82 */
83
84 #ifndef _KERN_THREAD_H_
85 #define _KERN_THREAD_H_
86
87 #include <mach/kern_return.h>
88 #include <mach/mach_types.h>
89 #include <mach/message.h>
90 #include <mach/boolean.h>
91 #include <mach/vm_param.h>
92 #include <mach/thread_info.h>
93 #include <mach/thread_status.h>
94 #include <mach/exception_types.h>
95
96 #include <kern/kern_types.h>
97 #include <vm/vm_kern.h>
98
99 #include <sys/cdefs.h>
100
101 #ifdef XNU_KERNEL_PRIVATE
102 /* Thread tags; for easy identification. */
103 __options_closed_decl(thread_tag_t, uint16_t, {
104 THREAD_TAG_MAINTHREAD = 0x01,
105 THREAD_TAG_CALLOUT = 0x02,
106 THREAD_TAG_IOWORKLOOP = 0x04,
107 THREAD_TAG_PTHREAD = 0x10,
108 THREAD_TAG_WORKQUEUE = 0x20,
109 });
110 #endif /* XNU_KERNEL_PRIVATE */
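/*
 * Illustrative sketch (not part of the original header): the tags above are
 * bit flags, so one thread may carry several at once. Using the
 * thread_get_tag() accessor declared later in this file, a workqueue pthread
 * could be detected roughly like this:
 *
 *	if (thread_get_tag(thread) & (THREAD_TAG_PTHREAD | THREAD_TAG_WORKQUEUE)) {
 *		// created by the pthread/workqueue machinery
 *	}
 */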
111
112 #ifdef MACH_KERNEL_PRIVATE
113
114 #include <mach_assert.h>
115 #include <mach_ldebug.h>
116
117 #include <ipc/ipc_types.h>
118
119 #include <mach/port.h>
120 #include <kern/cpu_number.h>
121 #include <kern/smp.h>
122 #include <kern/queue.h>
123
124 #include <kern/timer.h>
125 #include <kern/simple_lock.h>
126 #include <kern/locks.h>
127 #include <kern/sched.h>
128 #include <kern/sched_prim.h>
129 #include <mach/sfi_class.h>
130 #include <kern/thread_call.h>
131 #include <kern/thread_group.h>
132 #include <kern/timer_call.h>
133 #include <kern/task.h>
134 #include <kern/exception.h>
135 #include <kern/affinity.h>
136 #include <kern/debug.h>
137 #include <kern/block_hint.h>
138 #include <kern/turnstile.h>
139 #include <kern/mpsc_queue.h>
140
141 #include <kern/waitq.h>
142 #include <san/kasan.h>
143 #include <os/refcnt.h>
144
145 #include <ipc/ipc_kmsg.h>
146
147 #include <machine/atomic.h>
148 #include <machine/cpu_data.h>
149 #include <machine/thread.h>
150
151 #ifdef XNU_KERNEL_PRIVATE
152 /* priority queue static asserts fail for __ARM64_ARCH_8_32__ kext builds */
153 #include <kern/priority_queue.h>
154 #endif /* XNU_KERNEL_PRIVATE */
155
156 #if MONOTONIC
157 #include <stdatomic.h>
158 #include <machine/monotonic.h>
159 #endif /* MONOTONIC */
160
161 #if CONFIG_TASKWATCH
162 /* Taskwatch related. TODO: find this a better home */
163 typedef struct task_watcher task_watch_t;
164 #endif /* CONFIG_TASKWATCH */
165
166 __options_decl(thread_work_interval_flags_t, uint32_t, {
167 TH_WORK_INTERVAL_FLAGS_NONE = 0x0,
168 #if CONFIG_SCHED_AUTO_JOIN
169 /* Flags to indicate the status of the work interval the thread is currently part of */
170 TH_WORK_INTERVAL_FLAGS_AUTO_JOIN_LEAK = 0x1,
171 #endif /* CONFIG_SCHED_AUTO_JOIN */
172 });
173
174 struct thread {
175 #if MACH_ASSERT
176 #define THREAD_MAGIC 0x1234ABCDDCBA4321ULL
177 /* Ensure nothing uses &thread as a queue entry */
178 uint64_t thread_magic;
179 #endif /* MACH_ASSERT */
180
181 /*
182 * NOTE: The runq field in the thread structure has an unusual
183 * locking protocol. If its value is PROCESSOR_NULL, then it is
184 * locked by the thread_lock, but if its value is something else
185 * then it is locked by the associated run queue lock. It is
186 * set to PROCESSOR_NULL without holding the thread lock, but the
187 * transition from PROCESSOR_NULL to non-null must be done
188 * under the thread lock and the run queue lock.
189 *
190 * New waitq APIs allow the 'links' and 'runq' fields to be
191 * anywhere in the thread structure.
192 */
193 union {
194 queue_chain_t runq_links; /* run queue links */
195 queue_chain_t wait_links; /* wait queue links */
196 struct mpsc_queue_chain mpsc_links; /* thread daemon mpsc links */
197 struct priority_queue_entry_sched wait_prioq_links; /* priority ordered waitq links */
198 };
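/*
 * A minimal sketch of the protocol described above (illustrative, not part
 * of the original header). Under the thread lock, only PROCESSOR_NULL is a
 * stable answer: a NULL-to-non-NULL transition also requires the thread
 * lock, while a non-NULL runq may still be cleared concurrently under its
 * run queue lock alone.
 *
 *	thread_lock(thread);
 *	if (thread->runq == PROCESSOR_NULL) {
 *		// not enqueued, and cannot become enqueued while we hold the lock
 *	} else {
 *		// enqueued; the entry itself is protected by that run queue's lock
 *	}
 *	thread_unlock(thread);
 */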
199
200 event64_t wait_event; /* wait queue event */
201 processor_t runq; /* run queue assignment */
202 struct waitq *waitq; /* wait queue this thread is enqueued on */
203 struct turnstile *turnstile; /* thread's turnstile, protected by primitives interlock */
204 void *inheritor; /* inheritor of the primitive the thread will block on */
205 struct priority_queue_sched_max sched_inheritor_queue; /* Inheritor queue for kernel promotion */
206 struct priority_queue_sched_max base_inheritor_queue; /* Inheritor queue for user promotion */
207
208 #if CONFIG_SCHED_EDGE
209 boolean_t th_bound_cluster_enqueued;
210 #endif /* CONFIG_SCHED_EDGE */
211
212 #if CONFIG_SCHED_CLUTCH
213 /*
214 * In the clutch scheduler, the threads are maintained in runqs at the clutch_bucket
215 * level (clutch_bucket defines a unique thread group and scheduling bucket pair). The
216 * thread is linked via a couple of linkages in the clutch bucket:
217 *
218 * - A stable priority queue linkage which is the main runqueue (based on sched_pri) for the clutch bucket
219 * - A regular priority queue linkage which is based on thread's base/promoted pri (used for clutch bucket priority calculation)
220 * - A queue linkage used for timesharing operations of threads at the scheduler tick
221 */
222 struct priority_queue_entry_stable th_clutch_runq_link;
223 struct priority_queue_entry_sched th_clutch_pri_link;
224 queue_chain_t th_clutch_timeshare_link;
225 #endif /* CONFIG_SCHED_CLUTCH */
226
227 /* Data updated during assert_wait/thread_wakeup */
228 decl_simple_lock_data(, sched_lock); /* scheduling lock (thread_lock()) */
229 decl_simple_lock_data(, wake_lock); /* for thread stop / wait (wake_lock()) */
230 uint16_t options; /* options set by thread itself */
231 #define TH_OPT_INTMASK 0x0003 /* interrupt / abort level */
232 #define TH_OPT_VMPRIV 0x0004 /* may allocate reserved memory */
233 #define TH_OPT_SYSTEM_CRITICAL 0x0010 /* Thread must always be allowed to run - even under heavy load */
234 #define TH_OPT_PROC_CPULIMIT 0x0020 /* Thread has a task-wide CPU limit applied to it */
235 #define TH_OPT_PRVT_CPULIMIT 0x0040 /* Thread has a thread-private CPU limit applied to it */
236 #define TH_OPT_IDLE_THREAD 0x0080 /* Thread is a per-processor idle thread */
237 #define TH_OPT_GLOBAL_FORCED_IDLE 0x0100 /* Thread performs forced idle for thermal control */
238 #define TH_OPT_SCHED_VM_GROUP 0x0200 /* Thread belongs to special scheduler VM group */
239 #define TH_OPT_HONOR_QLIMIT 0x0400 /* Thread will honor qlimit while sending mach_msg, regardless of MACH_SEND_ALWAYS */
240 #define TH_OPT_SEND_IMPORTANCE 0x0800 /* Thread will allow importance donation from kernel rpc */
241 #define TH_OPT_ZONE_PRIV 0x1000 /* Thread may use the zone replenish reserve */
242 #define TH_OPT_IPC_TG_BLOCKED 0x2000 /* Thread blocked in sync IPC and has made the thread group blocked callout */
243
244 bool wake_active; /* wake event on stop */
245 bool at_safe_point; /* thread_abort_safely allowed */
246 uint8_t sched_saved_run_weight;
247 ast_t reason; /* why we blocked */
248 uint32_t quantum_remaining;
249 wait_result_t wait_result; /* outcome of wait -
250 * may be examined by this thread
251 * WITHOUT locking */
252 thread_continue_t continuation; /* continue here next dispatch */
253 void *parameter; /* continuation parameter */
254
255 /* Data updated/used in thread_invoke */
256 vm_offset_t kernel_stack; /* current kernel stack */
257 vm_offset_t reserved_stack; /* reserved kernel stack */
258
259 /*** Machine-dependent state ***/
260 struct machine_thread machine;
261
262 #if KASAN
263 struct kasan_thread_data kasan_data;
264 #endif
265 #if CONFIG_KSANCOV
266 void *ksancov_data;
267 #endif
268
269 /* Thread state: */
270 int state;
271 /*
272 * Thread states [bits or'ed]
273 */
274 #define TH_WAIT 0x01 /* queued for waiting */
275 #define TH_SUSP 0x02 /* stopped or requested to stop */
276 #define TH_RUN 0x04 /* running or on runq */
277 #define TH_UNINT 0x08 /* waiting uninterruptibly */
278 #define TH_TERMINATE 0x10 /* halted at termination */
279 #define TH_TERMINATE2 0x20 /* added to termination queue */
280 #define TH_WAIT_REPORT 0x40 /* the wait is using the sched_call,
281 * only set if TH_WAIT is also set */
282 #define TH_IDLE 0x80 /* idling processor */
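/*
 * Illustrative examples of how these bits combine (not part of the original
 * header): a runnable or running thread is TH_RUN, an interruptible blocked
 * thread is TH_WAIT, and an uninterruptible one is TH_WAIT | TH_UNINT.
 * During a wakeup both TH_WAIT and TH_RUN may briefly be set, so a check
 * for "blocked and not runnable" takes a shape like:
 *
 *	if ((thread->state & (TH_RUN | TH_WAIT)) == TH_WAIT) {
 *		// blocked; not on a run queue or processor
 *	}
 */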
283
284 /* Scheduling information */
285 sched_mode_t sched_mode; /* scheduling mode */
286 sched_mode_t saved_mode; /* saved mode during forced mode demotion */
287
288 /* This thread's contribution to global sched counters */
289 sched_bucket_t th_sched_bucket;
290
291 sfi_class_id_t sfi_class; /* SFI class (XXX Updated on CSW/QE/AST) */
292 sfi_class_id_t sfi_wait_class; /* Currently in SFI wait for this class, protected by sfi_lock */
293
294 uint32_t sched_flags; /* current flag bits */
295 #define TH_SFLAG_NO_SMT 0x0001 /* On an SMT CPU, this thread must be scheduled alone */
296 #define TH_SFLAG_FAILSAFE 0x0002 /* fail-safe has tripped */
297 #define TH_SFLAG_THROTTLED 0x0004 /* throttled thread forced to timeshare mode (may be applied in addition to failsafe) */
298 #define TH_SFLAG_DEMOTED_MASK (TH_SFLAG_THROTTLED | TH_SFLAG_FAILSAFE) /* saved_mode contains previous sched_mode */
299
300 #define TH_SFLAG_PROMOTED 0x0008 /* sched pri has been promoted by kernel mutex priority promotion */
301 #define TH_SFLAG_ABORT 0x0010 /* abort interruptible waits */
302 #define TH_SFLAG_ABORTSAFELY 0x0020 /* ... but only those at safe point */
303 #define TH_SFLAG_ABORTED_MASK (TH_SFLAG_ABORT | TH_SFLAG_ABORTSAFELY)
304 #define TH_SFLAG_DEPRESS 0x0040 /* normal depress yield */
305 #define TH_SFLAG_POLLDEPRESS 0x0080 /* polled depress yield */
306 #define TH_SFLAG_DEPRESSED_MASK (TH_SFLAG_DEPRESS | TH_SFLAG_POLLDEPRESS)
307 /* unused TH_SFLAG_PRI_UPDATE 0x0100 */
308 #define TH_SFLAG_EAGERPREEMPT 0x0200 /* Any preemption of this thread should be treated as if AST_URGENT applied */
309 #define TH_SFLAG_RW_PROMOTED 0x0400 /* promote reason: blocking with RW lock held */
310 #define TH_SFLAG_BASE_PRI_FROZEN 0x0800 /* (effective) base_pri is frozen */
311 #define TH_SFLAG_WAITQ_PROMOTED 0x1000 /* promote reason: waitq wakeup (generally for IPC receive) */
312
313 #if __AMP__
314 #define TH_SFLAG_ECORE_ONLY 0x2000 /* Bind thread to E core processor set */
315 #define TH_SFLAG_PCORE_ONLY 0x4000 /* Bind thread to P core processor set */
316 #endif
317
318 #define TH_SFLAG_EXEC_PROMOTED 0x8000 /* promote reason: thread is in an exec */
319
320 #define TH_SFLAG_THREAD_GROUP_AUTO_JOIN 0x10000 /* thread has been auto-joined to thread group */
321 #if __AMP__
322 #define TH_SFLAG_BOUND_SOFT 0x20000 /* thread is soft bound to a cluster; can run anywhere if bound cluster unavailable */
323 #endif /* __AMP__ */
324 /* 'promote reasons' that request a priority floor only, not a custom priority */
325 #define TH_SFLAG_PROMOTE_REASON_MASK (TH_SFLAG_RW_PROMOTED | TH_SFLAG_WAITQ_PROMOTED | TH_SFLAG_EXEC_PROMOTED)
326
327 #define TH_SFLAG_RW_PROMOTED_BIT (10) /* 0x400 */
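/*
 * Sketch of how the demotion flags are meant to be read (illustrative, not
 * part of the original header): while any bit of TH_SFLAG_DEMOTED_MASK is
 * set, saved_mode (declared earlier in this struct) holds the thread's
 * pre-demotion scheduling mode:
 *
 *	if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
 *		sched_mode_t original_mode = thread->saved_mode;
 *	}
 */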
328
329 int16_t sched_pri; /* scheduled (current) priority */
330 int16_t base_pri; /* effective base priority (equal to req_base_pri unless TH_SFLAG_BASE_PRI_FROZEN) */
331 int16_t req_base_pri; /* requested base priority */
332 int16_t max_priority; /* copy of max base priority */
333 int16_t task_priority; /* copy of task base priority */
334 int16_t promotion_priority; /* priority thread is currently promoted to */
335
336 int iotier_override; /* atomic operations to set, cleared on ret to user */
337 os_refcnt_t ref_count; /* number of references to me */
338
339 lck_mtx_t* waiting_for_mutex; /* points to mutex we're waiting for until we acquire it */
340
341 uint32_t rwlock_count; /* Number of lck_rw_t locks held by thread */
342 uint32_t t_temp_alloc_count; /* In flight temporary allocations */
343 #if DEBUG || DEVELOPMENT
344 queue_head_t t_temp_alloc_list;
345 #endif /* DEBUG || DEVELOPMENT */
346
347 integer_t importance; /* task-relative importance */
348
349 /* Priority depression expiration */
350 integer_t depress_timer_active;
351 timer_call_data_t depress_timer;
352
353 /* real-time parameters */
354 struct { /* see mach/thread_policy.h */
355 uint32_t period;
356 uint32_t computation;
357 uint32_t constraint;
358 boolean_t preemptible;
359 uint64_t deadline;
360 } realtime;
361
362 uint64_t last_run_time; /* time when thread was switched away from */
363 uint64_t last_made_runnable_time; /* time when thread was unblocked or preempted */
364 uint64_t last_basepri_change_time; /* time when thread was last changed in basepri while runnable */
365 uint64_t same_pri_latency;
366 #define THREAD_NOT_RUNNABLE (~0ULL)
367
368 #if CONFIG_THREAD_GROUPS
369 struct thread_group *thread_group;
370 #endif
371
372 #if defined(CONFIG_SCHED_MULTIQ)
373 sched_group_t sched_group;
374 #endif /* defined(CONFIG_SCHED_MULTIQ) */
375
376 /* Data used during setrun/dispatch */
377 timer_data_t system_timer; /* system mode timer */
378 processor_t bound_processor; /* bound to a processor? */
379 processor_t last_processor; /* processor last dispatched on */
380 processor_t chosen_processor; /* Where we want to run this thread */
381
382 /* Fail-safe computation since last unblock or qualifying yield */
383 uint64_t computation_metered;
384 uint64_t computation_epoch;
385 uint64_t safe_release; /* when to release fail-safe */
386
387 /* Call out from scheduler */
388 void (*sched_call)(int type, thread_t thread);
389
390 #if defined(CONFIG_SCHED_PROTO)
391 uint32_t runqueue_generation; /* last time runqueue was drained */
392 #endif
393
394 /* Statistics and timesharing calculations */
395 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
396 natural_t sched_stamp; /* last scheduler tick */
397 natural_t sched_usage; /* timesharing cpu usage [sched] */
398 natural_t pri_shift; /* usage -> priority from pset */
399 natural_t cpu_usage; /* instrumented cpu usage [%cpu] */
400 natural_t cpu_delta; /* accumulated cpu_usage delta */
401 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
402
403 uint32_t c_switch; /* total context switches */
404 uint32_t p_switch; /* total processor switches */
405 uint32_t ps_switch; /* total pset switches */
406
407 integer_t mutex_count; /* total count of locks held */
408 /* Timing data structures */
409 int precise_user_kernel_time; /* precise user/kernel enabled for this thread */
410 timer_data_t user_timer; /* user mode timer */
411 uint64_t user_timer_save; /* saved user timer value */
412 uint64_t system_timer_save; /* saved system timer value */
413 uint64_t vtimer_user_save; /* saved values for vtimers */
414 uint64_t vtimer_prof_save;
415 uint64_t vtimer_rlim_save;
416 uint64_t vtimer_qos_save;
417
418 timer_data_t ptime; /* time executing in P mode */
419 timer_data_t runnable_timer; /* time the thread is runnable (including running) */
420
421 #if CONFIG_SCHED_SFI
422 /* Timing for wait state */
423 uint64_t wait_sfi_begin_time; /* start time for thread waiting in SFI */
424 #endif
425
426 /*
427 * Processor/cache affinity
428 * - affinity_threads links task threads with the same affinity set
429 */
430 queue_chain_t affinity_threads;
431 affinity_set_t affinity_set;
432
433 #if CONFIG_TASKWATCH
434 task_watch_t *taskwatch; /* task watch */
435 #endif /* CONFIG_TASKWATCH */
436
437 /* Various bits of state to stash across a continuation, exclusive to the current thread block point */
438 union {
439 struct {
440 mach_msg_return_t state; /* receive state */
441 mach_port_seqno_t seqno; /* seqno of recvd message */
442 ipc_object_t object; /* object received on */
443 vm_address_t msg_addr; /* receive buffer pointer */
444 mach_msg_size_t rsize; /* max size for recvd msg */
445 mach_msg_size_t msize; /* actual size for recvd msg */
446 mach_msg_option_t option; /* options for receive */
447 mach_port_name_t receiver_name; /* the receive port name */
448 struct knote *knote; /* knote fired for rcv */
449 union {
450 struct ipc_kmsg *kmsg; /* received message */
451 struct ipc_mqueue *peekq; /* mqueue to peek at */
452 struct {
453 uint32_t ppri; /* received message pthread_priority_t */
454 mach_msg_qos_t oqos; /* override qos for message */
455 } received_qos;
456 };
457 mach_msg_continue_t continuation;
458 } receive;
459 struct {
460 struct semaphore *waitsemaphore; /* semaphore ref */
461 struct semaphore *signalsemaphore; /* semaphore ref */
462 int options; /* semaphore options */
463 kern_return_t result; /* primary result */
464 mach_msg_continue_t continuation;
465 } sema;
466 struct {
467 #define THREAD_SAVE_IOKIT_TLS_COUNT 8
468 void *tls[THREAD_SAVE_IOKIT_TLS_COUNT];
469 } iokit;
470 } saved;
471
472 /* Only user threads can cause guard exceptions, only kernel threads can be thread call threads */
473 union {
474 /* Thread call thread's state structure, stored on its stack */
475 struct thread_call_thread_state *thc_state;
476
477 /* Structure to save information about guard exception */
478 struct {
479 mach_exception_code_t code;
480 mach_exception_subcode_t subcode;
481 } guard_exc_info;
482 };
483
484 /* Kernel holds on this thread */
485 int16_t suspend_count;
486 /* User level suspensions */
487 int16_t user_stop_count;
488
489 /* IPC data structures */
490 #if IMPORTANCE_INHERITANCE
491 natural_t ith_assertions; /* assertions pending drop */
492 #endif
493 struct ipc_kmsg_queue ith_messages; /* messages to reap */
494 mach_port_t ith_rpc_reply; /* reply port for kernel RPCs */
495
496 /* Pending thread ast(s) */
497 ast_t ast;
498
499 /* Ast/Halt data structures */
500 vm_offset_t recover; /* page fault recovery (copyin/out) */
501
502 queue_chain_t threads; /* global list of all threads */
503
504 /* Activation */
505 queue_chain_t task_threads;
506
507 /* Task membership */
508 struct task *task;
509 vm_map_t map;
510 thread_t handoff_thread;
511 #if DEVELOPMENT || DEBUG
512 bool pmap_footprint_suspended;
513 #endif /* DEVELOPMENT || DEBUG */
514
515 /* Timed wait expiration */
516 timer_call_data_t wait_timer;
517 uint16_t wait_timer_active;
518 bool wait_timer_is_set;
519
520 /* Miscellaneous bits guarded by mutex */
521 uint32_t
522 active:1, /* Thread is active and has not been terminated */
523 ipc_active:1, /* IPC with the thread ports is allowed */
524 started:1, /* Thread has been started after creation */
525 static_param:1, /* Disallow policy parameter changes */
526 inspection:1, /* TRUE when task is being inspected by crash reporter */
527 policy_reset:1, /* Disallow policy parameter changes on terminating threads */
528 suspend_parked:1, /* thread parked in thread_suspended */
529 corpse_dup:1, /* TRUE when thread is an inactive duplicate in a corpse */
530 :0;
531
532 decl_lck_mtx_data(, mutex);
533
534 /*
535 * Different flavors of thread port.
536 * These flavors THREAD_FLAVOR_* are defined in mach_types.h
537 */
538 struct ipc_port *ith_thread_ports[THREAD_SELF_PORT_COUNT]; /* does not hold right */
539 struct ipc_port *ith_settable_self; /* a send right */
540 struct ipc_port *ith_self; /* immovable/pinned thread port */
541 struct ipc_port *ith_special_reply_port; /* ref to special reply port */
542 struct exception_action *exc_actions;
543
544 #ifdef MACH_BSD
545 void *uthread;
546 #endif
547
548 #if CONFIG_DTRACE
549 uint16_t t_dtrace_flags; /* DTrace thread states */
550 #define TH_DTRACE_EXECSUCCESS 0x01
551 uint16_t t_dtrace_inprobe; /* Executing under dtrace_probe */
552 uint32_t t_dtrace_predcache; /* DTrace per thread predicate value hint */
553 int64_t t_dtrace_tracing; /* Thread time under dtrace_probe() */
554 int64_t t_dtrace_vtime;
555 #endif
556
557 clock_sec_t t_page_creation_time;
558 uint32_t t_page_creation_count;
559 uint32_t t_page_creation_throttled;
560 #if (DEVELOPMENT || DEBUG)
561 uint64_t t_page_creation_throttled_hard;
562 uint64_t t_page_creation_throttled_soft;
563 #endif /* DEVELOPMENT || DEBUG */
564 int t_pagein_error; /* for vm_fault(), holds error from vnop_pagein() */
565
566 #ifdef KPERF
567 /* The high 8 bits are the number of frames to sample of a user callstack. */
568 #define T_KPERF_CALLSTACK_DEPTH_OFFSET (24)
569 #define T_KPERF_SET_CALLSTACK_DEPTH(DEPTH) (((uint32_t)(DEPTH)) << T_KPERF_CALLSTACK_DEPTH_OFFSET)
570 #define T_KPERF_GET_CALLSTACK_DEPTH(FLAGS) ((FLAGS) >> T_KPERF_CALLSTACK_DEPTH_OFFSET)
571 #define T_KPERF_ACTIONID_OFFSET (18)
572 #define T_KPERF_SET_ACTIONID(AID) (((uint32_t)(AID)) << T_KPERF_ACTIONID_OFFSET)
573 #define T_KPERF_GET_ACTIONID(FLAGS) ((FLAGS) >> T_KPERF_ACTIONID_OFFSET)
574 #endif
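/*
 * Worked example for the packing above (illustrative, not from the original
 * header): with T_KPERF_CALLSTACK_DEPTH_OFFSET == 24,
 * T_KPERF_SET_CALLSTACK_DEPTH(8) evaluates to 8 << 24 == 0x08000000, and
 * T_KPERF_GET_CALLSTACK_DEPTH() of that value shifts it back down to 8.
 * The action ID field begins at bit 18, below the depth field, and the low
 * bits hold the T_KPERF_AST_* / T_KPC_* flags defined just below.
 */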
575
576 #define T_KPERF_AST_CALLSTACK 0x1 /* dump a callstack on thread's next AST */
577 #define T_KPERF_AST_DISPATCH 0x2 /* dump a name on thread's next AST */
578 #define T_KPC_ALLOC 0x4 /* thread needs a kpc_buf allocated */
579
580 #define T_KPERF_AST_ALL \
581 (T_KPERF_AST_CALLSTACK | T_KPERF_AST_DISPATCH | T_KPC_ALLOC)
582 /* only go up to T_KPERF_ACTIONID_OFFSET - 1 */
583
584 #ifdef KPERF
585 uint32_t kperf_ast;
586 uint32_t kperf_pet_gen; /* last generation of PET that sampled this thread */
587 uint32_t kperf_c_switch; /* last dispatch detection */
588 uint32_t kperf_pet_cnt; /* how many times a thread has been sampled by PET */
589 #endif
590
591 #ifdef KPC
592 /* accumulated performance counters for this thread */
593 uint64_t *kpc_buf;
594 #endif
595
596 #if HYPERVISOR
597 /* hypervisor virtual CPU object associated with this thread */
598 void *hv_thread_target;
599 #endif /* HYPERVISOR */
600
601 /* Statistics accumulated per-thread and aggregated per-task */
602 uint32_t syscalls_unix;
603 uint32_t syscalls_mach;
604 ledger_t t_ledger;
605 ledger_t t_threadledger; /* per thread ledger */
606 ledger_t t_bankledger; /* ledger to charge someone */
607 uint64_t t_deduct_bank_ledger_time; /* cpu time to be deducted from bank ledger */
608 uint64_t t_deduct_bank_ledger_energy; /* energy to be deducted from bank ledger */
609
610 uint64_t thread_id; /* system-wide unique thread id */
611
612 #if MONOTONIC
613 struct mt_thread t_monotonic;
614 #endif /* MONOTONIC */
615
616 /* policy is protected by the thread mutex */
617 struct thread_requested_policy requested_policy;
618 struct thread_effective_policy effective_policy;
619
620 /* usynch override is protected by the task lock, eventually will be thread mutex */
621 struct thread_qos_override {
622 struct thread_qos_override *override_next;
623 uint32_t override_contended_resource_count;
624 int16_t override_qos;
625 int16_t override_resource_type;
626 user_addr_t override_resource;
627 } *overrides;
628
629 uint32_t kevent_overrides;
630 uint8_t user_promotion_basepri;
631 uint8_t kern_promotion_schedpri;
632 _Atomic uint16_t kevent_ast_bits;
633
634 io_stat_info_t thread_io_stats; /* per-thread I/O statistics */
635
636 uint32_t thread_callout_interrupt_wakeups;
637 uint32_t thread_callout_platform_idle_wakeups;
638 uint32_t thread_timer_wakeups_bin_1;
639 uint32_t thread_timer_wakeups_bin_2;
640 thread_tag_t thread_tag;
641
642 /*
643 * callout_* fields are only set for thread call threads whereas guard_exc_fatal is set
644 * by user threads on themselves while taking a guard exception. So it's okay for them to
645 * share this bitfield.
646 */
647 uint16_t
648 callout_woken_from_icontext:1,
649 callout_woken_from_platform_idle:1,
650 callout_woke_thread:1,
651 guard_exc_fatal:1,
652 thread_bitfield_unused:12;
653
654 mach_port_name_t ith_voucher_name;
655 ipc_voucher_t ith_voucher;
656 #if CONFIG_IOSCHED
657 void *decmp_upl;
658 #endif /* CONFIG_IOSCHED */
659
660 /* work interval (if any) associated with the thread. Uses thread mutex */
661 struct work_interval *th_work_interval;
662 thread_work_interval_flags_t th_work_interval_flags;
663
664 #if SCHED_TRACE_THREAD_WAKEUPS
665 uintptr_t thread_wakeup_bt[64];
666 #endif
667 turnstile_update_flags_t inheritor_flags; /* inheritor flags for inheritor field */
668 block_hint_t pending_block_hint;
669 block_hint_t block_hint; /* What type of primitive last caused us to block. */
670 integer_t decompressions; /* Per-thread decompressions counter to be added to per-task decompressions counter */
671 int thread_region_page_shift; /* Page shift that this thread would like to use when */
672 /* introspecting a task. This is currently being used */
673 /* by footprint which uses a thread for each task being inspected. */
674 };
675
676 #define ith_state saved.receive.state
677 #define ith_object saved.receive.object
678 #define ith_msg_addr saved.receive.msg_addr
679 #define ith_rsize saved.receive.rsize
680 #define ith_msize saved.receive.msize
681 #define ith_option saved.receive.option
682 #define ith_receiver_name saved.receive.receiver_name
683 #define ith_continuation saved.receive.continuation
684 #define ith_kmsg saved.receive.kmsg
685 #define ith_peekq saved.receive.peekq
686 #define ith_knote saved.receive.knote
687 #define ith_ppriority saved.receive.received_qos.ppri
688 #define ith_qos_override saved.receive.received_qos.oqos
689 #define ith_seqno saved.receive.seqno
690
691 #define sth_waitsemaphore saved.sema.waitsemaphore
692 #define sth_signalsemaphore saved.sema.signalsemaphore
693 #define sth_options saved.sema.options
694 #define sth_result saved.sema.result
695 #define sth_continuation saved.sema.continuation
696
697 #define ITH_KNOTE_NULL ((void *)NULL)
698 #define ITH_KNOTE_PSEUDO ((void *)0xdeadbeef)
699 /*
700 * The ith_knote is used during message delivery, and can safely be
701 * interpreted only on one of these codepaths; that is what the check of
702 * msgt_name against RECEIVE or SEND_ONCE below enforces.
703 */
704 #define ITH_KNOTE_VALID(kn, msgt_name) \
705 (((kn) != ITH_KNOTE_NULL && (kn) != ITH_KNOTE_PSEUDO) && \
706 ((msgt_name) == MACH_MSG_TYPE_PORT_RECEIVE || \
707 (msgt_name) == MACH_MSG_TYPE_PORT_SEND_ONCE))
708
709 #if MACH_ASSERT
710 #define assert_thread_magic(thread) assertf((thread)->thread_magic == THREAD_MAGIC, \
711 "bad thread magic 0x%llx for thread %p, expected 0x%llx", \
712 (thread)->thread_magic, (thread), THREAD_MAGIC)
713 #else
714 #define assert_thread_magic(thread) do { (void)(thread); } while (0)
715 #endif
716
717 extern thread_t thread_bootstrap(void);
718
719 extern void thread_machine_init_template(void);
720
721 extern void thread_init(void);
722
723 extern void thread_daemon_init(void);
724
725 #define thread_reference_internal(thread) \
726 os_ref_retain(&(thread)->ref_count);
727
728 #define thread_reference(thread) \
729 MACRO_BEGIN \
730 if ((thread) != THREAD_NULL) \
731 thread_reference_internal(thread); \
732 MACRO_END
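/*
 * Illustrative pairing (not part of the original header): each reference
 * taken with thread_reference() must eventually be dropped with
 * thread_deallocate(), declared next:
 *
 *	thread_reference(thread);
 *	// ... use the thread safely ...
 *	thread_deallocate(thread);
 */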
733
734 extern void thread_deallocate(
735 thread_t thread);
736
737 extern void thread_inspect_deallocate(
738 thread_inspect_t thread);
739
740 extern void thread_read_deallocate(
741 thread_read_t thread);
742
743 extern void thread_terminate_self(void);
744
745 __options_decl(thread_terminate_options_t, uint32_t, {
746 TH_TERMINATE_OPTION_NONE,
747 TH_TERMINATE_OPTION_UNPIN
748 });
749
750 extern kern_return_t thread_terminate_internal(
751 thread_t thread,
752 thread_terminate_options_t options);
753
754 extern void thread_start(
755 thread_t thread) __attribute__ ((noinline));
756
757 extern void thread_start_in_assert_wait(
758 thread_t thread,
759 event_t event,
760 wait_interrupt_t interruptible) __attribute__ ((noinline));
761
762 extern void thread_terminate_enqueue(
763 thread_t thread);
764
765 extern void thread_exception_enqueue(
766 task_t task,
767 thread_t thread,
768 exception_type_t etype);
769
770 extern void thread_copy_resource_info(
771 thread_t dst_thread,
772 thread_t src_thread);
773
774 extern void thread_terminate_crashed_threads(void);
775
776 extern void thread_stack_enqueue(
777 thread_t thread);
778
779 extern void thread_hold(
780 thread_t thread);
781
782 extern void thread_release(
783 thread_t thread);
784
785 extern void thread_corpse_continue(void) __dead2;
786
787 extern boolean_t thread_is_active(thread_t thread);
788
789 extern lck_grp_t thread_lck_grp;
790
791 /* Locking for scheduler state, always acquired with interrupts disabled (splsched()) */
792 #define thread_lock_init(th) simple_lock_init(&(th)->sched_lock, 0)
793 #define thread_lock(th) simple_lock(&(th)->sched_lock, &thread_lck_grp)
794 #define thread_unlock(th) simple_unlock(&(th)->sched_lock)
795
796 #define wake_lock_init(th) simple_lock_init(&(th)->wake_lock, 0)
797 #define wake_lock(th) simple_lock(&(th)->wake_lock, &thread_lck_grp)
798 #define wake_unlock(th) simple_unlock(&(th)->wake_lock)
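/*
 * A minimal usage sketch (not part of the original header), following the
 * rule stated above that scheduler state is locked with interrupts disabled:
 *
 *	spl_t s = splsched();
 *	thread_lock(thread);
 *	// ... examine or update scheduler state ...
 *	thread_unlock(thread);
 *	splx(s);
 */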
799
800 #define thread_should_halt_fast(thread) (!(thread)->active)
801
802 extern void stack_alloc(
803 thread_t thread);
804
805 extern void stack_handoff(
806 thread_t from,
807 thread_t to);
808
809 extern void stack_free(
810 thread_t thread);
811
812 extern void stack_free_reserved(
813 thread_t thread);
814
815 extern boolean_t stack_alloc_try(
816 thread_t thread);
817
818 extern void stack_collect(void);
819
820 extern void stack_init(void);
821
822
823 extern kern_return_t thread_info_internal(
824 thread_t thread,
825 thread_flavor_t flavor,
826 thread_info_t thread_info_out,
827 mach_msg_type_number_t *thread_info_count);
828
829
830
831 extern kern_return_t kernel_thread_create(
832 thread_continue_t continuation,
833 void *parameter,
834 integer_t priority,
835 thread_t *new_thread);
836
837 extern kern_return_t kernel_thread_start_priority(
838 thread_continue_t continuation,
839 void *parameter,
840 integer_t priority,
841 thread_t *new_thread);
842
843 extern void machine_stack_attach(
844 thread_t thread,
845 vm_offset_t stack);
846
847 extern vm_offset_t machine_stack_detach(
848 thread_t thread);
849
850 extern void machine_stack_handoff(
851 thread_t old,
852 thread_t new);
853
854 extern thread_t machine_switch_context(
855 thread_t old_thread,
856 thread_continue_t continuation,
857 thread_t new_thread);
858
859 extern void machine_load_context(
860 thread_t thread) __attribute__((noreturn));
861
862 extern kern_return_t machine_thread_state_initialize(
863 thread_t thread);
864
865 extern kern_return_t machine_thread_set_state(
866 thread_t thread,
867 thread_flavor_t flavor,
868 thread_state_t state,
869 mach_msg_type_number_t count);
870
871 extern mach_vm_address_t machine_thread_pc(
872 thread_t thread);
873
874 extern void machine_thread_reset_pc(
875 thread_t thread,
876 mach_vm_address_t pc);
877
878 extern boolean_t machine_thread_on_core(
879 thread_t thread);
880
881 extern kern_return_t machine_thread_get_state(
882 thread_t thread,
883 thread_flavor_t flavor,
884 thread_state_t state,
885 mach_msg_type_number_t *count);
886
887 extern kern_return_t machine_thread_state_convert_from_user(
888 thread_t thread,
889 thread_flavor_t flavor,
890 thread_state_t tstate,
891 mach_msg_type_number_t count);
892
893 extern kern_return_t machine_thread_state_convert_to_user(
894 thread_t thread,
895 thread_flavor_t flavor,
896 thread_state_t tstate,
897 mach_msg_type_number_t *count);
898
899 extern kern_return_t machine_thread_dup(
900 thread_t self,
901 thread_t target,
902 boolean_t is_corpse);
903
904 extern void machine_thread_init(void);
905
906 extern void machine_thread_template_init(thread_t thr_template);
907
908
909 extern kern_return_t machine_thread_create(
910 thread_t thread,
911 task_t task);
912 extern void machine_thread_switch_addrmode(
913 thread_t thread);
914
915 extern void machine_thread_destroy(
916 thread_t thread);
917
918 extern void machine_set_current_thread(
919 thread_t thread);
920
921 extern kern_return_t machine_thread_get_kern_state(
922 thread_t thread,
923 thread_flavor_t flavor,
924 thread_state_t tstate,
925 mach_msg_type_number_t *count);
926
927 extern kern_return_t machine_thread_inherit_taskwide(
928 thread_t thread,
929 task_t parent_task);
930
931 extern kern_return_t machine_thread_set_tsd_base(
932 thread_t thread,
933 mach_vm_offset_t tsd_base);
934
935 #define thread_mtx_lock(thread) lck_mtx_lock(&(thread)->mutex)
936 #define thread_mtx_try(thread) lck_mtx_try_lock(&(thread)->mutex)
937 #define thread_mtx_unlock(thread) lck_mtx_unlock(&(thread)->mutex)
938 #define thread_mtx_held(thread) lck_mtx_assert(&(thread)->mutex, LCK_MTX_ASSERT_OWNED)
939
940 extern void thread_apc_ast(thread_t thread);
941
942 extern void thread_update_qos_cpu_time(thread_t thread);
943
944 void act_machine_sv_free(thread_t, int);
945
946 vm_offset_t min_valid_stack_address(void);
947 vm_offset_t max_valid_stack_address(void);
948
949 extern bool thread_no_smt(thread_t thread);
950 extern bool processor_active_thread_no_smt(processor_t processor);
951
952 extern void thread_set_options(uint32_t thopt);
953
954 #if CONFIG_THREAD_GROUPS
955 struct thread_group *thread_get_current_voucher_thread_group(thread_t thread);
956 #endif /* CONFIG_THREAD_GROUPS */
957
958 #else /* MACH_KERNEL_PRIVATE */
959
960 __BEGIN_DECLS
961
962 extern void thread_mtx_lock(thread_t thread);
963
964 extern void thread_mtx_unlock(thread_t thread);
965
966 extern thread_t current_thread(void) __attribute__((const));
967
968 extern void thread_reference(
969 thread_t thread);
970
971 extern void thread_deallocate(
972 thread_t thread);
973
974 #if BSD_KERNEL_PRIVATE
975 /* Duplicated from osfmk/kern/ipc_tt.h */
976 __options_decl(port_to_thread_options_t, uint32_t, {
977 PORT_TO_THREAD_NONE = 0x0000,
978 PORT_TO_THREAD_IN_CURRENT_TASK = 0x0001,
979 PORT_TO_THREAD_NOT_CURRENT_THREAD = 0x0002,
980 });
981
982 extern thread_t port_name_to_thread(
983 mach_port_name_t port_name,
984 port_to_thread_options_t options);
985 #endif /* BSD_KERNEL_PRIVATE */
986
987 __END_DECLS
988
989 #endif /* MACH_KERNEL_PRIVATE */
990
991 #ifdef KERNEL_PRIVATE
992
993 __BEGIN_DECLS
994
995 extern void thread_deallocate_safe(
996 thread_t thread);
997
998 extern uint64_t thread_dispatchqaddr(
999 thread_t thread);
1000
1001 extern uint64_t thread_rettokern_addr(
1002 thread_t thread);
1003
1004 extern integer_t thread_kern_get_pri(thread_t thr) __attribute__((const));
1005
1006 extern void thread_kern_set_pri(thread_t thr, integer_t pri);
1007
1008 extern integer_t thread_kern_get_kernel_maxpri(void) __attribute__((const));
1009
1010 __END_DECLS
1011
1012 #endif /* KERNEL_PRIVATE */
1013
1014 #ifdef KERNEL
1015 __BEGIN_DECLS
1016
1017 extern uint64_t thread_tid(thread_t thread);
1018
1019 __END_DECLS
1020
1021 #endif /* KERNEL */
1022
1023 __BEGIN_DECLS
1024
1025 #ifdef XNU_KERNEL_PRIVATE
1026
1027 uint16_t thread_set_tag(thread_t thread, uint16_t tag);
1028 uint16_t thread_get_tag(thread_t thread);
1029
1030 #ifdef MACH_KERNEL_PRIVATE
1031 static inline thread_tag_t
1032 thread_set_tag_internal(thread_t thread, thread_tag_t tag)
1033 {
1034 return os_atomic_or_orig(&thread->thread_tag, tag, relaxed);
1035 }
1036
1037 static inline thread_tag_t
1038 thread_get_tag_internal(thread_t thread)
1039 {
1040 return thread->thread_tag;
1041 }
1042 #endif /* MACH_KERNEL_PRIVATE */
1043
1044 uint64_t thread_last_run_time(thread_t thread);
1045
1046 extern kern_return_t thread_state_initialize(
1047 thread_t thread);
1048
1049 extern kern_return_t thread_setstatus(
1050 thread_t thread,
1051 int flavor,
1052 thread_state_t tstate,
1053 mach_msg_type_number_t count);
1054
1055 extern kern_return_t thread_setstatus_from_user(
1056 thread_t thread,
1057 int flavor,
1058 thread_state_t tstate,
1059 mach_msg_type_number_t count);
1060
1061 extern kern_return_t thread_getstatus(
1062 thread_t thread,
1063 int flavor,
1064 thread_state_t tstate,
1065 mach_msg_type_number_t *count);
1066
1067 extern kern_return_t thread_getstatus_to_user(
1068 thread_t thread,
1069 int flavor,
1070 thread_state_t tstate,
1071 mach_msg_type_number_t *count);
1072
1073 extern kern_return_t thread_create_with_continuation(
1074 task_t task,
1075 thread_t *new_thread,
1076 thread_continue_t continuation);
1077
1078 /* thread_create_waiting options */
1079 __options_decl(th_create_waiting_options_t, uint32_t, {
1080 TH_CREATE_WAITING_OPTION_PINNED = 0x10,
1081 TH_CREATE_WAITING_OPTION_IMMOVABLE = 0x20,
1082 });
1083 #define TH_CREATE_WAITING_OPTION_MASK 0x30
1084
1085 extern kern_return_t thread_create_waiting(task_t task,
1086 thread_continue_t continuation,
1087 event_t event,
1088 th_create_waiting_options_t options,
1089 thread_t *new_thread);
1090
1091 extern kern_return_t thread_create_workq_waiting(
1092 task_t task,
1093 thread_continue_t thread_return,
1094 thread_t *new_thread);
1095
1096 extern void thread_yield_internal(
1097 mach_msg_timeout_t interval);
1098
1099 extern void thread_yield_to_preemption(void);
1100
1101 /*
1102 * Thread-private CPU limits: apply a private CPU limit to this thread only. Available actions are:
1103 *
1104 * 1) Block. Prevent CPU consumption of the thread from exceeding the limit.
1105 * 2) Exception. Generate a resource consumption exception when the limit is exceeded.
1106 * 3) Disable. Remove any existing CPU limit.
1107 */
1108 #define THREAD_CPULIMIT_BLOCK 0x1
1109 #define THREAD_CPULIMIT_EXCEPTION 0x2
1110 #define THREAD_CPULIMIT_DISABLE 0x3
1111
1112 struct _thread_ledger_indices {
1113 int cpu_time;
1114 };
1115
1116 extern struct _thread_ledger_indices thread_ledgers;
1117
1118 extern int thread_get_cpulimit(int *action, uint8_t *percentage, uint64_t *interval_ns);
1119 extern int thread_set_cpulimit(int action, uint8_t percentage, uint64_t interval_ns);
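/*
 * A hedged usage sketch (illustrative; the 50% / one-second parameters are
 * assumed, not taken from this header): limit the calling thread to roughly
 * half a CPU over a one-second interval, blocking it when the limit is
 * exceeded:
 *
 *	int err = thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, 50, 1 * NSEC_PER_SEC);
 *
 * Passing THREAD_CPULIMIT_DISABLE as the action removes an existing limit.
 */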
1120
1121 extern void thread_read_times(
1122 thread_t thread,
1123 time_value_t *user_time,
1124 time_value_t *system_time,
1125 time_value_t *runnable_time);
1126
1127 extern uint64_t thread_get_runtime_self(void);
1128
1129 extern void thread_setuserstack(
1130 thread_t thread,
1131 mach_vm_offset_t user_stack);
1132
1133 extern user_addr_t thread_adjuserstack(
1134 thread_t thread,
1135 int adjust);
1136
1137
1138 extern void thread_setentrypoint(
1139 thread_t thread,
1140 mach_vm_offset_t entry);
1141
1142 extern kern_return_t thread_set_tsd_base(
1143 thread_t thread,
1144 mach_vm_offset_t tsd_base);
1145
1146 extern kern_return_t thread_setsinglestep(
1147 thread_t thread,
1148 int on);
1149
1150 extern kern_return_t thread_userstack(
1151 thread_t,
1152 int,
1153 thread_state_t,
1154 unsigned int,
1155 mach_vm_offset_t *,
1156 int *,
1157 boolean_t);
1158
1159 extern kern_return_t thread_entrypoint(
1160 thread_t,
1161 int,
1162 thread_state_t,
1163 unsigned int,
1164 mach_vm_offset_t *);
1165
1166 extern kern_return_t thread_userstackdefault(
1167 mach_vm_offset_t *,
1168 boolean_t);
1169
1170 extern kern_return_t thread_wire_internal(
1171 host_priv_t host_priv,
1172 thread_t thread,
1173 boolean_t wired,
1174 boolean_t *prev_state);
1175
1176
1177 extern kern_return_t thread_dup(thread_t);
1178
1179 extern kern_return_t thread_dup2(thread_t, thread_t);
1180
1181 #if !defined(_SCHED_CALL_T_DEFINED)
1182 #define _SCHED_CALL_T_DEFINED
1183 typedef void (*sched_call_t)(
1184 int type,
1185 thread_t thread);
1186 #endif
1187
1188 #define SCHED_CALL_BLOCK 0x1
1189 #define SCHED_CALL_UNBLOCK 0x2
1190
1191 extern void thread_sched_call(
1192 thread_t thread,
1193 sched_call_t call);
1194
1195 extern boolean_t thread_is_static_param(
1196 thread_t thread);
1197
1198 extern task_t get_threadtask(thread_t);
1199
1200 /*
1201 * Thread is running within a 64-bit address space.
1202 */
1203 #define thread_is_64bit_addr(thd) \
1204 task_has_64Bit_addr(get_threadtask(thd))
1205
1206 /*
1207 * Thread is using 64-bit machine state.
1208 */
1209 #define thread_is_64bit_data(thd) \
1210 task_has_64Bit_data(get_threadtask(thd))
1211
1212 #if defined(__x86_64__)
1213 extern int thread_task_has_ldt(thread_t);
1214 #endif
1215 extern void *get_bsdthread_info(thread_t);
1216 extern void set_bsdthread_info(thread_t, void *);
1217 extern void set_thread_pagein_error(thread_t, int);
1218 extern void *uthread_alloc(task_t, thread_t, int);
1219 extern event_t workq_thread_init_and_wq_lock(task_t, thread_t); // bsd/pthread/
1220 extern void uthread_cleanup_name(void *uthread);
1221 extern void uthread_cleanup(task_t, void *, void *);
1222 extern void uthread_zone_free(void *);
1223 extern void uthread_cred_free(void *);
1224 extern void uthread_reset_proc_refcount(void *);
1225 #if PROC_REF_DEBUG
1226 extern int uthread_get_proc_refcount(void *);
1227 extern int proc_ref_tracking_disabled;
1228 #endif
1229
1230 extern boolean_t thread_should_halt(
1231 thread_t thread);
1232
1233 extern boolean_t thread_should_abort(
1234 thread_t);
1235
1236 extern int is_64signalregset(void);
1237
1238 extern void act_set_kperf(thread_t);
1239 extern void act_set_astledger(thread_t thread);
1240 extern void act_set_astledger_async(thread_t thread);
1241 extern void act_set_io_telemetry_ast(thread_t);
1242
1243 extern uint32_t dtrace_get_thread_predcache(thread_t);
1244 extern int64_t dtrace_get_thread_vtime(thread_t);
1245 extern int64_t dtrace_get_thread_tracing(thread_t);
1246 extern uint16_t dtrace_get_thread_inprobe(thread_t);
1247 extern int dtrace_get_thread_last_cpu_id(thread_t);
1248 extern vm_offset_t dtrace_get_kernel_stack(thread_t);
1249 extern void dtrace_set_thread_predcache(thread_t, uint32_t);
1250 extern void dtrace_set_thread_vtime(thread_t, int64_t);
1251 extern void dtrace_set_thread_tracing(thread_t, int64_t);
1252 extern void dtrace_set_thread_inprobe(thread_t, uint16_t);
1253 extern vm_offset_t dtrace_set_thread_recover(thread_t, vm_offset_t);
1254 extern vm_offset_t dtrace_sign_and_set_thread_recover(thread_t, vm_offset_t);
1255 extern void dtrace_thread_bootstrap(void);
1256 extern void dtrace_thread_didexec(thread_t);
1257
1258 extern int64_t dtrace_calc_thread_recent_vtime(thread_t);
1259
1260
1261 extern kern_return_t thread_set_wq_state32(
1262 thread_t thread,
1263 thread_state_t tstate);
1264
1265 extern kern_return_t thread_set_wq_state64(
1266 thread_t thread,
1267 thread_state_t tstate);
1268
1269 extern vm_offset_t kernel_stack_mask;
1270 extern vm_offset_t kernel_stack_size;
1271 extern vm_offset_t kernel_stack_depth_max;
1272
1273 extern void guard_ast(thread_t);
1274 extern void fd_guard_ast(thread_t,
1275 mach_exception_code_t, mach_exception_subcode_t);
1276 #if CONFIG_VNGUARD
1277 extern void vn_guard_ast(thread_t,
1278 mach_exception_code_t, mach_exception_subcode_t);
1279 #endif
1280 extern void mach_port_guard_ast(thread_t,
1281 mach_exception_code_t, mach_exception_subcode_t);
1282 extern void virt_memory_guard_ast(thread_t,
1283 mach_exception_code_t, mach_exception_subcode_t);
1284 extern void thread_guard_violation(thread_t,
1285 mach_exception_code_t, mach_exception_subcode_t, boolean_t);
1286 extern void thread_update_io_stats(thread_t, int size, int io_flags);
1287
1288 extern kern_return_t thread_set_voucher_name(mach_port_name_t name);
1289 extern kern_return_t thread_get_current_voucher_origin_pid(int32_t *pid);
1290
1291 extern void set_thread_rwlock_boost(void);
1292 extern void clear_thread_rwlock_boost(void);
1293
1294 extern void thread_enable_send_importance(thread_t thread, boolean_t enable);
1295
1296 /*
1297 * Translate signal context data pointer to userspace representation
1298 */
1299
1300 extern kern_return_t machine_thread_siguctx_pointer_convert_to_user(
1301 thread_t thread,
1302 user_addr_t *uctxp);
1303
1304 extern void machine_tecs(thread_t thr);
1305
1306 typedef enum cpuvn {
1307 CPUVN_CI = 1
1308 } cpuvn_e;
1309
1310 extern int machine_csv(cpuvn_e cve);
1311
1312 /*
1313 * Translate array of function pointer syscall arguments from userspace representation
1314 */
1315
1316 extern kern_return_t machine_thread_function_pointers_convert_from_user(
1317 thread_t thread,
1318 user_addr_t *fptrs,
1319 uint32_t count);
1320
1321 /*
1322 * Get a backtrace for a thread's kernel or user stack (user_p), using fp to start the
1323 * backtrace if provided.
1324 *
1325 * Returns bytes added to buffer, and kThreadTruncatedBT in thread_trace_flags if a
1326 * user page is not present after kdp_lightweight_fault() is called.
1327 */
1328
1329 extern int machine_trace_thread(
1330 thread_t thread,
1331 char *tracepos,
1332 char *tracebound,
1333 int nframes,
1334 boolean_t user_p,
1335 uint32_t *thread_trace_flags);
1336
1337 extern int machine_trace_thread64(thread_t thread,
1338 char *tracepos,
1339 char *tracebound,
1340 int nframes,
1341 boolean_t user_p,
1342 uint32_t *thread_trace_flags,
1343 uint64_t *sp,
1344 vm_offset_t fp);
1345
1346 /*
1347 * Get the duration of the given thread's last wait.
1348 */
1349 uint64_t thread_get_last_wait_duration(thread_t thread);
1350
1351 extern bool thread_get_no_smt(void);
1352
1353 #endif /* XNU_KERNEL_PRIVATE */
1354
1355 #ifdef KERNEL_PRIVATE
1356 extern void thread_set_no_smt(bool set);
1357 #endif /* KERNEL_PRIVATE */
1358
1359 /*! @function thread_has_thread_name
1360 * @abstract Checks if a thread has a name.
1361 * @discussion This function takes one input, a thread, and returns a boolean value indicating if that thread already has a name associated with it.
1362 * @param th The thread to inspect.
1363 * @result TRUE if the thread has a name, FALSE otherwise.
1364 */
1365 extern boolean_t thread_has_thread_name(thread_t th);
1366
1367 /*! @function thread_set_thread_name
1368 * @abstract Set a thread's name.
1369 * @discussion This function takes two input parameters: a thread to name, and the name to apply to the thread. The name will be copied over to the thread in order to better identify the thread. If the name is longer than MAXTHREADNAMESIZE - 1, it will be truncated.
1370 * @param th The thread to be named.
1371 * @param name The name to apply to the thread.
1372 */
1373 extern void thread_set_thread_name(thread_t th, const char* name);
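/*
 * Example (illustrative; the name string is hypothetical): naming the
 * calling thread. A name longer than MAXTHREADNAMESIZE - 1 characters is
 * truncated, as described above.
 *
 *	thread_set_thread_name(current_thread(), "com.example.worker");
 */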
1374
1375 #ifdef XNU_KERNEL_PRIVATE
1376 extern void
1377 thread_get_thread_name(thread_t th, char* name);
1378 #endif /* XNU_KERNEL_PRIVATE */
1379
1380 /*! @function kernel_thread_start
1381 * @abstract Create a kernel thread.
1382 * @discussion This function takes three input parameters: a reference to the function that the thread should execute, caller-specified data, and a reference used to return the newly created kernel thread. The function returns KERN_SUCCESS on success, or an appropriate kernel return code indicating the error. Note that the caller is responsible for explicitly releasing the reference to the created thread when it is no longer needed, by calling thread_deallocate(new_thread).
1383 * @param continuation A C-function pointer where the thread will begin execution.
1384 * @param parameter Caller specified data to be passed to the new thread.
1385 * @param new_thread Reference to the new thread is returned in this parameter.
1386 * @result Returns KERN_SUCCESS on success or an appropriate kernel code type.
1387 */
1388
1389 extern kern_return_t kernel_thread_start(
1390 thread_continue_t continuation,
1391 void *parameter,
1392 thread_t *new_thread);
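/*
 * A minimal usage sketch (illustrative; my_worker_fn is a hypothetical
 * continuation matching thread_continue_t):
 *
 *	static void
 *	my_worker_fn(void *parameter, wait_result_t wresult)
 *	{
 *		// ... do work, then terminate ...
 *	}
 *
 *	thread_t thread;
 *	if (kernel_thread_start(my_worker_fn, NULL, &thread) == KERN_SUCCESS) {
 *		thread_deallocate(thread);	// drop the creation reference
 *	}
 */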
1393
1394 #ifdef KERNEL_PRIVATE
1395 void thread_set_eager_preempt(thread_t thread);
1396 void thread_clear_eager_preempt(thread_t thread);
1397 void thread_set_honor_qlimit(thread_t thread);
1398 void thread_clear_honor_qlimit(thread_t thread);
1399 extern ipc_port_t convert_thread_to_port(thread_t);
1400 extern ipc_port_t convert_thread_to_port_pinned(thread_t);
1401 extern ipc_port_t convert_thread_inspect_to_port(thread_inspect_t);
1402 extern ipc_port_t convert_thread_read_to_port(thread_read_t);
1403 extern boolean_t is_vm_privileged(void);
1404 extern boolean_t set_vm_privilege(boolean_t);
1405 extern kern_allocation_name_t thread_set_allocation_name(kern_allocation_name_t new_name);
1406 extern void *thread_iokit_tls_get(uint32_t index);
1407 extern void thread_iokit_tls_set(uint32_t index, void * data);
1408 extern void thread_port_with_flavor_notify(mach_msg_header_t *msg);
1409 extern int thread_self_region_page_shift(void);
1410 extern void thread_self_region_page_shift_set(int pgshift);
1411 extern kern_return_t thread_create_pinned(task_t task, thread_t *new_thread);
1412 extern kern_return_t thread_create_immovable(task_t task, thread_t *new_thread);
1413 extern kern_return_t thread_terminate_pinned(thread_t thread);
1414 #endif /* KERNEL_PRIVATE */
1415
1416 __END_DECLS
1417
1418 #endif /* _KERN_THREAD_H_ */