/*
 * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * Author: Avadis Tevanian, Jr.
 *
 * This file contains the structure definitions for threads.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL). All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 */
#ifndef	_KERN_THREAD_H_
#define	_KERN_THREAD_H_

#include <mach/kern_return.h>
#include <mach/mach_types.h>
#include <mach/message.h>
#include <mach/boolean.h>
#include <mach/vm_param.h>
#include <mach/thread_info.h>
#include <mach/thread_status.h>
#include <mach/exception_types.h>

#include <kern/kern_types.h>

#include <sys/cdefs.h>
#ifdef	MACH_KERNEL_PRIVATE

#include <mach_assert.h>
#include <mach_ldebug.h>

#include <ipc/ipc_types.h>

#include <mach/port.h>
#include <kern/cpu_number.h>
#include <kern/queue.h>
#include <kern/timer.h>
#include <kern/simple_lock.h>
#include <kern/locks.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <mach/sfi_class.h>
#include <kern/thread_call.h>
#include <kern/timer_call.h>
#include <kern/task.h>
#include <kern/exception.h>
#include <kern/affinity.h>

#include <ipc/ipc_kmsg.h>

#include <machine/cpu_data.h>
#include <machine/thread.h>
	/*
	 *	NOTE:	The runq field in the thread structure has an unusual
	 *	locking protocol.  If its value is PROCESSOR_NULL, then it is
	 *	locked by the thread_lock, but if its value is something else
	 *	then it is locked by the associated run queue lock.  It is
	 *	set to PROCESSOR_NULL without holding the thread lock, but the
	 *	transition from PROCESSOR_NULL to non-null must be done
	 *	under the thread lock and the run queue lock.
	 *
	 *	When the thread is on a wait queue, these first three fields
	 *	are treated as an unofficial union with a wait_queue_element.
	 *	If you change these, you must change that definition as well
	 *	(kern/wait_queue.h).
	 */
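	/*
	 * Illustrative sketch (not from this header): how the protocol above
	 * plays out in scheduler code.  runq_lock()/runq_unlock() are
	 * hypothetical stand-ins for whichever run-queue lock applies;
	 * splsched(), thread_lock() and thread_unlock() are the real
	 * interfaces defined later in this file.
	 *
	 *	s = splsched();
	 *	thread_lock(thread);
	 *	runq_lock(rq);
	 *	thread->runq = processor;	// PROCESSOR_NULL -> non-null: both locks held
	 *	runq_unlock(rq);
	 *	thread_unlock(thread);
	 *	splx(s);
	 *
	 *	// Clearing the field needs only the run queue lock that covered
	 *	// the non-null value; the thread lock is not required.
	 *	runq_lock(rq);
	 *	thread->runq = PROCESSOR_NULL;
	 *	runq_unlock(rq);
	 */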
	/* Items examined often, modified infrequently */
	queue_chain_t		links;		/* run/wait queue links */
	processor_t		runq;		/* run queue assignment */
	wait_queue_t		wait_queue;	/* wait queue we are currently on */
	event64_t		wait_event;	/* wait queue event */
	/* Data updated during assert_wait/thread_wakeup */
	decl_simple_lock_data(,sched_lock)	/* scheduling lock (thread_lock()) */
	decl_simple_lock_data(,wake_lock)	/* for thread stop / wait (wake_lock()) */
	integer_t		options;	/* options set by thread itself */
#define TH_OPT_INTMASK		0x0003	/* interrupt / abort level */
#define TH_OPT_VMPRIV		0x0004	/* may allocate reserved memory */
#define TH_OPT_DTRACE		0x0008	/* executing under dtrace_probe */
#define TH_OPT_SYSTEM_CRITICAL	0x0010	/* Thread must always be allowed to run - even under heavy load */
#define TH_OPT_PROC_CPULIMIT	0x0020	/* Thread has a task-wide CPU limit applied to it */
#define TH_OPT_PRVT_CPULIMIT	0x0040	/* Thread has a thread-private CPU limit applied to it */
#define TH_OPT_IDLE_THREAD	0x0080	/* Thread is a per-processor idle thread */

	boolean_t		wake_active;	/* wake event on stop */
	int			at_safe_point;	/* thread_abort_safely allowed */
	ast_t			reason;		/* why we blocked */
	thread_continue_t	continuation;	/* continue here next dispatch */
	void			*parameter;	/* continuation parameter */
	wait_result_t		wait_result;	/* outcome of wait - may be examined by this thread */

	/* Data updated/used in thread_invoke */
	vm_offset_t		kernel_stack;	/* current kernel stack */
	vm_offset_t		reserved_stack;	/* reserved kernel stack */
	/* Thread states [bits or'ed] */
#define TH_WAIT			0x01	/* queued for waiting */
#define TH_SUSP			0x02	/* stopped or requested to stop */
#define TH_RUN			0x04	/* running or on runq */
#define TH_UNINT		0x08	/* waiting uninterruptibly */
#define TH_TERMINATE		0x10	/* halted at termination */
#define TH_TERMINATE2		0x20	/* added to termination queue */

#define TH_IDLE			0x80	/* idling processor */
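	/*
	 * Illustrative sketch (not from this header): the thread state is a
	 * bit mask, so tests combine bits explicitly.  "On a run queue or
	 * running, and not also waiting" is commonly written as:
	 *
	 *	if ((thread->state & (TH_RUN | TH_WAIT)) == TH_RUN) {
	 *		// thread is runnable
	 *	}
	 */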
	/* Scheduling information */
	sched_mode_t		sched_mode;	/* scheduling mode */
	sched_mode_t		saved_mode;	/* saved mode during forced mode demotion */

	sfi_class_id_t		sfi_class;	/* SFI class (XXX Updated on CSW/QE/AST) */
	sfi_class_id_t		sfi_wait_class;	/* Currently in SFI wait for this class, protected by sfi_lock */

	uint32_t		sched_flags;	/* current flag bits */
#define TH_SFLAG_FAIRSHARE_TRIPPED	0x0001	/* fairshare scheduling activated */
#define TH_SFLAG_FAILSAFE		0x0002	/* fail-safe has tripped */
#define TH_SFLAG_THROTTLED		0x0004	/* thread treated as background for scheduler decay purposes */
#define TH_SFLAG_DEMOTED_MASK		(TH_SFLAG_THROTTLE_DEMOTED | TH_SFLAG_FAILSAFE | TH_SFLAG_FAIRSHARE_TRIPPED)	/* saved_mode contains previous sched_mode */

#define	TH_SFLAG_PROMOTED		0x0008	/* sched pri has been promoted */
#define TH_SFLAG_ABORT			0x0010	/* abort interruptible waits */
#define TH_SFLAG_ABORTSAFELY		0x0020	/* ... but only those at safe point */
#define TH_SFLAG_ABORTED_MASK		(TH_SFLAG_ABORT | TH_SFLAG_ABORTSAFELY)
#define	TH_SFLAG_DEPRESS		0x0040	/* normal depress yield */
#define TH_SFLAG_POLLDEPRESS		0x0080	/* polled depress yield */
#define TH_SFLAG_DEPRESSED_MASK		(TH_SFLAG_DEPRESS | TH_SFLAG_POLLDEPRESS)
#define TH_SFLAG_PRI_UPDATE		0x0100	/* Updating priority */
#define TH_SFLAG_EAGERPREEMPT		0x0200	/* Any preemption of this thread should be treated as if AST_URGENT applied */
#define TH_SFLAG_RW_PROMOTED		0x0400	/* sched pri has been promoted due to blocking with RW lock held */
#define TH_SFLAG_PROMOTED_MASK		(TH_SFLAG_PROMOTED | TH_SFLAG_RW_PROMOTED)
#define TH_SFLAG_THROTTLE_DEMOTED	0x0800	/* throttled thread forced to timeshare mode (may be applied in addition to failsafe) */

#define TH_SFLAG_RW_PROMOTED_BIT	(10)	/* 0x400 */
	int16_t			sched_pri;	/* scheduled (current) priority */
	int16_t			priority;	/* base priority */
	int16_t			max_priority;	/* copy of max base priority */
	int16_t			task_priority;	/* copy of task base priority */
#if defined(CONFIG_SCHED_GRRR)
	uint16_t		grrr_deficit;	/* fixed point (1/1000th quantum) fractional deficit */
#endif /* defined(CONFIG_SCHED_GRRR) */

	int16_t			promotions;	/* level of promotion */
	int16_t			pending_promoter_index;
	uint32_t		ref_count;	/* number of references to me */
	void			*pending_promoter[2];

	uint32_t		rwlock_count;	/* Number of lck_rw_t locks held by thread */

#if MACH_ASSERT
	uint32_t		SHARE_COUNT, BG_COUNT;	/* This thread's contribution to global sched counters (temporary debugging) */
#endif /* MACH_ASSERT */
	integer_t		importance;	/* task-relative importance */

	/* Priority depression expiration */
	integer_t		depress_timer_active;
	timer_call_data_t	depress_timer;

	/* real-time parameters */
	struct {				/* see mach/thread_policy.h */
		uint32_t	computation;
		boolean_t	preemptible;
	}			realtime;

	uint32_t		was_promoted_on_wakeup;
	uint64_t		last_run_time;		/* time when thread was switched away from */
	uint32_t		quantum_remaining;	/* duration of current quantum remaining */

#if defined(CONFIG_SCHED_MULTIQ)
	sched_group_t		sched_group;
#endif /* defined(CONFIG_SCHED_MULTIQ) */
	/* Data used during setrun/dispatch */
	timer_data_t		system_timer;		/* system mode timer */
	processor_t		bound_processor;	/* bound to a processor? */
	processor_t		last_processor;		/* processor last dispatched on */
	processor_t		chosen_processor;	/* Where we want to run this thread */

	/* Fail-safe computation since last unblock or qualifying yield */
	uint64_t		computation_metered;
	uint64_t		computation_epoch;
	uint64_t		safe_release;		/* when to release fail-safe */

	/* Call out from scheduler */

#if defined(CONFIG_SCHED_PROTO)
	uint32_t		runqueue_generation;	/* last time runqueue was drained */
#endif /* defined(CONFIG_SCHED_PROTO) */
	/* Statistics and timesharing calculations */
#if defined(CONFIG_SCHED_TIMESHARE_CORE)
	natural_t		sched_stamp;	/* last scheduler tick */
	natural_t		sched_usage;	/* timesharing cpu usage [sched] */
	natural_t		pri_shift;	/* usage -> priority from pset */
	natural_t		cpu_usage;	/* instrumented cpu usage [%cpu] */
	natural_t		cpu_delta;	/* accumulated cpu_usage delta */
#endif /* CONFIG_SCHED_TIMESHARE_CORE */

	uint32_t		c_switch;	/* total context switches */
	uint32_t		p_switch;	/* total processor switches */
	uint32_t		ps_switch;	/* total pset switches */
	/* Timing data structures */
	int			precise_user_kernel_time;	/* precise user/kernel enabled for this thread */
	timer_data_t		user_timer;		/* user mode timer */
	uint64_t		user_timer_save;	/* saved user timer value */
	uint64_t		system_timer_save;	/* saved system timer value */
	uint64_t		vtimer_user_save;	/* saved values for vtimers */
	uint64_t		vtimer_prof_save;
	uint64_t		vtimer_rlim_save;

	/* Timing for wait state */
	uint64_t		wait_sfi_begin_time;	/* start time for thread waiting in SFI */

	/* Timed wait expiration */
	timer_call_data_t	wait_timer;
	integer_t		wait_timer_active;
	boolean_t		wait_timer_is_set;
	/*
	 * Processor/cache affinity
	 * - affinity_threads links task threads with the same affinity set
	 */
	affinity_set_t		affinity_set;
	queue_chain_t		affinity_threads;
	/* Various bits of stashed state */
	union {
		struct {
			mach_msg_return_t	state;		/* receive state */
			mach_port_seqno_t	seqno;		/* seqno of recvd message */
			ipc_object_t		object;		/* object received on */
			mach_vm_address_t	msg_addr;	/* receive buffer pointer */
			mach_msg_size_t		msize;		/* max size for recvd msg */
			mach_msg_option_t	option;		/* options for receive */
			mach_msg_size_t		slist_size;	/* scatter list size */
			mach_port_name_t	receiver_name;	/* the receive port name */
			struct ipc_kmsg		*kmsg;		/* received message */
			mach_msg_continue_t	continuation;
		} receive;
		struct {
			struct semaphore	*waitsemaphore;		/* semaphore ref */
			struct semaphore	*signalsemaphore;	/* semaphore ref */
			int			options;		/* semaphore options */
			kern_return_t		result;			/* primary result */
			mach_msg_continue_t	continuation;
		} sema;
		struct {
			int			option;		/* switch option */
			boolean_t		reenable_workq_callback;	/* on entry, callbacks were suspended */
		} swtch;
		int			misc;		/* catch-all for other state */
	} saved;
	/* Structure to save information about guard exception */
	struct {
		unsigned			type;		/* EXC_GUARD reason/type */
		mach_exception_data_type_t	code;		/* Exception code */
		mach_exception_data_type_t	subcode;	/* Exception sub-code */
	} guard_exc_info;
	/* IPC data structures */
#if IMPORTANCE_INHERITANCE
	natural_t		ith_assertions;	/* assertions pending drop */
#endif /* IMPORTANCE_INHERITANCE */
	struct ipc_kmsg_queue	ith_messages;	/* messages to reap */
	mach_port_t		ith_rpc_reply;	/* reply port for kernel RPCs */

	/* Ast/Halt data structures */
	vm_offset_t		recover;	/* page fault recover(copyin/out) */

	queue_chain_t		threads;	/* global list of all threads */

	queue_chain_t		task_threads;

	/*** Machine-dependent state ***/
	struct machine_thread	machine;
	/* Task membership */

	decl_lck_mtx_data(,mutex)

	/* Pending thread ast(s) */

	/* Miscellaneous bits guarded by mutex */
		active:1,		/* Thread is active and has not been terminated */
		started:1,		/* Thread has been started after creation */
		static_param:1,		/* Disallow policy parameter changes */
		policy_reset:1,		/* Disallow policy parameter changes on terminating threads */
	struct ReturnHandler {
		struct ReturnHandler	*next;
		void			(*handler)(
						struct ReturnHandler	*rh,
						struct thread		*thread);
	} *handlers, special_handler;
	/* Ports associated with this thread */
	struct ipc_port		*ith_self;	/* not a right, doesn't hold ref */
	struct ipc_port		*ith_sself;	/* a send right */
	struct exception_action	*exc_actions;
	uint32_t		t_dtrace_flags;		/* DTrace thread states */
#define	TH_DTRACE_EXECSUCCESS	0x01
	uint32_t		t_dtrace_predcache;	/* DTrace per thread predicate value hint */
	int64_t			t_dtrace_tracing;	/* Thread time under dtrace_probe() */
	int64_t			t_dtrace_vtime;

	clock_sec_t		t_page_creation_time;
	uint32_t		t_page_creation_count;
#define T_CHUD_MARKED		0x01	/* this thread is marked by CHUD */
#define T_IN_CHUD		0x02	/* this thread is already in a CHUD handler */
#define THREAD_PMC_FLAG		0x04	/* Bit in "t_chud" signifying PMC interest */
#define T_AST_CALLSTACK		0x08	/* Thread scheduled to dump a callstack on its next AST */
#define T_AST_NAME		0x10	/* Thread scheduled to dump its name on its next AST */
#define T_NAME_DONE		0x20	/* Thread has previously recorded its name */
#define T_KPC_ALLOC		0x40	/* Thread needs a kpc_buf */

	uint32_t		t_chud;		/* CHUD flags, used for Shark */
	uint32_t		chud_c_switch;	/* last dispatch detection */

	integer_t		mutex_count;	/* total count of locks held */
	/* accumulated performance counters for this thread */

	/* count of how many times a thread has been sampled since it was last scheduled */
	uint64_t		kperf_pet_cnt;

#if HYPERVISOR
	/* hypervisor virtual CPU object associated with this thread */
	void			*hv_thread_target;
#endif /* HYPERVISOR */

	uint64_t		thread_id;	/* system wide unique thread-id */
	/* Statistics accumulated per-thread and aggregated per-task */
	uint32_t		syscalls_unix;
	uint32_t		syscalls_mach;

	ledger_t		t_threadledger;		/* per thread ledger */
	uint64_t		cpu_time_last_qos;
	ledger_t		t_bankledger;		/* ledger to charge someone */
	uint64_t		t_deduct_bank_ledger_time;	/* cpu time to be deducted from bank ledger */

	/* policy is protected by the task lock */
	struct task_requested_policy	requested_policy;
	struct task_effective_policy	effective_policy;
	struct task_pended_policy	pended_policy;

	/* usynch override is protected by the task lock, eventually will be thread mutex */
	int			usynch_override_contended_resource_count;

	int			iotier_override;	/* atomic operations to set, cleared on ret to user */
	io_stat_info_t		thread_io_stats;	/* per-thread I/O statistics */
	integer_t		saved_importance;	/* saved task-relative importance */

	uint32_t		thread_callout_interrupt_wakeups;
	uint32_t		thread_callout_platform_idle_wakeups;
	uint32_t		thread_timer_wakeups_bin_1;
	uint32_t		thread_timer_wakeups_bin_2;
	uint16_t		callout_woken_from_icontext:1,
				callout_woken_from_platform_idle:1,
				callout_woke_thread:1,
				thread_bitfield_unused:13;

	/* Kernel holds on this thread */
	int16_t			suspend_count;
	/* User level suspensions */
	int16_t			user_stop_count;

	mach_port_name_t	ith_voucher_name;
	ipc_voucher_t		ith_voucher;
#endif /* CONFIG_IOSCHED */
#define ith_state		saved.receive.state
#define ith_object		saved.receive.object
#define ith_msg_addr		saved.receive.msg_addr
#define ith_msize		saved.receive.msize
#define	ith_option		saved.receive.option
#define ith_scatter_list_size	saved.receive.slist_size
#define ith_receiver_name	saved.receive.receiver_name
#define ith_continuation	saved.receive.continuation
#define ith_kmsg		saved.receive.kmsg
#define ith_seqno		saved.receive.seqno

#define sth_waitsemaphore	saved.sema.waitsemaphore
#define sth_signalsemaphore	saved.sema.signalsemaphore
#define sth_options		saved.sema.options
#define sth_result		saved.sema.result
#define sth_continuation	saved.sema.continuation
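/*
 * Illustrative note (not from this header): the ith_/sth_ shorthands above
 * simply alias members of the "saved" union, so IPC code such as
 *
 *	self->ith_state = MACH_RCV_IN_PROGRESS;
 *
 * is equivalent to writing self->saved.receive.state.  MACH_RCV_IN_PROGRESS
 * is only an example value from <mach/message.h>.
 */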
extern void			thread_bootstrap(void);

extern void			thread_init(void);

extern void			thread_daemon_init(void);

#define	thread_reference_internal(thread)	\
			(void)hw_atomic_add(&(thread)->ref_count, 1)

#define thread_deallocate_internal(thread)	\
			hw_atomic_sub(&(thread)->ref_count, 1)

#define thread_reference(thread)					\
MACRO_BEGIN								\
	if ((thread) != THREAD_NULL)					\
		thread_reference_internal(thread);			\
MACRO_END

extern void			thread_deallocate(

extern void			thread_terminate_self(void);

extern kern_return_t		thread_terminate_internal(

extern void			thread_start_internal(
					thread_t		thread) __attribute__ ((noinline));

extern void			thread_terminate_enqueue(

extern void			thread_stack_enqueue(

extern void			thread_hold(

extern void			thread_release(
#define	thread_lock_init(th)	simple_lock_init(&(th)->sched_lock, 0)
#define thread_lock(th)		simple_lock(&(th)->sched_lock)
#define thread_unlock(th)	simple_unlock(&(th)->sched_lock)

#define wake_lock_init(th)	simple_lock_init(&(th)->wake_lock, 0)
#define wake_lock(th)		simple_lock(&(th)->wake_lock)
#define wake_unlock(th)		simple_unlock(&(th)->wake_lock)
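/*
 * Illustrative sketch (not from this header): both locks are simple locks
 * taken with interrupts disabled at splsched(), e.g.
 *
 *	spl_t s = splsched();
 *	thread_lock(thread);
 *	// ... examine or update scheduling state ...
 *	thread_unlock(thread);
 *	splx(s);
 */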
#define thread_should_halt_fast(thread)		(!(thread)->active)

extern void			stack_alloc(

extern void			stack_handoff(

extern void			stack_free(

extern void			stack_free_reserved(

extern boolean_t		stack_alloc_try(

extern void			stack_collect(void);

extern void			stack_init(void);

extern kern_return_t		thread_info_internal(
					thread_t		thread,
					thread_flavor_t		flavor,
					thread_info_t		thread_info_out,
					mach_msg_type_number_t	*thread_info_count);
extern void			thread_task_priority(
					thread_t		thread,
					integer_t		priority,
					integer_t		max_priority);

extern kern_return_t		thread_set_mode_and_absolute_pri(

extern void			thread_policy_reset(

extern kern_return_t		kernel_thread_create(
					thread_continue_t	continuation,
					void			*parameter,
					integer_t		priority,
					thread_t		*new_thread);

extern kern_return_t		kernel_thread_start_priority(
					thread_continue_t	continuation,
					void			*parameter,
					integer_t		priority,
					thread_t		*new_thread);
extern void			machine_stack_attach(

extern vm_offset_t		machine_stack_detach(

extern void			machine_stack_handoff(

extern thread_t			machine_switch_context(
					thread_t		old_thread,
					thread_continue_t	continuation,
					thread_t		new_thread);

extern void			machine_load_context(

extern kern_return_t		machine_thread_state_initialize(

extern kern_return_t		machine_thread_set_state(
					thread_t		thread,
					thread_flavor_t		flavor,
					thread_state_t		state,
					mach_msg_type_number_t	count);

extern kern_return_t		machine_thread_get_state(
					thread_t		thread,
					thread_flavor_t		flavor,
					thread_state_t		state,
					mach_msg_type_number_t	*count);
extern kern_return_t		machine_thread_dup(

extern void			machine_thread_init(void);

extern kern_return_t		machine_thread_create(

extern void			machine_thread_switch_addrmode(

extern void			machine_thread_destroy(

extern void			machine_set_current_thread(

extern kern_return_t		machine_thread_get_kern_state(
					thread_t		thread,
					thread_flavor_t		flavor,
					thread_state_t		tstate,
					mach_msg_type_number_t	*count);

extern kern_return_t		machine_thread_inherit_taskwide(

extern kern_return_t		machine_thread_set_tsd_base(
					thread_t		thread,
					mach_vm_offset_t	tsd_base);
typedef struct ReturnHandler		ReturnHandler;

#define	thread_mtx_lock(thread)		lck_mtx_lock(&(thread)->mutex)
#define	thread_mtx_try(thread)		lck_mtx_try_lock(&(thread)->mutex)
#define	thread_mtx_unlock(thread)	lck_mtx_unlock(&(thread)->mutex)

extern void			act_execute_returnhandlers(void);

extern void			install_special_handler(

extern void			special_handler(

extern void			thread_update_qos_cpu_time(
					thread_t		thread,
					boolean_t		lock_needed);

void act_machine_sv_free(thread_t, int);

vm_offset_t			min_valid_stack_address(void);
vm_offset_t			max_valid_stack_address(void);
static inline uint16_t	thread_set_tag_internal(thread_t thread, uint16_t tag) {
	return __sync_fetch_and_or(&thread->thread_tag, tag);
}

static inline uint16_t	thread_get_tag_internal(thread_t thread) {
	return thread->thread_tag;
}

typedef struct {
	int		qos_pri[THREAD_QOS_LAST];
	int		qos_iotier[THREAD_QOS_LAST];
	uint32_t	qos_through_qos[THREAD_QOS_LAST];
	uint32_t	qos_latency_qos[THREAD_QOS_LAST];
} qos_policy_params_t;
#else	/* MACH_KERNEL_PRIVATE */

extern thread_t			current_thread(void);

extern void			thread_reference(

extern void			thread_deallocate(

#endif	/* MACH_KERNEL_PRIVATE */

#ifdef	KERNEL_PRIVATE

extern uint64_t			thread_dispatchqaddr(

#endif	/* KERNEL_PRIVATE */

extern uint64_t			thread_tid(thread_t thread);
#ifdef	XNU_KERNEL_PRIVATE

/*
 * Thread tags; for easy identification.
 */
#define	THREAD_TAG_MAINTHREAD	0x1
#define	THREAD_TAG_CALLOUT	0x2
#define	THREAD_TAG_IOWORKLOOP	0x4

uint16_t	thread_set_tag(thread_t, uint16_t);
uint16_t	thread_get_tag(thread_t);
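/*
 * Illustrative sketch (not from this header): a subsystem that owns a thread
 * can tag it so that debugging and tracing tools can identify it later, e.g.
 *
 *	thread_set_tag(thread, THREAD_TAG_IOWORKLOOP);
 *	...
 *	if (thread_get_tag(thread) & THREAD_TAG_IOWORKLOOP) {
 *		// treat as an IOKit work-loop thread
 *	}
 *
 * The tag is a bit mask; thread_set_tag_internal() above ORs new bits into
 * thread_tag and returns the previous value.
 */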
extern kern_return_t	thread_state_initialize(

extern kern_return_t	thread_setstatus(
				thread_t		thread,
				int			flavor,
				thread_state_t		tstate,
				mach_msg_type_number_t	count);

extern kern_return_t	thread_getstatus(
				thread_t		thread,
				int			flavor,
				thread_state_t		tstate,
				mach_msg_type_number_t	*count);

extern kern_return_t	thread_create_workq(
				task_t			task,
				thread_continue_t	thread_return,
				thread_t		*new_thread);

extern void	thread_yield_internal(
			mach_msg_timeout_t	interval);
/*
 * Thread-private CPU limits: apply a private CPU limit to this thread only.
 * Available actions are:
 *
 * 1) Block. Prevent CPU consumption of the thread from exceeding the limit.
 * 2) Exception. Generate a resource consumption exception when the limit is exceeded.
 * 3) Disable. Remove any existing CPU limit.
 */
#define THREAD_CPULIMIT_BLOCK		0x1
#define THREAD_CPULIMIT_EXCEPTION	0x2
#define	THREAD_CPULIMIT_DISABLE		0x3
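/*
 * Illustrative sketch (not from this header), using thread_set_cpulimit()
 * declared below: cap the calling thread at 50% of one CPU over a 1-second
 * interval, blocking it when the limit is reached.  The percentage and
 * interval are example values only.
 *
 *	int err = thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, 50, NSEC_PER_SEC);
 *	if (err != 0) {
 *		// the limit could not be applied
 *	}
 *
 * Passing THREAD_CPULIMIT_DISABLE removes any limit previously applied to
 * the thread.
 */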
struct _thread_ledger_indices {
	int cpu_time;
};

extern struct _thread_ledger_indices thread_ledgers;

extern int thread_get_cpulimit(int *action, uint8_t *percentage, uint64_t *interval_ns);
extern int thread_set_cpulimit(int action, uint8_t percentage, uint64_t interval_ns);
extern void		thread_read_times(
				thread_t		thread,
				time_value_t		*user_time,
				time_value_t		*system_time);

extern uint64_t		thread_get_runtime_self(void);

extern void		thread_setuserstack(
				thread_t		thread,
				mach_vm_offset_t	user_stack);

extern uint64_t		thread_adjuserstack(

extern void		thread_setentrypoint(
				thread_t		thread,
				mach_vm_offset_t	entry);

extern kern_return_t	thread_set_tsd_base(
				thread_t		thread,
				mach_vm_offset_t	tsd_base);

extern kern_return_t	thread_setsinglestep(

extern kern_return_t	thread_userstack(

extern kern_return_t	thread_entrypoint(

extern kern_return_t	thread_userstackdefault(

extern kern_return_t	thread_wire_internal(
				host_priv_t		host_priv,
				thread_t		thread,
				boolean_t		wired,
				boolean_t		*prev_state);

extern kern_return_t	thread_dup(thread_t);
typedef void	(*sched_call_t)(

#define SCHED_CALL_BLOCK		0x1
#define SCHED_CALL_UNBLOCK		0x2

extern void		thread_sched_call(

extern void		thread_static_param(

extern boolean_t	thread_is_static_param(

extern kern_return_t	thread_policy_set_internal(
				thread_t		thread,
				thread_policy_flavor_t	flavor,
				thread_policy_t		policy_info,
				mach_msg_type_number_t	count);

extern boolean_t thread_has_qos_policy(thread_t thread);

extern kern_return_t thread_remove_qos_policy(thread_t thread);

extern task_t	get_threadtask(thread_t);
#define thread_is_64bit(thd)	\
	task_has_64BitAddr(get_threadtask(thd))
extern void		*get_bsdthread_info(thread_t);
extern void		set_bsdthread_info(thread_t, void *);
extern void		*uthread_alloc(task_t, thread_t, int);
extern void		uthread_cleanup(task_t, void *, void *);
extern void		uthread_zone_free(void *);
extern void		uthread_cred_free(void *);

extern boolean_t	thread_should_halt(

extern boolean_t	thread_should_abort(

extern int is_64signalregset(void);

void act_set_apc(thread_t);
void act_set_kperf(thread_t);

extern uint32_t dtrace_get_thread_predcache(thread_t);
extern int64_t dtrace_get_thread_vtime(thread_t);
extern int64_t dtrace_get_thread_tracing(thread_t);
extern boolean_t dtrace_get_thread_reentering(thread_t);
extern vm_offset_t dtrace_get_kernel_stack(thread_t);
extern void dtrace_set_thread_predcache(thread_t, uint32_t);
extern void dtrace_set_thread_vtime(thread_t, int64_t);
extern void dtrace_set_thread_tracing(thread_t, int64_t);
extern void dtrace_set_thread_reentering(thread_t, boolean_t);
extern vm_offset_t dtrace_set_thread_recover(thread_t, vm_offset_t);
extern void dtrace_thread_bootstrap(void);
extern void dtrace_thread_didexec(thread_t);

extern int64_t dtrace_calc_thread_recent_vtime(thread_t);
extern kern_return_t	thread_set_wq_state32(
				thread_t		thread,
				thread_state_t		tstate);

extern kern_return_t	thread_set_wq_state64(
				thread_t		thread,
				thread_state_t		tstate);

extern vm_offset_t	kernel_stack_mask;
extern vm_offset_t	kernel_stack_size;
extern vm_offset_t	kernel_stack_depth_max;

void guard_ast(thread_t thread);
extern void fd_guard_ast(thread_t thread);
extern void mach_port_guard_ast(thread_t thread);
extern void thread_guard_violation(thread_t thread, unsigned type);
extern void thread_update_io_stats(thread_t thread, int size, int io_flags);

extern kern_return_t	thread_set_voucher_name(mach_port_name_t name);
extern kern_return_t	thread_get_current_voucher_origin_pid(int32_t *pid);

#endif	/* XNU_KERNEL_PRIVATE */
/*! @function kernel_thread_start
    @abstract Create a kernel thread.
    @discussion This function takes three input parameters: a reference to the function that the thread should execute, caller-specified data, and a reference used to return the newly created kernel thread. It returns KERN_SUCCESS on success or an appropriate kernel error code. The caller is responsible for explicitly releasing the reference to the created thread when it is no longer needed, by calling thread_deallocate(new_thread).
    @param continuation A C-function pointer where the thread will begin execution.
    @param parameter Caller specified data to be passed to the new thread.
    @param new_thread Reference to the new thread is returned in this parameter.
    @result Returns KERN_SUCCESS on success or an appropriate kernel code type.
*/
extern kern_return_t	kernel_thread_start(
				thread_continue_t	continuation,
				void			*parameter,
				thread_t		*new_thread);
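/*
 * Illustrative sketch (not part of this header), based on the documentation
 * above.  my_worker() and my_context are hypothetical names; the continuation
 * receives the caller-specified parameter when the new thread first runs.
 *
 *	static void
 *	my_worker(void *parameter, wait_result_t wait_result)
 *	{
 *		// ... perform work using 'parameter' ...
 *	}
 *
 *	thread_t	thread;
 *	kern_return_t	kr;
 *
 *	kr = kernel_thread_start(my_worker, my_context, &thread);
 *	if (kr == KERN_SUCCESS) {
 *		// Release the reference returned to us once it is no longer
 *		// needed, as the discussion above requires.
 *		thread_deallocate(thread);
 *	}
 */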
#ifdef KERNEL_PRIVATE
void thread_set_eager_preempt(thread_t thread);
void thread_clear_eager_preempt(thread_t thread);
extern ipc_port_t convert_thread_to_port(thread_t);
extern boolean_t set_vm_privilege(boolean_t);
#endif /* KERNEL_PRIVATE */

#endif	/* _KERN_THREAD_H_ */