/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	thread.h
 *	Author:	Avadis Tevanian, Jr.
 *
 *	This file contains the structure definitions for threads.
 *
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */

#ifndef	_KERN_THREAD_H_
#define	_KERN_THREAD_H_

#include <mach/kern_return.h>
#include <mach/mach_types.h>
#include <mach/message.h>
#include <mach/boolean.h>
#include <mach/vm_param.h>
#include <mach/thread_info.h>
#include <mach/thread_status.h>
#include <mach/exception_types.h>

#include <kern/kern_types.h>

#include <sys/cdefs.h>

#ifdef	MACH_KERNEL_PRIVATE

#include <cputypes.h>

#include <mach_assert.h>
#include <mach_ldebug.h>

#include <ipc/ipc_types.h>

#include <mach/port.h>
#include <kern/cpu_number.h>
#include <kern/queue.h>
#include <kern/timer.h>
#include <kern/lock.h>
#include <kern/locks.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/thread_call.h>
#include <kern/timer_call.h>
#include <kern/task.h>
#include <kern/exception.h>
#include <kern/affinity.h>

#include <ipc/ipc_kmsg.h>

#include <machine/cpu_data.h>
#include <machine/thread.h>

struct thread {
	/*
	 * NOTE: The runq field in the thread structure has an unusual
	 * locking protocol.  If its value is PROCESSOR_NULL, then it is
	 * locked by the thread_lock, but if its value is something else
	 * then it is locked by the associated run queue lock.
	 *
	 * When the thread is on a wait queue, these first three fields
	 * are treated as an unofficial union with a wait_queue_element.
	 * If you change these, you must change that definition as well
	 * (kern/wait_queue.h).
	 */
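	/*
	 * Illustrative sketch only (an assumption, not part of this
	 * interface): code that honors the runq rule above typically
	 * takes the thread lock first and only then decides whether a
	 * run queue lock is also needed.
	 *
	 *	thread_lock(thread);
	 *	if (thread->runq == PROCESSOR_NULL) {
	 *		// off the run queues: thread_lock covers runq
	 *	} else {
	 *		// enqueued: the owning run queue's lock must be
	 *		// taken before runq may be trusted or changed
	 *	}
	 *	thread_unlock(thread);
	 */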
	/* Items examined often, modified infrequently */
	queue_chain_t	links;			/* run/wait queue links */
	processor_t	runq;			/* run queue assignment */
	wait_queue_t	wait_queue;		/* wait queue we are currently on */
	event64_t	wait_event;		/* wait queue event */
	integer_t	options;		/* options set by thread itself */
#define TH_OPT_INTMASK		0x03	/* interrupt / abort level */
#define TH_OPT_VMPRIV		0x04	/* may allocate reserved memory */
#define TH_OPT_DTRACE		0x08	/* executing under dtrace_probe */
#define TH_OPT_SYSTEM_CRITICAL	0x10	/* Thread must always be allowed to run - even under heavy load */
#define TH_OPT_PROC_CPULIMIT	0x20	/* Thread has a task-wide CPU limit applied to it */
#define TH_OPT_PRVT_CPULIMIT	0x40	/* Thread has a thread-private CPU limit applied to it */
#define TH_OPT_IDLE_THREAD	0x0080	/* Thread is a per-processor idle thread */

	/* Data updated during assert_wait/thread_wakeup */
	decl_simple_lock_data(,sched_lock)	/* scheduling lock (thread_lock()) */
	decl_simple_lock_data(,wake_lock)	/* for thread stop / wait (wake_lock()) */
	boolean_t	wake_active;	/* wake event on stop */
	int		at_safe_point;	/* thread_abort_safely allowed */
	ast_t		reason;		/* why we blocked */
	wait_result_t	wait_result;	/* outcome of wait -
					 * may be examined by this thread
					 * WITHOUT locking */
	thread_continue_t continuation;	/* continue here next dispatch */
	void		*parameter;	/* continuation parameter */

	/* Data updated/used in thread_invoke */
	struct funnel_lock *funnel_lock;	/* Non-reentrancy funnel */
	int		funnel_state;
#define TH_FN_OWNED	0x1		/* we own the funnel */
#define TH_FN_REFUNNEL	0x2		/* re-acquire funnel on dispatch */

	vm_offset_t	kernel_stack;	/* current kernel stack */
	vm_offset_t	reserved_stack;	/* reserved kernel stack */

	/* Thread state: */
	int		state;
/*
 *	Thread states [bits or'ed]
 */
#define TH_WAIT		0x01		/* queued for waiting */
#define TH_SUSP		0x02		/* stopped or requested to stop */
#define TH_RUN		0x04		/* running or on runq */
#define TH_UNINT	0x08		/* waiting uninterruptibly */
#define TH_TERMINATE	0x10		/* halted at termination */
#define TH_TERMINATE2	0x20		/* added to termination queue */

#define TH_IDLE		0x80		/* idling processor */

	/* Scheduling information */
	sched_mode_t	sched_mode;	/* scheduling mode */
	sched_mode_t	saved_mode;	/* saved mode during forced mode demotion */

	unsigned int	sched_flags;	/* current flag bits */
#define TH_SFLAG_FAIRSHARE_TRIPPED	0x0001	/* fairshare scheduling activated */
#define TH_SFLAG_FAILSAFE		0x0002	/* fail-safe has tripped */
#define TH_SFLAG_THROTTLED		0x0004	/* owner task in throttled state */
#define TH_SFLAG_DEMOTED_MASK		(TH_SFLAG_THROTTLED | TH_SFLAG_FAILSAFE | TH_SFLAG_FAIRSHARE_TRIPPED)

#define	TH_SFLAG_PROMOTED		0x0008	/* sched pri has been promoted */
#define TH_SFLAG_ABORT			0x0010	/* abort interruptible waits */
#define TH_SFLAG_ABORTSAFELY		0x0020	/* ... but only those at safe point */
#define TH_SFLAG_ABORTED_MASK		(TH_SFLAG_ABORT | TH_SFLAG_ABORTSAFELY)
#define	TH_SFLAG_DEPRESS		0x0040	/* normal depress yield */
#define TH_SFLAG_POLLDEPRESS		0x0080	/* polled depress yield */
#define TH_SFLAG_DEPRESSED_MASK		(TH_SFLAG_DEPRESS | TH_SFLAG_POLLDEPRESS)
#define TH_SFLAG_PRI_UPDATE		0x0100	/* Updating priority */
#define TH_SFLAG_EAGERPREEMPT		0x0200	/* Any preemption of this thread should be treated as if AST_URGENT applied */

/*
 * A thread can either be completely unthrottled, about to be throttled,
 * throttled (TH_SFLAG_THROTTLED), or about to be unthrottled
 */
#define	TH_SFLAG_PENDING_THROTTLE_DEMOTION	0x1000	/* Pending sched_mode demotion */
#define	TH_SFLAG_PENDING_THROTTLE_PROMOTION	0x2000	/* Pending sched_mode promotion */
#define	TH_SFLAG_PENDING_THROTTLE_MASK		(TH_SFLAG_PENDING_THROTTLE_DEMOTION | TH_SFLAG_PENDING_THROTTLE_PROMOTION)

	integer_t	sched_pri;	/* scheduled (current) priority */
	integer_t	priority;	/* base priority */
	integer_t	max_priority;	/* max base priority */
	integer_t	task_priority;	/* copy of task base priority */

#if defined(CONFIG_SCHED_GRRR)
#if 0
	uint16_t	grrr_deficit;	/* fixed point (1/1000th quantum) fractional deficit */
#endif
#endif

	integer_t	promotions;	/* level of promotion */
	integer_t	pending_promoter_index;
	void		*pending_promoter[2];

	integer_t	importance;	/* task-relative importance */

	/* real-time parameters */
	struct {			/* see mach/thread_policy.h */
		uint32_t	period;
		uint32_t	computation;
		uint32_t	constraint;
		boolean_t	preemptible;

		uint64_t	deadline;
	} realtime;

	uint32_t	was_promoted_on_wakeup;
	uint32_t	current_quantum;	/* duration of current quantum */
	uint64_t	last_run_time;		/* time when thread was switched away from */
	uint64_t	last_quantum_refill_time; /* time when current_quantum was refilled after expiration */

	/* Data used during setrun/dispatch */
	timer_data_t	system_timer;		/* system mode timer */
	processor_t	bound_processor;	/* bound to a processor? */
	processor_t	last_processor;		/* processor last dispatched on */
	processor_t	chosen_processor;	/* Where we want to run this thread */

	/* Fail-safe computation since last unblock or qualifying yield */
	uint64_t	computation_metered;
	uint64_t	computation_epoch;
	uint64_t	safe_release;		/* when to release fail-safe */

	/* Call out from scheduler */
	void		(*sched_call)(
				int		type,
				thread_t	thread);
#if defined(CONFIG_SCHED_PROTO)
	uint32_t	runqueue_generation;	/* last time runqueue was drained */
#endif

	/* Statistics and timesharing calculations */
#if defined(CONFIG_SCHED_TRADITIONAL)
	natural_t	sched_stamp;	/* last scheduler tick */
	natural_t	sched_usage;	/* timesharing cpu usage [sched] */
	natural_t	pri_shift;	/* usage -> priority from pset */
	natural_t	cpu_usage;	/* instrumented cpu usage [%cpu] */
	natural_t	cpu_delta;	/* accumulated cpu_usage delta */
#endif
	uint32_t	c_switch;	/* total context switches */
	uint32_t	p_switch;	/* total processor switches */
	uint32_t	ps_switch;	/* total pset switches */

	/* Timing data structures */
	int		precise_user_kernel_time; /* precise user/kernel enabled for this thread */
	timer_data_t	user_timer;		/* user mode timer */
	uint64_t	user_timer_save;	/* saved user timer value */
	uint64_t	system_timer_save;	/* saved system timer value */
	uint64_t	vtimer_user_save;	/* saved values for vtimers */
	uint64_t	vtimer_prof_save;
	uint64_t	vtimer_rlim_save;

	/* Timed wait expiration */
	timer_call_data_t	wait_timer;
	integer_t		wait_timer_active;
	boolean_t		wait_timer_is_set;

	/* Priority depression expiration */
	timer_call_data_t	depress_timer;
	integer_t		depress_timer_active;

	/*
	 * Processor/cache affinity
	 * - affinity_threads links task threads with the same affinity set
	 */
	affinity_set_t		affinity_set;
	queue_chain_t		affinity_threads;

	/* Various bits of stashed state */
	union {
		struct {
			mach_msg_return_t	state;		/* receive state */
			ipc_object_t		object;		/* object received on */
			mach_vm_address_t	msg_addr;	/* receive buffer pointer */
			mach_msg_size_t		msize;		/* max size for recvd msg */
			mach_msg_option_t	option;		/* options for receive */
			mach_msg_size_t		slist_size;	/* scatter list size */
			mach_port_name_t	receiver_name;	/* the receive port name */
			struct ipc_kmsg		*kmsg;		/* received message */
			mach_port_seqno_t	seqno;		/* seqno of recvd message */
			mach_msg_continue_t	continuation;
		} receive;
		struct {
			struct semaphore	*waitsemaphore;		/* semaphore ref */
			struct semaphore	*signalsemaphore;	/* semaphore ref */
			int			options;		/* semaphore options */
			kern_return_t		result;			/* primary result */
			mach_msg_continue_t	continuation;
		} sema;
		struct {
			int			option;		/* switch option */
		} swtch;
		int			misc;		/* catch-all for other state */
	} saved;

	/* IPC data structures */
	struct ipc_kmsg_queue ith_messages;
	mach_port_t	ith_rpc_reply;	/* reply port for kernel RPCs */

	/* Ast/Halt data structures */
	vm_offset_t	recover;	/* page fault recover (copyin/out) */
	uint32_t	ref_count;	/* number of references to me */

	queue_chain_t	threads;	/* global list of all threads */

	/* Activation */
	queue_chain_t	task_threads;

	/*** Machine-dependent state ***/
	struct machine_thread	machine;

	/* Task membership */
	struct task	*task;
	vm_map_t	map;

	decl_lck_mtx_data(,mutex)

	/* Kernel holds on this thread */
	int		suspend_count;

	/* User level suspensions */
	int		user_stop_count;

	/* Pending thread ast(s) */
	ast_t		ast;

	/* Miscellaneous bits guarded by mutex */
	uint32_t
		active:1,	/* Thread is active and has not been terminated */
		started:1,	/* Thread has been started after creation */
		static_param:1,	/* Disallow policy parameter changes */
		:0;

	/* Return Handlers */
	struct ReturnHandler {
		struct ReturnHandler	*next;
		void		(*handler)(
					struct ReturnHandler	*rh,
					struct thread		*thread);
	} *handlers, special_handler;

	/* Ports associated with this thread */
	struct ipc_port		*ith_self;	/* not a right, doesn't hold ref */
	struct ipc_port		*ith_sself;	/* a send right */
	struct exception_action	exc_actions[EXC_TYPES_COUNT];

	/* Owned ulocks (a lock set element) */
	queue_head_t	held_ulocks;

#ifdef	MACH_BSD
	void		*uthread;
#endif

#if CONFIG_DTRACE
	uint32_t	t_dtrace_predcache;	/* DTrace per thread predicate value hint */
	int64_t		t_dtrace_tracing;	/* Thread time under dtrace_probe() */
	int64_t		t_dtrace_vtime;
#endif

	uint32_t	t_page_creation_count;
	clock_sec_t	t_page_creation_time;

#define T_CHUD_MARKED		0x01	/* this thread is marked by CHUD */
#define T_IN_CHUD		0x02	/* this thread is already in a CHUD handler */
#define THREAD_PMC_FLAG		0x04	/* Bit in "t_chud" signifying PMC interest */
#define T_AST_CALLSTACK		0x08	/* Thread scheduled to dump a
					 * callstack on its next
					 * AST */
#define T_AST_NAME		0x10	/* Thread scheduled to dump
					 * its name on its next
					 * AST */
#define T_NAME_DONE		0x20	/* Thread has previously
					 * recorded its name */

	uint32_t	t_chud;		/* CHUD flags, used for Shark */
	uint32_t	chud_c_switch;	/* last dispatch detection */

	integer_t	mutex_count;	/* total count of locks held */

	uint64_t	thread_id;	/* system-wide unique thread id */

	/* Statistics accumulated per-thread and aggregated per-task */
	uint32_t	syscalls_unix;
	uint32_t	syscalls_mach;
	ledger_t	t_ledger;
	ledger_t	t_threadledger;	/* per thread ledger */
	struct process_policy	ext_appliedstate;	/* externally applied actions */
	struct process_policy	ext_policystate;	/* externally defined process policy states */
	struct process_policy	appliedstate;		/* self-applied actions */
	struct process_policy	policystate;		/* process wide policy states */
#if CONFIG_EMBEDDED
	task_watch_t *	taskwatch;		/* task watch */
	integer_t	saved_importance;	/* saved task-relative importance */
#endif /* CONFIG_EMBEDDED */
	uint32_t	thread_callout_interrupt_wakeups;
	uint32_t	thread_callout_platform_idle_wakeups;
	uint32_t	thread_timer_wakeups_bin_1;
	uint32_t	thread_timer_wakeups_bin_2;
	uint16_t	thread_tag;
	uint16_t	callout_woken_from_icontext:1,
			callout_woken_from_platform_idle:1,
			thread_bitfield_unused:14;

};

#define ith_state		saved.receive.state
#define ith_object		saved.receive.object
#define ith_msg_addr		saved.receive.msg_addr
#define ith_msize		saved.receive.msize
#define	ith_option		saved.receive.option
#define ith_scatter_list_size	saved.receive.slist_size
#define ith_receiver_name	saved.receive.receiver_name
#define ith_continuation	saved.receive.continuation
#define ith_kmsg		saved.receive.kmsg
#define ith_seqno		saved.receive.seqno

#define sth_waitsemaphore	saved.sema.waitsemaphore
#define sth_signalsemaphore	saved.sema.signalsemaphore
#define sth_options		saved.sema.options
#define sth_result		saved.sema.result
#define sth_continuation	saved.sema.continuation

extern void		thread_bootstrap(void) __attribute__((section("__TEXT, initcode")));

extern void		thread_init(void) __attribute__((section("__TEXT, initcode")));

extern void		thread_daemon_init(void);

#define	thread_reference_internal(thread)	\
			(void)hw_atomic_add(&(thread)->ref_count, 1)

#define thread_deallocate_internal(thread)	\
			hw_atomic_sub(&(thread)->ref_count, 1)

#define thread_reference(thread)				\
MACRO_BEGIN							\
	if ((thread) != THREAD_NULL)				\
		thread_reference_internal(thread);		\
MACRO_END
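
/*
 * Minimal usage sketch (an illustration, not taken from this file's
 * sources): every reference taken with thread_reference() must later be
 * dropped with thread_deallocate().
 *
 *	thread_t t = current_thread();
 *
 *	thread_reference(t);		// take an extra reference
 *	...				// 't' may be used safely here
 *	thread_deallocate(t);		// drop the reference when done
 */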

extern void		thread_deallocate(
				thread_t	thread);

extern void		thread_terminate_self(void);

extern kern_return_t	thread_terminate_internal(
				thread_t	thread);

extern void		thread_start_internal(
				thread_t	thread) __attribute__ ((noinline));

extern void		thread_terminate_enqueue(
				thread_t	thread);

extern void		thread_stack_enqueue(
				thread_t	thread);

extern void		thread_hold(
				thread_t	thread);

extern void		thread_release(
				thread_t	thread);


#define	thread_lock_init(th)	simple_lock_init(&(th)->sched_lock, 0)
#define thread_lock(th)		simple_lock(&(th)->sched_lock)
#define thread_unlock(th)	simple_unlock(&(th)->sched_lock)

#define wake_lock_init(th)	simple_lock_init(&(th)->wake_lock, 0)
#define wake_lock(th)		simple_lock(&(th)->wake_lock)
#define wake_unlock(th)		simple_unlock(&(th)->wake_lock)
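
/*
 * Hedged sketch of the idiom these macros support; splsched()/splx()
 * come from elsewhere in the kernel and the body is illustrative only.
 *
 *	spl_t s = splsched();		// block interrupts first
 *	thread_lock(thread);		// protects scheduling state
 *	...				// examine/update sched fields
 *	thread_unlock(thread);
 *	splx(s);
 */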

#define thread_should_halt_fast(thread)		(!(thread)->active)

extern void		stack_alloc(
				thread_t	thread);

extern void		stack_handoff(
				thread_t	from,
				thread_t	to);

extern void		stack_free(
				thread_t	thread);

extern void		stack_free_reserved(
				thread_t	thread);

extern boolean_t	stack_alloc_try(
				thread_t	thread);

extern void		stack_collect(void);

extern void		stack_init(void) __attribute__((section("__TEXT, initcode")));


extern kern_return_t	thread_info_internal(
				thread_t		thread,
				thread_flavor_t		flavor,
				thread_info_t		thread_info_out,
				mach_msg_type_number_t	*thread_info_count);

extern void		thread_task_priority(
				thread_t	thread,
				integer_t	priority,
				integer_t	max_priority);

extern void		thread_policy_reset(
				thread_t	thread);

extern kern_return_t	kernel_thread_create(
				thread_continue_t	continuation,
				void			*parameter,
				integer_t		priority,
				thread_t		*new_thread);

extern kern_return_t	kernel_thread_start_priority(
				thread_continue_t	continuation,
				void			*parameter,
				integer_t		priority,
				thread_t		*new_thread);

extern void		machine_stack_attach(
				thread_t	thread,
				vm_offset_t	stack);

extern vm_offset_t	machine_stack_detach(
				thread_t	thread);

extern void		machine_stack_handoff(
				thread_t	old,
				thread_t	new);

extern thread_t		machine_switch_context(
				thread_t		old_thread,
				thread_continue_t	continuation,
				thread_t		new_thread);

extern void		machine_load_context(
				thread_t	thread);

extern kern_return_t	machine_thread_state_initialize(
				thread_t	thread);

extern kern_return_t	machine_thread_set_state(
				thread_t		thread,
				thread_flavor_t		flavor,
				thread_state_t		state,
				mach_msg_type_number_t	count);

extern kern_return_t	machine_thread_get_state(
				thread_t		thread,
				thread_flavor_t		flavor,
				thread_state_t		state,
				mach_msg_type_number_t	*count);

extern kern_return_t	machine_thread_dup(
				thread_t	self,
				thread_t	target);

extern void		machine_thread_init(void);

extern kern_return_t	machine_thread_create(
				thread_t	thread,
				task_t		task);
extern void		machine_thread_switch_addrmode(
				thread_t	thread);

extern void		machine_thread_destroy(
				thread_t	thread);

extern void		machine_set_current_thread(
				thread_t	thread);

extern kern_return_t	machine_thread_get_kern_state(
				thread_t		thread,
				thread_flavor_t		flavor,
				thread_state_t		tstate,
				mach_msg_type_number_t	*count);

extern kern_return_t	machine_thread_inherit_taskwide(
				thread_t	thread,
				task_t		parent_task);

/*
 * XXX Funnel locks XXX
 */

struct funnel_lock {
	int		fnl_type;	/* funnel type */
	lck_mtx_t	*fnl_mutex;	/* underlying mutex for the funnel */
	void		*fnl_mtxholder;	/* thread (last) holding mutex */
	void		*fnl_mtxrelease; /* thread (last) releasing mutex */
	lck_mtx_t	*fnl_oldmutex;	/* Mutex before collapsing split funnel */
};

typedef struct ReturnHandler ReturnHandler;

#define	thread_mtx_lock(thread)		lck_mtx_lock(&(thread)->mutex)
#define	thread_mtx_try(thread)		lck_mtx_try_lock(&(thread)->mutex)
#define	thread_mtx_unlock(thread)	lck_mtx_unlock(&(thread)->mutex)

extern void		act_execute_returnhandlers(void);

extern void		install_special_handler(
				thread_t	thread);

extern void		special_handler(
				ReturnHandler	*rh,
				thread_t	thread);

void act_machine_sv_free(thread_t, int);

vm_offset_t		min_valid_stack_address(void);
vm_offset_t		max_valid_stack_address(void);

extern void		funnel_lock(
				struct funnel_lock	*lock);

extern void		funnel_unlock(
				struct funnel_lock	*lock);

static inline uint16_t	thread_set_tag_internal(thread_t thread, uint16_t tag) {
	return __sync_fetch_and_or(&thread->thread_tag, tag);
}
static inline uint16_t	thread_get_tag_internal(thread_t thread) {
	return thread->thread_tag;
}

#else	/* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

extern thread_t		current_thread(void);

extern void		thread_reference(
				thread_t	thread);

extern void		thread_deallocate(
				thread_t	thread);

__END_DECLS

#endif	/* MACH_KERNEL_PRIVATE */

#ifdef	KERNEL_PRIVATE

__BEGIN_DECLS

#if defined(__i386__)

extern thread_t		kernel_thread(
				task_t		task,
				void		(*start)(void));

#endif /* defined(__i386__) */

extern uint64_t		thread_tid(
				thread_t	thread);

extern uint64_t		thread_dispatchqaddr(
				thread_t	thread);

__END_DECLS

#endif	/* KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef	XNU_KERNEL_PRIVATE

/*
 * Thread tags; for easy identification.
 */
#define	THREAD_TAG_MAINTHREAD	0x1
#define	THREAD_TAG_CALLOUT	0x2
#define	THREAD_TAG_IOWORKLOOP	0x4

uint16_t	thread_set_tag(thread_t, uint16_t);
uint16_t	thread_get_tag(thread_t);
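
/*
 * Sketch (illustrative only): mark the current thread as an I/O
 * work-loop thread and test for that tag later.
 *
 *	(void) thread_set_tag(current_thread(), THREAD_TAG_IOWORKLOOP);
 *	...
 *	if (thread_get_tag(current_thread()) & THREAD_TAG_IOWORKLOOP)
 *		...
 */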

extern kern_return_t	thread_state_initialize(
				thread_t	thread);

extern kern_return_t	thread_setstatus(
				thread_t		thread,
				int			flavor,
				thread_state_t		tstate,
				mach_msg_type_number_t	count);

extern kern_return_t	thread_getstatus(
				thread_t		thread,
				int			flavor,
				thread_state_t		tstate,
				mach_msg_type_number_t	*count);

extern kern_return_t	thread_create_workq(
				task_t			task,
				thread_continue_t	thread_return,
				thread_t		*new_thread);

extern void		thread_yield_internal(
				mach_msg_timeout_t	interval);

/*
 * Thread-private CPU limits: apply a private CPU limit to this thread only. Available actions are:
 *
 * 1) Block. Prevent CPU consumption of the thread from exceeding the limit.
 * 2) Exception. Generate a resource consumption exception when the limit is exceeded.
 */
#define THREAD_CPULIMIT_BLOCK		0x1
#define THREAD_CPULIMIT_EXCEPTION	0x2

struct _thread_ledger_indices {
	int cpu_time;
};

extern struct _thread_ledger_indices thread_ledgers;

extern int thread_set_cpulimit(int action, uint8_t percentage, uint64_t interval_ns);
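
/*
 * Usage sketch (assumed values, shown for illustration only): limit the
 * calling thread to 50% of one CPU over a 10-second interval, blocking it
 * whenever the limit is exceeded.  NSEC_PER_SEC is defined elsewhere in
 * the kernel headers.
 *
 *	(void) thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, 50,
 *				   10 * NSEC_PER_SEC);
 */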

typedef struct funnel_lock	funnel_t;

#define THR_FUNNEL_NULL (funnel_t *)0

extern funnel_t		*funnel_alloc(
				int	type);

extern void		funnel_free(
				funnel_t	*lock);

extern funnel_t		*thread_funnel_get(void);

extern boolean_t	thread_funnel_set(
				funnel_t	*lock,
				boolean_t	funneled);
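
/*
 * Hedged example of the (legacy) funnel protocol; kernel_flock is the
 * traditional kernel funnel and is assumed to be declared elsewhere.
 *
 *	boolean_t funnel_state;
 *
 *	funnel_state = thread_funnel_set(kernel_flock, TRUE);	// enter funnel
 *	...							// funnelled code
 *	(void) thread_funnel_set(kernel_flock, funnel_state);	// restore state
 */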

extern void		thread_read_times(
				thread_t	thread,
				time_value_t	*user_time,
				time_value_t	*system_time);

extern void		thread_setuserstack(
				thread_t	thread,
				mach_vm_offset_t user_stack);

extern uint64_t		thread_adjuserstack(
				thread_t	thread,
				int		adjust);

extern void		thread_setentrypoint(
				thread_t	thread,
				mach_vm_offset_t entry);

extern kern_return_t	thread_setsinglestep(
				thread_t	thread,
				int		on);

extern kern_return_t	thread_userstack(
				thread_t,
				int,
				thread_state_t,
				unsigned int,
				mach_vm_offset_t *,
				int *);

extern kern_return_t	thread_entrypoint(
				thread_t,
				int,
				thread_state_t,
				unsigned int,
				mach_vm_offset_t *);

extern kern_return_t	thread_userstackdefault(
				thread_t,
				mach_vm_offset_t *);

extern kern_return_t	thread_wire_internal(
				host_priv_t	host_priv,
				thread_t	thread,
				boolean_t	wired,
				boolean_t	*prev_state);

extern kern_return_t	thread_dup(thread_t);

typedef void	(*sched_call_t)(
			int		type,
			thread_t	thread);

#define SCHED_CALL_BLOCK	0x1
#define SCHED_CALL_UNBLOCK	0x2

extern void	thread_sched_call(
			thread_t	thread,
			sched_call_t	call);

extern void	thread_static_param(
			thread_t	thread,
			boolean_t	state);

extern kern_return_t	thread_policy_set_internal(
				thread_t		thread,
				thread_policy_flavor_t	flavor,
				thread_policy_t		policy_info,
				mach_msg_type_number_t	count);
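
/*
 * Hedged sketch: hand a time-constraint (real-time) policy to the
 * internal setter.  The flavor, count, and data type come from
 * mach/thread_policy.h; the numeric values are placeholders only.
 *
 *	thread_time_constraint_policy_data_t rt;
 *
 *	rt.period      = ...;		// nominal period, in absolute time
 *	rt.computation = ...;		// computation time per period
 *	rt.constraint  = ...;		// latest completion, per period
 *	rt.preemptible = TRUE;
 *	(void) thread_policy_set_internal(thread,
 *		THREAD_TIME_CONSTRAINT_POLICY, (thread_policy_t)&rt,
 *		THREAD_TIME_CONSTRAINT_POLICY_COUNT);
 */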


extern task_t	get_threadtask(thread_t);
#define thread_is_64bit(thd)	\
	task_has_64BitAddr(get_threadtask(thd))


extern void		*get_bsdthread_info(thread_t);
extern void		set_bsdthread_info(thread_t, void *);
extern void		*uthread_alloc(task_t, thread_t, int);
extern void		uthread_cleanup(task_t, void *, void *);
extern void		uthread_zone_free(void *);
extern void		uthread_cred_free(void *);

extern boolean_t	thread_should_halt(
				thread_t	thread);

extern boolean_t	thread_should_abort(
				thread_t);

extern int is_64signalregset(void);

void act_set_apc(thread_t);
void act_set_kperf(thread_t);

extern uint32_t		dtrace_get_thread_predcache(thread_t);
extern int64_t		dtrace_get_thread_vtime(thread_t);
extern int64_t		dtrace_get_thread_tracing(thread_t);
extern boolean_t	dtrace_get_thread_reentering(thread_t);
extern vm_offset_t	dtrace_get_kernel_stack(thread_t);
extern void		dtrace_set_thread_predcache(thread_t, uint32_t);
extern void		dtrace_set_thread_vtime(thread_t, int64_t);
extern void		dtrace_set_thread_tracing(thread_t, int64_t);
extern void		dtrace_set_thread_reentering(thread_t, boolean_t);
extern vm_offset_t	dtrace_set_thread_recover(thread_t, vm_offset_t);
extern void		dtrace_thread_bootstrap(void);

extern int64_t		dtrace_calc_thread_recent_vtime(thread_t);


extern void		thread_set_wq_state32(
				thread_t	thread,
				thread_state_t	tstate);

extern void		thread_set_wq_state64(
				thread_t	thread,
				thread_state_t	tstate);

extern vm_offset_t	kernel_stack_mask;
extern vm_offset_t	kernel_stack_size;
extern vm_offset_t	kernel_stack_depth_max;

#endif	/* XNU_KERNEL_PRIVATE */

/*! @function kernel_thread_start
    @abstract Create a kernel thread.
    @discussion Creates a kernel thread that begins executing the supplied continuation function with the caller-specified parameter, and returns a reference to the new thread through new_thread. The caller owns that reference and must release it explicitly, by calling thread_deallocate(new_thread), once it is no longer needed.
    @param continuation A C-function pointer where the thread will begin execution.
    @param parameter Caller specified data to be passed to the new thread.
    @param new_thread Reference to the new thread is returned in this parameter.
    @result Returns KERN_SUCCESS on success or an appropriate kernel error code on failure.
*/

extern kern_return_t	kernel_thread_start(
				thread_continue_t	continuation,
				void			*parameter,
				thread_t		*new_thread);
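
/*
 * Example (illustrative only; my_thread_func and my_arg are placeholder
 * names, not part of this interface): start a kernel thread and drop the
 * caller-owned reference once it is no longer needed.
 *
 *	thread_t	new_thread;
 *	kern_return_t	kr;
 *
 *	kr = kernel_thread_start(my_thread_func, my_arg, &new_thread);
 *	if (kr == KERN_SUCCESS)
 *		thread_deallocate(new_thread);
 */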
#ifdef KERNEL_PRIVATE
void thread_set_eager_preempt(thread_t thread);
void thread_clear_eager_preempt(thread_t thread);
extern ipc_port_t convert_thread_to_port(thread_t);
#endif /* KERNEL_PRIVATE */

__END_DECLS

#endif	/* _KERN_THREAD_H_ */