/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef	_KERN_KERN_TYPES_H_
#define	_KERN_KERN_TYPES_H_

#include <mach/mach_types.h>
#include <mach/machine/vm_types.h>

#ifdef	KERNEL_PRIVATE

#ifndef	MACH_KERNEL_PRIVATE

#ifndef __LP64__
struct wait_queue { unsigned int opaque[2]; uintptr_t opaquep[2]; } ;
#else
struct wait_queue { unsigned char opaque[32]; };
#endif

#endif	/* MACH_KERNEL_PRIVATE */

typedef struct zone			*zone_t;
#define		ZONE_NULL		((zone_t) 0)

typedef struct wait_queue		*wait_queue_t;
#define		WAIT_QUEUE_NULL		((wait_queue_t) 0)
#define		SIZEOF_WAITQUEUE	sizeof(struct wait_queue)

typedef vm_offset_t			ipc_kobject_t;
#define		IKO_NULL		((ipc_kobject_t) 0)

#endif	/* KERNEL_PRIVATE */

typedef	void *event_t;			/* wait event */
#define		NO_EVENT		((event_t) 0)

typedef uint64_t event64_t;		/* 64 bit wait event */
#define		NO_EVENT64		((event64_t) 0)
#define		CAST_EVENT64_T(a_ptr)	((event64_t)((uintptr_t)(a_ptr)))
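
/*
 * Illustrative sketch (not part of the original header): wait events are
 * just unique addresses, so callers typically use the address of the object
 * (or field) being waited on as the event, and CAST_EVENT64_T() widens that
 * pointer-sized event for the 64-bit wait interfaces.  assert_wait() and
 * thread_wakeup() are declared in <kern/sched_prim.h>; "my_object" and its
 * "state" field are hypothetical.
 *
 *	struct my_object *obj;
 *
 *	assert_wait((event_t)&obj->state, THREAD_UNINT);	// sleep on the field's address
 *	...
 *	thread_wakeup((event_t)&obj->state);			// wake everyone waiting on it
 *
 *	event64_t ev = CAST_EVENT64_T(&obj->state);		// same event, 64-bit form
 */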

/*
 *	Possible wait_result_t values.
 */
typedef int wait_result_t;
#define THREAD_WAITING		-1		/* thread is waiting */
#define THREAD_AWAKENED		0		/* normal wakeup */
#define THREAD_TIMED_OUT	1		/* timeout expired */
#define THREAD_INTERRUPTED	2		/* aborted/interrupted */
#define THREAD_RESTART		3		/* restart operation entirely */
#define THREAD_NOT_WAITING	10		/* thread didn't need to wait */

typedef void (*thread_continue_t)(void *, wait_result_t);
#define	THREAD_CONTINUE_NULL	((thread_continue_t) 0)
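
/*
 * Illustrative sketch (not part of the original header): thread_block()
 * either returns the wait_result_t to its caller or, when given a
 * continuation, discards the kernel stack and resumes the thread at that
 * continuation with the wait result.  assert_wait(), thread_block() and
 * thread_block_parameter() are declared in <kern/sched_prim.h>; the
 * "my_state"/"my_continue" names are hypothetical.
 *
 *	static void
 *	my_continue(void *param, wait_result_t wr)	// resumed here after the wait
 *	{
 *		struct my_state *s = param;
 *
 *		if (wr != THREAD_AWAKENED) {
 *			// handle timeout / interruption
 *		}
 *		// finish the operation using s; does not return to the old frame
 *	}
 *
 *	assert_wait((event_t)s, THREAD_UNINT);
 *	thread_block_parameter(my_continue, s);		// continuation form
 *
 * or, keeping the stack:
 *
 *	wait_result_t wr = thread_block(THREAD_CONTINUE_NULL);
 */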

/*
 * Interruptible flag for waits.
 *
 * THREAD_UNINT: Uninterruptible wait
 *   Wait will only end when someone explicitly wakes up the thread, or if the
 *   wait timeout expires.
 *
 *   Use this state if the system as a whole cannot recover from a thread being
 *   interrupted out of the wait.
 *
 * THREAD_INTERRUPTIBLE:
 *   Wait will end if someone explicitly wakes up the thread, the wait timeout
 *   expires, or the current thread is being terminated.
 *
 *   This value can be used when your operation may not be cleanly restartable
 *   for the current process or thread (i.e. the loss of state would only be visible
 *   to the current client).  Since the thread is exiting anyway, you're willing
 *   to cut the operation short.  The system as a whole must be able to cleanly
 *   deal with the interruption (i.e. remain in a consistent and recoverable state).
 *
 * THREAD_ABORTSAFE:
 *   Wait will end if someone explicitly wakes up the thread, the wait timeout
 *   expires, the current thread is being terminated, any signal arrives for
 *   the task, or thread_abort_safely() is called on the thread.
 *
 *   Using this value means that you are willing to be interrupted in the face
 *   of any user signal, and safely rewind the thread back to the user/kernel
 *   boundary.  Many syscalls will try to restart the operation they were performing
 *   after the signal has been handled.
 *
 *   You must provide this value for any unbounded wait - otherwise you will
 *   pend user signals forever.
 *
 * Thread interrupt mask:
 *
 *   The current maximum interruptible state for the thread, as set by
 *   thread_interrupt_level(), will limit the conditions that will cause a wake.
 *   This is useful for code that can't be interrupted: set the level before
 *   calling code that doesn't know about that constraint.
 *
 * Thread termination vs safe abort:
 *
 *   Termination abort: thread_abort(), thread_terminate()
 *
 *   A termination abort is sticky.  Once a thread is marked for termination, every
 *   THREAD_INTERRUPTIBLE wait will return immediately with THREAD_INTERRUPTED
 *   until the thread successfully exits.
 *
 *   Safe abort: thread_abort_safely()
 *
 *   A safe abort is not sticky.  The current wait (or the next wait, if the thread
 *   is not currently waiting) will be interrupted, but then the abort condition is cleared.
 *   The next wait will sleep as normal.  Safe aborts only have a single effect.
 *
 *   The path back to the user/kernel boundary must not make any further unbounded
 *   wait calls.  The waiter should detect the THREAD_INTERRUPTED return code
 *   from an ABORTSAFE wait and return an error code that causes its caller
 *   to understand that the current operation has been interrupted, and its
 *   caller should return a similar error code, and so on until the
 *   user/kernel boundary is reached.  For Mach the error code is usually KERN_ABORTED,
 *   for BSD it is EINTR.
 *
 *   Debuggers rely on the safe abort mechanism - a signaled thread must return to
 *   the AST at the user/kernel boundary for the debugger to finish attaching.
 *
 *   No wait/block will ever disappear a thread out from under the waiter.  The block
 *   call will always either return or call the passed-in continuation.
 */
typedef int wait_interrupt_t;
#define THREAD_UNINT		0		/* not interruptible      */
#define THREAD_INTERRUPTIBLE	1		/* may not be restartable */
#define THREAD_ABORTSAFE	2		/* abortable safely       */
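
/*
 * Illustrative sketch (not part of the original header): an unbounded,
 * abort-safe wait that propagates interruption back toward the user/kernel
 * boundary.  assert_wait()/thread_block() are declared in
 * <kern/sched_prim.h>; "my_object" and its locking are hypothetical and
 * elided.
 *
 *	kern_return_t
 *	my_wait_for_ready(struct my_object *obj)
 *	{
 *		wait_result_t wr;
 *
 *		while (!obj->ready) {
 *			assert_wait((event_t)obj, THREAD_ABORTSAFE);
 *			wr = thread_block(THREAD_CONTINUE_NULL);
 *			if (wr == THREAD_INTERRUPTED) {
 *				// signal or safe abort: unwind; BSD callers map this to EINTR
 *				return KERN_ABORTED;
 *			}
 *		}
 *		return KERN_SUCCESS;
 *	}
 */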

typedef int wait_timeout_urgency_t;
#define TIMEOUT_URGENCY_SYS_NORMAL	0x00	/* use default leeway thresholds for system */
#define TIMEOUT_URGENCY_SYS_CRITICAL	0x01	/* use critical leeway thresholds for system */
#define TIMEOUT_URGENCY_SYS_BACKGROUND	0x02	/* use background leeway thresholds for system */

#define TIMEOUT_URGENCY_USER_MASK	0x10	/* mask to identify user timeout urgency classes */
#define TIMEOUT_URGENCY_USER_NORMAL	0x10	/* use default leeway thresholds for user */
#define TIMEOUT_URGENCY_USER_CRITICAL	0x11	/* use critical leeway thresholds for user */
#define TIMEOUT_URGENCY_USER_BACKGROUND	0x12	/* use background leeway thresholds for user */

#define TIMEOUT_URGENCY_MASK		0x13	/* mask to identify timeout urgency */

#define TIMEOUT_URGENCY_LEEWAY		0x20	/* don't ignore provided leeway value */

#define TIMEOUT_URGENCY_FIRST_AVAIL	0x40	/* first available bit outside of urgency mask/leeway */
#define TIMEOUT_URGENCY_RATELIMITED	0x80

/*
 * Timeout and deadline tokens for waits.
 * The following tokens define common values for leeway and deadline parameters.
 */
#define	TIMEOUT_NO_LEEWAY		(0ULL)
#define	TIMEOUT_WAIT_FOREVER		(0ULL)
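
/*
 * Illustrative sketch (not part of the original header): a timed wait whose
 * timer may be coalesced according to the user-critical leeway thresholds,
 * while still honoring an explicitly supplied leeway value.
 * assert_wait_timeout_with_leeway() is declared in <kern/sched_prim.h>;
 * "obj" is a hypothetical wait object.
 *
 *	wait_result_t wr;
 *
 *	assert_wait_timeout_with_leeway((event_t)obj, THREAD_UNINT,
 *	    TIMEOUT_URGENCY_USER_CRITICAL | TIMEOUT_URGENCY_LEEWAY,
 *	    10, 2, NSEC_PER_MSEC);		// 10 ms timeout with 2 ms leeway
 *	wr = thread_block(THREAD_CONTINUE_NULL);
 *	if (wr == THREAD_TIMED_OUT) {
 *		// the timeout (plus any applied leeway) expired before a wakeup
 *	}
 */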

#ifdef	KERNEL_PRIVATE

/*
 * n.b. this is defined in thread_call.h, but in the TIMEOUT_URGENCY flags space:
 * #define THREAD_CALL_CONTINUOUS 0x100
 */

#ifdef	MACH_KERNEL_PRIVATE

#include <kern/misc_protos.h>

typedef struct clock			*clock_t;

typedef struct mig_object		*mig_object_t;
#define MIG_OBJECT_NULL			((mig_object_t) 0)

typedef struct mig_notify		*mig_notify_t;
#define MIG_NOTIFY_NULL			((mig_notify_t) 0)

typedef struct pset_node		*pset_node_t;
#define PSET_NODE_NULL			((pset_node_t) 0)

typedef struct affinity_set		*affinity_set_t;
#define AFFINITY_SET_NULL		((affinity_set_t) 0)

typedef struct run_queue		*run_queue_t;
#define RUN_QUEUE_NULL			((run_queue_t) 0)

typedef struct grrr_run_queue		*grrr_run_queue_t;
#define GRRR_RUN_QUEUE_NULL		((grrr_run_queue_t) 0)

typedef struct grrr_group		*grrr_group_t;
#define GRRR_GROUP_NULL			((grrr_group_t) 0)

#if defined(CONFIG_SCHED_MULTIQ)
typedef struct sched_group		*sched_group_t;
#define SCHED_GROUP_NULL		((sched_group_t) 0)
#endif /* defined(CONFIG_SCHED_MULTIQ) */

#else	/* MACH_KERNEL_PRIVATE */

struct wait_queue_set ;
struct _wait_queue_link ;

#endif	/* MACH_KERNEL_PRIVATE */

typedef struct wait_queue_set		*wait_queue_set_t;
#define WAIT_QUEUE_SET_NULL		((wait_queue_set_t)0)
#define SIZEOF_WAITQUEUE_SET		wait_queue_set_size()

typedef struct _wait_queue_link		*wait_queue_link_t;
#define WAIT_QUEUE_LINK_NULL		((wait_queue_link_t)0)
#define SIZEOF_WAITQUEUE_LINK		wait_queue_link_size()

typedef struct perfcontrol_state	*perfcontrol_state_t;
#define PERFCONTROL_STATE_NULL		((perfcontrol_state_t)0)

/*
 * Enum to define the event which caused the CLPC callout
 */
typedef enum perfcontrol_event {
	/*
	 * Thread State Update Events
	 * Used to indicate events that update properties for
	 * a given thread. These events are passed as part of the
	 * sched_perfcontrol_state_update_t callout
	 */
	QUANTUM_EXPIRY          = 1,
	THREAD_GROUP_UPDATE     = 2,
	PERFCONTROL_ATTR_UPDATE = 3,
	/*
	 * Context Switch Events
	 * Used to indicate events that switch from one thread
	 * to the other. These events are passed as part of the
	 * sched_perfcontrol_csw_t callout.
	 */
	CONTEXT_SWITCH          = 10,
	IDLE                    = 11
} perfcontrol_event;

/*
 * Flags for the sched_perfcontrol_csw_t & sched_perfcontrol_state_update_t
 * callouts.
 *
 * Currently defined flags are:
 *
 * PERFCONTROL_CALLOUT_WAKE_UNSAFE - Flag to indicate that it is unsafe to
 *	do a wakeup as part of this callout. If this is set, it
 *	indicates that the scheduler holds a spinlock which might be needed
 *	in the wakeup path. In that case CLPC should do a thread_call
 *	instead of a direct wakeup to run their workloop thread.
 */
#define PERFCONTROL_CALLOUT_WAKE_UNSAFE		0x1
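
/*
 * Illustrative sketch (not part of the original header): a CLPC callout
 * honoring PERFCONTROL_CALLOUT_WAKE_UNSAFE by deferring its wakeup to a
 * thread call.  The callout shape shown here is an assumption for
 * illustration only; thread_call_enter()/thread_call_allocate() are
 * declared in <kern/thread_call.h>.
 *
 *	static thread_call_t clpc_work_call;	// set up once with thread_call_allocate()
 *
 *	static void
 *	my_csw_callout(... , uint32_t flags)
 *	{
 *		if (flags & PERFCONTROL_CALLOUT_WAKE_UNSAFE) {
 *			// scheduler may hold a spinlock needed on the wakeup path:
 *			// defer to a thread call instead of waking the workloop directly
 *			thread_call_enter(clpc_work_call);
 *		} else {
 *			thread_wakeup((event_t)&clpc_work_call);
 *		}
 *	}
 */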

/*
 * Enum to define the perfcontrol class for thread.
 * thread_get_perfcontrol_class() takes the thread's
 * priority, QoS, urgency etc. into consideration and
 * produces a value in this enum.
 */
typedef enum perfcontrol_class {
	/* Idle thread */
	PERFCONTROL_CLASS_IDLE		= 1,
	/* Kernel thread */
	PERFCONTROL_CLASS_KERNEL	= 2,
	/* Realtime Thread */
	PERFCONTROL_CLASS_REALTIME	= 3,
	/* Background Thread */
	PERFCONTROL_CLASS_BACKGROUND	= 4,
	/* Utility Thread */
	PERFCONTROL_CLASS_UTILITY	= 5,
	/* Non-UI Thread (Default/Legacy) */
	PERFCONTROL_CLASS_NONUI		= 6,
	/* UI Thread (UI/IN) */
	PERFCONTROL_CLASS_UI		= 7,
	/* Above UI Thread */
	PERFCONTROL_CLASS_ABOVEUI	= 8,
} perfcontrol_class_t;
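
/*
 * Illustrative sketch (not part of the original header): consuming the class
 * computed by thread_get_perfcontrol_class() (declared elsewhere in the
 * kernel; the exact signature is assumed here) to bias a CLPC-style core
 * recommendation.
 *
 *	perfcontrol_class_t cls = thread_get_perfcontrol_class(thread);
 *
 *	switch (cls) {
 *	case PERFCONTROL_CLASS_REALTIME:
 *	case PERFCONTROL_CLASS_UI:
 *	case PERFCONTROL_CLASS_ABOVEUI:
 *		// latency-sensitive work: prefer a performance core
 *		break;
 *	case PERFCONTROL_CLASS_BACKGROUND:
 *	case PERFCONTROL_CLASS_IDLE:
 *		// throughput-insensitive work: an efficiency core is sufficient
 *		break;
 *	default:
 *		// kernel / utility / default threads: no strong preference
 *		break;
 *	}
 */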

#endif	/* KERNEL_PRIVATE */

#endif	/* _KERN_KERN_TYPES_H_ */