/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#ifndef _KERN_KERN_TYPES_H_
#define _KERN_KERN_TYPES_H_

#include <stdint.h>
#include <mach/mach_types.h>
#include <mach/machine/vm_types.h>

#ifdef KERNEL_PRIVATE

#ifndef MACH_KERNEL_PRIVATE

struct zone;

#ifndef __LP64__
struct wait_queue { unsigned int opaque[2]; uintptr_t opaquep[2]; };
#else
struct wait_queue { unsigned char opaque[32]; };
#endif

#endif /* MACH_KERNEL_PRIVATE */

typedef struct zone             *zone_t;
#define ZONE_NULL               ((zone_t) 0)

typedef struct wait_queue       *wait_queue_t;
#define WAIT_QUEUE_NULL         ((wait_queue_t) 0)
#define SIZEOF_WAITQUEUE        sizeof(struct wait_queue)

typedef void *                  ipc_kobject_t;
#define IKO_NULL                ((ipc_kobject_t) 0)

#endif /* KERNEL_PRIVATE */

typedef void                    *event_t;       /* wait event */
#define NO_EVENT                ((event_t) 0)

typedef uint64_t                event64_t;      /* 64 bit wait event */
#define NO_EVENT64              ((event64_t) 0)
#define CAST_EVENT64_T(a_ptr)   ((event64_t)((uintptr_t)(a_ptr)))

/*
 * Possible wait_result_t values.
 */
typedef int wait_result_t;
#define THREAD_WAITING          -1      /* thread is waiting */
#define THREAD_AWAKENED         0       /* normal wakeup */
#define THREAD_TIMED_OUT        1       /* timeout expired */
#define THREAD_INTERRUPTED      2       /* aborted/interrupted */
#define THREAD_RESTART          3       /* restart operation entirely */
#define THREAD_NOT_WAITING      10      /* thread didn't need to wait */
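
/*
 * Illustrative sketch (not part of this header): the usual blocking
 * pattern pairs assert_wait() and thread_block(), both declared in
 * <kern/sched_prim.h>, with a check of the returned wait_result_t.
 * "object" and its lock functions are hypothetical stand-ins.
 *
 *	wait_result_t wr;
 *
 *	wr = assert_wait((event_t)&object->state, THREAD_INTERRUPTIBLE);
 *	if (wr == THREAD_WAITING) {
 *		object_unlock(object);          // never block holding the lock
 *		wr = thread_block(THREAD_CONTINUE_NULL);
 *		object_lock(object);
 *	}
 *	if (wr == THREAD_INTERRUPTED) {
 *		return KERN_ABORTED;            // unwind toward the user/kernel boundary
 *	}
 */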

typedef void (*thread_continue_t)(void *, wait_result_t);
#define THREAD_CONTINUE_NULL    ((thread_continue_t) NULL)
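
/*
 * Illustrative sketch (hypothetical function, not part of this header):
 * a continuation passed to thread_block()/thread_block_parameter()
 * (declared in <kern/sched_prim.h>) lets the kernel discard the blocked
 * thread's stack; on wakeup the thread restarts in the continuation with
 * the saved parameter and the wait result, rather than returning from
 * the block call.
 *
 *	static void
 *	example_continue(void *param, wait_result_t wr)
 *	{
 *		struct example_state *state = param;  // re-derive discarded context
 *		...
 *	}
 *
 *	thread_block_parameter(example_continue, state);  // does not return on block
 */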

/*
 * Interruptible flag for waits.
 *
 * THREAD_UNINT: Uninterruptible wait
 * Wait will only end when someone explicitly wakes up the thread, or if the
 * wait timeout expires.
 *
 * Use this state if the system as a whole cannot recover from a thread being
 * interrupted out of the wait.
 *
 * THREAD_INTERRUPTIBLE:
 * Wait will end if someone explicitly wakes up the thread, the wait timeout
 * expires, or the current thread is being terminated.
 *
 * This value can be used when your operation may not be cleanly restartable
 * for the current process or thread (i.e. the loss of state would only be
 * visible to the current client). Since the thread is exiting anyway, you're
 * willing to cut the operation short. The system as a whole must be able to
 * cleanly deal with the interruption (i.e. remain in a consistent and
 * recoverable state).
 *
 * THREAD_ABORTSAFE:
 * Wait will end if someone explicitly wakes up the thread, the wait timeout
 * expires, the current thread is being terminated, any signal arrives for
 * the task, or thread_abort_safely() is called on the thread.
 *
 * Using this value means that you are willing to be interrupted in the face
 * of any user signal, and safely rewind the thread back to the user/kernel
 * boundary. Many syscalls will try to restart the operation they were
 * performing after the signal has been handled.
 *
 * You must provide this value for any unbounded wait - otherwise you will
 * pend user signals forever.
 *
 * THREAD_WAIT_NOREPORT:
 * The scheduler has a callback (sched_call) that some subsystems use to
 * decide whether more threads should be thrown at a given problem by trying
 * to maintain a good level of concurrency.
 *
 * When the wait will not be helped by adding more threads (e.g. lock
 * contention), using this flag as an argument to assert_wait* (or any of its
 * wrappers) will prevent the next wait/block from causing thread creation.
 *
 * This comes in two flavors: THREAD_WAIT_NOREPORT_KERNEL and
 * THREAD_WAIT_NOREPORT_USER, which suppress the report for kernel and user
 * threads respectively.
 *
 * Thread interrupt mask:
 *
 * The current maximum interruptible state for the thread, as set by
 * thread_interrupt_level(), limits the conditions that will cause a wake.
 * This lets code that must not be interrupted lower the level before calling
 * code that is unaware of that requirement.
 *
 * Thread termination vs safe abort:
 *
 * Termination abort: thread_abort(), thread_terminate()
 *
 * A termination abort is sticky. Once a thread is marked for termination,
 * every THREAD_INTERRUPTIBLE wait will return immediately with
 * THREAD_INTERRUPTED until the thread successfully exits.
 *
 * Safe abort: thread_abort_safely()
 *
 * A safe abort is not sticky. The current wait (or the next wait, if the
 * thread is not currently waiting) will be interrupted, but then the abort
 * condition is cleared. The next wait will sleep as normal. Safe aborts have
 * only a single effect.
 *
 * The path back to the user/kernel boundary must not make any further
 * unbounded wait calls. The waiter should detect the THREAD_INTERRUPTED
 * return code from an ABORTSAFE wait and return an error code that causes
 * its caller to understand that the current operation has been interrupted,
 * and its caller should return a similar error code, and so on until the
 * user/kernel boundary is reached. For Mach the error code is usually
 * KERN_ABORTED, for BSD it is EINTR.
 *
 * Debuggers rely on the safe abort mechanism - a signaled thread must return
 * to the AST at the user/kernel boundary for the debugger to finish attaching.
 *
 * No wait/block will ever make a thread disappear out from under the waiter.
 * The block call will always either return or call the passed-in continuation.
 */
typedef int wait_interrupt_t;
#define THREAD_UNINT                    0x00000000      /* not interruptible */
#define THREAD_INTERRUPTIBLE            0x00000001      /* may not be restartable */
#define THREAD_ABORTSAFE                0x00000002      /* abortable safely */
#define THREAD_WAIT_NOREPORT_KERNEL     0x80000000
#define THREAD_WAIT_NOREPORT_USER       0x40000000
#define THREAD_WAIT_NOREPORT            (THREAD_WAIT_NOREPORT_KERNEL | THREAD_WAIT_NOREPORT_USER)
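
/*
 * Illustrative sketch (not part of this header): code that must not be
 * interrupted can lower the thread's interrupt level around a call into
 * code that waits, using thread_interrupt_level() from
 * <kern/sched_prim.h>; nested waits then behave as THREAD_UNINT.
 * do_something_that_may_wait() is a hypothetical stand-in.
 *
 *	wait_interrupt_t s = thread_interrupt_level(THREAD_UNINT);
 *	result = do_something_that_may_wait();
 *	thread_interrupt_level(s);              // restore the previous level
 */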

typedef int wait_timeout_urgency_t;
#define TIMEOUT_URGENCY_SYS_NORMAL      0x00    /* use default leeway thresholds for system */
#define TIMEOUT_URGENCY_SYS_CRITICAL    0x01    /* use critical leeway thresholds for system */
#define TIMEOUT_URGENCY_SYS_BACKGROUND  0x02    /* use background leeway thresholds for system */

#define TIMEOUT_URGENCY_USER_MASK       0x10    /* mask to identify user timeout urgency classes */
#define TIMEOUT_URGENCY_USER_NORMAL     0x10    /* use default leeway thresholds for user */
#define TIMEOUT_URGENCY_USER_CRITICAL   0x11    /* use critical leeway thresholds for user */
#define TIMEOUT_URGENCY_USER_BACKGROUND 0x12    /* use background leeway thresholds for user */

#define TIMEOUT_URGENCY_MASK            0x13    /* mask to identify timeout urgency */

#define TIMEOUT_URGENCY_LEEWAY          0x20    /* don't ignore provided leeway value */

#define TIMEOUT_URGENCY_FIRST_AVAIL     0x40    /* first available bit outside of urgency mask/leeway */
#define TIMEOUT_URGENCY_RATELIMITED     0x80

/*
 * Timeout and deadline tokens for waits.
 * The following tokens define common values for leeway and deadline parameters.
 */
#define TIMEOUT_NO_LEEWAY               (0ULL)
#define TIMEOUT_WAIT_FOREVER            (0ULL)
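
/*
 * Illustrative sketch (a sketch under assumptions, not part of this
 * header): a timed wait can carry a wait_timeout_urgency_t, OR'ing in
 * TIMEOUT_URGENCY_LEEWAY to ask the kernel to honor the caller-supplied
 * leeway. assert_wait_timeout_with_leeway() is declared in
 * <kern/sched_prim.h>; consult it for the authoritative prototype.
 *
 *	wr = assert_wait_timeout_with_leeway(event, THREAD_UNINT,
 *	    TIMEOUT_URGENCY_USER_NORMAL | TIMEOUT_URGENCY_LEEWAY,
 *	    timeout_ms, leeway_ms, NSEC_PER_MSEC);
 *	if (wr == THREAD_WAITING) {
 *		wr = thread_block(THREAD_CONTINUE_NULL);
 *	}
 *	if (wr == THREAD_TIMED_OUT) {
 *		...                             // deadline (plus leeway) expired
 *	}
 */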

#ifdef KERNEL_PRIVATE

/*
 * n.b. this is defined in thread_call.h, but in the TIMEOUT_URGENCY flags space:
 * #define THREAD_CALL_CONTINUOUS 0x100
 */

#ifdef MACH_KERNEL_PRIVATE

#include <kern/misc_protos.h>
typedef struct clock            *clock_t;

typedef struct mig_object       *mig_object_t;
#define MIG_OBJECT_NULL         ((mig_object_t) 0)

typedef struct mig_notify       *mig_notify_t;
#define MIG_NOTIFY_NULL         ((mig_notify_t) 0)

typedef struct pset_node        *pset_node_t;
#define PSET_NODE_NULL          ((pset_node_t) 0)

typedef struct affinity_set     *affinity_set_t;
#define AFFINITY_SET_NULL       ((affinity_set_t) 0)

typedef struct run_queue        *run_queue_t;
#define RUN_QUEUE_NULL          ((run_queue_t) 0)

typedef struct grrr_run_queue   *grrr_run_queue_t;
#define GRRR_RUN_QUEUE_NULL     ((grrr_run_queue_t) 0)

typedef struct grrr_group       *grrr_group_t;
#define GRRR_GROUP_NULL         ((grrr_group_t) 0)

#if defined(CONFIG_SCHED_MULTIQ)
typedef struct sched_group      *sched_group_t;
#define SCHED_GROUP_NULL        ((sched_group_t) 0)
#endif /* defined(CONFIG_SCHED_MULTIQ) */

#else /* MACH_KERNEL_PRIVATE */

struct wait_queue_set;
struct _wait_queue_link;

#endif /* MACH_KERNEL_PRIVATE */

typedef struct wait_queue_set   *wait_queue_set_t;
#define WAIT_QUEUE_SET_NULL     ((wait_queue_set_t)0)
#define SIZEOF_WAITQUEUE_SET    wait_queue_set_size()

typedef struct _wait_queue_link *wait_queue_link_t;
#define WAIT_QUEUE_LINK_NULL    ((wait_queue_link_t)0)
#define SIZEOF_WAITQUEUE_LINK   wait_queue_link_size()

typedef struct perfcontrol_state        *perfcontrol_state_t;
#define PERFCONTROL_STATE_NULL          ((perfcontrol_state_t)0)

/*
 * Enum to define the event which caused the CLPC callout
 */
typedef enum perfcontrol_event {
	/*
	 * Thread State Update Events
	 * Used to indicate events that update properties for
	 * a given thread. These events are passed as part of the
	 * sched_perfcontrol_state_update_t callout
	 */
	QUANTUM_EXPIRY = 1,
	THREAD_GROUP_UPDATE = 2,
	PERFCONTROL_ATTR_UPDATE = 3,
	/*
	 * Context Switch Events
	 * Used to indicate events that switch from one thread
	 * to another. These events are passed as part of the
	 * sched_perfcontrol_csw_t callout.
	 */
	CONTEXT_SWITCH = 10,
	IDLE = 11
} perfcontrol_event;
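
/*
 * Illustrative sketch (hypothetical CLPC-side handler, not part of this
 * header): a callout can dispatch on the perfcontrol_event it is handed;
 * the helper names are stand-ins for illustration.
 *
 *	switch (event) {
 *	case QUANTUM_EXPIRY:
 *	case THREAD_GROUP_UPDATE:
 *	case PERFCONTROL_ATTR_UPDATE:
 *		update_thread_state(...);       // state-update flavor
 *		break;
 *	case CONTEXT_SWITCH:
 *	case IDLE:
 *		account_switch(...);            // context-switch flavor
 *		break;
 *	}
 */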

/*
 * Flags for the sched_perfcontrol_csw_t, sched_perfcontrol_state_update_t
 * & sched_perfcontrol_thread_group_blocked_t/sched_perfcontrol_thread_group_unblocked_t
 * callouts.
 * Currently defined flags are:
 *
 * PERFCONTROL_CALLOUT_WAKE_UNSAFE: Flag to indicate it is unsafe to
 * do a wakeup as part of this callout. If this is set, it
 * indicates that the scheduler holds a spinlock which might be needed
 * in the wakeup path. In that case CLPC should do a thread_call
 * instead of a direct wakeup to run its workloop thread.
 *
 * PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER: Flag to indicate
 * that the render server thread group is blocking/unblocking progress
 * of another thread group. The render server thread group is well
 * known to CLPC, so XNU simply passes this flag instead of taking
 * a reference on it. It is illegal to pass both the TG identity and
 * this flag in the callout; this flag should only be set with the
 * blocking/unblocking TG being NULL.
 */
#define PERFCONTROL_CALLOUT_WAKE_UNSAFE                 (0x1)
#define PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER   (0x2)
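
/*
 * Illustrative sketch (hypothetical, not part of this header): a callout
 * honoring PERFCONTROL_CALLOUT_WAKE_UNSAFE defers its wakeup through a
 * thread call (see <kern/thread_call.h>) instead of waking its workloop
 * thread directly; deferred_wakeup_call and workloop_event are stand-ins.
 *
 *	if (flags & PERFCONTROL_CALLOUT_WAKE_UNSAFE) {
 *		thread_call_enter(deferred_wakeup_call);  // safe under scheduler locks
 *	} else {
 *		thread_wakeup(&workloop_event);           // direct wakeup is allowed
 *	}
 */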

/*
 * Enum to define the perfcontrol class for a thread.
 * thread_get_perfcontrol_class() takes the thread's
 * priority, QoS, urgency etc. into consideration and
 * produces a value in this enum.
 */
typedef enum perfcontrol_class {
	/* Idle thread */
	PERFCONTROL_CLASS_IDLE = 1,
	/* Kernel thread */
	PERFCONTROL_CLASS_KERNEL = 2,
	/* Realtime Thread */
	PERFCONTROL_CLASS_REALTIME = 3,
	/* Background Thread */
	PERFCONTROL_CLASS_BACKGROUND = 4,
	/* Utility Thread */
	PERFCONTROL_CLASS_UTILITY = 5,
	/* Non-UI Thread (Default/Legacy) */
	PERFCONTROL_CLASS_NONUI = 6,
	/* UI Thread (UI/IN) */
	PERFCONTROL_CLASS_UI = 7,
	/* Above UI Thread */
	PERFCONTROL_CLASS_ABOVEUI = 8,
	/* Maximum class */
	PERFCONTROL_CLASS_MAX = 9,
} perfcontrol_class_t;
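
/*
 * Illustrative sketch (hypothetical policy, not part of this header): a
 * performance controller might bucket these classes when forming a
 * recommendation; the prefer_* helpers are stand-ins.
 *
 *	switch (thread_get_perfcontrol_class(thread)) {
 *	case PERFCONTROL_CLASS_REALTIME:
 *	case PERFCONTROL_CLASS_UI:
 *	case PERFCONTROL_CLASS_ABOVEUI:
 *		prefer_performance_cores();
 *		break;
 *	case PERFCONTROL_CLASS_BACKGROUND:
 *	case PERFCONTROL_CLASS_UTILITY:
 *		prefer_efficiency_cores();
 *		break;
 *	default:
 *		break;
 *	}
 */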

/*
 * struct sched_clutch_edge
 *
 * Represents an edge from one cluster to another in the Edge Scheduler.
 * An edge has the following properties:
 * - Edge Weight: A value which indicates the likelihood of migrating threads
 *   across that edge. The edge weight is expressed in microseconds (usecs)
 *   of scheduling delay.
 * - Migration Allowed: Bit indicating if migrations are allowed across this
 *   edge from src to dst.
 * - Steal Allowed: Bit indicating whether the dst cluster is allowed to steal
 *   across that edge when a processor in that cluster goes idle.
 *
 * These values can be modified by CLPC for better load balancing, thermal
 * mitigations etc.
 */
typedef union sched_clutch_edge {
	struct {
		uint32_t
		/* boolean_t */ sce_migration_allowed : 1,
		/* boolean_t */ sce_steal_allowed : 1,
		                _reserved : 30;
		uint32_t        sce_migration_weight;
	};
	uint64_t sce_edge_packed;
} sched_clutch_edge;
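
/*
 * Illustrative sketch (not part of this header): C11 anonymous-struct
 * designated initializers allow direct edge construction, and
 * sce_edge_packed exposes the whole edge as a single 64-bit value.
 *
 *	sched_clutch_edge e = {
 *		.sce_migration_allowed = 1,     // threads may migrate src -> dst
 *		.sce_steal_allowed     = 1,     // idle dst CPUs may steal
 *		.sce_migration_weight  = 50,    // 50us of scheduling delay
 *	};
 *	uint64_t snapshot = e.sce_edge_packed;  // packed view of the whole edge
 */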

#endif /* KERNEL_PRIVATE */

#endif /* _KERN_KERN_TYPES_H_ */