/* apple/xnu (xnu-4570.31.3): osfmk/mach/thread_policy.h */
/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _MACH_THREAD_POLICY_H_
#define _MACH_THREAD_POLICY_H_

#include <mach/mach_types.h>

/*
 * These are the calls for accessing the policy parameters
 * of a particular thread.
 *
 * The extra 'get_default' parameter to the second call is
 * IN/OUT as follows:
 * 1) if asserted on the way in, it indicates that the default
 * values should be returned rather than the ones currently set;
 * in this case 'get_default' will always be asserted on return;
 * 2) if unasserted on the way in, the current settings are
 * desired; if it is still unasserted on return, the info
 * returned reflects the current settings.  If 'get_default'
 * returns asserted, there are no current settings (because other
 * parameters take precedence) and the default values are
 * returned instead.
 */

typedef natural_t thread_policy_flavor_t;
typedef integer_t *thread_policy_t;

/*
kern_return_t thread_policy_set(
        thread_t                        thread,
        thread_policy_flavor_t          flavor,
        thread_policy_t                 policy_info,
        mach_msg_type_number_t          count);

kern_return_t thread_policy_get(
        thread_t                        thread,
        thread_policy_flavor_t          flavor,
        thread_policy_t                 policy_info,
        mach_msg_type_number_t          *count,
        boolean_t                       *get_default);
*/

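/*
 * Illustrative usage sketch (not part of the original header): query the
 * calling thread's extended policy (flavor defined below) and check whether
 * the kernel handed back default values.  thread_policy_get() is declared in
 * <mach/thread_act.h>, which <mach/mach.h> pulls in; the port returned by
 * mach_thread_self() must be deallocated by the caller.
 *
 *      #include <mach/mach.h>
 *
 *      thread_extended_policy_data_t info;
 *      mach_msg_type_number_t count = THREAD_EXTENDED_POLICY_COUNT;
 *      boolean_t get_default = FALSE;      // FALSE: ask for current settings
 *      mach_port_t self = mach_thread_self();
 *
 *      kern_return_t kr = thread_policy_get(self, THREAD_EXTENDED_POLICY,
 *          (thread_policy_t)&info, &count, &get_default);
 *      if (kr == KERN_SUCCESS && get_default) {
 *              // no explicit settings in effect; 'info' holds the defaults
 *      }
 *      mach_port_deallocate(mach_task_self(), self);
 */
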
/*
 * Defined flavors.
 */
/*
 * THREAD_STANDARD_POLICY:
 *
 * This is the standard (fair) scheduling mode, assigned to new
 * threads. The thread will be given processor time in a manner
 * that apportions an approximately equal share to long-running
 * computations.
 *
 * Parameters:
 * [none]
 */

#define THREAD_STANDARD_POLICY 1

struct thread_standard_policy {
        natural_t no_data;
};

typedef struct thread_standard_policy thread_standard_policy_data_t;
typedef struct thread_standard_policy *thread_standard_policy_t;

#define THREAD_STANDARD_POLICY_COUNT 0

/*
 * THREAD_EXTENDED_POLICY:
 *
 * Extended form of THREAD_STANDARD_POLICY, which supplies a
 * hint indicating whether this is a long-running computation.
 *
 * Parameters:
 *
 * timeshare: TRUE (the default) results in scheduling behavior
 * identical to THREAD_STANDARD_POLICY.
 */

#define THREAD_EXTENDED_POLICY 1

struct thread_extended_policy {
        boolean_t timeshare;
};

typedef struct thread_extended_policy thread_extended_policy_data_t;
typedef struct thread_extended_policy *thread_extended_policy_t;

#define THREAD_EXTENDED_POLICY_COUNT ((mach_msg_type_number_t) \
        (sizeof (thread_extended_policy_data_t) / sizeof (integer_t)))

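/*
 * Illustrative usage sketch (not part of the original header): mark a worker
 * thread as a long-running, non-timeshare computation.  'worker' is a
 * hypothetical pthread_t; pthread_mach_thread_np() from <pthread.h> converts
 * it to the Mach thread port that thread_policy_set() expects.
 *
 *      #include <mach/mach.h>
 *      #include <pthread.h>
 *
 *      thread_extended_policy_data_t ext = { .timeshare = FALSE };
 *      kern_return_t kr = thread_policy_set(pthread_mach_thread_np(worker),
 *          THREAD_EXTENDED_POLICY, (thread_policy_t)&ext,
 *          THREAD_EXTENDED_POLICY_COUNT);
 */
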
/*
 * THREAD_TIME_CONSTRAINT_POLICY:
 *
 * This scheduling mode is for threads that have real-time
 * constraints on their execution.
 *
 * Parameters:
 *
 * period: This is the nominal amount of time between separate
 * processing arrivals, specified in absolute time units. A
 * value of 0 indicates that there is no inherent periodicity in
 * the computation.
 *
 * computation: This is the nominal amount of computation
 * time needed during a separate processing arrival, specified
 * in absolute time units.
 *
 * constraint: This is the maximum amount of real time that
 * may elapse from the start of a separate processing arrival
 * to the end of computation for logically correct functioning,
 * specified in absolute time units. Must be (>= computation).
 * Note that latency = (constraint - computation).
 *
 * preemptible: This indicates whether the computation may be
 * interrupted, subject to the constraint specified above.
 */

#define THREAD_TIME_CONSTRAINT_POLICY 2

struct thread_time_constraint_policy {
        uint32_t period;
        uint32_t computation;
        uint32_t constraint;
        boolean_t preemptible;
};

typedef struct thread_time_constraint_policy \
        thread_time_constraint_policy_data_t;
typedef struct thread_time_constraint_policy \
        *thread_time_constraint_policy_t;

#define THREAD_TIME_CONSTRAINT_POLICY_COUNT ((mach_msg_type_number_t) \
        (sizeof (thread_time_constraint_policy_data_t) / sizeof (integer_t)))

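/*
 * Illustrative usage sketch (not part of the original header), for a
 * hypothetical periodic worker that wants roughly 2 ms of CPU every 10 ms
 * and must finish within 5 ms of each arrival; the numbers are examples
 * only.  mach_timebase_info() from <mach/mach_time.h> converts nanoseconds
 * to the absolute time units this policy expects.
 *
 *      #include <mach/mach.h>
 *      #include <mach/mach_time.h>
 *
 *      mach_timebase_info_data_t tb;
 *      mach_timebase_info(&tb);
 *      uint64_t one_ms = (1000000ULL * tb.denom) / tb.numer;  // 1 ms in abs units
 *
 *      thread_time_constraint_policy_data_t tc = {
 *              .period      = (uint32_t)(10 * one_ms),
 *              .computation = (uint32_t)( 2 * one_ms),
 *              .constraint  = (uint32_t)( 5 * one_ms),  // must be >= computation
 *              .preemptible = TRUE
 *      };
 *      mach_port_t self = mach_thread_self();
 *      kern_return_t kr = thread_policy_set(self, THREAD_TIME_CONSTRAINT_POLICY,
 *          (thread_policy_t)&tc, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
 *      mach_port_deallocate(mach_task_self(), self);
 */
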
/*
 * THREAD_PRECEDENCE_POLICY:
 *
 * This may be used to indicate the relative value of the
 * computation compared to the other threads in the task.
 *
 * Parameters:
 *
 * importance: The importance is specified as a signed value.
 */

#define THREAD_PRECEDENCE_POLICY 3

struct thread_precedence_policy {
        integer_t importance;
};

typedef struct thread_precedence_policy thread_precedence_policy_data_t;
typedef struct thread_precedence_policy *thread_precedence_policy_t;

#define THREAD_PRECEDENCE_POLICY_COUNT ((mach_msg_type_number_t) \
        (sizeof (thread_precedence_policy_data_t) / sizeof (integer_t)))

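/*
 * Illustrative usage sketch (not part of the original header): de-emphasize a
 * hypothetical 'housekeeper' pthread_t relative to its siblings in the same
 * task.  The importance value is relative and only an example.
 *
 *      #include <mach/mach.h>
 *      #include <pthread.h>
 *
 *      thread_precedence_policy_data_t prec = { .importance = -10 };
 *      kern_return_t kr = thread_policy_set(pthread_mach_thread_np(housekeeper),
 *          THREAD_PRECEDENCE_POLICY, (thread_policy_t)&prec,
 *          THREAD_PRECEDENCE_POLICY_COUNT);
 */
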
/*
 * THREAD_AFFINITY_POLICY:
 *
 * This policy is experimental.
 * This may be used to express affinity relationships
 * between threads in the task. Threads with the same affinity tag will
 * be scheduled to share an L2 cache if possible. That is, affinity tags
 * are a hint to the scheduler for thread placement.
 *
 * The namespace of affinity tags is generally local to one task. However,
 * a child task created after the assignment of affinity tags by its parent
 * will share that namespace. In particular, a family of forked processes
 * may be created with a shared affinity namespace.
 *
 * Parameters:
 * tag: The affinity set identifier.
 */

#define THREAD_AFFINITY_POLICY 4

struct thread_affinity_policy {
        integer_t affinity_tag;
};

#define THREAD_AFFINITY_TAG_NULL 0

typedef struct thread_affinity_policy thread_affinity_policy_data_t;
typedef struct thread_affinity_policy *thread_affinity_policy_t;

#define THREAD_AFFINITY_POLICY_COUNT ((mach_msg_type_number_t) \
        (sizeof (thread_affinity_policy_data_t) / sizeof (integer_t)))

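/*
 * Illustrative usage sketch (not part of the original header): hint that two
 * hypothetical threads, 'producer' and 'consumer', should share an L2 cache
 * by giving them the same arbitrary, nonzero affinity tag.
 *
 *      #include <mach/mach.h>
 *      #include <pthread.h>
 *
 *      thread_affinity_policy_data_t ap = { .affinity_tag = 1 };
 *      thread_policy_set(pthread_mach_thread_np(producer), THREAD_AFFINITY_POLICY,
 *          (thread_policy_t)&ap, THREAD_AFFINITY_POLICY_COUNT);
 *      thread_policy_set(pthread_mach_thread_np(consumer), THREAD_AFFINITY_POLICY,
 *          (thread_policy_t)&ap, THREAD_AFFINITY_POLICY_COUNT);
 */
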
/*
 * THREAD_BACKGROUND_POLICY:
 */

#define THREAD_BACKGROUND_POLICY 5

struct thread_background_policy {
        integer_t priority;
};

#define THREAD_BACKGROUND_POLICY_DARWIN_BG 0x1000

typedef struct thread_background_policy thread_background_policy_data_t;
typedef struct thread_background_policy *thread_background_policy_t;

#define THREAD_BACKGROUND_POLICY_COUNT ((mach_msg_type_number_t) \
        (sizeof (thread_background_policy_data_t) / sizeof (integer_t)))

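/*
 * Illustrative usage sketch (not part of the original header): move the
 * calling thread into the DARWIN_BG band.  Userspace code more commonly
 * reaches the same state through higher-level interfaces (for example
 * setpriority(2) with PRIO_DARWIN_THREAD); this is the raw Mach form.
 *
 *      #include <mach/mach.h>
 *
 *      thread_background_policy_data_t bg = {
 *              .priority = THREAD_BACKGROUND_POLICY_DARWIN_BG
 *      };
 *      mach_port_t self = mach_thread_self();
 *      kern_return_t kr = thread_policy_set(self, THREAD_BACKGROUND_POLICY,
 *          (thread_policy_t)&bg, THREAD_BACKGROUND_POLICY_COUNT);
 *      mach_port_deallocate(mach_task_self(), self);
 */
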
#define THREAD_LATENCY_QOS_POLICY 7
typedef integer_t thread_latency_qos_t;

struct thread_latency_qos_policy {
        thread_latency_qos_t thread_latency_qos_tier;
};

typedef struct thread_latency_qos_policy thread_latency_qos_policy_data_t;
typedef struct thread_latency_qos_policy *thread_latency_qos_policy_t;

#define THREAD_LATENCY_QOS_POLICY_COUNT ((mach_msg_type_number_t) \
        (sizeof (thread_latency_qos_policy_data_t) / sizeof (integer_t)))

#define THREAD_THROUGHPUT_QOS_POLICY 8
typedef integer_t thread_throughput_qos_t;

struct thread_throughput_qos_policy {
        thread_throughput_qos_t thread_throughput_qos_tier;
};

typedef struct thread_throughput_qos_policy thread_throughput_qos_policy_data_t;
typedef struct thread_throughput_qos_policy *thread_throughput_qos_policy_t;

#define THREAD_THROUGHPUT_QOS_POLICY_COUNT ((mach_msg_type_number_t) \
        (sizeof (thread_throughput_qos_policy_data_t) / sizeof (integer_t)))

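/*
 * Illustrative usage sketch (not part of the original header).  It assumes
 * the tier values for these two flavors are the LATENCY_QOS_TIER_* and
 * THROUGHPUT_QOS_TIER_* constants from <mach/task_policy.h> in this source
 * tree, and that lower-numbered tiers request more favorable treatment;
 * 'thread' is a hypothetical mach_port_t for the target thread.
 *
 *      #include <mach/mach.h>
 *      #include <mach/task_policy.h>
 *
 *      thread_latency_qos_policy_data_t lat = {
 *              .thread_latency_qos_tier = LATENCY_QOS_TIER_0
 *      };
 *      thread_policy_set(thread, THREAD_LATENCY_QOS_POLICY,
 *          (thread_policy_t)&lat, THREAD_LATENCY_QOS_POLICY_COUNT);
 *
 *      thread_throughput_qos_policy_data_t tput = {
 *              .thread_throughput_qos_tier = THROUGHPUT_QOS_TIER_0
 *      };
 *      thread_policy_set(thread, THREAD_THROUGHPUT_QOS_POLICY,
 *          (thread_policy_t)&tput, THREAD_THROUGHPUT_QOS_POLICY_COUNT);
 */
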
#ifdef PRIVATE

/*
 * THREAD_POLICY_STATE:
 */
#define THREAD_POLICY_STATE 6

#define THREAD_POLICY_STATE_FLAG_STATIC_PARAM 0x1

struct thread_policy_state {
        integer_t requested;
        integer_t effective;
        integer_t pending;
        integer_t flags;
        uint64_t thps_requested_policy;
        uint64_t thps_effective_policy;
        uint32_t thps_user_promotions;
        uint32_t thps_user_promotion_basepri;
        uint32_t thps_ipc_overrides;
        uint32_t reserved32;
        uint64_t reserved[2];
};

typedef struct thread_policy_state thread_policy_state_data_t;
typedef struct thread_policy_state *thread_policy_state_t;

#define THREAD_POLICY_STATE_COUNT ((mach_msg_type_number_t) \
        (sizeof (thread_policy_state_data_t) / sizeof (integer_t)))

/*
 * THREAD_QOS_POLICY:
 */
#define THREAD_QOS_POLICY 9
#define THREAD_QOS_POLICY_OVERRIDE 10

#define THREAD_QOS_UNSPECIFIED 0
#define THREAD_QOS_DEFAULT THREAD_QOS_UNSPECIFIED /* Temporary rename */
#define THREAD_QOS_MAINTENANCE 1
#define THREAD_QOS_BACKGROUND 2
#define THREAD_QOS_UTILITY 3
#define THREAD_QOS_LEGACY 4 /* i.e. default workq threads */
#define THREAD_QOS_USER_INITIATED 5
#define THREAD_QOS_USER_INTERACTIVE 6

#define THREAD_QOS_LAST 7

#define THREAD_QOS_MIN_TIER_IMPORTANCE (-15)

/*
 * Overrides are inputs to the task/thread policy engine that
 * temporarily elevate the effective QoS of a thread without changing
 * its steady-state (and round-trip-able) requested QoS. The
 * interfaces into the kernel allow the caller to associate a resource
 * and type that describe the reason/lifecycle of the override. For
 * instance, a contended pthread_mutex_t held by a UTILITY thread
 * might get an override to USER_INTERACTIVE, with the resource
 * being the userspace address of the pthread_mutex_t. When the
 * owning thread releases that resource, it can call into the
 * task policy subsystem to drop the override because of that resource,
 * although if the thread holds more contended locks, the
 * effective QoS may remain overridden for longer.
 *
 * THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX is used for contended
 * pthread_mutex_t's via the pthread kext. The initial contender grants
 * the holder an override with resource=&mutex and a count of 1.
 * Subsequent contenders raise the QoS value, until the holder
 * decrements the count to 0 and the override is released.
 *
 * THREAD_QOS_OVERRIDE_TYPE_PTHREAD_RWLOCK is unimplemented and has no
 * specified semantics.
 *
 * THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE describes explicitly
 * paired start/end overrides on a target thread. The resource can
 * either be a memory allocation in userspace, or the pthread_t of the
 * overrider if no allocation was used.
 *
 * THREAD_QOS_OVERRIDE_TYPE_DISPATCH_ASYNCHRONOUS_OVERRIDE is used to
 * override the QoS of a thread currently draining a serial dispatch
 * queue, so that it can get to a block of higher QoS than its
 * predecessors. The override is applied by a thread enqueueing work
 * with resource=&queue, and reset by the thread that was overridden
 * once it has drained the queue. Since the ++ and reset are
 * asynchronous, there is the possibility of a ++ after the target
 * thread has issued a reset, in which case the workqueue thread may
 * issue a reset-all in its outermost scope before deciding whether it
 * should return to dequeueing work from the global concurrent queues,
 * or return to the kernel.
 *
 * THREAD_QOS_OVERRIDE_TYPE_WILDCARD is a catch-all which will reset every
 * resource matching the resource value. Passing
 * THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD as well will reset everything.
 */

#define THREAD_QOS_OVERRIDE_TYPE_UNKNOWN (0)
#define THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX (1)
#define THREAD_QOS_OVERRIDE_TYPE_PTHREAD_RWLOCK (2)
#define THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE (3)
#define THREAD_QOS_OVERRIDE_TYPE_DISPATCH_ASYNCHRONOUS_OVERRIDE (4)
#define THREAD_QOS_OVERRIDE_TYPE_WILDCARD (5)

/* A special resource value to indicate a resource wildcard */
#define THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD (~((user_addr_t)0))

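/*
 * Illustrative userspace sketch (not part of the original header): the
 * explicitly paired start/end overrides described above are assumed here to
 * surface in userspace as the pthread override API from <pthread/qos.h>.
 * 'owner' is a hypothetical pthread_t that currently holds a resource the
 * caller is about to wait on.
 *
 *      #include <pthread/qos.h>
 *
 *      pthread_override_t ov = pthread_override_qos_class_start_np(owner,
 *          QOS_CLASS_USER_INITIATED, 0);
 *      // ... wait for 'owner' to release the resource ...
 *      pthread_override_qos_class_end_np(ov);   // drop the paired override
 */
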
struct thread_qos_policy {
        integer_t qos_tier;
        integer_t tier_importance;
};

typedef struct thread_qos_policy thread_qos_policy_data_t;
typedef struct thread_qos_policy *thread_qos_policy_t;

#define THREAD_QOS_POLICY_COUNT ((mach_msg_type_number_t) \
        (sizeof (thread_qos_policy_data_t) / sizeof (integer_t)))

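/*
 * Illustrative usage sketch (not part of the original header): this is a
 * PRIVATE flavor, normally driven indirectly through the pthread QoS APIs
 * (e.g. pthread_set_qos_class_self_np()); the raw form is assumed to look
 * like this.  tier_importance is a relative priority expected to lie in
 * [THREAD_QOS_MIN_TIER_IMPORTANCE, 0]; the values below are examples only.
 *
 *      #include <mach/mach.h>
 *
 *      thread_qos_policy_data_t qos = {
 *              .qos_tier        = THREAD_QOS_UTILITY,
 *              .tier_importance = -5       // illustrative value
 *      };
 *      mach_port_t self = mach_thread_self();
 *      kern_return_t kr = thread_policy_set(self, THREAD_QOS_POLICY,
 *          (thread_policy_t)&qos, THREAD_QOS_POLICY_COUNT);
 *      mach_port_deallocate(mach_task_self(), self);
 */
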
#endif /* PRIVATE */

#ifdef PRIVATE

/*
 * Internal bitfields are privately exported for revlocked tracing tools
 * like msa to decode tracepoints.
 *
 * These struct definitions *will* change in the future.
 * When they do, we will update THREAD_POLICY_INTERNAL_STRUCT_VERSION.
 */

#define THREAD_POLICY_INTERNAL_STRUCT_VERSION 4

struct thread_requested_policy {
        uint64_t thrp_int_darwinbg      :1,     /* marked as darwinbg via setpriority */
                 thrp_ext_darwinbg      :1,
                 thrp_int_iotier        :2,     /* IO throttle tier */
                 thrp_ext_iotier        :2,
                 thrp_int_iopassive     :1,     /* should IOs cause lower tiers to be throttled */
                 thrp_ext_iopassive     :1,
                 thrp_latency_qos       :3,     /* Timer latency QoS */
                 thrp_through_qos       :3,     /* Computation throughput QoS */

                 thrp_pidbind_bg        :1,     /* task i'm bound to is marked 'watchbg' */
                 thrp_qos               :3,     /* thread qos class */
                 thrp_qos_relprio       :4,     /* thread qos relative priority (store as inverse, -10 -> 0xA) */
                 thrp_qos_override      :3,     /* thread qos class override */
                 thrp_qos_promote       :3,     /* thread qos class from promotion */
                 thrp_qos_ipc_override  :3,     /* thread qos class from ipc override */
                 thrp_terminated        :1,     /* heading for termination */
                 thrp_qos_sync_ipc_override :3, /* thread qos class from sync ipc override */

                 thrp_reserved          :29;
};

struct thread_effective_policy {
        uint64_t thep_darwinbg          :1,     /* marked as 'background', and sockets are marked bg when created */
                 thep_io_tier           :2,     /* effective throttle tier */
                 thep_io_passive        :1,     /* should IOs cause lower tiers to be throttled */
                 thep_all_sockets_bg    :1,     /* All existing sockets in process are marked as bg (thread: all created by thread) */
                 thep_new_sockets_bg    :1,     /* Newly created sockets should be marked as bg */
                 thep_terminated        :1,     /* all throttles have been removed for quick exit or SIGTERM handling */
                 thep_qos_ui_is_urgent  :1,     /* bump UI-Interactive QoS up to the urgent preemption band */
                 thep_latency_qos       :3,     /* Timer latency QoS level */
                 thep_through_qos       :3,     /* Computation throughput QoS level */

                 thep_qos               :3,     /* thread qos class */
                 thep_qos_relprio       :4,     /* thread qos relative priority (store as inverse, -10 -> 0xA) */
                 thep_qos_promote       :3,     /* thread qos class used for promotion */

                 thep_reserved          :40;
};

#endif /* PRIVATE */


#endif /* _MACH_THREAD_POLICY_H_ */