/* apple/xnu (xnu-1228.12.14): osfmk/kern/thread_policy.c */
/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/thread_act_server.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/affinity.h>

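/*
 * Kernel-side implementation of the Mach thread policy interface:
 * thread_policy_set() and thread_policy_get() for the EXTENDED,
 * TIME_CONSTRAINT, PRECEDENCE and AFFINITY flavors, plus the internal
 * helpers used to recompute a thread's scheduling priority.
 */
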
static void
thread_recompute_priority(
	thread_t		thread);

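/*
 * thread_recompute_priority:
 *
 * Recompute the thread's base priority from its current policy.
 * Realtime threads run at BASEPRI_RTQUEUES; otherwise the thread's
 * importance (clamped to +/- MAXPRI) is added to the task priority
 * and the result is limited to [MINPRI, thread->max_priority].
 * Called at splsched with the thread locked.
 */
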
kern_return_t
thread_policy_set(
	thread_t				thread,
	thread_policy_flavor_t	flavor,
	thread_policy_t			policy_info,
	mach_msg_type_number_t	count)
{
	kern_return_t			result = KERN_SUCCESS;
	spl_t					s;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);
	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_TERMINATED);
	}

	if (thread->static_param) {
		thread_mtx_unlock(thread);

		return (KERN_SUCCESS);
	}

	switch (flavor) {

	case THREAD_EXTENDED_POLICY:
	{
		boolean_t				timeshare = TRUE;

		if (count >= THREAD_EXTENDED_POLICY_COUNT) {
			thread_extended_policy_t	info;

			info = (thread_extended_policy_t)policy_info;
			timeshare = info->timeshare;
		}

		s = splsched();
		thread_lock(thread);

		if (!(thread->sched_mode & TH_MODE_FAILSAFE)) {
			integer_t	oldmode = (thread->sched_mode & TH_MODE_TIMESHARE);

			thread->sched_mode &= ~TH_MODE_REALTIME;

			if (timeshare && !oldmode) {
				thread->sched_mode |= TH_MODE_TIMESHARE;

				if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
					sched_share_incr();
			}
			else
			if (!timeshare && oldmode) {
				thread->sched_mode &= ~TH_MODE_TIMESHARE;

				if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
					sched_share_decr();
			}

			thread_recompute_priority(thread);
		}
		else {
			thread->safe_mode &= ~TH_MODE_REALTIME;

			if (timeshare)
				thread->safe_mode |= TH_MODE_TIMESHARE;
			else
				thread->safe_mode &= ~TH_MODE_TIMESHARE;
		}

		thread_unlock(thread);
		splx(s);

		break;
	}

	case THREAD_TIME_CONSTRAINT_POLICY:
	{
		thread_time_constraint_policy_t		info;

		if (count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_time_constraint_policy_t)policy_info;
		if (	info->constraint < info->computation	||
				info->computation > max_rt_quantum		||
				info->computation < min_rt_quantum		) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		s = splsched();
		thread_lock(thread);

		thread->realtime.period = info->period;
		thread->realtime.computation = info->computation;
		thread->realtime.constraint = info->constraint;
		thread->realtime.preemptible = info->preemptible;

		if (!(thread->sched_mode & TH_MODE_FAILSAFE)) {
			if (thread->sched_mode & TH_MODE_TIMESHARE) {
				thread->sched_mode &= ~TH_MODE_TIMESHARE;

				if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
					sched_share_decr();
			}
			thread->sched_mode |= TH_MODE_REALTIME;
			thread_recompute_priority(thread);
		}
		else {
			thread->safe_mode &= ~TH_MODE_TIMESHARE;
			thread->safe_mode |= TH_MODE_REALTIME;
		}

		thread_unlock(thread);
		splx(s);

		break;
	}

	case THREAD_PRECEDENCE_POLICY:
	{
		thread_precedence_policy_t		info;

		if (count < THREAD_PRECEDENCE_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_precedence_policy_t)policy_info;

		s = splsched();
		thread_lock(thread);

		thread->importance = info->importance;

		thread_recompute_priority(thread);

		thread_unlock(thread);
		splx(s);

		break;
	}

	case THREAD_AFFINITY_POLICY:
	{
		thread_affinity_policy_t	info;

		if (!thread_affinity_is_supported()) {
			result = KERN_NOT_SUPPORTED;
			break;
		}
		if (count < THREAD_AFFINITY_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_affinity_policy_t) policy_info;
		/*
		 * Unlock the thread mutex here and
		 * return directly after calling thread_affinity_set().
		 * This is necessary for correct lock ordering because
		 * thread_affinity_set() takes the task lock.
		 */
		thread_mtx_unlock(thread);
		return thread_affinity_set(thread, info->affinity_tag);
	}

	default:
		result = KERN_INVALID_ARGUMENT;
		break;
	}

	thread_mtx_unlock(thread);

	return (result);
}

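/*
 * Illustrative (non-authoritative) sketch of how user space typically
 * reaches this routine through the Mach interface in <mach/thread_policy.h>,
 * here requesting a time-constraint (real-time) policy for the current
 * thread.  The period/computation/constraint values are placeholders and
 * are expressed in mach_absolute_time() units.
 *
 *	#include <mach/mach.h>
 *	#include <mach/thread_policy.h>
 *
 *	static kern_return_t
 *	make_self_realtime(uint32_t period, uint32_t computation, uint32_t constraint)
 *	{
 *		thread_time_constraint_policy_data_t	policy;
 *
 *		policy.period = period;
 *		policy.computation = computation;
 *		policy.constraint = constraint;
 *		policy.preemptible = TRUE;
 *
 *		return thread_policy_set(mach_thread_self(),
 *					THREAD_TIME_CONSTRAINT_POLICY,
 *					(thread_policy_t)&policy,
 *					THREAD_TIME_CONSTRAINT_POLICY_COUNT);
 *	}
 */
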
static void
thread_recompute_priority(
	thread_t		thread)
{
	integer_t		priority;

	if (thread->sched_mode & TH_MODE_REALTIME)
		priority = BASEPRI_RTQUEUES;
	else {
		if (thread->importance > MAXPRI)
			priority = MAXPRI;
		else
		if (thread->importance < -MAXPRI)
			priority = -MAXPRI;
		else
			priority = thread->importance;

		priority += thread->task_priority;

		if (priority > thread->max_priority)
			priority = thread->max_priority;
		else
		if (priority < MINPRI)
			priority = MINPRI;
	}

	set_priority(thread, priority);
}

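/*
 * thread_task_priority:
 *
 * Adopt a new base and maximum priority inherited from the owning task
 * and recompute the thread's scheduled priority accordingly.
 */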
void
thread_task_priority(
	thread_t		thread,
	integer_t		priority,
	integer_t		max_priority)
{
	spl_t				s;

	assert(thread != THREAD_NULL);

	s = splsched();
	thread_lock(thread);

	thread->task_priority = priority;
	thread->max_priority = max_priority;

	thread_recompute_priority(thread);

	thread_unlock(thread);
	splx(s);
}

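/*
 * thread_policy_reset:
 *
 * Reset a thread to the default timeshare policy: drop any real-time
 * or failsafe state, zero the importance, and recompute the priority.
 */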
void
thread_policy_reset(
	thread_t		thread)
{
	spl_t		s;

	s = splsched();
	thread_lock(thread);

	if (!(thread->sched_mode & TH_MODE_FAILSAFE)) {
		thread->sched_mode &= ~TH_MODE_REALTIME;

		if (!(thread->sched_mode & TH_MODE_TIMESHARE)) {
			thread->sched_mode |= TH_MODE_TIMESHARE;

			if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
				sched_share_incr();
		}
	}
	else {
		thread->safe_mode = 0;
		thread->sched_mode &= ~TH_MODE_FAILSAFE;
	}

	thread->importance = 0;

	thread_recompute_priority(thread);

	thread_unlock(thread);
	splx(s);
}

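/*
 * thread_policy_get:
 *
 * Return a thread's current policy settings, or the defaults when
 * *get_default is TRUE on entry or when the thread is not using the
 * requested flavor; on return *get_default reports which was used.
 *
 * Illustrative (non-authoritative) user-space sketch querying the
 * extended policy of the current thread:
 *
 *	#include <mach/mach.h>
 *	#include <mach/thread_policy.h>
 *
 *	thread_extended_policy_data_t	info;
 *	mach_msg_type_number_t		count = THREAD_EXTENDED_POLICY_COUNT;
 *	boolean_t			get_default = FALSE;
 *
 *	kern_return_t kr = thread_policy_get(mach_thread_self(),
 *				THREAD_EXTENDED_POLICY,
 *				(thread_policy_t)&info,
 *				&count, &get_default);
 *
 *	On success, info.timeshare indicates whether the thread is in
 *	the timeshare scheduling class.
 */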
kern_return_t
thread_policy_get(
	thread_t				thread,
	thread_policy_flavor_t	flavor,
	thread_policy_t			policy_info,
	mach_msg_type_number_t	*count,
	boolean_t				*get_default)
{
	kern_return_t			result = KERN_SUCCESS;
	spl_t					s;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);
	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_TERMINATED);
	}

	switch (flavor) {

	case THREAD_EXTENDED_POLICY:
	{
		boolean_t		timeshare = TRUE;

		if (!(*get_default)) {
			s = splsched();
			thread_lock(thread);

			if (	!(thread->sched_mode & TH_MODE_REALTIME)	&&
					!(thread->safe_mode & TH_MODE_REALTIME)			) {
				if (!(thread->sched_mode & TH_MODE_FAILSAFE))
					timeshare = (thread->sched_mode & TH_MODE_TIMESHARE) != 0;
				else
					timeshare = (thread->safe_mode & TH_MODE_TIMESHARE) != 0;
			}
			else
				*get_default = TRUE;

			thread_unlock(thread);
			splx(s);
		}

		if (*count >= THREAD_EXTENDED_POLICY_COUNT) {
			thread_extended_policy_t	info;

			info = (thread_extended_policy_t)policy_info;
			info->timeshare = timeshare;
		}

		break;
	}

	case THREAD_TIME_CONSTRAINT_POLICY:
	{
		thread_time_constraint_policy_t		info;

		if (*count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_time_constraint_policy_t)policy_info;

		if (!(*get_default)) {
			s = splsched();
			thread_lock(thread);

			if (	(thread->sched_mode & TH_MODE_REALTIME)	||
					(thread->safe_mode & TH_MODE_REALTIME)		) {
				info->period = thread->realtime.period;
				info->computation = thread->realtime.computation;
				info->constraint = thread->realtime.constraint;
				info->preemptible = thread->realtime.preemptible;
			}
			else
				*get_default = TRUE;

			thread_unlock(thread);
			splx(s);
		}

		if (*get_default) {
			info->period = 0;
			info->computation = std_quantum / 2;
			info->constraint = std_quantum;
			info->preemptible = TRUE;
		}

		break;
	}

	case THREAD_PRECEDENCE_POLICY:
	{
		thread_precedence_policy_t		info;

		if (*count < THREAD_PRECEDENCE_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_precedence_policy_t)policy_info;

		if (!(*get_default)) {
			s = splsched();
			thread_lock(thread);

			info->importance = thread->importance;

			thread_unlock(thread);
			splx(s);
		}
		else
			info->importance = 0;

		break;
	}

	case THREAD_AFFINITY_POLICY:
	{
		thread_affinity_policy_t		info;

		if (!thread_affinity_is_supported()) {
			result = KERN_NOT_SUPPORTED;
			break;
		}
		if (*count < THREAD_AFFINITY_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_affinity_policy_t)policy_info;

		if (!(*get_default))
			info->affinity_tag = thread_affinity_get(thread);
		else
			info->affinity_tag = THREAD_AFFINITY_TAG_NULL;

		break;
	}

	default:
		result = KERN_INVALID_ARGUMENT;
		break;
	}

	thread_mtx_unlock(thread);

	return (result);
}