/*
 * osfmk/kern/thread_policy.c (Apple xnu-1228.12.14)
 */
/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
29#include <mach/mach_types.h>
30#include <mach/thread_act_server.h>
31
32#include <kern/kern_types.h>
33#include <kern/processor.h>
34#include <kern/thread.h>
35#include <kern/affinity.h>
36
static void
thread_recompute_priority(
	thread_t		thread);

/*
 *	thread_policy_set:
 *
 *	Apply a scheduling policy to a thread.  Supported flavors:
 *	THREAD_EXTENDED_POLICY (timeshare on/off), THREAD_TIME_CONSTRAINT_POLICY
 *	(realtime parameters), THREAD_PRECEDENCE_POLICY (importance), and
 *	THREAD_AFFINITY_POLICY (affinity tag).
 *
 *	Lock discipline: the thread mutex is held across the whole call; the
 *	per-flavor updates additionally take the scheduler lock at splsched
 *	before touching sched_mode / priority state.  The affinity case is
 *	special-cased to drop the mutex first (see comment there).
 *
 *	Returns KERN_INVALID_ARGUMENT for a null thread, bad flavor or short
 *	count; KERN_TERMINATED if the thread is no longer active.
 */
kern_return_t
thread_policy_set(
	thread_t				thread,
	thread_policy_flavor_t	flavor,
	thread_policy_t			policy_info,
	mach_msg_type_number_t	count)
{
	kern_return_t			result = KERN_SUCCESS;
	spl_t					s;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);
	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_TERMINATED);
	}

	/*
	 * Threads marked static_param have pinned scheduling parameters:
	 * the request is silently ignored (reported as success).
	 */
	if (thread->static_param) {
		thread_mtx_unlock(thread);

		return (KERN_SUCCESS);
	}

	switch (flavor) {

	case THREAD_EXTENDED_POLICY:
	{
		/* With no (or a short) info struct, default to timeshare. */
		boolean_t				timeshare = TRUE;

		if (count >= THREAD_EXTENDED_POLICY_COUNT) {
			thread_extended_policy_t	info;

			info = (thread_extended_policy_t)policy_info;
			timeshare = info->timeshare;
		}

		s = splsched();
		thread_lock(thread);

		if (!(thread->sched_mode & TH_MODE_FAILSAFE)) {
			integer_t	oldmode = (thread->sched_mode & TH_MODE_TIMESHARE);

			/* Extended policy always clears any realtime designation. */
			thread->sched_mode &= ~TH_MODE_REALTIME;

			if (timeshare && !oldmode) {
				thread->sched_mode |= TH_MODE_TIMESHARE;

				/*
				 * Keep the global timeshare load count in step:
				 * a running (and not idling) thread entering
				 * timeshare is added to the share load.
				 */
				if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
					sched_share_incr();
			}
			else
			if (!timeshare && oldmode) {
				thread->sched_mode &= ~TH_MODE_TIMESHARE;

				/* ...and one leaving timeshare is removed. */
				if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
					sched_share_decr();
			}

			thread_recompute_priority(thread);
		}
		else {
			/*
			 * Failsafe engaged: the scheduler is temporarily forcing
			 * a mode.  Record the requested mode in safe_mode so it
			 * takes effect when the failsafe is released.
			 */
			thread->safe_mode &= ~TH_MODE_REALTIME;

			if (timeshare)
				thread->safe_mode |= TH_MODE_TIMESHARE;
			else
				thread->safe_mode &= ~TH_MODE_TIMESHARE;
		}

		thread_unlock(thread);
		splx(s);

		break;
	}

	case THREAD_TIME_CONSTRAINT_POLICY:
	{
		thread_time_constraint_policy_t	info;

		if (count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		/*
		 * Reject a computation time that exceeds the constraint or
		 * falls outside [min_rt_quantum, max_rt_quantum].  Note that
		 * info->period is stored unvalidated.
		 */
		info = (thread_time_constraint_policy_t)policy_info;
		if (	info->constraint < info->computation	||
				info->computation > max_rt_quantum		||
				info->computation < min_rt_quantum		) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		s = splsched();
		thread_lock(thread);

		thread->realtime.period = info->period;
		thread->realtime.computation = info->computation;
		thread->realtime.constraint = info->constraint;
		thread->realtime.preemptible = info->preemptible;

		if (!(thread->sched_mode & TH_MODE_FAILSAFE)) {
			if (thread->sched_mode & TH_MODE_TIMESHARE) {
				thread->sched_mode &= ~TH_MODE_TIMESHARE;

				/* Leaving timeshare while runnable: drop share load. */
				if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
					sched_share_decr();
			}
			thread->sched_mode |= TH_MODE_REALTIME;
			thread_recompute_priority(thread);
		}
		else {
			/* Failsafe engaged: stage realtime mode in safe_mode. */
			thread->safe_mode &= ~TH_MODE_TIMESHARE;
			thread->safe_mode |= TH_MODE_REALTIME;
		}

		thread_unlock(thread);
		splx(s);

		break;
	}

	case THREAD_PRECEDENCE_POLICY:
	{
		thread_precedence_policy_t	info;

		if (count < THREAD_PRECEDENCE_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_precedence_policy_t)policy_info;

		s = splsched();
		thread_lock(thread);

		/* Importance biases the base priority; clamped on recompute. */
		thread->importance = info->importance;

		thread_recompute_priority(thread);

		thread_unlock(thread);
		splx(s);

		break;
	}

	case THREAD_AFFINITY_POLICY:
	{
		thread_affinity_policy_t	info;

		if (!thread_affinity_is_supported()) {
			result = KERN_NOT_SUPPORTED;
			break;
		}
		if (count < THREAD_AFFINITY_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_affinity_policy_t) policy_info;
		/*
		 * Unlock the thread mutex here and
		 * return directly after calling thread_affinity_set().
		 * This is necessary for correct lock ordering because
		 * thread_affinity_set() takes the task lock.
		 */
		thread_mtx_unlock(thread);
		return thread_affinity_set(thread, info->affinity_tag);
	}
	default:
		result = KERN_INVALID_ARGUMENT;
		break;
	}

	thread_mtx_unlock(thread);

	return (result);
}
221
222static void
223thread_recompute_priority(
224 thread_t thread)
225{
226 integer_t priority;
227
228 if (thread->sched_mode & TH_MODE_REALTIME)
229 priority = BASEPRI_RTQUEUES;
230 else {
231 if (thread->importance > MAXPRI)
232 priority = MAXPRI;
233 else
234 if (thread->importance < -MAXPRI)
235 priority = -MAXPRI;
236 else
237 priority = thread->importance;
238
239 priority += thread->task_priority;
240
241 if (priority > thread->max_priority)
242 priority = thread->max_priority;
243 else
244 if (priority < MINPRI)
245 priority = MINPRI;
246 }
247
248 set_priority(thread, priority);
249}
250
251void
252thread_task_priority(
253 thread_t thread,
254 integer_t priority,
255 integer_t max_priority)
256{
257 spl_t s;
258
259 assert(thread != THREAD_NULL);
260
261 s = splsched();
262 thread_lock(thread);
263
264 thread->task_priority = priority;
265 thread->max_priority = max_priority;
266
267 thread_recompute_priority(thread);
268
269 thread_unlock(thread);
270 splx(s);
271}
272
/*
 *	thread_policy_reset:
 *
 *	Reset a thread to default scheduling: timeshare mode with zero
 *	importance.  Any engaged failsafe is released and its staged
 *	safe_mode discarded.  Takes the scheduler lock at splsched.
 */
void
thread_policy_reset(
	thread_t		thread)
{
	spl_t		s;

	s = splsched();
	thread_lock(thread);

	if (!(thread->sched_mode & TH_MODE_FAILSAFE)) {
		/* Drop any realtime designation; return to timeshare. */
		thread->sched_mode &= ~TH_MODE_REALTIME;

		if (!(thread->sched_mode & TH_MODE_TIMESHARE)) {
			thread->sched_mode |= TH_MODE_TIMESHARE;

			/* A running thread entering timeshare joins the share load. */
			if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
				sched_share_incr();
		}
	}
	else {
		/*
		 * Failsafe engaged: discard the staged mode and clear the
		 * failsafe bit, keeping whatever mode the failsafe imposed
		 * (presumably timeshare — confirm engagement invariant).
		 */
		thread->safe_mode = 0;
		thread->sched_mode &= ~TH_MODE_FAILSAFE;
	}

	thread->importance = 0;

	thread_recompute_priority(thread);

	thread_unlock(thread);
	splx(s);
}
304
/*
 *	thread_policy_get:
 *
 *	Return a thread's scheduling policy for the requested flavor.
 *
 *	*get_default is both an input and an output: callers pass TRUE to
 *	request the default policy values, and the routine sets it to TRUE
 *	itself when the thread's current state does not match the requested
 *	flavor (e.g. asking for the extended policy of a realtime thread).
 *
 *	Holds the thread mutex; reads of scheduler state additionally take
 *	the thread lock at splsched.
 */
kern_return_t
thread_policy_get(
	thread_t				thread,
	thread_policy_flavor_t	flavor,
	thread_policy_t			policy_info,
	mach_msg_type_number_t	*count,
	boolean_t				*get_default)
{
	kern_return_t			result = KERN_SUCCESS;
	spl_t					s;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);
	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_TERMINATED);
	}

	switch (flavor) {

	case THREAD_EXTENDED_POLICY:
	{
		boolean_t		timeshare = TRUE;

		if (!(*get_default)) {
			s = splsched();
			thread_lock(thread);

			/*
			 * Only meaningful for a non-realtime thread (in both the
			 * live and failsafe-staged modes); otherwise report the
			 * default.  Under failsafe, the staged safe_mode is the
			 * thread's true policy.
			 */
			if (	!(thread->sched_mode & TH_MODE_REALTIME)	&&
					!(thread->safe_mode & TH_MODE_REALTIME)			) {
				if (!(thread->sched_mode & TH_MODE_FAILSAFE))
					timeshare = (thread->sched_mode & TH_MODE_TIMESHARE) != 0;
				else
					timeshare = (thread->safe_mode & TH_MODE_TIMESHARE) != 0;
			}
			else
				*get_default = TRUE;

			thread_unlock(thread);
			splx(s);
		}

		/* Tolerate a short buffer: simply omit the info. */
		if (*count >= THREAD_EXTENDED_POLICY_COUNT) {
			thread_extended_policy_t	info;

			info = (thread_extended_policy_t)policy_info;
			info->timeshare = timeshare;
		}

		break;
	}

	case THREAD_TIME_CONSTRAINT_POLICY:
	{
		thread_time_constraint_policy_t		info;

		if (*count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_time_constraint_policy_t)policy_info;

		if (!(*get_default)) {
			s = splsched();
			thread_lock(thread);

			/* Realtime (live or failsafe-staged): report stored params. */
			if (	(thread->sched_mode & TH_MODE_REALTIME)	||
					(thread->safe_mode & TH_MODE_REALTIME)		) {
				info->period = thread->realtime.period;
				info->computation = thread->realtime.computation;
				info->constraint = thread->realtime.constraint;
				info->preemptible = thread->realtime.preemptible;
			}
			else
				*get_default = TRUE;

			thread_unlock(thread);
			splx(s);
		}

		/* Defaults: half a standard quantum of computation per quantum. */
		if (*get_default) {
			info->period = 0;
			info->computation = std_quantum / 2;
			info->constraint = std_quantum;
			info->preemptible = TRUE;
		}

		break;
	}

	case THREAD_PRECEDENCE_POLICY:
	{
		thread_precedence_policy_t		info;

		if (*count < THREAD_PRECEDENCE_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_precedence_policy_t)policy_info;

		if (!(*get_default)) {
			s = splsched();
			thread_lock(thread);

			info->importance = thread->importance;

			thread_unlock(thread);
			splx(s);
		}
		else
			info->importance = 0;

		break;
	}

	case THREAD_AFFINITY_POLICY:
	{
		thread_affinity_policy_t		info;

		if (!thread_affinity_is_supported()) {
			result = KERN_NOT_SUPPORTED;
			break;
		}
		if (*count < THREAD_AFFINITY_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_affinity_policy_t)policy_info;

		if (!(*get_default))
			info->affinity_tag = thread_affinity_get(thread);
		else
			info->affinity_tag = THREAD_AFFINITY_TAG_NULL;

		break;
	}

	default:
		result = KERN_INVALID_ARGUMENT;
		break;
	}

	thread_mtx_unlock(thread);

	return (result);
}