/* osfmk/kern/thread_policy.c — from Apple xnu-1699.22.81 (git.saurik.com mirror) */
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach/mach_types.h>
30 #include <mach/thread_act_server.h>
31
32 #include <kern/kern_types.h>
33 #include <kern/processor.h>
34 #include <kern/thread.h>
35 #include <kern/affinity.h>
36
37 static void
38 thread_recompute_priority(
39 thread_t thread);
40
41 #if CONFIG_EMBEDDED
42 static void
43 thread_throttle(
44 thread_t thread,
45 integer_t task_priority);
46
47 extern int mach_do_background_thread(thread_t thread, int prio);
48 #endif
49
50
51 kern_return_t
52 thread_policy_set(
53 thread_t thread,
54 thread_policy_flavor_t flavor,
55 thread_policy_t policy_info,
56 mach_msg_type_number_t count)
57 {
58
59 if (thread == THREAD_NULL)
60 return (KERN_INVALID_ARGUMENT);
61
62 if (thread->static_param)
63 return (KERN_SUCCESS);
64
65 return (thread_policy_set_internal(thread, flavor, policy_info, count));
66 }
67
/*
 *	thread_policy_set_internal:
 *
 *	Worker for thread_policy_set(): applies the requested scheduling
 *	policy flavor to a thread.  Unlike thread_policy_set(), this does
 *	not honor thread->static_param, so it can override pinned threads.
 *
 *	Locking: takes the thread mutex for the duration, and the thread
 *	(scheduler) lock at splsched around any state the scheduler reads.
 *	The affinity and background cases unlock and return early because
 *	their callees take locks (e.g. the task lock) that must not nest
 *	inside the thread mutex.
 */
kern_return_t
thread_policy_set_internal(
	thread_t				thread,
	thread_policy_flavor_t	flavor,
	thread_policy_t			policy_info,
	mach_msg_type_number_t	count)
{
	kern_return_t			result = KERN_SUCCESS;
	spl_t					s;

	thread_mtx_lock(thread);
	if (!thread->active) {
		thread_mtx_unlock(thread);

		/* Thread is terminating; nothing can be set. */
		return (KERN_TERMINATED);
	}
	switch (flavor) {

	case THREAD_EXTENDED_POLICY:
	{
		boolean_t				timeshare = TRUE;

		/* A short (or absent) info structure defaults to timeshare. */
		if (count >= THREAD_EXTENDED_POLICY_COUNT) {
			thread_extended_policy_t	info;

			info = (thread_extended_policy_t)policy_info;
			timeshare = info->timeshare;
		}

		/* Fall back to fixed priority if the scheduler lacks timeshare. */
		if (!SCHED(supports_timeshare_mode)())
			timeshare = FALSE;

		s = splsched();
		thread_lock(thread);

		if (!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK)) {
			/* Nonzero iff the thread is currently timeshare. */
			integer_t	oldmode = (thread->sched_mode == TH_MODE_TIMESHARE);

			if (timeshare) {
				thread->sched_mode = TH_MODE_TIMESHARE;

				if (!oldmode) {
					/* Runnable thread entered timeshare: bump share load. */
					if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
						sched_share_incr();
				}
			}
			else {
				thread->sched_mode = TH_MODE_FIXED;

				if (oldmode) {
					/* Runnable thread left timeshare: drop share load. */
					if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
						sched_share_decr();
				}
			}

			thread_recompute_priority(thread);
		}
		else {
			/*
			 * Thread is demoted (failsafe or throttle): only record the
			 * requested mode, to take effect when the demotion is lifted.
			 */
			if (timeshare)
				thread->saved_mode = TH_MODE_TIMESHARE;
			else
				thread->saved_mode = TH_MODE_FIXED;
		}

		thread_unlock(thread);
		splx(s);

		break;
	}

	case THREAD_TIME_CONSTRAINT_POLICY:
	{
		thread_time_constraint_policy_t		info;

		if (count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_time_constraint_policy_t)policy_info;
		/*
		 * The computation interval must fit within the constraint and
		 * within the scheduler's real-time quantum bounds.
		 */
		if (	info->constraint < info->computation	||
				info->computation > max_rt_quantum		||
				info->computation < min_rt_quantum		) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		s = splsched();
		thread_lock(thread);

		thread->realtime.period = info->period;
		thread->realtime.computation = info->computation;
		thread->realtime.constraint = info->constraint;
		thread->realtime.preemptible = info->preemptible;

		if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
			/* Demoted: defer the switch to real time until repromotion. */
			thread->saved_mode = TH_MODE_REALTIME;
		}
#if CONFIG_EMBEDDED
		else if (thread->task_priority <= MAXPRI_THROTTLE) {
			/* Task is throttled: record real time as deferred. */
			thread->saved_mode = TH_MODE_REALTIME;
			thread->sched_flags |= TH_SFLAG_THROTTLED;
		}
#endif
		else {
			if (thread->sched_mode == TH_MODE_TIMESHARE) {
				/* Leaving timeshare while runnable: drop share load. */
				if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
					sched_share_decr();
			}
			thread->sched_mode = TH_MODE_REALTIME;
			thread_recompute_priority(thread);
		}

		thread_unlock(thread);
		splx(s);

		break;
	}

	case THREAD_PRECEDENCE_POLICY:
	{
		thread_precedence_policy_t		info;

		if (count < THREAD_PRECEDENCE_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}
		info = (thread_precedence_policy_t)policy_info;

		s = splsched();
		thread_lock(thread);

		/* Importance biases the thread's priority within its task. */
		thread->importance = info->importance;

		thread_recompute_priority(thread);

		thread_unlock(thread);
		splx(s);

		break;
	}

	case THREAD_AFFINITY_POLICY:
	{
		thread_affinity_policy_t	info;

		if (!thread_affinity_is_supported()) {
			result = KERN_NOT_SUPPORTED;
			break;
		}
		if (count < THREAD_AFFINITY_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_affinity_policy_t) policy_info;
		/*
		 * Unlock the thread mutex here and
		 * return directly after calling thread_affinity_set().
		 * This is necessary for correct lock ordering because
		 * thread_affinity_set() takes the task lock.
		 */
		thread_mtx_unlock(thread);
		return thread_affinity_set(thread, info->affinity_tag);
	}

#if CONFIG_EMBEDDED
	case THREAD_BACKGROUND_POLICY:
	{
		thread_background_policy_t	info;

		/* NOTE(review): count is not validated against a
		 * THREAD_BACKGROUND_POLICY count here, unlike every other
		 * flavor — confirm callers always supply a full structure. */
		info = (thread_background_policy_t) policy_info;

		/* Drop the mutex before calling out, mirroring the affinity case. */
		thread_mtx_unlock(thread);
		return mach_do_background_thread(thread, info->priority);
	}
#endif /* CONFIG_EMBEDDED */

	default:
		result = KERN_INVALID_ARGUMENT;
		break;
	}

	thread_mtx_unlock(thread);
	return (result);
}
255
256 static void
257 thread_recompute_priority(
258 thread_t thread)
259 {
260 integer_t priority;
261
262 if (thread->sched_mode == TH_MODE_REALTIME)
263 priority = BASEPRI_RTQUEUES;
264 else {
265 if (thread->importance > MAXPRI)
266 priority = MAXPRI;
267 else
268 if (thread->importance < -MAXPRI)
269 priority = -MAXPRI;
270 else
271 priority = thread->importance;
272
273 priority += thread->task_priority;
274
275 if (priority > thread->max_priority)
276 priority = thread->max_priority;
277 else
278 if (priority < MINPRI)
279 priority = MINPRI;
280 #if CONFIG_EMBEDDED
281 /* No one can have a base priority less than MAXPRI_THROTTLE */
282 if (priority < MAXPRI_THROTTLE)
283 priority = MAXPRI_THROTTLE;
284 #endif /* CONFIG_EMBEDDED */
285 }
286
287 set_priority(thread, priority);
288 }
289
#if CONFIG_EMBEDDED
/*
 *	thread_throttle:
 *
 *	Called with the thread locked at splsched when its task's priority
 *	is changing.  Demotes a real-time thread to a fair-share mode while
 *	the task is throttled (task_priority <= MAXPRI_THROTTLE), and
 *	restores the saved real-time mode on unthrottle.
 *
 *	Fix: the previous revision tested the *pre-transition* mode for
 *	sched_share_incr() (nested inside a check that the mode was
 *	TH_MODE_REALTIME) and the *post-transition* mode for
 *	sched_share_decr(), so both calls were unreachable and the
 *	timeshare load count was never adjusted across a throttle cycle.
 *	Both adjustments are now made against the actual mode transition,
 *	keeping incr/decr balanced.
 */
static void
thread_throttle(
	thread_t		thread,
	integer_t		task_priority)
{
	sched_mode_t	oldmode;

	if (	!(thread->sched_flags & TH_SFLAG_THROTTLED)	&&
			(task_priority <= MAXPRI_THROTTLE)			) {

		/* Only real-time threads (current or already demoted) throttle. */
		if (!((thread->sched_mode == TH_MODE_REALTIME) ||
			  (thread->saved_mode == TH_MODE_REALTIME))) {
			return;
		}

		oldmode = thread->sched_mode;

		/* Remember that real time must be restored on unthrottle. */
		if (oldmode == TH_MODE_REALTIME)
			thread->saved_mode = TH_MODE_REALTIME;

		/* TH_SFLAG_FAILSAFE and TH_SFLAG_THROTTLED are mutually exclusive,
		 * since a throttled thread is not realtime during the throttle
		 * and doesn't need the failsafe repromotion. We therefore clear
		 * the former and set the latter flags here.
		 */
		thread->sched_flags &= ~TH_SFLAG_FAILSAFE;
		thread->sched_flags |= TH_SFLAG_THROTTLED;

		/* Demote to timeshare if throttling (fixed if unsupported). */
		if (SCHED(supports_timeshare_mode)())
			thread->sched_mode = TH_MODE_TIMESHARE;
		else
			thread->sched_mode = TH_MODE_FIXED;

		/* Account for a runnable thread newly entering timeshare. */
		if (	(oldmode != TH_MODE_TIMESHARE)				&&
				(thread->sched_mode == TH_MODE_TIMESHARE)	) {
			if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
				sched_share_incr();
		}
	}
	else if (	(thread->sched_flags & TH_SFLAG_THROTTLED)	&&
				(task_priority > MAXPRI_THROTTLE)			) {

		/* Promote back to real time if unthrottling */
		if (!(thread->saved_mode == TH_MODE_TIMESHARE)) {

			oldmode = thread->sched_mode;

			thread->sched_mode = thread->saved_mode;

			/* Account for a runnable thread leaving timeshare. */
			if (	(oldmode == TH_MODE_TIMESHARE)				&&
					(thread->sched_mode != TH_MODE_TIMESHARE)	) {
				if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
					sched_share_decr();
			}

			thread->saved_mode = TH_MODE_NONE;
		}

		thread->sched_flags &= ~TH_SFLAG_THROTTLED;
	}
}
#endif
348
/*
 *	thread_task_priority:
 *
 *	Propagate a task priority change to one of its threads, then
 *	recompute the thread's base priority under the new limits.
 *	Takes the thread lock at splsched for the duration.
 */
void
thread_task_priority(
	thread_t		thread,
	integer_t		priority,
	integer_t		max_priority)
{
	spl_t				s;

	assert(thread != THREAD_NULL);

	s = splsched();
	thread_lock(thread);

#if CONFIG_EMBEDDED
	/* Re-evaluate throttle state against the task's new priority
	 * before the thread-visible fields below are updated. */
	thread_throttle(thread, priority);
#endif

	thread->task_priority = priority;
	thread->max_priority = max_priority;

	thread_recompute_priority(thread);

	thread_unlock(thread);
	splx(s);
}
374
/*
 *	thread_policy_reset:
 *
 *	Reset a thread's scheduling policy to the scheduler's default for
 *	its task, clearing any importance bias and demotion bookkeeping.
 *	Takes the thread lock at splsched.
 */
void
thread_policy_reset(
	thread_t		thread)
{
	spl_t		s;

	s = splsched();
	thread_lock(thread);

	if (!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK)) {
		sched_mode_t oldmode = thread->sched_mode;

		/* Adopt the scheduler's initial mode for this thread's task. */
		thread->sched_mode = SCHED(initial_thread_sched_mode)(thread->task);

		if ((oldmode != TH_MODE_TIMESHARE) && (thread->sched_mode == TH_MODE_TIMESHARE)) {
			/* Runnable thread entered timeshare: bump the share load. */
			if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
				sched_share_incr();
		}
	}
	else {
		/* Demoted: drop the saved mode and clear the demotion flags.
		 * sched_mode itself is left as-is, so the demoted mode becomes
		 * the thread's actual mode from here on. */
		thread->saved_mode = TH_MODE_NONE;
		thread->sched_flags &= ~TH_SFLAG_DEMOTED_MASK;
	}

	thread->importance = 0;

	thread_recompute_priority(thread);

	thread_unlock(thread);
	splx(s);
}
407
/*
 *	thread_policy_get:
 *
 *	Return a thread's current (or default) scheduling policy for the
 *	given flavor.  *get_default is in/out: TRUE on entry requests the
 *	defaults; it is also set TRUE on return when the thread's current
 *	state does not match the requested flavor (e.g. time constraints
 *	queried on a non-real-time thread).
 */
kern_return_t
thread_policy_get(
	thread_t				thread,
	thread_policy_flavor_t	flavor,
	thread_policy_t			policy_info,
	mach_msg_type_number_t	*count,
	boolean_t				*get_default)
{
	kern_return_t			result = KERN_SUCCESS;
	spl_t					s;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);
	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_TERMINATED);
	}

	switch (flavor) {

	case THREAD_EXTENDED_POLICY:
	{
		boolean_t		timeshare = TRUE;

		if (!(*get_default)) {
			s = splsched();
			thread_lock(thread);

			if (	 (thread->sched_mode != TH_MODE_REALTIME)	&&
					 (thread->saved_mode != TH_MODE_REALTIME)			) {
				/* For demoted threads report the saved (intended) mode. */
				if (!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK))
					timeshare = (thread->sched_mode == TH_MODE_TIMESHARE) != 0;
				else
					timeshare = (thread->saved_mode == TH_MODE_TIMESHARE) != 0;
			}
			else
				/* Real-time thread: this flavor doesn't apply. */
				*get_default = TRUE;

			thread_unlock(thread);
			splx(s);
		}

		if (*count >= THREAD_EXTENDED_POLICY_COUNT) {
			thread_extended_policy_t	info;

			info = (thread_extended_policy_t)policy_info;
			info->timeshare = timeshare;
		}

		break;
	}

	case THREAD_TIME_CONSTRAINT_POLICY:
	{
		thread_time_constraint_policy_t		info;

		if (*count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_time_constraint_policy_t)policy_info;

		if (!(*get_default)) {
			s = splsched();
			thread_lock(thread);

			if (	(thread->sched_mode == TH_MODE_REALTIME)	||
					(thread->saved_mode == TH_MODE_REALTIME)		) {
				/* Report the thread's actual real-time parameters. */
				info->period = thread->realtime.period;
				info->computation = thread->realtime.computation;
				info->constraint = thread->realtime.constraint;
				info->preemptible = thread->realtime.preemptible;
			}
			else
				/* Not real time: fall through to defaults below. */
				*get_default = TRUE;

			thread_unlock(thread);
			splx(s);
		}

		if (*get_default) {
			info->period = 0;
			info->computation = default_timeshare_computation;
			info->constraint = default_timeshare_constraint;
			info->preemptible = TRUE;
		}

		break;
	}

	case THREAD_PRECEDENCE_POLICY:
	{
		thread_precedence_policy_t		info;

		if (*count < THREAD_PRECEDENCE_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_precedence_policy_t)policy_info;

		if (!(*get_default)) {
			s = splsched();
			thread_lock(thread);

			info->importance = thread->importance;

			thread_unlock(thread);
			splx(s);
		}
		else
			info->importance = 0;

		break;
	}

	case THREAD_AFFINITY_POLICY:
	{
		thread_affinity_policy_t		info;

		if (!thread_affinity_is_supported()) {
			result = KERN_NOT_SUPPORTED;
			break;
		}
		if (*count < THREAD_AFFINITY_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_affinity_policy_t)policy_info;

		/* thread_affinity_get() does its own locking. */
		if (!(*get_default))
			info->affinity_tag = thread_affinity_get(thread);
		else
			info->affinity_tag = THREAD_AFFINITY_TAG_NULL;

		break;
	}

	default:
		result = KERN_INVALID_ARGUMENT;
		break;
	}

	thread_mtx_unlock(thread);

	return (result);
}