osfmk/kern/thread_policy.c
/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/thread_act_server.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/affinity.h>

static void
thread_recompute_priority(
	thread_t		thread);

#if CONFIG_EMBEDDED
static void
thread_throttle(
	thread_t		thread,
	integer_t		task_priority);

extern int mach_do_background_thread(thread_t thread, int prio);
#endif

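/*
 * thread_policy_set:
 *
 * Public entry point for changing a thread's scheduling policy.
 * Requests against threads whose parameters were fixed at creation
 * time (static_param) are silently accepted without effect.
 */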
kern_return_t
thread_policy_set(
	thread_t				thread,
	thread_policy_flavor_t	flavor,
	thread_policy_t			policy_info,
	mach_msg_type_number_t	count)
{
	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (thread->static_param)
		return (KERN_SUCCESS);

	return (thread_policy_set_internal(thread, flavor, policy_info, count));
}

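/*
 * thread_policy_set_internal:
 *
 * Worker for thread_policy_set(), also available to kernel-internal
 * callers that must bypass the static_param check.  Takes the thread
 * mutex and, for most flavors, the thread lock at splsched.
 */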
kern_return_t
thread_policy_set_internal(
	thread_t				thread,
	thread_policy_flavor_t	flavor,
	thread_policy_t			policy_info,
	mach_msg_type_number_t	count)
{
	kern_return_t			result = KERN_SUCCESS;
	spl_t					s;

	thread_mtx_lock(thread);
	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_TERMINATED);
	}

	switch (flavor) {

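	/*
	 * THREAD_EXTENDED_POLICY:
	 * Choose between timeshare and fixed-priority scheduling.
	 * With no policy buffer (or a short one), timeshare is assumed.
	 */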
	case THREAD_EXTENDED_POLICY:
	{
		boolean_t				timeshare = TRUE;

		if (count >= THREAD_EXTENDED_POLICY_COUNT) {
			thread_extended_policy_t	info;

			info = (thread_extended_policy_t)policy_info;
			timeshare = info->timeshare;
		}

		if (!SCHED(supports_timeshare_mode)())
			timeshare = FALSE;

		s = splsched();
		thread_lock(thread);

		if (!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK)) {
			integer_t	oldmode = (thread->sched_mode == TH_MODE_TIMESHARE);

			if (timeshare) {
				thread->sched_mode = TH_MODE_TIMESHARE;

				if (!oldmode) {
					/* Keep the count of runnable timeshare threads current */
					if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
						sched_share_incr();
				}
			}
			else {
				thread->sched_mode = TH_MODE_FIXED;

				if (oldmode) {
					if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
						sched_share_decr();
				}
			}

			thread_recompute_priority(thread);
		}
		else {
			/*
			 * The thread is demoted; defer the mode change by
			 * saving the requested mode, to be applied when the
			 * demotion is undone.
			 */
			if (timeshare)
				thread->saved_mode = TH_MODE_TIMESHARE;
			else
				thread->saved_mode = TH_MODE_FIXED;
		}

		thread_unlock(thread);
		splx(s);

		break;
	}

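	/*
	 * THREAD_TIME_CONSTRAINT_POLICY:
	 * Make the thread realtime: in each `period' of absolute time it
	 * may need up to `computation' units of CPU within a window of
	 * `constraint' units.
	 */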
	case THREAD_TIME_CONSTRAINT_POLICY:
	{
		thread_time_constraint_policy_t		info;

		if (count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_time_constraint_policy_t)policy_info;

		/*
		 * The computation must fit within its constraint and lie
		 * within the bounds of a realtime quantum.
		 */
		if (	info->constraint < info->computation	||
				info->computation > max_rt_quantum		||
				info->computation < min_rt_quantum		) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		s = splsched();
		thread_lock(thread);

		thread->realtime.period = info->period;
		thread->realtime.computation = info->computation;
		thread->realtime.constraint = info->constraint;
		thread->realtime.preemptible = info->preemptible;

		if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
			/* Demoted: apply the realtime mode once undemoted */
			thread->saved_mode = TH_MODE_REALTIME;
		}
		else {
			if (thread->sched_mode == TH_MODE_TIMESHARE) {
				if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
					sched_share_decr();
			}
			thread->sched_mode = TH_MODE_REALTIME;
			thread_recompute_priority(thread);
		}

		thread_unlock(thread);
		splx(s);

		break;
	}

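	/*
	 * THREAD_PRECEDENCE_POLICY:
	 * Bias the thread's priority relative to its task via `importance'.
	 */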
	case THREAD_PRECEDENCE_POLICY:
	{
		thread_precedence_policy_t		info;

		if (count < THREAD_PRECEDENCE_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}
		info = (thread_precedence_policy_t)policy_info;

		s = splsched();
		thread_lock(thread);

		thread->importance = info->importance;

		thread_recompute_priority(thread);

		thread_unlock(thread);
		splx(s);

		break;
	}

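	/*
	 * THREAD_AFFINITY_POLICY:
	 * Threads tagged with the same (non-null) affinity tag are grouped
	 * into an affinity set and steered toward processors that share a
	 * cache.
	 */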
	case THREAD_AFFINITY_POLICY:
	{
		thread_affinity_policy_t	info;

		if (!thread_affinity_is_supported()) {
			result = KERN_NOT_SUPPORTED;
			break;
		}
		if (count < THREAD_AFFINITY_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_affinity_policy_t) policy_info;
		/*
		 * Unlock the thread mutex here and
		 * return directly after calling thread_affinity_set().
		 * This is necessary for correct lock ordering because
		 * thread_affinity_set() takes the task lock.
		 */
		thread_mtx_unlock(thread);
		return thread_affinity_set(thread, info->affinity_tag);
	}

#if CONFIG_EMBEDDED
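	/*
	 * THREAD_BACKGROUND_POLICY:
	 * Move the thread into, or back out of, the background band.
	 */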
	case THREAD_BACKGROUND_POLICY:
	{
		thread_background_policy_t	info;

		/* Validate the buffer size before dereferencing it */
		if (count < THREAD_BACKGROUND_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_background_policy_t) policy_info;

		thread_mtx_unlock(thread);
		return mach_do_background_thread(thread, info->priority);
	}
#endif /* CONFIG_EMBEDDED */

	default:
		result = KERN_INVALID_ARGUMENT;
		break;
	}

	thread_mtx_unlock(thread);
	return (result);
}

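/*
 * thread_recompute_priority:
 *
 * Rederive the thread's base priority from its scheduling mode, its
 * task's priority, and its importance.  Called with the thread locked.
 */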
static void
thread_recompute_priority(
	thread_t		thread)
{
	integer_t		priority;

	if (thread->sched_mode == TH_MODE_REALTIME)
		priority = BASEPRI_RTQUEUES;
	else {
		if (thread->importance > MAXPRI)
			priority = MAXPRI;
		else
		if (thread->importance < -MAXPRI)
			priority = -MAXPRI;
		else
			priority = thread->importance;

		priority += thread->task_priority;

		if (priority > thread->max_priority)
			priority = thread->max_priority;
		else
		if (priority < MINPRI)
			priority = MINPRI;
#if CONFIG_EMBEDDED
		/* No one can have a base priority less than MAXPRI_THROTTLE */
		if (priority < MAXPRI_THROTTLE)
			priority = MAXPRI_THROTTLE;
#endif /* CONFIG_EMBEDDED */
	}

	set_priority(thread, priority);
}

#if CONFIG_EMBEDDED
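/*
 * thread_throttle:
 *
 * Queue the thread for demotion into, or promotion out of, the
 * throttled band as its task's priority crosses MAXPRI_THROTTLE.
 * The runqueue switch itself is deferred via the PENDING flags.
 */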
static void
thread_throttle(
	thread_t		thread,
	integer_t		task_priority)
{
	if ((!(thread->sched_flags & TH_SFLAG_THROTTLED)
		 || (thread->sched_flags & TH_SFLAG_PENDING_THROTTLE_PROMOTION))
		&& (task_priority <= MAXPRI_THROTTLE)) {

		/* Kill a promotion if it was in flight */
		thread->sched_flags &= ~TH_SFLAG_PENDING_THROTTLE_PROMOTION;

		if (!(thread->sched_flags & TH_SFLAG_THROTTLED)) {
			/*
			 * Set the pending bit so that we can switch runqueues
			 * (potentially) at a later time safely
			 */
			thread->sched_flags |= TH_SFLAG_PENDING_THROTTLE_DEMOTION;
		}
	}
	else if (((thread->sched_flags & TH_SFLAG_THROTTLED)
			  || (thread->sched_flags & TH_SFLAG_PENDING_THROTTLE_DEMOTION))
			 && (task_priority > MAXPRI_THROTTLE)) {

		/* Kill a demotion if it was in flight */
		thread->sched_flags &= ~TH_SFLAG_PENDING_THROTTLE_DEMOTION;

		if (thread->sched_flags & TH_SFLAG_THROTTLED) {
			thread->sched_flags |= TH_SFLAG_PENDING_THROTTLE_PROMOTION;
		}
	}
}
#endif

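/*
 * thread_task_priority:
 *
 * Propagate a change in the owning task's priority (and maximum
 * priority) to the thread and recompute its base priority.
 */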
void
thread_task_priority(
	thread_t		thread,
	integer_t		priority,
	integer_t		max_priority)
{
	spl_t				s;

	assert(thread != THREAD_NULL);

	s = splsched();
	thread_lock(thread);

#if CONFIG_EMBEDDED
	thread_throttle(thread, priority);
#endif

	thread->task_priority = priority;
	thread->max_priority = max_priority;

	thread_recompute_priority(thread);

	thread_unlock(thread);
	splx(s);
}

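/*
 * thread_policy_reset:
 *
 * Restore the thread to its task's default scheduling mode and zero
 * importance, undoing any demotion currently in effect.
 */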
void
thread_policy_reset(
	thread_t		thread)
{
	spl_t		s;

	s = splsched();
	thread_lock(thread);

	if (!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK)) {
		sched_mode_t oldmode = thread->sched_mode;

		thread->sched_mode = SCHED(initial_thread_sched_mode)(thread->task);

		if ((oldmode != TH_MODE_TIMESHARE) && (thread->sched_mode == TH_MODE_TIMESHARE)) {
			if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
				sched_share_incr();
		}
	}
	else {
		/* Demoted: drop back to the saved mode and clear the demotion */
		thread->sched_mode = thread->saved_mode;
		thread->saved_mode = TH_MODE_NONE;
		thread->sched_flags &= ~TH_SFLAG_DEMOTED_MASK;
	}

	thread->importance = 0;

	thread_recompute_priority(thread);

	thread_unlock(thread);
	splx(s);
}

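/*
 * thread_policy_get:
 *
 * Return the thread's current policy of the given flavor, or the
 * default if *get_default is set on entry.  *get_default is also set
 * on return when the thread's mode does not match the flavor asked for.
 */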
kern_return_t
thread_policy_get(
	thread_t				thread,
	thread_policy_flavor_t	flavor,
	thread_policy_t			policy_info,
	mach_msg_type_number_t	*count,
	boolean_t				*get_default)
{
	kern_return_t			result = KERN_SUCCESS;
	spl_t					s;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);
	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_TERMINATED);
	}

	switch (flavor) {

	case THREAD_EXTENDED_POLICY:
	{
		boolean_t		timeshare = TRUE;

		if (!(*get_default)) {
			s = splsched();
			thread_lock(thread);

			if (	(thread->sched_mode != TH_MODE_REALTIME)	&&
					(thread->saved_mode != TH_MODE_REALTIME)		) {
				/* A demoted thread reports the mode it will return to */
				if (!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK))
					timeshare = (thread->sched_mode == TH_MODE_TIMESHARE);
				else
					timeshare = (thread->saved_mode == TH_MODE_TIMESHARE);
			}
			else
				*get_default = TRUE;

			thread_unlock(thread);
			splx(s);
		}

		if (*count >= THREAD_EXTENDED_POLICY_COUNT) {
			thread_extended_policy_t	info;

			info = (thread_extended_policy_t)policy_info;
			info->timeshare = timeshare;
		}

		break;
	}

	case THREAD_TIME_CONSTRAINT_POLICY:
	{
		thread_time_constraint_policy_t		info;

		if (*count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_time_constraint_policy_t)policy_info;

		if (!(*get_default)) {
			s = splsched();
			thread_lock(thread);

			if (	(thread->sched_mode == TH_MODE_REALTIME)	||
					(thread->saved_mode == TH_MODE_REALTIME)		) {
				info->period = thread->realtime.period;
				info->computation = thread->realtime.computation;
				info->constraint = thread->realtime.constraint;
				info->preemptible = thread->realtime.preemptible;
			}
			else
				*get_default = TRUE;

			thread_unlock(thread);
			splx(s);
		}

		if (*get_default) {
			info->period = 0;
			info->computation = default_timeshare_computation;
			info->constraint = default_timeshare_constraint;
			info->preemptible = TRUE;
		}

		break;
	}

	case THREAD_PRECEDENCE_POLICY:
	{
		thread_precedence_policy_t		info;

		if (*count < THREAD_PRECEDENCE_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_precedence_policy_t)policy_info;

		if (!(*get_default)) {
			s = splsched();
			thread_lock(thread);

			info->importance = thread->importance;

			thread_unlock(thread);
			splx(s);
		}
		else
			info->importance = 0;

		break;
	}

	case THREAD_AFFINITY_POLICY:
	{
		thread_affinity_policy_t		info;

		if (!thread_affinity_is_supported()) {
			result = KERN_NOT_SUPPORTED;
			break;
		}
		if (*count < THREAD_AFFINITY_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_affinity_policy_t)policy_info;

		if (!(*get_default))
			info->affinity_tag = thread_affinity_get(thread);
		else
			info->affinity_tag = THREAD_AFFINITY_TAG_NULL;

		break;
	}

	default:
		result = KERN_INVALID_ARGUMENT;
		break;
	}

	thread_mtx_unlock(thread);

	return (result);
}