/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/thread_act_server.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/affinity.h>
static void
thread_recompute_priority(
    thread_t        thread);

#if CONFIG_EMBEDDED
static void
thread_throttle(
    thread_t        thread,
    integer_t       task_priority);

extern int mach_do_background_thread(thread_t thread, int prio);
#endif
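
/*
 * thread_policy_set:
 *
 * MIG-exposed setter (declared in the thread_act_server.h header included
 * above).  Rejects a null thread, silently returns KERN_SUCCESS without
 * applying the change for threads whose scheduling parameters are pinned
 * (static_param), and otherwise forwards to thread_policy_set_internal().
 */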
kern_return_t
thread_policy_set(
    thread_t                thread,
    thread_policy_flavor_t  flavor,
    thread_policy_t         policy_info,
    mach_msg_type_number_t  count)
{
    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (thread->static_param)
        return (KERN_SUCCESS);

    return (thread_policy_set_internal(thread, flavor, policy_info, count));
}
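
/*
 * Illustrative user-space sketch (not part of this file): the same
 * THREAD_TIME_CONSTRAINT_POLICY flavor handled below is typically applied
 * from user space through the thread_policy_set() MIG interface.  The time
 * values are in absolute-time units (convert with mach_timebase_info());
 * the numbers shown here are placeholders only and must satisfy the checks
 * made against min_rt_quantum/max_rt_quantum in this file.
 *
 *	#include <mach/mach.h>
 *	#include <mach/thread_policy.h>
 *
 *	thread_time_constraint_policy_data_t policy = {
 *		.period      = 0,        // no fixed period (placeholder)
 *		.computation = 50000,    // abs-time units of work needed (placeholder)
 *		.constraint  = 100000,   // window the work must fit in (placeholder)
 *		.preemptible = TRUE
 *	};
 *	kern_return_t kr = thread_policy_set(mach_thread_self(),
 *		THREAD_TIME_CONSTRAINT_POLICY,
 *		(thread_policy_t)&policy,
 *		THREAD_TIME_CONSTRAINT_POLICY_COUNT);
 */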
kern_return_t
thread_policy_set_internal(
    thread_t                thread,
    thread_policy_flavor_t  flavor,
    thread_policy_t         policy_info,
    mach_msg_type_number_t  count)
{
    kern_return_t           result = KERN_SUCCESS;
    spl_t                   s;

    thread_mtx_lock(thread);
    if (!thread->active) {
        thread_mtx_unlock(thread);

        return (KERN_TERMINATED);
    }

    switch (flavor) {
    case THREAD_EXTENDED_POLICY:
    {
        boolean_t               timeshare = TRUE;

        if (count >= THREAD_EXTENDED_POLICY_COUNT) {
            thread_extended_policy_t    info;

            info = (thread_extended_policy_t)policy_info;
            timeshare = info->timeshare;
        }

        if (!SCHED(supports_timeshare_mode)())
            timeshare = FALSE;

        s = splsched();
        thread_lock(thread);

        if (!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK)) {
            integer_t   oldmode = (thread->sched_mode == TH_MODE_TIMESHARE);

            if (timeshare) {
                thread->sched_mode = TH_MODE_TIMESHARE;

                if (!oldmode) {
                    if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
                        sched_share_incr();
                }
            }
            else {
                thread->sched_mode = TH_MODE_FIXED;

                if (oldmode) {
                    if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
                        sched_share_decr();
                }
            }

            thread_recompute_priority(thread);
        }
        else {
            if (timeshare)
                thread->saved_mode = TH_MODE_TIMESHARE;
            else
                thread->saved_mode = TH_MODE_FIXED;
        }

        thread_unlock(thread);
        splx(s);

        break;
    }
    case THREAD_TIME_CONSTRAINT_POLICY:
    {
        thread_time_constraint_policy_t     info;

        if (count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_time_constraint_policy_t)policy_info;
        if (    info->constraint < info->computation    ||
                info->computation > max_rt_quantum      ||
                info->computation < min_rt_quantum      ) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        s = splsched();
        thread_lock(thread);

        thread->realtime.period = info->period;
        thread->realtime.computation = info->computation;
        thread->realtime.constraint = info->constraint;
        thread->realtime.preemptible = info->preemptible;

        if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
            thread->saved_mode = TH_MODE_REALTIME;
        }
#if CONFIG_EMBEDDED
        else if (thread->task_priority <= MAXPRI_THROTTLE) {
            thread->saved_mode = TH_MODE_REALTIME;
            thread->sched_flags |= TH_SFLAG_THROTTLED;
        }
#endif
        else {
            if (thread->sched_mode == TH_MODE_TIMESHARE) {
                if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
                    sched_share_decr();
            }
            thread->sched_mode = TH_MODE_REALTIME;
            thread_recompute_priority(thread);
        }

        thread_unlock(thread);
        splx(s);

        break;
    }
    case THREAD_PRECEDENCE_POLICY:
    {
        thread_precedence_policy_t      info;

        if (count < THREAD_PRECEDENCE_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }
        info = (thread_precedence_policy_t)policy_info;

        s = splsched();
        thread_lock(thread);

        thread->importance = info->importance;

        thread_recompute_priority(thread);

        thread_unlock(thread);
        splx(s);

        break;
    }
    case THREAD_AFFINITY_POLICY:
    {
        thread_affinity_policy_t    info;

        if (!thread_affinity_is_supported()) {
            result = KERN_NOT_SUPPORTED;
            break;
        }
        if (count < THREAD_AFFINITY_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_affinity_policy_t) policy_info;
        /*
         * Unlock the thread mutex here and
         * return directly after calling thread_affinity_set().
         * This is necessary for correct lock ordering because
         * thread_affinity_set() takes the task lock.
         */
        thread_mtx_unlock(thread);
        return thread_affinity_set(thread, info->affinity_tag);
    }
#if CONFIG_EMBEDDED
    case THREAD_BACKGROUND_POLICY:
    {
        thread_background_policy_t  info;

        info = (thread_background_policy_t) policy_info;

        thread_mtx_unlock(thread);
        return mach_do_background_thread(thread, info->priority);
    }
#endif /* CONFIG_EMBEDDED */

    default:
        result = KERN_INVALID_ARGUMENT;
        break;
    }

    thread_mtx_unlock(thread);

    return (result);
}
static void
thread_recompute_priority(
    thread_t        thread)
{
    integer_t       priority;

    if (thread->sched_mode == TH_MODE_REALTIME)
        priority = BASEPRI_RTQUEUES;
    else {
        if (thread->importance > MAXPRI)
            priority = MAXPRI;
        else
        if (thread->importance < -MAXPRI)
            priority = -MAXPRI;
        else
            priority = thread->importance;

        priority += thread->task_priority;

        if (priority > thread->max_priority)
            priority = thread->max_priority;
        else
        if (priority < MINPRI)
            priority = MINPRI;
#if CONFIG_EMBEDDED
        /* No one can have a base priority less than MAXPRI_THROTTLE */
        if (priority < MAXPRI_THROTTLE)
            priority = MAXPRI_THROTTLE;
#endif /* CONFIG_EMBEDDED */
    }

    set_priority(thread, priority);
}
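
/*
 * thread_throttle (CONFIG_EMBEDDED only):
 *
 * Demote a real-time thread to timeshare (or fixed) mode while its task is
 * throttled at or below MAXPRI_THROTTLE, remembering the original mode in
 * saved_mode, and restore that mode once the task priority rises above the
 * throttle level again.  Called from thread_task_priority() with the
 * thread locked.
 */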
#if CONFIG_EMBEDDED
static void
thread_throttle(
    thread_t        thread,
    integer_t       task_priority)
{
    if (!(thread->sched_flags & TH_SFLAG_THROTTLED) &&
         (task_priority <= MAXPRI_THROTTLE)) {

        if (!((thread->sched_mode == TH_MODE_REALTIME) ||
              (thread->saved_mode == TH_MODE_REALTIME))) {
            return;
        }
        /* Demote to timeshare if throttling */
        if (thread->sched_mode == TH_MODE_REALTIME)
        {
            thread->saved_mode = TH_MODE_REALTIME;

            if (thread->sched_mode == TH_MODE_TIMESHARE) {
                if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
                    sched_share_incr();
            }
        }

        /* TH_SFLAG_FAILSAFE and TH_SFLAG_THROTTLED are mutually exclusive,
         * since a throttled thread is not realtime during the throttle
         * and doesn't need the failsafe repromotion. We therefore clear
         * the former and set the latter flags here.
         */
        thread->sched_flags &= ~TH_SFLAG_FAILSAFE;
        thread->sched_flags |= TH_SFLAG_THROTTLED;

        if (SCHED(supports_timeshare_mode)())
            thread->sched_mode = TH_MODE_TIMESHARE;
        else
            thread->sched_mode = TH_MODE_FIXED;
    }
    else if ((thread->sched_flags & TH_SFLAG_THROTTLED) &&
             (task_priority > MAXPRI_THROTTLE)) {

        /* Promote back to real time if unthrottling */
        if (!(thread->saved_mode == TH_MODE_TIMESHARE)) {

            thread->sched_mode = thread->saved_mode;

            if (thread->sched_mode == TH_MODE_TIMESHARE) {
                if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
                    sched_share_incr();
            }

            thread->saved_mode = TH_MODE_NONE;
        }

        thread->sched_flags &= ~TH_SFLAG_THROTTLED;
    }
}
#endif
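
/*
 * thread_task_priority:
 *
 * Update the thread's task_priority and max_priority to track a change in
 * its task's priority, applying the embedded throttle policy first, and
 * then recompute the thread's base priority.
 */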
void
thread_task_priority(
    thread_t        thread,
    integer_t       priority,
    integer_t       max_priority)
{
    spl_t               s;

    assert(thread != THREAD_NULL);

    s = splsched();
    thread_lock(thread);

#if CONFIG_EMBEDDED
    thread_throttle(thread, priority);
#endif

    thread->task_priority = priority;
    thread->max_priority = max_priority;

    thread_recompute_priority(thread);

    thread_unlock(thread);
    splx(s);
}
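
/*
 * thread_policy_reset:
 *
 * Return the thread to its task's initial scheduling mode, clearing any
 * demotion state (failsafe/throttle) and resetting its importance to zero
 * before recomputing its priority.
 */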
void
thread_policy_reset(
    thread_t        thread)
{
    spl_t       s;

    s = splsched();
    thread_lock(thread);

    if (!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK)) {
        sched_mode_t oldmode = thread->sched_mode;

        thread->sched_mode = SCHED(initial_thread_sched_mode)(thread->task);

        if ((oldmode != TH_MODE_TIMESHARE) && (thread->sched_mode == TH_MODE_TIMESHARE)) {

            if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
                sched_share_incr();
        }
    }
    else {
        thread->saved_mode = TH_MODE_NONE;
        thread->sched_flags &= ~TH_SFLAG_DEMOTED_MASK;
    }

    thread->importance = 0;

    thread_recompute_priority(thread);

    thread_unlock(thread);
    splx(s);
}
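
/*
 * thread_policy_get:
 *
 * Report the thread's current scheduling policy for the requested flavor.
 * If *get_default is TRUE on entry, or the thread carries no non-default
 * setting for the flavor, the default parameters are returned instead and
 * *get_default is set to TRUE.
 */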
kern_return_t
thread_policy_get(
    thread_t                thread,
    thread_policy_flavor_t  flavor,
    thread_policy_t         policy_info,
    mach_msg_type_number_t  *count,
    boolean_t               *get_default)
{
    kern_return_t           result = KERN_SUCCESS;
    spl_t                   s;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);
    if (!thread->active) {
        thread_mtx_unlock(thread);

        return (KERN_TERMINATED);
    }

    switch (flavor) {
    case THREAD_EXTENDED_POLICY:
    {
        boolean_t       timeshare = TRUE;

        if (!(*get_default)) {
            s = splsched();
            thread_lock(thread);

            if (    (thread->sched_mode != TH_MODE_REALTIME)    &&
                    (thread->saved_mode != TH_MODE_REALTIME)        ) {
                if (!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK))
                    timeshare = (thread->sched_mode == TH_MODE_TIMESHARE) != 0;
                else
                    timeshare = (thread->saved_mode == TH_MODE_TIMESHARE) != 0;
            }
            else
                *get_default = TRUE;

            thread_unlock(thread);
            splx(s);
        }

        if (*count >= THREAD_EXTENDED_POLICY_COUNT) {
            thread_extended_policy_t    info;

            info = (thread_extended_policy_t)policy_info;
            info->timeshare = timeshare;
        }

        break;
    }
    case THREAD_TIME_CONSTRAINT_POLICY:
    {
        thread_time_constraint_policy_t     info;

        if (*count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_time_constraint_policy_t)policy_info;

        if (!(*get_default)) {
            s = splsched();
            thread_lock(thread);

            if (    (thread->sched_mode == TH_MODE_REALTIME)    ||
                    (thread->saved_mode == TH_MODE_REALTIME)        ) {
                info->period = thread->realtime.period;
                info->computation = thread->realtime.computation;
                info->constraint = thread->realtime.constraint;
                info->preemptible = thread->realtime.preemptible;
            }
            else
                *get_default = TRUE;

            thread_unlock(thread);
            splx(s);
        }

        if (*get_default) {
            info->period = 0;
            info->computation = default_timeshare_computation;
            info->constraint = default_timeshare_constraint;
            info->preemptible = TRUE;
        }

        break;
    }
    case THREAD_PRECEDENCE_POLICY:
    {
        thread_precedence_policy_t      info;

        if (*count < THREAD_PRECEDENCE_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_precedence_policy_t)policy_info;

        if (!(*get_default)) {
            s = splsched();
            thread_lock(thread);

            info->importance = thread->importance;

            thread_unlock(thread);
            splx(s);
        }
        else
            info->importance = 0;

        break;
    }
    case THREAD_AFFINITY_POLICY:
    {
        thread_affinity_policy_t        info;

        if (!thread_affinity_is_supported()) {
            result = KERN_NOT_SUPPORTED;
            break;
        }
        if (*count < THREAD_AFFINITY_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_affinity_policy_t)policy_info;

        if (!(*get_default))
            info->affinity_tag = thread_affinity_get(thread);
        else
            info->affinity_tag = THREAD_AFFINITY_TAG_NULL;

        break;
    }

    default:
        result = KERN_INVALID_ARGUMENT;
        break;
    }

    thread_mtx_unlock(thread);

    return (result);
}