/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/thread_act_server.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/affinity.h>

static void
thread_recompute_priority(
    thread_t        thread);

#if CONFIG_EMBEDDED
static void
thread_throttle(
    thread_t        thread,
    integer_t       task_priority);

extern int mach_do_background_thread(thread_t thread, int prio);
#endif
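
/*
 * thread_policy_set:
 *
 * Public entry point for changing a thread's scheduling policy.
 * Rejects a null thread, silently succeeds for threads whose
 * parameters are marked static, and otherwise hands off to
 * thread_policy_set_internal().
 */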
kern_return_t
thread_policy_set(
    thread_t                thread,
    thread_policy_flavor_t  flavor,
    thread_policy_t         policy_info,
    mach_msg_type_number_t  count)
{
    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (thread->static_param)
        return (KERN_SUCCESS);

    return (thread_policy_set_internal(thread, flavor, policy_info, count));
}
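
/*
 * Illustrative sketch (not part of this file): user code typically
 * reaches the routine below through the thread_policy_set() Mach call,
 * for example to request a time-constraint (real-time) policy.  The
 * numeric values are placeholders; real callers derive them from
 * mach_timebase_info() and their own deadlines.
 *
 *    thread_time_constraint_policy_data_t tc;
 *    tc.period      = 0;
 *    tc.computation = 500000;    // placeholder, absolute-time units
 *    tc.constraint  = 1000000;   // placeholder, absolute-time units
 *    tc.preemptible = TRUE;
 *    (void) thread_policy_set(mach_thread_self(),
 *                             THREAD_TIME_CONSTRAINT_POLICY,
 *                             (thread_policy_t)&tc,
 *                             THREAD_TIME_CONSTRAINT_POLICY_COUNT);
 */

/*
 * thread_policy_set_internal:
 *
 * Apply one policy flavor (extended, time-constraint, precedence,
 * affinity, or background) to a thread.  Takes and drops the thread
 * mutex; the affinity and background cases return early because they
 * must call out with the mutex released.
 */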
kern_return_t
thread_policy_set_internal(
    thread_t                thread,
    thread_policy_flavor_t  flavor,
    thread_policy_t         policy_info,
    mach_msg_type_number_t  count)
{
    kern_return_t           result = KERN_SUCCESS;
    spl_t                   s;

    thread_mtx_lock(thread);
    if (!thread->active) {
        thread_mtx_unlock(thread);

        return (KERN_TERMINATED);
    }

    switch (flavor) {

    case THREAD_EXTENDED_POLICY:
    {
        boolean_t       timeshare = TRUE;

        if (count >= THREAD_EXTENDED_POLICY_COUNT) {
            thread_extended_policy_t    info;

            info = (thread_extended_policy_t)policy_info;
            timeshare = info->timeshare;
        }

        if (!SCHED(supports_timeshare_mode)())
            timeshare = FALSE;

        s = splsched();
        thread_lock(thread);

        if (!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK)) {
            integer_t   oldmode = (thread->sched_mode == TH_MODE_TIMESHARE);

            if (timeshare) {
                thread->sched_mode = TH_MODE_TIMESHARE;

                if (!oldmode) {
                    if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
                        sched_share_incr();
                }
            }
            else {
                thread->sched_mode = TH_MODE_FIXED;

                if (oldmode) {
                    if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
                        sched_share_decr();
                }
            }

            thread_recompute_priority(thread);
        }
        else {
            if (timeshare)
                thread->saved_mode = TH_MODE_TIMESHARE;
            else
                thread->saved_mode = TH_MODE_FIXED;
        }

        thread_unlock(thread);
        splx(s);

        break;
    }

    case THREAD_TIME_CONSTRAINT_POLICY:
    {
        thread_time_constraint_policy_t     info;

        if (count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_time_constraint_policy_t)policy_info;
        if (info->constraint < info->computation   ||
            info->computation > max_rt_quantum     ||
            info->computation < min_rt_quantum) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        s = splsched();
        thread_lock(thread);

        thread->realtime.period = info->period;
        thread->realtime.computation = info->computation;
        thread->realtime.constraint = info->constraint;
        thread->realtime.preemptible = info->preemptible;

        if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
            thread->saved_mode = TH_MODE_REALTIME;
        }
        else {
            if (thread->sched_mode == TH_MODE_TIMESHARE) {
                if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
                    sched_share_decr();
            }
            thread->sched_mode = TH_MODE_REALTIME;
            thread_recompute_priority(thread);
        }

        thread_unlock(thread);
        splx(s);

        break;
    }

    case THREAD_PRECEDENCE_POLICY:
    {
        thread_precedence_policy_t      info;

        if (count < THREAD_PRECEDENCE_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }
        info = (thread_precedence_policy_t)policy_info;

        s = splsched();
        thread_lock(thread);

        thread->importance = info->importance;

        thread_recompute_priority(thread);

        thread_unlock(thread);
        splx(s);

        break;
    }

    case THREAD_AFFINITY_POLICY:
    {
        thread_affinity_policy_t    info;

        if (!thread_affinity_is_supported()) {
            result = KERN_NOT_SUPPORTED;
            break;
        }
        if (count < THREAD_AFFINITY_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_affinity_policy_t) policy_info;
        /*
         * Unlock the thread mutex here and
         * return directly after calling thread_affinity_set().
         * This is necessary for correct lock ordering because
         * thread_affinity_set() takes the task lock.
         */
        thread_mtx_unlock(thread);
        return thread_affinity_set(thread, info->affinity_tag);
    }

#if CONFIG_EMBEDDED
    case THREAD_BACKGROUND_POLICY:
    {
        thread_background_policy_t  info;

        info = (thread_background_policy_t) policy_info;

        thread_mtx_unlock(thread);
        return mach_do_background_thread(thread, info->priority);
    }
#endif /* CONFIG_EMBEDDED */

    default:
        result = KERN_INVALID_ARGUMENT;
        break;
    }

    thread_mtx_unlock(thread);
    return (result);
}
static void
thread_recompute_priority(
    thread_t        thread)
{
    integer_t       priority;

    if (thread->sched_mode == TH_MODE_REALTIME)
        priority = BASEPRI_RTQUEUES;
    else {
        if (thread->importance > MAXPRI)
            priority = MAXPRI;
        else
        if (thread->importance < -MAXPRI)
            priority = -MAXPRI;
        else
            priority = thread->importance;

        priority += thread->task_priority;

        if (priority > thread->max_priority)
            priority = thread->max_priority;
        else
        if (priority < MINPRI)
            priority = MINPRI;
#if CONFIG_EMBEDDED
        /* No one can have a base priority less than MAXPRI_THROTTLE */
        if (priority < MAXPRI_THROTTLE)
            priority = MAXPRI_THROTTLE;
#endif /* CONFIG_EMBEDDED */
    }

    set_priority(thread, priority);
}
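
/*
 * thread_throttle (CONFIG_EMBEDDED only):
 *
 * Reconcile a thread's throttle state with a new task priority.
 * Dropping to or below MAXPRI_THROTTLE cancels any pending promotion
 * and requests a demotion; rising above it cancels any pending
 * demotion and requests a promotion.  Only the pending flags are set
 * here; the runqueue switch happens later, when it is safe.
 */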
#if CONFIG_EMBEDDED
static void
thread_throttle(
    thread_t        thread,
    integer_t       task_priority)
{
    if ((!(thread->sched_flags & TH_SFLAG_THROTTLED)
         || (thread->sched_flags & TH_SFLAG_PENDING_THROTTLE_PROMOTION))
        && (task_priority <= MAXPRI_THROTTLE)) {

        /* Kill a promotion if it was in flight */
        thread->sched_flags &= ~TH_SFLAG_PENDING_THROTTLE_PROMOTION;

        if (!(thread->sched_flags & TH_SFLAG_THROTTLED)) {
            /*
             * Set the pending bit so that we can switch runqueues
             * (potentially) at a later time safely
             */
            thread->sched_flags |= TH_SFLAG_PENDING_THROTTLE_DEMOTION;
        }
    }
    else if (((thread->sched_flags & TH_SFLAG_THROTTLED)
              || (thread->sched_flags & TH_SFLAG_PENDING_THROTTLE_DEMOTION))
             && (task_priority > MAXPRI_THROTTLE)) {

        /* Kill a demotion if it was in flight */
        thread->sched_flags &= ~TH_SFLAG_PENDING_THROTTLE_DEMOTION;

        if (thread->sched_flags & TH_SFLAG_THROTTLED) {
            thread->sched_flags |= TH_SFLAG_PENDING_THROTTLE_PROMOTION;
        }
    }
}
#endif /* CONFIG_EMBEDDED */
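
/*
 * thread_task_priority:
 *
 * Propagate a task's priority and maximum priority to one of its
 * threads and recompute the thread's base priority.
 */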
void
thread_task_priority(
    thread_t        thread,
    integer_t       priority,
    integer_t       max_priority)
{
    spl_t           s;

    assert(thread != THREAD_NULL);

    s = splsched();
    thread_lock(thread);

#if CONFIG_EMBEDDED
    thread_throttle(thread, priority);
#endif

    thread->task_priority = priority;
    thread->max_priority = max_priority;

    thread_recompute_priority(thread);

    thread_unlock(thread);
    splx(s);
}
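
/*
 * thread_policy_reset:
 *
 * Return a thread to its task's default scheduling mode, clear any
 * demotion state, zero its importance, and recompute its priority.
 */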
void
thread_policy_reset(
    thread_t        thread)
{
    spl_t           s;

    s = splsched();
    thread_lock(thread);

    if (!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK)) {
        sched_mode_t oldmode = thread->sched_mode;

        thread->sched_mode = SCHED(initial_thread_sched_mode)(thread->task);

        if ((oldmode != TH_MODE_TIMESHARE) && (thread->sched_mode == TH_MODE_TIMESHARE)) {
            if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
                sched_share_incr();
        }
    }
    else {
        thread->sched_mode = thread->saved_mode;
        thread->saved_mode = TH_MODE_NONE;
        thread->sched_flags &= ~TH_SFLAG_DEMOTED_MASK;
    }

    thread->importance = 0;

    thread_recompute_priority(thread);

    thread_unlock(thread);
    splx(s);
}
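
/*
 * thread_policy_get:
 *
 * Report the current settings for a policy flavor, or the defaults
 * when *get_default is set on entry (or when the thread has no
 * explicit setting for that flavor, in which case *get_default is
 * set on return).
 */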
kern_return_t
thread_policy_get(
    thread_t                thread,
    thread_policy_flavor_t  flavor,
    thread_policy_t         policy_info,
    mach_msg_type_number_t  *count,
    boolean_t               *get_default)
{
    kern_return_t           result = KERN_SUCCESS;
    spl_t                   s;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);
    if (!thread->active) {
        thread_mtx_unlock(thread);

        return (KERN_TERMINATED);
    }

    switch (flavor) {

    case THREAD_EXTENDED_POLICY:
    {
        boolean_t       timeshare = TRUE;

        if (!(*get_default)) {
            s = splsched();
            thread_lock(thread);

            if ((thread->sched_mode != TH_MODE_REALTIME) &&
                (thread->saved_mode != TH_MODE_REALTIME)) {
                if (!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK))
                    timeshare = (thread->sched_mode == TH_MODE_TIMESHARE) != 0;
                else
                    timeshare = (thread->saved_mode == TH_MODE_TIMESHARE) != 0;
            }
            else
                *get_default = TRUE;

            thread_unlock(thread);
            splx(s);
        }

        if (*count >= THREAD_EXTENDED_POLICY_COUNT) {
            thread_extended_policy_t    info;

            info = (thread_extended_policy_t)policy_info;
            info->timeshare = timeshare;
        }

        break;
    }

    case THREAD_TIME_CONSTRAINT_POLICY:
    {
        thread_time_constraint_policy_t     info;

        if (*count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_time_constraint_policy_t)policy_info;

        if (!(*get_default)) {
            s = splsched();
            thread_lock(thread);

            if ((thread->sched_mode == TH_MODE_REALTIME) ||
                (thread->saved_mode == TH_MODE_REALTIME)) {
                info->period = thread->realtime.period;
                info->computation = thread->realtime.computation;
                info->constraint = thread->realtime.constraint;
                info->preemptible = thread->realtime.preemptible;
            }
            else
                *get_default = TRUE;

            thread_unlock(thread);
            splx(s);
        }

        if (*get_default) {
            info->period = 0;
            info->computation = default_timeshare_computation;
            info->constraint = default_timeshare_constraint;
            info->preemptible = TRUE;
        }

        break;
    }

    case THREAD_PRECEDENCE_POLICY:
    {
        thread_precedence_policy_t      info;

        if (*count < THREAD_PRECEDENCE_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_precedence_policy_t)policy_info;

        if (!(*get_default)) {
            s = splsched();
            thread_lock(thread);

            info->importance = thread->importance;

            thread_unlock(thread);
            splx(s);
        }
        else
            info->importance = 0;

        break;
    }

    case THREAD_AFFINITY_POLICY:
    {
        thread_affinity_policy_t        info;

        if (!thread_affinity_is_supported()) {
            result = KERN_NOT_SUPPORTED;
            break;
        }
        if (*count < THREAD_AFFINITY_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_affinity_policy_t)policy_info;

        if (!(*get_default))
            info->affinity_tag = thread_affinity_get(thread);
        else
            info->affinity_tag = THREAD_AFFINITY_TAG_NULL;

        break;
    }

    default:
        result = KERN_INVALID_ARGUMENT;
        break;
    }

    thread_mtx_unlock(thread);
    return (result);
}