/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>
#include <mach/thread_act.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>

#include <kern/kern_types.h>
#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <sys/kdebug.h>
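
/*
 * This is the "fixedpriority" scheduler variant: threads run at their base
 * priority on a fixed-quantum run queue, with no priority aging.  Timeshare
 * threads that prove CPU-bound are demoted into the fairshare class
 * (TH_MODE_FAIRSHARE) and restored once they have blocked for long enough;
 * see sched_fixedpriority_quantum_expire() and
 * sched_fixedpriority_update_priority() below.  A second dispatch table,
 * sched_fixedpriority_with_pset_runqueue_dispatch, shares every hook but
 * keeps one run queue per processor set instead of one per processor.  The
 * variant is presumably selected at boot via the "sched" boot-arg handled
 * in sched_init().
 */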
static void
sched_fixedpriority_init(void);

static void
sched_fixedpriority_with_pset_runqueue_init(void);

static void
sched_fixedpriority_timebase_init(void);

static void
sched_fixedpriority_processor_init(processor_t processor);

static void
sched_fixedpriority_pset_init(processor_set_t pset);

static void
sched_fixedpriority_maintenance_continuation(void);

static thread_t
sched_fixedpriority_choose_thread(processor_t		processor,
								  int				priority);

static thread_t
sched_fixedpriority_steal_thread(processor_set_t	pset);

static void
sched_fixedpriority_compute_priority(thread_t		thread,
									 boolean_t		override_depress);

static processor_t
sched_fixedpriority_choose_processor(processor_set_t	pset,
									 processor_t		processor,
									 thread_t			thread);

static boolean_t
sched_fixedpriority_processor_enqueue(
									  processor_t		processor,
									  thread_t			thread,
									  integer_t			options);

static void
sched_fixedpriority_processor_queue_shutdown(
											 processor_t	processor);

static boolean_t
sched_fixedpriority_processor_queue_remove(
										   processor_t		processor,
										   thread_t			thread);

static boolean_t
sched_fixedpriority_processor_queue_empty(processor_t		processor);

static boolean_t
sched_fixedpriority_processor_queue_has_priority(processor_t	processor,
												 int			priority,
												 boolean_t		gte);

static boolean_t
sched_fixedpriority_priority_is_urgent(int priority);

static ast_t
sched_fixedpriority_processor_csw_check(processor_t processor);

static uint32_t
sched_fixedpriority_initial_quantum_size(thread_t thread);

static sched_mode_t
sched_fixedpriority_initial_thread_sched_mode(task_t parent_task);

static boolean_t
sched_fixedpriority_supports_timeshare_mode(void);

static boolean_t
sched_fixedpriority_can_update_priority(thread_t	thread);

static void
sched_fixedpriority_update_priority(thread_t	thread);

static void
sched_fixedpriority_lightweight_update_priority(thread_t	thread);

static void
sched_fixedpriority_quantum_expire(thread_t	thread);

static boolean_t
sched_fixedpriority_should_current_thread_rechoose_processor(processor_t	processor);

static int
sched_fixedpriority_processor_runq_count(processor_t	processor);

static uint64_t
sched_fixedpriority_processor_runq_stats_count_sum(processor_t	processor);
const struct sched_dispatch_table sched_fixedpriority_dispatch = {
	sched_fixedpriority_init,
	sched_fixedpriority_timebase_init,
	sched_fixedpriority_processor_init,
	sched_fixedpriority_pset_init,
	sched_fixedpriority_maintenance_continuation,
	sched_fixedpriority_choose_thread,
	sched_fixedpriority_steal_thread,
	sched_fixedpriority_compute_priority,
	sched_fixedpriority_choose_processor,
	sched_fixedpriority_processor_enqueue,
	sched_fixedpriority_processor_queue_shutdown,
	sched_fixedpriority_processor_queue_remove,
	sched_fixedpriority_processor_queue_empty,
	sched_fixedpriority_priority_is_urgent,
	sched_fixedpriority_processor_csw_check,
	sched_fixedpriority_processor_queue_has_priority,
	sched_fixedpriority_initial_quantum_size,
	sched_fixedpriority_initial_thread_sched_mode,
	sched_fixedpriority_supports_timeshare_mode,
	sched_fixedpriority_can_update_priority,
	sched_fixedpriority_update_priority,
	sched_fixedpriority_lightweight_update_priority,
	sched_fixedpriority_quantum_expire,
	sched_fixedpriority_should_current_thread_rechoose_processor,
	sched_fixedpriority_processor_runq_count,
	sched_fixedpriority_processor_runq_stats_count_sum,
	sched_traditional_fairshare_init,
	sched_traditional_fairshare_runq_count,
	sched_traditional_fairshare_runq_stats_count_sum,
	sched_traditional_fairshare_enqueue,
	sched_traditional_fairshare_dequeue,
	sched_traditional_fairshare_queue_remove,
	TRUE	/* direct_dispatch_to_idle_processors */
};
const struct sched_dispatch_table sched_fixedpriority_with_pset_runqueue_dispatch = {
	sched_fixedpriority_with_pset_runqueue_init,
	sched_fixedpriority_timebase_init,
	sched_fixedpriority_processor_init,
	sched_fixedpriority_pset_init,
	sched_fixedpriority_maintenance_continuation,
	sched_fixedpriority_choose_thread,
	sched_fixedpriority_steal_thread,
	sched_fixedpriority_compute_priority,
	sched_fixedpriority_choose_processor,
	sched_fixedpriority_processor_enqueue,
	sched_fixedpriority_processor_queue_shutdown,
	sched_fixedpriority_processor_queue_remove,
	sched_fixedpriority_processor_queue_empty,
	sched_fixedpriority_priority_is_urgent,
	sched_fixedpriority_processor_csw_check,
	sched_fixedpriority_processor_queue_has_priority,
	sched_fixedpriority_initial_quantum_size,
	sched_fixedpriority_initial_thread_sched_mode,
	sched_fixedpriority_supports_timeshare_mode,
	sched_fixedpriority_can_update_priority,
	sched_fixedpriority_update_priority,
	sched_fixedpriority_lightweight_update_priority,
	sched_fixedpriority_quantum_expire,
	sched_fixedpriority_should_current_thread_rechoose_processor,
	sched_fixedpriority_processor_runq_count,
	sched_fixedpriority_processor_runq_stats_count_sum,
	sched_traditional_fairshare_init,
	sched_traditional_fairshare_runq_count,
	sched_traditional_fairshare_runq_stats_count_sum,
	sched_traditional_fairshare_enqueue,
	sched_traditional_fairshare_dequeue,
	sched_traditional_fairshare_queue_remove,
	FALSE	/* direct_dispatch_to_idle_processors */
};
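
/*
 * The two dispatch tables differ only in their init hook and in the final
 * direct_dispatch_to_idle_processors flag: TRUE for the per-processor run
 * queue variant, FALSE for the shared per-pset run queue variant,
 * presumably so that idle processors pull from the shared queue rather
 * than being dispatched to directly.
 */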
extern int	max_unsafe_quanta;

#define SCHED_FIXEDPRIORITY_DEFAULT_QUANTUM	5	/* in ms */
static uint32_t sched_fixedpriority_quantum_ms = SCHED_FIXEDPRIORITY_DEFAULT_QUANTUM;
static uint32_t sched_fixedpriority_quantum;

#define SCHED_FIXEDPRIORITY_DEFAULT_FAIRSHARE_MINIMUM_BLOCK_TIME	100	/* ms */
static uint32_t fairshare_minimum_blocked_time_ms = SCHED_FIXEDPRIORITY_DEFAULT_FAIRSHARE_MINIMUM_BLOCK_TIME;
static uint32_t fairshare_minimum_blocked_time;

static uint32_t sched_fixedpriority_tick;
static uint64_t sched_fixedpriority_tick_deadline;
extern uint32_t grrr_rescale_tick;

static boolean_t sched_fixedpriority_use_pset_runqueue = FALSE;
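
/*
 * Both millisecond tunables can be overridden from the boot command line:
 * "fixedpriority_quantum" (parsed in sched_fixedpriority_init) and
 * "fairshare_minblockedtime" (parsed in sched_fixedpriority_timebase_init).
 * The corresponding absolute-time values are derived once at timebase init.
 */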
__attribute__((always_inline))
static inline run_queue_t runq_for_processor(processor_t processor)
{
	if (sched_fixedpriority_use_pset_runqueue)
		return &processor->processor_set->pset_runq;
	else
		return &processor->runq;
}
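
/*
 * Bound-thread bookkeeping: when the shared pset run queue is in use, a
 * count of processor-bound threads is kept both on the pset and on each
 * processor, so that queue-empty checks can tell how much of the shared
 * queue is actually runnable on a given processor (see
 * sched_fixedpriority_processor_queue_empty below).
 */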
__attribute__((always_inline))
static inline void runq_consider_incr_bound_count(processor_t processor, thread_t thread)
{
	if (thread->bound_processor == PROCESSOR_NULL)
		return;

	assert(thread->bound_processor == processor);

	if (sched_fixedpriority_use_pset_runqueue)
		processor->processor_set->pset_runq_bound_count++;

	processor->runq_bound_count++;
}
__attribute__((always_inline))
static inline void runq_consider_decr_bound_count(processor_t processor, thread_t thread)
{
	if (thread->bound_processor == PROCESSOR_NULL)
		return;

	assert(thread->bound_processor == processor);

	if (sched_fixedpriority_use_pset_runqueue)
		processor->processor_set->pset_runq_bound_count--;

	processor->runq_bound_count--;
}
static void
sched_fixedpriority_init(void)
{
	if (!PE_parse_boot_argn("fixedpriority_quantum", &sched_fixedpriority_quantum_ms, sizeof (sched_fixedpriority_quantum_ms))) {
		sched_fixedpriority_quantum_ms = SCHED_FIXEDPRIORITY_DEFAULT_QUANTUM;
	}

	if (sched_fixedpriority_quantum_ms < 1)
		sched_fixedpriority_quantum_ms = SCHED_FIXEDPRIORITY_DEFAULT_QUANTUM;

	printf("standard fixed priority timeslicing quantum is %u ms\n", sched_fixedpriority_quantum_ms);
}
static void
sched_fixedpriority_with_pset_runqueue_init(void)
{
	sched_fixedpriority_init();
	sched_fixedpriority_use_pset_runqueue = TRUE;
}
static void
sched_fixedpriority_timebase_init(void)
{
	uint64_t	abstime;

	/* standard timeslicing quantum */
	clock_interval_to_absolutetime_interval(
							sched_fixedpriority_quantum_ms, NSEC_PER_MSEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	sched_fixedpriority_quantum = (uint32_t)abstime;

	thread_depress_time = 1 * sched_fixedpriority_quantum;
	default_timeshare_computation = sched_fixedpriority_quantum / 2;
	default_timeshare_constraint = sched_fixedpriority_quantum;

	max_unsafe_computation = max_unsafe_quanta * sched_fixedpriority_quantum;
	sched_safe_duration = 2 * max_unsafe_quanta * sched_fixedpriority_quantum;

	if (!PE_parse_boot_argn("fairshare_minblockedtime", &fairshare_minimum_blocked_time_ms, sizeof (fairshare_minimum_blocked_time_ms))) {
		fairshare_minimum_blocked_time_ms = SCHED_FIXEDPRIORITY_DEFAULT_FAIRSHARE_MINIMUM_BLOCK_TIME;
	}

	clock_interval_to_absolutetime_interval(
							fairshare_minimum_blocked_time_ms, NSEC_PER_MSEC, &abstime);

	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	fairshare_minimum_blocked_time = (uint32_t)abstime;
}
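
/*
 * Worked through with the default 5 ms quantum: thread_depress_time is one
 * quantum (5 ms), the default timeshare computation/constraint pair comes
 * out to 2.5 ms of computation within a 5 ms constraint, and the realtime
 * failsafe allows max_unsafe_quanta * 5 ms of unsafe computation, with
 * sched_safe_duration, which governs the failsafe release, set to twice
 * that.
 */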
static void
sched_fixedpriority_processor_init(processor_t processor)
{
	if (!sched_fixedpriority_use_pset_runqueue) {
		run_queue_init(&processor->runq);
	}
	processor->runq_bound_count = 0;
}
static void
sched_fixedpriority_pset_init(processor_set_t pset)
{
	if (sched_fixedpriority_use_pset_runqueue) {
		run_queue_init(&pset->pset_runq);
	}
	pset->pset_runq_bound_count = 0;
}
static void
sched_fixedpriority_maintenance_continuation(void)
{
	uint64_t			abstime = mach_absolute_time();

	sched_fixedpriority_tick++;
	grrr_rescale_tick++;

	/*
	 *  Compute various averages.
	 */
	compute_averages();

	if (sched_fixedpriority_tick_deadline == 0)
		sched_fixedpriority_tick_deadline = abstime;

	clock_deadline_for_periodic_event(10*sched_one_second_interval, abstime,
						&sched_fixedpriority_tick_deadline);

	assert_wait_deadline((event_t)sched_fixedpriority_maintenance_continuation, THREAD_UNINT, sched_fixedpriority_tick_deadline);
	thread_block((thread_continue_t)sched_fixedpriority_maintenance_continuation);
	/*NOTREACHED*/
}
static thread_t
sched_fixedpriority_choose_thread(processor_t		processor,
								  int				priority)
{
	thread_t thread;

	thread = choose_thread(processor, runq_for_processor(processor), priority);
	if (thread != THREAD_NULL) {
		runq_consider_decr_bound_count(processor, thread);
	}

	return thread;
}
static thread_t
sched_fixedpriority_steal_thread(processor_set_t	pset)
{
	pset_unlock(pset);

	return (THREAD_NULL);
}
static void
sched_fixedpriority_compute_priority(thread_t		thread,
									 boolean_t		override_depress)
{
	/* Reset current priority to base priority */
	if (	!(thread->sched_flags & TH_SFLAG_PROMOTED)			&&
			(!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)	||
			 override_depress)) {
		set_sched_pri(thread, thread->priority);
	}
}
static processor_t
sched_fixedpriority_choose_processor(processor_set_t	pset,
									 processor_t		processor,
									 thread_t			thread)
{
	return choose_processor(pset, processor, thread);
}
static boolean_t
sched_fixedpriority_processor_enqueue(
									  processor_t		processor,
									  thread_t			thread,
									  integer_t			options)
{
	run_queue_t		rq = runq_for_processor(processor);
	boolean_t		result;

	result = run_queue_enqueue(rq, thread, options);
	thread->runq = processor;
	runq_consider_incr_bound_count(processor, thread);

	return (result);
}
static void
sched_fixedpriority_processor_queue_shutdown(
											 processor_t	processor)
{
	processor_set_t		pset = processor->processor_set;
	thread_t			thread;
	queue_head_t		tqueue, bqueue;

	queue_init(&tqueue);
	queue_init(&bqueue);

	while ((thread = sched_fixedpriority_choose_thread(processor, IDLEPRI)) != THREAD_NULL) {
		if (thread->bound_processor == PROCESSOR_NULL) {
			enqueue_tail(&tqueue, (queue_entry_t)thread);
		} else {
			enqueue_tail(&bqueue, (queue_entry_t)thread);
		}
	}

	while ((thread = (thread_t)dequeue_head(&bqueue)) != THREAD_NULL) {
		sched_fixedpriority_processor_enqueue(processor, thread, SCHED_TAILQ);
	}

	pset_unlock(pset);

	while ((thread = (thread_t)dequeue_head(&tqueue)) != THREAD_NULL) {
		thread_lock(thread);

		thread_setrun(thread, SCHED_TAILQ);

		thread_unlock(thread);
	}
}
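
/*
 * Shutdown drains the dying processor's queue into two local lists: bound
 * threads (bqueue) are re-enqueued on the same processor, since they cannot
 * run anywhere else, while unbound threads (tqueue) are handed back to
 * thread_setrun() to be placed wherever the scheduler now prefers.
 */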
static boolean_t
sched_fixedpriority_processor_queue_remove(
										   processor_t		processor,
										   thread_t			thread)
{
	void *			rqlock;
	run_queue_t		rq;

	rqlock = &processor->processor_set->sched_lock;
	rq = runq_for_processor(processor);

	simple_lock(rqlock);
	if (processor == thread->runq) {
		/*
		 *	Thread is on a run queue and we have a lock on
		 *	that run queue.
		 */
		runq_consider_decr_bound_count(processor, thread);
		run_queue_remove(rq, thread);
	}
	else {
		/*
		 *	The thread left the run queue before we could
		 *	lock the run queue.
		 */
		assert(thread->runq == PROCESSOR_NULL);
		processor = PROCESSOR_NULL;
	}

	simple_unlock(rqlock);

	return (processor != PROCESSOR_NULL);
}
static boolean_t
sched_fixedpriority_processor_queue_empty(processor_t		processor)
{
	/*
	 * See sched_traditional_with_pset_runqueue_processor_queue_empty
	 * for algorithm
	 */
	int count = runq_for_processor(processor)->count;

	if (sched_fixedpriority_use_pset_runqueue) {
		processor_set_t pset = processor->processor_set;

		count -= pset->pset_runq_bound_count;
		count += processor->runq_bound_count;
	}

	return count == 0;
}
static boolean_t
sched_fixedpriority_processor_queue_has_priority(processor_t	processor,
												 int			priority,
												 boolean_t		gte)
{
	if (gte)
		return runq_for_processor(processor)->highq >= priority;
	else
		return runq_for_processor(processor)->highq > priority;
}
/* Implement sched_preempt_pri in code */
static boolean_t
sched_fixedpriority_priority_is_urgent(int priority)
{
	if (priority <= BASEPRI_FOREGROUND)
		return FALSE;

	if (priority < MINPRI_KERNEL)
		return TRUE;

	if (priority >= BASEPRI_PREEMPT)
		return TRUE;

	return FALSE;
}
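
/*
 * In other words, a priority is "urgent" (forces an immediate preemption
 * AST) when it sits strictly above BASEPRI_FOREGROUND but below
 * MINPRI_KERNEL, or at/above BASEPRI_PREEMPT; everything else waits for
 * the normal quantum-driven context switch.
 */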
static ast_t
sched_fixedpriority_processor_csw_check(processor_t processor)
{
	run_queue_t		runq;
	boolean_t		has_higher;

	runq = runq_for_processor(processor);
	if (first_timeslice(processor)) {
		has_higher = (runq->highq > processor->current_pri);
	} else {
		has_higher = (runq->highq >= processor->current_pri);
	}
	if (has_higher) {
		if (runq->urgency > 0)
			return (AST_PREEMPT | AST_URGENT);

		if (processor->active_thread && thread_eager_preemption(processor->active_thread))
			return (AST_PREEMPT | AST_URGENT);

		return AST_PREEMPT;
	} else if (processor->current_thmode == TH_MODE_FAIRSHARE) {
		if (!sched_fixedpriority_processor_queue_empty(processor)) {
			/* Allow queued threads to run if the current thread got demoted to fairshare */
			return (AST_PREEMPT | AST_URGENT);
		} else if ((!first_timeslice(processor)) && SCHED(fairshare_runq_count)() > 0) {
			/* Allow other fairshare threads to run */
			return AST_PREEMPT | AST_URGENT;
		}
	}

	return AST_NONE;
}
static uint32_t
sched_fixedpriority_initial_quantum_size(thread_t thread __unused)
{
	return sched_fixedpriority_quantum;
}
static sched_mode_t
sched_fixedpriority_initial_thread_sched_mode(task_t parent_task)
{
	if (parent_task == kernel_task)
		return TH_MODE_FIXED;
	else
		return TH_MODE_TIMESHARE;
}
static boolean_t
sched_fixedpriority_supports_timeshare_mode(void)
{
	return TRUE;
}
static boolean_t
sched_fixedpriority_can_update_priority(thread_t	thread)
{
	return ((thread->sched_flags & TH_SFLAG_PRI_UPDATE) == 0);
}
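
/*
 * sched_fixedpriority_update_priority below is the slow-path state machine
 * for a thread's scheduling mode.  It runs under TH_SFLAG_PRI_UPDATE and
 * handles, in order: releasing a fairshare demotion once the thread has
 * been blocked long enough, entering fairshare for depressed threads,
 * applying pending throttle demotions/promotions queued elsewhere, and
 * finally releasing the realtime failsafe when its hold time expires.
 */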
static void
sched_fixedpriority_update_priority(thread_t	thread)
{
	uint64_t current_time = mach_absolute_time();

	thread->sched_flags |= TH_SFLAG_PRI_UPDATE;

	if (thread->sched_flags & TH_SFLAG_FAIRSHARE_TRIPPED) {

		/*
		 * Make sure we've waited fairshare_minimum_blocked_time both from the time
		 * we were throttled into the fairshare band, and the last time
		 * we ran.
		 */
		if (current_time >= thread->last_run_time + fairshare_minimum_blocked_time) {
			boolean_t removed = thread_run_queue_remove(thread);

			thread->sched_flags &= ~TH_SFLAG_FAIRSHARE_TRIPPED;
			thread->sched_mode = thread->saved_mode;
			thread->saved_mode = TH_MODE_NONE;

			if (removed)
				thread_setrun(thread, SCHED_TAILQ);

			KERNEL_DEBUG_CONSTANT1(
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_FAIRSHARE_EXIT) | DBG_FUNC_NONE, (uint32_t)(thread->last_run_time & 0xFFFFFFFF), (uint32_t)(thread->last_run_time >> 32), (uint32_t)(current_time & 0xFFFFFFFF), (uint32_t)(current_time >> 32), thread_tid(thread));
		}
	} else if ((thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) && (thread->bound_processor == PROCESSOR_NULL)) {
		boolean_t removed = thread_run_queue_remove(thread);

		thread->sched_flags |= TH_SFLAG_FAIRSHARE_TRIPPED;
		thread->saved_mode = thread->sched_mode;
		thread->sched_mode = TH_MODE_FAIRSHARE;

		thread->last_quantum_refill_time = thread->last_run_time - 2 * sched_fixedpriority_quantum - 1;

		if (removed)
			thread_setrun(thread, SCHED_TAILQ);

		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED,MACH_FAIRSHARE_ENTER) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), 0xFFFFFFFF, 0, 0, 0);
	}

	/* Check for pending throttle transitions, and safely switch queues */
	if ((thread->sched_flags & TH_SFLAG_PENDING_THROTTLE_MASK) && (thread->bound_processor == PROCESSOR_NULL)) {
		boolean_t removed = thread_run_queue_remove(thread);

		if (thread->sched_flags & TH_SFLAG_PENDING_THROTTLE_DEMOTION) {
			if (thread->sched_mode == TH_MODE_REALTIME) {
				thread->saved_mode = thread->sched_mode;
				thread->sched_mode = TH_MODE_TIMESHARE;

				if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
					sched_share_incr();
			} else {
				/*
				 * It's possible that this is a realtime thread that has
				 * already tripped the failsafe, in which case it should not
				 * degrade further.
				 */
				if (!(thread->sched_flags & TH_SFLAG_FAILSAFE)) {

					thread->saved_mode = thread->sched_mode;

					if (thread->sched_mode == TH_MODE_TIMESHARE) {
						thread->sched_mode = TH_MODE_FAIRSHARE;
					}
				}
			}
			thread->sched_flags |= TH_SFLAG_THROTTLED;

			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_FAIRSHARE_ENTER) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), 0xFFFFFFFF, 0, 0, 0);

		} else {
			if ((thread->sched_mode == TH_MODE_TIMESHARE)
				&& (thread->saved_mode == TH_MODE_REALTIME)) {
				if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
					sched_share_decr();
			}

			thread->sched_mode = thread->saved_mode;
			thread->saved_mode = TH_MODE_NONE;
			thread->sched_flags &= ~TH_SFLAG_THROTTLED;

			KERNEL_DEBUG_CONSTANT1(
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_FAIRSHARE_EXIT) | DBG_FUNC_NONE, 0, 0, 0, 0, thread_tid(thread));

		}

		thread->sched_flags &= ~(TH_SFLAG_PENDING_THROTTLE_MASK);

		if (removed)
			thread_setrun(thread, SCHED_TAILQ);
	}

	/*
	 *	Check for fail-safe release.
	 */
	if (	(thread->sched_flags & TH_SFLAG_FAILSAFE)	&&
			current_time >= thread->safe_release) {

		thread->sched_flags &= ~TH_SFLAG_FAILSAFE;

		if (!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK)) {
			/* Restore to previous */

			thread->sched_mode = thread->saved_mode;
			thread->saved_mode = TH_MODE_NONE;

			if (thread->sched_mode == TH_MODE_REALTIME) {
				thread->priority = BASEPRI_RTQUEUES;
			}

			if (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK))
				set_sched_pri(thread, thread->priority);
		}
	}

	thread->sched_flags &= ~TH_SFLAG_PRI_UPDATE;
	return;
}
static void
sched_fixedpriority_lightweight_update_priority(thread_t thread __unused)
{
	return;
}
static void
sched_fixedpriority_quantum_expire(
								   thread_t	thread)
{
	/* Put thread into fairshare class, core scheduler will manage runqueue */
	if ((thread->sched_mode == TH_MODE_TIMESHARE) && (thread->task != kernel_task) && !(thread->sched_flags & TH_SFLAG_DEMOTED_MASK)) {
		uint64_t elapsed = thread->last_run_time - thread->last_quantum_refill_time;

		/* If we managed to use our quantum in less than 2*quantum wall clock time,
		 * we are considered CPU bound and eligible for demotion. Since the quantum
		 * is reset when thread_unblock() is called, we are only really considering
		 * threads that elongate their execution time due to preemption.
		 */
		if ((elapsed < 2 * sched_fixedpriority_quantum) && (thread->bound_processor == PROCESSOR_NULL)) {
			thread->saved_mode = thread->sched_mode;
			thread->sched_mode = TH_MODE_FAIRSHARE;
			thread->sched_flags |= TH_SFLAG_FAIRSHARE_TRIPPED;
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_FAIRSHARE_ENTER) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), (uint32_t)(elapsed & 0xFFFFFFFF), (uint32_t)(elapsed >> 32), 0, 0);
		}
	}
}
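
/*
 * Worked example, assuming the default 5 ms quantum: a thread whose quantum
 * expires less than 10 ms of wall-clock time after its last quantum refill
 * has been consuming CPU essentially back-to-back, so it is demoted to
 * fairshare; a thread that blocked in between has its quantum refilled by
 * thread_unblock() and never looks CPU bound here.
 */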
static boolean_t
sched_fixedpriority_should_current_thread_rechoose_processor(processor_t	processor __unused)
{
	return (TRUE);
}
static int
sched_fixedpriority_processor_runq_count(processor_t	processor)
{
	return runq_for_processor(processor)->count;
}
static uint64_t
sched_fixedpriority_processor_runq_stats_count_sum(processor_t	processor)
{
	return runq_for_processor(processor)->runq_stats.count_sum;
}