/* osfmk/kern/sched_fixedpriority.c (from xnu-1699.22.81) */
/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>
#include <mach/thread_act.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>

#include <kern/kern_types.h>
#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>

#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <mach/sdt.h>

#include <sys/kdebug.h>

static void
sched_fixedpriority_init(void);

static void
sched_fixedpriority_with_pset_runqueue_init(void);

static void
sched_fixedpriority_timebase_init(void);

static void
sched_fixedpriority_processor_init(processor_t processor);

static void
sched_fixedpriority_pset_init(processor_set_t pset);

static void
sched_fixedpriority_maintenance_continuation(void);

static thread_t
sched_fixedpriority_choose_thread(processor_t processor,
                                  int priority);

static thread_t
sched_fixedpriority_steal_thread(processor_set_t pset);

static void
sched_fixedpriority_compute_priority(thread_t thread,
                                     boolean_t override_depress);

static processor_t
sched_fixedpriority_choose_processor(processor_set_t pset,
                                     processor_t processor,
                                     thread_t thread);

static boolean_t
sched_fixedpriority_processor_enqueue(processor_t processor,
                                      thread_t thread,
                                      integer_t options);

static void
sched_fixedpriority_processor_queue_shutdown(processor_t processor);

static boolean_t
sched_fixedpriority_processor_queue_remove(processor_t processor,
                                           thread_t thread);

static boolean_t
sched_fixedpriority_processor_queue_empty(processor_t processor);

static boolean_t
sched_fixedpriority_processor_queue_has_priority(processor_t processor,
                                                 int priority,
                                                 boolean_t gte);

static boolean_t
sched_fixedpriority_priority_is_urgent(int priority);

static ast_t
sched_fixedpriority_processor_csw_check(processor_t processor);

static uint32_t
sched_fixedpriority_initial_quantum_size(thread_t thread);

static sched_mode_t
sched_fixedpriority_initial_thread_sched_mode(task_t parent_task);

static boolean_t
sched_fixedpriority_supports_timeshare_mode(void);

static boolean_t
sched_fixedpriority_can_update_priority(thread_t thread);

static void
sched_fixedpriority_update_priority(thread_t thread);

static void
sched_fixedpriority_lightweight_update_priority(thread_t thread);

static void
sched_fixedpriority_quantum_expire(thread_t thread);

static boolean_t
sched_fixedpriority_should_current_thread_rechoose_processor(processor_t processor);

static int
sched_fixedpriority_processor_runq_count(processor_t processor);

static uint64_t
sched_fixedpriority_processor_runq_stats_count_sum(processor_t processor);
const struct sched_dispatch_table sched_fixedpriority_dispatch = {
    sched_fixedpriority_init,
    sched_fixedpriority_timebase_init,
    sched_fixedpriority_processor_init,
    sched_fixedpriority_pset_init,
    sched_fixedpriority_maintenance_continuation,
    sched_fixedpriority_choose_thread,
    sched_fixedpriority_steal_thread,
    sched_fixedpriority_compute_priority,
    sched_fixedpriority_choose_processor,
    sched_fixedpriority_processor_enqueue,
    sched_fixedpriority_processor_queue_shutdown,
    sched_fixedpriority_processor_queue_remove,
    sched_fixedpriority_processor_queue_empty,
    sched_fixedpriority_priority_is_urgent,
    sched_fixedpriority_processor_csw_check,
    sched_fixedpriority_processor_queue_has_priority,
    sched_fixedpriority_initial_quantum_size,
    sched_fixedpriority_initial_thread_sched_mode,
    sched_fixedpriority_supports_timeshare_mode,
    sched_fixedpriority_can_update_priority,
    sched_fixedpriority_update_priority,
    sched_fixedpriority_lightweight_update_priority,
    sched_fixedpriority_quantum_expire,
    sched_fixedpriority_should_current_thread_rechoose_processor,
    sched_fixedpriority_processor_runq_count,
    sched_fixedpriority_processor_runq_stats_count_sum,
    sched_traditional_fairshare_init,
    sched_traditional_fairshare_runq_count,
    sched_traditional_fairshare_runq_stats_count_sum,
    sched_traditional_fairshare_enqueue,
    sched_traditional_fairshare_dequeue,
    sched_traditional_fairshare_queue_remove,
    TRUE /* direct_dispatch_to_idle_processors */
};

const struct sched_dispatch_table sched_fixedpriority_with_pset_runqueue_dispatch = {
    sched_fixedpriority_with_pset_runqueue_init,
    sched_fixedpriority_timebase_init,
    sched_fixedpriority_processor_init,
    sched_fixedpriority_pset_init,
    sched_fixedpriority_maintenance_continuation,
    sched_fixedpriority_choose_thread,
    sched_fixedpriority_steal_thread,
    sched_fixedpriority_compute_priority,
    sched_fixedpriority_choose_processor,
    sched_fixedpriority_processor_enqueue,
    sched_fixedpriority_processor_queue_shutdown,
    sched_fixedpriority_processor_queue_remove,
    sched_fixedpriority_processor_queue_empty,
    sched_fixedpriority_priority_is_urgent,
    sched_fixedpriority_processor_csw_check,
    sched_fixedpriority_processor_queue_has_priority,
    sched_fixedpriority_initial_quantum_size,
    sched_fixedpriority_initial_thread_sched_mode,
    sched_fixedpriority_supports_timeshare_mode,
    sched_fixedpriority_can_update_priority,
    sched_fixedpriority_update_priority,
    sched_fixedpriority_lightweight_update_priority,
    sched_fixedpriority_quantum_expire,
    sched_fixedpriority_should_current_thread_rechoose_processor,
    sched_fixedpriority_processor_runq_count,
    sched_fixedpriority_processor_runq_stats_count_sum,
    sched_traditional_fairshare_init,
    sched_traditional_fairshare_runq_count,
    sched_traditional_fairshare_runq_stats_count_sum,
    sched_traditional_fairshare_enqueue,
    sched_traditional_fairshare_dequeue,
    sched_traditional_fairshare_queue_remove,
    FALSE /* direct_dispatch_to_idle_processors */
};
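
/*
 * The two dispatch tables above differ only in their init entry and in the
 * final direct_dispatch_to_idle_processors flag: the pset-runqueue variant
 * shares a single run queue per processor set instead of one per processor.
 * A note on how they are reached (an assumption about the surrounding tree,
 * not stated in this file): the active policy is chosen once at boot in
 * sched_init(), and the scheduler core then calls through the selected
 * table via the SCHED() dispatch macro used later in this file.
 */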

extern int max_unsafe_quanta;

#define SCHED_FIXEDPRIORITY_DEFAULT_QUANTUM 5 /* in ms */
static uint32_t sched_fixedpriority_quantum_ms = SCHED_FIXEDPRIORITY_DEFAULT_QUANTUM;
static uint32_t sched_fixedpriority_quantum;

#define SCHED_FIXEDPRIORITY_DEFAULT_FAIRSHARE_MINIMUM_BLOCK_TIME 100 /* ms */
static uint32_t fairshare_minimum_blocked_time_ms = SCHED_FIXEDPRIORITY_DEFAULT_FAIRSHARE_MINIMUM_BLOCK_TIME;
static uint32_t fairshare_minimum_blocked_time;

static uint32_t sched_fixedpriority_tick;
static uint64_t sched_fixedpriority_tick_deadline;
extern uint32_t grrr_rescale_tick;

static boolean_t sched_fixedpriority_use_pset_runqueue = FALSE;

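/*
 * Run-queue selection: with the pset-runqueue variant, every processor in a
 * processor set shares pset->pset_runq; otherwise each processor owns its
 * runq. The helpers below also maintain counts of enqueued threads that are
 * bound to a particular processor, which queue_empty() uses to decide
 * whether a shared queue holds anything this processor may actually run.
 */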
__attribute__((always_inline))
static inline run_queue_t runq_for_processor(processor_t processor)
{
    if (sched_fixedpriority_use_pset_runqueue)
        return &processor->processor_set->pset_runq;
    else
        return &processor->runq;
}

__attribute__((always_inline))
static inline void runq_consider_incr_bound_count(processor_t processor, thread_t thread)
{
    if (thread->bound_processor == PROCESSOR_NULL)
        return;

    assert(thread->bound_processor == processor);

    if (sched_fixedpriority_use_pset_runqueue)
        processor->processor_set->pset_runq_bound_count++;

    processor->runq_bound_count++;
}

__attribute__((always_inline))
static inline void runq_consider_decr_bound_count(processor_t processor, thread_t thread)
{
    if (thread->bound_processor == PROCESSOR_NULL)
        return;

    assert(thread->bound_processor == processor);

    if (sched_fixedpriority_use_pset_runqueue)
        processor->processor_set->pset_runq_bound_count--;

    processor->runq_bound_count--;
}

static void
sched_fixedpriority_init(void)
{
    if (!PE_parse_boot_argn("fixedpriority_quantum", &sched_fixedpriority_quantum_ms, sizeof (sched_fixedpriority_quantum_ms))) {
        sched_fixedpriority_quantum_ms = SCHED_FIXEDPRIORITY_DEFAULT_QUANTUM;
    }

    if (sched_fixedpriority_quantum_ms < 1)
        sched_fixedpriority_quantum_ms = SCHED_FIXEDPRIORITY_DEFAULT_QUANTUM;

    printf("standard fixed priority timeslicing quantum is %u ms\n", sched_fixedpriority_quantum_ms);
}
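
/*
 * Usage sketch (an assumption; the mechanism is standard boot-args parsing,
 * only the argument name comes from the code above):
 *
 *   nvram boot-args="fixedpriority_quantum=10"    # request a 10 ms quantum
 *
 * Values below 1 ms are rejected in favor of the 5 ms default.
 */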

static void
sched_fixedpriority_with_pset_runqueue_init(void)
{
    sched_fixedpriority_init();
    sched_fixedpriority_use_pset_runqueue = TRUE;
}

static void
sched_fixedpriority_timebase_init(void)
{
    uint64_t abstime;

    /* standard timeslicing quantum */
    clock_interval_to_absolutetime_interval(
        sched_fixedpriority_quantum_ms, NSEC_PER_MSEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    sched_fixedpriority_quantum = (uint32_t)abstime;

    thread_depress_time = 1 * sched_fixedpriority_quantum;
    default_timeshare_computation = sched_fixedpriority_quantum / 2;
    default_timeshare_constraint = sched_fixedpriority_quantum;

    max_unsafe_computation = max_unsafe_quanta * sched_fixedpriority_quantum;
    sched_safe_duration = 2 * max_unsafe_quanta * sched_fixedpriority_quantum;

    if (!PE_parse_boot_argn("fairshare_minblockedtime", &fairshare_minimum_blocked_time_ms, sizeof (fairshare_minimum_blocked_time_ms))) {
        fairshare_minimum_blocked_time_ms = SCHED_FIXEDPRIORITY_DEFAULT_FAIRSHARE_MINIMUM_BLOCK_TIME;
    }

    clock_interval_to_absolutetime_interval(
        fairshare_minimum_blocked_time_ms, NSEC_PER_MSEC, &abstime);

    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    fairshare_minimum_blocked_time = (uint32_t)abstime;
}
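
/*
 * Worked example (illustrative only; the absolute-time ratio is machine
 * dependent, here taken as 1 ns per unit): the default 5 ms quantum gives
 * abstime = 5 * NSEC_PER_MSEC = 5,000,000, so
 *
 *   thread_depress_time           = 5,000,000   (1 quantum)
 *   default_timeshare_computation = 2,500,000   (quantum / 2)
 *   default_timeshare_constraint  = 5,000,000   (quantum)
 *
 * and, assuming the traditional max_unsafe_quanta of 800, the unsafe
 * computation window spans 4 s with an 8 s sched_safe_duration.
 */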

static void
sched_fixedpriority_processor_init(processor_t processor)
{
    if (!sched_fixedpriority_use_pset_runqueue) {
        run_queue_init(&processor->runq);
    }
    processor->runq_bound_count = 0;
}

static void
sched_fixedpriority_pset_init(processor_set_t pset)
{
    if (sched_fixedpriority_use_pset_runqueue) {
        run_queue_init(&pset->pset_runq);
    }
    pset->pset_runq_bound_count = 0;
}

static void
sched_fixedpriority_maintenance_continuation(void)
{
    uint64_t abstime = mach_absolute_time();

    sched_fixedpriority_tick++;
    grrr_rescale_tick++;

    /*
     * Compute various averages.
     */
    compute_averages();

    if (sched_fixedpriority_tick_deadline == 0)
        sched_fixedpriority_tick_deadline = abstime;

    clock_deadline_for_periodic_event(10 * sched_one_second_interval, abstime,
        &sched_fixedpriority_tick_deadline);

    assert_wait_deadline((event_t)sched_fixedpriority_maintenance_continuation, THREAD_UNINT, sched_fixedpriority_tick_deadline);
    thread_block((thread_continue_t)sched_fixedpriority_maintenance_continuation);
    /*NOTREACHED*/
}
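
/*
 * The maintenance thread wakes every 10 seconds (10 * sched_one_second_interval)
 * purely to refresh the global load averages; unlike the traditional
 * timeshare scheduler there is no per-thread priority decay scan here, since
 * the fairshare demotion below is what restrains CPU-bound threads.
 */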

static thread_t
sched_fixedpriority_choose_thread(processor_t processor,
                                  int priority)
{
    thread_t thread;

    thread = choose_thread(processor, runq_for_processor(processor), priority);
    if (thread != THREAD_NULL) {
        runq_consider_decr_bound_count(processor, thread);
    }

    return thread;
}

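/*
 * steal_thread is entered with the pset lock held and must drop it (an
 * inference from the bare pset_unlock() below); this policy never migrates
 * queued threads between processors, so it always yields THREAD_NULL.
 */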
static thread_t
sched_fixedpriority_steal_thread(processor_set_t pset)
{
    pset_unlock(pset);

    return (THREAD_NULL);
}

static void
sched_fixedpriority_compute_priority(thread_t thread,
                                     boolean_t override_depress)
{
    /* Reset current priority to base priority */
    if ( !(thread->sched_flags & TH_SFLAG_PROMOTED) &&
         (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) ||
          override_depress) ) {
        set_sched_pri(thread, thread->priority);
    }
}
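
/*
 * Note that, unlike the traditional policy, there is no computed or decayed
 * component here: outside of promotion and depression a thread simply runs
 * at its base priority, which is what makes this scheduler "fixed priority".
 */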

static processor_t
sched_fixedpriority_choose_processor(processor_set_t pset,
                                     processor_t processor,
                                     thread_t thread)
{
    return choose_processor(pset, processor, thread);
}

static boolean_t
sched_fixedpriority_processor_enqueue(processor_t processor,
                                      thread_t thread,
                                      integer_t options)
{
    run_queue_t rq = runq_for_processor(processor);
    boolean_t result;

    result = run_queue_enqueue(rq, thread, options);
    thread->runq = processor;
    runq_consider_incr_bound_count(processor, thread);

    return (result);
}

static void
sched_fixedpriority_processor_queue_shutdown(processor_t processor)
{
    processor_set_t pset = processor->processor_set;
    thread_t thread;
    queue_head_t tqueue, bqueue;

    queue_init(&tqueue);
    queue_init(&bqueue);

    while ((thread = sched_fixedpriority_choose_thread(processor, IDLEPRI)) != THREAD_NULL) {
        if (thread->bound_processor == PROCESSOR_NULL) {
            enqueue_tail(&tqueue, (queue_entry_t)thread);
        } else {
            enqueue_tail(&bqueue, (queue_entry_t)thread);
        }
    }

    while ((thread = (thread_t)dequeue_head(&bqueue)) != THREAD_NULL) {
        sched_fixedpriority_processor_enqueue(processor, thread, SCHED_TAILQ);
    }

    pset_unlock(pset);

    while ((thread = (thread_t)dequeue_head(&tqueue)) != THREAD_NULL) {
        thread_lock(thread);

        thread_setrun(thread, SCHED_TAILQ);

        thread_unlock(thread);
    }
}

static boolean_t
sched_fixedpriority_processor_queue_remove(processor_t processor,
                                           thread_t thread)
{
    void *rqlock;
    run_queue_t rq;

    rqlock = &processor->processor_set->sched_lock;
    rq = runq_for_processor(processor);

    simple_lock(rqlock);
    if (processor == thread->runq) {
        /*
         * Thread is on a run queue and we have a lock on
         * that run queue.
         */
        runq_consider_decr_bound_count(processor, thread);
        run_queue_remove(rq, thread);
    }
    else {
        /*
         * The thread left the run queue before we could
         * lock the run queue.
         */
        assert(thread->runq == PROCESSOR_NULL);
        processor = PROCESSOR_NULL;
    }

    simple_unlock(rqlock);

    return (processor != PROCESSOR_NULL);
}

static boolean_t
sched_fixedpriority_processor_queue_empty(processor_t processor)
{
    /*
     * See sched_traditional_with_pset_runqueue_processor_queue_empty
     * for algorithm
     */
    int count = runq_for_processor(processor)->count;

    if (sched_fixedpriority_use_pset_runqueue) {
        processor_set_t pset = processor->processor_set;

        count -= pset->pset_runq_bound_count;
        count += processor->runq_bound_count;
    }

    return count == 0;
}
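
/*
 * Worked example of the bound-count adjustment: if the shared pset run queue
 * holds 3 threads, 2 of them bound to some processor and 1 of those bound to
 * *this* processor, then count = 3 - 2 + 1 = 2: the unbound thread plus the
 * one only this processor may run. A shared queue containing nothing this
 * processor can run therefore correctly reads as empty.
 */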

static boolean_t
sched_fixedpriority_processor_queue_has_priority(processor_t processor,
                                                 int priority,
                                                 boolean_t gte)
{
    if (gte)
        return runq_for_processor(processor)->highq >= priority;
    else
        return runq_for_processor(processor)->highq > priority;
}

/* Implement sched_preempt_pri in code */
static boolean_t
sched_fixedpriority_priority_is_urgent(int priority)
{
    if (priority <= BASEPRI_FOREGROUND)
        return FALSE;

    if (priority < MINPRI_KERNEL)
        return TRUE;

    if (priority >= BASEPRI_PREEMPT)
        return TRUE;

    return FALSE;
}
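
/*
 * With the usual XNU priority constants (an assumption about this tree:
 * BASEPRI_FOREGROUND 47, MINPRI_KERNEL 80, BASEPRI_PREEMPT 92), priorities
 * 48..79 and 92 and above are "urgent" and force immediate preemption,
 * while ordinary user threads (<= 47) and most kernel priorities (80..91)
 * are not.
 */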

static ast_t
sched_fixedpriority_processor_csw_check(processor_t processor)
{
    run_queue_t runq;

    runq = runq_for_processor(processor);
    if (runq->highq > processor->current_pri) {
        if (runq->urgency > 0)
            return (AST_PREEMPT | AST_URGENT);

        if (processor->active_thread && thread_eager_preemption(processor->active_thread))
            return (AST_PREEMPT | AST_URGENT);

        return AST_PREEMPT;
    } else if (processor->current_thmode == TH_MODE_FAIRSHARE) {
        if (!sched_fixedpriority_processor_queue_empty(processor)) {
            /* Allow queued threads to run if the current thread got demoted to fairshare */
            return (AST_PREEMPT | AST_URGENT);
        } else if ((!first_timeslice(processor)) && SCHED(fairshare_runq_count)() > 0) {
            /* Allow other fairshare threads to run */
            return (AST_PREEMPT | AST_URGENT);
        }
    }

    return AST_NONE;
}

static uint32_t
sched_fixedpriority_initial_quantum_size(thread_t thread __unused)
{
    return sched_fixedpriority_quantum;
}

static sched_mode_t
sched_fixedpriority_initial_thread_sched_mode(task_t parent_task)
{
    if (parent_task == kernel_task)
        return TH_MODE_FIXED;
    else
        return TH_MODE_TIMESHARE;
}

static boolean_t
sched_fixedpriority_supports_timeshare_mode(void)
{
    return TRUE;
}

static boolean_t
sched_fixedpriority_can_update_priority(thread_t thread)
{
    return ((thread->sched_flags & TH_SFLAG_PRI_UPDATE) == 0);
}

static void
sched_fixedpriority_update_priority(thread_t thread)
{
    uint64_t current_time = mach_absolute_time();

    thread->sched_flags |= TH_SFLAG_PRI_UPDATE;

    if (thread->sched_flags & TH_SFLAG_FAIRSHARE_TRIPPED) {

        /*
         * Make sure we've waited fairshare_minimum_blocked_time both from the time
         * we were throttled into the fairshare band, and the last time
         * we ran.
         */
        if (current_time >= thread->last_run_time + fairshare_minimum_blocked_time) {

            boolean_t removed = thread_run_queue_remove(thread);

            thread->sched_flags &= ~TH_SFLAG_FAIRSHARE_TRIPPED;
            thread->sched_mode = thread->saved_mode;
            thread->saved_mode = TH_MODE_NONE;

            if (removed)
                thread_setrun(thread, SCHED_TAILQ);

            KERNEL_DEBUG_CONSTANT1(
                MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAIRSHARE_EXIT) | DBG_FUNC_NONE,
                (uint32_t)(thread->last_run_time & 0xFFFFFFFF),
                (uint32_t)(thread->last_run_time >> 32),
                (uint32_t)(current_time & 0xFFFFFFFF),
                (uint32_t)(current_time >> 32),
                thread_tid(thread));

        }
    } else if ((thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) && (thread->bound_processor == PROCESSOR_NULL)) {
        boolean_t removed = thread_run_queue_remove(thread);

        thread->sched_flags |= TH_SFLAG_FAIRSHARE_TRIPPED;
        thread->saved_mode = thread->sched_mode;
        thread->sched_mode = TH_MODE_FAIRSHARE;

        thread->last_quantum_refill_time = thread->last_run_time - 2 * sched_fixedpriority_quantum - 1;

        if (removed)
            thread_setrun(thread, SCHED_TAILQ);

        KERNEL_DEBUG_CONSTANT(
            MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAIRSHARE_ENTER) | DBG_FUNC_NONE,
            (uintptr_t)thread_tid(thread), 0xFFFFFFFF, 0, 0, 0);

    }

    /*
     * Check for fail-safe release.
     */
    if ( (thread->sched_flags & TH_SFLAG_FAILSAFE) &&
         current_time >= thread->safe_release ) {

        thread->sched_flags &= ~TH_SFLAG_FAILSAFE;

        if (!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK)) {
            /* Restore to previous */

            thread->sched_mode = thread->saved_mode;
            thread->saved_mode = TH_MODE_NONE;

            if (thread->sched_mode == TH_MODE_REALTIME) {
                thread->priority = BASEPRI_RTQUEUES;
            }

            if (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK))
                set_sched_pri(thread, thread->priority);
        }
    }

    thread->sched_flags &= ~TH_SFLAG_PRI_UPDATE;
    return;
}
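
/*
 * State-machine summary, derived from the code above and quantum_expire
 * below: a thread trips into the fairshare band either by consuming its
 * quantum too quickly or by being priority-depressed, with its original
 * sched_mode parked in saved_mode. It escapes only once it has not run for
 * fairshare_minimum_blocked_time (100 ms by default), which keeps a
 * CPU-bound thread from bouncing straight back out of the penalty band.
 */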

static void
sched_fixedpriority_lightweight_update_priority(thread_t thread __unused)
{
    return;
}

static void
sched_fixedpriority_quantum_expire(thread_t thread)
{
    /* Put thread into fairshare class, core scheduler will manage runqueue */
    if ((thread->sched_mode == TH_MODE_TIMESHARE) && (thread->task != kernel_task) && !(thread->sched_flags & TH_SFLAG_DEMOTED_MASK)) {
        uint64_t elapsed = thread->last_run_time - thread->last_quantum_refill_time;

        /* If we managed to use our quantum in less than 2*quantum wall clock time,
         * we are considered CPU bound and eligible for demotion. Since the quantum
         * is reset when thread_unblock() is called, we are only really considering
         * threads that elongate their execution time due to preemption.
         */
        if ((elapsed < 2 * sched_fixedpriority_quantum) && (thread->bound_processor == PROCESSOR_NULL)) {

            thread->saved_mode = thread->sched_mode;
            thread->sched_mode = TH_MODE_FAIRSHARE;
            thread->sched_flags |= TH_SFLAG_FAIRSHARE_TRIPPED;
            KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAIRSHARE_ENTER) | DBG_FUNC_NONE,
                (uintptr_t)thread_tid(thread),
                (uint32_t)(elapsed & 0xFFFFFFFF), (uint32_t)(elapsed >> 32), 0, 0);
        }
    }
}
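
/*
 * Worked example with the default 5 ms quantum: a thread whose quantum was
 * refilled at time t and exhausted by t + 8 ms has elapsed = 8 ms, which is
 * less than 2 * 5 ms, so it is demoted; one that needed 12 ms of wall time
 * (because it kept blocking) is left alone. The back-dating of
 * last_quantum_refill_time by 2 * quantum + 1 in update_priority above
 * ensures this same test reads as not CPU bound.
 */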

static boolean_t
sched_fixedpriority_should_current_thread_rechoose_processor(processor_t processor __unused)
{
    return (TRUE);
}

static int
sched_fixedpriority_processor_runq_count(processor_t processor)
{
    return runq_for_processor(processor)->count;
}

static uint64_t
sched_fixedpriority_processor_runq_stats_count_sum(processor_t processor)
{
    return runq_for_processor(processor)->runq_stats.count_sum;
}