/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>
#include <mach/thread_act.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>

#include <kern/kern_types.h>
#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>

#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <mach/sdt.h>

#include <sys/kdebug.h>

#if defined(CONFIG_SCHED_GRRR_CORE)

static void
grrr_priority_mapping_init(void);

static boolean_t
grrr_enqueue(
    grrr_run_queue_t rq,
    thread_t thread);

static thread_t
grrr_select(
    grrr_run_queue_t rq);

static void
grrr_remove(
    grrr_run_queue_t rq,
    thread_t thread);

static void
grrr_sorted_list_insert_group(grrr_run_queue_t rq,
    grrr_group_t group);

static void
grrr_rescale_work(grrr_run_queue_t rq);

static void
grrr_runqueue_init(grrr_run_queue_t runq);

/* Map Mach priorities to ones suitable for proportional sharing */
static grrr_proportional_priority_t grrr_priority_mapping[NRQS];

/* Map each proportional priority to its group */
static grrr_group_index_t grrr_group_mapping[NUM_GRRR_PROPORTIONAL_PRIORITIES];

uint32_t grrr_rescale_tick;

#endif /* defined(CONFIG_SCHED_GRRR_CORE) */

#if defined(CONFIG_SCHED_GRRR)

static void
sched_grrr_init(void);

static void
sched_grrr_timebase_init(void);

static void
sched_grrr_processor_init(processor_t processor);

static void
sched_grrr_pset_init(processor_set_t pset);

static void
sched_grrr_maintenance_continuation(void);

static thread_t
sched_grrr_choose_thread(processor_t processor,
    int priority,
    ast_t reason);

static thread_t
sched_grrr_steal_thread(processor_set_t pset);

static void
sched_grrr_compute_priority(thread_t thread,
    boolean_t override_depress);

static processor_t
sched_grrr_choose_processor(processor_set_t pset,
    processor_t processor,
    thread_t thread);

static boolean_t
sched_grrr_processor_enqueue(
    processor_t processor,
    thread_t thread,
    integer_t options);

static void
sched_grrr_processor_queue_shutdown(
    processor_t processor);

static boolean_t
sched_grrr_processor_queue_remove(
    processor_t processor,
    thread_t thread);

static boolean_t
sched_grrr_processor_queue_empty(processor_t processor);

static boolean_t
sched_grrr_processor_queue_has_priority(processor_t processor,
    int priority,
    boolean_t gte);

static boolean_t
sched_grrr_priority_is_urgent(int priority);

static ast_t
sched_grrr_processor_csw_check(processor_t processor);

static uint32_t
sched_grrr_initial_quantum_size(thread_t thread);

static sched_mode_t
sched_grrr_initial_thread_sched_mode(task_t parent_task);

static boolean_t
sched_grrr_can_update_priority(thread_t thread);

static void
sched_grrr_update_priority(thread_t thread);

static void
sched_grrr_lightweight_update_priority(thread_t thread);

static void
sched_grrr_quantum_expire(thread_t thread);

static boolean_t
sched_grrr_should_current_thread_rechoose_processor(processor_t processor);

static int
sched_grrr_processor_runq_count(processor_t processor);

static uint64_t
sched_grrr_processor_runq_stats_count_sum(processor_t processor);

static int
sched_grrr_processor_bound_count(processor_t processor);

static void
sched_grrr_thread_update_scan(void);

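/*
 * The scheduler framework calls into GRRR exclusively through this
 * callback vector, which sched_init() selects when the "sched"
 * boot-arg names this policy.  The fairshare_* hooks point at the
 * global fairshare queue implemented at the end of this file.
 */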
const struct sched_dispatch_table sched_grrr_dispatch = {
	.init = sched_grrr_init,
	.timebase_init = sched_grrr_timebase_init,
	.processor_init = sched_grrr_processor_init,
	.pset_init = sched_grrr_pset_init,
	.maintenance_continuation = sched_grrr_maintenance_continuation,
	.choose_thread = sched_grrr_choose_thread,
	.steal_thread = sched_grrr_steal_thread,
	.compute_priority = sched_grrr_compute_priority,
	.choose_processor = sched_grrr_choose_processor,
	.processor_enqueue = sched_grrr_processor_enqueue,
	.processor_queue_shutdown = sched_grrr_processor_queue_shutdown,
	.processor_queue_remove = sched_grrr_processor_queue_remove,
	.processor_queue_empty = sched_grrr_processor_queue_empty,
	.priority_is_urgent = sched_grrr_priority_is_urgent,
	.processor_csw_check = sched_grrr_processor_csw_check,
	.processor_queue_has_priority = sched_grrr_processor_queue_has_priority,
	.initial_quantum_size = sched_grrr_initial_quantum_size,
	.initial_thread_sched_mode = sched_grrr_initial_thread_sched_mode,
	.can_update_priority = sched_grrr_can_update_priority,
	.update_priority = sched_grrr_update_priority,
	.lightweight_update_priority = sched_grrr_lightweight_update_priority,
	.quantum_expire = sched_grrr_quantum_expire,
	.should_current_thread_rechoose_processor = sched_grrr_should_current_thread_rechoose_processor,
	.processor_runq_count = sched_grrr_processor_runq_count,
	.processor_runq_stats_count_sum = sched_grrr_processor_runq_stats_count_sum,
	.fairshare_init = sched_grrr_fairshare_init,
	.fairshare_runq_count = sched_grrr_fairshare_runq_count,
	.fairshare_runq_stats_count_sum = sched_grrr_fairshare_runq_stats_count_sum,
	.fairshare_enqueue = sched_grrr_fairshare_enqueue,
	.fairshare_dequeue = sched_grrr_fairshare_dequeue,
	.fairshare_queue_remove = sched_grrr_fairshare_queue_remove,
	.processor_bound_count = sched_grrr_processor_bound_count,
	.thread_update_scan = sched_grrr_thread_update_scan,
	.direct_dispatch_to_idle_processors = TRUE,
};

extern int max_unsafe_quanta;

static uint32_t grrr_quantum_us;
static uint32_t grrr_quantum;

static uint64_t sched_grrr_tick_deadline;

static void
sched_grrr_init(void)
{
	if (default_preemption_rate < 1)
		default_preemption_rate = 100;
	grrr_quantum_us = (1000 * 1000) / default_preemption_rate;

	printf("standard grrr timeslicing quantum is %d us\n", grrr_quantum_us);

	grrr_priority_mapping_init();
}

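/*
 * Worked example: with default_preemption_rate = 100 (the fallback
 * applied above), grrr_quantum_us = 1000000 / 100 = 10000 us, i.e. a
 * 10 ms timeslice.
 */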
static void
sched_grrr_timebase_init(void)
{
	uint64_t abstime;

	/* standard timeslicing quantum */
	clock_interval_to_absolutetime_interval(
	    grrr_quantum_us, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	grrr_quantum = (uint32_t)abstime;

	thread_depress_time = 1 * grrr_quantum;
	default_timeshare_computation = grrr_quantum / 2;
	default_timeshare_constraint = grrr_quantum;

	max_unsafe_computation = max_unsafe_quanta * grrr_quantum;
	sched_safe_duration = 2 * max_unsafe_quanta * grrr_quantum;
}

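/*
 * clock_interval_to_absolutetime_interval() scales the quantum into
 * mach_absolute_time() ticks, whose rate is platform dependent; on a
 * hypothetical 24 MHz timebase, for example, a 10000 us quantum would
 * come out as 240000 ticks.  The assert above guarantees the result
 * fits the 32-bit grrr_quantum without rounding down to zero.
 */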
static void
sched_grrr_processor_init(processor_t processor)
{
	grrr_runqueue_init(&processor->grrr_runq);
}

static void
sched_grrr_pset_init(processor_set_t pset __unused)
{
}

static void
sched_grrr_maintenance_continuation(void)
{
	uint64_t abstime = mach_absolute_time();

	grrr_rescale_tick++;

	/*
	 * Compute various averages.
	 */
	compute_averages(1);

	if (sched_grrr_tick_deadline == 0)
		sched_grrr_tick_deadline = abstime;

	clock_deadline_for_periodic_event(10 * sched_one_second_interval, abstime,
	    &sched_grrr_tick_deadline);

	assert_wait_deadline((event_t)sched_grrr_maintenance_continuation, THREAD_UNINT, sched_grrr_tick_deadline);
	thread_block((thread_continue_t)sched_grrr_maintenance_continuation);
	/*NOTREACHED*/
}

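/*
 * Note the continuation idiom above: rather than looping, the
 * maintenance thread re-arms a periodic deadline and blocks with
 * itself as the continuation, so each 10-second tick restarts the
 * function from the top on a fresh stack.
 */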
static thread_t
sched_grrr_choose_thread(processor_t processor,
    int priority __unused,
    ast_t reason __unused)
{
	grrr_run_queue_t rq = &processor->grrr_runq;

	return grrr_select(rq);
}

static thread_t
sched_grrr_steal_thread(processor_set_t pset)
{
	pset_unlock(pset);

	return (THREAD_NULL);
}

static void
sched_grrr_compute_priority(thread_t thread,
    boolean_t override_depress __unused)
{
	set_sched_pri(thread, thread->priority);
}

static processor_t
sched_grrr_choose_processor(processor_set_t pset,
    processor_t processor,
    thread_t thread)
{
	return choose_processor(pset, processor, thread);
}

static boolean_t
sched_grrr_processor_enqueue(
    processor_t processor,
    thread_t thread,
    integer_t options __unused)
{
	grrr_run_queue_t rq = &processor->grrr_runq;
	boolean_t result;

	result = grrr_enqueue(rq, thread);

	thread->runq = processor;

	return result;
}

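/*
 * On processor shutdown the local GRRR queue is drained in two
 * passes: threads bound to this processor are put back onto its own
 * queue, while unbound threads are collected and re-dispatched
 * through thread_setrun() once the pset lock is dropped.
 */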
static void
sched_grrr_processor_queue_shutdown(
    processor_t processor)
{
	processor_set_t pset = processor->processor_set;
	thread_t thread;
	queue_head_t tqueue, bqueue;

	queue_init(&tqueue);
	queue_init(&bqueue);

	while ((thread = sched_grrr_choose_thread(processor, IDLEPRI, AST_NONE)) != THREAD_NULL) {
		if (thread->bound_processor == PROCESSOR_NULL) {
			enqueue_tail(&tqueue, (queue_entry_t)thread);
		} else {
			enqueue_tail(&bqueue, (queue_entry_t)thread);
		}
	}

	while ((thread = (thread_t)(void *)dequeue_head(&bqueue)) != THREAD_NULL) {
		sched_grrr_processor_enqueue(processor, thread, SCHED_TAILQ);
	}

	pset_unlock(pset);

	while ((thread = (thread_t)(void *)dequeue_head(&tqueue)) != THREAD_NULL) {
		thread_lock(thread);

		thread_setrun(thread, SCHED_TAILQ);

		thread_unlock(thread);
	}
}

static boolean_t
sched_grrr_processor_queue_remove(
    processor_t processor,
    thread_t thread)
{
	void *rqlock;

	rqlock = &processor->processor_set->sched_lock;
	simple_lock(rqlock);

	if (processor == thread->runq) {
		/*
		 * Thread is on a run queue and we have a lock on
		 * that run queue.
		 */
		grrr_run_queue_t rq = &processor->grrr_runq;

		grrr_remove(rq, thread);
	} else {
		/*
		 * The thread left the run queue before we could
		 * lock the run queue.
		 */
		assert(thread->runq == PROCESSOR_NULL);
		processor = PROCESSOR_NULL;
	}

	simple_unlock(rqlock);

	return (processor != PROCESSOR_NULL);
}

static boolean_t
sched_grrr_processor_queue_empty(processor_t processor)
{
	boolean_t result;

	result = (processor->grrr_runq.count == 0);

	return result;
}

static boolean_t
sched_grrr_processor_queue_has_priority(processor_t processor,
    int priority,
    boolean_t gte __unused)
{
	grrr_run_queue_t rq = &processor->grrr_runq;
	unsigned int i;

	i = grrr_group_mapping[grrr_priority_mapping[priority]];
	for ( ; i < NUM_GRRR_GROUPS; i++) {
		if (rq->groups[i].count > 0)
			return (TRUE);
	}

	return (FALSE);
}

/* Implement sched_preempt_pri in code */
static boolean_t
sched_grrr_priority_is_urgent(int priority)
{
	if (priority <= BASEPRI_FOREGROUND)
		return FALSE;

	if (priority < MINPRI_KERNEL)
		return TRUE;

	if (priority >= BASEPRI_PREEMPT)
		return TRUE;

	return FALSE;
}

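/*
 * In other words, a priority is urgent if it falls in the elevated
 * user band above BASEPRI_FOREGROUND but below MINPRI_KERNEL, or at
 * BASEPRI_PREEMPT and above; kernel priorities between MINPRI_KERNEL
 * and BASEPRI_PREEMPT are deliberately not treated as urgent.
 */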
static ast_t
sched_grrr_processor_csw_check(processor_t processor)
{
	int count;

	count = sched_grrr_processor_runq_count(processor);

	if (count > 0) {
		return AST_PREEMPT;
	}

	return AST_NONE;
}

static uint32_t
sched_grrr_initial_quantum_size(thread_t thread __unused)
{
	return grrr_quantum;
}

static sched_mode_t
sched_grrr_initial_thread_sched_mode(task_t parent_task)
{
	if (parent_task == kernel_task)
		return TH_MODE_FIXED;
	else
		return TH_MODE_TIMESHARE;
}

static boolean_t
sched_grrr_can_update_priority(thread_t thread __unused)
{
	return FALSE;
}

static void
sched_grrr_update_priority(thread_t thread __unused)
{
}

static void
sched_grrr_lightweight_update_priority(thread_t thread __unused)
{
	return;
}

static void
sched_grrr_quantum_expire(
    thread_t thread __unused)
{
}

static boolean_t
sched_grrr_should_current_thread_rechoose_processor(processor_t processor __unused)
{
	return (TRUE);
}

static int
sched_grrr_processor_runq_count(processor_t processor)
{
	return processor->grrr_runq.count;
}

static uint64_t
sched_grrr_processor_runq_stats_count_sum(processor_t processor)
{
	return processor->grrr_runq.runq_stats.count_sum;
}

static int
sched_grrr_processor_bound_count(__unused processor_t processor)
{
	return 0;
}

static void
sched_grrr_thread_update_scan(void)
{
}

#endif /* defined(CONFIG_SCHED_GRRR) */

#if defined(CONFIG_SCHED_GRRR_CORE)

static void
grrr_priority_mapping_init(void)
{
	unsigned int i;

	/* Map 0->0 up to 10->20 */
	for (i = 0; i <= 10; i++) {
		grrr_priority_mapping[i] = 2 * i;
	}

	/* Map user priorities 11->33 up to 51->153 */
	for (i = 11; i <= 51; i++) {
		grrr_priority_mapping[i] = 3 * i;
	}

	/* Map high priorities 52->180 up to 127->255 */
	for (i = 52; i <= 127; i++) {
		grrr_priority_mapping[i] = 128 + i;
	}

	for (i = 0; i < NUM_GRRR_PROPORTIONAL_PRIORITIES; i++) {
#if 0
		unsigned j, k;
		/* Calculate log(i); */
		for (j = 0, k = 1; k <= i; j++, k *= 2);
#endif

		/* Groups of 4 */
		grrr_group_mapping[i] = i >> 2;
	}
}

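/*
 * Worked example of the mapping above: a thread at Mach priority 31
 * maps to proportional priority 3 * 31 = 93 and group 93 >> 2 = 23,
 * while priority 51 maps to 153 and group 38; group weights (the sum
 * of member proportional priorities) then drive the intergroup
 * ratios computed below.
 */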
static thread_t
grrr_intragroup_schedule(grrr_group_t group)
{
	thread_t thread;

	if (group->count == 0) {
		return THREAD_NULL;
	}

	thread = group->current_client;
	if (thread == THREAD_NULL) {
		thread = (thread_t)(void *)queue_first(&group->clients);
	}

	if (1 /* deficit */) {
		group->current_client = (thread_t)(void *)queue_next((queue_entry_t)thread);
		if (queue_end(&group->clients, (queue_entry_t)group->current_client)) {
			group->current_client = (thread_t)(void *)queue_first(&group->clients);
		}

		thread = group->current_client;
	}

	return thread;
}

static thread_t
grrr_intergroup_schedule(grrr_run_queue_t rq)
{
	thread_t thread;
	grrr_group_t group;

	if (rq->count == 0) {
		return THREAD_NULL;
	}

	group = rq->current_group;

	if (group == GRRR_GROUP_NULL) {
		group = (grrr_group_t)queue_first(&rq->sorted_group_list);
	}

	thread = grrr_intragroup_schedule(group);

	if ((group->work >= (UINT32_MAX - 256)) || (rq->last_rescale_tick != grrr_rescale_tick)) {
		grrr_rescale_work(rq);
	}
	group->work++;

	if (queue_end(&rq->sorted_group_list, queue_next((queue_entry_t)group))) {
		/* last group, go back to beginning */
		group = (grrr_group_t)queue_first(&rq->sorted_group_list);
	} else {
		grrr_group_t nextgroup = (grrr_group_t)queue_next((queue_entry_t)group);
		uint64_t orderleft, orderright;

		/*
		 * The well-ordering condition for intergroup selection is:
		 *
		 * (group->work+1) / (nextgroup->work+1) > (group->weight) / (nextgroup->weight)
		 *
		 * Multiply both sides by their denominators to avoid division.
		 */
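		/*
		 * Example with hypothetical values: work 7 vs. 3 and
		 * weights 93 vs. 46 give orderleft = 8 * 46 = 368 and
		 * orderright = 4 * 93 = 372, so the check fails and
		 * selection restarts at the head of the sorted list.
		 */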
		orderleft = (group->work + 1) * ((uint64_t)nextgroup->weight);
		orderright = (nextgroup->work + 1) * ((uint64_t)group->weight);
		if (orderleft > orderright) {
			group = nextgroup;
		} else {
			group = (grrr_group_t)queue_first(&rq->sorted_group_list);
		}
	}

	rq->current_group = group;

	return thread;
}

static void
grrr_runqueue_init(grrr_run_queue_t runq)
{
	grrr_group_index_t index;

	runq->count = 0;

	for (index = 0; index < NUM_GRRR_GROUPS; index++) {
		unsigned int prisearch;

		for (prisearch = 0;
		    prisearch < NUM_GRRR_PROPORTIONAL_PRIORITIES;
		    prisearch++) {
			if (grrr_group_mapping[prisearch] == index) {
				runq->groups[index].minpriority = (grrr_proportional_priority_t)prisearch;
				break;
			}
		}

		runq->groups[index].index = index;

		queue_init(&runq->groups[index].clients);
		runq->groups[index].count = 0;
		runq->groups[index].weight = 0;
		runq->groups[index].work = 0;
		runq->groups[index].current_client = THREAD_NULL;
	}

	queue_init(&runq->sorted_group_list);
	runq->weight = 0;
	runq->current_group = GRRR_GROUP_NULL;
}

static void
grrr_rescale_work(grrr_run_queue_t rq)
{
	grrr_group_index_t index;

	/* avoid overflow by scaling by 1/8th */
	for (index = 0; index < NUM_GRRR_GROUPS; index++) {
		rq->groups[index].work >>= 3;
	}

	rq->last_rescale_tick = grrr_rescale_tick;
}

static boolean_t
grrr_enqueue(
    grrr_run_queue_t rq,
    thread_t thread)
{
	grrr_proportional_priority_t gpriority;
	grrr_group_index_t gindex;
	grrr_group_t group;

	gpriority = grrr_priority_mapping[thread->sched_pri];
	gindex = grrr_group_mapping[gpriority];
	group = &rq->groups[gindex];

#if 0
	thread->grrr_deficit = 0;
#endif

	if (group->count == 0) {
		/* Empty group, this is the first client */
		enqueue_tail(&group->clients, (queue_entry_t)thread);
		group->count = 1;
		group->weight = gpriority;
		group->current_client = thread;
	} else {
		/* Insert before the current client */
		if (group->current_client == THREAD_NULL ||
		    queue_first(&group->clients) == (queue_entry_t)group->current_client) {
			enqueue_head(&group->clients, (queue_entry_t)thread);
		} else {
			insque((queue_entry_t)thread, queue_prev((queue_entry_t)group->current_client));
		}
		SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
		group->count++;
		group->weight += gpriority;

		/* Since there was already a client, this is on the per-processor sorted list already */
		remqueue((queue_entry_t)group);
	}

	grrr_sorted_list_insert_group(rq, group);

	rq->count++;
	rq->weight += gpriority;

	return (FALSE);
}

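/*
 * grrr_select() is the dequeue side: intergroup selection picks the
 * next group, intragroup selection picks that group's next client,
 * and the winner is unlinked from its group, which is then re-sorted
 * under its reduced weight.
 */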
static thread_t
grrr_select(grrr_run_queue_t rq)
{
	thread_t thread;

	thread = grrr_intergroup_schedule(rq);
	if (thread != THREAD_NULL) {
		grrr_proportional_priority_t gpriority;
		grrr_group_index_t gindex;
		grrr_group_t group;

		gpriority = grrr_priority_mapping[thread->sched_pri];
		gindex = grrr_group_mapping[gpriority];
		group = &rq->groups[gindex];

		remqueue((queue_entry_t)thread);
		SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
		group->count--;
		group->weight -= gpriority;
		if (group->current_client == thread) {
			group->current_client = THREAD_NULL;
		}

		remqueue((queue_entry_t)group);
		if (group->count == 0) {
			if (rq->current_group == group) {
				rq->current_group = GRRR_GROUP_NULL;
			}
		} else {
			/* Need to re-insert in sorted location */
			grrr_sorted_list_insert_group(rq, group);
		}

		rq->count--;
		rq->weight -= gpriority;

		thread->runq = PROCESSOR_NULL;
	}

	return (thread);
}

static void
grrr_remove(
    grrr_run_queue_t rq,
    thread_t thread)
{
	grrr_proportional_priority_t gpriority;
	grrr_group_index_t gindex;
	grrr_group_t group;

	gpriority = grrr_priority_mapping[thread->sched_pri];
	gindex = grrr_group_mapping[gpriority];
	group = &rq->groups[gindex];

	remqueue((queue_entry_t)thread);
	SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
	group->count--;
	group->weight -= gpriority;
	if (group->current_client == thread) {
		group->current_client = THREAD_NULL;
	}

	remqueue((queue_entry_t)group);
	if (group->count == 0) {
		if (rq->current_group == group) {
			rq->current_group = GRRR_GROUP_NULL;
		}
	} else {
		/* Need to re-insert in sorted location */
		grrr_sorted_list_insert_group(rq, group);
	}

	rq->count--;
	rq->weight -= gpriority;

	thread->runq = PROCESSOR_NULL;
}

static void
grrr_sorted_list_insert_group(grrr_run_queue_t rq,
    grrr_group_t group)
{
	/* Simple insertion sort */
	if (queue_empty(&rq->sorted_group_list)) {
		enqueue_tail(&rq->sorted_group_list, (queue_entry_t)group);
	} else {
		grrr_group_t search_group;

		/*
		 * Start searching from the head (heaviest weight) for the
		 * first element less than us, so we can insert before it.
		 */
		search_group = (grrr_group_t)queue_first(&rq->sorted_group_list);
		while (!queue_end(&rq->sorted_group_list, (queue_entry_t)search_group)) {
			if (search_group->weight < group->weight) {
				/* we should be before this */
				search_group = (grrr_group_t)queue_prev((queue_entry_t)search_group);
				break;
			} else if (search_group->weight == group->weight) {
				/* Use group index as a tie breaker */
				if (search_group->index < group->index) {
					search_group = (grrr_group_t)queue_prev((queue_entry_t)search_group);
					break;
				}
			}

			/* otherwise, our weight is too small, keep going */
			search_group = (grrr_group_t)queue_next((queue_entry_t)search_group);
		}

		if (queue_end(&rq->sorted_group_list, (queue_entry_t)search_group)) {
			enqueue_tail(&rq->sorted_group_list, (queue_entry_t)group);
		} else {
			insque((queue_entry_t)group, (queue_entry_t)search_group);
		}
	}
}

#endif /* defined(CONFIG_SCHED_GRRR_CORE) */

#if defined(CONFIG_SCHED_GRRR)

static struct grrr_run_queue fs_grrr_runq;
#define FS_GRRR_RUNQ ((processor_t)-2)
decl_simple_lock_data(static, fs_grrr_lock);

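/*
 * FS_GRRR_RUNQ is a sentinel stored in thread->runq to mark a thread
 * as enqueued on this global fairshare queue rather than on any real
 * processor; it only has to compare unequal to genuine processor_t
 * pointers and to PROCESSOR_NULL.
 */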
void
sched_grrr_fairshare_init(void)
{
	grrr_priority_mapping_init();

	simple_lock_init(&fs_grrr_lock, 0);
	grrr_runqueue_init(&fs_grrr_runq);
}

int
sched_grrr_fairshare_runq_count(void)
{
	return fs_grrr_runq.count;
}

uint64_t
sched_grrr_fairshare_runq_stats_count_sum(void)
{
	return fs_grrr_runq.runq_stats.count_sum;
}

void
sched_grrr_fairshare_enqueue(thread_t thread)
{
	simple_lock(&fs_grrr_lock);

	(void)grrr_enqueue(&fs_grrr_runq, thread);

	thread->runq = FS_GRRR_RUNQ;

	simple_unlock(&fs_grrr_lock);
}

thread_t
sched_grrr_fairshare_dequeue(void)
{
	thread_t thread;

	simple_lock(&fs_grrr_lock);
	if (fs_grrr_runq.count > 0) {
		thread = grrr_select(&fs_grrr_runq);

		simple_unlock(&fs_grrr_lock);

		return (thread);
	}
	simple_unlock(&fs_grrr_lock);

	return THREAD_NULL;
}

boolean_t
sched_grrr_fairshare_queue_remove(thread_t thread)
{
	simple_lock(&fs_grrr_lock);

	if (FS_GRRR_RUNQ == thread->runq) {
		grrr_remove(&fs_grrr_runq, thread);

		simple_unlock(&fs_grrr_lock);
		return (TRUE);
	} else {
		/*
		 * The thread left the run queue before we could
		 * lock the run queue.
		 */
		assert(thread->runq == PROCESSOR_NULL);
		simple_unlock(&fs_grrr_lock);
		return (FALSE);
	}
}

#endif /* defined(CONFIG_SCHED_GRRR) */