/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>
#include <mach/thread_act.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>

#include <kern/kern_types.h>
#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <mach/sdt.h>

#include <sys/kdebug.h>

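/*
 * GRRR is an experimental proportional-share scheduling policy,
 * presumably named for the "Group Ratio Round-Robin" family of
 * algorithms. Runnable threads are bucketed by priority into a small
 * number of groups; a ratio rule over per-group work and weight picks
 * the next group, and a round-robin pass picks the next client within
 * it. The CONFIG_SCHED_GRRR_CORE section holds the policy mechanics;
 * CONFIG_SCHED_GRRR supplies the sched_dispatch_table glue.
 */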
#if defined(CONFIG_SCHED_GRRR_CORE)

static void
grrr_priority_mapping_init(void);

static boolean_t
grrr_enqueue(
    grrr_run_queue_t rq,
    thread_t thread);

static thread_t
grrr_select(
    grrr_run_queue_t rq);

static void
grrr_remove(
    grrr_run_queue_t rq,
    thread_t thread);

static void
grrr_sorted_list_insert_group(grrr_run_queue_t rq,
    grrr_group_t group);

static void
grrr_rescale_work(grrr_run_queue_t rq);

static void
grrr_runqueue_init(grrr_run_queue_t runq);

/* Map Mach priorities to ones suitable for proportional sharing */
static grrr_proportional_priority_t grrr_priority_mapping[NRQS];

/* Map each proportional priority to its group */
static grrr_group_index_t grrr_group_mapping[NUM_GRRR_PROPORTIONAL_PRIORITIES];

uint32_t grrr_rescale_tick;

#endif /* defined(CONFIG_SCHED_GRRR_CORE) */

#if defined(CONFIG_SCHED_GRRR)

static void
sched_grrr_init(void);

static void
sched_grrr_timebase_init(void);

static void
sched_grrr_processor_init(processor_t processor);

static void
sched_grrr_pset_init(processor_set_t pset);

static void
sched_grrr_maintenance_continuation(void);

static thread_t
sched_grrr_choose_thread(processor_t processor,
    int priority,
    ast_t reason);

static thread_t
sched_grrr_steal_thread(processor_set_t pset);

static int
sched_grrr_compute_priority(thread_t thread);

static processor_t
sched_grrr_choose_processor(processor_set_t pset,
    processor_t processor,
    thread_t thread);

static boolean_t
sched_grrr_processor_enqueue(
    processor_t processor,
    thread_t thread,
    integer_t options);

static void
sched_grrr_processor_queue_shutdown(
    processor_t processor);

static boolean_t
sched_grrr_processor_queue_remove(
    processor_t processor,
    thread_t thread);

static boolean_t
sched_grrr_processor_queue_empty(processor_t processor);

static boolean_t
sched_grrr_processor_queue_has_priority(processor_t processor,
    int priority,
    boolean_t gte);

static boolean_t
sched_grrr_priority_is_urgent(int priority);

static ast_t
sched_grrr_processor_csw_check(processor_t processor);

static uint32_t
sched_grrr_initial_quantum_size(thread_t thread);

static sched_mode_t
sched_grrr_initial_thread_sched_mode(task_t parent_task);

static boolean_t
sched_grrr_can_update_priority(thread_t thread);

static void
sched_grrr_update_priority(thread_t thread);

static void
sched_grrr_lightweight_update_priority(thread_t thread);

static int
sched_grrr_processor_runq_count(processor_t processor);

static uint64_t
sched_grrr_processor_runq_stats_count_sum(processor_t processor);

static int
sched_grrr_processor_bound_count(processor_t processor);

static void
sched_grrr_thread_update_scan(sched_update_scan_context_t scan_context);

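/*
 * Dispatch table wiring the GRRR entry points into the scheduler
 * framework. Note that work stealing and sched groups are disabled
 * for this policy.
 */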
const struct sched_dispatch_table sched_grrr_dispatch = {
    .sched_name = "grrr",
    .init = sched_grrr_init,
    .timebase_init = sched_grrr_timebase_init,
    .processor_init = sched_grrr_processor_init,
    .pset_init = sched_grrr_pset_init,
    .maintenance_continuation = sched_grrr_maintenance_continuation,
    .choose_thread = sched_grrr_choose_thread,
    .steal_thread_enabled = FALSE,
    .steal_thread = sched_grrr_steal_thread,
    .compute_timeshare_priority = sched_grrr_compute_priority,
    .choose_processor = sched_grrr_choose_processor,
    .processor_enqueue = sched_grrr_processor_enqueue,
    .processor_queue_shutdown = sched_grrr_processor_queue_shutdown,
    .processor_queue_remove = sched_grrr_processor_queue_remove,
    .processor_queue_empty = sched_grrr_processor_queue_empty,
    .priority_is_urgent = sched_grrr_priority_is_urgent,
    .processor_csw_check = sched_grrr_processor_csw_check,
    .processor_queue_has_priority = sched_grrr_processor_queue_has_priority,
    .initial_quantum_size = sched_grrr_initial_quantum_size,
    .initial_thread_sched_mode = sched_grrr_initial_thread_sched_mode,
    .can_update_priority = sched_grrr_can_update_priority,
    .update_priority = sched_grrr_update_priority,
    .lightweight_update_priority = sched_grrr_lightweight_update_priority,
    .quantum_expire = sched_default_quantum_expire,
    .processor_runq_count = sched_grrr_processor_runq_count,
    .processor_runq_stats_count_sum = sched_grrr_processor_runq_stats_count_sum,
    .processor_bound_count = sched_grrr_processor_bound_count,
    .thread_update_scan = sched_grrr_thread_update_scan,
    .direct_dispatch_to_idle_processors = TRUE,
    .multiple_psets_enabled = TRUE,
    .sched_groups_enabled = FALSE,
};

extern int max_unsafe_quanta;

static uint32_t grrr_quantum_us;
static uint32_t grrr_quantum;

static uint64_t sched_grrr_tick_deadline;

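/*
 * Derive the timeslice length from the preemption rate; for example,
 * the fallback rate of 100 preemptions per second yields a 10,000 us
 * (10 ms) quantum.
 */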
static void
sched_grrr_init(void)
{
    if (default_preemption_rate < 1)
        default_preemption_rate = 100;
    grrr_quantum_us = (1000 * 1000) / default_preemption_rate;

    printf("standard grrr timeslicing quantum is %d us\n", grrr_quantum_us);

    grrr_priority_mapping_init();
}

static void
sched_grrr_timebase_init(void)
{
    uint64_t abstime;

    /* standard timeslicing quantum */
    clock_interval_to_absolutetime_interval(
        grrr_quantum_us, NSEC_PER_USEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    grrr_quantum = (uint32_t)abstime;

    thread_depress_time = 1 * grrr_quantum;
    default_timeshare_computation = grrr_quantum / 2;
    default_timeshare_constraint = grrr_quantum;

    max_unsafe_computation = max_unsafe_quanta * grrr_quantum;
    sched_safe_duration = 2 * max_unsafe_quanta * grrr_quantum;
}

static void
sched_grrr_processor_init(processor_t processor)
{
    grrr_runqueue_init(&processor->grrr_runq);
}

static void
sched_grrr_pset_init(processor_set_t pset __unused)
{
}

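/*
 * Periodic maintenance: bump the rescale tick (used to decay per-group
 * work counters), recompute the global load averages, and re-arm this
 * continuation ten seconds ahead.
 */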
static void
sched_grrr_maintenance_continuation(void)
{
    uint64_t abstime = mach_absolute_time();

    grrr_rescale_tick++;

    /*
     * Compute various averages.
     */
    compute_averages(1);

    if (sched_grrr_tick_deadline == 0)
        sched_grrr_tick_deadline = abstime;

    clock_deadline_for_periodic_event(10 * sched_one_second_interval, abstime,
        &sched_grrr_tick_deadline);

    assert_wait_deadline((event_t)sched_grrr_maintenance_continuation, THREAD_UNINT, sched_grrr_tick_deadline);
    thread_block((thread_continue_t)sched_grrr_maintenance_continuation);
    /*NOTREACHED*/
}

static thread_t
sched_grrr_choose_thread(processor_t processor,
    int priority __unused,
    ast_t reason __unused)
{
    grrr_run_queue_t rq = &processor->grrr_runq;

    return grrr_select(rq);
}

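/*
 * GRRR does not support work stealing; satisfy the interface by
 * dropping the pset lock held on entry and returning no thread.
 */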
static thread_t
sched_grrr_steal_thread(processor_set_t pset)
{
    pset_unlock(pset);

    return (THREAD_NULL);
}

static int
sched_grrr_compute_priority(thread_t thread)
{
    return thread->base_pri;
}

static processor_t
sched_grrr_choose_processor(processor_set_t pset,
    processor_t processor,
    thread_t thread)
{
    return choose_processor(pset, processor, thread);
}

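/*
 * Place the thread on this processor's GRRR run queue, recording the
 * processor in thread->runq so a later remove can find it.
 */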
static boolean_t
sched_grrr_processor_enqueue(
    processor_t processor,
    thread_t thread,
    integer_t options __unused)
{
    grrr_run_queue_t rq = &processor->grrr_runq;
    boolean_t result;

    result = grrr_enqueue(rq, thread);

    thread->runq = processor;

    return result;
}

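/*
 * Drain this processor's run queue for shutdown: bound threads are
 * re-enqueued locally (they cannot migrate), while unbound threads are
 * collected and redistributed via thread_setrun() once the pset lock
 * has been dropped.
 */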
static void
sched_grrr_processor_queue_shutdown(
    processor_t processor)
{
    processor_set_t pset = processor->processor_set;
    thread_t thread;
    queue_head_t tqueue, bqueue;

    queue_init(&tqueue);
    queue_init(&bqueue);

    while ((thread = sched_grrr_choose_thread(processor, IDLEPRI, AST_NONE)) != THREAD_NULL) {
        if (thread->bound_processor == PROCESSOR_NULL) {
            enqueue_tail(&tqueue, (queue_entry_t)thread);
        } else {
            enqueue_tail(&bqueue, (queue_entry_t)thread);
        }
    }

    while ((thread = (thread_t)(void *)dequeue_head(&bqueue)) != THREAD_NULL) {
        sched_grrr_processor_enqueue(processor, thread, SCHED_TAILQ);
    }

    pset_unlock(pset);

    while ((thread = (thread_t)(void *)dequeue_head(&tqueue)) != THREAD_NULL) {
        thread_lock(thread);

        thread_setrun(thread, SCHED_TAILQ);

        thread_unlock(thread);
    }
}

static boolean_t
sched_grrr_processor_queue_remove(
    processor_t processor,
    thread_t thread)
{
    void *rqlock;

    rqlock = &processor->processor_set->sched_lock;
    simple_lock(rqlock);

    if (processor == thread->runq) {
        /*
         * Thread is on a run queue and we have a lock on
         * that run queue.
         */
        grrr_run_queue_t rq = &processor->grrr_runq;

        grrr_remove(rq, thread);
    } else {
        /*
         * The thread left the run queue before we could
         * lock the run queue.
         */
        assert(thread->runq == PROCESSOR_NULL);
        processor = PROCESSOR_NULL;
    }

    simple_unlock(rqlock);

    return (processor != PROCESSOR_NULL);
}

static boolean_t
sched_grrr_processor_queue_empty(processor_t processor)
{
    boolean_t result;

    result = (processor->grrr_runq.count == 0);

    return result;
}

static boolean_t
sched_grrr_processor_queue_has_priority(processor_t processor,
    int priority,
    boolean_t gte __unused)
{
    grrr_run_queue_t rq = &processor->grrr_runq;
    unsigned int i;

    i = grrr_group_mapping[grrr_priority_mapping[priority]];
    for ( ; i < NUM_GRRR_GROUPS; i++) {
        if (rq->groups[i].count > 0)
            return (TRUE);
    }

    return (FALSE);
}

/* Implement sched_preempt_pri in code */
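/*
 * Urgent priorities preempt immediately instead of waiting for the
 * quantum to expire: anything above BASEPRI_FOREGROUND but below
 * MINPRI_KERNEL, and anything at or above BASEPRI_PREEMPT.
 */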
static boolean_t
sched_grrr_priority_is_urgent(int priority)
{
    if (priority <= BASEPRI_FOREGROUND)
        return FALSE;

    if (priority < MINPRI_KERNEL)
        return TRUE;

    if (priority >= BASEPRI_PREEMPT)
        return TRUE;

    return FALSE;
}

static ast_t
sched_grrr_processor_csw_check(processor_t processor)
{
    int count;

    count = sched_grrr_processor_runq_count(processor);

    if (count > 0) {
        return AST_PREEMPT;
    }

    return AST_NONE;
}

static uint32_t
sched_grrr_initial_quantum_size(thread_t thread __unused)
{
    return grrr_quantum;
}

static sched_mode_t
sched_grrr_initial_thread_sched_mode(task_t parent_task)
{
    if (parent_task == kernel_task)
        return TH_MODE_FIXED;
    else
        return TH_MODE_TIMESHARE;
}

static boolean_t
sched_grrr_can_update_priority(thread_t thread __unused)
{
    return FALSE;
}

static void
sched_grrr_update_priority(thread_t thread __unused)
{
}

static void
sched_grrr_lightweight_update_priority(thread_t thread __unused)
{
}

static int
sched_grrr_processor_runq_count(processor_t processor)
{
    return processor->grrr_runq.count;
}

static uint64_t
sched_grrr_processor_runq_stats_count_sum(processor_t processor)
{
    return processor->grrr_runq.runq_stats.count_sum;
}

static int
sched_grrr_processor_bound_count(__unused processor_t processor)
{
    return 0;
}

static void
sched_grrr_thread_update_scan(__unused sched_update_scan_context_t scan_context)
{
}

#endif /* defined(CONFIG_SCHED_GRRR) */

#if defined(CONFIG_SCHED_GRRR_CORE)

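/*
 * Build the two static lookup tables: a piecewise-linear map from the
 * NRQS Mach priorities onto the proportional-share weight range (so
 * that, e.g., the default user priority 31 maps to weight 3*31 = 93),
 * and a map from each weight to its group of four.
 */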
static void
grrr_priority_mapping_init(void)
{
    unsigned int i;

    /* Map 0->0 up to 10->20 */
    for (i = 0; i <= 10; i++) {
        grrr_priority_mapping[i] = 2 * i;
    }

    /* Map user priorities 11->33 up to 51->153 */
    for (i = 11; i <= 51; i++) {
        grrr_priority_mapping[i] = 3 * i;
    }

    /* Map high priorities 52->180 up to 127->255 */
    for (i = 52; i <= 127; i++) {
        grrr_priority_mapping[i] = 128 + i;
    }

    for (i = 0; i < NUM_GRRR_PROPORTIONAL_PRIORITIES; i++) {
#if 0
        unsigned j, k;
        /* Calculate log(i); */
        for (j = 0, k = 1; k <= i; j++, k *= 2);
#endif

        /* Groups of 4 */
        grrr_group_mapping[i] = i >> 2;
    }
}

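/*
 * Pick the next client within a group by simple round-robin, advancing
 * current_client and wrapping at the end of the client list.
 */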
static thread_t
grrr_intragroup_schedule(grrr_group_t group)
{
    thread_t thread;

    if (group->count == 0) {
        return THREAD_NULL;
    }

    thread = group->current_client;
    if (thread == THREAD_NULL) {
        thread = (thread_t)(void *)queue_first(&group->clients);
    }

    if (1 /* deficit */) {
        group->current_client = (thread_t)(void *)queue_next((queue_entry_t)thread);
        if (queue_end(&group->clients, (queue_entry_t)group->current_client)) {
            group->current_client = (thread_t)(void *)queue_first(&group->clients);
        }

        thread = group->current_client;
    }

    return thread;
}

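/*
 * Pick the group to run next. Groups sit on a list sorted by weight,
 * heaviest first; after charging the current group one unit of work,
 * advance to the next group if the current one has done proportionally
 * more work than it, otherwise restart from the heaviest group.
 */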
static thread_t
grrr_intergroup_schedule(grrr_run_queue_t rq)
{
    thread_t thread;
    grrr_group_t group;

    if (rq->count == 0) {
        return THREAD_NULL;
    }

    group = rq->current_group;

    if (group == GRRR_GROUP_NULL) {
        group = (grrr_group_t)queue_first(&rq->sorted_group_list);
    }

    thread = grrr_intragroup_schedule(group);

    if ((group->work >= (UINT32_MAX - 256)) || (rq->last_rescale_tick != grrr_rescale_tick)) {
        grrr_rescale_work(rq);
    }
    group->work++;

    if (queue_end(&rq->sorted_group_list, queue_next((queue_entry_t)group))) {
        /* Last group; go back to the beginning */
        group = (grrr_group_t)queue_first(&rq->sorted_group_list);
    } else {
        grrr_group_t nextgroup = (grrr_group_t)queue_next((queue_entry_t)group);
        uint64_t orderleft, orderright;

        /*
         * The well-ordering condition for intergroup selection is:
         *
         * (group->work+1) / (nextgroup->work+1) > (group->weight) / (nextgroup->weight)
         *
         * Multiply both sides by their denominators to avoid division.
         */
        orderleft = (group->work + 1) * ((uint64_t)nextgroup->weight);
        orderright = (nextgroup->work + 1) * ((uint64_t)group->weight);
        if (orderleft > orderright) {
            group = nextgroup;
        } else {
            group = (grrr_group_t)queue_first(&rq->sorted_group_list);
        }
    }

    rq->current_group = group;

    return thread;
}

static void
grrr_runqueue_init(grrr_run_queue_t runq)
{
    grrr_group_index_t index;

    runq->count = 0;

    for (index = 0; index < NUM_GRRR_GROUPS; index++) {
        unsigned int prisearch;

        for (prisearch = 0;
             prisearch < NUM_GRRR_PROPORTIONAL_PRIORITIES;
             prisearch++) {
            if (grrr_group_mapping[prisearch] == index) {
                runq->groups[index].minpriority = (grrr_proportional_priority_t)prisearch;
                break;
            }
        }

        runq->groups[index].index = index;

        queue_init(&runq->groups[index].clients);
        runq->groups[index].count = 0;
        runq->groups[index].weight = 0;
        runq->groups[index].work = 0;
        runq->groups[index].current_client = THREAD_NULL;
    }

    queue_init(&runq->sorted_group_list);
    runq->weight = 0;
    runq->current_group = GRRR_GROUP_NULL;
}

static void
grrr_rescale_work(grrr_run_queue_t rq)
{
    grrr_group_index_t index;

    /* Avoid overflow by scaling by 1/8th */
    for (index = 0; index < NUM_GRRR_GROUPS; index++) {
        rq->groups[index].work >>= 3;
    }

    rq->last_rescale_tick = grrr_rescale_tick;
}

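/*
 * Add a thread to the group matching its priority. The first client
 * establishes the group's weight; later clients are inserted just
 * before current_client (the tail of the round-robin order) and their
 * weight accumulates, after which the group is re-sorted into the
 * weight-ordered group list.
 */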
static boolean_t
grrr_enqueue(
    grrr_run_queue_t rq,
    thread_t thread)
{
    grrr_proportional_priority_t gpriority;
    grrr_group_index_t gindex;
    grrr_group_t group;

    gpriority = grrr_priority_mapping[thread->sched_pri];
    gindex = grrr_group_mapping[gpriority];
    group = &rq->groups[gindex];

#if 0
    thread->grrr_deficit = 0;
#endif

    if (group->count == 0) {
        /* Empty group, this is the first client */
        enqueue_tail(&group->clients, (queue_entry_t)thread);
        group->count = 1;
        group->weight = gpriority;
        group->current_client = thread;
    } else {
        /* Insert before the current client */
        if (group->current_client == THREAD_NULL ||
            queue_first(&group->clients) == (queue_entry_t)group->current_client) {
            enqueue_head(&group->clients, (queue_entry_t)thread);
        } else {
            insque((queue_entry_t)thread, queue_prev((queue_entry_t)group->current_client));
        }
        SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
        group->count++;
        group->weight += gpriority;

        /* Since there was already a client, this is on the per-processor sorted list already */
        remqueue((queue_entry_t)group);
    }

    grrr_sorted_list_insert_group(rq, group);

    rq->count++;
    rq->weight += gpriority;

    return (FALSE);
}

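/*
 * Dequeue the next thread to run: let the two-level scheduler pick it,
 * then pull it from its group, fixing up counts, weights, and the
 * sorted group list, and clear its runq hint.
 */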
static thread_t
grrr_select(grrr_run_queue_t rq)
{
    thread_t thread;

    thread = grrr_intergroup_schedule(rq);
    if (thread != THREAD_NULL) {
        grrr_proportional_priority_t gpriority;
        grrr_group_index_t gindex;
        grrr_group_t group;

        gpriority = grrr_priority_mapping[thread->sched_pri];
        gindex = grrr_group_mapping[gpriority];
        group = &rq->groups[gindex];

        remqueue((queue_entry_t)thread);
        SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
        group->count--;
        group->weight -= gpriority;
        if (group->current_client == thread) {
            group->current_client = THREAD_NULL;
        }

        remqueue((queue_entry_t)group);
        if (group->count == 0) {
            if (rq->current_group == group) {
                rq->current_group = GRRR_GROUP_NULL;
            }
        } else {
            /* Need to re-insert in sorted location */
            grrr_sorted_list_insert_group(rq, group);
        }

        rq->count--;
        rq->weight -= gpriority;

        thread->runq = PROCESSOR_NULL;
    }

    return (thread);
}

static void
grrr_remove(
    grrr_run_queue_t rq,
    thread_t thread)
{
    grrr_proportional_priority_t gpriority;
    grrr_group_index_t gindex;
    grrr_group_t group;

    gpriority = grrr_priority_mapping[thread->sched_pri];
    gindex = grrr_group_mapping[gpriority];
    group = &rq->groups[gindex];

    remqueue((queue_entry_t)thread);
    SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
    group->count--;
    group->weight -= gpriority;
    if (group->current_client == thread) {
        group->current_client = THREAD_NULL;
    }

    remqueue((queue_entry_t)group);
    if (group->count == 0) {
        if (rq->current_group == group) {
            rq->current_group = GRRR_GROUP_NULL;
        }
    } else {
        /* Need to re-insert in sorted location */
        grrr_sorted_list_insert_group(rq, group);
    }

    rq->count--;
    rq->weight -= gpriority;

    thread->runq = PROCESSOR_NULL;
}

static void
grrr_sorted_list_insert_group(grrr_run_queue_t rq,
    grrr_group_t group)
{
    /* Simple insertion sort */
    if (queue_empty(&rq->sorted_group_list)) {
        enqueue_tail(&rq->sorted_group_list, (queue_entry_t)group);
    } else {
        grrr_group_t search_group;

        /*
         * Start searching from the head (heaviest weight) for the first
         * element less than us, so we can insert before it.
         */
        search_group = (grrr_group_t)queue_first(&rq->sorted_group_list);
        while (!queue_end(&rq->sorted_group_list, (queue_entry_t)search_group)) {
            if (search_group->weight < group->weight) {
                /* We should be before this */
                search_group = (grrr_group_t)queue_prev((queue_entry_t)search_group);
                break;
            } else if (search_group->weight == group->weight) {
                /* Use group index as a tie breaker */
                if (search_group->index < group->index) {
                    search_group = (grrr_group_t)queue_prev((queue_entry_t)search_group);
                    break;
                }
            }

            /* Otherwise, our weight is too small; keep going */
            search_group = (grrr_group_t)queue_next((queue_entry_t)search_group);
        }

        if (queue_end(&rq->sorted_group_list, (queue_entry_t)search_group)) {
            enqueue_tail(&rq->sorted_group_list, (queue_entry_t)group);
        } else {
            insque((queue_entry_t)group, (queue_entry_t)search_group);
        }
    }
}

#endif /* defined(CONFIG_SCHED_GRRR_CORE) */