/*
 * Copyright (c) 2009-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>
#include <mach/thread_act.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>

#include <kern/kern_types.h>
#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <mach/sdt.h>

#include <sys/kdebug.h>

#if defined(CONFIG_SCHED_GRRR_CORE)

static void
grrr_priority_mapping_init(void);

static boolean_t
grrr_enqueue(
    grrr_run_queue_t rq,
    thread_t thread);

static thread_t
grrr_select(
    grrr_run_queue_t rq);

static void
grrr_remove(
    grrr_run_queue_t rq,
    thread_t thread);

static void
grrr_sorted_list_insert_group(grrr_run_queue_t rq,
    grrr_group_t group);

static void
grrr_rescale_work(grrr_run_queue_t rq);

static void
grrr_runqueue_init(grrr_run_queue_t runq);

/* Map Mach priorities to ones suitable for proportional sharing */
static grrr_proportional_priority_t grrr_priority_mapping[NRQS];

/* Map each proportional priority to its group */
static grrr_group_index_t grrr_group_mapping[NUM_GRRR_PROPORTIONAL_PRIORITIES];

uint32_t grrr_rescale_tick;

#endif /* defined(CONFIG_SCHED_GRRR_CORE) */

#if defined(CONFIG_SCHED_GRRR)

static void
sched_grrr_init(void);

static void
sched_grrr_timebase_init(void);

static void
sched_grrr_processor_init(processor_t processor);

static void
sched_grrr_pset_init(processor_set_t pset);

static void
sched_grrr_maintenance_continuation(void);

static thread_t
sched_grrr_choose_thread(processor_t processor,
    int priority,
    ast_t reason);

static thread_t
sched_grrr_steal_thread(processor_set_t pset);

static int
sched_grrr_compute_priority(thread_t thread);

static processor_t
sched_grrr_choose_processor(processor_set_t pset,
    processor_t processor,
    thread_t thread);

static boolean_t
sched_grrr_processor_enqueue(
    processor_t processor,
    thread_t thread,
    integer_t options);

static void
sched_grrr_processor_queue_shutdown(
    processor_t processor);

static boolean_t
sched_grrr_processor_queue_remove(
    processor_t processor,
    thread_t thread);

static boolean_t
sched_grrr_processor_queue_empty(processor_t processor);

static boolean_t
sched_grrr_processor_queue_has_priority(processor_t processor,
    int priority,
    boolean_t gte);

static boolean_t
sched_grrr_priority_is_urgent(int priority);

static ast_t
sched_grrr_processor_csw_check(processor_t processor);

static uint32_t
sched_grrr_initial_quantum_size(thread_t thread);

static sched_mode_t
sched_grrr_initial_thread_sched_mode(task_t parent_task);

static boolean_t
sched_grrr_can_update_priority(thread_t thread);

static void
sched_grrr_update_priority(thread_t thread);

static void
sched_grrr_lightweight_update_priority(thread_t thread);

static int
sched_grrr_processor_runq_count(processor_t processor);

static uint64_t
sched_grrr_processor_runq_stats_count_sum(processor_t processor);

static int
sched_grrr_processor_bound_count(processor_t processor);

static void
sched_grrr_thread_update_scan(sched_update_scan_context_t scan_context);

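/*
 * Scheduler dispatch table for the GRRR policy (apparently "Group Ratio
 * Round-Robin", a proportional-share scheduler).  Each entry points at the
 * GRRR implementation of the corresponding scheduler hook; the kernel
 * selects this table at boot when the GRRR scheduler is configured.
 */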
const struct sched_dispatch_table sched_grrr_dispatch = {
    .sched_name = "grrr",
    .init = sched_grrr_init,
    .timebase_init = sched_grrr_timebase_init,
    .processor_init = sched_grrr_processor_init,
    .pset_init = sched_grrr_pset_init,
    .maintenance_continuation = sched_grrr_maintenance_continuation,
    .choose_thread = sched_grrr_choose_thread,
    .steal_thread_enabled = FALSE,
    .steal_thread = sched_grrr_steal_thread,
    .compute_timeshare_priority = sched_grrr_compute_priority,
    .choose_processor = sched_grrr_choose_processor,
    .processor_enqueue = sched_grrr_processor_enqueue,
    .processor_queue_shutdown = sched_grrr_processor_queue_shutdown,
    .processor_queue_remove = sched_grrr_processor_queue_remove,
    .processor_queue_empty = sched_grrr_processor_queue_empty,
    .priority_is_urgent = sched_grrr_priority_is_urgent,
    .processor_csw_check = sched_grrr_processor_csw_check,
    .processor_queue_has_priority = sched_grrr_processor_queue_has_priority,
    .initial_quantum_size = sched_grrr_initial_quantum_size,
    .initial_thread_sched_mode = sched_grrr_initial_thread_sched_mode,
    .can_update_priority = sched_grrr_can_update_priority,
    .update_priority = sched_grrr_update_priority,
    .lightweight_update_priority = sched_grrr_lightweight_update_priority,
    .quantum_expire = sched_default_quantum_expire,
    .processor_runq_count = sched_grrr_processor_runq_count,
    .processor_runq_stats_count_sum = sched_grrr_processor_runq_stats_count_sum,
    .processor_bound_count = sched_grrr_processor_bound_count,
    .thread_update_scan = sched_grrr_thread_update_scan,
    .direct_dispatch_to_idle_processors = TRUE,
    .multiple_psets_enabled = TRUE,
    .sched_groups_enabled = FALSE,
};

extern int max_unsafe_quanta;

static uint32_t grrr_quantum_us;
static uint32_t grrr_quantum;

static uint64_t sched_grrr_tick_deadline;

static void
sched_grrr_init(void)
{
    if (default_preemption_rate < 1)
        default_preemption_rate = 100;
    grrr_quantum_us = (1000 * 1000) / default_preemption_rate;
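    /* e.g. the default preemption rate of 100 yields (1000 * 1000) / 100 = 10000 us (10 ms) */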

    printf("standard grrr timeslicing quantum is %d us\n", grrr_quantum_us);

    grrr_priority_mapping_init();
}

static void
sched_grrr_timebase_init(void)
{
    uint64_t abstime;

    /* standard timeslicing quantum */
    clock_interval_to_absolutetime_interval(
        grrr_quantum_us, NSEC_PER_USEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    grrr_quantum = (uint32_t)abstime;

    thread_depress_time = 1 * grrr_quantum;
    default_timeshare_computation = grrr_quantum / 2;
    default_timeshare_constraint = grrr_quantum;

    max_unsafe_computation = max_unsafe_quanta * grrr_quantum;
    sched_safe_duration = 2 * max_unsafe_quanta * grrr_quantum;
}

static void
sched_grrr_processor_init(processor_t processor)
{
    grrr_runqueue_init(&processor->grrr_runq);
}

static void
sched_grrr_pset_init(processor_set_t pset __unused)
{
}

static void
sched_grrr_maintenance_continuation(void)
{
    uint64_t abstime = mach_absolute_time();

    grrr_rescale_tick++;

    /*
     * Compute various averages.
     */
    compute_averages(1);

    if (sched_grrr_tick_deadline == 0)
        sched_grrr_tick_deadline = abstime;

    clock_deadline_for_periodic_event(10 * sched_one_second_interval, abstime,
        &sched_grrr_tick_deadline);

    assert_wait_deadline((event_t)sched_grrr_maintenance_continuation, THREAD_UNINT, sched_grrr_tick_deadline);
    thread_block((thread_continue_t)sched_grrr_maintenance_continuation);
    /*NOTREACHED*/
}

static thread_t
sched_grrr_choose_thread(processor_t processor,
    int priority __unused,
    ast_t reason __unused)
{
    grrr_run_queue_t rq = &processor->grrr_runq;

    return grrr_select(rq);
}

static thread_t
sched_grrr_steal_thread(processor_set_t pset)
{
    pset_unlock(pset);

    return THREAD_NULL;
}

static int
sched_grrr_compute_priority(thread_t thread)
{
    return thread->base_pri;
}

static processor_t
sched_grrr_choose_processor(processor_set_t pset,
    processor_t processor,
    thread_t thread)
{
    return choose_processor(pset, processor, thread);
}

static boolean_t
sched_grrr_processor_enqueue(
    processor_t processor,
    thread_t thread,
    integer_t options __unused)
{
    grrr_run_queue_t rq = &processor->grrr_runq;
    boolean_t result;

    result = grrr_enqueue(rq, thread);

    thread->runq = processor;

    return result;
}

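/*
 * Drain the run queue of a processor being shut down: threads bound to
 * this processor go back onto its own queue, while unbound threads are
 * collected and, once the pset lock is dropped, redistributed to other
 * processors via thread_setrun().
 */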
static void
sched_grrr_processor_queue_shutdown(
    processor_t processor)
{
    processor_set_t pset = processor->processor_set;
    thread_t thread;
    queue_head_t tqueue, bqueue;

    queue_init(&tqueue);
    queue_init(&bqueue);

    while ((thread = sched_grrr_choose_thread(processor, IDLEPRI, AST_NONE)) != THREAD_NULL) {
        if (thread->bound_processor == PROCESSOR_NULL) {
            enqueue_tail(&tqueue, (queue_entry_t)thread);
        } else {
            enqueue_tail(&bqueue, (queue_entry_t)thread);
        }
    }

    while ((thread = (thread_t)(void *)dequeue_head(&bqueue)) != THREAD_NULL) {
        sched_grrr_processor_enqueue(processor, thread, SCHED_TAILQ);
    }

    pset_unlock(pset);

    while ((thread = (thread_t)(void *)dequeue_head(&tqueue)) != THREAD_NULL) {
        thread_lock(thread);

        thread_setrun(thread, SCHED_TAILQ);

        thread_unlock(thread);
    }
}

static boolean_t
sched_grrr_processor_queue_remove(
    processor_t processor,
    thread_t thread)
{
    processor_set_t pset = processor->processor_set;

    pset_lock(pset);

    if (processor == thread->runq) {
        /*
         * Thread is on a run queue and we have a lock on
         * that run queue.
         */
        grrr_run_queue_t rq = &processor->grrr_runq;

        grrr_remove(rq, thread);
    } else {
        /*
         * The thread left the run queue before we could
         * lock the run queue.
         */
        assert(thread->runq == PROCESSOR_NULL);
        processor = PROCESSOR_NULL;
    }

    pset_unlock(pset);

    return (processor != PROCESSOR_NULL);
}
static boolean_t
sched_grrr_processor_queue_empty(processor_t processor)
{
    return (processor->grrr_runq.count == 0);
}

static boolean_t
sched_grrr_processor_queue_has_priority(processor_t processor,
    int priority,
    boolean_t gte __unused)
{
    grrr_run_queue_t rq = &processor->grrr_runq;
    unsigned int i;

    i = grrr_group_mapping[grrr_priority_mapping[priority]];
    for ( ; i < NUM_GRRR_GROUPS; i++) {
        if (rq->groups[i].count > 0)
            return TRUE;
    }

    return FALSE;
}

/* Implement sched_preempt_pri in code */
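/*
 * A priority is urgent (warrants immediate preemption) when it is above
 * the foreground band but below the kernel band, or at or above
 * BASEPRI_PREEMPT.
 */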
static boolean_t
sched_grrr_priority_is_urgent(int priority)
{
    if (priority <= BASEPRI_FOREGROUND)
        return FALSE;

    if (priority < MINPRI_KERNEL)
        return TRUE;

    if (priority >= BASEPRI_PREEMPT)
        return TRUE;

    return FALSE;
}

static ast_t
sched_grrr_processor_csw_check(processor_t processor)
{
    int count;

    count = sched_grrr_processor_runq_count(processor);

    if (count > 0)
        return AST_PREEMPT;

    return AST_NONE;
}

static uint32_t
sched_grrr_initial_quantum_size(thread_t thread __unused)
{
    return grrr_quantum;
}

static sched_mode_t
sched_grrr_initial_thread_sched_mode(task_t parent_task)
{
    if (parent_task == kernel_task)
        return TH_MODE_FIXED;
    else
        return TH_MODE_TIMESHARE;
}

static boolean_t
sched_grrr_can_update_priority(thread_t thread __unused)
{
    return FALSE;
}

static void
sched_grrr_update_priority(thread_t thread __unused)
{
    return;
}

static void
sched_grrr_lightweight_update_priority(thread_t thread __unused)
{
    return;
}

static int
sched_grrr_processor_runq_count(processor_t processor)
{
    return processor->grrr_runq.count;
}

static uint64_t
sched_grrr_processor_runq_stats_count_sum(processor_t processor)
{
    return processor->grrr_runq.runq_stats.count_sum;
}

static int
sched_grrr_processor_bound_count(__unused processor_t processor)
{
    return 0;
}

static void
sched_grrr_thread_update_scan(__unused sched_update_scan_context_t scan_context)
{
    return;
}

#endif /* defined(CONFIG_SCHED_GRRR) */

#if defined(CONFIG_SCHED_GRRR_CORE)

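/*
 * Worked example of the two-level mapping built below: Mach priority 31
 * (the default user priority) maps to proportional priority 3 * 31 = 93,
 * which falls in group 93 >> 2 = 23.
 */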
static void
grrr_priority_mapping_init(void)
{
    unsigned int i;

    /* Map 0->0 up to 10->20 */
    for (i = 0; i <= 10; i++) {
        grrr_priority_mapping[i] = 2 * i;
    }

    /* Map user priorities 11->33 up to 51->153 */
    for (i = 11; i <= 51; i++) {
        grrr_priority_mapping[i] = 3 * i;
    }

    /* Map high priorities 52->180 up to 127->255 */
    for (i = 52; i <= 127; i++) {
        grrr_priority_mapping[i] = 128 + i;
    }

    for (i = 0; i < NUM_GRRR_PROPORTIONAL_PRIORITIES; i++) {
#if 0
        unsigned j, k;
        /* Calculate log(i); */
        for (j = 0, k = 1; k <= i; j++, k *= 2);
#endif

        /* Groups of 4 */
        grrr_group_mapping[i] = i >> 2;
    }
}

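/*
 * Pick the next thread within a group.  Clients are serviced round-robin:
 * the group's current_client cursor advances one entry (wrapping at the
 * end of the list) and the thread it lands on is returned.  A per-thread
 * deficit mechanism is stubbed out below, so the rotation is unconditional.
 */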
static thread_t
grrr_intragroup_schedule(grrr_group_t group)
{
    thread_t thread;

    if (group->count == 0) {
        return THREAD_NULL;
    }

    thread = group->current_client;
    if (thread == THREAD_NULL) {
        thread = (thread_t)(void *)queue_first(&group->clients);
    }

    if (1 /* deficit */) {
        group->current_client = (thread_t)(void *)queue_next((queue_entry_t)thread);
        if (queue_end(&group->clients, (queue_entry_t)group->current_client)) {
            group->current_client = (thread_t)(void *)queue_first(&group->clients);
        }

        thread = group->current_client;
    }

    return thread;
}

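/*
 * Pick the next group, then a thread within it.  Each selection charges
 * the group one unit of work; the pass then either advances to the next
 * group in the weight-sorted list or restarts at the head, depending on
 * the well-ordering condition below, so that a group's share of
 * selections tracks its share of the total weight.
 */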
static thread_t
grrr_intergroup_schedule(grrr_run_queue_t rq)
{
    thread_t thread;
    grrr_group_t group;

    if (rq->count == 0) {
        return THREAD_NULL;
    }

    group = rq->current_group;

    if (group == GRRR_GROUP_NULL) {
        group = (grrr_group_t)queue_first(&rq->sorted_group_list);
    }

    thread = grrr_intragroup_schedule(group);

    if ((group->work >= (UINT32_MAX - 256)) || (rq->last_rescale_tick != grrr_rescale_tick)) {
        grrr_rescale_work(rq);
    }
    group->work++;

    if (queue_end(&rq->sorted_group_list, queue_next((queue_entry_t)group))) {
        /* last group, go back to beginning */
        group = (grrr_group_t)queue_first(&rq->sorted_group_list);
    } else {
        grrr_group_t nextgroup = (grrr_group_t)queue_next((queue_entry_t)group);
        uint64_t orderleft, orderright;

        /*
         * The well-ordering condition for intergroup selection is:
         *
         *     (group->work + 1) / (nextgroup->work + 1) > group->weight / nextgroup->weight
         *
         * Multiply both sides by their denominators to avoid division.
         */
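        /*
         * For example, with group->work = 7, group->weight = 100,
         * nextgroup->work = 3 and nextgroup->weight = 60:
         * orderleft = 8 * 60 = 480 and orderright = 4 * 100 = 400,
         * so the pass advances to nextgroup.
         */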
        orderleft = (group->work + 1) * ((uint64_t)nextgroup->weight);
        orderright = (nextgroup->work + 1) * ((uint64_t)group->weight);
        if (orderleft > orderright) {
            group = nextgroup;
        } else {
            group = (grrr_group_t)queue_first(&rq->sorted_group_list);
        }
    }

    rq->current_group = group;

    return thread;
}

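/*
 * Initialize a per-processor GRRR run queue.  For each group, the inner
 * scan finds the lowest proportional priority that maps to the group's
 * index and records it as the group's minpriority.
 */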
static void
grrr_runqueue_init(grrr_run_queue_t runq)
{
    grrr_group_index_t index;

    runq->count = 0;

    for (index = 0; index < NUM_GRRR_GROUPS; index++) {
        unsigned int prisearch;

        for (prisearch = 0;
             prisearch < NUM_GRRR_PROPORTIONAL_PRIORITIES;
             prisearch++) {
            if (grrr_group_mapping[prisearch] == index) {
                runq->groups[index].minpriority = (grrr_proportional_priority_t)prisearch;
                break;
            }
        }

        runq->groups[index].index = index;

        queue_init(&runq->groups[index].clients);
        runq->groups[index].count = 0;
        runq->groups[index].weight = 0;
        runq->groups[index].work = 0;
        runq->groups[index].current_client = THREAD_NULL;
    }

    queue_init(&runq->sorted_group_list);
    runq->weight = 0;
    runq->current_group = GRRR_GROUP_NULL;
}

static void
grrr_rescale_work(grrr_run_queue_t rq)
{
    grrr_group_index_t index;

    /* avoid overflow by scaling by 1/8th */
    for (index = 0; index < NUM_GRRR_GROUPS; index++) {
        rq->groups[index].work >>= 3;
    }

    rq->last_rescale_tick = grrr_rescale_tick;
}

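/*
 * Insert a thread into the run queue.  The thread's scheduling priority
 * selects a proportional priority, which selects a group; the thread is
 * queued just before the group's round-robin cursor, and the group is
 * re-inserted into the weight-sorted group list.  Always returns FALSE.
 */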
static boolean_t
grrr_enqueue(
    grrr_run_queue_t rq,
    thread_t thread)
{
    grrr_proportional_priority_t gpriority;
    grrr_group_index_t gindex;
    grrr_group_t group;

    gpriority = grrr_priority_mapping[thread->sched_pri];
    gindex = grrr_group_mapping[gpriority];
    group = &rq->groups[gindex];

#if 0
    thread->grrr_deficit = 0;
#endif

    /* update run-queue stats for every enqueue, including a group's first client */
    SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);

    if (group->count == 0) {
        /* Empty group, this is the first client */
        enqueue_tail(&group->clients, (queue_entry_t)thread);
        group->count = 1;
        group->weight = gpriority;
        group->current_client = thread;
    } else {
        /* Insert before the current client */
        if (group->current_client == THREAD_NULL ||
            queue_first(&group->clients) == (queue_entry_t)group->current_client) {
            enqueue_head(&group->clients, (queue_entry_t)thread);
        } else {
            insque((queue_entry_t)thread, queue_prev((queue_entry_t)group->current_client));
        }
        group->count++;
        group->weight += gpriority;

        /* Since there was already a client, this is on the per-processor sorted list already */
        remqueue((queue_entry_t)group);
    }

    grrr_sorted_list_insert_group(rq, group);

    rq->count++;
    rq->weight += gpriority;

    return FALSE;
}

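/*
 * Choose and dequeue the next thread via the two-level scheme: intergroup
 * selection picks a group, intragroup selection picks the thread.  The
 * removal bookkeeping mirrors grrr_remove() below.
 */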
static thread_t
grrr_select(grrr_run_queue_t rq)
{
    thread_t thread;

    thread = grrr_intergroup_schedule(rq);
    if (thread != THREAD_NULL) {
        grrr_proportional_priority_t gpriority;
        grrr_group_index_t gindex;
        grrr_group_t group;

        gpriority = grrr_priority_mapping[thread->sched_pri];
        gindex = grrr_group_mapping[gpriority];
        group = &rq->groups[gindex];

        remqueue((queue_entry_t)thread);
        SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
        group->count--;
        group->weight -= gpriority;
        if (group->current_client == thread) {
            group->current_client = THREAD_NULL;
        }

        remqueue((queue_entry_t)group);
        if (group->count == 0) {
            if (rq->current_group == group) {
                rq->current_group = GRRR_GROUP_NULL;
            }
        } else {
            /* Need to re-insert in sorted location */
            grrr_sorted_list_insert_group(rq, group);
        }

        rq->count--;
        rq->weight -= gpriority;

        thread->runq = PROCESSOR_NULL;
    }

    return thread;
}

static void
grrr_remove(
    grrr_run_queue_t rq,
    thread_t thread)
{
    grrr_proportional_priority_t gpriority;
    grrr_group_index_t gindex;
    grrr_group_t group;

    gpriority = grrr_priority_mapping[thread->sched_pri];
    gindex = grrr_group_mapping[gpriority];
    group = &rq->groups[gindex];

    remqueue((queue_entry_t)thread);
    SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
    group->count--;
    group->weight -= gpriority;
    if (group->current_client == thread) {
        group->current_client = THREAD_NULL;
    }

    remqueue((queue_entry_t)group);
    if (group->count == 0) {
        if (rq->current_group == group) {
            rq->current_group = GRRR_GROUP_NULL;
        }
    } else {
        /* Need to re-insert in sorted location */
        grrr_sorted_list_insert_group(rq, group);
    }

    rq->count--;
    rq->weight -= gpriority;

    thread->runq = PROCESSOR_NULL;
}

static void
grrr_sorted_list_insert_group(grrr_run_queue_t rq,
    grrr_group_t group)
{
    /* Simple insertion sort */
    if (queue_empty(&rq->sorted_group_list)) {
        enqueue_tail(&rq->sorted_group_list, (queue_entry_t)group);
    } else {
        grrr_group_t search_group;

        /* Start searching from the head (heaviest weight) for the first
         * element less than us, so we can insert before it
         */
        search_group = (grrr_group_t)queue_first(&rq->sorted_group_list);
        while (!queue_end(&rq->sorted_group_list, (queue_entry_t)search_group)) {
            if (search_group->weight < group->weight) {
                /* we should be before this */
                search_group = (grrr_group_t)queue_prev((queue_entry_t)search_group);
                break;
            } else if (search_group->weight == group->weight) {
                /* Use group index as a tie breaker */
                if (search_group->index < group->index) {
                    search_group = (grrr_group_t)queue_prev((queue_entry_t)search_group);
                    break;
                }
            }

            /* otherwise, our weight is too small, keep going */
            search_group = (grrr_group_t)queue_next((queue_entry_t)search_group);
        }

        if (queue_end(&rq->sorted_group_list, (queue_entry_t)search_group)) {
            enqueue_tail(&rq->sorted_group_list, (queue_entry_t)group);
        } else {
            insque((queue_entry_t)group, (queue_entry_t)search_group);
        }
    }
}

#endif /* defined(CONFIG_SCHED_GRRR_CORE) */