/*
 * Copyright (c) 2009-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>
#include <mach/thread_act.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>

#include <kern/kern_types.h>
#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <mach/sdt.h>

#include <sys/kdebug.h>

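/*
 * Editorial note (not from the original source): this experimental
 * scheduler appears to be modeled on Group Ratio Round-Robin
 * proportional-share scheduling. Runnable threads are hashed into groups
 * by a "proportional priority" derived from their Mach priority; within a
 * group, clients are served round-robin, and between groups, selection is
 * driven by the ratio of work done to group weight (the sum of the
 * clients' proportional priorities).
 */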
#if defined(CONFIG_SCHED_GRRR_CORE)

static void
grrr_priority_mapping_init(void);

static boolean_t
grrr_enqueue(
	grrr_run_queue_t rq,
	thread_t thread);

static thread_t
grrr_select(
	grrr_run_queue_t rq);

static void
grrr_remove(
	grrr_run_queue_t rq,
	thread_t thread);

static void
grrr_sorted_list_insert_group(grrr_run_queue_t rq,
    grrr_group_t group);

static void
grrr_rescale_work(grrr_run_queue_t rq);

static void
grrr_runqueue_init(grrr_run_queue_t runq);

/* Map Mach priorities to ones suitable for proportional sharing */
static grrr_proportional_priority_t grrr_priority_mapping[NRQS];

/* Map each proportional priority to its group */
static grrr_group_index_t grrr_group_mapping[NUM_GRRR_PROPORTIONAL_PRIORITIES];

uint32_t grrr_rescale_tick;

#endif /* defined(CONFIG_SCHED_GRRR_CORE) */

#if defined(CONFIG_SCHED_GRRR)

static void
sched_grrr_init(void);

static void
sched_grrr_timebase_init(void);

static void
sched_grrr_processor_init(processor_t processor);

static void
sched_grrr_pset_init(processor_set_t pset);

static void
sched_grrr_maintenance_continuation(void);

static thread_t
sched_grrr_choose_thread(processor_t processor,
    int priority,
    ast_t reason);

static thread_t
sched_grrr_steal_thread(processor_set_t pset);

static int
sched_grrr_compute_priority(thread_t thread);

static processor_t
sched_grrr_choose_processor(processor_set_t pset,
    processor_t processor,
    thread_t thread);

static boolean_t
sched_grrr_processor_enqueue(
	processor_t processor,
	thread_t thread,
	sched_options_t options);

static void
sched_grrr_processor_queue_shutdown(
	processor_t processor);

static boolean_t
sched_grrr_processor_queue_remove(
	processor_t processor,
	thread_t thread);

static boolean_t
sched_grrr_processor_queue_empty(processor_t processor);

static boolean_t
sched_grrr_processor_queue_has_priority(processor_t processor,
    int priority,
    boolean_t gte);

static boolean_t
sched_grrr_priority_is_urgent(int priority);

static ast_t
sched_grrr_processor_csw_check(processor_t processor);

static uint32_t
sched_grrr_initial_quantum_size(thread_t thread);

static sched_mode_t
sched_grrr_initial_thread_sched_mode(task_t parent_task);

static boolean_t
sched_grrr_can_update_priority(thread_t thread);

static void
sched_grrr_update_priority(thread_t thread);

static void
sched_grrr_lightweight_update_priority(thread_t thread);

static int
sched_grrr_processor_runq_count(processor_t processor);

static uint64_t
sched_grrr_processor_runq_stats_count_sum(processor_t processor);

static int
sched_grrr_processor_bound_count(processor_t processor);

static void
sched_grrr_thread_update_scan(sched_update_scan_context_t scan_context);

const struct sched_dispatch_table sched_grrr_dispatch = {
	.sched_name = "grrr",
	.init = sched_grrr_init,
	.timebase_init = sched_grrr_timebase_init,
	.processor_init = sched_grrr_processor_init,
	.pset_init = sched_grrr_pset_init,
	.maintenance_continuation = sched_grrr_maintenance_continuation,
	.choose_thread = sched_grrr_choose_thread,
	.steal_thread_enabled = sched_steal_thread_DISABLED,
	.steal_thread = sched_grrr_steal_thread,
	.compute_timeshare_priority = sched_grrr_compute_priority,
	.choose_node = sched_choose_node,
	.choose_processor = sched_grrr_choose_processor,
	.processor_enqueue = sched_grrr_processor_enqueue,
	.processor_queue_shutdown = sched_grrr_processor_queue_shutdown,
	.processor_queue_remove = sched_grrr_processor_queue_remove,
	.processor_queue_empty = sched_grrr_processor_queue_empty,
	.priority_is_urgent = sched_grrr_priority_is_urgent,
	.processor_csw_check = sched_grrr_processor_csw_check,
	.processor_queue_has_priority = sched_grrr_processor_queue_has_priority,
	.initial_quantum_size = sched_grrr_initial_quantum_size,
	.initial_thread_sched_mode = sched_grrr_initial_thread_sched_mode,
	.can_update_priority = sched_grrr_can_update_priority,
	.update_priority = sched_grrr_update_priority,
	.lightweight_update_priority = sched_grrr_lightweight_update_priority,
	.quantum_expire = sched_default_quantum_expire,
	.processor_runq_count = sched_grrr_processor_runq_count,
	.processor_runq_stats_count_sum = sched_grrr_processor_runq_stats_count_sum,
	.processor_bound_count = sched_grrr_processor_bound_count,
	.thread_update_scan = sched_grrr_thread_update_scan,
	.multiple_psets_enabled = TRUE,
	.sched_groups_enabled = FALSE,
	.avoid_processor_enabled = FALSE,
	.thread_avoid_processor = NULL,
	.processor_balance = sched_SMT_balance,

	.rt_runq = sched_rtlocal_runq,
	.rt_init = sched_rtlocal_init,
	.rt_queue_shutdown = sched_rtlocal_queue_shutdown,
	.rt_runq_scan = sched_rtlocal_runq_scan,
	.rt_runq_count_sum = sched_rtlocal_runq_count_sum,

	.qos_max_parallelism = sched_qos_max_parallelism,
	.check_spill = sched_check_spill,
	.ipi_policy = sched_ipi_policy,
	.thread_should_yield = sched_thread_should_yield,
	.run_count_incr = sched_run_incr,
	.run_count_decr = sched_run_decr,
	.update_thread_bucket = sched_update_thread_bucket,
	.pset_made_schedulable = sched_pset_made_schedulable,
};

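/*
 * Editorial note, assuming the usual xnu boot flow: the active dispatch
 * table is chosen once in sched_init(), typically by matching a "sched"
 * boot-arg against each candidate table's .sched_name ("grrr" here);
 * GRRR is not the default scheduler on shipping configurations.
 */
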
extern int max_unsafe_quanta;

static uint32_t grrr_quantum_us;
static uint32_t grrr_quantum;

static uint64_t sched_grrr_tick_deadline;

static void
sched_grrr_init(void)
{
	if (default_preemption_rate < 1) {
		default_preemption_rate = 100;
	}
	grrr_quantum_us = (1000 * 1000) / default_preemption_rate;

	printf("standard grrr timeslicing quantum is %d us\n", grrr_quantum_us);

	grrr_priority_mapping_init();
}
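
/*
 * Worked example (editorial): with the fallback default_preemption_rate
 * of 100 preemptions per second used above, grrr_quantum_us =
 * 1,000,000 / 100 = 10,000 us, i.e. a 10 ms timeslice.
 */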

static void
sched_grrr_timebase_init(void)
{
	uint64_t abstime;

	/* standard timeslicing quantum */
	clock_interval_to_absolutetime_interval(
		grrr_quantum_us, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	grrr_quantum = (uint32_t)abstime;

	thread_depress_time = 1 * grrr_quantum;
	default_timeshare_computation = grrr_quantum / 2;
	default_timeshare_constraint = grrr_quantum;

	max_unsafe_computation = max_unsafe_quanta * grrr_quantum;
	sched_safe_duration = 2 * max_unsafe_quanta * grrr_quantum;
}
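
/*
 * Editorial note: grrr_quantum and everything derived from it above are
 * expressed in mach_absolute_time() units; the assert checks that the
 * converted quantum is nonzero and still fits in 32 bits.
 */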

static void
sched_grrr_processor_init(processor_t processor)
{
	grrr_runqueue_init(&processor->grrr_runq);
}

static void
sched_grrr_pset_init(processor_set_t pset __unused)
{
}

static void
sched_grrr_maintenance_continuation(void)
{
	uint64_t abstime = mach_absolute_time();

	grrr_rescale_tick++;

	/*
	 * Compute various averages.
	 */
	compute_averages(1);

	if (sched_grrr_tick_deadline == 0) {
		sched_grrr_tick_deadline = abstime;
	}

	clock_deadline_for_periodic_event(10 * sched_one_second_interval, abstime,
	    &sched_grrr_tick_deadline);

	assert_wait_deadline((event_t)sched_grrr_maintenance_continuation, THREAD_UNINT, sched_grrr_tick_deadline);
	thread_block((thread_continue_t)sched_grrr_maintenance_continuation);
	/*NOTREACHED*/
}
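
/*
 * Editorial note: this is the classic xnu continuation pattern for a
 * periodic maintenance thread. The thread blocks with itself as the
 * continuation, so it re-runs (here every 10 * sched_one_second_interval)
 * without keeping a kernel stack while asleep.
 */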

static thread_t
sched_grrr_choose_thread(processor_t processor,
    int priority __unused,
    ast_t reason __unused)
{
	grrr_run_queue_t rq = &processor->grrr_runq;

	return grrr_select(rq);
}

static thread_t
sched_grrr_steal_thread(processor_set_t pset)
{
	pset_unlock(pset);

	return THREAD_NULL;
}

static int
sched_grrr_compute_priority(thread_t thread)
{
	return thread->base_pri;
}

static processor_t
sched_grrr_choose_processor(processor_set_t pset,
    processor_t processor,
    thread_t thread)
{
	return choose_processor(pset, processor, thread);
}

static boolean_t
sched_grrr_processor_enqueue(
	processor_t processor,
	thread_t thread,
	sched_options_t options __unused)
{
	grrr_run_queue_t rq = &processor->grrr_runq;
	boolean_t result;

	result = grrr_enqueue(rq, thread);

	thread->runq = processor;

	return result;
}
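
/*
 * Editorial note: thread->runq records which processor's run queue holds
 * the thread; sched_grrr_processor_queue_remove() below relies on this
 * field (under the pset lock) to detect whether the thread is still
 * enqueued, and grrr_select()/grrr_remove() clear it on dequeue.
 */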

static void
sched_grrr_processor_queue_shutdown(
	processor_t processor)
{
	processor_set_t pset = processor->processor_set;
	thread_t thread;
	queue_head_t tqueue, bqueue;

	queue_init(&tqueue);
	queue_init(&bqueue);

	while ((thread = sched_grrr_choose_thread(processor, IDLEPRI, AST_NONE)) != THREAD_NULL) {
		if (thread->bound_processor == PROCESSOR_NULL) {
			enqueue_tail(&tqueue, (queue_entry_t)thread);
		} else {
			enqueue_tail(&bqueue, (queue_entry_t)thread);
		}
	}

	while ((thread = (thread_t)(void *)dequeue_head(&bqueue)) != THREAD_NULL) {
		sched_grrr_processor_enqueue(processor, thread, SCHED_TAILQ);
	}

	pset_unlock(pset);

	while ((thread = (thread_t)(void *)dequeue_head(&tqueue)) != THREAD_NULL) {
		thread_lock(thread);

		thread_setrun(thread, SCHED_TAILQ);

		thread_unlock(thread);
	}
}
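
/*
 * Editorial note on the shutdown drain above: bound threads (bqueue) must
 * stay with the processor being shut down and are simply re-enqueued,
 * while unbound threads (tqueue) are handed back to thread_setrun() after
 * the pset lock is dropped, so they can migrate to another processor.
 */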

static boolean_t
sched_grrr_processor_queue_remove(
	processor_t processor,
	thread_t thread)
{
	processor_set_t pset = processor->processor_set;

	pset_lock(pset);

	if (processor == thread->runq) {
		/*
		 * Thread is on a run queue and we have a lock on
		 * that run queue.
		 */
		grrr_run_queue_t rq = &processor->grrr_runq;

		grrr_remove(rq, thread);
	} else {
		/*
		 * The thread left the run queue before we could
		 * lock the run queue.
		 */
		assert(thread->runq == PROCESSOR_NULL);
		processor = PROCESSOR_NULL;
	}

	pset_unlock(pset);

	return processor != PROCESSOR_NULL;
}

static boolean_t
sched_grrr_processor_queue_empty(processor_t processor)
{
	boolean_t result;

	result = (processor->grrr_runq.count == 0);

	return result;
}

static boolean_t
sched_grrr_processor_queue_has_priority(processor_t processor,
    int priority,
    boolean_t gte __unused)
{
	grrr_run_queue_t rq = &processor->grrr_runq;
	unsigned int i;

	i = grrr_group_mapping[grrr_priority_mapping[priority]];
	for (; i < NUM_GRRR_GROUPS; i++) {
		if (rq->groups[i].count > 0) {
			return TRUE;
		}
	}

	return FALSE;
}
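
/*
 * Editorial note: both mapping tables are monotonically nondecreasing, so
 * scanning group indices upward from the mapped index visits every group
 * that can hold clients at or above the requested priority (to within the
 * groups-of-4 granularity); the gte flag is effectively ignored.
 */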

/* Implement sched_preempt_pri in code */
static boolean_t
sched_grrr_priority_is_urgent(int priority)
{
	if (priority <= BASEPRI_FOREGROUND) {
		return FALSE;
	}

	if (priority < MINPRI_KERNEL) {
		return TRUE;
	}

	if (priority >= BASEPRI_PREEMPT) {
		return TRUE;
	}

	return FALSE;
}

static ast_t
sched_grrr_processor_csw_check(processor_t processor)
{
	int count;

	count = sched_grrr_processor_runq_count(processor);

	if (count > 0) {
		return AST_PREEMPT;
	}

	return AST_NONE;
}

static uint32_t
sched_grrr_initial_quantum_size(thread_t thread __unused)
{
	return grrr_quantum;
}

static sched_mode_t
sched_grrr_initial_thread_sched_mode(task_t parent_task)
{
	if (parent_task == kernel_task) {
		return TH_MODE_FIXED;
	} else {
		return TH_MODE_TIMESHARE;
	}
}

static boolean_t
sched_grrr_can_update_priority(thread_t thread __unused)
{
	return FALSE;
}

static void
sched_grrr_update_priority(thread_t thread __unused)
{
	return;
}

static void
sched_grrr_lightweight_update_priority(thread_t thread __unused)
{
	return;
}

static int
sched_grrr_processor_runq_count(processor_t processor)
{
	return processor->grrr_runq.count;
}

static uint64_t
sched_grrr_processor_runq_stats_count_sum(processor_t processor)
{
	return processor->grrr_runq.runq_stats.count_sum;
}

static int
sched_grrr_processor_bound_count(__unused processor_t processor)
{
	return 0;
}

static void
sched_grrr_thread_update_scan(__unused sched_update_scan_context_t scan_context)
{
	return;
}

#endif /* defined(CONFIG_SCHED_GRRR) */

#if defined(CONFIG_SCHED_GRRR_CORE)

static void
grrr_priority_mapping_init(void)
{
	unsigned int i;

	/* Map 0->0 up to 10->20 */
	for (i = 0; i <= 10; i++) {
		grrr_priority_mapping[i] = (grrr_proportional_priority_t)(2 * i);
	}

	/* Map user priorities 11->33 up to 51->153 */
	for (i = 11; i <= 51; i++) {
		grrr_priority_mapping[i] = (grrr_proportional_priority_t)(3 * i);
	}

	/* Map high priorities 52->180 up to 127->255 */
	for (i = 52; i <= 127; i++) {
		grrr_priority_mapping[i] = (grrr_proportional_priority_t)(128 + i);
	}

	for (i = 0; i < NUM_GRRR_PROPORTIONAL_PRIORITIES; i++) {
#if 0
		unsigned j, k;
		/* Calculate log(i); */
		for (j = 0, k = 1; k <= i; j++, k *= 2) {
			;
		}
#endif

		/* Groups of 4 */
		grrr_group_mapping[i] = (grrr_group_index_t)(i >> 2);
	}
}
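
/*
 * Worked example (editorial): the default Mach timeshare priority, 31,
 * falls in the 11..51 band, so it maps to proportional priority
 * 3 * 31 = 93, which lands in group 93 >> 2 = 23 of the
 * NUM_GRRR_PROPORTIONAL_PRIORITIES / 4 = 64 groups.
 */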

static thread_t
grrr_intragroup_schedule(grrr_group_t group)
{
	thread_t thread;

	if (group->count == 0) {
		return THREAD_NULL;
	}

	thread = group->current_client;
	if (thread == THREAD_NULL) {
		thread = (thread_t)(void *)queue_first(&group->clients);
	}

	if (1 /* deficit */) {
		group->current_client = (thread_t)(void *)queue_next((queue_entry_t)thread);
		if (queue_end(&group->clients, (queue_entry_t)group->current_client)) {
			group->current_client = (thread_t)(void *)queue_first(&group->clients);
		}

		thread = group->current_client;
	}

	return thread;
}
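
/*
 * Editorial note: within a group, clients are served strict round-robin.
 * The always-true "deficit" guard above looks like a placeholder for a
 * deficit-based skip that was never wired up, so the rotation to the next
 * client is unconditional.
 */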

static thread_t
grrr_intergroup_schedule(grrr_run_queue_t rq)
{
	thread_t thread;
	grrr_group_t group;

	if (rq->count == 0) {
		return THREAD_NULL;
	}

	group = rq->current_group;

	if (group == GRRR_GROUP_NULL) {
		group = (grrr_group_t)queue_first(&rq->sorted_group_list);
	}

	thread = grrr_intragroup_schedule(group);

	if ((group->work >= (UINT32_MAX - 256)) || (rq->last_rescale_tick != grrr_rescale_tick)) {
		grrr_rescale_work(rq);
	}
	group->work++;

	if (queue_end(&rq->sorted_group_list, queue_next((queue_entry_t)group))) {
		/* last group, go back to beginning */
		group = (grrr_group_t)queue_first(&rq->sorted_group_list);
	} else {
		grrr_group_t nextgroup = (grrr_group_t)queue_next((queue_entry_t)group);
		uint64_t orderleft, orderright;

		/*
		 * The well-ordering condition for intergroup selection is:
		 *
		 *	(group->work+1) / (nextgroup->work+1) > (group->weight) / (nextgroup->weight)
		 *
		 * Multiply both sides by their denominators to avoid division:
		 *
		 *	(group->work+1) * nextgroup->weight > (nextgroup->work+1) * group->weight
		 */
		orderleft = (group->work + 1) * ((uint64_t)nextgroup->weight);
		orderright = (nextgroup->work + 1) * ((uint64_t)group->weight);
		if (orderleft > orderright) {
			group = nextgroup;
		} else {
			group = (grrr_group_t)queue_first(&rq->sorted_group_list);
		}
	}

	rq->current_group = group;

	return thread;
}
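
/*
 * Worked example (editorial, with made-up numbers): suppose group->weight
 * = 40, group->work = 9, nextgroup->weight = 20, nextgroup->work = 4.
 * Then orderleft = (9 + 1) * 20 = 200 and orderright = (4 + 1) * 40 = 200,
 * so 200 > 200 fails and selection restarts at the head (heaviest group).
 * Once group->work reaches 10, orderleft = 11 * 20 = 220 > 200 and the
 * scan advances to nextgroup: a group yields to its lighter neighbor only
 * after doing proportionally more work than its weight entitles it to.
 */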

static void
grrr_runqueue_init(grrr_run_queue_t runq)
{
	grrr_group_index_t index;

	runq->count = 0;

	for (index = 0; index < NUM_GRRR_GROUPS; index++) {
		unsigned int prisearch;

		for (prisearch = 0;
		    prisearch < NUM_GRRR_PROPORTIONAL_PRIORITIES;
		    prisearch++) {
			if (grrr_group_mapping[prisearch] == index) {
				runq->groups[index].minpriority = (grrr_proportional_priority_t)prisearch;
				break;
			}
		}

		runq->groups[index].index = index;

		queue_init(&runq->groups[index].clients);
		runq->groups[index].count = 0;
		runq->groups[index].weight = 0;
		runq->groups[index].work = 0;
		runq->groups[index].current_client = THREAD_NULL;
	}

	queue_init(&runq->sorted_group_list);
	runq->weight = 0;
	runq->current_group = GRRR_GROUP_NULL;
}

static void
grrr_rescale_work(grrr_run_queue_t rq)
{
	grrr_group_index_t index;

	/* avoid overflow by scaling by 1/8th */
	for (index = 0; index < NUM_GRRR_GROUPS; index++) {
		rq->groups[index].work >>= 3;
	}

	rq->last_rescale_tick = grrr_rescale_tick;
}
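
/*
 * Editorial note: rescaling divides every group's work counter by 8 at
 * once, which roughly preserves the relative ordering the intergroup
 * ratio test cares about while keeping the counters far from UINT32_MAX.
 * It runs at most once per maintenance tick, or sooner if a counter
 * approaches overflow (see grrr_intergroup_schedule above).
 */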

static boolean_t
grrr_enqueue(
	grrr_run_queue_t rq,
	thread_t thread)
{
	grrr_proportional_priority_t gpriority;
	grrr_group_index_t gindex;
	grrr_group_t group;

	gpriority = grrr_priority_mapping[thread->sched_pri];
	gindex = grrr_group_mapping[gpriority];
	group = &rq->groups[gindex];

	if (group->count == 0) {
		/* Empty group, this is the first client */
		enqueue_tail(&group->clients, (queue_entry_t)thread);
		group->count = 1;
		group->weight = gpriority;
		group->current_client = thread;
	} else {
		/* Insert before the current client */
		if (group->current_client == THREAD_NULL ||
		    queue_first(&group->clients) == (queue_entry_t)group->current_client) {
			enqueue_head(&group->clients, (queue_entry_t)thread);
		} else {
			insque((queue_entry_t)thread, queue_prev((queue_entry_t)group->current_client));
		}
		SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
		group->count++;
		group->weight += gpriority;

		/* Since there was already a client, this is on the per-processor sorted list already */
		remqueue((queue_entry_t)group);
	}

	grrr_sorted_list_insert_group(rq, group);

	rq->count++;
	rq->weight += gpriority;

	return FALSE;
}
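
/*
 * Editorial note: a group's weight is the sum of its clients'
 * proportional priorities, so every enqueue can change the group's rank
 * in the sorted list; the group is therefore pulled off the list and
 * re-inserted. The unconditional FALSE return means enqueueing never
 * reports a preemption hint to the caller.
 */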

static thread_t
grrr_select(grrr_run_queue_t rq)
{
	thread_t thread;

	thread = grrr_intergroup_schedule(rq);
	if (thread != THREAD_NULL) {
		grrr_proportional_priority_t gpriority;
		grrr_group_index_t gindex;
		grrr_group_t group;

		gpriority = grrr_priority_mapping[thread->sched_pri];
		gindex = grrr_group_mapping[gpriority];
		group = &rq->groups[gindex];

		remqueue((queue_entry_t)thread);
		SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
		group->count--;
		group->weight -= gpriority;
		if (group->current_client == thread) {
			group->current_client = THREAD_NULL;
		}

		remqueue((queue_entry_t)group);
		if (group->count == 0) {
			if (rq->current_group == group) {
				rq->current_group = GRRR_GROUP_NULL;
			}
		} else {
			/* Need to re-insert in sorted location */
			grrr_sorted_list_insert_group(rq, group);
		}

		rq->count--;
		rq->weight -= gpriority;

		thread->runq = PROCESSOR_NULL;
	}

	return thread;
}

static void
grrr_remove(
	grrr_run_queue_t rq,
	thread_t thread)
{
	grrr_proportional_priority_t gpriority;
	grrr_group_index_t gindex;
	grrr_group_t group;

	gpriority = grrr_priority_mapping[thread->sched_pri];
	gindex = grrr_group_mapping[gpriority];
	group = &rq->groups[gindex];

	remqueue((queue_entry_t)thread);
	SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
	group->count--;
	group->weight -= gpriority;
	if (group->current_client == thread) {
		group->current_client = THREAD_NULL;
	}

	remqueue((queue_entry_t)group);
	if (group->count == 0) {
		if (rq->current_group == group) {
			rq->current_group = GRRR_GROUP_NULL;
		}
	} else {
		/* Need to re-insert in sorted location */
		grrr_sorted_list_insert_group(rq, group);
	}

	rq->count--;
	rq->weight -= gpriority;

	thread->runq = PROCESSOR_NULL;
}

static void
grrr_sorted_list_insert_group(grrr_run_queue_t rq,
    grrr_group_t group)
{
	/* Simple insertion sort */
	if (queue_empty(&rq->sorted_group_list)) {
		enqueue_tail(&rq->sorted_group_list, (queue_entry_t)group);
	} else {
		grrr_group_t search_group;

		/*
		 * Start searching from the head (heaviest weight) for the first
		 * element less than us, so we can insert before it.
		 */
		search_group = (grrr_group_t)queue_first(&rq->sorted_group_list);
		while (!queue_end(&rq->sorted_group_list, (queue_entry_t)search_group)) {
			if (search_group->weight < group->weight) {
				/* we should be before this */
				search_group = (grrr_group_t)queue_prev((queue_entry_t)search_group);
				break;
			}
			if (search_group->weight == group->weight) {
				/* Use group index as a tie breaker */
				if (search_group->index < group->index) {
					search_group = (grrr_group_t)queue_prev((queue_entry_t)search_group);
					break;
				}
			}

			/* otherwise, our weight is too small, keep going */
			search_group = (grrr_group_t)queue_next((queue_entry_t)search_group);
		}

		if (queue_end(&rq->sorted_group_list, (queue_entry_t)search_group)) {
			enqueue_tail(&rq->sorted_group_list, (queue_entry_t)group);
		} else {
			insque((queue_entry_t)group, (queue_entry_t)search_group);
		}
	}
}
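
/*
 * Editorial note: the sorted list is kept in descending weight order with
 * the group index as tie breaker; insertion is a linear scan, which stays
 * cheap because list length is bounded by NUM_GRRR_GROUPS (64 with the
 * groups-of-4 mapping above).
 */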

#endif /* defined(CONFIG_SCHED_GRRR_CORE) */