/*
 * Copyright (c) 2009-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>
#include <mach/thread_act.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>

#include <kern/kern_types.h>
#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <mach/sdt.h>

#include <sys/kdebug.h>

#if defined(CONFIG_SCHED_GRRR_CORE)

static void
grrr_priority_mapping_init(void);

static boolean_t
grrr_enqueue(
	grrr_run_queue_t rq,
	thread_t thread);

static thread_t
grrr_select(
	grrr_run_queue_t rq);

static void
grrr_remove(
	grrr_run_queue_t rq,
	thread_t thread);


static void
grrr_sorted_list_insert_group(grrr_run_queue_t rq,
    grrr_group_t group);

static void
grrr_rescale_work(grrr_run_queue_t rq);

static void
grrr_runqueue_init(grrr_run_queue_t runq);

/* Map Mach priorities to ones suitable for proportional sharing */
static grrr_proportional_priority_t grrr_priority_mapping[NRQS];

/* Map each proportional priority to its group */
static grrr_group_index_t grrr_group_mapping[NUM_GRRR_PROPORTIONAL_PRIORITIES];

uint32_t grrr_rescale_tick;

#endif /* defined(CONFIG_SCHED_GRRR_CORE) */

#if defined(CONFIG_SCHED_GRRR)

static void
sched_grrr_init(void);

static void
sched_grrr_timebase_init(void);

static void
sched_grrr_processor_init(processor_t processor);

static void
sched_grrr_pset_init(processor_set_t pset);

static void
sched_grrr_maintenance_continuation(void);

static thread_t
sched_grrr_choose_thread(processor_t processor,
    int priority,
    ast_t reason);

static thread_t
sched_grrr_steal_thread(processor_set_t pset);

static int
sched_grrr_compute_priority(thread_t thread);

static processor_t
sched_grrr_choose_processor(processor_set_t pset,
    processor_t processor,
    thread_t thread);

static boolean_t
sched_grrr_processor_enqueue(
	processor_t processor,
	thread_t thread,
	integer_t options);

static void
sched_grrr_processor_queue_shutdown(
	processor_t processor);

static boolean_t
sched_grrr_processor_queue_remove(
	processor_t processor,
	thread_t thread);

static boolean_t
sched_grrr_processor_queue_empty(processor_t processor);

static boolean_t
sched_grrr_processor_queue_has_priority(processor_t processor,
    int priority,
    boolean_t gte);

static boolean_t
sched_grrr_priority_is_urgent(int priority);

static ast_t
sched_grrr_processor_csw_check(processor_t processor);

static uint32_t
sched_grrr_initial_quantum_size(thread_t thread);

static sched_mode_t
sched_grrr_initial_thread_sched_mode(task_t parent_task);

static boolean_t
sched_grrr_can_update_priority(thread_t thread);

static void
sched_grrr_update_priority(thread_t thread);

static void
sched_grrr_lightweight_update_priority(thread_t thread);

static int
sched_grrr_processor_runq_count(processor_t processor);

static uint64_t
sched_grrr_processor_runq_stats_count_sum(processor_t processor);

static int
sched_grrr_processor_bound_count(processor_t processor);

static void
sched_grrr_thread_update_scan(sched_update_scan_context_t scan_context);

const struct sched_dispatch_table sched_grrr_dispatch = {
	.sched_name = "grrr",
	.init = sched_grrr_init,
	.timebase_init = sched_grrr_timebase_init,
	.processor_init = sched_grrr_processor_init,
	.pset_init = sched_grrr_pset_init,
	.maintenance_continuation = sched_grrr_maintenance_continuation,
	.choose_thread = sched_grrr_choose_thread,
	.steal_thread_enabled = sched_steal_thread_DISABLED,
	.steal_thread = sched_grrr_steal_thread,
	.compute_timeshare_priority = sched_grrr_compute_priority,
	.choose_processor = sched_grrr_choose_processor,
	.processor_enqueue = sched_grrr_processor_enqueue,
	.processor_queue_shutdown = sched_grrr_processor_queue_shutdown,
	.processor_queue_remove = sched_grrr_processor_queue_remove,
	.processor_queue_empty = sched_grrr_processor_queue_empty,
	.priority_is_urgent = sched_grrr_priority_is_urgent,
	.processor_csw_check = sched_grrr_processor_csw_check,
	.processor_queue_has_priority = sched_grrr_processor_queue_has_priority,
	.initial_quantum_size = sched_grrr_initial_quantum_size,
	.initial_thread_sched_mode = sched_grrr_initial_thread_sched_mode,
	.can_update_priority = sched_grrr_can_update_priority,
	.update_priority = sched_grrr_update_priority,
	.lightweight_update_priority = sched_grrr_lightweight_update_priority,
	.quantum_expire = sched_default_quantum_expire,
	.processor_runq_count = sched_grrr_processor_runq_count,
	.processor_runq_stats_count_sum = sched_grrr_processor_runq_stats_count_sum,
	.processor_bound_count = sched_grrr_processor_bound_count,
	.thread_update_scan = sched_grrr_thread_update_scan,
	.direct_dispatch_to_idle_processors = TRUE,
	.multiple_psets_enabled = TRUE,
	.sched_groups_enabled = FALSE,
	.avoid_processor_enabled = FALSE,
	.thread_avoid_processor = NULL,
	.processor_balance = sched_SMT_balance,

	.rt_runq = sched_rtglobal_runq,
	.rt_init = sched_rtglobal_init,
	.rt_queue_shutdown = sched_rtglobal_queue_shutdown,
	.rt_runq_scan = sched_rtglobal_runq_scan,
	.rt_runq_count_sum = sched_rtglobal_runq_count_sum,

	.qos_max_parallelism = sched_qos_max_parallelism,
	.check_spill = sched_check_spill,
	.ipi_policy = sched_ipi_policy,
	.thread_should_yield = sched_thread_should_yield,
};

extern int max_unsafe_quanta;

static uint32_t grrr_quantum_us;
static uint32_t grrr_quantum;

static uint64_t sched_grrr_tick_deadline;

static void
sched_grrr_init(void)
{
	if (default_preemption_rate < 1) {
		default_preemption_rate = 100;
	}
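	/*
	 * One quantum per preemption interval: at the default rate of
	 * 100 preemptions per second this works out to
	 * (1000 * 1000) / 100 = 10000 us, i.e. a 10 ms timeslice.
	 */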
	grrr_quantum_us = (1000 * 1000) / default_preemption_rate;

	printf("standard grrr timeslicing quantum is %u us\n", grrr_quantum_us);

	grrr_priority_mapping_init();
}

static void
sched_grrr_timebase_init(void)
{
	uint64_t abstime;

	/* standard timeslicing quantum */
	clock_interval_to_absolutetime_interval(
		grrr_quantum_us, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	grrr_quantum = (uint32_t)abstime;

	thread_depress_time = 1 * grrr_quantum;
	default_timeshare_computation = grrr_quantum / 2;
	default_timeshare_constraint = grrr_quantum;

	max_unsafe_computation = max_unsafe_quanta * grrr_quantum;
	sched_safe_duration = 2 * max_unsafe_quanta * grrr_quantum;
}

static void
sched_grrr_processor_init(processor_t processor)
{
	grrr_runqueue_init(&processor->grrr_runq);
}

static void
sched_grrr_pset_init(processor_set_t pset __unused)
{
}

static void
sched_grrr_maintenance_continuation(void)
{
	uint64_t abstime = mach_absolute_time();

	grrr_rescale_tick++;

	/*
	 * Compute various averages.
	 */
	compute_averages(1);

	if (sched_grrr_tick_deadline == 0) {
		sched_grrr_tick_deadline = abstime;
	}

	clock_deadline_for_periodic_event(10 * sched_one_second_interval, abstime,
	    &sched_grrr_tick_deadline);

	assert_wait_deadline((event_t)sched_grrr_maintenance_continuation, THREAD_UNINT, sched_grrr_tick_deadline);
	thread_block((thread_continue_t)sched_grrr_maintenance_continuation);
	/*NOTREACHED*/
}

static thread_t
sched_grrr_choose_thread(processor_t processor,
    int priority __unused,
    ast_t reason __unused)
{
	grrr_run_queue_t rq = &processor->grrr_runq;

	return grrr_select(rq);
}

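/*
 * GRRR opts out of work stealing (steal_thread_enabled is set to
 * sched_steal_thread_DISABLED in the dispatch table above), so the
 * steal hook only drops the pset lock taken by the caller and reports
 * that there is nothing to steal.
 */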
static thread_t
sched_grrr_steal_thread(processor_set_t pset)
{
	pset_unlock(pset);

	return THREAD_NULL;
}

static int
sched_grrr_compute_priority(thread_t thread)
{
	return thread->base_pri;
}

static processor_t
sched_grrr_choose_processor(processor_set_t pset,
    processor_t processor,
    thread_t thread)
{
	return choose_processor(pset, processor, thread);
}

static boolean_t
sched_grrr_processor_enqueue(
	processor_t processor,
	thread_t thread,
	integer_t options __unused)
{
	grrr_run_queue_t rq = &processor->grrr_runq;
	boolean_t result;

	result = grrr_enqueue(rq, thread);

	thread->runq = processor;

	return result;
}

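/*
 * Draining a processor for shutdown: threads bound to this processor
 * are put back on its own run queue, while unbound threads are parked
 * on a local queue and redistributed via thread_setrun() once the
 * pset lock is released.
 */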
static void
sched_grrr_processor_queue_shutdown(
	processor_t processor)
{
	processor_set_t pset = processor->processor_set;
	thread_t thread;
	queue_head_t tqueue, bqueue;

	queue_init(&tqueue);
	queue_init(&bqueue);

	while ((thread = sched_grrr_choose_thread(processor, IDLEPRI, AST_NONE)) != THREAD_NULL) {
		if (thread->bound_processor == PROCESSOR_NULL) {
			enqueue_tail(&tqueue, (queue_entry_t)thread);
		} else {
			enqueue_tail(&bqueue, (queue_entry_t)thread);
		}
	}

	while ((thread = (thread_t)(void *)dequeue_head(&bqueue)) != THREAD_NULL) {
		sched_grrr_processor_enqueue(processor, thread, SCHED_TAILQ);
	}

	pset_unlock(pset);

	while ((thread = (thread_t)(void *)dequeue_head(&tqueue)) != THREAD_NULL) {
		thread_lock(thread);

		thread_setrun(thread, SCHED_TAILQ);

		thread_unlock(thread);
	}
}

static boolean_t
sched_grrr_processor_queue_remove(
	processor_t processor,
	thread_t thread)
{
	processor_set_t pset = processor->processor_set;

	pset_lock(pset);

	if (processor == thread->runq) {
		/*
		 * Thread is on a run queue and we have a lock on
		 * that run queue.
		 */
		grrr_run_queue_t rq = &processor->grrr_runq;

		grrr_remove(rq, thread);
	} else {
		/*
		 * The thread left the run queue before we could
		 * lock the run queue.
		 */
		assert(thread->runq == PROCESSOR_NULL);
		processor = PROCESSOR_NULL;
	}

	pset_unlock(pset);

	return processor != PROCESSOR_NULL;
}

static boolean_t
sched_grrr_processor_queue_empty(processor_t processor)
{
	boolean_t result;

	result = (processor->grrr_runq.count == 0);

	return result;
}

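/*
 * Higher proportional priorities map to higher group indices (groups
 * hold four consecutive proportional priorities), so scanning from the
 * mapped group upward tests whether any thread in that group or a
 * higher one is queued. The gte flag is ignored, so the check behaves
 * as "greater than or equal" at group (not exact priority) granularity.
 */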
static boolean_t
sched_grrr_processor_queue_has_priority(processor_t processor,
    int priority,
    boolean_t gte __unused)
{
	grrr_run_queue_t rq = &processor->grrr_runq;
	unsigned int i;

	i = grrr_group_mapping[grrr_priority_mapping[priority]];
	for (; i < NUM_GRRR_GROUPS; i++) {
		if (rq->groups[i].count > 0) {
			return TRUE;
		}
	}

	return FALSE;
}

/* Implement sched_preempt_pri in code */
static boolean_t
sched_grrr_priority_is_urgent(int priority)
{
	if (priority <= BASEPRI_FOREGROUND) {
		return FALSE;
	}

	if (priority < MINPRI_KERNEL) {
		return TRUE;
	}

	if (priority >= BASEPRI_PREEMPT) {
		return TRUE;
	}

	return FALSE;
}

static ast_t
sched_grrr_processor_csw_check(processor_t processor)
{
	int count;

	count = sched_grrr_processor_runq_count(processor);

	if (count > 0) {
		return AST_PREEMPT;
	}

	return AST_NONE;
}

static uint32_t
sched_grrr_initial_quantum_size(thread_t thread __unused)
{
	return grrr_quantum;
}

static sched_mode_t
sched_grrr_initial_thread_sched_mode(task_t parent_task)
{
	if (parent_task == kernel_task) {
		return TH_MODE_FIXED;
	} else {
		return TH_MODE_TIMESHARE;
	}
}

static boolean_t
sched_grrr_can_update_priority(thread_t thread __unused)
{
	return FALSE;
}

static void
sched_grrr_update_priority(thread_t thread __unused)
{
	return;
}

static void
sched_grrr_lightweight_update_priority(thread_t thread __unused)
{
	return;
}

static int
sched_grrr_processor_runq_count(processor_t processor)
{
	return processor->grrr_runq.count;
}

static uint64_t
sched_grrr_processor_runq_stats_count_sum(processor_t processor)
{
	return processor->grrr_runq.runq_stats.count_sum;
}

static int
sched_grrr_processor_bound_count(__unused processor_t processor)
{
	return 0;
}

static void
sched_grrr_thread_update_scan(__unused sched_update_scan_context_t scan_context)
{
	return;
}

#endif /* defined(CONFIG_SCHED_GRRR) */

#if defined(CONFIG_SCHED_GRRR_CORE)

static void
grrr_priority_mapping_init(void)
{
	unsigned int i;

	/* Map 0->0 up to 10->20 */
	for (i = 0; i <= 10; i++) {
		grrr_priority_mapping[i] = 2 * i;
	}

	/* Map user priorities 11->33 up to 51->153 */
	for (i = 11; i <= 51; i++) {
		grrr_priority_mapping[i] = 3 * i;
	}

	/* Map high priorities 52->180 up to 127->255 */
	for (i = 52; i <= 127; i++) {
		grrr_priority_mapping[i] = 128 + i;
	}

	for (i = 0; i < NUM_GRRR_PROPORTIONAL_PRIORITIES; i++) {
#if 0
		unsigned j, k;
		/* Calculate log(i); */
		for (j = 0, k = 1; k <= i; j++, k *= 2) {
			;
		}
#endif

		/* Groups of 4 */
		grrr_group_mapping[i] = i >> 2;
	}
}
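
/*
 * For example, the default user priority 31 maps to proportional
 * priority 3 * 31 = 93, which falls in group 93 >> 2 = 23; a low
 * priority of 4 maps to 2 * 4 = 8 and group 2. The proportional
 * priority also serves as the thread's weight contribution to its
 * group (see grrr_enqueue below).
 */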

static thread_t
grrr_intragroup_schedule(grrr_group_t group)
{
	thread_t thread;

	if (group->count == 0) {
		return THREAD_NULL;
	}

	thread = group->current_client;
	if (thread == THREAD_NULL) {
		thread = (thread_t)(void *)queue_first(&group->clients);
	}

	if (1 /* deficit */) {
		group->current_client = (thread_t)(void *)queue_next((queue_entry_t)thread);
		if (queue_end(&group->clients, (queue_entry_t)group->current_client)) {
			group->current_client = (thread_t)(void *)queue_first(&group->clients);
		}

		thread = group->current_client;
	}

	return thread;
}
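
/*
 * Within a group, clients are serviced round-robin: current_client
 * advances to the next queued thread and wraps at the end of the list.
 * The always-true "deficit" placeholder condition above suggests that
 * deficit-based selection was sketched but never implemented, so the
 * rotation is unconditional.
 */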

static thread_t
grrr_intergroup_schedule(grrr_run_queue_t rq)
{
	thread_t thread;
	grrr_group_t group;

	if (rq->count == 0) {
		return THREAD_NULL;
	}

	group = rq->current_group;

	if (group == GRRR_GROUP_NULL) {
		group = (grrr_group_t)queue_first(&rq->sorted_group_list);
	}

	thread = grrr_intragroup_schedule(group);

	if ((group->work >= (UINT32_MAX - 256)) || (rq->last_rescale_tick != grrr_rescale_tick)) {
		grrr_rescale_work(rq);
	}
	group->work++;

	if (queue_end(&rq->sorted_group_list, queue_next((queue_entry_t)group))) {
		/* last group, go back to beginning */
		group = (grrr_group_t)queue_first(&rq->sorted_group_list);
	} else {
		grrr_group_t nextgroup = (grrr_group_t)queue_next((queue_entry_t)group);
		uint64_t orderleft, orderright;

		/*
		 * The well-ordering condition for intergroup selection is:
		 *
		 *	(group->work + 1) / (nextgroup->work + 1) > group->weight / nextgroup->weight
		 *
		 * Multiply both sides by their denominators to avoid division:
		 *
		 *	(group->work + 1) * nextgroup->weight > (nextgroup->work + 1) * group->weight
		 */
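		/*
		 * For example, with group->weight == 120, group->work == 7,
		 * nextgroup->weight == 60, nextgroup->work == 2:
		 * orderleft = 8 * 60 = 480 exceeds orderright = 3 * 120 = 360,
		 * so selection advances to nextgroup; with group->work == 5,
		 * 6 * 60 = 360 does not exceed 360 and selection restarts at
		 * the head of the sorted list. The heavier group thus earns
		 * selections in proportion to its weight.
		 */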
		orderleft = (group->work + 1) * ((uint64_t)nextgroup->weight);
		orderright = (nextgroup->work + 1) * ((uint64_t)group->weight);
		if (orderleft > orderright) {
			group = nextgroup;
		} else {
			group = (grrr_group_t)queue_first(&rq->sorted_group_list);
		}
	}

	rq->current_group = group;

	return thread;
}

static void
grrr_runqueue_init(grrr_run_queue_t runq)
{
	grrr_group_index_t index;

	runq->count = 0;

	for (index = 0; index < NUM_GRRR_GROUPS; index++) {
		unsigned int prisearch;

		for (prisearch = 0;
		    prisearch < NUM_GRRR_PROPORTIONAL_PRIORITIES;
		    prisearch++) {
			if (grrr_group_mapping[prisearch] == index) {
				runq->groups[index].minpriority = (grrr_proportional_priority_t)prisearch;
				break;
			}
		}

		runq->groups[index].index = index;

		queue_init(&runq->groups[index].clients);
		runq->groups[index].count = 0;
		runq->groups[index].weight = 0;
		runq->groups[index].work = 0;
		runq->groups[index].current_client = THREAD_NULL;
	}

	queue_init(&runq->sorted_group_list);
	runq->weight = 0;
	runq->current_group = GRRR_GROUP_NULL;
}

static void
grrr_rescale_work(grrr_run_queue_t rq)
{
	grrr_group_index_t index;

	/* avoid overflow by scaling by 1/8th */
	for (index = 0; index < NUM_GRRR_GROUPS; index++) {
		rq->groups[index].work >>= 3;
	}

	rq->last_rescale_tick = grrr_rescale_tick;
}
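
/*
 * Because every group's work counter is scaled by the same factor, the
 * work ratios compared by the well-ordering condition are (nearly)
 * preserved; rescaling, triggered by the maintenance tick or by a
 * counter approaching UINT32_MAX, prevents overflow without upsetting
 * the proportions.
 */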

static boolean_t
grrr_enqueue(
	grrr_run_queue_t rq,
	thread_t thread)
{
	grrr_proportional_priority_t gpriority;
	grrr_group_index_t gindex;
	grrr_group_t group;

	gpriority = grrr_priority_mapping[thread->sched_pri];
	gindex = grrr_group_mapping[gpriority];
	group = &rq->groups[gindex];

#if 0
	thread->grrr_deficit = 0;
#endif

	if (group->count == 0) {
		/* Empty group, this is the first client */
		enqueue_tail(&group->clients, (queue_entry_t)thread);
		group->count = 1;
		group->weight = gpriority;
		group->current_client = thread;
	} else {
		/* Insert before the current client */
		if (group->current_client == THREAD_NULL ||
		    queue_first(&group->clients) == (queue_entry_t)group->current_client) {
			enqueue_head(&group->clients, (queue_entry_t)thread);
		} else {
			insque((queue_entry_t)thread, queue_prev((queue_entry_t)group->current_client));
		}
		SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
		group->count++;
		group->weight += gpriority;

		/* Since there was already a client, this is on the per-processor sorted list already */
		remqueue((queue_entry_t)group);
	}

	grrr_sorted_list_insert_group(rq, group);

	rq->count++;
	rq->weight += gpriority;

	return FALSE;
}
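
/*
 * A group's weight is the sum of its members' proportional priorities,
 * so each enqueue (and each removal) re-inserts the group at its new
 * position in the weight-sorted group list.
 */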

static thread_t
grrr_select(grrr_run_queue_t rq)
{
	thread_t thread;

	thread = grrr_intergroup_schedule(rq);
	if (thread != THREAD_NULL) {
		grrr_proportional_priority_t gpriority;
		grrr_group_index_t gindex;
		grrr_group_t group;

		gpriority = grrr_priority_mapping[thread->sched_pri];
		gindex = grrr_group_mapping[gpriority];
		group = &rq->groups[gindex];

		remqueue((queue_entry_t)thread);
		SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
		group->count--;
		group->weight -= gpriority;
		if (group->current_client == thread) {
			group->current_client = THREAD_NULL;
		}

		remqueue((queue_entry_t)group);
		if (group->count == 0) {
			if (rq->current_group == group) {
				rq->current_group = GRRR_GROUP_NULL;
			}
		} else {
			/* Need to re-insert in sorted location */
			grrr_sorted_list_insert_group(rq, group);
		}

		rq->count--;
		rq->weight -= gpriority;

		thread->runq = PROCESSOR_NULL;
	}

	return thread;
}

static void
grrr_remove(
	grrr_run_queue_t rq,
	thread_t thread)
{
	grrr_proportional_priority_t gpriority;
	grrr_group_index_t gindex;
	grrr_group_t group;

	gpriority = grrr_priority_mapping[thread->sched_pri];
	gindex = grrr_group_mapping[gpriority];
	group = &rq->groups[gindex];

	remqueue((queue_entry_t)thread);
	SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
	group->count--;
	group->weight -= gpriority;
	if (group->current_client == thread) {
		group->current_client = THREAD_NULL;
	}

	remqueue((queue_entry_t)group);
	if (group->count == 0) {
		if (rq->current_group == group) {
			rq->current_group = GRRR_GROUP_NULL;
		}
	} else {
		/* Need to re-insert in sorted location */
		grrr_sorted_list_insert_group(rq, group);
	}

	rq->count--;
	rq->weight -= gpriority;

	thread->runq = PROCESSOR_NULL;
}

static void
grrr_sorted_list_insert_group(grrr_run_queue_t rq,
    grrr_group_t group)
{
	/* Simple insertion sort */
	if (queue_empty(&rq->sorted_group_list)) {
		enqueue_tail(&rq->sorted_group_list, (queue_entry_t)group);
	} else {
		grrr_group_t search_group;

		/*
		 * Start searching from the head (heaviest weight) for the
		 * first element less than us, so we can insert before it.
		 */
		search_group = (grrr_group_t)queue_first(&rq->sorted_group_list);
		while (!queue_end(&rq->sorted_group_list, (queue_entry_t)search_group)) {
			if (search_group->weight < group->weight) {
				/* we should be before this */
				search_group = (grrr_group_t)queue_prev((queue_entry_t)search_group);
				break;
			}
			if (search_group->weight == group->weight) {
				/* Use group index as a tie breaker */
				if (search_group->index < group->index) {
					search_group = (grrr_group_t)queue_prev((queue_entry_t)search_group);
					break;
				}
			}

			/* otherwise, our weight is too small, keep going */
			search_group = (grrr_group_t)queue_next((queue_entry_t)search_group);
		}

		if (queue_end(&rq->sorted_group_list, (queue_entry_t)search_group)) {
			enqueue_tail(&rq->sorted_group_list, (queue_entry_t)group);
		} else {
			insque((queue_entry_t)group, (queue_entry_t)search_group);
		}
	}
}

#endif /* defined(CONFIG_SCHED_GRRR_CORE) */