/* osfmk/kern/sched_grrr.c (xnu-4903.221.2) */
/*
 * Copyright (c) 2009-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>
#include <mach/thread_act.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>

#include <kern/kern_types.h>
#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <mach/sdt.h>

#include <sys/kdebug.h>

#if defined(CONFIG_SCHED_GRRR_CORE)

static void
grrr_priority_mapping_init(void);

static boolean_t
grrr_enqueue(
    grrr_run_queue_t rq,
    thread_t thread);

static thread_t
grrr_select(
    grrr_run_queue_t rq);

static void
grrr_remove(
    grrr_run_queue_t rq,
    thread_t thread);

static void
grrr_sorted_list_insert_group(grrr_run_queue_t rq,
    grrr_group_t group);

static void
grrr_rescale_work(grrr_run_queue_t rq);

static void
grrr_runqueue_init(grrr_run_queue_t runq);

/* Map Mach priorities to ones suitable for proportional sharing */
static grrr_proportional_priority_t grrr_priority_mapping[NRQS];

/* Map each proportional priority to its group */
static grrr_group_index_t grrr_group_mapping[NUM_GRRR_PROPORTIONAL_PRIORITIES];

uint32_t grrr_rescale_tick;

#endif /* defined(CONFIG_SCHED_GRRR_CORE) */

#if defined(CONFIG_SCHED_GRRR)

static void
sched_grrr_init(void);

static void
sched_grrr_timebase_init(void);

static void
sched_grrr_processor_init(processor_t processor);

static void
sched_grrr_pset_init(processor_set_t pset);

static void
sched_grrr_maintenance_continuation(void);

static thread_t
sched_grrr_choose_thread(processor_t processor,
    int priority,
    ast_t reason);

static thread_t
sched_grrr_steal_thread(processor_set_t pset);

static int
sched_grrr_compute_priority(thread_t thread);

static processor_t
sched_grrr_choose_processor(processor_set_t pset,
    processor_t processor,
    thread_t thread);

static boolean_t
sched_grrr_processor_enqueue(
    processor_t processor,
    thread_t thread,
    integer_t options);

static void
sched_grrr_processor_queue_shutdown(
    processor_t processor);

static boolean_t
sched_grrr_processor_queue_remove(
    processor_t processor,
    thread_t thread);

static boolean_t
sched_grrr_processor_queue_empty(processor_t processor);

static boolean_t
sched_grrr_processor_queue_has_priority(processor_t processor,
    int priority,
    boolean_t gte);

static boolean_t
sched_grrr_priority_is_urgent(int priority);

static ast_t
sched_grrr_processor_csw_check(processor_t processor);

static uint32_t
sched_grrr_initial_quantum_size(thread_t thread);

static sched_mode_t
sched_grrr_initial_thread_sched_mode(task_t parent_task);

static boolean_t
sched_grrr_can_update_priority(thread_t thread);

static void
sched_grrr_update_priority(thread_t thread);

static void
sched_grrr_lightweight_update_priority(thread_t thread);

static int
sched_grrr_processor_runq_count(processor_t processor);

static uint64_t
sched_grrr_processor_runq_stats_count_sum(processor_t processor);

static int
sched_grrr_processor_bound_count(processor_t processor);

static void
sched_grrr_thread_update_scan(sched_update_scan_context_t scan_context);

const struct sched_dispatch_table sched_grrr_dispatch = {
    .sched_name = "grrr",
    .init = sched_grrr_init,
    .timebase_init = sched_grrr_timebase_init,
    .processor_init = sched_grrr_processor_init,
    .pset_init = sched_grrr_pset_init,
    .maintenance_continuation = sched_grrr_maintenance_continuation,
    .choose_thread = sched_grrr_choose_thread,
    .steal_thread_enabled = FALSE,
    .steal_thread = sched_grrr_steal_thread,
    .compute_timeshare_priority = sched_grrr_compute_priority,
    .choose_processor = sched_grrr_choose_processor,
    .processor_enqueue = sched_grrr_processor_enqueue,
    .processor_queue_shutdown = sched_grrr_processor_queue_shutdown,
    .processor_queue_remove = sched_grrr_processor_queue_remove,
    .processor_queue_empty = sched_grrr_processor_queue_empty,
    .priority_is_urgent = sched_grrr_priority_is_urgent,
    .processor_csw_check = sched_grrr_processor_csw_check,
    .processor_queue_has_priority = sched_grrr_processor_queue_has_priority,
    .initial_quantum_size = sched_grrr_initial_quantum_size,
    .initial_thread_sched_mode = sched_grrr_initial_thread_sched_mode,
    .can_update_priority = sched_grrr_can_update_priority,
    .update_priority = sched_grrr_update_priority,
    .lightweight_update_priority = sched_grrr_lightweight_update_priority,
    .quantum_expire = sched_default_quantum_expire,
    .processor_runq_count = sched_grrr_processor_runq_count,
    .processor_runq_stats_count_sum = sched_grrr_processor_runq_stats_count_sum,
    .processor_bound_count = sched_grrr_processor_bound_count,
    .thread_update_scan = sched_grrr_thread_update_scan,
    .direct_dispatch_to_idle_processors = TRUE,
    .multiple_psets_enabled = TRUE,
    .sched_groups_enabled = FALSE,
    .avoid_processor_enabled = FALSE,
    .thread_avoid_processor = NULL,
    .processor_balance = sched_SMT_balance,

    .rt_runq = sched_rtglobal_runq,
    .rt_init = sched_rtglobal_init,
    .rt_queue_shutdown = sched_rtglobal_queue_shutdown,
    .rt_runq_scan = sched_rtglobal_runq_scan,
    .rt_runq_count_sum = sched_rtglobal_runq_count_sum,

    .qos_max_parallelism = sched_qos_max_parallelism,
    .check_spill = sched_check_spill,
    .ipi_policy = sched_ipi_policy,
    .thread_should_yield = sched_thread_should_yield,
};
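/*
 * The table above is how GRRR plugs into the generic scheduler framework:
 * the sched_grrr_* entry points below implement the per-processor GRRR run
 * queue, while real-time threads fall through to the shared global RT run
 * queue (the sched_rtglobal_* callbacks) and SMT balancing is delegated to
 * sched_SMT_balance.
 */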

extern int max_unsafe_quanta;

static uint32_t grrr_quantum_us;
static uint32_t grrr_quantum;

static uint64_t sched_grrr_tick_deadline;

static void
sched_grrr_init(void)
{
    if (default_preemption_rate < 1)
        default_preemption_rate = 100;
    grrr_quantum_us = (1000 * 1000) / default_preemption_rate;

    printf("standard grrr timeslicing quantum is %d us\n", grrr_quantum_us);

    grrr_priority_mapping_init();
}

static void
sched_grrr_timebase_init(void)
{
    uint64_t abstime;

    /* standard timeslicing quantum */
    clock_interval_to_absolutetime_interval(
        grrr_quantum_us, NSEC_PER_USEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    grrr_quantum = (uint32_t)abstime;

    thread_depress_time = 1 * grrr_quantum;
    default_timeshare_computation = grrr_quantum / 2;
    default_timeshare_constraint = grrr_quantum;

    max_unsafe_computation = max_unsafe_quanta * grrr_quantum;
    sched_safe_duration = 2 * max_unsafe_quanta * grrr_quantum;
}
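/*
 * For example, with the default preemption rate of 100, sched_grrr_init()
 * computes a (1000 * 1000) / 100 = 10000 us (10 ms) quantum, so
 * default_timeshare_computation becomes half a quantum (5 ms) and
 * default_timeshare_constraint a full quantum (10 ms), all converted to
 * absolute-time units above.
 */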

static void
sched_grrr_processor_init(processor_t processor)
{
    grrr_runqueue_init(&processor->grrr_runq);
}

static void
sched_grrr_pset_init(processor_set_t pset __unused)
{
}

static void
sched_grrr_maintenance_continuation(void)
{
    uint64_t abstime = mach_absolute_time();

    grrr_rescale_tick++;

    /*
     * Compute various averages.
     */
    compute_averages(1);

    if (sched_grrr_tick_deadline == 0)
        sched_grrr_tick_deadline = abstime;

    clock_deadline_for_periodic_event(10 * sched_one_second_interval, abstime,
        &sched_grrr_tick_deadline);

    assert_wait_deadline((event_t)sched_grrr_maintenance_continuation, THREAD_UNINT, sched_grrr_tick_deadline);
    thread_block((thread_continue_t)sched_grrr_maintenance_continuation);
    /*NOTREACHED*/
}

static thread_t
sched_grrr_choose_thread(processor_t processor,
    int priority __unused,
    ast_t reason __unused)
{
    grrr_run_queue_t rq = &processor->grrr_runq;

    return grrr_select(rq);
}

static thread_t
sched_grrr_steal_thread(processor_set_t pset)
{
    pset_unlock(pset);

    return THREAD_NULL;
}

static int
sched_grrr_compute_priority(thread_t thread)
{
    return thread->base_pri;
}

static processor_t
sched_grrr_choose_processor(processor_set_t pset,
    processor_t processor,
    thread_t thread)
{
    return choose_processor(pset, processor, thread);
}

static boolean_t
sched_grrr_processor_enqueue(
    processor_t processor,
    thread_t thread,
    integer_t options __unused)
{
    grrr_run_queue_t rq = &processor->grrr_runq;
    boolean_t result;

    result = grrr_enqueue(rq, thread);

    thread->runq = processor;

    return result;
}

static void
sched_grrr_processor_queue_shutdown(
    processor_t processor)
{
    processor_set_t pset = processor->processor_set;
    thread_t thread;
    queue_head_t tqueue, bqueue;

    queue_init(&tqueue);
    queue_init(&bqueue);

    while ((thread = sched_grrr_choose_thread(processor, IDLEPRI, AST_NONE)) != THREAD_NULL) {
        if (thread->bound_processor == PROCESSOR_NULL) {
            enqueue_tail(&tqueue, (queue_entry_t)thread);
        } else {
            enqueue_tail(&bqueue, (queue_entry_t)thread);
        }
    }

    while ((thread = (thread_t)(void *)dequeue_head(&bqueue)) != THREAD_NULL) {
        sched_grrr_processor_enqueue(processor, thread, SCHED_TAILQ);
    }

    pset_unlock(pset);

    while ((thread = (thread_t)(void *)dequeue_head(&tqueue)) != THREAD_NULL) {
        thread_lock(thread);

        thread_setrun(thread, SCHED_TAILQ);

        thread_unlock(thread);
    }
}

static boolean_t
sched_grrr_processor_queue_remove(
    processor_t processor,
    thread_t thread)
{
    processor_set_t pset = processor->processor_set;

    pset_lock(pset);

    if (processor == thread->runq) {
        /*
         * Thread is on a run queue and we have a lock on
         * that run queue.
         */
        grrr_run_queue_t rq = &processor->grrr_runq;

        grrr_remove(rq, thread);
    } else {
        /*
         * The thread left the run queue before we could
         * lock the run queue.
         */
        assert(thread->runq == PROCESSOR_NULL);
        processor = PROCESSOR_NULL;
    }

    pset_unlock(pset);

    return (processor != PROCESSOR_NULL);
}

static boolean_t
sched_grrr_processor_queue_empty(processor_t processor)
{
    boolean_t result;

    result = (processor->grrr_runq.count == 0);

    return result;
}

static boolean_t
sched_grrr_processor_queue_has_priority(processor_t processor,
    int priority,
    boolean_t gte __unused)
{
    grrr_run_queue_t rq = &processor->grrr_runq;
    unsigned int i;

    i = grrr_group_mapping[grrr_priority_mapping[priority]];
    for ( ; i < NUM_GRRR_GROUPS; i++) {
        if (rq->groups[i].count > 0)
            return TRUE;
    }

    return FALSE;
}

/* Implement sched_preempt_pri in code */
static boolean_t
sched_grrr_priority_is_urgent(int priority)
{
    if (priority <= BASEPRI_FOREGROUND)
        return FALSE;

    if (priority < MINPRI_KERNEL)
        return TRUE;

    if (priority >= BASEPRI_PREEMPT)
        return TRUE;

    return FALSE;
}
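/*
 * Assuming the usual XNU priority constants (BASEPRI_FOREGROUND = 47,
 * MINPRI_KERNEL = 80, BASEPRI_PREEMPT = 92), priorities 48-79 and 92 and
 * above are treated as urgent, while foreground-and-below (<= 47) and most
 * kernel priorities (80-91) are not.
 */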

static ast_t
sched_grrr_processor_csw_check(processor_t processor)
{
    int count;

    count = sched_grrr_processor_runq_count(processor);

    if (count > 0)
        return AST_PREEMPT;

    return AST_NONE;
}

static uint32_t
sched_grrr_initial_quantum_size(thread_t thread __unused)
{
    return grrr_quantum;
}

static sched_mode_t
sched_grrr_initial_thread_sched_mode(task_t parent_task)
{
    if (parent_task == kernel_task)
        return TH_MODE_FIXED;
    else
        return TH_MODE_TIMESHARE;
}

static boolean_t
sched_grrr_can_update_priority(thread_t thread __unused)
{
    return FALSE;
}

static void
sched_grrr_update_priority(thread_t thread __unused)
{
    return;
}

static void
sched_grrr_lightweight_update_priority(thread_t thread __unused)
{
    return;
}

static int
sched_grrr_processor_runq_count(processor_t processor)
{
    return processor->grrr_runq.count;
}

static uint64_t
sched_grrr_processor_runq_stats_count_sum(processor_t processor)
{
    return processor->grrr_runq.runq_stats.count_sum;
}

static int
sched_grrr_processor_bound_count(__unused processor_t processor)
{
    return 0;
}

static void
sched_grrr_thread_update_scan(__unused sched_update_scan_context_t scan_context)
{
    return;
}

#endif /* defined(CONFIG_SCHED_GRRR) */

#if defined(CONFIG_SCHED_GRRR_CORE)

static void
grrr_priority_mapping_init(void)
{
    unsigned int i;

    /* Map 0->0 up to 10->20 */
    for (i = 0; i <= 10; i++) {
        grrr_priority_mapping[i] = 2 * i;
    }

    /* Map user priorities 11->33 up to 51->153 */
    for (i = 11; i <= 51; i++) {
        grrr_priority_mapping[i] = 3 * i;
    }

    /* Map high priorities 52->180 up to 127->255 */
    for (i = 52; i <= 127; i++) {
        grrr_priority_mapping[i] = 128 + i;
    }

    for (i = 0; i < NUM_GRRR_PROPORTIONAL_PRIORITIES; i++) {
#if 0
        unsigned j, k;
        /* Calculate log(i); */
        for (j = 0, k = 1; k <= i; j++, k *= 2);
#endif

        /* Groups of 4 */
        grrr_group_mapping[i] = i >> 2;
    }
}
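/*
 * For instance, the default user priority 31 falls in the 11..51 band, so it
 * maps to proportional priority 3 * 31 = 93 and group 93 >> 2 = 23, while
 * MAXPRI (127) maps to 128 + 127 = 255 and group 255 >> 2 = 63. Higher Mach
 * priorities therefore land in higher-numbered groups with larger per-thread
 * weights.
 */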

static thread_t
grrr_intragroup_schedule(grrr_group_t group)
{
    thread_t thread;

    if (group->count == 0) {
        return THREAD_NULL;
    }

    thread = group->current_client;
    if (thread == THREAD_NULL) {
        thread = (thread_t)(void *)queue_first(&group->clients);
    }

    if (1 /* deficit */) {
        group->current_client = (thread_t)(void *)queue_next((queue_entry_t)thread);
        if (queue_end(&group->clients, (queue_entry_t)group->current_client)) {
            group->current_client = (thread_t)(void *)queue_first(&group->clients);
        }

        thread = group->current_client;
    }

    return thread;
}
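/*
 * Note that the deficit test above is hard-wired to 1 and the per-thread
 * grrr_deficit update in grrr_enqueue() is under #if 0, so intragroup
 * selection currently degenerates to plain round-robin over the group's
 * clients.
 */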

static thread_t
grrr_intergroup_schedule(grrr_run_queue_t rq)
{
    thread_t thread;
    grrr_group_t group;

    if (rq->count == 0) {
        return THREAD_NULL;
    }

    group = rq->current_group;

    if (group == GRRR_GROUP_NULL) {
        group = (grrr_group_t)queue_first(&rq->sorted_group_list);
    }

    thread = grrr_intragroup_schedule(group);

    if ((group->work >= (UINT32_MAX - 256)) || (rq->last_rescale_tick != grrr_rescale_tick)) {
        grrr_rescale_work(rq);
    }
    group->work++;

    if (queue_end(&rq->sorted_group_list, queue_next((queue_entry_t)group))) {
        /* last group, go back to beginning */
        group = (grrr_group_t)queue_first(&rq->sorted_group_list);
    } else {
        grrr_group_t nextgroup = (grrr_group_t)queue_next((queue_entry_t)group);
        uint64_t orderleft, orderright;

        /*
         * The well-ordering condition for intergroup selection is:
         *
         *   (group->work+1) / (nextgroup->work+1) > (group->weight) / (nextgroup->weight)
         *
         * Multiply both sides by their denominators to avoid division.
         */
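        /*
         * For example, if group->work is 7 with weight 20 and nextgroup->work
         * is 3 with weight 10, then orderleft = 8 * 10 = 80 and
         * orderright = 4 * 20 = 80; since orderleft is not greater, selection
         * restarts from the head of the sorted list instead of advancing.
         */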
        orderleft = (group->work + 1) * ((uint64_t)nextgroup->weight);
        orderright = (nextgroup->work + 1) * ((uint64_t)group->weight);
        if (orderleft > orderright) {
            group = nextgroup;
        } else {
            group = (grrr_group_t)queue_first(&rq->sorted_group_list);
        }
    }

    rq->current_group = group;

    return thread;
}

static void
grrr_runqueue_init(grrr_run_queue_t runq)
{
    grrr_group_index_t index;

    runq->count = 0;

    for (index = 0; index < NUM_GRRR_GROUPS; index++) {
        unsigned int prisearch;

        for (prisearch = 0;
            prisearch < NUM_GRRR_PROPORTIONAL_PRIORITIES;
            prisearch++) {
            if (grrr_group_mapping[prisearch] == index) {
                runq->groups[index].minpriority = (grrr_proportional_priority_t)prisearch;
                break;
            }
        }

        runq->groups[index].index = index;

        queue_init(&runq->groups[index].clients);
        runq->groups[index].count = 0;
        runq->groups[index].weight = 0;
        runq->groups[index].work = 0;
        runq->groups[index].current_client = THREAD_NULL;
    }

    queue_init(&runq->sorted_group_list);
    runq->weight = 0;
    runq->current_group = GRRR_GROUP_NULL;
}

static void
grrr_rescale_work(grrr_run_queue_t rq)
{
    grrr_group_index_t index;

    /* avoid overflow by scaling by 1/8th */
    for (index = 0; index < NUM_GRRR_GROUPS; index++) {
        rq->groups[index].work >>= 3;
    }

    rq->last_rescale_tick = grrr_rescale_tick;
}

static boolean_t
grrr_enqueue(
    grrr_run_queue_t rq,
    thread_t thread)
{
    grrr_proportional_priority_t gpriority;
    grrr_group_index_t gindex;
    grrr_group_t group;

    gpriority = grrr_priority_mapping[thread->sched_pri];
    gindex = grrr_group_mapping[gpriority];
    group = &rq->groups[gindex];

#if 0
    thread->grrr_deficit = 0;
#endif

    if (group->count == 0) {
        /* Empty group, this is the first client */
        enqueue_tail(&group->clients, (queue_entry_t)thread);
        group->count = 1;
        group->weight = gpriority;
        group->current_client = thread;
    } else {
        /* Insert before the current client */
        if (group->current_client == THREAD_NULL ||
            queue_first(&group->clients) == (queue_entry_t)group->current_client) {
            enqueue_head(&group->clients, (queue_entry_t)thread);
        } else {
            insque((queue_entry_t)thread, queue_prev((queue_entry_t)group->current_client));
        }
        SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
        group->count++;
        group->weight += gpriority;

        /* Since there was already a client, this group is already on the per-processor sorted list */
        remqueue((queue_entry_t)group);
    }

    grrr_sorted_list_insert_group(rq, group);

    rq->count++;
    rq->weight += gpriority;

    return FALSE;
}

static thread_t
grrr_select(grrr_run_queue_t rq)
{
    thread_t thread;

    thread = grrr_intergroup_schedule(rq);
    if (thread != THREAD_NULL) {
        grrr_proportional_priority_t gpriority;
        grrr_group_index_t gindex;
        grrr_group_t group;

        gpriority = grrr_priority_mapping[thread->sched_pri];
        gindex = grrr_group_mapping[gpriority];
        group = &rq->groups[gindex];

        remqueue((queue_entry_t)thread);
        SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
        group->count--;
        group->weight -= gpriority;
        if (group->current_client == thread) {
            group->current_client = THREAD_NULL;
        }

        remqueue((queue_entry_t)group);
        if (group->count == 0) {
            if (rq->current_group == group) {
                rq->current_group = GRRR_GROUP_NULL;
            }
        } else {
            /* Need to re-insert in sorted location */
            grrr_sorted_list_insert_group(rq, group);
        }

        rq->count--;
        rq->weight -= gpriority;

        thread->runq = PROCESSOR_NULL;
    }

    return thread;
}

static void
grrr_remove(
    grrr_run_queue_t rq,
    thread_t thread)
{
    grrr_proportional_priority_t gpriority;
    grrr_group_index_t gindex;
    grrr_group_t group;

    gpriority = grrr_priority_mapping[thread->sched_pri];
    gindex = grrr_group_mapping[gpriority];
    group = &rq->groups[gindex];

    remqueue((queue_entry_t)thread);
    SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
    group->count--;
    group->weight -= gpriority;
    if (group->current_client == thread) {
        group->current_client = THREAD_NULL;
    }

    remqueue((queue_entry_t)group);
    if (group->count == 0) {
        if (rq->current_group == group) {
            rq->current_group = GRRR_GROUP_NULL;
        }
    } else {
        /* Need to re-insert in sorted location */
        grrr_sorted_list_insert_group(rq, group);
    }

    rq->count--;
    rq->weight -= gpriority;

    thread->runq = PROCESSOR_NULL;
}

static void
grrr_sorted_list_insert_group(grrr_run_queue_t rq,
    grrr_group_t group)
{
    /* Simple insertion sort */
    if (queue_empty(&rq->sorted_group_list)) {
        enqueue_tail(&rq->sorted_group_list, (queue_entry_t)group);
    } else {
        grrr_group_t search_group;

        /*
         * Start searching from the head (heaviest weight) for the first
         * element lighter than us, so we can insert before it.
         */
        search_group = (grrr_group_t)queue_first(&rq->sorted_group_list);
        while (!queue_end(&rq->sorted_group_list, (queue_entry_t)search_group)) {
            if (search_group->weight < group->weight) {
                /* we should be before this */
                search_group = (grrr_group_t)queue_prev((queue_entry_t)search_group);
                break;
            } else if (search_group->weight == group->weight) {
                /* Use group index as a tie breaker */
                if (search_group->index < group->index) {
                    search_group = (grrr_group_t)queue_prev((queue_entry_t)search_group);
                    break;
                }
            }

            /* otherwise, our weight is too small, keep going */
            search_group = (grrr_group_t)queue_next((queue_entry_t)search_group);
        }

        if (queue_end(&rq->sorted_group_list, (queue_entry_t)search_group)) {
            enqueue_tail(&rq->sorted_group_list, (queue_entry_t)group);
        } else {
            insque((queue_entry_t)group, (queue_entry_t)search_group);
        }
    }
}
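/*
 * For example, inserting a group of weight 30 into a list whose weights are
 * 40, 30, 10 walks past the 40, resolves the tie with the existing weight-30
 * group by index, and lands the new group between the 40 and the 10, keeping
 * the list sorted heaviest-first.
 */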

#endif /* defined(CONFIG_SCHED_GRRR_CORE) */