/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>
#include <mach/thread_act.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>

#include <kern/kern_types.h>
#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>

#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <mach/sdt.h>

#include <sys/kdebug.h>

#if defined(CONFIG_SCHED_GRRR_CORE)

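/*
 * GRRR ("Group Ratio Round-Robin") is an experimental proportional-share
 * scheduling policy, in the spirit of the Group Ratio Round-Robin
 * algorithm of Caprita et al. (USENIX '05).  Mach priorities are remapped
 * to proportional-share weights; threads of similar weight are batched
 * into groups, each group services its clients round-robin, and groups
 * are played off against one another by comparing the ratio of work
 * received to group weight, so that over time each group accumulates CPU
 * time roughly in proportion to its aggregate weight.
 */
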
static void
grrr_priority_mapping_init(void);

static boolean_t
grrr_enqueue(
	grrr_run_queue_t	rq,
	thread_t		thread);

static thread_t
grrr_select(
	grrr_run_queue_t	rq);

static void
grrr_remove(
	grrr_run_queue_t	rq,
	thread_t		thread);

static void
grrr_sorted_list_insert_group(grrr_run_queue_t rq,
			      grrr_group_t group);

static void
grrr_rescale_work(grrr_run_queue_t rq);

static void
grrr_runqueue_init(grrr_run_queue_t runq);

/* Map Mach priorities to ones suitable for proportional sharing */
static grrr_proportional_priority_t grrr_priority_mapping[NRQS];

/* Map each proportional priority to its group */
static grrr_group_index_t grrr_group_mapping[NUM_GRRR_PROPORTIONAL_PRIORITIES];

uint32_t grrr_rescale_tick;

#endif /* defined(CONFIG_SCHED_GRRR_CORE) */

#if defined(CONFIG_SCHED_GRRR)

static void
sched_grrr_init(void);

static void
sched_grrr_timebase_init(void);

static void
sched_grrr_processor_init(processor_t processor);

static void
sched_grrr_pset_init(processor_set_t pset);

static void
sched_grrr_maintenance_continuation(void);

static thread_t
sched_grrr_choose_thread(processor_t processor,
			 int priority);

static thread_t
sched_grrr_steal_thread(processor_set_t pset);

static void
sched_grrr_compute_priority(thread_t thread,
			    boolean_t override_depress);

static processor_t
sched_grrr_choose_processor(processor_set_t pset,
			    processor_t processor,
			    thread_t thread);

static boolean_t
sched_grrr_processor_enqueue(
	processor_t	processor,
	thread_t	thread,
	integer_t	options);

static void
sched_grrr_processor_queue_shutdown(
	processor_t	processor);

static boolean_t
sched_grrr_processor_queue_remove(
	processor_t	processor,
	thread_t	thread);

static boolean_t
sched_grrr_processor_queue_empty(processor_t processor);

static boolean_t
sched_grrr_processor_queue_has_priority(processor_t processor,
					int priority,
					boolean_t gte);

static boolean_t
sched_grrr_priority_is_urgent(int priority);

static ast_t
sched_grrr_processor_csw_check(processor_t processor);

static uint32_t
sched_grrr_initial_quantum_size(thread_t thread);

static sched_mode_t
sched_grrr_initial_thread_sched_mode(task_t parent_task);

static boolean_t
sched_grrr_supports_timeshare_mode(void);

static boolean_t
sched_grrr_can_update_priority(thread_t thread);

static void
sched_grrr_update_priority(thread_t thread);

static void
sched_grrr_lightweight_update_priority(thread_t thread);

static void
sched_grrr_quantum_expire(thread_t thread);

static boolean_t
sched_grrr_should_current_thread_rechoose_processor(processor_t processor);

static int
sched_grrr_processor_runq_count(processor_t processor);

static uint64_t
sched_grrr_processor_runq_stats_count_sum(processor_t processor);

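/*
 * Entry points for the GRRR scheduling policy.  The initializer below is
 * positional: entries must appear in exactly the order of the
 * function-pointer fields of struct sched_dispatch_table (declared with
 * the other scheduler entry points in kern/sched_prim.h), so any
 * reordering here silently miswires the scheduler.
 */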
const struct sched_dispatch_table sched_grrr_dispatch = {
	sched_grrr_init,
	sched_grrr_timebase_init,
	sched_grrr_processor_init,
	sched_grrr_pset_init,
	sched_grrr_maintenance_continuation,
	sched_grrr_choose_thread,
	sched_grrr_steal_thread,
	sched_grrr_compute_priority,
	sched_grrr_choose_processor,
	sched_grrr_processor_enqueue,
	sched_grrr_processor_queue_shutdown,
	sched_grrr_processor_queue_remove,
	sched_grrr_processor_queue_empty,
	sched_grrr_priority_is_urgent,
	sched_grrr_processor_csw_check,
	sched_grrr_processor_queue_has_priority,
	sched_grrr_initial_quantum_size,
	sched_grrr_initial_thread_sched_mode,
	sched_grrr_supports_timeshare_mode,
	sched_grrr_can_update_priority,
	sched_grrr_update_priority,
	sched_grrr_lightweight_update_priority,
	sched_grrr_quantum_expire,
	sched_grrr_should_current_thread_rechoose_processor,
	sched_grrr_processor_runq_count,
	sched_grrr_processor_runq_stats_count_sum,
	sched_grrr_fairshare_init,
	sched_grrr_fairshare_runq_count,
	sched_grrr_fairshare_runq_stats_count_sum,
	sched_grrr_fairshare_enqueue,
	sched_grrr_fairshare_dequeue,
	sched_grrr_fairshare_queue_remove,
	TRUE	/* direct_dispatch_to_idle_processors */
};

extern int	default_preemption_rate;
extern int	max_unsafe_quanta;

static uint32_t	grrr_quantum_us;
static uint32_t	grrr_quantum;

static uint64_t	sched_grrr_tick_deadline;

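/*
 * The quantum is derived from the preemption rate: with the conventional
 * default_preemption_rate of 100, the GRRR quantum works out to
 * (1000 * 1000) / 100 = 10,000 us, i.e. a 10 ms timeslice.
 */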
static void
sched_grrr_init(void)
{
	if (default_preemption_rate < 1)
		default_preemption_rate = 100;
	grrr_quantum_us = (1000 * 1000) / default_preemption_rate;

	printf("standard grrr timeslicing quantum is %d us\n", grrr_quantum_us);

	grrr_priority_mapping_init();
}

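/*
 * Convert the quantum to absolute-time units once the timebase is known;
 * it must fit in 32 bits (asserted below).  Depression time is one
 * quantum, and the default timeshare computation/constraint pair is half
 * a quantum and a full quantum, respectively.
 */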
static void
sched_grrr_timebase_init(void)
{
	uint64_t	abstime;

	/* standard timeslicing quantum */
	clock_interval_to_absolutetime_interval(
		grrr_quantum_us, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	grrr_quantum = (uint32_t)abstime;

	thread_depress_time = 1 * grrr_quantum;
	default_timeshare_computation = grrr_quantum / 2;
	default_timeshare_constraint = grrr_quantum;

	max_unsafe_computation = max_unsafe_quanta * grrr_quantum;
	sched_safe_duration = 2 * max_unsafe_quanta * grrr_quantum;
}

static void
sched_grrr_processor_init(processor_t processor)
{
	grrr_runqueue_init(&processor->grrr_runq);
}

static void
sched_grrr_pset_init(processor_set_t pset __unused)
{
}

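/*
 * Periodic maintenance, structured as a continuation: each pass bumps
 * grrr_rescale_tick (which lazily triggers grrr_rescale_work() on the run
 * queues), recomputes the load averages, then arms a deadline ten seconds
 * out and blocks with itself as the continuation, so it is re-entered
 * from the top on every wakeup.
 */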
static void
sched_grrr_maintenance_continuation(void)
{
	uint64_t	abstime = mach_absolute_time();

	grrr_rescale_tick++;

	/*
	 * Compute various averages.
	 */
	compute_averages();

	if (sched_grrr_tick_deadline == 0)
		sched_grrr_tick_deadline = abstime;

	clock_deadline_for_periodic_event(10 * sched_one_second_interval, abstime,
	    &sched_grrr_tick_deadline);

	assert_wait_deadline((event_t)sched_grrr_maintenance_continuation, THREAD_UNINT, sched_grrr_tick_deadline);
	thread_block((thread_continue_t)sched_grrr_maintenance_continuation);
	/*NOTREACHED*/
}

static thread_t
sched_grrr_choose_thread(processor_t	processor,
			 int		priority __unused)
{
	grrr_run_queue_t	rq = &processor->grrr_runq;

	return grrr_select(rq);
}

static thread_t
sched_grrr_steal_thread(processor_set_t pset)
{
	/*
	 * GRRR does not steal threads from other processors: drop the
	 * pset lock taken by the caller and report that nothing was
	 * taken.
	 */
	pset_unlock(pset);

	return (THREAD_NULL);
}

static void
sched_grrr_compute_priority(thread_t	thread,
			    boolean_t	override_depress __unused)
{
	set_sched_pri(thread, thread->priority);
}

static processor_t
sched_grrr_choose_processor(processor_set_t	pset,
			    processor_t		processor,
			    thread_t		thread)
{
	return choose_processor(pset, processor, thread);
}

static boolean_t
sched_grrr_processor_enqueue(
	processor_t	processor,
	thread_t	thread,
	integer_t	options __unused)
{
	grrr_run_queue_t	rq = &processor->grrr_runq;
	boolean_t		result;

	result = grrr_enqueue(rq, thread);

	thread->runq = processor;

	return result;
}

static void
sched_grrr_processor_queue_shutdown(
	processor_t	processor)
{
	processor_set_t	pset = processor->processor_set;
	thread_t	thread;
	queue_head_t	tqueue, bqueue;

	queue_init(&tqueue);
	queue_init(&bqueue);

	while ((thread = sched_grrr_choose_thread(processor, IDLEPRI)) != THREAD_NULL) {
		if (thread->bound_processor == PROCESSOR_NULL) {
			enqueue_tail(&tqueue, (queue_entry_t)thread);
		} else {
			enqueue_tail(&bqueue, (queue_entry_t)thread);
		}
	}

	while ((thread = (thread_t)dequeue_head(&bqueue)) != THREAD_NULL) {
		sched_grrr_processor_enqueue(processor, thread, SCHED_TAILQ);
	}

	pset_unlock(pset);

	while ((thread = (thread_t)dequeue_head(&tqueue)) != THREAD_NULL) {
		thread_lock(thread);

		thread_setrun(thread, SCHED_TAILQ);

		thread_unlock(thread);
	}
}

static boolean_t
sched_grrr_processor_queue_remove(
	processor_t	processor,
	thread_t	thread)
{
	void		*rqlock;

	rqlock = &processor->processor_set->sched_lock;
	simple_lock(rqlock);

	if (processor == thread->runq) {
		/*
		 * Thread is on a run queue and we have a lock on
		 * that run queue.
		 */
		grrr_run_queue_t	rq = &processor->grrr_runq;

		grrr_remove(rq, thread);
	} else {
		/*
		 * The thread left the run queue before we could
		 * lock the run queue.
		 */
		assert(thread->runq == PROCESSOR_NULL);
		processor = PROCESSOR_NULL;
	}

	simple_unlock(rqlock);

	return (processor != PROCESSOR_NULL);
}

static boolean_t
sched_grrr_processor_queue_empty(processor_t processor)
{
	boolean_t result;

	result = (processor->grrr_runq.count == 0);

	return result;
}

static boolean_t
sched_grrr_processor_queue_has_priority(processor_t	processor,
					int		priority,
					boolean_t	gte __unused)
{
	grrr_run_queue_t	rq = &processor->grrr_runq;
	unsigned int		i;

	/*
	 * gte is ignored: the search always runs from the group the
	 * priority maps to up through every higher-indexed (i.e.
	 * higher-priority) group.
	 */
	i = grrr_group_mapping[grrr_priority_mapping[priority]];
	for ( ; i < NUM_GRRR_GROUPS; i++) {
		if (rq->groups[i].count > 0)
			return (TRUE);
	}

	return (FALSE);
}

/* Implement sched_preempt_pri in code */
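/*
 * A priority is urgent if it lies strictly between BASEPRI_FOREGROUND and
 * MINPRI_KERNEL, or at or above BASEPRI_PREEMPT; kernel priorities in
 * [MINPRI_KERNEL, BASEPRI_PREEMPT) are deliberately not urgent.
 */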
static boolean_t
sched_grrr_priority_is_urgent(int priority)
{
	if (priority <= BASEPRI_FOREGROUND)
		return FALSE;

	if (priority < MINPRI_KERNEL)
		return TRUE;

	if (priority >= BASEPRI_PREEMPT)
		return TRUE;

	return FALSE;
}

static ast_t
sched_grrr_processor_csw_check(processor_t processor)
{
	int count;

	count = sched_grrr_processor_runq_count(processor);

	if (count > 0) {
		return AST_PREEMPT;
	}

	return AST_NONE;
}

static uint32_t
sched_grrr_initial_quantum_size(thread_t thread __unused)
{
	return grrr_quantum;
}

static sched_mode_t
sched_grrr_initial_thread_sched_mode(task_t parent_task)
{
	if (parent_task == kernel_task)
		return TH_MODE_FIXED;
	else
		return TH_MODE_TIMESHARE;
}

static boolean_t
sched_grrr_supports_timeshare_mode(void)
{
	return TRUE;
}

static boolean_t
sched_grrr_can_update_priority(thread_t thread __unused)
{
	return FALSE;
}

static void
sched_grrr_update_priority(thread_t thread __unused)
{
}

static void
sched_grrr_lightweight_update_priority(thread_t thread __unused)
{
}

static void
sched_grrr_quantum_expire(
	thread_t	thread __unused)
{
}

static boolean_t
sched_grrr_should_current_thread_rechoose_processor(processor_t processor __unused)
{
	return (TRUE);
}

static int
sched_grrr_processor_runq_count(processor_t processor)
{
	return processor->grrr_runq.count;
}

static uint64_t
sched_grrr_processor_runq_stats_count_sum(processor_t processor)
{
	return processor->grrr_runq.runq_stats.count_sum;
}

#endif /* defined(CONFIG_SCHED_GRRR) */

#if defined(CONFIG_SCHED_GRRR_CORE)

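/*
 * The priority remapping below is piecewise linear and monotonic.  A few
 * sample points (Mach priority -> proportional priority -> group, with
 * groups of four proportional priorities):
 *
 *	  0 ->   0 -> group  0
 *	 10 ->  20 -> group  5
 *	 31 ->  93 -> group 23
 *	 51 -> 153 -> group 38
 *	 80 -> 208 -> group 52
 *	127 -> 255 -> group 63
 */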
static void
grrr_priority_mapping_init(void)
{
	unsigned int i;

	/* Map 0->0 up to 10->20 */
	for (i = 0; i <= 10; i++) {
		grrr_priority_mapping[i] = 2 * i;
	}

	/* Map user priorities 11->33 up to 51->153 */
	for (i = 11; i <= 51; i++) {
		grrr_priority_mapping[i] = 3 * i;
	}

	/* Map high priorities 52->180 up to 127->255 */
	for (i = 52; i <= 127; i++) {
		grrr_priority_mapping[i] = 128 + i;
	}

	for (i = 0; i < NUM_GRRR_PROPORTIONAL_PRIORITIES; i++) {
#if 0
		unsigned j, k;
		/* Calculate log(i); */
		for (j = 0, k = 1; k <= i; j++, k *= 2);
#endif

		/* Groups of 4 */
		grrr_group_mapping[i] = i >> 2;
	}
}

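/*
 * Pick the next client thread within a group, round-robin.  A per-client
 * deficit mechanism appears intended (note the "deficit" placeholder
 * below and the disabled grrr_deficit update in grrr_enqueue) but is
 * stubbed out, so selection always advances to the next client.
 */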
static thread_t
grrr_intragroup_schedule(grrr_group_t group)
{
	thread_t thread;

	if (group->count == 0) {
		return THREAD_NULL;
	}

	thread = group->current_client;
	if (thread == THREAD_NULL) {
		thread = (thread_t)queue_first(&group->clients);
	}

	if (1 /* deficit */) {
		group->current_client = (thread_t)queue_next((queue_entry_t)thread);
		if (queue_end(&group->clients, (queue_entry_t)group->current_client)) {
			group->current_client = (thread_t)queue_first(&group->clients);
		}

		thread = group->current_client;
	}

	return thread;
}

static thread_t
grrr_intergroup_schedule(grrr_run_queue_t rq)
{
	thread_t thread;
	grrr_group_t group;

	if (rq->count == 0) {
		return THREAD_NULL;
	}

	group = rq->current_group;

	if (group == GRRR_GROUP_NULL) {
		group = (grrr_group_t)queue_first(&rq->sorted_group_list);
	}

	thread = grrr_intragroup_schedule(group);

	if ((group->work >= (UINT32_MAX - 256)) || (rq->last_rescale_tick != grrr_rescale_tick)) {
		grrr_rescale_work(rq);
	}
	group->work++;

	if (queue_end(&rq->sorted_group_list, queue_next((queue_entry_t)group))) {
		/* last group, go back to beginning */
		group = (grrr_group_t)queue_first(&rq->sorted_group_list);
	} else {
		grrr_group_t nextgroup = (grrr_group_t)queue_next((queue_entry_t)group);
		uint64_t orderleft, orderright;

		/*
		 * The well-ordering condition for intergroup selection is:
		 *
		 *	(group->work + 1) / (nextgroup->work + 1) > group->weight / nextgroup->weight
		 *
		 * Multiply both sides by their denominators to avoid division.
		 */
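		/*
		 * Worked example: if the current group has weight 20 and
		 * work 5 while the next group has weight 30 and work 1,
		 * then orderleft = 6 * 30 = 180 and orderright = 2 * 20 = 40;
		 * since 180 > 40, the current group has already received
		 * more than its share relative to its neighbor, and
		 * selection advances to the next group.  Otherwise it
		 * restarts at the heaviest group.
		 */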
		orderleft = (group->work + 1) * ((uint64_t)nextgroup->weight);
		orderright = (nextgroup->work + 1) * ((uint64_t)group->weight);
		if (orderleft > orderright) {
			group = nextgroup;
		} else {
			group = (grrr_group_t)queue_first(&rq->sorted_group_list);
		}
	}

	rq->current_group = group;

	return thread;
}

static void
grrr_runqueue_init(grrr_run_queue_t runq)
{
	grrr_group_index_t index;

	runq->count = 0;

	for (index = 0; index < NUM_GRRR_GROUPS; index++) {
		unsigned int prisearch;

		for (prisearch = 0;
		     prisearch < NUM_GRRR_PROPORTIONAL_PRIORITIES;
		     prisearch++) {
			if (grrr_group_mapping[prisearch] == index) {
				runq->groups[index].minpriority = (grrr_proportional_priority_t)prisearch;
				break;
			}
		}

		runq->groups[index].index = index;

		queue_init(&runq->groups[index].clients);
		runq->groups[index].count = 0;
		runq->groups[index].weight = 0;
		runq->groups[index].work = 0;
		runq->groups[index].current_client = THREAD_NULL;
	}

	queue_init(&runq->sorted_group_list);
	runq->weight = 0;
	runq->current_group = GRRR_GROUP_NULL;
}

static void
grrr_rescale_work(grrr_run_queue_t rq)
{
	grrr_group_index_t index;

	/* avoid overflow by scaling by 1/8th */
	for (index = 0; index < NUM_GRRR_GROUPS; index++) {
		rq->groups[index].work >>= 3;
	}

	rq->last_rescale_tick = grrr_rescale_tick;
}

static boolean_t
grrr_enqueue(
	grrr_run_queue_t	rq,
	thread_t		thread)
{
	grrr_proportional_priority_t	gpriority;
	grrr_group_index_t		gindex;
	grrr_group_t			group;

	gpriority = grrr_priority_mapping[thread->sched_pri];
	gindex = grrr_group_mapping[gpriority];
	group = &rq->groups[gindex];

#if 0
	thread->grrr_deficit = 0;
#endif

	if (group->count == 0) {
		/* Empty group, this is the first client */
		enqueue_tail(&group->clients, (queue_entry_t)thread);
		group->count = 1;
		group->weight = gpriority;
		group->current_client = thread;
	} else {
		/* Insert before the current client */
		if (group->current_client == THREAD_NULL ||
		    queue_first(&group->clients) == (queue_entry_t)group->current_client) {
			enqueue_head(&group->clients, (queue_entry_t)thread);
		} else {
			insque((queue_entry_t)thread, queue_prev((queue_entry_t)group->current_client));
		}
		SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
		group->count++;
		group->weight += gpriority;

		/* Since there was already a client, this is on the per-processor sorted list already */
		remqueue((queue_entry_t)group);
	}

	grrr_sorted_list_insert_group(rq, group);

	rq->count++;
	rq->weight += gpriority;

	return (FALSE);
}

static thread_t
grrr_select(grrr_run_queue_t rq)
{
	thread_t thread;

	thread = grrr_intergroup_schedule(rq);
	if (thread != THREAD_NULL) {
		grrr_proportional_priority_t	gpriority;
		grrr_group_index_t		gindex;
		grrr_group_t			group;

		gpriority = grrr_priority_mapping[thread->sched_pri];
		gindex = grrr_group_mapping[gpriority];
		group = &rq->groups[gindex];

		remqueue((queue_entry_t)thread);
		SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
		group->count--;
		group->weight -= gpriority;
		if (group->current_client == thread) {
			group->current_client = THREAD_NULL;
		}

		remqueue((queue_entry_t)group);
		if (group->count == 0) {
			if (rq->current_group == group) {
				rq->current_group = GRRR_GROUP_NULL;
			}
		} else {
			/* Need to re-insert in sorted location */
			grrr_sorted_list_insert_group(rq, group);
		}

		rq->count--;
		rq->weight -= gpriority;

		thread->runq = PROCESSOR_NULL;
	}

	return (thread);
}

static void
grrr_remove(
	grrr_run_queue_t	rq,
	thread_t		thread)
{
	grrr_proportional_priority_t	gpriority;
	grrr_group_index_t		gindex;
	grrr_group_t			group;

	gpriority = grrr_priority_mapping[thread->sched_pri];
	gindex = grrr_group_mapping[gpriority];
	group = &rq->groups[gindex];

	remqueue((queue_entry_t)thread);
	SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
	group->count--;
	group->weight -= gpriority;
	if (group->current_client == thread) {
		group->current_client = THREAD_NULL;
	}

	remqueue((queue_entry_t)group);
	if (group->count == 0) {
		if (rq->current_group == group) {
			rq->current_group = GRRR_GROUP_NULL;
		}
	} else {
		/* Need to re-insert in sorted location */
		grrr_sorted_list_insert_group(rq, group);
	}

	rq->count--;
	rq->weight -= gpriority;

	thread->runq = PROCESSOR_NULL;
}

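/*
 * Keep sorted_group_list ordered by decreasing weight, with the group
 * index as a tie breaker.  For example, inserting a group of weight 25
 * into a list with weights 40, 30, 20 scans from the head, stops at 20
 * (the first lighter group) and is linked in just before it, giving
 * 40, 30, 25, 20.
 */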
static void
grrr_sorted_list_insert_group(grrr_run_queue_t rq,
			      grrr_group_t group)
{
	/* Simple insertion sort */
	if (queue_empty(&rq->sorted_group_list)) {
		enqueue_tail(&rq->sorted_group_list, (queue_entry_t)group);
	} else {
		grrr_group_t search_group;

		/* Start searching from the head (heaviest weight) for the first
		 * element less than us, so we can insert before it
		 */
		search_group = (grrr_group_t)queue_first(&rq->sorted_group_list);
		while (!queue_end(&rq->sorted_group_list, (queue_entry_t)search_group)) {
			if (search_group->weight < group->weight ||
			    (search_group->weight == group->weight &&
			     /* Use group index as a tie breaker */
			     search_group->index < group->index)) {
				/*
				 * Insert before search_group.  insque()
				 * links after its second argument, so link
				 * after search_group's predecessor (which
				 * is the queue head when search_group is
				 * the first element).
				 */
				insque((queue_entry_t)group,
				    queue_prev((queue_entry_t)search_group));
				return;
			}

			/* otherwise, our weight is too small, keep going */
			search_group = (grrr_group_t)queue_next((queue_entry_t)search_group);
		}

		/* Lighter than everything already present; go at the tail */
		enqueue_tail(&rq->sorted_group_list, (queue_entry_t)group);
	}
}

#endif /* defined(CONFIG_SCHED_GRRR_CORE) */

#if defined(CONFIG_SCHED_GRRR) || defined(CONFIG_SCHED_FIXEDPRIORITY)

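/*
 * Global fair-share run queue: a single GRRR run queue shared by all
 * processors, used for threads demoted to fair-share scheduling.
 * FS_GRRR_RUNQ is a sentinel stored in thread->runq to mark a thread as
 * being on this queue; it only needs to compare unequal to every real
 * processor_t and to PROCESSOR_NULL.
 */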
static struct grrr_run_queue	fs_grrr_runq;
#define FS_GRRR_RUNQ		((processor_t)-2)
decl_simple_lock_data(static, fs_grrr_lock);

void
sched_grrr_fairshare_init(void)
{
	grrr_priority_mapping_init();

	simple_lock_init(&fs_grrr_lock, 0);
	grrr_runqueue_init(&fs_grrr_runq);
}

int
sched_grrr_fairshare_runq_count(void)
{
	return fs_grrr_runq.count;
}

uint64_t
sched_grrr_fairshare_runq_stats_count_sum(void)
{
	return fs_grrr_runq.runq_stats.count_sum;
}

void
sched_grrr_fairshare_enqueue(thread_t thread)
{
	simple_lock(&fs_grrr_lock);

	(void)grrr_enqueue(&fs_grrr_runq, thread);

	thread->runq = FS_GRRR_RUNQ;

	simple_unlock(&fs_grrr_lock);
}

thread_t
sched_grrr_fairshare_dequeue(void)
{
	thread_t thread;

	simple_lock(&fs_grrr_lock);
	if (fs_grrr_runq.count > 0) {
		thread = grrr_select(&fs_grrr_runq);

		simple_unlock(&fs_grrr_lock);

		return (thread);
	}
	simple_unlock(&fs_grrr_lock);

	return THREAD_NULL;
}

boolean_t
sched_grrr_fairshare_queue_remove(thread_t thread)
{
	simple_lock(&fs_grrr_lock);

	if (FS_GRRR_RUNQ == thread->runq) {
		grrr_remove(&fs_grrr_runq, thread);

		simple_unlock(&fs_grrr_lock);
		return (TRUE);
	} else {
		/*
		 * The thread left the run queue before we could
		 * lock the run queue.
		 */
		assert(thread->runq == PROCESSOR_NULL);
		simple_unlock(&fs_grrr_lock);
		return (FALSE);
	}
}

#endif /* defined(CONFIG_SCHED_GRRR) || defined(CONFIG_SCHED_FIXEDPRIORITY) */