]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/sched_grrr.c
xnu-6153.121.1.tar.gz
[apple/xnu.git] / osfmk / kern / sched_grrr.c
CommitLineData
6d2010ae 1/*
39037602 2 * Copyright (c) 2009-2016 Apple Inc. All rights reserved.
6d2010ae
A
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
39037602 5 *
6d2010ae
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
39037602 14 *
6d2010ae
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
39037602 17 *
6d2010ae
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
39037602 25 *
6d2010ae
A
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <mach/mach_types.h>
30#include <mach/machine.h>
31#include <mach/policy.h>
32#include <mach/sync_policy.h>
33#include <mach/thread_act.h>
34
35#include <machine/machine_routines.h>
36#include <machine/sched_param.h>
37#include <machine/machine_cpu.h>
38
39#include <kern/kern_types.h>
40#include <kern/clock.h>
41#include <kern/counters.h>
42#include <kern/cpu_number.h>
43#include <kern/cpu_data.h>
44#include <kern/debug.h>
6d2010ae
A
45#include <kern/macro_help.h>
46#include <kern/machine.h>
47#include <kern/misc_protos.h>
48#include <kern/processor.h>
49#include <kern/queue.h>
50#include <kern/sched.h>
51#include <kern/sched_prim.h>
52#include <kern/syscall_subr.h>
53#include <kern/task.h>
54#include <kern/thread.h>
6d2010ae
A
55
56#include <vm/pmap.h>
57#include <vm/vm_kern.h>
58#include <vm/vm_map.h>
59
60#include <mach/sdt.h>
61
62#include <sys/kdebug.h>
63
#if defined(CONFIG_SCHED_GRRR_CORE)

/*
 * Forward declarations for the GRRR (Group Ratio Round-Robin) core:
 * a proportional-share run queue in which threads are grouped by a
 * remapped "proportional priority" and groups are selected by weight.
 */

static void
grrr_priority_mapping_init(void);

static boolean_t
grrr_enqueue(
	grrr_run_queue_t               rq,
	thread_t                       thread);

static thread_t
grrr_select(
	grrr_run_queue_t               rq);

static void
grrr_remove(
	grrr_run_queue_t               rq,
	thread_t                       thread);


static void
grrr_sorted_list_insert_group(grrr_run_queue_t rq,
    grrr_group_t group);

static void
grrr_rescale_work(grrr_run_queue_t rq);

static void
grrr_runqueue_init(grrr_run_queue_t runq);

/* Map Mach priorities to ones suitable for proportional sharing */
static grrr_proportional_priority_t grrr_priority_mapping[NRQS];

/* Map each proportional priority to its group */
static grrr_group_index_t grrr_group_mapping[NUM_GRRR_PROPORTIONAL_PRIORITIES];

/*
 * Global rescale generation, bumped by the maintenance continuation;
 * a run queue whose last_rescale_tick lags this triggers grrr_rescale_work().
 */
uint32_t grrr_rescale_tick;

#endif /* defined(CONFIG_SCHED_GRRR_CORE) */
103
#if defined(CONFIG_SCHED_GRRR)

/*
 * Forward declarations for the sched_grrr scheduler's dispatch-table
 * entry points (see sched_grrr_dispatch below).
 */

static void
sched_grrr_init(void);

static void
sched_grrr_timebase_init(void);

static void
sched_grrr_processor_init(processor_t processor);

static void
sched_grrr_pset_init(processor_set_t pset);

static void
sched_grrr_maintenance_continuation(void);

static thread_t
sched_grrr_choose_thread(processor_t processor,
    int priority,
    ast_t reason);

static thread_t
sched_grrr_steal_thread(processor_set_t pset);

static int
sched_grrr_compute_priority(thread_t thread);

static processor_t
sched_grrr_choose_processor( processor_set_t pset,
    processor_t processor,
    thread_t thread);

static boolean_t
sched_grrr_processor_enqueue(
	processor_t                    processor,
	thread_t                       thread,
	sched_options_t                options);

static void
sched_grrr_processor_queue_shutdown(
	processor_t                    processor);

static boolean_t
sched_grrr_processor_queue_remove(
	processor_t                    processor,
	thread_t                       thread);

static boolean_t
sched_grrr_processor_queue_empty(processor_t processor);

static boolean_t
sched_grrr_processor_queue_has_priority(processor_t processor,
    int priority,
    boolean_t gte);

static boolean_t
sched_grrr_priority_is_urgent(int priority);

static ast_t
sched_grrr_processor_csw_check(processor_t processor);

static uint32_t
sched_grrr_initial_quantum_size(thread_t thread);

static sched_mode_t
sched_grrr_initial_thread_sched_mode(task_t parent_task);

static boolean_t
sched_grrr_can_update_priority(thread_t thread);

static void
sched_grrr_update_priority(thread_t thread);

static void
sched_grrr_lightweight_update_priority(thread_t thread);

static int
sched_grrr_processor_runq_count(processor_t processor);

static uint64_t
sched_grrr_processor_runq_stats_count_sum(processor_t processor);

static int
sched_grrr_processor_bound_count(processor_t processor);

static void
sched_grrr_thread_update_scan(sched_update_scan_context_t scan_context);
/*
 * Scheduler dispatch table for the GRRR policy.  The sched_prim layer
 * calls through these hooks; GRRR-specific behavior lives in the
 * sched_grrr_* functions, while real-time and balancing hooks fall
 * back to the shared sched_rtglobal_* / sched_* defaults.
 */
const struct sched_dispatch_table sched_grrr_dispatch = {
	.sched_name                                     = "grrr",
	.init                                           = sched_grrr_init,
	.timebase_init                                  = sched_grrr_timebase_init,
	.processor_init                                 = sched_grrr_processor_init,
	.pset_init                                      = sched_grrr_pset_init,
	.maintenance_continuation                       = sched_grrr_maintenance_continuation,
	.choose_thread                                  = sched_grrr_choose_thread,
	/* Work stealing is disabled for GRRR; steal_thread only drops the pset lock. */
	.steal_thread_enabled                           = sched_steal_thread_DISABLED,
	.steal_thread                                   = sched_grrr_steal_thread,
	.compute_timeshare_priority                     = sched_grrr_compute_priority,
	.choose_processor                               = sched_grrr_choose_processor,
	.processor_enqueue                              = sched_grrr_processor_enqueue,
	.processor_queue_shutdown                       = sched_grrr_processor_queue_shutdown,
	.processor_queue_remove                         = sched_grrr_processor_queue_remove,
	.processor_queue_empty                          = sched_grrr_processor_queue_empty,
	.priority_is_urgent                             = sched_grrr_priority_is_urgent,
	.processor_csw_check                            = sched_grrr_processor_csw_check,
	.processor_queue_has_priority                   = sched_grrr_processor_queue_has_priority,
	.initial_quantum_size                           = sched_grrr_initial_quantum_size,
	.initial_thread_sched_mode                      = sched_grrr_initial_thread_sched_mode,
	.can_update_priority                            = sched_grrr_can_update_priority,
	.update_priority                                = sched_grrr_update_priority,
	.lightweight_update_priority                    = sched_grrr_lightweight_update_priority,
	.quantum_expire                                 = sched_default_quantum_expire,
	.processor_runq_count                           = sched_grrr_processor_runq_count,
	.processor_runq_stats_count_sum                 = sched_grrr_processor_runq_stats_count_sum,
	.processor_bound_count                          = sched_grrr_processor_bound_count,
	.thread_update_scan                             = sched_grrr_thread_update_scan,
	.multiple_psets_enabled                         = TRUE,
	.sched_groups_enabled                           = FALSE,
	.avoid_processor_enabled                        = FALSE,
	.thread_avoid_processor                         = NULL,
	.processor_balance                              = sched_SMT_balance,

	/* Real-time threads use the shared global RT run queue. */
	.rt_runq                                        = sched_rtglobal_runq,
	.rt_init                                        = sched_rtglobal_init,
	.rt_queue_shutdown                              = sched_rtglobal_queue_shutdown,
	.rt_runq_scan                                   = sched_rtglobal_runq_scan,
	.rt_runq_count_sum                              = sched_rtglobal_runq_count_sum,

	.qos_max_parallelism                            = sched_qos_max_parallelism,
	.check_spill                                    = sched_check_spill,
	.ipi_policy                                     = sched_ipi_policy,
	.thread_should_yield                            = sched_thread_should_yield,

	.run_count_incr                                 = sched_run_incr,
	.run_count_decr                                 = sched_run_decr,
	.update_thread_bucket                           = sched_update_thread_bucket,
	.pset_made_schedulable                          = sched_pset_made_schedulable,
};
243
extern int max_unsafe_quanta;

/* Quantum length derived from default_preemption_rate, in microseconds. */
static uint32_t grrr_quantum_us;
/* Same quantum converted to absolute-time units (fits in 32 bits; asserted). */
static uint32_t grrr_quantum;

/* Next deadline for the periodic maintenance continuation. */
static uint64_t sched_grrr_tick_deadline;
250
251static void
252sched_grrr_init(void)
253{
0a7de745 254 if (default_preemption_rate < 1) {
6d2010ae 255 default_preemption_rate = 100;
0a7de745 256 }
6d2010ae 257 grrr_quantum_us = (1000 * 1000) / default_preemption_rate;
39037602 258
6d2010ae
A
259 printf("standard grrr timeslicing quantum is %d us\n", grrr_quantum_us);
260
261 grrr_priority_mapping_init();
262}
263
/*
 * Called once the timebase is known: convert the microsecond quantum
 * into absolute-time units and derive the dependent scheduler globals
 * (depression time, timeshare computation/constraint, unsafe-duration
 * bounds).
 */
static void
sched_grrr_timebase_init(void)
{
	uint64_t abstime;

	/* standard timeslicing quantum */
	clock_interval_to_absolutetime_interval(
		grrr_quantum_us, NSEC_PER_USEC, &abstime);
	/* grrr_quantum is stored as 32 bits; verify the value fits and is nonzero. */
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	grrr_quantum = (uint32_t)abstime;

	thread_depress_time = 1 * grrr_quantum;
	default_timeshare_computation = grrr_quantum / 2;
	default_timeshare_constraint = grrr_quantum;

	max_unsafe_computation = max_unsafe_quanta * grrr_quantum;
	sched_safe_duration = 2 * max_unsafe_quanta * grrr_quantum;
}
282
283static void
284sched_grrr_processor_init(processor_t processor)
285{
286 grrr_runqueue_init(&processor->grrr_runq);
287}
288
/* GRRR keeps no per-pset state; nothing to initialize. */
static void
sched_grrr_pset_init(processor_set_t pset __unused)
{
}
293
/*
 * Periodic maintenance thread body (runs as a continuation).  Bumps the
 * global rescale generation, recomputes load averages, then re-arms a
 * 10-second periodic deadline and blocks on itself.  Never returns.
 */
static void
sched_grrr_maintenance_continuation(void)
{
	uint64_t abstime = mach_absolute_time();

	grrr_rescale_tick++;

	/*
	 * Compute various averages.
	 */
	compute_averages(1);

	/* First invocation: anchor the periodic deadline at "now". */
	if (sched_grrr_tick_deadline == 0) {
		sched_grrr_tick_deadline = abstime;
	}

	clock_deadline_for_periodic_event(10 * sched_one_second_interval, abstime,
	    &sched_grrr_tick_deadline);

	assert_wait_deadline((event_t)sched_grrr_maintenance_continuation, THREAD_UNINT, sched_grrr_tick_deadline);
	thread_block((thread_continue_t)sched_grrr_maintenance_continuation);
	/*NOTREACHED*/
}
317
6d2010ae 318static thread_t
0a7de745
A
319sched_grrr_choose_thread(processor_t processor,
320 int priority __unused,
321 ast_t reason __unused)
6d2010ae 322{
0a7de745 323 grrr_run_queue_t rq = &processor->grrr_runq;
39037602 324
0a7de745 325 return grrr_select(rq);
6d2010ae
A
326}
327
/*
 * Work-stealing hook.  GRRR does not steal (the dispatch table sets
 * steal_thread_enabled = sched_steal_thread_DISABLED); honor the
 * interface contract by dropping the pset lock the caller passed in
 * and reporting no thread.
 */
static thread_t
sched_grrr_steal_thread(processor_set_t pset)
{
	pset_unlock(pset);

	return THREAD_NULL;
}
335
3e170ce0
A
/* Timeshare priority is simply the thread's base priority; GRRR does no decay. */
static int
sched_grrr_compute_priority(thread_t thread)
{
	return thread->base_pri;
}
341
/* Delegate processor selection to the common choose_processor() policy. */
static processor_t
sched_grrr_choose_processor( processor_set_t pset,
    processor_t processor,
    thread_t thread)
{
	return choose_processor(pset, processor, thread);
}
349
350static boolean_t
351sched_grrr_processor_enqueue(
0a7de745
A
352 processor_t processor,
353 thread_t thread,
cb323159 354 sched_options_t options __unused)
6d2010ae 355{
0a7de745
A
356 grrr_run_queue_t rq = &processor->grrr_runq;
357 boolean_t result;
39037602 358
6d2010ae 359 result = grrr_enqueue(rq, thread);
39037602 360
6d2010ae 361 thread->runq = processor;
39037602 362
6d2010ae
A
363 return result;
364}
365
/*
 * Drain a processor's run queue when the processor is being shut down.
 * Called with the pset locked.  Bound threads are re-enqueued on the
 * (departing) processor; unbound threads are collected and re-dispatched
 * via thread_setrun() after the pset lock is dropped so they can migrate.
 */
static void
sched_grrr_processor_queue_shutdown(
	processor_t                    processor)
{
	processor_set_t pset = processor->processor_set;
	thread_t thread;
	queue_head_t tqueue, bqueue;

	queue_init(&tqueue);
	queue_init(&bqueue);

	/* Pull every thread off the run queue, partitioning by boundness. */
	while ((thread = sched_grrr_choose_thread(processor, IDLEPRI, AST_NONE)) != THREAD_NULL) {
		if (thread->bound_processor == PROCESSOR_NULL) {
			enqueue_tail(&tqueue, (queue_entry_t)thread);
		} else {
			enqueue_tail(&bqueue, (queue_entry_t)thread);
		}
	}

	/* Bound threads have nowhere else to go; put them back. */
	while ((thread = (thread_t)(void *)dequeue_head(&bqueue)) != THREAD_NULL) {
		sched_grrr_processor_enqueue(processor, thread, SCHED_TAILQ);
	}

	pset_unlock(pset);

	/* Re-dispatch unbound threads without the pset lock held. */
	while ((thread = (thread_t)(void *)dequeue_head(&tqueue)) != THREAD_NULL) {
		thread_lock(thread);

		thread_setrun(thread, SCHED_TAILQ);

		thread_unlock(thread);
	}
}
399
/*
 * Try to remove a thread from a processor's run queue.  Returns TRUE if
 * the thread was found and removed; FALSE if it had already left the
 * run queue before the pset lock (which protects the run queue) was
 * taken.
 */
static boolean_t
sched_grrr_processor_queue_remove(
	processor_t                    processor,
	thread_t                       thread)
{
	processor_set_t pset = processor->processor_set;

	pset_lock(pset);

	if (processor == thread->runq) {
		/*
		 * Thread is on a run queue and we have a lock on
		 * that run queue.
		 */
		grrr_run_queue_t rq = &processor->grrr_runq;

		grrr_remove(rq, thread);
	} else {
		/*
		 * The thread left the run queue before we could
		 * lock the run queue.
		 */
		assert(thread->runq == PROCESSOR_NULL);
		/* Reuse 'processor' as the success/failure flag for the return below. */
		processor = PROCESSOR_NULL;
	}

	pset_unlock(pset);

	return processor != PROCESSOR_NULL;
}
39037602 430
6d2010ae 431static boolean_t
0a7de745 432sched_grrr_processor_queue_empty(processor_t processor __unused)
6d2010ae
A
433{
434 boolean_t result;
39037602 435
6d2010ae 436 result = (processor->grrr_runq.count == 0);
39037602 437
6d2010ae
A
438 return result;
439}
440
/*
 * Report whether any runnable thread exists at or above the given Mach
 * priority.  Maps the priority to its GRRR group; because group indices
 * ascend with proportional priority, scanning from that group upward
 * covers all groups of equal-or-higher priority.  The gte flag is
 * ignored (the scan is inherently >=).
 */
static boolean_t
sched_grrr_processor_queue_has_priority(processor_t processor,
    int priority,
    boolean_t gte __unused)
{
	grrr_run_queue_t rq = &processor->grrr_runq;
	unsigned int i;

	i = grrr_group_mapping[grrr_priority_mapping[priority]];
	for (; i < NUM_GRRR_GROUPS; i++) {
		if (rq->groups[i].count > 0) {
			return TRUE;
		}
	}

	return FALSE;
}
458
459/* Implement sched_preempt_pri in code */
460static boolean_t
461sched_grrr_priority_is_urgent(int priority)
462{
0a7de745 463 if (priority <= BASEPRI_FOREGROUND) {
6d2010ae 464 return FALSE;
0a7de745 465 }
39037602 466
0a7de745 467 if (priority < MINPRI_KERNEL) {
6d2010ae 468 return TRUE;
0a7de745 469 }
6d2010ae 470
0a7de745 471 if (priority >= BASEPRI_PREEMPT) {
6d2010ae 472 return TRUE;
0a7de745 473 }
39037602 474
6d2010ae
A
475 return FALSE;
476}
477
478static ast_t
479sched_grrr_processor_csw_check(processor_t processor)
480{
0a7de745 481 int count;
39037602 482
6d2010ae 483 count = sched_grrr_processor_runq_count(processor);
39037602 484
0a7de745 485 if (count > 0) {
6d2010ae 486 return AST_PREEMPT;
0a7de745 487 }
39037602 488
6d2010ae
A
489 return AST_NONE;
490}
491
/* All threads get the same fixed quantum (absolute-time units). */
static uint32_t
sched_grrr_initial_quantum_size(thread_t thread __unused)
{
	return grrr_quantum;
}
497
498static sched_mode_t
499sched_grrr_initial_thread_sched_mode(task_t parent_task)
500{
0a7de745 501 if (parent_task == kernel_task) {
6d2010ae 502 return TH_MODE_FIXED;
0a7de745 503 } else {
39037602 504 return TH_MODE_TIMESHARE;
0a7de745 505 }
6d2010ae
A
506}
507
/* GRRR never recomputes thread priorities; decline all update requests. */
static boolean_t
sched_grrr_can_update_priority(thread_t thread __unused)
{
	return FALSE;
}
513
/* No-op: GRRR does not age or decay thread priorities. */
static void
sched_grrr_update_priority(thread_t thread __unused)
{
	return;
}
519
/* No-op: no quantum-expiry priority adjustment under GRRR. */
static void
sched_grrr_lightweight_update_priority(thread_t thread __unused)
{
	return;
}
525
/* Number of threads currently on this processor's GRRR run queue. */
static int
sched_grrr_processor_runq_count(processor_t processor)
{
	return processor->grrr_runq.count;
}
531
/* Accumulated run-queue statistics counter for this processor. */
static uint64_t
sched_grrr_processor_runq_stats_count_sum(processor_t processor)
{
	return processor->grrr_runq.runq_stats.count_sum;
}
537
/* GRRR does not track bound threads per processor; always report zero. */
static int
sched_grrr_processor_bound_count(__unused processor_t processor)
{
	return 0;
}
543
/* No periodic run-queue scan needed: priorities are never recomputed. */
static void
sched_grrr_thread_update_scan(__unused sched_update_scan_context_t scan_context)
{
	return;
}
549
6d2010ae
A
550#endif /* defined(CONFIG_SCHED_GRRR) */
551
552#if defined(CONFIG_SCHED_GRRR_CORE)
553
/*
 * Build the two static mapping tables:
 *  - grrr_priority_mapping: Mach priority (0..127) -> proportional
 *    priority (0..255), piecewise linear so higher bands stretch more;
 *  - grrr_group_mapping: proportional priority -> group index, four
 *    consecutive proportional priorities per group.
 */
static void
grrr_priority_mapping_init(void)
{
	unsigned int i;

	/* Map 0->0 up to 10->20 */
	for (i = 0; i <= 10; i++) {
		grrr_priority_mapping[i] = 2 * i;
	}

	/* Map user priorities 11->33 up to 51 -> 153 */
	for (i = 11; i <= 51; i++) {
		grrr_priority_mapping[i] = 3 * i;
	}

	/* Map high priorities 52->180 up to 127->255 */
	for (i = 52; i <= 127; i++) {
		grrr_priority_mapping[i] = 128 + i;
	}

	for (i = 0; i < NUM_GRRR_PROPORTIONAL_PRIORITIES; i++) {
#if 0
		/* Disabled experiment: logarithmic group sizing. */
		unsigned j, k;
		/* Calculate log(i); */
		for (j = 0, k = 1; k <= i; j++, k *= 2) {
			;
		}
#endif

		/* Groups of 4 */
		grrr_group_mapping[i] = i >> 2;
	}
}
587
/*
 * Round-robin within a group: advance current_client to the next thread
 * on the group's client list (wrapping at the end) and return it.
 * Returns THREAD_NULL for an empty group.
 *
 * NOTE(review): the `if (1 /* deficit */)` guard appears to be a
 * placeholder for deficit-based selection that was never implemented;
 * as written the advance always happens.
 */
static thread_t
grrr_intragroup_schedule(grrr_group_t group)
{
	thread_t thread;

	if (group->count == 0) {
		return THREAD_NULL;
	}

	thread = group->current_client;
	if (thread == THREAD_NULL) {
		thread = (thread_t)(void *)queue_first(&group->clients);
	}

	if (1 /* deficit */) {
		group->current_client = (thread_t)(void *)queue_next((queue_entry_t)thread);
		if (queue_end(&group->clients, (queue_entry_t)group->current_client)) {
			group->current_client = (thread_t)(void *)queue_first(&group->clients);
		}

		thread = group->current_client;
	}

	return thread;
}
613
/*
 * Select the next group to service (ratio round-robin across groups),
 * then pick a thread within it.  Charges one unit of work to the chosen
 * group and decides whether to advance to the next group or restart at
 * the head of the weight-sorted list, based on the work/weight
 * well-ordering condition below.  Returns THREAD_NULL if the run queue
 * is empty.
 */
static thread_t
grrr_intergroup_schedule(grrr_run_queue_t rq)
{
	thread_t thread;
	grrr_group_t group;

	if (rq->count == 0) {
		return THREAD_NULL;
	}

	group = rq->current_group;

	if (group == GRRR_GROUP_NULL) {
		group = (grrr_group_t)queue_first(&rq->sorted_group_list);
	}

	thread = grrr_intragroup_schedule(group);

	/* Rescale before the work counter can overflow, or when the global tick moved. */
	if ((group->work >= (UINT32_MAX - 256)) || (rq->last_rescale_tick != grrr_rescale_tick)) {
		grrr_rescale_work(rq);
	}
	group->work++;

	if (queue_end(&rq->sorted_group_list, queue_next((queue_entry_t)group))) {
		/* last group, go back to beginning */
		group = (grrr_group_t)queue_first(&rq->sorted_group_list);
	} else {
		grrr_group_t nextgroup = (grrr_group_t)queue_next((queue_entry_t)group);
		uint64_t orderleft, orderright;

		/*
		 * The well-ordering condition for intergroup selection is:
		 *
		 * (group->work+1) / (nextgroup->work+1) > (group->weight) / (nextgroup->weight)
		 *
		 * Multiply both sides by their denominators to avoid division
		 *
		 */
		orderleft = (group->work + 1) * ((uint64_t)nextgroup->weight);
		orderright = (nextgroup->work + 1) * ((uint64_t)group->weight);
		if (orderleft > orderright) {
			group = nextgroup;
		} else {
			group = (grrr_group_t)queue_first(&rq->sorted_group_list);
		}
	}

	rq->current_group = group;

	return thread;
}
665
666static void
0a7de745 667grrr_runqueue_init(grrr_run_queue_t runq)
6d2010ae
A
668{
669 grrr_group_index_t index;
39037602 670
6d2010ae 671 runq->count = 0;
39037602 672
6d2010ae
A
673 for (index = 0; index < NUM_GRRR_GROUPS; index++) {
674 unsigned int prisearch;
675
676 for (prisearch = 0;
0a7de745
A
677 prisearch < NUM_GRRR_PROPORTIONAL_PRIORITIES;
678 prisearch++) {
6d2010ae
A
679 if (grrr_group_mapping[prisearch] == index) {
680 runq->groups[index].minpriority = (grrr_proportional_priority_t)prisearch;
681 break;
682 }
683 }
39037602 684
6d2010ae
A
685 runq->groups[index].index = index;
686
687 queue_init(&runq->groups[index].clients);
688 runq->groups[index].count = 0;
689 runq->groups[index].weight = 0;
690 runq->groups[index].work = 0;
691 runq->groups[index].current_client = THREAD_NULL;
692 }
39037602 693
6d2010ae
A
694 queue_init(&runq->sorted_group_list);
695 runq->weight = 0;
696 runq->current_group = GRRR_GROUP_NULL;
697}
698
699static void
700grrr_rescale_work(grrr_run_queue_t rq)
701{
702 grrr_group_index_t index;
703
704 /* avoid overflow by scaling by 1/8th */
705 for (index = 0; index < NUM_GRRR_GROUPS; index++) {
706 rq->groups[index].work >>= 3;
707 }
708
709 rq->last_rescale_tick = grrr_rescale_tick;
710}
711
/*
 * Insert a thread into the run queue.  The thread's scheduled priority
 * is remapped to a proportional priority and group; the thread is
 * inserted just before the group's current client (so it runs soon
 * after the round-robin wraps), the group's weight grows by the
 * thread's proportional priority, and the group is re-sorted into the
 * weight-ordered group list.  Always returns FALSE (no preemption
 * indication).
 *
 * NOTE(review): SCHED_STATS_RUNQ_CHANGE fires only on the non-empty
 * branch here, while grrr_select/grrr_remove record it on every
 * removal — confirm whether the first-client enqueue should also be
 * counted.
 */
static boolean_t
grrr_enqueue(
	grrr_run_queue_t               rq,
	thread_t                       thread)
{
	grrr_proportional_priority_t gpriority;
	grrr_group_index_t gindex;
	grrr_group_t group;

	gpriority = grrr_priority_mapping[thread->sched_pri];
	gindex = grrr_group_mapping[gpriority];
	group = &rq->groups[gindex];

#if 0
	thread->grrr_deficit = 0;
#endif

	if (group->count == 0) {
		/* Empty group, this is the first client */
		enqueue_tail(&group->clients, (queue_entry_t)thread);
		group->count = 1;
		group->weight = gpriority;
		group->current_client = thread;
	} else {
		/* Insert before the current client */
		if (group->current_client == THREAD_NULL ||
		    queue_first(&group->clients) == (queue_entry_t)group->current_client) {
			enqueue_head(&group->clients, (queue_entry_t)thread);
		} else {
			insque((queue_entry_t)thread, queue_prev((queue_entry_t)group->current_client));
		}
		SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
		group->count++;
		group->weight += gpriority;

		/* Since there was already a client, this is on the per-processor sorted list already */
		remqueue((queue_entry_t)group);
	}

	grrr_sorted_list_insert_group(rq, group);

	rq->count++;
	rq->weight += gpriority;

	return FALSE;
}
758
/*
 * Pick and dequeue the next thread to run.  Intergroup scheduling
 * chooses the thread; this routine then removes it from its group,
 * shrinks the group's weight, re-sorts (or retires) the group, updates
 * the run-queue aggregates, and clears the thread's runq ownership.
 * Returns THREAD_NULL if the run queue is empty.
 */
static thread_t
grrr_select(grrr_run_queue_t rq)
{
	thread_t thread;

	thread = grrr_intergroup_schedule(rq);
	if (thread != THREAD_NULL) {
		grrr_proportional_priority_t gpriority;
		grrr_group_index_t gindex;
		grrr_group_t group;

		gpriority = grrr_priority_mapping[thread->sched_pri];
		gindex = grrr_group_mapping[gpriority];
		group = &rq->groups[gindex];

		remqueue((queue_entry_t)thread);
		SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
		group->count--;
		group->weight -= gpriority;
		if (group->current_client == thread) {
			group->current_client = THREAD_NULL;
		}

		/* Take the group off the sorted list; re-insert only if non-empty. */
		remqueue((queue_entry_t)group);
		if (group->count == 0) {
			if (rq->current_group == group) {
				rq->current_group = GRRR_GROUP_NULL;
			}
		} else {
			/* Need to re-insert in sorted location */
			grrr_sorted_list_insert_group(rq, group);
		}

		rq->count--;
		rq->weight -= gpriority;

		/* Thread no longer belongs to any run queue. */
		thread->runq = PROCESSOR_NULL;
	}

	return thread;
}
800
/*
 * Remove a specific thread from the run queue (e.g. on wakeup
 * cancellation).  Mirrors the dequeue bookkeeping in grrr_select:
 * shrink the group, re-sort or retire it, update aggregates, and clear
 * the thread's runq ownership.  Caller must hold the lock protecting
 * this run queue.
 */
static void
grrr_remove(
	grrr_run_queue_t               rq,
	thread_t                       thread)
{
	grrr_proportional_priority_t gpriority;
	grrr_group_index_t gindex;
	grrr_group_t group;

	gpriority = grrr_priority_mapping[thread->sched_pri];
	gindex = grrr_group_mapping[gpriority];
	group = &rq->groups[gindex];

	remqueue((queue_entry_t)thread);
	SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
	group->count--;
	group->weight -= gpriority;
	if (group->current_client == thread) {
		group->current_client = THREAD_NULL;
	}

	/* Take the group off the sorted list; re-insert only if non-empty. */
	remqueue((queue_entry_t)group);
	if (group->count == 0) {
		if (rq->current_group == group) {
			rq->current_group = GRRR_GROUP_NULL;
		}
	} else {
		/* Need to re-insert in sorted location */
		grrr_sorted_list_insert_group(rq, group);
	}

	rq->count--;
	rq->weight -= gpriority;

	thread->runq = PROCESSOR_NULL;
}
837
/*
 * Insert a group into the run queue's group list, kept sorted by
 * descending weight (group index breaks ties).  Simple linear insertion:
 * walk from the head until a lighter group is found, then insert before
 * it (by stepping back one and insque-ing after).
 */
static void
grrr_sorted_list_insert_group(grrr_run_queue_t rq,
    grrr_group_t group)
{
	/* Simple insertion sort */
	if (queue_empty(&rq->sorted_group_list)) {
		enqueue_tail(&rq->sorted_group_list, (queue_entry_t)group);
	} else {
		grrr_group_t search_group;

		/* Start searching from the head (heaviest weight) for the first
		 * element less than us, so we can insert before it
		 */
		search_group = (grrr_group_t)queue_first(&rq->sorted_group_list);
		while (!queue_end(&rq->sorted_group_list, (queue_entry_t)search_group)) {
			if (search_group->weight < group->weight) {
				/* we should be before this */
				search_group = (grrr_group_t)queue_prev((queue_entry_t)search_group);
				break;
			}
			if (search_group->weight == group->weight) {
				/* Use group index as a tie breaker */
				if (search_group->index < group->index) {
					search_group = (grrr_group_t)queue_prev((queue_entry_t)search_group);
					break;
				}
			}

			/* otherwise, our weight is too small, keep going */
			search_group = (grrr_group_t)queue_next((queue_entry_t)search_group);
		}

		/* Walked off the end: we are the lightest; otherwise insert after search_group. */
		if (queue_end(&rq->sorted_group_list, (queue_entry_t)search_group)) {
			enqueue_tail(&rq->sorted_group_list, (queue_entry_t)group);
		} else {
			insque((queue_entry_t)group, (queue_entry_t)search_group);
		}
	}
}
877
878#endif /* defined(CONFIG_SCHED_GRRR_CORE) */