/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>
#include <mach/thread_act.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>

#include <kern/kern_types.h>
#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>

#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <mach/sdt.h>

#include <sys/kdebug.h>

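/*
 * GRRR ("group ratio round-robin") proportional-share scheduling.
 * Threads are bucketed into groups by mapped priority, and a group's
 * weight is the sum of its members' mapped priorities.  Groups receive
 * CPU time in proportion to their weights (see the well-ordering rule
 * in grrr_intergroup_schedule()), while threads within a group are
 * served round-robin (grrr_intragroup_schedule()).
 */
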
#if defined(CONFIG_SCHED_GRRR_CORE)

static void
grrr_priority_mapping_init(void);

static boolean_t
grrr_enqueue(grrr_run_queue_t rq,
    thread_t thread);

static thread_t
grrr_select(grrr_run_queue_t rq);

static void
grrr_remove(grrr_run_queue_t rq,
    thread_t thread);

static void
grrr_sorted_list_insert_group(grrr_run_queue_t rq,
    grrr_group_t group);

static void
grrr_rescale_work(grrr_run_queue_t rq);

static void
grrr_runqueue_init(grrr_run_queue_t runq);

/* Map Mach priorities to ones suitable for proportional sharing */
static grrr_proportional_priority_t grrr_priority_mapping[NRQS];

/* Map each proportional priority to its group */
static grrr_group_index_t grrr_group_mapping[NUM_GRRR_PROPORTIONAL_PRIORITIES];

uint32_t grrr_rescale_tick;

#endif /* defined(CONFIG_SCHED_GRRR_CORE) */

#if defined(CONFIG_SCHED_GRRR)

static void
sched_grrr_init(void);

static void
sched_grrr_timebase_init(void);

static void
sched_grrr_processor_init(processor_t processor);

static void
sched_grrr_pset_init(processor_set_t pset);

static void
sched_grrr_maintenance_continuation(void);

static thread_t
sched_grrr_choose_thread(processor_t processor,
    int priority);

static thread_t
sched_grrr_steal_thread(processor_set_t pset);

static void
sched_grrr_compute_priority(thread_t thread,
    boolean_t override_depress);

static processor_t
sched_grrr_choose_processor(processor_set_t pset,
    processor_t processor,
    thread_t thread);

static boolean_t
sched_grrr_processor_enqueue(processor_t processor,
    thread_t thread,
    integer_t options);

static void
sched_grrr_processor_queue_shutdown(processor_t processor);

static boolean_t
sched_grrr_processor_queue_remove(processor_t processor,
    thread_t thread);

static boolean_t
sched_grrr_processor_queue_empty(processor_t processor);

static boolean_t
sched_grrr_processor_queue_has_priority(processor_t processor,
    int priority,
    boolean_t gte);

static boolean_t
sched_grrr_priority_is_urgent(int priority);

static ast_t
sched_grrr_processor_csw_check(processor_t processor);

static uint32_t
sched_grrr_initial_quantum_size(thread_t thread);

static sched_mode_t
sched_grrr_initial_thread_sched_mode(task_t parent_task);

static boolean_t
sched_grrr_supports_timeshare_mode(void);

static boolean_t
sched_grrr_can_update_priority(thread_t thread);

static void
sched_grrr_update_priority(thread_t thread);

static void
sched_grrr_lightweight_update_priority(thread_t thread);

static void
sched_grrr_quantum_expire(thread_t thread);

static boolean_t
sched_grrr_should_current_thread_rechoose_processor(processor_t processor);

static int
sched_grrr_processor_runq_count(processor_t processor);

static uint64_t
sched_grrr_processor_runq_stats_count_sum(processor_t processor);

const struct sched_dispatch_table sched_grrr_dispatch = {
	sched_grrr_init,
	sched_grrr_timebase_init,
	sched_grrr_processor_init,
	sched_grrr_pset_init,
	sched_grrr_maintenance_continuation,
	sched_grrr_choose_thread,
	sched_grrr_steal_thread,
	sched_grrr_compute_priority,
	sched_grrr_choose_processor,
	sched_grrr_processor_enqueue,
	sched_grrr_processor_queue_shutdown,
	sched_grrr_processor_queue_remove,
	sched_grrr_processor_queue_empty,
	sched_grrr_priority_is_urgent,
	sched_grrr_processor_csw_check,
	sched_grrr_processor_queue_has_priority,
	sched_grrr_initial_quantum_size,
	sched_grrr_initial_thread_sched_mode,
	sched_grrr_supports_timeshare_mode,
	sched_grrr_can_update_priority,
	sched_grrr_update_priority,
	sched_grrr_lightweight_update_priority,
	sched_grrr_quantum_expire,
	sched_grrr_should_current_thread_rechoose_processor,
	sched_grrr_processor_runq_count,
	sched_grrr_processor_runq_stats_count_sum,
	sched_grrr_fairshare_init,
	sched_grrr_fairshare_runq_count,
	sched_grrr_fairshare_runq_stats_count_sum,
	sched_grrr_fairshare_enqueue,
	sched_grrr_fairshare_dequeue,
	sched_grrr_fairshare_queue_remove,
	TRUE /* direct_dispatch_to_idle_processors */
};

extern int max_unsafe_quanta;

static uint32_t grrr_quantum_us;
static uint32_t grrr_quantum;

static uint64_t sched_grrr_tick_deadline;

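/*
 * The quantum is derived from the global preemption rate: with the
 * usual default_preemption_rate of 100, this yields
 * (1000 * 1000) / 100 = 10000 us, i.e. a 10 ms timeslice.
 */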
static void
sched_grrr_init(void)
{
	if (default_preemption_rate < 1)
		default_preemption_rate = 100;
	grrr_quantum_us = (1000 * 1000) / default_preemption_rate;

	printf("standard grrr timeslicing quantum is %d us\n", grrr_quantum_us);

	grrr_priority_mapping_init();
}

static void
sched_grrr_timebase_init(void)
{
	uint64_t abstime;

	/* standard timeslicing quantum */
	clock_interval_to_absolutetime_interval(
	    grrr_quantum_us, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	grrr_quantum = (uint32_t)abstime;

	thread_depress_time = 1 * grrr_quantum;
	default_timeshare_computation = grrr_quantum / 2;
	default_timeshare_constraint = grrr_quantum;

	max_unsafe_computation = max_unsafe_quanta * grrr_quantum;
	sched_safe_duration = 2 * max_unsafe_quanta * grrr_quantum;
}

static void
sched_grrr_processor_init(processor_t processor)
{
	grrr_runqueue_init(&processor->grrr_runq);
}

static void
sched_grrr_pset_init(processor_set_t pset __unused)
{
}

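/*
 * Periodic maintenance: armed via clock_deadline_for_periodic_event()
 * with 10 * sched_one_second_interval, so this continuation runs
 * roughly every ten seconds, bumping grrr_rescale_tick and refreshing
 * the load averages.
 */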
static void
sched_grrr_maintenance_continuation(void)
{
	uint64_t abstime = mach_absolute_time();

	grrr_rescale_tick++;

	/*
	 * Compute various averages.
	 */
	compute_averages(1);

	if (sched_grrr_tick_deadline == 0)
		sched_grrr_tick_deadline = abstime;

	clock_deadline_for_periodic_event(10 * sched_one_second_interval, abstime,
	    &sched_grrr_tick_deadline);

	assert_wait_deadline((event_t)sched_grrr_maintenance_continuation, THREAD_UNINT, sched_grrr_tick_deadline);
	thread_block((thread_continue_t)sched_grrr_maintenance_continuation);
	/*NOTREACHED*/
}

static thread_t
sched_grrr_choose_thread(processor_t processor,
    int priority __unused)
{
	grrr_run_queue_t rq = &processor->grrr_runq;

	return grrr_select(rq);
}

static thread_t
sched_grrr_steal_thread(processor_set_t pset)
{
	pset_unlock(pset);

	return (THREAD_NULL);
}

static void
sched_grrr_compute_priority(thread_t thread,
    boolean_t override_depress __unused)
{
	set_sched_pri(thread, thread->priority);
}

static processor_t
sched_grrr_choose_processor(processor_set_t pset,
    processor_t processor,
    thread_t thread)
{
	return choose_processor(pset, processor, thread);
}

static boolean_t
sched_grrr_processor_enqueue(processor_t processor,
    thread_t thread,
    integer_t options __unused)
{
	grrr_run_queue_t rq = &processor->grrr_runq;
	boolean_t result;

	result = grrr_enqueue(rq, thread);

	thread->runq = processor;

	return result;
}

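/*
 * Drain the run queue of a processor being shut down.  Bound threads
 * are put back on this processor's queue (they cannot run elsewhere);
 * unbound threads are handed to thread_setrun() for placement on some
 * other processor once the pset lock is dropped.
 */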
static void
sched_grrr_processor_queue_shutdown(processor_t processor)
{
	processor_set_t pset = processor->processor_set;
	thread_t thread;
	queue_head_t tqueue, bqueue;

	queue_init(&tqueue);
	queue_init(&bqueue);

	while ((thread = sched_grrr_choose_thread(processor, IDLEPRI)) != THREAD_NULL) {
		if (thread->bound_processor == PROCESSOR_NULL) {
			enqueue_tail(&tqueue, (queue_entry_t)thread);
		} else {
			enqueue_tail(&bqueue, (queue_entry_t)thread);
		}
	}

	while ((thread = (thread_t)(void *)dequeue_head(&bqueue)) != THREAD_NULL) {
		sched_grrr_processor_enqueue(processor, thread, SCHED_TAILQ);
	}

	pset_unlock(pset);

	while ((thread = (thread_t)(void *)dequeue_head(&tqueue)) != THREAD_NULL) {
		thread_lock(thread);

		thread_setrun(thread, SCHED_TAILQ);

		thread_unlock(thread);
	}
}

static boolean_t
sched_grrr_processor_queue_remove(processor_t processor,
    thread_t thread)
{
	void *rqlock;

	rqlock = &processor->processor_set->sched_lock;
	simple_lock(rqlock);

	if (processor == thread->runq) {
		/*
		 * Thread is on a run queue and we have a lock on
		 * that run queue.
		 */
		grrr_run_queue_t rq = &processor->grrr_runq;

		grrr_remove(rq, thread);
	} else {
		/*
		 * The thread left the run queue before we could
		 * lock the run queue.
		 */
		assert(thread->runq == PROCESSOR_NULL);
		processor = PROCESSOR_NULL;
	}

	simple_unlock(rqlock);

	return (processor != PROCESSOR_NULL);
}

static boolean_t
sched_grrr_processor_queue_empty(processor_t processor)
{
	boolean_t result;

	result = (processor->grrr_runq.count == 0);

	return result;
}

static boolean_t
sched_grrr_processor_queue_has_priority(processor_t processor,
    int priority,
    boolean_t gte __unused)
{
	grrr_run_queue_t rq = &processor->grrr_runq;
	unsigned int i;

	i = grrr_group_mapping[grrr_priority_mapping[priority]];
	for ( ; i < NUM_GRRR_GROUPS; i++) {
		if (rq->groups[i].count > 0)
			return (TRUE);
	}

	return (FALSE);
}

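/*
 * With the usual Mach priority constants (BASEPRI_FOREGROUND = 47,
 * MINPRI_KERNEL = 80, BASEPRI_PREEMPT = 92; values assumed from this
 * era's osfmk/kern/sched.h), the tests below mark bands 48..79 and
 * 92..127 as urgent and everything else as not.
 */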
/* Implement sched_preempt_pri in code */
static boolean_t
sched_grrr_priority_is_urgent(int priority)
{
	if (priority <= BASEPRI_FOREGROUND)
		return FALSE;

	if (priority < MINPRI_KERNEL)
		return TRUE;

	if (priority >= BASEPRI_PREEMPT)
		return TRUE;

	return FALSE;
}

static ast_t
sched_grrr_processor_csw_check(processor_t processor)
{
	int count;

	count = sched_grrr_processor_runq_count(processor);

	if (count > 0) {
		return AST_PREEMPT;
	}

	return AST_NONE;
}

static uint32_t
sched_grrr_initial_quantum_size(thread_t thread __unused)
{
	return grrr_quantum;
}

static sched_mode_t
sched_grrr_initial_thread_sched_mode(task_t parent_task)
{
	if (parent_task == kernel_task)
		return TH_MODE_FIXED;
	else
		return TH_MODE_TIMESHARE;
}

static boolean_t
sched_grrr_supports_timeshare_mode(void)
{
	return TRUE;
}

static boolean_t
sched_grrr_can_update_priority(thread_t thread __unused)
{
	return FALSE;
}

static void
sched_grrr_update_priority(thread_t thread __unused)
{
}

static void
sched_grrr_lightweight_update_priority(thread_t thread __unused)
{
}

static void
sched_grrr_quantum_expire(thread_t thread __unused)
{
}

static boolean_t
sched_grrr_should_current_thread_rechoose_processor(processor_t processor __unused)
{
	return (TRUE);
}

static int
sched_grrr_processor_runq_count(processor_t processor)
{
	return processor->grrr_runq.count;
}

static uint64_t
sched_grrr_processor_runq_stats_count_sum(processor_t processor)
{
	return processor->grrr_runq.runq_stats.count_sum;
}

#endif /* defined(CONFIG_SCHED_GRRR) */

#if defined(CONFIG_SCHED_GRRR_CORE)

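/*
 * Piecewise-linear compression of Mach priorities into the proportional
 * priority space.  For example, BASEPRI_DEFAULT (31) falls in the user
 * band and maps to 3 * 31 = 93, which with groups of four lands in
 * group 93 >> 2 = 23.
 */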
static void
grrr_priority_mapping_init(void)
{
	unsigned int i;

	/* Map 0->0 up to 10->20 */
	for (i = 0; i <= 10; i++) {
		grrr_priority_mapping[i] = 2 * i;
	}

	/* Map user priorities 11->33 up to 51->153 */
	for (i = 11; i <= 51; i++) {
		grrr_priority_mapping[i] = 3 * i;
	}

	/* Map high priorities 52->180 up to 127->255 */
	for (i = 52; i <= 127; i++) {
		grrr_priority_mapping[i] = 128 + i;
	}

	for (i = 0; i < NUM_GRRR_PROPORTIONAL_PRIORITIES; i++) {
#if 0
		unsigned j, k;
		/* Calculate log(i); */
		for (j = 0, k = 1; k <= i; j++, k *= 2);
#endif

		/* Groups of 4 */
		grrr_group_mapping[i] = i >> 2;
	}
}

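/*
 * Round-robin within a group.  A deficit-based selection appears to be
 * stubbed out (the always-true "deficit" test below, and the disabled
 * grrr_deficit initialization in grrr_enqueue()), so each call simply
 * advances current_client around the circular client list.
 */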
static thread_t
grrr_intragroup_schedule(grrr_group_t group)
{
	thread_t thread;

	if (group->count == 0) {
		return THREAD_NULL;
	}

	thread = group->current_client;
	if (thread == THREAD_NULL) {
		thread = (thread_t)(void *)queue_first(&group->clients);
	}

	if (1 /* deficit */) {
		group->current_client = (thread_t)(void *)queue_next((queue_entry_t)thread);
		if (queue_end(&group->clients, (queue_entry_t)group->current_client)) {
			group->current_client = (thread_t)(void *)queue_first(&group->clients);
		}

		thread = group->current_client;
	}

	return thread;
}

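/*
 * Pick the group to draw the next thread from.  The well-ordering rule
 * below moves on from a group only once its banked work exceeds the
 * weight ratio to its successor: with weights 60 and 30, for instance,
 * the heavier group advances past the lighter one only when
 * (work + 1) * 30 > (next_work + 1) * 60, i.e. after doing roughly
 * twice the work, which is service in proportion to weight.
 */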
static thread_t
grrr_intergroup_schedule(grrr_run_queue_t rq)
{
	thread_t thread;
	grrr_group_t group;

	if (rq->count == 0) {
		return THREAD_NULL;
	}

	group = rq->current_group;

	if (group == GRRR_GROUP_NULL) {
		group = (grrr_group_t)queue_first(&rq->sorted_group_list);
	}

	thread = grrr_intragroup_schedule(group);

	if ((group->work >= (UINT32_MAX - 256)) || (rq->last_rescale_tick != grrr_rescale_tick)) {
		grrr_rescale_work(rq);
	}
	group->work++;

	if (queue_end(&rq->sorted_group_list, queue_next((queue_entry_t)group))) {
		/* last group, go back to beginning */
		group = (grrr_group_t)queue_first(&rq->sorted_group_list);
	} else {
		grrr_group_t nextgroup = (grrr_group_t)queue_next((queue_entry_t)group);
		uint64_t orderleft, orderright;

		/*
		 * The well-ordering condition for intergroup selection is:
		 *
		 *	(group->work+1) / (nextgroup->work+1) > (group->weight) / (nextgroup->weight)
		 *
		 * Multiply both sides by their denominators to avoid division.
		 */
		orderleft = (group->work + 1) * ((uint64_t)nextgroup->weight);
		orderright = (nextgroup->work + 1) * ((uint64_t)group->weight);
		if (orderleft > orderright) {
			group = nextgroup;
		} else {
			group = (grrr_group_t)queue_first(&rq->sorted_group_list);
		}
	}

	rq->current_group = group;

	return thread;
}

static void
grrr_runqueue_init(grrr_run_queue_t runq)
{
	grrr_group_index_t index;

	runq->count = 0;

	for (index = 0; index < NUM_GRRR_GROUPS; index++) {
		unsigned int prisearch;

		/* Find the lowest proportional priority that maps to this group */
		for (prisearch = 0;
		    prisearch < NUM_GRRR_PROPORTIONAL_PRIORITIES;
		    prisearch++) {
			if (grrr_group_mapping[prisearch] == index) {
				runq->groups[index].minpriority = (grrr_proportional_priority_t)prisearch;
				break;
			}
		}

		runq->groups[index].index = index;

		queue_init(&runq->groups[index].clients);
		runq->groups[index].count = 0;
		runq->groups[index].weight = 0;
		runq->groups[index].work = 0;
		runq->groups[index].current_client = THREAD_NULL;
	}

	queue_init(&runq->sorted_group_list);
	runq->weight = 0;
	runq->current_group = GRRR_GROUP_NULL;
}

static void
grrr_rescale_work(grrr_run_queue_t rq)
{
	grrr_group_index_t index;

	/* avoid overflow by scaling by 1/8th */
	for (index = 0; index < NUM_GRRR_GROUPS; index++) {
		rq->groups[index].work >>= 3;
	}

	rq->last_rescale_tick = grrr_rescale_tick;
}

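/*
 * Insert a thread into the group selected by its mapped priority.  A
 * new arrival is placed just before current_client, making it the last
 * member of the group to be served in the current round-robin rotation.
 */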
static boolean_t
grrr_enqueue(grrr_run_queue_t rq,
    thread_t thread)
{
	grrr_proportional_priority_t gpriority;
	grrr_group_index_t gindex;
	grrr_group_t group;

	gpriority = grrr_priority_mapping[thread->sched_pri];
	gindex = grrr_group_mapping[gpriority];
	group = &rq->groups[gindex];

#if 0
	thread->grrr_deficit = 0;
#endif

	if (group->count == 0) {
		/* Empty group, this is the first client */
		enqueue_tail(&group->clients, (queue_entry_t)thread);
		group->count = 1;
		group->weight = gpriority;
		group->current_client = thread;
	} else {
		/* Insert before the current client */
		if (group->current_client == THREAD_NULL ||
		    queue_first(&group->clients) == (queue_entry_t)group->current_client) {
			enqueue_head(&group->clients, (queue_entry_t)thread);
		} else {
			insque((queue_entry_t)thread, queue_prev((queue_entry_t)group->current_client));
		}
		SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
		group->count++;
		group->weight += gpriority;

		/* Since there was already a client, this group is already on the per-processor sorted list */
		remqueue((queue_entry_t)group);
	}

	grrr_sorted_list_insert_group(rq, group);

	rq->count++;
	rq->weight += gpriority;

	return (FALSE);
}

static thread_t
grrr_select(grrr_run_queue_t rq)
{
	thread_t thread;

	thread = grrr_intergroup_schedule(rq);
	if (thread != THREAD_NULL) {
		grrr_proportional_priority_t gpriority;
		grrr_group_index_t gindex;
		grrr_group_t group;

		gpriority = grrr_priority_mapping[thread->sched_pri];
		gindex = grrr_group_mapping[gpriority];
		group = &rq->groups[gindex];

		remqueue((queue_entry_t)thread);
		SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
		group->count--;
		group->weight -= gpriority;
		if (group->current_client == thread) {
			group->current_client = THREAD_NULL;
		}

		remqueue((queue_entry_t)group);
		if (group->count == 0) {
			if (rq->current_group == group) {
				rq->current_group = GRRR_GROUP_NULL;
			}
		} else {
			/* Need to re-insert in sorted location */
			grrr_sorted_list_insert_group(rq, group);
		}

		rq->count--;
		rq->weight -= gpriority;

		thread->runq = PROCESSOR_NULL;
	}

	return (thread);
}

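/*
 * Remove a specific thread from the run queue.  The bookkeeping mirrors
 * the dequeue path of grrr_select(): take the thread out of its group,
 * shrink the group's weight, and re-sort (or retire) the group.
 */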
static void
grrr_remove(grrr_run_queue_t rq,
    thread_t thread)
{
	grrr_proportional_priority_t gpriority;
	grrr_group_index_t gindex;
	grrr_group_t group;

	gpriority = grrr_priority_mapping[thread->sched_pri];
	gindex = grrr_group_mapping[gpriority];
	group = &rq->groups[gindex];

	remqueue((queue_entry_t)thread);
	SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
	group->count--;
	group->weight -= gpriority;
	if (group->current_client == thread) {
		group->current_client = THREAD_NULL;
	}

	remqueue((queue_entry_t)group);
	if (group->count == 0) {
		if (rq->current_group == group) {
			rq->current_group = GRRR_GROUP_NULL;
		}
	} else {
		/* Need to re-insert in sorted location */
		grrr_sorted_list_insert_group(rq, group);
	}

	rq->count--;
	rq->weight -= gpriority;

	thread->runq = PROCESSOR_NULL;
}

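/*
 * Insertion sort keeping sorted_group_list ordered by descending weight
 * (group index breaking ties), so the heaviest group sits at the head
 * where each intergroup rotation restarts.
 */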
static void
grrr_sorted_list_insert_group(grrr_run_queue_t rq,
    grrr_group_t group)
{
	/* Simple insertion sort */
	if (queue_empty(&rq->sorted_group_list)) {
		enqueue_tail(&rq->sorted_group_list, (queue_entry_t)group);
	} else {
		grrr_group_t search_group;

		/*
		 * Start searching from the head (heaviest weight) for the
		 * first element lighter than us, so we can insert before it.
		 */
		search_group = (grrr_group_t)queue_first(&rq->sorted_group_list);
		while (!queue_end(&rq->sorted_group_list, (queue_entry_t)search_group)) {
			if (search_group->weight < group->weight) {
				/* we should be before this */
				search_group = (grrr_group_t)queue_prev((queue_entry_t)search_group);
				break;
			} else if (search_group->weight == group->weight) {
				/* Use group index as a tie breaker */
				if (search_group->index < group->index) {
					search_group = (grrr_group_t)queue_prev((queue_entry_t)search_group);
					break;
				}
			}

			/* otherwise, our weight is too small, keep going */
			search_group = (grrr_group_t)queue_next((queue_entry_t)search_group);
		}

		if (queue_end(&rq->sorted_group_list, (queue_entry_t)search_group)) {
			enqueue_tail(&rq->sorted_group_list, (queue_entry_t)group);
		} else {
			insque((queue_entry_t)group, (queue_entry_t)search_group);
		}
	}
}

#endif /* defined(CONFIG_SCHED_GRRR_CORE) */

#if defined(CONFIG_SCHED_GRRR) || defined(CONFIG_SCHED_FIXEDPRIORITY)

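/*
 * Global fairshare run queue.  FS_GRRR_RUNQ is a sentinel stored in
 * thread->runq to mark a thread as queued here rather than on any
 * per-processor run queue.
 */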
static struct grrr_run_queue fs_grrr_runq;
#define FS_GRRR_RUNQ ((processor_t)-2)
decl_simple_lock_data(static, fs_grrr_lock);

void
sched_grrr_fairshare_init(void)
{
	grrr_priority_mapping_init();

	simple_lock_init(&fs_grrr_lock, 0);
	grrr_runqueue_init(&fs_grrr_runq);
}

int
sched_grrr_fairshare_runq_count(void)
{
	return fs_grrr_runq.count;
}

uint64_t
sched_grrr_fairshare_runq_stats_count_sum(void)
{
	return fs_grrr_runq.runq_stats.count_sum;
}

void
sched_grrr_fairshare_enqueue(thread_t thread)
{
	simple_lock(&fs_grrr_lock);

	(void)grrr_enqueue(&fs_grrr_runq, thread);

	thread->runq = FS_GRRR_RUNQ;

	simple_unlock(&fs_grrr_lock);
}

thread_t
sched_grrr_fairshare_dequeue(void)
{
	thread_t thread;

	simple_lock(&fs_grrr_lock);
	if (fs_grrr_runq.count > 0) {
		thread = grrr_select(&fs_grrr_runq);

		simple_unlock(&fs_grrr_lock);

		return (thread);
	}
	simple_unlock(&fs_grrr_lock);

	return THREAD_NULL;
}

boolean_t
sched_grrr_fairshare_queue_remove(thread_t thread)
{
	simple_lock(&fs_grrr_lock);

	if (FS_GRRR_RUNQ == thread->runq) {
		grrr_remove(&fs_grrr_runq, thread);

		simple_unlock(&fs_grrr_lock);
		return (TRUE);
	} else {
		/*
		 * The thread left the run queue before we could
		 * lock the run queue.
		 */
		assert(thread->runq == PROCESSOR_NULL);
		simple_unlock(&fs_grrr_lock);
		return (FALSE);
	}
}

#endif /* defined(CONFIG_SCHED_GRRR) || defined(CONFIG_SCHED_FIXEDPRIORITY) */