static boolean_t
grrr_enqueue(
- grrr_run_queue_t rq,
- thread_t thread);
+ grrr_run_queue_t rq,
+ thread_t thread);
static thread_t
grrr_select(
- grrr_run_queue_t rq);
+ grrr_run_queue_t rq);
static void
grrr_remove(
- grrr_run_queue_t rq,
- thread_t thread);
+ grrr_run_queue_t rq,
+ thread_t thread);
static void
grrr_sorted_list_insert_group(grrr_run_queue_t rq,
- grrr_group_t group);
+ grrr_group_t group);
static void
grrr_rescale_work(grrr_run_queue_t rq);
static void
-grrr_runqueue_init(grrr_run_queue_t runq);
+grrr_runqueue_init(grrr_run_queue_t runq);
/* Map Mach priorities to ones suitable for proportional sharing */
static grrr_proportional_priority_t grrr_priority_mapping[NRQS];
/* Map each proportional priority to its group */
static grrr_group_index_t grrr_group_mapping[NUM_GRRR_PROPORTIONAL_PRIORITIES];
-uint32_t grrr_rescale_tick;
+uint32_t grrr_rescale_tick;
#endif /* defined(CONFIG_SCHED_GRRR_CORE) */
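Between them, grrr_priority_mapping[] and grrr_group_mapping[] implement a two-step lookup: a Mach scheduler priority (0..NRQS-1) is first widened into a proportional-share priority, which is then bucketed into a GRRR group. The same two-step pattern recurs in grrr_enqueue(), grrr_select(), and grrr_remove() below; a minimal sketch of the resolution path, assuming only the table declarations above (the helper name is invented):

/*
 * Illustrative helper, not in the source: resolve a Mach priority
 * to its GRRR group index via the two mapping tables.
 */
static grrr_group_index_t
grrr_group_for_pri(int sched_pri)
{
    grrr_proportional_priority_t gpriority;

    assert(sched_pri >= 0 && sched_pri < NRQS);
    gpriority = grrr_priority_mapping[sched_pri];
    return grrr_group_mapping[gpriority];
}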
static void
sched_grrr_maintenance_continuation(void);
static thread_t
-sched_grrr_choose_thread(processor_t processor,
- int priority,
- ast_t reason);
+sched_grrr_choose_thread(processor_t processor,
+ int priority,
+ ast_t reason);
static thread_t
-sched_grrr_steal_thread(processor_set_t pset);
+sched_grrr_steal_thread(processor_set_t pset);
static int
sched_grrr_compute_priority(thread_t thread);
static processor_t
-sched_grrr_choose_processor( processor_set_t pset,
- processor_t processor,
- thread_t thread);
+sched_grrr_choose_processor( processor_set_t pset,
+ processor_t processor,
+ thread_t thread);
static boolean_t
sched_grrr_processor_enqueue(
- processor_t processor,
- thread_t thread,
- integer_t options);
+ processor_t processor,
+ thread_t thread,
+ sched_options_t options);
static void
sched_grrr_processor_queue_shutdown(
- processor_t processor);
+ processor_t processor);
static boolean_t
sched_grrr_processor_queue_remove(
- processor_t processor,
- thread_t thread);
+ processor_t processor,
+ thread_t thread);
static boolean_t
-sched_grrr_processor_queue_empty(processor_t processor);
+sched_grrr_processor_queue_empty(processor_t processor);
static boolean_t
-sched_grrr_processor_queue_has_priority(processor_t processor,
- int priority,
- boolean_t gte);
+sched_grrr_processor_queue_has_priority(processor_t processor,
+ int priority,
+ boolean_t gte);
static boolean_t
sched_grrr_priority_is_urgent(int priority);
static sched_mode_t
sched_grrr_initial_thread_sched_mode(task_t parent_task);
static boolean_t
-sched_grrr_can_update_priority(thread_t thread);
+sched_grrr_can_update_priority(thread_t thread);
static void
-sched_grrr_update_priority(thread_t thread);
+sched_grrr_update_priority(thread_t thread);
static void
-sched_grrr_lightweight_update_priority(thread_t thread);
+sched_grrr_lightweight_update_priority(thread_t thread);
static int
-sched_grrr_processor_runq_count(processor_t processor);
+sched_grrr_processor_runq_count(processor_t processor);
static uint64_t
sched_grrr_processor_runq_stats_count_sum(processor_t processor);
static int
-sched_grrr_processor_bound_count(processor_t processor);
+sched_grrr_processor_bound_count(processor_t processor);
static void
sched_grrr_thread_update_scan(sched_update_scan_context_t scan_context);
const struct sched_dispatch_table sched_grrr_dispatch = {
.pset_init = sched_grrr_pset_init,
.maintenance_continuation = sched_grrr_maintenance_continuation,
.choose_thread = sched_grrr_choose_thread,
- .steal_thread_enabled = FALSE,
+ .steal_thread_enabled = sched_steal_thread_DISABLED,
.steal_thread = sched_grrr_steal_thread,
.compute_timeshare_priority = sched_grrr_compute_priority,
.choose_processor = sched_grrr_choose_processor,
.processor_runq_stats_count_sum = sched_grrr_processor_runq_stats_count_sum,
.processor_bound_count = sched_grrr_processor_bound_count,
.thread_update_scan = sched_grrr_thread_update_scan,
- .direct_dispatch_to_idle_processors = TRUE,
.multiple_psets_enabled = TRUE,
.sched_groups_enabled = FALSE,
.avoid_processor_enabled = FALSE,
.check_spill = sched_check_spill,
.ipi_policy = sched_ipi_policy,
.thread_should_yield = sched_thread_should_yield,
+ .run_count_incr = sched_run_incr,
+ .run_count_decr = sched_run_decr,
+ .update_thread_bucket = sched_update_thread_bucket,
+ .pset_made_schedulable = sched_pset_made_schedulable,
};
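For orientation, the initializer ending here is the policy's dispatch table: the generic scheduler layer calls through these function pointers rather than naming sched_grrr_* directly, which is why the newly required run-count and pset hooks must be filled in even for this experimental policy. A hedged sketch of the call-through pattern (the call site is invented for illustration; SCHED() is the usual XNU dispatch accessor):

/*
 * Illustrative only: generic scheduler code reaches the GRRR
 * implementation through the ops table, never by direct call.
 */
static boolean_t
example_enqueue(processor_t processor, thread_t thread, sched_options_t options)
{
    /* Resolves to sched_grrr_processor_enqueue() when GRRR is active. */
    return SCHED(processor_enqueue)(processor, thread, options);
}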
-extern int max_unsafe_quanta;
+extern int max_unsafe_quanta;
static uint32_t grrr_quantum_us;
static uint32_t grrr_quantum;
-static uint64_t sched_grrr_tick_deadline;
+static uint64_t sched_grrr_tick_deadline;
static void
sched_grrr_init(void)
{
- if (default_preemption_rate < 1)
+ if (default_preemption_rate < 1) {
default_preemption_rate = 100;
+ }
grrr_quantum_us = (1000 * 1000) / default_preemption_rate;
printf("standard grrr timeslicing quantum is %d us\n", grrr_quantum_us);
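Worked numbers: with the fallback preemption rate of 100 enforced just above, grrr_quantum_us = (1000 * 1000) / 100 = 10,000 us, i.e. a 10 ms standard timeslice. sched_grrr_timebase_init() below converts that figure to absolute-time units, so max_unsafe_computation becomes max_unsafe_quanta quanta of 10 ms each (8 s if max_unsafe_quanta keeps its usual default of 800, an assumption here), and sched_safe_duration twice that.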
static void
sched_grrr_timebase_init(void)
{
- uint64_t abstime;
+ uint64_t abstime;
/* standard timeslicing quantum */
clock_interval_to_absolutetime_interval(
- grrr_quantum_us, NSEC_PER_USEC, &abstime);
+ grrr_quantum_us, NSEC_PER_USEC, &abstime);
assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
grrr_quantum = (uint32_t)abstime;
max_unsafe_computation = max_unsafe_quanta * grrr_quantum;
sched_safe_duration = 2 * max_unsafe_quanta * grrr_quantum;
-
}
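The assert above is what lets grrr_quantum be stored as a uint32_t: the quantum in absolute-time units must be nonzero and fit in 32 bits. As a rough check, on a hypothetical 24 MHz timebase a 10 ms quantum is 240,000 ticks, comfortably below 2^32, so the assert only fires for degenerate rates or timebases.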
static void
sched_grrr_maintenance_continuation(void)
{
- uint64_t abstime = mach_absolute_time();
+ uint64_t abstime = mach_absolute_time();
grrr_rescale_tick++;
/*
 * Compute various averages.
 */
compute_averages(1);
- if (sched_grrr_tick_deadline == 0)
+ if (sched_grrr_tick_deadline == 0) {
sched_grrr_tick_deadline = abstime;
+ }
- clock_deadline_for_periodic_event(10*sched_one_second_interval, abstime,
- &sched_grrr_tick_deadline);
+ clock_deadline_for_periodic_event(10 * sched_one_second_interval, abstime,
+ &sched_grrr_tick_deadline);
assert_wait_deadline((event_t)sched_grrr_maintenance_continuation, THREAD_UNINT, sched_grrr_tick_deadline);
thread_block((thread_continue_t)sched_grrr_maintenance_continuation);
}
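The maintenance loop above is the standard XNU continuation idiom: the thread blocks with itself as the continuation, so no kernel stack is held across the 10-second sleep. The deadline bookkeeping amounts to the arithmetic sketched below (an approximation of what clock_deadline_for_periodic_event() does, not its exact code):

/*
 * Rough model of the periodic-deadline helper: advance the deadline
 * by one period, resynchronizing to now if it has already slipped past.
 */
static void
periodic_deadline_sketch(uint64_t interval, uint64_t now, uint64_t *deadline)
{
    *deadline += interval;
    if (*deadline <= now) {
        *deadline = now + interval;
    }
}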
static thread_t
-sched_grrr_choose_thread(processor_t processor,
- int priority __unused,
- ast_t reason __unused)
+sched_grrr_choose_thread(processor_t processor,
+ int priority __unused,
+ ast_t reason __unused)
{
- grrr_run_queue_t rq = &processor->grrr_runq;
+ grrr_run_queue_t rq = &processor->grrr_runq;
- return grrr_select(rq);
+ return grrr_select(rq);
}
static thread_t
-sched_grrr_steal_thread(processor_set_t pset)
+sched_grrr_steal_thread(processor_set_t pset)
{
pset_unlock(pset);

return THREAD_NULL;
}
static processor_t
-sched_grrr_choose_processor( processor_set_t pset,
- processor_t processor,
- thread_t thread)
+sched_grrr_choose_processor( processor_set_t pset,
+ processor_t processor,
+ thread_t thread)
{
return choose_processor(pset, processor, thread);
}
static boolean_t
sched_grrr_processor_enqueue(
- processor_t processor,
- thread_t thread,
- integer_t options __unused)
+ processor_t processor,
+ thread_t thread,
+ sched_options_t options __unused)
{
- grrr_run_queue_t rq = &processor->grrr_runq;
- boolean_t result;
+ grrr_run_queue_t rq = &processor->grrr_runq;
+ boolean_t result;
result = grrr_enqueue(rq, thread);

return result;
}
static void
sched_grrr_processor_queue_shutdown(
- processor_t processor)
+ processor_t processor)
{
- processor_set_t pset = processor->processor_set;
- thread_t thread;
- queue_head_t tqueue, bqueue;
+ processor_set_t pset = processor->processor_set;
+ thread_t thread;
+ queue_head_t tqueue, bqueue;
queue_init(&tqueue);
queue_init(&bqueue);
static boolean_t
sched_grrr_processor_queue_remove(
- processor_t processor,
- thread_t thread)
+ processor_t processor,
+ thread_t thread)
{
processor_set_t pset = processor->processor_set;
/*
 * Thread is on a run queue and we have a lock on
 * that run queue.
 */
- grrr_run_queue_t rq = &processor->grrr_runq;
+ grrr_run_queue_t rq = &processor->grrr_runq;
grrr_remove(rq, thread);
} else {
pset_unlock(pset);
- return (processor != PROCESSOR_NULL);
+ return processor != PROCESSOR_NULL;
}
static boolean_t
-sched_grrr_processor_queue_empty(processor_t processor __unused)
+sched_grrr_processor_queue_empty(processor_t processor __unused)
{
boolean_t result;

result = (processor->grrr_runq.count == 0);

return result;
}
static boolean_t
-sched_grrr_processor_queue_has_priority(processor_t processor,
- int priority,
- boolean_t gte __unused)
+sched_grrr_processor_queue_has_priority(processor_t processor,
+ int priority,
+ boolean_t gte __unused)
{
- grrr_run_queue_t rq = &processor->grrr_runq;
- unsigned int i;
+ grrr_run_queue_t rq = &processor->grrr_runq;
+ unsigned int i;
i = grrr_group_mapping[grrr_priority_mapping[priority]];
- for ( ; i < NUM_GRRR_GROUPS; i++) {
- if (rq->groups[i].count > 0)
+ for (; i < NUM_GRRR_GROUPS; i++) {
+ if (rq->groups[i].count > 0) {
return TRUE;
+ }
}
return FALSE;
}
static boolean_t
sched_grrr_priority_is_urgent(int priority)
{
- if (priority <= BASEPRI_FOREGROUND)
+ if (priority <= BASEPRI_FOREGROUND) {
return FALSE;
+ }
- if (priority < MINPRI_KERNEL)
+ if (priority < MINPRI_KERNEL) {
return TRUE;
+ }
- if (priority >= BASEPRI_PREEMPT)
+ if (priority >= BASEPRI_PREEMPT) {
return TRUE;
+ }
return FALSE;
}
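With the usual XNU priority constants (an assumption here: BASEPRI_FOREGROUND = 47, MINPRI_KERNEL = 80, BASEPRI_PREEMPT = 92), the bands work out to: 0..47 not urgent, 48..79 urgent, 80..91 not urgent (kernel priorities below the preemption band), and 92 and up urgent.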
static ast_t
sched_grrr_processor_csw_check(processor_t processor)
{
- int count;
+ int count;
count = sched_grrr_processor_runq_count(processor);
- if (count > 0)
+ if (count > 0) {
return AST_PREEMPT;
+ }
return AST_NONE;
}
static sched_mode_t
sched_grrr_initial_thread_sched_mode(task_t parent_task)
{
- if (parent_task == kernel_task)
+ if (parent_task == kernel_task) {
return TH_MODE_FIXED;
- else
+ } else {
return TH_MODE_TIMESHARE;
+ }
}
static boolean_t
-sched_grrr_can_update_priority(thread_t thread __unused)
+sched_grrr_can_update_priority(thread_t thread __unused)
{
return FALSE;
}
static void
-sched_grrr_update_priority(thread_t thread __unused)
+sched_grrr_update_priority(thread_t thread __unused)
{
return;
}
static void
-sched_grrr_lightweight_update_priority(thread_t thread __unused)
+sched_grrr_lightweight_update_priority(thread_t thread __unused)
{
return;
}
static int
-sched_grrr_processor_runq_count(processor_t processor)
+sched_grrr_processor_runq_count(processor_t processor)
{
return processor->grrr_runq.count;
}
static uint64_t
-sched_grrr_processor_runq_stats_count_sum(processor_t processor)
+sched_grrr_processor_runq_stats_count_sum(processor_t processor)
{
return processor->grrr_runq.runq_stats.count_sum;
}
static int
-sched_grrr_processor_bound_count(__unused processor_t processor)
+sched_grrr_processor_bound_count(__unused processor_t processor)
{
return 0;
}
unsigned int i;
/* Map 0->0 up to 10->20 */
- for (i=0; i <= 10; i++) {
- grrr_priority_mapping[i] = 2*i;
+ for (i = 0; i <= 10; i++) {
+ grrr_priority_mapping[i] = 2 * i;
}
/* Map user priorities 11->33 up to 51 -> 153 */
- for (i=11; i <= 51; i++) {
- grrr_priority_mapping[i] = 3*i;
+ for (i = 11; i <= 51; i++) {
+ grrr_priority_mapping[i] = 3 * i;
}
/* Map high priorities 52->180 up to 127->255 */
- for (i=52; i <= 127; i++) {
+ for (i = 52; i <= 127; i++) {
grrr_priority_mapping[i] = 128 + i;
}
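Taken together, the three loops build a piecewise-linear, strictly increasing map: 0..10 -> 0..20, 11..51 -> 33..153, and 52..127 -> 180..255. Monotonicity holds across the seams as well (20 < 33 and 153 < 180), so a higher Mach priority always yields a strictly higher proportional priority. A throwaway self-check, assuming only the loops above (the helper is invented):

/*
 * Illustrative self-check, not in the source: the mapping must be
 * strictly monotonic over the full Mach priority range.
 */
static void
grrr_check_priority_mapping(void)
{
    unsigned int i;

    for (i = 1; i < NRQS; i++) {
        assert(grrr_priority_mapping[i] > grrr_priority_mapping[i - 1]);
    }
}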
for (i = 0; i < NUM_GRRR_PROPORTIONAL_PRIORITIES; i++) {
-
#if 0
unsigned j, k;
/* Calculate log(i); */
- for (j=0, k=1; k <= i; j++, k *= 2);
+ for (j = 0, k = 1; k <= i; j++, k *= 2) {
+ ;
+ }
#endif
/* Groups of 4 */
thread = grrr_intragroup_schedule(group);
- if ((group->work >= (UINT32_MAX-256)) || (rq->last_rescale_tick != grrr_rescale_tick)) {
+ if ((group->work >= (UINT32_MAX - 256)) || (rq->last_rescale_tick != grrr_rescale_tick)) {
grrr_rescale_work(rq);
}
group->work++;
}
static void
-grrr_runqueue_init(grrr_run_queue_t runq)
+grrr_runqueue_init(grrr_run_queue_t runq)
{
grrr_group_index_t index;
unsigned int prisearch;
for (prisearch = 0;
- prisearch < NUM_GRRR_PROPORTIONAL_PRIORITIES;
- prisearch++) {
+ prisearch < NUM_GRRR_PROPORTIONAL_PRIORITIES;
+ prisearch++) {
if (grrr_group_mapping[prisearch] == index) {
runq->groups[index].minpriority = (grrr_proportional_priority_t)prisearch;
break;
static boolean_t
grrr_enqueue(
- grrr_run_queue_t rq,
- thread_t thread)
+ grrr_run_queue_t rq,
+ thread_t thread)
{
- grrr_proportional_priority_t gpriority;
- grrr_group_index_t gindex;
- grrr_group_t group;
+ grrr_proportional_priority_t gpriority;
+ grrr_group_index_t gindex;
+ grrr_group_t group;
gpriority = grrr_priority_mapping[thread->sched_pri];
gindex = grrr_group_mapping[gpriority];
} else {
/* Insert before the current client */
if (group->current_client == THREAD_NULL ||
- queue_first(&group->clients) == (queue_entry_t)group->current_client) {
+ queue_first(&group->clients) == (queue_entry_t)group->current_client) {
enqueue_head(&group->clients, (queue_entry_t)thread);
} else {
insque((queue_entry_t)thread, queue_prev((queue_entry_t)group->current_client));
}
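The insert-before-current-client branch above keeps a late arrival from jumping the rotation: the new thread is placed immediately ahead of the client currently being served, so it is reached only after the in-progress pass over the rest of the group. A positional illustration (comment only; the rotation order is inferred from the queue layout, not quoted from the intragroup scheduler):

/*
 * Clients A B C with current_client == B; enqueueing T yields
 *
 *     head -> A  T  [B]  C -> tail
 *
 * Service continues with B, C, wraps to A, and reaches T only after
 * nearly a full rotation: round-robin fairness for late arrivals.
 */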
static thread_t
-grrr_select(grrr_run_queue_t rq)
+grrr_select(grrr_run_queue_t rq)
{
- thread_t thread;
+ thread_t thread;
thread = grrr_intergroup_schedule(rq);
if (thread != THREAD_NULL) {
- grrr_proportional_priority_t gpriority;
- grrr_group_index_t gindex;
- grrr_group_t group;
+ grrr_proportional_priority_t gpriority;
+ grrr_group_index_t gindex;
+ grrr_group_t group;
gpriority = grrr_priority_mapping[thread->sched_pri];
gindex = grrr_group_mapping[gpriority];
static void
grrr_remove(
- grrr_run_queue_t rq,
- thread_t thread)
+ grrr_run_queue_t rq,
+ thread_t thread)
{
- grrr_proportional_priority_t gpriority;
- grrr_group_index_t gindex;
- grrr_group_t group;
+ grrr_proportional_priority_t gpriority;
+ grrr_group_index_t gindex;
+ grrr_group_t group;
gpriority = grrr_priority_mapping[thread->sched_pri];
gindex = grrr_group_mapping[gpriority];
static void
grrr_sorted_list_insert_group(grrr_run_queue_t rq,
- grrr_group_t group)
+ grrr_group_t group)
{
/* Simple insertion sort */
if (queue_empty(&rq->sorted_group_list)) {
* element less than us, so we can insert before it
*/
search_group = (grrr_group_t)queue_first(&rq->sorted_group_list);
- while (!queue_end(&rq->sorted_group_list, (queue_entry_t)search_group) ) {
-
+ while (!queue_end(&rq->sorted_group_list, (queue_entry_t)search_group)) {
if (search_group->weight < group->weight) {
/* we should be before this */
search_group = (grrr_group_t)queue_prev((queue_entry_t)search_group);
break;
- } if (search_group->weight == group->weight) {
+ }
+ if (search_group->weight == group->weight) {
/* Use group index as a tie breaker */
if (search_group->index < group->index) {
search_group = (grrr_group_t)queue_prev((queue_entry_t)search_group);
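The walk above amounts to insertion into a list kept sorted by descending weight, with the group index as tie breaker: a heavier group sorts earlier, and, as read from the branch above, among equal weights the higher-index group goes first. An equivalent standalone predicate (illustrative, not in the source):

/*
 * Illustrative ordering predicate for the sorted group list:
 * heavier groups first; on equal weight, higher index first.
 */
static boolean_t
grrr_group_precedes(grrr_group_t a, grrr_group_t b)
{
    if (a->weight != b->weight) {
        return (a->weight > b->weight) ? TRUE : FALSE;
    }
    return (a->index > b->index) ? TRUE : FALSE;
}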