X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/d190cdc3f5544636abb56dc1874be391d3e1b148..cb3231590a3c94ab4375e2228bd5e86b0cf1ad7e:/osfmk/kern/priority.c?ds=sidebyside diff --git a/osfmk/kern/priority.c b/osfmk/kern/priority.c index dcf7715bd..5ac1ce756 100644 --- a/osfmk/kern/priority.c +++ b/osfmk/kern/priority.c @@ -1,8 +1,8 @@ /* - * Copyright (c) 2000-2010 Apple Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -76,12 +76,16 @@ #include #include #include +#include +#include #ifdef CONFIG_MACH_APPROXIMATE_TIME #include /* for commpage_update_mach_approximate_time */ #endif -static void sched_update_thread_bucket(thread_t thread); +#if MONOTONIC +#include +#endif /* MONOTONIC */ /* * thread_quantum_expire: @@ -93,15 +97,13 @@ static void sched_update_thread_bucket(thread_t thread); void thread_quantum_expire( - timer_call_param_t p0, - timer_call_param_t p1) + timer_call_param_t p0, + timer_call_param_t p1) { - processor_t processor = p0; - thread_t thread = p1; - ast_t preempt; - uint64_t ctime; - int urgency; - uint64_t ignore1, ignore2; + processor_t processor = p0; + thread_t thread = p1; + ast_t preempt; + uint64_t ctime; assert(processor == current_processor()); assert(thread == current_thread()); @@ -113,22 +115,22 @@ thread_quantum_expire( /* * We bill CPU time to both the individual thread and its task. 
* - * Because this balance adjustment could potentially attempt to wake this very - * thread, we must credit the ledger before taking the thread lock. The ledger - * pointers are only manipulated by the thread itself at the ast boundary. + * Because this balance adjustment could potentially attempt to wake this + * very thread, we must credit the ledger before taking the thread lock. + * The ledger pointers are only manipulated by the thread itself at the ast + * boundary. * - * TODO: This fails to account for the time between when the timer was armed and when it fired. - * It should be based on the system_timer and running a thread_timer_event operation here. + * TODO: This fails to account for the time between when the timer was + * armed and when it fired. It should be based on the system_timer and + * running a timer_update operation here. */ ledger_credit(thread->t_ledger, task_ledgers.cpu_time, thread->quantum_remaining); ledger_credit(thread->t_threadledger, thread_ledgers.cpu_time, thread->quantum_remaining); -#ifdef CONFIG_BANK if (thread->t_bankledger) { ledger_credit(thread->t_bankledger, bank_ledgers.cpu_time, - (thread->quantum_remaining - thread->t_deduct_bank_ledger_time)); + (thread->quantum_remaining - thread->t_deduct_bank_ledger_time)); } thread->t_deduct_bank_ledger_time = 0; -#endif ctime = mach_absolute_time(); @@ -136,6 +138,10 @@ thread_quantum_expire( commpage_update_mach_approximate_time(ctime); #endif +#if MONOTONIC + mt_sched_update(thread); +#endif /* MONOTONIC */ + thread_lock(thread); /* @@ -148,16 +154,18 @@ thread_quantum_expire( /* * Check for fail-safe trip. */ - if ((thread->sched_mode == TH_MODE_REALTIME || thread->sched_mode == TH_MODE_FIXED) && - !(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) && - !(thread->options & TH_OPT_SYSTEM_CRITICAL)) { - uint64_t new_computation; - - new_computation = ctime - thread->computation_epoch; - new_computation += thread->computation_metered; - if (new_computation > max_unsafe_computation) { - KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAILSAFE)|DBG_FUNC_NONE, - (uintptr_t)thread->sched_pri, (uintptr_t)thread->sched_mode, 0, 0, 0); + if ((thread->sched_mode == TH_MODE_REALTIME || thread->sched_mode == TH_MODE_FIXED) && + !(thread->sched_flags & TH_SFLAG_PROMOTED) && + !(thread->kern_promotion_schedpri != 0) && + !(thread->sched_flags & TH_SFLAG_PROMOTE_REASON_MASK) && + !(thread->options & TH_OPT_SYSTEM_CRITICAL)) { + uint64_t new_computation; + + new_computation = ctime - thread->computation_epoch; + new_computation += thread->computation_metered; + if (new_computation > max_unsafe_computation) { + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAILSAFE) | DBG_FUNC_NONE, + (uintptr_t)thread->sched_pri, (uintptr_t)thread->sched_mode, 0, 0, 0); thread->safe_release = ctime + sched_safe_duration; @@ -168,20 +176,15 @@ thread_quantum_expire( /* * Recompute scheduled priority if appropriate. 
*/ - if (SCHED(can_update_priority)(thread)) + if (SCHED(can_update_priority)(thread)) { SCHED(update_priority)(thread); - else + } else { SCHED(lightweight_update_priority)(thread); + } - if (thread->sched_mode != TH_MODE_REALTIME) + if (thread->sched_mode != TH_MODE_REALTIME) { SCHED(quantum_expire)(thread); - - processor->current_pri = thread->sched_pri; - processor->current_thmode = thread->sched_mode; - - /* Tell platform layer that we are still running this thread */ - urgency = thread_get_urgency(thread, &ignore1, &ignore2); - machine_thread_going_on_core(thread, urgency, 0, 0); + } /* * This quantum is up, give this thread another. @@ -198,31 +201,57 @@ thread_quantum_expire( * during privilege transitions, synthesize an event now. */ if (!thread->precise_user_kernel_time) { - timer_switch(PROCESSOR_DATA(processor, current_state), - ctime, - PROCESSOR_DATA(processor, current_state)); - timer_switch(PROCESSOR_DATA(processor, thread_timer), - ctime, - PROCESSOR_DATA(processor, thread_timer)); + timer_update(PROCESSOR_DATA(processor, current_state), ctime); + timer_update(PROCESSOR_DATA(processor, thread_timer), ctime); + timer_update(&thread->runnable_timer, ctime); } + processor->quantum_end = ctime + thread->quantum_remaining; /* - * Context switch check. + * Context switch check + * + * non-urgent flags don't affect kernel threads, so upgrade to urgent + * to ensure that rebalancing and non-recommendation kick in quickly. */ - if ((preempt = csw_check(processor, AST_QUANTUM)) != AST_NONE) + + ast_t check_reason = AST_QUANTUM; + if (thread->task == kernel_task) { + check_reason |= AST_URGENT; + } + + if ((preempt = csw_check(thread, processor, check_reason)) != AST_NONE) { ast_on(preempt); + } + + /* + * AST_KEVENT does not send an IPI when setting the AST, + * to avoid waiting for the next context switch to propagate the AST, + * the AST is propagated here at quantum expiration. 
+ */ + ast_propagate(thread); thread_unlock(thread); - timer_call_enter1(&processor->quantum_timer, thread, - processor->quantum_end, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL); + timer_call_quantum_timer_enter(&processor->quantum_timer, thread, + processor->quantum_end, ctime); + + /* Tell platform layer that we are still running this thread */ + thread_urgency_t urgency = thread_get_urgency(thread, NULL, NULL); + machine_thread_going_on_core(thread, urgency, 0, 0, ctime); + machine_switch_perfcontrol_state_update(QUANTUM_EXPIRY, ctime, + 0, thread); #if defined(CONFIG_SCHED_TIMESHARE_CORE) sched_timeshare_consider_maintenance(ctime); #endif /* CONFIG_SCHED_TIMESHARE_CORE */ +#if __arm__ || __arm64__ + if (thread->sched_mode == TH_MODE_REALTIME) { + sched_consider_recommended_cores(ctime, thread); + } +#endif /* __arm__ || __arm64__ */ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_QUANTUM_EXPIRED) | DBG_FUNC_END, preempt, 0, 0, 0, 0); } @@ -241,17 +270,83 @@ void sched_set_thread_base_priority(thread_t thread, int priority) { assert(priority >= MINPRI); + uint64_t ctime = 0; - if (thread->sched_mode == TH_MODE_REALTIME) + if (thread->sched_mode == TH_MODE_REALTIME) { assert(priority <= BASEPRI_RTQUEUES); - else + } else { assert(priority < BASEPRI_RTQUEUES); + } + int old_base_pri = thread->base_pri; + thread->req_base_pri = priority; + if (thread->sched_flags & TH_SFLAG_BASE_PRI_FROZEN) { + priority = MAX(priority, old_base_pri); + } thread->base_pri = priority; - sched_update_thread_bucket(thread); + if ((thread->state & TH_RUN) == TH_RUN) { + assert(thread->last_made_runnable_time != THREAD_NOT_RUNNABLE); + ctime = mach_approximate_time(); + thread->last_basepri_change_time = ctime; + } else { + assert(thread->last_basepri_change_time == THREAD_NOT_RUNNABLE); + assert(thread->last_made_runnable_time == THREAD_NOT_RUNNABLE); + } + + /* + * Currently the perfcontrol_attr depends on the base pri of the + * thread. Therefore, we use this function as the hook for the + * perfcontrol callout. + */ + if (thread == current_thread() && old_base_pri != priority) { + if (!ctime) { + ctime = mach_approximate_time(); + } + machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE, + ctime, PERFCONTROL_CALLOUT_WAKE_UNSAFE, thread); + } +#if !CONFIG_SCHED_CLUTCH + /* For the clutch scheduler, this operation is done in set_sched_pri() */ + SCHED(update_thread_bucket)(thread); +#endif /* !CONFIG_SCHED_CLUTCH */ + + thread_recompute_sched_pri(thread, SETPRI_DEFAULT); +} + +/* + * sched_set_kernel_thread_priority: + * + * Set the absolute base priority of the thread + * and reset its scheduled priority. + * + * Called with the thread unlocked. + */ +void +sched_set_kernel_thread_priority(thread_t thread, int new_priority) +{ + spl_t s = splsched(); + + thread_lock(thread); + + assert(thread->sched_mode != TH_MODE_REALTIME); + assert(thread->effective_policy.thep_qos == THREAD_QOS_UNSPECIFIED); - thread_recompute_sched_pri(thread, FALSE); + if (new_priority > thread->max_priority) { + new_priority = thread->max_priority; + } +#if CONFIG_EMBEDDED + if (new_priority < MAXPRI_THROTTLE) { + new_priority = MAXPRI_THROTTLE; + } +#endif /* CONFIG_EMBEDDED */ + + thread->importance = new_priority - thread->task_priority; + + sched_set_thread_base_priority(thread, new_priority); + + thread_unlock(thread); + splx(s); } /* @@ -261,37 +356,76 @@ sched_set_thread_base_priority(thread_t thread, int priority) * according to its base priority if the * thread has not been promoted or depressed. 
* - * This is the standard way to push base_pri changes into sched_pri, - * or to recalculate the appropriate sched_pri after clearing + * This is the only way to push base_pri changes into sched_pri, + * or to recalculate the appropriate sched_pri after changing * a promotion or depression. * * Called at splsched with the thread locked. + * + * TODO: Add an 'update urgency' flag to avoid urgency callouts on every rwlock operation */ void -thread_recompute_sched_pri( - thread_t thread, - boolean_t override_depress) +thread_recompute_sched_pri(thread_t thread, set_sched_pri_options_t options) { - int priority; + uint32_t sched_flags = thread->sched_flags; + sched_mode_t sched_mode = thread->sched_mode; + + int priority = thread->base_pri; - if (thread->sched_mode == TH_MODE_TIMESHARE) + if (sched_mode == TH_MODE_TIMESHARE) { priority = SCHED(compute_timeshare_priority)(thread); - else - priority = thread->base_pri; + } + + if (sched_flags & TH_SFLAG_DEPRESS) { + /* thread_yield_internal overrides kernel mutex promotion */ + priority = DEPRESSPRI; + } else { + /* poll-depress is overridden by mutex promotion and promote-reasons */ + if ((sched_flags & TH_SFLAG_POLLDEPRESS)) { + priority = DEPRESSPRI; + } + + if (thread->kern_promotion_schedpri > 0) { + priority = MAX(priority, thread->kern_promotion_schedpri); + + if (sched_mode != TH_MODE_REALTIME) { + priority = MIN(priority, MAXPRI_PROMOTE); + } + } + + if (sched_flags & TH_SFLAG_PROMOTED) { + priority = MAX(priority, thread->promotion_priority); - if ((!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) || (priority > thread->sched_pri)) && - (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) || override_depress)) { - set_sched_pri(thread, priority); + if (sched_mode != TH_MODE_REALTIME) { + priority = MIN(priority, MAXPRI_PROMOTE); + } + } + + if (sched_flags & TH_SFLAG_PROMOTE_REASON_MASK) { + if (sched_flags & TH_SFLAG_RW_PROMOTED) { + priority = MAX(priority, MINPRI_RWLOCK); + } + + if (sched_flags & TH_SFLAG_WAITQ_PROMOTED) { + priority = MAX(priority, MINPRI_WAITQ); + } + + if (sched_flags & TH_SFLAG_EXEC_PROMOTED) { + priority = MAX(priority, MINPRI_EXEC); + } + } } + + set_sched_pri(thread, priority, options); } void sched_default_quantum_expire(thread_t thread __unused) { - /* - * No special behavior when a timeshare, fixed, or realtime thread - * uses up its entire quantum - */ + /* + * No special behavior when a timeshare, fixed, or realtime thread + * uses up its entire quantum + */ } #if defined(CONFIG_SCHED_TIMESHARE_CORE) @@ -323,29 +457,25 @@ lightweight_update_priority(thread_t thread) * during contention for processor * resources. */ - if (thread->pri_shift < INT8_MAX) + if (thread->pri_shift < INT8_MAX) { thread->sched_usage += delta; + } thread->cpu_delta += delta; - priority = sched_compute_timeshare_priority(thread); - +#if CONFIG_SCHED_CLUTCH /* - * Adjust the scheduled priority like thread_recompute_sched_pri, - * except with the benefit of knowing the thread is on this core. + * Update the CPU usage for the thread group to which the thread belongs. + * The implementation assumes that the thread ran for the entire delta + * as part of the same thread group. 
*/ - if ((!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) || (priority > thread->sched_pri)) && - (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)) && - priority != thread->sched_pri) { - - thread->sched_pri = priority; - - KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY), - (uintptr_t)thread_tid(thread), - thread->base_pri, - thread->sched_pri, - 0, /* eventually, 'reason' */ - 0); + sched_clutch_cpu_usage_update(thread, delta); +#endif /* CONFIG_SCHED_CLUTCH */ + + priority = sched_compute_timeshare_priority(thread); + + if (priority != thread->sched_pri) { + thread_recompute_sched_pri(thread, SETPRI_LAZY); } } } @@ -357,17 +487,40 @@ lightweight_update_priority(thread_t thread) * is usage = (usage >> shift1) +/- (usage >> abs(shift2)) where the * +/- is determined by the sign of shift 2. */ -struct shift_data { - int shift1; - int shift2; -}; -#define SCHED_DECAY_TICKS 32 -static struct shift_data sched_decay_shifts[SCHED_DECAY_TICKS] = { - {1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7}, - {5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13}, - {11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18}, - {16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27} +const struct shift_data sched_decay_shifts[SCHED_DECAY_TICKS] = { + { .shift1 = 1, .shift2 = 1 }, + { .shift1 = 1, .shift2 = 3 }, + { .shift1 = 1, .shift2 = -3 }, + { .shift1 = 2, .shift2 = -7 }, + { .shift1 = 3, .shift2 = 5 }, + { .shift1 = 3, .shift2 = -5 }, + { .shift1 = 4, .shift2 = -8 }, + { .shift1 = 5, .shift2 = 7 }, + { .shift1 = 5, .shift2 = -7 }, + { .shift1 = 6, .shift2 = -10 }, + { .shift1 = 7, .shift2 = 10 }, + { .shift1 = 7, .shift2 = -9 }, + { .shift1 = 8, .shift2 = -11 }, + { .shift1 = 9, .shift2 = 12 }, + { .shift1 = 9, .shift2 = -11 }, + { .shift1 = 10, .shift2 = -13 }, + { .shift1 = 11, .shift2 = 14 }, + { .shift1 = 11, .shift2 = -13 }, + { .shift1 = 12, .shift2 = -15 }, + { .shift1 = 13, .shift2 = 17 }, + { .shift1 = 13, .shift2 = -15 }, + { .shift1 = 14, .shift2 = -17 }, + { .shift1 = 15, .shift2 = 19 }, + { .shift1 = 16, .shift2 = 18 }, + { .shift1 = 16, .shift2 = -19 }, + { .shift1 = 17, .shift2 = 22 }, + { .shift1 = 18, .shift2 = 20 }, + { .shift1 = 18, .shift2 = -20 }, + { .shift1 = 19, .shift2 = 26 }, + { .shift1 = 20, .shift2 = 22 }, + { .shift1 = 20, .shift2 = -22 }, + { .shift1 = 21, .shift2 = -27 } }; /* @@ -378,20 +531,57 @@ static struct shift_data sched_decay_shifts[SCHED_DECAY_TICKS] = { extern int sched_pri_decay_band_limit; +/* Only use the decay floor logic on embedded non-clutch schedulers */ +#if CONFIG_EMBEDDED && !CONFIG_SCHED_CLUTCH + +int +sched_compute_timeshare_priority(thread_t thread) +{ + int decay_amount = (thread->sched_usage >> thread->pri_shift); + int decay_limit = sched_pri_decay_band_limit; + + if (thread->base_pri > BASEPRI_FOREGROUND) { + decay_limit += (thread->base_pri - BASEPRI_FOREGROUND); + } + + if (decay_amount > decay_limit) { + decay_amount = decay_limit; + } + + /* start with base priority */ + int priority = thread->base_pri - decay_amount; + + if (priority < MAXPRI_THROTTLE) { + if (thread->task->max_priority > MAXPRI_THROTTLE) { + priority = MAXPRI_THROTTLE; + } else if (priority < MINPRI_USER) { + priority = MINPRI_USER; + } + } else if (priority > MAXPRI_KERNEL) { + priority = MAXPRI_KERNEL; + } + + return priority; +} + +#else /* CONFIG_EMBEDDED && !CONFIG_SCHED_CLUTCH */ + int sched_compute_timeshare_priority(thread_t thread) { /* start with base priority */ int priority = thread->base_pri - 
(thread->sched_usage >> thread->pri_shift); - if (priority < MINPRI_USER) + if (priority < MINPRI_USER) { priority = MINPRI_USER; - else if (priority > MAXPRI_KERNEL) + } else if (priority > MAXPRI_KERNEL) { priority = MAXPRI_KERNEL; + } return priority; } +#endif /* CONFIG_EMBEDDED && !CONFIG_SCHED_CLUTCH */ /* * can_update_priority @@ -402,12 +592,13 @@ sched_compute_timeshare_priority(thread_t thread) */ boolean_t can_update_priority( - thread_t thread) + thread_t thread) { - if (sched_tick == thread->sched_stamp) - return (FALSE); - else - return (TRUE); + if (sched_tick == thread->sched_stamp) { + return FALSE; + } else { + return TRUE; + } } /* @@ -419,7 +610,7 @@ can_update_priority( */ void update_priority( - thread_t thread) + thread_t thread) { uint32_t ticks, delta; @@ -428,11 +619,10 @@ update_priority( thread->sched_stamp += ticks; - thread->pri_shift = sched_pri_shifts[thread->th_sched_bucket]; - /* If requested, accelerate aging of sched_usage */ - if (sched_decay_usage_age_factor > 1) + if (sched_decay_usage_age_factor > 1) { ticks *= sched_decay_usage_age_factor; + } /* * Gather cpu usage data. @@ -440,28 +630,38 @@ update_priority( thread_timer_delta(thread, delta); if (ticks < SCHED_DECAY_TICKS) { /* - * Accumulate timesharing usage only - * during contention for processor - * resources. + * Accumulate timesharing usage only during contention for processor + * resources. Use the pri_shift from the previous tick window to + * determine if the system was in a contended state. */ - if (thread->pri_shift < INT8_MAX) + if (thread->pri_shift < INT8_MAX) { thread->sched_usage += delta; + } thread->cpu_usage += delta + thread->cpu_delta; thread->cpu_delta = 0; - struct shift_data *shiftp = &sched_decay_shifts[ticks]; +#if CONFIG_SCHED_CLUTCH + /* + * Update the CPU usage for the thread group to which the thread belongs. + * The implementation assumes that the thread ran for the entire delta + * as part of the same thread group. + */ + sched_clutch_cpu_usage_update(thread, delta); +#endif /* CONFIG_SCHED_CLUTCH */ + + const struct shift_data *shiftp = &sched_decay_shifts[ticks]; if (shiftp->shift2 > 0) { thread->cpu_usage = (thread->cpu_usage >> shiftp->shift1) + - (thread->cpu_usage >> shiftp->shift2); + (thread->cpu_usage >> shiftp->shift2); thread->sched_usage = (thread->sched_usage >> shiftp->shift1) + - (thread->sched_usage >> shiftp->shift2); + (thread->sched_usage >> shiftp->shift2); } else { thread->cpu_usage = (thread->cpu_usage >> shiftp->shift1) - - (thread->cpu_usage >> -(shiftp->shift2)); + (thread->cpu_usage >> -(shiftp->shift2)); thread->sched_usage = (thread->sched_usage >> shiftp->shift1) - - (thread->sched_usage >> -(shiftp->shift2)); + (thread->sched_usage >> -(shiftp->shift2)); } } else { thread->cpu_usage = thread->cpu_delta = 0; @@ -477,36 +677,22 @@ update_priority( } /* - * Recompute scheduled priority if appropriate. + * Now that the thread's CPU usage has been accumulated and aged + * based on contention of the previous tick window, update the + * pri_shift of the thread to match the current global load/shift + * values. The updated pri_shift would be used to calculate the + * new priority of the thread. */ - if (thread->sched_mode == TH_MODE_TIMESHARE) { - int priority = sched_compute_timeshare_priority(thread); - - /* - * Adjust the scheduled priority like thread_recompute_sched_pri, - * except without setting an AST. 
- */ - if ((!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) || (priority > thread->sched_pri)) && - (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)) && - priority != thread->sched_pri) { - - boolean_t removed = thread_run_queue_remove(thread); - - thread->sched_pri = priority; - - KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY), - (uintptr_t)thread_tid(thread), - thread->base_pri, - thread->sched_pri, - 0, /* eventually, 'reason' */ - 0); +#if CONFIG_SCHED_CLUTCH + thread->pri_shift = sched_clutch_thread_pri_shift(thread, thread->th_sched_bucket); +#else /* CONFIG_SCHED_CLUTCH */ + thread->pri_shift = sched_pri_shifts[thread->th_sched_bucket]; +#endif /* CONFIG_SCHED_CLUTCH */ - if (removed) - thread_run_queue_reinsert(thread, SCHED_TAILQ); - } + /* Recompute scheduled priority if appropriate. */ + if (thread->sched_mode == TH_MODE_TIMESHARE) { + thread_recompute_sched_pri(thread, SETPRI_LAZY); } - - return; } #endif /* CONFIG_SCHED_TIMESHARE_CORE */ @@ -515,38 +701,40 @@ update_priority( /* * TH_BUCKET_RUN is a count of *all* runnable non-idle threads. * Each other bucket is a count of the runnable non-idle threads - * with that property. + * with that property. All updates to these counts should be + * performed with os_atomic_* operations. + * + * For the clutch scheduler, this global bucket is used only for + * keeping the total global run count. */ -volatile uint32_t sched_run_buckets[TH_BUCKET_MAX]; +uint32_t sched_run_buckets[TH_BUCKET_MAX]; static void sched_incr_bucket(sched_bucket_t bucket) { assert(bucket >= TH_BUCKET_FIXPRI && - bucket <= TH_BUCKET_SHARE_BG); + bucket <= TH_BUCKET_SHARE_BG); - hw_atomic_add(&sched_run_buckets[bucket], 1); + os_atomic_inc(&sched_run_buckets[bucket], relaxed); } static void sched_decr_bucket(sched_bucket_t bucket) { assert(bucket >= TH_BUCKET_FIXPRI && - bucket <= TH_BUCKET_SHARE_BG); + bucket <= TH_BUCKET_SHARE_BG); - assert(sched_run_buckets[bucket] > 0); + assert(os_atomic_load(&sched_run_buckets[bucket], relaxed) > 0); - hw_atomic_sub(&sched_run_buckets[bucket], 1); + os_atomic_dec(&sched_run_buckets[bucket], relaxed); } -/* TH_RUN & !TH_IDLE controls whether a thread has a run count */ - uint32_t sched_run_incr(thread_t thread) { - assert((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN); + assert((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN); - uint32_t new_count = hw_atomic_add(&sched_run_buckets[TH_BUCKET_RUN], 1); + uint32_t new_count = os_atomic_inc(&sched_run_buckets[TH_BUCKET_RUN], relaxed); sched_incr_bucket(thread->th_sched_bucket); @@ -556,16 +744,16 @@ sched_run_incr(thread_t thread) uint32_t sched_run_decr(thread_t thread) { - assert((thread->state & (TH_RUN|TH_IDLE)) != TH_RUN); + assert((thread->state & (TH_RUN | TH_IDLE)) != TH_RUN); sched_decr_bucket(thread->th_sched_bucket); - uint32_t new_count = hw_atomic_sub(&sched_run_buckets[TH_BUCKET_RUN], 1); + uint32_t new_count = os_atomic_dec(&sched_run_buckets[TH_BUCKET_RUN], relaxed); return new_count; } -static void +void sched_update_thread_bucket(thread_t thread) { sched_bucket_t old_bucket = thread->th_sched_bucket; @@ -578,12 +766,15 @@ sched_update_thread_bucket(thread_t thread) break; case TH_MODE_TIMESHARE: - if (thread->base_pri > BASEPRI_UTILITY) + if (thread->base_pri > BASEPRI_DEFAULT) { new_bucket = TH_BUCKET_SHARE_FG; - else if (thread->base_pri > MAXPRI_THROTTLE) + } else if (thread->base_pri > BASEPRI_UTILITY) { + new_bucket = TH_BUCKET_SHARE_DF; + } else if (thread->base_pri > MAXPRI_THROTTLE) { new_bucket = TH_BUCKET_SHARE_UT; - 
else + } else { new_bucket = TH_BUCKET_SHARE_BG; + } break; default: @@ -595,7 +786,7 @@ sched_update_thread_bucket(thread_t thread) thread->th_sched_bucket = new_bucket; thread->pri_shift = sched_pri_shifts[new_bucket]; - if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN) { + if ((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN) { sched_decr_bucket(old_bucket); sched_incr_bucket(new_bucket); } @@ -627,7 +818,7 @@ sched_set_thread_mode(thread_t thread, sched_mode_t new_mode) thread->sched_mode = new_mode; - sched_update_thread_bucket(thread); + SCHED(update_thread_bucket)(thread); } /* @@ -639,8 +830,9 @@ sched_thread_mode_demote(thread_t thread, uint32_t reason) assert(reason & TH_SFLAG_DEMOTED_MASK); assert((thread->sched_flags & reason) != reason); - if (thread->policy_reset) + if (thread->policy_reset) { return; + } if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) { /* Another demotion reason is already active */ @@ -660,8 +852,9 @@ sched_thread_mode_demote(thread_t thread, uint32_t reason) thread_recompute_priority(thread); - if (removed) + if (removed) { thread_run_queue_reinsert(thread, SCHED_TAILQ); + } } /* @@ -691,8 +884,84 @@ sched_thread_mode_undemote(thread_t thread, uint32_t reason) thread_recompute_priority(thread); - if (removed) + if (removed) { thread_run_queue_reinsert(thread, SCHED_TAILQ); + } +} + +/* + * Promote thread to have a sched pri floor for a specific reason + * + * Promotion must not last past syscall boundary + * Clients must always pair promote and demote 1:1, + * Handling nesting of the same promote reason is the client's responsibility + * + * Called at splsched with thread locked + */ +void +sched_thread_promote_reason(thread_t thread, + uint32_t reason, + __kdebug_only uintptr_t trace_obj /* already unslid */) +{ + assert(reason & TH_SFLAG_PROMOTE_REASON_MASK); + assert((thread->sched_flags & reason) != reason); + + switch (reason) { + case TH_SFLAG_RW_PROMOTED: + KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_PROMOTE), + thread_tid(thread), thread->sched_pri, + thread->base_pri, trace_obj); + break; + case TH_SFLAG_WAITQ_PROMOTED: + KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAITQ_PROMOTE), + thread_tid(thread), thread->sched_pri, + thread->base_pri, trace_obj); + break; + case TH_SFLAG_EXEC_PROMOTED: + KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_EXEC_PROMOTE), + thread_tid(thread), thread->sched_pri, + thread->base_pri, trace_obj); + break; + } + + thread->sched_flags |= reason; + + thread_recompute_sched_pri(thread, SETPRI_DEFAULT); } +/* + * End a specific promotion reason + * Demotes a thread back to its expected priority without the promotion in place + * + * Called at splsched with thread locked + */ +void +sched_thread_unpromote_reason(thread_t thread, + uint32_t reason, + __kdebug_only uintptr_t trace_obj /* already unslid */) +{ + assert(reason & TH_SFLAG_PROMOTE_REASON_MASK); + assert((thread->sched_flags & reason) == reason); + + switch (reason) { + case TH_SFLAG_RW_PROMOTED: + KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_DEMOTE), + thread_tid(thread), thread->sched_pri, + thread->base_pri, trace_obj); + break; + case TH_SFLAG_WAITQ_PROMOTED: + KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAITQ_DEMOTE), + thread_tid(thread), thread->sched_pri, + thread->base_pri, trace_obj); + break; + case TH_SFLAG_EXEC_PROMOTED: + KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_EXEC_DEMOTE), + thread_tid(thread), thread->sched_pri, + thread->base_pri, trace_obj); + break; + } + thread->sched_flags &= ~reason; + + thread_recompute_sched_pri(thread, SETPRI_DEFAULT); +}
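
The fail-safe check in thread_quantum_expire() above sums the CPU time the thread has consumed since its current computation began (ctime - computation_epoch, plus any previously metered computation) and, for a realtime or fixed-priority thread that is neither promoted nor system-critical, trips the fail-safe when that total exceeds max_unsafe_computation, recording a safe_release time. The following user-space sketch restates only that window arithmetic; the struct fields are local stand-ins for the thread fields used in the diff, and the limits are made-up values (the kernel derives the real ones from scheduler tunables).

/*
 * Standalone sketch (not kernel code) of the fail-safe window check
 * performed by thread_quantum_expire() in the diff above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_thread {
    uint64_t computation_epoch;    /* when the current computation began */
    uint64_t computation_metered;  /* CPU time already charged to this computation */
    uint64_t safe_release;         /* when the fail-safe may be lifted */
};

/* Illustrative limits only; the kernel computes the real values elsewhere. */
static const uint64_t max_unsafe_computation = 100ULL * 1000 * 1000;
static const uint64_t sched_safe_duration    = 200ULL * 1000 * 1000;

static bool
failsafe_tripped(struct fake_thread *th, uint64_t ctime)
{
    uint64_t new_computation = (ctime - th->computation_epoch) + th->computation_metered;

    if (new_computation > max_unsafe_computation) {
        /* The kernel also emits a MACH_FAILSAFE tracepoint at this point. */
        th->safe_release = ctime + sched_safe_duration;
        return true;
    }
    return false;
}

int
main(void)
{
    struct fake_thread th = { .computation_epoch = 0, .computation_metered = 0 };

    printf("short run tripped: %d\n", failsafe_tripped(&th, 50ULL * 1000 * 1000));
    printf("long run tripped:  %d\n", failsafe_tripped(&th, 150ULL * 1000 * 1000));
    return 0;
}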
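
The rewritten thread_recompute_sched_pri() composes the scheduled priority from several sources: the base (or decayed timeshare) priority, a depression override, and a set of promotion floors (kernel promotion, TH_SFLAG_PROMOTED, and the RW-lock, waitq and exec reasons), with non-realtime threads capped at MAXPRI_PROMOTE. The condensed sketch below restates that max-of-floors composition outside the kernel; the flag bits and the numeric constants are local illustrations, not the kernel's definitions.

/*
 * Standalone restatement of the priority composition in
 * thread_recompute_sched_pri() above.  Flags and constants are local
 * stand-ins chosen for illustration.
 */
#include <stdio.h>

#define F_DEPRESS        0x01
#define F_POLLDEPRESS    0x02
#define F_PROMOTED       0x04
#define F_RW_PROMOTED    0x08
#define F_WAITQ_PROMOTED 0x10
#define F_EXEC_PROMOTED  0x20

#define DEPRESSPRI      0
#define MINPRI_RWLOCK   85   /* illustrative floor values */
#define MINPRI_WAITQ    85
#define MINPRI_EXEC     85
#define MAXPRI_PROMOTE  95   /* illustrative ceiling for non-realtime promotions */

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

static int
compose_sched_pri(int base_pri, unsigned flags, int kern_promotion_schedpri,
    int promotion_priority, int realtime)
{
    int priority = base_pri;        /* or the decayed timeshare priority */

    if (flags & F_DEPRESS) {
        return DEPRESSPRI;          /* full depression overrides the promotions below */
    }
    if (flags & F_POLLDEPRESS) {
        priority = DEPRESSPRI;      /* poll-depress can still be overridden below */
    }
    if (kern_promotion_schedpri > 0) {
        priority = MAX(priority, kern_promotion_schedpri);
        if (!realtime) {
            priority = MIN(priority, MAXPRI_PROMOTE);
        }
    }
    if (flags & F_PROMOTED) {
        priority = MAX(priority, promotion_priority);
        if (!realtime) {
            priority = MIN(priority, MAXPRI_PROMOTE);
        }
    }
    if (flags & F_RW_PROMOTED)    priority = MAX(priority, MINPRI_RWLOCK);
    if (flags & F_WAITQ_PROMOTED) priority = MAX(priority, MINPRI_WAITQ);
    if (flags & F_EXEC_PROMOTED)  priority = MAX(priority, MINPRI_EXEC);

    return priority;
}

int
main(void)
{
    /* Depression pins the thread; without it, the RW-lock floor lifts it. */
    printf("depressed + RW floor: %d\n", compose_sched_pri(31, F_DEPRESS | F_RW_PROMOTED, 0, 0, 0));
    printf("RW floor only:        %d\n", compose_sched_pri(31, F_RW_PROMOTED, 0, 0, 0));
    return 0;
}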
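
update_priority() ages cpu_usage and sched_usage with the shift pairs from sched_decay_shifts rather than multiplying: each entry encodes usage = (usage >> shift1) +/- (usage >> abs(shift2)), with the sign of shift2 selecting add or subtract. Working the first few entries through suggests they approximate successive powers of 5/8 (entry 1 gives 1/2 + 1/8 = 5/8, entry 2 gives 1/2 - 1/8 = 3/8, close to (5/8)^2, and so on). The user-space sketch below, with the first eight table entries copied from the diff, makes that comparison explicit.

/*
 * Standalone sketch: applies the shift-pair decay used by update_priority(),
 * with the first eight entries of sched_decay_shifts copied from the diff.
 * The pairs appear to approximate multiplication by (5/8)^ticks without a
 * multiply or divide.  Build with -lm for pow().
 */
#include <stdio.h>
#include <stdint.h>
#include <math.h>

struct shift_data {
    int shift1;
    int shift2;
};

static const struct shift_data decay_shifts[] = {
    { .shift1 = 1, .shift2 = 1 },
    { .shift1 = 1, .shift2 = 3 },
    { .shift1 = 1, .shift2 = -3 },
    { .shift1 = 2, .shift2 = -7 },
    { .shift1 = 3, .shift2 = 5 },
    { .shift1 = 3, .shift2 = -5 },
    { .shift1 = 4, .shift2 = -8 },
    { .shift1 = 5, .shift2 = 7 },
};

/* usage = (usage >> shift1) +/- (usage >> abs(shift2)), sign taken from shift2 */
static uint32_t
decay_usage(uint32_t usage, int ticks)
{
    const struct shift_data *shiftp = &decay_shifts[ticks];

    if (shiftp->shift2 > 0) {
        return (usage >> shiftp->shift1) + (usage >> shiftp->shift2);
    } else {
        return (usage >> shiftp->shift1) - (usage >> -(shiftp->shift2));
    }
}

int
main(void)
{
    uint32_t usage = 1u << 20;   /* arbitrary accumulated sched_usage */

    for (int ticks = 0; ticks < 8; ticks++) {
        printf("ticks=%d  decayed=%7u  (5/8)^ticks=%7.0f\n",
            ticks, (unsigned int)decay_usage(usage, ticks),
            (double)usage * pow(5.0 / 8.0, ticks));
    }
    return 0;
}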
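
sched_compute_timeshare_priority() in the default path subtracts the aged usage, scaled by the per-bucket pri_shift, from base_pri and clamps the result to [MINPRI_USER, MAXPRI_KERNEL]; the new embedded, non-clutch variant additionally caps the subtraction at sched_pri_decay_band_limit (widened for threads above BASEPRI_FOREGROUND) and keeps threads of unthrottled tasks from decaying below MAXPRI_THROTTLE. Below is a small user-space sketch of the default path; the band constants follow the customary XNU values but are defined locally here and should be read as illustrative.

/*
 * Standalone sketch of the default timeshare priority calculation from the
 * diff.  The priority-band constants are local illustrations; the kernel
 * takes the real ones from osfmk/kern/sched.h.
 */
#include <stdio.h>

#define MINPRI_USER      0
#define BASEPRI_DEFAULT  31
#define MAXPRI_KERNEL    95

/* Decay base_pri by the aged usage, then clamp into the user/kernel band. */
static int
compute_timeshare_priority(int base_pri, unsigned int sched_usage, unsigned int pri_shift)
{
    int priority = base_pri - (int)(sched_usage >> pri_shift);

    if (priority < MINPRI_USER) {
        priority = MINPRI_USER;
    } else if (priority > MAXPRI_KERNEL) {
        priority = MAXPRI_KERNEL;
    }
    return priority;
}

int
main(void)
{
    /* A default-priority thread with heavy recent usage sinks toward MINPRI_USER. */
    printf("idle thread: %d\n", compute_timeshare_priority(BASEPRI_DEFAULT, 0, 18));
    printf("busy thread: %d\n", compute_timeshare_priority(BASEPRI_DEFAULT, 40u << 18, 18));
    return 0;
}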
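
The run-count bookkeeping moves from hw_atomic_add/hw_atomic_sub to os_atomic_inc/os_atomic_dec with relaxed ordering: the sched_run_buckets counters only need atomicity, not ordering against other memory. A portable analogue of that pattern using C11 atomics is sketched below; the bucket names are stand-ins and this is not the kernel's os_atomic implementation.

/*
 * Portable analogue of the sched_run_buckets accounting in the diff, using
 * C11 relaxed atomics in place of the kernel's os_atomic_* macros.
 */
#include <stdatomic.h>
#include <stdio.h>

enum { BUCKET_RUN, BUCKET_FIXPRI, BUCKET_SHARE_FG, BUCKET_SHARE_DF,
       BUCKET_SHARE_UT, BUCKET_SHARE_BG, BUCKET_MAX };

static _Atomic unsigned int run_buckets[BUCKET_MAX];

/* Account for a newly runnable thread: bump its bucket and the global run count. */
static unsigned int
run_incr(int bucket)
{
    atomic_fetch_add_explicit(&run_buckets[bucket], 1, memory_order_relaxed);
    return atomic_fetch_add_explicit(&run_buckets[BUCKET_RUN], 1,
               memory_order_relaxed) + 1;
}

/* Account for a thread leaving the run state. */
static unsigned int
run_decr(int bucket)
{
    atomic_fetch_sub_explicit(&run_buckets[bucket], 1, memory_order_relaxed);
    return atomic_fetch_sub_explicit(&run_buckets[BUCKET_RUN], 1,
               memory_order_relaxed) - 1;
}

int
main(void)
{
    printf("run count after incr: %u\n", run_incr(BUCKET_SHARE_FG));
    printf("run count after decr: %u\n", run_decr(BUCKET_SHARE_FG));
    return 0;
}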
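
sched_update_thread_bucket() gains a fourth timeshare tier: threads with base_pri above BASEPRI_DEFAULT now land in TH_BUCKET_SHARE_FG, those above BASEPRI_UTILITY in the new TH_BUCKET_SHARE_DF bucket, those above MAXPRI_THROTTLE in TH_BUCKET_SHARE_UT, and the rest in TH_BUCKET_SHARE_BG. A standalone restatement of that classification follows; the numeric band values mirror the customary XNU ones but are defined locally for illustration.

/*
 * Standalone restatement of the timeshare bucket classification added in the
 * diff.  The band constants are local illustrations, not the kernel headers.
 */
#include <stdio.h>

#define MAXPRI_THROTTLE  4
#define BASEPRI_UTILITY  20
#define BASEPRI_DEFAULT  31

enum bucket { SHARE_FG, SHARE_DF, SHARE_UT, SHARE_BG };

static enum bucket
timeshare_bucket(int base_pri)
{
    if (base_pri > BASEPRI_DEFAULT) {
        return SHARE_FG;        /* foreground */
    } else if (base_pri > BASEPRI_UTILITY) {
        return SHARE_DF;        /* the "default" tier introduced by this change */
    } else if (base_pri > MAXPRI_THROTTLE) {
        return SHARE_UT;        /* utility */
    } else {
        return SHARE_BG;        /* background / throttled */
    }
}

int
main(void)
{
    printf("pri 47 -> bucket %d\n", timeshare_bucket(47));  /* SHARE_FG */
    printf("pri 31 -> bucket %d\n", timeshare_bucket(31));  /* SHARE_DF */
    printf("pri 20 -> bucket %d\n", timeshare_bucket(20));  /* SHARE_UT */
    printf("pri  4 -> bucket %d\n", timeshare_bucket(4));   /* SHARE_BG */
    return 0;
}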
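
sched_thread_promote_reason() and sched_thread_unpromote_reason() give a thread a scheduled-priority floor for a specific reason (RW-lock, waitq, or exec promotion); the comments require callers to pair them 1:1, keep the promotion within a syscall boundary, and handle nesting of the same reason themselves. The caller-side sketch below illustrates only that pairing discipline: the lock type, its fields, and the helpers are invented for illustration and are not an XNU API.

/*
 * Hypothetical caller-side sketch of the promote/unpromote pairing rule
 * described in the diff.  rw_lock_t and the fake_* helpers are invented;
 * the 1:1 pairing is the only point being demonstrated.
 */
#include <stdint.h>
#include <stdio.h>

#define TH_SFLAG_RW_PROMOTED 0x08    /* stand-in for the kernel flag bit */

typedef struct {
    int owner_promoted;              /* does the owner currently hold the RW floor? */
} rw_lock_t;

/* Stand-ins for the kernel entry points added in the diff. */
static void
fake_promote(uint32_t reason, uintptr_t trace_obj)
{
    printf("promote   reason=%#x obj=%#lx\n", reason, (unsigned long)trace_obj);
}

static void
fake_unpromote(uint32_t reason, uintptr_t trace_obj)
{
    printf("unpromote reason=%#x obj=%#lx\n", reason, (unsigned long)trace_obj);
}

static void
rw_lock_contended(rw_lock_t *lock)
{
    /* Promote once per contended acquisition; nesting the same reason is on the caller. */
    if (!lock->owner_promoted) {
        fake_promote(TH_SFLAG_RW_PROMOTED, (uintptr_t)lock);
        lock->owner_promoted = 1;
    }
}

static void
rw_unlock(rw_lock_t *lock)
{
    /* Drop the floor 1:1 with the promote above, before the syscall returns. */
    if (lock->owner_promoted) {
        fake_unpromote(TH_SFLAG_RW_PROMOTED, (uintptr_t)lock);
        lock->owner_promoted = 0;
    }
}

int
main(void)
{
    rw_lock_t lock = { 0 };

    rw_lock_contended(&lock);
    rw_unlock(&lock);
    return 0;
}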