X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/316670eb35587141e969394ae8537d66b9211e80..c3c9b80d004dbbfdf763edeb97968c6997e3b45b:/osfmk/kern/priority.c

diff --git a/osfmk/kern/priority.c b/osfmk/kern/priority.c
index a7fa6ea44..abdfa868e 100644
--- a/osfmk/kern/priority.c
+++ b/osfmk/kern/priority.c
@@ -1,8 +1,8 @@
 /*
- * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,45 +22,45 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
  */
-/* 
+/*
  * Mach Operating System
  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
  * All Rights Reserved.
- * 
+ *
  * Permission to use, copy, modify and distribute this software and its
  * documentation is hereby granted, provided that both the copyright
  * notice and this permission notice appear in all copies of the
  * software, derivative works or modified versions, and any portions
  * thereof, and that both notices appear in supporting documentation.
- * 
+ *
  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- * 
+ *
  * Carnegie Mellon requests users of this software to return to
- * 
+ *
  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
  *  School of Computer Science
  *  Carnegie Mellon University
  *  Pittsburgh PA 15213-3890
- * 
+ *
  * any improvements or extensions that they make and grant Carnegie Mellon
  * the rights to redistribute these changes.
 */
 /*
 */
 /*
- *	File:	clock_prim.c
+ *	File:	priority.c
  *	Author:	Avadis Tevanian, Jr.
  *	Date:	1986
  *
- *	Clock primitives.
+ *	Priority related scheduler bits.
  */
 
 #include
@@ -75,6 +75,17 @@
 #include
 #include
 #include
+#include
+#include
+#include
+
+#ifdef CONFIG_MACH_APPROXIMATE_TIME
+#include	/* for commpage_update_mach_approximate_time */
+#endif
+
+#if MONOTONIC
+#include
+#endif /* MONOTONIC */
 
 /*
  *	thread_quantum_expire:
@@ -86,24 +97,50 @@
 
 void
 thread_quantum_expire(
-	timer_call_param_t	p0,
-	timer_call_param_t	p1)
+	timer_call_param_t      p0,
+	timer_call_param_t      p1)
 {
-	processor_t			processor = p0;
-	thread_t			thread = p1;
-	ast_t				preempt;
+	processor_t processor = p0;
+	thread_t thread = p1;
+	ast_t preempt;
+	uint64_t ctime;
+
+	assert(processor == current_processor());
+	assert(thread == current_thread());
 
-	SCHED_STATS_QUANTUM_TIMER_EXPIRATION(processor);
+	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_QUANTUM_EXPIRED) | DBG_FUNC_START, 0, 0, 0, 0, 0);
+
+	SCHED_STATS_INC(quantum_timer_expirations);
 
 	/*
 	 * We bill CPU time to both the individual thread and its task.
 	 *
-	 * Because this balance adjustment could potentially attempt to wake this very
-	 * thread, we must credit the ledger before taking the thread lock. The ledger
-	 * pointers are only manipulated by the thread itself at the ast boundary.
+	 * Because this balance adjustment could potentially attempt to wake this
+	 * very thread, we must credit the ledger before taking the thread lock.
+	 * The ledger pointers are only manipulated by the thread itself at the ast
+	 * boundary.
+	 *
+	 * TODO: This fails to account for the time between when the timer was
+	 * armed and when it fired. It should be based on the system_timer and
+	 * running a timer_update operation here.
 	 */
-	ledger_credit(thread->t_ledger, task_ledgers.cpu_time, thread->current_quantum);
-	ledger_credit(thread->t_threadledger, thread_ledgers.cpu_time, thread->current_quantum);
+	ledger_credit(thread->t_ledger, task_ledgers.cpu_time, thread->quantum_remaining);
+	ledger_credit(thread->t_threadledger, thread_ledgers.cpu_time, thread->quantum_remaining);
+	if (thread->t_bankledger) {
+		ledger_credit(thread->t_bankledger, bank_ledgers.cpu_time,
+		    (thread->quantum_remaining - thread->t_deduct_bank_ledger_time));
+	}
+	thread->t_deduct_bank_ledger_time = 0;
+	ctime = mach_absolute_time();
+
+#ifdef CONFIG_MACH_APPROXIMATE_TIME
+	commpage_update_mach_approximate_time(ctime);
+#endif
+	sched_update_pset_avg_execution_time(processor->processor_set, thread->quantum_remaining, ctime, thread->th_sched_bucket);
+
+#if MONOTONIC
+	mt_sched_update(thread);
+#endif /* MONOTONIC */
 
 	thread_lock(thread);
 
@@ -111,62 +148,50 @@ thread_quantum_expire(
 	 * We've run up until our quantum expiration, and will (potentially)
 	 * continue without re-entering the scheduler, so update this now.
 	 */
-	thread->last_run_time = processor->quantum_end;
+	processor->last_dispatch = ctime;
+	thread->last_run_time = ctime;
 
 	/*
 	 *	Check for fail-safe trip.
 	 */
-	if ((thread->sched_mode == TH_MODE_REALTIME || thread->sched_mode == TH_MODE_FIXED) &&
-	    !(thread->sched_flags & TH_SFLAG_PROMOTED) &&
-	    !(thread->options & TH_OPT_SYSTEM_CRITICAL)) {
-		uint64_t new_computation;
-
-		new_computation = processor->quantum_end - thread->computation_epoch;
-		new_computation += thread->computation_metered;
-		if (new_computation > max_unsafe_computation) {
-			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAILSAFE)|DBG_FUNC_NONE,
-			    (uintptr_t)thread->sched_pri, (uintptr_t)thread->sched_mode, 0, 0, 0);
-
-			if (thread->sched_mode == TH_MODE_REALTIME) {
-				thread->priority = DEPRESSPRI;
-			}
-
-			thread->saved_mode = thread->sched_mode;
+	if ((thread->sched_mode == TH_MODE_REALTIME || thread->sched_mode == TH_MODE_FIXED) &&
+	    !(thread->sched_flags & TH_SFLAG_PROMOTED) &&
+	    !(thread->kern_promotion_schedpri != 0) &&
+	    !(thread->sched_flags & TH_SFLAG_PROMOTE_REASON_MASK) &&
+	    !(thread->options & TH_OPT_SYSTEM_CRITICAL)) {
+		uint64_t new_computation;
 
-			if (SCHED(supports_timeshare_mode)) {
-				sched_share_incr();
-				thread->sched_mode = TH_MODE_TIMESHARE;
-			} else {
-				/* XXX handle fixed->fixed case */
-				thread->sched_mode = TH_MODE_FIXED;
-			}
+		new_computation = ctime - thread->computation_epoch;
+		new_computation += thread->computation_metered;
+		if (new_computation > max_unsafe_computation) {
+			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAILSAFE) | DBG_FUNC_NONE,
+			    (uintptr_t)thread->sched_pri, (uintptr_t)thread->sched_mode, 0, 0, 0);
+
+			thread->safe_release = ctime + sched_safe_duration;
 
-			thread->safe_release = processor->quantum_end + sched_safe_duration;
-			thread->sched_flags |= TH_SFLAG_FAILSAFE;
+			sched_thread_mode_demote(thread, TH_SFLAG_FAILSAFE);
 		}
 	}
-	
+
 	/*
 	 *	Recompute scheduled priority if appropriate.
 	 */
-	if (SCHED(can_update_priority)(thread))
+	if (SCHED(can_update_priority)(thread)) {
 		SCHED(update_priority)(thread);
-	else
+	} else {
 		SCHED(lightweight_update_priority)(thread);
+	}
 
-	SCHED(quantum_expire)(thread);
-	
-	processor->current_pri = thread->sched_pri;
-	processor->current_thmode = thread->sched_mode;
+	if (thread->sched_mode != TH_MODE_REALTIME) {
+		SCHED(quantum_expire)(thread);
+	}
 
 	/*
 	 *	This quantum is up, give this thread another.
 	 */
-	if (first_timeslice(processor))
-		processor->timeslice--;
+	processor->first_timeslice = FALSE;
 
 	thread_quantum_init(thread);
-	thread->last_quantum_refill_time = processor->quantum_end;
 
 	/* Reload precise timing global policy to thread-local policy */
 	thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);
 
@@ -176,41 +201,225 @@ thread_quantum_expire(
 	 * during privilege transitions, synthesize an event now.
 	 */
 	if (!thread->precise_user_kernel_time) {
-		timer_switch(PROCESSOR_DATA(processor, current_state),
-		    processor->quantum_end,
-		    PROCESSOR_DATA(processor, current_state));
-		timer_switch(PROCESSOR_DATA(processor, thread_timer),
-		    processor->quantum_end,
-		    PROCESSOR_DATA(processor, thread_timer));
+		timer_update(processor->current_state, ctime);
+		timer_update(processor->thread_timer, ctime);
+		timer_update(&thread->runnable_timer, ctime);
 	}
 
-	processor->quantum_end = mach_absolute_time() + thread->current_quantum;
-	timer_call_enter1(&processor->quantum_timer, thread,
-	    processor->quantum_end, TIMER_CALL_CRITICAL);
+
+	processor->quantum_end = ctime + thread->quantum_remaining;
 
 	/*
-	 * Context switch check.
+	 * Context switch check
+	 *
+	 * non-urgent flags don't affect kernel threads, so upgrade to urgent
+	 * to ensure that rebalancing and non-recommendation kick in quickly.
 	 */
-	if ((preempt = csw_check(processor)) != AST_NONE)
+
+	ast_t check_reason = AST_QUANTUM;
+	if (thread->task == kernel_task) {
+		check_reason |= AST_URGENT;
+	}
+
+	if ((preempt = csw_check(thread, processor, check_reason)) != AST_NONE) {
 		ast_on(preempt);
-	else {
-		processor_set_t pset = processor->processor_set;
+	}
+
+	/*
+	 * AST_KEVENT does not send an IPI when setting the AST,
+	 * to avoid waiting for the next context switch to propagate the AST,
+	 * the AST is propagated here at quantum expiration.
+	 */
+	ast_propagate(thread);
+
+	thread_unlock(thread);
+
+	running_timer_enter(processor, RUNNING_TIMER_QUANTUM, thread,
+	    processor->quantum_end, ctime);
+
+	/* Tell platform layer that we are still running this thread */
+	thread_urgency_t urgency = thread_get_urgency(thread, NULL, NULL);
+	machine_thread_going_on_core(thread, urgency, 0, 0, ctime);
+	machine_switch_perfcontrol_state_update(QUANTUM_EXPIRY, ctime,
+	    0, thread);
+
+#if defined(CONFIG_SCHED_TIMESHARE_CORE)
+	sched_timeshare_consider_maintenance(ctime);
+#endif /* CONFIG_SCHED_TIMESHARE_CORE */
+
+#if __arm__ || __arm64__
+	if (thread->sched_mode == TH_MODE_REALTIME) {
+		sched_consider_recommended_cores(ctime, thread);
+	}
+#endif /* __arm__ || __arm64__ */
+
+	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_QUANTUM_EXPIRED) | DBG_FUNC_END, preempt, 0, 0, 0, 0);
+}
+
+/*
+ * sched_set_thread_base_priority:
+ *
+ * Set the base priority of the thread
+ * and reset its scheduled priority.
+ *
+ * This is the only path to change base_pri.
+ *
+ * Called with the thread locked.
+ */
+void
+sched_set_thread_base_priority(thread_t thread, int priority)
+{
+	assert(priority >= MINPRI);
+	uint64_t ctime = 0;
 
-		pset_lock(pset);
+	if (thread->sched_mode == TH_MODE_REALTIME) {
+		assert(priority <= BASEPRI_RTQUEUES);
+	} else {
+		assert(priority < BASEPRI_RTQUEUES);
+	}
 
-		pset_pri_hint(pset, processor, processor->current_pri);
-		pset_count_hint(pset, processor, SCHED(processor_runq_count)(processor));
+	int old_base_pri = thread->base_pri;
+	thread->req_base_pri = (int16_t)priority;
+	if (thread->sched_flags & TH_SFLAG_BASE_PRI_FROZEN) {
+		priority = MAX(priority, old_base_pri);
+	}
+	thread->base_pri = (int16_t)priority;
 
-		pset_unlock(pset);
+	if ((thread->state & TH_RUN) == TH_RUN) {
+		assert(thread->last_made_runnable_time != THREAD_NOT_RUNNABLE);
+		ctime = mach_approximate_time();
+		thread->last_basepri_change_time = ctime;
+	} else {
+		assert(thread->last_basepri_change_time == THREAD_NOT_RUNNABLE);
+		assert(thread->last_made_runnable_time == THREAD_NOT_RUNNABLE);
 	}
 
+	/*
+	 * Currently the perfcontrol_attr depends on the base pri of the
+	 * thread. Therefore, we use this function as the hook for the
+	 * perfcontrol callout.
+	 */
+	if (thread == current_thread() && old_base_pri != priority) {
+		if (!ctime) {
+			ctime = mach_approximate_time();
+		}
+		machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE,
+		    ctime, PERFCONTROL_CALLOUT_WAKE_UNSAFE, thread);
+	}
+#if !CONFIG_SCHED_CLUTCH
+	/* For the clutch scheduler, this operation is done in set_sched_pri() */
+	SCHED(update_thread_bucket)(thread);
+#endif /* !CONFIG_SCHED_CLUTCH */
+
+	thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
+}
+
+/*
+ * sched_set_kernel_thread_priority:
+ *
+ * Set the absolute base priority of the thread
+ * and reset its scheduled priority.
+ *
+ * Called with the thread unlocked.
+ */
+void
+sched_set_kernel_thread_priority(thread_t thread, int new_priority)
+{
+	spl_t s = splsched();
+
+	thread_lock(thread);
+
+	assert(thread->sched_mode != TH_MODE_REALTIME);
+	assert(thread->effective_policy.thep_qos == THREAD_QOS_UNSPECIFIED);
+
+	if (new_priority > thread->max_priority) {
+		new_priority = thread->max_priority;
+	}
+#if !defined(XNU_TARGET_OS_OSX)
+	if (new_priority < MAXPRI_THROTTLE) {
+		new_priority = MAXPRI_THROTTLE;
+	}
+#endif /* !defined(XNU_TARGET_OS_OSX) */
+
+	thread->importance = new_priority - thread->task_priority;
+
+	sched_set_thread_base_priority(thread, new_priority);
+
+	thread_unlock(thread);
+	splx(s);
 }
 
-#if defined(CONFIG_SCHED_TRADITIONAL)
+/*
+ * thread_recompute_sched_pri:
+ *
+ * Reset the scheduled priority of the thread
+ * according to its base priority if the
+ * thread has not been promoted or depressed.
+ *
+ * This is the only way to push base_pri changes into sched_pri,
+ * or to recalculate the appropriate sched_pri after changing
+ * a promotion or depression.
+ *
+ * Called at splsched with the thread locked.
+ *
+ * TODO: Add an 'update urgency' flag to avoid urgency callouts on every rwlock operation
+ */
+void
+thread_recompute_sched_pri(thread_t thread, set_sched_pri_options_t options)
+{
+	uint32_t sched_flags = thread->sched_flags;
+	sched_mode_t sched_mode = thread->sched_mode;
+
+	int16_t priority = thread->base_pri;
+
+	if (sched_mode == TH_MODE_TIMESHARE) {
+		priority = (int16_t)SCHED(compute_timeshare_priority)(thread);
+	}
+
+	if (sched_flags & TH_SFLAG_DEPRESS) {
+		/* thread_yield_internal overrides kernel mutex promotion */
+		priority = DEPRESSPRI;
+	} else {
+		/* poll-depress is overridden by mutex promotion and promote-reasons */
+		if ((sched_flags & TH_SFLAG_POLLDEPRESS)) {
+			priority = DEPRESSPRI;
+		}
+
+		if (thread->kern_promotion_schedpri > 0) {
+			priority = MAX(priority, thread->kern_promotion_schedpri);
+
+			if (sched_mode != TH_MODE_REALTIME) {
+				priority = MIN(priority, MAXPRI_PROMOTE);
+			}
+		}
+
+		if (sched_flags & TH_SFLAG_PROMOTED) {
+			priority = MAX(priority, thread->promotion_priority);
+
+			if (sched_mode != TH_MODE_REALTIME) {
+				priority = MIN(priority, MAXPRI_PROMOTE);
+			}
+		}
+
+		if (sched_flags & TH_SFLAG_PROMOTE_REASON_MASK) {
+			if (sched_flags & TH_SFLAG_RW_PROMOTED) {
+				priority = MAX(priority, MINPRI_RWLOCK);
+			}
+
+			if (sched_flags & TH_SFLAG_WAITQ_PROMOTED) {
+				priority = MAX(priority, MINPRI_WAITQ);
+			}
+
+			if (sched_flags & TH_SFLAG_EXEC_PROMOTED) {
+				priority = MAX(priority, MINPRI_EXEC);
+			}
+		}
+	}
+
+	set_sched_pri(thread, priority, options);
+}
 
 void
-sched_traditional_quantum_expire(thread_t thread __unused)
+sched_default_quantum_expire(thread_t thread __unused)
 {
 	/*
 	 * No special behavior when a timeshare, fixed, or realtime thread
@@ -218,33 +427,63 @@ sched_traditional_quantum_expire(thread_t thread __unused)
 	 */
 }
 
+int smt_timeshare_enabled = 1;
+int smt_sched_bonus_16ths = 8;
+
+#if defined(CONFIG_SCHED_TIMESHARE_CORE)
+
+/*
+ * lightweight_update_priority:
+ *
+ * Update the scheduled priority for
+ * a timesharing thread.
+ *
+ * Only for use on the current thread.
+ *
+ * Called with the thread locked.
+ */
 void
 lightweight_update_priority(thread_t thread)
 {
+	assert(thread->runq == PROCESSOR_NULL);
+	assert(thread == current_thread());
+
 	if (thread->sched_mode == TH_MODE_TIMESHARE) {
-		register uint32_t	delta;
-		
+		int priority;
+		uint32_t delta;
+
 		thread_timer_delta(thread, delta);
-		
+
 		/*
 		 *	Accumulate timesharing usage only
 		 *	during contention for processor
 		 *	resources.
 		 */
-		if (thread->pri_shift < INT8_MAX)
-			thread->sched_usage += delta;
-		
+		if (thread->pri_shift < INT8_MAX) {
+			if (thread_no_smt(thread) && smt_timeshare_enabled) {
+				thread->sched_usage += (delta + ((delta * smt_sched_bonus_16ths) >> 4));
+			} else {
+				thread->sched_usage += delta;
+			}
+		}
+
 		thread->cpu_delta += delta;
-		
+
+#if CONFIG_SCHED_CLUTCH
 		/*
-		 * Adjust the scheduled priority if
-		 * the thread has not been promoted
-		 * and is not depressed.
+		 * Update the CPU usage for the thread group to which the thread belongs.
+		 * The implementation assumes that the thread ran for the entire delta
+		 * as part of the same thread group.
 		 */
-		if (	!(thread->sched_flags & TH_SFLAG_PROMOTED)	&&
-			!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)	)
-			compute_my_priority(thread);
-	}	
+		sched_clutch_cpu_usage_update(thread, delta);
+#endif /* CONFIG_SCHED_CLUTCH */
+
+		priority = sched_compute_timeshare_priority(thread);
+
+		if (priority != thread->sched_pri) {
+			thread_recompute_sched_pri(thread, SETPRI_LAZY);
+		}
+	}
 }
 
 /*
@@ -254,127 +493,112 @@ lightweight_update_priority(thread_t thread)
  * is usage = (usage >> shift1) +/- (usage >> abs(shift2)) where the
  * +/- is determined by the sign of shift 2.
 */
-struct shift_data {
-	int	shift1;
-	int	shift2;
-};
-
-#define SCHED_DECAY_TICKS	32
-static struct shift_data	sched_decay_shifts[SCHED_DECAY_TICKS] = {
-	{1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7},
-	{5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13},
-	{11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18},
-	{16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27}
+const struct shift_data sched_decay_shifts[SCHED_DECAY_TICKS] = {
+	{ .shift1 = 1, .shift2 = 1 },
+	{ .shift1 = 1, .shift2 = 3 },
+	{ .shift1 = 1, .shift2 = -3 },
+	{ .shift1 = 2, .shift2 = -7 },
+	{ .shift1 = 3, .shift2 = 5 },
+	{ .shift1 = 3, .shift2 = -5 },
+	{ .shift1 = 4, .shift2 = -8 },
+	{ .shift1 = 5, .shift2 = 7 },
+	{ .shift1 = 5, .shift2 = -7 },
+	{ .shift1 = 6, .shift2 = -10 },
+	{ .shift1 = 7, .shift2 = 10 },
+	{ .shift1 = 7, .shift2 = -9 },
+	{ .shift1 = 8, .shift2 = -11 },
+	{ .shift1 = 9, .shift2 = 12 },
+	{ .shift1 = 9, .shift2 = -11 },
+	{ .shift1 = 10, .shift2 = -13 },
+	{ .shift1 = 11, .shift2 = 14 },
+	{ .shift1 = 11, .shift2 = -13 },
+	{ .shift1 = 12, .shift2 = -15 },
+	{ .shift1 = 13, .shift2 = 17 },
+	{ .shift1 = 13, .shift2 = -15 },
+	{ .shift1 = 14, .shift2 = -17 },
+	{ .shift1 = 15, .shift2 = 19 },
+	{ .shift1 = 16, .shift2 = 18 },
+	{ .shift1 = 16, .shift2 = -19 },
+	{ .shift1 = 17, .shift2 = 22 },
+	{ .shift1 = 18, .shift2 = 20 },
+	{ .shift1 = 18, .shift2 = -20 },
+	{ .shift1 = 19, .shift2 = 26 },
+	{ .shift1 = 20, .shift2 = 22 },
+	{ .shift1 = 20, .shift2 = -22 },
+	{ .shift1 = 21, .shift2 = -27 }
 };
 
 /*
- * do_priority_computation:
+ * sched_compute_timeshare_priority:
  *
  * Calculate the timesharing priority based upon usage and load.
 */
-#ifdef CONFIG_EMBEDDED
-
-#define do_priority_computation(thread, pri)				\
-	MACRO_BEGIN							\
-	(pri) = (thread)->priority	/* start with base priority */	\
-		- ((thread)->sched_usage >> (thread)->pri_shift);	\
-	if ((pri) < MAXPRI_THROTTLE) {					\
-		if ((thread)->task->max_priority > MAXPRI_THROTTLE)	\
-			(pri) = MAXPRI_THROTTLE;			\
-		else							\
-		if ((pri) < MINPRI_USER)				\
-			(pri) = MINPRI_USER;				\
-	} else								\
-	if ((pri) > MAXPRI_KERNEL)					\
-		(pri) = MAXPRI_KERNEL;					\
-	MACRO_END
-
-#else
-
-#define do_priority_computation(thread, pri)				\
-	MACRO_BEGIN							\
-	(pri) = (thread)->priority	/* start with base priority */	\
-		- ((thread)->sched_usage >> (thread)->pri_shift);	\
-	if ((pri) < MINPRI_USER)					\
-		(pri) = MINPRI_USER;					\
-	else								\
-	if ((pri) > MAXPRI_KERNEL)					\
-		(pri) = MAXPRI_KERNEL;					\
-	MACRO_END
-
-#endif /* defined(CONFIG_SCHED_TRADITIONAL) */
+extern int sched_pri_decay_band_limit;
 
-#endif
 
-/*
- * set_priority:
- *
- * Set the base priority of the thread
- * and reset its scheduled priority.
- *
- * Called with the thread locked.
- */
-void
-set_priority(
-	register thread_t	thread,
-	register int		priority)
+/* Only use the decay floor logic on non-macOS and non-clutch schedulers */
+#if !defined(XNU_TARGET_OS_OSX) && !CONFIG_SCHED_CLUTCH
+
+int
+sched_compute_timeshare_priority(thread_t thread)
 {
-	thread->priority = priority;
-	SCHED(compute_priority)(thread, FALSE);
-}
+	int decay_amount;
+	int decay_limit = sched_pri_decay_band_limit;
 
-#if defined(CONFIG_SCHED_TRADITIONAL)
+	if (thread->base_pri > BASEPRI_FOREGROUND) {
+		decay_limit += (thread->base_pri - BASEPRI_FOREGROUND);
+	}
 
-/*
- * compute_priority:
- *
- * Reset the scheduled priority of the thread
- * according to its base priority if the
- * thread has not been promoted or depressed.
- *
- * Called with the thread locked.
- */
-void
-compute_priority(
-	register thread_t	thread,
-	boolean_t		override_depress)
-{
-	register int		priority;
+	if (thread->pri_shift == INT8_MAX) {
+		decay_amount = 0;
+	} else {
+		decay_amount = (thread->sched_usage >> thread->pri_shift);
+	}
+
+	if (decay_amount > decay_limit) {
+		decay_amount = decay_limit;
+	}
 
-	if (	!(thread->sched_flags & TH_SFLAG_PROMOTED)		&&
-		(!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)	||
-			override_depress				)	) {
-		if (thread->sched_mode == TH_MODE_TIMESHARE)
-			do_priority_computation(thread, priority);
-		else
-			priority = thread->priority;
+	/* start with base priority */
+	int priority = thread->base_pri - decay_amount;
 
-		set_sched_pri(thread, priority);
+	if (priority < MAXPRI_THROTTLE) {
+		if (thread->task->max_priority > MAXPRI_THROTTLE) {
+			priority = MAXPRI_THROTTLE;
+		} else if (priority < MINPRI_USER) {
+			priority = MINPRI_USER;
+		}
+	} else if (priority > MAXPRI_KERNEL) {
+		priority = MAXPRI_KERNEL;
 	}
+
+	return priority;
 }
 
-/*
- * compute_my_priority:
- *
- * Reset the scheduled priority for
- * a timesharing thread.
- *
- * Only for use on the current thread
- * if timesharing and not depressed.
- *
- * Called with the thread locked.
- */
-void
-compute_my_priority(
-	register thread_t	thread)
+#else /* !defined(XNU_TARGET_OS_OSX) && !CONFIG_SCHED_CLUTCH */
+
+int
+sched_compute_timeshare_priority(thread_t thread)
 {
-	register int		priority;
+	/* start with base priority */
+	int priority = thread->base_pri;
 
-	do_priority_computation(thread, priority);
-	assert(thread->runq == PROCESSOR_NULL);
-	thread->sched_pri = priority;
+	if (thread->pri_shift != INT8_MAX) {
+		priority -= (thread->sched_usage >> thread->pri_shift);
+	}
+
+	if (priority < MINPRI_USER) {
+		priority = MINPRI_USER;
+	} else if (priority > MAXPRI_KERNEL) {
+		priority = MAXPRI_KERNEL;
+	}
+
+	return priority;
 }
 
+#endif /* !defined(XNU_TARGET_OS_OSX) && !CONFIG_SCHED_CLUTCH */
+
 /*
  *	can_update_priority
  *
@@ -384,12 +608,13 @@ compute_my_priority(
 */
 boolean_t
 can_update_priority(
-	thread_t	thread)
+	thread_t        thread)
 {
-	if (sched_tick == thread->sched_stamp)
-		return (FALSE);
-	else
-		return (TRUE);
+	if (sched_tick == thread->sched_stamp) {
+		return FALSE;
+	} else {
+		return TRUE;
+	}
 }
 
 /*
@@ -401,53 +626,64 @@ can_update_priority(
 */
 void
 update_priority(
-	register thread_t	thread)
+	thread_t        thread)
 {
-	register unsigned	ticks;
-	register uint32_t	delta;
+	uint32_t ticks, delta;
 
 	ticks = sched_tick - thread->sched_stamp;
 	assert(ticks != 0);
+
 	thread->sched_stamp += ticks;
-	thread->pri_shift = sched_pri_shift;
+
+	/* If requested, accelerate aging of sched_usage */
+	if (sched_decay_usage_age_factor > 1) {
+		ticks *= sched_decay_usage_age_factor;
+	}
 
 	/*
 	 *	Gather cpu usage data.
	 */
 	thread_timer_delta(thread, delta);
 	if (ticks < SCHED_DECAY_TICKS) {
-		register struct shift_data	*shiftp;
-
 		/*
-		 *	Accumulate timesharing usage only
-		 *	during contention for processor
-		 *	resources.
+		 * Accumulate timesharing usage only during contention for processor
		 * resources. Use the pri_shift from the previous tick window to
+		 * determine if the system was in a contended state.
		 */
-		if (thread->pri_shift < INT8_MAX)
-			thread->sched_usage += delta;
+		if (thread->pri_shift < INT8_MAX) {
+			if (thread_no_smt(thread) && smt_timeshare_enabled) {
+				thread->sched_usage += (delta + ((delta * smt_sched_bonus_16ths) >> 4));
+			} else {
+				thread->sched_usage += delta;
+			}
+		}
 
 		thread->cpu_usage += delta + thread->cpu_delta;
 		thread->cpu_delta = 0;
 
-		shiftp = &sched_decay_shifts[ticks];
+#if CONFIG_SCHED_CLUTCH
+		/*
+		 * Update the CPU usage for the thread group to which the thread belongs.
+		 * The implementation assumes that the thread ran for the entire delta
+		 * as part of the same thread group.
+		 */
+		sched_clutch_cpu_usage_update(thread, delta);
+#endif /* CONFIG_SCHED_CLUTCH */
+
+		const struct shift_data *shiftp = &sched_decay_shifts[ticks];
+
 		if (shiftp->shift2 > 0) {
-			thread->cpu_usage =
-				(thread->cpu_usage >> shiftp->shift1) +
-				(thread->cpu_usage >> shiftp->shift2);
-			thread->sched_usage =
-				(thread->sched_usage >> shiftp->shift1) +
-				(thread->sched_usage >> shiftp->shift2);
+			thread->cpu_usage = (thread->cpu_usage >> shiftp->shift1) +
+			    (thread->cpu_usage >> shiftp->shift2);
+			thread->sched_usage = (thread->sched_usage >> shiftp->shift1) +
+			    (thread->sched_usage >> shiftp->shift2);
+		} else {
+			thread->cpu_usage = (thread->cpu_usage >> shiftp->shift1) -
+			    (thread->cpu_usage >> -(shiftp->shift2));
+			thread->sched_usage = (thread->sched_usage >> shiftp->shift1) -
+			    (thread->sched_usage >> -(shiftp->shift2));
 		}
-		else {
-			thread->cpu_usage =
-				(thread->cpu_usage >> shiftp->shift1) -
-				(thread->cpu_usage >> -(shiftp->shift2));
-			thread->sched_usage =
-				(thread->sched_usage >> shiftp->shift1) -
-				(thread->sched_usage >> -(shiftp->shift2));
-		}
-	}
-	else {
+	} else {
 		thread->cpu_usage = thread->cpu_delta = 0;
 		thread->sched_usage = 0;
 	}
@@ -455,89 +691,398 @@ update_priority(
 	/*
 	 *	Check for fail-safe release.
 	 */
-	if (	(thread->sched_flags & TH_SFLAG_FAILSAFE)	&&
-			mach_absolute_time() >= thread->safe_release	) {
-		if (thread->saved_mode != TH_MODE_TIMESHARE) {
-			if (thread->saved_mode == TH_MODE_REALTIME) {
-				thread->priority = BASEPRI_RTQUEUES;
-			}
+	if ((thread->sched_flags & TH_SFLAG_FAILSAFE) &&
+	    mach_absolute_time() >= thread->safe_release) {
+		sched_thread_mode_undemote(thread, TH_SFLAG_FAILSAFE);
+	}
+
+	/*
+	 * Now that the thread's CPU usage has been accumulated and aged
+	 * based on contention of the previous tick window, update the
+	 * pri_shift of the thread to match the current global load/shift
+	 * values. The updated pri_shift would be used to calculate the
+	 * new priority of the thread.
+	 */
+#if CONFIG_SCHED_CLUTCH
+	thread->pri_shift = sched_clutch_thread_pri_shift(thread, thread->th_sched_bucket);
+#else /* CONFIG_SCHED_CLUTCH */
+	thread->pri_shift = sched_pri_shifts[thread->th_sched_bucket];
+#endif /* CONFIG_SCHED_CLUTCH */
+
+	/* Recompute scheduled priority if appropriate. */
+	if (thread->sched_mode == TH_MODE_TIMESHARE) {
+		thread_recompute_sched_pri(thread, SETPRI_LAZY);
+	}
+}
+
+#endif /* CONFIG_SCHED_TIMESHARE_CORE */
+
+
+/*
+ * TH_BUCKET_RUN is a count of *all* runnable non-idle threads.
+ * Each other bucket is a count of the runnable non-idle threads
+ * with that property. All updates to these counts should be
+ * performed with os_atomic_* operations.
+ *
+ * For the clutch scheduler, this global bucket is used only for
+ * keeping the total global run count.
+ */
+uint32_t sched_run_buckets[TH_BUCKET_MAX];
+
+static void
+sched_incr_bucket(sched_bucket_t bucket)
+{
+	assert(bucket >= TH_BUCKET_FIXPRI &&
+	    bucket <= TH_BUCKET_SHARE_BG);
+
+	os_atomic_inc(&sched_run_buckets[bucket], relaxed);
+}
+
+static void
+sched_decr_bucket(sched_bucket_t bucket)
+{
+	assert(bucket >= TH_BUCKET_FIXPRI &&
+	    bucket <= TH_BUCKET_SHARE_BG);
+
+	assert(os_atomic_load(&sched_run_buckets[bucket], relaxed) > 0);
+
+	os_atomic_dec(&sched_run_buckets[bucket], relaxed);
+}
+
+static void
+sched_add_bucket(sched_bucket_t bucket, uint8_t run_weight)
+{
+	assert(bucket >= TH_BUCKET_FIXPRI &&
+	    bucket <= TH_BUCKET_SHARE_BG);
+
+	os_atomic_add(&sched_run_buckets[bucket], run_weight, relaxed);
+}
+
+static void
+sched_sub_bucket(sched_bucket_t bucket, uint8_t run_weight)
+{
+	assert(bucket >= TH_BUCKET_FIXPRI &&
+	    bucket <= TH_BUCKET_SHARE_BG);
+
+	assert(os_atomic_load(&sched_run_buckets[bucket], relaxed) > 0);
+
+	os_atomic_sub(&sched_run_buckets[bucket], run_weight, relaxed);
+}
+
+uint32_t
+sched_run_incr(thread_t thread)
+{
+	assert((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN);
+
+	uint32_t new_count = os_atomic_inc(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
+
+	sched_incr_bucket(thread->th_sched_bucket);
+
+	return new_count;
+}
+
+uint32_t
+sched_run_decr(thread_t thread)
+{
+	assert((thread->state & (TH_RUN | TH_IDLE)) != TH_RUN);
+
+	sched_decr_bucket(thread->th_sched_bucket);
+
+	uint32_t new_count = os_atomic_dec(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
+
+	return new_count;
+}
+
+uint32_t
+sched_smt_run_incr(thread_t thread)
+{
+	assert((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN);
+
+	uint8_t run_weight = (thread_no_smt(thread) && smt_timeshare_enabled) ? 2 : 1;
+	thread->sched_saved_run_weight = run_weight;
+
+	uint32_t new_count = os_atomic_add(&sched_run_buckets[TH_BUCKET_RUN], run_weight, relaxed);
+
+	sched_add_bucket(thread->th_sched_bucket, run_weight);
+
+	return new_count;
+}
+
+uint32_t
+sched_smt_run_decr(thread_t thread)
+{
+	assert((thread->state & (TH_RUN | TH_IDLE)) != TH_RUN);
+
+	uint8_t run_weight = thread->sched_saved_run_weight;
+
+	sched_sub_bucket(thread->th_sched_bucket, run_weight);
+
+	uint32_t new_count = os_atomic_sub(&sched_run_buckets[TH_BUCKET_RUN], run_weight, relaxed);
+
+	return new_count;
+}
+
+void
+sched_update_thread_bucket(thread_t thread)
+{
+	sched_bucket_t old_bucket = thread->th_sched_bucket;
+	sched_bucket_t new_bucket = TH_BUCKET_RUN;
 
-			thread->sched_mode = thread->saved_mode;
-			thread->saved_mode = TH_MODE_NONE;
+	switch (thread->sched_mode) {
+	case TH_MODE_FIXED:
+	case TH_MODE_REALTIME:
+		new_bucket = TH_BUCKET_FIXPRI;
+		break;
 
-			if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
-				sched_share_decr();
+	case TH_MODE_TIMESHARE:
+		if (thread->base_pri > BASEPRI_DEFAULT) {
+			new_bucket = TH_BUCKET_SHARE_FG;
+		} else if (thread->base_pri > BASEPRI_UTILITY) {
+			new_bucket = TH_BUCKET_SHARE_DF;
+		} else if (thread->base_pri > MAXPRI_THROTTLE) {
+			new_bucket = TH_BUCKET_SHARE_UT;
+		} else {
+			new_bucket = TH_BUCKET_SHARE_BG;
+		}
+		break;
+
+	default:
+		panic("unexpected mode: %d", thread->sched_mode);
+		break;
+	}
 
-			if (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK))
-				set_sched_pri(thread, thread->priority);
+	if (old_bucket != new_bucket) {
+		thread->th_sched_bucket = new_bucket;
+		thread->pri_shift = sched_pri_shifts[new_bucket];
+
+		if ((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN) {
+			sched_decr_bucket(old_bucket);
+			sched_incr_bucket(new_bucket);
 		}
+	}
+}
 
-		thread->sched_flags &= ~TH_SFLAG_FAILSAFE;
-	}
-
-#if CONFIG_EMBEDDED
-	/* Check for pending throttle transitions, and safely switch queues */
-	if (thread->sched_flags & TH_SFLAG_PENDING_THROTTLE_MASK) {
-		boolean_t removed = thread_run_queue_remove(thread);
-
-		if (thread->sched_flags & TH_SFLAG_PENDING_THROTTLE_DEMOTION) {
-			if (thread->sched_mode == TH_MODE_REALTIME) {
-				thread->saved_mode = thread->sched_mode;
-				thread->sched_mode = TH_MODE_TIMESHARE;
-
-				if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
-					sched_share_incr();
-			} else {
-				/*
-				 * It's possible that this is a realtime thread that has
-				 * already tripped the failsafe, in which case saved_mode
-				 * is already set correctly.
-				 */
-				if (!(thread->sched_flags & TH_SFLAG_FAILSAFE)) {
-					thread->saved_mode = thread->sched_mode;
-				}
-				thread->sched_flags &= ~TH_SFLAG_FAILSAFE;
-			}
-			thread->sched_flags |= TH_SFLAG_THROTTLED;
+void
+sched_smt_update_thread_bucket(thread_t thread)
+{
+	sched_bucket_t old_bucket = thread->th_sched_bucket;
+	sched_bucket_t new_bucket = TH_BUCKET_RUN;
 
-		} else {
-			if ((thread->sched_mode == TH_MODE_TIMESHARE)
-			    && (thread->saved_mode == TH_MODE_REALTIME)) {
-				if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
-					sched_share_decr();
-			}
-
-			thread->sched_mode = thread->saved_mode;
-			thread->saved_mode = TH_MODE_NONE;
-			thread->sched_flags &= ~TH_SFLAG_THROTTLED;
-		}
+	switch (thread->sched_mode) {
+	case TH_MODE_FIXED:
+	case TH_MODE_REALTIME:
+		new_bucket = TH_BUCKET_FIXPRI;
+		break;
+
+	case TH_MODE_TIMESHARE:
+		if (thread->base_pri > BASEPRI_DEFAULT) {
+			new_bucket = TH_BUCKET_SHARE_FG;
+		} else if (thread->base_pri > BASEPRI_UTILITY) {
+			new_bucket = TH_BUCKET_SHARE_DF;
+		} else if (thread->base_pri > MAXPRI_THROTTLE) {
+			new_bucket = TH_BUCKET_SHARE_UT;
+		} else {
+			new_bucket = TH_BUCKET_SHARE_BG;
+		}
+		break;
+
+	default:
+		panic("unexpected mode: %d", thread->sched_mode);
+		break;
+	}
 
-		thread->sched_flags &= ~(TH_SFLAG_PENDING_THROTTLE_MASK);
+	if (old_bucket != new_bucket) {
+		thread->th_sched_bucket = new_bucket;
+		thread->pri_shift = sched_pri_shifts[new_bucket];
 
-		if (removed)
-			thread_setrun(thread, SCHED_TAILQ);
+		if ((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN) {
+			sched_sub_bucket(old_bucket, thread->sched_saved_run_weight);
+			sched_add_bucket(new_bucket, thread->sched_saved_run_weight);
+		}
 	}
-#endif
+}
 
+/*
+ * Set the thread's true scheduling mode
+ * Called with thread mutex and thread locked
+ * The thread has already been removed from the runqueue.
+ *
+ * (saved_mode is handled before this point)
+ */
+void
+sched_set_thread_mode(thread_t thread, sched_mode_t new_mode)
+{
+	assert(thread->runq == PROCESSOR_NULL);
+
+	switch (new_mode) {
+	case TH_MODE_FIXED:
+	case TH_MODE_REALTIME:
+	case TH_MODE_TIMESHARE:
+		break;
+
+	default:
+		panic("unexpected mode: %d", new_mode);
+		break;
+	}
+
+#if CONFIG_SCHED_AUTO_JOIN
 	/*
-	 *	Recompute scheduled priority if appropriate.
+	 * Realtime threads might have auto-joined a work interval based on
+	 * make runnable relationships. If such an RT thread is now being demoted
+	 * to non-RT, unjoin the thread from the work interval.
 	 */
+	if ((thread->sched_flags & TH_SFLAG_THREAD_GROUP_AUTO_JOIN) && (new_mode != TH_MODE_REALTIME)) {
+		assert((thread->sched_mode == TH_MODE_REALTIME) || (thread->th_work_interval_flags & TH_WORK_INTERVAL_FLAGS_AUTO_JOIN_LEAK));
+		work_interval_auto_join_demote(thread);
+	}
+#endif /* CONFIG_SCHED_AUTO_JOIN */
+
+	thread->sched_mode = new_mode;
+
+	SCHED(update_thread_bucket)(thread);
+}
+
+/*
+ * Demote the true scheduler mode to timeshare (called with the thread locked)
+ */
+void
+sched_thread_mode_demote(thread_t thread, uint32_t reason)
+{
+	assert(reason & TH_SFLAG_DEMOTED_MASK);
+	assert((thread->sched_flags & reason) != reason);
+
+	if (thread->policy_reset) {
+		return;
+	}
+
+	if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
+		/* Another demotion reason is already active */
+		thread->sched_flags |= reason;
+		return;
+	}
+
+	assert(thread->saved_mode == TH_MODE_NONE);
+
+	boolean_t removed = thread_run_queue_remove(thread);
+
+	thread->sched_flags |= reason;
+
+	thread->saved_mode = thread->sched_mode;
+
+	sched_set_thread_mode(thread, TH_MODE_TIMESHARE);
+
+	thread_recompute_priority(thread);
+
+	if (removed) {
+		thread_run_queue_reinsert(thread, SCHED_TAILQ);
+	}
+}
+
+/*
+ * Un-demote the true scheduler mode back to the saved mode (called with the thread locked)
+ */
+void
+sched_thread_mode_undemote(thread_t thread, uint32_t reason)
+{
+	assert(reason & TH_SFLAG_DEMOTED_MASK);
+	assert((thread->sched_flags & reason) == reason);
+	assert(thread->saved_mode != TH_MODE_NONE);
+	assert(thread->sched_mode == TH_MODE_TIMESHARE);
+	assert(thread->policy_reset == 0);
+
+	thread->sched_flags &= ~reason;
+
+	if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
+		/* Another demotion reason is still active */
+		return;
+	}
+
+	boolean_t removed = thread_run_queue_remove(thread);
+
+	sched_set_thread_mode(thread, thread->saved_mode);
+
+	thread->saved_mode = TH_MODE_NONE;
+
+	thread_recompute_priority(thread);
+
+	if (removed) {
+		thread_run_queue_reinsert(thread, SCHED_TAILQ);
 	}
-
-	return;
 }
 
-#endif /* CONFIG_SCHED_TRADITIONAL */
+/*
+ * Promote thread to have a sched pri floor for a specific reason
+ *
+ * Promotion must not last past syscall boundary
+ * Clients must always pair promote and demote 1:1,
+ * Handling nesting of the same promote reason is the client's responsibility
+ *
+ * Called at splsched with thread locked
+ */
+void
+sched_thread_promote_reason(thread_t thread,
+    uint32_t reason,
+    __kdebug_only uintptr_t trace_obj /* already unslid */)
+{
+	assert(reason & TH_SFLAG_PROMOTE_REASON_MASK);
+	assert((thread->sched_flags & reason) != reason);
+
+	switch (reason) {
+	case TH_SFLAG_RW_PROMOTED:
+		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_PROMOTE),
+		    thread_tid(thread), thread->sched_pri,
+		    thread->base_pri, trace_obj);
+		break;
+	case TH_SFLAG_WAITQ_PROMOTED:
+		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAITQ_PROMOTE),
+		    thread_tid(thread), thread->sched_pri,
+		    thread->base_pri, trace_obj);
+		break;
+	case TH_SFLAG_EXEC_PROMOTED:
+		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_EXEC_PROMOTE),
+		    thread_tid(thread), thread->sched_pri,
+		    thread->base_pri, trace_obj);
+		break;
+	}
+
+	thread->sched_flags |= reason;
+
+	thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
+}
+
+/*
+ * End a specific promotion reason
+ * Demotes a thread back to its expected priority without the promotion in place
+ *
+ * Called at splsched with thread locked
+ */
+void
+sched_thread_unpromote_reason(thread_t thread,
+    uint32_t reason,
+    __kdebug_only uintptr_t trace_obj /* already unslid */)
+{
+	assert(reason & TH_SFLAG_PROMOTE_REASON_MASK);
+	assert((thread->sched_flags & reason) == reason);
+
+	switch (reason) {
+	case TH_SFLAG_RW_PROMOTED:
+		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_DEMOTE),
+		    thread_tid(thread), thread->sched_pri,
+		    thread->base_pri, trace_obj);
+		break;
+	case TH_SFLAG_WAITQ_PROMOTED:
+		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAITQ_DEMOTE),
+		    thread_tid(thread), thread->sched_pri,
+		    thread->base_pri, trace_obj);
+		break;
+	case TH_SFLAG_EXEC_PROMOTED:
+		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_EXEC_DEMOTE),
+		    thread_tid(thread), thread->sched_pri,
+		    thread->base_pri, trace_obj);
+		break;
+	}
+
+	thread->sched_flags &= ~reason;
+
+	thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
+}
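
The diff replaces the old do_priority_computation() macros with sched_compute_timeshare_priority() while keeping the per-tick usage aging driven by sched_decay_shifts ("usage = (usage >> shift1) +/- (usage >> abs(shift2))"). The short standalone sketch below is not part of the diff: it restates that arithmetic in user space under assumed illustrative values for MINPRI_USER, MAXPRI_KERNEL and BASEPRI_DEFAULT, and mirrors the simpler non-throttled (#else) branch of the new function. All names here are local to the example.

/* decay_demo.c - illustrative sketch of the timeshare decay arithmetic above */
#include <stdio.h>
#include <stdint.h>

#define MINPRI_USER     0   /* assumed illustrative values */
#define MAXPRI_KERNEL   95
#define BASEPRI_DEFAULT 31

struct shift_data { int shift1; int shift2; };

/* One aging step: usage = (usage >> shift1) +/- (usage >> |shift2|) */
static uint32_t
age_usage(uint32_t usage, struct shift_data s)
{
	if (s.shift2 > 0) {
		return (usage >> s.shift1) + (usage >> s.shift2);
	}
	return (usage >> s.shift1) - (usage >> -s.shift2);
}

/* Timeshare priority: base priority minus shifted usage, clamped to the user band */
static int
timeshare_pri(int base_pri, uint32_t sched_usage, int pri_shift)
{
	int pri = base_pri;
	if (pri_shift != INT8_MAX) {    /* INT8_MAX means "no contention, no decay" */
		pri -= (int)(sched_usage >> pri_shift);
	}
	if (pri < MINPRI_USER) {
		pri = MINPRI_USER;
	} else if (pri > MAXPRI_KERNEL) {
		pri = MAXPRI_KERNEL;
	}
	return pri;
}

int
main(void)
{
	struct shift_data tick3 = { .shift1 = 2, .shift2 = -7 }; /* sched_decay_shifts[3] */
	uint32_t usage = 1u << 20;

	usage = age_usage(usage, tick3);  /* usage/4 - usage/128, about 0.24 * usage */
	printf("aged usage = %u\n", usage);
	printf("sched_pri  = %d\n", timeshare_pri(BASEPRI_DEFAULT, usage, 16));
	return 0;
}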
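Similarly, the new sched_update_thread_bucket() maps a timeshare thread's base_pri onto a run bucket by comparing it against the priority-band cutoffs named in the diff. The fragment below is a hypothetical user-space restatement of just that mapping; the cutoff numbers are assumed stand-ins for BASEPRI_DEFAULT, BASEPRI_UTILITY and MAXPRI_THROTTLE, not values taken from the kernel headers.

/* bucket_demo.c - illustrative sketch of the bucket selection in the diff */
#include <stdio.h>

enum bucket { SHARE_FG, SHARE_DF, SHARE_UT, SHARE_BG };

static enum bucket
timeshare_bucket(int base_pri)
{
	if (base_pri > 31) {          /* assumed BASEPRI_DEFAULT */
		return SHARE_FG;
	} else if (base_pri > 20) {   /* assumed BASEPRI_UTILITY */
		return SHARE_DF;
	} else if (base_pri > 4) {    /* assumed MAXPRI_THROTTLE */
		return SHARE_UT;
	}
	return SHARE_BG;
}

int
main(void)
{
	printf("base_pri 37 -> bucket %d\n", timeshare_bucket(37)); /* SHARE_FG */
	printf("base_pri  4 -> bucket %d\n", timeshare_bucket(4));  /* SHARE_BG */
	return 0;
}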