/*
- * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
- *
+ *
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
- *
+ *
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
*/
-/*
+/*
* Mach Operating System
* Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
* All Rights Reserved.
- *
+ *
* Permission to use, copy, modify and distribute this software and its
* documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
- *
+ *
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
* ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
* Carnegie Mellon requests users of this software to return to
- *
+ *
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
- *
+ *
* any improvements or extensions that they make and grant Carnegie Mellon
* the rights to redistribute these changes.
*/
#include <kern/ledger.h>
#include <machine/machparam.h>
#include <kern/machine.h>
+#include <kern/policy_internal.h>
+#include <kern/sched_clutch.h>
#ifdef CONFIG_MACH_APPROXIMATE_TIME
#include <machine/commpage.h> /* for commpage_update_mach_approximate_time */
#endif
#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */
-static void sched_update_thread_bucket(thread_t thread);
-
/*
* thread_quantum_expire:
 *
 * Recalculate the quantum and priority for a thread.
 *
 * Called at splsched.
 */
void
thread_quantum_expire(
- timer_call_param_t p0,
- timer_call_param_t p1)
+ timer_call_param_t p0,
+ timer_call_param_t p1)
{
- processor_t processor = p0;
- thread_t thread = p1;
- ast_t preempt;
- uint64_t ctime;
- int urgency;
- uint64_t ignore1, ignore2;
+ processor_t processor = p0;
+ thread_t thread = p1;
+ ast_t preempt;
+ uint64_t ctime;
assert(processor == current_processor());
assert(thread == current_thread());
KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_QUANTUM_EXPIRED) | DBG_FUNC_START, 0, 0, 0, 0, 0);
- SCHED_STATS_QUANTUM_TIMER_EXPIRATION(processor);
+ SCHED_STATS_INC(quantum_timer_expirations);
/*
* We bill CPU time to both the individual thread and its task.
*
- * Because this balance adjustment could potentially attempt to wake this very
- * thread, we must credit the ledger before taking the thread lock. The ledger
- * pointers are only manipulated by the thread itself at the ast boundary.
+ * Because this balance adjustment could potentially attempt to wake this
+ * very thread, we must credit the ledger before taking the thread lock.
+ * The ledger pointers are only manipulated by the thread itself at the ast
+ * boundary.
*
- * TODO: This fails to account for the time between when the timer was armed and when it fired.
- * It should be based on the system_timer and running a thread_timer_event operation here.
+ * TODO: This fails to account for the time between when the timer was
+ * armed and when it fired. It should be based on the system_timer and
+ * running a timer_update operation here.
*/
ledger_credit(thread->t_ledger, task_ledgers.cpu_time, thread->quantum_remaining);
ledger_credit(thread->t_threadledger, thread_ledgers.cpu_time, thread->quantum_remaining);
if (thread->t_bankledger) {
ledger_credit(thread->t_bankledger, bank_ledgers.cpu_time,
- (thread->quantum_remaining - thread->t_deduct_bank_ledger_time));
+ (thread->quantum_remaining - thread->t_deduct_bank_ledger_time));
}
thread->t_deduct_bank_ledger_time = 0;
-
ctime = mach_absolute_time();
#ifdef CONFIG_MACH_APPROXIMATE_TIME
commpage_update_mach_approximate_time(ctime);
#endif
+ sched_update_pset_avg_execution_time(processor->processor_set, thread->quantum_remaining, ctime, thread->th_sched_bucket);
#if MONOTONIC
	mt_sched_update(thread);
#endif /* MONOTONIC */
/*
* Check for fail-safe trip.
*/
- if ((thread->sched_mode == TH_MODE_REALTIME || thread->sched_mode == TH_MODE_FIXED) &&
- !(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) &&
- !(thread->options & TH_OPT_SYSTEM_CRITICAL)) {
- uint64_t new_computation;
-
- new_computation = ctime - thread->computation_epoch;
- new_computation += thread->computation_metered;
- if (new_computation > max_unsafe_computation) {
- KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAILSAFE)|DBG_FUNC_NONE,
- (uintptr_t)thread->sched_pri, (uintptr_t)thread->sched_mode, 0, 0, 0);
+ if ((thread->sched_mode == TH_MODE_REALTIME || thread->sched_mode == TH_MODE_FIXED) &&
+ !(thread->sched_flags & TH_SFLAG_PROMOTED) &&
+ !(thread->kern_promotion_schedpri != 0) &&
+ !(thread->sched_flags & TH_SFLAG_PROMOTE_REASON_MASK) &&
+ !(thread->options & TH_OPT_SYSTEM_CRITICAL)) {
+ uint64_t new_computation;
+
+ new_computation = ctime - thread->computation_epoch;
+ new_computation += thread->computation_metered;
+ if (new_computation > max_unsafe_computation) {
+ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAILSAFE) | DBG_FUNC_NONE,
+ (uintptr_t)thread->sched_pri, (uintptr_t)thread->sched_mode, 0, 0, 0);
thread->safe_release = ctime + sched_safe_duration;
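			/*
			 * Editor's note (sketch): exceeding max_unsafe_computation is the
			 * fail-safe trip for fixed/realtime threads that monopolize the CPU;
			 * the expectation is that such a thread is demoted to timeshare
			 * behavior until the safe_release deadline computed above.
			 */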
/*
* Recompute scheduled priority if appropriate.
*/
- if (SCHED(can_update_priority)(thread))
+ if (SCHED(can_update_priority)(thread)) {
SCHED(update_priority)(thread);
- else
+ } else {
SCHED(lightweight_update_priority)(thread);
+ }
- if (thread->sched_mode != TH_MODE_REALTIME)
+ if (thread->sched_mode != TH_MODE_REALTIME) {
SCHED(quantum_expire)(thread);
-
- processor_state_update_from_thread(processor, thread);
+ }
	/*
	 * This quantum is up, give this thread another.
	 *
	 * Since non-precise user/kernel time does not update the state or
	 * thread timers during privilege transitions, synthesize an event now.
	 */
if (!thread->precise_user_kernel_time) {
- timer_switch(PROCESSOR_DATA(processor, current_state),
- ctime,
- PROCESSOR_DATA(processor, current_state));
- timer_switch(PROCESSOR_DATA(processor, thread_timer),
- ctime,
- PROCESSOR_DATA(processor, thread_timer));
+ timer_update(processor->current_state, ctime);
+ timer_update(processor->thread_timer, ctime);
+ timer_update(&thread->runnable_timer, ctime);
}
ast_t check_reason = AST_QUANTUM;
- if (thread->task == kernel_task)
+ if (thread->task == kernel_task) {
check_reason |= AST_URGENT;
+ }
- if ((preempt = csw_check(processor, check_reason)) != AST_NONE)
+ if ((preempt = csw_check(thread, processor, check_reason)) != AST_NONE) {
ast_on(preempt);
+ }
/*
* AST_KEVENT does not send an IPI when setting the AST,
ast_propagate(thread);
thread_unlock(thread);
-
- timer_call_quantum_timer_enter(&processor->quantum_timer, thread,
- processor->quantum_end, ctime);
+ running_timer_enter(processor, RUNNING_TIMER_QUANTUM, thread,
+ processor->quantum_end, ctime);
/* Tell platform layer that we are still running this thread */
- urgency = thread_get_urgency(thread, &ignore1, &ignore2);
+ thread_urgency_t urgency = thread_get_urgency(thread, NULL, NULL);
machine_thread_going_on_core(thread, urgency, 0, 0, ctime);
machine_switch_perfcontrol_state_update(QUANTUM_EXPIRY, ctime,
- 0, thread);
+ 0, thread);
#if defined(CONFIG_SCHED_TIMESHARE_CORE)
sched_timeshare_consider_maintenance(ctime);
#endif /* CONFIG_SCHED_TIMESHARE_CORE */
#if __arm__ || __arm64__
- if (thread->sched_mode == TH_MODE_REALTIME)
+ if (thread->sched_mode == TH_MODE_REALTIME) {
sched_consider_recommended_cores(ctime, thread);
+ }
#endif /* __arm__ || __arm64__ */
KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_QUANTUM_EXPIRED) | DBG_FUNC_END, preempt, 0, 0, 0, 0);
assert(priority >= MINPRI);
uint64_t ctime = 0;
- if (thread->sched_mode == TH_MODE_REALTIME)
+ if (thread->sched_mode == TH_MODE_REALTIME) {
assert(priority <= BASEPRI_RTQUEUES);
- else
+ } else {
assert(priority < BASEPRI_RTQUEUES);
+ }
int old_base_pri = thread->base_pri;
- thread->base_pri = priority;
+ thread->req_base_pri = (int16_t)priority;
+ if (thread->sched_flags & TH_SFLAG_BASE_PRI_FROZEN) {
+ priority = MAX(priority, old_base_pri);
+ }
+ thread->base_pri = (int16_t)priority;
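	/*
	 * Editor's note (sketch): while TH_SFLAG_BASE_PRI_FROZEN is set, the
	 * effective base_pri is only allowed to rise; the caller's request is
	 * still recorded in req_base_pri above so it can take effect once the
	 * freeze is lifted.
	 */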
	if ((thread->state & TH_RUN) == TH_RUN) {
		assert(thread->last_made_runnable_time != THREAD_NOT_RUNNABLE);
		ctime = mach_approximate_time();
		thread->last_basepri_change_time = ctime;
	} else {
		assert(thread->last_basepri_change_time == THREAD_NOT_RUNNABLE);
		assert(thread->last_made_runnable_time == THREAD_NOT_RUNNABLE);
	}
- /*
- * Currently the perfcontrol_attr depends on the base pri of the
- * thread. Therefore, we use this function as the hook for the
- * perfcontrol callout.
+ /*
+ * Currently the perfcontrol_attr depends on the base pri of the
+ * thread. Therefore, we use this function as the hook for the
+ * perfcontrol callout.
*/
if (thread == current_thread() && old_base_pri != priority) {
if (!ctime) {
- ctime = mach_approximate_time();
+ ctime = mach_approximate_time();
}
machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE,
- ctime, PERFCONTROL_CALLOUT_WAKE_UNSAFE, thread);
+ ctime, PERFCONTROL_CALLOUT_WAKE_UNSAFE, thread);
+ }
+#if !CONFIG_SCHED_CLUTCH
+ /* For the clutch scheduler, this operation is done in set_sched_pri() */
+ SCHED(update_thread_bucket)(thread);
+#endif /* !CONFIG_SCHED_CLUTCH */
+
+ thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
+}
+
+/*
+ * sched_set_kernel_thread_priority:
+ *
+ * Set the absolute base priority of the thread
+ * and reset its scheduled priority.
+ *
+ * Called with the thread unlocked.
+ */
+void
+sched_set_kernel_thread_priority(thread_t thread, int new_priority)
+{
+ spl_t s = splsched();
+
+ thread_lock(thread);
+
+ assert(thread->sched_mode != TH_MODE_REALTIME);
+ assert(thread->effective_policy.thep_qos == THREAD_QOS_UNSPECIFIED);
+
+ if (new_priority > thread->max_priority) {
+ new_priority = thread->max_priority;
}
- sched_update_thread_bucket(thread);
+#if !defined(XNU_TARGET_OS_OSX)
+ if (new_priority < MAXPRI_THROTTLE) {
+ new_priority = MAXPRI_THROTTLE;
+ }
+#endif /* !defined(XNU_TARGET_OS_OSX) */
+
+ thread->importance = new_priority - thread->task_priority;
- thread_recompute_sched_pri(thread, FALSE);
+ sched_set_thread_base_priority(thread, new_priority);
+
+ thread_unlock(thread);
+ splx(s);
}
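/*
 * Usage sketch (illustrative, not part of this change): a caller that owns a
 * kernel-only thread with no QoS policy could move it to a new absolute
 * priority with e.g.
 *
 *	sched_set_kernel_thread_priority(thread, BASEPRI_KERNEL);
 *
 * which clamps to the thread's max_priority, recomputes importance relative
 * to the task priority, and resets the scheduled priority via
 * sched_set_thread_base_priority().
 */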
/*
* according to its base priority if the
* thread has not been promoted or depressed.
*
- * This is the standard way to push base_pri changes into sched_pri,
- * or to recalculate the appropriate sched_pri after clearing
+ * This is the only way to push base_pri changes into sched_pri,
+ * or to recalculate the appropriate sched_pri after changing
* a promotion or depression.
*
* Called at splsched with the thread locked.
+ *
+ * TODO: Add an 'update urgency' flag to avoid urgency callouts on every rwlock operation
*/
void
-thread_recompute_sched_pri(
- thread_t thread,
- boolean_t override_depress)
+thread_recompute_sched_pri(thread_t thread, set_sched_pri_options_t options)
{
- int priority;
+ uint32_t sched_flags = thread->sched_flags;
+ sched_mode_t sched_mode = thread->sched_mode;
- if (thread->sched_mode == TH_MODE_TIMESHARE)
- priority = SCHED(compute_timeshare_priority)(thread);
- else
- priority = thread->base_pri;
+ int16_t priority = thread->base_pri;
- if ((!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) || (priority > thread->sched_pri)) &&
- (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) || override_depress)) {
- set_sched_pri(thread, priority);
+ if (sched_mode == TH_MODE_TIMESHARE) {
+ priority = (int16_t)SCHED(compute_timeshare_priority)(thread);
}
+
+ if (sched_flags & TH_SFLAG_DEPRESS) {
+ /* thread_yield_internal overrides kernel mutex promotion */
+ priority = DEPRESSPRI;
+ } else {
+ /* poll-depress is overridden by mutex promotion and promote-reasons */
+ if ((sched_flags & TH_SFLAG_POLLDEPRESS)) {
+ priority = DEPRESSPRI;
+ }
+
+ if (thread->kern_promotion_schedpri > 0) {
+ priority = MAX(priority, thread->kern_promotion_schedpri);
+
+ if (sched_mode != TH_MODE_REALTIME) {
+ priority = MIN(priority, MAXPRI_PROMOTE);
+ }
+ }
+
+ if (sched_flags & TH_SFLAG_PROMOTED) {
+ priority = MAX(priority, thread->promotion_priority);
+
+ if (sched_mode != TH_MODE_REALTIME) {
+ priority = MIN(priority, MAXPRI_PROMOTE);
+ }
+ }
+
+ if (sched_flags & TH_SFLAG_PROMOTE_REASON_MASK) {
+ if (sched_flags & TH_SFLAG_RW_PROMOTED) {
+ priority = MAX(priority, MINPRI_RWLOCK);
+ }
+
+ if (sched_flags & TH_SFLAG_WAITQ_PROMOTED) {
+ priority = MAX(priority, MINPRI_WAITQ);
+ }
+
+ if (sched_flags & TH_SFLAG_EXEC_PROMOTED) {
+ priority = MAX(priority, MINPRI_EXEC);
+ }
+ }
+ }
+
+ set_sched_pri(thread, priority, options);
}
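/*
 * Worked example (editor's sketch): for a timeshare thread whose decayed
 * timeshare priority computes to 31, an active kernel promotion of 80 yields
 * MIN(MAX(31, 80), MAXPRI_PROMOTE); an RW-lock promotion alone would only
 * floor it at MINPRI_RWLOCK.  If TH_SFLAG_DEPRESS is set, all of the above is
 * bypassed and the thread simply runs at DEPRESSPRI.
 */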
void
sched_default_quantum_expire(thread_t thread __unused)
{
- /*
- * No special behavior when a timeshare, fixed, or realtime thread
- * uses up its entire quantum
- */
+ /*
+ * No special behavior when a timeshare, fixed, or realtime thread
+ * uses up its entire quantum
+ */
}
+int smt_timeshare_enabled = 1;
+int smt_sched_bonus_16ths = 8;
+
#if defined(CONFIG_SCHED_TIMESHARE_CORE)
/*
 * Accumulate timesharing usage only
 * during contention for processor
* resources.
*/
- if (thread->pri_shift < INT8_MAX)
- thread->sched_usage += delta;
+ if (thread->pri_shift < INT8_MAX) {
+ if (thread_no_smt(thread) && smt_timeshare_enabled) {
+ thread->sched_usage += (delta + ((delta * smt_sched_bonus_16ths) >> 4));
+ } else {
+ thread->sched_usage += delta;
+ }
+ }
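	/*
	 * Example (editor's sketch): with smt_sched_bonus_16ths at its initial
	 * value of 8, a thread that has opted out of SMT sharing is charged
	 * delta + (delta * 8 >> 4), i.e. 1.5x its measured CPU delta, so its
	 * usage decays roughly as if it had consumed the whole core.
	 */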
thread->cpu_delta += delta;
- priority = sched_compute_timeshare_priority(thread);
-
+#if CONFIG_SCHED_CLUTCH
/*
- * Adjust the scheduled priority like thread_recompute_sched_pri,
- * except with the benefit of knowing the thread is on this core.
+ * Update the CPU usage for the thread group to which the thread belongs.
+ * The implementation assumes that the thread ran for the entire delta
+ * as part of the same thread group.
*/
- if ((!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) || (priority > thread->sched_pri)) &&
- (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)) &&
- priority != thread->sched_pri) {
-
- thread->sched_pri = priority;
-
- KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
- (uintptr_t)thread_tid(thread),
- thread->base_pri,
- thread->sched_pri,
- thread->sched_usage,
- 0);
+ sched_clutch_cpu_usage_update(thread, delta);
+#endif /* CONFIG_SCHED_CLUTCH */
+
+ priority = sched_compute_timeshare_priority(thread);
+
+ if (priority != thread->sched_pri) {
+ thread_recompute_sched_pri(thread, SETPRI_LAZY);
}
}
}
 * Shift structures for holding update shifts.  The actual computation
 * is usage = (usage >> shift1) +/- (usage >> abs(shift2)) where the
* +/- is determined by the sign of shift 2.
*/
-struct shift_data {
- int shift1;
- int shift2;
-};
-#define SCHED_DECAY_TICKS 32
-static struct shift_data sched_decay_shifts[SCHED_DECAY_TICKS] = {
- {1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7},
- {5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13},
- {11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18},
- {16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27}
+const struct shift_data sched_decay_shifts[SCHED_DECAY_TICKS] = {
+ { .shift1 = 1, .shift2 = 1 },
+ { .shift1 = 1, .shift2 = 3 },
+ { .shift1 = 1, .shift2 = -3 },
+ { .shift1 = 2, .shift2 = -7 },
+ { .shift1 = 3, .shift2 = 5 },
+ { .shift1 = 3, .shift2 = -5 },
+ { .shift1 = 4, .shift2 = -8 },
+ { .shift1 = 5, .shift2 = 7 },
+ { .shift1 = 5, .shift2 = -7 },
+ { .shift1 = 6, .shift2 = -10 },
+ { .shift1 = 7, .shift2 = 10 },
+ { .shift1 = 7, .shift2 = -9 },
+ { .shift1 = 8, .shift2 = -11 },
+ { .shift1 = 9, .shift2 = 12 },
+ { .shift1 = 9, .shift2 = -11 },
+ { .shift1 = 10, .shift2 = -13 },
+ { .shift1 = 11, .shift2 = 14 },
+ { .shift1 = 11, .shift2 = -13 },
+ { .shift1 = 12, .shift2 = -15 },
+ { .shift1 = 13, .shift2 = 17 },
+ { .shift1 = 13, .shift2 = -15 },
+ { .shift1 = 14, .shift2 = -17 },
+ { .shift1 = 15, .shift2 = 19 },
+ { .shift1 = 16, .shift2 = 18 },
+ { .shift1 = 16, .shift2 = -19 },
+ { .shift1 = 17, .shift2 = 22 },
+ { .shift1 = 18, .shift2 = 20 },
+ { .shift1 = 18, .shift2 = -20 },
+ { .shift1 = 19, .shift2 = 26 },
+ { .shift1 = 20, .shift2 = 22 },
+ { .shift1 = 20, .shift2 = -22 },
+ { .shift1 = 21, .shift2 = -27 }
};
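/*
 * Editor's note: these shift pairs approximate usage * (5/8)^n without a
 * multiply.  For example, for n = 1:
 *	(usage >> 1) + (usage >> 3) = 0.5*usage + 0.125*usage = 0.625*usage = (5/8)*usage
 * and for n = 2 (negative shift2, so the second term is subtracted):
 *	(usage >> 1) - (usage >> 3) = 0.375*usage, close to (5/8)^2 = 0.390625*usage
 */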
/*
*/
extern int sched_pri_decay_band_limit;
-#ifdef CONFIG_EMBEDDED
+
+/* Only use the decay floor logic on non-macOS and non-clutch schedulers */
+#if !defined(XNU_TARGET_OS_OSX) && !CONFIG_SCHED_CLUTCH
int
sched_compute_timeshare_priority(thread_t thread)
{
- int decay_amount = (thread->sched_usage >> thread->pri_shift);
+ int decay_amount;
int decay_limit = sched_pri_decay_band_limit;
if (thread->base_pri > BASEPRI_FOREGROUND) {
decay_limit += (thread->base_pri - BASEPRI_FOREGROUND);
}
+ if (thread->pri_shift == INT8_MAX) {
+ decay_amount = 0;
+ } else {
+ decay_amount = (thread->sched_usage >> thread->pri_shift);
+ }
+
if (decay_amount > decay_limit) {
decay_amount = decay_limit;
}
return priority;
}
-#else /* CONFIG_EMBEDDED */
+#else /* !defined(XNU_TARGET_OS_OSX) && !CONFIG_SCHED_CLUTCH */
int
sched_compute_timeshare_priority(thread_t thread)
{
/* start with base priority */
- int priority = thread->base_pri - (thread->sched_usage >> thread->pri_shift);
+ int priority = thread->base_pri;
+
+ if (thread->pri_shift != INT8_MAX) {
+ priority -= (thread->sched_usage >> thread->pri_shift);
+ }
- if (priority < MINPRI_USER)
+ if (priority < MINPRI_USER) {
priority = MINPRI_USER;
- else if (priority > MAXPRI_KERNEL)
+ } else if (priority > MAXPRI_KERNEL) {
priority = MAXPRI_KERNEL;
+ }
return priority;
}
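/*
 * Worked example (editor's sketch): a thread at base_pri 31 (BASEPRI_DEFAULT)
 * with an illustrative pri_shift of 10 and sched_usage of 8192 computes
 * 31 - (8192 >> 10) = 23; with pri_shift == INT8_MAX (no contention) no decay
 * is applied and the result is base_pri, clamped to
 * [MINPRI_USER, MAXPRI_KERNEL].
 */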
-#endif /* CONFIG_EMBEDDED */
+#endif /* !defined(XNU_TARGET_OS_OSX) && !CONFIG_SCHED_CLUTCH */
/*
* can_update_priority
*/
boolean_t
can_update_priority(
- thread_t thread)
+ thread_t thread)
{
- if (sched_tick == thread->sched_stamp)
- return (FALSE);
- else
- return (TRUE);
+ if (sched_tick == thread->sched_stamp) {
+ return FALSE;
+ } else {
+ return TRUE;
+ }
}
/*
*/
void
update_priority(
- thread_t thread)
+ thread_t thread)
{
uint32_t ticks, delta;
thread->sched_stamp += ticks;
- thread->pri_shift = sched_pri_shifts[thread->th_sched_bucket];
-
/* If requested, accelerate aging of sched_usage */
- if (sched_decay_usage_age_factor > 1)
+ if (sched_decay_usage_age_factor > 1) {
ticks *= sched_decay_usage_age_factor;
+ }
	/*
	 * Gather cpu usage data.
	 */
thread_timer_delta(thread, delta);
if (ticks < SCHED_DECAY_TICKS) {
/*
- * Accumulate timesharing usage only
- * during contention for processor
- * resources.
+ * Accumulate timesharing usage only during contention for processor
+ * resources. Use the pri_shift from the previous tick window to
+ * determine if the system was in a contended state.
*/
- if (thread->pri_shift < INT8_MAX)
- thread->sched_usage += delta;
+ if (thread->pri_shift < INT8_MAX) {
+ if (thread_no_smt(thread) && smt_timeshare_enabled) {
+ thread->sched_usage += (delta + ((delta * smt_sched_bonus_16ths) >> 4));
+ } else {
+ thread->sched_usage += delta;
+ }
+ }
thread->cpu_usage += delta + thread->cpu_delta;
thread->cpu_delta = 0;
- struct shift_data *shiftp = &sched_decay_shifts[ticks];
+#if CONFIG_SCHED_CLUTCH
+ /*
+ * Update the CPU usage for the thread group to which the thread belongs.
+ * The implementation assumes that the thread ran for the entire delta
+ * as part of the same thread group.
+ */
+ sched_clutch_cpu_usage_update(thread, delta);
+#endif /* CONFIG_SCHED_CLUTCH */
+
+ const struct shift_data *shiftp = &sched_decay_shifts[ticks];
if (shiftp->shift2 > 0) {
thread->cpu_usage = (thread->cpu_usage >> shiftp->shift1) +
- (thread->cpu_usage >> shiftp->shift2);
+ (thread->cpu_usage >> shiftp->shift2);
thread->sched_usage = (thread->sched_usage >> shiftp->shift1) +
- (thread->sched_usage >> shiftp->shift2);
+ (thread->sched_usage >> shiftp->shift2);
} else {
thread->cpu_usage = (thread->cpu_usage >> shiftp->shift1) -
- (thread->cpu_usage >> -(shiftp->shift2));
+ (thread->cpu_usage >> -(shiftp->shift2));
thread->sched_usage = (thread->sched_usage >> shiftp->shift1) -
- (thread->sched_usage >> -(shiftp->shift2));
+ (thread->sched_usage >> -(shiftp->shift2));
}
} else {
thread->cpu_usage = thread->cpu_delta = 0;
}
/*
- * Recompute scheduled priority if appropriate.
+ * Now that the thread's CPU usage has been accumulated and aged
+ * based on contention of the previous tick window, update the
+ * pri_shift of the thread to match the current global load/shift
+ * values. The updated pri_shift would be used to calculate the
+ * new priority of the thread.
*/
- if (thread->sched_mode == TH_MODE_TIMESHARE) {
- int priority = sched_compute_timeshare_priority(thread);
-
- /*
- * Adjust the scheduled priority like thread_recompute_sched_pri,
- * except without setting an AST.
- */
- if ((!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) || (priority > thread->sched_pri)) &&
- (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)) &&
- priority != thread->sched_pri) {
-
- boolean_t removed = thread_run_queue_remove(thread);
-
- thread->sched_pri = priority;
-
- KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
- (uintptr_t)thread_tid(thread),
- thread->base_pri,
- thread->sched_pri,
- thread->sched_usage,
- 0);
+#if CONFIG_SCHED_CLUTCH
+ thread->pri_shift = sched_clutch_thread_pri_shift(thread, thread->th_sched_bucket);
+#else /* CONFIG_SCHED_CLUTCH */
+ thread->pri_shift = sched_pri_shifts[thread->th_sched_bucket];
+#endif /* CONFIG_SCHED_CLUTCH */
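	/*
	 * Editor's note (assumption): the per-bucket pri_shift values are derived
	 * from the global load averages; INT8_MAX is the sentinel for "no
	 * contention", in which case neither the usage accumulation above nor
	 * the priority decay in sched_compute_timeshare_priority() applies.
	 */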
- if (removed)
- thread_run_queue_reinsert(thread, SCHED_TAILQ);
- }
+ /* Recompute scheduled priority if appropriate. */
+ if (thread->sched_mode == TH_MODE_TIMESHARE) {
+ thread_recompute_sched_pri(thread, SETPRI_LAZY);
}
-
- return;
}
#endif /* CONFIG_SCHED_TIMESHARE_CORE */
/*
* TH_BUCKET_RUN is a count of *all* runnable non-idle threads.
* Each other bucket is a count of the runnable non-idle threads
- * with that property.
+ * with that property. All updates to these counts should be
+ * performed with os_atomic_* operations.
+ *
+ * For the clutch scheduler, this global bucket is used only for
+ * keeping the total global run count.
*/
-volatile uint32_t sched_run_buckets[TH_BUCKET_MAX];
+uint32_t sched_run_buckets[TH_BUCKET_MAX];
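/*
 * Illustrative read (sketch): consumers sample these counters with matching
 * atomics, e.g.
 *
 *	uint32_t nrun = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
 *
 * pairing with the os_atomic_inc/dec/add/sub updates below.
 */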
static void
sched_incr_bucket(sched_bucket_t bucket)
{
assert(bucket >= TH_BUCKET_FIXPRI &&
- bucket <= TH_BUCKET_SHARE_BG);
+ bucket <= TH_BUCKET_SHARE_BG);
- hw_atomic_add(&sched_run_buckets[bucket], 1);
+ os_atomic_inc(&sched_run_buckets[bucket], relaxed);
}
static void
sched_decr_bucket(sched_bucket_t bucket)
{
assert(bucket >= TH_BUCKET_FIXPRI &&
- bucket <= TH_BUCKET_SHARE_BG);
+ bucket <= TH_BUCKET_SHARE_BG);
+
+ assert(os_atomic_load(&sched_run_buckets[bucket], relaxed) > 0);
- assert(sched_run_buckets[bucket] > 0);
+ os_atomic_dec(&sched_run_buckets[bucket], relaxed);
+}
- hw_atomic_sub(&sched_run_buckets[bucket], 1);
+static void
+sched_add_bucket(sched_bucket_t bucket, uint8_t run_weight)
+{
+ assert(bucket >= TH_BUCKET_FIXPRI &&
+ bucket <= TH_BUCKET_SHARE_BG);
+
+ os_atomic_add(&sched_run_buckets[bucket], run_weight, relaxed);
}
-/* TH_RUN & !TH_IDLE controls whether a thread has a run count */
+static void
+sched_sub_bucket(sched_bucket_t bucket, uint8_t run_weight)
+{
+ assert(bucket >= TH_BUCKET_FIXPRI &&
+ bucket <= TH_BUCKET_SHARE_BG);
+
+ assert(os_atomic_load(&sched_run_buckets[bucket], relaxed) > 0);
+
+ os_atomic_sub(&sched_run_buckets[bucket], run_weight, relaxed);
+}
uint32_t
sched_run_incr(thread_t thread)
{
- assert((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN);
+ assert((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN);
- uint32_t new_count = hw_atomic_add(&sched_run_buckets[TH_BUCKET_RUN], 1);
+ uint32_t new_count = os_atomic_inc(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
sched_incr_bucket(thread->th_sched_bucket);
uint32_t
sched_run_decr(thread_t thread)
{
- assert((thread->state & (TH_RUN|TH_IDLE)) != TH_RUN);
+ assert((thread->state & (TH_RUN | TH_IDLE)) != TH_RUN);
sched_decr_bucket(thread->th_sched_bucket);
- uint32_t new_count = hw_atomic_sub(&sched_run_buckets[TH_BUCKET_RUN], 1);
+ uint32_t new_count = os_atomic_dec(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
return new_count;
}
-static void
+uint32_t
+sched_smt_run_incr(thread_t thread)
+{
+ assert((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN);
+
+ uint8_t run_weight = (thread_no_smt(thread) && smt_timeshare_enabled) ? 2 : 1;
+ thread->sched_saved_run_weight = run_weight;
+
+ uint32_t new_count = os_atomic_add(&sched_run_buckets[TH_BUCKET_RUN], run_weight, relaxed);
+
+ sched_add_bucket(thread->th_sched_bucket, run_weight);
+
+ return new_count;
+}
+
+uint32_t
+sched_smt_run_decr(thread_t thread)
+{
+ assert((thread->state & (TH_RUN | TH_IDLE)) != TH_RUN);
+
+ uint8_t run_weight = thread->sched_saved_run_weight;
+
+ sched_sub_bucket(thread->th_sched_bucket, run_weight);
+
+ uint32_t new_count = os_atomic_sub(&sched_run_buckets[TH_BUCKET_RUN], run_weight, relaxed);
+
+ return new_count;
+}
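/*
 * Editor's note: in the SMT-aware variants, a thread that has opted out of
 * SMT sharing (thread_no_smt()) is counted with a run weight of 2 while
 * smt_timeshare_enabled, so the load averages treat it as occupying both
 * hyperthreads of a core; the weight chosen at increment time is saved in
 * sched_saved_run_weight so the matching decrement stays balanced.
 */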
+
+void
sched_update_thread_bucket(thread_t thread)
{
sched_bucket_t old_bucket = thread->th_sched_bucket;
break;
case TH_MODE_TIMESHARE:
- if (thread->base_pri > BASEPRI_UTILITY)
+ if (thread->base_pri > BASEPRI_DEFAULT) {
new_bucket = TH_BUCKET_SHARE_FG;
- else if (thread->base_pri > MAXPRI_THROTTLE)
+ } else if (thread->base_pri > BASEPRI_UTILITY) {
+ new_bucket = TH_BUCKET_SHARE_DF;
+ } else if (thread->base_pri > MAXPRI_THROTTLE) {
new_bucket = TH_BUCKET_SHARE_UT;
- else
+ } else {
new_bucket = TH_BUCKET_SHARE_BG;
+ }
break;
default:
thread->th_sched_bucket = new_bucket;
thread->pri_shift = sched_pri_shifts[new_bucket];
- if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN) {
+ if ((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN) {
sched_decr_bucket(old_bucket);
sched_incr_bucket(new_bucket);
}
}
}
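/*
 * Example (editor's sketch, assuming the usual priority bands): a timeshare
 * thread at base_pri 37 (> BASEPRI_DEFAULT) lands in TH_BUCKET_SHARE_FG, one
 * at 31 in TH_BUCKET_SHARE_DF, one at 20 (BASEPRI_UTILITY) in
 * TH_BUCKET_SHARE_UT, and one at 4 (MAXPRI_THROTTLE) in TH_BUCKET_SHARE_BG;
 * fixed and realtime threads always use TH_BUCKET_FIXPRI.
 */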
+void
+sched_smt_update_thread_bucket(thread_t thread)
+{
+ sched_bucket_t old_bucket = thread->th_sched_bucket;
+ sched_bucket_t new_bucket = TH_BUCKET_RUN;
+
+ switch (thread->sched_mode) {
+ case TH_MODE_FIXED:
+ case TH_MODE_REALTIME:
+ new_bucket = TH_BUCKET_FIXPRI;
+ break;
+
+ case TH_MODE_TIMESHARE:
+ if (thread->base_pri > BASEPRI_DEFAULT) {
+ new_bucket = TH_BUCKET_SHARE_FG;
+ } else if (thread->base_pri > BASEPRI_UTILITY) {
+ new_bucket = TH_BUCKET_SHARE_DF;
+ } else if (thread->base_pri > MAXPRI_THROTTLE) {
+ new_bucket = TH_BUCKET_SHARE_UT;
+ } else {
+ new_bucket = TH_BUCKET_SHARE_BG;
+ }
+ break;
+
+ default:
+ panic("unexpected mode: %d", thread->sched_mode);
+ break;
+ }
+
+ if (old_bucket != new_bucket) {
+ thread->th_sched_bucket = new_bucket;
+ thread->pri_shift = sched_pri_shifts[new_bucket];
+
+ if ((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN) {
+ sched_sub_bucket(old_bucket, thread->sched_saved_run_weight);
+ sched_add_bucket(new_bucket, thread->sched_saved_run_weight);
+ }
+ }
+}
+
/*
* Set the thread's true scheduling mode
 * Called with thread mutex and thread locked
 */
break;
}
+#if CONFIG_SCHED_AUTO_JOIN
+ /*
+ * Realtime threads might have auto-joined a work interval based on
+ * make runnable relationships. If such an RT thread is now being demoted
+ * to non-RT, unjoin the thread from the work interval.
+ */
+ if ((thread->sched_flags & TH_SFLAG_THREAD_GROUP_AUTO_JOIN) && (new_mode != TH_MODE_REALTIME)) {
+ assert((thread->sched_mode == TH_MODE_REALTIME) || (thread->th_work_interval_flags & TH_WORK_INTERVAL_FLAGS_AUTO_JOIN_LEAK));
+ work_interval_auto_join_demote(thread);
+ }
+#endif /* CONFIG_SCHED_AUTO_JOIN */
+
thread->sched_mode = new_mode;
- sched_update_thread_bucket(thread);
+ SCHED(update_thread_bucket)(thread);
}
/*
assert(reason & TH_SFLAG_DEMOTED_MASK);
assert((thread->sched_flags & reason) != reason);
- if (thread->policy_reset)
+ if (thread->policy_reset) {
return;
+ }
if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
/* Another demotion reason is already active */
thread_recompute_priority(thread);
- if (removed)
+ if (removed) {
thread_run_queue_reinsert(thread, SCHED_TAILQ);
+ }
}
/*
thread_recompute_priority(thread);
- if (removed)
+ if (removed) {
thread_run_queue_reinsert(thread, SCHED_TAILQ);
+ }
}
+/*
+ * Promote thread to have a sched pri floor for a specific reason
+ *
+ * Promotion must not last past syscall boundary
+ * Clients must always pair promote and demote 1:1,
+ * Handling nesting of the same promote reason is the client's responsibility
+ *
+ * Called at splsched with thread locked
+ */
+void
+sched_thread_promote_reason(thread_t thread,
+ uint32_t reason,
+ __kdebug_only uintptr_t trace_obj /* already unslid */)
+{
+ assert(reason & TH_SFLAG_PROMOTE_REASON_MASK);
+ assert((thread->sched_flags & reason) != reason);
+ switch (reason) {
+ case TH_SFLAG_RW_PROMOTED:
+ KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_PROMOTE),
+ thread_tid(thread), thread->sched_pri,
+ thread->base_pri, trace_obj);
+ break;
+ case TH_SFLAG_WAITQ_PROMOTED:
+ KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAITQ_PROMOTE),
+ thread_tid(thread), thread->sched_pri,
+ thread->base_pri, trace_obj);
+ break;
+ case TH_SFLAG_EXEC_PROMOTED:
+ KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_EXEC_PROMOTE),
+ thread_tid(thread), thread->sched_pri,
+ thread->base_pri, trace_obj);
+ break;
+ }
+
+ thread->sched_flags |= reason;
+
+ thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
+}
+
+/*
+ * End a specific promotion reason
+ * Demotes a thread back to its expected priority without the promotion in place
+ *
+ * Called at splsched with thread locked
+ */
+void
+sched_thread_unpromote_reason(thread_t thread,
+ uint32_t reason,
+ __kdebug_only uintptr_t trace_obj /* already unslid */)
+{
+ assert(reason & TH_SFLAG_PROMOTE_REASON_MASK);
+ assert((thread->sched_flags & reason) == reason);
+
+ switch (reason) {
+ case TH_SFLAG_RW_PROMOTED:
+ KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_DEMOTE),
+ thread_tid(thread), thread->sched_pri,
+ thread->base_pri, trace_obj);
+ break;
+ case TH_SFLAG_WAITQ_PROMOTED:
+ KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAITQ_DEMOTE),
+ thread_tid(thread), thread->sched_pri,
+ thread->base_pri, trace_obj);
+ break;
+ case TH_SFLAG_EXEC_PROMOTED:
+ KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_EXEC_DEMOTE),
+ thread_tid(thread), thread->sched_pri,
+ thread->base_pri, trace_obj);
+ break;
+ }
+
+ thread->sched_flags &= ~reason;
+
+ thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
+}
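/*
 * Usage sketch (illustrative): callers pair the two calls 1:1 around the
 * window in which the priority floor is needed, e.g. for a reader-writer
 * lock:
 *
 *	sched_thread_promote_reason(owner, TH_SFLAG_RW_PROMOTED, trace_lock);
 *	... lock is held and may be contended ...
 *	sched_thread_unpromote_reason(owner, TH_SFLAG_RW_PROMOTED, trace_lock);
 *
 * where owner and trace_lock are hypothetical names and trace_lock is an
 * already-unslid pointer used only for tracing.
 */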