/*
- * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. The rights granted to you under the License
* may not be used to create, or enable the creation or redistribution of,
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
- *
+ *
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
- *
+ *
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
*/
-/*
+/*
* Mach Operating System
* Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
* All Rights Reserved.
- *
+ *
* Permission to use, copy, modify and distribute this software and its
* documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
- *
+ *
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
* ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
* Carnegie Mellon requests users of this software to return to
- *
+ *
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
- *
+ *
* any improvements or extensions that they make and grant Carnegie Mellon
* the rights to redistribute these changes.
*/
#include <kern/ledger.h>
#include <machine/machparam.h>
#include <kern/machine.h>
+#include <kern/policy_internal.h>
+#include <kern/sched_clutch.h>
#ifdef CONFIG_MACH_APPROXIMATE_TIME
#include <machine/commpage.h> /* for commpage_update_mach_approximate_time */
#endif
+#if MONOTONIC
+#include <kern/monotonic.h>
+#endif /* MONOTONIC */
+
/*
* thread_quantum_expire:
*
* Recalculate the quantum and priority for a thread.
*
* Called at splsched.
*/
void
thread_quantum_expire(
- timer_call_param_t p0,
- timer_call_param_t p1)
+ timer_call_param_t p0,
+ timer_call_param_t p1)
{
- processor_t processor = p0;
- thread_t thread = p1;
- ast_t preempt;
- uint64_t ctime;
- int urgency;
- uint64_t ignore1, ignore2;
+ processor_t processor = p0;
+ thread_t thread = p1;
+ ast_t preempt;
+ uint64_t ctime;
assert(processor == current_processor());
assert(thread == current_thread());
/*
* We bill CPU time to both the individual thread and its task.
*
- * Because this balance adjustment could potentially attempt to wake this very
- * thread, we must credit the ledger before taking the thread lock. The ledger
- * pointers are only manipulated by the thread itself at the ast boundary.
+ * Because this balance adjustment could potentially attempt to wake this
+ * very thread, we must credit the ledger before taking the thread lock.
+ * The ledger pointers are only manipulated by the thread itself at the ast
+ * boundary.
+ *
+ * TODO: This fails to account for the time between when the timer was
+ * armed and when it fired. It should be based on the system_timer and
+ * running a timer_update operation here.
*/
ledger_credit(thread->t_ledger, task_ledgers.cpu_time, thread->quantum_remaining);
ledger_credit(thread->t_threadledger, thread_ledgers.cpu_time, thread->quantum_remaining);
-#ifdef CONFIG_BANK
if (thread->t_bankledger) {
ledger_credit(thread->t_bankledger, bank_ledgers.cpu_time,
- (thread->quantum_remaining - thread->t_deduct_bank_ledger_time));
+ (thread->quantum_remaining - thread->t_deduct_bank_ledger_time));
}
thread->t_deduct_bank_ledger_time = 0;
-#endif
ctime = mach_absolute_time();
#ifdef CONFIG_MACH_APPROXIMATE_TIME
commpage_update_mach_approximate_time(ctime);
#endif
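+ /* Update this thread's monotonic (fixed CPU perf counter) totals for the expiring quantum */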
+#if MONOTONIC
+ mt_sched_update(thread);
+#endif /* MONOTONIC */
+
thread_lock(thread);
/*
* Check for fail-safe trip.
*/
- if ((thread->sched_mode == TH_MODE_REALTIME || thread->sched_mode == TH_MODE_FIXED) &&
- !(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) &&
- !(thread->options & TH_OPT_SYSTEM_CRITICAL)) {
- uint64_t new_computation;
-
- new_computation = ctime - thread->computation_epoch;
- new_computation += thread->computation_metered;
- if (new_computation > max_unsafe_computation) {
- KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAILSAFE)|DBG_FUNC_NONE,
- (uintptr_t)thread->sched_pri, (uintptr_t)thread->sched_mode, 0, 0, 0);
+ if ((thread->sched_mode == TH_MODE_REALTIME || thread->sched_mode == TH_MODE_FIXED) &&
+ !(thread->sched_flags & TH_SFLAG_PROMOTED) &&
+ !(thread->kern_promotion_schedpri != 0) &&
+ !(thread->sched_flags & TH_SFLAG_PROMOTE_REASON_MASK) &&
+ !(thread->options & TH_OPT_SYSTEM_CRITICAL)) {
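+ /*
+ * Fail-safe: an unpromoted realtime or fixed-priority thread that has
+ * computed beyond max_unsafe_computation is demoted until safe_release,
+ * so it cannot monopolize the processor.
+ */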
+ uint64_t new_computation;
+
+ new_computation = ctime - thread->computation_epoch;
+ new_computation += thread->computation_metered;
+ if (new_computation > max_unsafe_computation) {
+ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAILSAFE) | DBG_FUNC_NONE,
+ (uintptr_t)thread->sched_pri, (uintptr_t)thread->sched_mode, 0, 0, 0);
thread->safe_release = ctime + sched_safe_duration;
/*
* Recompute scheduled priority if appropriate.
*/
- if (SCHED(can_update_priority)(thread))
+ if (SCHED(can_update_priority)(thread)) {
SCHED(update_priority)(thread);
- else
+ } else {
SCHED(lightweight_update_priority)(thread);
+ }
- if (thread->sched_mode != TH_MODE_REALTIME)
+ if (thread->sched_mode != TH_MODE_REALTIME) {
SCHED(quantum_expire)(thread);
-
- processor->current_pri = thread->sched_pri;
- processor->current_thmode = thread->sched_mode;
-
- /* Tell platform layer that we are still running this thread */
- urgency = thread_get_urgency(thread, &ignore1, &ignore2);
- machine_thread_going_on_core(thread, urgency, 0);
+ }
/*
* This quantum is up, give this thread another.
*
* Since non-precise user/kernel time doesn't update the state/thread timer
* during privilege transitions, synthesize an event now.
*/
if (!thread->precise_user_kernel_time) {
- timer_switch(PROCESSOR_DATA(processor, current_state),
- ctime,
- PROCESSOR_DATA(processor, current_state));
- timer_switch(PROCESSOR_DATA(processor, thread_timer),
- ctime,
- PROCESSOR_DATA(processor, thread_timer));
+ timer_update(PROCESSOR_DATA(processor, current_state), ctime);
+ timer_update(PROCESSOR_DATA(processor, thread_timer), ctime);
+ timer_update(&thread->runnable_timer, ctime);
}
+
processor->quantum_end = ctime + thread->quantum_remaining;
/*
- * Context switch check.
+ * Context switch check
+ *
+ * Non-urgent flags don't affect kernel threads, so upgrade to urgent
+ * to ensure that rebalancing and core derecommendation kick in quickly.
*/
- if ((preempt = csw_check(processor, AST_QUANTUM)) != AST_NONE)
+
+ ast_t check_reason = AST_QUANTUM;
+ if (thread->task == kernel_task) {
+ check_reason |= AST_URGENT;
+ }
+
+ if ((preempt = csw_check(thread, processor, check_reason)) != AST_NONE) {
ast_on(preempt);
+ }
+
+ /*
+ * AST_KEVENT does not send an IPI when setting the AST.
+ * To avoid waiting for the next context switch to propagate it,
+ * the AST is propagated here at quantum expiration.
+ */
+ ast_propagate(thread);
thread_unlock(thread);
- timer_call_enter1(&processor->quantum_timer, thread,
- processor->quantum_end, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
+ timer_call_quantum_timer_enter(&processor->quantum_timer, thread,
+ processor->quantum_end, ctime);
+
+ /* Tell platform layer that we are still running this thread */
+ thread_urgency_t urgency = thread_get_urgency(thread, NULL, NULL);
+ machine_thread_going_on_core(thread, urgency, 0, 0, ctime);
+ machine_switch_perfcontrol_state_update(QUANTUM_EXPIRY, ctime,
+ 0, thread);
#if defined(CONFIG_SCHED_TIMESHARE_CORE)
sched_timeshare_consider_maintenance(ctime);
#endif /* CONFIG_SCHED_TIMESHARE_CORE */
+#if __arm__ || __arm64__
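+ /* A realtime thread may need the recommended-cores failsafe to engage so it is not starved by derecommended cores */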
+ if (thread->sched_mode == TH_MODE_REALTIME) {
+ sched_consider_recommended_cores(ctime, thread);
+ }
+#endif /* __arm__ || __arm64__ */
KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_QUANTUM_EXPIRED) | DBG_FUNC_END, preempt, 0, 0, 0, 0);
}
void
sched_set_thread_base_priority(thread_t thread, int priority)
{
- int old_priority = thread->base_pri;
+ assert(priority >= MINPRI);
+ uint64_t ctime = 0;
+
+ if (thread->sched_mode == TH_MODE_REALTIME) {
+ assert(priority <= BASEPRI_RTQUEUES);
+ } else {
+ assert(priority < BASEPRI_RTQUEUES);
+ }
+
+ int old_base_pri = thread->base_pri;
+ thread->req_base_pri = priority;
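+ /*
+ * While TH_SFLAG_BASE_PRI_FROZEN is set, the effective base pri may
+ * only move upward; the unfiltered request is kept in req_base_pri.
+ */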
+ if (thread->sched_flags & TH_SFLAG_BASE_PRI_FROZEN) {
+ priority = MAX(priority, old_base_pri);
+ }
thread->base_pri = priority;
- /* A thread is 'throttled' when its base priority is at or below MAXPRI_THROTTLE */
- if ((priority > MAXPRI_THROTTLE) && (old_priority <= MAXPRI_THROTTLE)) {
- sched_set_thread_throttled(thread, FALSE);
- } else if ((priority <= MAXPRI_THROTTLE) && (old_priority > MAXPRI_THROTTLE)) {
- sched_set_thread_throttled(thread, TRUE);
+ if ((thread->state & TH_RUN) == TH_RUN) {
+ assert(thread->last_made_runnable_time != THREAD_NOT_RUNNABLE);
+ ctime = mach_approximate_time();
+ thread->last_basepri_change_time = ctime;
+ } else {
+ assert(thread->last_basepri_change_time == THREAD_NOT_RUNNABLE);
+ assert(thread->last_made_runnable_time == THREAD_NOT_RUNNABLE);
+ }
+
+ /*
+ * Currently the perfcontrol_attr depends on the base pri of the
+ * thread. Therefore, we use this function as the hook for the
+ * perfcontrol callout.
+ */
+ if (thread == current_thread() && old_base_pri != priority) {
+ if (!ctime) {
+ ctime = mach_approximate_time();
+ }
+ machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE,
+ ctime, PERFCONTROL_CALLOUT_WAKE_UNSAFE, thread);
+ }
+#if !CONFIG_SCHED_CLUTCH
+ /* For the clutch scheduler, this operation is done in set_sched_pri() */
+ SCHED(update_thread_bucket)(thread);
+#endif /* !CONFIG_SCHED_CLUTCH */
+
+ thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
+}
+
+/*
+ * sched_set_kernel_thread_priority:
+ *
+ * Set the absolute base priority of the thread
+ * and reset its scheduled priority.
+ *
+ * Called with the thread unlocked.
+ */
+void
+sched_set_kernel_thread_priority(thread_t thread, int new_priority)
+{
+ spl_t s = splsched();
+
+ thread_lock(thread);
+
+ assert(thread->sched_mode != TH_MODE_REALTIME);
+ assert(thread->effective_policy.thep_qos == THREAD_QOS_UNSPECIFIED);
+
+ if (new_priority > thread->max_priority) {
+ new_priority = thread->max_priority;
+ }
+#if CONFIG_EMBEDDED
+ if (new_priority < MAXPRI_THROTTLE) {
+ new_priority = MAXPRI_THROTTLE;
}
+#endif /* CONFIG_EMBEDDED */
- thread_recompute_sched_pri(thread, FALSE);
+ thread->importance = new_priority - thread->task_priority;
+
+ sched_set_thread_base_priority(thread, new_priority);
+
+ thread_unlock(thread);
+ splx(s);
}
/*
* according to its base priority if the
* thread has not been promoted or depressed.
*
- * This is the standard way to push base_pri changes into sched_pri,
- * or to recalculate the appropriate sched_pri after clearing
+ * This is the only way to push base_pri changes into sched_pri,
+ * or to recalculate the appropriate sched_pri after changing
* a promotion or depression.
*
* Called at splsched with the thread locked.
+ *
+ * TODO: Add an 'update urgency' flag to avoid urgency callouts on every rwlock operation
*/
void
-thread_recompute_sched_pri(
- thread_t thread,
- boolean_t override_depress)
+thread_recompute_sched_pri(thread_t thread, set_sched_pri_options_t options)
{
- int priority;
+ uint32_t sched_flags = thread->sched_flags;
+ sched_mode_t sched_mode = thread->sched_mode;
+
+ int priority = thread->base_pri;
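+
+ /*
+ * Resolution order: start from the base (or timeshare-decayed) priority;
+ * TH_SFLAG_DEPRESS overrides everything, while a poll-depress may still
+ * be raised by the promotion floors below, clamped at MAXPRI_PROMOTE
+ * for non-realtime threads.
+ */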
- if (thread->sched_mode == TH_MODE_TIMESHARE)
+ if (sched_mode == TH_MODE_TIMESHARE) {
priority = SCHED(compute_timeshare_priority)(thread);
- else
- priority = thread->base_pri;
+ }
+
+ if (sched_flags & TH_SFLAG_DEPRESS) {
+ /* thread_yield_internal overrides kernel mutex promotion */
+ priority = DEPRESSPRI;
+ } else {
+ /* poll-depress is overridden by mutex promotion and promote-reasons */
+ if ((sched_flags & TH_SFLAG_POLLDEPRESS)) {
+ priority = DEPRESSPRI;
+ }
- if ((!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) || (priority > thread->sched_pri)) &&
- (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) || override_depress)) {
- set_sched_pri(thread, priority);
+ if (thread->kern_promotion_schedpri > 0) {
+ priority = MAX(priority, thread->kern_promotion_schedpri);
+
+ if (sched_mode != TH_MODE_REALTIME) {
+ priority = MIN(priority, MAXPRI_PROMOTE);
+ }
+ }
+
+ if (sched_flags & TH_SFLAG_PROMOTED) {
+ priority = MAX(priority, thread->promotion_priority);
+
+ if (sched_mode != TH_MODE_REALTIME) {
+ priority = MIN(priority, MAXPRI_PROMOTE);
+ }
+ }
+
+ if (sched_flags & TH_SFLAG_PROMOTE_REASON_MASK) {
+ if (sched_flags & TH_SFLAG_RW_PROMOTED) {
+ priority = MAX(priority, MINPRI_RWLOCK);
+ }
+
+ if (sched_flags & TH_SFLAG_WAITQ_PROMOTED) {
+ priority = MAX(priority, MINPRI_WAITQ);
+ }
+
+ if (sched_flags & TH_SFLAG_EXEC_PROMOTED) {
+ priority = MAX(priority, MINPRI_EXEC);
+ }
+ }
}
+
+ set_sched_pri(thread, priority, options);
}
void
sched_default_quantum_expire(thread_t thread __unused)
{
- /*
- * No special behavior when a timeshare, fixed, or realtime thread
- * uses up its entire quantum
- */
+ /*
+ * No special behavior when a timeshare, fixed, or realtime thread
+ * uses up its entire quantum
+ */
}
#if defined(CONFIG_SCHED_TIMESHARE_CORE)
/*
* Accumulate timesharing usage only
* during contention for processor
* resources.
*/
- if (thread->pri_shift < INT8_MAX)
+ if (thread->pri_shift < INT8_MAX) {
thread->sched_usage += delta;
+ }
thread->cpu_delta += delta;
- priority = sched_compute_timeshare_priority(thread);
-
+#if CONFIG_SCHED_CLUTCH
/*
- * Adjust the scheduled priority like thread_recompute_sched_pri,
- * except with the benefit of knowing the thread is on this core.
+ * Update the CPU usage for the thread group to which the thread belongs.
+ * The implementation assumes that the thread ran for the entire delta
+ * as part of the same thread group.
*/
- if ((!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) || (priority > thread->sched_pri)) &&
- (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)) &&
- priority != thread->sched_pri) {
-
- thread->sched_pri = priority;
-
- KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
- (uintptr_t)thread_tid(thread),
- thread->base_pri,
- thread->sched_pri,
- 0, /* eventually, 'reason' */
- 0);
+ sched_clutch_cpu_usage_update(thread, delta);
+#endif /* CONFIG_SCHED_CLUTCH */
+
+ priority = sched_compute_timeshare_priority(thread);
+
+ if (priority != thread->sched_pri) {
+ thread_recompute_sched_pri(thread, SETPRI_LAZY);
}
}
}
/*
* Define shifts for simulating (5/8) ** n
*
* Shift structures for holding update shifts. Actual computation
* is usage = (usage >> shift1) +/- (usage >> abs(shift2)) where the
* +/- is determined by the sign of shift 2.
*/
-struct shift_data {
- int shift1;
- int shift2;
-};
-#define SCHED_DECAY_TICKS 32
-static struct shift_data sched_decay_shifts[SCHED_DECAY_TICKS] = {
- {1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7},
- {5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13},
- {11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18},
- {16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27}
+const struct shift_data sched_decay_shifts[SCHED_DECAY_TICKS] = {
+ { .shift1 = 1, .shift2 = 1 },
+ { .shift1 = 1, .shift2 = 3 },
+ { .shift1 = 1, .shift2 = -3 },
+ { .shift1 = 2, .shift2 = -7 },
+ { .shift1 = 3, .shift2 = 5 },
+ { .shift1 = 3, .shift2 = -5 },
+ { .shift1 = 4, .shift2 = -8 },
+ { .shift1 = 5, .shift2 = 7 },
+ { .shift1 = 5, .shift2 = -7 },
+ { .shift1 = 6, .shift2 = -10 },
+ { .shift1 = 7, .shift2 = 10 },
+ { .shift1 = 7, .shift2 = -9 },
+ { .shift1 = 8, .shift2 = -11 },
+ { .shift1 = 9, .shift2 = 12 },
+ { .shift1 = 9, .shift2 = -11 },
+ { .shift1 = 10, .shift2 = -13 },
+ { .shift1 = 11, .shift2 = 14 },
+ { .shift1 = 11, .shift2 = -13 },
+ { .shift1 = 12, .shift2 = -15 },
+ { .shift1 = 13, .shift2 = 17 },
+ { .shift1 = 13, .shift2 = -15 },
+ { .shift1 = 14, .shift2 = -17 },
+ { .shift1 = 15, .shift2 = 19 },
+ { .shift1 = 16, .shift2 = 18 },
+ { .shift1 = 16, .shift2 = -19 },
+ { .shift1 = 17, .shift2 = 22 },
+ { .shift1 = 18, .shift2 = 20 },
+ { .shift1 = 18, .shift2 = -20 },
+ { .shift1 = 19, .shift2 = 26 },
+ { .shift1 = 20, .shift2 = 22 },
+ { .shift1 = 20, .shift2 = -22 },
+ { .shift1 = 21, .shift2 = -27 }
};
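+
+/*
+ * Example: the entry for ticks == 1, { .shift1 = 1, .shift2 = 3 }, computes
+ * (usage >> 1) + (usage >> 3) = usage * 5/8; the entry for ticks == 2,
+ * { .shift1 = 1, .shift2 = -3 }, computes
+ * (usage >> 1) - (usage >> 3) = usage * 3/8, approximating (5/8)^2.
+ */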
/*
extern int sched_pri_decay_band_limit;
+/* Only use the decay floor logic on embedded non-clutch schedulers */
+#if CONFIG_EMBEDDED && !CONFIG_SCHED_CLUTCH
+
+int
+sched_compute_timeshare_priority(thread_t thread)
+{
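+ /*
+ * Decay-floor variant: cap how far accumulated usage can drag a thread
+ * below its base priority (sched_pri_decay_band_limit, widened for
+ * threads whose base pri is above BASEPRI_FOREGROUND).
+ */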
+ int decay_amount = (thread->sched_usage >> thread->pri_shift);
+ int decay_limit = sched_pri_decay_band_limit;
+
+ if (thread->base_pri > BASEPRI_FOREGROUND) {
+ decay_limit += (thread->base_pri - BASEPRI_FOREGROUND);
+ }
+
+ if (decay_amount > decay_limit) {
+ decay_amount = decay_limit;
+ }
+
+ /* start with base priority */
+ int priority = thread->base_pri - decay_amount;
+
+ if (priority < MAXPRI_THROTTLE) {
+ if (thread->task->max_priority > MAXPRI_THROTTLE) {
+ priority = MAXPRI_THROTTLE;
+ } else if (priority < MINPRI_USER) {
+ priority = MINPRI_USER;
+ }
+ } else if (priority > MAXPRI_KERNEL) {
+ priority = MAXPRI_KERNEL;
+ }
+
+ return priority;
+}
+
+#else /* CONFIG_EMBEDDED && !CONFIG_SCHED_CLUTCH */
+
int
sched_compute_timeshare_priority(thread_t thread)
{
/* start with base priority */
int priority = thread->base_pri - (thread->sched_usage >> thread->pri_shift);
- if (priority < MINPRI_USER)
+ if (priority < MINPRI_USER) {
priority = MINPRI_USER;
- else if (priority > MAXPRI_KERNEL)
+ } else if (priority > MAXPRI_KERNEL) {
priority = MAXPRI_KERNEL;
+ }
return priority;
}
+#endif /* CONFIG_EMBEDDED && !CONFIG_SCHED_CLUTCH */
/*
* can_update_priority
*/
boolean_t
can_update_priority(
- thread_t thread)
+ thread_t thread)
{
- if (sched_tick == thread->sched_stamp)
- return (FALSE);
- else
- return (TRUE);
+ if (sched_tick == thread->sched_stamp) {
+ return FALSE;
+ } else {
+ return TRUE;
+ }
}
/*
*/
void
update_priority(
- register thread_t thread)
+ thread_t thread)
{
- register unsigned ticks;
- register uint32_t delta;
+ uint32_t ticks, delta;
ticks = sched_tick - thread->sched_stamp;
assert(ticks != 0);
+
thread->sched_stamp += ticks;
- if (sched_use_combined_fgbg_decay)
- thread->pri_shift = sched_combined_fgbg_pri_shift;
- else if (thread->sched_flags & TH_SFLAG_THROTTLED)
- thread->pri_shift = sched_background_pri_shift;
- else
- thread->pri_shift = sched_pri_shift;
/* If requested, accelerate aging of sched_usage */
- if (sched_decay_usage_age_factor > 1)
+ if (sched_decay_usage_age_factor > 1) {
ticks *= sched_decay_usage_age_factor;
+ }
/*
* Gather cpu usage data.
*/
thread_timer_delta(thread, delta);
if (ticks < SCHED_DECAY_TICKS) {
- register struct shift_data *shiftp;
-
/*
- * Accumulate timesharing usage only
- * during contention for processor
- * resources.
+ * Accumulate timesharing usage only during contention for processor
+ * resources. Use the pri_shift from the previous tick window to
+ * determine if the system was in a contended state.
*/
- if (thread->pri_shift < INT8_MAX)
+ if (thread->pri_shift < INT8_MAX) {
thread->sched_usage += delta;
+ }
thread->cpu_usage += delta + thread->cpu_delta;
thread->cpu_delta = 0;
- shiftp = &sched_decay_shifts[ticks];
+#if CONFIG_SCHED_CLUTCH
+ /*
+ * Update the CPU usage for the thread group to which the thread belongs.
+ * The implementation assumes that the thread ran for the entire delta
+ * as part of the same thread group.
+ */
+ sched_clutch_cpu_usage_update(thread, delta);
+#endif /* CONFIG_SCHED_CLUTCH */
+
+ const struct shift_data *shiftp = &sched_decay_shifts[ticks];
+
if (shiftp->shift2 > 0) {
- thread->cpu_usage =
- (thread->cpu_usage >> shiftp->shift1) +
- (thread->cpu_usage >> shiftp->shift2);
- thread->sched_usage =
- (thread->sched_usage >> shiftp->shift1) +
- (thread->sched_usage >> shiftp->shift2);
- }
- else {
- thread->cpu_usage =
- (thread->cpu_usage >> shiftp->shift1) -
- (thread->cpu_usage >> -(shiftp->shift2));
- thread->sched_usage =
- (thread->sched_usage >> shiftp->shift1) -
- (thread->sched_usage >> -(shiftp->shift2));
+ thread->cpu_usage = (thread->cpu_usage >> shiftp->shift1) +
+ (thread->cpu_usage >> shiftp->shift2);
+ thread->sched_usage = (thread->sched_usage >> shiftp->shift1) +
+ (thread->sched_usage >> shiftp->shift2);
+ } else {
+ thread->cpu_usage = (thread->cpu_usage >> shiftp->shift1) -
+ (thread->cpu_usage >> -(shiftp->shift2));
+ thread->sched_usage = (thread->sched_usage >> shiftp->shift1) -
+ (thread->sched_usage >> -(shiftp->shift2));
}
- }
- else {
+ } else {
thread->cpu_usage = thread->cpu_delta = 0;
thread->sched_usage = 0;
}
}
/*
- * Recompute scheduled priority if appropriate.
+ * Now that the thread's CPU usage has been accumulated and aged
+ * based on contention of the previous tick window, update the
+ * pri_shift of the thread to match the current global load/shift
+ * values. The updated pri_shift is then used to calculate the
+ * new priority of the thread.
*/
- if (thread->sched_mode == TH_MODE_TIMESHARE) {
- int priority = sched_compute_timeshare_priority(thread);
+#if CONFIG_SCHED_CLUTCH
+ thread->pri_shift = sched_clutch_thread_pri_shift(thread, thread->th_sched_bucket);
+#else /* CONFIG_SCHED_CLUTCH */
+ thread->pri_shift = sched_pri_shifts[thread->th_sched_bucket];
+#endif /* CONFIG_SCHED_CLUTCH */
- /*
- * Adjust the scheduled priority like thread_recompute_sched_pri,
- * except without setting an AST.
- */
- if ((!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) || (priority > thread->sched_pri)) &&
- (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)) &&
- priority != thread->sched_pri) {
+ /* Recompute scheduled priority if appropriate. */
+ if (thread->sched_mode == TH_MODE_TIMESHARE) {
+ thread_recompute_sched_pri(thread, SETPRI_LAZY);
+ }
+}
- boolean_t removed = thread_run_queue_remove(thread);
+#endif /* CONFIG_SCHED_TIMESHARE_CORE */
- thread->sched_pri = priority;
- KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
- (uintptr_t)thread_tid(thread),
- thread->base_pri,
- thread->sched_pri,
- 0, /* eventually, 'reason' */
- 0);
+/*
+ * TH_BUCKET_RUN is a count of *all* runnable non-idle threads.
+ * Each other bucket is a count of the runnable non-idle threads
+ * with that property. All updates to these counts should be
+ * performed with os_atomic_* operations.
+ *
+ * For the clutch scheduler, this global bucket is used only for
+ * keeping the total global run count.
+ */
+uint32_t sched_run_buckets[TH_BUCKET_MAX];
- if (removed)
- thread_run_queue_reinsert(thread, SCHED_TAILQ);
- }
- }
+static void
+sched_incr_bucket(sched_bucket_t bucket)
+{
+ assert(bucket >= TH_BUCKET_FIXPRI &&
+ bucket <= TH_BUCKET_SHARE_BG);
- return;
+ os_atomic_inc(&sched_run_buckets[bucket], relaxed);
}
-#endif /* CONFIG_SCHED_TIMESHARE_CORE */
+static void
+sched_decr_bucket(sched_bucket_t bucket)
+{
+ assert(bucket >= TH_BUCKET_FIXPRI &&
+ bucket <= TH_BUCKET_SHARE_BG);
-#if MACH_ASSERT
-/* sched_mode == TH_MODE_TIMESHARE controls whether a thread has a timeshare count when it has a run count */
+ assert(os_atomic_load(&sched_run_buckets[bucket], relaxed) > 0);
-void sched_share_incr(thread_t thread) {
- assert((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN);
- assert(thread->sched_mode == TH_MODE_TIMESHARE);
- assert(thread->SHARE_COUNT == 0);
- thread->SHARE_COUNT++;
- (void)hw_atomic_add(&sched_share_count, 1);
+ os_atomic_dec(&sched_run_buckets[bucket], relaxed);
}
-void sched_share_decr(thread_t thread) {
- assert((thread->state & (TH_RUN|TH_IDLE)) != TH_RUN || thread->sched_mode != TH_MODE_TIMESHARE);
- assert(thread->SHARE_COUNT == 1);
- (void)hw_atomic_sub(&sched_share_count, 1);
- thread->SHARE_COUNT--;
+uint32_t
+sched_run_incr(thread_t thread)
+{
+ assert((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN);
+
+ uint32_t new_count = os_atomic_inc(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
+
+ sched_incr_bucket(thread->th_sched_bucket);
+
+ return new_count;
}
-/* TH_SFLAG_THROTTLED controls whether a thread has a background count when it has a run count and a share count */
+uint32_t
+sched_run_decr(thread_t thread)
+{
+ assert((thread->state & (TH_RUN | TH_IDLE)) != TH_RUN);
-void sched_background_incr(thread_t thread) {
- assert((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN);
- assert(thread->sched_mode == TH_MODE_TIMESHARE);
- assert((thread->sched_flags & TH_SFLAG_THROTTLED) == TH_SFLAG_THROTTLED);
+ sched_decr_bucket(thread->th_sched_bucket);
- assert(thread->BG_COUNT == 0);
- thread->BG_COUNT++;
- int val = hw_atomic_add(&sched_background_count, 1);
- assert(val >= 0);
+ uint32_t new_count = os_atomic_dec(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
- /* Always do the background change while holding a share count */
- assert(thread->SHARE_COUNT == 1);
+ return new_count;
}
-void sched_background_decr(thread_t thread) {
- if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN && thread->sched_mode == TH_MODE_TIMESHARE)
- assert((thread->sched_flags & TH_SFLAG_THROTTLED) != TH_SFLAG_THROTTLED);
- assert(thread->BG_COUNT == 1);
- int val = hw_atomic_sub(&sched_background_count, 1);
- thread->BG_COUNT--;
- assert(val >= 0);
- assert(thread->BG_COUNT == 0);
-
- /* Always do the background change while holding a share count */
- assert(thread->SHARE_COUNT == 1);
-}
+void
+sched_update_thread_bucket(thread_t thread)
+{
+ sched_bucket_t old_bucket = thread->th_sched_bucket;
+ sched_bucket_t new_bucket = TH_BUCKET_RUN;
+
+ switch (thread->sched_mode) {
+ case TH_MODE_FIXED:
+ case TH_MODE_REALTIME:
+ new_bucket = TH_BUCKET_FIXPRI;
+ break;
+
+ case TH_MODE_TIMESHARE:
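+ /*
+ * Bucket timeshare threads by base-pri band; the boundaries are
+ * BASEPRI_DEFAULT, BASEPRI_UTILITY and MAXPRI_THROTTLE (nominally
+ * 31, 20 and 4).
+ */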
+ if (thread->base_pri > BASEPRI_DEFAULT) {
+ new_bucket = TH_BUCKET_SHARE_FG;
+ } else if (thread->base_pri > BASEPRI_UTILITY) {
+ new_bucket = TH_BUCKET_SHARE_DF;
+ } else if (thread->base_pri > MAXPRI_THROTTLE) {
+ new_bucket = TH_BUCKET_SHARE_UT;
+ } else {
+ new_bucket = TH_BUCKET_SHARE_BG;
+ }
+ break;
+ default:
+ panic("unexpected mode: %d", thread->sched_mode);
+ break;
+ }
-void
-assert_thread_sched_count(thread_t thread) {
- /* Only 0 or 1 are acceptable values */
- assert(thread->BG_COUNT == 0 || thread->BG_COUNT == 1);
- assert(thread->SHARE_COUNT == 0 || thread->SHARE_COUNT == 1);
-
- /* BG is only allowed when you already have a share count */
- if (thread->BG_COUNT == 1)
- assert(thread->SHARE_COUNT == 1);
- if (thread->SHARE_COUNT == 0)
- assert(thread->BG_COUNT == 0);
-
- if ((thread->state & (TH_RUN|TH_IDLE)) != TH_RUN ||
- (thread->sched_mode != TH_MODE_TIMESHARE))
- assert(thread->SHARE_COUNT == 0);
-
- if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN &&
- (thread->sched_mode == TH_MODE_TIMESHARE))
- assert(thread->SHARE_COUNT == 1);
-
- if ((thread->state & (TH_RUN|TH_IDLE)) != TH_RUN ||
- (thread->sched_mode != TH_MODE_TIMESHARE) ||
- !(thread->sched_flags & TH_SFLAG_THROTTLED))
- assert(thread->BG_COUNT == 0);
-
- if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN &&
- (thread->sched_mode == TH_MODE_TIMESHARE) &&
- (thread->sched_flags & TH_SFLAG_THROTTLED))
- assert(thread->BG_COUNT == 1);
-}
+ if (old_bucket != new_bucket) {
+ thread->th_sched_bucket = new_bucket;
+ thread->pri_shift = sched_pri_shifts[new_bucket];
-#endif /* MACH_ASSERT */
+ if ((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN) {
+ sched_decr_bucket(old_bucket);
+ sched_incr_bucket(new_bucket);
+ }
+ }
+}
/*
* Set the thread's true scheduling mode
void
sched_set_thread_mode(thread_t thread, sched_mode_t new_mode)
{
- assert_thread_sched_count(thread);
assert(thread->runq == PROCESSOR_NULL);
- sched_mode_t old_mode = thread->sched_mode;
-
- thread->sched_mode = new_mode;
-
switch (new_mode) {
- case TH_MODE_FIXED:
- case TH_MODE_REALTIME:
- if (old_mode == TH_MODE_TIMESHARE) {
- if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN) {
- if (thread->sched_flags & TH_SFLAG_THROTTLED)
- sched_background_decr(thread);
-
- sched_share_decr(thread);
- }
- }
- break;
-
- case TH_MODE_TIMESHARE:
- if (old_mode != TH_MODE_TIMESHARE) {
- if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN) {
- sched_share_incr(thread);
-
- if (thread->sched_flags & TH_SFLAG_THROTTLED)
- sched_background_incr(thread);
- }
- }
- break;
-
- default:
- panic("unexpected mode: %d", new_mode);
- break;
+ case TH_MODE_FIXED:
+ case TH_MODE_REALTIME:
+ case TH_MODE_TIMESHARE:
+ break;
+
+ default:
+ panic("unexpected mode: %d", new_mode);
+ break;
}
- assert_thread_sched_count(thread);
+ thread->sched_mode = new_mode;
+
+ SCHED(update_thread_bucket)(thread);
}
/*
{
assert(reason & TH_SFLAG_DEMOTED_MASK);
assert((thread->sched_flags & reason) != reason);
- assert_thread_sched_count(thread);
- if (thread->policy_reset)
+ if (thread->policy_reset) {
return;
+ }
if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
/* Another demotion reason is already active */
thread_recompute_priority(thread);
- if (removed)
+ if (removed) {
thread_run_queue_reinsert(thread, SCHED_TAILQ);
-
- assert_thread_sched_count(thread);
+ }
}
/*
assert(thread->sched_mode == TH_MODE_TIMESHARE);
assert(thread->policy_reset == 0);
- assert_thread_sched_count(thread);
-
thread->sched_flags &= ~reason;
if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
thread_recompute_priority(thread);
- if (removed)
+ if (removed) {
thread_run_queue_reinsert(thread, SCHED_TAILQ);
+ }
}
/*
- * Set the thread to be categorized as 'background'
- * Called with thread mutex and thread lock held
+ * Promote thread to have a sched pri floor for a specific reason
+ *
+ * Promotion must not last past syscall boundary
+ * Clients must always pair promote and demote 1:1;
+ * handling nesting of the same promote reason is the client's responsibility.
*
- * TODO: Eventually, 'background' should be a true sched_mode.
+ * Called at splsched with thread locked
*/
void
-sched_set_thread_throttled(thread_t thread, boolean_t wants_throttle)
+sched_thread_promote_reason(thread_t thread,
+ uint32_t reason,
+ __kdebug_only uintptr_t trace_obj /* already unslid */)
{
- if (thread->policy_reset)
- return;
+ assert(reason & TH_SFLAG_PROMOTE_REASON_MASK);
+ assert((thread->sched_flags & reason) != reason);
- assert(((thread->sched_flags & TH_SFLAG_THROTTLED) ? TRUE : FALSE) != wants_throttle);
+ switch (reason) {
+ case TH_SFLAG_RW_PROMOTED:
+ KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_PROMOTE),
+ thread_tid(thread), thread->sched_pri,
+ thread->base_pri, trace_obj);
+ break;
+ case TH_SFLAG_WAITQ_PROMOTED:
+ KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAITQ_PROMOTE),
+ thread_tid(thread), thread->sched_pri,
+ thread->base_pri, trace_obj);
+ break;
+ case TH_SFLAG_EXEC_PROMOTED:
+ KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_EXEC_PROMOTE),
+ thread_tid(thread), thread->sched_pri,
+ thread->base_pri, trace_obj);
+ break;
+ }
- assert_thread_sched_count(thread);
+ thread->sched_flags |= reason;
- if (wants_throttle) {
- thread->sched_flags |= TH_SFLAG_THROTTLED;
- if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN && thread->sched_mode == TH_MODE_TIMESHARE) {
- sched_background_incr(thread);
- }
- } else {
- thread->sched_flags &= ~TH_SFLAG_THROTTLED;
- if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN && thread->sched_mode == TH_MODE_TIMESHARE) {
- sched_background_decr(thread);
- }
+ thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
+}
+
+/*
+ * End a specific promotion reason
+ * Demotes a thread back to its expected priority without the promotion in place
+ *
+ * Called at splsched with thread locked
+ */
+void
+sched_thread_unpromote_reason(thread_t thread,
+ uint32_t reason,
+ __kdebug_only uintptr_t trace_obj /* already unslid */)
+{
+ assert(reason & TH_SFLAG_PROMOTE_REASON_MASK);
+ assert((thread->sched_flags & reason) == reason);
+
+ switch (reason) {
+ case TH_SFLAG_RW_PROMOTED:
+ KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_DEMOTE),
+ thread_tid(thread), thread->sched_pri,
+ thread->base_pri, trace_obj);
+ break;
+ case TH_SFLAG_WAITQ_PROMOTED:
+ KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAITQ_DEMOTE),
+ thread_tid(thread), thread->sched_pri,
+ thread->base_pri, trace_obj);
+ break;
+ case TH_SFLAG_EXEC_PROMOTED:
+ KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_EXEC_DEMOTE),
+ thread_tid(thread), thread->sched_pri,
+ thread->base_pri, trace_obj);
+ break;
}
- assert_thread_sched_count(thread);
-}
+ thread->sched_flags &= ~reason;
+ thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
+}