X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/0a7de7458d150b5d4dffc935ba399be265ef0a1a..c3c9b80d004dbbfdf763edeb97968c6997e3b45b:/osfmk/kern/priority.c

diff --git a/osfmk/kern/priority.c b/osfmk/kern/priority.c
index 26c60c043..abdfa868e 100644
--- a/osfmk/kern/priority.c
+++ b/osfmk/kern/priority.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -76,6 +76,8 @@
 #include 
 #include 
 #include 
+#include 
+#include 
 
 #ifdef CONFIG_MACH_APPROXIMATE_TIME
 #include 	/* for commpage_update_mach_approximate_time */
@@ -85,8 +87,6 @@
 #include 
 #endif /* MONOTONIC */
 
-static void sched_update_thread_bucket(thread_t thread);
-
 /*
  * thread_quantum_expire:
  *
@@ -110,7 +110,7 @@ thread_quantum_expire(
 
     KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_QUANTUM_EXPIRED) | DBG_FUNC_START, 0, 0, 0, 0, 0);
 
-    SCHED_STATS_QUANTUM_TIMER_EXPIRATION(processor);
+    SCHED_STATS_INC(quantum_timer_expirations);
 
     /*
      * We bill CPU time to both the individual thread and its task.
@@ -131,12 +131,12 @@ thread_quantum_expire(
             (thread->quantum_remaining - thread->t_deduct_bank_ledger_time));
     }
     thread->t_deduct_bank_ledger_time = 0;
-
     ctime = mach_absolute_time();
 
 #ifdef CONFIG_MACH_APPROXIMATE_TIME
     commpage_update_mach_approximate_time(ctime);
 #endif
+    sched_update_pset_avg_execution_time(processor->processor_set, thread->quantum_remaining, ctime, thread->th_sched_bucket);
 
 #if MONOTONIC
     mt_sched_update(thread);
@@ -156,6 +156,7 @@ thread_quantum_expire(
      */
     if ((thread->sched_mode == TH_MODE_REALTIME || thread->sched_mode == TH_MODE_FIXED) &&
         !(thread->sched_flags & TH_SFLAG_PROMOTED) &&
+        !(thread->kern_promotion_schedpri != 0) &&
         !(thread->sched_flags & TH_SFLAG_PROMOTE_REASON_MASK) &&
         !(thread->options & TH_OPT_SYSTEM_CRITICAL)) {
         uint64_t new_computation;
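
Annotation: the guard added in the hunk above is written as a double negative;
it is equivalent to requiring kern_promotion_schedpri == 0, i.e. no kernel
promotion floor may be in effect before an over-running realtime/fixed thread
is demoted to timeshare. A minimal sketch of the combined predicate, assuming
the xnu-internal thread fields and flags used in the hunk:

    /* Sketch only: mirrors the demotion condition in thread_quantum_expire(). */
    static bool
    quantum_expiry_may_demote(thread_t thread)
    {
        return (thread->sched_mode == TH_MODE_REALTIME ||
               thread->sched_mode == TH_MODE_FIXED) &&
            !(thread->sched_flags & TH_SFLAG_PROMOTED) &&
            thread->kern_promotion_schedpri == 0 &&
            !(thread->sched_flags & TH_SFLAG_PROMOTE_REASON_MASK) &&
            !(thread->options & TH_OPT_SYSTEM_CRITICAL);
    }
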
@@ -200,8 +201,8 @@ thread_quantum_expire(
      * during privilege transitions, synthesize an event now.
      */
     if (!thread->precise_user_kernel_time) {
-        timer_update(PROCESSOR_DATA(processor, current_state), ctime);
-        timer_update(PROCESSOR_DATA(processor, thread_timer), ctime);
+        timer_update(processor->current_state, ctime);
+        timer_update(processor->thread_timer, ctime);
         timer_update(&thread->runnable_timer, ctime);
     }
 
@@ -232,8 +233,7 @@ thread_quantum_expire(
     ast_propagate(thread);
 
     thread_unlock(thread);
-
-    timer_call_quantum_timer_enter(&processor->quantum_timer, thread,
+    running_timer_enter(processor, RUNNING_TIMER_QUANTUM, thread,
         processor->quantum_end, ctime);
 
     /* Tell platform layer that we are still running this thread */
@@ -278,7 +278,11 @@ sched_set_thread_base_priority(thread_t thread, int priority)
     }
 
     int old_base_pri = thread->base_pri;
-    thread->base_pri = priority;
+    thread->req_base_pri = (int16_t)priority;
+    if (thread->sched_flags & TH_SFLAG_BASE_PRI_FROZEN) {
+        priority = MAX(priority, old_base_pri);
+    }
+    thread->base_pri = (int16_t)priority;
 
     if ((thread->state & TH_RUN) == TH_RUN) {
         assert(thread->last_made_runnable_time != THREAD_NOT_RUNNABLE);
@@ -301,11 +305,49 @@ sched_set_thread_base_priority(thread_t thread, int priority)
         machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE,
             ctime, PERFCONTROL_CALLOUT_WAKE_UNSAFE, thread);
     }
-    sched_update_thread_bucket(thread);
+#if !CONFIG_SCHED_CLUTCH
+    /* For the clutch scheduler, this operation is done in set_sched_pri() */
+    SCHED(update_thread_bucket)(thread);
+#endif /* !CONFIG_SCHED_CLUTCH */
 
     thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
 }
 
+/*
+ * sched_set_kernel_thread_priority:
+ *
+ * Set the absolute base priority of the thread
+ * and reset its scheduled priority.
+ *
+ * Called with the thread unlocked.
+ */
+void
+sched_set_kernel_thread_priority(thread_t thread, int new_priority)
+{
+    spl_t s = splsched();
+
+    thread_lock(thread);
+
+    assert(thread->sched_mode != TH_MODE_REALTIME);
+    assert(thread->effective_policy.thep_qos == THREAD_QOS_UNSPECIFIED);
+
+    if (new_priority > thread->max_priority) {
+        new_priority = thread->max_priority;
+    }
+#if !defined(XNU_TARGET_OS_OSX)
+    if (new_priority < MAXPRI_THROTTLE) {
+        new_priority = MAXPRI_THROTTLE;
+    }
+#endif /* !defined(XNU_TARGET_OS_OSX) */
+
+    thread->importance = new_priority - thread->task_priority;
+
+    sched_set_thread_base_priority(thread, new_priority);
+
+    thread_unlock(thread);
+    splx(s);
+}
+
 /*
  * thread_recompute_sched_pri:
  *
@@ -327,10 +369,10 @@ thread_recompute_sched_pri(thread_t thread, set_sched_pri_options_t options)
     uint32_t sched_flags = thread->sched_flags;
     sched_mode_t sched_mode = thread->sched_mode;
 
-    int priority = thread->base_pri;
+    int16_t priority = thread->base_pri;
 
     if (sched_mode == TH_MODE_TIMESHARE) {
-        priority = SCHED(compute_timeshare_priority)(thread);
+        priority = (int16_t)SCHED(compute_timeshare_priority)(thread);
     }
 
     if (sched_flags & TH_SFLAG_DEPRESS) {
@@ -342,6 +384,14 @@ thread_recompute_sched_pri(thread_t thread, set_sched_pri_options_t options)
         priority = DEPRESSPRI;
     }
 
+    if (thread->kern_promotion_schedpri > 0) {
+        priority = MAX(priority, thread->kern_promotion_schedpri);
+
+        if (sched_mode != TH_MODE_REALTIME) {
+            priority = MIN(priority, MAXPRI_PROMOTE);
+        }
+    }
+
     if (sched_flags & TH_SFLAG_PROMOTED) {
         priority = MAX(priority, thread->promotion_priority);
 
@@ -377,6 +427,9 @@ sched_default_quantum_expire(thread_t thread __unused)
      */
 }
 
+int smt_timeshare_enabled = 1;
+int smt_sched_bonus_16ths = 8;
+
 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
 
 /*
@@ -407,11 +460,24 @@ lightweight_update_priority(thread_t thread)
          *	resources.
          */
         if (thread->pri_shift < INT8_MAX) {
-            thread->sched_usage += delta;
+            if (thread_no_smt(thread) && smt_timeshare_enabled) {
+                thread->sched_usage += (delta + ((delta * smt_sched_bonus_16ths) >> 4));
+            } else {
+                thread->sched_usage += delta;
+            }
         }
 
         thread->cpu_delta += delta;
 
+#if CONFIG_SCHED_CLUTCH
+        /*
+         * Update the CPU usage for the thread group to which the thread belongs.
+         * The implementation assumes that the thread ran for the entire delta
+         * as part of the same thread group.
+         */
+        sched_clutch_cpu_usage_update(thread, delta);
+#endif /* CONFIG_SCHED_CLUTCH */
+
         priority = sched_compute_timeshare_priority(thread);
 
         if (priority != thread->sched_pri) {
@@ -427,17 +493,40 @@ lightweight_update_priority(thread_t thread)
  *	is usage = (usage >> shift1) +/- (usage >> abs(shift2)) where the
  *	+/- is determined by the sign of shift 2.
  */
-struct shift_data {
-    int shift1;
-    int shift2;
-};
-
-#define SCHED_DECAY_TICKS 32
-static struct shift_data sched_decay_shifts[SCHED_DECAY_TICKS] = {
-    {1, 1}, {1, 3}, {1, -3}, {2, -7}, {3, 5}, {3, -5}, {4, -8}, {5, 7},
-    {5, -7}, {6, -10}, {7, 10}, {7, -9}, {8, -11}, {9, 12}, {9, -11}, {10, -13},
-    {11, 14}, {11, -13}, {12, -15}, {13, 17}, {13, -15}, {14, -17}, {15, 19}, {16, 18},
-    {16, -19}, {17, 22}, {18, 20}, {18, -20}, {19, 26}, {20, 22}, {20, -22}, {21, -27}
+const struct shift_data sched_decay_shifts[SCHED_DECAY_TICKS] = {
+    { .shift1 = 1, .shift2 = 1 },
+    { .shift1 = 1, .shift2 = 3 },
+    { .shift1 = 1, .shift2 = -3 },
+    { .shift1 = 2, .shift2 = -7 },
+    { .shift1 = 3, .shift2 = 5 },
+    { .shift1 = 3, .shift2 = -5 },
+    { .shift1 = 4, .shift2 = -8 },
+    { .shift1 = 5, .shift2 = 7 },
+    { .shift1 = 5, .shift2 = -7 },
+    { .shift1 = 6, .shift2 = -10 },
+    { .shift1 = 7, .shift2 = 10 },
+    { .shift1 = 7, .shift2 = -9 },
+    { .shift1 = 8, .shift2 = -11 },
+    { .shift1 = 9, .shift2 = 12 },
+    { .shift1 = 9, .shift2 = -11 },
+    { .shift1 = 10, .shift2 = -13 },
+    { .shift1 = 11, .shift2 = 14 },
+    { .shift1 = 11, .shift2 = -13 },
+    { .shift1 = 12, .shift2 = -15 },
+    { .shift1 = 13, .shift2 = 17 },
+    { .shift1 = 13, .shift2 = -15 },
+    { .shift1 = 14, .shift2 = -17 },
+    { .shift1 = 15, .shift2 = 19 },
+    { .shift1 = 16, .shift2 = 18 },
+    { .shift1 = 16, .shift2 = -19 },
+    { .shift1 = 17, .shift2 = 22 },
+    { .shift1 = 18, .shift2 = 20 },
+    { .shift1 = 18, .shift2 = -20 },
+    { .shift1 = 19, .shift2 = 26 },
+    { .shift1 = 20, .shift2 = 22 },
+    { .shift1 = 20, .shift2 = -22 },
+    { .shift1 = 21, .shift2 = -27 }
 };
 
 /*
@@ -447,18 +536,26 @@ static struct shift_data sched_decay_shifts[SCHED_DECAY_TICKS] = {
  */
 extern int sched_pri_decay_band_limit;
 
-#ifdef CONFIG_EMBEDDED
+
+/* Only use the decay floor logic on non-macOS and non-clutch schedulers */
+#if !defined(XNU_TARGET_OS_OSX) && !CONFIG_SCHED_CLUTCH
 int
 sched_compute_timeshare_priority(thread_t thread)
 {
-    int decay_amount = (thread->sched_usage >> thread->pri_shift);
+    int decay_amount;
     int decay_limit = sched_pri_decay_band_limit;
 
     if (thread->base_pri > BASEPRI_FOREGROUND) {
         decay_limit += (thread->base_pri - BASEPRI_FOREGROUND);
     }
 
+    if (thread->pri_shift == INT8_MAX) {
+        decay_amount = 0;
+    } else {
+        decay_amount = (thread->sched_usage >> thread->pri_shift);
+    }
+
     if (decay_amount > decay_limit) {
         decay_amount = decay_limit;
     }
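
Annotation: in the usage-accounting hunks above, a thread that has opted out of
SMT sharing (thread_no_smt()) is billed extra CPU time. With the default
smt_sched_bonus_16ths = 8, the surcharge is 8/16 of the measured delta, i.e.
the thread is charged 1.5x what it actually ran. A self-contained sketch of
the arithmetic:

    #include <stdint.h>

    /* Sketch only: mirrors delta + ((delta * smt_sched_bonus_16ths) >> 4). */
    static uint64_t
    smt_adjusted_usage(uint64_t delta, uint64_t bonus_16ths)
    {
        return delta + ((delta * bonus_16ths) >> 4);
    }
    /* smt_adjusted_usage(1000, 8) == 1500 */

A pri_shift of INT8_MAX serves as a sentinel meaning "usage does not decay this
thread's priority", which is why both sched_compute_timeshare_priority variants
now skip the shift in that case.
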
@@ -479,13 +576,17 @@ sched_compute_timeshare_priority(thread_t thread)
     return priority;
 }
 
-#else /* CONFIG_EMBEDDED */
+#else /* !defined(XNU_TARGET_OS_OSX) && !CONFIG_SCHED_CLUTCH */
 
 int
 sched_compute_timeshare_priority(thread_t thread)
 {
     /* start with base priority */
-    int priority = thread->base_pri - (thread->sched_usage >> thread->pri_shift);
+    int priority = thread->base_pri;
+
+    if (thread->pri_shift != INT8_MAX) {
+        priority -= (thread->sched_usage >> thread->pri_shift);
+    }
 
     if (priority < MINPRI_USER) {
         priority = MINPRI_USER;
@@ -496,7 +597,7 @@ sched_compute_timeshare_priority(thread_t thread)
     return priority;
 }
 
-#endif /* CONFIG_EMBEDDED */
+#endif /* !defined(XNU_TARGET_OS_OSX) && !CONFIG_SCHED_CLUTCH */
 
 /*
  *	can_update_priority
@@ -550,13 +651,26 @@ update_priority(
      *	determine if the system was in a contended state.
      */
     if (thread->pri_shift < INT8_MAX) {
-        thread->sched_usage += delta;
+        if (thread_no_smt(thread) && smt_timeshare_enabled) {
+            thread->sched_usage += (delta + ((delta * smt_sched_bonus_16ths) >> 4));
+        } else {
+            thread->sched_usage += delta;
+        }
     }
 
     thread->cpu_usage += delta + thread->cpu_delta;
     thread->cpu_delta = 0;
 
-    struct shift_data *shiftp = &sched_decay_shifts[ticks];
+#if CONFIG_SCHED_CLUTCH
+    /*
+     * Update the CPU usage for the thread group to which the thread belongs.
+     * The implementation assumes that the thread ran for the entire delta
+     * as part of the same thread group.
+     */
+    sched_clutch_cpu_usage_update(thread, delta);
+#endif /* CONFIG_SCHED_CLUTCH */
+
+    const struct shift_data *shiftp = &sched_decay_shifts[ticks];
 
     if (shiftp->shift2 > 0) {
         thread->cpu_usage = (thread->cpu_usage >> shiftp->shift1) +
@@ -589,7 +703,11 @@ update_priority(
      * values. The updated pri_shift would be used to calculate the
      * new priority of the thread.
      */
+#if CONFIG_SCHED_CLUTCH
+    thread->pri_shift = sched_clutch_thread_pri_shift(thread, thread->th_sched_bucket);
+#else /* CONFIG_SCHED_CLUTCH */
     thread->pri_shift = sched_pri_shifts[thread->th_sched_bucket];
+#endif /* CONFIG_SCHED_CLUTCH */
 
     /* Recompute scheduled priority if appropriate. */
     if (thread->sched_mode == TH_MODE_TIMESHARE) {
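
Annotation: the rewritten sched_decay_shifts table above drives the per-tick
usage decay described in the comment preceding it: usage = (usage >> shift1)
+/- (usage >> abs(shift2)), a shift-only approximation of (5/8)^n (entry 1
gives 1/2 + 1/8 = 5/8; entry 2 gives 1/2 - 1/8 = 0.375, close to (5/8)^2).
A self-contained sketch of one decay step:

    #include <stdint.h>

    /* Sketch only: applies one sched_decay_shifts entry to a usage value. */
    static uint32_t
    decay_usage(uint32_t usage, int shift1, int shift2)
    {
        if (shift2 > 0) {
            return (usage >> shift1) + (usage >> shift2);
        }
        return (usage >> shift1) - (usage >> -shift2);
    }
    /* decay_usage(1024, 1, 3) == 640, i.e. 1024 * 5/8 */
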
@@ -603,9 +721,13 @@ update_priority(
 /*
  *	TH_BUCKET_RUN is a count of *all* runnable non-idle threads.
  *	Each other bucket is a count of the runnable non-idle threads
- *	with that property.
+ *	with that property. All updates to these counts should be
+ *	performed with os_atomic_* operations.
+ *
+ *	For the clutch scheduler, this global bucket is used only for
+ *	keeping the total global run count.
  */
-volatile uint32_t sched_run_buckets[TH_BUCKET_MAX];
+uint32_t sched_run_buckets[TH_BUCKET_MAX];
 
 static void
 sched_incr_bucket(sched_bucket_t bucket)
@@ -613,7 +735,7 @@ sched_incr_bucket(sched_bucket_t bucket)
     assert(bucket >= TH_BUCKET_FIXPRI &&
         bucket <= TH_BUCKET_SHARE_BG);
 
-    hw_atomic_add(&sched_run_buckets[bucket], 1);
+    os_atomic_inc(&sched_run_buckets[bucket], relaxed);
 }
 
 static void
@@ -622,19 +744,37 @@ sched_decr_bucket(sched_bucket_t bucket)
     assert(bucket >= TH_BUCKET_FIXPRI &&
         bucket <= TH_BUCKET_SHARE_BG);
 
-    assert(sched_run_buckets[bucket] > 0);
+    assert(os_atomic_load(&sched_run_buckets[bucket], relaxed) > 0);
+
+    os_atomic_dec(&sched_run_buckets[bucket], relaxed);
+}
+
+static void
+sched_add_bucket(sched_bucket_t bucket, uint8_t run_weight)
+{
+    assert(bucket >= TH_BUCKET_FIXPRI &&
+        bucket <= TH_BUCKET_SHARE_BG);
 
-    hw_atomic_sub(&sched_run_buckets[bucket], 1);
+    os_atomic_add(&sched_run_buckets[bucket], run_weight, relaxed);
 }
 
-/* TH_RUN & !TH_IDLE controls whether a thread has a run count */
+static void
+sched_sub_bucket(sched_bucket_t bucket, uint8_t run_weight)
+{
+    assert(bucket >= TH_BUCKET_FIXPRI &&
+        bucket <= TH_BUCKET_SHARE_BG);
+
+    assert(os_atomic_load(&sched_run_buckets[bucket], relaxed) > 0);
+
+    os_atomic_sub(&sched_run_buckets[bucket], run_weight, relaxed);
+}
+
 uint32_t
 sched_run_incr(thread_t thread)
 {
     assert((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN);
 
-    uint32_t new_count = hw_atomic_add(&sched_run_buckets[TH_BUCKET_RUN], 1);
+    uint32_t new_count = os_atomic_inc(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
 
     sched_incr_bucket(thread->th_sched_bucket);
 
@@ -648,12 +788,41 @@ sched_run_decr(thread_t thread)
 
     sched_decr_bucket(thread->th_sched_bucket);
 
-    uint32_t new_count = hw_atomic_sub(&sched_run_buckets[TH_BUCKET_RUN], 1);
+    uint32_t new_count = os_atomic_dec(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
 
     return new_count;
 }
 
-static void
+uint32_t
+sched_smt_run_incr(thread_t thread)
+{
+    assert((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN);
+
+    uint8_t run_weight = (thread_no_smt(thread) && smt_timeshare_enabled) ? 2 : 1;
+    thread->sched_saved_run_weight = run_weight;
+
+    uint32_t new_count = os_atomic_add(&sched_run_buckets[TH_BUCKET_RUN], run_weight, relaxed);
+
+    sched_add_bucket(thread->th_sched_bucket, run_weight);
+
+    return new_count;
+}
+
+uint32_t
+sched_smt_run_decr(thread_t thread)
+{
+    assert((thread->state & (TH_RUN | TH_IDLE)) != TH_RUN);
+
+    uint8_t run_weight = thread->sched_saved_run_weight;
+
+    sched_sub_bucket(thread->th_sched_bucket, run_weight);
+
+    uint32_t new_count = os_atomic_sub(&sched_run_buckets[TH_BUCKET_RUN], run_weight, relaxed);
+
+    return new_count;
+}
+
+void
 sched_update_thread_bucket(thread_t thread)
 {
     sched_bucket_t old_bucket = thread->th_sched_bucket;
@@ -693,6 +862,46 @@ sched_update_thread_bucket(thread_t thread)
     }
 }
 
+void
+sched_smt_update_thread_bucket(thread_t thread)
+{
+    sched_bucket_t old_bucket = thread->th_sched_bucket;
+    sched_bucket_t new_bucket = TH_BUCKET_RUN;
+
+    switch (thread->sched_mode) {
+    case TH_MODE_FIXED:
+    case TH_MODE_REALTIME:
+        new_bucket = TH_BUCKET_FIXPRI;
+        break;
+
+    case TH_MODE_TIMESHARE:
+        if (thread->base_pri > BASEPRI_DEFAULT) {
+            new_bucket = TH_BUCKET_SHARE_FG;
+        } else if (thread->base_pri > BASEPRI_UTILITY) {
+            new_bucket = TH_BUCKET_SHARE_DF;
+        } else if (thread->base_pri > MAXPRI_THROTTLE) {
+            new_bucket = TH_BUCKET_SHARE_UT;
+        } else {
+            new_bucket = TH_BUCKET_SHARE_BG;
+        }
+        break;
+
+    default:
+        panic("unexpected mode: %d", thread->sched_mode);
+        break;
+    }
+
+    if (old_bucket != new_bucket) {
+        thread->th_sched_bucket = new_bucket;
+        thread->pri_shift = sched_pri_shifts[new_bucket];
+
+        if ((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN) {
+            sched_sub_bucket(old_bucket, thread->sched_saved_run_weight);
+            sched_add_bucket(new_bucket, thread->sched_saved_run_weight);
+        }
+    }
+}
+
 /*
  * Set the thread's true scheduling mode
  * Called with thread mutex and thread locked
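
Annotation: the SMT-aware variants above generalize the run counters from
increments of 1 to a per-thread run weight: a runnable thread that demands a
core to itself (thread_no_smt(), with smt_timeshare_enabled) counts as 2, so
the load used to derive pri_shift treats it as occupying both hyperthreads of
a core. The weight is captured in sched_saved_run_weight at incr time and
reused at decr time, keeping the counters balanced even if the thread's no-SMT
state changes while it is runnable. A minimal sketch of the weight choice,
assuming the xnu-internal helpers named above:

    /* Sketch only: run weight used by sched_smt_run_incr(). */
    static uint8_t
    sched_run_weight(thread_t thread)
    {
        return (thread_no_smt(thread) && smt_timeshare_enabled) ? 2 : 1;
    }
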
@@ -716,9 +925,21 @@ sched_set_thread_mode(thread_t thread, sched_mode_t new_mode)
         break;
     }
 
+#if CONFIG_SCHED_AUTO_JOIN
+    /*
+     * Realtime threads might have auto-joined a work interval based on
+     * make runnable relationships. If such an RT thread is now being demoted
+     * to non-RT, unjoin the thread from the work interval.
+     */
+    if ((thread->sched_flags & TH_SFLAG_THREAD_GROUP_AUTO_JOIN) && (new_mode != TH_MODE_REALTIME)) {
+        assert((thread->sched_mode == TH_MODE_REALTIME) || (thread->th_work_interval_flags & TH_WORK_INTERVAL_FLAGS_AUTO_JOIN_LEAK));
+        work_interval_auto_join_demote(thread);
+    }
+#endif /* CONFIG_SCHED_AUTO_JOIN */
+
     thread->sched_mode = new_mode;
 
-    sched_update_thread_bucket(thread);
+    SCHED(update_thread_bucket)(thread);
 }
 
 /*
@@ -789,95 +1010,6 @@ sched_thread_mode_undemote(thread_t thread, uint32_t reason)
     }
 }
 
-/*
- * Promote thread to a specific priority
- *
- * Promotion must not last past syscall boundary
- * Clients must always pair promote and unpromote 1:1
- *
- * Called at splsched with thread locked
- */
-void
-sched_thread_promote_to_pri(thread_t thread,
-    int priority,
-    __kdebug_only uintptr_t trace_obj /* already unslid */)
-{
-    assert((thread->sched_flags & TH_SFLAG_PROMOTED) != TH_SFLAG_PROMOTED);
-    assert(thread->promotion_priority == 0);
-    assert(priority <= MAXPRI_PROMOTE);
-    assert(priority > 0);
-
-    KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PROMOTED),
-        thread_tid(thread), trace_obj, priority);
-
-    thread->sched_flags |= TH_SFLAG_PROMOTED;
-    thread->promotion_priority = priority;
-
-    thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
-}
-
-
-/*
- * Update a pre-existing priority promotion to have a higher priority floor
- * Priority can only go up from the previous value
- * Update must occur while a promotion is active
- *
- * Called at splsched with thread locked
- */
-void
-sched_thread_update_promotion_to_pri(thread_t thread,
-    int priority,
-    __kdebug_only uintptr_t trace_obj /* already unslid */)
-{
-    assert(thread->promotions > 0);
-    assert((thread->sched_flags & TH_SFLAG_PROMOTED) == TH_SFLAG_PROMOTED);
-    assert(thread->promotion_priority > 0);
-    assert(priority <= MAXPRI_PROMOTE);
-
-    if (thread->promotion_priority < priority) {
-        KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PROMOTED_UPDATE),
-            thread_tid(thread), trace_obj, priority);
-
-        thread->promotion_priority = priority;
-        thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
-    }
-}
-
-/*
- * End a priority promotion
- * Demotes a thread back to its expected priority without the promotion in place
- *
- * Called at splsched with thread locked
- */
-void
-sched_thread_unpromote(thread_t thread,
-    __kdebug_only uintptr_t trace_obj /* already unslid */)
-{
-    assert((thread->sched_flags & TH_SFLAG_PROMOTED) == TH_SFLAG_PROMOTED);
-    assert(thread->promotion_priority > 0);
-
-    KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_UNPROMOTED),
-        thread_tid(thread), trace_obj, 0);
-
-    thread->sched_flags &= ~TH_SFLAG_PROMOTED;
-    thread->promotion_priority = 0;
-
-    thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
-}
-
-/* called with thread locked */
-void
-assert_promotions_invariant(thread_t thread)
-{
-    if (thread->promotions > 0) {
-        assert((thread->sched_flags & TH_SFLAG_PROMOTED) == TH_SFLAG_PROMOTED);
-    }
-
-    if (thread->promotions == 0) {
-        assert((thread->sched_flags & TH_SFLAG_PROMOTED) != TH_SFLAG_PROMOTED);
-    }
-}
-
 /*
  * Promote thread to have a sched pri floor for a specific reason
  *
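
Annotation: the final hunk removes the exact-priority promotion entry points
(sched_thread_promote_to_pri and friends). The kern_promotion_schedpri field
introduced earlier in this diff appears to cover similar ground: it is
consulted in thread_recompute_sched_pri (hunk @@ -342,6 +384,14) and in the
quantum-expiry demotion guard. A minimal sketch of how that floor folds into
the recomputed priority, assuming the xnu-internal MAXPRI_PROMOTE constant and
sched_mode_t type:

    /* Sketch only: mirrors the kern_promotion_schedpri clause added to
     * thread_recompute_sched_pri(). */
    static int16_t
    apply_kern_promotion_floor(int16_t priority, int16_t floor, sched_mode_t mode)
    {
        if (floor > 0) {
            priority = MAX(priority, floor);
            if (mode != TH_MODE_REALTIME) {
                /* floor-based promotions never exceed MAXPRI_PROMOTE */
                priority = MIN(priority, MAXPRI_PROMOTE);
            }
        }
        return priority;
    }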