X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/b0d623f7f2ae71ed96e60569f61f9a9a27016e80..c7d2c2c6ee645e10cbccdd01c6191873ec77239d:/osfmk/kern/sched_average.c

diff --git a/osfmk/kern/sched_average.c b/osfmk/kern/sched_average.c
index e20ddff73..a23ef953f 100644
--- a/osfmk/kern/sched_average.c
+++ b/osfmk/kern/sched_average.c
@@ -68,10 +68,14 @@
 #include
 #include
 #include
+#if CONFIG_TELEMETRY
+#include
+#endif
 
 uint32_t	avenrun[3] = {0, 0, 0};
 uint32_t	mach_factor[3] = {0, 0, 0};
 
+#if defined(CONFIG_SCHED_TIMESHARE_CORE)
 /*
  * Values are scaled by LOAD_SCALE, defined in processor_info.h
  */
@@ -87,33 +91,45 @@ static uint32_t fract[3] = {
 #undef base
 #undef frac
 
+#endif /* CONFIG_SCHED_TIMESHARE_CORE */
+
 static unsigned int		sched_nrun;
 
 typedef void	(*sched_avg_comp_t)(
 					void			*param);
 
-#define SCHED_AVG_SECS(n)	((n) << SCHED_TICK_SHIFT)
-
 static struct sched_average {
 	sched_avg_comp_t	comp;
-	void			*param;
-	int			period;
-	int			tick;
+	void			*param;
+	int			period;	/* in seconds */
+	uint64_t		deadline;
 } sched_average[] = {
-	{ compute_averunnable, &sched_nrun, SCHED_AVG_SECS(5), 0 },
-	{ compute_stack_target, NULL, SCHED_AVG_SECS(5), 1 },
-	{ compute_memory_pressure, NULL, SCHED_AVG_SECS(1), 0 },
+	{ compute_averunnable, &sched_nrun, 5, 0 },
+	{ compute_stack_target, NULL, 5, 1 },
+	{ compute_memory_pressure, NULL, 1, 0 },
+	{ compute_zone_gc_throttle, NULL, 60, 0 },
+	{ compute_pageout_gc_throttle, NULL, 1, 0 },
+	{ compute_pmap_gc_throttle, NULL, 60, 0 },
+#if CONFIG_TELEMETRY
+	{ compute_telemetry, NULL, 1, 0 },
+#endif
 	{ NULL, NULL, 0, 0 }
 };
 
 typedef struct sched_average	*sched_average_t;
 
+/* The "stdelta" parameter represents the number of scheduler maintenance
+ * "ticks" that have elapsed since the last invocation, subject to
+ * integer division imprecision.
+ */
+
 void
-compute_averages(void)
+compute_averages(uint64_t stdelta)
 {
-	int		ncpus, nthreads, nshared;
-	uint32_t	factor_now, average_now, load_now = 0;
+	int		ncpus, nthreads, nshared, nbackground, nshared_non_bg;
+	uint32_t	factor_now, average_now, load_now = 0, background_load_now = 0, combined_fgbg_load_now = 0;
 	sched_average_t	avg;
+	uint64_t	abstime, index;
 
 	/*
 	 *	Retrieve counts, ignoring
@@ -122,6 +138,7 @@ compute_averages(void)
 	ncpus = processor_avail_count;
 	nthreads = sched_run_count - 1;
 	nshared = sched_share_count;
+	nbackground = sched_background_count;
 
 	/*
 	 *	Load average and mach factor calculations for
@@ -134,26 +151,67 @@ compute_averages(void)
 	else
 		factor_now = (ncpus - nthreads) * LOAD_SCALE;
 
-	sched_mach_factor = ((sched_mach_factor << 2) + factor_now) / 5;
-	sched_load_average = ((sched_load_average << 2) + average_now) / 5;
-
+	/* For those statistics that formerly relied on being recomputed
+	 * on timer ticks, advance by the approximate number of corresponding
+	 * elapsed intervals, thus compensating for potential idle intervals.
+	 */
+	for (index = 0; index < stdelta; index++) {
+		sched_mach_factor = ((sched_mach_factor << 2) + factor_now) / 5;
+		sched_load_average = ((sched_load_average << 2) + average_now) / 5;
+	}
 	/*
-	 *	Compute the timeshare priority
-	 *	conversion factor based on loading.
+	 * Compute the timeshare priority conversion factor based on loading.
+	 * Because our counters may be incremented and accessed
+	 * concurrently with respect to each other, we may have
+	 * windows where the invariant nthreads >= nshared >= nbackground
+	 * is broken, so truncate values in these cases.
 	 */
+
 	if (nshared > nthreads)
 		nshared = nthreads;
 
-	if (nshared > ncpus) {
+	if (nbackground > nshared)
+		nbackground = nshared;
+
+	nshared_non_bg = nshared - nbackground;
+
+	if (nshared_non_bg > ncpus) {
 		if (ncpus > 1)
-			load_now = nshared / ncpus;
+			load_now = nshared_non_bg / ncpus;
 		else
-			load_now = nshared;
+			load_now = nshared_non_bg;
 
 		if (load_now > NRQS - 1)
 			load_now = NRQS - 1;
 	}
 
+	if (nbackground > ncpus) {
+		if (ncpus > 1)
+			background_load_now = nbackground / ncpus;
+		else
+			background_load_now = nbackground;
+
+		if (background_load_now > NRQS - 1)
+			background_load_now = NRQS - 1;
+	}
+
+	if (nshared > ncpus) {
+		if (ncpus > 1)
+			combined_fgbg_load_now = nshared / ncpus;
+		else
+			combined_fgbg_load_now = nshared;
+
+		if (combined_fgbg_load_now > NRQS - 1)
+			combined_fgbg_load_now = NRQS - 1;
+	}
+
+	/*
+	 * Sample total running threads.
+	 */
+	sched_nrun = nthreads;
+
+#if defined(CONFIG_SCHED_TIMESHARE_CORE)
+
 	/*
 	 *	The conversion factor consists of
 	 *	two components: a fixed value based
@@ -166,16 +224,14 @@ compute_averages(void)
 	 *	are discarded.
 	 */
 	sched_pri_shift = sched_fixed_shift - sched_load_shifts[load_now];
-
-	/*
-	 *	Sample total running threads.
-	 */
-	sched_nrun = nthreads;
+	sched_background_pri_shift = sched_fixed_shift - sched_load_shifts[background_load_now];
+	sched_combined_fgbg_pri_shift = sched_fixed_shift - sched_load_shifts[combined_fgbg_load_now];
 
 	/*
 	 *	Compute old-style Mach load averages.
 	 */
-	{
+
+	for (index = 0; index < stdelta; index++) {
 		register int	i;
 
 		for (i = 0; i < 3; i++) {
@@ -186,14 +242,24 @@ compute_averages(void)
 				(average_now * (LOAD_SCALE - fract[i]))) / LOAD_SCALE;
 		}
 	}
+#endif /* CONFIG_SCHED_TIMESHARE_CORE */
 
 	/*
 	 *	Compute averages in other components.
 	 */
+	abstime = mach_absolute_time();
 	for (avg = sched_average; avg->comp != NULL; ++avg) {
-		if (++avg->tick >= avg->period) {
-			(*avg->comp)(avg->param);
-			avg->tick = 0;
+		if (abstime >= avg->deadline) {
+			uint64_t period_abs = (avg->period * sched_one_second_interval);
+			uint64_t ninvokes = 1;
+
+			ninvokes += (abstime - avg->deadline) / period_abs;
+			ninvokes = MIN(ninvokes, SCHED_TICK_MAX_DELTA);
+
+			for (index = 0; index < ninvokes; index++) {
+				(*avg->comp)(avg->param);
+			}
+			avg->deadline = abstime + period_abs;
 		}
 	}
 }
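What follows is a minimal user-space sketch, not xnu source, of the deadline-based catch-up scheme the last hunk introduces: instead of bumping avg->tick once per scheduler tick, each entry carries an absolute-time deadline, and when the maintenance path runs late (for example after an idle stretch) the callback is invoked once per elapsed period, clamped to a maximum, before the deadline is rearmed. The names demo_average, demo_run, demo_compute, ONE_SECOND and MAX_CATCHUP are invented for illustration; they stand in for xnu's sched_average table, sched_one_second_interval and SCHED_TICK_MAX_DELTA, and the clock value is passed in rather than read from mach_absolute_time().

/*
 * Illustrative sketch only; names are invented and do not exist in xnu.
 */
#include <stdio.h>
#include <stdint.h>

#define ONE_SECOND   1000ULL   /* pretend absolute-time units per second */
#define MAX_CATCHUP  8ULL      /* stand-in for a SCHED_TICK_MAX_DELTA-style clamp */
#define MIN(a, b)    ((a) < (b) ? (a) : (b))

typedef void (*demo_comp_t)(void *param);

static void demo_compute(void *param)
{
	(*(unsigned *)param)++;            /* count how many times we were invoked */
}

struct demo_average {
	demo_comp_t  comp;
	void        *param;
	int          period;               /* in seconds */
	uint64_t     deadline;             /* absolute time of next invocation */
};

/*
 * Run one maintenance pass at time "abstime", catching up on any periods
 * missed since the deadline: invoke the callback once per elapsed period,
 * clamped to MAX_CATCHUP, then rearm the deadline one period into the future.
 */
static void demo_run(struct demo_average *avg, uint64_t abstime)
{
	if (abstime >= avg->deadline) {
		uint64_t period_abs = (uint64_t)avg->period * ONE_SECOND;
		uint64_t ninvokes = 1 + (abstime - avg->deadline) / period_abs;

		ninvokes = MIN(ninvokes, MAX_CATCHUP);
		for (uint64_t i = 0; i < ninvokes; i++)
			(*avg->comp)(avg->param);
		avg->deadline = abstime + period_abs;
	}
}

int main(void)
{
	unsigned invocations = 0;
	struct demo_average avg = { demo_compute, &invocations, 5, 0 };

	demo_run(&avg, 0);                  /* first pass: one invocation */
	demo_run(&avg, 26 * ONE_SECOND);    /* 26s later: catches up five periods */
	printf("callback ran %u times\n", invocations);
	return 0;
}

The same catch-up idea appears earlier in the diff for the exponential decay of sched_mach_factor and sched_load_average, which is now applied stdelta times per call instead of once per tick, compensating for maintenance passes skipped while the system was idle.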