#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif
+#include <kern/zalloc_internal.h>
#include <sys/kdebug.h>
{ compute_stack_target, NULL, 5, 1 },
{ compute_pageout_gc_throttle, NULL, 1, 0 },
{ compute_pmap_gc_throttle, NULL, 60, 0 },
+ { compute_zone_working_set_size, NULL, ZONE_WSS_UPDATE_PERIOD, 0 },
#if CONFIG_TELEMETRY
{ compute_telemetry, NULL, 1, 0 },
#endif
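+/*
+ * Layout of these entries, judging from the surrounding table: the first two
+ * fields are the callout and its argument, the third appears to be the period
+ * in seconds (compute_pmap_gc_throttle runs every 60), and the fourth an
+ * initial deadline used to stagger the callouts. The new zone entry takes its
+ * period from ZONE_WSS_UPDATE_PERIOD, which the <kern/zalloc_internal.h>
+ * include added above presumably provides.
+ */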
#define SCHED_LOAD_EWMA_UNSCALE(load) (((load) >> SCHED_LOAD_EWMA_ALPHA_SHIFT) + SCHED_LOAD_EWMA_ROUNDUP(load))
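+/*
+ * The per-bucket load is kept in fixed point with SCHED_LOAD_EWMA_ALPHA_SHIFT
+ * fractional bits so the EWMA can be maintained with integer shifts alone.
+ * UNSCALE drops the fractional bits and adds SCHED_LOAD_EWMA_ROUNDUP, which
+ * presumably evaluates to 1 when any fractional bit is set, so the result is
+ * rounded up rather than truncated toward zero.
+ */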
/*
- * Routine to capture the latest runnable counts and update sched_load */
+ * Routine to capture the latest runnable counts and update sched_load (only used for non-clutch schedulers)
+ */
void
compute_sched_load(void)
{
uint32_t ncpus = processor_avail_count;
uint32_t load_now[TH_BUCKET_MAX];
- load_now[TH_BUCKET_RUN] = sched_run_buckets[TH_BUCKET_RUN];
- load_now[TH_BUCKET_FIXPRI] = sched_run_buckets[TH_BUCKET_FIXPRI];
- load_now[TH_BUCKET_SHARE_FG] = sched_run_buckets[TH_BUCKET_SHARE_FG];
- load_now[TH_BUCKET_SHARE_DF] = sched_run_buckets[TH_BUCKET_SHARE_DF];
- load_now[TH_BUCKET_SHARE_UT] = sched_run_buckets[TH_BUCKET_SHARE_UT];
- load_now[TH_BUCKET_SHARE_BG] = sched_run_buckets[TH_BUCKET_SHARE_BG];
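+ /*
+ * The run buckets are updated concurrently from other CPUs, so snapshot each
+ * one with os_atomic_load to get a single untorn word-sized read. Relaxed
+ * ordering suffices: only a consistent value per bucket is needed, not any
+ * ordering between buckets.
+ */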
+ load_now[TH_BUCKET_RUN] = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
+ load_now[TH_BUCKET_FIXPRI] = os_atomic_load(&sched_run_buckets[TH_BUCKET_FIXPRI], relaxed);
+ load_now[TH_BUCKET_SHARE_FG] = os_atomic_load(&sched_run_buckets[TH_BUCKET_SHARE_FG], relaxed);
+ load_now[TH_BUCKET_SHARE_DF] = os_atomic_load(&sched_run_buckets[TH_BUCKET_SHARE_DF], relaxed);
+ load_now[TH_BUCKET_SHARE_UT] = os_atomic_load(&sched_run_buckets[TH_BUCKET_SHARE_UT], relaxed);
+ load_now[TH_BUCKET_SHARE_BG] = os_atomic_load(&sched_run_buckets[TH_BUCKET_SHARE_BG], relaxed);
assert(load_now[TH_BUCKET_RUN] >= 0);
assert(load_now[TH_BUCKET_FIXPRI] >= 0);
void
compute_averages(uint64_t stdelta)
{
- uint32_t nthreads = sched_run_buckets[TH_BUCKET_RUN] - 1;
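+ /*
+ * The "- 1" appears to exclude one always-runnable thread (presumably the
+ * current one) so it does not inflate the load average.
+ */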
+ uint32_t nthreads = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed) - 1;
uint32_t ncpus = processor_avail_count;
/* Update the global pri_shifts based on the latest values */
for (uint32_t i = TH_BUCKET_SHARE_FG; i <= TH_BUCKET_SHARE_BG; i++) {
uint32_t bucket_load = SCHED_LOAD_EWMA_UNSCALE(sched_load[i]);
- sched_pri_shifts[i] = sched_fixed_shift - sched_load_shifts[bucket_load];
+ uint32_t shift = sched_fixed_shift - sched_load_shifts[bucket_load];
+
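+ /*
+ * Clamp the shift: under heavy load sched_load_shifts[bucket_load] can
+ * exceed sched_fixed_shift, and the unsigned subtraction above would wrap
+ * to a huge value. INT8_MAX presumably serves as a sentinel telling
+ * consumers of sched_pri_shifts not to apply usage-based priority decay
+ * for that bucket.
+ */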
+ if (shift > SCHED_PRI_SHIFT_MAX) {
+ sched_pri_shifts[i] = INT8_MAX;
+ } else {
+ sched_pri_shifts[i] = shift;
+ }
}
/*