[apple/xnu.git] / osfmk / kern / sched_average.c (xnu-7195.101.1)
diff --git a/osfmk/kern/sched_average.c b/osfmk/kern/sched_average.c
index 709803b9e9a55d0d623437044e8077691ad86aa4..f2bd777ffeadb51590abc91526b1e2c2715934c5 100644
--- a/osfmk/kern/sched_average.c
+++ b/osfmk/kern/sched_average.c
@@ -71,6 +71,7 @@
 #if CONFIG_TELEMETRY
 #include <kern/telemetry.h>
 #endif
+#include <kern/zalloc_internal.h>
 
 #include <sys/kdebug.h>
 
@@ -112,6 +113,7 @@ static struct sched_average {
        { compute_stack_target, NULL, 5, 1 },
        { compute_pageout_gc_throttle, NULL, 1, 0 },
        { compute_pmap_gc_throttle, NULL, 60, 0 },
+       { compute_zone_working_set_size, NULL, ZONE_WSS_UPDATE_PERIOD, 0 },
 #if CONFIG_TELEMETRY
        { compute_telemetry, NULL, 1, 0 },
 #endif
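
For context, the sched_average[] table above appears to register periodic maintenance callouts that the scheduler tick walks; the new entry hooks in compute_zone_working_set_size, with ZONE_WSS_UPDATE_PERIOD in the same slot the other entries use for their period. A minimal sketch of how such a table is typically driven; the field names and layout here are assumptions for illustration, not taken from this file:

#include <stdint.h>
#include <stddef.h>

typedef void (*avg_comp_fn_t)(void *param);

struct avg_entry {
	avg_comp_fn_t comp;      /* computation to run */
	void         *param;     /* opaque argument passed through */
	int           period;    /* period, in scheduler ticks */
	uint64_t      deadline;  /* next tick at which to run */
};

static void
run_due_entries(struct avg_entry *tbl, size_t n, uint64_t now_tick)
{
	for (size_t i = 0; i < n; i++) {
		if (now_tick >= tbl[i].deadline) {
			tbl[i].comp(tbl[i].param);
			tbl[i].deadline = now_tick + (uint64_t)tbl[i].period;
		}
	}
}
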
@@ -174,7 +176,8 @@ static_assert((SCHED_LOAD_EWMA_ALPHA_OLD + SCHED_LOAD_EWMA_ALPHA_NEW) == (1ul <<
 #define SCHED_LOAD_EWMA_UNSCALE(load)   (((load) >> SCHED_LOAD_EWMA_ALPHA_SHIFT) + SCHED_LOAD_EWMA_ROUNDUP(load))
 
 /*
- * Routine to capture the latest runnable counts and update sched_load */
+ * Routine to capture the latest runnable counts and update sched_load (only used for non-clutch schedulers)
+ */
 void
 compute_sched_load(void)
 {
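
The SCHED_LOAD_EWMA_* macros above keep the per-bucket load as a fixed-point exponentially weighted moving average: the stored value stays scaled by 2^SCHED_LOAD_EWMA_ALPHA_SHIFT (the static_assert guarantees the two alpha weights sum to exactly that power of two), and UNSCALE shifts the fraction back out with a rounding term. A small self-contained sketch of the same fixed-point EWMA idea, using assumed alpha values and a simplified round-up rule rather than the kernel's exact constants:

#include <stdint.h>
#include <stdio.h>

#define ALPHA_SHIFT 4u
#define ALPHA_OLD   15u                               /* weight of the previous average (assumed) */
#define ALPHA_NEW   ((1u << ALPHA_SHIFT) - ALPHA_OLD) /* weights must sum to 2^ALPHA_SHIFT */

/* EWMA update in the scaled domain: the result stays scaled by 2^ALPHA_SHIFT. */
static uint32_t
ewma_update(uint32_t scaled_load, uint32_t sample)
{
	return (ALPHA_OLD * scaled_load + ALPHA_NEW * (sample << ALPHA_SHIFT)) >> ALPHA_SHIFT;
}

/* Unscale back to a thread count, rounding up if any fractional bits remain. */
static uint32_t
ewma_unscale(uint32_t scaled_load)
{
	uint32_t frac = scaled_load & ((1u << ALPHA_SHIFT) - 1);
	return (scaled_load >> ALPHA_SHIFT) + (frac != 0);
}

int
main(void)
{
	uint32_t load = 8u << ALPHA_SHIFT;   /* steady state: 8 runnable threads */
	load = ewma_update(load, 12);        /* one sample of 12 nudges the average up */
	printf("unscaled load ~= %u\n", ewma_unscale(load));  /* prints 9 */
	return 0;
}
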
@@ -187,12 +190,12 @@ compute_sched_load(void)
        uint32_t ncpus = processor_avail_count;
        uint32_t load_now[TH_BUCKET_MAX];
 
-       load_now[TH_BUCKET_RUN]      = sched_run_buckets[TH_BUCKET_RUN];
-       load_now[TH_BUCKET_FIXPRI]   = sched_run_buckets[TH_BUCKET_FIXPRI];
-       load_now[TH_BUCKET_SHARE_FG] = sched_run_buckets[TH_BUCKET_SHARE_FG];
-       load_now[TH_BUCKET_SHARE_DF] = sched_run_buckets[TH_BUCKET_SHARE_DF];
-       load_now[TH_BUCKET_SHARE_UT] = sched_run_buckets[TH_BUCKET_SHARE_UT];
-       load_now[TH_BUCKET_SHARE_BG] = sched_run_buckets[TH_BUCKET_SHARE_BG];
+       load_now[TH_BUCKET_RUN]      = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
+       load_now[TH_BUCKET_FIXPRI]   = os_atomic_load(&sched_run_buckets[TH_BUCKET_FIXPRI], relaxed);
+       load_now[TH_BUCKET_SHARE_FG] = os_atomic_load(&sched_run_buckets[TH_BUCKET_SHARE_FG], relaxed);
+       load_now[TH_BUCKET_SHARE_DF] = os_atomic_load(&sched_run_buckets[TH_BUCKET_SHARE_DF], relaxed);
+       load_now[TH_BUCKET_SHARE_UT] = os_atomic_load(&sched_run_buckets[TH_BUCKET_SHARE_UT], relaxed);
+       load_now[TH_BUCKET_SHARE_BG] = os_atomic_load(&sched_run_buckets[TH_BUCKET_SHARE_BG], relaxed);
 
        assert(load_now[TH_BUCKET_RUN] >= 0);
        assert(load_now[TH_BUCKET_FIXPRI] >= 0);
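
The hunk above replaces plain reads of the sched_run_buckets counters with os_atomic_load(..., relaxed), which makes the concurrent access explicit without imposing any ordering; in C11 terms this is essentially an atomic load with memory_order_relaxed. A minimal sketch of the equivalent pattern (the variable here is a stand-in, not the kernel's actual counter):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint32_t run_bucket_count;   /* stand-in for one sched_run_buckets entry */

static uint32_t
snapshot_bucket(void)
{
	/* Relaxed load: atomicity only, no ordering or fencing implied. */
	return atomic_load_explicit(&run_bucket_count, memory_order_relaxed);
}
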
@@ -285,13 +288,19 @@ compute_sched_load(void)
 void
 compute_averages(uint64_t stdelta)
 {
-       uint32_t nthreads = sched_run_buckets[TH_BUCKET_RUN] - 1;
+       uint32_t nthreads = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed) - 1;
        uint32_t ncpus = processor_avail_count;
 
        /* Update the global pri_shifts based on the latest values */
        for (uint32_t i = TH_BUCKET_SHARE_FG; i <= TH_BUCKET_SHARE_BG; i++) {
                uint32_t bucket_load = SCHED_LOAD_EWMA_UNSCALE(sched_load[i]);
-               sched_pri_shifts[i] = sched_fixed_shift - sched_load_shifts[bucket_load];
+               uint32_t shift = sched_fixed_shift - sched_load_shifts[bucket_load];
+
+               if (shift > SCHED_PRI_SHIFT_MAX) {
+                       sched_pri_shifts[i] = INT8_MAX;
+               } else {
+                       sched_pri_shifts[i] = shift;
+               }
        }
 
        /*