diff --git a/osfmk/kern/mach_factor.c b/osfmk/kern/mach_factor.c
index eca59d7c67a66ad595089b868c74f9dce58df585..3b86b697085384dec6a76563615a1ed4932d9c5b 100644
--- a/osfmk/kern/mach_factor.c
+++ b/osfmk/kern/mach_factor.c
 #include <mach/port.h>
 #endif /* MACH_KERNEL */
 
-integer_t      avenrun[3] = {0, 0, 0};
-integer_t      mach_factor[3] = {0, 0, 0};
+uint32_t       avenrun[3] = {0, 0, 0};
+uint32_t       mach_factor[3] = {0, 0, 0};
 
 /*
  * Values are scaled by LOAD_SCALE, defined in processor_info.h
  */
-static long    fract[3] = {
-       800,                    /* (4.0/5.0) 5 second average */
-       966,                    /* (29.0/30.0) 30 second average */
-       983,                    /* (59.0/60.) 1 minute average */
+#define base(n)                ((n) << SCHED_TICK_SHIFT)
+#define frac(n)                (((base(n) - 1) * LOAD_SCALE) / base(n))
+
+static uint32_t                fract[3] = {
+       frac(5),                /* 5 second average */
+       frac(30),               /* 30 second average */
+       frac(60),               /* 1 minute average */
 };
 
+#undef base
+#undef frac
+
 void
 compute_mach_factor(void)
 {
        register processor_set_t        pset;
-       register processor_t            processor;
        register int                            ncpus;
        register int                            nthreads;
-       register long                           factor_now = 0L;
-       register long                           average_now = 0L;
-       register long                           load_now = 0L;
+       register uint32_t                       factor_now = 0;
+       register uint32_t                       average_now = 0;
+       register uint32_t                       load_now = 0;
 
        pset = &default_pset;
-       simple_lock(&pset->processors_lock);
        if ((ncpus = pset->processor_count) > 0) {
                /*
-                *      Count number of threads.
+                *      Number of threads running in pset.
                 */
-               nthreads = pset->runq.count;
-               processor = (processor_t)queue_first(&pset->processors);
-               while (!queue_end(&pset->processors, (queue_entry_t)processor)) {
-                       nthreads += processor->runq.count;
-
-                       processor = (processor_t)queue_next(&processor->processors);
-               }
-
-               /*
-                * account for threads on cpus.
-                */
-               nthreads += ncpus - pset->idle_count; 
+               nthreads = pset->run_count;
 
                /*
                 *      The current thread (running this calculation)
@@ -119,28 +112,24 @@ compute_mach_factor(void)
                if (pset == &default_pset)
                        nthreads -= 1;
 
-               if (nthreads >= ncpus)
+               if (nthreads > ncpus) {
                        factor_now = (ncpus * LOAD_SCALE) / (nthreads + 1);
-               else
-                       factor_now = (ncpus - nthreads) * LOAD_SCALE;
-
-               if (nthreads > ncpus)
                        load_now = (nthreads << SCHED_SHIFT) / ncpus;
+               }
                else
-                       load_now = 0;
+                       factor_now = (ncpus - nthreads) * LOAD_SCALE;
 
                /*
                 *      Load average and mach factor calculations for
                 *      those that ask about these things.
                 */
-
-               average_now = (nthreads * LOAD_SCALE) / ncpus;
+               average_now = nthreads * LOAD_SCALE;
 
                pset->mach_factor =     ((pset->mach_factor << 2) + factor_now) / 5;
                pset->load_average = ((pset->load_average << 2) + average_now) / 5;
 
                /*
-                *      sched_load is the only thing used by scheduler.
+                *      sched_load is used by the timesharing algorithm.
                 */
                pset->sched_load = (pset->sched_load + load_now) >> 1;
        }
@@ -149,13 +138,11 @@ compute_mach_factor(void)
                pset->sched_load = 0;
        }
 
-       simple_unlock(&pset->processors_lock);
-
        /*
-        *      And some ugly stuff to keep w happy.
+        * Compute old-style Mach load averages.
         */
        {
-               register int i;
+               register int            i;
 
                for (i = 0; i < 3; i++) {
                        mach_factor[i] = ((mach_factor[i] * fract[i]) +
@@ -165,4 +152,20 @@ compute_mach_factor(void)
                                                (average_now * (LOAD_SCALE - fract[i]))) / LOAD_SCALE;
                }
        }
+
+       /*
+        * Call out to BSD for averunnable.
+        */
+       {
+#define AVGTICK_PERIOD         (5 << SCHED_TICK_SHIFT)
+               static uint32_t         avgtick_count;
+               extern void                     compute_averunnable(
+                                                               int                             nrun);
+
+               if (++avgtick_count == 1)
+                       compute_averunnable(nthreads);
+               else
+               if (avgtick_count >= AVGTICK_PERIOD)
+                       avgtick_count = 0;
+       }
 }
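
A quick way to see what the new frac(n) construction produces, and how those
coefficients drive the exponentially weighted averages updated in the loop
above.  The standalone sketch below assumes LOAD_SCALE is 1000 and
SCHED_TICK_SHIFT is 3 (the values found in xnu headers of this vintage);
neither constant is defined by this diff.

#include <stdio.h>
#include <stdint.h>

#define LOAD_SCALE              1000    /* assumed fixed-point scale */
#define SCHED_TICK_SHIFT        3       /* assumed: 8 scheduler ticks per second */

#define base(n) ((n) << SCHED_TICK_SHIFT)
#define frac(n) (((base(n) - 1) * LOAD_SCALE) / base(n))

int
main(void)
{
        /* Same construction as the new fract[] initializer above. */
        uint32_t        fract[3] = { frac(5), frac(30), frac(60) };
        uint32_t        average = 0;
        uint32_t        sample = LOAD_SCALE;    /* constant instantaneous value */
        int             i, tick;

        for (i = 0; i < 3; i++)
                printf("fract[%d] = %u\n", i, fract[i]);        /* 975, 995, 997 */

        /*
         * Each tick the old average keeps fract/LOAD_SCALE of its value and
         * the new sample contributes the remainder -- the same update the
         * loop above applies to mach_factor[] and avenrun[].
         */
        for (tick = 0; tick < (5 << SCHED_TICK_SHIFT); tick++)
                average = (average * fract[0] +
                                sample * (LOAD_SCALE - fract[0])) / LOAD_SCALE;

        printf("5 second average after 5 seconds: %u\n", average);
        return 0;
}

Under those assumptions fract[] works out to {975, 995, 997}, so the "5 second
average" retains 39/40 of its old value per tick and decays to roughly 1/e of
it over the 40 ticks that make up 5 seconds.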
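
For readers tracing the reworked per-pset arithmetic, the same computation can
be restated as a free-standing function.  The struct, the function name, and
the SCHED_SHIFT value of 7 are illustrative assumptions; only the body mirrors
the logic this diff introduces.

#include <stdint.h>

#define LOAD_SCALE      1000    /* assumed */
#define SCHED_SHIFT     7       /* assumed fixed-point shift for sched_load */

struct pset_averages {          /* hypothetical stand-in for the pset fields */
        uint32_t        mach_factor;
        uint32_t        load_average;
        uint32_t        sched_load;
};

void
update_pset_averages(struct pset_averages *p, int nthreads, int ncpus)
{
        uint32_t        factor_now, average_now;
        uint32_t        load_now = 0;

        if (nthreads > ncpus) {
                /* Oversubscribed: the factor becomes a fraction of one cpu
                   and the timesharing load measures threads per cpu. */
                factor_now = (ncpus * LOAD_SCALE) / (nthreads + 1);
                load_now = (nthreads << SCHED_SHIFT) / ncpus;
        }
        else
                /* Otherwise the factor counts idle cpus; load_now stays 0. */
                factor_now = (ncpus - nthreads) * LOAD_SCALE;

        /* average_now is now simply the scaled count of runnable threads. */
        average_now = nthreads * LOAD_SCALE;

        /* The same 4/5 and 1/2 weighted smoothing applied above each tick. */
        p->mach_factor = ((p->mach_factor << 2) + factor_now) / 5;
        p->load_average = ((p->load_average << 2) + average_now) / 5;
        p->sched_load = (p->sched_load + load_now) >> 1;
}

Note that because load_now is only nonzero in the oversubscribed branch,
pset->sched_load now halves on every tick in which runnable threads do not
exceed processors.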