- ncpus = processor_avail_count;
- nthreads = sched_run_count - 1;
- nshared = sched_share_count;
+ uint32_t ncpus = processor_avail_count;
+
+ load_now[TH_BUCKET_RUN] = sched_run_buckets[TH_BUCKET_RUN];
+ load_now[TH_BUCKET_FIXPRI] = sched_run_buckets[TH_BUCKET_FIXPRI];
+ load_now[TH_BUCKET_SHARE_FG] = sched_run_buckets[TH_BUCKET_SHARE_FG];
+ load_now[TH_BUCKET_SHARE_UT] = sched_run_buckets[TH_BUCKET_SHARE_UT];
+ load_now[TH_BUCKET_SHARE_BG] = sched_run_buckets[TH_BUCKET_SHARE_BG];
+
+ assert(load_now[TH_BUCKET_RUN] >= 1);    /* ">= 0" is vacuous for an unsigned count; ">= 1" guards the "- 1" below */
+ assert(load_now[TH_BUCKET_FIXPRI] >= 1); /* current thread is a running fixpri thread, so both buckets hold it */
+
+ /* Ignore the current thread, which is a running fixpri thread */
+
+ uint32_t nthreads = load_now[TH_BUCKET_RUN] - 1;
+ uint32_t nfixpri = load_now[TH_BUCKET_FIXPRI] - 1;
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_LOAD) | DBG_FUNC_NONE,
+ nfixpri, load_now[TH_BUCKET_SHARE_FG],
+ load_now[TH_BUCKET_SHARE_BG], load_now[TH_BUCKET_SHARE_UT], 0);