-
- queue_head_t processors; /* all processors here */
- int processor_count; /* how many ? */
- decl_simple_lock_data(,sched_lock) /* lock for runq and above */
-
- struct run_queue runq; /* runq for this set */
-
- queue_head_t tasks; /* tasks assigned */
- int task_count; /* how many */
- queue_head_t threads; /* threads in this set */
- int thread_count; /* how many */
- int ref_count; /* structure ref count */
- boolean_t active; /* is pset in use */
- decl_mutex_data(, lock) /* lock for above */
-
- int timeshare_quanta; /* timeshare quantum factor */
+ queue_head_t idle_queue; /* idle processors */
+ queue_head_t idle_secondary_queue; /* idle secondary processors */
+
+ int online_processor_count;
+
+ int cpu_set_low, cpu_set_hi;
+ int cpu_set_count;
+
+#if __SMP__
+ decl_simple_lock_data(,sched_lock) /* lock for above */
+#endif
+
+#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_MULTIQ)
+ struct run_queue pset_runq; /* runq for this processor set */
+#endif
+
+#if defined(CONFIG_SCHED_TRADITIONAL)
+ int pset_runq_bound_count;
+ /* # of threads in runq bound to any processor in pset */
+#endif
+
+ /* CPUs that have been sent an unacknowledged remote AST for scheduling purposes */
+ uint64_t pending_AST_cpu_mask;
+#if defined(CONFIG_SCHED_DEFERRED_AST)
+ /*
+ * A separate mask, for ASTs that we may be able to cancel. This is dependent on
+ * some level of support for requesting an AST on a processor, and then quashing
+ * that request later.
+ *
+ * The purpose of this field (and the associated codepaths) is to infer when we
+ * no longer need a processor that is DISPATCHING to come up, and to prevent it
+ * from coming out of IDLE if possible. This should serve to decrease the number
+ * of spurious ASTs in the system, and let processors spend longer periods in
+ * IDLE.
+ */
+ uint64_t pending_deferred_AST_cpu_mask;
+#endif
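
The two AST masks above lend themselves to lock-free bit operations keyed by CPU number. Below is a minimal sketch of how a cancellable AST might be recorded and later quashed while the target processor is still DISPATCHING. This is not XNU's actual implementation: the `pset_sketch` struct and the `deferred_ast_send()`/`deferred_ast_quash()` helpers are hypothetical, standing in for the real per-pset state and signalling paths.

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the per-pset scheduling state above. */
struct pset_sketch {
	_Atomic uint64_t pending_AST_cpu_mask;
	_Atomic uint64_t pending_deferred_AST_cpu_mask;
};

/* Record that cpu_id was sent a cancellable (deferred) AST. */
static void
deferred_ast_send(struct pset_sketch *pset, int cpu_id)
{
	uint64_t bit = 1ULL << cpu_id;

	atomic_fetch_or(&pset->pending_AST_cpu_mask, bit);
	atomic_fetch_or(&pset->pending_deferred_AST_cpu_mask, bit);
}

/*
 * Try to quash the AST: if the bit was still set in the deferred mask,
 * the target CPU has not yet claimed the request, so clear both bits
 * and report success; otherwise the AST must run to completion.
 */
static bool
deferred_ast_quash(struct pset_sketch *pset, int cpu_id)
{
	uint64_t bit = 1ULL << cpu_id;
	uint64_t prev;

	prev = atomic_fetch_and(&pset->pending_deferred_AST_cpu_mask, ~bit);
	if (prev & bit) {
		atomic_fetch_and(&pset->pending_AST_cpu_mask, ~bit);
		return true;	/* cancelled before the CPU left IDLE */
	}
	return false;		/* too late: the CPU already claimed it */
}

int
main(void)
{
	struct pset_sketch pset = { 0 };

	deferred_ast_send(&pset, 3);
	printf("quash cpu 3: %s\n",
	    deferred_ast_quash(&pset, 3) ? "cancelled" : "too late");
	return 0;
}
```

If `deferred_ast_quash()` wins the race (the bit was still set in the deferred mask), the processor can remain in IDLE and the spurious wakeup is avoided; otherwise the AST proceeds normally, which matches the stated goal of reducing spurious ASTs rather than guaranteeing cancellation.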