+#if __SMP__
+ decl_simple_lock_data(,sched_lock) /* lock for above */
+#endif
+
+#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_MULTIQ)
+ struct run_queue pset_runq; /* runq for this processor set */
+#endif
+
+#if defined(CONFIG_SCHED_TRADITIONAL)
+ int pset_runq_bound_count;
+ /* # of threads in runq bound to any processor in pset */
+#endif
+
+ /* CPUs that have been sent an unacknowledged remote AST for scheduling purposes */
+ uint64_t pending_AST_cpu_mask;
+#if defined(CONFIG_SCHED_DEFERRED_AST)
+ /*
+	 * A separate mask, for ASTs that we may be able to cancel. This is dependent on
+ * some level of support for requesting an AST on a processor, and then quashing
+ * that request later.
+ *
+ * The purpose of this field (and the associated codepaths) is to infer when we
+ * no longer need a processor that is DISPATCHING to come up, and to prevent it
+ * from coming out of IDLE if possible. This should serve to decrease the number
+ * of spurious ASTs in the system, and let processors spend longer periods in
+ * IDLE.
+ */
+ uint64_t pending_deferred_AST_cpu_mask;
+#endif
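
For context, per-CPU masks like these are typically manipulated with atomic bit operations keyed by a processor's cpu_id: a bit is set when a scheduling AST/IPI is sent to that CPU, and cleared when the target acknowledges it (or, for the deferred mask, when the request is quashed before the CPU leaves IDLE). The following is a minimal sketch of that pattern only; the struct and helper names (pset_sketch, pset_mark_ast_sent, pset_mark_ast_acked, pset_ast_pending) are hypothetical and are not the actual XNU routines that operate on pending_AST_cpu_mask.

/* Hypothetical sketch: track unacknowledged scheduling ASTs per CPU in a
 * 64-bit mask, one bit per cpu_id (assumes at most 64 CPUs per pset). */
#include <stdatomic.h>
#include <stdint.h>

struct pset_sketch {
	_Atomic uint64_t pending_AST_cpu_mask;
};

/* Record that 'cpu_id' has been sent a scheduling AST. */
static void
pset_mark_ast_sent(struct pset_sketch *pset, int cpu_id)
{
	atomic_fetch_or(&pset->pending_AST_cpu_mask, 1ULL << cpu_id);
}

/* Clear the bit once the target CPU acknowledges the AST, or once the
 * request has been quashed and the CPU no longer needs to come up. */
static void
pset_mark_ast_acked(struct pset_sketch *pset, int cpu_id)
{
	atomic_fetch_and(&pset->pending_AST_cpu_mask, ~(1ULL << cpu_id));
}

/* Returns nonzero if 'cpu_id' still has an outstanding AST. */
static int
pset_ast_pending(struct pset_sketch *pset, int cpu_id)
{
	return (int)((atomic_load(&pset->pending_AST_cpu_mask) >> cpu_id) & 1);
}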