#include <kern/sched.h>
#include <mach/sfi_class.h>
#include <kern/processor_data.h>
+#include <kern/cpu_quiesce.h>
+
+/*
+ * Processor state is accessed by locking the scheduling lock
+ * for the assigned processor set.
+ *
+ *            -------------------- SHUTDOWN
+ *           /                  ^     ^
+ *         _/                   |      \
+ *  OFF_LINE ---> START ---> RUNNING ---> IDLE ---> DISPATCHING
+ *          \________________^  ^ ^______/            /
+ *                               \__________________/
+ *
+ * Most of these state transitions are externally driven, as a
+ * directive (for instance, telling an IDLE processor to start
+ * coming out of the idle state to run a thread). However, these
+ * are typically paired with a handshake by the processor itself
+ * to indicate that it has completed a transition of indeterminate
+ * length (for example, the DISPATCHING->RUNNING or START->RUNNING
+ * transitions must occur on the processor itself).
+ *
+ * The boot processor has some special cases, and skips the START state,
+ * since it has already bootstrapped and is ready to context switch threads.
+ *
+ * When a processor is in DISPATCHING or RUNNING state, the current_pri,
+ * current_thmode, and deadline fields should be set, so that other
+ * processors can evaluate if it is an appropriate candidate for preemption.
+ */
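+
+/*
+ * For example, a remote preemption check based on those fields might look
+ * like the following sketch (the helper name is hypothetical; current_pri
+ * is the processor field referenced above):
+ *
+ *	static bool
+ *	processor_is_preemption_candidate(processor_t processor, int pri)
+ *	{
+ *		int state = processor->state;
+ *		if (state != PROCESSOR_RUNNING && state != PROCESSOR_DISPATCHING)
+ *			return false;
+ *		return (processor->current_pri < pri);
+ *	}
+ */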
+#if defined(CONFIG_SCHED_DEFERRED_AST)
+/*
+ *            -------------------- SHUTDOWN
+ *           /                  ^     ^
+ *         _/                   |      \
+ *  OFF_LINE ---> START ---> RUNNING ---> IDLE ---> DISPATCHING
+ *          \________________^  ^ ^______/ ^_____ /   /
+ *                               \__________________/
+ *
+ * A DISPATCHING processor may be put back into IDLE, if another
+ * processor determines that the target processor will have nothing to do
+ * upon reaching the RUNNING state. This is racy, but if the target
+ * responds and becomes RUNNING, it will not break the processor state
+ * machine.
+ *
+ * This change allows us to cancel an outstanding signal/AST on a processor
+ * (if such an operation is supported through hardware or software), and
+ * push the processor back into the IDLE state as a power optimization.
+ */
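+
+/*
+ * A minimal sketch of that cancellation path (illustrative only; assumes
+ * the pset lock is held, and uses bit_test()/bit_clear() from <kern/bits.h>
+ * and pset_update_processor_state() declared later in this header):
+ *
+ *	if (processor->state == PROCESSOR_DISPATCHING &&
+ *	    bit_test(pset->pending_deferred_AST_cpu_mask, processor->cpu_id)) {
+ *		bit_clear(pset->pending_deferred_AST_cpu_mask, processor->cpu_id);
+ *		pset_update_processor_state(pset, processor, PROCESSOR_IDLE);
+ *	}
+ */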
+#endif
+
+#define PROCESSOR_OFF_LINE 0 /* Not available */
+#define PROCESSOR_SHUTDOWN 1 /* Going off-line */
+#define PROCESSOR_START 2 /* Being started */
+/* 3 Formerly Inactive (unavailable) */
+#define PROCESSOR_IDLE 4 /* Idle (available) */
+#define PROCESSOR_DISPATCHING 5 /* Dispatching (idle -> active) */
+#define PROCESSOR_RUNNING 6 /* Normal execution */
+#define PROCESSOR_STATE_LEN (PROCESSOR_RUNNING+1)
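+
+/*
+ * Together with the cpu_state_map[] bitmaps in struct processor_set below,
+ * these state indices make common queries cheap. For example (sketch, using
+ * bit_count() from <kern/bits.h>), counting the idle primary CPUs of a pset:
+ *
+ *	int idle_primaries =
+ *	    bit_count(pset->cpu_state_map[PROCESSOR_IDLE] & pset->primary_map);
+ */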
typedef enum {
PSET_SMP,
} pset_cluster_type_t;
-struct processor_set {
- queue_head_t active_queue; /* active processors */
- queue_head_t idle_queue; /* idle processors */
- queue_head_t idle_secondary_queue; /* idle secondary processors */
- queue_head_t unused_queue; /* processors not recommended by CLPC */
+typedef bitmap_t cpumap_t; /* bitmap of CPUs, one bit per cpu_id */
- int online_processor_count;
- int active_processor_count;
- int load_average;
+struct processor_set {
+ int online_processor_count;
+ int load_average;
- int cpu_set_low, cpu_set_hi;
- int cpu_set_count;
- uint64_t cpu_bitmask;
- uint64_t recommended_bitmask;
+ int cpu_set_low, cpu_set_hi; /* lowest and highest cpu_id in this pset */
+ int cpu_set_count;
+ int last_chosen; /* last CPU chosen by the scheduler; hint for the next choice */
+ cpumap_t cpu_bitmask; /* all CPUs belonging to this pset */
+ cpumap_t recommended_bitmask; /* CPUs currently recommended (e.g. by CLPC) */
+ cpumap_t cpu_state_map[PROCESSOR_STATE_LEN]; /* bitmap of CPUs in each processor state */
+ cpumap_t primary_map; /* bitmap of primary (non-secondary SMT) CPUs */
#if __SMP__
decl_simple_lock_data(,sched_lock) /* lock for above */
#endif
/* CPUs that have been sent an unacknowledged remote AST for scheduling purposes */
- uint64_t pending_AST_cpu_mask;
+ cpumap_t pending_AST_cpu_mask;
#if defined(CONFIG_SCHED_DEFERRED_AST)
/*
 * A separate mask, for ASTs that we may be able to cancel. This is dependent on
 * some level of support for requesting an AST on a processor, and then quashing
 * that request later.
 *
 * The purpose of this field (and the associated codepaths) is to infer when we
 * no longer need a processor that is DISPATCHING to come up, and to prevent it
 * from coming out of IDLE if possible. This should serve to decrease the number
 * of spurious ASTs in the system, and let processors spend longer periods in
 * IDLE.
 */
- uint64_t pending_deferred_AST_cpu_mask;
+ cpumap_t pending_deferred_AST_cpu_mask;
#endif
- uint64_t pending_spill_cpu_mask;
+ cpumap_t pending_spill_cpu_mask; /* CPUs with an unacknowledged spill AST */
struct ipc_port * pset_self; /* port for operations */
struct ipc_port * pset_name_self; /* port for information */
decl_lck_mtx_data(extern,tasks_corpse_lock)
struct processor {
- queue_chain_t processor_queue;/* idle/active queue link,
- * MUST remain the first element */
- int state; /* See below */
- boolean_t is_SMT;
- boolean_t is_recommended;
- struct thread
- *active_thread, /* thread running on processor */
- *next_thread, /* next thread when dispatched */
- *idle_thread; /* this processor's idle thread. */
+ int state; /* See above */
+ bool is_SMT;
+ bool is_recommended;
+ struct thread *active_thread; /* thread running on processor */
+ struct thread *next_thread; /* next thread when dispatched */
+ struct thread *idle_thread; /* this processor's idle thread. */
processor_set_t processor_set; /* assigned set */
int starting_pri; /* priority of current thread as it was when scheduled */
pset_cluster_type_t current_recommended_pset_type; /* Cluster type recommended for current thread */
int cpu_id; /* platform numeric id */
+ cpu_quiescent_state_t cpu_quiesce_state; /* this CPU's state in the quiescent-counter checkin protocol */
+ uint64_t cpu_quiesce_last_checkin; /* time of this CPU's last quiescent-counter checkin */
timer_call_data_t quantum_timer; /* timer for quantum expiration */
uint64_t quantum_end; /* time when current quantum ends */
uint64_t last_dispatch; /* time of last dispatch */
+ uint64_t kperf_last_sample_time; /* time of last kperf sample */
+
uint64_t deadline; /* current deadline */
- boolean_t first_timeslice; /* has the quantum expired since context switch */
+ bool first_timeslice; /* has the quantum expired since context switch */
#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_MULTIQ)
struct run_queue runq; /* runq for this processor */
extern boolean_t sched_stats_active;
-/*
- * Processor state is accessed by locking the scheduling lock
- * for the assigned processor set.
- *
- * -------------------- SHUTDOWN
- * / ^ ^
- * _/ | \
- * OFF_LINE ---> START ---> RUNNING ---> IDLE ---> DISPATCHING
- * \_________________^ ^ ^______/ /
- * \__________________/
- *
- * Most of these state transitions are externally driven as a
- * a directive (for instance telling an IDLE processor to start
- * coming out of the idle state to run a thread). However these
- * are typically paired with a handshake by the processor itself
- * to indicate that it has completed a transition of indeterminate
- * length (for example, the DISPATCHING->RUNNING or START->RUNNING
- * transitions must occur on the processor itself).
- *
- * The boot processor has some special cases, and skips the START state,
- * since it has already bootstrapped and is ready to context switch threads.
- *
- * When a processor is in DISPATCHING or RUNNING state, the current_pri,
- * current_thmode, and deadline fields should be set, so that other
- * processors can evaluate if it is an appropriate candidate for preemption.
- */
-#if defined(CONFIG_SCHED_DEFERRED_AST)
-/*
- * -------------------- SHUTDOWN
- * / ^ ^
- * _/ | \
- * OFF_LINE ---> START ---> RUNNING ---> IDLE ---> DISPATCHING
- * \_________________^ ^ ^______/ ^_____ / /
- * \__________________/
- *
- * A DISPATCHING processor may be put back into IDLE, if another
- * processor determines that the target processor will have nothing to do
- * upon reaching the RUNNING state. This is racy, but if the target
- * responds and becomes RUNNING, it will not break the processor state
- * machine.
- *
- * This change allows us to cancel an outstanding signal/AST on a processor
- * (if such an operation is supported through hardware or software), and
- * push the processor back into the IDLE state as a power optimization.
- */
-#endif
-
-#define PROCESSOR_OFF_LINE 0 /* Not available */
-#define PROCESSOR_SHUTDOWN 1 /* Going off-line */
-#define PROCESSOR_START 2 /* Being started */
-/* 3 Formerly Inactive (unavailable) */
-#define PROCESSOR_IDLE 4 /* Idle (available) */
-#define PROCESSOR_DISPATCHING 5 /* Dispatching (idle -> active) */
-#define PROCESSOR_RUNNING 6 /* Normal execution */
-
extern processor_t current_processor(void);
/* Lock macros, always acquired and released with interrupts disabled (splsched()) */
#define pset_lock(p) simple_lock(&(p)->sched_lock)
#define pset_unlock(p) simple_unlock(&(p)->sched_lock)
#define pset_lock_init(p) simple_lock_init(&(p)->sched_lock, 0)
+#if defined(__arm__) || defined(__arm64__)
+#define pset_assert_locked(p) LCK_SPIN_ASSERT(&(p)->sched_lock, LCK_ASSERT_OWNED)
+#else
+/* See <rdar://problem/39630910> pset_lock() should be converted to use lck_spin_lock() instead of simple_lock() */
+#define pset_assert_locked(p) do { (void)p; } while(0)
+#endif
#define rt_lock_lock(p) simple_lock(&SCHED(rt_runq)(p)->rt_lock)
#define rt_lock_unlock(p) simple_unlock(&SCHED(rt_runq)(p)->rt_lock)
#define pset_lock(p) do { (void)p; } while(0)
#define pset_unlock(p) do { (void)p; } while(0)
#define pset_lock_init(p) do { (void)p; } while(0)
+#define pset_assert_locked(p) do { (void)p; } while(0)
#define rt_lock_lock(p) do { (void)p; } while(0)
#define rt_lock_unlock(p) do { (void)p; } while(0)
sfi_class_id_t sfi_class, pset_cluster_type_t pset_type,
perfcontrol_class_t perfctl_class);
+#define PSET_LOAD_NUMERATOR_SHIFT 16
+#define PSET_LOAD_FRACTIONAL_SHIFT 4
+
+inline static int
+sched_get_pset_load_average(processor_set_t pset)
+{
+ return pset->load_average >> (PSET_LOAD_NUMERATOR_SHIFT - PSET_LOAD_FRACTIONAL_SHIFT);
+}
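+
+/*
+ * load_average is stored as a fixed-point value with PSET_LOAD_NUMERATOR_SHIFT
+ * fractional bits; the getter above keeps PSET_LOAD_FRACTIONAL_SHIFT of them.
+ * Decoding sketch:
+ *
+ *	int load = sched_get_pset_load_average(pset);
+ *	int whole = load >> PSET_LOAD_FRACTIONAL_SHIFT;
+ *	int sixteenths = load & ((1 << PSET_LOAD_FRACTIONAL_SHIFT) - 1);
+ *
+ * e.g. a returned value of 0x18 decodes as a load average of 1 + 8/16 = 1.5.
+ */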
+extern void sched_update_pset_load_average(processor_set_t pset);
+
+inline static void
+pset_update_processor_state(processor_set_t pset, processor_t processor, uint new_state)
+{
+ pset_assert_locked(pset);
+
+ uint old_state = processor->state;
+ uint cpuid = processor->cpu_id;
+
+ assert(processor->processor_set == pset);
+ assert(bit_test(pset->cpu_bitmask, cpuid));
+
+ assert(old_state < PROCESSOR_STATE_LEN);
+ assert(new_state < PROCESSOR_STATE_LEN);
+
+ processor->state = new_state;
+
+ bit_clear(pset->cpu_state_map[old_state], cpuid);
+ bit_set(pset->cpu_state_map[new_state], cpuid);
+
+ if ((old_state == PROCESSOR_RUNNING) || (new_state == PROCESSOR_RUNNING)) {
+ sched_update_pset_load_average(pset);
+ }
+}
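+
+/*
+ * All processor state changes should go through pset_update_processor_state()
+ * so that cpu_state_map[] stays consistent with processor->state. Typical
+ * usage (sketch), with interrupts disabled and the pset lock held:
+ *
+ *	pset_lock(pset);
+ *	pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING);
+ *	pset_unlock(pset);
+ */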
+
#else /* MACH_KERNEL_PRIVATE */
__BEGIN_DECLS