decl_simple_lock_data(static,pset_node_lock)
queue_head_t tasks;
+queue_head_t terminated_tasks; /* To be used ONLY for stackshot. */
int tasks_count;
+int terminated_tasks_count;
queue_head_t threads;
int threads_count;
decl_lck_mtx_data(,tasks_threads_lock)
processor_t master_processor;
int master_cpu = 0;
+boolean_t sched_stats_active = FALSE;
/* Forwards */
kern_return_t processor_set_things(
simple_lock_init(&pset_node_lock, 0);
queue_init(&tasks);
+ queue_init(&terminated_tasks);
queue_init(&threads);
simple_lock_init(&processor_list_lock, 0);
int cpu_id,
processor_set_t pset)
{
- run_queue_init(&processor->runq);
+ spl_t s;
+
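+ /*
+ * master_processor is registered before sched_init() has chosen a
+ * scheduler, so its per-processor scheduler state is initialized
+ * there instead; all other processors come up after sched_init()
+ * and can be handed to the chosen scheduler's init hook directly.
+ */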
+ if (processor != master_processor) {
+ /* Scheduler state deferred until sched_init() */
+ SCHED(processor_init)(processor);
+ }
processor->state = PROCESSOR_OFF_LINE;
processor->active_thread = processor->next_thread = processor->idle_thread = THREAD_NULL;
processor->processor_set = pset;
processor->current_pri = MINPRI;
+ processor->current_thmode = TH_MODE_NONE;
processor->cpu_id = cpu_id;
timer_call_setup(&processor->quantum_timer, thread_quantum_expire, processor);
+ processor->quantum_end = UINT64_MAX;
processor->deadline = UINT64_MAX;
processor->timeslice = 0;
- processor->processor_meta = PROCESSOR_META_NULL;
+ processor->processor_primary = processor; /* no SMT relationship known at this point */
+ processor->processor_secondary = NULL;
+ processor->is_SMT = FALSE;
processor->processor_self = IP_NULL;
processor_data_init(processor);
processor->processor_list = NULL;
+ s = splsched();
+ pset_lock(pset);
+ if (pset->cpu_set_count++ == 0)
+ pset->cpu_set_low = pset->cpu_set_hi = cpu_id;
+ else {
+ pset->cpu_set_low = (cpu_id < pset->cpu_set_low)? cpu_id: pset->cpu_set_low;
+ pset->cpu_set_hi = (cpu_id > pset->cpu_set_hi)? cpu_id: pset->cpu_set_hi;
+ }
+ pset_unlock(pset);
+ splx(s);
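+ /*
+ * Worked example of the bookkeeping above (hypothetical IDs):
+ * registering cpu_ids 2, 5 and 3 into this pset leaves
+ * cpu_set_count == 3, cpu_set_low == 2 and cpu_set_hi == 5;
+ * low/hi track the range of CPU ids seen, not exact membership.
+ */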
+
simple_lock(&processor_list_lock);
if (processor_list == NULL)
processor_list = processor;
}
void
-processor_meta_init(
+processor_set_primary(
processor_t processor,
processor_t primary)
{
- processor_meta_t pmeta = primary->processor_meta;
-
- if (pmeta == PROCESSOR_META_NULL) {
- pmeta = kalloc(sizeof (*pmeta));
-
- queue_init(&pmeta->idle_queue);
-
- pmeta->primary = primary;
+ assert(processor->processor_primary == primary || processor->processor_primary == processor);
+ /* Re-adjust the primary pointer for this (possibly) secondary processor */
+ processor->processor_primary = primary;
+
+ assert(primary->processor_secondary == NULL || primary->processor_secondary == processor);
+ if (primary != processor) {
+ /*
+ * Link primary to secondary; this assumes a 2-way SMT model.
+ * We'll need to move to a queue if any future architecture
+ * requires otherwise.
+ */
+ assert(processor->processor_secondary == NULL);
+ primary->processor_secondary = processor;
+ /* Mark both processors as SMT siblings */
+ primary->is_SMT = TRUE;
+ processor->is_SMT = TRUE;
}
-
- processor->processor_meta = pmeta;
}
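
/*
 * Usage sketch (hypothetical bring-up code, not part of this change):
 * for a 2-way SMT core with logical processors procA and procB, the
 * platform topology layer would call
 *
 *	processor_set_primary(procA, procA);
 *	processor_set_primary(procB, procA);
 *
 * after which procB->processor_primary == procA,
 * procA->processor_secondary == procB, and both have is_SMT == TRUE.
 */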
processor_set_t
pset_create(
pset_node_t node)
{
+#if defined(CONFIG_SCHED_MULTIQ)
+ /* multiq scheduler is not currently compatible with multiple psets */
+ if (sched_groups_enabled)
+ return processor_pset(master_processor);
+#endif /* defined(CONFIG_SCHED_MULTIQ) */
+
processor_set_t *prev, pset = kalloc(sizeof (*pset));
if (pset != PROCESSOR_SET_NULL) {
processor_set_t pset,
pset_node_t node)
{
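+ /*
+ * As in processor_init() above, the bootstrap pset (pset0) exists
+ * before sched_init() runs, so its scheduler state is initialized
+ * there rather than here.
+ */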
+ if (pset != &pset0) {
+ /* Scheduler state deferred until sched_init() */
+ SCHED(pset_init)(pset);
+ }
+
queue_init(&pset->active_queue);
queue_init(&pset->idle_queue);
- pset->processor_count = 0;
- pset->low_pri = pset->low_count = PROCESSOR_NULL;
+ queue_init(&pset->idle_secondary_queue);
+ pset->online_processor_count = 0;
+ pset->cpu_set_low = pset->cpu_set_hi = 0;
+ pset->cpu_set_count = 0;
+ pset->pending_AST_cpu_mask = 0;
pset_lock_init(pset);
pset->pset_self = IP_NULL;
pset->pset_name_self = IP_NULL;
case PROCESSOR_CPU_LOAD_INFO:
{
- register processor_cpu_load_info_t cpu_load_info;
-
- if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
+ processor_cpu_load_info_t cpu_load_info;
+ timer_t idle_state;
+ uint64_t idle_time_snapshot1, idle_time_snapshot2;
+ uint64_t idle_time_tstamp1, idle_time_tstamp2;
+
+ /*
+ * We capture the accumulated idle time twice over
+ * the course of this function, as well as the timestamps
+ * when each was last updated. Since these are all read
+ * using non-atomic, racy mechanisms, the most we can
+ * infer is whether the values are stable. timer_grab()
+ * is the only function that can be used reliably on
+ * another processor's per-processor data.
+ */
+
+ if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
return (KERN_FAILURE);
- cpu_load_info = (processor_cpu_load_info_t) info;
- cpu_load_info->cpu_ticks[CPU_STATE_USER] =
+ cpu_load_info = (processor_cpu_load_info_t) info;
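+ /*
+ * When precise user/kernel time accounting is disabled, user and
+ * system time are not maintained separately, so the combined
+ * total is reported as user time and system time reads as zero.
+ */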
+ if (precise_user_kernel_time) {
+ cpu_load_info->cpu_ticks[CPU_STATE_USER] =
(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, user_state)) / hz_tick_interval);
- cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] =
+ cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] =
(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, system_state)) / hz_tick_interval);
- cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
- (uint32_t)(timer_grab(&PROCESSOR_DATA(processor, idle_state)) / hz_tick_interval);
+ } else {
+ uint64_t tval = timer_grab(&PROCESSOR_DATA(processor, user_state)) +
+ timer_grab(&PROCESSOR_DATA(processor, system_state));
+
+ cpu_load_info->cpu_ticks[CPU_STATE_USER] = (uint32_t)(tval / hz_tick_interval);
+ cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
+ }
+
+ idle_state = &PROCESSOR_DATA(processor, idle_state);
+ idle_time_snapshot1 = timer_grab(idle_state);
+ idle_time_tstamp1 = idle_state->tstamp;
+
+ /*
+ * Idle processors are not continually updating their
+ * per-processor idle timer, so it may be extremely out
+ * of date, causing tools such as top(1) to over-represent
+ * non-idle time between two measurement intervals. If we
+ * are non-idle, or have evidence that the timer is being
+ * updated concurrently, we consider its value up-to-date.
+ */
+ if (PROCESSOR_DATA(processor, current_state) != idle_state) {
+ cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
+ (uint32_t)(idle_time_snapshot1 / hz_tick_interval);
+ } else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
+ (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))) {
+ /* Idle timer is being updated concurrently; the second snapshot is good enough */
+ cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
+ (uint32_t)(idle_time_snapshot2 / hz_tick_interval);
+ } else {
+ /*
+ * Idle timer may be very stale. Fortunately we have established
+ * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging.
+ */
+ idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;
+
+ cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
+ (uint32_t)(idle_time_snapshot1 / hz_tick_interval);
+ }
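+
+ /*
+ * Recap of the three cases above: a non-idle processor's idle
+ * timer is already up-to-date; a timer that moved between the two
+ * reads is being updated concurrently, so the second snapshot is
+ * current; otherwise the snapshot is provably stable and we
+ * extrapolate from its last update to now.
+ */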
+
cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;
*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
{
int state;
+ if (processor == PROCESSOR_NULL)
+ return(KERN_INVALID_ARGUMENT);
+
state = processor->state;
if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
return(KERN_FAILURE);
{
return KERN_FAILURE;
}
-#elif defined(CONFIG_EMBEDDED)
-kern_return_t
-processor_set_threads(
- __unused processor_set_t pset,
- __unused thread_array_t *thread_list,
- __unused mach_msg_type_number_t *count)
-{
- return KERN_NOT_SUPPORTED;
-}
#else
kern_return_t
processor_set_threads(