#include <security/mac_mach_internal.h>
+
/*
* Exported interface
*/
queue_head_t tasks;
queue_head_t terminated_tasks; /* To be used ONLY for stackshot. */
+queue_head_t corpse_tasks;
int tasks_count;
int terminated_tasks_count;
queue_head_t threads;
int threads_count;
decl_lck_mtx_data(,tasks_threads_lock)
+decl_lck_mtx_data(,tasks_corpse_lock)
processor_t processor_list;
unsigned int processor_count;
int master_cpu = 0;
boolean_t sched_stats_active = FALSE;
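+/* Maps a cpu_id to its processor_t; entries are filled in by processor_init(). */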
+processor_t processor_array[MAX_SCHED_CPUS] = { 0 };
+
+
+
void
processor_bootstrap(void)
{
queue_init(&tasks);
queue_init(&terminated_tasks);
queue_init(&threads);
+ queue_init(&corpse_tasks);
simple_lock_init(&processor_list_lock, 0);
spl_t s;
if (processor != master_processor) {
- /* Scheduler state deferred until sched_init() */
+ /* Scheduler state for master_processor initialized in sched_init() */
SCHED(processor_init)(processor);
}
processor->state = PROCESSOR_OFF_LINE;
processor->active_thread = processor->next_thread = processor->idle_thread = THREAD_NULL;
processor->processor_set = pset;
- processor->current_pri = MINPRI;
- processor->current_thmode = TH_MODE_NONE;
+ processor_state_update_idle(processor);
+ processor->starting_pri = MINPRI;
processor->cpu_id = cpu_id;
timer_call_setup(&processor->quantum_timer, thread_quantum_expire, processor);
processor->quantum_end = UINT64_MAX;
processor->processor_primary = processor; /* no SMT relationship known at this point */
processor->processor_secondary = NULL;
processor->is_SMT = FALSE;
- processor->is_recommended = TRUE;
+ processor->is_recommended = (pset->recommended_bitmask & (1ULL << cpu_id)) ? TRUE : FALSE;
processor->processor_self = IP_NULL;
processor_data_init(processor);
processor->processor_list = NULL;
s = splsched();
pset_lock(pset);
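+ /* Record this CPU in the pset's bitmask of member CPUs */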
+ bit_set(pset->cpu_bitmask, cpu_id);
if (pset->cpu_set_count++ == 0)
pset->cpu_set_low = pset->cpu_set_hi = cpu_id;
else {
processor_list_tail->processor_list = processor;
processor_list_tail = processor;
processor_count++;
+ assert(cpu_id < MAX_SCHED_CPUS);
+ processor_array[cpu_id] = processor;
simple_unlock(&processor_list_lock);
}
return (processor->processor_set);
}
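+/*
+ * The processor_state_update_*() helpers keep a processor's cached scheduling
+ * state (current_pri, current_sfi_class, current_recommended_pset_type,
+ * current_perfctl_class) in sync. This variant resets it to idle defaults.
+ */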
+void
+processor_state_update_idle(processor_t processor)
+{
+ processor->current_pri = IDLEPRI;
+ processor->current_sfi_class = SFI_CLASS_KERNEL;
+ processor->current_recommended_pset_type = PSET_SMP;
+ processor->current_perfctl_class = PERFCONTROL_CLASS_IDLE;
+}
+
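+/* Derive the cached scheduling state from the thread chosen to run on the processor. */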
+void
+processor_state_update_from_thread(processor_t processor, thread_t thread)
+{
+ processor->current_pri = thread->sched_pri;
+ processor->current_sfi_class = thread->sfi_class;
+ processor->current_recommended_pset_type = recommended_pset_type(thread);
+ processor->current_perfctl_class = thread_get_perfcontrol_class(thread);
+}
+
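+/* Set the cached scheduling state from explicitly supplied values. */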
+void
+processor_state_update_explicit(processor_t processor, int pri, sfi_class_id_t sfi_class,
+ pset_cluster_type_t pset_type, perfcontrol_class_t perfctl_class)
+{
+ processor->current_pri = pri;
+ processor->current_sfi_class = sfi_class;
+ processor->current_recommended_pset_type = pset_type;
+ processor->current_perfctl_class = perfctl_class;
+}
+
pset_node_t
pset_node_root(void)
{
return (pset);
}
+/*
+ * Find processor set in specified node with specified cluster_id.
+ * Returns default_pset if not found.
+ */
+processor_set_t
+pset_find(
+ uint32_t cluster_id,
+ processor_set_t default_pset)
+{
+ simple_lock(&pset_node_lock);
+ pset_node_t node = &pset_node0;
+ processor_set_t pset = NULL;
+
+ do {
+ pset = node->psets;
+ while (pset != NULL) {
+ if (pset->pset_cluster_id == cluster_id)
+ break;
+ pset = pset->pset_list;
+ }
+ } while ((node = node->node_list) != NULL && pset == NULL);
+ simple_unlock(&pset_node_lock);
+ if (pset == NULL)
+ return default_pset;
+ return (pset);
+}
+
/*
* Initialize the given processor_set structure.
*/
pset_node_t node)
{
if (pset != &pset0) {
- /* Scheduler state deferred until sched_init() */
+ /* Scheduler state for pset0 initialized in sched_init() */
SCHED(pset_init)(pset);
+ SCHED(rt_init)(pset);
}
queue_init(&pset->active_queue);
queue_init(&pset->idle_queue);
queue_init(&pset->idle_secondary_queue);
+ queue_init(&pset->unused_queue);
pset->online_processor_count = 0;
+ pset->active_processor_count = 0;
+ pset->load_average = 0;
pset->cpu_set_low = pset->cpu_set_hi = 0;
pset->cpu_set_count = 0;
+ pset->cpu_bitmask = 0;
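+ /* All CPUs start out recommended; processor_init() checks this bitmask per cpu_id */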
+ pset->recommended_bitmask = ~0ULL;
pset->pending_AST_cpu_mask = 0;
#if defined(CONFIG_SCHED_DEFERRED_AST)
pset->pending_deferred_AST_cpu_mask = 0;
#endif
+ pset->pending_spill_cpu_mask = 0;
pset_lock_init(pset);
pset->pset_self = IP_NULL;
pset->pset_name_self = IP_NULL;
pset->pset_list = PROCESSOR_SET_NULL;
pset->node = node;
+ pset->pset_cluster_type = PSET_SMP;
+ pset->pset_cluster_id = 0;
}
kern_return_t
kern_return_t
processor_info(
- register processor_t processor,
+ processor_t processor,
processor_flavor_t flavor,
host_t *host,
processor_info_t info,
mach_msg_type_number_t *count)
{
- register int cpu_id, state;
+ int cpu_id, state;
kern_return_t result;
if (processor == PROCESSOR_NULL)
case PROCESSOR_BASIC_INFO:
{
- register processor_basic_info_t basic_info;
+ processor_basic_info_t basic_info;
if (*count < PROCESSOR_BASIC_INFO_COUNT)
return (KERN_FAILURE);
return(KERN_INVALID_ARGUMENT);
if (flavor == PROCESSOR_SET_BASIC_INFO) {
- register processor_set_basic_info_t basic_info;
+ processor_set_basic_info_t basic_info;
if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
return(KERN_FAILURE);
return(KERN_SUCCESS);
}
else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
- register policy_timeshare_base_t ts_base;
+ policy_timeshare_base_t ts_base;
if (*count < POLICY_TIMESHARE_BASE_COUNT)
return(KERN_FAILURE);
return(KERN_SUCCESS);
}
else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
- register policy_fifo_base_t fifo_base;
+ policy_fifo_base_t fifo_base;
if (*count < POLICY_FIFO_BASE_COUNT)
return(KERN_FAILURE);
return(KERN_SUCCESS);
}
else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
- register policy_rr_base_t rr_base;
+ policy_rr_base_t rr_base;
if (*count < POLICY_RR_BASE_COUNT)
return(KERN_FAILURE);
return(KERN_SUCCESS);
}
else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
- register policy_timeshare_limit_t ts_limit;
+ policy_timeshare_limit_t ts_limit;
if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
return(KERN_FAILURE);
return(KERN_SUCCESS);
}
else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
- register policy_fifo_limit_t fifo_limit;
+ policy_fifo_limit_t fifo_limit;
if (*count < POLICY_FIFO_LIMIT_COUNT)
return(KERN_FAILURE);
return(KERN_SUCCESS);
}
else if (flavor == PROCESSOR_SET_RR_LIMITS) {
- register policy_rr_limit_t rr_limit;
+ policy_rr_limit_t rr_limit;
if (*count < POLICY_RR_LIMIT_COUNT)
return(KERN_FAILURE);
return(KERN_SUCCESS);
}
else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
- register int *enabled;
+ int *enabled;
if (*count < (sizeof(*enabled)/sizeof(int)))
return(KERN_FAILURE);
return (KERN_INVALID_PROCESSOR_SET);
if (flavor == PROCESSOR_SET_LOAD_INFO) {
- register processor_set_load_info_t load_info;
+ processor_set_load_info_t load_info;
if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
return(KERN_FAILURE);
mach_msg_type_number_t *count,
int type)
{
- unsigned int i , j, used;
+ unsigned int i;
task_t task;
thread_t thread;
lck_mtx_unlock(&tasks_threads_lock);
#if CONFIG_MACF
+ unsigned int j, used;
+
/* for each task, make sure we are allowed to examine it */
for (i = used = 0; i < actual_tasks; i++) {
if (mac_task_check_expose_task(task_list[i])) {
{
return KERN_FAILURE;
}
+#elif defined(CONFIG_EMBEDDED)
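+/* Enumerating a pset's threads is not supported in this configuration. */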
+kern_return_t
+processor_set_threads(
+ __unused processor_set_t pset,
+ __unused thread_array_t *thread_list,
+ __unused mach_msg_type_number_t *count)
+{
+ return KERN_NOT_SUPPORTED;
+}
#else
kern_return_t
processor_set_threads(
{
return;
}
+
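+/* Default policy: every thread is recommended the SMP pset type. */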
+pset_cluster_type_t
+recommended_pset_type(thread_t thread)
+{
+ (void)thread;
+ return PSET_SMP;
+}