/*
- * Copyright (c) 2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2007-2010 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <kern/kalloc.h>
#include <i386/cpu_affinity.h>
#include <i386/cpu_topology.h>
-#include <i386/cpu_data.h>
#include <i386/cpu_threads.h>
#include <i386/machine_cpu.h>
-#include <i386/machine_routines.h>
#include <i386/lock.h>
-#include <i386/mp.h>
-
-//#define TOPO_DEBUG 1
-#if TOPO_DEBUG
-#define DBG(x...) kprintf("DBG: " x)
-#else
-#define DBG(x...)
-#endif
+#include <i386/cpu_data.h>
+#include <i386/lapic.h>
+#include <i386/machine_routines.h>
__private_extern__ void qsort(
void * array,
static int x86_affinity_count = 0;
/*
- * cpu_topology_start() is called after all processors have been registered
+ * cpu_topology_sort() is called after all processors have been registered
* but before any non-boot processor is started.
* We establish canonical logical processor numbering - logical cpus must be
* contiguous, zero-based and assigned in physical (local apic id) order.
* This step is required because the discovery/registration order is
* non-deterministic - cores are registered in differing orders over boots.
* Enforcing canonical numbering simplifies identification
* of processors - in particular, for stopping/starting from CHUD.
*/
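+/*
+ * For example, with hypothetical local apic ids: if cpus register as
+ * cpu 0: apic 0, cpu 1: apic 6, cpu 2: apic 2, cpu 3: apic 4,
+ * the sort renumbers them canonically to
+ * cpu 0: apic 0, cpu 1: apic 2, cpu 2: apic 4, cpu 3: apic 6.
+ */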
void
-cpu_topology_start(void)
+cpu_topology_sort(int ncpus)
{
- int ncpus = machine_info.max_cpus;
int i;
boolean_t istate;
+ processor_t lprim = NULL;
assert(machine_info.physical_cpu == 1);
assert(machine_info.logical_cpu == 1);
assert(master_cpu == 0);
assert(cpu_number() == 0);
assert(cpu_datap(0)->cpu_number == 0);
-
+
/* Lights out for this */
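+ /* (interrupts stay off while the cpu_data entries are re-ordered) */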
istate = ml_set_interrupts_enabled(FALSE);
-#ifdef TOPO_DEBUG
- DBG("cpu_topology_start() %d cpu%s registered\n",
- ncpus, (ncpus > 1) ? "s" : "");
- for (i = 0; i < ncpus; i++) {
- cpu_data_t *cpup = cpu_datap(i);
- DBG("\tcpu_data[%d]:0x%08x local apic 0x%x\n",
- i, (unsigned) cpup, cpup->cpu_phys_number);
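+ /* Debug output is now gated at run time by topo_dbg instead of a compile-time switch */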
+ if (topo_dbg) {
+ TOPO_DBG("cpu_topology_start() %d cpu%s registered\n",
+ ncpus, (ncpus > 1) ? "s" : "");
+ for (i = 0; i < ncpus; i++) {
+ cpu_data_t *cpup = cpu_datap(i);
+ TOPO_DBG("\tcpu_data[%d]:%p local apic 0x%x\n",
+ i, (void *) cpup, cpup->cpu_phys_number);
+ }
}
-#endif
+
/*
* Re-order the cpu_data_ptr vector sorting by physical id.
* Skip the boot processor, it's required to be correct.
sizeof(cpu_data_t *),
lapicid_cmp);
}
-#ifdef TOPO_DEBUG
- DBG("cpu_topology_start() after sorting:\n");
- for (i = 0; i < ncpus; i++) {
- cpu_data_t *cpup = cpu_datap(i);
- DBG("\tcpu_data[%d]:0x%08x local apic 0x%x\n",
- i, (unsigned) cpup, cpup->cpu_phys_number);
+ if (topo_dbg) {
+ TOPO_DBG("cpu_topology_start() after sorting:\n");
+ for (i = 0; i < ncpus; i++) {
+ cpu_data_t *cpup = cpu_datap(i);
+ TOPO_DBG("\tcpu_data[%d]:%p local apic 0x%x\n",
+ i, (void *) cpup, cpup->cpu_phys_number);
+ }
}
-#endif
/*
- * Fix up logical numbers and reset the map kept by the lapic code.
+ * Finalize logical numbers and map kept by the lapic code.
*/
- for (i = 1; i < ncpus; i++) {
+ for (i = 0; i < ncpus; i++) {
cpu_data_t *cpup = cpu_datap(i);
if (cpup->cpu_number != i) {
- kprintf("cpu_datap(%d):0x%08x local apic id 0x%x "
+ kprintf("cpu_datap(%d):%p local apic id 0x%x "
"remapped from %d\n",
- i, (unsigned) cpup, cpup->cpu_phys_number,
+ i, (void *) cpup, cpup->cpu_phys_number,
cpup->cpu_number);
}
cpup->cpu_number = i;
- cpup->lcpu.lnum = i;
lapic_cpu_map(cpup->cpu_phys_number, i);
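+ /* propagate the canonical numbering into this cpu's lcpu topology */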
+ x86_set_logical_topology(&cpup->lcpu, cpup->cpu_phys_number, i);
}
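+ /* Sanity-check the assembled topology before any sibling cpus are started */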
+ x86_validate_topology();
+
ml_set_interrupts_enabled(istate);
+ TOPO_DBG("cpu_topology_start() LLC is L%d\n", topoParms.LLCDepth + 1);
+
+ /*
+ * Let the CPU Power Management know that the topology is stable.
+ */
+ topoParms.stable = TRUE;
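+ /* Set up the initial CPU power management state */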
+ pmCPUStateInit();
/*
* Iterate over all logical cpus finding or creating the affinity set
- * for their L2 cache. Each affinity set possesses a processor set
+ * for their LLC (last-level cache). Each affinity set possesses a processor set
* into which each logical processor is added.
*/
- DBG("cpu_topology_start() creating affinity sets:\n");
+ TOPO_DBG("cpu_topology_start() creating affinity sets:\n");
for (i = 0; i < ncpus; i++) {
cpu_data_t *cpup = cpu_datap(i);
x86_lcpu_t *lcpup = cpu_to_lcpu(i);
- x86_cpu_cache_t *L2_cachep;
+ x86_cpu_cache_t *LLC_cachep;
x86_affinity_set_t *aset;
- L2_cachep = lcpup->caches[CPU_CACHE_DEPTH_L2];
- assert(L2_cachep->type == CPU_CACHE_TYPE_UNIF);
- aset = find_cache_affinity(L2_cachep);
+ LLC_cachep = lcpup->caches[topoParms.LLCDepth];
+ assert(LLC_cachep->type == CPU_CACHE_TYPE_UNIF);
+ aset = find_cache_affinity(LLC_cachep);
if (aset == NULL) {
aset = (x86_affinity_set_t *) kalloc(sizeof(*aset));
if (aset == NULL)
panic("cpu_topology_start() failed aset alloc");
aset->next = x86_affinities;
x86_affinities = aset;
aset->num = x86_affinity_count++;
- aset->cache = L2_cachep;
+ aset->cache = LLC_cachep;
aset->pset = (i == master_cpu) ?
processor_pset(master_processor) :
pset_create(pset_node_root());
if (aset->pset == PROCESSOR_SET_NULL)
panic("cpu_topology_start: pset_create");
- DBG("\tnew set %p(%d) pset %p for cache %p\n",
+ TOPO_DBG("\tnew set %p(%d) pset %p for cache %p\n",
aset, aset->num, aset->pset, aset->cache);
}
- DBG("\tprocessor_init set %p(%d) lcpup %p(%d) cpu %p processor %p\n",
- aset, aset->num, lcpup, lcpup->lnum, cpup, cpup->cpu_processor);
+ TOPO_DBG("\tprocessor_init set %p(%d) lcpup %p(%d) cpu %p processor %p\n",
+ aset, aset->num, lcpup, lcpup->cpu_num, cpup, cpup->cpu_processor);
if (i != master_cpu)
processor_init(cpup->cpu_processor, i, aset->pset);
+
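+ /*
+ * The first logical cpu (lnum 0) of a multi-threaded core is its
+ * primary; each sibling is associated with that primary via
+ * processor_meta_init().
+ */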
+ if (lcpup->core->num_lcpus > 1) {
+ if (lcpup->lnum == 0)
+ lprim = cpup->cpu_processor;
+
+ processor_meta_init(cpup->cpu_processor, lprim);
+ }
}
+}
- /*
- * Finally we start all processors (including the boot cpu we're
- * running on).
- */
- DBG("cpu_topology_start() processor_start():\n");
- for (i = 0; i < ncpus; i++) {
- DBG("\tlcpu %d\n", cpu_datap(i)->cpu_number);
+/* We got a request to start a CPU. Check that this CPU is within the
+ * configured max cpu limit before actually starting it.
+ */
+kern_return_t
+cpu_topology_start_cpu(int cpunum)
+{
+ int ncpus = machine_info.max_cpus;
+ int i = cpunum;
+
+ /* Decide whether to start a CPU, and actually start it */
+ TOPO_DBG("cpu_topology_start() processor_start():\n");
+ if (i < ncpus) {
+ TOPO_DBG("\tlcpu %d\n", cpu_datap(i)->cpu_number);
processor_start(cpu_datap(i)->cpu_processor);
+ return KERN_SUCCESS;
}
+ else
+ return KERN_FAILURE;
}
static int
cpu_data_t *cpu_x = *((cpu_data_t **)(uintptr_t)x);
cpu_data_t *cpu_y = *((cpu_data_t **)(uintptr_t)y);
- DBG("lapicid_cmp(%p,%p) (%d,%d)\n",
+ TOPO_DBG("lapicid_cmp(%p,%p) (%d,%d)\n",
x, y, cpu_x->cpu_phys_number, cpu_y->cpu_phys_number);
if (cpu_x->cpu_phys_number < cpu_y->cpu_phys_number)
return -1;
if (affinity_num == aset->num)
break;
}
- return (aset == NULL) ? PROCESSOR_SET_NULL : aset->pset;
-
+ return (aset == NULL) ? PROCESSOR_SET_NULL : aset->pset;
}
uint64_t
if (level == 0) {
return machine_info.max_mem;
- } else if ( 1 <= level && level <= 3) {
+ } else if ( 1 <= level && level <= MAX_CACHE_DEPTH) {
cachep = current_cpu_datap()->lcpu.caches[level-1];
return cachep ? cachep->cache_size : 0;
} else {
if (level == 0) {
return machine_info.max_cpus;
- } else if ( 1 <= level && level <= 3) {
+ } else if ( 1 <= level && level <= MAX_CACHE_DEPTH) {
cachep = current_cpu_datap()->lcpu.caches[level-1];
return cachep ? cachep->nlcpus : 0;
} else {