X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/d1ecb069dfe24481e4a83f44cb5217a2b06746d7..5c9f46613a83ebfc29a5b1f099448259e96a98f0:/osfmk/i386/cpu_topology.c

diff --git a/osfmk/i386/cpu_topology.c b/osfmk/i386/cpu_topology.c
index 24c4f5c81..e079e6e38 100644
--- a/osfmk/i386/cpu_topology.c
+++ b/osfmk/i386/cpu_topology.c
@@ -33,20 +33,11 @@
 #include 
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
 
-//#define TOPO_DEBUG 1
-#if TOPO_DEBUG
-#define DBG(x...) kprintf("DBG: " x)
-#else
-#define DBG(x...)
-#endif
-void debug_topology_print(void);
-void validate_topology(void);
-
 __private_extern__ void qsort(
 	void * array,
 	size_t nmembers,
@@ -59,6 +50,19 @@ static x86_affinity_set_t *find_cache_affinity(x86_cpu_cache_t *L2_cachep);
 x86_affinity_set_t	*x86_affinities = NULL;
 static int		x86_affinity_count = 0;
 
+extern cpu_data_t cpshadows[];
+/* Re-sort double-mapped CPU data shadows after topology discovery sorts the
+ * primary CPU data structures by physical/APIC CPU ID.
+ */
+static void cpu_shadow_sort(int ncpus) {
+	for (int i = 0; i < ncpus; i++) {
+		cpu_data_t	*cpup = cpu_datap(i);
+		ptrdiff_t	coff = cpup - cpu_datap(0);
+
+		cpup->cd_shadow = &cpshadows[coff];
+	}
+}
+
 /*
  * cpu_topology_sort() is called after all processors have been registered
  * but before any non-boot processor is started.
@@ -85,15 +89,16 @@ cpu_topology_sort(int ncpus)
 	/* Lights out for this */
 	istate = ml_set_interrupts_enabled(FALSE);
 
-#ifdef TOPO_DEBUG
-	DBG("cpu_topology_start() %d cpu%s registered\n",
-		ncpus, (ncpus > 1) ? "s" : "");
-	for (i = 0; i < ncpus; i++) {
-		cpu_data_t	*cpup = cpu_datap(i);
-		DBG("\tcpu_data[%d]:0x%08x local apic 0x%x\n",
-			i, (unsigned) cpup, cpup->cpu_phys_number);
+	if (topo_dbg) {
+		TOPO_DBG("cpu_topology_start() %d cpu%s registered\n",
+			ncpus, (ncpus > 1) ? "s" : "");
+		for (i = 0; i < ncpus; i++) {
+			cpu_data_t	*cpup = cpu_datap(i);
+			TOPO_DBG("\tcpu_data[%d]:%p local apic 0x%x\n",
+				i, (void *) cpup, cpup->cpu_phys_number);
+		}
 	}
-#endif
+
 	/*
 	 * Re-order the cpu_data_ptr vector sorting by physical id.
 	 * Skip the boot processor, it's required to be correct.
@@ -104,27 +109,20 @@
 			sizeof(cpu_data_t *),
 			lapicid_cmp);
 	}
 
-#ifdef TOPO_DEBUG
-	DBG("cpu_topology_start() after sorting:\n");
-	for (i = 0; i < ncpus; i++) {
-		cpu_data_t	*cpup = cpu_datap(i);
-		DBG("\tcpu_data[%d]:0x%08x local apic 0x%x\n",
-			i, (unsigned) cpup, cpup->cpu_phys_number);
+	if (topo_dbg) {
+		TOPO_DBG("cpu_topology_start() after sorting:\n");
+		for (i = 0; i < ncpus; i++) {
+			cpu_data_t	*cpup = cpu_datap(i);
+			TOPO_DBG("\tcpu_data[%d]:%p local apic 0x%x\n",
+				i, (void *) cpup, cpup->cpu_phys_number);
+		}
 	}
-#endif
 	/*
-	 * Fix up logical numbers and reset the map kept by the lapic code.
+	 * Finalize logical numbers and map kept by the lapic code.
 	 */
-	for (i = 1; i < ncpus; i++) {
+	for (i = 0; i < ncpus; i++) {
 		cpu_data_t	*cpup = cpu_datap(i);
-		x86_core_t	*core = cpup->lcpu.core;
-		x86_die_t	*die = cpup->lcpu.die;
-		x86_pkg_t	*pkg = cpup->lcpu.package;
-
-		assert(core != NULL);
-		assert(die != NULL);
-		assert(pkg != NULL);
 
 		if (cpup->cpu_number != i) {
 			kprintf("cpu_datap(%d):%p local apic id 0x%x "
@@ -133,22 +131,15 @@ cpu_topology_sort(int ncpus)
 				cpup->cpu_number);
 		}
 		cpup->cpu_number = i;
-		cpup->lcpu.cpu_num = i;
-		cpup->lcpu.pnum = cpup->cpu_phys_number;
 		lapic_cpu_map(cpup->cpu_phys_number, i);
-		x86_set_lcpu_numbers(&cpup->lcpu);
-		x86_set_core_numbers(core, &cpup->lcpu);
-		x86_set_die_numbers(die, &cpup->lcpu);
-		x86_set_pkg_numbers(pkg, &cpup->lcpu);
+		x86_set_logical_topology(&cpup->lcpu, cpup->cpu_phys_number, i);
 	}
 
-#if TOPO_DEBUG
-	debug_topology_print();
-#endif /* TOPO_DEBUG */
-	validate_topology();
+	cpu_shadow_sort(ncpus);
+	x86_validate_topology();
 
 	ml_set_interrupts_enabled(istate);
-	DBG("cpu_topology_start() LLC is L%d\n", topoParms.LLCDepth + 1);
+	TOPO_DBG("cpu_topology_start() LLC is L%d\n", topoParms.LLCDepth + 1);
 
 	/*
 	 * Let the CPU Power Management know that the topology is stable.
@@ -161,7 +152,7 @@
 	 * for their LLC cache. Each affinity set possesses a processor set
 	 * into which each logical processor is added.
 	 */
-	DBG("cpu_topology_start() creating affinity sets:\n");
+	TOPO_DBG("cpu_topology_start() creating affinity sets:\n");
 	for (i = 0; i < ncpus; i++) {
 		cpu_data_t	*cpup = cpu_datap(i);
 		x86_lcpu_t	*lcpup = cpu_to_lcpu(i);
@@ -184,11 +175,11 @@
 				pset_create(pset_node_root());
 			if (aset->pset == PROCESSOR_SET_NULL)
 				panic("cpu_topology_start: pset_create");
-			DBG("\tnew set %p(%d) pset %p for cache %p\n",
+			TOPO_DBG("\tnew set %p(%d) pset %p for cache %p\n",
 				aset, aset->num, aset->pset, aset->cache);
 		}
 
-		DBG("\tprocessor_init set %p(%d) lcpup %p(%d) cpu %p processor %p\n",
+		TOPO_DBG("\tprocessor_init set %p(%d) lcpup %p(%d) cpu %p processor %p\n",
 			aset, aset->num, lcpup, lcpup->cpu_num, cpup, cpup->cpu_processor);
 
 		if (i != master_cpu)
@@ -198,7 +189,7 @@
 			if (lcpup->lnum == 0)
 				lprim = cpup->cpu_processor;
 
-			processor_meta_init(cpup->cpu_processor, lprim);
+			processor_set_primary(cpup->cpu_processor, lprim);
 		}
 	}
 }
@@ -213,10 +204,10 @@ cpu_topology_start_cpu( int cpunum )
 	int		i = cpunum;
 
 	/* Decide whether to start a CPU, and actually start it */
-	DBG("cpu_topology_start() processor_start():\n");
+	TOPO_DBG("cpu_topology_start() processor_start():\n");
 	if( i < ncpus)
 	{
-		DBG("\tlcpu %d\n", cpu_datap(i)->cpu_number);
+		TOPO_DBG("\tlcpu %d\n", cpu_datap(i)->cpu_number);
 		processor_start(cpu_datap(i)->cpu_processor);
 		return KERN_SUCCESS;
 	}
@@ -230,7 +221,7 @@ lapicid_cmp(const void *x, const void *y)
 	cpu_data_t	*cpu_x = *((cpu_data_t **)(uintptr_t)x);
 	cpu_data_t	*cpu_y = *((cpu_data_t **)(uintptr_t)y);
 
-	DBG("lapicid_cmp(%p,%p) (%d,%d)\n",
+	TOPO_DBG("lapicid_cmp(%p,%p) (%d,%d)\n",
 		x, y, cpu_x->cpu_phys_number, cpu_y->cpu_phys_number);
 	if (cpu_x->cpu_phys_number < cpu_y->cpu_phys_number)
 		return -1;
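
Note on the pattern above (not part of the patch): cpu_topology_sort() qsort()s the cpu_data_ptr vector by local APIC id while leaving the boot processor in slot 0, then assigns contiguous logical numbers in the new order, and the new cpu_shadow_sort() re-points each cd_shadow using the element's offset from cpu_datap(0), which (as the pointer arithmetic implies) assumes the per-CPU data structures are elements of one contiguous array. The standalone user-space sketch below illustrates the same sort, renumber, and re-point sequence; the cpu_rec type, the shadows[] array, the sample APIC ids, apicid_cmp() and main() are invented for illustration and are not kernel APIs.

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

/* Toy stand-ins for cpu_data_t and its double-mapped shadow entry. */
typedef struct cpu_rec {
	int		cpu_number;		/* logical id, rewritten after the sort */
	int		cpu_phys_number;	/* local APIC id, fixed by hardware */
	struct cpu_rec	*cd_shadow;		/* shadow entry at the same array offset */
} cpu_rec;

#define NCPUS 4
static cpu_rec	cpus[NCPUS] = { {0, 0}, {1, 6}, {2, 2}, {3, 4} };
static cpu_rec	shadows[NCPUS];		/* plays the role of cpshadows[] */
static cpu_rec	*cpu_ptr[NCPUS] = { &cpus[0], &cpus[1], &cpus[2], &cpus[3] };

/* Same ordering rule as lapicid_cmp(): compare physical (APIC) ids. */
static int
apicid_cmp(const void *x, const void *y)
{
	const cpu_rec *a = *(cpu_rec *const *)x;
	const cpu_rec *b = *(cpu_rec *const *)y;

	if (a->cpu_phys_number < b->cpu_phys_number)
		return -1;
	if (a->cpu_phys_number == b->cpu_phys_number)
		return 0;
	return 1;
}

int
main(void)
{
	int i;

	/* Skip slot 0: the boot processor is required to stay first. */
	qsort((void *) &cpu_ptr[1], NCPUS - 1, sizeof(cpu_rec *), apicid_cmp);

	for (i = 0; i < NCPUS; i++) {
		cpu_rec		*cpup = cpu_ptr[i];
		ptrdiff_t	coff = cpup - &cpus[0];	/* index into the backing array */

		cpup->cpu_number = i;			/* contiguous logical numbering */
		cpup->cd_shadow = &shadows[coff];	/* keep the shadow aligned, as cpu_shadow_sort() does */
		printf("logical %d -> apic 0x%x (array slot %td)\n",
			i, cpup->cpu_phys_number, coff);
	}
	return 0;
}

With the sample ids above the sorted order becomes APIC 0x0, 0x2, 0x4, 0x6, so logical cpu numbers 0..3 end up contiguous and in APIC-id order while each record's shadow pointer still refers to the slot with the same array index, mirroring what the patch does for the kernel's cpu_data/cpshadows pair.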