+ if (cpuid_features() & CPUID_FEATURE_HTT)
+ return (cpuid_info()->cpuid_logical_per_package /
+ cpuid_info()->cpuid_cores_per_package) > 1;
+ else
+ return FALSE;
+}
+
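+/*
+ * Initialize the logical CPU (lcpu) structure embedded in the
+ * per-CPU data for the given CPU number.  The physical CPU number
+ * is taken from the per-CPU data; master and primary status are
+ * derived from it below.
+ */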
+static void
+x86_lcpu_init(int cpu)
+{
+ cpu_data_t *cpup;
+ x86_lcpu_t *lcpu;
+ int i;
+
+ cpup = cpu_datap(cpu);
+
+ lcpu = &cpup->lcpu;
+ lcpu->lcpu = lcpu;
+ lcpu->cpu = cpup;
+ lcpu->next = NULL;
+ lcpu->core = NULL;
+ lcpu->lnum = cpu;
+ lcpu->pnum = cpup->cpu_phys_number;
+ lcpu->halted = FALSE; /* XXX is this correct? */
+ lcpu->idle = FALSE; /* XXX is this correct? */
+ for (i = 0; i < MAX_CACHE_DEPTH; i += 1)
+ lcpu->caches[i] = NULL;
+
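+	/*
+	 * The master lcpu is the boot processor; a primary lcpu is the
+	 * first logical CPU within its package.  This assumes physical
+	 * CPU numbers are assigned contiguously within a package.
+	 */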
+ lcpu->master = (lcpu->pnum == (unsigned int) master_cpu);
+ lcpu->primary = (lcpu->pnum % cpuid_info()->cpuid_logical_per_package) == 0;
+}
+
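+/*
+ * Allocate an x86_core_t, reusing an entry from the free list when
+ * one is available.  The core's physical and logical numbers are
+ * derived from the CPU's physical number and the CPUID topology
+ * counts.
+ */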
+static x86_core_t *
+x86_core_alloc(int cpu)
+{
+ x86_core_t *core;
+ cpu_data_t *cpup;
+ uint32_t cpu_in_pkg;
+ uint32_t lcpus_per_core;
+
+ cpup = cpu_datap(cpu);
+
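+	/*
+	 * Reuse a core structure from the free list if one is available.
+	 * The topology lock is a simple (spin) lock, so it is dropped
+	 * before calling kalloc(), which may block.
+	 */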
+ simple_lock(&x86_topo_lock);
+ if (free_cores != NULL) {
+ core = free_cores;
+ free_cores = core->next;
+ core->next = NULL;
+ simple_unlock(&x86_topo_lock);
+ } else {
+ simple_unlock(&x86_topo_lock);
+ core = kalloc(sizeof(x86_core_t));
+ if (core == NULL)
+ panic("x86_core_alloc() kalloc of x86_core_t failed!\n");
+ }
+
+ bzero((void *) core, sizeof(x86_core_t));
+
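+	/*
+	 * cpu_in_pkg is this CPU's index within its package;
+	 * lcpus_per_core is the number of logical CPUs (hyperthreads)
+	 * sharing each core.
+	 */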
+ cpu_in_pkg = cpu % cpuid_info()->cpuid_logical_per_package;
+ lcpus_per_core = cpuid_info()->cpuid_logical_per_package /
+ cpuid_info()->cpuid_cores_per_package;
+
+ core->pcore_num = cpup->cpu_phys_number / lcpus_per_core;
+ core->lcore_num = core->pcore_num % cpuid_info()->cpuid_cores_per_package;
+
+ core->flags = X86CORE_FL_PRESENT | X86CORE_FL_READY;
+
+ return(core);
+}
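+/*
+ * Return a core structure to the free list for later reuse.
+ */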
+
+static void
+x86_core_free(x86_core_t *core)
+{
+ simple_lock(&x86_topo_lock);
+ core->next = free_cores;
+ free_cores = core;
+ simple_unlock(&x86_topo_lock);
+}
+
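+/*
+ * Walk the global package list and return the package containing
+ * the given CPU, or NULL if no such package has been allocated yet.
+ */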
+static x86_pkg_t *
+x86_package_find(int cpu)
+{
+ x86_pkg_t *pkg;
+ cpu_data_t *cpup;
+ uint32_t pkg_num;
+
+ cpup = cpu_datap(cpu);
+
+ pkg_num = cpup->cpu_phys_number / cpuid_info()->cpuid_logical_per_package;
+
+ pkg = x86_pkgs;
+ while (pkg != NULL) {
+ if (pkg->ppkg_num == pkg_num)
+ break;
+ pkg = pkg->next;
+ }
+
+ return(pkg);
+}
+
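+/*
+ * Find the core containing the given CPU by locating its package
+ * and then matching on the physical core number.
+ */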
+static x86_core_t *
+x86_core_find(int cpu)
+{
+ x86_core_t *core;
+ x86_pkg_t *pkg;
+ cpu_data_t *cpup;
+ uint32_t core_num;
+
+ cpup = cpu_datap(cpu);
+
+ core_num = cpup->cpu_phys_number
+ / (cpuid_info()->cpuid_logical_per_package
+ / cpuid_info()->cpuid_cores_per_package);
+
+ pkg = x86_package_find(cpu);
+ if (pkg == NULL)
+ return(NULL);
+
+ core = pkg->cores;
+ while (core != NULL) {
+ if (core->pcore_num == core_num)
+ break;
+ core = core->next;
+ }
+
+ return(core);
+}
+
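+/*
+ * Add a logical CPU to a core.  This also folds the CPU's cache
+ * descriptions into the topology: a cache shared with a logical CPU
+ * already present in this package reuses the existing
+ * x86_cpu_cache_t rather than adding a duplicate.
+ */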
+static void
+x86_core_add_lcpu(x86_core_t *core, x86_lcpu_t *lcpu)
+{
+ x86_cpu_cache_t *list;
+ x86_cpu_cache_t *cur;
+ x86_core_t *cur_core;
+ x86_lcpu_t *cur_lcpu;
+ boolean_t found;
+ int level;
+ int i;
+ uint32_t cpu_mask;
+
+ assert(core != NULL);
+ assert(lcpu != NULL);
+
+ /*
+ * Add the cache data to the topology.
+ */
+ list = x86_cache_list();
+
+ simple_lock(&x86_topo_lock);
+
+ while (list != NULL) {
+ /*
+ * Remove the cache from the front of the list.
+ */
+ cur = list;
+ list = cur->next;
+ cur->next = NULL;
+ level = cur->level - 1;
+
+	/*
+	 * If the cache isn't shared, there is nothing to match against:
+	 * jump straight to the code below that records it as the first
+	 * instance of this cache seen for this logical CPU.
+	 */
+ if (cur->nlcpus == 1) {
+ goto found_first;
+ }
+
+	/*
+	 * We assume that all caches at a given level have the same
+	 * sharing, so if this logical CPU already has a cache recorded
+	 * at this level, skip the search and discard the duplicate.
+	 */
+ if (lcpu->caches[level] != NULL) {
+ x86_cache_free(cur);
+ continue;
+ }
+
+ /*
+ * This is a shared cache, so we have to figure out if
+ * this is the first time we've seen this cache. We do
+ * this by searching through the package and seeing if
+ * a related core is already describing this cache.
+ *
+	 * NOTE: This assumes that CPUs whose physical IDs are equal
+	 * modulo <# of CPUs sharing the cache> do indeed share the cache.
+ */
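+	/*
+	 * For example, a cache shared by 2 logical CPUs (nlcpus == 2)
+	 * masks off the low bit of the physical number, so pnum 4 and
+	 * pnum 5 both yield cpu_mask == 4 and are treated as sharing
+	 * the cache.  The mask arithmetic assumes nlcpus is a power of 2.
+	 */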
+ cpu_mask = lcpu->pnum & ~(cur->nlcpus - 1);
+ cur_core = core->package->cores;
+ found = FALSE;
+
+ while (cur_core != NULL && !found) {
+ cur_lcpu = cur_core->lcpus;
+ while (cur_lcpu != NULL && !found) {
+ if ((cur_lcpu->pnum & ~(cur->nlcpus - 1)) == cpu_mask) {
+ lcpu->caches[level] = cur_lcpu->caches[level];
+ found = TRUE;
+ x86_cache_free(cur);
+
+			/*
+			 * Record this logical CPU in the first free slot
+			 * of the cache's array of sharing CPUs.
+			 */
+ cur = lcpu->caches[level];
+ for (i = 0; i < cur->nlcpus; i += 1) {
+ if (cur->cpus[i] == NULL) {
+ cur->cpus[i] = lcpu;
+ break;
+ }
+ }
+ }
+ cur_lcpu = cur_lcpu->next;
+ }
+
+ cur_core = cur_core->next;
+ }
+
+ if (!found) {
+found_first:
+ cur->next = lcpu->caches[level];
+ lcpu->caches[level] = cur;
+ cur->cpus[0] = lcpu;
+ }
+ }
+
+	/*
+	 * Add the logical CPU to the core.
+	 */
+ lcpu->next = core->lcpus;
+ lcpu->core = core;
+ core->lcpus = lcpu;
+ core->num_lcpus += 1;
+
+ simple_unlock(&x86_topo_lock);
+}
+
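+/*
+ * Allocate an x86_pkg_t, following the same free-list-then-kalloc
+ * pattern as x86_core_alloc().  The physical package number is the
+ * CPU's physical number divided by the logical CPU count per
+ * package; the logical package number is assigned sequentially.
+ */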
+static x86_pkg_t *
+x86_package_alloc(int cpu)
+{
+ x86_pkg_t *pkg;
+ cpu_data_t *cpup;
+
+ cpup = cpu_datap(cpu);
+
+ simple_lock(&x86_topo_lock);
+ if (free_pkgs != NULL) {
+ pkg = free_pkgs;
+ free_pkgs = pkg->next;
+ pkg->next = NULL;
+ simple_unlock(&x86_topo_lock);
+ } else {
+ simple_unlock(&x86_topo_lock);
+ pkg = kalloc(sizeof(x86_pkg_t));
+ if (pkg == NULL)
+ panic("x86_package_alloc() kalloc of x86_pkg_t failed!\n");
+ }
+
+ bzero((void *) pkg, sizeof(x86_pkg_t));
+
+ pkg->ppkg_num = cpup->cpu_phys_number
+ / cpuid_info()->cpuid_logical_per_package;
+
+ pkg->lpkg_num = num_packages;
+ atomic_incl((long *) &num_packages, 1);
+
+ pkg->flags = X86PKG_FL_PRESENT | X86PKG_FL_READY;
+ return(pkg);
+}
+
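+/*
+ * Return a package structure to the free list and drop the global
+ * package count.
+ */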
+static void
+x86_package_free(x86_pkg_t *pkg)
+{
+ simple_lock(&x86_topo_lock);
+ pkg->next = free_pkgs;
+ free_pkgs = pkg;
+ atomic_decl((long *) &num_packages, 1);
+ simple_unlock(&x86_topo_lock);
+}
+
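+/*
+ * Link a core into a package's core list.  No locking is done here,
+ * so the caller is presumably responsible for serialization.
+ */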
+static void
+x86_package_add_core(x86_pkg_t *pkg, x86_core_t *core)
+{
+ assert(pkg != NULL);
+ assert(core != NULL);
+
+ core->next = pkg->cores;
+ core->package = pkg;
+ pkg->cores = core;
+ pkg->num_cores += 1;