/*
 * Copyright (c) 2003-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <vm/vm_kern.h>
#include <kern/kalloc.h>
#include <kern/etimer.h>
#include <mach/machine.h>
#include <i386/cpu_threads.h>
#include <i386/cpuid.h>
#include <i386/machine_cpu.h>
#include <i386/pmCPU.h>
#include <i386/lock.h>

#define DIVISOR_GUARD(denom)                            \
    if ((denom) == 0) {                                 \
        kprintf("%s: %d Zero divisor: " #denom,         \
            __FILE__, __LINE__);                        \
    }
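
/*
 * Note that DIVISOR_GUARD() only reports a zero divisor via kprintf();
 * it does not skip the division that follows, so a zero denominator
 * still indicates a bug in the CPUID-derived parameters rather than a
 * condition this macro recovers from.
 */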

static void debug_topology_print(void);

boolean_t topo_dbg = FALSE;

x86_pkg_t *x86_pkgs = NULL;
uint32_t num_Lx_caches[MAX_CACHE_DEPTH] = { 0 };

static x86_pkg_t *free_pkgs = NULL;
static x86_die_t *free_dies = NULL;
static x86_core_t *free_cores = NULL;
static uint32_t num_dies = 0;

static x86_cpu_cache_t *x86_caches = NULL;
static uint32_t num_caches = 0;

static boolean_t topoParmsInited = FALSE;
x86_topology_parameters_t topoParms;

decl_simple_lock_data(, x86_topo_lock);

static struct cpu_cache {
    int level; int type;
} cpu_caches[LCACHE_MAX] = {
    [L1D] = { 1, CPU_CACHE_TYPE_DATA },
    [L1I] = { 1, CPU_CACHE_TYPE_INST },
    [L2U] = { 2, CPU_CACHE_TYPE_UNIF },
    [L3U] = { 3, CPU_CACHE_TYPE_UNIF },
};

static boolean_t
cpu_is_hyperthreaded(void)
{
    i386_cpu_info_t *cpuinfo;

    cpuinfo = cpuid_info();
    return(cpuinfo->thread_count > cpuinfo->core_count);
}

static x86_cpu_cache_t *
x86_cache_alloc(void)
{
    x86_cpu_cache_t *cache;
    int i;

    if (x86_caches == NULL) {
        cache = kalloc(sizeof(x86_cpu_cache_t) + (MAX_CPUS * sizeof(x86_lcpu_t *)));
        if (cache == NULL)
            return(NULL);
    } else {
        cache = x86_caches;
        x86_caches = cache->next;
        cache->next = NULL;
    }

    bzero(cache, sizeof(x86_cpu_cache_t));
    cache->next = NULL;
    cache->maxcpus = MAX_CPUS;
    for (i = 0; i < cache->maxcpus; i += 1) {
        cache->cpus[i] = NULL;
    }

    num_caches += 1;

    return(cache);
}
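
/*
 * The allocation above reserves room for MAX_CPUS lcpu pointers beyond
 * the end of x86_cpu_cache_t so that cache->cpus[] can record every
 * logical CPU that ends up sharing the cache. Descriptors are recycled
 * through the x86_caches freelist rather than returned to kalloc.
 */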

static void
x86_LLC_info(void)
{
    int cache_level = 0;
    uint32_t nCPUsSharing = 1;
    i386_cpu_info_t *cpuinfo;
    struct cpu_cache *cachep;
    int i;

    cpuinfo = cpuid_info();

    for (i = 0, cachep = &cpu_caches[0]; i < LCACHE_MAX; i++, cachep++) {

        if (cachep->type == 0 || cpuid_info()->cache_size[i] == 0)
            continue;

        /*
         * Only worry about it if it's a deeper level than
         * what we've seen before.
         */
        if (cachep->level > cache_level) {
            cache_level = cachep->level;

            /*
             * Save the number of CPUs sharing this cache.
             */
            nCPUsSharing = cpuinfo->cache_sharing[i];
        }
    }

    /*
     * Make the level of the LLC be 0 based.
     */
    topoParms.LLCDepth = cache_level - 1;

    /*
     * nCPUsSharing represents the *maximum* number of cores or
     * logical CPUs sharing the cache.
     */
    topoParms.maxSharingLLC = nCPUsSharing;

    topoParms.nCoresSharingLLC = nCPUsSharing / (cpuinfo->thread_count /
                                                 cpuinfo->core_count);
    topoParms.nLCPUsSharingLLC = nCPUsSharing;

    /*
     * nCPUsSharing may not be the number of *active* cores or
     * threads that are sharing the cache.
     */
    if (nCPUsSharing > cpuinfo->core_count)
        topoParms.nCoresSharingLLC = cpuinfo->core_count;
    if (nCPUsSharing > cpuinfo->thread_count)
        topoParms.nLCPUsSharingLLC = cpuinfo->thread_count;
}
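
/*
 * A worked illustration (hypothetical part, not from a real CPUID dump):
 * with 4 cores / 8 threads and an L3 shared by all 8 logical CPUs, the
 * loop above leaves cache_level = 3 and nCPUsSharing = 8, giving
 * LLCDepth = 2 (0-based), maxSharingLLC = 8,
 * nCoresSharingLLC = 8 / (8 / 4) = 4, and nLCPUsSharingLLC = 8.
 */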

static void
initTopoParms(void)
{
    i386_cpu_info_t *cpuinfo;

    topoParms.stable = FALSE;

    cpuinfo = cpuid_info();

    PE_parse_boot_argn("-topo", &topo_dbg, sizeof(topo_dbg));

    /*
     * We need to start with getting the LLC information correct.
     */
    x86_LLC_info();

    /*
     * Compute the number of threads (logical CPUs) per core.
     */
    DIVISOR_GUARD(cpuinfo->core_count);
    topoParms.nLThreadsPerCore = cpuinfo->thread_count / cpuinfo->core_count;
    DIVISOR_GUARD(cpuinfo->cpuid_cores_per_package);
    topoParms.nPThreadsPerCore = cpuinfo->cpuid_logical_per_package / cpuinfo->cpuid_cores_per_package;

    /*
     * Compute the number of dies per package.
     */
    DIVISOR_GUARD(topoParms.nCoresSharingLLC);
    topoParms.nLDiesPerPackage = cpuinfo->core_count / topoParms.nCoresSharingLLC;
    DIVISOR_GUARD(topoParms.nPThreadsPerCore);
    DIVISOR_GUARD(topoParms.maxSharingLLC / topoParms.nPThreadsPerCore);
    topoParms.nPDiesPerPackage = cpuinfo->cpuid_cores_per_package / (topoParms.maxSharingLLC / topoParms.nPThreadsPerCore);

    /*
     * Compute the number of cores per die.
     */
    topoParms.nLCoresPerDie = topoParms.nCoresSharingLLC;
    topoParms.nPCoresPerDie = (topoParms.maxSharingLLC / topoParms.nPThreadsPerCore);

    /*
     * Compute the number of threads per die.
     */
    topoParms.nLThreadsPerDie = topoParms.nLThreadsPerCore * topoParms.nLCoresPerDie;
    topoParms.nPThreadsPerDie = topoParms.nPThreadsPerCore * topoParms.nPCoresPerDie;

    /*
     * Compute the number of cores per package.
     */
    topoParms.nLCoresPerPackage = topoParms.nLCoresPerDie * topoParms.nLDiesPerPackage;
    topoParms.nPCoresPerPackage = topoParms.nPCoresPerDie * topoParms.nPDiesPerPackage;

    /*
     * Compute the number of threads per package.
     */
    topoParms.nLThreadsPerPackage = topoParms.nLThreadsPerCore * topoParms.nLCoresPerPackage;
    topoParms.nPThreadsPerPackage = topoParms.nPThreadsPerCore * topoParms.nPCoresPerPackage;

    TOPO_DBG("\nCache Topology Parameters:\n");
    TOPO_DBG("\tLLC Depth: %d\n", topoParms.LLCDepth);
    TOPO_DBG("\tCores Sharing LLC: %d\n", topoParms.nCoresSharingLLC);
    TOPO_DBG("\tThreads Sharing LLC: %d\n", topoParms.nLCPUsSharingLLC);
    TOPO_DBG("\tmax Sharing of LLC: %d\n", topoParms.maxSharingLLC);

    TOPO_DBG("\nLogical Topology Parameters:\n");
    TOPO_DBG("\tThreads per Core: %d\n", topoParms.nLThreadsPerCore);
    TOPO_DBG("\tCores per Die: %d\n", topoParms.nLCoresPerDie);
    TOPO_DBG("\tThreads per Die: %d\n", topoParms.nLThreadsPerDie);
    TOPO_DBG("\tDies per Package: %d\n", topoParms.nLDiesPerPackage);
    TOPO_DBG("\tCores per Package: %d\n", topoParms.nLCoresPerPackage);
    TOPO_DBG("\tThreads per Package: %d\n", topoParms.nLThreadsPerPackage);

    TOPO_DBG("\nPhysical Topology Parameters:\n");
    TOPO_DBG("\tThreads per Core: %d\n", topoParms.nPThreadsPerCore);
    TOPO_DBG("\tCores per Die: %d\n", topoParms.nPCoresPerDie);
    TOPO_DBG("\tThreads per Die: %d\n", topoParms.nPThreadsPerDie);
    TOPO_DBG("\tDies per Package: %d\n", topoParms.nPDiesPerPackage);
    TOPO_DBG("\tCores per Package: %d\n", topoParms.nPCoresPerPackage);
    TOPO_DBG("\tThreads per Package: %d\n", topoParms.nPThreadsPerPackage);

    topoParmsInited = TRUE;
}
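
/*
 * Continuing the hypothetical 4-core/8-thread example above:
 * nLThreadsPerCore = 8 / 4 = 2, nLDiesPerPackage = 4 / 4 = 1, and
 * nLCoresPerDie = 4, so the logical package works out to 1 die, 4 cores,
 * and 8 threads. With cpuid_cores_per_package = 4 and
 * cpuid_logical_per_package = 8, the physical parameters come out the same.
 */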

static void
x86_cache_free(x86_cpu_cache_t *cache)
{
    num_caches -= 1;
    if (cache->level > 0 && cache->level <= MAX_CACHE_DEPTH)
        num_Lx_caches[cache->level - 1] -= 1;
    cache->next = x86_caches;
    x86_caches = cache;
}

/*
 * This returns a list of cache structures that represent the
 * caches for a CPU. Some of the structures may have to be
 * "freed" if they are actually shared between CPUs.
 */
static x86_cpu_cache_t *
x86_cache_list(void)
{
    x86_cpu_cache_t *root = NULL;
    x86_cpu_cache_t *cur = NULL;
    x86_cpu_cache_t *last = NULL;
    struct cpu_cache *cachep;
    int i;

    /*
     * Cons up a list driven not by CPUID leaf 4 (deterministic cache params)
     * but by the table above plus parameters already cracked from cpuid...
     */
    for (i = 0, cachep = &cpu_caches[0]; i < LCACHE_MAX; i++, cachep++) {

        if (cachep->type == 0 || cpuid_info()->cache_size[i] == 0)
            continue;

        cur = x86_cache_alloc();
        if (cur == NULL)
            break;

        cur->type = cachep->type;
        cur->level = cachep->level;
        cur->nlcpus = 0;
        cur->maxcpus = cpuid_info()->cache_sharing[i];
        cur->partitions = cpuid_info()->cache_partitions[i];
        cur->cache_size = cpuid_info()->cache_size[i];
        cur->line_size = cpuid_info()->cache_linesize;

        if (last == NULL) {
            root = cur;
            last = cur;
        } else {
            last->next = cur;
            last = cur;
        }
        num_Lx_caches[cur->level - 1] += 1;
    }
    return root;
}

static x86_cpu_cache_t *
x86_match_cache(x86_cpu_cache_t *list, x86_cpu_cache_t *matcher)
{
    x86_cpu_cache_t *cur_cache;

    cur_cache = list;
    while (cur_cache != NULL) {
        if (cur_cache->maxcpus == matcher->maxcpus
            && cur_cache->type == matcher->type
            && cur_cache->level == matcher->level
            && cur_cache->partitions == matcher->partitions
            && cur_cache->line_size == matcher->line_size
            && cur_cache->cache_size == matcher->cache_size)
            break;

        cur_cache = cur_cache->next;
    }

    return(cur_cache);
}

static void
x86_lcpu_init(int cpu)
{
    cpu_data_t *cpup;
    x86_lcpu_t *lcpu;
    int i;

    cpup = cpu_datap(cpu);

    lcpu = &cpup->lcpu;
    lcpu->lcpu = lcpu;
    lcpu->cpu = cpup;
    lcpu->next_in_core = NULL;
    lcpu->next_in_die = NULL;
    lcpu->next_in_pkg = NULL;
    lcpu->core = NULL;
    lcpu->die = NULL;
    lcpu->package = NULL;
    lcpu->cpu_num = cpu;
    lcpu->lnum = cpu;
    lcpu->pnum = cpup->cpu_phys_number;
    lcpu->state = LCPU_OFF;
    for (i = 0; i < MAX_CACHE_DEPTH; i += 1)
        lcpu->caches[i] = NULL;

    lcpu->master = (lcpu->cpu_num == (unsigned int) master_cpu);
    lcpu->primary = (lcpu->pnum % topoParms.nPThreadsPerPackage) == 0;
}
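
/*
 * In other words: "master" marks the boot processor and "primary" marks
 * the first logical CPU of each package. With nPThreadsPerPackage = 8,
 * for example, the lcpus with physical numbers 0, 8, 16, ... are the
 * primaries of their respective packages.
 */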

static x86_core_t *
x86_core_alloc(int cpu)
{
    x86_core_t *core;
    cpu_data_t *cpup;

    cpup = cpu_datap(cpu);

    simple_lock(&x86_topo_lock);
    if (free_cores != NULL) {
        core = free_cores;
        free_cores = core->next_in_die;
        core->next_in_die = NULL;
        simple_unlock(&x86_topo_lock);
    } else {
        simple_unlock(&x86_topo_lock);
        core = kalloc(sizeof(x86_core_t));
        if (core == NULL)
            panic("x86_core_alloc() kalloc of x86_core_t failed!\n");
    }

    bzero((void *) core, sizeof(x86_core_t));

    core->pcore_num = cpup->cpu_phys_number / topoParms.nPThreadsPerCore;
    core->lcore_num = core->pcore_num % topoParms.nPCoresPerPackage;

    core->flags = X86CORE_FL_PRESENT | X86CORE_FL_READY
                  | X86CORE_FL_HALTED | X86CORE_FL_IDLE;

    return(core);
}

static void
x86_core_free(x86_core_t *core)
{
    simple_lock(&x86_topo_lock);
    core->next_in_die = free_cores;
    free_cores = core;
    simple_unlock(&x86_topo_lock);
}

static x86_pkg_t *
x86_package_find(int cpu)
{
    x86_pkg_t *pkg;
    cpu_data_t *cpup;
    uint32_t pkg_num;

    cpup = cpu_datap(cpu);

    pkg_num = cpup->cpu_phys_number / topoParms.nPThreadsPerPackage;

    pkg = x86_pkgs;
    while (pkg != NULL) {
        if (pkg->ppkg_num == pkg_num)
            break;
        pkg = pkg->next;
    }

    return(pkg);
}

static x86_die_t *
x86_die_find(int cpu)
{
    x86_die_t *die;
    x86_pkg_t *pkg;
    cpu_data_t *cpup;
    uint32_t die_num;

    cpup = cpu_datap(cpu);

    die_num = cpup->cpu_phys_number / topoParms.nPThreadsPerDie;

    pkg = x86_package_find(cpu);
    if (pkg == NULL)
        return(NULL);

    die = pkg->dies;
    while (die != NULL) {
        if (die->pdie_num == die_num)
            break;
        die = die->next_in_pkg;
    }

    return(die);
}

static x86_core_t *
x86_core_find(int cpu)
{
    x86_core_t *core;
    x86_die_t *die;
    cpu_data_t *cpup;
    uint32_t core_num;

    cpup = cpu_datap(cpu);

    core_num = cpup->cpu_phys_number / topoParms.nPThreadsPerCore;

    die = x86_die_find(cpu);
    if (die == NULL)
        return(NULL);

    core = die->cores;
    while (core != NULL) {
        if (core->pcore_num == core_num)
            break;
        core = core->next_in_die;
    }

    return(core);
}

void
x86_set_lcpu_numbers(x86_lcpu_t *lcpu)
{
    lcpu->lnum = lcpu->cpu_num % topoParms.nLThreadsPerCore;
}

void
x86_set_core_numbers(x86_core_t *core, x86_lcpu_t *lcpu)
{
    core->pcore_num = lcpu->cpu_num / topoParms.nLThreadsPerCore;
    core->lcore_num = core->pcore_num % topoParms.nLCoresPerDie;
}

void
x86_set_die_numbers(x86_die_t *die, x86_lcpu_t *lcpu)
{
    die->pdie_num = lcpu->cpu_num / (topoParms.nLThreadsPerCore * topoParms.nLCoresPerDie);
    die->ldie_num = die->pdie_num % topoParms.nLDiesPerPackage;
}

void
x86_set_pkg_numbers(x86_pkg_t *pkg, x86_lcpu_t *lcpu)
{
    pkg->ppkg_num = lcpu->cpu_num / topoParms.nLThreadsPerPackage;
    pkg->lpkg_num = pkg->ppkg_num;
}

static x86_die_t *
x86_die_alloc(int cpu)
{
    x86_die_t *die;
    cpu_data_t *cpup;

    cpup = cpu_datap(cpu);

    simple_lock(&x86_topo_lock);
    if (free_dies != NULL) {
        die = free_dies;
        free_dies = die->next_in_pkg;
        die->next_in_pkg = NULL;
        simple_unlock(&x86_topo_lock);
    } else {
        simple_unlock(&x86_topo_lock);
        die = kalloc(sizeof(x86_die_t));
        if (die == NULL)
            panic("x86_die_alloc() kalloc of x86_die_t failed!\n");
    }

    bzero((void *) die, sizeof(x86_die_t));

    die->pdie_num = cpup->cpu_phys_number / topoParms.nPThreadsPerDie;

    die->ldie_num = num_dies;
    atomic_incl((long *) &num_dies, 1);

    die->flags = X86DIE_FL_PRESENT;
    return(die);
}

static void
x86_die_free(x86_die_t *die)
{
    simple_lock(&x86_topo_lock);
    die->next_in_pkg = free_dies;
    free_dies = die;
    atomic_decl((long *) &num_dies, 1);
    simple_unlock(&x86_topo_lock);
}

static x86_pkg_t *
x86_package_alloc(int cpu)
{
    x86_pkg_t *pkg;
    cpu_data_t *cpup;

    cpup = cpu_datap(cpu);

    simple_lock(&x86_topo_lock);
    if (free_pkgs != NULL) {
        pkg = free_pkgs;
        free_pkgs = pkg->next;
        pkg->next = NULL;
        simple_unlock(&x86_topo_lock);
    } else {
        simple_unlock(&x86_topo_lock);
        pkg = kalloc(sizeof(x86_pkg_t));
        if (pkg == NULL)
            panic("x86_package_alloc() kalloc of x86_pkg_t failed!\n");
    }

    bzero((void *) pkg, sizeof(x86_pkg_t));

    pkg->ppkg_num = cpup->cpu_phys_number / topoParms.nPThreadsPerPackage;

    pkg->lpkg_num = topoParms.nPackages;
    atomic_incl((long *) &topoParms.nPackages, 1);

    pkg->flags = X86PKG_FL_PRESENT | X86PKG_FL_READY;
    return(pkg);
}

static void
x86_package_free(x86_pkg_t *pkg)
{
    simple_lock(&x86_topo_lock);
    pkg->next = free_pkgs;
    free_pkgs = pkg;
    atomic_decl((long *) &topoParms.nPackages, 1);
    simple_unlock(&x86_topo_lock);
}

static void
x86_cache_add_lcpu(x86_cpu_cache_t *cache, x86_lcpu_t *lcpu)
{
    x86_cpu_cache_t *cur_cache;
    int i;

    /*
     * Put the new CPU into the list of the cache.
     */
    cur_cache = lcpu->caches[cache->level - 1];
    lcpu->caches[cache->level - 1] = cache;
    cache->next = cur_cache;
    cache->nlcpus += 1;
    for (i = 0; i < cache->nlcpus; i += 1) {
        if (cache->cpus[i] == NULL) {
            cache->cpus[i] = lcpu;
            break;
        }
    }
}

static void
x86_lcpu_add_caches(x86_lcpu_t *lcpu)
{
    x86_cpu_cache_t *list;
    x86_cpu_cache_t *cur;
    x86_cpu_cache_t *match;
    x86_die_t *die;
    x86_core_t *core;
    x86_lcpu_t *cur_lcpu;
    uint32_t level;
    boolean_t found = FALSE;

    assert(lcpu != NULL);

    /*
     * Add the cache data to the topology.
     */
    list = x86_cache_list();

    simple_lock(&x86_topo_lock);

    while (list != NULL) {
        /*
         * Remove the cache from the front of the list.
         */
        cur = list;
        list = cur->next;
        cur->next = NULL;
        level = cur->level - 1;

        /*
         * If the cache isn't shared then just put it where it
         * belongs.
         */
        if (cur->maxcpus == 1) {
            x86_cache_add_lcpu(cur, lcpu);
            continue;
        }

        /*
         * We'll assume that all of the caches at a particular level
         * have the same sharing.  So if we have a cache already at
         * this level, we'll just skip looking for the match.
         */
        if (lcpu->caches[level] != NULL) {
            x86_cache_free(cur);
            continue;
        }

        /*
         * This is a shared cache, so we have to figure out if
         * this is the first time we've seen this cache.  We do
         * this by searching through the topology and seeing if
         * this cache is already described.
         *
         * Assume that L{LLC-1} are all at the core level and that
         * LLC is shared at the die level.
         */
        if (level < topoParms.LLCDepth) {
            /*
             * Shared at the core.
             */
            core = lcpu->core;
            cur_lcpu = core->lcpus;
            while (cur_lcpu != NULL) {
                /*
                 * Skip ourselves.
                 */
                if (cur_lcpu == lcpu) {
                    cur_lcpu = cur_lcpu->next_in_core;
                    continue;
                }

                /*
                 * If there's a cache on this logical CPU,
                 * then use that one.
                 */
                match = x86_match_cache(cur_lcpu->caches[level], cur);
                if (match != NULL) {
                    x86_cache_free(cur);
                    x86_cache_add_lcpu(match, lcpu);
                    found = TRUE;
                    break;
                }

                cur_lcpu = cur_lcpu->next_in_core;
            }
        } else {
            /*
             * Shared at the die.
             */
            die = lcpu->die;
            cur_lcpu = die->lcpus;
            while (cur_lcpu != NULL) {
                /*
                 * Skip ourselves.
                 */
                if (cur_lcpu == lcpu) {
                    cur_lcpu = cur_lcpu->next_in_die;
                    continue;
                }

                /*
                 * If there's a cache on this logical CPU,
                 * then use that one.
                 */
                match = x86_match_cache(cur_lcpu->caches[level], cur);
                if (match != NULL) {
                    x86_cache_free(cur);
                    x86_cache_add_lcpu(match, lcpu);
                    found = TRUE;
                    break;
                }

                cur_lcpu = cur_lcpu->next_in_die;
            }
        }

        /*
         * If a shared cache wasn't found, then this logical CPU must
         * be the first one encountered.
         */
        if (!found) {
            x86_cache_add_lcpu(cur, lcpu);
        }
    }

    simple_unlock(&x86_topo_lock);
}
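
/*
 * To make the search above concrete, assuming per-core L1/L2 caches and a
 * die-wide L3 (the hypothetical part used earlier): the second thread of
 * a core finds matching L1/L2 descriptors already attached to its sibling,
 * frees its own copies, and joins the sibling's; for the L3
 * (level == LLCDepth), every thread after the first on the die joins the
 * single shared descriptor the same way.
 */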

static void
x86_core_add_lcpu(x86_core_t *core, x86_lcpu_t *lcpu)
{
    assert(core != NULL);
    assert(lcpu != NULL);

    simple_lock(&x86_topo_lock);

    lcpu->next_in_core = core->lcpus;
    lcpu->core = core;
    core->lcpus = lcpu;
    core->num_lcpus += 1;
    simple_unlock(&x86_topo_lock);
}

static void
x86_die_add_lcpu(x86_die_t *die, x86_lcpu_t *lcpu)
{
    assert(die != NULL);
    assert(lcpu != NULL);

    lcpu->next_in_die = die->lcpus;
    lcpu->die = die;
    die->lcpus = lcpu;
}

static void
x86_die_add_core(x86_die_t *die, x86_core_t *core)
{
    assert(die != NULL);
    assert(core != NULL);

    core->next_in_die = die->cores;
    core->die = die;
    die->cores = core;
    die->num_cores += 1;
}

static void
x86_package_add_lcpu(x86_pkg_t *pkg, x86_lcpu_t *lcpu)
{
    assert(pkg != NULL);
    assert(lcpu != NULL);

    lcpu->next_in_pkg = pkg->lcpus;
    lcpu->package = pkg;
    pkg->lcpus = lcpu;
}

static void
x86_package_add_core(x86_pkg_t *pkg, x86_core_t *core)
{
    assert(pkg != NULL);
    assert(core != NULL);

    core->next_in_pkg = pkg->cores;
    core->package = pkg;
    pkg->cores = core;
}

static void
x86_package_add_die(x86_pkg_t *pkg, x86_die_t *die)
{
    assert(pkg != NULL);
    assert(die != NULL);

    die->next_in_pkg = pkg->dies;
    die->package = pkg;
    pkg->dies = die;
    pkg->num_dies += 1;
}

void *
cpu_thread_alloc(int cpu)
{
    x86_core_t *core = NULL;
    x86_die_t *die = NULL;
    x86_pkg_t *pkg = NULL;
    cpu_data_t *cpup;
    uint32_t phys_cpu;

    /*
     * Only allow one to manipulate the topology at a time.
     */
    simple_lock(&x86_topo_lock);

    /*
     * Make sure all of the topology parameters have been initialized.
     */
    if (!topoParmsInited)
        initTopoParms();

    cpup = cpu_datap(cpu);

    phys_cpu = cpup->cpu_phys_number;

    x86_lcpu_init(cpu);

    /*
     * Assume that all cpus have the same features.
     */
    if (cpu_is_hyperthreaded()) {
        cpup->cpu_threadtype = CPU_THREADTYPE_INTEL_HTT;
    } else {
        cpup->cpu_threadtype = CPU_THREADTYPE_NONE;
    }

    /*
     * Get the package that the logical CPU is in.
     */
    do {
        pkg = x86_package_find(cpu);
        if (pkg == NULL) {
            /*
             * Package structure hasn't been created yet, do it now.
             */
            simple_unlock(&x86_topo_lock);
            pkg = x86_package_alloc(cpu);
            simple_lock(&x86_topo_lock);
            if (x86_package_find(cpu) != NULL) {
                x86_package_free(pkg);
                continue;
            }

            /*
             * Add the new package to the global list of packages.
             */
            pkg->next = x86_pkgs;
            x86_pkgs = pkg;
        }
    } while (pkg == NULL);
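
    /*
     * Note the pattern above, repeated below for dies and cores: the topo
     * lock is dropped around the allocation because kalloc() may block.
     * After the lock is retaken, the lookup is repeated; if another CPU
     * created the structure in the interim, the fresh allocation is
     * returned to its freelist and the loop retries.
     */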

    /*
     * Get the die that the logical CPU is in.
     */
    do {
        die = x86_die_find(cpu);
        if (die == NULL) {
            /*
             * Die structure hasn't been created yet, do it now.
             */
            simple_unlock(&x86_topo_lock);
            die = x86_die_alloc(cpu);
            simple_lock(&x86_topo_lock);
            if (x86_die_find(cpu) != NULL) {
                x86_die_free(die);
                continue;
            }

            /*
             * Add the die to the package.
             */
            x86_package_add_die(pkg, die);
        }
    } while (die == NULL);

    /*
     * Get the core for this logical CPU.
     */
    do {
        core = x86_core_find(cpu);
        if (core == NULL) {
            /*
             * Allocate the core structure now.
             */
            simple_unlock(&x86_topo_lock);
            core = x86_core_alloc(cpu);
            simple_lock(&x86_topo_lock);
            if (x86_core_find(cpu) != NULL) {
                x86_core_free(core);
                continue;
            }

            /*
             * Add the core to the die & package.
             */
            x86_die_add_core(die, core);
            x86_package_add_core(pkg, core);
            machine_info.physical_cpu_max += 1;
        }
    } while (core == NULL);

    /*
     * Done manipulating the topology, so others can get in.
     */
    machine_info.logical_cpu_max += 1;
    simple_unlock(&x86_topo_lock);

    /*
     * Add the logical CPU to the other topology structures.
     */
    x86_core_add_lcpu(core, &cpup->lcpu);
    x86_die_add_lcpu(core->die, &cpup->lcpu);
    x86_package_add_lcpu(core->package, &cpup->lcpu);
    x86_lcpu_add_caches(&cpup->lcpu);

    return (void *) core;
}

void
cpu_thread_init(void)
{
    int my_cpu = get_cpu_number();
    cpu_data_t *cpup = current_cpu_datap();
    x86_core_t *core;
    static int initialized = 0;

    /*
     * If we're the boot processor, we do all of the initialization of
     * the CPU topology infrastructure.
     */
    if (my_cpu == master_cpu && !initialized) {
        simple_lock_init(&x86_topo_lock, 0);

        /*
         * Put this logical CPU into the physical CPU topology.
         */
        cpup->lcpu.core = cpu_thread_alloc(my_cpu);

        initialized = 1;
    }

    /*
     * Do the CPU accounting.
     */
    core = cpup->lcpu.core;
    simple_lock(&x86_topo_lock);
    machine_info.logical_cpu += 1;
    if (core->active_lcpus == 0)
        machine_info.physical_cpu += 1;
    core->active_lcpus += 1;
    simple_unlock(&x86_topo_lock);

    pmCPUMarkRunning(cpup);
    etimer_resync_deadlines();
}

/*
 * Called for a cpu to halt permanently
 * (as opposed to halting and expecting an interrupt to awaken it).
 */
void
cpu_thread_halt(void)
{
    x86_core_t *core;
    cpu_data_t *cpup = current_cpu_datap();

    simple_lock(&x86_topo_lock);
    machine_info.logical_cpu -= 1;
    core = cpup->lcpu.core;
    core->active_lcpus -= 1;
    if (core->active_lcpus == 0)
        machine_info.physical_cpu -= 1;
    simple_unlock(&x86_topo_lock);

    /*
     * Let the power management code determine the best way to "stop"
     * the processor.
     */
    ml_set_interrupts_enabled(FALSE);
    while (1) {
        pmCPUHalt(PM_HALT_NORMAL);
    }
    /* NOT REACHED */
}

/*
 * Validates that the topology was built correctly.  Must be called only
 * after the complete topology is built and no other changes are being made.
 */
void
validate_topology(void)
{
    x86_pkg_t *pkg;
    x86_die_t *die;
    x86_core_t *core;
    x86_lcpu_t *lcpu;
    uint32_t nDies;
    uint32_t nCores;
    uint32_t nCPUs;

    if (topo_dbg)
        debug_topology_print();

    /*
     * XXX
     *
     * Right now this only works if the number of CPUs started is the total
     * number of CPUs.  However, when specifying cpus=n the topology is only
     * partially constructed and the checks below will fail.
     *
     * We should *always* build the complete topology and only start the CPUs
     * indicated by cpus=n.  Until that happens, this code will not check the
     * topology if the number of cpus defined is < that described by the
     * topology parameters.
     */
    nCPUs = topoParms.nPackages * topoParms.nLThreadsPerPackage;
    if (nCPUs > real_ncpus)
        return;

    pkg = x86_pkgs;
    while (pkg != NULL) {
        /*
         * Make sure that the package has the correct number of dies.
         */
        nDies = 0;
        die = pkg->dies;
        while (die != NULL) {
            if (die->package == NULL)
                panic("Die(%d)->package is NULL",
                    die->pdie_num);
            if (die->package != pkg)
                panic("Die %d points to package %d, should be %d",
                    die->pdie_num, die->package->lpkg_num, pkg->lpkg_num);

            TOPO_DBG("Die(%d)->package %d\n",
                die->pdie_num, pkg->lpkg_num);

            /*
             * Make sure that the die has the correct number of cores.
             */
            TOPO_DBG("Die(%d)->cores: ", die->pdie_num);
            nCores = 0;
            core = die->cores;
            while (core != NULL) {
                if (core->die == NULL)
                    panic("Core(%d)->die is NULL",
                        core->pcore_num);
                if (core->die != die)
                    panic("Core %d points to die %d, should be %d",
                        core->pcore_num, core->die->pdie_num, die->pdie_num);
                nCores += 1;
                TOPO_DBG("%d ", core->pcore_num);
                core = core->next_in_die;
            }
            TOPO_DBG("\n");

            if (nCores != topoParms.nLCoresPerDie)
                panic("Should have %d Cores, but only found %d for Die %d",
                    topoParms.nLCoresPerDie, nCores, die->pdie_num);

            /*
             * Make sure that the die has the correct number of CPUs.
             */
            TOPO_DBG("Die(%d)->lcpus: ", die->pdie_num);
            nCPUs = 0;
            lcpu = die->lcpus;
            while (lcpu != NULL) {
                if (lcpu->die == NULL)
                    panic("CPU(%d)->die is NULL",
                        lcpu->cpu_num);
                if (lcpu->die != die)
                    panic("CPU %d points to die %d, should be %d",
                        lcpu->cpu_num, lcpu->die->pdie_num, die->pdie_num);
                nCPUs += 1;
                TOPO_DBG("%d ", lcpu->cpu_num);
                lcpu = lcpu->next_in_die;
            }
            TOPO_DBG("\n");

            if (nCPUs != topoParms.nLThreadsPerDie)
                panic("Should have %d Threads, but only found %d for Die %d",
                    topoParms.nLThreadsPerDie, nCPUs, die->pdie_num);

            nDies += 1;
            die = die->next_in_pkg;
        }

        if (nDies != topoParms.nLDiesPerPackage)
            panic("Should have %d Dies, but only found %d for package %d",
                topoParms.nLDiesPerPackage, nDies, pkg->lpkg_num);

        /*
         * Make sure that the package has the correct number of cores.
         */
        nCores = 0;
        core = pkg->cores;
        while (core != NULL) {
            if (core->package == NULL)
                panic("Core(%d)->package is NULL",
                    core->pcore_num);
            if (core->package != pkg)
                panic("Core %d points to package %d, should be %d",
                    core->pcore_num, core->package->lpkg_num, pkg->lpkg_num);
            TOPO_DBG("Core(%d)->package %d\n",
                core->pcore_num, pkg->lpkg_num);

            /*
             * Make sure that the core has the correct number of CPUs.
             */
            nCPUs = 0;
            lcpu = core->lcpus;
            TOPO_DBG("Core(%d)->lcpus: ", core->pcore_num);
            while (lcpu != NULL) {
                if (lcpu->core == NULL)
                    panic("CPU(%d)->core is NULL",
                        lcpu->cpu_num);
                if (lcpu->core != core)
                    panic("CPU %d points to core %d, should be %d",
                        lcpu->cpu_num, lcpu->core->pcore_num, core->pcore_num);
                TOPO_DBG("%d ", lcpu->cpu_num);
                nCPUs += 1;
                lcpu = lcpu->next_in_core;
            }
            TOPO_DBG("\n");

            if (nCPUs != topoParms.nLThreadsPerCore)
                panic("Should have %d Threads, but only found %d for Core %d",
                    topoParms.nLThreadsPerCore, nCPUs, core->pcore_num);
            nCores += 1;
            core = core->next_in_pkg;
        }

        if (nCores != topoParms.nLCoresPerPackage)
            panic("Should have %d Cores, but only found %d for package %d",
                topoParms.nLCoresPerPackage, nCores, pkg->lpkg_num);

        /*
         * Make sure that the package has the correct number of CPUs.
         */
        nCPUs = 0;
        lcpu = pkg->lcpus;
        while (lcpu != NULL) {
            if (lcpu->package == NULL)
                panic("CPU(%d)->package is NULL",
                    lcpu->cpu_num);
            if (lcpu->package != pkg)
                panic("CPU %d points to package %d, should be %d",
                    lcpu->cpu_num, lcpu->package->lpkg_num, pkg->lpkg_num);
            TOPO_DBG("CPU(%d)->package %d\n",
                lcpu->cpu_num, pkg->lpkg_num);
            nCPUs += 1;
            lcpu = lcpu->next_in_pkg;
        }

        if (nCPUs != topoParms.nLThreadsPerPackage)
            panic("Should have %d Threads, but only found %d for package %d",
                topoParms.nLThreadsPerPackage, nCPUs, pkg->lpkg_num);

        pkg = pkg->next;
    }
}

/*
 * Prints out the topology
 */
static void
debug_topology_print(void)
{
    x86_pkg_t *pkg;
    x86_die_t *die;
    x86_core_t *core;
    x86_lcpu_t *cpu;

    pkg = x86_pkgs;
    while (pkg != NULL) {
        kprintf("Package:\n");
        kprintf("    Physical: %d\n", pkg->ppkg_num);
        kprintf("    Logical:  %d\n", pkg->lpkg_num);

        die = pkg->dies;
        while (die != NULL) {
            kprintf("    Die:\n");
            kprintf("        Physical: %d\n", die->pdie_num);
            kprintf("        Logical:  %d\n", die->ldie_num);

            core = die->cores;
            while (core != NULL) {
                kprintf("        Core:\n");
                kprintf("            Physical: %d\n", core->pcore_num);
                kprintf("            Logical:  %d\n", core->lcore_num);

                cpu = core->lcpus;
                while (cpu != NULL) {
                    kprintf("            LCPU:\n");
                    kprintf("                CPU #:    %d\n", cpu->cpu_num);
                    kprintf("                Physical: %d\n", cpu->pnum);
                    kprintf("                Logical:  %d\n", cpu->lnum);
                    kprintf("                Flags:    ");
                    if (cpu->master)
                        kprintf("MASTER ");
                    if (cpu->primary)
                        kprintf("PRIMARY");
                    if (!cpu->master && !cpu->primary)
                        kprintf("(NONE)");
                    kprintf("\n");

                    cpu = cpu->next_in_core;
                }

                core = core->next_in_die;
            }

            die = die->next_in_pkg;
        }

        pkg = pkg->next;
    }
}