osfmk/i386/cpu_threads.c (xnu-3247.1.106)
/*
 * Copyright (c) 2003-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <vm/vm_kern.h>
#include <kern/kalloc.h>
#include <kern/timer_queue.h>
#include <mach/machine.h>
#include <i386/cpu_threads.h>
#include <i386/cpuid.h>
#include <i386/machine_cpu.h>
#include <i386/pmCPU.h>
#include <i386/bit_routines.h>

#define DIVISOR_GUARD(denom)                            \
    if ((denom) == 0) {                                 \
        kprintf("%s: %d Zero divisor: " #denom,         \
            __FILE__, __LINE__);                        \
    }
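/*
 * Note: DIVISOR_GUARD only logs a zero divisor via kprintf(); it does not
 * skip the division that follows, so the computations below still assume
 * that cpuid reports sane, non-zero counts.
 */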

static void debug_topology_print(void);

boolean_t topo_dbg = FALSE;

x86_pkg_t *x86_pkgs = NULL;
uint32_t num_Lx_caches[MAX_CACHE_DEPTH] = { 0 };

static x86_pkg_t *free_pkgs = NULL;
static x86_die_t *free_dies = NULL;
static x86_core_t *free_cores = NULL;
static uint32_t num_dies = 0;

static x86_cpu_cache_t *x86_caches = NULL;
static uint32_t num_caches = 0;

static boolean_t topoParmsInited = FALSE;
x86_topology_parameters_t topoParms;

decl_simple_lock_data(, x86_topo_lock);

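/*
 * The L1D/L1I/L2U/L3U slots of this table use the same indices as the
 * cache_size[], cache_sharing[] and cache_partitions[] arrays filled in by
 * cpuid_info(), which is what lets the loops below walk both in lockstep.
 */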
static struct cpu_cache {
    int level; int type;
} cpu_caches[LCACHE_MAX] = {
    [L1D] = { 1, CPU_CACHE_TYPE_DATA },
    [L1I] = { 1, CPU_CACHE_TYPE_INST },
    [L2U] = { 2, CPU_CACHE_TYPE_UNIF },
    [L3U] = { 3, CPU_CACHE_TYPE_UNIF },
};

static boolean_t
cpu_is_hyperthreaded(void)
{
    i386_cpu_info_t *cpuinfo;

    cpuinfo = cpuid_info();
    return(cpuinfo->thread_count > cpuinfo->core_count);
}

static x86_cpu_cache_t *
x86_cache_alloc(void)
{
    x86_cpu_cache_t *cache;
    int i;

    if (x86_caches == NULL) {
        cache = kalloc(sizeof(x86_cpu_cache_t) + (MAX_CPUS * sizeof(x86_lcpu_t *)));
        if (cache == NULL)
            return(NULL);
    } else {
        cache = x86_caches;
        x86_caches = cache->next;
        cache->next = NULL;
    }

    bzero(cache, sizeof(x86_cpu_cache_t));
    cache->next = NULL;
    cache->maxcpus = MAX_CPUS;
    for (i = 0; i < cache->maxcpus; i += 1) {
        cache->cpus[i] = NULL;
    }

    num_caches += 1;

    return(cache);
}
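/*
 * Allocation note: x86_cache_alloc() above first tries to reuse an entry
 * from the x86_caches free list (repopulated by x86_cache_free() below) and
 * only falls back to kalloc(); each allocation reserves room for MAX_CPUS
 * lcpu pointers in the cpus[] array that follows the structure.
 */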

static void
x86_LLC_info(void)
{
    int cache_level = 0;
    uint32_t nCPUsSharing = 1;
    i386_cpu_info_t *cpuinfo;
    struct cpu_cache *cachep;
    int i;

    cpuinfo = cpuid_info();

    for (i = 0, cachep = &cpu_caches[0]; i < LCACHE_MAX; i++, cachep++) {

        if (cachep->type == 0 || cpuid_info()->cache_size[i] == 0)
            continue;

        /*
         * Only worry about it if it's a deeper level than
         * what we've seen before.
         */
        if (cachep->level > cache_level) {
            cache_level = cachep->level;

            /*
             * Save the number of CPUs sharing this cache.
             */
            nCPUsSharing = cpuinfo->cache_sharing[i];
        }
    }

    /*
     * Make the level of the LLC be 0 based.
     */
    topoParms.LLCDepth = cache_level - 1;

    /*
     * nCPUsSharing represents the *maximum* number of cores or
     * logical CPUs sharing the cache.
     */
    topoParms.maxSharingLLC = nCPUsSharing;

    topoParms.nCoresSharingLLC = nCPUsSharing / (cpuinfo->thread_count /
                                                 cpuinfo->core_count);
    topoParms.nLCPUsSharingLLC = nCPUsSharing;

    /*
     * nCPUsSharing may not be the number of *active* cores or
     * threads that are sharing the cache.
     */
    if (nCPUsSharing > cpuinfo->core_count)
        topoParms.nCoresSharingLLC = cpuinfo->core_count;
    if (nCPUsSharing > cpuinfo->thread_count)
        topoParms.nLCPUsSharingLLC = cpuinfo->thread_count;
}

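/*
 * Descriptive note: the nL* parameters computed below describe the logical
 * topology that is actually active, derived from cpuid_info()'s
 * thread_count/core_count, while the nP* parameters describe the physical
 * capacity of the package, derived from the raw per-package cpuid counts
 * and the LLC sharing level.  The two differ when cores or threads are
 * disabled.
 *
 * Hypothetical example (not a measured system): on a 2-core/4-thread part
 * whose unified L3 is reported as shared by up to 16 logical CPUs,
 * x86_LLC_info() leaves LLCDepth = 2 and maxSharingLLC = 16, and clamps
 * nCoresSharingLLC and nLCPUsSharingLLC to the active counts (2 and 4);
 * initTopoParms() then computes nLThreadsPerCore = 4 / 2 = 2 and the
 * remaining nL* values from those.
 */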
static void
initTopoParms(void)
{
    i386_cpu_info_t *cpuinfo;

    topoParms.stable = FALSE;

    cpuinfo = cpuid_info();

    PE_parse_boot_argn("-topo", &topo_dbg, sizeof(topo_dbg));

    /*
     * We need to start with getting the LLC information correct.
     */
    x86_LLC_info();

    /*
     * Compute the number of threads (logical CPUs) per core.
     */
    DIVISOR_GUARD(cpuinfo->core_count);
    topoParms.nLThreadsPerCore = cpuinfo->thread_count / cpuinfo->core_count;
    DIVISOR_GUARD(cpuinfo->cpuid_cores_per_package);
    topoParms.nPThreadsPerCore = cpuinfo->cpuid_logical_per_package / cpuinfo->cpuid_cores_per_package;

    /*
     * Compute the number of dies per package.
     */
    DIVISOR_GUARD(topoParms.nCoresSharingLLC);
    topoParms.nLDiesPerPackage = cpuinfo->core_count / topoParms.nCoresSharingLLC;
    DIVISOR_GUARD(topoParms.nPThreadsPerCore);
    DIVISOR_GUARD(topoParms.maxSharingLLC / topoParms.nPThreadsPerCore);
    topoParms.nPDiesPerPackage = cpuinfo->cpuid_cores_per_package / (topoParms.maxSharingLLC / topoParms.nPThreadsPerCore);

    /*
     * Compute the number of cores per die.
     */
    topoParms.nLCoresPerDie = topoParms.nCoresSharingLLC;
    topoParms.nPCoresPerDie = (topoParms.maxSharingLLC / topoParms.nPThreadsPerCore);

    /*
     * Compute the number of threads per die.
     */
    topoParms.nLThreadsPerDie = topoParms.nLThreadsPerCore * topoParms.nLCoresPerDie;
    topoParms.nPThreadsPerDie = topoParms.nPThreadsPerCore * topoParms.nPCoresPerDie;

    /*
     * Compute the number of cores per package.
     */
    topoParms.nLCoresPerPackage = topoParms.nLCoresPerDie * topoParms.nLDiesPerPackage;
    topoParms.nPCoresPerPackage = topoParms.nPCoresPerDie * topoParms.nPDiesPerPackage;

    /*
     * Compute the number of threads per package.
     */
    topoParms.nLThreadsPerPackage = topoParms.nLThreadsPerCore * topoParms.nLCoresPerPackage;
    topoParms.nPThreadsPerPackage = topoParms.nPThreadsPerCore * topoParms.nPCoresPerPackage;

    TOPO_DBG("\nCache Topology Parameters:\n");
    TOPO_DBG("\tLLC Depth: %d\n", topoParms.LLCDepth);
    TOPO_DBG("\tCores Sharing LLC: %d\n", topoParms.nCoresSharingLLC);
    TOPO_DBG("\tThreads Sharing LLC: %d\n", topoParms.nLCPUsSharingLLC);
    TOPO_DBG("\tmax Sharing of LLC: %d\n", topoParms.maxSharingLLC);

    TOPO_DBG("\nLogical Topology Parameters:\n");
    TOPO_DBG("\tThreads per Core: %d\n", topoParms.nLThreadsPerCore);
    TOPO_DBG("\tCores per Die: %d\n", topoParms.nLCoresPerDie);
    TOPO_DBG("\tThreads per Die: %d\n", topoParms.nLThreadsPerDie);
    TOPO_DBG("\tDies per Package: %d\n", topoParms.nLDiesPerPackage);
    TOPO_DBG("\tCores per Package: %d\n", topoParms.nLCoresPerPackage);
    TOPO_DBG("\tThreads per Package: %d\n", topoParms.nLThreadsPerPackage);

    TOPO_DBG("\nPhysical Topology Parameters:\n");
    TOPO_DBG("\tThreads per Core: %d\n", topoParms.nPThreadsPerCore);
    TOPO_DBG("\tCores per Die: %d\n", topoParms.nPCoresPerDie);
    TOPO_DBG("\tThreads per Die: %d\n", topoParms.nPThreadsPerDie);
    TOPO_DBG("\tDies per Package: %d\n", topoParms.nPDiesPerPackage);
    TOPO_DBG("\tCores per Package: %d\n", topoParms.nPCoresPerPackage);
    TOPO_DBG("\tThreads per Package: %d\n", topoParms.nPThreadsPerPackage);

    topoParmsInited = TRUE;
}

static void
x86_cache_free(x86_cpu_cache_t *cache)
{
    num_caches -= 1;
    if (cache->level > 0 && cache->level <= MAX_CACHE_DEPTH)
        num_Lx_caches[cache->level - 1] -= 1;
    cache->next = x86_caches;
    x86_caches = cache;
}

/*
 * This returns a list of cache structures that represent the
 * caches for a CPU. Some of the structures may have to be
 * "freed" if they are actually shared between CPUs.
 */
static x86_cpu_cache_t *
x86_cache_list(void)
{
    x86_cpu_cache_t *root = NULL;
    x86_cpu_cache_t *cur = NULL;
    x86_cpu_cache_t *last = NULL;
    struct cpu_cache *cachep;
    int i;

    /*
     * Cons up a list driven not by CPUID leaf 4 (deterministic cache params)
     * but by the table above plus parameters already cracked from cpuid...
     */
    for (i = 0, cachep = &cpu_caches[0]; i < LCACHE_MAX; i++, cachep++) {

        if (cachep->type == 0 || cpuid_info()->cache_size[i] == 0)
            continue;

        cur = x86_cache_alloc();
        if (cur == NULL)
            break;

        cur->type = cachep->type;
        cur->level = cachep->level;
        cur->nlcpus = 0;
        cur->maxcpus = cpuid_info()->cache_sharing[i];
        cur->partitions = cpuid_info()->cache_partitions[i];
        cur->cache_size = cpuid_info()->cache_size[i];
        cur->line_size = cpuid_info()->cache_linesize;

        if (last == NULL) {
            root = cur;
            last = cur;
        } else {
            last->next = cur;
            last = cur;
        }
        num_Lx_caches[cur->level - 1] += 1;
    }
    return root;
}
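/*
 * Each CPU builds its own per-level cache list in x86_cache_list() above;
 * x86_lcpu_add_caches() later consults the existing topology via
 * x86_match_cache() and, when an identical shared cache has already been
 * recorded by a sibling CPU, frees the duplicate back to the x86_caches
 * free list.
 */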

static x86_cpu_cache_t *
x86_match_cache(x86_cpu_cache_t *list, x86_cpu_cache_t *matcher)
{
    x86_cpu_cache_t *cur_cache;

    cur_cache = list;
    while (cur_cache != NULL) {
        if (cur_cache->maxcpus == matcher->maxcpus
            && cur_cache->type == matcher->type
            && cur_cache->level == matcher->level
            && cur_cache->partitions == matcher->partitions
            && cur_cache->line_size == matcher->line_size
            && cur_cache->cache_size == matcher->cache_size)
            break;

        cur_cache = cur_cache->next;
    }

    return(cur_cache);
}

static void
x86_lcpu_init(int cpu)
{
    cpu_data_t *cpup;
    x86_lcpu_t *lcpu;
    int i;

    cpup = cpu_datap(cpu);

    lcpu = &cpup->lcpu;
    lcpu->lcpu = lcpu;
    lcpu->cpu = cpup;
    lcpu->next_in_core = NULL;
    lcpu->next_in_die = NULL;
    lcpu->next_in_pkg = NULL;
    lcpu->core = NULL;
    lcpu->die = NULL;
    lcpu->package = NULL;
    lcpu->cpu_num = cpu;
    lcpu->lnum = cpu;
    lcpu->pnum = cpup->cpu_phys_number;
    lcpu->state = LCPU_OFF;
    for (i = 0; i < MAX_CACHE_DEPTH; i += 1)
        lcpu->caches[i] = NULL;
}

static x86_core_t *
x86_core_alloc(int cpu)
{
    x86_core_t *core;
    cpu_data_t *cpup;

    cpup = cpu_datap(cpu);

    simple_lock(&x86_topo_lock);
    if (free_cores != NULL) {
        core = free_cores;
        free_cores = core->next_in_die;
        core->next_in_die = NULL;
        simple_unlock(&x86_topo_lock);
    } else {
        simple_unlock(&x86_topo_lock);
        core = kalloc(sizeof(x86_core_t));
        if (core == NULL)
            panic("x86_core_alloc() kalloc of x86_core_t failed!\n");
    }

    bzero((void *) core, sizeof(x86_core_t));

    core->pcore_num = cpup->cpu_phys_number / topoParms.nPThreadsPerCore;
    core->lcore_num = core->pcore_num % topoParms.nPCoresPerPackage;

    core->flags = X86CORE_FL_PRESENT | X86CORE_FL_READY
                | X86CORE_FL_HALTED | X86CORE_FL_IDLE;

    return(core);
}
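/*
 * Allocation pattern (note): x86_core_alloc() above, like x86_die_alloc()
 * and x86_package_alloc() below, tries to reuse an entry from its free list
 * under the topology lock and otherwise drops the simple lock before
 * calling kalloc(), since the allocation may block.  cpu_thread_alloc()
 * releases the topology lock around these calls and re-checks for a racing
 * insertion once it re-acquires it.
 */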

static void
x86_core_free(x86_core_t *core)
{
    simple_lock(&x86_topo_lock);
    core->next_in_die = free_cores;
    free_cores = core;
    simple_unlock(&x86_topo_lock);
}

static x86_pkg_t *
x86_package_find(int cpu)
{
    x86_pkg_t *pkg;
    cpu_data_t *cpup;
    uint32_t pkg_num;

    cpup = cpu_datap(cpu);

    pkg_num = cpup->cpu_phys_number / topoParms.nPThreadsPerPackage;

    pkg = x86_pkgs;
    while (pkg != NULL) {
        if (pkg->ppkg_num == pkg_num)
            break;
        pkg = pkg->next;
    }

    return(pkg);
}

static x86_die_t *
x86_die_find(int cpu)
{
    x86_die_t *die;
    x86_pkg_t *pkg;
    cpu_data_t *cpup;
    uint32_t die_num;

    cpup = cpu_datap(cpu);

    die_num = cpup->cpu_phys_number / topoParms.nPThreadsPerDie;

    pkg = x86_package_find(cpu);
    if (pkg == NULL)
        return(NULL);

    die = pkg->dies;
    while (die != NULL) {
        if (die->pdie_num == die_num)
            break;
        die = die->next_in_pkg;
    }

    return(die);
}

static x86_core_t *
x86_core_find(int cpu)
{
    x86_core_t *core;
    x86_die_t *die;
    cpu_data_t *cpup;
    uint32_t core_num;

    cpup = cpu_datap(cpu);

    core_num = cpup->cpu_phys_number / topoParms.nPThreadsPerCore;

    die = x86_die_find(cpu);
    if (die == NULL)
        return(NULL);

    core = die->cores;
    while (core != NULL) {
        if (core->pcore_num == core_num)
            break;
        core = core->next_in_die;
    }

    return(core);
}

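/*
 * Descriptive note: x86_set_logical_topology() renumbers an lcpu and its
 * enclosing core, die and package from a flat logical CPU number, assuming
 * threads are numbered consecutively within a core, cores within a die and
 * dies within a package.  It overwrites the physically derived numbering
 * set up by the *_alloc() routines above.
 */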
void
x86_set_logical_topology(x86_lcpu_t *lcpu, int pnum, int lnum)
{
    x86_core_t *core = lcpu->core;
    x86_die_t *die = lcpu->die;
    x86_pkg_t *pkg = lcpu->package;

    assert(core != NULL);
    assert(die != NULL);
    assert(pkg != NULL);

    lcpu->cpu_num = lnum;
    lcpu->pnum = pnum;
    lcpu->master = (lnum == master_cpu);
    lcpu->primary = (lnum % topoParms.nLThreadsPerPackage) == 0;

    lcpu->lnum = lnum % topoParms.nLThreadsPerCore;

    core->pcore_num = lnum / topoParms.nLThreadsPerCore;
    core->lcore_num = core->pcore_num % topoParms.nLCoresPerDie;

    die->pdie_num = lnum / (topoParms.nLThreadsPerCore * topoParms.nLCoresPerDie);
    die->ldie_num = die->pdie_num % topoParms.nLDiesPerPackage;

    pkg->ppkg_num = lnum / topoParms.nLThreadsPerPackage;
    pkg->lpkg_num = pkg->ppkg_num;
}

static x86_die_t *
x86_die_alloc(int cpu)
{
    x86_die_t *die;
    cpu_data_t *cpup;

    cpup = cpu_datap(cpu);

    simple_lock(&x86_topo_lock);
    if (free_dies != NULL) {
        die = free_dies;
        free_dies = die->next_in_pkg;
        die->next_in_pkg = NULL;
        simple_unlock(&x86_topo_lock);
    } else {
        simple_unlock(&x86_topo_lock);
        die = kalloc(sizeof(x86_die_t));
        if (die == NULL)
            panic("x86_die_alloc() kalloc of x86_die_t failed!\n");
    }

    bzero((void *) die, sizeof(x86_die_t));

    die->pdie_num = cpup->cpu_phys_number / topoParms.nPThreadsPerDie;

    die->ldie_num = num_dies;
    atomic_incl((long *) &num_dies, 1);

    die->flags = X86DIE_FL_PRESENT;
    return(die);
}

static void
x86_die_free(x86_die_t *die)
{
    simple_lock(&x86_topo_lock);
    die->next_in_pkg = free_dies;
    free_dies = die;
    atomic_decl((long *) &num_dies, 1);
    simple_unlock(&x86_topo_lock);
}

static x86_pkg_t *
x86_package_alloc(int cpu)
{
    x86_pkg_t *pkg;
    cpu_data_t *cpup;

    cpup = cpu_datap(cpu);

    simple_lock(&x86_topo_lock);
    if (free_pkgs != NULL) {
        pkg = free_pkgs;
        free_pkgs = pkg->next;
        pkg->next = NULL;
        simple_unlock(&x86_topo_lock);
    } else {
        simple_unlock(&x86_topo_lock);
        pkg = kalloc(sizeof(x86_pkg_t));
        if (pkg == NULL)
            panic("x86_package_alloc() kalloc of x86_pkg_t failed!\n");
    }

    bzero((void *) pkg, sizeof(x86_pkg_t));

    pkg->ppkg_num = cpup->cpu_phys_number / topoParms.nPThreadsPerPackage;

    pkg->lpkg_num = topoParms.nPackages;
    atomic_incl((long *) &topoParms.nPackages, 1);

    pkg->flags = X86PKG_FL_PRESENT | X86PKG_FL_READY;
    return(pkg);
}

static void
x86_package_free(x86_pkg_t *pkg)
{
    simple_lock(&x86_topo_lock);
    pkg->next = free_pkgs;
    free_pkgs = pkg;
    atomic_decl((long *) &topoParms.nPackages, 1);
    simple_unlock(&x86_topo_lock);
}

static void
x86_cache_add_lcpu(x86_cpu_cache_t *cache, x86_lcpu_t *lcpu)
{
    x86_cpu_cache_t *cur_cache;
    int i;

    /*
     * Put the new CPU into the list of the cache.
     */
    cur_cache = lcpu->caches[cache->level - 1];
    lcpu->caches[cache->level - 1] = cache;
    cache->next = cur_cache;
    cache->nlcpus += 1;
    for (i = 0; i < cache->nlcpus; i += 1) {
        if (cache->cpus[i] == NULL) {
            cache->cpus[i] = lcpu;
            break;
        }
    }
}
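/*
 * Note: in x86_cache_add_lcpu() above, the cache is linked at the head of
 * the lcpu's per-level caches[] list and the lcpu is recorded in the first
 * empty slot of the cache's cpus[] array; nlcpus tracks how many logical
 * CPUs have been added so far.
 */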

static void
x86_lcpu_add_caches(x86_lcpu_t *lcpu)
{
    x86_cpu_cache_t *list;
    x86_cpu_cache_t *cur;
    x86_cpu_cache_t *match;
    x86_die_t *die;
    x86_core_t *core;
    x86_lcpu_t *cur_lcpu;
    uint32_t level;
    boolean_t found = FALSE;

    assert(lcpu != NULL);

    /*
     * Add the cache data to the topology.
     */
    list = x86_cache_list();

    simple_lock(&x86_topo_lock);

    while (list != NULL) {
        /*
         * Remove the cache from the front of the list.
         */
        cur = list;
        list = cur->next;
        cur->next = NULL;
        level = cur->level - 1;

        /*
         * If the cache isn't shared then just put it where it
         * belongs.
         */
        if (cur->maxcpus == 1) {
            x86_cache_add_lcpu(cur, lcpu);
            continue;
        }

        /*
         * We'll assume that all of the caches at a particular level
         * have the same sharing. So if we have a cache already at
         * this level, we'll just skip looking for the match.
         */
        if (lcpu->caches[level] != NULL) {
            x86_cache_free(cur);
            continue;
        }

        /*
         * This is a shared cache, so we have to figure out if
         * this is the first time we've seen this cache. We do
         * this by searching through the topology and seeing if
         * this cache is already described.
         *
         * Assume that L{LLC-1} are all at the core level and that
         * LLC is shared at the die level.
         */
        if (level < topoParms.LLCDepth) {
            /*
             * Shared at the core.
             */
            core = lcpu->core;
            cur_lcpu = core->lcpus;
            while (cur_lcpu != NULL) {
                /*
                 * Skip ourselves.
                 */
                if (cur_lcpu == lcpu) {
                    cur_lcpu = cur_lcpu->next_in_core;
                    continue;
                }

                /*
                 * If there's a cache on this logical CPU,
                 * then use that one.
                 */
                match = x86_match_cache(cur_lcpu->caches[level], cur);
                if (match != NULL) {
                    x86_cache_free(cur);
                    x86_cache_add_lcpu(match, lcpu);
                    found = TRUE;
                    break;
                }

                cur_lcpu = cur_lcpu->next_in_core;
            }
        } else {
            /*
             * Shared at the die.
             */
            die = lcpu->die;
            cur_lcpu = die->lcpus;
            while (cur_lcpu != NULL) {
                /*
                 * Skip ourselves.
                 */
                if (cur_lcpu == lcpu) {
                    cur_lcpu = cur_lcpu->next_in_die;
                    continue;
                }

                /*
                 * If there's a cache on this logical CPU,
                 * then use that one.
                 */
                match = x86_match_cache(cur_lcpu->caches[level], cur);
                if (match != NULL) {
                    x86_cache_free(cur);
                    x86_cache_add_lcpu(match, lcpu);
                    found = TRUE;
                    break;
                }

                cur_lcpu = cur_lcpu->next_in_die;
            }
        }

        /*
         * If a shared cache wasn't found, then this logical CPU must
         * be the first one encountered.
         */
        if (!found) {
            x86_cache_add_lcpu(cur, lcpu);
        }
    }

    simple_unlock(&x86_topo_lock);
}

static void
x86_core_add_lcpu(x86_core_t *core, x86_lcpu_t *lcpu)
{
    assert(core != NULL);
    assert(lcpu != NULL);

    simple_lock(&x86_topo_lock);

    lcpu->next_in_core = core->lcpus;
    lcpu->core = core;
    core->lcpus = lcpu;
    core->num_lcpus += 1;
    simple_unlock(&x86_topo_lock);
}

static void
x86_die_add_lcpu(x86_die_t *die, x86_lcpu_t *lcpu)
{
    assert(die != NULL);
    assert(lcpu != NULL);

    lcpu->next_in_die = die->lcpus;
    lcpu->die = die;
    die->lcpus = lcpu;
}

static void
x86_die_add_core(x86_die_t *die, x86_core_t *core)
{
    assert(die != NULL);
    assert(core != NULL);

    core->next_in_die = die->cores;
    core->die = die;
    die->cores = core;
    die->num_cores += 1;
}

static void
x86_package_add_lcpu(x86_pkg_t *pkg, x86_lcpu_t *lcpu)
{
    assert(pkg != NULL);
    assert(lcpu != NULL);

    lcpu->next_in_pkg = pkg->lcpus;
    lcpu->package = pkg;
    pkg->lcpus = lcpu;
}

static void
x86_package_add_core(x86_pkg_t *pkg, x86_core_t *core)
{
    assert(pkg != NULL);
    assert(core != NULL);

    core->next_in_pkg = pkg->cores;
    core->package = pkg;
    pkg->cores = core;
}

static void
x86_package_add_die(x86_pkg_t *pkg, x86_die_t *die)
{
    assert(pkg != NULL);
    assert(die != NULL);

    die->next_in_pkg = pkg->dies;
    die->package = pkg;
    pkg->dies = die;
    pkg->num_dies += 1;
}

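/*
 * Descriptive note: cpu_thread_alloc() builds (or finds) the package, die
 * and core for a CPU.  Each do/while loop drops the topology lock before
 * allocating, re-takes it, and then re-checks with the corresponding
 * x86_*_find() call; if another CPU created the same structure while the
 * lock was dropped, the freshly allocated one is returned to its free list
 * and the loop retries.
 */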
void *
cpu_thread_alloc(int cpu)
{
    x86_core_t *core = NULL;
    x86_die_t *die = NULL;
    x86_pkg_t *pkg = NULL;
    cpu_data_t *cpup;
    uint32_t phys_cpu;

    /*
     * Only allow one to manipulate the topology at a time.
     */
    simple_lock(&x86_topo_lock);

    /*
     * Make sure all of the topology parameters have been initialized.
     */
    if (!topoParmsInited)
        initTopoParms();

    cpup = cpu_datap(cpu);

    phys_cpu = cpup->cpu_phys_number;

    x86_lcpu_init(cpu);

    /*
     * Assume that all cpus have the same features.
     */
    if (cpu_is_hyperthreaded()) {
        cpup->cpu_threadtype = CPU_THREADTYPE_INTEL_HTT;
    } else {
        cpup->cpu_threadtype = CPU_THREADTYPE_NONE;
    }

    /*
     * Get the package that the logical CPU is in.
     */
    do {
        pkg = x86_package_find(cpu);
        if (pkg == NULL) {
            /*
             * Package structure hasn't been created yet, do it now.
             */
            simple_unlock(&x86_topo_lock);
            pkg = x86_package_alloc(cpu);
            simple_lock(&x86_topo_lock);
            if (x86_package_find(cpu) != NULL) {
                x86_package_free(pkg);
                continue;
            }

            /*
             * Add the new package to the global list of packages.
             */
            pkg->next = x86_pkgs;
            x86_pkgs = pkg;
        }
    } while (pkg == NULL);

    /*
     * Get the die that the logical CPU is in.
     */
    do {
        die = x86_die_find(cpu);
        if (die == NULL) {
            /*
             * Die structure hasn't been created yet, do it now.
             */
            simple_unlock(&x86_topo_lock);
            die = x86_die_alloc(cpu);
            simple_lock(&x86_topo_lock);
            if (x86_die_find(cpu) != NULL) {
                x86_die_free(die);
                continue;
            }

            /*
             * Add the die to the package.
             */
            x86_package_add_die(pkg, die);
        }
    } while (die == NULL);

    /*
     * Get the core for this logical CPU.
     */
    do {
        core = x86_core_find(cpu);
        if (core == NULL) {
            /*
             * Allocate the core structure now.
             */
            simple_unlock(&x86_topo_lock);
            core = x86_core_alloc(cpu);
            simple_lock(&x86_topo_lock);
            if (x86_core_find(cpu) != NULL) {
                x86_core_free(core);
                continue;
            }

            /*
             * Add the core to the die & package.
             */
            x86_die_add_core(die, core);
            x86_package_add_core(pkg, core);
            machine_info.physical_cpu_max += 1;
        }
    } while (core == NULL);

    /*
     * Done manipulating the topology, so others can get in.
     */
    machine_info.logical_cpu_max += 1;
    simple_unlock(&x86_topo_lock);

    /*
     * Add the logical CPU to the other topology structures.
     */
    x86_core_add_lcpu(core, &cpup->lcpu);
    x86_die_add_lcpu(core->die, &cpup->lcpu);
    x86_package_add_lcpu(core->package, &cpup->lcpu);
    x86_lcpu_add_caches(&cpup->lcpu);

    return (void *) core;
}

void
cpu_thread_init(void)
{
    int my_cpu = get_cpu_number();
    cpu_data_t *cpup = current_cpu_datap();
    x86_core_t *core;
    static int initialized = 0;

    /*
     * If we're the boot processor, we do all of the initialization of
     * the CPU topology infrastructure.
     */
    if (my_cpu == master_cpu && !initialized) {
        simple_lock_init(&x86_topo_lock, 0);

        /*
         * Put this logical CPU into the physical CPU topology.
         */
        cpup->lcpu.core = cpu_thread_alloc(my_cpu);

        initialized = 1;
    }

    /*
     * Do the CPU accounting.
     */
    core = cpup->lcpu.core;
    simple_lock(&x86_topo_lock);
    machine_info.logical_cpu += 1;
    if (core->active_lcpus == 0)
        machine_info.physical_cpu += 1;
    core->active_lcpus += 1;
    simple_unlock(&x86_topo_lock);

    pmCPUMarkRunning(cpup);
    timer_resync_deadlines();
}
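/*
 * Accounting note: cpu_thread_init() above and cpu_thread_halt() below keep
 * machine_info.logical_cpu and machine_info.physical_cpu in step with the
 * number of running threads and cores; a core counts toward physical_cpu
 * only while it has at least one active lcpu (core->active_lcpus > 0).
 */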

/*
 * Called for a cpu to halt permanently
 * (as opposed to halting and expecting an interrupt to awaken it).
 */
void
cpu_thread_halt(void)
{
    x86_core_t *core;
    cpu_data_t *cpup = current_cpu_datap();

    simple_lock(&x86_topo_lock);
    machine_info.logical_cpu -= 1;
    core = cpup->lcpu.core;
    core->active_lcpus -= 1;
    if (core->active_lcpus == 0)
        machine_info.physical_cpu -= 1;
    simple_unlock(&x86_topo_lock);

    /*
     * Let the power management code determine the best way to "stop"
     * the processor.
     */
    ml_set_interrupts_enabled(FALSE);
    while (1) {
        pmCPUHalt(PM_HALT_NORMAL);
    }
    /* NOT REACHED */
}

/*
 * Validates that the topology was built correctly. Must be called only
 * after the complete topology is built and no other changes are being made.
 */
void
x86_validate_topology(void)
{
    x86_pkg_t *pkg;
    x86_die_t *die;
    x86_core_t *core;
    x86_lcpu_t *lcpu;
    uint32_t nDies;
    uint32_t nCores;
    uint32_t nCPUs;

    if (topo_dbg)
        debug_topology_print();

    /*
     * XXX
     *
     * Right now this only works if the number of CPUs started is the total
     * number of CPUs. However, when specifying cpus=n the topology is only
     * partially constructed and the checks below will fail.
     *
     * We should *always* build the complete topology and only start the CPUs
     * indicated by cpus=n. Until that happens, this code will not check the
     * topology if the number of cpus defined is < that described by the
     * topology parameters.
     */
    nCPUs = topoParms.nPackages * topoParms.nLThreadsPerPackage;
    if (nCPUs > real_ncpus)
        return;

    pkg = x86_pkgs;
    while (pkg != NULL) {
        /*
         * Make sure that the package has the correct number of dies.
         */
        nDies = 0;
        die = pkg->dies;
        while (die != NULL) {
            if (die->package == NULL)
                panic("Die(%d)->package is NULL",
                      die->pdie_num);
            if (die->package != pkg)
                panic("Die %d points to package %d, should be %d",
                      die->pdie_num, die->package->lpkg_num, pkg->lpkg_num);

            TOPO_DBG("Die(%d)->package %d\n",
                     die->pdie_num, pkg->lpkg_num);

            /*
             * Make sure that the die has the correct number of cores.
             */
            TOPO_DBG("Die(%d)->cores: ", die->pdie_num);
            nCores = 0;
            core = die->cores;
            while (core != NULL) {
                if (core->die == NULL)
                    panic("Core(%d)->die is NULL",
                          core->pcore_num);
                if (core->die != die)
                    panic("Core %d points to die %d, should be %d",
                          core->pcore_num, core->die->pdie_num, die->pdie_num);
                nCores += 1;
                TOPO_DBG("%d ", core->pcore_num);
                core = core->next_in_die;
            }
            TOPO_DBG("\n");

            if (nCores != topoParms.nLCoresPerDie)
                panic("Should have %d Cores, but only found %d for Die %d",
                      topoParms.nLCoresPerDie, nCores, die->pdie_num);

            /*
             * Make sure that the die has the correct number of CPUs.
             */
            TOPO_DBG("Die(%d)->lcpus: ", die->pdie_num);
            nCPUs = 0;
            lcpu = die->lcpus;
            while (lcpu != NULL) {
                if (lcpu->die == NULL)
                    panic("CPU(%d)->die is NULL",
                          lcpu->cpu_num);
                if (lcpu->die != die)
                    panic("CPU %d points to die %d, should be %d",
                          lcpu->cpu_num, lcpu->die->pdie_num, die->pdie_num);
                nCPUs += 1;
                TOPO_DBG("%d ", lcpu->cpu_num);
                lcpu = lcpu->next_in_die;
            }
            TOPO_DBG("\n");

            if (nCPUs != topoParms.nLThreadsPerDie)
                panic("Should have %d Threads, but only found %d for Die %d",
                      topoParms.nLThreadsPerDie, nCPUs, die->pdie_num);

            nDies += 1;
            die = die->next_in_pkg;
        }

        if (nDies != topoParms.nLDiesPerPackage)
            panic("Should have %d Dies, but only found %d for package %d",
                  topoParms.nLDiesPerPackage, nDies, pkg->lpkg_num);

        /*
         * Make sure that the package has the correct number of cores.
         */
        nCores = 0;
        core = pkg->cores;
        while (core != NULL) {
            if (core->package == NULL)
                panic("Core(%d)->package is NULL",
                      core->pcore_num);
            if (core->package != pkg)
                panic("Core %d points to package %d, should be %d",
                      core->pcore_num, core->package->lpkg_num, pkg->lpkg_num);
            TOPO_DBG("Core(%d)->package %d\n",
                     core->pcore_num, pkg->lpkg_num);

            /*
             * Make sure that the core has the correct number of CPUs.
             */
            nCPUs = 0;
            lcpu = core->lcpus;
            TOPO_DBG("Core(%d)->lcpus: ", core->pcore_num);
            while (lcpu != NULL) {
                if (lcpu->core == NULL)
                    panic("CPU(%d)->core is NULL",
                          lcpu->cpu_num);
                if (lcpu->core != core)
                    panic("CPU %d points to core %d, should be %d",
                          lcpu->cpu_num, lcpu->core->pcore_num, core->pcore_num);
                TOPO_DBG("%d ", lcpu->cpu_num);
                nCPUs += 1;
                lcpu = lcpu->next_in_core;
            }
            TOPO_DBG("\n");

            if (nCPUs != topoParms.nLThreadsPerCore)
                panic("Should have %d Threads, but only found %d for Core %d",
                      topoParms.nLThreadsPerCore, nCPUs, core->pcore_num);
            nCores += 1;
            core = core->next_in_pkg;
        }

        if (nCores != topoParms.nLCoresPerPackage)
            panic("Should have %d Cores, but only found %d for package %d",
                  topoParms.nLCoresPerPackage, nCores, pkg->lpkg_num);

        /*
         * Make sure that the package has the correct number of CPUs.
         */
        nCPUs = 0;
        lcpu = pkg->lcpus;
        while (lcpu != NULL) {
            if (lcpu->package == NULL)
                panic("CPU(%d)->package is NULL",
                      lcpu->cpu_num);
            if (lcpu->package != pkg)
                panic("CPU %d points to package %d, should be %d",
                      lcpu->cpu_num, lcpu->package->lpkg_num, pkg->lpkg_num);
            TOPO_DBG("CPU(%d)->package %d\n",
                     lcpu->cpu_num, pkg->lpkg_num);
            nCPUs += 1;
            lcpu = lcpu->next_in_pkg;
        }

        if (nCPUs != topoParms.nLThreadsPerPackage)
            panic("Should have %d Threads, but only found %d for package %d",
                  topoParms.nLThreadsPerPackage, nCPUs, pkg->lpkg_num);

        pkg = pkg->next;
    }
}

/*
 * Prints out the topology
 */
static void
debug_topology_print(void)
{
    x86_pkg_t *pkg;
    x86_die_t *die;
    x86_core_t *core;
    x86_lcpu_t *cpu;

    pkg = x86_pkgs;
    while (pkg != NULL) {
        kprintf("Package:\n");
        kprintf(" Physical: %d\n", pkg->ppkg_num);
        kprintf(" Logical: %d\n", pkg->lpkg_num);

        die = pkg->dies;
        while (die != NULL) {
            kprintf(" Die:\n");
            kprintf(" Physical: %d\n", die->pdie_num);
            kprintf(" Logical: %d\n", die->ldie_num);

            core = die->cores;
            while (core != NULL) {
                kprintf(" Core:\n");
                kprintf(" Physical: %d\n", core->pcore_num);
                kprintf(" Logical: %d\n", core->lcore_num);

                cpu = core->lcpus;
                while (cpu != NULL) {
                    kprintf(" LCPU:\n");
                    kprintf(" CPU #: %d\n", cpu->cpu_num);
                    kprintf(" Physical: %d\n", cpu->pnum);
                    kprintf(" Logical: %d\n", cpu->lnum);
                    kprintf(" Flags: ");
                    if (cpu->master)
                        kprintf("MASTER ");
                    if (cpu->primary)
                        kprintf("PRIMARY");
                    if (!cpu->master && !cpu->primary)
                        kprintf("(NONE)");
                    kprintf("\n");

                    cpu = cpu->next_in_core;
                }

                core = core->next_in_die;
            }

            die = die->next_in_pkg;
        }

        pkg = pkg->next;
    }
}