/*
 * Copyright (c) 2003-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <vm/vm_kern.h>
#include <kern/kalloc.h>
#include <kern/etimer.h>
#include <mach/machine.h>
#include <i386/cpu_threads.h>
#include <i386/cpuid.h>
#include <i386/machine_cpu.h>
#include <i386/pmCPU.h>
#include <i386/lock.h>
#define DIVISOR_GUARD(denom)                            \
    if ((denom) == 0) {                                 \
        kprintf("%s: %d Zero divisor: " #denom,         \
                __FILE__, __LINE__);                    \
    }
static void debug_topology_print(void);

boolean_t       topo_dbg = FALSE;

x86_pkg_t       *x86_pkgs = NULL;
uint32_t        num_Lx_caches[MAX_CACHE_DEPTH] = { 0 };

static x86_pkg_t    *free_pkgs = NULL;
static x86_die_t    *free_dies = NULL;
static x86_core_t   *free_cores = NULL;
static uint32_t     num_dies = 0;

static x86_cpu_cache_t  *x86_caches = NULL;
static uint32_t         num_caches = 0;

static boolean_t    topoParmsInited = FALSE;
x86_topology_parameters_t   topoParms;

decl_simple_lock_data(, x86_topo_lock);
static struct cpu_cache {
    int level;
    int type;
} cpu_caches[LCACHE_MAX] = {
    [L1D] = { 1, CPU_CACHE_TYPE_DATA },
    [L1I] = { 1, CPU_CACHE_TYPE_INST },
    [L2U] = { 2, CPU_CACHE_TYPE_UNIF },
    [L3U] = { 3, CPU_CACHE_TYPE_UNIF },
};
static boolean_t
cpu_is_hyperthreaded(void)
{
    i386_cpu_info_t *cpuinfo;

    cpuinfo = cpuid_info();
    return (cpuinfo->thread_count > cpuinfo->core_count);
}
static x86_cpu_cache_t *
x86_cache_alloc(void)
{
    x86_cpu_cache_t *cache;
    int             i;

    if (x86_caches == NULL) {
        cache = kalloc(sizeof(x86_cpu_cache_t) + (MAX_CPUS * sizeof(x86_lcpu_t *)));
        if (cache == NULL)
            return (NULL);
    } else {
        cache = x86_caches;
        x86_caches = cache->next;
        cache->next = NULL;
    }

    bzero(cache, sizeof(x86_cpu_cache_t));
    cache->maxcpus = MAX_CPUS;
    for (i = 0; i < cache->maxcpus; i += 1) {
        cache->cpus[i] = NULL;
    }

    num_caches += 1;

    return (cache);
}
static void
x86_LLC_info(void)
{
    int                 cache_level = 0;
    uint32_t            nCPUsSharing = 1;
    i386_cpu_info_t     *cpuinfo;
    struct cpu_cache    *cachep;
    int                 i;

    cpuinfo = cpuid_info();

    for (i = 0, cachep = &cpu_caches[0]; i < LCACHE_MAX; i++, cachep++) {

        if (cachep->type == 0 || cpuid_info()->cache_size[i] == 0)
            continue;

        /*
         * Only worry about it if it's a deeper level than
         * what we've seen before.
         */
        if (cachep->level > cache_level) {
            cache_level = cachep->level;

            /*
             * Save the number of CPUs sharing this cache.
             */
            nCPUsSharing = cpuinfo->cache_sharing[i];
        }
    }

    /*
     * Make the level of the LLC be 0 based.
     */
    topoParms.LLCDepth = cache_level - 1;

    /*
     * nCPUsSharing represents the *maximum* number of cores or
     * logical CPUs sharing the cache.
     */
    topoParms.maxSharingLLC = nCPUsSharing;

    topoParms.nCoresSharingLLC = nCPUsSharing / (cpuinfo->thread_count /
                                                 cpuinfo->core_count);
    topoParms.nLCPUsSharingLLC = nCPUsSharing;

    /*
     * nCPUsSharing may not be the number of *active* cores or
     * threads that are sharing the cache.
     */
    if (nCPUsSharing > cpuinfo->core_count)
        topoParms.nCoresSharingLLC = cpuinfo->core_count;
    if (nCPUsSharing > cpuinfo->thread_count)
        topoParms.nLCPUsSharingLLC = cpuinfo->thread_count;
}
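
/*
 * Worked example (hypothetical numbers, not a description of any specific
 * part): suppose cpuid reports thread_count = 8, core_count = 4, and an L3
 * with cache_sharing = 16.  Then the code above computes:
 *
 *      maxSharingLLC    = 16
 *      nCoresSharingLLC = 16 / (8 / 4) = 8, clamped to core_count = 4
 *      nLCPUsSharingLLC = 16, clamped to thread_count = 8
 *
 * The clamps matter because cpuid advertises the *maximum* number of
 * sharers the cache supports, which can exceed what is actually populated.
 */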
static void
initTopoParms(void)
{
    i386_cpu_info_t *cpuinfo;

    topoParms.stable = FALSE;

    cpuinfo = cpuid_info();

    PE_parse_boot_argn("-topo", &topo_dbg, sizeof(topo_dbg));
    /*
     * We need to start with getting the LLC information correct.
     */
    x86_LLC_info();

    /*
     * Compute the number of threads (logical CPUs) per core.
     */
    DIVISOR_GUARD(cpuinfo->core_count);
    topoParms.nLThreadsPerCore = cpuinfo->thread_count / cpuinfo->core_count;
    DIVISOR_GUARD(cpuinfo->cpuid_cores_per_package);
    topoParms.nPThreadsPerCore = cpuinfo->cpuid_logical_per_package / cpuinfo->cpuid_cores_per_package;

    /*
     * Compute the number of dies per package.
     */
    DIVISOR_GUARD(topoParms.nCoresSharingLLC);
    topoParms.nLDiesPerPackage = cpuinfo->core_count / topoParms.nCoresSharingLLC;
    DIVISOR_GUARD(topoParms.nPThreadsPerCore);
    DIVISOR_GUARD(topoParms.maxSharingLLC / topoParms.nPThreadsPerCore);
    topoParms.nPDiesPerPackage = cpuinfo->cpuid_cores_per_package / (topoParms.maxSharingLLC / topoParms.nPThreadsPerCore);

    /*
     * Compute the number of cores per die.
     */
    topoParms.nLCoresPerDie = topoParms.nCoresSharingLLC;
    topoParms.nPCoresPerDie = (topoParms.maxSharingLLC / topoParms.nPThreadsPerCore);

    /*
     * Compute the number of threads per die.
     */
    topoParms.nLThreadsPerDie = topoParms.nLThreadsPerCore * topoParms.nLCoresPerDie;
    topoParms.nPThreadsPerDie = topoParms.nPThreadsPerCore * topoParms.nPCoresPerDie;

    /*
     * Compute the number of cores per package.
     */
    topoParms.nLCoresPerPackage = topoParms.nLCoresPerDie * topoParms.nLDiesPerPackage;
    topoParms.nPCoresPerPackage = topoParms.nPCoresPerDie * topoParms.nPDiesPerPackage;

    /*
     * Compute the number of threads per package.
     */
    topoParms.nLThreadsPerPackage = topoParms.nLThreadsPerCore * topoParms.nLCoresPerPackage;
    topoParms.nPThreadsPerPackage = topoParms.nPThreadsPerCore * topoParms.nPCoresPerPackage;
    TOPO_DBG("\nCache Topology Parameters:\n");
    TOPO_DBG("\tLLC Depth: %d\n", topoParms.LLCDepth);
    TOPO_DBG("\tCores Sharing LLC: %d\n", topoParms.nCoresSharingLLC);
    TOPO_DBG("\tThreads Sharing LLC: %d\n", topoParms.nLCPUsSharingLLC);
    TOPO_DBG("\tmax Sharing of LLC: %d\n", topoParms.maxSharingLLC);

    TOPO_DBG("\nLogical Topology Parameters:\n");
    TOPO_DBG("\tThreads per Core: %d\n", topoParms.nLThreadsPerCore);
    TOPO_DBG("\tCores per Die: %d\n", topoParms.nLCoresPerDie);
    TOPO_DBG("\tThreads per Die: %d\n", topoParms.nLThreadsPerDie);
    TOPO_DBG("\tDies per Package: %d\n", topoParms.nLDiesPerPackage);
    TOPO_DBG("\tCores per Package: %d\n", topoParms.nLCoresPerPackage);
    TOPO_DBG("\tThreads per Package: %d\n", topoParms.nLThreadsPerPackage);

    TOPO_DBG("\nPhysical Topology Parameters:\n");
    TOPO_DBG("\tThreads per Core: %d\n", topoParms.nPThreadsPerCore);
    TOPO_DBG("\tCores per Die: %d\n", topoParms.nPCoresPerDie);
    TOPO_DBG("\tThreads per Die: %d\n", topoParms.nPThreadsPerDie);
    TOPO_DBG("\tDies per Package: %d\n", topoParms.nPDiesPerPackage);
    TOPO_DBG("\tCores per Package: %d\n", topoParms.nPCoresPerPackage);
    TOPO_DBG("\tThreads per Package: %d\n", topoParms.nPThreadsPerPackage);

    topoParmsInited = TRUE;
}
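
/*
 * Illustrative derivation, continuing the hypothetical part used in the
 * x86_LLC_info() example above (thread_count = 8, core_count = 4,
 * cpuid_logical_per_package = 16, cpuid_cores_per_package = 8,
 * maxSharingLLC = 16, nCoresSharingLLC = 4):
 *
 *      nLThreadsPerCore    = 8 / 4        = 2
 *      nPThreadsPerCore    = 16 / 8       = 2
 *      nLDiesPerPackage    = 4 / 4        = 1
 *      nPDiesPerPackage    = 8 / (16 / 2) = 1
 *      nLCoresPerDie       = 4
 *      nLCoresPerPackage   = 4 * 1        = 4
 *      nLThreadsPerPackage = 2 * 4        = 8
 *
 * The "L" (logical) parameters describe the topology as actually enabled
 * and running; the "P" (physical) parameters describe what the silicon
 * reports, whether or not every unit is active.
 */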
static void
x86_cache_free(x86_cpu_cache_t *cache)
{
    num_caches -= 1;
    if (cache->level > 0 && cache->level <= MAX_CACHE_DEPTH)
        num_Lx_caches[cache->level - 1] -= 1;
    cache->next = x86_caches;
    x86_caches = cache;
}
/*
 * This returns a list of cache structures that represent the
 * caches for a CPU.  Some of the structures may have to be
 * "freed" if they are actually shared between CPUs.
 */
static x86_cpu_cache_t *
x86_cache_list(void)
{
    x86_cpu_cache_t     *root = NULL;
    x86_cpu_cache_t     *cur = NULL;
    x86_cpu_cache_t     *last = NULL;
    struct cpu_cache    *cachep;
    int                 i;

    /*
     * Cons up a list driven not by CPUID leaf 4 (deterministic cache params)
     * but by the table above plus parameters already cracked from cpuid...
     */
    for (i = 0, cachep = &cpu_caches[0]; i < LCACHE_MAX; i++, cachep++) {

        if (cachep->type == 0 || cpuid_info()->cache_size[i] == 0)
            continue;

        cur = x86_cache_alloc();
        if (cur == NULL)
            break;

        cur->type = cachep->type;
        cur->level = cachep->level;
        cur->maxcpus = cpuid_info()->cache_sharing[i];
        cur->partitions = cpuid_info()->cache_partitions[i];
        cur->cache_size = cpuid_info()->cache_size[i];
        cur->line_size = cpuid_info()->cache_linesize;

        /* Append to the tail of the list. */
        if (last == NULL) {
            root = cur;
            last = cur;
        } else {
            last->next = cur;
            last = cur;
        }

        num_Lx_caches[cur->level - 1] += 1;
    }

    return (root);
}
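
/*
 * For example, on a hypothetical part whose cpuid reports nonzero sizes
 * for L1D, L1I, L2U and L3U, the returned list has four entries, one per
 * populated row of cpu_caches[], each stamped with the sharing, partition,
 * size and line-size values already cracked from cpuid.
 */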
static x86_cpu_cache_t *
x86_match_cache(x86_cpu_cache_t *list, x86_cpu_cache_t *matcher)
{
    x86_cpu_cache_t *cur_cache;

    cur_cache = list;
    while (cur_cache != NULL) {
        if (cur_cache->maxcpus == matcher->maxcpus
            && cur_cache->type == matcher->type
            && cur_cache->level == matcher->level
            && cur_cache->partitions == matcher->partitions
            && cur_cache->line_size == matcher->line_size
            && cur_cache->cache_size == matcher->cache_size)
            break;

        cur_cache = cur_cache->next;
    }

    return (cur_cache);
}
static void
x86_lcpu_init(int cpu)
{
    cpu_data_t  *cpup;
    x86_lcpu_t  *lcpu;
    int         i;

    cpup = cpu_datap(cpu);

    lcpu = &cpup->lcpu;
    lcpu->lcpu = lcpu;
    lcpu->cpu = cpup;
    lcpu->next_in_core = NULL;
    lcpu->next_in_die = NULL;
    lcpu->next_in_pkg = NULL;
    lcpu->core = NULL;
    lcpu->die = NULL;
    lcpu->package = NULL;
    lcpu->cpu_num = cpu;
    lcpu->lnum = cpu;
    lcpu->pnum = cpup->cpu_phys_number;
    lcpu->state = LCPU_OFF;
    for (i = 0; i < MAX_CACHE_DEPTH; i += 1)
        lcpu->caches[i] = NULL;

    lcpu->master = (lcpu->cpu_num == (unsigned int) master_cpu);
    lcpu->primary = (lcpu->pnum % topoParms.nPThreadsPerPackage) == 0;
}
static x86_core_t *
x86_core_alloc(int cpu)
{
    x86_core_t  *core;
    cpu_data_t  *cpup;

    cpup = cpu_datap(cpu);

    simple_lock(&x86_topo_lock);
    if (free_cores != NULL) {
        core = free_cores;
        free_cores = core->next_in_die;
        core->next_in_die = NULL;
        simple_unlock(&x86_topo_lock);
    } else {
        simple_unlock(&x86_topo_lock);
        core = kalloc(sizeof(x86_core_t));
        if (core == NULL)
            panic("x86_core_alloc() kalloc of x86_core_t failed!\n");
    }

    bzero((void *) core, sizeof(x86_core_t));

    core->pcore_num = cpup->cpu_phys_number / topoParms.nPThreadsPerCore;
    core->lcore_num = core->pcore_num % topoParms.nPCoresPerPackage;

    core->flags = X86CORE_FL_PRESENT | X86CORE_FL_READY
                | X86CORE_FL_HALTED | X86CORE_FL_IDLE;

    return (core);
}
static void
x86_core_free(x86_core_t *core)
{
    simple_lock(&x86_topo_lock);
    core->next_in_die = free_cores;
    free_cores = core;
    simple_unlock(&x86_topo_lock);
}
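
/*
 * Allocation pattern note: x86_core_alloc()/x86_core_free() above (and the
 * die and package variants below) keep retired structures on a free list
 * guarded by x86_topo_lock.  The spinlock is dropped before falling back
 * to kalloc(), since kalloc() may block, and a simple lock must not be
 * held across a blocking call.
 */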
static x86_pkg_t *
x86_package_find(int cpu)
{
    x86_pkg_t   *pkg;
    cpu_data_t  *cpup;
    uint32_t    pkg_num;

    cpup = cpu_datap(cpu);

    pkg_num = cpup->cpu_phys_number / topoParms.nPThreadsPerPackage;

    pkg = x86_pkgs;
    while (pkg != NULL) {
        if (pkg->ppkg_num == pkg_num)
            break;
        pkg = pkg->next;
    }

    return (pkg);
}
static x86_die_t *
x86_die_find(int cpu)
{
    x86_die_t   *die;
    x86_pkg_t   *pkg;
    cpu_data_t  *cpup;
    uint32_t    die_num;

    cpup = cpu_datap(cpu);

    die_num = cpup->cpu_phys_number / topoParms.nPThreadsPerDie;

    pkg = x86_package_find(cpu);
    if (pkg == NULL)
        return (NULL);

    die = pkg->dies;
    while (die != NULL) {
        if (die->pdie_num == die_num)
            break;
        die = die->next_in_pkg;
    }

    return (die);
}
static x86_core_t *
x86_core_find(int cpu)
{
    x86_core_t  *core;
    x86_die_t   *die;
    cpu_data_t  *cpup;
    uint32_t    core_num;

    cpup = cpu_datap(cpu);

    core_num = cpup->cpu_phys_number / topoParms.nPThreadsPerCore;

    die = x86_die_find(cpu);
    if (die == NULL)
        return (NULL);

    core = die->cores;
    while (core != NULL) {
        if (core->pcore_num == core_num)
            break;
        core = core->next_in_die;
    }

    return (core);
}
void
x86_set_lcpu_numbers(x86_lcpu_t *lcpu)
{
    lcpu->lnum = lcpu->cpu_num % topoParms.nLThreadsPerCore;
}
void
x86_set_core_numbers(x86_core_t *core, x86_lcpu_t *lcpu)
{
    core->pcore_num = lcpu->cpu_num / topoParms.nLThreadsPerCore;
    core->lcore_num = core->pcore_num % topoParms.nLCoresPerDie;
}
void
x86_set_die_numbers(x86_die_t *die, x86_lcpu_t *lcpu)
{
    die->pdie_num = lcpu->cpu_num / (topoParms.nLThreadsPerCore * topoParms.nLCoresPerDie);
    die->ldie_num = die->pdie_num % topoParms.nLDiesPerPackage;
}
void
x86_set_pkg_numbers(x86_pkg_t *pkg, x86_lcpu_t *lcpu)
{
    pkg->ppkg_num = lcpu->cpu_num / topoParms.nLThreadsPerPackage;
    pkg->lpkg_num = pkg->ppkg_num;
}
static x86_die_t *
x86_die_alloc(int cpu)
{
    x86_die_t   *die;
    cpu_data_t  *cpup;

    cpup = cpu_datap(cpu);

    simple_lock(&x86_topo_lock);
    if (free_dies != NULL) {
        die = free_dies;
        free_dies = die->next_in_pkg;
        die->next_in_pkg = NULL;
        simple_unlock(&x86_topo_lock);
    } else {
        simple_unlock(&x86_topo_lock);
        die = kalloc(sizeof(x86_die_t));
        if (die == NULL)
            panic("x86_die_alloc() kalloc of x86_die_t failed!\n");
    }

    bzero((void *) die, sizeof(x86_die_t));

    die->pdie_num = cpup->cpu_phys_number / topoParms.nPThreadsPerDie;

    die->ldie_num = num_dies;
    atomic_incl((long *) &num_dies, 1);

    die->flags = X86DIE_FL_PRESENT;
    return (die);
}
static void
x86_die_free(x86_die_t *die)
{
    simple_lock(&x86_topo_lock);
    die->next_in_pkg = free_dies;
    free_dies = die;
    atomic_decl((long *) &num_dies, 1);
    simple_unlock(&x86_topo_lock);
}
static x86_pkg_t *
x86_package_alloc(int cpu)
{
    x86_pkg_t   *pkg;
    cpu_data_t  *cpup;

    cpup = cpu_datap(cpu);

    simple_lock(&x86_topo_lock);
    if (free_pkgs != NULL) {
        pkg = free_pkgs;
        free_pkgs = pkg->next;
        pkg->next = NULL;
        simple_unlock(&x86_topo_lock);
    } else {
        simple_unlock(&x86_topo_lock);
        pkg = kalloc(sizeof(x86_pkg_t));
        if (pkg == NULL)
            panic("x86_package_alloc() kalloc of x86_pkg_t failed!\n");
    }

    bzero((void *) pkg, sizeof(x86_pkg_t));

    pkg->ppkg_num = cpup->cpu_phys_number / topoParms.nPThreadsPerPackage;

    pkg->lpkg_num = topoParms.nPackages;
    atomic_incl((long *) &topoParms.nPackages, 1);

    pkg->flags = X86PKG_FL_PRESENT | X86PKG_FL_READY;
    return (pkg);
}
static void
x86_package_free(x86_pkg_t *pkg)
{
    simple_lock(&x86_topo_lock);
    pkg->next = free_pkgs;
    free_pkgs = pkg;
    atomic_decl((long *) &topoParms.nPackages, 1);
    simple_unlock(&x86_topo_lock);
}
static void
x86_cache_add_lcpu(x86_cpu_cache_t *cache, x86_lcpu_t *lcpu)
{
    x86_cpu_cache_t *cur_cache;
    int             i;

    /*
     * Put the new CPU into the list of the cache.
     */
    cur_cache = lcpu->caches[cache->level - 1];
    lcpu->caches[cache->level - 1] = cache;
    cache->next = cur_cache;
    cache->nlcpus += 1;
    for (i = 0; i < cache->nlcpus; i += 1) {
        if (cache->cpus[i] == NULL) {
            cache->cpus[i] = lcpu;
            break;
        }
    }
}
static void
x86_lcpu_add_caches(x86_lcpu_t *lcpu)
{
    x86_cpu_cache_t *list;
    x86_cpu_cache_t *cur;
    x86_cpu_cache_t *match;
    x86_die_t       *die;
    x86_core_t      *core;
    x86_lcpu_t      *cur_lcpu;
    uint32_t        level;
    boolean_t       found = FALSE;

    assert(lcpu != NULL);

    /*
     * Add the cache data to the topology.
     */
    list = x86_cache_list();

    simple_lock(&x86_topo_lock);

    while (list != NULL) {
        /*
         * Remove the cache from the front of the list.
         */
        cur = list;
        list = cur->next;
        cur->next = NULL;
        level = cur->level - 1;

        /*
         * If the cache isn't shared then just put it where it
         * belongs.
         */
        if (cur->maxcpus == 1) {
            x86_cache_add_lcpu(cur, lcpu);
            continue;
        }

        /*
         * We'll assume that all of the caches at a particular level
         * have the same sharing.  So if we have a cache already at
         * this level, we'll just skip looking for the match.
         */
        if (lcpu->caches[level] != NULL) {
            x86_cache_free(cur);
            continue;
        }

        /*
         * This is a shared cache, so we have to figure out if
         * this is the first time we've seen this cache.  We do
         * this by searching through the topology and seeing if
         * this cache is already described.
         *
         * Assume that L{LLC-1} are all at the core level and that
         * LLC is shared at the die level.
         */
        if (level < topoParms.LLCDepth) {
            /*
             * Shared at the core.
             */
            core = lcpu->core;
            cur_lcpu = core->lcpus;
            while (cur_lcpu != NULL) {
                /*
                 * Skip ourselves.
                 */
                if (cur_lcpu == lcpu) {
                    cur_lcpu = cur_lcpu->next_in_core;
                    continue;
                }

                /*
                 * If there's a cache on this logical CPU,
                 * then use that one.
                 */
                match = x86_match_cache(cur_lcpu->caches[level], cur);
                if (match != NULL) {
                    x86_cache_free(cur);
                    x86_cache_add_lcpu(match, lcpu);
                    found = TRUE;
                    break;
                }

                cur_lcpu = cur_lcpu->next_in_core;
            }
        } else {
            /*
             * Shared at the die.
             */
            die = lcpu->die;
            cur_lcpu = die->lcpus;
            while (cur_lcpu != NULL) {
                /*
                 * Skip ourselves.
                 */
                if (cur_lcpu == lcpu) {
                    cur_lcpu = cur_lcpu->next_in_die;
                    continue;
                }

                /*
                 * If there's a cache on this logical CPU,
                 * then use that one.
                 */
                match = x86_match_cache(cur_lcpu->caches[level], cur);
                if (match != NULL) {
                    x86_cache_free(cur);
                    x86_cache_add_lcpu(match, lcpu);
                    found = TRUE;
                    break;
                }

                cur_lcpu = cur_lcpu->next_in_die;
            }
        }

        /*
         * If a shared cache wasn't found, then this logical CPU must
         * be the first one encountered.
         */
        if (!found)
            x86_cache_add_lcpu(cur, lcpu);
    }

    simple_unlock(&x86_topo_lock);
}
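
/*
 * Illustration of the sharing walk above (hypothetical two-LLC-level
 * case): with LLCDepth == 2 (an L3), the L1 and L2 entries (levels 0 and
 * 1) are matched against sibling logical CPUs in the same core, while the
 * L3 (level 2) is matched against logical CPUs in the same die.  The first
 * CPU to arrive keeps its freshly allocated cache structure; later
 * siblings free theirs and attach to the shared one via
 * x86_cache_add_lcpu().
 */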
static void
x86_core_add_lcpu(x86_core_t *core, x86_lcpu_t *lcpu)
{
    assert(core != NULL);
    assert(lcpu != NULL);

    simple_lock(&x86_topo_lock);

    lcpu->next_in_core = core->lcpus;
    lcpu->core = core;
    core->lcpus = lcpu;
    core->num_lcpus += 1;
    simple_unlock(&x86_topo_lock);
}
static void
x86_die_add_lcpu(x86_die_t *die, x86_lcpu_t *lcpu)
{
    assert(die != NULL);
    assert(lcpu != NULL);

    lcpu->next_in_die = die->lcpus;
    lcpu->die = die;
    die->lcpus = lcpu;
}
static void
x86_die_add_core(x86_die_t *die, x86_core_t *core)
{
    assert(die != NULL);
    assert(core != NULL);

    core->next_in_die = die->cores;
    core->die = die;
    die->cores = core;
}
static void
x86_package_add_lcpu(x86_pkg_t *pkg, x86_lcpu_t *lcpu)
{
    assert(pkg != NULL);
    assert(lcpu != NULL);

    lcpu->next_in_pkg = pkg->lcpus;
    lcpu->package = pkg;
    pkg->lcpus = lcpu;
}
static void
x86_package_add_core(x86_pkg_t *pkg, x86_core_t *core)
{
    assert(pkg != NULL);
    assert(core != NULL);

    core->next_in_pkg = pkg->cores;
    core->package = pkg;
    pkg->cores = core;
}
static void
x86_package_add_die(x86_pkg_t *pkg, x86_die_t *die)
{
    assert(pkg != NULL);
    assert(die != NULL);

    die->next_in_pkg = pkg->dies;
    die->package = pkg;
    pkg->dies = die;
}
void *
cpu_thread_alloc(int cpu)
{
    x86_core_t  *core = NULL;
    x86_die_t   *die = NULL;
    x86_pkg_t   *pkg = NULL;
    cpu_data_t  *cpup;
    uint32_t    phys_cpu;

    /*
     * Only allow one to manipulate the topology at a time.
     */
    simple_lock(&x86_topo_lock);

    /*
     * Make sure all of the topology parameters have been initialized.
     */
    if (!topoParmsInited)
        initTopoParms();

    cpup = cpu_datap(cpu);

    phys_cpu = cpup->cpu_phys_number;

    x86_lcpu_init(cpu);

    /*
     * Assume that all cpus have the same features.
     */
    if (cpu_is_hyperthreaded()) {
        cpup->cpu_threadtype = CPU_THREADTYPE_INTEL_HTT;
    } else {
        cpup->cpu_threadtype = CPU_THREADTYPE_NONE;
    }

    /*
     * Get the package that the logical CPU is in.
     */
    do {
        pkg = x86_package_find(cpu);
        if (pkg == NULL) {
            /*
             * Package structure hasn't been created yet, do it now.
             */
            simple_unlock(&x86_topo_lock);
            pkg = x86_package_alloc(cpu);
            simple_lock(&x86_topo_lock);
            if (x86_package_find(cpu) != NULL) {
                x86_package_free(pkg);
                continue;
            }

            /*
             * Add the new package to the global list of packages.
             */
            pkg->next = x86_pkgs;
            x86_pkgs = pkg;
        }
    } while (pkg == NULL);

    /*
     * Get the die that the logical CPU is in.
     */
    do {
        die = x86_die_find(cpu);
        if (die == NULL) {
            /*
             * Die structure hasn't been created yet, do it now.
             */
            simple_unlock(&x86_topo_lock);
            die = x86_die_alloc(cpu);
            simple_lock(&x86_topo_lock);
            if (x86_die_find(cpu) != NULL) {
                x86_die_free(die);
                continue;
            }

            /*
             * Add the die to the package.
             */
            x86_package_add_die(pkg, die);
        }
    } while (die == NULL);

    /*
     * Get the core for this logical CPU.
     */
    do {
        core = x86_core_find(cpu);
        if (core == NULL) {
            /*
             * Allocate the core structure now.
             */
            simple_unlock(&x86_topo_lock);
            core = x86_core_alloc(cpu);
            simple_lock(&x86_topo_lock);
            if (x86_core_find(cpu) != NULL) {
                x86_core_free(core);
                continue;
            }

            /*
             * Add the core to the die & package.
             */
            x86_die_add_core(die, core);
            x86_package_add_core(pkg, core);
            machine_info.physical_cpu_max += 1;
        }
    } while (core == NULL);

    /*
     * Done manipulating the topology, so others can get in.
     */
    machine_info.logical_cpu_max += 1;
    simple_unlock(&x86_topo_lock);

    /*
     * Add the logical CPU to the other topology structures.
     */
    x86_core_add_lcpu(core, &cpup->lcpu);
    x86_die_add_lcpu(core->die, &cpup->lcpu);
    x86_package_add_lcpu(core->package, &cpup->lcpu);
    x86_lcpu_add_caches(&cpup->lcpu);

    return (void *) core;
}
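
/*
 * Note on the find/alloc/recheck pattern above: each do/while loop drops
 * x86_topo_lock (a spinlock) before calling its *_alloc() routine, since
 * allocation may block, then retakes the lock and repeats the *_find()
 * lookup.  If another CPU created the structure while the lock was
 * dropped, the local allocation is returned to its free list and the
 * lookup retried.  A minimal sketch of the idiom, with hypothetical names
 * and the retry reset made explicit:
 *
 *      do {
 *              thing = thing_find(key);                // under the lock
 *              if (thing == NULL) {
 *                      simple_unlock(&lock);
 *                      thing = thing_alloc(key);       // may block
 *                      simple_lock(&lock);
 *                      if (thing_find(key) != NULL) {  // lost the race
 *                              thing_free(thing);
 *                              thing = NULL;
 *                              continue;
 *                      }
 *                      thing_publish(thing);           // link into topology
 *              }
 *      } while (thing == NULL);
 */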
void
cpu_thread_init(void)
{
    int         my_cpu = get_cpu_number();
    cpu_data_t  *cpup = current_cpu_datap();
    x86_core_t  *core;
    static int  initialized = 0;

    /*
     * If we're the boot processor, we do all of the initialization of
     * the CPU topology infrastructure.
     */
    if (my_cpu == master_cpu && !initialized) {
        simple_lock_init(&x86_topo_lock, 0);

        /*
         * Put this logical CPU into the physical CPU topology.
         */
        cpup->lcpu.core = cpu_thread_alloc(my_cpu);

        initialized = 1;
    }

    /*
     * Do the CPU accounting.
     */
    core = cpup->lcpu.core;
    simple_lock(&x86_topo_lock);
    machine_info.logical_cpu += 1;
    if (core->active_lcpus == 0)
        machine_info.physical_cpu += 1;
    core->active_lcpus += 1;
    simple_unlock(&x86_topo_lock);

    pmCPUMarkRunning(cpup);
    etimer_resync_deadlines();
}
/*
 * Called for a cpu to halt permanently
 * (as opposed to halting and expecting an interrupt to awaken it).
 */
void
cpu_thread_halt(void)
{
    x86_core_t  *core;
    cpu_data_t  *cpup = current_cpu_datap();

    simple_lock(&x86_topo_lock);
    machine_info.logical_cpu -= 1;
    core = cpup->lcpu.core;
    core->active_lcpus -= 1;
    if (core->active_lcpus == 0)
        machine_info.physical_cpu -= 1;
    simple_unlock(&x86_topo_lock);

    /*
     * Let the power management code determine the best way to "stop"
     * the cpu.
     */
    ml_set_interrupts_enabled(FALSE);

    pmCPUHalt(PM_HALT_NORMAL);
}
/*
 * Validates that the topology was built correctly.  Must be called only
 * after the complete topology is built and no other changes are being made.
 */
void
validate_topology(void)
{
    x86_pkg_t   *pkg;
    x86_die_t   *die;
    x86_core_t  *core;
    x86_lcpu_t  *lcpu;
    uint32_t    nDies;
    uint32_t    nCores;
    uint32_t    nCPUs;

    if (topo_dbg)
        debug_topology_print();

    /*
     * Right now this only works if the number of CPUs started is the total
     * number of CPUs.  However, when specifying cpus=n the topology is only
     * partially constructed and the checks below will fail.
     *
     * We should *always* build the complete topology and only start the CPUs
     * indicated by cpus=n.  Until that happens, this code will not check the
     * topology if the number of cpus defined is less than that described by
     * the topology parameters.
     */
    nCPUs = topoParms.nPackages * topoParms.nLThreadsPerPackage;
    if (nCPUs > real_ncpus)
        return;
    pkg = x86_pkgs;
    while (pkg != NULL) {
        /*
         * Make sure that the package has the correct number of dies.
         */
        nDies = 0;
        die = pkg->dies;
        while (die != NULL) {
            if (die->package == NULL)
                panic("Die(%d)->package is NULL",
                      die->pdie_num);
            if (die->package != pkg)
                panic("Die %d points to package %d, should be %d",
                      die->pdie_num, die->package->lpkg_num, pkg->lpkg_num);

            TOPO_DBG("Die(%d)->package %d\n",
                     die->pdie_num, pkg->lpkg_num);

            /*
             * Make sure that the die has the correct number of cores.
             */
            TOPO_DBG("Die(%d)->cores: ", die->pdie_num);
            nCores = 0;
            core = die->cores;
            while (core != NULL) {
                if (core->die == NULL)
                    panic("Core(%d)->die is NULL",
                          core->pcore_num);
                if (core->die != die)
                    panic("Core %d points to die %d, should be %d",
                          core->pcore_num, core->die->pdie_num, die->pdie_num);
                nCores += 1;
                TOPO_DBG("%d ", core->pcore_num);
                core = core->next_in_die;
            }
            TOPO_DBG("\n");

            if (nCores != topoParms.nLCoresPerDie)
                panic("Should have %d Cores, but only found %d for Die %d",
                      topoParms.nLCoresPerDie, nCores, die->pdie_num);

            /*
             * Make sure that the die has the correct number of CPUs.
             */
            TOPO_DBG("Die(%d)->lcpus: ", die->pdie_num);
            nCPUs = 0;
            lcpu = die->lcpus;
            while (lcpu != NULL) {
                if (lcpu->die == NULL)
                    panic("CPU(%d)->die is NULL",
                          lcpu->cpu_num);
                if (lcpu->die != die)
                    panic("CPU %d points to die %d, should be %d",
                          lcpu->cpu_num, lcpu->die->pdie_num, die->pdie_num);
                nCPUs += 1;
                TOPO_DBG("%d ", lcpu->cpu_num);
                lcpu = lcpu->next_in_die;
            }
            TOPO_DBG("\n");

            if (nCPUs != topoParms.nLThreadsPerDie)
                panic("Should have %d Threads, but only found %d for Die %d",
                      topoParms.nLThreadsPerDie, nCPUs, die->pdie_num);

            nDies += 1;
            die = die->next_in_pkg;
        }

        if (nDies != topoParms.nLDiesPerPackage)
            panic("Should have %d Dies, but only found %d for package %d",
                  topoParms.nLDiesPerPackage, nDies, pkg->lpkg_num);

        /*
         * Make sure that the package has the correct number of cores.
         */
        nCores = 0;
        core = pkg->cores;
        while (core != NULL) {
            if (core->package == NULL)
                panic("Core(%d)->package is NULL",
                      core->pcore_num);
            if (core->package != pkg)
                panic("Core %d points to package %d, should be %d",
                      core->pcore_num, core->package->lpkg_num, pkg->lpkg_num);
            TOPO_DBG("Core(%d)->package %d\n",
                     core->pcore_num, pkg->lpkg_num);

            /*
             * Make sure that the core has the correct number of CPUs.
             */
            nCPUs = 0;
            lcpu = core->lcpus;
            TOPO_DBG("Core(%d)->lcpus: ", core->pcore_num);
            while (lcpu != NULL) {
                if (lcpu->core == NULL)
                    panic("CPU(%d)->core is NULL",
                          lcpu->cpu_num);
                if (lcpu->core != core)
                    panic("CPU %d points to core %d, should be %d",
                          lcpu->cpu_num, lcpu->core->pcore_num, core->pcore_num);
                TOPO_DBG("%d ", lcpu->cpu_num);
                nCPUs += 1;
                lcpu = lcpu->next_in_core;
            }
            TOPO_DBG("\n");

            if (nCPUs != topoParms.nLThreadsPerCore)
                panic("Should have %d Threads, but only found %d for Core %d",
                      topoParms.nLThreadsPerCore, nCPUs, core->pcore_num);
            nCores += 1;
            core = core->next_in_pkg;
        }

        if (nCores != topoParms.nLCoresPerPackage)
            panic("Should have %d Cores, but only found %d for package %d",
                  topoParms.nLCoresPerPackage, nCores, pkg->lpkg_num);

        /*
         * Make sure that the package has the correct number of CPUs.
         */
        nCPUs = 0;
        lcpu = pkg->lcpus;
        while (lcpu != NULL) {
            if (lcpu->package == NULL)
                panic("CPU(%d)->package is NULL",
                      lcpu->cpu_num);
            if (lcpu->package != pkg)
                panic("CPU %d points to package %d, should be %d",
                      lcpu->cpu_num, lcpu->package->lpkg_num, pkg->lpkg_num);
            TOPO_DBG("CPU(%d)->package %d\n",
                     lcpu->cpu_num, pkg->lpkg_num);
            nCPUs += 1;
            lcpu = lcpu->next_in_pkg;
        }

        if (nCPUs != topoParms.nLThreadsPerPackage)
            panic("Should have %d Threads, but only found %d for package %d",
                  topoParms.nLThreadsPerPackage, nCPUs, pkg->lpkg_num);

        pkg = pkg->next;
    }
}
/*
 * Prints out the topology
 */
static void
debug_topology_print(void)
{
    x86_pkg_t   *pkg;
    x86_die_t   *die;
    x86_core_t  *core;
    x86_lcpu_t  *cpu;

    pkg = x86_pkgs;
    while (pkg != NULL) {
        kprintf("Package:\n");
        kprintf("    Physical: %d\n", pkg->ppkg_num);
        kprintf("    Logical:  %d\n", pkg->lpkg_num);

        die = pkg->dies;
        while (die != NULL) {
            kprintf("    Die:\n");
            kprintf("        Physical: %d\n", die->pdie_num);
            kprintf("        Logical:  %d\n", die->ldie_num);

            core = die->cores;
            while (core != NULL) {
                kprintf("        Core:\n");
                kprintf("            Physical: %d\n", core->pcore_num);
                kprintf("            Logical:  %d\n", core->lcore_num);

                cpu = core->lcpus;
                while (cpu != NULL) {
                    kprintf("            LCPU:\n");
                    kprintf("                CPU #:    %d\n", cpu->cpu_num);
                    kprintf("                Physical: %d\n", cpu->pnum);
                    kprintf("                Logical:  %d\n", cpu->lnum);
                    kprintf("                Flags:    ");
                    if (cpu->master)
                        kprintf("MASTER ");
                    if (cpu->primary)
                        kprintf("PRIMARY");
                    if (!cpu->master && !cpu->primary)
                        kprintf("(none)");
                    kprintf("\n");

                    cpu = cpu->next_in_core;
                }

                core = core->next_in_die;
            }

            die = die->next_in_pkg;
        }

        pkg = pkg->next;
    }
}