/*
 * Copyright (c) 2003-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <vm/vm_kern.h>
#include <kern/kalloc.h>
#include <kern/timer_queue.h>
#include <mach/machine.h>
#include <i386/cpu_threads.h>
#include <i386/cpuid.h>
#include <i386/machine_cpu.h>
#include <i386/pmCPU.h>
#include <i386/bit_routines.h>
#define DIVISOR_GUARD(denom)					\
    if ((denom) == 0) {						\
	kprintf("%s: %d Zero divisor: " #denom,			\
	    __FILE__, __LINE__);				\
    }
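
/*
 * Usage note (illustrative): DIVISOR_GUARD(cpuinfo->core_count) placed
 * immediately before a division by cpuinfo->core_count logs a diagnostic
 * via kprintf() if CPUID reported a zero count; it does not suppress the
 * division itself.
 */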

static void debug_topology_print(void);

boolean_t	topo_dbg	= FALSE;

x86_pkg_t	*x86_pkgs	= NULL;
uint32_t	num_Lx_caches[MAX_CACHE_DEPTH]	= { 0 };

static x86_pkg_t	*free_pkgs	= NULL;
static x86_die_t	*free_dies	= NULL;
static x86_core_t	*free_cores	= NULL;
static uint32_t		num_dies	= 0;

static x86_cpu_cache_t	*x86_caches	= NULL;
static uint32_t		num_caches	= 0;

static boolean_t	topoParmsInited	= FALSE;
x86_topology_parameters_t	topoParms;

decl_simple_lock_data(, x86_topo_lock);

static struct cpu_cache {
    int	level;
    int	type;
} cpu_caches[LCACHE_MAX] = {
    [L1D] = { 1, CPU_CACHE_TYPE_DATA },
    [L1I] = { 1, CPU_CACHE_TYPE_INST },
    [L2U] = { 2, CPU_CACHE_TYPE_UNIF },
    [L3U] = { 3, CPU_CACHE_TYPE_UNIF },
};
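
/*
 * A package is treated as hyperthreaded when cpuid reports more logical
 * processors (threads) than cores; e.g. a hypothetical part reporting
 * thread_count == 8 and core_count == 4 is hyperthreaded, while one
 * reporting 4 and 4 is not.
 */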
boolean_t
cpu_is_hyperthreaded(void)
{
    i386_cpu_info_t	*cpuinfo;

    cpuinfo = cpuid_info();
    return(cpuinfo->thread_count > cpuinfo->core_count);
}

/*
 * Allocate a cache structure, recycling one from the free list if possible.
 */
static x86_cpu_cache_t *
x86_cache_alloc(void)
{
    x86_cpu_cache_t	*cache;
    int			i;

    if (x86_caches == NULL) {
	cache = kalloc(sizeof(x86_cpu_cache_t) + (MAX_CPUS * sizeof(x86_lcpu_t *)));
	if (cache == NULL)
	    return(NULL);
    } else {
	cache = x86_caches;
	x86_caches = cache->next;
    }

    bzero(cache, sizeof(x86_cpu_cache_t));
    cache->next = NULL;
    cache->maxcpus = MAX_CPUS;
    for (i = 0; i < cache->maxcpus; i += 1) {
	cache->cpus[i] = NULL;
    }

    return(cache);
}

/*
 * Determine the parameters of the LLC (last-level cache) from the cpuid info.
 */
static void
x86_LLC_info(void)
{
    int			i;
    uint32_t		cache_level	= 0;
    uint32_t		nCPUsSharing	= 1;
    i386_cpu_info_t	*cpuinfo;
    struct cpu_cache	*cachep;

    cpuinfo = cpuid_info();

    for (i = 0, cachep = &cpu_caches[0]; i < LCACHE_MAX; i++, cachep++) {

	if (cachep->type == 0 || cpuid_info()->cache_size[i] == 0)
	    continue;

	/*
	 * Only worry about it if it's a deeper level than
	 * what we've seen before.
	 */
	if (cachep->level > cache_level) {
	    cache_level = cachep->level;

	    /*
	     * Save the number of CPUs sharing this cache.
	     */
	    nCPUsSharing = cpuinfo->cache_sharing[i];
	}
    }

    /*
     * Make the level of the LLC be 0 based.
     */
    topoParms.LLCDepth = cache_level - 1;

    /*
     * nCPUsSharing represents the *maximum* number of cores or
     * logical CPUs sharing the cache.
     */
    topoParms.maxSharingLLC = nCPUsSharing;

    topoParms.nCoresSharingLLC = nCPUsSharing / (cpuinfo->thread_count /
						 cpuinfo->core_count);
    topoParms.nLCPUsSharingLLC = nCPUsSharing;

    /*
     * nCPUsSharing may not be the number of *active* cores or
     * threads that are sharing the cache.
     */
    if (nCPUsSharing > cpuinfo->core_count)
	topoParms.nCoresSharingLLC = cpuinfo->core_count;
    if (nCPUsSharing > cpuinfo->thread_count)
	topoParms.nLCPUsSharingLLC = cpuinfo->thread_count;
}
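
/*
 * Worked example (hypothetical CPUID values, not from any specific part):
 * a 4-core/8-thread CPU whose L3 is reported as shared by up to 16 logical
 * processors yields cache_level = 3, so LLCDepth = 2 (0-based) and
 * maxSharingLLC = 16; the active-count clamps then give
 * nCoresSharingLLC = 4 and nLCPUsSharingLLC = 8.
 */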

static void
initTopoParms(void)
{
    i386_cpu_info_t	*cpuinfo;

    topoParms.stable = FALSE;

    cpuinfo = cpuid_info();

    PE_parse_boot_argn("-topo", &topo_dbg, sizeof(topo_dbg));

    /*
     * We need to start with getting the LLC information correct.
     */
    x86_LLC_info();

    /*
     * Compute the number of threads (logical CPUs) per core.
     */
    DIVISOR_GUARD(cpuinfo->core_count);
    topoParms.nLThreadsPerCore = cpuinfo->thread_count / cpuinfo->core_count;
    DIVISOR_GUARD(cpuinfo->cpuid_cores_per_package);
    topoParms.nPThreadsPerCore = cpuinfo->cpuid_logical_per_package / cpuinfo->cpuid_cores_per_package;

    /*
     * Compute the number of dies per package.
     */
    DIVISOR_GUARD(topoParms.nCoresSharingLLC);
    topoParms.nLDiesPerPackage = cpuinfo->core_count / topoParms.nCoresSharingLLC;
    DIVISOR_GUARD(topoParms.nPThreadsPerCore);
    DIVISOR_GUARD(topoParms.maxSharingLLC / topoParms.nPThreadsPerCore);
    topoParms.nPDiesPerPackage = cpuinfo->cpuid_cores_per_package / (topoParms.maxSharingLLC / topoParms.nPThreadsPerCore);

    /*
     * Compute the number of cores per die.
     */
    topoParms.nLCoresPerDie = topoParms.nCoresSharingLLC;
    topoParms.nPCoresPerDie = (topoParms.maxSharingLLC / topoParms.nPThreadsPerCore);

    /*
     * Compute the number of threads per die.
     */
    topoParms.nLThreadsPerDie = topoParms.nLThreadsPerCore * topoParms.nLCoresPerDie;
    topoParms.nPThreadsPerDie = topoParms.nPThreadsPerCore * topoParms.nPCoresPerDie;

    /*
     * Compute the number of cores per package.
     */
    topoParms.nLCoresPerPackage = topoParms.nLCoresPerDie * topoParms.nLDiesPerPackage;
    topoParms.nPCoresPerPackage = topoParms.nPCoresPerDie * topoParms.nPDiesPerPackage;

    /*
     * Compute the number of threads per package.
     */
    topoParms.nLThreadsPerPackage = topoParms.nLThreadsPerCore * topoParms.nLCoresPerPackage;
    topoParms.nPThreadsPerPackage = topoParms.nPThreadsPerCore * topoParms.nPCoresPerPackage;
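
    /*
     * Continuing the hypothetical example above (thread_count 8, core_count 4,
     * cpuid_logical_per_package 16, cpuid_cores_per_package 8,
     * maxSharingLLC 16, nCoresSharingLLC 4), the derived parameters are:
     *    nLThreadsPerCore    = 8/4 = 2      nPThreadsPerCore    = 16/8 = 2
     *    nLDiesPerPackage    = 4/4 = 1      nPDiesPerPackage    = 8/(16/2) = 1
     *    nLCoresPerDie       = 4            nPCoresPerDie       = 16/2 = 8
     *    nLThreadsPerDie     = 2*4 = 8      nPThreadsPerDie     = 2*8 = 16
     *    nLCoresPerPackage   = 4*1 = 4      nPCoresPerPackage   = 8*1 = 8
     *    nLThreadsPerPackage = 2*4 = 8      nPThreadsPerPackage = 2*8 = 16
     * Roughly, the "logical" (nL*) values track what is enabled, while the
     * "physical" (nP*) values track what the package reports it can hold.
     */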

    TOPO_DBG("\nCache Topology Parameters:\n");
    TOPO_DBG("\tLLC Depth: %d\n", topoParms.LLCDepth);
    TOPO_DBG("\tCores Sharing LLC: %d\n", topoParms.nCoresSharingLLC);
    TOPO_DBG("\tThreads Sharing LLC: %d\n", topoParms.nLCPUsSharingLLC);
    TOPO_DBG("\tmax Sharing of LLC: %d\n", topoParms.maxSharingLLC);

    TOPO_DBG("\nLogical Topology Parameters:\n");
    TOPO_DBG("\tThreads per Core: %d\n", topoParms.nLThreadsPerCore);
    TOPO_DBG("\tCores per Die: %d\n", topoParms.nLCoresPerDie);
    TOPO_DBG("\tThreads per Die: %d\n", topoParms.nLThreadsPerDie);
    TOPO_DBG("\tDies per Package: %d\n", topoParms.nLDiesPerPackage);
    TOPO_DBG("\tCores per Package: %d\n", topoParms.nLCoresPerPackage);
    TOPO_DBG("\tThreads per Package: %d\n", topoParms.nLThreadsPerPackage);

    TOPO_DBG("\nPhysical Topology Parameters:\n");
    TOPO_DBG("\tThreads per Core: %d\n", topoParms.nPThreadsPerCore);
    TOPO_DBG("\tCores per Die: %d\n", topoParms.nPCoresPerDie);
    TOPO_DBG("\tThreads per Die: %d\n", topoParms.nPThreadsPerDie);
    TOPO_DBG("\tDies per Package: %d\n", topoParms.nPDiesPerPackage);
    TOPO_DBG("\tCores per Package: %d\n", topoParms.nPCoresPerPackage);
    TOPO_DBG("\tThreads per Package: %d\n", topoParms.nPThreadsPerPackage);

    topoParmsInited = TRUE;
}

static void
x86_cache_free(x86_cpu_cache_t *cache)
{
    if (cache->level > 0 && cache->level <= MAX_CACHE_DEPTH)
	num_Lx_caches[cache->level - 1] -= 1;
    cache->next = x86_caches;
    x86_caches = cache;
}

/*
 * This returns a list of cache structures that represent the
 * caches for a CPU.  Some of the structures may have to be
 * "freed" if they are actually shared between CPUs.
 */
static x86_cpu_cache_t *
x86_cache_list(void)
{
    x86_cpu_cache_t	*root	= NULL;
    x86_cpu_cache_t	*cur	= NULL;
    x86_cpu_cache_t	*last	= NULL;
    struct cpu_cache	*cachep;
    int			i;

    /*
     * Cons up a list driven not by CPUID leaf 4 (deterministic cache params)
     * but by the table above plus parameters already cracked from cpuid...
     */
    for (i = 0, cachep = &cpu_caches[0]; i < LCACHE_MAX; i++, cachep++) {

	if (cachep->type == 0 || cpuid_info()->cache_size[i] == 0)
	    continue;

	cur = x86_cache_alloc();
	if (cur == NULL)
	    break;

	cur->type = cachep->type;
	cur->level = cachep->level;
	cur->maxcpus = cpuid_info()->cache_sharing[i];
	cur->partitions = cpuid_info()->cache_partitions[i];
	cur->cache_size = cpuid_info()->cache_size[i];
	cur->line_size = cpuid_info()->cache_linesize;

	if (last == NULL) {
	    root = cur;
	    last = cur;
	} else {
	    last->next = cur;
	    last = cur;
	}

	num_Lx_caches[cur->level - 1] += 1;
    }

    return(root);
}

static x86_cpu_cache_t *
x86_match_cache(x86_cpu_cache_t *list, x86_cpu_cache_t *matcher)
{
    x86_cpu_cache_t	*cur_cache;

    cur_cache = list;
    while (cur_cache != NULL) {
	if (cur_cache->maxcpus  == matcher->maxcpus
	    && cur_cache->type  == matcher->type
	    && cur_cache->level == matcher->level
	    && cur_cache->partitions == matcher->partitions
	    && cur_cache->line_size  == matcher->line_size
	    && cur_cache->cache_size == matcher->cache_size)
	    break;

	cur_cache = cur_cache->next;
    }

    return(cur_cache);
}

static void
x86_lcpu_init(int cpu)
{
    cpu_data_t	*cpup;
    x86_lcpu_t	*lcpu;
    int		i;

    cpup = cpu_datap(cpu);

    lcpu = &cpup->lcpu;
    lcpu->next_in_core = NULL;
    lcpu->next_in_die  = NULL;
    lcpu->next_in_pkg  = NULL;
    lcpu->core    = NULL;
    lcpu->die     = NULL;
    lcpu->package = NULL;
    lcpu->pnum = cpup->cpu_phys_number;
    lcpu->state = LCPU_OFF;
    for (i = 0; i < MAX_CACHE_DEPTH; i += 1)
	lcpu->caches[i] = NULL;
}

static x86_core_t *
x86_core_alloc(int cpu)
{
    x86_core_t	*core;
    cpu_data_t	*cpup;

    cpup = cpu_datap(cpu);

    simple_lock(&x86_topo_lock);
    if (free_cores != NULL) {
	core = free_cores;
	free_cores = core->next_in_die;
	core->next_in_die = NULL;
	simple_unlock(&x86_topo_lock);
    } else {
	simple_unlock(&x86_topo_lock);
	core = kalloc(sizeof(x86_core_t));
	if (core == NULL)
	    panic("x86_core_alloc() kalloc of x86_core_t failed!\n");
    }

    bzero((void *) core, sizeof(x86_core_t));

    core->pcore_num = cpup->cpu_phys_number / topoParms.nPThreadsPerCore;
    core->lcore_num = core->pcore_num % topoParms.nPCoresPerPackage;

    core->flags = X86CORE_FL_PRESENT | X86CORE_FL_READY
		| X86CORE_FL_HALTED  | X86CORE_FL_IDLE;

    return(core);
}

static void
x86_core_free(x86_core_t *core)
{
    simple_lock(&x86_topo_lock);
    core->next_in_die = free_cores;
    free_cores = core;
    simple_unlock(&x86_topo_lock);
}

static x86_pkg_t *
x86_package_find(int cpu)
{
    x86_pkg_t	*pkg;
    cpu_data_t	*cpup;
    uint32_t	pkg_num;

    cpup = cpu_datap(cpu);

    pkg_num = cpup->cpu_phys_number / topoParms.nPThreadsPerPackage;

    pkg = x86_pkgs;
    while (pkg != NULL) {
	if (pkg->ppkg_num == pkg_num)
	    break;
	pkg = pkg->next;
    }

    return(pkg);
}

static x86_die_t *
x86_die_find(int cpu)
{
    x86_die_t	*die;
    x86_pkg_t	*pkg;
    cpu_data_t	*cpup;
    uint32_t	die_num;

    cpup = cpu_datap(cpu);

    die_num = cpup->cpu_phys_number / topoParms.nPThreadsPerDie;

    pkg = x86_package_find(cpu);
    if (pkg == NULL)
	return(NULL);

    die = pkg->dies;
    while (die != NULL) {
	if (die->pdie_num == die_num)
	    break;
	die = die->next_in_pkg;
    }

    return(die);
}

static x86_core_t *
x86_core_find(int cpu)
{
    x86_core_t	*core;
    x86_die_t	*die;
    cpu_data_t	*cpup;
    uint32_t	core_num;

    cpup = cpu_datap(cpu);

    core_num = cpup->cpu_phys_number / topoParms.nPThreadsPerCore;

    die = x86_die_find(cpu);
    if (die == NULL)
	return(NULL);

    core = die->cores;
    while (core != NULL) {
	if (core->pcore_num == core_num)
	    break;
	core = core->next_in_die;
    }

    return(core);
}

/*
 * Establish the logical (enabled-CPU) numbering for a logical CPU and for
 * the core, die and package containing it.
 */
void
x86_set_logical_topology(x86_lcpu_t *lcpu, int pnum, int lnum)
{
    x86_core_t	*core	= lcpu->core;
    x86_die_t	*die	= lcpu->die;
    x86_pkg_t	*pkg	= lcpu->package;

    assert(core != NULL);
    assert(die != NULL);
    assert(pkg != NULL);

    lcpu->cpu_num = lnum;
    lcpu->pnum = pnum;
    lcpu->master = (lnum == master_cpu);
    lcpu->primary = (lnum % topoParms.nLThreadsPerPackage) == 0;

    lcpu->lnum = lnum % topoParms.nLThreadsPerCore;

    core->pcore_num = lnum / topoParms.nLThreadsPerCore;
    core->lcore_num = core->pcore_num % topoParms.nLCoresPerDie;

    die->pdie_num = lnum / (topoParms.nLThreadsPerCore * topoParms.nLCoresPerDie);
    die->ldie_num = die->pdie_num % topoParms.nLDiesPerPackage;

    pkg->ppkg_num = lnum / topoParms.nLThreadsPerPackage;
    pkg->lpkg_num = pkg->ppkg_num;
}
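
/*
 * Example of the decomposition above, using the hypothetical parameters from
 * the initTopoParms() example (2 threads/core, 4 cores/die, 1 die/package,
 * 8 threads/package): for lnum = 5 this gives lcpu->lnum = 5 % 2 = 1,
 * core->pcore_num = 5 / 2 = 2, core->lcore_num = 2 % 4 = 2,
 * die->pdie_num = 5 / (2*4) = 0, pkg->ppkg_num = 5 / 8 = 0, and
 * lcpu->primary = ((5 % 8) == 0) = FALSE.
 */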

static x86_die_t *
x86_die_alloc(int cpu)
{
    x86_die_t	*die;
    cpu_data_t	*cpup;

    cpup = cpu_datap(cpu);

    simple_lock(&x86_topo_lock);
    if (free_dies != NULL) {
	die = free_dies;
	free_dies = die->next_in_pkg;
	die->next_in_pkg = NULL;
	simple_unlock(&x86_topo_lock);
    } else {
	simple_unlock(&x86_topo_lock);
	die = kalloc(sizeof(x86_die_t));
	if (die == NULL)
	    panic("x86_die_alloc() kalloc of x86_die_t failed!\n");
    }

    bzero((void *) die, sizeof(x86_die_t));

    die->pdie_num = cpup->cpu_phys_number / topoParms.nPThreadsPerDie;

    die->ldie_num = num_dies;
    atomic_incl((long *) &num_dies, 1);

    die->flags = X86DIE_FL_PRESENT;
    return(die);
}

static void
x86_die_free(x86_die_t *die)
{
    simple_lock(&x86_topo_lock);
    die->next_in_pkg = free_dies;
    free_dies = die;
    atomic_decl((long *) &num_dies, 1);
    simple_unlock(&x86_topo_lock);
}

static x86_pkg_t *
x86_package_alloc(int cpu)
{
    x86_pkg_t	*pkg;
    cpu_data_t	*cpup;

    cpup = cpu_datap(cpu);

    simple_lock(&x86_topo_lock);
    if (free_pkgs != NULL) {
	pkg = free_pkgs;
	free_pkgs = pkg->next;
	pkg->next = NULL;
	simple_unlock(&x86_topo_lock);
    } else {
	simple_unlock(&x86_topo_lock);
	pkg = kalloc(sizeof(x86_pkg_t));
	if (pkg == NULL)
	    panic("x86_package_alloc() kalloc of x86_pkg_t failed!\n");
    }

    bzero((void *) pkg, sizeof(x86_pkg_t));

    pkg->ppkg_num = cpup->cpu_phys_number / topoParms.nPThreadsPerPackage;

    pkg->lpkg_num = topoParms.nPackages;
    atomic_incl((long *) &topoParms.nPackages, 1);

    pkg->flags = X86PKG_FL_PRESENT | X86PKG_FL_READY;
    return(pkg);
}

static void
x86_package_free(x86_pkg_t *pkg)
{
    simple_lock(&x86_topo_lock);
    pkg->next = free_pkgs;
    free_pkgs = pkg;
    atomic_decl((long *) &topoParms.nPackages, 1);
    simple_unlock(&x86_topo_lock);
}

static void
x86_cache_add_lcpu(x86_cpu_cache_t *cache, x86_lcpu_t *lcpu)
{
    x86_cpu_cache_t	*cur_cache;
    int			i;

    /*
     * Put the new CPU into the list of the cache.
     */
    cur_cache = lcpu->caches[cache->level - 1];
    lcpu->caches[cache->level - 1] = cache;
    cache->next = cur_cache;
    cache->nlcpus += 1;
    for (i = 0; i < cache->nlcpus; i += 1) {
	if (cache->cpus[i] == NULL) {
	    cache->cpus[i] = lcpu;
	    break;
	}
    }
}

static void
x86_lcpu_add_caches(x86_lcpu_t *lcpu)
{
    x86_cpu_cache_t	*list;
    x86_cpu_cache_t	*cur;
    x86_cpu_cache_t	*match;
    x86_die_t		*die;
    x86_core_t		*core;
    x86_lcpu_t		*cur_lcpu;
    uint32_t		level;
    boolean_t		found	= FALSE;

    assert(lcpu != NULL);

    /*
     * Add the cache data to the topology.
     */
    list = x86_cache_list();

    simple_lock(&x86_topo_lock);

    while (list != NULL) {
	/*
	 * Remove the cache from the front of the list.
	 */
	cur = list;
	list = cur->next;
	cur->next = NULL;
	level = cur->level - 1;
	found = FALSE;

	/*
	 * If the cache isn't shared then just put it where it
	 * belongs.
	 */
	if (cur->maxcpus == 1) {
	    x86_cache_add_lcpu(cur, lcpu);
	    continue;
	}

	/*
	 * We'll assume that all of the caches at a particular level
	 * have the same sharing.  So if we have a cache already at
	 * this level, we'll just skip looking for the match.
	 */
	if (lcpu->caches[level] != NULL) {
	    x86_cache_free(cur);
	    continue;
	}

	/*
	 * This is a shared cache, so we have to figure out if
	 * this is the first time we've seen this cache.  We do
	 * this by searching through the topology and seeing if
	 * this cache is already described.
	 *
	 * Assume that L{LLC-1} are all at the core level and that
	 * LLC is shared at the die level.
	 */
	if (level < topoParms.LLCDepth) {
	    /*
	     * Shared at the core.
	     */
	    core = lcpu->core;
	    cur_lcpu = core->lcpus;
	    while (cur_lcpu != NULL) {
		/*
		 * Skip ourselves.
		 */
		if (cur_lcpu == lcpu) {
		    cur_lcpu = cur_lcpu->next_in_core;
		    continue;
		}

		/*
		 * If there's a cache on this logical CPU,
		 * then use that one.
		 */
		match = x86_match_cache(cur_lcpu->caches[level], cur);
		if (match != NULL) {
		    x86_cache_free(cur);
		    x86_cache_add_lcpu(match, lcpu);
		    found = TRUE;
		    break;
		}

		cur_lcpu = cur_lcpu->next_in_core;
	    }
	} else {
	    /*
	     * Shared at the die.
	     */
	    die = lcpu->die;
	    cur_lcpu = die->lcpus;
	    while (cur_lcpu != NULL) {
		/*
		 * Skip ourselves.
		 */
		if (cur_lcpu == lcpu) {
		    cur_lcpu = cur_lcpu->next_in_die;
		    continue;
		}

		/*
		 * If there's a cache on this logical CPU,
		 * then use that one.
		 */
		match = x86_match_cache(cur_lcpu->caches[level], cur);
		if (match != NULL) {
		    x86_cache_free(cur);
		    x86_cache_add_lcpu(match, lcpu);
		    found = TRUE;
		    break;
		}

		cur_lcpu = cur_lcpu->next_in_die;
	    }
	}

	/*
	 * If a shared cache wasn't found, then this logical CPU must
	 * be the first one encountered.
	 */
	if (!found)
	    x86_cache_add_lcpu(cur, lcpu);
    }

    simple_unlock(&x86_topo_lock);
}

static void
x86_core_add_lcpu(x86_core_t *core, x86_lcpu_t *lcpu)
{
    assert(core != NULL);
    assert(lcpu != NULL);

    simple_lock(&x86_topo_lock);

    lcpu->next_in_core = core->lcpus;
    lcpu->core = core;
    core->lcpus = lcpu;
    core->num_lcpus += 1;
    simple_unlock(&x86_topo_lock);
}

static void
x86_die_add_lcpu(x86_die_t *die, x86_lcpu_t *lcpu)
{
    assert(die != NULL);
    assert(lcpu != NULL);

    lcpu->next_in_die = die->lcpus;
    lcpu->die = die;
    die->lcpus = lcpu;
}

static void
x86_die_add_core(x86_die_t *die, x86_core_t *core)
{
    assert(die != NULL);
    assert(core != NULL);

    core->next_in_die = die->cores;
    core->die = die;
    die->cores = core;
}

static void
x86_package_add_lcpu(x86_pkg_t *pkg, x86_lcpu_t *lcpu)
{
    assert(pkg != NULL);
    assert(lcpu != NULL);

    lcpu->next_in_pkg = pkg->lcpus;
    lcpu->package = pkg;
    pkg->lcpus = lcpu;
}

static void
x86_package_add_core(x86_pkg_t *pkg, x86_core_t *core)
{
    assert(pkg != NULL);
    assert(core != NULL);

    core->next_in_pkg = pkg->cores;
    core->package = pkg;
    pkg->cores = core;
}

static void
x86_package_add_die(x86_pkg_t *pkg, x86_die_t *die)
{
    assert(pkg != NULL);
    assert(die != NULL);

    die->next_in_pkg = pkg->dies;
    die->package = pkg;
    pkg->dies = die;
}

void *
cpu_thread_alloc(int cpu)
{
    x86_core_t	*core	= NULL;
    x86_die_t	*die	= NULL;
    x86_pkg_t	*pkg	= NULL;
    cpu_data_t	*cpup;
    uint32_t	phys_cpu;

    /*
     * Only allow one to manipulate the topology at a time.
     */
    simple_lock(&x86_topo_lock);

    /*
     * Make sure all of the topology parameters have been initialized.
     */
    if (!topoParmsInited)
	initTopoParms();

    cpup = cpu_datap(cpu);

    phys_cpu = cpup->cpu_phys_number;

    x86_lcpu_init(cpu);

    /*
     * Assume that all cpus have the same features.
     */
    if (cpu_is_hyperthreaded()) {
	cpup->cpu_threadtype = CPU_THREADTYPE_INTEL_HTT;
    } else {
	cpup->cpu_threadtype = CPU_THREADTYPE_NONE;
    }

    /*
     * Get the package that the logical CPU is in.
     */
    do {
	pkg = x86_package_find(cpu);
	if (pkg == NULL) {
	    /*
	     * Package structure hasn't been created yet, do it now.
	     */
	    simple_unlock(&x86_topo_lock);
	    pkg = x86_package_alloc(cpu);
	    simple_lock(&x86_topo_lock);
	    if (x86_package_find(cpu) != NULL) {
		x86_package_free(pkg);
		continue;
	    }

	    /*
	     * Add the new package to the global list of packages.
	     */
	    pkg->next = x86_pkgs;
	    x86_pkgs = pkg;
	}
    } while (pkg == NULL);
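
    /*
     * Note on the find/alloc/re-find pattern above (repeated for dies and
     * cores below): the topology lock is dropped around x86_package_alloc()
     * because the allocation may block, and the find is re-run after the
     * lock is retaken in case another CPU created the structure in the
     * meantime; if so, our copy is released back to the free list and the
     * loop retries.
     */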

    /*
     * Get the die that the logical CPU is in.
     */
    do {
	die = x86_die_find(cpu);
	if (die == NULL) {
	    /*
	     * Die structure hasn't been created yet, do it now.
	     */
	    simple_unlock(&x86_topo_lock);
	    die = x86_die_alloc(cpu);
	    simple_lock(&x86_topo_lock);
	    if (x86_die_find(cpu) != NULL) {
		x86_die_free(die);
		continue;
	    }

	    /*
	     * Add the die to the package.
	     */
	    x86_package_add_die(pkg, die);
	}
    } while (die == NULL);

    /*
     * Get the core for this logical CPU.
     */
    do {
	core = x86_core_find(cpu);
	if (core == NULL) {
	    /*
	     * Allocate the core structure now.
	     */
	    simple_unlock(&x86_topo_lock);
	    core = x86_core_alloc(cpu);
	    simple_lock(&x86_topo_lock);
	    if (x86_core_find(cpu) != NULL) {
		x86_core_free(core);
		continue;
	    }

	    /*
	     * Add the core to the die & package.
	     */
	    x86_die_add_core(die, core);
	    x86_package_add_core(pkg, core);
	    machine_info.physical_cpu_max += 1;
	}
    } while (core == NULL);

    /*
     * Done manipulating the topology, so others can get in.
     */
    machine_info.logical_cpu_max += 1;
    simple_unlock(&x86_topo_lock);

    /*
     * Add the logical CPU to the other topology structures.
     */
    x86_core_add_lcpu(core, &cpup->lcpu);
    x86_die_add_lcpu(core->die, &cpup->lcpu);
    x86_package_add_lcpu(core->package, &cpup->lcpu);
    x86_lcpu_add_caches(&cpup->lcpu);

    return (void *) core;
}

void
cpu_thread_init(void)
{
    int		my_cpu		= get_cpu_number();
    cpu_data_t	*cpup		= current_cpu_datap();
    x86_core_t	*core;
    static int	initialized	= 0;

    /*
     * If we're the boot processor, we do all of the initialization of
     * the CPU topology infrastructure.
     */
    if (my_cpu == master_cpu && !initialized) {
	simple_lock_init(&x86_topo_lock, 0);

	/*
	 * Put this logical CPU into the physical CPU topology.
	 */
	cpup->lcpu.core = cpu_thread_alloc(my_cpu);

	initialized = 1;
    }

    /*
     * Do the CPU accounting.
     */
    core = cpup->lcpu.core;
    simple_lock(&x86_topo_lock);
    machine_info.logical_cpu += 1;
    if (core->active_lcpus == 0)
	machine_info.physical_cpu += 1;
    core->active_lcpus += 1;
    simple_unlock(&x86_topo_lock);

    pmCPUMarkRunning(cpup);
    timer_resync_deadlines();
}

/*
 * Called for a cpu to halt permanently
 * (as opposed to halting and expecting an interrupt to awaken it).
 */
void
cpu_thread_halt(void)
{
    x86_core_t	*core;
    cpu_data_t	*cpup = current_cpu_datap();

    simple_lock(&x86_topo_lock);
    machine_info.logical_cpu -= 1;
    core = cpup->lcpu.core;
    core->active_lcpus -= 1;
    if (core->active_lcpus == 0)
	machine_info.physical_cpu -= 1;
    simple_unlock(&x86_topo_lock);

    /*
     * Let the power management code determine the best way to "stop"
     * the processor.
     */
    ml_set_interrupts_enabled(FALSE);

    pmCPUHalt(PM_HALT_NORMAL);
    /* NOT REACHED */
}

/*
 * Validates that the topology was built correctly.  Must be called only
 * after the complete topology is built and no other changes are being made.
 */
void
x86_validate_topology(void)
{
    x86_pkg_t	*pkg;
    x86_die_t	*die;
    x86_core_t	*core;
    x86_lcpu_t	*lcpu;
    uint32_t	nDies;
    uint32_t	nCores;
    uint32_t	nCPUs;

    if (topo_dbg)
	debug_topology_print();

    /*
     * Right now this only works if the number of CPUs started is the total
     * number of CPUs.  However, when specifying cpus=n the topology is only
     * partially constructed and the checks below will fail.
     *
     * We should *always* build the complete topology and only start the CPUs
     * indicated by cpus=n.  Until that happens, this code will not check the
     * topology if the number of cpus defined is less than that described by
     * the topology parameters.
     */
    nCPUs = topoParms.nPackages * topoParms.nLThreadsPerPackage;
    if (nCPUs > real_ncpus)
	return;

    pkg = x86_pkgs;
    while (pkg != NULL) {
	/*
	 * Make sure that the package has the correct number of dies.
	 */
	nDies = 0;
	die = pkg->dies;
	while (die != NULL) {
	    if (die->package == NULL)
		panic("Die(%d)->package is NULL",
		      die->pdie_num);
	    if (die->package != pkg)
		panic("Die %d points to package %d, should be %d",
		      die->pdie_num, die->package->lpkg_num, pkg->lpkg_num);

	    TOPO_DBG("Die(%d)->package %d\n",
		die->pdie_num, pkg->lpkg_num);

	    /*
	     * Make sure that the die has the correct number of cores.
	     */
	    TOPO_DBG("Die(%d)->cores: ", die->pdie_num);
	    nCores = 0;
	    core = die->cores;
	    while (core != NULL) {
		if (core->die == NULL)
		    panic("Core(%d)->die is NULL",
			  core->pcore_num);
		if (core->die != die)
		    panic("Core %d points to die %d, should be %d",
			  core->pcore_num, core->die->pdie_num, die->pdie_num);
		nCores += 1;
		TOPO_DBG("%d ", core->pcore_num);
		core = core->next_in_die;
	    }
	    TOPO_DBG("\n");

	    if (nCores != topoParms.nLCoresPerDie)
		panic("Should have %d Cores, but only found %d for Die %d",
		      topoParms.nLCoresPerDie, nCores, die->pdie_num);

	    /*
	     * Make sure that the die has the correct number of CPUs.
	     */
	    TOPO_DBG("Die(%d)->lcpus: ", die->pdie_num);
	    nCPUs = 0;
	    lcpu = die->lcpus;
	    while (lcpu != NULL) {
		if (lcpu->die == NULL)
		    panic("CPU(%d)->die is NULL",
			  lcpu->cpu_num);
		if (lcpu->die != die)
		    panic("CPU %d points to die %d, should be %d",
			  lcpu->cpu_num, lcpu->die->pdie_num, die->pdie_num);
		nCPUs += 1;
		TOPO_DBG("%d ", lcpu->cpu_num);
		lcpu = lcpu->next_in_die;
	    }
	    TOPO_DBG("\n");

	    if (nCPUs != topoParms.nLThreadsPerDie)
		panic("Should have %d Threads, but only found %d for Die %d",
		      topoParms.nLThreadsPerDie, nCPUs, die->pdie_num);

	    nDies += 1;
	    die = die->next_in_pkg;
	}

	if (nDies != topoParms.nLDiesPerPackage)
	    panic("Should have %d Dies, but only found %d for package %d",
		  topoParms.nLDiesPerPackage, nDies, pkg->lpkg_num);

	/*
	 * Make sure that the package has the correct number of cores.
	 */
	nCores = 0;
	core = pkg->cores;
	while (core != NULL) {
	    if (core->package == NULL)
		panic("Core(%d)->package is NULL",
		      core->pcore_num);
	    if (core->package != pkg)
		panic("Core %d points to package %d, should be %d",
		      core->pcore_num, core->package->lpkg_num, pkg->lpkg_num);
	    TOPO_DBG("Core(%d)->package %d\n",
		core->pcore_num, pkg->lpkg_num);

	    /*
	     * Make sure that the core has the correct number of CPUs.
	     */
	    nCPUs = 0;
	    lcpu = core->lcpus;
	    TOPO_DBG("Core(%d)->lcpus: ", core->pcore_num);
	    while (lcpu != NULL) {
		if (lcpu->core == NULL)
		    panic("CPU(%d)->core is NULL",
			  lcpu->cpu_num);
		if (lcpu->core != core)
		    panic("CPU %d points to core %d, should be %d",
			  lcpu->cpu_num, lcpu->core->pcore_num, core->pcore_num);
		TOPO_DBG("%d ", lcpu->cpu_num);
		nCPUs += 1;
		lcpu = lcpu->next_in_core;
	    }
	    TOPO_DBG("\n");

	    if (nCPUs != topoParms.nLThreadsPerCore)
		panic("Should have %d Threads, but only found %d for Core %d",
		      topoParms.nLThreadsPerCore, nCPUs, core->pcore_num);
	    nCores += 1;
	    core = core->next_in_pkg;
	}

	if (nCores != topoParms.nLCoresPerPackage)
	    panic("Should have %d Cores, but only found %d for package %d",
		  topoParms.nLCoresPerPackage, nCores, pkg->lpkg_num);

	/*
	 * Make sure that the package has the correct number of CPUs.
	 */
	nCPUs = 0;
	lcpu = pkg->lcpus;
	while (lcpu != NULL) {
	    if (lcpu->package == NULL)
		panic("CPU(%d)->package is NULL",
		      lcpu->cpu_num);
	    if (lcpu->package != pkg)
		panic("CPU %d points to package %d, should be %d",
		      lcpu->cpu_num, lcpu->package->lpkg_num, pkg->lpkg_num);
	    TOPO_DBG("CPU(%d)->package %d\n",
		lcpu->cpu_num, pkg->lpkg_num);
	    nCPUs += 1;
	    lcpu = lcpu->next_in_pkg;
	}

	if (nCPUs != topoParms.nLThreadsPerPackage)
	    panic("Should have %d Threads, but only found %d for package %d",
		  topoParms.nLThreadsPerPackage, nCPUs, pkg->lpkg_num);

	pkg = pkg->next;
    }
}

/*
 * Prints out the topology
 */
static void
debug_topology_print(void)
{
    x86_pkg_t	*pkg;
    x86_die_t	*die;
    x86_core_t	*core;
    x86_lcpu_t	*cpu;

    pkg = x86_pkgs;
    while (pkg != NULL) {
	kprintf("Package:\n");
	kprintf("    Physical: %d\n", pkg->ppkg_num);
	kprintf("    Logical:  %d\n", pkg->lpkg_num);

	die = pkg->dies;
	while (die != NULL) {
	    kprintf("    Die:\n");
	    kprintf("    Physical: %d\n", die->pdie_num);
	    kprintf("    Logical:  %d\n", die->ldie_num);

	    core = die->cores;
	    while (core != NULL) {
		kprintf("        Core:\n");
		kprintf("            Physical: %d\n", core->pcore_num);
		kprintf("            Logical: %d\n", core->lcore_num);

		cpu = core->lcpus;
		while (cpu != NULL) {
		    kprintf("            LCPU:\n");
		    kprintf("                CPU #: %d\n", cpu->cpu_num);
		    kprintf("                Physical: %d\n", cpu->pnum);
		    kprintf("                Logical: %d\n", cpu->lnum);
		    kprintf("                Flags: ");
		    if (cpu->master)
			kprintf("MASTER ");
		    if (cpu->primary)
			kprintf("PRIMARY");
		    if (!cpu->master && !cpu->primary)
			kprintf("(NONE)");
		    kprintf("\n");

		    cpu = cpu->next_in_core;
		}

		core = core->next_in_die;
	    }

	    die = die->next_in_pkg;
	}

	pkg = pkg->next;
    }
}