/*
 * Copyright (c) 2003-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <vm/vm_kern.h>
#include <kern/zalloc.h>
#include <kern/lock_group.h>
#include <kern/timer_queue.h>
#include <mach/machine.h>
#include <i386/cpu_threads.h>
#include <i386/cpuid.h>
#include <i386/machine_cpu.h>
#include <i386/pmCPU.h>
#include <i386/bit_routines.h>
#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */
#define DIVISOR_GUARD(denom)				\
	if ((denom) == 0) {				\
		kprintf("%s: %d Zero divisor: " #denom,	\
		    __FILE__, __LINE__);		\
	}
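/*
 * Illustrative note (added, not in the original source): DIVISOR_GUARD is
 * meant to be placed immediately before a division so a zero denominator is
 * logged with the file and line, e.g.
 *
 *	DIVISOR_GUARD(cpuinfo->core_count);
 *	nLThreadsPerCore = cpuinfo->thread_count / cpuinfo->core_count;
 *
 * as done in the topology-parameter setup below.
 */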
static void debug_topology_print(void);
boolean_t	topo_dbg = FALSE;

x86_pkg_t	*x86_pkgs = NULL;
uint32_t	num_Lx_caches[MAX_CACHE_DEPTH] = { 0 };

static x86_pkg_t	*free_pkgs = NULL;
static x86_die_t	*free_dies = NULL;
static x86_core_t	*free_cores = NULL;
static uint32_t		num_dies = 0;

static x86_cpu_cache_t	*x86_caches = NULL;
static uint32_t		num_caches = 0;

static boolean_t	topoParmsInited = FALSE;
x86_topology_parameters_t	topoParms;

decl_simple_lock_data(, x86_topo_lock);
static struct cpu_cache {
	int	level;	int	type;
} cpu_caches[LCACHE_MAX] = {
	[L1D] = { 1, CPU_CACHE_TYPE_DATA },
	[L1I] = { 1, CPU_CACHE_TYPE_INST },
	[L2U] = { 2, CPU_CACHE_TYPE_UNIF },
	[L3U] = { 3, CPU_CACHE_TYPE_UNIF },
};
boolean_t
cpu_is_hyperthreaded(void)
{
	i386_cpu_info_t	*cpuinfo;

	cpuinfo = cpuid_info();
	return cpuinfo->thread_count > cpuinfo->core_count;
}
static x86_cpu_cache_t *
x86_cache_alloc(void)
{
	x86_cpu_cache_t	*cache;
	int		i;

	if (x86_caches == NULL) {
		cache = zalloc_permanent(sizeof(x86_cpu_cache_t) +
		    (MAX_CPUS * sizeof(x86_lcpu_t *)), ZALIGN(x86_cpu_cache_t));
	} else {
		cache = x86_caches;
		x86_caches = cache->next;
	}

	cache->next = NULL;
	cache->maxcpus = MAX_CPUS;
	for (i = 0; i < cache->maxcpus; i += 1) {
		cache->cpus[i] = NULL;
	}

	num_caches += 1;

	return cache;
}
static void
x86_LLC_info(void)
{
	int		i;
	uint32_t	cache_level = 0;
	uint32_t	nCPUsSharing = 1;
	i386_cpu_info_t	*cpuinfo;
	struct cpu_cache *cachep;

	cpuinfo = cpuid_info();

	for (i = 0, cachep = &cpu_caches[0]; i < LCACHE_MAX; i++, cachep++) {
		if (cachep->type == 0 || cpuid_info()->cache_size[i] == 0) {
			continue;
		}

		/*
		 * Only worry about it if it's a deeper level than
		 * what we've seen before.
		 */
		if (cachep->level > cache_level) {
			cache_level = cachep->level;

			/*
			 * Save the number of CPUs sharing this cache.
			 */
			nCPUsSharing = cpuinfo->cache_sharing[i];
		}
	}

	/*
	 * Make the level of the LLC be 0 based.
	 */
	topoParms.LLCDepth = cache_level - 1;

	/*
	 * nCPUsSharing represents the *maximum* number of cores or
	 * logical CPUs sharing the cache.
	 */
	topoParms.maxSharingLLC = nCPUsSharing;

	topoParms.nCoresSharingLLC = nCPUsSharing / (cpuinfo->thread_count /
	    cpuinfo->core_count);
	topoParms.nLCPUsSharingLLC = nCPUsSharing;

	/*
	 * nCPUsSharing may not be the number of *active* cores or
	 * threads that are sharing the cache.
	 */
	if (nCPUsSharing > cpuinfo->core_count) {
		topoParms.nCoresSharingLLC = cpuinfo->core_count;
	}
	if (nCPUsSharing > cpuinfo->thread_count) {
		topoParms.nLCPUsSharingLLC = cpuinfo->thread_count;
	}
}
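/*
 * Illustrative example (added, not in the original source): on a
 * hypothetical hyperthreaded quad-core part whose deepest cache is an L3
 * reported as shared by 8 logical CPUs, the loop above leaves
 * cache_level = 3 and nCPUsSharing = 8, so LLCDepth = 2 (0-based),
 * maxSharingLLC = 8, and nCoresSharingLLC = 8 / (8 / 4) = 4.
 */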
static void
initTopoParms(void)
{
	i386_cpu_info_t	*cpuinfo;

	topoParms.stable = FALSE;

	cpuinfo = cpuid_info();

	PE_parse_boot_argn("-topo", &topo_dbg, sizeof(topo_dbg));

	/*
	 * We need to start with getting the LLC information correct.
	 */
	x86_LLC_info();

	/*
	 * Compute the number of threads (logical CPUs) per core.
	 */
	DIVISOR_GUARD(cpuinfo->core_count);
	topoParms.nLThreadsPerCore = cpuinfo->thread_count / cpuinfo->core_count;
	DIVISOR_GUARD(cpuinfo->cpuid_cores_per_package);
	topoParms.nPThreadsPerCore = cpuinfo->cpuid_logical_per_package / cpuinfo->cpuid_cores_per_package;

	/*
	 * Compute the number of dies per package.
	 */
	DIVISOR_GUARD(topoParms.nCoresSharingLLC);
	topoParms.nLDiesPerPackage = cpuinfo->core_count / topoParms.nCoresSharingLLC;
	DIVISOR_GUARD(topoParms.nPThreadsPerCore);
	DIVISOR_GUARD(topoParms.maxSharingLLC / topoParms.nPThreadsPerCore);
	topoParms.nPDiesPerPackage = cpuinfo->cpuid_cores_per_package / (topoParms.maxSharingLLC / topoParms.nPThreadsPerCore);

	/*
	 * Compute the number of cores per die.
	 */
	topoParms.nLCoresPerDie = topoParms.nCoresSharingLLC;
	topoParms.nPCoresPerDie = (topoParms.maxSharingLLC / topoParms.nPThreadsPerCore);

	/*
	 * Compute the number of threads per die.
	 */
	topoParms.nLThreadsPerDie = topoParms.nLThreadsPerCore * topoParms.nLCoresPerDie;
	topoParms.nPThreadsPerDie = topoParms.nPThreadsPerCore * topoParms.nPCoresPerDie;

	/*
	 * Compute the number of cores per package.
	 */
	topoParms.nLCoresPerPackage = topoParms.nLCoresPerDie * topoParms.nLDiesPerPackage;
	topoParms.nPCoresPerPackage = topoParms.nPCoresPerDie * topoParms.nPDiesPerPackage;

	/*
	 * Compute the number of threads per package.
	 */
	topoParms.nLThreadsPerPackage = topoParms.nLThreadsPerCore * topoParms.nLCoresPerPackage;
	topoParms.nPThreadsPerPackage = topoParms.nPThreadsPerCore * topoParms.nPCoresPerPackage;

	TOPO_DBG("\nCache Topology Parameters:\n");
	TOPO_DBG("\tLLC Depth:           %d\n", topoParms.LLCDepth);
	TOPO_DBG("\tCores Sharing LLC:   %d\n", topoParms.nCoresSharingLLC);
	TOPO_DBG("\tThreads Sharing LLC: %d\n", topoParms.nLCPUsSharingLLC);
	TOPO_DBG("\tmax Sharing of LLC:  %d\n", topoParms.maxSharingLLC);

	TOPO_DBG("\nLogical Topology Parameters:\n");
	TOPO_DBG("\tThreads per Core:  %d\n", topoParms.nLThreadsPerCore);
	TOPO_DBG("\tCores per Die:     %d\n", topoParms.nLCoresPerDie);
	TOPO_DBG("\tThreads per Die:   %d\n", topoParms.nLThreadsPerDie);
	TOPO_DBG("\tDies per Package:  %d\n", topoParms.nLDiesPerPackage);
	TOPO_DBG("\tCores per Package: %d\n", topoParms.nLCoresPerPackage);
	TOPO_DBG("\tThreads per Package: %d\n", topoParms.nLThreadsPerPackage);

	TOPO_DBG("\nPhysical Topology Parameters:\n");
	TOPO_DBG("\tThreads per Core: %d\n", topoParms.nPThreadsPerCore);
	TOPO_DBG("\tCores per Die:     %d\n", topoParms.nPCoresPerDie);
	TOPO_DBG("\tThreads per Die:   %d\n", topoParms.nPThreadsPerDie);
	TOPO_DBG("\tDies per Package:  %d\n", topoParms.nPDiesPerPackage);
	TOPO_DBG("\tCores per Package: %d\n", topoParms.nPCoresPerPackage);
	TOPO_DBG("\tThreads per Package: %d\n", topoParms.nPThreadsPerPackage);

	topoParmsInited = TRUE;
}
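/*
 * Worked example (added, not in the original source), continuing the
 * hypothetical hyperthreaded quad-core above (4 cores, 8 threads, one LLC
 * shared by all 8 logical CPUs):
 *
 *	nLThreadsPerCore    = 8 / 4 = 2
 *	nLDiesPerPackage    = 4 / 4 = 1
 *	nLCoresPerDie       = 4
 *	nLThreadsPerDie     = 2 * 4 = 8
 *	nLCoresPerPackage   = 4 * 1 = 4
 *	nLThreadsPerPackage = 2 * 4 = 8
 *
 * i.e. one package, one die, four cores, and eight logical CPUs.
 */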
static void
x86_cache_free(x86_cpu_cache_t *cache)
{
	num_caches -= 1;
	if (cache->level > 0 && cache->level <= MAX_CACHE_DEPTH) {
		num_Lx_caches[cache->level - 1] -= 1;
	}
	cache->next = x86_caches;
	x86_caches = cache;
}
/*
 * This returns a list of cache structures that represent the
 * caches for a CPU.  Some of the structures may have to be
 * "freed" if they are actually shared between CPUs.
 */
static x86_cpu_cache_t *
x86_cache_list(void)
{
	x86_cpu_cache_t	*root = NULL;
	x86_cpu_cache_t	*cur = NULL;
	x86_cpu_cache_t	*last = NULL;
	struct cpu_cache *cachep;
	int		i;

	/*
	 * Cons up a list driven not by CPUID leaf 4 (deterministic cache params)
	 * but by the table above plus parameters already cracked from cpuid...
	 */
	for (i = 0, cachep = &cpu_caches[0]; i < LCACHE_MAX; i++, cachep++) {
		if (cachep->type == 0 || cpuid_info()->cache_size[i] == 0) {
			continue;
		}

		cur = x86_cache_alloc();

		cur->type = cachep->type;
		cur->level = cachep->level;
		cur->nlcpus = 0;
		cur->maxcpus = cpuid_info()->cache_sharing[i];
		cur->partitions = cpuid_info()->cache_partitions[i];
		cur->cache_size = cpuid_info()->cache_size[i];
		cur->line_size = cpuid_info()->cache_linesize;

		if (last == NULL) {
			root = cur;
		} else {
			last->next = cur;
		}
		last = cur;
		num_Lx_caches[cur->level - 1] += 1;
	}

	return root;
}
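/*
 * Illustrative note (added, not in the original source): for a part that
 * reports L1 data, L1 instruction, L2 unified and L3 unified caches, the
 * list built above contains one node per level from the table, with maxcpus
 * taken from cache_sharing[], e.g. the per-core thread count for L1/L2 and
 * the full set of logical CPUs that share the LLC.
 */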
static x86_cpu_cache_t *
x86_match_cache(x86_cpu_cache_t *list, x86_cpu_cache_t *matcher)
{
	x86_cpu_cache_t	*cur_cache;

	cur_cache = list;
	while (cur_cache != NULL) {
		if (cur_cache->maxcpus == matcher->maxcpus
		    && cur_cache->type == matcher->type
		    && cur_cache->level == matcher->level
		    && cur_cache->partitions == matcher->partitions
		    && cur_cache->line_size == matcher->line_size
		    && cur_cache->cache_size == matcher->cache_size) {
			return cur_cache;
		}

		cur_cache = cur_cache->next;
	}

	return NULL;
}
static void
x86_lcpu_init(int cpu)
{
	cpu_data_t	*cpup;
	x86_lcpu_t	*lcpu;
	int		i;

	cpup = cpu_datap(cpu);

	lcpu = &cpup->lcpu;
	lcpu->next_in_core = NULL;
	lcpu->next_in_die = NULL;
	lcpu->next_in_pkg = NULL;
	lcpu->core = NULL;
	lcpu->die = NULL;
	lcpu->package = NULL;
	lcpu->pnum = cpup->cpu_phys_number;
	lcpu->state = LCPU_OFF;
	for (i = 0; i < MAX_CACHE_DEPTH; i += 1) {
		lcpu->caches[i] = NULL;
	}
}
static x86_core_t *
x86_core_alloc(int cpu)
{
	x86_core_t	*core;
	cpu_data_t	*cpup;

	cpup = cpu_datap(cpu);

	mp_safe_spin_lock(&x86_topo_lock);
	if (free_cores != NULL) {
		core = free_cores;
		free_cores = core->next_in_die;
		core->next_in_die = NULL;
		simple_unlock(&x86_topo_lock);
	} else {
		simple_unlock(&x86_topo_lock);
		core = zalloc_permanent_type(x86_core_t);
		if (core == NULL) {
			panic("x86_core_alloc() alloc of x86_core_t failed!\n");
		}
	}

	core->pcore_num = cpup->cpu_phys_number / topoParms.nPThreadsPerCore;
	core->lcore_num = core->pcore_num % topoParms.nPCoresPerPackage;

	core->flags = X86CORE_FL_PRESENT | X86CORE_FL_READY
	    | X86CORE_FL_HALTED | X86CORE_FL_IDLE;

	return core;
}
static void
x86_core_free(x86_core_t *core)
{
	mp_safe_spin_lock(&x86_topo_lock);
	core->next_in_die = free_cores;
	free_cores = core;
	simple_unlock(&x86_topo_lock);
}
static x86_pkg_t *
x86_package_find(int cpu)
{
	x86_pkg_t	*pkg;
	cpu_data_t	*cpup;
	uint32_t	pkg_num;

	cpup = cpu_datap(cpu);

	pkg_num = cpup->cpu_phys_number / topoParms.nPThreadsPerPackage;

	pkg = x86_pkgs;
	while (pkg != NULL) {
		if (pkg->ppkg_num == pkg_num) {
			break;
		}
		pkg = pkg->next;
	}

	return pkg;
}
static x86_die_t *
x86_die_find(int cpu)
{
	x86_die_t	*die;
	x86_pkg_t	*pkg;
	cpu_data_t	*cpup;
	uint32_t	die_num;

	cpup = cpu_datap(cpu);

	die_num = cpup->cpu_phys_number / topoParms.nPThreadsPerDie;

	pkg = x86_package_find(cpu);
	if (pkg == NULL) {
		return NULL;
	}

	die = pkg->dies;
	while (die != NULL) {
		if (die->pdie_num == die_num) {
			break;
		}
		die = die->next_in_pkg;
	}

	return die;
}
static x86_core_t *
x86_core_find(int cpu)
{
	x86_core_t	*core;
	x86_die_t	*die;
	cpu_data_t	*cpup;
	uint32_t	core_num;

	cpup = cpu_datap(cpu);

	core_num = cpup->cpu_phys_number / topoParms.nPThreadsPerCore;

	die = x86_die_find(cpu);
	if (die == NULL) {
		return NULL;
	}

	core = die->cores;
	while (core != NULL) {
		if (core->pcore_num == core_num) {
			break;
		}
		core = core->next_in_die;
	}

	return core;
}
void
x86_set_logical_topology(x86_lcpu_t *lcpu, int pnum, int lnum)
{
	x86_core_t	*core = lcpu->core;
	x86_die_t	*die = lcpu->die;
	x86_pkg_t	*pkg = lcpu->package;

	assert(core != NULL);
	assert(die != NULL);
	assert(pkg != NULL);

	lcpu->cpu_num = lnum;
	lcpu->pnum = pnum;
	lcpu->master = (lnum == master_cpu);
	lcpu->primary = (lnum % topoParms.nLThreadsPerPackage) == 0;

	lcpu->lnum = lnum % topoParms.nLThreadsPerCore;

	core->pcore_num = lnum / topoParms.nLThreadsPerCore;
	core->lcore_num = core->pcore_num % topoParms.nLCoresPerDie;

	die->pdie_num = lnum / (topoParms.nLThreadsPerCore * topoParms.nLCoresPerDie);
	die->ldie_num = die->pdie_num % topoParms.nLDiesPerPackage;

	pkg->ppkg_num = lnum / topoParms.nLThreadsPerPackage;
	pkg->lpkg_num = pkg->ppkg_num;
}
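/*
 * Illustrative example (added, not in the original source): with
 * nLThreadsPerCore = 2 and nLCoresPerDie = 4, logical CPU lnum = 5 gets
 * lnum-within-core 5 % 2 = 1, core->pcore_num 5 / 2 = 2, and
 * die->pdie_num 5 / (2 * 4) = 0, i.e. the second thread of the third core
 * on the first die.
 */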
static x86_die_t *
x86_die_alloc(int cpu)
{
	x86_die_t	*die;
	cpu_data_t	*cpup;

	cpup = cpu_datap(cpu);

	mp_safe_spin_lock(&x86_topo_lock);
	if (free_dies != NULL) {
		die = free_dies;
		free_dies = die->next_in_pkg;
		die->next_in_pkg = NULL;
		simple_unlock(&x86_topo_lock);
	} else {
		simple_unlock(&x86_topo_lock);
		die = zalloc_permanent_type(x86_die_t);
		if (die == NULL) {
			panic("x86_die_alloc() alloc of x86_die_t failed!\n");
		}
	}

	die->pdie_num = cpup->cpu_phys_number / topoParms.nPThreadsPerDie;

	die->ldie_num = num_dies;
	atomic_incl((long *) &num_dies, 1);

	die->flags = X86DIE_FL_PRESENT;

	return die;
}
static void
x86_die_free(x86_die_t *die)
{
	mp_safe_spin_lock(&x86_topo_lock);
	die->next_in_pkg = free_dies;
	free_dies = die;
	atomic_decl((long *) &num_dies, 1);
	simple_unlock(&x86_topo_lock);
}
static x86_pkg_t *
x86_package_alloc(int cpu)
{
	x86_pkg_t	*pkg;
	cpu_data_t	*cpup;

	cpup = cpu_datap(cpu);

	mp_safe_spin_lock(&x86_topo_lock);
	if (free_pkgs != NULL) {
		pkg = free_pkgs;
		free_pkgs = pkg->next;
		pkg->next = NULL;
		simple_unlock(&x86_topo_lock);
	} else {
		simple_unlock(&x86_topo_lock);
		pkg = zalloc_permanent_type(x86_pkg_t);
		if (pkg == NULL) {
			panic("x86_package_alloc() alloc of x86_pkg_t failed!\n");
		}
	}

	pkg->ppkg_num = cpup->cpu_phys_number / topoParms.nPThreadsPerPackage;

	pkg->lpkg_num = topoParms.nPackages;
	atomic_incl((long *) &topoParms.nPackages, 1);

	pkg->flags = X86PKG_FL_PRESENT | X86PKG_FL_READY;

	return pkg;
}
static void
x86_package_free(x86_pkg_t *pkg)
{
	mp_safe_spin_lock(&x86_topo_lock);
	pkg->next = free_pkgs;
	free_pkgs = pkg;
	atomic_decl((long *) &topoParms.nPackages, 1);
	simple_unlock(&x86_topo_lock);
}
static void
x86_cache_add_lcpu(x86_cpu_cache_t *cache, x86_lcpu_t *lcpu)
{
	x86_cpu_cache_t	*cur_cache;
	int		i;

	/*
	 * Put the new CPU into the list of the cache.
	 */
	cur_cache = lcpu->caches[cache->level - 1];
	lcpu->caches[cache->level - 1] = cache;
	cache->next = cur_cache;
	cache->nlcpus += 1;
	for (i = 0; i < cache->nlcpus; i += 1) {
		if (cache->cpus[i] == NULL) {
			cache->cpus[i] = lcpu;
			break;
		}
	}
}
static void
x86_lcpu_add_caches(x86_lcpu_t *lcpu)
{
	x86_cpu_cache_t	*list;
	x86_cpu_cache_t	*cur;
	x86_cpu_cache_t	*match;
	x86_die_t	*die;
	x86_core_t	*core;
	x86_lcpu_t	*cur_lcpu;
	uint32_t	level;
	boolean_t	found = FALSE;

	assert(lcpu != NULL);

	/*
	 * Add the cache data to the topology.
	 */
	list = x86_cache_list();

	mp_safe_spin_lock(&x86_topo_lock);

	while (list != NULL) {
		/*
		 * Remove the cache from the front of the list.
		 */
		cur = list;
		list = cur->next;
		cur->next = NULL;
		level = cur->level - 1;
		found = FALSE;

		/*
		 * If the cache isn't shared then just put it where it
		 * belongs.
		 */
		if (cur->maxcpus == 1) {
			x86_cache_add_lcpu(cur, lcpu);
			continue;
		}

		/*
		 * We'll assume that all of the caches at a particular level
		 * have the same sharing.  So if we have a cache already at
		 * this level, we'll just skip looking for the match.
		 */
		if (lcpu->caches[level] != NULL) {
			x86_cache_free(cur);
			continue;
		}

		/*
		 * This is a shared cache, so we have to figure out if
		 * this is the first time we've seen this cache.  We do
		 * this by searching through the topology and seeing if
		 * this cache is already described.
		 *
		 * Assume that L{LLC-1} are all at the core level and that
		 * LLC is shared at the die level.
		 */
		if (level < topoParms.LLCDepth) {
			/*
			 * Shared at the core.
			 */
			core = lcpu->core;
			cur_lcpu = core->lcpus;
			while (cur_lcpu != NULL) {
				/*
				 * Skip ourselves.
				 */
				if (cur_lcpu == lcpu) {
					cur_lcpu = cur_lcpu->next_in_core;
					continue;
				}

				/*
				 * If there's a cache on this logical CPU,
				 * then use that one.
				 */
				match = x86_match_cache(cur_lcpu->caches[level], cur);
				if (match != NULL) {
					x86_cache_free(cur);
					x86_cache_add_lcpu(match, lcpu);
					found = TRUE;
					break;
				}

				cur_lcpu = cur_lcpu->next_in_core;
			}
		} else {
			/*
			 * Shared at the die.
			 */
			die = lcpu->die;
			cur_lcpu = die->lcpus;
			while (cur_lcpu != NULL) {
				/*
				 * Skip ourselves.
				 */
				if (cur_lcpu == lcpu) {
					cur_lcpu = cur_lcpu->next_in_die;
					continue;
				}

				/*
				 * If there's a cache on this logical CPU,
				 * then use that one.
				 */
				match = x86_match_cache(cur_lcpu->caches[level], cur);
				if (match != NULL) {
					x86_cache_free(cur);
					x86_cache_add_lcpu(match, lcpu);
					found = TRUE;
					break;
				}

				cur_lcpu = cur_lcpu->next_in_die;
			}
		}

		/*
		 * If a shared cache wasn't found, then this logical CPU must
		 * be the first one encountered.
		 */
		if (!found) {
			x86_cache_add_lcpu(cur, lcpu);
		}
	}

	simple_unlock(&x86_topo_lock);
}
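/*
 * Illustrative note (added, not in the original source): on a hyperthreaded
 * part the second logical CPU of a core walks its core siblings above, finds
 * the L1/L2 already recorded by its sibling via x86_match_cache(), frees its
 * own duplicate node and records the shared structure instead; only the
 * first CPU to describe a given cache keeps the newly allocated node.
 */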
static void
x86_core_add_lcpu(x86_core_t *core, x86_lcpu_t *lcpu)
{
	assert(core != NULL);
	assert(lcpu != NULL);

	mp_safe_spin_lock(&x86_topo_lock);

	lcpu->next_in_core = core->lcpus;
	core->lcpus = lcpu;
	lcpu->core = core;
	core->num_lcpus += 1;
	simple_unlock(&x86_topo_lock);
}
static void
x86_die_add_lcpu(x86_die_t *die, x86_lcpu_t *lcpu)
{
	assert(die != NULL);
	assert(lcpu != NULL);

	lcpu->next_in_die = die->lcpus;
	die->lcpus = lcpu;
	lcpu->die = die;
}
static void
x86_die_add_core(x86_die_t *die, x86_core_t *core)
{
	assert(die != NULL);
	assert(core != NULL);

	core->next_in_die = die->cores;
	die->cores = core;
	core->die = die;
	die->num_cores += 1;
}
static void
x86_package_add_lcpu(x86_pkg_t *pkg, x86_lcpu_t *lcpu)
{
	assert(pkg != NULL);
	assert(lcpu != NULL);

	lcpu->next_in_pkg = pkg->lcpus;
	pkg->lcpus = lcpu;
	lcpu->package = pkg;
}
static void
x86_package_add_core(x86_pkg_t *pkg, x86_core_t *core)
{
	assert(pkg != NULL);
	assert(core != NULL);

	core->next_in_pkg = pkg->cores;
	pkg->cores = core;
	core->package = pkg;
}
static void
x86_package_add_die(x86_pkg_t *pkg, x86_die_t *die)
{
	assert(pkg != NULL);
	assert(die != NULL);

	die->next_in_pkg = pkg->dies;
	pkg->dies = die;
	die->package = pkg;
	pkg->num_dies += 1;
}
void *
cpu_thread_alloc(int cpu)
{
	x86_core_t	*core = NULL;
	x86_die_t	*die = NULL;
	x86_pkg_t	*pkg = NULL;
	cpu_data_t	*cpup;
	uint32_t	phys_cpu;

	/*
	 * Only allow one to manipulate the topology at a time.
	 */
	mp_safe_spin_lock(&x86_topo_lock);

	/*
	 * Make sure all of the topology parameters have been initialized.
	 */
	if (!topoParmsInited) {
		initTopoParms();
	}

	cpup = cpu_datap(cpu);

	phys_cpu = cpup->cpu_phys_number;

	x86_lcpu_init(cpu);

	/*
	 * Assume that all cpus have the same features.
	 */
	if (cpu_is_hyperthreaded()) {
		cpup->cpu_threadtype = CPU_THREADTYPE_INTEL_HTT;
	} else {
		cpup->cpu_threadtype = CPU_THREADTYPE_NONE;
	}

	/*
	 * Get the package that the logical CPU is in.
	 */
	do {
		pkg = x86_package_find(cpu);
		if (pkg == NULL) {
			/*
			 * Package structure hasn't been created yet, do it now.
			 */
			simple_unlock(&x86_topo_lock);
			pkg = x86_package_alloc(cpu);
			mp_safe_spin_lock(&x86_topo_lock);
			if (x86_package_find(cpu) != NULL) {
				x86_package_free(pkg);
				continue;
			}

			/*
			 * Add the new package to the global list of packages.
			 */
			pkg->next = x86_pkgs;
			x86_pkgs = pkg;
		}
	} while (pkg == NULL);

	/*
	 * Get the die that the logical CPU is in.
	 */
	do {
		die = x86_die_find(cpu);
		if (die == NULL) {
			/*
			 * Die structure hasn't been created yet, do it now.
			 */
			simple_unlock(&x86_topo_lock);
			die = x86_die_alloc(cpu);
			mp_safe_spin_lock(&x86_topo_lock);
			if (x86_die_find(cpu) != NULL) {
				x86_die_free(die);
				continue;
			}

			/*
			 * Add the die to the package.
			 */
			x86_package_add_die(pkg, die);
		}
	} while (die == NULL);

	/*
	 * Get the core for this logical CPU.
	 */
	do {
		core = x86_core_find(cpu);
		if (core == NULL) {
			/*
			 * Allocate the core structure now.
			 */
			simple_unlock(&x86_topo_lock);
			core = x86_core_alloc(cpu);
			mp_safe_spin_lock(&x86_topo_lock);
			if (x86_core_find(cpu) != NULL) {
				x86_core_free(core);
				continue;
			}

			/*
			 * Add the core to the die & package.
			 */
			x86_die_add_core(die, core);
			x86_package_add_core(pkg, core);
			machine_info.physical_cpu_max += 1;
		}
	} while (core == NULL);

	/*
	 * Done manipulating the topology, so others can get in.
	 */
	machine_info.logical_cpu_max += 1;
	simple_unlock(&x86_topo_lock);

	/*
	 * Add the logical CPU to the other topology structures.
	 */
	x86_core_add_lcpu(core, &cpup->lcpu);
	x86_die_add_lcpu(core->die, &cpup->lcpu);
	x86_package_add_lcpu(core->package, &cpup->lcpu);
	x86_lcpu_add_caches(&cpup->lcpu);

	return (void *) core;
}
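/*
 * Note (added, not in the original source): each do/while above drops
 * x86_topo_lock around the allocation, so another CPU may create the same
 * package/die/core in the meantime; the find() is therefore repeated after
 * the lock is retaken, and a now-redundant allocation is returned to its
 * free list before retrying.
 */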
void
cpu_thread_init(void)
{
	int		my_cpu = get_cpu_number();
	cpu_data_t	*cpup = current_cpu_datap();
	x86_core_t	*core;
	static int	initialized = 0;

	/*
	 * If we're the boot processor, we do all of the initialization of
	 * the CPU topology infrastructure.
	 */
	if (my_cpu == master_cpu && !initialized) {
		simple_lock_init(&x86_topo_lock, 0);

		/*
		 * Put this logical CPU into the physical CPU topology.
		 */
		cpup->lcpu.core = cpu_thread_alloc(my_cpu);

		initialized = 1;
	}

	/*
	 * Do the CPU accounting.
	 */
	core = cpup->lcpu.core;
	mp_safe_spin_lock(&x86_topo_lock);
	machine_info.logical_cpu += 1;
	if (core->active_lcpus == 0) {
		machine_info.physical_cpu += 1;
	}
	core->active_lcpus += 1;
	simple_unlock(&x86_topo_lock);

	pmCPUMarkRunning(cpup);
	timer_resync_deadlines();
}
/*
 * Called for a cpu to halt permanently
 * (as opposed to halting and expecting an interrupt to awaken it).
 */
__attribute__((noreturn))
void
cpu_thread_halt(void)
{
	x86_core_t	*core;
	cpu_data_t	*cpup = current_cpu_datap();

	mp_safe_spin_lock(&x86_topo_lock);
	machine_info.logical_cpu -= 1;
	core = cpup->lcpu.core;
	core->active_lcpus -= 1;
	if (core->active_lcpus == 0) {
		machine_info.physical_cpu -= 1;
	}
	simple_unlock(&x86_topo_lock);

	/*
	 * Let the power management code determine the best way to "stop"
	 * the processor.
	 */
	ml_set_interrupts_enabled(FALSE);
	while (1) {
		pmCPUHalt(PM_HALT_NORMAL);
	}
	/* NOT REACHED */
}
/*
 * Validates that the topology was built correctly.  Must be called only
 * after the complete topology is built and no other changes are being made.
 */
void
x86_validate_topology(void)
{
	x86_pkg_t	*pkg;
	x86_die_t	*die;
	x86_core_t	*core;
	x86_lcpu_t	*lcpu;
	uint32_t	nDies;
	uint32_t	nCores;
	uint32_t	nCPUs;

	if (topo_dbg) {
		debug_topology_print();
	}

	/*
	 * Called after processors are registered but before non-boot processors
	 * are started:
	 *  - real_ncpus: number of registered processors driven from MADT
	 *  - max_ncpus:  max number of processors that will be started
	 */
	nCPUs = topoParms.nPackages * topoParms.nLThreadsPerPackage;
	if (nCPUs != real_ncpus) {
		panic("x86_validate_topology() %d threads but %d registered from MADT",
		    nCPUs, real_ncpus);
	}

	pkg = x86_pkgs;
	while (pkg != NULL) {
		/*
		 * Make sure that the package has the correct number of dies.
		 */
		nDies = 0;
		die = pkg->dies;
		while (die != NULL) {
			if (die->package == NULL) {
				panic("Die(%d)->package is NULL",
				    die->pdie_num);
			}
			if (die->package != pkg) {
				panic("Die %d points to package %d, should be %d",
				    die->pdie_num, die->package->lpkg_num, pkg->lpkg_num);
			}

			TOPO_DBG("Die(%d)->package %d\n",
			    die->pdie_num, pkg->lpkg_num);

			/*
			 * Make sure that the die has the correct number of cores.
			 */
			TOPO_DBG("Die(%d)->cores: ", die->pdie_num);
			nCores = 0;
			core = die->cores;
			while (core != NULL) {
				if (core->die == NULL) {
					panic("Core(%d)->die is NULL",
					    core->pcore_num);
				}
				if (core->die != die) {
					panic("Core %d points to die %d, should be %d",
					    core->pcore_num, core->die->pdie_num, die->pdie_num);
				}

				nCores += 1;
				TOPO_DBG("%d ", core->pcore_num);
				core = core->next_in_die;
			}
			TOPO_DBG("\n");

			if (nCores != topoParms.nLCoresPerDie) {
				panic("Should have %d Cores, but only found %d for Die %d",
				    topoParms.nLCoresPerDie, nCores, die->pdie_num);
			}

			/*
			 * Make sure that the die has the correct number of CPUs.
			 */
			TOPO_DBG("Die(%d)->lcpus: ", die->pdie_num);
			nCPUs = 0;
			lcpu = die->lcpus;
			while (lcpu != NULL) {
				if (lcpu->die == NULL) {
					panic("CPU(%d)->die is NULL",
					    lcpu->cpu_num);
				}
				if (lcpu->die != die) {
					panic("CPU %d points to die %d, should be %d",
					    lcpu->cpu_num, lcpu->die->pdie_num, die->pdie_num);
				}

				nCPUs += 1;
				TOPO_DBG("%d ", lcpu->cpu_num);
				lcpu = lcpu->next_in_die;
			}
			TOPO_DBG("\n");

			if (nCPUs != topoParms.nLThreadsPerDie) {
				panic("Should have %d Threads, but only found %d for Die %d",
				    topoParms.nLThreadsPerDie, nCPUs, die->pdie_num);
			}

			nDies += 1;
			die = die->next_in_pkg;
		}

		if (nDies != topoParms.nLDiesPerPackage) {
			panic("Should have %d Dies, but only found %d for package %d",
			    topoParms.nLDiesPerPackage, nDies, pkg->lpkg_num);
		}

		/*
		 * Make sure that the package has the correct number of cores.
		 */
		nCores = 0;
		core = pkg->cores;
		while (core != NULL) {
			if (core->package == NULL) {
				panic("Core(%d)->package is NULL",
				    core->pcore_num);
			}
			if (core->package != pkg) {
				panic("Core %d points to package %d, should be %d",
				    core->pcore_num, core->package->lpkg_num, pkg->lpkg_num);
			}
			TOPO_DBG("Core(%d)->package %d\n",
			    core->pcore_num, pkg->lpkg_num);

			/*
			 * Make sure that the core has the correct number of CPUs.
			 */
			nCPUs = 0;
			lcpu = core->lcpus;
			TOPO_DBG("Core(%d)->lcpus: ", core->pcore_num);
			while (lcpu != NULL) {
				if (lcpu->core == NULL) {
					panic("CPU(%d)->core is NULL",
					    lcpu->cpu_num);
				}
				if (lcpu->core != core) {
					panic("CPU %d points to core %d, should be %d",
					    lcpu->cpu_num, lcpu->core->pcore_num, core->pcore_num);
				}
				TOPO_DBG("%d ", lcpu->cpu_num);
				nCPUs += 1;
				lcpu = lcpu->next_in_core;
			}
			TOPO_DBG("\n");

			if (nCPUs != topoParms.nLThreadsPerCore) {
				panic("Should have %d Threads, but only found %d for Core %d",
				    topoParms.nLThreadsPerCore, nCPUs, core->pcore_num);
			}

			nCores += 1;
			core = core->next_in_pkg;
		}

		if (nCores != topoParms.nLCoresPerPackage) {
			panic("Should have %d Cores, but only found %d for package %d",
			    topoParms.nLCoresPerPackage, nCores, pkg->lpkg_num);
		}

		/*
		 * Make sure that the package has the correct number of CPUs.
		 */
		nCPUs = 0;
		lcpu = pkg->lcpus;
		while (lcpu != NULL) {
			if (lcpu->package == NULL) {
				panic("CPU(%d)->package is NULL",
				    lcpu->cpu_num);
			}
			if (lcpu->package != pkg) {
				panic("CPU %d points to package %d, should be %d",
				    lcpu->cpu_num, lcpu->package->lpkg_num, pkg->lpkg_num);
			}
			TOPO_DBG("CPU(%d)->package %d\n",
			    lcpu->cpu_num, pkg->lpkg_num);
			nCPUs += 1;
			lcpu = lcpu->next_in_pkg;
		}

		if (nCPUs != topoParms.nLThreadsPerPackage) {
			panic("Should have %d Threads, but only found %d for package %d",
			    topoParms.nLThreadsPerPackage, nCPUs, pkg->lpkg_num);
		}

		pkg = pkg->next;
	}
}
/*
 * Prints out the topology
 */
static void
debug_topology_print(void)
{
	x86_pkg_t	*pkg;
	x86_die_t	*die;
	x86_core_t	*core;
	x86_lcpu_t	*cpu;

	pkg = x86_pkgs;
	while (pkg != NULL) {
		kprintf("Package:\n");
		kprintf("    Physical: %d\n", pkg->ppkg_num);
		kprintf("    Logical:  %d\n", pkg->lpkg_num);

		die = pkg->dies;
		while (die != NULL) {
			kprintf("    Die:\n");
			kprintf("      Physical: %d\n", die->pdie_num);
			kprintf("      Logical:  %d\n", die->ldie_num);

			core = die->cores;
			while (core != NULL) {
				kprintf("        Core:\n");
				kprintf("          Physical: %d\n", core->pcore_num);
				kprintf("          Logical:  %d\n", core->lcore_num);

				cpu = core->lcpus;
				while (cpu != NULL) {
					kprintf("            LCPU:\n");
					kprintf("              CPU #:    %d\n", cpu->cpu_num);
					kprintf("              Physical: %d\n", cpu->pnum);
					kprintf("              Logical:  %d\n", cpu->lnum);
					kprintf("              Flags:    ");
					if (cpu->master) {
						kprintf("MASTER ");
					}
					if (cpu->primary) {
						kprintf("PRIMARY");
					}
					if (!cpu->master && !cpu->primary) {
						kprintf("(NONE)");
					}
					kprintf("\n");

					cpu = cpu->next_in_core;
				}

				core = core->next_in_die;
			}

			die = die->next_in_pkg;
		}

		pkg = pkg->next;
	}
}