/*
 * Copyright (c) 2003-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <vm/vm_kern.h>
#include <kern/kalloc.h>
#include <mach/machine.h>
#include <i386/cpu_threads.h>
#include <i386/cpuid.h>
#include <i386/machine_cpu.h>
#include <i386/lock.h>
#include <i386/perfmon.h>
#include <i386/pmCPU.h>
//#define TOPO_DEBUG	1
#if TOPO_DEBUG
void debug_topology_print(void);
#define DBG(x...)	kprintf("DBG: " x)
#else
#define DBG(x...)
#endif /* TOPO_DEBUG */
#define bitmask(h,l)	((bit(h)|(bit(h)-1)) & ~(bit(l)-1))
#define bitfield(x,h,l)	(((x) & bitmask(h,l)) >> l)
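
/*
 * Illustrative note (not in the original source): bitfield(x,h,l)
 * extracts bits h..l of x as an unsigned value.  For example,
 * bitfield(0x1234, 7, 4) builds the mask 0xF0 and yields 3.  The CPUID
 * decoding below relies on this: for leaf 4 results, bitfield(eax, 4, 0)
 * is the cache type (0 terminates the enumeration) and
 * bitfield(eax, 7, 5) is the cache level.
 */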
x86_pkg_t	*x86_pkgs = NULL;
uint32_t	num_Lx_caches[MAX_CACHE_DEPTH] = { 0 };

static x86_pkg_t	*free_pkgs = NULL;
static x86_die_t	*free_dies = NULL;
static x86_core_t	*free_cores = NULL;
static uint32_t		num_dies = 0;

static x86_cpu_cache_t	*x86_caches = NULL;
static uint32_t		num_caches = 0;

static boolean_t	topoParmsInited = FALSE;
x86_topology_parameters_t	topoParms;
decl_simple_lock_data(, x86_topo_lock);
boolean_t
cpu_is_hyperthreaded(void)
{
	i386_cpu_info_t	*cpuinfo;

	cpuinfo = cpuid_info();
	return(cpuinfo->thread_count > cpuinfo->core_count);
}
static x86_cpu_cache_t *
x86_cache_alloc(void)
{
	x86_cpu_cache_t	*cache;
	int		i;

	if (x86_caches == NULL) {
		cache = kalloc(sizeof(x86_cpu_cache_t) + (MAX_CPUS * sizeof(x86_lcpu_t *)));
		if (cache == NULL)
			return(NULL);
	} else {
		cache = x86_caches;
		x86_caches = cache->next;
	}

	bzero(cache, sizeof(x86_cpu_cache_t));
	cache->maxcpus = MAX_CPUS;
	for (i = 0; i < cache->maxcpus; i += 1) {
		cache->cpus[i] = NULL;
	}

	num_caches += 1;

	return(cache);
}
static void
x86_LLC_info(void)
{
	uint32_t	index;
	uint32_t	cache_info[4];
	uint32_t	cache_level	= 0;
	uint32_t	nCPUsSharing	= 1;
	uint32_t	this_level;
	i386_cpu_info_t	*cpuinfo;

	cpuinfo = cpuid_info();

	do_cpuid(0, cache_info);

	if (cache_info[eax] < 4) {
		/*
		 * Processor does not support deterministic
		 * cache information.  Set LLC sharing to 1, since
		 * we have no better information.
		 */
		if (cpu_is_hyperthreaded()) {
			topoParms.nCoresSharingLLC = 1;
			topoParms.nLCPUsSharingLLC = 2;
			topoParms.maxSharingLLC = 2;
		} else {
			topoParms.nCoresSharingLLC = 1;
			topoParms.nLCPUsSharingLLC = 1;
			topoParms.maxSharingLLC = 1;
		}
		return;
	}
	for (index = 0; ; index += 1) {
		cache_info[eax] = 4;
		cache_info[ecx] = index;
		cache_info[ebx] = 0;
		cache_info[edx] = 0;

		cpuid(cache_info);

		/*
		 * See if all levels have been queried.
		 */
		if (bitfield(cache_info[eax], 4, 0) == 0)
			break;

		/*
		 * Get the current level.
		 */
		this_level = bitfield(cache_info[eax], 7, 5);

		/*
		 * Only worry about it if it's a deeper level than
		 * what we've seen before.
		 */
		if (this_level > cache_level) {
			cache_level = this_level;

			/*
			 * Save the number of CPUs sharing this cache.
			 */
			nCPUsSharing = bitfield(cache_info[eax], 25, 14) + 1;
		}
	}
	/*
	 * Make the level of the LLC be 0 based.
	 */
	topoParms.LLCDepth = cache_level - 1;

	/*
	 * nCPUsSharing represents the *maximum* number of cores or
	 * logical CPUs sharing the cache.
	 */
	topoParms.maxSharingLLC = nCPUsSharing;

	topoParms.nCoresSharingLLC = nCPUsSharing;
	topoParms.nLCPUsSharingLLC = nCPUsSharing;

	/*
	 * nCPUsSharing may not be the number of *active* cores or
	 * threads that are sharing the cache.
	 */
	if (nCPUsSharing > cpuinfo->core_count)
		topoParms.nCoresSharingLLC = cpuinfo->core_count;
	if (nCPUsSharing > cpuinfo->thread_count)
		topoParms.nLCPUsSharingLLC = cpuinfo->thread_count;

	if (nCPUsSharing > cpuinfo->thread_count)
		topoParms.maxSharingLLC = cpuinfo->thread_count;
}
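
/*
 * Worked example (illustrative only, not from the original source):
 * if CPUID leaf 4 reported the LLC as shared by up to 8 logical CPUs
 * on a part with 2 active cores and 4 active threads, nCPUsSharing
 * would be 8; maxSharingLLC and nLCPUsSharingLLC are then clamped to
 * thread_count (4) and nCoresSharingLLC to core_count (2).
 */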
static void
initTopoParms(void)
{
	i386_cpu_info_t	*cpuinfo;

	cpuinfo = cpuid_info();

	/*
	 * We need to start with getting the LLC information correct.
	 */
	x86_LLC_info();

	/*
	 * Compute the number of threads (logical CPUs) per core.
	 */
	topoParms.nLThreadsPerCore = cpuinfo->thread_count / cpuinfo->core_count;
	topoParms.nPThreadsPerCore = cpuinfo->cpuid_logical_per_package / cpuinfo->cpuid_cores_per_package;

	/*
	 * Compute the number of dies per package.
	 */
	topoParms.nLDiesPerPackage = cpuinfo->core_count / topoParms.nCoresSharingLLC;
	topoParms.nPDiesPerPackage = cpuinfo->cpuid_cores_per_package / (topoParms.maxSharingLLC / topoParms.nPThreadsPerCore);

	/*
	 * Compute the number of cores per die.
	 */
	topoParms.nLCoresPerDie = topoParms.nCoresSharingLLC;
	topoParms.nPCoresPerDie = (topoParms.maxSharingLLC / topoParms.nPThreadsPerCore);

	/*
	 * Compute the number of threads per die.
	 */
	topoParms.nLThreadsPerDie = topoParms.nLThreadsPerCore * topoParms.nLCoresPerDie;
	topoParms.nPThreadsPerDie = topoParms.nPThreadsPerCore * topoParms.nPCoresPerDie;

	/*
	 * Compute the number of cores per package.
	 */
	topoParms.nLCoresPerPackage = topoParms.nLCoresPerDie * topoParms.nLDiesPerPackage;
	topoParms.nPCoresPerPackage = topoParms.nPCoresPerDie * topoParms.nPDiesPerPackage;

	/*
	 * Compute the number of threads per package.
	 */
	topoParms.nLThreadsPerPackage = topoParms.nLThreadsPerCore * topoParms.nLCoresPerPackage;
	topoParms.nPThreadsPerPackage = topoParms.nPThreadsPerCore * topoParms.nPCoresPerPackage;
244 DBG("\nLogical Topology Parameters:\n");
245 DBG("\tThreads per Core: %d\n", topoParms
.nLThreadsPerCore
);
246 DBG("\tCores per Die: %d\n", topoParms
.nLCoresPerDie
);
247 DBG("\tThreads per Die: %d\n", topoParms
.nLThreadsPerDie
);
248 DBG("\tDies per Package: %d\n", topoParms
.nLDiesPerPackage
);
249 DBG("\tCores per Package: %d\n", topoParms
.nLCoresPerPackage
);
250 DBG("\tThreads per Package: %d\n", topoParms
.nLThreadsPerPackage
);
252 DBG("\nPhysical Topology Parameters:\n");
253 DBG("\tThreads per Core: %d\n", topoParms
.nPThreadsPerCore
);
254 DBG("\tCores per Die: %d\n", topoParms
.nPCoresPerDie
);
255 DBG("\tThreads per Die: %d\n", topoParms
.nPThreadsPerDie
);
256 DBG("\tDies per Package: %d\n", topoParms
.nPDiesPerPackage
);
257 DBG("\tCores per Package: %d\n", topoParms
.nPCoresPerPackage
);
258 DBG("\tThreads per Package: %d\n", topoParms
.nPThreadsPerPackage
);
260 topoParmsInited
= TRUE
;
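
/*
 * Worked example (illustrative figures, not from the original source):
 * for a single-die, 4-core, hyperthreaded package whose LLC is shared
 * by all 8 logical CPUs, the logical parameters come out as
 * nLThreadsPerCore = 8/4 = 2, nLCoresPerDie = 4, nLThreadsPerDie = 8,
 * nLDiesPerPackage = 1, nLCoresPerPackage = 4 and
 * nLThreadsPerPackage = 8.
 */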
static void
x86_cache_free(x86_cpu_cache_t *cache)
{
	num_caches -= 1;
	if (cache->level > 0 && cache->level <= MAX_CACHE_DEPTH)
		num_Lx_caches[cache->level - 1] -= 1;
	cache->next = x86_caches;
	x86_caches = cache;
}
/*
 * This returns a list of cache structures that represent the
 * caches for a CPU.  Some of the structures may have to be
 * "freed" if they are actually shared between CPUs.
 */
static x86_cpu_cache_t *
x86_cache_list(void)
{
	x86_cpu_cache_t	*root = NULL;
	x86_cpu_cache_t	*cur = NULL;
	x86_cpu_cache_t	*last = NULL;
	uint32_t	index;
	uint32_t	cache_info[4];
	uint32_t	nsets;

	do_cpuid(0, cache_info);

	if (cache_info[eax] < 4) {
		/*
		 * Processor does not support deterministic
		 * cache information.  Don't report anything.
		 */
		return NULL;
	}

	for (index = 0; ; index += 1) {
		cache_info[eax] = 4;
		cache_info[ecx] = index;
		cache_info[ebx] = 0;
		cache_info[edx] = 0;

		cpuid(cache_info);

		/*
		 * See if all levels have been queried.
		 */
		if (bitfield(cache_info[eax], 4, 0) == 0)
			break;
		cur = x86_cache_alloc();
		if (cur == NULL)
			break;

		cur->type = bitfield(cache_info[eax], 4, 0);
		cur->level = bitfield(cache_info[eax], 7, 5);
		cur->maxcpus = (bitfield(cache_info[eax], 25, 14) + 1);
		cur->line_size = bitfield(cache_info[ebx], 11, 0) + 1;
		cur->partitions = bitfield(cache_info[ebx], 21, 12) + 1;
		cur->ways = bitfield(cache_info[ebx], 31, 22) + 1;
		nsets = bitfield(cache_info[ecx], 31, 0) + 1;
		cur->cache_size = cur->line_size * cur->ways * cur->partitions * nsets;
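
		/*
		 * For example (illustrative figures only): a 64-byte line
		 * size, 8 ways, 1 partition and 8192 sets give
		 * 64 * 8 * 1 * 8192 = 4 MB for cache_size.
		 */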
		if (last == NULL) {
			root = cur;
			last = cur;
		} else {
			last->next = cur;
			last = cur;
		}

		num_Lx_caches[cur->level - 1] += 1;
	}

	return(root);
}
static x86_cpu_cache_t *
x86_match_cache(x86_cpu_cache_t *list, x86_cpu_cache_t *matcher)
{
	x86_cpu_cache_t	*cur_cache;

	cur_cache = list;
	while (cur_cache != NULL) {
		if (cur_cache->maxcpus == matcher->maxcpus
		    && cur_cache->type == matcher->type
		    && cur_cache->level == matcher->level
		    && cur_cache->ways == matcher->ways
		    && cur_cache->partitions == matcher->partitions
		    && cur_cache->line_size == matcher->line_size
		    && cur_cache->cache_size == matcher->cache_size)
			break;

		cur_cache = cur_cache->next;
	}

	return(cur_cache);
}
static void
x86_lcpu_init(int cpu)
{
	cpu_data_t	*cpup;
	x86_lcpu_t	*lcpu;
	int		i;

	cpup = cpu_datap(cpu);

	lcpu = &cpup->lcpu;
	lcpu->next_in_core = NULL;
	lcpu->next_in_die = NULL;
	lcpu->next_in_pkg = NULL;
	lcpu->package = NULL;
	lcpu->cpu_num = cpu;
	lcpu->pnum = cpup->cpu_phys_number;
	lcpu->state = LCPU_OFF;
	for (i = 0; i < MAX_CACHE_DEPTH; i += 1)
		lcpu->caches[i] = NULL;

	lcpu->master = (lcpu->cpu_num == (unsigned int) master_cpu);
	lcpu->primary = (lcpu->pnum % topoParms.nPThreadsPerPackage) == 0;
}
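
/*
 * Note (illustrative, not in the original source): "master" marks the
 * boot CPU, while "primary" marks the first logical CPU of each
 * package.  With nPThreadsPerPackage = 8, for instance, physical CPU
 * numbers 0, 8, 16, ... are primary.
 */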
static x86_core_t *
x86_core_alloc(int cpu)
{
	x86_core_t	*core;
	cpu_data_t	*cpup;

	cpup = cpu_datap(cpu);

	simple_lock(&x86_topo_lock);
	if (free_cores != NULL) {
		core = free_cores;
		free_cores = core->next_in_die;
		core->next_in_die = NULL;
		simple_unlock(&x86_topo_lock);
	} else {
		simple_unlock(&x86_topo_lock);
		core = kalloc(sizeof(x86_core_t));
		if (core == NULL)
			panic("x86_core_alloc() kalloc of x86_core_t failed!\n");
	}

	bzero((void *) core, sizeof(x86_core_t));

	core->pcore_num = cpup->cpu_phys_number / topoParms.nPThreadsPerCore;
	core->lcore_num = core->pcore_num % topoParms.nPCoresPerPackage;

	core->flags = X86CORE_FL_PRESENT | X86CORE_FL_READY
			| X86CORE_FL_HALTED | X86CORE_FL_IDLE;

	return(core);
}
static void
x86_core_free(x86_core_t *core)
{
	simple_lock(&x86_topo_lock);
	core->next_in_die = free_cores;
	free_cores = core;
	simple_unlock(&x86_topo_lock);
}
static x86_pkg_t *
x86_package_find(int cpu)
{
	x86_pkg_t	*pkg;
	cpu_data_t	*cpup;
	uint32_t	pkg_num;

	cpup = cpu_datap(cpu);

	pkg_num = cpup->cpu_phys_number / topoParms.nPThreadsPerPackage;

	pkg = x86_pkgs;
	while (pkg != NULL) {
		if (pkg->ppkg_num == pkg_num)
			break;
		pkg = pkg->next;
	}

	return(pkg);
}
static x86_die_t *
x86_die_find(int cpu)
{
	x86_die_t	*die;
	x86_pkg_t	*pkg;
	cpu_data_t	*cpup;
	uint32_t	die_num;

	cpup = cpu_datap(cpu);

	die_num = cpup->cpu_phys_number / topoParms.nPThreadsPerDie;

	pkg = x86_package_find(cpu);
	if (pkg == NULL)
		return(NULL);

	die = pkg->dies;
	while (die != NULL) {
		if (die->pdie_num == die_num)
			break;
		die = die->next_in_pkg;
	}

	return(die);
}
static x86_core_t *
x86_core_find(int cpu)
{
	x86_core_t	*core;
	x86_die_t	*die;
	cpu_data_t	*cpup;
	uint32_t	core_num;

	cpup = cpu_datap(cpu);

	core_num = cpup->cpu_phys_number / topoParms.nPThreadsPerCore;

	die = x86_die_find(cpu);
	if (die == NULL)
		return(NULL);

	core = die->cores;
	while (core != NULL) {
		if (core->pcore_num == core_num)
			break;
		core = core->next_in_die;
	}

	return(core);
}
void
x86_set_lcpu_numbers(x86_lcpu_t *lcpu)
{
	lcpu->lnum = lcpu->cpu_num % topoParms.nLThreadsPerCore;
}
void
x86_set_core_numbers(x86_core_t *core, x86_lcpu_t *lcpu)
{
	core->pcore_num = lcpu->cpu_num / topoParms.nLThreadsPerCore;
	core->lcore_num = core->pcore_num % topoParms.nLCoresPerDie;
}
void
x86_set_die_numbers(x86_die_t *die, x86_lcpu_t *lcpu)
{
	die->pdie_num = lcpu->cpu_num / (topoParms.nLThreadsPerCore * topoParms.nLCoresPerDie);
	die->ldie_num = die->pdie_num % topoParms.nLDiesPerPackage;
}
void
x86_set_pkg_numbers(x86_pkg_t *pkg, x86_lcpu_t *lcpu)
{
	pkg->ppkg_num = lcpu->cpu_num / topoParms.nLThreadsPerPackage;
	pkg->lpkg_num = pkg->ppkg_num;
}
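
/*
 * Illustrative mapping (not from the original source): with
 * nLThreadsPerCore = 2, nLCoresPerDie = 4, nLDiesPerPackage = 1 and
 * nLThreadsPerPackage = 8, logical CPU 5 gets lnum = 5 % 2 = 1,
 * pcore_num = 5 / 2 = 2, lcore_num = 2 % 4 = 2, pdie_num = 5 / 8 = 0
 * and ppkg_num = 5 / 8 = 0.
 */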
static x86_die_t *
x86_die_alloc(int cpu)
{
	x86_die_t	*die;
	cpu_data_t	*cpup;

	cpup = cpu_datap(cpu);

	simple_lock(&x86_topo_lock);
	if (free_dies != NULL) {
		die = free_dies;
		free_dies = die->next_in_pkg;
		die->next_in_pkg = NULL;
		simple_unlock(&x86_topo_lock);
	} else {
		simple_unlock(&x86_topo_lock);
		die = kalloc(sizeof(x86_die_t));
		if (die == NULL)
			panic("x86_die_alloc() kalloc of x86_die_t failed!\n");
	}

	bzero((void *) die, sizeof(x86_die_t));

	die->pdie_num = cpup->cpu_phys_number / topoParms.nPThreadsPerDie;

	die->ldie_num = num_dies;
	atomic_incl((long *) &num_dies, 1);

	die->flags = X86DIE_FL_PRESENT;

	return(die);
}
static void
x86_die_free(x86_die_t *die)
{
	simple_lock(&x86_topo_lock);
	die->next_in_pkg = free_dies;
	free_dies = die;
	atomic_decl((long *) &num_dies, 1);
	simple_unlock(&x86_topo_lock);
}
static x86_pkg_t *
x86_package_alloc(int cpu)
{
	x86_pkg_t	*pkg;
	cpu_data_t	*cpup;

	cpup = cpu_datap(cpu);

	simple_lock(&x86_topo_lock);
	if (free_pkgs != NULL) {
		pkg = free_pkgs;
		free_pkgs = pkg->next;
		pkg->next = NULL;
		simple_unlock(&x86_topo_lock);
	} else {
		simple_unlock(&x86_topo_lock);
		pkg = kalloc(sizeof(x86_pkg_t));
		if (pkg == NULL)
			panic("x86_package_alloc() kalloc of x86_pkg_t failed!\n");
	}

	bzero((void *) pkg, sizeof(x86_pkg_t));

	pkg->ppkg_num = cpup->cpu_phys_number / topoParms.nPThreadsPerPackage;

	pkg->lpkg_num = topoParms.nPackages;
	atomic_incl((long *) &topoParms.nPackages, 1);

	pkg->flags = X86PKG_FL_PRESENT | X86PKG_FL_READY;

	return(pkg);
}
static void
x86_package_free(x86_pkg_t *pkg)
{
	simple_lock(&x86_topo_lock);
	pkg->next = free_pkgs;
	free_pkgs = pkg;
	atomic_decl((long *) &topoParms.nPackages, 1);
	simple_unlock(&x86_topo_lock);
}
static void
x86_cache_add_lcpu(x86_cpu_cache_t *cache, x86_lcpu_t *lcpu)
{
	x86_cpu_cache_t	*cur_cache;
	int		i;

	/*
	 * Put the new CPU into the list of the cache.
	 */
	cur_cache = lcpu->caches[cache->level - 1];
	lcpu->caches[cache->level - 1] = cache;
	cache->next = cur_cache;
	cache->nlcpus += 1;
	for (i = 0; i < cache->nlcpus; i += 1) {
		if (cache->cpus[i] == NULL) {
			cache->cpus[i] = lcpu;
			break;
		}
	}
}
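
/*
 * Note (illustrative, not part of the original source): caches are kept
 * per level on each logical CPU, so a level-2 cache reported by CPUID
 * leaf 4 is chained onto lcpu->caches[1], and the lcpu is also recorded
 * in the cache's own cpus[] array so sharing can be walked in both
 * directions.
 */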
static void
x86_lcpu_add_caches(x86_lcpu_t *lcpu)
{
	x86_cpu_cache_t	*list;
	x86_cpu_cache_t	*cur;
	x86_cpu_cache_t	*match;
	x86_die_t	*die;
	x86_core_t	*core;
	x86_lcpu_t	*cur_lcpu;
	uint32_t	level;
	boolean_t	found = FALSE;

	assert(lcpu != NULL);

	/*
	 * Add the cache data to the topology.
	 */
	list = x86_cache_list();

	simple_lock(&x86_topo_lock);

	while (list != NULL) {
		/*
		 * Remove the cache from the front of the list.
		 */
		cur = list;
		list = cur->next;
		cur->next = NULL;
		level = cur->level - 1;

		/*
		 * If the cache isn't shared then just put it where it
		 * belongs.
		 */
		if (cur->maxcpus == 1) {
			x86_cache_add_lcpu(cur, lcpu);
			continue;
		}

		/*
		 * We'll assume that all of the caches at a particular level
		 * have the same sharing.  So if we have a cache already at
		 * this level, we'll just skip looking for the match.
		 */
		if (lcpu->caches[level] != NULL) {
			x86_cache_free(cur);
			continue;
		}

		/*
		 * This is a shared cache, so we have to figure out if
		 * this is the first time we've seen this cache.  We do
		 * this by searching through the topology and seeing if
		 * this cache is already described.
		 *
		 * Assume that L{LLC-1} are all at the core level and that
		 * LLC is shared at the die level.
		 */
		if (level < topoParms.LLCDepth) {
			/*
			 * Shared at the core.
			 */
			core = lcpu->core;
			cur_lcpu = core->lcpus;
			while (cur_lcpu != NULL) {
				/*
				 * Skip ourselves.
				 */
				if (cur_lcpu == lcpu) {
					cur_lcpu = cur_lcpu->next_in_core;
					continue;
				}

				/*
				 * If there's a cache on this logical CPU,
				 * it should match the one we're adding.
				 */
				match = x86_match_cache(cur_lcpu->caches[level], cur);
				if (match != NULL) {
					x86_cache_free(cur);
					x86_cache_add_lcpu(match, lcpu);
					found = TRUE;
					break;
				}

				cur_lcpu = cur_lcpu->next_in_core;
			}
		} else {
			/*
			 * Shared at the die.
			 */
			die = lcpu->die;
			cur_lcpu = die->lcpus;
			while (cur_lcpu != NULL) {
				/*
				 * Skip ourselves.
				 */
				if (cur_lcpu == lcpu) {
					cur_lcpu = cur_lcpu->next_in_die;
					continue;
				}

				/*
				 * If there's a cache on this logical CPU,
				 * it should match the one we're adding.
				 */
				match = x86_match_cache(cur_lcpu->caches[level], cur);
				if (match != NULL) {
					x86_cache_free(cur);
					x86_cache_add_lcpu(match, lcpu);
					found = TRUE;
					break;
				}

				cur_lcpu = cur_lcpu->next_in_die;
			}
		}

		/*
		 * If a shared cache wasn't found, then this logical CPU must
		 * be the first one encountered.
		 */
		if (!found)
			x86_cache_add_lcpu(cur, lcpu);
	}

	simple_unlock(&x86_topo_lock);
}
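
/*
 * Illustrative walk-through (not from the original source): on a part
 * where two logical CPUs share an L2 and the LLC spans the die, the
 * second logical CPU to call x86_lcpu_add_caches() finds its sibling's
 * L2 via x86_match_cache() and attaches to that existing structure
 * instead of keeping a duplicate, so each physical cache is described
 * exactly once in the topology.
 */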
static void
x86_core_add_lcpu(x86_core_t *core, x86_lcpu_t *lcpu)
{
	assert(core != NULL);
	assert(lcpu != NULL);

	simple_lock(&x86_topo_lock);

	lcpu->next_in_core = core->lcpus;
	lcpu->core = core;
	core->lcpus = lcpu;
	core->num_lcpus += 1;
	simple_unlock(&x86_topo_lock);
}
static void
x86_die_add_lcpu(x86_die_t *die, x86_lcpu_t *lcpu)
{
	assert(die != NULL);
	assert(lcpu != NULL);

	lcpu->next_in_die = die->lcpus;
	lcpu->die = die;
	die->lcpus = lcpu;
}
static void
x86_die_add_core(x86_die_t *die, x86_core_t *core)
{
	assert(die != NULL);
	assert(core != NULL);

	core->next_in_die = die->cores;
	core->die = die;
	die->cores = core;
}
static void
x86_package_add_lcpu(x86_pkg_t *pkg, x86_lcpu_t *lcpu)
{
	assert(pkg != NULL);
	assert(lcpu != NULL);

	lcpu->next_in_pkg = pkg->lcpus;
	lcpu->package = pkg;
	pkg->lcpus = lcpu;
}
static void
x86_package_add_core(x86_pkg_t *pkg, x86_core_t *core)
{
	assert(pkg != NULL);
	assert(core != NULL);

	core->next_in_pkg = pkg->cores;
	core->package = pkg;
	pkg->cores = core;
}
static void
x86_package_add_die(x86_pkg_t *pkg, x86_die_t *die)
{
	assert(pkg != NULL);
	assert(die != NULL);

	die->next_in_pkg = pkg->dies;
	die->package = pkg;
	pkg->dies = die;
}
void *
cpu_thread_alloc(int cpu)
{
	x86_core_t	*core = NULL;
	x86_die_t	*die = NULL;
	x86_pkg_t	*pkg = NULL;
	cpu_data_t	*cpup;
	uint32_t	phys_cpu;

	/*
	 * Only allow one to manipulate the topology at a time.
	 */
	simple_lock(&x86_topo_lock);

	/*
	 * Make sure all of the topology parameters have been initialized.
	 */
	if (!topoParmsInited)
		initTopoParms();

	cpup = cpu_datap(cpu);

	phys_cpu = cpup->cpu_phys_number;

	x86_lcpu_init(cpu);

	/*
	 * Allocate performance counter structure.
	 */
	simple_unlock(&x86_topo_lock);
	cpup->lcpu.pmc = pmc_alloc();
	simple_lock(&x86_topo_lock);

	/*
	 * Assume that all cpus have the same features.
	 */
	if (cpu_is_hyperthreaded()) {
		cpup->cpu_threadtype = CPU_THREADTYPE_INTEL_HTT;
	} else {
		cpup->cpu_threadtype = CPU_THREADTYPE_NONE;
	}

	/*
	 * Get the package that the logical CPU is in.
	 */
	do {
		pkg = x86_package_find(cpu);
		if (pkg == NULL) {
			/*
			 * Package structure hasn't been created yet, do it now.
			 */
			simple_unlock(&x86_topo_lock);
			pkg = x86_package_alloc(cpu);
			simple_lock(&x86_topo_lock);
			if (x86_package_find(cpu) != NULL) {
				x86_package_free(pkg);
				continue;
			}

			/*
			 * Add the new package to the global list of packages.
			 */
			pkg->next = x86_pkgs;
			x86_pkgs = pkg;
		}
	} while (pkg == NULL);
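
	/*
	 * Note (not part of the original source): the loop above drops
	 * x86_topo_lock around x86_package_alloc() because allocation can
	 * block, then re-runs x86_package_find(); if another CPU created
	 * the package in the meantime, the freshly allocated one is freed
	 * and the lookup retries.  The die and core lookups below follow
	 * the same pattern.
	 */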
	/*
	 * Get the die that the logical CPU is in.
	 */
	do {
		die = x86_die_find(cpu);
		if (die == NULL) {
			/*
			 * Die structure hasn't been created yet, do it now.
			 */
			simple_unlock(&x86_topo_lock);
			die = x86_die_alloc(cpu);
			simple_lock(&x86_topo_lock);
			if (x86_die_find(cpu) != NULL) {
				x86_die_free(die);
				continue;
			}

			/*
			 * Add the die to the package.
			 */
			x86_package_add_die(pkg, die);
		}
	} while (die == NULL);

	/*
	 * Get the core for this logical CPU.
	 */
	do {
		core = x86_core_find(cpu);
		if (core == NULL) {
			/*
			 * Allocate the core structure now.
			 */
			simple_unlock(&x86_topo_lock);
			core = x86_core_alloc(cpu);
			simple_lock(&x86_topo_lock);
			if (x86_core_find(cpu) != NULL) {
				x86_core_free(core);
				continue;
			}

			/*
			 * Add the core to the die & package.
			 */
			x86_die_add_core(die, core);
			x86_package_add_core(pkg, core);
			machine_info.physical_cpu_max += 1;
		}
	} while (core == NULL);

	/*
	 * Done manipulating the topology, so others can get in.
	 */
	machine_info.logical_cpu_max += 1;
	simple_unlock(&x86_topo_lock);

	/*
	 * Add the logical CPU to the other topology structures.
	 */
	x86_core_add_lcpu(core, &cpup->lcpu);
	x86_die_add_lcpu(core->die, &cpup->lcpu);
	x86_package_add_lcpu(core->package, &cpup->lcpu);
	x86_lcpu_add_caches(&cpup->lcpu);

	return (void *) core;
}
void
cpu_thread_init(void)
{
	int		my_cpu = get_cpu_number();
	cpu_data_t	*cpup = current_cpu_datap();
	x86_core_t	*core;
	static int	initialized = 0;

	/*
	 * If we're the boot processor, we do all of the initialization of
	 * the CPU topology infrastructure.
	 */
	if (my_cpu == master_cpu && !initialized) {
		simple_lock_init(&x86_topo_lock, 0);
		initialized = 1;
	}

	/*
	 * Put this logical CPU into the physical CPU topology.
	 */
	cpup->lcpu.core = cpu_thread_alloc(my_cpu);

	/*
	 * Do the CPU accounting.
	 */
	core = cpup->lcpu.core;
	simple_lock(&x86_topo_lock);
	machine_info.logical_cpu += 1;
	if (core->active_lcpus == 0)
		machine_info.physical_cpu += 1;
	core->active_lcpus += 1;
	simple_unlock(&x86_topo_lock);
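
	/*
	 * Example (illustrative, not in the original source): when the
	 * second hyperthread of a core comes up and calls cpu_thread_init(),
	 * logical_cpu is incremented but physical_cpu is not, because the
	 * core's active_lcpus count was already nonzero.
	 */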
	pmCPUMarkRunning(cpup);
	etimer_resync_deadlines();
}
/*
 * Called for a cpu to halt permanently
 * (as opposed to halting and expecting an interrupt to awaken it).
 */
void
cpu_thread_halt(void)
{
	x86_core_t	*core;
	cpu_data_t	*cpup = current_cpu_datap();

	simple_lock(&x86_topo_lock);
	machine_info.logical_cpu -= 1;
	core = cpup->lcpu.core;
	core->active_lcpus -= 1;
	if (core->active_lcpus == 0)
		machine_info.physical_cpu -= 1;
	simple_unlock(&x86_topo_lock);

	/*
	 * Let the power management code determine the best way to "stop"
	 * the cpu.
	 */
	ml_set_interrupts_enabled(FALSE);
	pmCPUHalt(PM_HALT_NORMAL);
}
#if TOPO_DEBUG
/*
 * Prints out the topology
 */
void
debug_topology_print(void)
{
	x86_pkg_t	*pkg;
	x86_die_t	*die;
	x86_core_t	*core;
	x86_lcpu_t	*cpu;

	pkg = x86_pkgs;
	while (pkg != NULL) {
		kprintf("Package:\n");
		kprintf("    Physical: %d\n", pkg->ppkg_num);
		kprintf("    Logical:  %d\n", pkg->lpkg_num);

		die = pkg->dies;
		while (die != NULL) {
			kprintf("    Die:\n");
			kprintf("        Physical: %d\n", die->pdie_num);
			kprintf("        Logical:  %d\n", die->ldie_num);

			core = die->cores;
			while (core != NULL) {
				kprintf("        Core:\n");
				kprintf("            Physical: %d\n", core->pcore_num);
				kprintf("            Logical:  %d\n", core->lcore_num);

				cpu = core->lcpus;
				while (cpu != NULL) {
					kprintf("            LCPU:\n");
					kprintf("                CPU #:    %d\n", cpu->cpu_num);
					kprintf("                Physical: %d\n", cpu->pnum);
					kprintf("                Logical:  %d\n", cpu->lnum);
					kprintf("                Flags:    ");
					if (cpu->master)
						kprintf("MASTER ");
					if (cpu->primary)
						kprintf("PRIMARY");
					if (!cpu->master && !cpu->primary)
						kprintf("(none)");
					kprintf("\n");

					cpu = cpu->next_in_core;
				}

				core = core->next_in_die;
			}

			die = die->next_in_pkg;
		}

		pkg = pkg->next;
	}
}
#endif /* TOPO_DEBUG */