/* osfmk/i386/cpu_topology.c */
/*
 * Copyright (c) 2007-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/machine.h>
#include <mach/processor.h>
#include <kern/kalloc.h>
#include <i386/cpu_affinity.h>
#include <i386/cpu_topology.h>
#include <i386/cpu_threads.h>
#include <i386/machine_cpu.h>
#include <i386/lock.h>
#include <i386/cpu_data.h>
#include <i386/lapic.h>
#include <i386/machine_routines.h>
41 //#define TOPO_DEBUG 1
43 #define DBG(x...) kprintf("DBG: " x)
47 void debug_topology_print(void);
48 void validate_topology(void);
50 __private_extern__
void qsort(
54 int (*)(const void *, const void *));
56 static int lapicid_cmp(const void *x
, const void *y
);
57 static x86_affinity_set_t
*find_cache_affinity(x86_cpu_cache_t
*L2_cachep
);
59 x86_affinity_set_t
*x86_affinities
= NULL
;
60 static int x86_affinity_count
= 0;
63 * cpu_topology_sort() is called after all processors have been registered
64 * but before any non-boot processor id started.
65 * We establish canonical logical processor numbering - logical cpus must be
66 * contiguous, zero-based and assigned in physical (local apic id) order.
67 * This step is required because the discovery/registration order is
68 * non-deterministic - cores are registered in differing orders over boots.
69 * Enforcing canonical numbering simplifies identification
70 * of processors - in particular, for stopping/starting from CHUD.
73 cpu_topology_sort(int ncpus
)
77 processor_t lprim
= NULL
;
79 assert(machine_info
.physical_cpu
== 1);
80 assert(machine_info
.logical_cpu
== 1);
81 assert(master_cpu
== 0);
82 assert(cpu_number() == 0);
83 assert(cpu_datap(0)->cpu_number
== 0);
85 /* Lights out for this */
86 istate
= ml_set_interrupts_enabled(FALSE
);
89 DBG("cpu_topology_start() %d cpu%s registered\n",
90 ncpus
, (ncpus
> 1) ? "s" : "");
91 for (i
= 0; i
< ncpus
; i
++) {
92 cpu_data_t
*cpup
= cpu_datap(i
);
93 DBG("\tcpu_data[%d]:0x%08x local apic 0x%x\n",
94 i
, (unsigned) cpup
, cpup
->cpu_phys_number
);
98 * Re-order the cpu_data_ptr vector sorting by physical id.
99 * Skip the boot processor, it's required to be correct.
102 qsort((void *) &cpu_data_ptr
[1],
104 sizeof(cpu_data_t
*),
108 DBG("cpu_topology_start() after sorting:\n");
109 for (i
= 0; i
< ncpus
; i
++) {
110 cpu_data_t
*cpup
= cpu_datap(i
);
111 DBG("\tcpu_data[%d]:0x%08x local apic 0x%x\n",
112 i
, (unsigned) cpup
, cpup
->cpu_phys_number
);
117 * Fix up logical numbers and reset the map kept by the lapic code.
119 for (i
= 1; i
< ncpus
; i
++) {
120 cpu_data_t
*cpup
= cpu_datap(i
);
121 x86_core_t
*core
= cpup
->lcpu
.core
;
122 x86_die_t
*die
= cpup
->lcpu
.die
;
123 x86_pkg_t
*pkg
= cpup
->lcpu
.package
;
125 assert(core
!= NULL
);
129 if (cpup
->cpu_number
!= i
) {
130 kprintf("cpu_datap(%d):%p local apic id 0x%x "
131 "remapped from %d\n",
132 i
, cpup
, cpup
->cpu_phys_number
,
135 cpup
->cpu_number
= i
;
136 cpup
->lcpu
.cpu_num
= i
;
137 cpup
->lcpu
.pnum
= cpup
->cpu_phys_number
;
138 lapic_cpu_map(cpup
->cpu_phys_number
, i
);
139 x86_set_lcpu_numbers(&cpup
->lcpu
);
140 x86_set_core_numbers(core
, &cpup
->lcpu
);
141 x86_set_die_numbers(die
, &cpup
->lcpu
);
142 x86_set_pkg_numbers(pkg
, &cpup
->lcpu
);
146 debug_topology_print();
147 #endif /* TOPO_DEBUG */
150 ml_set_interrupts_enabled(istate
);
151 DBG("cpu_topology_start() LLC is L%d\n", topoParms
.LLCDepth
+ 1);
154 * Let the CPU Power Management know that the topology is stable.
156 topoParms
.stable
= TRUE
;
160 * Iterate over all logical cpus finding or creating the affinity set
161 * for their LLC cache. Each affinity set possesses a processor set
162 * into which each logical processor is added.
164 DBG("cpu_topology_start() creating affinity sets:\n");
165 for (i
= 0; i
< ncpus
; i
++) {
166 cpu_data_t
*cpup
= cpu_datap(i
);
167 x86_lcpu_t
*lcpup
= cpu_to_lcpu(i
);
168 x86_cpu_cache_t
*LLC_cachep
;
169 x86_affinity_set_t
*aset
;
171 LLC_cachep
= lcpup
->caches
[topoParms
.LLCDepth
];
172 assert(LLC_cachep
->type
== CPU_CACHE_TYPE_UNIF
);
173 aset
= find_cache_affinity(LLC_cachep
);
175 aset
= (x86_affinity_set_t
*) kalloc(sizeof(*aset
));
177 panic("cpu_topology_start() failed aset alloc");
178 aset
->next
= x86_affinities
;
179 x86_affinities
= aset
;
180 aset
->num
= x86_affinity_count
++;
181 aset
->cache
= LLC_cachep
;
182 aset
->pset
= (i
== master_cpu
) ?
183 processor_pset(master_processor
) :
184 pset_create(pset_node_root());
185 if (aset
->pset
== PROCESSOR_SET_NULL
)
186 panic("cpu_topology_start: pset_create");
187 DBG("\tnew set %p(%d) pset %p for cache %p\n",
188 aset
, aset
->num
, aset
->pset
, aset
->cache
);
191 DBG("\tprocessor_init set %p(%d) lcpup %p(%d) cpu %p processor %p\n",
192 aset
, aset
->num
, lcpup
, lcpup
->cpu_num
, cpup
, cpup
->cpu_processor
);
195 processor_init(cpup
->cpu_processor
, i
, aset
->pset
);
197 if (lcpup
->core
->num_lcpus
> 1) {
198 if (lcpup
->lnum
== 0)
199 lprim
= cpup
->cpu_processor
;
201 processor_meta_init(cpup
->cpu_processor
, lprim
);
206 /* We got a request to start a CPU. Check that this CPU is within the
207 * max cpu limit set before we do.
210 cpu_topology_start_cpu( int cpunum
)
212 int ncpus
= machine_info
.max_cpus
;
215 /* Decide whether to start a CPU, and actually start it */
216 DBG("cpu_topology_start() processor_start():\n");
219 DBG("\tlcpu %d\n", cpu_datap(i
)->cpu_number
);
220 processor_start(cpu_datap(i
)->cpu_processor
);
228 lapicid_cmp(const void *x
, const void *y
)
230 cpu_data_t
*cpu_x
= *((cpu_data_t
**)(uintptr_t)x
);
231 cpu_data_t
*cpu_y
= *((cpu_data_t
**)(uintptr_t)y
);
233 DBG("lapicid_cmp(%p,%p) (%d,%d)\n",
234 x
, y
, cpu_x
->cpu_phys_number
, cpu_y
->cpu_phys_number
);
235 if (cpu_x
->cpu_phys_number
< cpu_y
->cpu_phys_number
)
237 if (cpu_x
->cpu_phys_number
== cpu_y
->cpu_phys_number
)
242 static x86_affinity_set_t
*
243 find_cache_affinity(x86_cpu_cache_t
*l2_cachep
)
245 x86_affinity_set_t
*aset
;
247 for (aset
= x86_affinities
; aset
!= NULL
; aset
= aset
->next
) {
248 if (l2_cachep
== aset
->cache
)
255 ml_get_max_affinity_sets(void)
257 return x86_affinity_count
;
261 ml_affinity_to_pset(uint32_t affinity_num
)
263 x86_affinity_set_t
*aset
;
265 for (aset
= x86_affinities
; aset
!= NULL
; aset
= aset
->next
) {
266 if (affinity_num
== aset
->num
)
269 return (aset
== NULL
) ? PROCESSOR_SET_NULL
: aset
->pset
;
273 ml_cpu_cache_size(unsigned int level
)
275 x86_cpu_cache_t
*cachep
;
278 return machine_info
.max_mem
;
279 } else if ( 1 <= level
&& level
<= MAX_CACHE_DEPTH
) {
280 cachep
= current_cpu_datap()->lcpu
.caches
[level
-1];
281 return cachep
? cachep
->cache_size
: 0;
288 ml_cpu_cache_sharing(unsigned int level
)
290 x86_cpu_cache_t
*cachep
;
293 return machine_info
.max_cpus
;
294 } else if ( 1 <= level
&& level
<= MAX_CACHE_DEPTH
) {
295 cachep
= current_cpu_datap()->lcpu
.caches
[level
-1];
296 return cachep
? cachep
->nlcpus
: 0;