/*
 * Copyright (c) 2007-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/machine.h>
#include <mach/processor.h>
#include <kern/kalloc.h>
#include <i386/cpu_affinity.h>
#include <i386/cpu_topology.h>
#include <i386/cpu_threads.h>
#include <i386/machine_cpu.h>
#include <i386/lock.h>
#include <i386/cpu_data.h>
#include <i386/lapic.h>
#include <i386/machine_routines.h>

//#define TOPO_DEBUG 1
#if TOPO_DEBUG
#define DBG(x...)	kprintf("DBG: " x)
#else
#define DBG(x...)
#endif
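/*
 * Note: uncomment the TOPO_DEBUG define above (or build with -DTOPO_DEBUG)
 * to have DBG() trace topology setup via kprintf(); otherwise DBG()
 * compiles to nothing.
 */
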
void debug_topology_print(void);
void validate_topology(void);

__private_extern__ void qsort(
	void * array,
	size_t nmembers,
	size_t member_size,
	int (*)(const void *, const void *));

static int lapicid_cmp(const void *x, const void *y);
static x86_affinity_set_t *find_cache_affinity(x86_cpu_cache_t *L2_cachep);

x86_affinity_set_t	*x86_affinities = NULL;
static int		x86_affinity_count = 0;

/*
 * cpu_topology_sort() is called after all processors have been registered
 * but before any non-boot processor is started.
 * We establish a canonical logical processor numbering: logical cpus must be
 * contiguous, zero-based and assigned in physical (local apic id) order.
 * This step is required because the discovery/registration order is
 * non-deterministic - cores are registered in differing orders across boots.
 * Enforcing canonical numbering simplifies the identification of
 * processors - in particular, for stopping/starting from CHUD.
 */
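/*
 * Illustrative example (hypothetical lapic ids): if the boot cpu has
 * lapic id 0x0 and three more cpus register in the order 0x6, 0x2, 0x4,
 * the sort below leaves cpu 0 alone and renumbers the rest so that
 * logical cpu 1 -> lapic 0x2, 2 -> 0x4 and 3 -> 0x6 on every boot,
 * regardless of the registration order actually observed.
 */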
void
cpu_topology_sort(int ncpus)
{
	int		i;
	boolean_t	istate;
	processor_t	lprim = NULL;

	assert(machine_info.physical_cpu == 1);
	assert(machine_info.logical_cpu == 1);
	assert(master_cpu == 0);
	assert(cpu_number() == 0);
	assert(cpu_datap(0)->cpu_number == 0);

	/* Lights out for this: interrupts stay off while we renumber */
	istate = ml_set_interrupts_enabled(FALSE);

#ifdef TOPO_DEBUG
	DBG("cpu_topology_sort() %d cpu%s registered\n",
		ncpus, (ncpus > 1) ? "s" : "");
	for (i = 0; i < ncpus; i++) {
		cpu_data_t	*cpup = cpu_datap(i);
		DBG("\tcpu_data[%d]:0x%08x local apic 0x%x\n",
			i, (unsigned) cpup, cpup->cpu_phys_number);
	}
#endif
	/*
	 * Re-order the cpu_data_ptr vector, sorting by physical id.
	 * Skip the boot processor; it is required to be correct already.
	 */
	if (ncpus > 1) {
		qsort((void *) &cpu_data_ptr[1],
			ncpus - 1,
			sizeof(cpu_data_t *),
			lapicid_cmp);
	}
#ifdef TOPO_DEBUG
	DBG("cpu_topology_sort() after sorting:\n");
	for (i = 0; i < ncpus; i++) {
		cpu_data_t	*cpup = cpu_datap(i);
		DBG("\tcpu_data[%d]:0x%08x local apic 0x%x\n",
			i, (unsigned) cpup, cpup->cpu_phys_number);
	}
#endif

	/*
	 * Fix up logical numbers and reset the map kept by the lapic code.
	 */
	for (i = 1; i < ncpus; i++) {
		cpu_data_t	*cpup = cpu_datap(i);
		x86_core_t	*core = cpup->lcpu.core;
		x86_die_t	*die = cpup->lcpu.die;
		x86_pkg_t	*pkg = cpup->lcpu.package;

		assert(core != NULL);
		assert(die != NULL);
		assert(pkg != NULL);

		if (cpup->cpu_number != i) {
			kprintf("cpu_datap(%d):%p local apic id 0x%x "
				"remapped from %d\n",
				i, cpup, cpup->cpu_phys_number,
				cpup->cpu_number);
		}
		cpup->cpu_number = i;
		cpup->lcpu.cpu_num = i;
		cpup->lcpu.pnum = cpup->cpu_phys_number;
		lapic_cpu_map(cpup->cpu_phys_number, i);
		x86_set_lcpu_numbers(&cpup->lcpu);
		x86_set_core_numbers(core, &cpup->lcpu);
		x86_set_die_numbers(die, &cpup->lcpu);
		x86_set_pkg_numbers(pkg, &cpup->lcpu);
	}

#if TOPO_DEBUG
	debug_topology_print();
#endif /* TOPO_DEBUG */
	validate_topology();

	ml_set_interrupts_enabled(istate);
	DBG("cpu_topology_sort() LLC is L%d\n", topoParms.LLCDepth + 1);

	/*
	 * Iterate over all logical cpus, finding or creating the affinity
	 * set for their LLC cache.  Each affinity set possesses a processor
	 * set into which each logical processor is added.
	 */
	DBG("cpu_topology_sort() creating affinity sets:\n");
	for (i = 0; i < ncpus; i++) {
		cpu_data_t		*cpup = cpu_datap(i);
		x86_lcpu_t		*lcpup = cpu_to_lcpu(i);
		x86_cpu_cache_t		*LLC_cachep;
		x86_affinity_set_t	*aset;

		LLC_cachep = lcpup->caches[topoParms.LLCDepth];
		assert(LLC_cachep->type == CPU_CACHE_TYPE_UNIF);
		aset = find_cache_affinity(LLC_cachep);
		if (aset == NULL) {
			aset = (x86_affinity_set_t *) kalloc(sizeof(*aset));
			if (aset == NULL)
				panic("cpu_topology_sort() failed aset alloc");
			aset->next = x86_affinities;
			x86_affinities = aset;
			aset->num = x86_affinity_count++;
			aset->cache = LLC_cachep;
			aset->pset = (i == master_cpu) ?
					processor_pset(master_processor) :
					pset_create(pset_node_root());
			if (aset->pset == PROCESSOR_SET_NULL)
				panic("cpu_topology_sort: pset_create");
			DBG("\tnew set %p(%d) pset %p for cache %p\n",
				aset, aset->num, aset->pset, aset->cache);
		}

		DBG("\tprocessor_init set %p(%d) lcpup %p(%d) cpu %p processor %p\n",
			aset, aset->num, lcpup, lcpup->cpu_num, cpup, cpup->cpu_processor);

		if (i != master_cpu)
			processor_init(cpup->cpu_processor, i, aset->pset);

		if (lcpup->core->num_lcpus > 1) {
			if (lcpup->lnum == 0)
				lprim = cpup->cpu_processor;

			processor_meta_init(cpup->cpu_processor, lprim);
		}
	}
}
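
/*
 * Worked example (hypothetical topology): on a two-package system where
 * each package's cores share one L2 (the LLC), the loop above ends up
 * with two affinity sets - one per distinct LLC - and the logical cpus
 * split between the two psets.  The master_cpu's set reuses the boot
 * pset; the other is freshly created with pset_create().
 */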

/*
 * We got a request to start a CPU.  Check that this CPU is within the
 * configured max cpu limit before actually starting it.
 */
kern_return_t
cpu_topology_start_cpu( int cpunum )
{
	int		ncpus = machine_info.max_cpus;
	int		i = cpunum;

	/* Decide whether to start a CPU, and actually start it */
	DBG("cpu_topology_start_cpu() processor_start():\n");
	if (i < ncpus) {
		DBG("\tlcpu %d\n", cpu_datap(i)->cpu_number);
		processor_start(cpu_datap(i)->cpu_processor);
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}

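/*
 * qsort(3)-style comparator: order cpu_data_t pointers by ascending
 * local apic id, returning <0, 0 or >0 as x's physical id is below,
 * equal to or above y's.  The qsort() call in cpu_topology_sort()
 * therefore leaves cpu_data_ptr[1..ncpus-1] in physical order.
 */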
static int
lapicid_cmp(const void *x, const void *y)
{
	cpu_data_t	*cpu_x = *((cpu_data_t **)(uintptr_t)x);
	cpu_data_t	*cpu_y = *((cpu_data_t **)(uintptr_t)y);

	DBG("lapicid_cmp(%p,%p) (%d,%d)\n",
		x, y, cpu_x->cpu_phys_number, cpu_y->cpu_phys_number);
	if (cpu_x->cpu_phys_number < cpu_y->cpu_phys_number)
		return -1;
	if (cpu_x->cpu_phys_number == cpu_y->cpu_phys_number)
		return 0;
	return 1;
}

static x86_affinity_set_t *
find_cache_affinity(x86_cpu_cache_t *l2_cachep)
{
	x86_affinity_set_t	*aset;

	for (aset = x86_affinities; aset != NULL; aset = aset->next) {
		if (l2_cachep == aset->cache)
			break;
	}
	return aset;
}

int
ml_get_max_affinity_sets(void)
{
	return x86_affinity_count;
}

processor_set_t
ml_affinity_to_pset(uint32_t affinity_num)
{
	x86_affinity_set_t	*aset;

	for (aset = x86_affinities; aset != NULL; aset = aset->next) {
		if (affinity_num == aset->num)
			break;
	}
	return (aset == NULL) ? PROCESSOR_SET_NULL : aset->pset;
}
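
/*
 * Sketch (not compiled in): how a caller might enumerate every affinity
 * set using the two interfaces above.  Affinity numbers are assigned
 * densely from 0 in cpu_topology_sort(), so iterating up to
 * ml_get_max_affinity_sets() visits each set exactly once.  The helper
 * name is hypothetical.
 */
#if 0
static void
dump_affinity_psets(void)
{
	uint32_t	num;

	for (num = 0; num < (uint32_t) ml_get_max_affinity_sets(); num++)
		kprintf("affinity %u -> pset %p\n",
			num, ml_affinity_to_pset(num));
}
#endif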

uint64_t
ml_cpu_cache_size(unsigned int level)
{
	x86_cpu_cache_t	*cachep;

	if (level == 0) {
		return machine_info.max_mem;
	} else if (1 <= level && level <= MAX_CACHE_DEPTH) {
		cachep = current_cpu_datap()->lcpu.caches[level - 1];
		return cachep ? cachep->cache_size : 0;
	} else {
		return 0;
	}
}

uint64_t
ml_cpu_cache_sharing(unsigned int level)
{
	x86_cpu_cache_t	*cachep;

	if (level == 0) {
		return machine_info.max_cpus;
	} else if (1 <= level && level <= MAX_CACHE_DEPTH) {
		cachep = current_cpu_datap()->lcpu.caches[level - 1];
		return cachep ? cachep->nlcpus : 0;
	} else {
		return 0;
	}
}
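
/*
 * Illustrative semantics of the two queries above (figures hypothetical):
 * level 0 reports memory-level values - total physical memory and the
 * max cpu count - while level N (1..MAX_CACHE_DEPTH) reports the current
 * cpu's level-N cache.  On a part whose 6MB L2 is shared by two logical
 * cpus, ml_cpu_cache_size(2) returns 6MB and ml_cpu_cache_sharing(2)
 * returns 2.
 */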