/*
 * Copyright (c) 2007-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/machine.h>
#include <mach/processor.h>
#include <kern/kalloc.h>
#include <i386/cpu_affinity.h>
#include <i386/cpu_topology.h>
#include <i386/cpu_threads.h>
#include <i386/machine_cpu.h>
#include <i386/bit_routines.h>
#include <i386/cpu_data.h>
#include <i386/lapic.h>
#include <i386/machine_routines.h>
#include <stddef.h>

__private_extern__ void qsort(
        void * array,
        size_t nmembers,
        size_t member_size,
        int (*)(const void *, const void *));

static int lapicid_cmp(const void *x, const void *y);
static x86_affinity_set_t *find_cache_affinity(x86_cpu_cache_t *L2_cachep);

x86_affinity_set_t *x86_affinities = NULL;
static int x86_affinity_count = 0;

extern cpu_data_t cpshadows[];

#if DEVELOPMENT || DEBUG
void iotrace_init(int ncpus);
#endif /* DEVELOPMENT || DEBUG */

/* Re-sort double-mapped CPU data shadows after topology discovery sorts the
 * primary CPU data structures by physical/APIC CPU ID.
 */
static void
cpu_shadow_sort(int ncpus)
{
        for (int i = 0; i < ncpus; i++) {
                cpu_data_t *cpup = cpu_datap(i);
                ptrdiff_t coff = cpup - cpu_datap(0);

                cpup->cd_shadow = &cpshadows[coff];
        }
}

/*
 * cpu_topology_sort() is called after all processors have been registered but
 * before any non-boot processor is started. We establish canonical logical
 * processor numbering - logical cpus must be contiguous, zero-based and
 * assigned in physical (local apic id) order. This step is required because
 * the discovery/registration order is non-deterministic - cores are registered
 * in differing orders over boots. Enforcing canonical numbering simplifies
 * identification of processors.
 */
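/*
 * Illustrative example (hypothetical APIC IDs, not from any real machine):
 * if the boot cpu has local APIC ID 0 and three secondaries register in
 * the order {6, 2, 4}, the qsort below over cpu_data_ptr[1..3] leaves the
 * boot processor at logical cpu 0 and renumbers the rest so that logical
 * cpus 1..3 carry APIC IDs {2, 4, 6}.
 */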
void
cpu_topology_sort(int ncpus)
{
        int i;
        boolean_t istate;
        processor_t lprim = NULL;

        assert(machine_info.physical_cpu == 1);
        assert(machine_info.logical_cpu == 1);
        assert(master_cpu == 0);
        assert(cpu_number() == 0);
        assert(cpu_datap(0)->cpu_number == 0);

        /* Lights out for this: interrupts stay disabled while we renumber */
        istate = ml_set_interrupts_enabled(FALSE);

        if (topo_dbg) {
                TOPO_DBG("cpu_topology_sort() %d cpu%s registered\n",
                    ncpus, (ncpus > 1) ? "s" : "");
                for (i = 0; i < ncpus; i++) {
                        cpu_data_t *cpup = cpu_datap(i);
                        TOPO_DBG("\tcpu_data[%d]:%p local apic 0x%x\n",
                            i, (void *) cpup, cpup->cpu_phys_number);
                }
        }

        /*
         * Re-order the cpu_data_ptr vector, sorting by physical (local APIC)
         * id. The boot processor is skipped; it is already required to be
         * correct at index 0.
         */
        if (ncpus > 1) {
                qsort((void *) &cpu_data_ptr[1],
                    ncpus - 1,
                    sizeof(cpu_data_t *),
                    lapicid_cmp);
        }
        if (topo_dbg) {
                TOPO_DBG("cpu_topology_sort() after sorting:\n");
                for (i = 0; i < ncpus; i++) {
                        cpu_data_t *cpup = cpu_datap(i);
                        TOPO_DBG("\tcpu_data[%d]:%p local apic 0x%x\n",
                            i, (void *) cpup, cpup->cpu_phys_number);
                }
        }

        /*
         * Finalize logical numbers and the map kept by the lapic code.
         */
        for (i = 0; i < ncpus; i++) {
                cpu_data_t *cpup = cpu_datap(i);

                if (cpup->cpu_number != i) {
                        kprintf("cpu_datap(%d):%p local apic id 0x%x "
                            "remapped from %d\n",
                            i, cpup, cpup->cpu_phys_number,
                            cpup->cpu_number);
                }
                cpup->cpu_number = i;
                lapic_cpu_map(cpup->cpu_phys_number, i);
                x86_set_logical_topology(&cpup->lcpu, cpup->cpu_phys_number, i);
        }

        cpu_shadow_sort(ncpus);
        x86_validate_topology();

        ml_set_interrupts_enabled(istate);
        TOPO_DBG("cpu_topology_sort() LLC is L%d\n", topoParms.LLCDepth + 1);

#if DEVELOPMENT || DEBUG
        iotrace_init(ncpus);
#endif /* DEVELOPMENT || DEBUG */

        /*
         * Let CPU Power Management know that the topology is stable.
         */
        topoParms.stable = TRUE;
        pmCPUStateInit();

        /*
         * Iterate over all logical cpus, finding or creating the affinity set
         * for their LLC cache. Each affinity set possesses a processor set
         * into which each logical processor is added.
         */
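        /*
         * Illustrative example (hypothetical topology): on a 4-core package
         * with two cores per shared L2 and the L2 as LLC, this loop creates
         * two affinity sets, each owning a pset that receives the two
         * logical cpus sharing that cache.
         */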
        TOPO_DBG("cpu_topology_sort() creating affinity sets:\n");
        for (i = 0; i < ncpus; i++) {
                cpu_data_t *cpup = cpu_datap(i);
                x86_lcpu_t *lcpup = cpu_to_lcpu(i);
                x86_cpu_cache_t *LLC_cachep;
                x86_affinity_set_t *aset;

                LLC_cachep = lcpup->caches[topoParms.LLCDepth];
                assert(LLC_cachep->type == CPU_CACHE_TYPE_UNIF);
                aset = find_cache_affinity(LLC_cachep);
                if (aset == NULL) {
                        aset = (x86_affinity_set_t *) kalloc(sizeof(*aset));
                        if (aset == NULL) {
                                panic("cpu_topology_sort() failed aset alloc");
                        }
                        aset->next = x86_affinities;
                        x86_affinities = aset;
                        aset->num = x86_affinity_count++;
                        aset->cache = LLC_cachep;
                        aset->pset = (i == master_cpu) ?
                            processor_pset(master_processor) :
                            pset_create(pset_node_root());
                        if (aset->pset == PROCESSOR_SET_NULL) {
                                panic("cpu_topology_sort: pset_create");
                        }
                        TOPO_DBG("\tnew set %p(%d) pset %p for cache %p\n",
                            aset, aset->num, aset->pset, aset->cache);
                }

                TOPO_DBG("\tprocessor_init set %p(%d) lcpup %p(%d) cpu %p processor %p\n",
                    aset, aset->num, lcpup, lcpup->cpu_num, cpup, cpup->cpu_processor);

                if (i != master_cpu) {
                        processor_init(cpup->cpu_processor, i, aset->pset);
                }

                if (lcpup->core->num_lcpus > 1) {
                        if (lcpup->lnum == 0) {
                                lprim = cpup->cpu_processor;
                        }

                        processor_set_primary(cpup->cpu_processor, lprim);
                }
        }
}

/* Handle a request to start a CPU: check that the cpu number is within
 * the configured max cpu limit before actually starting it.
 */
kern_return_t
cpu_topology_start_cpu(int cpunum)
{
        int ncpus = machine_info.max_cpus;
        int i = cpunum;

        /* Decide whether to start a CPU, and actually start it */
        TOPO_DBG("cpu_topology_start_cpu() processor_start():\n");
        if (i < ncpus) {
                TOPO_DBG("\tlcpu %d\n", cpu_datap(i)->cpu_number);
                processor_start(cpu_datap(i)->cpu_processor);
                return KERN_SUCCESS;
        } else {
                return KERN_FAILURE;
        }
}

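/*
 * qsort comparator: orders cpu_data_t pointers by ascending local APIC ID.
 */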
static int
lapicid_cmp(const void *x, const void *y)
{
        cpu_data_t *cpu_x = *((cpu_data_t **)(uintptr_t)x);
        cpu_data_t *cpu_y = *((cpu_data_t **)(uintptr_t)y);

        TOPO_DBG("lapicid_cmp(%p,%p) (%d,%d)\n",
            x, y, cpu_x->cpu_phys_number, cpu_y->cpu_phys_number);
        if (cpu_x->cpu_phys_number < cpu_y->cpu_phys_number) {
                return -1;
        }
        if (cpu_x->cpu_phys_number == cpu_y->cpu_phys_number) {
                return 0;
        }
        return 1;
}

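/*
 * Return the existing affinity set whose cache matches the given LLC
 * pointer, or NULL if none has been created yet.
 */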
static x86_affinity_set_t *
find_cache_affinity(x86_cpu_cache_t *l2_cachep)
{
        x86_affinity_set_t *aset;

        for (aset = x86_affinities; aset != NULL; aset = aset->next) {
                if (l2_cachep == aset->cache) {
                        break;
                }
        }
        return aset;
}

int
ml_get_max_affinity_sets(void)
{
        return x86_affinity_count;
}

processor_set_t
ml_affinity_to_pset(uint32_t affinity_num)
{
        x86_affinity_set_t *aset;

        for (aset = x86_affinities; aset != NULL; aset = aset->next) {
                if (affinity_num == aset->num) {
                        break;
                }
        }
        return (aset == NULL) ? PROCESSOR_SET_NULL : aset->pset;
}
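
/*
 * Usage sketch (illustrative; no caller appears in this file): a client
 * can enumerate the affinity psets built above like so:
 *
 *	int nsets = ml_get_max_affinity_sets();
 *	for (uint32_t a = 0; a < (uint32_t) nsets; a++) {
 *		processor_set_t pset = ml_affinity_to_pset(a);
 *		assert(pset != PROCESSOR_SET_NULL);
 *	}
 */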

uint64_t
ml_cpu_cache_size(unsigned int level)
{
        x86_cpu_cache_t *cachep;

        if (level == 0) {
                return machine_info.max_mem;
        } else if (1 <= level && level <= MAX_CACHE_DEPTH) {
                cachep = current_cpu_datap()->lcpu.caches[level - 1];
                return cachep ? cachep->cache_size : 0;
        } else {
                return 0;
        }
}

uint64_t
ml_cpu_cache_sharing(unsigned int level)
{
        x86_cpu_cache_t *cachep;

        if (level == 0) {
                return machine_info.max_cpus;
        } else if (1 <= level && level <= MAX_CACHE_DEPTH) {
                cachep = current_cpu_datap()->lcpu.caches[level - 1];
                return cachep ? cachep->nlcpus : 0;
        } else {
                return 0;
        }
}
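
/*
 * Level convention for the two queries above: level 0 denotes memory
 * (total size / all cpus); level N >= 1 denotes the level-N cache.
 * Illustrative sketch (hypothetical values for a part with a 256 KB L2
 * shared by two logical cpus):
 *
 *	uint64_t l2_bytes  = ml_cpu_cache_size(2);    // e.g. 262144
 *	uint64_t l2_shared = ml_cpu_cache_sharing(2); // e.g. 2
 */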

#if DEVELOPMENT || DEBUG
volatile int mmiotrace_enabled = 1;
int iotrace_generators = 0;
int iotrace_entries_per_cpu = 0;
int *iotrace_next;
iotrace_entry_t **iotrace_ring;

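/*
 * Allocate and zero the per-cpu iotrace ring buffers: a next-slot index
 * per cpu plus a ring of entries_per_cpu iotrace_entry_t records per cpu.
 * On any allocation failure, everything allocated so far is freed and
 * iotrace_generators stays 0, leaving tracing disabled.
 */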
void
init_iotrace_bufs(int cpucnt, int entries_per_cpu)
{
        int i;

        iotrace_next = kalloc_tag(cpucnt * sizeof(int), VM_KERN_MEMORY_DIAG);
        if (__improbable(iotrace_next == NULL)) {
                iotrace_generators = 0;
                return;
        } else {
                bzero(iotrace_next, cpucnt * sizeof(int));
        }

        iotrace_ring = kalloc_tag(cpucnt * sizeof(iotrace_entry_t *), VM_KERN_MEMORY_DIAG);
        if (__improbable(iotrace_ring == NULL)) {
                kfree(iotrace_next, cpucnt * sizeof(int));
                iotrace_next = NULL;
                iotrace_generators = 0;
                return;
        }
        for (i = 0; i < cpucnt; i++) {
                iotrace_ring[i] = kalloc_tag(entries_per_cpu * sizeof(iotrace_entry_t), VM_KERN_MEMORY_DIAG);
                if (__improbable(iotrace_ring[i] == NULL)) {
                        kfree(iotrace_next, cpucnt * sizeof(int));
                        iotrace_next = NULL;
                        for (int j = 0; j < i; j++) {
                                kfree(iotrace_ring[j], entries_per_cpu * sizeof(iotrace_entry_t));
                        }
                        kfree(iotrace_ring, cpucnt * sizeof(iotrace_entry_t *));
                        iotrace_ring = NULL;
                        return;
                }
                bzero(iotrace_ring[i], entries_per_cpu * sizeof(iotrace_entry_t));
        }

        iotrace_entries_per_cpu = entries_per_cpu;
        iotrace_generators = cpucnt;
}

void
iotrace_init(int ncpus)
{
        int iot, epc;
        int entries_per_cpu;

        if (PE_parse_boot_argn("iotrace", &iot, sizeof(iot))) {
                mmiotrace_enabled = iot;
        }

        if (mmiotrace_enabled == 0) {
                return;
        }

        if (PE_parse_boot_argn("iotrace_epc", &epc, sizeof(epc)) &&
            epc >= 1 && epc <= IOTRACE_MAX_ENTRIES_PER_CPU) {
                entries_per_cpu = epc;
        } else {
                entries_per_cpu = DEFAULT_IOTRACE_ENTRIES_PER_CPU;
        }

        init_iotrace_bufs(ncpus, entries_per_cpu);
}
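
/*
 * Boot-args sketch (illustrative): "iotrace=1 iotrace_epc=64" enables
 * MMIO tracing with 64 ring entries per cpu, while "iotrace=0" disables
 * it. An iotrace_epc value outside [1, IOTRACE_MAX_ENTRIES_PER_CPU]
 * falls back to DEFAULT_IOTRACE_ENTRIES_PER_CPU.
 */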
#endif /* DEVELOPMENT || DEBUG */