osfmk/i386/cpu_topology.c (apple/xnu, xnu-7195.60.75)
/*
 * Copyright (c) 2007-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/machine.h>
#include <mach/processor.h>
#include <kern/kalloc.h>
#include <i386/cpu_affinity.h>
#include <i386/cpu_topology.h>
#include <i386/cpu_threads.h>
#include <i386/machine_cpu.h>
#include <i386/bit_routines.h>
#include <i386/cpu_data.h>
#include <i386/lapic.h>
#include <i386/machine_routines.h>
#include <stddef.h>

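/*
 * The in-kernel qsort() is used below to re-order the cpu_data pointer
 * vector by local APIC id (see cpu_topology_sort()).
 */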
__private_extern__ void qsort(
	void * array,
	size_t nmembers,
	size_t member_size,
	int (*)(const void *, const void *));

static int lapicid_cmp(const void *x, const void *y);
static x86_affinity_set_t *find_cache_affinity(x86_cpu_cache_t *L2_cachep);

x86_affinity_set_t *x86_affinities = NULL;
static int x86_affinity_count = 0;

extern cpu_data_t cpshadows[];

#if DEVELOPMENT || DEBUG
void iotrace_init(int ncpus);
void traptrace_init(int ncpus);
#endif /* DEVELOPMENT || DEBUG */


/* Re-sort double-mapped CPU data shadows after topology discovery sorts the
 * primary CPU data structures by physical/APIC CPU ID.
 */
static void
cpu_shadow_sort(int ncpus)
{
	for (int i = 0; i < ncpus; i++) {
		cpu_data_t *cpup = cpu_datap(i);
		ptrdiff_t coff = cpup - cpu_datap(0);

		cpup->cd_shadow = &cpshadows[coff];
	}
}

/*
 * cpu_topology_sort() is called after all processors have been registered but
 * before any non-boot processor is started. We establish canonical logical
 * processor numbering - logical cpus must be contiguous, zero-based and
 * assigned in physical (local apic id) order. This step is required because
 * the discovery/registration order is non-deterministic - cores are registered
 * in differing orders over boots. Enforcing canonical numbering simplifies
 * identification of processors.
 */
void
cpu_topology_sort(int ncpus)
{
	int		i;
	boolean_t	istate;
	processor_t	lprim = NULL;

	assert(machine_info.physical_cpu == 1);
	assert(machine_info.logical_cpu == 1);
	assert(master_cpu == 0);
	assert(cpu_number() == 0);
	assert(cpu_datap(0)->cpu_number == 0);

	uint32_t cpus_per_pset = 0;

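	/*
	 * On DEVELOPMENT/DEBUG kernels the cpus_per_pset boot-arg forces a new
	 * affinity set (and processor set) to be started every cpus_per_pset
	 * logical CPUs in the loop below; 0, the default, groups strictly by
	 * shared LLC.
	 */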
#if DEVELOPMENT || DEBUG
	PE_parse_boot_argn("cpus_per_pset", &cpus_per_pset, sizeof(cpus_per_pset));
#endif

	/* Lights out for this */
	istate = ml_set_interrupts_enabled(FALSE);

	if (topo_dbg) {
		TOPO_DBG("cpu_topology_start() %d cpu%s registered\n",
		    ncpus, (ncpus > 1) ? "s" : "");
		for (i = 0; i < ncpus; i++) {
			cpu_data_t *cpup = cpu_datap(i);
			TOPO_DBG("\tcpu_data[%d]:%p local apic 0x%x\n",
			    i, (void *) cpup, cpup->cpu_phys_number);
		}
	}

	/*
	 * Re-order the cpu_data_ptr vector sorting by physical id.
	 * Skip the boot processor, it's required to be correct.
	 */
	if (ncpus > 1) {
		qsort((void *) &cpu_data_ptr[1],
		    ncpus - 1,
		    sizeof(cpu_data_t *),
		    lapicid_cmp);
	}
	if (topo_dbg) {
		TOPO_DBG("cpu_topology_start() after sorting:\n");
		for (i = 0; i < ncpus; i++) {
			cpu_data_t *cpup = cpu_datap(i);
			TOPO_DBG("\tcpu_data[%d]:%p local apic 0x%x\n",
			    i, (void *) cpup, cpup->cpu_phys_number);
		}
	}

	/*
	 * Finalize logical numbers and map kept by the lapic code.
	 */
	for (i = 0; i < ncpus; i++) {
		cpu_data_t *cpup = cpu_datap(i);

		if (cpup->cpu_number != i) {
			kprintf("cpu_datap(%d):%p local apic id 0x%x "
			    "remapped from %d\n",
			    i, cpup, cpup->cpu_phys_number,
			    cpup->cpu_number);
		}
		cpup->cpu_number = i;
		lapic_cpu_map(cpup->cpu_phys_number, i);
		x86_set_logical_topology(&cpup->lcpu, cpup->cpu_phys_number, i);
	}

	cpu_shadow_sort(ncpus);
	x86_validate_topology();

	ml_set_interrupts_enabled(istate);
	TOPO_DBG("cpu_topology_start() LLC is L%d\n", topoParms.LLCDepth + 1);

#if DEVELOPMENT || DEBUG
	iotrace_init(ncpus);
	traptrace_init(ncpus);
#endif /* DEVELOPMENT || DEBUG */

	/*
	 * Let the CPU Power Management know that the topology is stable.
	 */
	topoParms.stable = TRUE;
	pmCPUStateInit();

	/*
	 * Iterate over all logical cpus finding or creating the affinity set
	 * for their LLC cache. Each affinity set possesses a processor set
	 * into which each logical processor is added.
	 */
	TOPO_DBG("cpu_topology_start() creating affinity sets:ncpus=%d max_cpus=%d\n", ncpus, machine_info.max_cpus);
	for (i = 0; i < machine_info.max_cpus; i++) {
		cpu_data_t		*cpup = cpu_datap(i);
		x86_lcpu_t		*lcpup = cpu_to_lcpu(i);
		x86_cpu_cache_t		*LLC_cachep;
		x86_affinity_set_t	*aset;

		LLC_cachep = lcpup->caches[topoParms.LLCDepth];
		assert(LLC_cachep->type == CPU_CACHE_TYPE_UNIF);
		aset = find_cache_affinity(LLC_cachep);
		if ((aset == NULL) || ((cpus_per_pset != 0) && (i % cpus_per_pset) == 0)) {
			aset = (x86_affinity_set_t *) kalloc(sizeof(*aset));
			if (aset == NULL) {
				panic("cpu_topology_start() failed aset alloc");
			}
			aset->next = x86_affinities;
			x86_affinities = aset;
			aset->num = x86_affinity_count++;
			aset->cache = LLC_cachep;
			aset->pset = (i == master_cpu) ?
			    processor_pset(master_processor) :
			    pset_create(pset_node_root());
			if (aset->pset == PROCESSOR_SET_NULL) {
				panic("cpu_topology_start: pset_create");
			}
			TOPO_DBG("\tnew set %p(%d) pset %p for cache %p\n",
			    aset, aset->num, aset->pset, aset->cache);
		}

		TOPO_DBG("\tprocessor_init set %p(%d) lcpup %p(%d) cpu %p processor %p\n",
		    aset, aset->num, lcpup, lcpup->cpu_num, cpup, cpup->cpu_processor);

		if (i != master_cpu) {
			processor_init(cpup->cpu_processor, i, aset->pset);
		}

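		/*
		 * On SMT parts each core carries more than one logical CPU:
		 * the first logical CPU of the core (lnum == 0) becomes the
		 * primary and its siblings are pointed at it.
		 */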
		if (lcpup->core->num_lcpus > 1) {
			if (lcpup->lnum == 0) {
				lprim = cpup->cpu_processor;
			}

			processor_set_primary(cpup->cpu_processor, lprim);
		}
	}

	if (machine_info.max_cpus < machine_info.logical_cpu_max) {
		/* boot-args cpus=n is set, so adjust max numbers to match */
		int logical_max = machine_info.max_cpus;
		int physical_max = logical_max;
		if (machine_info.logical_cpu_max != machine_info.physical_cpu_max) {
			physical_max = (logical_max + 1) / 2;
		}
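		/*
		 * Illustrative example: on a hyperthreaded part booted with
		 * cpus=6, logical_cpu_max becomes 6 and physical_cpu_max
		 * becomes (6 + 1) / 2 = 3.
		 */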
		machine_info.logical_cpu_max = logical_max;
		machine_info.physical_cpu_max = physical_max;
	}
}

/* We got a request to start a CPU. Check that this CPU is within the
 * max cpu limit set before we do.
 */
kern_return_t
cpu_topology_start_cpu( int cpunum )
{
	int ncpus = machine_info.max_cpus;
	int i = cpunum;

	/* Decide whether to start a CPU, and actually start it */
	TOPO_DBG("cpu_topology_start() processor_start():\n");
	if (i < ncpus) {
		TOPO_DBG("\tlcpu %d\n", cpu_datap(i)->cpu_number);
		processor_start(cpu_datap(i)->cpu_processor);
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}

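/*
 * qsort() comparator: orders cpu_data_t pointers by ascending local APIC
 * (physical) id, returning <0, 0 or >0 per the usual convention.
 */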
static int
lapicid_cmp(const void *x, const void *y)
{
	cpu_data_t *cpu_x = *((cpu_data_t **)(uintptr_t)x);
	cpu_data_t *cpu_y = *((cpu_data_t **)(uintptr_t)y);

	TOPO_DBG("lapicid_cmp(%p,%p) (%d,%d)\n",
	    x, y, cpu_x->cpu_phys_number, cpu_y->cpu_phys_number);
	if (cpu_x->cpu_phys_number < cpu_y->cpu_phys_number) {
		return -1;
	}
	if (cpu_x->cpu_phys_number == cpu_y->cpu_phys_number) {
		return 0;
	}
	return 1;
}

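/*
 * Look up the affinity set already created for a given last-level cache.
 * Returns NULL if no set references this cache yet.
 */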
static x86_affinity_set_t *
find_cache_affinity(x86_cpu_cache_t *l2_cachep)
{
	x86_affinity_set_t *aset;

	for (aset = x86_affinities; aset != NULL; aset = aset->next) {
		if (l2_cachep == aset->cache) {
			break;
		}
	}
	return aset;
}

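/*
 * Machine-layer accessors for the affinity sets built in cpu_topology_sort():
 * the number of sets, and the processor set backing a given affinity tag.
 */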
int
ml_get_max_affinity_sets(void)
{
	return x86_affinity_count;
}

processor_set_t
ml_affinity_to_pset(uint32_t affinity_num)
{
	x86_affinity_set_t *aset;

	for (aset = x86_affinities; aset != NULL; aset = aset->next) {
		if (affinity_num == aset->num) {
			break;
		}
	}
	return (aset == NULL) ? PROCESSOR_SET_NULL : aset->pset;
}

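/*
 * Cache geometry queries. Level 0 is treated as "memory": its size is
 * max_mem and it is shared by all max_cpus processors. Levels 1 through
 * MAX_CACHE_DEPTH report the current CPU's cache at that level, or 0 if
 * no such cache exists.
 */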
uint64_t
ml_cpu_cache_size(unsigned int level)
{
	x86_cpu_cache_t *cachep;

	if (level == 0) {
		return machine_info.max_mem;
	} else if (1 <= level && level <= MAX_CACHE_DEPTH) {
		cachep = current_cpu_datap()->lcpu.caches[level - 1];
		return cachep ? cachep->cache_size : 0;
	} else {
		return 0;
	}
}

uint64_t
ml_cpu_cache_sharing(unsigned int level)
{
	x86_cpu_cache_t *cachep;

	if (level == 0) {
		return machine_info.max_cpus;
	} else if (1 <= level && level <= MAX_CACHE_DEPTH) {
		cachep = current_cpu_datap()->lcpu.caches[level - 1];
		return cachep ? cachep->nlcpus : 0;
	} else {
		return 0;
	}
}

#if DEVELOPMENT || DEBUG
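/*
 * Per-CPU ring buffers for MMIO (iotrace) and trap (traptrace) tracing,
 * available only on DEVELOPMENT/DEBUG kernels. Both are enabled by default
 * here and sized/gated by the boot-args handled below.
 */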
volatile int mmiotrace_enabled = 1;
int iotrace_generators = 0;
int iotrace_entries_per_cpu = 0;
int *iotrace_next;
iotrace_entry_t **iotrace_ring;

volatile int traptrace_enabled = 1;
int traptrace_generators = 0;
int traptrace_entries_per_cpu = 0;
int *traptrace_next;
traptrace_entry_t **traptrace_ring;

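/*
 * Common allocator for both trace facilities: a per-CPU next-entry index
 * array plus a per-CPU ring of fixed-size entries. On any allocation
 * failure everything already allocated is freed and the generator count
 * is left at 0, which keeps the facility disabled.
 */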
static void
init_trace_bufs(int cpucnt, int entries_per_cpu, void ***ring, int entry_size,
    int **next_array, int *allocated_entries_per_cpu, int *allocated_generator_count)
{
	int i;

	*next_array = kalloc_tag(cpucnt * sizeof(int), VM_KERN_MEMORY_DIAG);
	if (__improbable(*next_array == NULL)) {
		*allocated_generator_count = 0;
		return;
	} else {
		bzero(*next_array, cpucnt * sizeof(int));
	}

	*ring = kalloc_tag(cpucnt * sizeof(void *), VM_KERN_MEMORY_DIAG);
	if (__improbable(*ring == NULL)) {
		kfree(*next_array, cpucnt * sizeof(int));
		*next_array = NULL;
		*allocated_generator_count = 0;
		return;
	}
	for (i = 0; i < cpucnt; i++) {
		(*ring)[i] = kalloc_tag(entries_per_cpu * entry_size, VM_KERN_MEMORY_DIAG);
		if (__improbable((*ring)[i] == NULL)) {
			kfree(*next_array, cpucnt * sizeof(int));
			*next_array = NULL;
			for (int j = 0; j < i; j++) {
				kfree((*ring)[j], entries_per_cpu * entry_size);
			}
			kfree(*ring, cpucnt * sizeof(void *));
			*ring = NULL;
			return;
		}
		bzero((*ring)[i], entries_per_cpu * entry_size);
	}

	*allocated_entries_per_cpu = entries_per_cpu;
	*allocated_generator_count = cpucnt;
}


static void
init_iotrace_bufs(int cpucnt, int entries_per_cpu)
{
	init_trace_bufs(cpucnt, entries_per_cpu, (void ***)&iotrace_ring, sizeof(iotrace_entry_t),
	    &iotrace_next, &iotrace_entries_per_cpu, &iotrace_generators);
}

static void
init_traptrace_bufs(int cpucnt, int entries_per_cpu)
{
	init_trace_bufs(cpucnt, entries_per_cpu, (void ***)&traptrace_ring, sizeof(traptrace_entry_t),
	    &traptrace_next, &traptrace_entries_per_cpu, &traptrace_generators);
}

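/*
 * Shared boot-arg handling for both trace facilities. A kern_feature
 * override turns the facility off by default, but an explicit enable
 * boot-arg can still turn it back on. The entries-per-cpu boot-arg is
 * accepted only in the range 1..max_epc; out-of-range values fall back
 * to the default.
 */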
static void
gentrace_configure_from_bootargs(const char *ena_prop, int *ena_valp, const char *epc_prop,
    int *epcp, int max_epc, int def_epc, int override)
{
	if (kern_feature_override(override)) {
		*ena_valp = 0;
	}

	(void) PE_parse_boot_argn(ena_prop, ena_valp, sizeof(*ena_valp));

	if (*ena_valp == 0) {
		return;
	}

	if (PE_parse_boot_argn(epc_prop, epcp, sizeof(*epcp)) &&
	    (*epcp < 1 || *epcp > max_epc)) {
		*epcp = def_epc;
	}
}

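/*
 * Illustrative example (values are hypothetical): booting a DEVELOPMENT
 * kernel with "iotrace=1 iotrace_epc=4096" enables MMIO tracing with 4096
 * entries per CPU, assuming 4096 does not exceed IOTRACE_MAX_ENTRIES_PER_CPU;
 * an out-of-range value would fall back to DEFAULT_IOTRACE_ENTRIES_PER_CPU.
 */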
void
iotrace_init(int ncpus)
{
	int entries_per_cpu = DEFAULT_IOTRACE_ENTRIES_PER_CPU;
	int enable = mmiotrace_enabled;

	gentrace_configure_from_bootargs("iotrace", &enable, "iotrace_epc", &entries_per_cpu,
	    IOTRACE_MAX_ENTRIES_PER_CPU, DEFAULT_IOTRACE_ENTRIES_PER_CPU, KF_IOTRACE_OVRD);

	mmiotrace_enabled = enable;

	if (mmiotrace_enabled) {
		init_iotrace_bufs(ncpus, entries_per_cpu);
	}
}

void
traptrace_init(int ncpus)
{
	int entries_per_cpu = DEFAULT_TRAPTRACE_ENTRIES_PER_CPU;
	int enable = traptrace_enabled;

	gentrace_configure_from_bootargs("traptrace", &enable, "traptrace_epc", &entries_per_cpu,
	    TRAPTRACE_MAX_ENTRIES_PER_CPU, DEFAULT_TRAPTRACE_ENTRIES_PER_CPU, KF_TRAPTRACE_OVRD);

	traptrace_enabled = enable;

	if (traptrace_enabled) {
		init_traptrace_bufs(ncpus, entries_per_cpu);
	}
}

#endif /* DEVELOPMENT || DEBUG */