/*
 * Copyright (c) 2007-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/machine.h>
#include <mach/processor.h>
#include <kern/kalloc.h>
#include <i386/cpu_affinity.h>
#include <i386/cpu_topology.h>
#include <i386/cpu_threads.h>
#include <i386/machine_cpu.h>
#include <i386/bit_routines.h>
#include <i386/cpu_data.h>
#include <i386/lapic.h>
#include <i386/machine_routines.h>

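/*
 * qsort() is the kernel's in-kernel sort routine; it is prototyped here
 * directly (presumably because no osfmk header exports it) and is used
 * below to re-order the cpu_data_ptr vector by local APIC id.
 */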
__private_extern__ void qsort(
    void * array,
    size_t nmembers,
    size_t member_size,
    int (*)(const void *, const void *));

static int lapicid_cmp(const void *x, const void *y);
static x86_affinity_set_t *find_cache_affinity(x86_cpu_cache_t *l2_cachep);

x86_affinity_set_t *x86_affinities = NULL;
static int x86_affinity_count = 0;

extern cpu_data_t cpshadows[];
/* Re-sort double-mapped CPU data shadows after topology discovery sorts the
 * primary CPU data structures by physical/APIC CPU ID.
 */
static void
cpu_shadow_sort(int ncpus)
{
    for (int i = 0; i < ncpus; i++) {
        cpu_data_t *cpup = cpu_datap(i);
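        /*
         * Pointer difference yields the array index: the per-cpu data
         * structures are assumed to sit in one contiguous array that
         * cpshadows[] mirrors element-for-element.
         */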
        ptrdiff_t coff = cpup - cpu_datap(0);

        cpup->cd_shadow = &cpshadows[coff];
    }
}

/*
 * cpu_topology_sort() is called after all processors have been registered
 * but before any non-boot processor is started.
 * We establish canonical logical processor numbering - logical cpus must be
 * contiguous, zero-based and assigned in physical (local apic id) order.
 * This step is required because the discovery/registration order is
 * non-deterministic - cores are registered in differing orders over boots.
 * Enforcing canonical numbering simplifies identification
 * of processors - in particular, for stopping/starting from CHUD.
 */
void
cpu_topology_sort(int ncpus)
{
    int i;
    boolean_t istate;
    processor_t lprim = NULL;

    assert(machine_info.physical_cpu == 1);
    assert(machine_info.logical_cpu == 1);
    assert(master_cpu == 0);
    assert(cpu_number() == 0);
    assert(cpu_datap(0)->cpu_number == 0);

    /* Lights out for this: re-number cpus with interrupts disabled */
    istate = ml_set_interrupts_enabled(FALSE);

    if (topo_dbg) {
        TOPO_DBG("cpu_topology_sort() %d cpu%s registered\n",
            ncpus, (ncpus > 1) ? "s" : "");
        for (i = 0; i < ncpus; i++) {
            cpu_data_t *cpup = cpu_datap(i);
            TOPO_DBG("\tcpu_data[%d]:%p local apic 0x%x\n",
                i, (void *) cpup, cpup->cpu_phys_number);
        }
    }

    /*
     * Re-order the cpu_data_ptr vector, sorting by physical (local APIC) id.
     * Skip the boot processor: it is already logical cpu 0 and must stay there.
     */
    if (ncpus > 1) {
        qsort((void *) &cpu_data_ptr[1],
            ncpus - 1,
            sizeof(cpu_data_t *),
            lapicid_cmp);
    }
    if (topo_dbg) {
        TOPO_DBG("cpu_topology_sort() after sorting:\n");
        for (i = 0; i < ncpus; i++) {
            cpu_data_t *cpup = cpu_datap(i);
            TOPO_DBG("\tcpu_data[%d]:%p local apic 0x%x\n",
                i, (void *) cpup, cpup->cpu_phys_number);
        }
    }

    /*
     * Finalize logical numbers and the map kept by the lapic code.
     */
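    /*
     * After the sort above, slot i of cpu_data_ptr holds the cpu with the
     * i'th-smallest APIC id; stamp that slot index as the cpu's logical
     * number and refresh the apic-id <-> cpu-number mapping that the lapic
     * code maintains.
     */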
    for (i = 0; i < ncpus; i++) {
        cpu_data_t *cpup = cpu_datap(i);

        if (cpup->cpu_number != i) {
            kprintf("cpu_datap(%d):%p local apic id 0x%x "
                "remapped from %d\n",
                i, cpup, cpup->cpu_phys_number,
                cpup->cpu_number);
        }
        cpup->cpu_number = i;
        lapic_cpu_map(cpup->cpu_phys_number, i);
        x86_set_logical_topology(&cpup->lcpu, cpup->cpu_phys_number, i);
    }

    cpu_shadow_sort(ncpus);
    x86_validate_topology();

    ml_set_interrupts_enabled(istate);
    TOPO_DBG("cpu_topology_sort() LLC is L%d\n", topoParms.LLCDepth + 1);

    /*
     * Let CPU Power Management know that the topology is stable.
     */
    topoParms.stable = TRUE;
    pmCPUStateInit();

    /*
     * Iterate over all logical cpus, finding or creating the affinity set
     * for their LLC cache. Each affinity set possesses a processor set
     * into which each logical processor is added.
     */
    TOPO_DBG("cpu_topology_sort() creating affinity sets:\n");
    for (i = 0; i < ncpus; i++) {
        cpu_data_t *cpup = cpu_datap(i);
        x86_lcpu_t *lcpup = cpu_to_lcpu(i);
        x86_cpu_cache_t *LLC_cachep;
        x86_affinity_set_t *aset;

        LLC_cachep = lcpup->caches[topoParms.LLCDepth];
        assert(LLC_cachep->type == CPU_CACHE_TYPE_UNIF);
        aset = find_cache_affinity(LLC_cachep);
        if (aset == NULL) {
            aset = (x86_affinity_set_t *) kalloc(sizeof(*aset));
            if (aset == NULL)
                panic("cpu_topology_sort() failed aset alloc");
            aset->next = x86_affinities;
            x86_affinities = aset;
            aset->num = x86_affinity_count++;
            aset->cache = LLC_cachep;
            aset->pset = (i == master_cpu) ?
                processor_pset(master_processor) :
                pset_create(pset_node_root());
            if (aset->pset == PROCESSOR_SET_NULL)
                panic("cpu_topology_sort: pset_create");
            TOPO_DBG("\tnew set %p(%d) pset %p for cache %p\n",
                aset, aset->num, aset->pset, aset->cache);
        }

        TOPO_DBG("\tprocessor_init set %p(%d) lcpup %p(%d) cpu %p processor %p\n",
            aset, aset->num, lcpup, lcpup->cpu_num, cpup, cpup->cpu_processor);

        if (i != master_cpu)
            processor_init(cpup->cpu_processor, i, aset->pset);

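        /*
         * On SMT cores, designate the core's first logical cpu (lnum 0) as
         * the primary and bind each sibling to it. This relies on the
         * APIC-id sort above visiting a core's primary before its SMT
         * siblings, so lprim still names the right primary when a sibling
         * comes around.
         */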
        if (lcpup->core->num_lcpus > 1) {
            if (lcpup->lnum == 0)
                lprim = cpup->cpu_processor;

            processor_set_primary(cpup->cpu_processor, lprim);
        }
    }
}

/* We got a request to start a CPU. Check that the CPU is within the
 * configured max cpu limit before actually starting it.
 */
kern_return_t
cpu_topology_start_cpu(int cpunum)
{
    int ncpus = machine_info.max_cpus;
    int i = cpunum;

    /* Decide whether to start a CPU, and actually start it */
    TOPO_DBG("cpu_topology_start_cpu() processor_start():\n");
    if (i < ncpus) {
        TOPO_DBG("\tlcpu %d\n", cpu_datap(i)->cpu_number);
        processor_start(cpu_datap(i)->cpu_processor);
        return KERN_SUCCESS;
    } else {
        return KERN_FAILURE;
    }
}

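/*
 * qsort() comparator: orders cpu_data_t pointers by ascending local APIC id.
 * Explicit comparisons are used rather than the subtraction idiom, which
 * also avoids any wrap-around with unsigned APIC ids.
 */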
static int
lapicid_cmp(const void *x, const void *y)
{
    cpu_data_t *cpu_x = *((cpu_data_t **)(uintptr_t)x);
    cpu_data_t *cpu_y = *((cpu_data_t **)(uintptr_t)y);

    TOPO_DBG("lapicid_cmp(%p,%p) (%d,%d)\n",
        x, y, cpu_x->cpu_phys_number, cpu_y->cpu_phys_number);
    if (cpu_x->cpu_phys_number < cpu_y->cpu_phys_number)
        return -1;
    if (cpu_x->cpu_phys_number == cpu_y->cpu_phys_number)
        return 0;
    return 1;
}

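/*
 * Walk the singly-linked list of affinity sets for one whose cache is the
 * given LLC; returns NULL if no existing set shares that cache yet.
 */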
static x86_affinity_set_t *
find_cache_affinity(x86_cpu_cache_t *l2_cachep)
{
    x86_affinity_set_t *aset;

    for (aset = x86_affinities; aset != NULL; aset = aset->next) {
        if (l2_cachep == aset->cache)
            break;
    }
    return aset;
}

int
ml_get_max_affinity_sets(void)
{
    return x86_affinity_count;
}

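/*
 * Map an affinity set number (0 .. ml_get_max_affinity_sets() - 1) to its
 * processor set; returns PROCESSOR_SET_NULL for an unknown number.
 */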
processor_set_t
ml_affinity_to_pset(uint32_t affinity_num)
{
    x86_affinity_set_t *aset;

    for (aset = x86_affinities; aset != NULL; aset = aset->next) {
        if (affinity_num == aset->num)
            break;
    }
    return (aset == NULL) ? PROCESSOR_SET_NULL : aset->pset;
}

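/*
 * Report the size in bytes of the current cpu's cache at the given level.
 * Level 0 is treated as the whole memory hierarchy (max_mem); levels
 * 1..MAX_CACHE_DEPTH index the per-cpu cache array. A caller might do,
 * for example:
 *
 *     uint64_t l2_bytes = ml_cpu_cache_size(2);   // 0 if no L2 reported
 */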
uint64_t
ml_cpu_cache_size(unsigned int level)
{
    x86_cpu_cache_t *cachep;

    if (level == 0) {
        return machine_info.max_mem;
    } else if (1 <= level && level <= MAX_CACHE_DEPTH) {
        cachep = current_cpu_datap()->lcpu.caches[level - 1];
        return cachep ? cachep->cache_size : 0;
    } else {
        return 0;
    }
}

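/*
 * Report how many logical cpus share the current cpu's cache at the given
 * level; by analogy with ml_cpu_cache_size(), level 0 means main memory,
 * shared by all (max_cpus) cpus.
 */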
uint64_t
ml_cpu_cache_sharing(unsigned int level)
{
    x86_cpu_cache_t *cachep;

    if (level == 0) {
        return machine_info.max_cpus;
    } else if (1 <= level && level <= MAX_CACHE_DEPTH) {
        cachep = current_cpu_datap()->lcpu.caches[level - 1];
        return cachep ? cachep->nlcpus : 0;
    } else {
        return 0;
    }
}