/*
 * Copyright (c) 2007-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/machine.h>
#include <mach/processor.h>
#include <kern/kalloc.h>
#include <i386/cpu_affinity.h>
#include <i386/cpu_topology.h>
#include <i386/cpu_threads.h>
#include <i386/machine_cpu.h>
#include <i386/lock.h>
#include <i386/cpu_data.h>
#include <i386/lapic.h>
#include <i386/machine_routines.h>

__private_extern__ void qsort(
	void * array,
	size_t nmembers,
	size_t member_size,
	int (*)(const void *, const void *));

static int lapicid_cmp(const void *x, const void *y);
static x86_affinity_set_t *find_cache_affinity(x86_cpu_cache_t *L2_cachep);
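
/*
 * Singly-linked list of affinity sets, one per distinct last-level cache,
 * built by cpu_topology_sort() below.
 */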
x86_affinity_set_t	*x86_affinities = NULL;
static int		x86_affinity_count = 0;

/*
 * cpu_topology_sort() is called after all processors have been registered
 * but before any non-boot processor is started.
 * We establish canonical logical processor numbering - logical cpus must be
 * contiguous, zero-based and assigned in physical (local apic id) order.
 * This step is required because the discovery/registration order is
 * non-deterministic - cores are registered in differing orders over boots.
 * Enforcing canonical numbering simplifies identification
 * of processors - in particular, for stopping/starting from CHUD.
 */
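/*
 * For example (hypothetical local APIC ids): if three secondary processors
 * register in the order 0x6, 0x2, 0x4, the sort below leaves the boot
 * processor in slot 0 and renumbers the others so that cpu 1 -> 0x2,
 * cpu 2 -> 0x4 and cpu 3 -> 0x6, independent of the registration order
 * seen on this particular boot.
 */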
void
cpu_topology_sort(int ncpus)
{
	int		i;
	boolean_t	istate;
	processor_t	lprim = NULL;

	assert(machine_info.physical_cpu == 1);
	assert(machine_info.logical_cpu == 1);
	assert(master_cpu == 0);
	assert(cpu_number() == 0);
	assert(cpu_datap(0)->cpu_number == 0);

	/* Lights out for this */
	istate = ml_set_interrupts_enabled(FALSE);

	if (topo_dbg) {
		TOPO_DBG("cpu_topology_start() %d cpu%s registered\n",
			ncpus, (ncpus > 1) ? "s" : "");
		for (i = 0; i < ncpus; i++) {
			cpu_data_t	*cpup = cpu_datap(i);
			TOPO_DBG("\tcpu_data[%d]:%p local apic 0x%x\n",
				i, (void *) cpup, cpup->cpu_phys_number);
		}
	}

	/*
	 * Re-order the cpu_data_ptr vector sorting by physical id.
	 * Skip the boot processor, it's required to be correct.
	 */
	if (ncpus > 1) {
		qsort((void *) &cpu_data_ptr[1],
			ncpus - 1,
			sizeof(cpu_data_t *),
			lapicid_cmp);
	}
	if (topo_dbg) {
		TOPO_DBG("cpu_topology_start() after sorting:\n");
		for (i = 0; i < ncpus; i++) {
			cpu_data_t	*cpup = cpu_datap(i);
			TOPO_DBG("\tcpu_data[%d]:%p local apic 0x%x\n",
				i, (void *) cpup, cpup->cpu_phys_number);
		}
	}

	/*
	 * Finalize logical numbers and map kept by the lapic code.
	 */
	for (i = 0; i < ncpus; i++) {
		cpu_data_t	*cpup = cpu_datap(i);

		if (cpup->cpu_number != i) {
			kprintf("cpu_datap(%d):%p local apic id 0x%x "
				"remapped from %d\n",
				i, cpup, cpup->cpu_phys_number,
				cpup->cpu_number);
		}
		cpup->cpu_number = i;
		lapic_cpu_map(cpup->cpu_phys_number, i);
		x86_set_logical_topology(&cpup->lcpu, cpup->cpu_phys_number, i);
	}

	x86_validate_topology();

	ml_set_interrupts_enabled(istate);
	TOPO_DBG("cpu_topology_start() LLC is L%d\n", topoParms.LLCDepth + 1);

	/*
	 * Let the CPU Power Management know that the topology is stable.
	 */
	topoParms.stable = TRUE;
	pmCPUStateInit();

	/*
	 * Iterate over all logical cpus finding or creating the affinity set
	 * for their LLC cache. Each affinity set possesses a processor set
	 * into which each logical processor is added.
	 */
	TOPO_DBG("cpu_topology_start() creating affinity sets:\n");
	for (i = 0; i < ncpus; i++) {
		cpu_data_t		*cpup = cpu_datap(i);
		x86_lcpu_t		*lcpup = cpu_to_lcpu(i);
		x86_cpu_cache_t		*LLC_cachep;
		x86_affinity_set_t	*aset;

		LLC_cachep = lcpup->caches[topoParms.LLCDepth];
		assert(LLC_cachep->type == CPU_CACHE_TYPE_UNIF);
		aset = find_cache_affinity(LLC_cachep);
		if (aset == NULL) {
			aset = (x86_affinity_set_t *) kalloc(sizeof(*aset));
			if (aset == NULL)
				panic("cpu_topology_start() failed aset alloc");
			aset->next = x86_affinities;
			x86_affinities = aset;
			aset->num = x86_affinity_count++;
			aset->cache = LLC_cachep;
			aset->pset = (i == master_cpu) ?
					processor_pset(master_processor) :
					pset_create(pset_node_root());
			if (aset->pset == PROCESSOR_SET_NULL)
				panic("cpu_topology_start: pset_create");
			TOPO_DBG("\tnew set %p(%d) pset %p for cache %p\n",
				aset, aset->num, aset->pset, aset->cache);
		}

		TOPO_DBG("\tprocessor_init set %p(%d) lcpup %p(%d) cpu %p processor %p\n",
			aset, aset->num, lcpup, lcpup->cpu_num, cpup, cpup->cpu_processor);

		if (i != master_cpu)
			processor_init(cpup->cpu_processor, i, aset->pset);
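
		/*
		 * For cores with more than one logical cpu, remember the
		 * core's primary (lnum 0) and associate each sibling with it
		 * via processor_meta_init() so the scheduler can treat the
		 * threads of a core as related resources.
		 */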
		if (lcpup->core->num_lcpus > 1) {
			if (lcpup->lnum == 0)
				lprim = cpup->cpu_processor;

			processor_meta_init(cpup->cpu_processor, lprim);
		}
	}
}

/*
 * We got a request to start a CPU. Check that this CPU is within the
 * configured max cpu limit before actually starting it.
 */
kern_return_t
cpu_topology_start_cpu( int cpunum )
{
	int		ncpus = machine_info.max_cpus;
	int		i = cpunum;

	/* Decide whether to start a CPU, and actually start it */
	TOPO_DBG("cpu_topology_start() processor_start():\n");
	if (i < ncpus) {
		TOPO_DBG("\tlcpu %d\n", cpu_datap(i)->cpu_number);
		processor_start(cpu_datap(i)->cpu_processor);
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}
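
/*
 * qsort() comparator used by cpu_topology_sort(): orders cpu_data_t
 * pointers by ascending local APIC id.
 */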
static int
lapicid_cmp(const void *x, const void *y)
{
	cpu_data_t	*cpu_x = *((cpu_data_t **)(uintptr_t)x);
	cpu_data_t	*cpu_y = *((cpu_data_t **)(uintptr_t)y);

	TOPO_DBG("lapicid_cmp(%p,%p) (%d,%d)\n",
		x, y, cpu_x->cpu_phys_number, cpu_y->cpu_phys_number);
	if (cpu_x->cpu_phys_number < cpu_y->cpu_phys_number)
		return -1;
	if (cpu_x->cpu_phys_number == cpu_y->cpu_phys_number)
		return 0;
	return 1;
}
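
/*
 * Look up the affinity set, if any, already created for the given
 * last-level cache; returns NULL if no set covers this cache yet.
 */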
static x86_affinity_set_t *
find_cache_affinity(x86_cpu_cache_t *l2_cachep)
{
	x86_affinity_set_t	*aset;

	for (aset = x86_affinities; aset != NULL; aset = aset->next) {
		if (l2_cachep == aset->cache)
			break;
	}
	return aset;
}
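
/*
 * Return the number of affinity sets (i.e. distinct last-level caches)
 * discovered so far.
 */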
int
ml_get_max_affinity_sets(void)
{
	return x86_affinity_count;
}
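
/*
 * Map an affinity set number to its processor set; PROCESSOR_SET_NULL if
 * no such set exists.
 */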
processor_set_t
ml_affinity_to_pset(uint32_t affinity_num)
{
	x86_affinity_set_t	*aset;

	for (aset = x86_affinities; aset != NULL; aset = aset->next) {
		if (affinity_num == aset->num)
			break;
	}
	return (aset == NULL) ? PROCESSOR_SET_NULL : aset->pset;
}
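
/*
 * Report the cache size at the given level for the current cpu:
 * level 0 is treated as physical memory (max_mem); levels
 * 1..MAX_CACHE_DEPTH report the corresponding cache level, or 0 if
 * that level is not present.
 */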
uint64_t
ml_cpu_cache_size(unsigned int level)
{
	x86_cpu_cache_t	*cachep;

	if (level == 0) {
		return machine_info.max_mem;
	} else if (1 <= level && level <= MAX_CACHE_DEPTH) {
		cachep = current_cpu_datap()->lcpu.caches[level - 1];
		return cachep ? cachep->cache_size : 0;
	} else {
		return 0;
	}
}
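
/*
 * Report how many logical cpus share the cache at the given level on the
 * current cpu; level 0 is treated as memory shared by all cpus
 * (machine_info.max_cpus).
 */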
uint64_t
ml_cpu_cache_sharing(unsigned int level)
{
	x86_cpu_cache_t	*cachep;

	if (level == 0) {
		return machine_info.max_cpus;
	} else if (1 <= level && level <= MAX_CACHE_DEPTH) {
		cachep = current_cpu_datap()->lcpu.caches[level - 1];
		return cachep ? cachep->nlcpus : 0;
	} else {
		return 0;
	}
}