/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/machine.h>
#include <mach/processor.h>
#include <kern/kalloc.h>
#include <i386/cpu_affinity.h>
#include <i386/cpu_topology.h>
#include <i386/cpu_data.h>
#include <i386/cpu_threads.h>
#include <i386/machine_cpu.h>
#include <i386/machine_routines.h>
#include <i386/lock.h>
#include <i386/lapic.h>

//#define TOPO_DEBUG 1
#if TOPO_DEBUG
#define DBG(x...)	kprintf("DBG: " x)
#else
#define DBG(x...)
#endif
void debug_topology_print(void);

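/*
 * The kernel's private qsort() is declared here; it is used below to
 * re-order cpu_data_ptr[] by local apic id.
 */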
__private_extern__ void qsort(
	void * array,
	size_t nmembers,
	size_t member_size,
	int (*)(const void *, const void *));

static int lapicid_cmp(const void *x, const void *y);
static x86_affinity_set_t *find_cache_affinity(x86_cpu_cache_t *L2_cachep);

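/*
 * Affinity sets are kept on a singly-linked list, one set per distinct
 * last-level cache, together with a count of how many have been created.
 */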
x86_affinity_set_t	*x86_affinities = NULL;
static int		x86_affinity_count = 0;

/*
 * cpu_topology_start() is called after all processors have been registered
 * but before any non-boot processor is started.
 * We establish canonical logical processor numbering - logical cpus must be
 * contiguous, zero-based and assigned in physical (local apic id) order.
 * This step is required because the discovery/registration order is
 * non-deterministic - cores are registered in differing orders over boots.
 * Enforcing canonical numbering simplifies the identification of processors -
 * in particular, for stopping/starting from CHUD.
 */
void
cpu_topology_start(void)
{
	int		ncpus = machine_info.max_cpus;
	int		i;
	boolean_t	istate;

	assert(machine_info.physical_cpu == 1);
	assert(machine_info.logical_cpu == 1);
	assert(master_cpu == 0);
	assert(cpu_number() == 0);
	assert(cpu_datap(0)->cpu_number == 0);

	/* Lights out for this */
	istate = ml_set_interrupts_enabled(FALSE);

#ifdef TOPO_DEBUG
	DBG("cpu_topology_start() %d cpu%s registered\n",
		ncpus, (ncpus > 1) ? "s" : "");
	for (i = 0; i < ncpus; i++) {
		cpu_data_t	*cpup = cpu_datap(i);
		DBG("\tcpu_data[%d]:0x%08x local apic 0x%x\n",
			i, (unsigned) cpup, cpup->cpu_phys_number);
	}
#endif
	/*
	 * Re-order the cpu_data_ptr vector, sorting by physical (local apic) id.
	 * Skip the boot processor; its slot is required to already be correct.
	 */
	if (ncpus > 1) {
		qsort((void *) &cpu_data_ptr[1],
			ncpus - 1,
			sizeof(cpu_data_t *),
			lapicid_cmp);
	}
#ifdef TOPO_DEBUG
	DBG("cpu_topology_start() after sorting:\n");
	for (i = 0; i < ncpus; i++) {
		cpu_data_t	*cpup = cpu_datap(i);
		DBG("\tcpu_data[%d]:0x%08x local apic 0x%x\n",
			i, (unsigned) cpup, cpup->cpu_phys_number);
	}
#endif

	/*
	 * Fix up logical numbers and reset the map kept by the lapic code.
	 */
	for (i = 1; i < ncpus; i++) {
		cpu_data_t	*cpup = cpu_datap(i);
		x86_core_t	*core = cpup->lcpu.core;
		x86_die_t	*die  = cpup->lcpu.die;
		x86_pkg_t	*pkg  = cpup->lcpu.package;

		assert(core != NULL);
		assert(die != NULL);
		assert(pkg != NULL);

		if (cpup->cpu_number != i) {
			kprintf("cpu_datap(%d):0x%08x local apic id 0x%x "
				"remapped from %d\n",
				i, (unsigned) cpup, cpup->cpu_phys_number,
				cpup->cpu_number);
		}
		cpup->cpu_number = i;
		cpup->lcpu.cpu_num = i;
		cpup->lcpu.pnum = cpup->cpu_phys_number;
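		/* Refresh the local apic id -> cpu number map with the canonical number. */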
		lapic_cpu_map(cpup->cpu_phys_number, i);
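		/* Propagate the canonical numbering up the topology: lcpu, core, die, package. */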
		x86_set_lcpu_numbers(&cpup->lcpu);
		x86_set_core_numbers(core, &cpup->lcpu);
		x86_set_die_numbers(die, &cpup->lcpu);
		x86_set_pkg_numbers(pkg, &cpup->lcpu);
	}

#if TOPO_DEBUG
	debug_topology_print();
#endif /* TOPO_DEBUG */

	ml_set_interrupts_enabled(istate);
	DBG("cpu_topology_start() LLC is L%d\n", topoParms.LLCDepth + 1);

	/*
	 * Iterate over all logical cpus finding or creating the affinity set
	 * for their LLC cache. Each affinity set possesses a processor set
	 * into which each logical processor is added.
	 */
	DBG("cpu_topology_start() creating affinity sets:\n");
	for (i = 0; i < ncpus; i++) {
		cpu_data_t		*cpup = cpu_datap(i);
		x86_lcpu_t		*lcpup = cpu_to_lcpu(i);
		x86_cpu_cache_t		*LLC_cachep;
		x86_affinity_set_t	*aset;

		LLC_cachep = lcpup->caches[topoParms.LLCDepth];
		assert(LLC_cachep->type == CPU_CACHE_TYPE_UNIF);
		aset = find_cache_affinity(LLC_cachep);
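		/* First cpu found sharing this LLC: create an affinity set and its pset. */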
		if (aset == NULL) {
			aset = (x86_affinity_set_t *) kalloc(sizeof(*aset));
			if (aset == NULL)
				panic("cpu_topology_start() failed aset alloc");
			aset->next = x86_affinities;
			x86_affinities = aset;
			aset->num = x86_affinity_count++;
			aset->cache = LLC_cachep;
			aset->pset = (i == master_cpu) ?
					processor_pset(master_processor) :
					pset_create(pset_node_root());
			if (aset->pset == PROCESSOR_SET_NULL)
				panic("cpu_topology_start: pset_create");
			DBG("\tnew set %p(%d) pset %p for cache %p\n",
				aset, aset->num, aset->pset, aset->cache);
		}

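		/* Each non-boot processor is initialized into its affinity set's pset. */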
		DBG("\tprocessor_init set %p(%d) lcpup %p(%d) cpu %p processor %p\n",
			aset, aset->num, lcpup, lcpup->cpu_num, cpup, cpup->cpu_processor);

		if (i != master_cpu)
			processor_init(cpup->cpu_processor, i, aset->pset);
	}

	/*
	 * Finally, we start all processors (including the boot cpu we're
	 * running on).
	 */
	DBG("cpu_topology_start() processor_start():\n");
	for (i = 0; i < ncpus; i++) {
		DBG("\tlcpu %d\n", cpu_datap(i)->cpu_number);
		processor_start(cpu_datap(i)->cpu_processor);
	}
}

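/*
 * qsort() comparator: orders cpu_data_t pointers by ascending local apic id.
 */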
static int
lapicid_cmp(const void *x, const void *y)
{
	cpu_data_t	*cpu_x = *((cpu_data_t **)(uintptr_t)x);
	cpu_data_t	*cpu_y = *((cpu_data_t **)(uintptr_t)y);

	DBG("lapicid_cmp(%p,%p) (%d,%d)\n",
		x, y, cpu_x->cpu_phys_number, cpu_y->cpu_phys_number);
	if (cpu_x->cpu_phys_number < cpu_y->cpu_phys_number)
		return -1;
	if (cpu_x->cpu_phys_number == cpu_y->cpu_phys_number)
		return 0;
	return 1;
}

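/*
 * Return the affinity set already associated with the given cache,
 * or NULL if none exists yet.
 */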
static x86_affinity_set_t *
find_cache_affinity(x86_cpu_cache_t *l2_cachep)
{
	x86_affinity_set_t	*aset;

	for (aset = x86_affinities; aset != NULL; aset = aset->next) {
		if (l2_cachep == aset->cache)
			break;
	}
	return aset;
}

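/*
 * Report how many affinity sets were established at topology start.
 */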
int
ml_get_max_affinity_sets(void)
{
	return x86_affinity_count;
}

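/*
 * Map an affinity set number to its processor set;
 * returns PROCESSOR_SET_NULL if the number is not known.
 */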
processor_set_t
ml_affinity_to_pset(uint32_t affinity_num)
{
	x86_affinity_set_t	*aset;

	for (aset = x86_affinities; aset != NULL; aset = aset->next) {
		if (affinity_num == aset->num)
			break;
	}
	return (aset == NULL) ? PROCESSOR_SET_NULL : aset->pset;
}

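/*
 * Cache size in bytes at the given level for the current cpu;
 * level 0 is treated as memory and returns max_mem.
 */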
uint64_t
ml_cpu_cache_size(unsigned int level)
{
	x86_cpu_cache_t	*cachep;

	if (level == 0) {
		return machine_info.max_mem;
	} else if (1 <= level && level <= MAX_CACHE_DEPTH) {
		cachep = current_cpu_datap()->lcpu.caches[level-1];
		return cachep ? cachep->cache_size : 0;
	} else {
		return 0;
	}
}

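/*
 * Number of logical cpus sharing the cache at the given level on the
 * current cpu; level 0 is treated as memory and returns max_cpus.
 */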
uint64_t
ml_cpu_cache_sharing(unsigned int level)
{
	x86_cpu_cache_t	*cachep;

	if (level == 0) {
		return machine_info.max_cpus;
	} else if (1 <= level && level <= MAX_CACHE_DEPTH) {
		cachep = current_cpu_datap()->lcpu.caches[level-1];
		return cachep ? cachep->nlcpus : 0;
	} else {
		return 0;
	}
}