/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
#include <platforms.h>
#include <mach_kdb.h>
#include <vm/vm_page.h>
#include <pexpert/pexpert.h>

#include <i386/cpuid.h>
#if MACH_KDB
#include <machine/db_machdep.h>
#include <ddb/db_aout.h>
#include <ddb/db_access.h>
#include <ddb/db_sym.h>
#include <ddb/db_variables.h>
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_expr.h>
#endif

#define min(a,b)    ((a) < (b) ? (a) : (b))
#define quad(hi,lo) (((uint64_t)(hi)) << 32 | (lo))

/* Only for 32-bit values */
#define bit32(n)          (1U << (n))
#define bitmask32(h,l)    ((bit32(h)|(bit32(h)-1)) & ~(bit32(l)-1))
#define bitfield32(x,h,l) ((((x) & bitmask32(h,l)) >> (l)))
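/*
 * Worked example (illustrative only; these exact values are not used
 * elsewhere in this file):
 *   bitmask32(7,4)               == 0x000000F0
 *   bitfield32(0x12345678, 7, 4) == 0x7
 *   quad(0x1, 0x2)               == 0x0000000100000002ULL
 */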

/*
 * Leaf 2 cache descriptor encodings.
 */
typedef enum {
    _NULL_,         /* NULL (empty) descriptor */
    CACHE,          /* Cache */
    TLB,            /* TLB */
    STLB,           /* Shared second-level unified TLB */
    PREFETCH        /* Prefetch size */
} cpuid_leaf2_desc_type_t;

typedef enum {
    NA,             /* Not Applicable */
    FULLY,          /* Fully-associative */
    TRACE,          /* Trace Cache (P4 only) */
    INST,           /* Instruction TLB */
    DATA,           /* Data TLB */
    DATA0,          /* Data TLB, 1st level */
    DATA1,          /* Data TLB, 2nd level */
    L1,             /* L1 (unified) cache */
    L1_INST,        /* L1 Instruction cache */
    L1_DATA,        /* L1 Data cache */
    L2,             /* L2 (unified) cache */
    L3,             /* L3 (unified) cache */
    L2_2LINESECTOR, /* L2 (unified) cache with 2 lines per sector */
    L3_2LINESECTOR, /* L3 (unified) cache with 2 lines per sector */
    SMALL,          /* Small page TLB */
    LARGE,          /* Large page TLB */
    BOTH            /* Small and Large page TLB */
} cpuid_leaf2_qualifier_t;

typedef struct cpuid_cache_descriptor {
    uint8_t     value;      /* descriptor code */
    uint8_t     type;       /* cpuid_leaf2_desc_type_t */
    uint8_t     level;      /* level of cache/TLB hierarchy */
    uint8_t     ways;       /* wayness of cache */
    uint16_t    size;       /* cachesize or TLB pagesize */
    uint16_t    entries;    /* number of TLB entries or linesize */
} cpuid_cache_descriptor_t;

/*
 * These multipliers are used to encode 1*K .. 64*M in a 16-bit size field
 */
#define K (1)
#define M (1024)
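/*
 * In other words, sizes in the table below are stored in kilobytes,
 * e.g. 1536*K == 1536 (a 1.5 MB cache) and 12*M == 12288 (a 12 MB cache).
 */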

/*
 * Intel cache descriptor table:
 */
static cpuid_cache_descriptor_t intel_cpuid_leaf2_descriptor_table[] = {
    // -------------------------------------------------------
    // value  type  level  ways  size  entries
    // -------------------------------------------------------
    { 0x00, _NULL_, NA, NA, NA, NA },
    { 0x01, TLB, INST, 4, SMALL, 32 },
    { 0x02, TLB, INST, FULLY, LARGE, 2 },
    { 0x03, TLB, DATA, 4, SMALL, 64 },
    { 0x04, TLB, DATA, 4, LARGE, 8 },
    { 0x05, TLB, DATA1, 4, LARGE, 32 },
    { 0x06, CACHE, L1_INST, 4, 8*K, 32 },
    { 0x08, CACHE, L1_INST, 4, 16*K, 32 },
    { 0x09, CACHE, L1_INST, 4, 32*K, 64 },
    { 0x0A, CACHE, L1_DATA, 2, 8*K, 32 },
    { 0x0B, TLB, INST, 4, LARGE, 4 },
    { 0x0C, CACHE, L1_DATA, 4, 16*K, 32 },
    { 0x0D, CACHE, L1_DATA, 4, 16*K, 64 },
    { 0x0E, CACHE, L1_DATA, 6, 24*K, 64 },
    { 0x21, CACHE, L2, 8, 256*K, 64 },
    { 0x22, CACHE, L3_2LINESECTOR, 4, 512*K, 64 },
    { 0x23, CACHE, L3_2LINESECTOR, 8, 1*M, 64 },
    { 0x25, CACHE, L3_2LINESECTOR, 8, 2*M, 64 },
    { 0x29, CACHE, L3_2LINESECTOR, 8, 4*M, 64 },
    { 0x2C, CACHE, L1_DATA, 8, 32*K, 64 },
    { 0x30, CACHE, L1_INST, 8, 32*K, 64 },
    { 0x40, CACHE, L2, NA, 0, NA },
    { 0x41, CACHE, L2, 4, 128*K, 32 },
    { 0x42, CACHE, L2, 4, 256*K, 32 },
    { 0x43, CACHE, L2, 4, 512*K, 32 },
    { 0x44, CACHE, L2, 4, 1*M, 32 },
    { 0x45, CACHE, L2, 4, 2*M, 32 },
    { 0x46, CACHE, L3, 4, 4*M, 64 },
    { 0x47, CACHE, L3, 8, 8*M, 64 },
    { 0x48, CACHE, L2, 12, 3*M, 64 },
    { 0x49, CACHE, L2, 16, 4*M, 64 },
    { 0x4A, CACHE, L3, 12, 6*M, 64 },
    { 0x4B, CACHE, L3, 16, 8*M, 64 },
    { 0x4C, CACHE, L3, 12, 12*M, 64 },
    { 0x4D, CACHE, L3, 16, 16*M, 64 },
    { 0x4E, CACHE, L2, 24, 6*M, 64 },
    { 0x4F, TLB, INST, NA, SMALL, 32 },
    { 0x50, TLB, INST, NA, BOTH, 64 },
    { 0x51, TLB, INST, NA, BOTH, 128 },
    { 0x52, TLB, INST, NA, BOTH, 256 },
    { 0x55, TLB, INST, FULLY, BOTH, 7 },
    { 0x56, TLB, DATA0, 4, LARGE, 16 },
    { 0x57, TLB, DATA0, 4, SMALL, 16 },
    { 0x59, TLB, DATA0, FULLY, SMALL, 16 },
    { 0x5A, TLB, DATA0, 4, LARGE, 32 },
    { 0x5B, TLB, DATA, NA, BOTH, 64 },
    { 0x5C, TLB, DATA, NA, BOTH, 128 },
    { 0x5D, TLB, DATA, NA, BOTH, 256 },
    { 0x60, CACHE, L1, 8, 16*K, 64 },
    { 0x61, CACHE, L1, 4, 8*K, 64 },
    { 0x62, CACHE, L1, 4, 16*K, 64 },
    { 0x63, CACHE, L1, 4, 32*K, 64 },
    { 0x70, CACHE, TRACE, 8, 12*K, NA },
    { 0x71, CACHE, TRACE, 8, 16*K, NA },
    { 0x72, CACHE, TRACE, 8, 32*K, NA },
    { 0x78, CACHE, L2, 4, 1*M, 64 },
    { 0x79, CACHE, L2_2LINESECTOR, 8, 128*K, 64 },
    { 0x7A, CACHE, L2_2LINESECTOR, 8, 256*K, 64 },
    { 0x7B, CACHE, L2_2LINESECTOR, 8, 512*K, 64 },
    { 0x7C, CACHE, L2_2LINESECTOR, 8, 1*M, 64 },
    { 0x7D, CACHE, L2, 8, 2*M, 64 },
    { 0x7F, CACHE, L2, 2, 512*K, 64 },
    { 0x80, CACHE, L2, 8, 512*K, 64 },
    { 0x82, CACHE, L2, 8, 256*K, 32 },
    { 0x83, CACHE, L2, 8, 512*K, 32 },
    { 0x84, CACHE, L2, 8, 1*M, 32 },
    { 0x85, CACHE, L2, 8, 2*M, 32 },
    { 0x86, CACHE, L2, 4, 512*K, 64 },
    { 0x87, CACHE, L2, 8, 1*M, 64 },
    { 0xB0, TLB, INST, 4, SMALL, 128 },
    { 0xB1, TLB, INST, 4, LARGE, 8 },
    { 0xB2, TLB, INST, 4, SMALL, 64 },
    { 0xB3, TLB, DATA, 4, SMALL, 128 },
    { 0xB4, TLB, DATA1, 4, SMALL, 256 },
    { 0xBA, TLB, DATA1, 4, BOTH, 64 },
    { 0xCA, STLB, DATA1, 4, BOTH, 512 },
    { 0xD0, CACHE, L3, 4, 512*K, 64 },
    { 0xD1, CACHE, L3, 4, 1*M, 64 },
    { 0xD2, CACHE, L3, 4, 2*M, 64 },
    { 0xD3, CACHE, L3, 4, 4*M, 64 },
    { 0xD4, CACHE, L3, 4, 8*M, 64 },
    { 0xD6, CACHE, L3, 8, 1*M, 64 },
    { 0xD7, CACHE, L3, 8, 2*M, 64 },
    { 0xD8, CACHE, L3, 8, 4*M, 64 },
    { 0xD9, CACHE, L3, 8, 8*M, 64 },
    { 0xDA, CACHE, L3, 8, 12*M, 64 },
    { 0xDC, CACHE, L3, 12, 1536*K, 64 },
    { 0xDD, CACHE, L3, 12, 3*M, 64 },
    { 0xDE, CACHE, L3, 12, 6*M, 64 },
    { 0xDF, CACHE, L3, 12, 12*M, 64 },
    { 0xE0, CACHE, L3, 12, 18*M, 64 },
    { 0xE2, CACHE, L3, 16, 2*M, 64 },
    { 0xE3, CACHE, L3, 16, 4*M, 64 },
    { 0xE4, CACHE, L3, 16, 8*M, 64 },
    { 0xE5, CACHE, L3, 16, 16*M, 64 },
    { 0xE6, CACHE, L3, 16, 24*M, 64 },
    { 0xF0, PREFETCH, NA, NA, 64, NA },
    { 0xF1, PREFETCH, NA, NA, 128, NA },
    { 0xFF, CACHE, NA, NA, 0, NA }
};
#define INTEL_LEAF2_DESC_NUM (sizeof(intel_cpuid_leaf2_descriptor_table) / \
                              sizeof(cpuid_cache_descriptor_t))

static inline cpuid_cache_descriptor_t *
cpuid_leaf2_find(uint8_t value)
{
    unsigned int i;

    for (i = 0; i < INTEL_LEAF2_DESC_NUM; i++)
        if (intel_cpuid_leaf2_descriptor_table[i].value == value)
            return &intel_cpuid_leaf2_descriptor_table[i];
    return NULL;
}
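/*
 * Illustrative lookups (not part of any code path in this file): given the
 * table above, cpuid_leaf2_find(0x5B) returns the descriptor for a 64-entry
 * data TLB covering both small and large pages, while cpuid_leaf2_find(0x10)
 * returns NULL because 0x10 is not listed.
 */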

/*
 * CPU identification routines.
 */

static i386_cpu_info_t *cpuid_cpu_infop = NULL;
static i386_cpu_info_t cpuid_cpu_info;

#if defined(__x86_64__)
static void cpuid_fn(uint32_t selector, uint32_t *result)
{
    do_cpuid(selector, result);
}
#else
static void cpuid_fn(uint32_t selector, uint32_t *result)
{
    if (get_is64bit()) {
        asm("call _cpuid64"
            : "=a" (result[0]),
              "=b" (result[1]),
              "=c" (result[2]),
              "=d" (result[3])
            : "a" (selector),
              "b" (0),
              "c" (0),
              "d" (0));
    } else {
        do_cpuid(selector, result);
    }
}
#endif

/* this function is Intel-specific */
static void
cpuid_set_cache_info( i386_cpu_info_t * info_p )
{
    uint32_t     cpuid_result[4];
    uint32_t     reg[4];
    uint32_t     index;
    uint32_t     linesizes[LCACHE_MAX];
    unsigned int i;
    unsigned int j;
    boolean_t    cpuid_deterministic_supported = FALSE;

    bzero( linesizes, sizeof(linesizes) );

    /* Get processor cache descriptor info using leaf 2.  We don't use
     * this internally, but must publish it for KEXTs.
     */
    cpuid_fn(2, cpuid_result);
    for (j = 0; j < 4; j++) {
        if ((cpuid_result[j] >> 31) == 1)   /* bit31 is validity */
            continue;
        ((uint32_t *) info_p->cache_info)[j] = cpuid_result[j];
    }
    /* first byte gives number of cpuid calls to get all descriptors */
    for (i = 1; i < info_p->cache_info[0]; i++) {
        if (i*16 > sizeof(info_p->cache_info))
            break;
        cpuid_fn(2, cpuid_result);
        for (j = 0; j < 4; j++) {
            if ((cpuid_result[j] >> 31) == 1)
                continue;
            ((uint32_t *) info_p->cache_info)[4*i+j] =
                cpuid_result[j];
        }
    }
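    /*
     * Hypothetical leaf 2 value, for illustration only: if EAX came back
     * as 0x302C5001, the low byte 0x01 means a single CPUID(2) call
     * suffices, and the remaining bytes 0x50, 0x2C and 0x30 are descriptor
     * codes to be looked up in intel_cpuid_leaf2_descriptor_table above.
     */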

    /*
     * Get cache info using leaf 4, the "deterministic cache parameters."
     * Most processors Mac OS X supports implement this flavor of CPUID.
     * Loop over each cache on the processor.
     */
    cpuid_fn(0, cpuid_result);
    if (cpuid_result[eax] >= 4)
        cpuid_deterministic_supported = TRUE;

    for (index = 0; cpuid_deterministic_supported; index++) {
        cache_type_t    type = Lnone;
        uint32_t        cache_type;
        uint32_t        cache_level;
        uint32_t        cache_sharing;
        uint32_t        cache_linesize;
        uint32_t        cache_sets;
        uint32_t        cache_associativity;
        uint32_t        cache_size;
        uint32_t        cache_partitions;
        uint32_t        colors;

        reg[eax] = 4;       /* cpuid request 4 */
        reg[ecx] = index;   /* index starting at 0 */
        cpuid(reg);
        //kprintf("cpuid(4) index=%d eax=%p\n", index, reg[eax]);
        cache_type = bitfield32(reg[eax], 4, 0);
        if (cache_type == 0)
            break;          /* no more caches */
        cache_level         = bitfield32(reg[eax],  7,  5);
        cache_sharing       = bitfield32(reg[eax], 25, 14) + 1;
        info_p->cpuid_cores_per_package
                            = bitfield32(reg[eax], 31, 26) + 1;
        cache_linesize      = bitfield32(reg[ebx], 11,  0) + 1;
        cache_partitions    = bitfield32(reg[ebx], 21, 12) + 1;
        cache_associativity = bitfield32(reg[ebx], 31, 22) + 1;
        cache_sets          = bitfield32(reg[ecx], 31,  0) + 1;

        /* Map type/levels returned by CPUID into cache_type_t */
        switch (cache_level) {
        case 1:
            type = cache_type == 1 ? L1D :
                   cache_type == 2 ? L1I :
                   Lnone;
            break;
        case 2:
            type = cache_type == 3 ? L2U :
                   Lnone;
            break;
        case 3:
            type = cache_type == 3 ? L3U :
                   Lnone;
            break;
        default:
            type = Lnone;
        }

        /* The total size of a cache is:
         *      ( linesize * sets * associativity * partitions )
         */
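        /*
         * For example (values chosen purely for illustration): a cache with
         * 64-byte lines, 8192 sets, 16 ways and 1 partition works out to
         * 64 * 8192 * 16 * 1 = 8 MB.
         */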
        if (type != Lnone) {
            cache_size = cache_linesize * cache_sets *
                         cache_associativity * cache_partitions;
            info_p->cache_size[type] = cache_size;
            info_p->cache_sharing[type] = cache_sharing;
            info_p->cache_partitions[type] = cache_partitions;
            linesizes[type] = cache_linesize;

            /*
             * Overwrite associativity determined via
             * CPUID.0x80000006 -- this leaf is more
             * accurate
             */
            if (type == L2U)
                info_p->cpuid_cache_L2_associativity = cache_associativity;

            /* Compute the number of page colors for this cache,
             * which is:
             *      ( linesize * sets ) / page_size
             *
             * To help visualize this, consider two views of a
             * physical address.  To the cache, it is composed
             * of a line offset, a set selector, and a tag.
             * To VM, it is composed of a page offset, a page
             * color, and other bits in the pageframe number:
             *
             *           +-----------------+---------+--------+
             *  cache:   |       tag       |   set   | offset |
             *           +-----------------+---------+--------+
             *
             *           +-----------------+-------+----------+
             *  VM:      |    don't care   | color | pg offset|
             *           +-----------------+-------+----------+
             *
             * The color is those bits in (set+offset) not covered
             * by the page offset.
             */
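            /*
             * Worked example (illustrative numbers): with 64-byte lines and
             * 512 sets, one way spans 64 * 512 = 32 KB; with 4 KB pages
             * (the ">> 12" below) that gives 32 KB / 4 KB = 8 page colors.
             */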
            colors = ( cache_linesize * cache_sets ) >> 12;

            if ( colors > vm_cache_geometry_colors )
                vm_cache_geometry_colors = colors;
        }
    }

    /*
     * If deterministic cache parameters are not available, fall back to
     * the L2 values gathered from CPUID leaf 0x80000006 by
     * cpuid_set_generic_info().
     */
    if (info_p->cpuid_cores_per_package == 0) {
        info_p->cpuid_cores_per_package = 1;

        /* CPUID reports the L2 size in 1024-byte (KB) quantities */
        info_p->cache_size[L2U] = info_p->cpuid_cache_size * 1024;
        info_p->cache_sharing[L2U] = 1;
        info_p->cache_partitions[L2U] = 1;

        linesizes[L2U] = info_p->cpuid_cache_linesize;
    }

    /*
     * What linesize to publish?  We use the L2 linesize if any,
     * else the L1D.
     */
    if ( linesizes[L2U] )
        info_p->cache_linesize = linesizes[L2U];
    else if (linesizes[L1D])
        info_p->cache_linesize = linesizes[L1D];
    else panic("no linesize");

    /*
     * Extract and publish TLB information from Leaf 2 descriptors.
     */
    for (i = 1; i < sizeof(info_p->cache_info); i++) {
        cpuid_cache_descriptor_t *descp;
        int                      id;
        int                      level;
        int                      page;

        descp = cpuid_leaf2_find(info_p->cache_info[i]);
        if (descp == NULL)
            continue;

        switch (descp->type) {
        case TLB:
            page = (descp->size == SMALL) ? TLB_SMALL : TLB_LARGE;
            /* determine I or D: */
            switch (descp->level) {
            case INST:
                id = TLB_INST;
                break;
            case DATA:
            case DATA0:
            case DATA1:
                id = TLB_DATA;
                break;
            default:
                continue;
            }
            /* determine level: */
            switch (descp->level) {
            case DATA1:
                level = 1;
                break;
            default:
                level = 0;
            }
            info_p->cpuid_tlb[id][page][level] = descp->entries;
            break;
        case STLB:
            info_p->cpuid_stlb = descp->entries;
        }
    }
}

static void
cpuid_set_generic_info(i386_cpu_info_t *info_p)
{
    uint32_t reg[4];
    char     str[128], *p;

    /* do cpuid 0 to get vendor */
    cpuid_fn(0, reg);
    info_p->cpuid_max_basic = reg[eax];
    bcopy((char *)&reg[ebx], &info_p->cpuid_vendor[0], 4); /* ug */
    bcopy((char *)&reg[ecx], &info_p->cpuid_vendor[8], 4);
    bcopy((char *)&reg[edx], &info_p->cpuid_vendor[4], 4);
    info_p->cpuid_vendor[12] = 0;

    /* get extended cpuid results */
    cpuid_fn(0x80000000, reg);
    info_p->cpuid_max_ext = reg[eax];

    /* check to see if we can get brand string */
    if (info_p->cpuid_max_ext >= 0x80000004) {
        /*
         * The brand string is 48 bytes (max), guaranteed to
         * be NUL terminated.
         */
        cpuid_fn(0x80000002, reg);
        bcopy((char *)reg, &str[0], 16);
        cpuid_fn(0x80000003, reg);
        bcopy((char *)reg, &str[16], 16);
        cpuid_fn(0x80000004, reg);
        bcopy((char *)reg, &str[32], 16);
        for (p = str; *p != '\0'; p++) {
            if (*p != ' ') break;
        }
        strlcpy(info_p->cpuid_brand_string,
                p, sizeof(info_p->cpuid_brand_string));

        if (!strncmp(info_p->cpuid_brand_string, CPUID_STRING_UNKNOWN,
                     min(sizeof(info_p->cpuid_brand_string),
                         strlen(CPUID_STRING_UNKNOWN) + 1))) {
            /*
             * This string means we have a firmware-programmable brand string,
             * and the firmware couldn't figure out what sort of CPU we have.
             */
            info_p->cpuid_brand_string[0] = '\0';
        }
    }

    /* Get cache and addressing info. */
    if (info_p->cpuid_max_ext >= 0x80000006) {
        uint32_t assoc;
        cpuid_fn(0x80000006, reg);
        info_p->cpuid_cache_linesize = bitfield32(reg[ecx], 7, 0);
        assoc = bitfield32(reg[ecx], 15, 12);
        /*
         * L2 associativity is encoded, though in an insufficiently
         * descriptive fashion, e.g. 24-way is mapped to 16-way.
         * Represent a fully associative cache as 0xFFFF.
         * Overwritten by associativity as determined via CPUID.4
         * if available.
         */
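        /*
         * Decoded example (illustrative register value): if ECX returned
         * 0x01006040, then the linesize is 0x40 (64 bytes), the
         * associativity code is 0x6 (8-way after the mapping below), and
         * the L2 size field is 0x0100, i.e. 256 KB.
         */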
        if (assoc == 6)
            assoc = 8;
        else if (assoc == 8)
            assoc = 16;
        else if (assoc == 0xF)
            assoc = 0xFFFF;
        info_p->cpuid_cache_L2_associativity = assoc;
        info_p->cpuid_cache_size = bitfield32(reg[ecx], 31, 16);
        cpuid_fn(0x80000008, reg);
        info_p->cpuid_address_bits_physical =
            bitfield32(reg[eax], 7, 0);
        info_p->cpuid_address_bits_virtual =
            bitfield32(reg[eax], 15, 8);
    }

    /*
     * Get the processor signature and decode it, bracketing this with
     * the approved procedure for reading the microcode version number,
     * a.k.a. signature, a.k.a. BIOS ID.
     */
    wrmsr64(MSR_IA32_BIOS_SIGN_ID, 0);
    cpuid_fn(1, reg);
    info_p->cpuid_microcode_version =
        (uint32_t) (rdmsr64(MSR_IA32_BIOS_SIGN_ID) >> 32);
    info_p->cpuid_signature = reg[eax];
    info_p->cpuid_stepping  = bitfield32(reg[eax],  3,  0);
    info_p->cpuid_model     = bitfield32(reg[eax],  7,  4);
    info_p->cpuid_family    = bitfield32(reg[eax], 11,  8);
    info_p->cpuid_type      = bitfield32(reg[eax], 13, 12);
    info_p->cpuid_extmodel  = bitfield32(reg[eax], 19, 16);
    info_p->cpuid_extfamily = bitfield32(reg[eax], 27, 20);
    info_p->cpuid_brand     = bitfield32(reg[ebx],  7,  0);
    info_p->cpuid_features  = quad(reg[ecx], reg[edx]);

    /* Get "processor flag"; necessary for microcode update matching */
    info_p->cpuid_processor_flag = (rdmsr64(MSR_IA32_PLATFORM_ID) >> 50) & 3;

    /* Fold extensions into family/model */
    if (info_p->cpuid_family == 0x0f)
        info_p->cpuid_family += info_p->cpuid_extfamily;
    if (info_p->cpuid_family == 0x0f || info_p->cpuid_family == 0x06)
        info_p->cpuid_model += (info_p->cpuid_extmodel << 4);
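    /*
     * For example, a signature of 0x000206A7 decodes to stepping 7,
     * model 0xA, family 6, extended model 2; the fold above then yields
     * model 0x2A (a Sandy Bridge part).
     */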

    if (info_p->cpuid_features & CPUID_FEATURE_HTT)
        info_p->cpuid_logical_per_package =
            bitfield32(reg[ebx], 23, 16);
    else
        info_p->cpuid_logical_per_package = 1;

    if (info_p->cpuid_max_ext >= 0x80000001) {
        cpuid_fn(0x80000001, reg);
        info_p->cpuid_extfeatures =
            quad(reg[ecx], reg[edx]);
    }

    /* Fold in the Invariant TSC feature bit, if present */
    if (info_p->cpuid_max_ext >= 0x80000007) {
        cpuid_fn(0x80000007, reg);
        info_p->cpuid_extfeatures |=
            reg[edx] & (uint32_t)CPUID_EXTFEATURE_TSCI;
    }

    if (info_p->cpuid_max_basic >= 0x5) {
        cpuid_mwait_leaf_t *cmp = &info_p->cpuid_mwait_leaf;

        /*
         * Extract the Monitor/Mwait Leaf info:
         */
        cpuid_fn(5, reg);
        cmp->linesize_min = reg[eax];
        cmp->linesize_max = reg[ebx];
        cmp->extensions   = reg[ecx];
        cmp->sub_Cstates  = reg[edx];
        info_p->cpuid_mwait_leafp = cmp;
    }

    if (info_p->cpuid_max_basic >= 0x6) {
        cpuid_thermal_leaf_t *ctp = &info_p->cpuid_thermal_leaf;

        /*
         * The thermal and Power Leaf:
         */
        cpuid_fn(6, reg);
        ctp->sensor               = bitfield32(reg[eax], 0, 0);
        ctp->dynamic_acceleration = bitfield32(reg[eax], 1, 1);
        ctp->invariant_APIC_timer = bitfield32(reg[eax], 2, 2);
        ctp->core_power_limits    = bitfield32(reg[eax], 3, 3);
        ctp->fine_grain_clock_mod = bitfield32(reg[eax], 4, 4);
        ctp->package_thermal_intr = bitfield32(reg[eax], 5, 5);
        ctp->thresholds           = bitfield32(reg[ebx], 3, 0);
        ctp->ACNT_MCNT            = bitfield32(reg[ecx], 0, 0);
        ctp->hardware_feedback    = bitfield32(reg[ecx], 1, 1);
        ctp->energy_policy        = bitfield32(reg[ecx], 2, 2);
        info_p->cpuid_thermal_leafp = ctp;
    }

    if (info_p->cpuid_max_basic >= 0xa) {
        cpuid_arch_perf_leaf_t *capp = &info_p->cpuid_arch_perf_leaf;

        /*
         * Architectural Performance Monitoring Leaf:
         */
        cpuid_fn(0xa, reg);
        capp->version       = bitfield32(reg[eax],  7,  0);
        capp->number        = bitfield32(reg[eax], 15,  8);
        capp->width         = bitfield32(reg[eax], 23, 16);
        capp->events_number = bitfield32(reg[eax], 31, 24);
        capp->events        = reg[ebx];
        capp->fixed_number  = bitfield32(reg[edx],  4,  0);
        capp->fixed_width   = bitfield32(reg[edx], 12,  5);
        info_p->cpuid_arch_perf_leafp = capp;
    }

    if (info_p->cpuid_max_basic >= 0xd) {
        cpuid_xsave_leaf_t *xsp = &info_p->cpuid_xsave_leaf;
        /*
         * XSAVE Features:
         */
        cpuid_fn(0xd, info_p->cpuid_xsave_leaf.extended_state);
        info_p->cpuid_xsave_leafp = xsp;
    }

    return;
}

static uint32_t
cpuid_set_cpufamily(i386_cpu_info_t *info_p)
{
    uint32_t cpufamily = CPUFAMILY_UNKNOWN;

    switch (info_p->cpuid_family) {
    case 6:
        switch (info_p->cpuid_model) {
#if CONFIG_YONAH
        case 14:
            cpufamily = CPUFAMILY_INTEL_YONAH;
            break;
#endif
        case 15:
            cpufamily = CPUFAMILY_INTEL_MEROM;
            break;
        case 23:
            cpufamily = CPUFAMILY_INTEL_PENRYN;
            break;
        case CPUID_MODEL_NEHALEM:
        case CPUID_MODEL_FIELDS:
        case CPUID_MODEL_DALES:
        case CPUID_MODEL_NEHALEM_EX:
            cpufamily = CPUFAMILY_INTEL_NEHALEM;
            break;
        case CPUID_MODEL_DALES_32NM:
        case CPUID_MODEL_WESTMERE:
        case CPUID_MODEL_WESTMERE_EX:
            cpufamily = CPUFAMILY_INTEL_WESTMERE;
            break;
        case CPUID_MODEL_SANDYBRIDGE:
        case CPUID_MODEL_JAKETOWN:
            cpufamily = CPUFAMILY_INTEL_SANDYBRIDGE;
            break;
        }
        break;
    }

    info_p->cpuid_cpufamily = cpufamily;
    return cpufamily;
}
/*
 * Must be invoked either when executing single threaded, or with
 * independent synchronization.
 */
void
cpuid_set_info(void)
{
    i386_cpu_info_t *info_p = &cpuid_cpu_info;

    bzero((void *)info_p, sizeof(cpuid_cpu_info));

    cpuid_set_generic_info(info_p);

    /* verify we are running on a supported CPU */
    if ((strncmp(CPUID_VID_INTEL, info_p->cpuid_vendor,
                 min(strlen(CPUID_STRING_UNKNOWN) + 1,
                     sizeof(info_p->cpuid_vendor)))) ||
        (cpuid_set_cpufamily(info_p) == CPUFAMILY_UNKNOWN))
        panic("Unsupported CPU");

    info_p->cpuid_cpu_type = CPU_TYPE_X86;
    info_p->cpuid_cpu_subtype = CPU_SUBTYPE_X86_ARCH1;
    /* Must be invoked after set_generic_info */
    cpuid_set_cache_info(&cpuid_cpu_info);

    /*
     * Find the number of enabled cores and threads
     * (which determines whether SMT/Hyperthreading is active).
     */
    switch (info_p->cpuid_cpufamily) {
    case CPUFAMILY_INTEL_WESTMERE: {
        uint64_t msr = rdmsr64(MSR_CORE_THREAD_COUNT);
        info_p->core_count   = bitfield32((uint32_t)msr, 19, 16);
        info_p->thread_count = bitfield32((uint32_t)msr, 15,  0);
        break;
    }
    case CPUFAMILY_INTEL_SANDYBRIDGE:
    case CPUFAMILY_INTEL_NEHALEM: {
        uint64_t msr = rdmsr64(MSR_CORE_THREAD_COUNT);
        info_p->core_count   = bitfield32((uint32_t)msr, 31, 16);
        info_p->thread_count = bitfield32((uint32_t)msr, 15,  0);
        break;
    }
    }
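    /*
     * For example, a hypothetical MSR_CORE_THREAD_COUNT value of
     * 0x00040008 on a Nehalem-class part would yield core_count = 4 and
     * thread_count = 8 (Hyperthreading enabled).
     */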
    if (info_p->core_count == 0) {
        info_p->core_count   = info_p->cpuid_cores_per_package;
        info_p->thread_count = info_p->cpuid_logical_per_package;
    }

    cpuid_cpu_info.cpuid_model_string = ""; /* deprecated */
}

static struct {
    uint64_t    mask;
    const char  *name;
} feature_map[] = {
    {CPUID_FEATURE_FPU, "FPU"},
    {CPUID_FEATURE_VME, "VME"},
    {CPUID_FEATURE_DE, "DE"},
    {CPUID_FEATURE_PSE, "PSE"},
    {CPUID_FEATURE_TSC, "TSC"},
    {CPUID_FEATURE_MSR, "MSR"},
    {CPUID_FEATURE_PAE, "PAE"},
    {CPUID_FEATURE_MCE, "MCE"},
    {CPUID_FEATURE_CX8, "CX8"},
    {CPUID_FEATURE_APIC, "APIC"},
    {CPUID_FEATURE_SEP, "SEP"},
    {CPUID_FEATURE_MTRR, "MTRR"},
    {CPUID_FEATURE_PGE, "PGE"},
    {CPUID_FEATURE_MCA, "MCA"},
    {CPUID_FEATURE_CMOV, "CMOV"},
    {CPUID_FEATURE_PAT, "PAT"},
    {CPUID_FEATURE_PSE36, "PSE36"},
    {CPUID_FEATURE_PSN, "PSN"},
    {CPUID_FEATURE_CLFSH, "CLFSH"},
    {CPUID_FEATURE_DS, "DS"},
    {CPUID_FEATURE_ACPI, "ACPI"},
    {CPUID_FEATURE_MMX, "MMX"},
    {CPUID_FEATURE_FXSR, "FXSR"},
    {CPUID_FEATURE_SSE, "SSE"},
    {CPUID_FEATURE_SSE2, "SSE2"},
    {CPUID_FEATURE_SS, "SS"},
    {CPUID_FEATURE_HTT, "HTT"},
    {CPUID_FEATURE_TM, "TM"},
    {CPUID_FEATURE_PBE, "PBE"},
    {CPUID_FEATURE_SSE3, "SSE3"},
    {CPUID_FEATURE_PCLMULQDQ, "PCLMULQDQ"},
    {CPUID_FEATURE_DTES64, "DTES64"},
    {CPUID_FEATURE_MONITOR, "MON"},
    {CPUID_FEATURE_DSCPL, "DSCPL"},
    {CPUID_FEATURE_VMX, "VMX"},
    {CPUID_FEATURE_SMX, "SMX"},
    {CPUID_FEATURE_EST, "EST"},
    {CPUID_FEATURE_TM2, "TM2"},
    {CPUID_FEATURE_SSSE3, "SSSE3"},
    {CPUID_FEATURE_CID, "CID"},
    {CPUID_FEATURE_CX16, "CX16"},
    {CPUID_FEATURE_xTPR, "TPR"},
    {CPUID_FEATURE_PDCM, "PDCM"},
    {CPUID_FEATURE_SSE4_1, "SSE4.1"},
    {CPUID_FEATURE_SSE4_2, "SSE4.2"},
    {CPUID_FEATURE_xAPIC, "xAPIC"},
    {CPUID_FEATURE_MOVBE, "MOVBE"},
    {CPUID_FEATURE_POPCNT, "POPCNT"},
    {CPUID_FEATURE_AES, "AES"},
    {CPUID_FEATURE_VMM, "VMM"},
    {CPUID_FEATURE_PCID, "PCID"},
    {CPUID_FEATURE_XSAVE, "XSAVE"},
    {CPUID_FEATURE_OSXSAVE, "OSXSAVE"},
    {CPUID_FEATURE_SEGLIM64, "SEGLIM64"},
    {CPUID_FEATURE_TSCTMR, "TSCTMR"},
    {CPUID_FEATURE_AVX1_0, "AVX1.0"},
    {0, 0}
},
extfeature_map[] = {
    {CPUID_EXTFEATURE_SYSCALL, "SYSCALL"},
    {CPUID_EXTFEATURE_XD, "XD"},
    {CPUID_EXTFEATURE_1GBPAGE, "1GBPAGE"},
    {CPUID_EXTFEATURE_EM64T, "EM64T"},
    {CPUID_EXTFEATURE_LAHF, "LAHF"},
    {CPUID_EXTFEATURE_RDTSCP, "RDTSCP"},
    {CPUID_EXTFEATURE_TSCI, "TSCI"},
    {0, 0}
};

i386_cpu_info_t *
cpuid_info(void)
{
    /* Set up the cpuid_info structure lazily */
    if (cpuid_cpu_infop == NULL) {
        cpuid_set_info();
        cpuid_cpu_infop = &cpuid_cpu_info;
    }
    return cpuid_cpu_infop;
}

char *
cpuid_get_feature_names(uint64_t features, char *buf, unsigned buf_len)
{
    size_t  len = 0;
    char    *p = buf;
    int     i;

    for (i = 0; feature_map[i].mask != 0; i++) {
        if ((features & feature_map[i].mask) == 0)
            continue;
        if (len && ((size_t)(p - buf) < (buf_len - 1)))
            *p++ = ' ';

        len = min(strlen(feature_map[i].name), (size_t)((buf_len - 1) - (p - buf)));
        if (len == 0)
            break;
        bcopy(feature_map[i].name, p, len);
        p += len;
    }
    *p = '\0';
    return buf;
}

char *
cpuid_get_extfeature_names(uint64_t extfeatures, char *buf, unsigned buf_len)
{
    size_t  len = 0;
    char    *p = buf;
    int     i;

    for (i = 0; extfeature_map[i].mask != 0; i++) {
        if ((extfeatures & extfeature_map[i].mask) == 0)
            continue;
        if (len && ((size_t)(p - buf) < (buf_len - 1)))
            *p++ = ' ';
        len = min(strlen(extfeature_map[i].name), (size_t)((buf_len - 1) - (p - buf)));
        if (len == 0)
            break;
        bcopy(extfeature_map[i].name, p, len);
        p += len;
    }
    *p = '\0';
    return buf;
}


void
cpuid_feature_display(
    const char *header)
{
    char buf[256];

    kprintf("%s: %s\n", header,
            cpuid_get_feature_names(cpuid_features(),
                                    buf, sizeof(buf)));
    if (cpuid_features() & CPUID_FEATURE_HTT) {
#define s_if_plural(n)  ((n > 1) ? "s" : "")
        kprintf(" HTT: %d core%s per package;"
                " %d logical cpu%s per package\n",
                cpuid_cpu_info.cpuid_cores_per_package,
                s_if_plural(cpuid_cpu_info.cpuid_cores_per_package),
                cpuid_cpu_info.cpuid_logical_per_package,
                s_if_plural(cpuid_cpu_info.cpuid_logical_per_package));
    }
}

void
cpuid_extfeature_display(
    const char *header)
{
    char buf[256];

    kprintf("%s: %s\n", header,
            cpuid_get_extfeature_names(cpuid_extfeatures(),
                                       buf, sizeof(buf)));
}

void
cpuid_cpu_display(
    const char *header)
{
    if (cpuid_cpu_info.cpuid_brand_string[0] != '\0') {
        kprintf("%s: %s\n", header, cpuid_cpu_info.cpuid_brand_string);
    }
}

unsigned int
cpuid_family(void)
{
    return cpuid_info()->cpuid_family;
}

uint32_t
cpuid_cpufamily(void)
{
    return cpuid_info()->cpuid_cpufamily;
}

cpu_type_t
cpuid_cputype(void)
{
    return cpuid_info()->cpuid_cpu_type;
}

cpu_subtype_t
cpuid_cpusubtype(void)
{
    return cpuid_info()->cpuid_cpu_subtype;
}

uint64_t
cpuid_features(void)
{
    static int  checked = 0;
    char        fpu_arg[20] = { 0 };

    (void) cpuid_info();
    if (!checked) {
        /* check for boot-time fpu limitations */
        if (PE_parse_boot_argn("_fpu", &fpu_arg[0], sizeof(fpu_arg))) {
            printf("limiting fpu features to: %s\n", fpu_arg);
            if (!strncmp("387", fpu_arg, sizeof("387")) ||
                !strncmp("mmx", fpu_arg, sizeof("mmx"))) {
                printf("no sse or sse2\n");
                cpuid_cpu_info.cpuid_features &=
                    ~(CPUID_FEATURE_SSE | CPUID_FEATURE_SSE2 | CPUID_FEATURE_FXSR);
            } else if (!strncmp("sse", fpu_arg, sizeof("sse"))) {
                printf("no sse2\n");
                cpuid_cpu_info.cpuid_features &= ~(CPUID_FEATURE_SSE2);
            }
        }
        checked = 1;
    }
    return cpuid_cpu_info.cpuid_features;
}

uint64_t
cpuid_extfeatures(void)
{
    return cpuid_info()->cpuid_extfeatures;
}


#if MACH_KDB

/*
 * Display the raw cpuid data for all of the basic and extended leaves.
 */
void
db_cpuid(__unused db_expr_t addr,
         __unused int have_addr,
         __unused db_expr_t count,
         __unused char *modif)
{
    uint32_t i, mid;
    uint32_t cpid[4];

    do_cpuid(0, cpid);          /* Get the first cpuid, which is the number
                                 * of basic ids */
    db_printf("%08X - %08X %08X %08X %08X\n",
              0, cpid[eax], cpid[ebx], cpid[ecx], cpid[edx]);

    mid = cpid[eax];            /* Set the number */
    for (i = 1; i <= mid; i++) {        /* Dump 'em out */
        do_cpuid(i, cpid);              /* Get the next */
        db_printf("%08X - %08X %08X %08X %08X\n",
                  i, cpid[eax], cpid[ebx], cpid[ecx], cpid[edx]);
    }
    db_printf("\n");

    do_cpuid(0x80000000, cpid); /* Get the first extended cpuid, which
                                 * is the number of extended ids */
    db_printf("%08X - %08X %08X %08X %08X\n",
              0x80000000, cpid[eax], cpid[ebx], cpid[ecx], cpid[edx]);

    mid = cpid[eax];            /* Set the number */
    for (i = 0x80000001; i <= mid; i++) {       /* Dump 'em out */
        do_cpuid(i, cpid);                      /* Get the next */
        db_printf("%08X - %08X %08X %08X %08X\n",
                  i, cpid[eax], cpid[ebx], cpid[ecx], cpid[edx]);
    }
}

#endif