2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
31 #include <platforms.h>
33 #include <vm/vm_page.h>
34 #include <pexpert/pexpert.h>
36 #include <i386/cpuid.h>
38 #include <machine/db_machdep.h>
39 #include <ddb/db_aout.h>
40 #include <ddb/db_access.h>
41 #include <ddb/db_sym.h>
42 #include <ddb/db_variables.h>
43 #include <ddb/db_command.h>
44 #include <ddb/db_output.h>
45 #include <ddb/db_expr.h>
/*
 * Small helper macros.  All macro parameters are parenthesized in the
 * expansion so compound-expression arguments cannot change precedence.
 */
#define min(a,b)	((a) < (b) ? (a) : (b))
/* Join two 32-bit register values into one 64-bit quantity (hi:lo). */
#define quad(hi,lo)	(((uint64_t)(hi)) << 32 | (lo))

/* Only for 32bit values */
#define bit(n)		(1U << (n))
/* Mask covering bits h..l inclusive of a 32-bit value. */
#define bitmask(h,l)	((bit(h)|(bit(h)-1)) & ~(bit(l)-1))
/*
 * Extract bits h..l (inclusive) of x, shifted down to bit 0.
 * Fix: (l) is parenthesized in the shift -- the original ">> l" would
 * mis-expand for a low-bound expression with lower precedence than ">>"
 * (e.g. a conditional expression).
 */
#define bitfield(x,h,l)	((((x) & bitmask(h,l)) >> (l)))
/*
 * Leaf 2 cache descriptor encodings.
 * Reconstructed: the typedef opener and the CACHE/TLB enumerators were
 * missing from this extract, but both are referenced throughout the
 * descriptor table below.
 */
typedef enum {
	_NULL_,		/* NULL (empty) descriptor */
	CACHE,		/* Cache */
	TLB,		/* TLB */
	STLB,		/* Shared second-level unified TLB */
	PREFETCH	/* Prefetch size */
} cpuid_leaf2_desc_type_t;
/*
 * Leaf 2 cache descriptor level/size qualifiers.
 * Reconstructed: the typedef opener and the DATA enumerator were missing
 * from this extract; DATA is referenced by the descriptor table below.
 */
typedef enum {
	NA,		/* Not Applicable */
	FULLY,		/* Fully-associative */
	TRACE,		/* Trace Cache (P4 only) */
	INST,		/* Instruction TLB */
	DATA,		/* Data TLB */
	DATA0,		/* Data TLB, 1st level */
	DATA1,		/* Data TLB, 2nd level */
	L1,		/* L1 (unified) cache */
	L1_INST,	/* L1 Instruction cache */
	L1_DATA,	/* L1 Data cache */
	L2,		/* L2 (unified) cache */
	L3,		/* L3 (unified) cache */
	L2_2LINESECTOR,	/* L2 (unified) cache with 2 lines per sector */
	L3_2LINESECTOR,	/* L3(unified) cache with 2 lines per sector */
	SMALL,		/* Small page TLB */
	LARGE,		/* Large page TLB */
	BOTH		/* Small and Large page TLB */
} cpuid_leaf2_qualifier_t;
/*
 * Decoded meaning of a single CPUID leaf-2 descriptor byte.
 * One table entry below exists per known descriptor code.
 */
typedef struct cpuid_cache_descriptor {
	uint8_t		value;		/* descriptor code */
	uint8_t		type;		/* cpuid_leaf2_desc_type_t */
	uint8_t		level;		/* level of cache/TLB hierarchy */
	uint8_t		ways;		/* wayness of cache */
	uint16_t	size;		/* cachesize or TLB pagesize */
	uint16_t	entries;	/* number of TLB entries or linesize */
} cpuid_cache_descriptor_t;
97 * These multipliers are used to encode 1*K .. 64*M in a 16 bit size field
103 * Intel cache descriptor table:
105 static cpuid_cache_descriptor_t intel_cpuid_leaf2_descriptor_table
[] = {
106 // -------------------------------------------------------
107 // value type level ways size entries
108 // -------------------------------------------------------
109 { 0x00, _NULL_
, NA
, NA
, NA
, NA
},
110 { 0x01, TLB
, INST
, 4, SMALL
, 32 },
111 { 0x02, TLB
, INST
, FULLY
, LARGE
, 2 },
112 { 0x03, TLB
, DATA
, 4, SMALL
, 64 },
113 { 0x04, TLB
, DATA
, 4, LARGE
, 8 },
114 { 0x05, TLB
, DATA1
, 4, LARGE
, 32 },
115 { 0x06, CACHE
, L1_INST
, 4, 8*K
, 32 },
116 { 0x08, CACHE
, L1_INST
, 4, 16*K
, 32 },
117 { 0x09, CACHE
, L1_INST
, 4, 32*K
, 64 },
118 { 0x0A, CACHE
, L1_DATA
, 2, 8*K
, 32 },
119 { 0x0B, TLB
, INST
, 4, LARGE
, 4 },
120 { 0x0C, CACHE
, L1_DATA
, 4, 16*K
, 32 },
121 { 0x0D, CACHE
, L1_DATA
, 4, 16*K
, 64 },
122 { 0x0E, CACHE
, L1_DATA
, 6, 24*K
, 64 },
123 { 0x21, CACHE
, L2
, 8, 256*K
, 64 },
124 { 0x22, CACHE
, L3_2LINESECTOR
, 4, 512*K
, 64 },
125 { 0x23, CACHE
, L3_2LINESECTOR
, 8, 1*M
, 64 },
126 { 0x25, CACHE
, L3_2LINESECTOR
, 8, 2*M
, 64 },
127 { 0x29, CACHE
, L3_2LINESECTOR
, 8, 4*M
, 64 },
128 { 0x2C, CACHE
, L1_DATA
, 8, 32*K
, 64 },
129 { 0x30, CACHE
, L1_INST
, 8, 32*K
, 64 },
130 { 0x40, CACHE
, L2
, NA
, 0, NA
},
131 { 0x41, CACHE
, L2
, 4, 128*K
, 32 },
132 { 0x42, CACHE
, L2
, 4, 256*K
, 32 },
133 { 0x43, CACHE
, L2
, 4, 512*K
, 32 },
134 { 0x44, CACHE
, L2
, 4, 1*M
, 32 },
135 { 0x45, CACHE
, L2
, 4, 2*M
, 32 },
136 { 0x46, CACHE
, L3
, 4, 4*M
, 64 },
137 { 0x47, CACHE
, L3
, 8, 8*M
, 64 },
138 { 0x48, CACHE
, L2
, 12, 3*M
, 64 },
139 { 0x49, CACHE
, L2
, 16, 4*M
, 64 },
140 { 0x4A, CACHE
, L3
, 12, 6*M
, 64 },
141 { 0x4B, CACHE
, L3
, 16, 8*M
, 64 },
142 { 0x4C, CACHE
, L3
, 12, 12*M
, 64 },
143 { 0x4D, CACHE
, L3
, 16, 16*M
, 64 },
144 { 0x4E, CACHE
, L2
, 24, 6*M
, 64 },
145 { 0x4F, TLB
, INST
, NA
, SMALL
, 32 },
146 { 0x50, TLB
, INST
, NA
, BOTH
, 64 },
147 { 0x51, TLB
, INST
, NA
, BOTH
, 128 },
148 { 0x52, TLB
, INST
, NA
, BOTH
, 256 },
149 { 0x55, TLB
, INST
, FULLY
, BOTH
, 7 },
150 { 0x56, TLB
, DATA0
, 4, LARGE
, 16 },
151 { 0x57, TLB
, DATA0
, 4, SMALL
, 16 },
152 { 0x59, TLB
, DATA0
, FULLY
, SMALL
, 16 },
153 { 0x5A, TLB
, DATA0
, 4, LARGE
, 32 },
154 { 0x5B, TLB
, DATA
, NA
, BOTH
, 64 },
155 { 0x5C, TLB
, DATA
, NA
, BOTH
, 128 },
156 { 0x5D, TLB
, DATA
, NA
, BOTH
, 256 },
157 { 0x60, CACHE
, L1
, 16*K
, 8, 64 },
158 { 0x61, CACHE
, L1
, 4, 8*K
, 64 },
159 { 0x62, CACHE
, L1
, 4, 16*K
, 64 },
160 { 0x63, CACHE
, L1
, 4, 32*K
, 64 },
161 { 0x70, CACHE
, TRACE
, 8, 12*K
, NA
},
162 { 0x71, CACHE
, TRACE
, 8, 16*K
, NA
},
163 { 0x72, CACHE
, TRACE
, 8, 32*K
, NA
},
164 { 0x78, CACHE
, L2
, 4, 1*M
, 64 },
165 { 0x79, CACHE
, L2_2LINESECTOR
, 8, 128*K
, 64 },
166 { 0x7A, CACHE
, L2_2LINESECTOR
, 8, 256*K
, 64 },
167 { 0x7B, CACHE
, L2_2LINESECTOR
, 8, 512*K
, 64 },
168 { 0x7C, CACHE
, L2_2LINESECTOR
, 8, 1*M
, 64 },
169 { 0x7D, CACHE
, L2
, 8, 2*M
, 64 },
170 { 0x7F, CACHE
, L2
, 2, 512*K
, 64 },
171 { 0x80, CACHE
, L2
, 8, 512*K
, 64 },
172 { 0x82, CACHE
, L2
, 8, 256*K
, 32 },
173 { 0x83, CACHE
, L2
, 8, 512*K
, 32 },
174 { 0x84, CACHE
, L2
, 8, 1*M
, 32 },
175 { 0x85, CACHE
, L2
, 8, 2*M
, 32 },
176 { 0x86, CACHE
, L2
, 4, 512*K
, 64 },
177 { 0x87, CACHE
, L2
, 8, 1*M
, 64 },
178 { 0xB0, TLB
, INST
, 4, SMALL
, 128 },
179 { 0xB1, TLB
, INST
, 4, LARGE
, 8 },
180 { 0xB2, TLB
, INST
, 4, SMALL
, 64 },
181 { 0xB3, TLB
, DATA
, 4, SMALL
, 128 },
182 { 0xB4, TLB
, DATA1
, 4, SMALL
, 256 },
183 { 0xBA, TLB
, DATA1
, 4, BOTH
, 64 },
184 { 0xCA, STLB
, DATA1
, 4, BOTH
, 512 },
185 { 0xD0, CACHE
, L3
, 4, 512*K
, 64 },
186 { 0xD1, CACHE
, L3
, 4, 1*M
, 64 },
187 { 0xD2, CACHE
, L3
, 4, 2*M
, 64 },
188 { 0xD6, CACHE
, L3
, 8, 1*M
, 64 },
189 { 0xD7, CACHE
, L3
, 8, 2*M
, 64 },
190 { 0xD8, CACHE
, L3
, 8, 4*M
, 64 },
191 { 0xDC, CACHE
, L3
, 12, 1536*K
, 64 },
192 { 0xDD, CACHE
, L3
, 12, 3*M
, 64 },
193 { 0xDE, CACHE
, L3
, 12, 6*M
, 64 },
194 { 0xE2, CACHE
, L3
, 16, 2*M
, 64 },
195 { 0xE3, CACHE
, L3
, 16, 4*M
, 64 },
196 { 0xE4, CACHE
, L3
, 16, 8*M
, 64 },
197 { 0xF0, PREFETCH
, NA
, NA
, 64, NA
},
198 { 0xF1, PREFETCH
, NA
, NA
, 128, NA
}
200 #define INTEL_LEAF2_DESC_NUM (sizeof(intel_cpuid_leaf2_descriptor_table) / \
201 sizeof(cpuid_cache_descriptor_t))
203 static inline cpuid_cache_descriptor_t
*
204 cpuid_leaf2_find(uint8_t value
)
208 for (i
= 0; i
< INTEL_LEAF2_DESC_NUM
; i
++)
209 if (intel_cpuid_leaf2_descriptor_table
[i
].value
== value
)
210 return &intel_cpuid_leaf2_descriptor_table
[i
];
215 * CPU identification routines.
218 static i386_cpu_info_t
*cpuid_cpu_infop
= NULL
;
219 static i386_cpu_info_t cpuid_cpu_info
;
/*
 * _do_cpuid: thin wrapper around the low-level cpuid call.
 * The x86_64 variant calls do_cpuid() directly; the other variant tests
 * cpu_mode_is64bit() first.
 * NOTE(review): this extract is garbled -- the #else/#endif and the body
 * of the 64-bit-capable branch (original lines 226 and 230-236) are
 * missing, so the text below is not compilable as-is.
 */
221 #if defined(__x86_64__)
222 static void _do_cpuid(uint32_t selector
, uint32_t *result
)
224 do_cpuid(selector
, result
);
227 static void _do_cpuid(uint32_t selector
, uint32_t *result
)
229 if (cpu_mode_is64bit()) {
237 do_cpuid(selector
, result
);
/*
 * cpuid_set_cache_info -- Intel-specific.
 * Populates info_p with: the raw leaf-2 cache descriptors (published for
 * KEXTs), per-cache size/sharing/partition data from leaf 4
 * ("deterministic cache parameters"), the published cache linesize, VM
 * page-color geometry, and TLB entry counts decoded from the leaf-2
 * descriptor table via cpuid_leaf2_find().
 * NOTE(review): this extract is garbled -- several original lines
 * (function header, variable declarations, braces, switch cases) are
 * missing, so the text below is not compilable as-is.
 */
242 /* this function is Intel-specific */
244 cpuid_set_cache_info( i386_cpu_info_t
* info_p
)
246 uint32_t cpuid_result
[4];
249 uint32_t linesizes
[LCACHE_MAX
];
252 boolean_t cpuid_deterministic_supported
= FALSE
;
254 bzero( linesizes
, sizeof(linesizes
) );
256 /* Get processor cache descriptor info using leaf 2. We don't use
257 * this internally, but must publish it for KEXTs.
259 _do_cpuid(2, cpuid_result
);
260 for (j
= 0; j
< 4; j
++) {
261 if ((cpuid_result
[j
] >> 31) == 1) /* bit31 is validity */
263 ((uint32_t *) info_p
->cache_info
)[j
] = cpuid_result
[j
];
265 /* first byte gives number of cpuid calls to get all descriptors */
266 for (i
= 1; i
< info_p
->cache_info
[0]; i
++) {
267 if (i
*16 > sizeof(info_p
->cache_info
))
269 _do_cpuid(2, cpuid_result
);
270 for (j
= 0; j
< 4; j
++) {
271 if ((cpuid_result
[j
] >> 31) == 1)
273 ((uint32_t *) info_p
->cache_info
)[4*i
+j
] =
/*
 * Leaf 4 loop: one iteration per cache reported by the processor;
 * runs only when leaf 4 is supported (max basic leaf >= 4).
 */
279 * Get cache info using leaf 4, the "deterministic cache parameters."
280 * Most processors Mac OS X supports implement this flavor of CPUID.
281 * Loop over each cache on the processor.
283 _do_cpuid(0, cpuid_result
);
284 if (cpuid_result
[eax
] >= 4)
285 cpuid_deterministic_supported
= TRUE
;
287 for (index
= 0; cpuid_deterministic_supported
; index
++) {
288 cache_type_t type
= Lnone
;
290 uint32_t cache_level
;
291 uint32_t cache_sharing
;
292 uint32_t cache_linesize
;
294 uint32_t cache_associativity
;
296 uint32_t cache_partitions
;
299 reg
[eax
] = 4; /* cpuid request 4 */
300 reg
[ecx
] = index
; /* index starting at 0 */
302 //kprintf("cpuid(4) index=%d eax=%p\n", index, reg[eax]);
303 cache_type
= bitfield(reg
[eax
], 4, 0);
305 break; /* no more caches */
306 cache_level
= bitfield(reg
[eax
], 7, 5);
307 cache_sharing
= bitfield(reg
[eax
], 25, 14) + 1;
308 info_p
->cpuid_cores_per_package
309 = bitfield(reg
[eax
], 31, 26) + 1;
310 cache_linesize
= bitfield(reg
[ebx
], 11, 0) + 1;
311 cache_partitions
= bitfield(reg
[ebx
], 21, 12) + 1;
312 cache_associativity
= bitfield(reg
[ebx
], 31, 22) + 1;
313 cache_sets
= bitfield(reg
[ecx
], 31, 0) + 1;
315 /* Map type/levels returned by CPUID into cache_type_t */
316 switch (cache_level
) {
318 type
= cache_type
== 1 ? L1D
:
319 cache_type
== 2 ? L1I
:
323 type
= cache_type
== 3 ? L2U
:
327 type
= cache_type
== 3 ? L3U
:
334 /* The total size of a cache is:
335 * ( linesize * sets * associativity * partitions )
338 cache_size
= cache_linesize
* cache_sets
*
339 cache_associativity
* cache_partitions
;
340 info_p
->cache_size
[type
] = cache_size
;
341 info_p
->cache_sharing
[type
] = cache_sharing
;
342 info_p
->cache_partitions
[type
] = cache_partitions
;
343 linesizes
[type
] = cache_linesize
;
345 /* Compute the number of page colors for this cache,
347 * ( linesize * sets ) / page_size
349 * To help visualize this, consider two views of a
350 * physical address. To the cache, it is composed
351 * of a line offset, a set selector, and a tag.
352 * To VM, it is composed of a page offset, a page
353 * color, and other bits in the pageframe number:
355 * +-----------------+---------+--------+
356 * cache: | tag | set | offset |
357 * +-----------------+---------+--------+
359 * +-----------------+-------+----------+
360 * VM: | don't care | color | pg offset|
361 * +-----------------+-------+----------+
363 * The color is those bits in (set+offset) not covered
364 * by the page offset.
366 colors
= ( cache_linesize
* cache_sets
) >> 12;
368 if ( colors
> vm_cache_geometry_colors
)
369 vm_cache_geometry_colors
= colors
;
/*
 * Fallback when leaf 4 yielded nothing: publish the L2 data obtained
 * from extended leaf 0x80000006 (stored in cpuid_cache_* fields).
 */
374 * If deterministic cache parameters are not available, use
377 if (info_p
->cpuid_cores_per_package
== 0) {
378 info_p
->cpuid_cores_per_package
= 1;
380 /* cpuid define in 1024 quantities */
381 info_p
->cache_size
[L2U
] = info_p
->cpuid_cache_size
* 1024;
382 info_p
->cache_sharing
[L2U
] = 1;
383 info_p
->cache_partitions
[L2U
] = 1;
385 linesizes
[L2U
] = info_p
->cpuid_cache_linesize
;
389 * What linesize to publish? We use the L2 linesize if any,
392 if ( linesizes
[L2U
] )
393 info_p
->cache_linesize
= linesizes
[L2U
];
394 else if (linesizes
[L1D
])
395 info_p
->cache_linesize
= linesizes
[L1D
];
396 else panic("no linesize");
399 * Extract and publish TLB information from Leaf 2 descriptors.
401 for (i
= 1; i
< sizeof(info_p
->cache_info
); i
++) {
402 cpuid_cache_descriptor_t
*descp
;
407 descp
= cpuid_leaf2_find(info_p
->cache_info
[i
]);
411 switch (descp
->type
) {
413 page
= (descp
->size
== SMALL
) ? TLB_SMALL
: TLB_LARGE
;
414 /* determine I or D: */
415 switch (descp
->level
) {
427 /* determine level: */
428 switch (descp
->level
) {
435 info_p
->cpuid_tlb
[id
][page
][level
] = descp
->entries
;
438 info_p
->cpuid_stlb
= descp
->entries
;
/*
 * cpuid_set_generic_info:
 * Fills info_p from the basic and extended CPUID leaves: vendor string
 * (leaf 0), brand string (0x80000002..4), cache/address-size info
 * (0x80000006/8), signature decode (leaf 1, with extended family/model
 * folding), feature and extended-feature words, microcode version (MSR),
 * Nehalem core/thread counts (MSR), monitor/mwait (leaf 5), thermal
 * (leaf 6) and architectural performance monitoring (leaf 0xa) data.
 * NOTE(review): this extract is garbled -- the function's return type,
 * several declarations, braces and a few statements are missing, so the
 * text below is not compilable as-is.
 */
444 cpuid_set_generic_info(i386_cpu_info_t
*info_p
)
446 uint32_t cpuid_reg
[4];
449 /* do cpuid 0 to get vendor */
450 _do_cpuid(0, cpuid_reg
);
451 info_p
->cpuid_max_basic
= cpuid_reg
[eax
];
452 bcopy((char *)&cpuid_reg
[ebx
], &info_p
->cpuid_vendor
[0], 4); /* ug */
453 bcopy((char *)&cpuid_reg
[ecx
], &info_p
->cpuid_vendor
[8], 4);
454 bcopy((char *)&cpuid_reg
[edx
], &info_p
->cpuid_vendor
[4], 4);
455 info_p
->cpuid_vendor
[12] = 0;
457 /* get extended cpuid results */
458 _do_cpuid(0x80000000, cpuid_reg
);
459 info_p
->cpuid_max_ext
= cpuid_reg
[eax
];
461 /* check to see if we can get brand string */
462 if (info_p
->cpuid_max_ext
>= 0x80000004) {
464 * The brand string 48 bytes (max), guaranteed to
467 _do_cpuid(0x80000002, cpuid_reg
);
468 bcopy((char *)cpuid_reg
, &str
[0], 16);
469 _do_cpuid(0x80000003, cpuid_reg
);
470 bcopy((char *)cpuid_reg
, &str
[16], 16);
471 _do_cpuid(0x80000004, cpuid_reg
);
472 bcopy((char *)cpuid_reg
, &str
[32], 16);
/* skip leading spaces in the brand string before publishing it */
473 for (p
= str
; *p
!= '\0'; p
++) {
474 if (*p
!= ' ') break;
476 strlcpy(info_p
->cpuid_brand_string
,
477 p
, sizeof(info_p
->cpuid_brand_string
));
479 if (!strncmp(info_p
->cpuid_brand_string
, CPUID_STRING_UNKNOWN
,
480 min(sizeof(info_p
->cpuid_brand_string
),
481 strlen(CPUID_STRING_UNKNOWN
) + 1))) {
483 * This string means we have a firmware-programmable brand string,
484 * and the firmware couldn't figure out what sort of CPU we have.
486 info_p
->cpuid_brand_string
[0] = '\0';
490 /* Get cache and addressing info. */
491 if (info_p
->cpuid_max_ext
>= 0x80000006) {
492 _do_cpuid(0x80000006, cpuid_reg
);
493 info_p
->cpuid_cache_linesize
= bitfield(cpuid_reg
[ecx
], 7, 0);
494 info_p
->cpuid_cache_L2_associativity
=
495 bitfield(cpuid_reg
[ecx
],15,12);
496 info_p
->cpuid_cache_size
= bitfield(cpuid_reg
[ecx
],31,16);
497 _do_cpuid(0x80000008, cpuid_reg
);
498 info_p
->cpuid_address_bits_physical
=
499 bitfield(cpuid_reg
[eax
], 7, 0);
500 info_p
->cpuid_address_bits_virtual
=
501 bitfield(cpuid_reg
[eax
],15, 8);
504 /* get processor signature and decode */
505 _do_cpuid(1, cpuid_reg
);
506 info_p
->cpuid_signature
= cpuid_reg
[eax
];
507 info_p
->cpuid_stepping
= bitfield(cpuid_reg
[eax
], 3, 0);
508 info_p
->cpuid_model
= bitfield(cpuid_reg
[eax
], 7, 4);
509 info_p
->cpuid_family
= bitfield(cpuid_reg
[eax
], 11, 8);
510 info_p
->cpuid_type
= bitfield(cpuid_reg
[eax
], 13, 12);
511 info_p
->cpuid_extmodel
= bitfield(cpuid_reg
[eax
], 19, 16);
512 info_p
->cpuid_extfamily
= bitfield(cpuid_reg
[eax
], 27, 20);
513 info_p
->cpuid_brand
= bitfield(cpuid_reg
[ebx
], 7, 0);
514 info_p
->cpuid_features
= quad(cpuid_reg
[ecx
], cpuid_reg
[edx
]);
516 /* Fold extensions into family/model */
517 if (info_p
->cpuid_family
== 0x0f)
518 info_p
->cpuid_family
+= info_p
->cpuid_extfamily
;
519 if (info_p
->cpuid_family
== 0x0f || info_p
->cpuid_family
== 0x06)
520 info_p
->cpuid_model
+= (info_p
->cpuid_extmodel
<< 4);
522 if (info_p
->cpuid_features
& CPUID_FEATURE_HTT
)
523 info_p
->cpuid_logical_per_package
=
524 bitfield(cpuid_reg
[ebx
], 23, 16);
526 info_p
->cpuid_logical_per_package
= 1;
528 if (info_p
->cpuid_max_ext
>= 0x80000001) {
529 _do_cpuid(0x80000001, cpuid_reg
);
530 info_p
->cpuid_extfeatures
=
531 quad(cpuid_reg
[ecx
], cpuid_reg
[edx
]);
534 /* Fold in the Invariant TSC feature bit, if present */
535 if (info_p
->cpuid_max_ext
>= 0x80000007) {
536 _do_cpuid(0x80000007, cpuid_reg
);
537 info_p
->cpuid_extfeatures
|=
538 cpuid_reg
[edx
] & (uint32_t)CPUID_EXTFEATURE_TSCI
;
541 /* Find the microcode version number a.k.a. signature a.k.a. BIOS ID */
542 info_p
->cpuid_microcode_version
=
543 (uint32_t) (rdmsr64(MSR_IA32_BIOS_SIGN_ID
) >> 32);
545 if (info_p
->cpuid_model
== CPUID_MODEL_NEHALEM
) {
547 * For Nehalem, find the number of enabled cores and threads
548 * (which determines whether SMT/Hyperthreading is active).
550 uint64_t msr_core_thread_count
= rdmsr64(MSR_CORE_THREAD_COUNT
);
551 info_p
->core_count
= bitfield((uint32_t)msr_core_thread_count
, 31, 16);
552 info_p
->thread_count
= bitfield((uint32_t)msr_core_thread_count
, 15, 0);
555 if (info_p
->cpuid_max_basic
>= 0x5) {
557 * Extract the Monitor/Mwait Leaf info:
559 _do_cpuid(5, cpuid_reg
);
560 info_p
->cpuid_mwait_linesize_min
= cpuid_reg
[eax
];
561 info_p
->cpuid_mwait_linesize_max
= cpuid_reg
[ebx
];
562 info_p
->cpuid_mwait_extensions
= cpuid_reg
[ecx
];
563 info_p
->cpuid_mwait_sub_Cstates
= cpuid_reg
[edx
];
566 if (info_p
->cpuid_max_basic
>= 0x6) {
568 * The thermal and Power Leaf:
570 _do_cpuid(6, cpuid_reg
);
571 info_p
->cpuid_thermal_sensor
=
572 bitfield(cpuid_reg
[eax
], 0, 0);
573 info_p
->cpuid_thermal_dynamic_acceleration
=
574 bitfield(cpuid_reg
[eax
], 1, 1);
575 info_p
->cpuid_thermal_thresholds
=
576 bitfield(cpuid_reg
[ebx
], 3, 0);
577 info_p
->cpuid_thermal_ACNT_MCNT
=
578 bitfield(cpuid_reg
[ecx
], 0, 0);
581 if (info_p
->cpuid_max_basic
>= 0xa) {
583 * Architectural Performance Monitoring Leaf:
585 _do_cpuid(0xa, cpuid_reg
);
586 info_p
->cpuid_arch_perf_version
=
587 bitfield(cpuid_reg
[eax
], 7, 0);
588 info_p
->cpuid_arch_perf_number
=
589 bitfield(cpuid_reg
[eax
],15, 8);
590 info_p
->cpuid_arch_perf_width
=
591 bitfield(cpuid_reg
[eax
],23,16);
592 info_p
->cpuid_arch_perf_events_number
=
593 bitfield(cpuid_reg
[eax
],31,24);
594 info_p
->cpuid_arch_perf_events
=
596 info_p
->cpuid_arch_perf_fixed_number
=
597 bitfield(cpuid_reg
[edx
], 4, 0);
598 info_p
->cpuid_arch_perf_fixed_width
=
599 bitfield(cpuid_reg
[edx
],12, 5);
/*
 * Top-level CPUID initialization (function header missing from this
 * extract): zeroes cpuid_cpu_info, fills it via cpuid_set_generic_info()
 * and cpuid_set_cache_info(), panics on unsupported CPUs, and falls back
 * to per-package counts when MSR-derived core/thread counts are absent.
 * NOTE(review): the vendor check bounds the comparison with
 * strlen(CPUID_STRING_UNKNOWN) rather than strlen(CPUID_VID_INTEL) --
 * looks suspicious; verify against the header's definitions.
 */
608 bzero((void *)&cpuid_cpu_info
, sizeof(cpuid_cpu_info
));
610 cpuid_set_generic_info(&cpuid_cpu_info
);
612 /* verify we are running on a supported CPU */
613 if ((strncmp(CPUID_VID_INTEL
, cpuid_cpu_info
.cpuid_vendor
,
614 min(strlen(CPUID_STRING_UNKNOWN
) + 1,
615 sizeof(cpuid_cpu_info
.cpuid_vendor
)))) ||
616 (cpuid_cpu_info
.cpuid_family
!= 6) ||
617 (cpuid_cpu_info
.cpuid_model
< 13))
618 panic("Unsupported CPU");
620 cpuid_cpu_info
.cpuid_cpu_type
= CPU_TYPE_X86
;
621 cpuid_cpu_info
.cpuid_cpu_subtype
= CPU_SUBTYPE_X86_ARCH1
;
623 cpuid_set_cache_info(&cpuid_cpu_info
);
625 if (cpuid_cpu_info
.core_count
== 0) {
626 cpuid_cpu_info
.core_count
=
627 cpuid_cpu_info
.cpuid_cores_per_package
;
628 cpuid_cpu_info
.thread_count
=
629 cpuid_cpu_info
.cpuid_logical_per_package
;
632 cpuid_cpu_info
.cpuid_model_string
= ""; /* deprecated */
/*
 * Tables mapping CPUID feature bits to their display names, consumed by
 * cpuid_get_feature_names() / cpuid_get_extfeature_names() below.
 * NOTE(review): the table declarations and terminators (original lines
 * surrounding these entries) are missing from this extract.
 */
639 {CPUID_FEATURE_FPU
, "FPU",},
640 {CPUID_FEATURE_VME
, "VME",},
641 {CPUID_FEATURE_DE
, "DE",},
642 {CPUID_FEATURE_PSE
, "PSE",},
643 {CPUID_FEATURE_TSC
, "TSC",},
644 {CPUID_FEATURE_MSR
, "MSR",},
645 {CPUID_FEATURE_PAE
, "PAE",},
646 {CPUID_FEATURE_MCE
, "MCE",},
647 {CPUID_FEATURE_CX8
, "CX8",},
648 {CPUID_FEATURE_APIC
, "APIC",},
649 {CPUID_FEATURE_SEP
, "SEP",},
650 {CPUID_FEATURE_MTRR
, "MTRR",},
651 {CPUID_FEATURE_PGE
, "PGE",},
652 {CPUID_FEATURE_MCA
, "MCA",},
653 {CPUID_FEATURE_CMOV
, "CMOV",},
654 {CPUID_FEATURE_PAT
, "PAT",},
655 {CPUID_FEATURE_PSE36
, "PSE36",},
656 {CPUID_FEATURE_PSN
, "PSN",},
657 {CPUID_FEATURE_CLFSH
, "CLFSH",},
658 {CPUID_FEATURE_DS
, "DS",},
659 {CPUID_FEATURE_ACPI
, "ACPI",},
660 {CPUID_FEATURE_MMX
, "MMX",},
661 {CPUID_FEATURE_FXSR
, "FXSR",},
662 {CPUID_FEATURE_SSE
, "SSE",},
663 {CPUID_FEATURE_SSE2
, "SSE2",},
664 {CPUID_FEATURE_SS
, "SS",},
665 {CPUID_FEATURE_HTT
, "HTT",},
666 {CPUID_FEATURE_TM
, "TM",},
667 {CPUID_FEATURE_SSE3
, "SSE3"},
668 {CPUID_FEATURE_MONITOR
, "MON"},
669 {CPUID_FEATURE_DSCPL
, "DSCPL"},
670 {CPUID_FEATURE_VMX
, "VMX"},
671 {CPUID_FEATURE_SMX
, "SMX"},
672 {CPUID_FEATURE_EST
, "EST"},
673 {CPUID_FEATURE_TM2
, "TM2"},
674 {CPUID_FEATURE_SSSE3
, "SSSE3"},
675 {CPUID_FEATURE_CID
, "CID"},
676 {CPUID_FEATURE_CX16
, "CX16"},
677 {CPUID_FEATURE_xTPR
, "TPR"},
678 {CPUID_FEATURE_PDCM
, "PDCM"},
679 {CPUID_FEATURE_SSE4_1
, "SSE4.1"},
680 {CPUID_FEATURE_SSE4_2
, "SSE4.2"},
681 {CPUID_FEATURE_xAPIC
, "xAPIC"},
682 {CPUID_FEATURE_POPCNT
, "POPCNT"},
683 {CPUID_FEATURE_VMM
, "VMM"},
/* extfeature_map entries (leaf 0x80000001) start here: */
687 {CPUID_EXTFEATURE_SYSCALL
, "SYSCALL"},
688 {CPUID_EXTFEATURE_XD
, "XD"},
689 {CPUID_EXTFEATURE_EM64T
, "EM64T"},
690 {CPUID_EXTFEATURE_LAHF
, "LAHF"},
691 {CPUID_EXTFEATURE_RDTSCP
, "RDTSCP"},
692 {CPUID_EXTFEATURE_TSCI
, "TSCI"},
/*
 * cpuid_info: returns a pointer to the lazily-published cpuid_cpu_info.
 * NOTE(review): the function header and part of the initialization body
 * (original lines around 697 and 701-703) are missing from this extract.
 */
699 /* Set-up the cpuid_info structure lazily */
700 if (cpuid_cpu_infop
== NULL
) {
702 cpuid_cpu_infop
= &cpuid_cpu_info
;
704 return cpuid_cpu_infop
;
/*
 * cpuid_get_feature_names: walks feature_map and copies the name of each
 * feature present in `features` into buf, bounded by buf_len.
 * NOTE(review): the declarations of i/len/p and the separator/terminator
 * handling (original lines 709-713, 716-718, 720-721, 723+) are missing
 * from this extract.
 */
708 cpuid_get_feature_names(uint64_t features
, char *buf
, unsigned buf_len
)
714 for (i
= 0; feature_map
[i
].mask
!= 0; i
++) {
715 if ((features
& feature_map
[i
].mask
) == 0)
719 len
= min(strlen(feature_map
[i
].name
), (size_t) ((buf_len
-1) - (p
-buf
)));
722 bcopy(feature_map
[i
].name
, p
, len
);
/*
 * cpuid_get_extfeature_names: same as cpuid_get_feature_names() but for
 * the extended-feature word and extfeature_map.
 * NOTE(review): declarations and separator/terminator handling (original
 * lines 731-735, 738-740, 742-743, 745+) are missing from this extract.
 */
730 cpuid_get_extfeature_names(uint64_t extfeatures
, char *buf
, unsigned buf_len
)
736 for (i
= 0; extfeature_map
[i
].mask
!= 0; i
++) {
737 if ((extfeatures
& extfeature_map
[i
].mask
) == 0)
741 len
= min(strlen(extfeature_map
[i
].name
), (size_t) ((buf_len
-1)-(p
-buf
)));
744 bcopy(extfeature_map
[i
].name
, p
, len
);
/*
 * cpuid_feature_display: kprintf the decoded feature names under the
 * given header, plus HTT core/logical counts when HTT is present.
 * NOTE(review): parameter list, locals and some call arguments (original
 * lines 754-757, 760) are missing from this extract; also note that the
 * s_if_plural() macro does not parenthesize its argument.
 */
753 cpuid_feature_display(
758 kprintf("%s: %s\n", header
,
759 cpuid_get_feature_names(cpuid_features(),
761 if (cpuid_features() & CPUID_FEATURE_HTT
) {
762 #define s_if_plural(n) ((n > 1) ? "s" : "")
763 kprintf(" HTT: %d core%s per package;"
764 " %d logical cpu%s per package\n",
765 cpuid_cpu_info
.cpuid_cores_per_package
,
766 s_if_plural(cpuid_cpu_info
.cpuid_cores_per_package
),
767 cpuid_cpu_info
.cpuid_logical_per_package
,
768 s_if_plural(cpuid_cpu_info
.cpuid_logical_per_package
));
/*
 * cpuid_extfeature_display: kprintf the decoded extended-feature names;
 * the fragment starting at original line 787 (presumably the brand-string
 * display routine -- its header is missing) prints the brand string when
 * one was captured.
 * NOTE(review): parameter lists and several surrounding lines (original
 * 774-777, 780-786) are missing from this extract.
 */
773 cpuid_extfeature_display(
778 kprintf("%s: %s\n", header
,
779 cpuid_get_extfeature_names(cpuid_extfeatures(),
787 if (cpuid_cpu_info
.cpuid_brand_string
[0] != '\0') {
788 kprintf("%s: %s\n", header
, cpuid_cpu_info
.cpuid_brand_string
);
/*
 * Accessor fragments: cpuid_family / cpuid_cputype / cpuid_cpusubtype
 * return fields of the lazily-built cpuid_info(); cpuid_features()
 * additionally honors a one-time "_fpu" boot-arg that masks SSE/SSE2/
 * FXSR bits; cpuid_extfeatures() returns the extended-feature word.
 * NOTE(review): the function headers and several closing braces
 * (original lines 793-794, 799-800, 806, 811-817, 825-830, 832-836) are
 * missing from this extract.
 */
795 return cpuid_info()->cpuid_family
;
801 return cpuid_info()->cpuid_cpu_type
;
805 cpuid_cpusubtype(void)
807 return cpuid_info()->cpuid_cpu_subtype
;
813 static int checked
= 0;
814 char fpu_arg
[20] = { 0 };
818 /* check for boot-time fpu limitations */
819 if (PE_parse_boot_argn("_fpu", &fpu_arg
[0], sizeof (fpu_arg
))) {
820 printf("limiting fpu features to: %s\n", fpu_arg
);
821 if (!strncmp("387", fpu_arg
, sizeof("387")) || !strncmp("mmx", fpu_arg
, sizeof("mmx"))) {
822 printf("no sse or sse2\n");
823 cpuid_cpu_info
.cpuid_features
&= ~(CPUID_FEATURE_SSE
| CPUID_FEATURE_SSE2
| CPUID_FEATURE_FXSR
);
824 } else if (!strncmp("sse", fpu_arg
, sizeof("sse"))) {
826 cpuid_cpu_info
.cpuid_features
&= ~(CPUID_FEATURE_SSE2
);
831 return cpuid_cpu_info
.cpuid_features
;
835 cpuid_extfeatures(void)
837 return cpuid_info()->cpuid_extfeatures
;
849 db_cpuid(__unused db_expr_t addr
,
850 __unused
int have_addr
,
851 __unused db_expr_t count
,
852 __unused
char *modif
)
858 do_cpuid(0, cpid
); /* Get the first cpuid which is the number of
860 db_printf("%08X - %08X %08X %08X %08X\n",
861 0, cpid
[eax
], cpid
[ebx
], cpid
[ecx
], cpid
[edx
]);
863 mid
= cpid
[eax
]; /* Set the number */
864 for (i
= 1; i
<= mid
; i
++) { /* Dump 'em out */
865 do_cpuid(i
, cpid
); /* Get the next */
866 db_printf("%08X - %08X %08X %08X %08X\n",
867 i
, cpid
[eax
], cpid
[ebx
], cpid
[ecx
], cpid
[edx
]);
871 do_cpuid(0x80000000, cpid
); /* Get the first extended cpuid which
872 * is the number of extended ids */
873 db_printf("%08X - %08X %08X %08X %08X\n",
874 0x80000000, cpid
[eax
], cpid
[ebx
], cpid
[ecx
], cpid
[edx
]);
876 mid
= cpid
[eax
]; /* Set the number */
877 for (i
= 0x80000001; i
<= mid
; i
++) { /* Dump 'em out */
878 do_cpuid(i
, cpid
); /* Get the next */
879 db_printf("%08X - %08X %08X %08X %08X\n",
880 i
, cpid
[eax
], cpid
[ebx
], cpid
[ecx
], cpid
[edx
]);