/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
31 #include <platforms.h>
33 #include <vm/vm_page.h>
34 #include <pexpert/pexpert.h>
36 #include <i386/cpuid.h>
38 #include <machine/db_machdep.h>
39 #include <ddb/db_aout.h>
40 #include <ddb/db_access.h>
41 #include <ddb/db_sym.h>
42 #include <ddb/db_variables.h>
43 #include <ddb/db_command.h>
44 #include <ddb/db_output.h>
45 #include <ddb/db_expr.h>
/*
 * Small helper macros.
 *
 * NOTE: min() evaluates both arguments twice; do not pass expressions
 * with side effects.
 */
#define min(a,b) ((a) < (b) ? (a) : (b))

/*
 * Assemble a 64-bit value from two 32-bit halves: hi:lo.
 * The (uint32_t) cast of (lo) prevents sign-extension from corrupting
 * the high half if a signed int is passed.
 */
#define quad(hi,lo)	(((uint64_t)(hi)) << 32 | ((uint32_t)(lo)))

/* Only for 32bit values */
#define bit32(n)		(1U << (n))
/* Mask covering bits h..l inclusive; requires 0 <= l <= h <= 31. */
#define bitmask32(h,l)		((bit32(h)|(bit32(h)-1)) & ~(bit32(l)-1))
/*
 * Extract bits h..l of x, right-justified.  (l) is parenthesized in the
 * shift so compound arguments (e.g. ternary expressions) parse correctly.
 */
#define bitfield32(x,h,l)	((((x) & bitmask32(h,l)) >> (l)))
57 * Leaf 2 cache descriptor encodings.
60 _NULL_
, /* NULL (empty) descriptor */
63 STLB
, /* Shared second-level unified TLB */
64 PREFETCH
/* Prefetch size */
65 } cpuid_leaf2_desc_type_t
;
68 NA
, /* Not Applicable */
69 FULLY
, /* Fully-associative */
70 TRACE
, /* Trace Cache (P4 only) */
71 INST
, /* Instruction TLB */
73 DATA0
, /* Data TLB, 1st level */
74 DATA1
, /* Data TLB, 2nd level */
75 L1
, /* L1 (unified) cache */
76 L1_INST
, /* L1 Instruction cache */
77 L1_DATA
, /* L1 Data cache */
78 L2
, /* L2 (unified) cache */
79 L3
, /* L3 (unified) cache */
80 L2_2LINESECTOR
, /* L2 (unified) cache with 2 lines per sector */
81 L3_2LINESECTOR
, /* L3(unified) cache with 2 lines per sector */
82 SMALL
, /* Small page TLB */
83 LARGE
, /* Large page TLB */
84 BOTH
/* Small and Large page TLB */
85 } cpuid_leaf2_qualifier_t
;
/*
 * One decoded CPUID leaf-2 cache/TLB descriptor.  Each record maps a raw
 * one-byte descriptor code to its meaning; see the Intel descriptor table
 * below.  Enum-typed fields are stored in narrow integers to keep the
 * table compact.
 */
typedef struct cpuid_cache_descriptor {
	uint8_t		value;		/* descriptor code (raw leaf-2 byte) */
	uint8_t		type;		/* cpuid_leaf2_desc_type_t */
	uint8_t		level;		/* level of cache/TLB hierarchy */
	uint8_t		ways;		/* wayness of cache */
	uint16_t	size;		/* cachesize or TLB pagesize */
	uint16_t	entries;	/* number of TLB entries or linesize */
} cpuid_cache_descriptor_t;
97 * These multipliers are used to encode 1*K .. 64*M in a 16 bit size field
103 * Intel cache descriptor table:
105 static cpuid_cache_descriptor_t intel_cpuid_leaf2_descriptor_table
[] = {
106 // -------------------------------------------------------
107 // value type level ways size entries
108 // -------------------------------------------------------
109 { 0x00, _NULL_
, NA
, NA
, NA
, NA
},
110 { 0x01, TLB
, INST
, 4, SMALL
, 32 },
111 { 0x02, TLB
, INST
, FULLY
, LARGE
, 2 },
112 { 0x03, TLB
, DATA
, 4, SMALL
, 64 },
113 { 0x04, TLB
, DATA
, 4, LARGE
, 8 },
114 { 0x05, TLB
, DATA1
, 4, LARGE
, 32 },
115 { 0x06, CACHE
, L1_INST
, 4, 8*K
, 32 },
116 { 0x08, CACHE
, L1_INST
, 4, 16*K
, 32 },
117 { 0x09, CACHE
, L1_INST
, 4, 32*K
, 64 },
118 { 0x0A, CACHE
, L1_DATA
, 2, 8*K
, 32 },
119 { 0x0B, TLB
, INST
, 4, LARGE
, 4 },
120 { 0x0C, CACHE
, L1_DATA
, 4, 16*K
, 32 },
121 { 0x0D, CACHE
, L1_DATA
, 4, 16*K
, 64 },
122 { 0x0E, CACHE
, L1_DATA
, 6, 24*K
, 64 },
123 { 0x21, CACHE
, L2
, 8, 256*K
, 64 },
124 { 0x22, CACHE
, L3_2LINESECTOR
, 4, 512*K
, 64 },
125 { 0x23, CACHE
, L3_2LINESECTOR
, 8, 1*M
, 64 },
126 { 0x25, CACHE
, L3_2LINESECTOR
, 8, 2*M
, 64 },
127 { 0x29, CACHE
, L3_2LINESECTOR
, 8, 4*M
, 64 },
128 { 0x2C, CACHE
, L1_DATA
, 8, 32*K
, 64 },
129 { 0x30, CACHE
, L1_INST
, 8, 32*K
, 64 },
130 { 0x40, CACHE
, L2
, NA
, 0, NA
},
131 { 0x41, CACHE
, L2
, 4, 128*K
, 32 },
132 { 0x42, CACHE
, L2
, 4, 256*K
, 32 },
133 { 0x43, CACHE
, L2
, 4, 512*K
, 32 },
134 { 0x44, CACHE
, L2
, 4, 1*M
, 32 },
135 { 0x45, CACHE
, L2
, 4, 2*M
, 32 },
136 { 0x46, CACHE
, L3
, 4, 4*M
, 64 },
137 { 0x47, CACHE
, L3
, 8, 8*M
, 64 },
138 { 0x48, CACHE
, L2
, 12, 3*M
, 64 },
139 { 0x49, CACHE
, L2
, 16, 4*M
, 64 },
140 { 0x4A, CACHE
, L3
, 12, 6*M
, 64 },
141 { 0x4B, CACHE
, L3
, 16, 8*M
, 64 },
142 { 0x4C, CACHE
, L3
, 12, 12*M
, 64 },
143 { 0x4D, CACHE
, L3
, 16, 16*M
, 64 },
144 { 0x4E, CACHE
, L2
, 24, 6*M
, 64 },
145 { 0x4F, TLB
, INST
, NA
, SMALL
, 32 },
146 { 0x50, TLB
, INST
, NA
, BOTH
, 64 },
147 { 0x51, TLB
, INST
, NA
, BOTH
, 128 },
148 { 0x52, TLB
, INST
, NA
, BOTH
, 256 },
149 { 0x55, TLB
, INST
, FULLY
, BOTH
, 7 },
150 { 0x56, TLB
, DATA0
, 4, LARGE
, 16 },
151 { 0x57, TLB
, DATA0
, 4, SMALL
, 16 },
152 { 0x59, TLB
, DATA0
, FULLY
, SMALL
, 16 },
153 { 0x5A, TLB
, DATA0
, 4, LARGE
, 32 },
154 { 0x5B, TLB
, DATA
, NA
, BOTH
, 64 },
155 { 0x5C, TLB
, DATA
, NA
, BOTH
, 128 },
156 { 0x5D, TLB
, DATA
, NA
, BOTH
, 256 },
157 { 0x60, CACHE
, L1
, 16*K
, 8, 64 },
158 { 0x61, CACHE
, L1
, 4, 8*K
, 64 },
159 { 0x62, CACHE
, L1
, 4, 16*K
, 64 },
160 { 0x63, CACHE
, L1
, 4, 32*K
, 64 },
161 { 0x70, CACHE
, TRACE
, 8, 12*K
, NA
},
162 { 0x71, CACHE
, TRACE
, 8, 16*K
, NA
},
163 { 0x72, CACHE
, TRACE
, 8, 32*K
, NA
},
164 { 0x78, CACHE
, L2
, 4, 1*M
, 64 },
165 { 0x79, CACHE
, L2_2LINESECTOR
, 8, 128*K
, 64 },
166 { 0x7A, CACHE
, L2_2LINESECTOR
, 8, 256*K
, 64 },
167 { 0x7B, CACHE
, L2_2LINESECTOR
, 8, 512*K
, 64 },
168 { 0x7C, CACHE
, L2_2LINESECTOR
, 8, 1*M
, 64 },
169 { 0x7D, CACHE
, L2
, 8, 2*M
, 64 },
170 { 0x7F, CACHE
, L2
, 2, 512*K
, 64 },
171 { 0x80, CACHE
, L2
, 8, 512*K
, 64 },
172 { 0x82, CACHE
, L2
, 8, 256*K
, 32 },
173 { 0x83, CACHE
, L2
, 8, 512*K
, 32 },
174 { 0x84, CACHE
, L2
, 8, 1*M
, 32 },
175 { 0x85, CACHE
, L2
, 8, 2*M
, 32 },
176 { 0x86, CACHE
, L2
, 4, 512*K
, 64 },
177 { 0x87, CACHE
, L2
, 8, 1*M
, 64 },
178 { 0xB0, TLB
, INST
, 4, SMALL
, 128 },
179 { 0xB1, TLB
, INST
, 4, LARGE
, 8 },
180 { 0xB2, TLB
, INST
, 4, SMALL
, 64 },
181 { 0xB3, TLB
, DATA
, 4, SMALL
, 128 },
182 { 0xB4, TLB
, DATA1
, 4, SMALL
, 256 },
183 { 0xBA, TLB
, DATA1
, 4, BOTH
, 64 },
184 { 0xCA, STLB
, DATA1
, 4, BOTH
, 512 },
185 { 0xD0, CACHE
, L3
, 4, 512*K
, 64 },
186 { 0xD1, CACHE
, L3
, 4, 1*M
, 64 },
187 { 0xD2, CACHE
, L3
, 4, 2*M
, 64 },
188 { 0xD3, CACHE
, L3
, 4, 4*M
, 64 },
189 { 0xD4, CACHE
, L3
, 4, 8*M
, 64 },
190 { 0xD6, CACHE
, L3
, 8, 1*M
, 64 },
191 { 0xD7, CACHE
, L3
, 8, 2*M
, 64 },
192 { 0xD8, CACHE
, L3
, 8, 4*M
, 64 },
193 { 0xD9, CACHE
, L3
, 8, 8*M
, 64 },
194 { 0xDA, CACHE
, L3
, 8, 12*M
, 64 },
195 { 0xDC, CACHE
, L3
, 12, 1536*K
, 64 },
196 { 0xDD, CACHE
, L3
, 12, 3*M
, 64 },
197 { 0xDE, CACHE
, L3
, 12, 6*M
, 64 },
198 { 0xDF, CACHE
, L3
, 12, 12*M
, 64 },
199 { 0xE0, CACHE
, L3
, 12, 18*M
, 64 },
200 { 0xE2, CACHE
, L3
, 16, 2*M
, 64 },
201 { 0xE3, CACHE
, L3
, 16, 4*M
, 64 },
202 { 0xE4, CACHE
, L3
, 16, 8*M
, 64 },
203 { 0xE5, CACHE
, L3
, 16, 16*M
, 64 },
204 { 0xE6, CACHE
, L3
, 16, 24*M
, 64 },
205 { 0xF0, PREFETCH
, NA
, NA
, 64, NA
},
206 { 0xF1, PREFETCH
, NA
, NA
, 128, NA
}
208 #define INTEL_LEAF2_DESC_NUM (sizeof(intel_cpuid_leaf2_descriptor_table) / \
209 sizeof(cpuid_cache_descriptor_t))
211 static inline cpuid_cache_descriptor_t
*
212 cpuid_leaf2_find(uint8_t value
)
216 for (i
= 0; i
< INTEL_LEAF2_DESC_NUM
; i
++)
217 if (intel_cpuid_leaf2_descriptor_table
[i
].value
== value
)
218 return &intel_cpuid_leaf2_descriptor_table
[i
];
223 * CPU identification routines.
226 static i386_cpu_info_t
*cpuid_cpu_infop
= NULL
;
227 static i386_cpu_info_t cpuid_cpu_info
;
229 #if defined(__x86_64__)
230 static void cpuid_fn(uint32_t selector
, uint32_t *result
)
232 do_cpuid(selector
, result
);
235 static void cpuid_fn(uint32_t selector
, uint32_t *result
)
237 if (cpu_mode_is64bit()) {
245 do_cpuid(selector
, result
);
250 /* this function is Intel-specific */
252 cpuid_set_cache_info( i386_cpu_info_t
* info_p
)
254 uint32_t cpuid_result
[4];
257 uint32_t linesizes
[LCACHE_MAX
];
260 boolean_t cpuid_deterministic_supported
= FALSE
;
262 bzero( linesizes
, sizeof(linesizes
) );
264 /* Get processor cache descriptor info using leaf 2. We don't use
265 * this internally, but must publish it for KEXTs.
267 cpuid_fn(2, cpuid_result
);
268 for (j
= 0; j
< 4; j
++) {
269 if ((cpuid_result
[j
] >> 31) == 1) /* bit31 is validity */
271 ((uint32_t *) info_p
->cache_info
)[j
] = cpuid_result
[j
];
273 /* first byte gives number of cpuid calls to get all descriptors */
274 for (i
= 1; i
< info_p
->cache_info
[0]; i
++) {
275 if (i
*16 > sizeof(info_p
->cache_info
))
277 cpuid_fn(2, cpuid_result
);
278 for (j
= 0; j
< 4; j
++) {
279 if ((cpuid_result
[j
] >> 31) == 1)
281 ((uint32_t *) info_p
->cache_info
)[4*i
+j
] =
287 * Get cache info using leaf 4, the "deterministic cache parameters."
288 * Most processors Mac OS X supports implement this flavor of CPUID.
289 * Loop over each cache on the processor.
291 cpuid_fn(0, cpuid_result
);
292 if (cpuid_result
[eax
] >= 4)
293 cpuid_deterministic_supported
= TRUE
;
295 for (index
= 0; cpuid_deterministic_supported
; index
++) {
296 cache_type_t type
= Lnone
;
298 uint32_t cache_level
;
299 uint32_t cache_sharing
;
300 uint32_t cache_linesize
;
302 uint32_t cache_associativity
;
304 uint32_t cache_partitions
;
307 reg
[eax
] = 4; /* cpuid request 4 */
308 reg
[ecx
] = index
; /* index starting at 0 */
310 //kprintf("cpuid(4) index=%d eax=%p\n", index, reg[eax]);
311 cache_type
= bitfield32(reg
[eax
], 4, 0);
313 break; /* no more caches */
314 cache_level
= bitfield32(reg
[eax
], 7, 5);
315 cache_sharing
= bitfield32(reg
[eax
], 25, 14) + 1;
316 info_p
->cpuid_cores_per_package
317 = bitfield32(reg
[eax
], 31, 26) + 1;
318 cache_linesize
= bitfield32(reg
[ebx
], 11, 0) + 1;
319 cache_partitions
= bitfield32(reg
[ebx
], 21, 12) + 1;
320 cache_associativity
= bitfield32(reg
[ebx
], 31, 22) + 1;
321 cache_sets
= bitfield32(reg
[ecx
], 31, 0) + 1;
323 /* Map type/levels returned by CPUID into cache_type_t */
324 switch (cache_level
) {
326 type
= cache_type
== 1 ? L1D
:
327 cache_type
== 2 ? L1I
:
331 type
= cache_type
== 3 ? L2U
:
335 type
= cache_type
== 3 ? L3U
:
342 /* The total size of a cache is:
343 * ( linesize * sets * associativity * partitions )
346 cache_size
= cache_linesize
* cache_sets
*
347 cache_associativity
* cache_partitions
;
348 info_p
->cache_size
[type
] = cache_size
;
349 info_p
->cache_sharing
[type
] = cache_sharing
;
350 info_p
->cache_partitions
[type
] = cache_partitions
;
351 linesizes
[type
] = cache_linesize
;
353 /* Compute the number of page colors for this cache,
355 * ( linesize * sets ) / page_size
357 * To help visualize this, consider two views of a
358 * physical address. To the cache, it is composed
359 * of a line offset, a set selector, and a tag.
360 * To VM, it is composed of a page offset, a page
361 * color, and other bits in the pageframe number:
363 * +-----------------+---------+--------+
364 * cache: | tag | set | offset |
365 * +-----------------+---------+--------+
367 * +-----------------+-------+----------+
368 * VM: | don't care | color | pg offset|
369 * +-----------------+-------+----------+
371 * The color is those bits in (set+offset) not covered
372 * by the page offset.
374 colors
= ( cache_linesize
* cache_sets
) >> 12;
376 if ( colors
> vm_cache_geometry_colors
)
377 vm_cache_geometry_colors
= colors
;
382 * If deterministic cache parameters are not available, use
385 if (info_p
->cpuid_cores_per_package
== 0) {
386 info_p
->cpuid_cores_per_package
= 1;
388 /* cpuid define in 1024 quantities */
389 info_p
->cache_size
[L2U
] = info_p
->cpuid_cache_size
* 1024;
390 info_p
->cache_sharing
[L2U
] = 1;
391 info_p
->cache_partitions
[L2U
] = 1;
393 linesizes
[L2U
] = info_p
->cpuid_cache_linesize
;
397 * What linesize to publish? We use the L2 linesize if any,
400 if ( linesizes
[L2U
] )
401 info_p
->cache_linesize
= linesizes
[L2U
];
402 else if (linesizes
[L1D
])
403 info_p
->cache_linesize
= linesizes
[L1D
];
404 else panic("no linesize");
407 * Extract and publish TLB information from Leaf 2 descriptors.
409 for (i
= 1; i
< sizeof(info_p
->cache_info
); i
++) {
410 cpuid_cache_descriptor_t
*descp
;
415 descp
= cpuid_leaf2_find(info_p
->cache_info
[i
]);
419 switch (descp
->type
) {
421 page
= (descp
->size
== SMALL
) ? TLB_SMALL
: TLB_LARGE
;
422 /* determine I or D: */
423 switch (descp
->level
) {
435 /* determine level: */
436 switch (descp
->level
) {
443 info_p
->cpuid_tlb
[id
][page
][level
] = descp
->entries
;
446 info_p
->cpuid_stlb
= descp
->entries
;
452 cpuid_set_generic_info(i386_cpu_info_t
*info_p
)
457 /* do cpuid 0 to get vendor */
459 info_p
->cpuid_max_basic
= reg
[eax
];
460 bcopy((char *)®
[ebx
], &info_p
->cpuid_vendor
[0], 4); /* ug */
461 bcopy((char *)®
[ecx
], &info_p
->cpuid_vendor
[8], 4);
462 bcopy((char *)®
[edx
], &info_p
->cpuid_vendor
[4], 4);
463 info_p
->cpuid_vendor
[12] = 0;
465 /* get extended cpuid results */
466 cpuid_fn(0x80000000, reg
);
467 info_p
->cpuid_max_ext
= reg
[eax
];
469 /* check to see if we can get brand string */
470 if (info_p
->cpuid_max_ext
>= 0x80000004) {
472 * The brand string 48 bytes (max), guaranteed to
475 cpuid_fn(0x80000002, reg
);
476 bcopy((char *)reg
, &str
[0], 16);
477 cpuid_fn(0x80000003, reg
);
478 bcopy((char *)reg
, &str
[16], 16);
479 cpuid_fn(0x80000004, reg
);
480 bcopy((char *)reg
, &str
[32], 16);
481 for (p
= str
; *p
!= '\0'; p
++) {
482 if (*p
!= ' ') break;
484 strlcpy(info_p
->cpuid_brand_string
,
485 p
, sizeof(info_p
->cpuid_brand_string
));
487 if (!strncmp(info_p
->cpuid_brand_string
, CPUID_STRING_UNKNOWN
,
488 min(sizeof(info_p
->cpuid_brand_string
),
489 strlen(CPUID_STRING_UNKNOWN
) + 1))) {
491 * This string means we have a firmware-programmable brand string,
492 * and the firmware couldn't figure out what sort of CPU we have.
494 info_p
->cpuid_brand_string
[0] = '\0';
498 /* Get cache and addressing info. */
499 if (info_p
->cpuid_max_ext
>= 0x80000006) {
500 cpuid_fn(0x80000006, reg
);
501 info_p
->cpuid_cache_linesize
= bitfield32(reg
[ecx
], 7, 0);
502 info_p
->cpuid_cache_L2_associativity
=
503 bitfield32(reg
[ecx
],15,12);
504 info_p
->cpuid_cache_size
= bitfield32(reg
[ecx
],31,16);
505 cpuid_fn(0x80000008, reg
);
506 info_p
->cpuid_address_bits_physical
=
507 bitfield32(reg
[eax
], 7, 0);
508 info_p
->cpuid_address_bits_virtual
=
509 bitfield32(reg
[eax
],15, 8);
512 /* get processor signature and decode */
514 info_p
->cpuid_signature
= reg
[eax
];
515 info_p
->cpuid_stepping
= bitfield32(reg
[eax
], 3, 0);
516 info_p
->cpuid_model
= bitfield32(reg
[eax
], 7, 4);
517 info_p
->cpuid_family
= bitfield32(reg
[eax
], 11, 8);
518 info_p
->cpuid_type
= bitfield32(reg
[eax
], 13, 12);
519 info_p
->cpuid_extmodel
= bitfield32(reg
[eax
], 19, 16);
520 info_p
->cpuid_extfamily
= bitfield32(reg
[eax
], 27, 20);
521 info_p
->cpuid_brand
= bitfield32(reg
[ebx
], 7, 0);
522 info_p
->cpuid_features
= quad(reg
[ecx
], reg
[edx
]);
524 /* Fold extensions into family/model */
525 if (info_p
->cpuid_family
== 0x0f)
526 info_p
->cpuid_family
+= info_p
->cpuid_extfamily
;
527 if (info_p
->cpuid_family
== 0x0f || info_p
->cpuid_family
== 0x06)
528 info_p
->cpuid_model
+= (info_p
->cpuid_extmodel
<< 4);
530 if (info_p
->cpuid_features
& CPUID_FEATURE_HTT
)
531 info_p
->cpuid_logical_per_package
=
532 bitfield32(reg
[ebx
], 23, 16);
534 info_p
->cpuid_logical_per_package
= 1;
536 if (info_p
->cpuid_max_ext
>= 0x80000001) {
537 cpuid_fn(0x80000001, reg
);
538 info_p
->cpuid_extfeatures
=
539 quad(reg
[ecx
], reg
[edx
]);
542 /* Fold in the Invariant TSC feature bit, if present */
543 if (info_p
->cpuid_max_ext
>= 0x80000007) {
544 cpuid_fn(0x80000007, reg
);
545 info_p
->cpuid_extfeatures
|=
546 reg
[edx
] & (uint32_t)CPUID_EXTFEATURE_TSCI
;
549 /* Find the microcode version number a.k.a. signature a.k.a. BIOS ID */
550 info_p
->cpuid_microcode_version
=
551 (uint32_t) (rdmsr64(MSR_IA32_BIOS_SIGN_ID
) >> 32);
553 if (info_p
->cpuid_max_basic
>= 0x5) {
554 cpuid_mwait_leaf_t
*cmp
= &info_p
->cpuid_mwait_leaf
;
557 * Extract the Monitor/Mwait Leaf info:
560 cmp
->linesize_min
= reg
[eax
];
561 cmp
->linesize_max
= reg
[ebx
];
562 cmp
->extensions
= reg
[ecx
];
563 cmp
->sub_Cstates
= reg
[edx
];
564 info_p
->cpuid_mwait_leafp
= cmp
;
567 if (info_p
->cpuid_max_basic
>= 0x6) {
568 cpuid_thermal_leaf_t
*ctp
= &info_p
->cpuid_thermal_leaf
;
571 * The thermal and Power Leaf:
574 ctp
->sensor
= bitfield32(reg
[eax
], 0, 0);
575 ctp
->dynamic_acceleration
= bitfield32(reg
[eax
], 1, 1);
576 ctp
->invariant_APIC_timer
= bitfield32(reg
[eax
], 2, 2);
577 ctp
->thresholds
= bitfield32(reg
[ebx
], 3, 0);
578 ctp
->ACNT_MCNT
= bitfield32(reg
[ecx
], 0, 0);
579 info_p
->cpuid_thermal_leafp
= ctp
;
582 if (info_p
->cpuid_max_basic
>= 0xa) {
583 cpuid_arch_perf_leaf_t
*capp
= &info_p
->cpuid_arch_perf_leaf
;
586 * Architectural Performance Monitoring Leaf:
589 capp
->version
= bitfield32(reg
[eax
], 7, 0);
590 capp
->number
= bitfield32(reg
[eax
], 15, 8);
591 capp
->width
= bitfield32(reg
[eax
], 23, 16);
592 capp
->events_number
= bitfield32(reg
[eax
], 31, 24);
593 capp
->events
= reg
[ebx
];
594 capp
->fixed_number
= bitfield32(reg
[edx
], 4, 0);
595 capp
->fixed_width
= bitfield32(reg
[edx
], 12, 5);
596 info_p
->cpuid_arch_perf_leafp
= capp
;
603 cpuid_set_cpufamily(i386_cpu_info_t
*info_p
)
605 uint32_t cpufamily
= CPUFAMILY_UNKNOWN
;
607 switch (info_p
->cpuid_family
) {
609 switch (info_p
->cpuid_model
) {
611 cpufamily
= CPUFAMILY_INTEL_6_13
;
614 cpufamily
= CPUFAMILY_INTEL_YONAH
;
617 cpufamily
= CPUFAMILY_INTEL_MEROM
;
620 cpufamily
= CPUFAMILY_INTEL_PENRYN
;
622 case CPUID_MODEL_NEHALEM
:
623 case CPUID_MODEL_FIELDS
:
624 case CPUID_MODEL_DALES
:
625 case CPUID_MODEL_NEHALEM_EX
:
626 cpufamily
= CPUFAMILY_INTEL_NEHALEM
;
628 case CPUID_MODEL_DALES_32NM
:
629 case CPUID_MODEL_WESTMERE
:
630 case CPUID_MODEL_WESTMERE_EX
:
631 cpufamily
= CPUFAMILY_INTEL_WESTMERE
;
637 info_p
->cpuid_cpufamily
= cpufamily
;
644 i386_cpu_info_t
*info_p
= &cpuid_cpu_info
;
646 bzero((void *)info_p
, sizeof(cpuid_cpu_info
));
648 cpuid_set_generic_info(info_p
);
650 /* verify we are running on a supported CPU */
651 if ((strncmp(CPUID_VID_INTEL
, info_p
->cpuid_vendor
,
652 min(strlen(CPUID_STRING_UNKNOWN
) + 1,
653 sizeof(info_p
->cpuid_vendor
)))) ||
654 (cpuid_set_cpufamily(info_p
) == CPUFAMILY_UNKNOWN
))
655 panic("Unsupported CPU");
657 info_p
->cpuid_cpu_type
= CPU_TYPE_X86
;
658 info_p
->cpuid_cpu_subtype
= CPU_SUBTYPE_X86_ARCH1
;
660 cpuid_set_cache_info(&cpuid_cpu_info
);
663 * Find the number of enabled cores and threads
664 * (which determines whether SMT/Hyperthreading is active).
666 switch (info_p
->cpuid_cpufamily
) {
668 * This should be the same as Nehalem but an A0 silicon bug returns
669 * invalid data in the top 12 bits. Hence, we use only bits [19..16]
670 * rather than [31..16] for core count - which actually can't exceed 8.
672 case CPUFAMILY_INTEL_WESTMERE
: {
673 uint64_t msr
= rdmsr64(MSR_CORE_THREAD_COUNT
);
674 info_p
->core_count
= bitfield32((uint32_t)msr
, 19, 16);
675 info_p
->thread_count
= bitfield32((uint32_t)msr
, 15, 0);
678 case CPUFAMILY_INTEL_NEHALEM
: {
679 uint64_t msr
= rdmsr64(MSR_CORE_THREAD_COUNT
);
680 info_p
->core_count
= bitfield32((uint32_t)msr
, 31, 16);
681 info_p
->thread_count
= bitfield32((uint32_t)msr
, 15, 0);
685 if (info_p
->core_count
== 0) {
686 info_p
->core_count
= info_p
->cpuid_cores_per_package
;
687 info_p
->thread_count
= info_p
->cpuid_logical_per_package
;
690 cpuid_cpu_info
.cpuid_model_string
= ""; /* deprecated */
697 {CPUID_FEATURE_FPU
, "FPU",},
698 {CPUID_FEATURE_VME
, "VME",},
699 {CPUID_FEATURE_DE
, "DE",},
700 {CPUID_FEATURE_PSE
, "PSE",},
701 {CPUID_FEATURE_TSC
, "TSC",},
702 {CPUID_FEATURE_MSR
, "MSR",},
703 {CPUID_FEATURE_PAE
, "PAE",},
704 {CPUID_FEATURE_MCE
, "MCE",},
705 {CPUID_FEATURE_CX8
, "CX8",},
706 {CPUID_FEATURE_APIC
, "APIC",},
707 {CPUID_FEATURE_SEP
, "SEP",},
708 {CPUID_FEATURE_MTRR
, "MTRR",},
709 {CPUID_FEATURE_PGE
, "PGE",},
710 {CPUID_FEATURE_MCA
, "MCA",},
711 {CPUID_FEATURE_CMOV
, "CMOV",},
712 {CPUID_FEATURE_PAT
, "PAT",},
713 {CPUID_FEATURE_PSE36
, "PSE36",},
714 {CPUID_FEATURE_PSN
, "PSN",},
715 {CPUID_FEATURE_CLFSH
, "CLFSH",},
716 {CPUID_FEATURE_DS
, "DS",},
717 {CPUID_FEATURE_ACPI
, "ACPI",},
718 {CPUID_FEATURE_MMX
, "MMX",},
719 {CPUID_FEATURE_FXSR
, "FXSR",},
720 {CPUID_FEATURE_SSE
, "SSE",},
721 {CPUID_FEATURE_SSE2
, "SSE2",},
722 {CPUID_FEATURE_SS
, "SS",},
723 {CPUID_FEATURE_HTT
, "HTT",},
724 {CPUID_FEATURE_TM
, "TM",},
725 {CPUID_FEATURE_SSE3
, "SSE3"},
726 {CPUID_FEATURE_PCLMULQDQ
, "PCLMULQDQ"},
727 {CPUID_FEATURE_MONITOR
, "MON"},
728 {CPUID_FEATURE_DSCPL
, "DSCPL"},
729 {CPUID_FEATURE_VMX
, "VMX"},
730 {CPUID_FEATURE_SMX
, "SMX"},
731 {CPUID_FEATURE_EST
, "EST"},
732 {CPUID_FEATURE_TM2
, "TM2"},
733 {CPUID_FEATURE_SSSE3
, "SSSE3"},
734 {CPUID_FEATURE_CID
, "CID"},
735 {CPUID_FEATURE_CX16
, "CX16"},
736 {CPUID_FEATURE_xTPR
, "TPR"},
737 {CPUID_FEATURE_PDCM
, "PDCM"},
738 {CPUID_FEATURE_SSE4_1
, "SSE4.1"},
739 {CPUID_FEATURE_SSE4_2
, "SSE4.2"},
740 {CPUID_FEATURE_xAPIC
, "xAPIC"},
741 {CPUID_FEATURE_POPCNT
, "POPCNT"},
742 {CPUID_FEATURE_AES
, "AES"},
743 {CPUID_FEATURE_VMM
, "VMM"},
747 {CPUID_EXTFEATURE_SYSCALL
, "SYSCALL"},
748 {CPUID_EXTFEATURE_XD
, "XD"},
749 {CPUID_EXTFEATURE_1GBPAGE
, "1GBPAGE"},
750 {CPUID_EXTFEATURE_RDTSCP
, "RDTSCP"},
751 {CPUID_EXTFEATURE_EM64T
, "EM64T"},
752 {CPUID_EXTFEATURE_LAHF
, "LAHF"},
753 {CPUID_EXTFEATURE_TSCI
, "TSCI"},
760 /* Set-up the cpuid_info stucture lazily */
761 if (cpuid_cpu_infop
== NULL
) {
763 cpuid_cpu_infop
= &cpuid_cpu_info
;
765 return cpuid_cpu_infop
;
769 cpuid_get_feature_names(uint64_t features
, char *buf
, unsigned buf_len
)
775 for (i
= 0; feature_map
[i
].mask
!= 0; i
++) {
776 if ((features
& feature_map
[i
].mask
) == 0)
780 len
= min(strlen(feature_map
[i
].name
), (size_t) ((buf_len
-1) - (p
-buf
)));
783 bcopy(feature_map
[i
].name
, p
, len
);
791 cpuid_get_extfeature_names(uint64_t extfeatures
, char *buf
, unsigned buf_len
)
797 for (i
= 0; extfeature_map
[i
].mask
!= 0; i
++) {
798 if ((extfeatures
& extfeature_map
[i
].mask
) == 0)
802 len
= min(strlen(extfeature_map
[i
].name
), (size_t) ((buf_len
-1)-(p
-buf
)));
805 bcopy(extfeature_map
[i
].name
, p
, len
);
814 cpuid_feature_display(
819 kprintf("%s: %s\n", header
,
820 cpuid_get_feature_names(cpuid_features(),
822 if (cpuid_features() & CPUID_FEATURE_HTT
) {
823 #define s_if_plural(n) ((n > 1) ? "s" : "")
824 kprintf(" HTT: %d core%s per package;"
825 " %d logical cpu%s per package\n",
826 cpuid_cpu_info
.cpuid_cores_per_package
,
827 s_if_plural(cpuid_cpu_info
.cpuid_cores_per_package
),
828 cpuid_cpu_info
.cpuid_logical_per_package
,
829 s_if_plural(cpuid_cpu_info
.cpuid_logical_per_package
));
834 cpuid_extfeature_display(
839 kprintf("%s: %s\n", header
,
840 cpuid_get_extfeature_names(cpuid_extfeatures(),
848 if (cpuid_cpu_info
.cpuid_brand_string
[0] != '\0') {
849 kprintf("%s: %s\n", header
, cpuid_cpu_info
.cpuid_brand_string
);
856 return cpuid_info()->cpuid_family
;
860 cpuid_cpufamily(void)
862 return cpuid_info()->cpuid_cpufamily
;
868 return cpuid_info()->cpuid_cpu_type
;
872 cpuid_cpusubtype(void)
874 return cpuid_info()->cpuid_cpu_subtype
;
880 static int checked
= 0;
881 char fpu_arg
[20] = { 0 };
885 /* check for boot-time fpu limitations */
886 if (PE_parse_boot_argn("_fpu", &fpu_arg
[0], sizeof (fpu_arg
))) {
887 printf("limiting fpu features to: %s\n", fpu_arg
);
888 if (!strncmp("387", fpu_arg
, sizeof("387")) || !strncmp("mmx", fpu_arg
, sizeof("mmx"))) {
889 printf("no sse or sse2\n");
890 cpuid_cpu_info
.cpuid_features
&= ~(CPUID_FEATURE_SSE
| CPUID_FEATURE_SSE2
| CPUID_FEATURE_FXSR
);
891 } else if (!strncmp("sse", fpu_arg
, sizeof("sse"))) {
893 cpuid_cpu_info
.cpuid_features
&= ~(CPUID_FEATURE_SSE2
);
898 return cpuid_cpu_info
.cpuid_features
;
902 cpuid_extfeatures(void)
904 return cpuid_info()->cpuid_extfeatures
;
916 db_cpuid(__unused db_expr_t addr
,
917 __unused
int have_addr
,
918 __unused db_expr_t count
,
919 __unused
char *modif
)
925 do_cpuid(0, cpid
); /* Get the first cpuid which is the number of
927 db_printf("%08X - %08X %08X %08X %08X\n",
928 0, cpid
[eax
], cpid
[ebx
], cpid
[ecx
], cpid
[edx
]);
930 mid
= cpid
[eax
]; /* Set the number */
931 for (i
= 1; i
<= mid
; i
++) { /* Dump 'em out */
932 do_cpuid(i
, cpid
); /* Get the next */
933 db_printf("%08X - %08X %08X %08X %08X\n",
934 i
, cpid
[eax
], cpid
[ebx
], cpid
[ecx
], cpid
[edx
]);
938 do_cpuid(0x80000000, cpid
); /* Get the first extended cpuid which
939 * is the number of extended ids */
940 db_printf("%08X - %08X %08X %08X %08X\n",
941 0x80000000, cpid
[eax
], cpid
[ebx
], cpid
[ecx
], cpid
[edx
]);
943 mid
= cpid
[eax
]; /* Set the number */
944 for (i
= 0x80000001; i
<= mid
; i
++) { /* Dump 'em out */
945 do_cpuid(i
, cpid
); /* Get the next */
946 db_printf("%08X - %08X %08X %08X %08X\n",
947 i
, cpid
[eax
], cpid
[ebx
], cpid
[ecx
], cpid
[edx
]);