/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <platforms.h>

#include <vm/vm_page.h>
#include <pexpert/pexpert.h>

#include <i386/cpuid.h>		/* i386_cpu_info_t, do_cpuid(), feature bits */
#include <i386/proc_reg.h>	/* rdmsr64() */

#include <i386/db_machdep.h>
#include <ddb/db_aout.h>
#include <ddb/db_access.h>
#include <ddb/db_sym.h>
#include <ddb/db_variables.h>
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_expr.h>
#define min(a,b)	((a) < (b) ? (a) : (b))
#define quad(hi,lo)	(((uint64_t)(hi)) << 32 | (lo))

#define bit(n)		(1UL << (n))
#define bitmask(h,l)	((bit(h)|(bit(h)-1)) & ~(bit(l)-1))
#define bitfield(x,h,l)	(((x) & bitmask(h,l)) >> l)
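
/*
 * For example, bitfield(reg[eax], 7, 5) isolates bits 7..5 of the register
 * (the 3-bit cache-level field used below), and quad(reg[ecx], reg[edx])
 * packs two 32-bit CPUID registers into a single 64-bit value.
 */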

/*
 * CPU identification routines.
 */
static i386_cpu_info_t	*cpuid_cpu_infop = NULL;
static i386_cpu_info_t	cpuid_cpu_info;
/* this function is Intel-specific */
static void
cpuid_set_cache_info( i386_cpu_info_t * info_p )
{
	uint32_t	cpuid_result[4];
	uint32_t	reg[4];
	uint32_t	index;
	uint32_t	linesizes[LCACHE_MAX];
	unsigned int	i;
	unsigned int	j;
	boolean_t	cpuid_deterministic_supported = FALSE;

	bzero( linesizes, sizeof(linesizes) );
	/* Get processor cache descriptor info using leaf 2.  We don't use
	 * this internally, but must publish it for KEXTs.
	 */
	do_cpuid(2, cpuid_result);
	for (j = 0; j < 4; j++) {
		if ((cpuid_result[j] >> 31) == 1)	/* bit31 is validity */
			continue;
		((uint32_t *) info_p->cache_info)[j] = cpuid_result[j];
	}
	/* first byte gives number of cpuid calls to get all descriptors */
	for (i = 1; i < info_p->cache_info[0]; i++) {
		if (i*16 > sizeof(info_p->cache_info))
			break;
		do_cpuid(2, cpuid_result);
		for (j = 0; j < 4; j++) {
			if ((cpuid_result[j] >> 31) == 1)
				continue;
			((uint32_t *) info_p->cache_info)[4*i+j] =
				cpuid_result[j];
		}
	}
	/*
	 * Get cache info using leaf 4, the "deterministic cache parameters."
	 * Most processors Mac OS X supports implement this flavor of CPUID.
	 * Loop over each cache on the processor.
	 */
	do_cpuid(0, cpuid_result);
	if (cpuid_result[eax] >= 4)
		cpuid_deterministic_supported = TRUE;
	for (index = 0; cpuid_deterministic_supported; index++) {
		cache_type_t	type = Lnone;
		uint32_t	cache_type;
		uint32_t	cache_level;
		uint32_t	cache_sharing;
		uint32_t	cache_linesize;
		uint32_t	cache_sets;
		uint32_t	cache_associativity;
		uint32_t	cache_size;
		uint32_t	cache_partitions;
		uint32_t	colors;

		reg[eax] = 4;		/* cpuid request 4 */
		reg[ecx] = index;	/* index starting at 0 */
		cpuid(reg);
//kprintf("cpuid(4) index=%d eax=%p\n", index, reg[eax]);
		cache_type = bitfield(reg[eax], 4, 0);
		if (cache_type == 0)
			break;		/* no more caches */
		cache_level		= bitfield(reg[eax],  7,  5);
		cache_sharing		= bitfield(reg[eax], 25, 14) + 1;
		info_p->cpuid_cores_per_package
					= bitfield(reg[eax], 31, 26) + 1;
		cache_linesize		= bitfield(reg[ebx], 11,  0) + 1;
		cache_partitions	= bitfield(reg[ebx], 21, 12) + 1;
		cache_associativity	= bitfield(reg[ebx], 31, 22) + 1;
		cache_sets		= bitfield(reg[ecx], 31,  0) + 1;
		/* Map type/levels returned by CPUID into cache_type_t */
		switch (cache_level) {
		case 1:
			type = cache_type == 1 ? L1D :
			       cache_type == 2 ? L1I :
						 Lnone;
			break;
		case 2:
			type = cache_type == 3 ? L2U :
						 Lnone;
			break;
		case 3:
			type = cache_type == 3 ? L3U :
						 Lnone;
			break;
		default:
			type = Lnone;
		}
		/* The total size of a cache is:
		 *	( linesize * sets * associativity )
		 */
		if (type != Lnone) {
			cache_size = cache_linesize * cache_sets *
				     cache_associativity;
			info_p->cache_size[type] = cache_size;
			info_p->cache_sharing[type] = cache_sharing;
			info_p->cache_partitions[type] = cache_partitions;
			linesizes[type] = cache_linesize;
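
			/*
			 * Worked example (illustrative values, not CPUID
			 * output): 64-byte lines, 4096 sets and 16-way
			 * associativity give 64 * 4096 * 16 = 4 MB.
			 */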
			/* Compute the number of page colors for this cache,
			 * which is:
			 *	( linesize * sets ) / page_size
			 *
			 * To help visualize this, consider two views of a
			 * physical address.  To the cache, it is composed
			 * of a line offset, a set selector, and a tag.
			 * To VM, it is composed of a page offset, a page
			 * color, and other bits in the pageframe number:
			 *
			 *           +-----------------+---------+--------+
			 *  cache:   |       tag       |   set   | offset |
			 *           +-----------------+---------+--------+
			 *
			 *           +-----------------+-------+----------+
			 *  VM:      |    don't care   | color | pg offset|
			 *           +-----------------+-------+----------+
			 *
			 * The color is those bits in (set+offset) not covered
			 * by the page offset.
			 */
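			/*
			 * Worked example (illustrative values): with 4 KB
			 * pages, 64-byte lines and 8192 sets span 512 KB,
			 * so (64 * 8192) >> 12 = 128 page colors.
			 */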
			colors = ( cache_linesize * cache_sets ) >> 12;

			if ( colors > vm_cache_geometry_colors )
				vm_cache_geometry_colors = colors;
		}
	}
	/*
	 * If deterministic cache parameters are not available, use
	 * the values gathered from the extended CPUID leaf instead.
	 */
	if (info_p->cpuid_cores_per_package == 0) {
		info_p->cpuid_cores_per_package = 1;

		/* cpuid reports this size in units of 1024 bytes (KB) */
		info_p->cache_size[L2U] = info_p->cpuid_cache_size * 1024;
		info_p->cache_sharing[L2U] = 1;
		info_p->cache_partitions[L2U] = 1;

		linesizes[L2U] = info_p->cpuid_cache_linesize;
	}
	/*
	 * What linesize to publish?  We use the L2 linesize if any,
	 * else the L1D linesize.
	 */
	if ( linesizes[L2U] )
		info_p->cache_linesize = linesizes[L2U];
	else if (linesizes[L1D])
		info_p->cache_linesize = linesizes[L1D];
	else
		panic("no linesize");
	/*
	 * Extract and publish TLB information.
	 */
	for (i = 1; i < sizeof(info_p->cache_info); i++) {
		uint8_t		desc = info_p->cache_info[i];

		switch (desc) {
		case CPUID_CACHE_ITLB_4K_32_4:
			info_p->cpuid_itlb_small = 32;
			break;
		case CPUID_CACHE_ITLB_4M_2:
			info_p->cpuid_itlb_large = 2;
			break;
		case CPUID_CACHE_DTLB_4K_64_4:
			info_p->cpuid_dtlb_small = 64;
			break;
		case CPUID_CACHE_DTLB_4M_8_4:
			info_p->cpuid_dtlb_large = 8;
			break;
		case CPUID_CACHE_DTLB_4M_32_4:
			info_p->cpuid_dtlb_large = 32;
			break;
		case CPUID_CACHE_ITLB_64:
			info_p->cpuid_itlb_small = 64;
			info_p->cpuid_itlb_large = 64;
			break;
		case CPUID_CACHE_ITLB_128:
			info_p->cpuid_itlb_small = 128;
			info_p->cpuid_itlb_large = 128;
			break;
		case CPUID_CACHE_ITLB_256:
			info_p->cpuid_itlb_small = 256;
			info_p->cpuid_itlb_large = 256;
			break;
		case CPUID_CACHE_DTLB_64:
			info_p->cpuid_dtlb_small = 64;
			info_p->cpuid_dtlb_large = 64;
			break;
		case CPUID_CACHE_DTLB_128:
			info_p->cpuid_dtlb_small = 128;
			info_p->cpuid_dtlb_large = 128;
			break;
		case CPUID_CACHE_DTLB_256:
			info_p->cpuid_dtlb_small = 256;
			info_p->cpuid_dtlb_large = 256;
			break;
		case CPUID_CACHE_ITLB_4M2M_7:
			info_p->cpuid_itlb_large = 7;
			break;
		case CPUID_CACHE_DTLB_4K_16_4:
			info_p->cpuid_dtlb_small = 16;
			break;
		case CPUID_CACHE_DTLB_4M2M_32_4:
			info_p->cpuid_dtlb_large = 32;
			break;
		case CPUID_CACHE_ITLB_4K_128_4:
			info_p->cpuid_itlb_small = 128;
			break;
		case CPUID_CACHE_ITLB_4M_8:
			info_p->cpuid_itlb_large = 8;
			break;
		case CPUID_CACHE_DTLB_4K_128_4:
			info_p->cpuid_dtlb_small = 128;
			break;
		case CPUID_CACHE_DTLB_4K_256_4:
			info_p->cpuid_dtlb_small = 256;
			break;
		}
	}
}
static void
cpuid_set_generic_info(i386_cpu_info_t *info_p)
{
	uint32_t	cpuid_reg[4];
	uint32_t	max_extid;
	char		str[128], *p;

	/* do cpuid 0 to get vendor */
	do_cpuid(0, cpuid_reg);
	bcopy((char *)&cpuid_reg[ebx], &info_p->cpuid_vendor[0], 4); /* ug */
	bcopy((char *)&cpuid_reg[ecx], &info_p->cpuid_vendor[8], 4);
	bcopy((char *)&cpuid_reg[edx], &info_p->cpuid_vendor[4], 4);
	info_p->cpuid_vendor[12] = 0;
	/* get extended cpuid results */
	do_cpuid(0x80000000, cpuid_reg);
	max_extid = cpuid_reg[eax];
	/* check to see if we can get the brand string */
	if (max_extid >= 0x80000004) {
		/*
		 * The brand string is 48 bytes (max), guaranteed to
		 * be NUL terminated.
		 */
		do_cpuid(0x80000002, cpuid_reg);
		bcopy((char *)cpuid_reg, &str[0], 16);
		do_cpuid(0x80000003, cpuid_reg);
		bcopy((char *)cpuid_reg, &str[16], 16);
		do_cpuid(0x80000004, cpuid_reg);
		bcopy((char *)cpuid_reg, &str[32], 16);
		for (p = str; *p != '\0'; p++) {
			if (*p != ' ') break;
		}
		strlcpy(info_p->cpuid_brand_string,
			p, sizeof(info_p->cpuid_brand_string));

		if (!strncmp(info_p->cpuid_brand_string, CPUID_STRING_UNKNOWN,
			     min(sizeof(info_p->cpuid_brand_string),
				 strlen(CPUID_STRING_UNKNOWN) + 1))) {
			/*
			 * This string means we have a firmware-programmable
			 * brand string, and the firmware couldn't figure out
			 * what sort of CPU we have.
			 */
			info_p->cpuid_brand_string[0] = '\0';
		}
	}
	/* Get cache and addressing info. */
	if (max_extid >= 0x80000006) {
		do_cpuid(0x80000006, cpuid_reg);
		info_p->cpuid_cache_linesize = bitfield(cpuid_reg[ecx], 7, 0);
		info_p->cpuid_cache_L2_associativity =
					bitfield(cpuid_reg[ecx], 15, 12);
		info_p->cpuid_cache_size = bitfield(cpuid_reg[ecx], 31, 16);
		do_cpuid(0x80000008, cpuid_reg);
		info_p->cpuid_address_bits_physical =
					bitfield(cpuid_reg[eax],  7, 0);
		info_p->cpuid_address_bits_virtual =
					bitfield(cpuid_reg[eax], 15, 8);
	}
	/* get processor signature and decode */
	do_cpuid(1, cpuid_reg);
	info_p->cpuid_signature = cpuid_reg[eax];
	info_p->cpuid_stepping  = bitfield(cpuid_reg[eax],  3,  0);
	info_p->cpuid_model     = bitfield(cpuid_reg[eax],  7,  4);
	info_p->cpuid_family    = bitfield(cpuid_reg[eax], 11,  8);
	info_p->cpuid_type      = bitfield(cpuid_reg[eax], 13, 12);
	info_p->cpuid_extmodel  = bitfield(cpuid_reg[eax], 19, 16);
	info_p->cpuid_extfamily = bitfield(cpuid_reg[eax], 27, 20);
	info_p->cpuid_brand     = bitfield(cpuid_reg[ebx],  7,  0);
	info_p->cpuid_features  = quad(cpuid_reg[ecx], cpuid_reg[edx]);
	/* Fold extensions into family/model */
	if (info_p->cpuid_family == 0x0f)
		info_p->cpuid_family += info_p->cpuid_extfamily;
	if (info_p->cpuid_family == 0x0f || info_p->cpuid_family == 0x06)
		info_p->cpuid_model += (info_p->cpuid_extmodel << 4);
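	/*
	 * Example (illustrative): family 0x6 with extended model 0x1 and
	 * model 0xA yields model 0x1A, the extended model forming the
	 * high nibble.
	 */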
	if (info_p->cpuid_features & CPUID_FEATURE_HTT)
		info_p->cpuid_logical_per_package =
				bitfield(cpuid_reg[ebx], 23, 16);
	else
		info_p->cpuid_logical_per_package = 1;
	if (max_extid >= 0x80000001) {
		do_cpuid(0x80000001, cpuid_reg);
		info_p->cpuid_extfeatures =
				quad(cpuid_reg[ecx], cpuid_reg[edx]);
	}
	/* Fold in the Invariant TSC feature bit, if present */
	if (max_extid >= 0x80000007) {
		do_cpuid(0x80000007, cpuid_reg);
		info_p->cpuid_extfeatures |=
				cpuid_reg[edx] & CPUID_EXTFEATURE_TSCI;
	}
	/* Find the microcode version number a.k.a. signature a.k.a. BIOS ID */
	info_p->cpuid_microcode_version =
		(uint32_t) (rdmsr64(MSR_IA32_BIOS_SIGN_ID) >> 32);
	if (info_p->cpuid_model == CPUID_MODEL_NEHALEM) {
		/*
		 * For Nehalem, find the number of enabled cores and threads
		 * (which determines whether SMT/Hyperthreading is active).
		 */
		uint64_t msr_core_thread_count = rdmsr64(MSR_CORE_THREAD_COUNT);
		info_p->core_count   = bitfield(msr_core_thread_count, 31, 16);
		info_p->thread_count = bitfield(msr_core_thread_count, 15,  0);
	}
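	/*
	 * Illustrative values (assumed): a quad-core part with Hyperthreading
	 * enabled would report core_count = 4 and thread_count = 8 from this MSR.
	 */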
	if (info_p->cpuid_features & CPUID_FEATURE_MONITOR) {
		/*
		 * Extract the Monitor/Mwait Leaf info:
		 */
		do_cpuid(5, cpuid_reg);
		info_p->cpuid_mwait_linesize_min = cpuid_reg[eax];
		info_p->cpuid_mwait_linesize_max = cpuid_reg[ebx];
		info_p->cpuid_mwait_extensions   = cpuid_reg[ecx];
		info_p->cpuid_mwait_sub_Cstates  = cpuid_reg[edx];
		/*
		 * And the thermal and Power Leaf while we're at it:
		 */
		do_cpuid(6, cpuid_reg);
		info_p->cpuid_thermal_sensor =
					bitfield(cpuid_reg[eax], 0, 0);
		info_p->cpuid_thermal_dynamic_acceleration =
					bitfield(cpuid_reg[eax], 1, 1);
		info_p->cpuid_thermal_thresholds =
					bitfield(cpuid_reg[ebx], 3, 0);
		info_p->cpuid_thermal_ACNT_MCNT =
					bitfield(cpuid_reg[ecx], 0, 0);
		/*
		 * And the Architectural Performance Monitoring Leaf:
		 */
		do_cpuid(0xa, cpuid_reg);
		info_p->cpuid_arch_perf_version =
					bitfield(cpuid_reg[eax],  7,  0);
		info_p->cpuid_arch_perf_number =
					bitfield(cpuid_reg[eax], 15,  8);
		info_p->cpuid_arch_perf_width =
					bitfield(cpuid_reg[eax], 23, 16);
		info_p->cpuid_arch_perf_events_number =
					bitfield(cpuid_reg[eax], 31, 24);
		info_p->cpuid_arch_perf_events =
					cpuid_reg[ebx];
		info_p->cpuid_arch_perf_fixed_number =
					bitfield(cpuid_reg[edx],  4,  0);
		info_p->cpuid_arch_perf_fixed_width =
					bitfield(cpuid_reg[edx], 12,  5);
	}
}
void
cpuid_set_info(void)
{
	bzero((void *)&cpuid_cpu_info, sizeof(cpuid_cpu_info));

	cpuid_set_generic_info(&cpuid_cpu_info);
	/* verify we are running on a supported CPU */
	if ((strncmp(CPUID_VID_INTEL, cpuid_cpu_info.cpuid_vendor,
		     min(strlen(CPUID_STRING_UNKNOWN) + 1,
			 sizeof(cpuid_cpu_info.cpuid_vendor)))) ||
	    (cpuid_cpu_info.cpuid_family != 6) ||
	    (cpuid_cpu_info.cpuid_model < 13))
		panic("Unsupported CPU");
	cpuid_cpu_info.cpuid_cpu_type = CPU_TYPE_X86;
	cpuid_cpu_info.cpuid_cpu_subtype = CPU_SUBTYPE_X86_ARCH1;

	cpuid_set_cache_info(&cpuid_cpu_info);
	if (cpuid_cpu_info.core_count == 0) {
		cpuid_cpu_info.core_count =
			cpuid_cpu_info.cpuid_cores_per_package;
		cpuid_cpu_info.thread_count =
			cpuid_cpu_info.cpuid_logical_per_package;
	}

	cpuid_cpu_info.cpuid_model_string = ""; /* deprecated */
}
static struct {
	uint64_t	mask;
	const char	*name;
} feature_map[] = {
	{CPUID_FEATURE_FPU,     "FPU"},
	{CPUID_FEATURE_VME,     "VME"},
	{CPUID_FEATURE_DE,      "DE"},
	{CPUID_FEATURE_PSE,     "PSE"},
	{CPUID_FEATURE_TSC,     "TSC"},
	{CPUID_FEATURE_MSR,     "MSR"},
	{CPUID_FEATURE_PAE,     "PAE"},
	{CPUID_FEATURE_MCE,     "MCE"},
	{CPUID_FEATURE_CX8,     "CX8"},
	{CPUID_FEATURE_APIC,    "APIC"},
	{CPUID_FEATURE_SEP,     "SEP"},
	{CPUID_FEATURE_MTRR,    "MTRR"},
	{CPUID_FEATURE_PGE,     "PGE"},
	{CPUID_FEATURE_MCA,     "MCA"},
	{CPUID_FEATURE_CMOV,    "CMOV"},
	{CPUID_FEATURE_PAT,     "PAT"},
	{CPUID_FEATURE_PSE36,   "PSE36"},
	{CPUID_FEATURE_PSN,     "PSN"},
	{CPUID_FEATURE_CLFSH,   "CLFSH"},
	{CPUID_FEATURE_DS,      "DS"},
	{CPUID_FEATURE_ACPI,    "ACPI"},
	{CPUID_FEATURE_MMX,     "MMX"},
	{CPUID_FEATURE_FXSR,    "FXSR"},
	{CPUID_FEATURE_SSE,     "SSE"},
	{CPUID_FEATURE_SSE2,    "SSE2"},
	{CPUID_FEATURE_SS,      "SS"},
	{CPUID_FEATURE_HTT,     "HTT"},
	{CPUID_FEATURE_TM,      "TM"},
	{CPUID_FEATURE_SSE3,    "SSE3"},
	{CPUID_FEATURE_MONITOR, "MON"},
	{CPUID_FEATURE_DSCPL,   "DSCPL"},
	{CPUID_FEATURE_VMX,     "VMX"},
	{CPUID_FEATURE_SMX,     "SMX"},
	{CPUID_FEATURE_EST,     "EST"},
	{CPUID_FEATURE_TM2,     "TM2"},
	{CPUID_FEATURE_SSSE3,   "SSSE3"},
	{CPUID_FEATURE_CID,     "CID"},
	{CPUID_FEATURE_CX16,    "CX16"},
	{CPUID_FEATURE_xTPR,    "TPR"},
	{CPUID_FEATURE_PDCM,    "PDCM"},
	{CPUID_FEATURE_SSE4_1,  "SSE4.1"},
	{CPUID_FEATURE_SSE4_2,  "SSE4.2"},
	{CPUID_FEATURE_xAPIC,   "xAPIC"},
	{CPUID_FEATURE_POPCNT,  "POPCNT"},
	{0, 0}
},
extfeature_map[] = {
	{CPUID_EXTFEATURE_SYSCALL, "SYSCALL"},
	{CPUID_EXTFEATURE_XD,      "XD"},
	{CPUID_EXTFEATURE_EM64T,   "EM64T"},
	{CPUID_EXTFEATURE_LAHF,    "LAHF"},
	{CPUID_EXTFEATURE_RDTSCP,  "RDTSCP"},
	{CPUID_EXTFEATURE_TSCI,    "TSCI"},
	{0, 0}
};
i386_cpu_info_t	*
cpuid_info(void)
{
	/* Set up the cpuid_info structure lazily */
	if (cpuid_cpu_infop == NULL) {
		cpuid_set_info();
		cpuid_cpu_infop = &cpuid_cpu_info;
	}
	return cpuid_cpu_infop;
}
char *
cpuid_get_feature_names(uint64_t features, char *buf, unsigned buf_len)
{
	size_t	len = -1;
	char	*p = buf;
	int	i;

	for (i = 0; feature_map[i].mask != 0; i++) {
		if ((features & feature_map[i].mask) == 0)
			continue;
		if (len > 0)
			*p++ = ' ';
		len = min(strlen(feature_map[i].name), (buf_len-1) - (p-buf));
		if (len == 0)
			break;
		bcopy(feature_map[i].name, p, len);
		p += len;
	}
	*p = '\0';
	return buf;
}
char *
cpuid_get_extfeature_names(uint64_t extfeatures, char *buf, unsigned buf_len)
{
	size_t	len = -1;
	char	*p = buf;
	int	i;

	for (i = 0; extfeature_map[i].mask != 0; i++) {
		if ((extfeatures & extfeature_map[i].mask) == 0)
			continue;
		if (len > 0)
			*p++ = ' ';
		len = min(strlen(extfeature_map[i].name), (buf_len-1) - (p-buf));
		if (len == 0)
			break;
		bcopy(extfeature_map[i].name, p, len);
		p += len;
	}
	*p = '\0';
	return buf;
}
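
/*
 * Typical usage (hypothetical buffer):
 *	char buf[256];
 *	cpuid_get_feature_names(cpuid_features(), buf, sizeof(buf));
 * fills buf with a space-separated list such as "FPU VME DE PSE TSC ...".
 */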
#if CONFIG_NO_KPRINTF_STRINGS
void
cpuid_feature_display(
	__unused const char	*header)
{
}

void
cpuid_extfeature_display(
	__unused const char	*header)
{
}

void
cpuid_cpu_display(
	__unused const char	*header)
{
}
#else /* CONFIG_NO_KPRINTF_STRINGS */
void
cpuid_feature_display(
	const char	*header)
{
	char	buf[256];

	kprintf("%s: %s\n", header,
		cpuid_get_feature_names(cpuid_features(),
					buf, sizeof(buf)));
	if (cpuid_features() & CPUID_FEATURE_HTT) {
#define s_if_plural(n)	((n > 1) ? "s" : "")
		kprintf("  HTT: %d core%s per package;"
			" %d logical cpu%s per package\n",
			cpuid_cpu_info.cpuid_cores_per_package,
			s_if_plural(cpuid_cpu_info.cpuid_cores_per_package),
			cpuid_cpu_info.cpuid_logical_per_package,
			s_if_plural(cpuid_cpu_info.cpuid_logical_per_package));
	}
}
void
cpuid_extfeature_display(
	const char	*header)
{
	char	buf[256];

	kprintf("%s: %s\n", header,
		cpuid_get_extfeature_names(cpuid_extfeatures(),
					   buf, sizeof(buf)));
}
void
cpuid_cpu_display(
	const char	*header)
{
	if (cpuid_cpu_info.cpuid_brand_string[0] != '\0') {
		kprintf("%s: %s\n", header, cpuid_cpu_info.cpuid_brand_string);
	}
}
#endif /* !CONFIG_NO_KPRINTF_STRINGS */
unsigned int
cpuid_family(void)
{
	return cpuid_info()->cpuid_family;
}

cpu_type_t
cpuid_cputype(void)
{
	return cpuid_info()->cpuid_cpu_type;
}

cpu_subtype_t
cpuid_cpusubtype(void)
{
	return cpuid_info()->cpuid_cpu_subtype;
}
uint64_t
cpuid_features(void)
{
	static int	checked = 0;
	char		fpu_arg[20] = { 0 };

	(void) cpuid_info();
	if (!checked) {
		/* check for boot-time fpu limitations */
		if (PE_parse_boot_argn("_fpu", &fpu_arg[0], sizeof(fpu_arg))) {
			printf("limiting fpu features to: %s\n", fpu_arg);
			if (!strncmp("387", fpu_arg, sizeof("387")) ||
			    !strncmp("mmx", fpu_arg, sizeof("mmx"))) {
				printf("no sse or sse2\n");
				cpuid_cpu_info.cpuid_features &=
					~(CPUID_FEATURE_SSE |
					  CPUID_FEATURE_SSE2 |
					  CPUID_FEATURE_FXSR);
			} else if (!strncmp("sse", fpu_arg, sizeof("sse"))) {
				printf("no sse2\n");
				cpuid_cpu_info.cpuid_features &=
					~(CPUID_FEATURE_SSE2);
			}
		}
		checked = 1;
	}
	return cpuid_cpu_info.cpuid_features;
}
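
/*
 * Illustrative boot-args (values assumed): "_fpu=387" or "_fpu=mmx" masks
 * off SSE, SSE2 and FXSR; "_fpu=sse" masks off only SSE2.
 */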
uint64_t
cpuid_extfeatures(void)
{
	return cpuid_info()->cpuid_extfeatures;
}
void
db_cpuid(__unused db_expr_t addr,
	 __unused int have_addr,
	 __unused db_expr_t count,
	 __unused char *modif)
{
	uint32_t	i, mid;
	uint32_t	cpid[4];

	do_cpuid(0, cpid);	/* Get the first cpuid which is the number of
				 * basic ids */
	db_printf("%08X - %08X %08X %08X %08X\n",
		0, cpid[eax], cpid[ebx], cpid[ecx], cpid[edx]);

	mid = cpid[eax];	/* Set the number */
	for (i = 1; i <= mid; i++) {	/* Dump 'em out */
		do_cpuid(i, cpid);	/* Get the next */
		db_printf("%08X - %08X %08X %08X %08X\n",
			i, cpid[eax], cpid[ebx], cpid[ecx], cpid[edx]);
	}
	db_printf("\n");

	do_cpuid(0x80000000, cpid);	/* Get the first extended cpuid which
					 * is the number of extended ids */
	db_printf("%08X - %08X %08X %08X %08X\n",
		0x80000000, cpid[eax], cpid[ebx], cpid[ecx], cpid[edx]);

	mid = cpid[eax];	/* Set the number */
	for (i = 0x80000001; i <= mid; i++) {	/* Dump 'em out */
		do_cpuid(i, cpid);	/* Get the next */
		db_printf("%08X - %08X %08X %08X %08X\n",
			i, cpid[eax], cpid[ebx], cpid[ecx], cpid[edx]);
	}
}
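
/*
 * Illustrative output from db_cpuid (register values made up): each line
 * shows the leaf number followed by eax, ebx, ecx and edx, e.g.
 *	00000001 - 000106A5 00100800 0098E3BD BFEBFBFF
 */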