5 * Copyright 2009 Apple Inc. All rights reserved.
13 #include <sys/param.h>
14 #include <System/machine/cpu_capabilities.h>
15 #include <mach/mach.h>
16 #include <mach/mach_error.h>
17 #include <mach/bootstrap.h>
20 #ifdef _COMM_PAGE_ACTIVE_CPUS
21 int active_cpu_test(void);
24 int get_sys_uint64(const char *sel
, uint64_t *val
);
25 int get_sys_int32(const char *sel
, int32_t *val
);
/*
 * Map a fixed commpage address constant to a typed pointer.
 *
 * The double cast through uintptr_t silences integer-to-pointer width
 * warnings; commpage addresses are compile-time magic constants.
 * __typeof__ (rather than typeof) keeps the macro usable under strict
 * -std=c11 as well as GNU modes.
 */
#define getcommptr(var, commpageaddr) do { \
		var = (__typeof__(var))(uintptr_t)(commpageaddr); \
	} while(0)
32 * Check some of the data in the commpage
33 * against manual sysctls
35 int commpage_data_tests( void * the_argp
)
41 volatile uint64_t *comm_u64
;
42 volatile uint32_t *comm_u32
;
43 volatile uint16_t *comm_u16
;
44 volatile uint8_t *comm_u8
;
47 /* _COMM_PAGE_CPU_CAPABILITIES */
48 getcommptr(comm_u32
, _COMM_PAGE_CPU_CAPABILITIES
);
50 ret
= get_sys_int32("hw.ncpu", &sys_i32
);
53 if (sys_i32
!= ((*comm_u32
& kNumCPUs
) >> kNumCPUsShift
)) {
54 warnx("kNumCPUs does not match hw.ncpu");
59 getcommptr(comm_u8
, _COMM_PAGE_NCPUS
);
60 if (sys_i32
!= (*comm_u8
)) {
61 warnx("_COMM_PAGE_NCPUS does not match hw.ncpu");
66 ret
= get_sys_int32("hw.logicalcpu", &sys_i32
);
69 if (sys_i32
!= ((*comm_u32
& kNumCPUs
) >> kNumCPUsShift
)) {
70 warnx("kNumCPUs does not match hw.logicalcpu");
75 /* Intel only capabilities */
76 #if defined(__i386__) || defined(__x86_64__)
77 ret
= get_sys_int32("hw.optional.mmx", &sys_i32
);
80 if (!(sys_i32
) ^ !(*comm_u32
& kHasMMX
)) {
81 warnx("kHasMMX does not match hw.optional.mmx");
86 ret
= get_sys_int32("hw.optional.sse", &sys_i32
);
89 if (!(sys_i32
) ^ !(*comm_u32
& kHasSSE
)) {
90 warnx("kHasSSE does not match hw.optional.sse");
94 ret
= get_sys_int32("hw.optional.sse2", &sys_i32
);
97 if (!(sys_i32
) ^ !(*comm_u32
& kHasSSE2
)) {
98 warnx("kHasSSE2 does not match hw.optional.sse2");
103 ret
= get_sys_int32("hw.optional.sse3", &sys_i32
);
106 if (!(sys_i32
) ^ !(*comm_u32
& kHasSSE3
)) {
107 warnx("kHasSSE3 does not match hw.optional.sse3");
112 ret
= get_sys_int32("hw.optional.supplementalsse3", &sys_i32
);
115 if (!(sys_i32
) ^ !(*comm_u32
& kHasSupplementalSSE3
)) {
116 warnx("kHasSupplementalSSE3 does not match hw.optional.supplementalsse3");
121 ret
= get_sys_int32("hw.optional.sse4_1", &sys_i32
);
124 if (!(sys_i32
) ^ !(*comm_u32
& kHasSSE4_1
)) {
125 warnx("kHasSSE4_1 does not match hw.optional.sse4_1");
130 ret
= get_sys_int32("hw.optional.sse4_2", &sys_i32
);
133 if (!(sys_i32
) ^ !(*comm_u32
& kHasSSE4_2
)) {
134 warnx("kHasSSE4_2 does not match hw.optional.sse4_2");
139 ret
= get_sys_int32("hw.optional.aes", &sys_i32
);
142 if (!(sys_i32
) ^ !(*comm_u32
& kHasAES
)) {
143 warnx("kHasAES does not match hw.optional.aes");
148 ret
= get_sys_int32("hw.optional.x86_64", &sys_i32
);
151 if (!(sys_i32
) ^ !(*comm_u32
& k64Bit
)) {
152 warnx("k64Bit does not match hw.optional.x86_64");
156 #endif /* __i386__ || __x86_64__ */
158 /* These fields are not implemented for all architectures */
159 #if defined(_COMM_PAGE_SCHED_GEN) && !TARGET_OS_EMBEDDED
160 uint32_t preempt_count1
, preempt_count2
;
163 ret
= get_sys_uint64("hw.cpufrequency_max", &sys_u64
);
166 getcommptr(comm_u32
, _COMM_PAGE_SCHED_GEN
);
167 preempt_count1
= *comm_u32
;
168 /* execute for around 1 quantum (10ms) */
169 for(count
= MAX(10000000ULL, sys_u64
/64); count
> 0; count
--) {
172 preempt_count2
= *comm_u32
;
173 if (preempt_count1
>= preempt_count2
) {
174 warnx("_COMM_PAGE_SCHED_GEN not incrementing (%u => %u)",
175 preempt_count1
, preempt_count2
);
179 #endif /* _COMM_PAGE_SCHED_GEN */
181 #ifdef _COMM_PAGE_ACTIVE_CPUS
182 ret
= get_sys_int32("hw.activecpu", &sys_i32
);
185 getcommptr(comm_u8
, _COMM_PAGE_ACTIVE_CPUS
);
186 if (sys_i32
!= (*comm_u8
)) {
187 warnx("_COMM_PAGE_ACTIVE_CPUS does not match hw.activecpu");
192 /* We shouldn't be supporting userspace processor_start/processor_exit on embedded */
193 #if !TARGET_OS_EMBEDDED
194 ret
= active_cpu_test();
196 #endif /* !TARGET_OS_EMBEDDED */
197 #endif /* _COMM_PAGE_ACTIVE_CPUS */
199 #ifdef _COMM_PAGE_PHYSICAL_CPUS
200 ret
= get_sys_int32("hw.physicalcpu_max", &sys_i32
);
203 getcommptr(comm_u8
, _COMM_PAGE_PHYSICAL_CPUS
);
204 if (sys_i32
!= (*comm_u8
)) {
205 warnx("_COMM_PAGE_PHYSICAL_CPUS does not match hw.physicalcpu_max");
209 #endif /* _COMM_PAGE_PHYSICAL_CPUS */
211 #ifdef _COMM_PAGE_LOGICAL_CPUS
212 ret
= get_sys_int32("hw.logicalcpu_max", &sys_i32
);
215 getcommptr(comm_u8
, _COMM_PAGE_LOGICAL_CPUS
);
216 if (sys_i32
!= (*comm_u8
)) {
217 warnx("_COMM_PAGE_LOGICAL_CPUS does not match hw.logicalcpu_max");
221 #endif /* _COMM_PAGE_LOGICAL_CPUS */
224 #ifdef _COMM_PAGE_MEMORY_SIZE
225 ret
= get_sys_uint64("hw.memsize", &sys_u64
);
228 getcommptr(comm_u64
, _COMM_PAGE_MEMORY_SIZE
);
229 if (sys_u64
!= (*comm_u64
)) {
230 warnx("_COMM_PAGE_MEMORY_SIZE does not match hw.memsize");
234 #endif /* _COMM_PAGE_MEMORY_SIZE */
/*
 * Read a 64-bit unsigned value from a named sysctl.
 *
 * sel - sysctl name, e.g. "hw.memsize"
 * val - out-parameter receiving the value
 *
 * Returns 0 on success; on failure prints a warn() diagnostic and
 * returns 1.
 */
int get_sys_uint64(const char *sel, uint64_t *val)
{
	size_t size = sizeof(*val);
	int ret;

	ret = sysctlbyname(sel, val, &size, NULL, 0);
	if (ret == -1) {
		warn("sysctlbyname(%s)", sel);
		return 1;
	}

	/* left as an easy debug toggle: */
	// warnx("sysctlbyname(%s) => %llx", sel, *val);

	return 0;
}
/*
 * Read a 32-bit signed value from a named sysctl.
 *
 * sel - sysctl name, e.g. "hw.ncpu"
 * val - out-parameter receiving the value
 *
 * Returns 0 on success; on failure prints a warn() diagnostic and
 * returns 1.  (Mirrors get_sys_uint64.)
 */
int get_sys_int32(const char *sel, int32_t *val)
{
	size_t size = sizeof(*val);
	int ret;

	ret = sysctlbyname(sel, val, &size, NULL, 0);
	if (ret == -1) {
		warn("sysctlbyname(%s)", sel);
		return 1;
	}

	/* left as an easy debug toggle: */
	// warnx("sysctlbyname(%s) => %x", sel, *val);

	return 0;
}
277 #ifdef _COMM_PAGE_ACTIVE_CPUS
/*
 * Try to find a secondary processor that we can disable,
 * and make sure the commpage reflects that. This test
 * will pass on UP systems, and if all secondary processors
 * have been manually disabled.
 */
284 int active_cpu_test(void)
286 volatile uint8_t *activeaddr
;
287 uint8_t original_activecpu
;
288 boolean_t test_failed
= FALSE
;
290 /* Code stolen from hostinfo.c */
292 processor_t
*processor_list
;
293 host_name_port_t host
;
294 struct processor_basic_info processor_basic_info
;
295 mach_msg_type_number_t cpu_count
;
296 mach_msg_type_number_t data_count
;
300 getcommptr(activeaddr
, _COMM_PAGE_ACTIVE_CPUS
);
301 original_activecpu
= *activeaddr
;
303 host
= mach_host_self();
304 ret
= host_processors(host
,
305 (processor_array_t
*) &processor_list
, &cpu_count
);
306 if (ret
!= KERN_SUCCESS
) {
307 mach_error("host_processors()", ret
);
311 /* skip master processor */
312 for (i
= 1; i
< cpu_count
; i
++) {
313 data_count
= PROCESSOR_BASIC_INFO_COUNT
;
314 ret
= processor_info(processor_list
[i
], PROCESSOR_BASIC_INFO
,
316 (processor_info_t
) &processor_basic_info
,
318 if (ret
!= KERN_SUCCESS
) {
319 if (ret
== MACH_SEND_INVALID_DEST
) {
322 mach_error("processor_info", ret
);
326 if (processor_basic_info
.running
) {
328 ret
= processor_exit(processor_list
[i
]);
329 if (ret
!= KERN_SUCCESS
) {
330 mach_error("processor_exit()", ret
);
336 if (*activeaddr
!= (original_activecpu
- 1)) {
340 ret
= processor_start(processor_list
[i
]);
341 if (ret
!= KERN_SUCCESS
) {
342 mach_error("processor_exit()", ret
);
353 warnx("_COMM_PAGE_ACTIVE_CPUS not updated after disabling a CPU");
357 if (*activeaddr
!= original_activecpu
) {
358 warnx("_COMM_PAGE_ACTIVE_CPUS not restored to original value");