/*
 * commpage_tests.c
 * xnu_quick_test
 *
 * Copyright 2009 Apple Inc. All rights reserved.
 *
 */

#include "tests.h"
#include <unistd.h>
#include <stdint.h>
#include <err.h>
#include <sys/param.h>
#include <sys/sysctl.h>    /* sysctlbyname(); may already be pulled in via tests.h */
#include <System/machine/cpu_capabilities.h>
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/bootstrap.h>


#ifdef _COMM_PAGE_ACTIVE_CPUS
int active_cpu_test(void);
#endif

int get_sys_uint64(const char *sel, uint64_t *val);
int get_sys_int32(const char *sel, int32_t *val);

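/*
 * getcommptr() casts an absolute commpage address constant (e.g.
 * _COMM_PAGE_NCPUS) to the destination pointer's type so the field can
 * be read directly out of the shared commpage mapping.
 */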
#define getcommptr(var, commpageaddr) do { \
    var = (typeof(var))(uintptr_t)(commpageaddr); \
} while(0)

/*
 * Check some of the data in the commpage
 * against manual sysctls
 */
int commpage_data_tests( void * the_argp )
{
    int ret;
    uint64_t sys_u64;
    int32_t sys_i32;

    volatile uint64_t *comm_u64;
    volatile uint32_t *comm_u32;
    volatile uint16_t *comm_u16;
    volatile uint8_t *comm_u8;


    /* _COMM_PAGE_CPU_CAPABILITIES */
    getcommptr(comm_u32, _COMM_PAGE_CPU_CAPABILITIES);

    ret = get_sys_int32("hw.ncpu", &sys_i32);
    if (ret) goto fail;

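    /*
     * kNumCPUs is a bit field within the capabilities word; mask and
     * shift it down before comparing against the sysctl value.
     */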
    if (sys_i32 != ((*comm_u32 & kNumCPUs) >> kNumCPUsShift)) {
        warnx("kNumCPUs does not match hw.ncpu");
        ret = -1;
        goto fail;
    }

    getcommptr(comm_u8, _COMM_PAGE_NCPUS);
    if (sys_i32 != (*comm_u8)) {
        warnx("_COMM_PAGE_NCPUS does not match hw.ncpu");
        ret = -1;
        goto fail;
    }

    ret = get_sys_int32("hw.logicalcpu", &sys_i32);
    if (ret) goto fail;

    if (sys_i32 != ((*comm_u32 & kNumCPUs) >> kNumCPUsShift)) {
        warnx("kNumCPUs does not match hw.logicalcpu");
        ret = -1;
        goto fail;
    }

    /* Intel only capabilities */
#if defined(__i386__) || defined(__x86_64__)
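    /*
     * Each check below uses !a ^ !b to normalize both sides to 0/1 and
     * fail when exactly one of the sysctl and the commpage flag is set.
     */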
    ret = get_sys_int32("hw.optional.mmx", &sys_i32);
    if (ret) goto fail;

    if (!(sys_i32) ^ !(*comm_u32 & kHasMMX)) {
        warnx("kHasMMX does not match hw.optional.mmx");
        ret = -1;
        goto fail;
    }

    ret = get_sys_int32("hw.optional.sse", &sys_i32);
    if (ret) goto fail;

    if (!(sys_i32) ^ !(*comm_u32 & kHasSSE)) {
        warnx("kHasSSE does not match hw.optional.sse");
        ret = -1;
        goto fail;
    }

    ret = get_sys_int32("hw.optional.sse2", &sys_i32);
    if (ret) goto fail;

    if (!(sys_i32) ^ !(*comm_u32 & kHasSSE2)) {
        warnx("kHasSSE2 does not match hw.optional.sse2");
        ret = -1;
        goto fail;
    }

    ret = get_sys_int32("hw.optional.sse3", &sys_i32);
    if (ret) goto fail;

    if (!(sys_i32) ^ !(*comm_u32 & kHasSSE3)) {
        warnx("kHasSSE3 does not match hw.optional.sse3");
        ret = -1;
        goto fail;
    }

    ret = get_sys_int32("hw.optional.supplementalsse3", &sys_i32);
    if (ret) goto fail;

    if (!(sys_i32) ^ !(*comm_u32 & kHasSupplementalSSE3)) {
        warnx("kHasSupplementalSSE3 does not match hw.optional.supplementalsse3");
        ret = -1;
        goto fail;
    }

    ret = get_sys_int32("hw.optional.sse4_1", &sys_i32);
    if (ret) goto fail;

    if (!(sys_i32) ^ !(*comm_u32 & kHasSSE4_1)) {
        warnx("kHasSSE4_1 does not match hw.optional.sse4_1");
        ret = -1;
        goto fail;
    }

    ret = get_sys_int32("hw.optional.sse4_2", &sys_i32);
    if (ret) goto fail;

    if (!(sys_i32) ^ !(*comm_u32 & kHasSSE4_2)) {
        warnx("kHasSSE4_2 does not match hw.optional.sse4_2");
        ret = -1;
        goto fail;
    }

    ret = get_sys_int32("hw.optional.aes", &sys_i32);
    if (ret) goto fail;

    if (!(sys_i32) ^ !(*comm_u32 & kHasAES)) {
        warnx("kHasAES does not match hw.optional.aes");
        ret = -1;
        goto fail;
    }

    ret = get_sys_int32("hw.optional.x86_64", &sys_i32);
    if (ret) goto fail;

    if (!(sys_i32) ^ !(*comm_u32 & k64Bit)) {
        warnx("k64Bit does not match hw.optional.x86_64");
        ret = -1;
        goto fail;
    }
#endif /* __i386__ || __x86_64__ */

    /* These fields are not implemented for all architectures */
#if defined(_COMM_PAGE_SCHED_GEN) && !TARGET_OS_EMBEDDED
    uint32_t preempt_count1, preempt_count2;
    uint64_t count;

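    /*
     * hw.cpufrequency_max is only used to size the spin loop below to
     * roughly one scheduler quantum's worth of cycles, so the scheduler
     * generation counter has a chance to advance while we busy-wait.
     */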
    ret = get_sys_uint64("hw.cpufrequency_max", &sys_u64);
    if (ret) goto fail;

    getcommptr(comm_u32, _COMM_PAGE_SCHED_GEN);
    preempt_count1 = *comm_u32;
    /* execute for around 1 quantum (10ms) */
    for (count = MAX(10000000ULL, sys_u64/64); count > 0; count--) {
        asm volatile("");
    }
    preempt_count2 = *comm_u32;
    if (preempt_count1 >= preempt_count2) {
        warnx("_COMM_PAGE_SCHED_GEN not incrementing (%u => %u)",
              preempt_count1, preempt_count2);
        ret = -1;
        goto fail;
    }
#endif /* _COMM_PAGE_SCHED_GEN */

#ifdef _COMM_PAGE_ACTIVE_CPUS
    ret = get_sys_int32("hw.activecpu", &sys_i32);
    if (ret) goto fail;

    getcommptr(comm_u8, _COMM_PAGE_ACTIVE_CPUS);
    if (sys_i32 != (*comm_u8)) {
        warnx("_COMM_PAGE_ACTIVE_CPUS does not match hw.activecpu");
        ret = -1;
        goto fail;
    }

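    /*
     * active_cpu_test() (defined below) temporarily offlines one
     * secondary CPU and checks that this commpage count follows.
     */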
    /* We shouldn't be supporting userspace processor_start/processor_exit on embedded */
    ret = active_cpu_test();
    if (ret) goto fail;
#endif /* _COMM_PAGE_ACTIVE_CPUS */

#ifdef _COMM_PAGE_PHYSICAL_CPUS
    ret = get_sys_int32("hw.physicalcpu_max", &sys_i32);
    if (ret) goto fail;

    getcommptr(comm_u8, _COMM_PAGE_PHYSICAL_CPUS);
    if (sys_i32 != (*comm_u8)) {
        warnx("_COMM_PAGE_PHYSICAL_CPUS does not match hw.physicalcpu_max");
        ret = -1;
        goto fail;
    }
#endif /* _COMM_PAGE_PHYSICAL_CPUS */

#ifdef _COMM_PAGE_LOGICAL_CPUS
    ret = get_sys_int32("hw.logicalcpu_max", &sys_i32);
    if (ret) goto fail;

    getcommptr(comm_u8, _COMM_PAGE_LOGICAL_CPUS);
    if (sys_i32 != (*comm_u8)) {
        warnx("_COMM_PAGE_LOGICAL_CPUS does not match hw.logicalcpu_max");
        ret = -1;
        goto fail;
    }
#endif /* _COMM_PAGE_LOGICAL_CPUS */

#if 0
#ifdef _COMM_PAGE_MEMORY_SIZE
    ret = get_sys_uint64("hw.memsize", &sys_u64);
    if (ret) goto fail;

    getcommptr(comm_u64, _COMM_PAGE_MEMORY_SIZE);
    if (sys_u64 != (*comm_u64)) {
        warnx("_COMM_PAGE_MEMORY_SIZE does not match hw.memsize");
        ret = -1;
        goto fail;
    }
#endif /* _COMM_PAGE_MEMORY_SIZE */
#endif

    ret = 0;

fail:

    return ret;
}


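/*
 * Small wrappers around sysctlbyname(3) that fetch a single value and
 * report any error with the selector name attached.
 */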
int get_sys_uint64(const char *sel, uint64_t *val)
{
    size_t size = sizeof(*val);
    int ret;

    ret = sysctlbyname(sel, val, &size, NULL, 0);
    if (ret == -1) {
        warn("sysctlbyname(%s)", sel);
        return ret;
    }

    // warnx("sysctlbyname(%s) => %llx", sel, *val);

    return 0;
}

int get_sys_int32(const char *sel, int32_t *val)
{
    size_t size = sizeof(*val);
    int ret;

    ret = sysctlbyname(sel, val, &size, NULL, 0);
    if (ret == -1) {
        warn("sysctlbyname(%s)", sel);
        return ret;
    }

    // warnx("sysctlbyname(%s) => %x", sel, *val);

    return 0;
}

#ifdef _COMM_PAGE_ACTIVE_CPUS
/*
 * Try to find a secondary processor that we can disable, and make sure
 * the commpage reflects that. This test passes trivially on UP systems,
 * or if all secondary processors have been manually disabled.
 */
int active_cpu_test(void)
{
    volatile uint8_t *activeaddr;
    uint8_t original_activecpu;
    boolean_t test_failed = FALSE;

    /* Code stolen from hostinfo.c */
    kern_return_t ret;
    processor_t *processor_list;
    host_name_port_t host;
    struct processor_basic_info processor_basic_info;
    mach_msg_type_number_t cpu_count;
    mach_msg_type_number_t data_count;
    int i;


    getcommptr(activeaddr, _COMM_PAGE_ACTIVE_CPUS);
    original_activecpu = *activeaddr;

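    /*
     * Enumerate the host's processors; the loop below takes one running
     * secondary CPU offline with processor_exit() and brings it back
     * with processor_start().
     */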
    host = mach_host_self();
    ret = host_processors(host,
                          (processor_array_t *) &processor_list, &cpu_count);
    if (ret != KERN_SUCCESS) {
        mach_error("host_processors()", ret);
        return ret;
    }

    /* skip master processor */
    for (i = 1; i < cpu_count; i++) {
        data_count = PROCESSOR_BASIC_INFO_COUNT;
        ret = processor_info(processor_list[i], PROCESSOR_BASIC_INFO,
                             &host,
                             (processor_info_t) &processor_basic_info,
                             &data_count);
        if (ret != KERN_SUCCESS) {
            if (ret == MACH_SEND_INVALID_DEST) {
                continue;
            }
            mach_error("processor_info", ret);
            return ret;
        }

        if (processor_basic_info.running) {
            /* found victim */
            ret = processor_exit(processor_list[i]);
            if (ret != KERN_SUCCESS) {
                mach_error("processor_exit()", ret);
                return ret;
            }

            sleep(1);

            if (*activeaddr != (original_activecpu - 1)) {
                test_failed = TRUE;
            }

            ret = processor_start(processor_list[i]);
            if (ret != KERN_SUCCESS) {
                mach_error("processor_start()", ret);
                return ret;
            }

            sleep(1);

            break;
        }
    }

    if (test_failed) {
        warnx("_COMM_PAGE_ACTIVE_CPUS not updated after disabling a CPU");
        return -1;
    }

    if (*activeaddr != original_activecpu) {
        warnx("_COMM_PAGE_ACTIVE_CPUS not restored to original value");
        return -1;
    }

    return 0;
}
#endif