/*
 * Copyright (c) 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/host.h>
#include <kern/macro_help.h>
#include <kern/sched.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/thread_call.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <tests/ktest.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <kern/kern_cdata.h>
#include <machine/lowglobals.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <kern/priority_queue.h>

#if !(DEVELOPMENT || DEBUG)
#error "Testing is not enabled on RELEASE configurations"
#endif

#include <tests/xnupost.h>

extern boolean_t get_range_bounds(char * c, int64_t * lower, int64_t * upper);
__private_extern__ void qsort(void * a, size_t n, size_t es, int (*cmp)(const void *, const void *));

uint32_t total_post_tests_count = 0;
void xnupost_reset_panic_widgets(void);

/* test declarations */
kern_return_t zalloc_test(void);
kern_return_t RandomULong_test(void);
kern_return_t kcdata_api_test(void);
kern_return_t priority_queue_test(void);

#if defined(__arm__) || defined(__arm64__)
kern_return_t pmap_coredump_test(void);
#endif

extern kern_return_t console_serial_test(void);
extern kern_return_t console_serial_alloc_rel_tests(void);
extern kern_return_t console_serial_parallel_log_tests(void);
extern kern_return_t test_os_log(void);
extern kern_return_t test_os_log_parallel(void);
extern kern_return_t bitmap_post_test(void);

#ifdef __arm64__
extern kern_return_t arm64_munger_test(void);
extern kern_return_t ex_cb_test(void);
#if __ARM_PAN_AVAILABLE__
extern kern_return_t arm64_pan_test(void);
#endif
#endif /* __arm64__ */

extern kern_return_t test_thread_call(void);

struct xnupost_panic_widget xt_panic_widgets = {NULL, NULL, NULL, NULL};

struct xnupost_test kernel_post_tests[] = {
    XNUPOST_TEST_CONFIG_BASIC(zalloc_test),
    XNUPOST_TEST_CONFIG_BASIC(RandomULong_test),
    XNUPOST_TEST_CONFIG_BASIC(test_os_log),
    XNUPOST_TEST_CONFIG_BASIC(test_os_log_parallel),
#ifdef __arm64__
    XNUPOST_TEST_CONFIG_BASIC(arm64_munger_test),
    XNUPOST_TEST_CONFIG_BASIC(ex_cb_test),
#if __ARM_PAN_AVAILABLE__
    XNUPOST_TEST_CONFIG_BASIC(arm64_pan_test),
#endif
#endif /* __arm64__ */
    XNUPOST_TEST_CONFIG_BASIC(kcdata_api_test),
    XNUPOST_TEST_CONFIG_BASIC(console_serial_test),
    XNUPOST_TEST_CONFIG_BASIC(console_serial_alloc_rel_tests),
    XNUPOST_TEST_CONFIG_BASIC(console_serial_parallel_log_tests),
#if defined(__arm__) || defined(__arm64__)
    XNUPOST_TEST_CONFIG_BASIC(pmap_coredump_test),
#endif
    XNUPOST_TEST_CONFIG_BASIC(bitmap_post_test),
    //XNUPOST_TEST_CONFIG_TEST_PANIC(kcdata_api_assert_tests)
    XNUPOST_TEST_CONFIG_BASIC(test_thread_call),
    XNUPOST_TEST_CONFIG_BASIC(priority_queue_test),
};

uint32_t kernel_post_tests_count = sizeof(kernel_post_tests) / sizeof(xnupost_test_data_t);

#define POSTARGS_RUN_TESTS 0x1
#define POSTARGS_CONTROLLER_AVAILABLE 0x2
#define POSTARGS_CUSTOM_TEST_RUNLIST 0x4
uint64_t kernel_post_args = 0x0;

/* static variables to hold state */
static kern_return_t parse_config_retval = KERN_INVALID_CAPABILITY;
static char kernel_post_test_configs[256];
boolean_t xnupost_should_run_test(uint32_t test_num);

kern_return_t
xnupost_parse_config()
{
    if (parse_config_retval != KERN_INVALID_CAPABILITY)
        return parse_config_retval;
    PE_parse_boot_argn("kernPOST", &kernel_post_args, sizeof(kernel_post_args));

    if (PE_parse_boot_argn("kernPOST_config", &kernel_post_test_configs[0], sizeof(kernel_post_test_configs)) == TRUE) {
        kernel_post_args |= POSTARGS_CUSTOM_TEST_RUNLIST;
    }

    if (kernel_post_args != 0) {
        parse_config_retval = KERN_SUCCESS;
        goto out;
    }
    parse_config_retval = KERN_NOT_SUPPORTED;
out:
    return parse_config_retval;
}
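
/*
 * Usage sketch (illustrative; the numbers are made up): the POSTARGS_* bits
 * above are consumed from the "kernPOST" boot-arg, and "kernPOST_config"
 * supplies an optional comma-separated runlist parsed by get_range_bounds().
 * For example, booting with
 *
 *   kernPOST=0x3 kernPOST_config="1-5,9"
 *
 * would request that tests run (POSTARGS_RUN_TESTS), mark a controller as
 * available (POSTARGS_CONTROLLER_AVAILABLE), and restrict the run to test
 * numbers 1 through 5 and 9.
 */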

boolean_t
xnupost_should_run_test(uint32_t test_num)
{
    if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
        int64_t begin = 0, end = 999999;
        char * b = kernel_post_test_configs;
        while (*b) {
            get_range_bounds(b, &begin, &end);
            if (test_num >= begin && test_num <= end) {
                return TRUE;
            }

            /* skip to the next "," */
            while (*b != ',') {
                if (*b == '\0')
                    return FALSE;
                b++;
            }
            /* skip past the ',' */
            b++;
        }
        return FALSE;
    }
    return TRUE;
}
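
/*
 * Worked example (made-up values, assuming get_range_bounds() parses
 * "lo-hi" ranges): with kernPOST_config="1-5,9", xnupost_should_run_test(3)
 * matches the "1-5" range and returns TRUE, while xnupost_should_run_test(7)
 * scans past "1-5" to "9", finds no match, and returns FALSE. Without
 * POSTARGS_CUSTOM_TEST_RUNLIST set, every test number is allowed to run.
 */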

kern_return_t
xnupost_list_tests(xnupost_test_t test_list, uint32_t test_count)
{
    if (KERN_SUCCESS != xnupost_parse_config())
        return KERN_FAILURE;

    xnupost_test_t testp;
    for (uint32_t i = 0; i < test_count; i++) {
        testp = &test_list[i];
        if (testp->xt_test_num == 0) {
            testp->xt_test_num = ++total_post_tests_count;
        }
        /* make sure the boot-arg based test run list is honored */
        if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
            testp->xt_config |= XT_CONFIG_IGNORE;
            if (xnupost_should_run_test(testp->xt_test_num)) {
                testp->xt_config &= ~(XT_CONFIG_IGNORE);
                testp->xt_config |= XT_CONFIG_RUN;
                printf("\n[TEST] #%u is selected to run by the custom runlist", testp->xt_test_num);
            }
        }
        printf("\n[TEST] TOC#%u name: %s expected: %d config: %x\n", testp->xt_test_num, testp->xt_name, testp->xt_expected_retval,
            testp->xt_config);
    }

    return KERN_SUCCESS;
}

kern_return_t
xnupost_run_tests(xnupost_test_t test_list, uint32_t test_count)
{
    uint32_t i = 0;
    int retval = KERN_SUCCESS;

    if ((kernel_post_args & POSTARGS_RUN_TESTS) == 0) {
        printf("No POST boot-arg set.\n");
        return retval;
    }

    T_START;
    xnupost_test_t testp;
    for (; i < test_count; i++) {
        xnupost_reset_panic_widgets();
        testp = &test_list[i];
        T_BEGIN(testp->xt_name);
        testp->xt_begin_time = mach_absolute_time();
        testp->xt_end_time = testp->xt_begin_time;

        /*
         * If the test is designed to panic and no controller
         * is available, mark it as SKIPPED.
         */
        if ((testp->xt_config & XT_CONFIG_EXPECT_PANIC) && !(kernel_post_args & POSTARGS_CONTROLLER_AVAILABLE)) {
            T_SKIP(
                "Test expects panic but "
                "no controller is present");
            testp->xt_test_actions = XT_ACTION_SKIPPED;
            continue;
        }

        if ((testp->xt_config & XT_CONFIG_IGNORE)) {
            T_SKIP("Test is marked as XT_CONFIG_IGNORE");
            testp->xt_test_actions = XT_ACTION_SKIPPED;
            continue;
        }

        testp->xt_func();
        T_END;
        testp->xt_retval = T_TESTRESULT;
        testp->xt_end_time = mach_absolute_time();
        if (testp->xt_retval == testp->xt_expected_retval) {
            testp->xt_test_actions = XT_ACTION_PASSED;
        } else {
            testp->xt_test_actions = XT_ACTION_FAILED;
        }
    }
    T_FINISH;
    return retval;
}

kern_return_t
kernel_list_tests()
{
    return xnupost_list_tests(kernel_post_tests, kernel_post_tests_count);
}

kern_return_t
kernel_do_post()
{
    return xnupost_run_tests(kernel_post_tests, kernel_post_tests_count);
}

kern_return_t
xnupost_register_panic_widget(xt_panic_widget_func funcp, const char * funcname, void * context, void ** outval)
{
    if (xt_panic_widgets.xtp_context_p != NULL || xt_panic_widgets.xtp_func != NULL)
        return KERN_RESOURCE_SHORTAGE;

    xt_panic_widgets.xtp_context_p = context;
    xt_panic_widgets.xtp_func = funcp;
    xt_panic_widgets.xtp_func_name = funcname;
    xt_panic_widgets.xtp_outval_p = outval;

    return KERN_SUCCESS;
}

void
xnupost_reset_panic_widgets()
{
    bzero(&xt_panic_widgets, sizeof(xt_panic_widgets));
}

kern_return_t
xnupost_process_kdb_stop(const char * panic_s)
{
    xt_panic_return_t retval = 0;
    struct xnupost_panic_widget * pw = &xt_panic_widgets;
    const char * name = "unknown";
    if (xt_panic_widgets.xtp_func_name) {
        name = xt_panic_widgets.xtp_func_name;
    }

    /* bail out early if kernPOST is not set */
    if (kernel_post_args == 0) {
        return KERN_INVALID_CAPABILITY;
    }

    if (xt_panic_widgets.xtp_func) {
        T_LOG("%s: Calling out to widget: %s", __func__, xt_panic_widgets.xtp_func_name);
        retval = pw->xtp_func(panic_s, pw->xtp_context_p, pw->xtp_outval_p);
    } else {
        return KERN_INVALID_CAPABILITY;
    }

    switch (retval) {
    case XT_RET_W_SUCCESS:
        T_EXPECT_EQ_INT(retval, XT_RET_W_SUCCESS, "%s reported successful handling. Returning from kdb_stop.", name);
        /* KERN_SUCCESS means return from panic/assertion */
        return KERN_SUCCESS;

    case XT_RET_W_FAIL:
        T_FAIL("%s reported XT_RET_W_FAIL: Returning from kdb_stop", name);
        return KERN_SUCCESS;

    case XT_PANIC_W_FAIL:
        T_FAIL("%s reported XT_PANIC_W_FAIL: Continuing to kdb_stop", name);
        return KERN_FAILURE;

    case XT_PANIC_W_SUCCESS:
        T_EXPECT_EQ_INT(retval, XT_PANIC_W_SUCCESS, "%s reported successful testcase. But continuing to kdb_stop.", name);
        return KERN_FAILURE;

    case XT_PANIC_UNRELATED:
    default:
        T_LOG("UNRELATED: Continuing to kdb_stop.");
        return KERN_FAILURE;
    }
}

xt_panic_return_t
_xt_generic_assert_check(const char * s, void * str_to_match, void ** outval)
{
    xt_panic_return_t ret = XT_PANIC_UNRELATED;

    if (NULL != strnstr(__DECONST(char *, s), (char *)str_to_match, strlen(s))) {
        T_LOG("%s: kdb_stop string: '%s' MATCHED string: '%s'", __func__, s, (char *)str_to_match);
        ret = XT_RET_W_SUCCESS;
    }

    if (outval)
        *outval = (void *)(uintptr_t)ret;
    return ret;
}

kern_return_t
xnupost_reset_tests(xnupost_test_t test_list, uint32_t test_count)
{
    uint32_t i = 0;
    xnupost_test_t testp;
    for (; i < test_count; i++) {
        testp = &test_list[i];
        testp->xt_begin_time = 0;
        testp->xt_end_time = 0;
        testp->xt_test_actions = XT_ACTION_NONE;
        testp->xt_retval = -1;
    }
    return KERN_SUCCESS;
}

kern_return_t
zalloc_test()
{
    zone_t test_zone;
    void * test_ptr;

    T_SETUPBEGIN;
    test_zone = zinit(sizeof(uint64_t), 100 * sizeof(uint64_t), sizeof(uint64_t), "test_uint64_zone");
    T_ASSERT_NOTNULL(test_zone, NULL);

    T_ASSERT_EQ_INT(zone_free_count(test_zone), 0, NULL);
    T_SETUPEND;

    T_ASSERT_NOTNULL(test_ptr = zalloc(test_zone), NULL);

    zfree(test_zone, test_ptr);

    /* A sample report for perfdata */
    T_PERF("num_threads_at_ktest", threads_count, "count", "# of threads in system at zalloc_test");

    return KERN_SUCCESS;
}

/*
 * Function used for comparison by qsort()
 */
static int
compare_numbers_ascending(const void * a, const void * b)
{
    const uint64_t x = *(const uint64_t *)a;
    const uint64_t y = *(const uint64_t *)b;
    if (x < y) {
        return -1;
    } else if (x > y) {
        return 1;
    } else {
        return 0;
    }
}

/*
 * Function used for comparison by qsort()
 */
static int
compare_numbers_descending(const void * a, const void * b)
{
    const uint32_t x = *(const uint32_t *)a;
    const uint32_t y = *(const uint32_t *)b;
    if (x > y) {
        return -1;
    } else if (x < y) {
        return 1;
    } else {
        return 0;
    }
}

/* Node structure for the priority queue tests */
struct priority_queue_test_node {
    struct priority_queue_entry link;
    priority_queue_key_t node_key;
};
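
/*
 * Usage sketch (values made up; the API names are the ones used in the test
 * below): the queue links nodes through the embedded 'link' entry, and the
 * *_max() accessors recover the containing node from that entry:
 *
 *   struct priority_queue_test_node *n = kalloc(sizeof(*n));
 *   priority_queue_entry_init(&n->link);
 *   n->node_key = 20;
 *   priority_queue_insert(&pq, &n->link, 20, cmp_fn);
 *   n = priority_queue_max(&pq, struct priority_queue_test_node, link);
 *
 * For PRIORITY_QUEUE_GENERIC_KEY queues the inserted key is
 * PRIORITY_QUEUE_KEY_NONE and ordering comes from the comparator instead.
 */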

static void
priority_queue_test_queue(struct priority_queue *pq, int type,
    priority_queue_compare_fn_t cmp_fn)
{
    /* Configuration for the test */
#define PRIORITY_QUEUE_NODES 7
    static uint32_t priority_list[] = { 20, 3, 7, 6, 50, 2, 8 };
    uint32_t increase_pri = 100;
    uint32_t decrease_pri = 90;
    struct priority_queue_test_node *result;
    uint32_t key = 0;
    boolean_t update_result = false;

    struct priority_queue_test_node *node = NULL;
    /* Add all priorities to the priority queue */
    for (int i = 0; i < PRIORITY_QUEUE_NODES; i++) {
        node = kalloc(sizeof(struct priority_queue_test_node));
        T_ASSERT_NOTNULL(node, NULL);

        priority_queue_entry_init(&(node->link));
        node->node_key = priority_list[i];
        key = (type == PRIORITY_QUEUE_GENERIC_KEY) ? PRIORITY_QUEUE_KEY_NONE : priority_list[i];
        priority_queue_insert(pq, &(node->link), key, cmp_fn);
    }

    T_ASSERT_NOTNULL(node, NULL);
    key = (type == PRIORITY_QUEUE_GENERIC_KEY) ? node->node_key : priority_queue_entry_key(pq, &(node->link));
    T_ASSERT((key == node->node_key), "verify node stored key correctly");

    /* Test the priority increase operation by updating the last node added (8) */
    T_ASSERT_NOTNULL(node, NULL);
    node->node_key = increase_pri;
    key = (type == PRIORITY_QUEUE_GENERIC_KEY) ? PRIORITY_QUEUE_KEY_NONE : node->node_key;
    update_result = priority_queue_entry_increase(pq, &node->link, key, cmp_fn);
    T_ASSERT((update_result == true), "increase key updated root");
    result = priority_queue_max(pq, struct priority_queue_test_node, link);
    T_ASSERT((result->node_key == increase_pri), "verify priority_queue_entry_increase() operation");

    /* Test the priority decrease operation by updating the last node added */
    T_ASSERT((result == node), NULL);
    node->node_key = decrease_pri;
    key = (type == PRIORITY_QUEUE_GENERIC_KEY) ? PRIORITY_QUEUE_KEY_NONE : node->node_key;
    update_result = priority_queue_entry_decrease(pq, &node->link, key, cmp_fn);
    T_ASSERT((update_result == true), "decrease key updated root");
    result = priority_queue_max(pq, struct priority_queue_test_node, link);
    T_ASSERT((result->node_key == decrease_pri), "verify priority_queue_entry_decrease() operation");

    /* Update our local priority list as well */
    priority_list[PRIORITY_QUEUE_NODES - 1] = decrease_pri;

    /* Sort the local list in descending order */
    qsort(priority_list, PRIORITY_QUEUE_NODES, sizeof(priority_list[0]), compare_numbers_descending);

    /* Test the maximum operation by comparing the max node with the local list */
    result = priority_queue_max(pq, struct priority_queue_test_node, link);
    T_ASSERT((result->node_key == priority_list[0]), "(heap (%u) == qsort (%u)) priority queue max node lookup",
        (uint32_t)result->node_key, priority_list[0]);

    /* Remove all remaining elements and verify they match the local list */
    for (int i = 0; i < PRIORITY_QUEUE_NODES; i++) {
        result = priority_queue_remove_max(pq, struct priority_queue_test_node, link, cmp_fn);
        T_ASSERT((result->node_key == priority_list[i]), "(heap (%u) == qsort (%u)) priority queue max node removal",
            (uint32_t)result->node_key, priority_list[i]);
    }

    priority_queue_destroy(pq, struct priority_queue_test_node, link, ^(void *n) {
        kfree(n, sizeof(struct priority_queue_test_node));
    });
}

kern_return_t
priority_queue_test(void)
{
    /*
     * Initialize two priority queues:
     * - one that uses the built-in key for comparison
     * - one that uses a node-based comparator
     */
    static struct priority_queue pq;
    static struct priority_queue pq_nodes;

    T_SETUPBEGIN;

    priority_queue_init(&pq, PRIORITY_QUEUE_BUILTIN_KEY | PRIORITY_QUEUE_MAX_HEAP);
    priority_queue_init(&pq_nodes, PRIORITY_QUEUE_GENERIC_KEY | PRIORITY_QUEUE_MAX_HEAP);

    T_SETUPEND;

    priority_queue_test_queue(&pq, PRIORITY_QUEUE_BUILTIN_KEY,
        PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE);

    priority_queue_test_queue(&pq_nodes, PRIORITY_QUEUE_GENERIC_KEY,
        priority_heap_make_comparator(a, b, struct priority_queue_test_node, link, {
            return (a->node_key > b->node_key) ? 1 : ((a->node_key == b->node_key) ? 0 : -1);
        }));

    return KERN_SUCCESS;
}

/*
 * Function to count the number of bits set in a 64-bit value,
 * implemented with the compiler's popcount builtin.
 */
static int
count_bits(uint64_t number)
{
    return __builtin_popcountll(number);
}
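
/*
 * Example (illustrative): count_bits(0xF0) == 4. The entropy checks below use
 * count_bits(x ^ y) as the Hamming distance between consecutive random
 * samples x and y.
 */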

kern_return_t
RandomULong_test()
{
    /*
     * Randomness test for RandomULong()
     *
     * This test verifies that:
     * a. RandomULong works
     * b. The generated numbers match the following entropy criteria:
     *    For a thousand iterations, verify:
     *    1. mean entropy > 12 bits
     *    2. min entropy > 4 bits
     *    3. no duplicates
     *    4. no incremental/decremental pattern in a window of 3
     *    5. no zeros
     *    6. no -1s
     *
     * <rdar://problem/22526137> Add test to increase code coverage for /dev/random
     */

#define CONF_MIN_ENTROPY 4
#define CONF_MEAN_ENTROPY 12
#define CONF_ITERATIONS 1000
#define CONF_WINDOW_SIZE 3
#define CONF_WINDOW_TREND_LIMIT ((CONF_WINDOW_SIZE / 2) + (CONF_WINDOW_SIZE & 1)) >> 0
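
    /*
     * Arithmetic note: CONF_WINDOW_TREND_LIMIT computes ceil(CONF_WINDOW_SIZE / 2);
     * with CONF_WINDOW_SIZE == 3 that is 3/2 + (3 & 1) == 1 + 1 == 2, so the trend
     * check below asserts that |trend| within a window never exceeds 2.
     * (The trailing ">> 0" is a no-op.)
     */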

    int i;
    uint32_t min_bit_entropy, max_bit_entropy, bit_entropy;
    uint32_t aggregate_bit_entropy = 0;
    uint32_t mean_bit_entropy = 0;
    uint64_t numbers[CONF_ITERATIONS];
    min_bit_entropy = UINT32_MAX;
    max_bit_entropy = 0;

    /*
     * TEST 1: Number generation and basic validation
     * Check for zero (no bits set), -1 (all bits set) and errors
     */
    for (i = 0; i < CONF_ITERATIONS; i++) {
        read_random(&numbers[i], sizeof(numbers[i]));
        if (numbers[i] == 0) {
            T_ASSERT_NE_ULLONG(numbers[i], 0, "read_random returned zero value.");
        }
        if (numbers[i] == UINT64_MAX) {
            T_ASSERT_NE_ULLONG(numbers[i], UINT64_MAX, "read_random returned -1.");
        }
    }
    T_PASS("Generated %d non-zero random numbers with at least one bit reset.", CONF_ITERATIONS);

    /*
     * TEST 2: Mean and Min Bit Entropy
     * Check the bit entropy and its mean over the generated numbers.
     */
    for (i = 1; i < CONF_ITERATIONS; i++) {
        bit_entropy = count_bits(numbers[i - 1] ^ numbers[i]);
        if (bit_entropy < min_bit_entropy)
            min_bit_entropy = bit_entropy;
        if (bit_entropy > max_bit_entropy)
            max_bit_entropy = bit_entropy;

        if (bit_entropy < CONF_MIN_ENTROPY) {
            T_EXPECT_GE_UINT(bit_entropy, CONF_MIN_ENTROPY,
                "Number of differing bits in consecutive numbers does not satisfy the min criteria.");
        }

        aggregate_bit_entropy += bit_entropy;
    }
    T_PASS("Passed the min bit entropy expectation of %d bits", CONF_MIN_ENTROPY);

    mean_bit_entropy = aggregate_bit_entropy / CONF_ITERATIONS;
    T_EXPECT_GE_UINT(mean_bit_entropy, CONF_MEAN_ENTROPY, "Test criteria for mean number of differing bits.");
    T_PASS("Mean bit entropy criteria satisfied (Required %d, Actual: %d).", CONF_MEAN_ENTROPY, mean_bit_entropy);
    T_LOG("{PERFORMANCE} iterations: %d, min_bit_entropy: %d, mean_bit_entropy: %d, max_bit_entropy: %d", CONF_ITERATIONS,
        min_bit_entropy, mean_bit_entropy, max_bit_entropy);
    T_PERF("min_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), min_bit_entropy, "bits", "minimum bit entropy in RNG. High is better");
    T_PERF("mean_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), mean_bit_entropy, "bits", "mean bit entropy in RNG. High is better");
    T_PERF("max_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), max_bit_entropy, "bits", "max bit entropy in RNG. High is better");

    /*
     * TEST 3: Incremental Pattern Search
     * Check that no incremental/decremental pattern exists in the given window
     */
    int window_start, window_end, trend;
    window_start = window_end = trend = 0;

    do {
        /*
         * Set the window
         */
        window_end = window_start + CONF_WINDOW_SIZE - 1;
        if (window_end >= CONF_ITERATIONS)
            window_end = CONF_ITERATIONS - 1;

        trend = 0;
        for (i = window_start; i < window_end; i++) {
            if (numbers[i] < numbers[i + 1])
                trend++;
            else if (numbers[i] > numbers[i + 1])
                trend--;
        }
        /*
         * Check that there is no increasing or decreasing trend,
         * i.e. |trend| <= ceil(window_size / 2)
         */
        if (trend < 0) {
            trend = -trend;
        }
        if (trend > CONF_WINDOW_TREND_LIMIT) {
            T_ASSERT_LE_INT(trend, CONF_WINDOW_TREND_LIMIT, "Found increasing/decreasing trend in random numbers.");
        }

        /*
         * Move to the next window
         */
        window_start++;

    } while (window_start < (CONF_ITERATIONS - 1));
    T_PASS("Did not find increasing/decreasing trends in a window of %d numbers.", CONF_WINDOW_SIZE);

    /*
     * TEST 4: Find Duplicates
     * Check that no duplicate values were generated
     */
    qsort(numbers, CONF_ITERATIONS, sizeof(numbers[0]), compare_numbers_ascending);
    for (i = 1; i < CONF_ITERATIONS; i++) {
        if (numbers[i] == numbers[i - 1]) {
            T_ASSERT_NE_ULLONG(numbers[i], numbers[i - 1], "read_random generated duplicate values.");
        }
    }
    T_PASS("Test did not find any duplicates as expected.");

    return KERN_SUCCESS;
}

/* KCDATA kernel api tests */
static struct kcdata_descriptor test_kc_data; //, test_kc_data2;
struct sample_disk_io_stats {
    uint64_t disk_reads_count;
    uint64_t disk_reads_size;
    uint64_t io_priority_count[4];
    uint64_t io_priority_size;
} __attribute__((packed));

struct kcdata_subtype_descriptor test_disk_io_stats_def[] = {
    {KCS_SUBTYPE_FLAGS_NONE, KC_ST_UINT64, 0 * sizeof(uint64_t), sizeof(uint64_t), "disk_reads_count"},
    {KCS_SUBTYPE_FLAGS_NONE, KC_ST_UINT64, 1 * sizeof(uint64_t), sizeof(uint64_t), "disk_reads_size"},
    {KCS_SUBTYPE_FLAGS_ARRAY, KC_ST_UINT64, 2 * sizeof(uint64_t), KCS_SUBTYPE_PACK_SIZE(4, sizeof(uint64_t)), "io_priority_count"},
    {KCS_SUBTYPE_FLAGS_ARRAY, KC_ST_UINT64, (2 + 4) * sizeof(uint64_t), sizeof(uint64_t), "io_priority_size"},
};
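
/*
 * Layout note (derived from the definitions above): each descriptor entry is
 * (flags, element type, byte offset, size, name), so the entries mirror
 * struct sample_disk_io_stats exactly: disk_reads_count at offset 0,
 * disk_reads_size at 8, the four-element io_priority_count array at 16,
 * and io_priority_size at 48.
 */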

kern_return_t
kcdata_api_test()
{
    kern_return_t retval = KERN_SUCCESS;

    /* test for NULL input */
    retval = kcdata_memory_static_init(NULL, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_STACKSHOT, 100, KCFLAG_USE_MEMCOPY);
    T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_memory_static_init with NULL struct");

    /* another negative test with buffer size < 32 bytes */
    char data[30] = "sample_disk_io_stats";
    retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)&data, KCDATA_BUFFER_BEGIN_CRASHINFO, sizeof(data),
        KCFLAG_USE_MEMCOPY);
    T_ASSERT(retval == KERN_RESOURCE_SHORTAGE, "init with 30 bytes failed as expected with KERN_RESOURCE_SHORTAGE");

    /* test with COPYOUT for 0x0 address. Should return KERN_NO_ACCESS */
    retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_CRASHINFO, PAGE_SIZE,
        KCFLAG_USE_COPYOUT);
    T_ASSERT(retval == KERN_NO_ACCESS, "writing to 0x0 returned KERN_NO_ACCESS");

    /* test with successful kcdata_memory_static_init */
    test_kc_data.kcd_length = 0xdeadbeef;
    mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
    T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");

    retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
        KCFLAG_USE_MEMCOPY);

    T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");

    T_ASSERT(test_kc_data.kcd_length == PAGE_SIZE, "kcdata length is set correctly to PAGE_SIZE.");
    T_LOG("addr_begin 0x%llx and end 0x%llx and address 0x%llx", test_kc_data.kcd_addr_begin, test_kc_data.kcd_addr_end, address);
    T_ASSERT(test_kc_data.kcd_addr_begin == address, "kcdata begin address is correct 0x%llx", (uint64_t)address);

    /* verify we have BEGIN and END HEADERS set */
    uint32_t * mem = (uint32_t *)address;
    T_ASSERT(mem[0] == KCDATA_BUFFER_BEGIN_STACKSHOT, "buffer does contain KCDATA_BUFFER_BEGIN_STACKSHOT");
    T_ASSERT(mem[4] == KCDATA_TYPE_BUFFER_END, "KCDATA_TYPE_BUFFER_END is appended as expected");
    T_ASSERT(mem[5] == 0, "size of BUFFER_END tag is zero");

    /* verify kcdata_memory_get_used_bytes() */
    uint64_t bytes_used = 0;
    bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
    T_ASSERT(bytes_used == (2 * sizeof(struct kcdata_item)), "bytes_used api returned expected %llu", bytes_used);

    /* test for kcdata_get_memory_addr() */

    mach_vm_address_t user_addr = 0;
    /* negative test for NULL user_addr AND/OR kcdata_descriptor */
    retval = kcdata_get_memory_addr(NULL, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
    T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL struct -> KERN_INVALID_ARGUMENT");

    retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), NULL);
    T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL user_addr -> KERN_INVALID_ARGUMENT");
    /* successful case with size 0. Yes, this is expected to succeed, as a bare item type can serve as a boolean */
    retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_USECS_SINCE_EPOCH, 0, &user_addr);
    T_ASSERT(retval == KERN_SUCCESS, "Successfully got kcdata entry for 0 size data");
    T_ASSERT(user_addr == test_kc_data.kcd_addr_end, "0 sized data did not add any extra buffer space");

    /* successful case with a valid size */
    user_addr = 0xdeadbeef;
    retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
    T_ASSERT(retval == KERN_SUCCESS, "kcdata_get_memory_addr with valid values succeeded.");
    T_ASSERT(user_addr > test_kc_data.kcd_addr_begin, "user_addr is in range of buffer");
    T_ASSERT(user_addr < test_kc_data.kcd_addr_end, "user_addr is in range of buffer");

    /* try creating an item with a really large size */
    user_addr = 0xdeadbeef;
    bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
    retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, PAGE_SIZE * 4, &user_addr);
    T_ASSERT(retval == KERN_RESOURCE_SHORTAGE, "Allocating entry with size > buffer -> KERN_RESOURCE_SHORTAGE");
    T_ASSERT(user_addr == 0xdeadbeef, "user_addr remained unaffected with failed kcdata_get_memory_addr");
    T_ASSERT(bytes_used == kcdata_memory_get_used_bytes(&test_kc_data), "The data structure should be unaffected");

    /* verify convenience functions for uint32_with_description */
    retval = kcdata_add_uint32_with_description(&test_kc_data, 0xbdc0ffee, "This is bad coffee");
    T_ASSERT(retval == KERN_SUCCESS, "add uint32 with description succeeded.");

    retval = kcdata_add_uint64_with_description(&test_kc_data, 0xf001badc0ffee, "another 8 byte no.");
    T_ASSERT(retval == KERN_SUCCESS, "add uint64 with desc succeeded.");

    /* verify creating a KCDATA_TYPE_ARRAY here */
    user_addr = 0xdeadbeef;
    bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
    /* save the memory address where the array will come up */
    struct kcdata_item * item_p = (struct kcdata_item *)test_kc_data.kcd_addr_end;

    retval = kcdata_get_memory_addr_for_array(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), 20, &user_addr);
    T_ASSERT(retval == KERN_SUCCESS, "Array of 20 integers should be possible");
    T_ASSERT(user_addr != 0xdeadbeef, "user_addr is updated as expected");
    T_ASSERT((kcdata_memory_get_used_bytes(&test_kc_data) - bytes_used) >= 20 * sizeof(uint64_t), "memory allocation is in range");
    kcdata_iter_t iter = kcdata_iter(item_p, PAGE_SIZE - kcdata_memory_get_used_bytes(&test_kc_data));
    T_ASSERT(kcdata_iter_array_elem_count(iter) == 20, "array count is 20");

    /* FIXME add tests here for ranges of sizes and counts */

    T_ASSERT(item_p->flags == (((uint64_t)KCDATA_TYPE_MACH_ABSOLUTE_TIME << 32) | 20), "flags are set correctly");

    /* test adding of a custom type */

    retval = kcdata_add_type_definition(&test_kc_data, 0x999, data, &test_disk_io_stats_def[0],
        sizeof(test_disk_io_stats_def) / sizeof(struct kcdata_subtype_descriptor));
    T_ASSERT(retval == KERN_SUCCESS, "adding custom type succeeded.");

    return KERN_SUCCESS;
}

/*
kern_return_t
kcdata_api_assert_tests()
{
    kern_return_t retval = 0;
    void * assert_check_retval = NULL;
    test_kc_data2.kcd_length = 0xdeadbeef;
    mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
    T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");

    retval = kcdata_memory_static_init(&test_kc_data2, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
        KCFLAG_USE_MEMCOPY);

    T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");

    retval = T_REGISTER_ASSERT_CHECK("KCDATA_DESC_MAXLEN", &assert_check_retval);
    T_ASSERT(retval == KERN_SUCCESS, "registered assert widget");

    // this will assert
    retval = kcdata_add_uint32_with_description(&test_kc_data2, 0xc0ffee, "really long description string for kcdata");
    T_ASSERT(retval == KERN_INVALID_ARGUMENT, "API param check returned KERN_INVALID_ARGUMENT correctly");
    T_ASSERT(assert_check_retval == (void *)XT_RET_W_SUCCESS, "assertion handler verified that it was hit");

    return KERN_SUCCESS;
}
*/

#if defined(__arm__) || defined(__arm64__)

#include <arm/pmap.h>

#define MAX_PMAP_OBJECT_ELEMENT 100000

extern struct vm_object pmap_object_store; /* store pt pages */
extern unsigned long gPhysBase, gPhysSize, first_avail;

/*
 * Define macros to traverse the pmap object structures and extract
 * physical page numbers using information from the low globals only.
 * This emulates how Astris extracts information from a coredump.
 */
#if defined(__arm64__)

static inline uintptr_t
astris_vm_page_unpack_ptr(uintptr_t p)
{
    if (!p)
        return ((uintptr_t)0);

    return (p & lowGlo.lgPmapMemFromArrayMask)
        ? lowGlo.lgPmapMemStartAddr + (p & ~(lowGlo.lgPmapMemFromArrayMask)) * lowGlo.lgPmapMemPagesize
        : lowGlo.lgPmapMemPackedBaseAddr + (p << lowGlo.lgPmapMemPackedShift);
}
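
/*
 * Interpretation note (restating the function above): a packed pointer that
 * carries lgPmapMemFromArrayMask is an index into the vm_pages array, so the
 * address is lgPmapMemStartAddr + index * lgPmapMemPagesize; otherwise it is
 * a shifted offset, recovered as lgPmapMemPackedBaseAddr +
 * (p << lgPmapMemPackedShift).
 */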

// assume the next pointer is the first element
#define astris_vm_page_queue_next(qc) (astris_vm_page_unpack_ptr(*((uint32_t *)(qc))))

#endif

#if defined(__arm__)

// assume the next pointer is the first element
#define astris_vm_page_queue_next(qc) *((uintptr_t *)(qc))

#endif

#define astris_vm_page_queue_first(q) astris_vm_page_queue_next(q)

#define astris_vm_page_queue_end(q, qe) ((q) == (qe))

#define astris_vm_page_queue_iterate(head, elt)                                                           \
    for ((elt) = (uintptr_t)astris_vm_page_queue_first((head)); !astris_vm_page_queue_end((head), (elt)); \
        (elt) = (uintptr_t)astris_vm_page_queue_next(((elt) + (uintptr_t)lowGlo.lgPmapMemChainOffset)))

#define astris_ptoa(x) ((vm_address_t)(x) << lowGlo.lgPageShift)

static inline ppnum_t
astris_vm_page_get_phys_page(uintptr_t m)
{
    return (m >= lowGlo.lgPmapMemStartAddr && m < lowGlo.lgPmapMemEndAddr)
        ? (ppnum_t)((m - lowGlo.lgPmapMemStartAddr) / lowGlo.lgPmapMemPagesize + lowGlo.lgPmapMemFirstppnum)
        : *((ppnum_t *)(m + lowGlo.lgPmapMemPageOffset));
}
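
/*
 * Example (restating the logic above): if 'm' falls inside the vm_pages
 * array [lgPmapMemStartAddr, lgPmapMemEndAddr), the physical page number is
 * its array index plus lgPmapMemFirstppnum; otherwise 'm' points at a
 * vm_page_with_ppnum and the ppnum is read from the field at offset
 * lgPmapMemPageOffset.
 */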

kern_return_t
pmap_coredump_test(void)
{
    int iter = 0;
    uintptr_t p;

    T_LOG("Testing coredump info for PMAP.");

    T_ASSERT_GE_ULONG(lowGlo.lgStaticAddr, gPhysBase, NULL);
    T_ASSERT_LE_ULONG(lowGlo.lgStaticAddr + lowGlo.lgStaticSize, first_avail, NULL);
    T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMajorVersion, 3, NULL);
    T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMinorVersion, 0, NULL);
    T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMagic, LOWGLO_LAYOUT_MAGIC, NULL);

    // check the constant values in lowGlo
    T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemQ, ((uint64_t) & (pmap_object_store.memq)), NULL);
    T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPageOffset, offsetof(struct vm_page_with_ppnum, vmp_phys_page), NULL);
    T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemChainOffset, offsetof(struct vm_page, vmp_listq), NULL);
    T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPagesize, sizeof(struct vm_page), NULL);

#if defined(__arm64__)
    T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemFromArrayMask, VM_PACKED_FROM_VM_PAGES_ARRAY, NULL);
    T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedShift, VM_PACKED_POINTER_SHIFT, NULL);
    T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedBaseAddr, VM_MIN_KERNEL_AND_KEXT_ADDRESS, NULL);
#endif

    vm_object_lock_shared(&pmap_object_store);
    astris_vm_page_queue_iterate(lowGlo.lgPmapMemQ, p)
    {
        ppnum_t ppnum = astris_vm_page_get_phys_page(p);
        pmap_paddr_t pa = (pmap_paddr_t)astris_ptoa(ppnum);
        T_ASSERT_GE_ULONG(pa, gPhysBase, NULL);
        T_ASSERT_LT_ULONG(pa, gPhysBase + gPhysSize, NULL);
        iter++;
        T_ASSERT_LT_INT(iter, MAX_PMAP_OBJECT_ELEMENT, NULL);
    }
    vm_object_unlock(&pmap_object_store);

    T_ASSERT_GT_INT(iter, 0, NULL);
    return KERN_SUCCESS;
}
#endif