/*
 * Copyright (c) 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/host.h>
#include <kern/macro_help.h>
#include <kern/sched.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/thread_call.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <tests/ktest.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <kern/kern_cdata.h>
#include <machine/lowglobals.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <kern/priority_queue.h>

#if !(DEVELOPMENT || DEBUG)
#error "Testing is not enabled on RELEASE configurations"
#endif

#include <tests/xnupost.h>

extern boolean_t get_range_bounds(char * c, int64_t * lower, int64_t * upper);
__private_extern__ void qsort(void * a, size_t n, size_t es, int (*cmp)(const void *, const void *));

uint32_t total_post_tests_count = 0;
void xnupost_reset_panic_widgets(void);

/* test declarations */
kern_return_t zalloc_test(void);
kern_return_t RandomULong_test(void);
kern_return_t kcdata_api_test(void);
kern_return_t priority_queue_test(void);

#if defined(__arm__) || defined(__arm64__)
kern_return_t pmap_coredump_test(void);
#endif

extern kern_return_t console_serial_test(void);
extern kern_return_t console_serial_alloc_rel_tests(void);
extern kern_return_t console_serial_parallel_log_tests(void);
extern kern_return_t test_os_log(void);
extern kern_return_t test_os_log_parallel(void);
extern kern_return_t bitmap_post_test(void);

#ifdef __arm64__
extern kern_return_t arm64_munger_test(void);
extern kern_return_t ex_cb_test(void);
#if __ARM_PAN_AVAILABLE__
extern kern_return_t arm64_pan_test(void);
#endif
#endif /* __arm64__ */

extern kern_return_t test_thread_call(void);

struct xnupost_panic_widget xt_panic_widgets = {NULL, NULL, NULL, NULL};

struct xnupost_test kernel_post_tests[] = {
    XNUPOST_TEST_CONFIG_BASIC(zalloc_test),
    XNUPOST_TEST_CONFIG_BASIC(RandomULong_test),
    XNUPOST_TEST_CONFIG_BASIC(test_os_log),
    XNUPOST_TEST_CONFIG_BASIC(test_os_log_parallel),
#ifdef __arm64__
    XNUPOST_TEST_CONFIG_BASIC(arm64_munger_test),
    XNUPOST_TEST_CONFIG_BASIC(ex_cb_test),
#if __ARM_PAN_AVAILABLE__
    XNUPOST_TEST_CONFIG_BASIC(arm64_pan_test),
#endif
#endif /* __arm64__ */
    XNUPOST_TEST_CONFIG_BASIC(kcdata_api_test),
    XNUPOST_TEST_CONFIG_BASIC(console_serial_test),
    XNUPOST_TEST_CONFIG_BASIC(console_serial_alloc_rel_tests),
    XNUPOST_TEST_CONFIG_BASIC(console_serial_parallel_log_tests),
#if defined(__arm__) || defined(__arm64__)
    XNUPOST_TEST_CONFIG_BASIC(pmap_coredump_test),
#endif
    XNUPOST_TEST_CONFIG_BASIC(bitmap_post_test),
    //XNUPOST_TEST_CONFIG_TEST_PANIC(kcdata_api_assert_tests)
    XNUPOST_TEST_CONFIG_BASIC(test_thread_call),
    XNUPOST_TEST_CONFIG_BASIC(priority_queue_test),
};

uint32_t kernel_post_tests_count = sizeof(kernel_post_tests) / sizeof(xnupost_test_data_t);

#define POSTARGS_RUN_TESTS 0x1
#define POSTARGS_CONTROLLER_AVAILABLE 0x2
#define POSTARGS_CUSTOM_TEST_RUNLIST 0x4
uint64_t kernel_post_args = 0x0;

/* static variables to hold state */
static kern_return_t parse_config_retval = KERN_INVALID_CAPABILITY;
static char kernel_post_test_configs[256];
boolean_t xnupost_should_run_test(uint32_t test_num);

kern_return_t
xnupost_parse_config()
{
    if (parse_config_retval != KERN_INVALID_CAPABILITY) {
        return parse_config_retval;
    }
    PE_parse_boot_argn("kernPOST", &kernel_post_args, sizeof(kernel_post_args));

    if (PE_parse_boot_argn("kernPOST_config", &kernel_post_test_configs[0], sizeof(kernel_post_test_configs)) == TRUE) {
        kernel_post_args |= POSTARGS_CUSTOM_TEST_RUNLIST;
    }

    if (kernel_post_args != 0) {
        parse_config_retval = KERN_SUCCESS;
        goto out;
    }
    parse_config_retval = KERN_NOT_SUPPORTED;
out:
    return parse_config_retval;
}
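
/*
 * Usage sketch (boot-arg values illustrative; flag bits are the
 * POSTARGS_* definitions above). Booting with
 *
 *     kernPOST=0x1
 *
 * runs all POST tests, while
 *
 *     kernPOST=0x3 kernPOST_config="1-4,9"
 *
 * additionally marks a controller as available and restricts the run
 * to a custom test list (POSTARGS_CUSTOM_TEST_RUNLIST is OR'ed in by
 * xnupost_parse_config() when kernPOST_config is present).
 */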

boolean_t
xnupost_should_run_test(uint32_t test_num)
{
    if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
        int64_t begin = 0, end = 999999;
        char * b = kernel_post_test_configs;
        while (*b) {
            get_range_bounds(b, &begin, &end);
            if (test_num >= begin && test_num <= end) {
                return TRUE;
            }

            /* skip to the next "," */
            while (*b != ',') {
                if (*b == '\0') {
                    return FALSE;
                }
                b++;
            }
            /* skip past the ',' */
            b++;
        }
        return FALSE;
    }
    return TRUE;
}
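
/*
 * Worked example (illustrative): with kernPOST_config="1-4,9",
 * get_range_bounds() first yields [1,4], so test numbers 1..4 match
 * and return TRUE. For any other number the scan skips past the ','
 * and yields [9,9], so test 9 matches; everything else runs off the
 * end of the string and returns FALSE.
 */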

kern_return_t
xnupost_list_tests(xnupost_test_t test_list, uint32_t test_count)
{
    if (KERN_SUCCESS != xnupost_parse_config()) {
        return KERN_FAILURE;
    }

    xnupost_test_t testp;
    for (uint32_t i = 0; i < test_count; i++) {
        testp = &test_list[i];
        if (testp->xt_test_num == 0) {
            testp->xt_test_num = ++total_post_tests_count;
        }
        /* make sure the boot-arg based test run list is honored */
        if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
            testp->xt_config |= XT_CONFIG_IGNORE;
            if (xnupost_should_run_test(testp->xt_test_num)) {
                testp->xt_config &= ~(XT_CONFIG_IGNORE);
                testp->xt_config |= XT_CONFIG_RUN;
                printf("\n[TEST] #%u is selected to run", testp->xt_test_num);
            }
        }
        printf("\n[TEST] TOC#%u name: %s expected: %d config: %x\n", testp->xt_test_num, testp->xt_name, testp->xt_expected_retval,
            testp->xt_config);
    }

    return KERN_SUCCESS;
}

kern_return_t
xnupost_run_tests(xnupost_test_t test_list, uint32_t test_count)
{
    uint32_t i = 0;
    int retval = KERN_SUCCESS;

    if ((kernel_post_args & POSTARGS_RUN_TESTS) == 0) {
        printf("No POST boot-arg set.\n");
        return retval;
    }

    T_START;
    xnupost_test_t testp;
    for (; i < test_count; i++) {
        xnupost_reset_panic_widgets();
        testp = &test_list[i];
        T_BEGIN(testp->xt_name);
        testp->xt_begin_time = mach_absolute_time();
        testp->xt_end_time = testp->xt_begin_time;

        /*
         * If a test is designed to panic and no controller
         * is available, mark it as SKIPPED.
         */
        if ((testp->xt_config & XT_CONFIG_EXPECT_PANIC) && !(kernel_post_args & POSTARGS_CONTROLLER_AVAILABLE)) {
            T_SKIP(
                "Test expects panic but "
                "no controller is present");
            testp->xt_test_actions = XT_ACTION_SKIPPED;
            continue;
        }

        if ((testp->xt_config & XT_CONFIG_IGNORE)) {
            T_SKIP("Test is marked as XT_CONFIG_IGNORE");
            testp->xt_test_actions = XT_ACTION_SKIPPED;
            continue;
        }

        testp->xt_func();
        T_END;
        testp->xt_retval = T_TESTRESULT;
        testp->xt_end_time = mach_absolute_time();
        if (testp->xt_retval == testp->xt_expected_retval) {
            testp->xt_test_actions = XT_ACTION_PASSED;
        } else {
            testp->xt_test_actions = XT_ACTION_FAILED;
        }
    }
    T_FINISH;
    return retval;
}

kern_return_t
kernel_list_tests()
{
    return xnupost_list_tests(kernel_post_tests, kernel_post_tests_count);
}

kern_return_t
kernel_do_post()
{
    return xnupost_run_tests(kernel_post_tests, kernel_post_tests_count);
}

kern_return_t
xnupost_register_panic_widget(xt_panic_widget_func funcp, const char * funcname, void * context, void ** outval)
{
    if (xt_panic_widgets.xtp_context_p != NULL || xt_panic_widgets.xtp_func != NULL) {
        return KERN_RESOURCE_SHORTAGE;
    }

    xt_panic_widgets.xtp_context_p = context;
    xt_panic_widgets.xtp_func = funcp;
    xt_panic_widgets.xtp_func_name = funcname;
    xt_panic_widgets.xtp_outval_p = outval;

    return KERN_SUCCESS;
}

void
xnupost_reset_panic_widgets()
{
    bzero(&xt_panic_widgets, sizeof(xt_panic_widgets));
}

kern_return_t
xnupost_process_kdb_stop(const char * panic_s)
{
    xt_panic_return_t retval = 0;
    struct xnupost_panic_widget * pw = &xt_panic_widgets;
    const char * name = "unknown";
    if (xt_panic_widgets.xtp_func_name) {
        name = xt_panic_widgets.xtp_func_name;
    }

    /* bail out early if kernPOST is not set */
    if (kernel_post_args == 0) {
        return KERN_INVALID_CAPABILITY;
    }

    if (xt_panic_widgets.xtp_func) {
        T_LOG("%s: Calling out to widget: %s", __func__, xt_panic_widgets.xtp_func_name);
        retval = pw->xtp_func(panic_s, pw->xtp_context_p, pw->xtp_outval_p);
    } else {
        return KERN_INVALID_CAPABILITY;
    }

    switch (retval) {
    case XT_RET_W_SUCCESS:
        T_EXPECT_EQ_INT(retval, XT_RET_W_SUCCESS, "%s reported successful handling. Returning from kdb_stop.", name);
        /* KERN_SUCCESS means return from panic/assertion */
        return KERN_SUCCESS;

    case XT_RET_W_FAIL:
        T_FAIL("%s reported XT_RET_W_FAIL: Returning from kdb_stop", name);
        return KERN_SUCCESS;

    case XT_PANIC_W_FAIL:
        T_FAIL("%s reported XT_PANIC_W_FAIL: Continuing to kdb_stop", name);
        return KERN_FAILURE;

    case XT_PANIC_W_SUCCESS:
        T_EXPECT_EQ_INT(retval, XT_PANIC_W_SUCCESS, "%s reported successful testcase. But continuing to kdb_stop.", name);
        return KERN_FAILURE;

    case XT_PANIC_UNRELATED:
    default:
        T_LOG("UNRELATED: Continuing to kdb_stop.");
        return KERN_FAILURE;
    }
}

xt_panic_return_t
_xt_generic_assert_check(const char * s, void * str_to_match, void ** outval)
{
    xt_panic_return_t ret = XT_PANIC_UNRELATED;

    if (NULL != strnstr(__DECONST(char *, s), (char *)str_to_match, strlen(s))) {
        T_LOG("%s: kdb_stop string: '%s' MATCHED string: '%s'", __func__, s, (char *)str_to_match);
        ret = XT_RET_W_SUCCESS;
    }

    if (outval) {
        *outval = (void *)(uintptr_t)ret;
    }
    return ret;
}
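
/*
 * Illustrative only: a panic-expecting test could hook this generic
 * matcher before triggering the assertion, mirroring the commented-out
 * kcdata_api_assert_tests() further below:
 *
 *     void * outval = NULL;
 *     xnupost_register_panic_widget(&_xt_generic_assert_check,
 *         "generic_assert_check", (void *)"KCDATA_DESC_MAXLEN", &outval);
 *
 * When the panic string contains the context substring, the widget
 * returns XT_RET_W_SUCCESS and xnupost_process_kdb_stop() returns
 * KERN_SUCCESS, i.e. execution resumes instead of stopping in the
 * debugger.
 */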

kern_return_t
xnupost_reset_tests(xnupost_test_t test_list, uint32_t test_count)
{
    uint32_t i = 0;
    xnupost_test_t testp;
    for (; i < test_count; i++) {
        testp = &test_list[i];
        testp->xt_begin_time = 0;
        testp->xt_end_time = 0;
        testp->xt_test_actions = XT_ACTION_NONE;
        testp->xt_retval = -1;
    }
    return KERN_SUCCESS;
}

kern_return_t
zalloc_test()
{
    zone_t test_zone;
    void * test_ptr;

    T_SETUPBEGIN;
    test_zone = zinit(sizeof(uint64_t), 100 * sizeof(uint64_t), sizeof(uint64_t), "test_uint64_zone");
    T_ASSERT_NOTNULL(test_zone, NULL);

    T_ASSERT_EQ_INT(zone_free_count(test_zone), 0, NULL);
    T_SETUPEND;

    T_ASSERT_NOTNULL(test_ptr = zalloc(test_zone), NULL);

    zfree(test_zone, test_ptr);

    /* A sample report for perfdata */
    T_PERF("num_threads_at_ktest", threads_count, "count", "# of threads in system at zalloc_test");

    return KERN_SUCCESS;
}

/*
 * Function used for comparison by qsort()
 */
static int
compare_numbers_ascending(const void * a, const void * b)
{
    const uint64_t x = *(const uint64_t *)a;
    const uint64_t y = *(const uint64_t *)b;
    if (x < y) {
        return -1;
    } else if (x > y) {
        return 1;
    } else {
        return 0;
    }
}

/*
 * Function used for comparison by qsort()
 */
static int
compare_numbers_descending(const void * a, const void * b)
{
    const uint32_t x = *(const uint32_t *)a;
    const uint32_t y = *(const uint32_t *)b;
    if (x > y) {
        return -1;
    } else if (x < y) {
        return 1;
    } else {
        return 0;
    }
}

/* Node structure for the priority queue tests */
struct priority_queue_test_node {
    struct priority_queue_entry link;
    priority_queue_key_t node_key;
};

static void
priority_queue_test_queue(struct priority_queue *pq, int type,
    priority_queue_compare_fn_t cmp_fn)
{
    /* Configuration for the test */
#define PRIORITY_QUEUE_NODES 7
    static uint32_t priority_list[] = { 20, 3, 7, 6, 50, 2, 8};
    uint32_t increase_pri = 100;
    uint32_t decrease_pri = 90;
    struct priority_queue_test_node *result;
    uint32_t key = 0;
    boolean_t update_result = false;

    struct priority_queue_test_node *node = NULL;
    /* Add all priorities to the first priority queue */
    for (int i = 0; i < PRIORITY_QUEUE_NODES; i++) {
        node = kalloc(sizeof(struct priority_queue_test_node));
        T_ASSERT_NOTNULL(node, NULL);

        priority_queue_entry_init(&(node->link));
        node->node_key = priority_list[i];
        key = (type == PRIORITY_QUEUE_GENERIC_KEY) ? PRIORITY_QUEUE_KEY_NONE : priority_list[i];
        priority_queue_insert(pq, &(node->link), key, cmp_fn);
    }

    T_ASSERT_NOTNULL(node, NULL);
    key = (type == PRIORITY_QUEUE_GENERIC_KEY) ? node->node_key : priority_queue_entry_key(pq, &(node->link));
    T_ASSERT((key == node->node_key), "verify node stored key correctly");

    /* Test the priority increase operation by updating the last node added (8) */
    T_ASSERT_NOTNULL(node, NULL);
    node->node_key = increase_pri;
    key = (type == PRIORITY_QUEUE_GENERIC_KEY) ? PRIORITY_QUEUE_KEY_NONE : node->node_key;
    update_result = priority_queue_entry_increase(pq, &node->link, key, cmp_fn);
    T_ASSERT((update_result == true), "increase key updated root");
    result = priority_queue_max(pq, struct priority_queue_test_node, link);
    T_ASSERT((result->node_key == increase_pri), "verify priority_queue_entry_increase() operation");

    /* Test the priority decrease operation by updating the last node added */
    T_ASSERT((result == node), NULL);
    node->node_key = decrease_pri;
    key = (type == PRIORITY_QUEUE_GENERIC_KEY) ? PRIORITY_QUEUE_KEY_NONE : node->node_key;
    update_result = priority_queue_entry_decrease(pq, &node->link, key, cmp_fn);
    T_ASSERT((update_result == true), "decrease key updated root");
    result = priority_queue_max(pq, struct priority_queue_test_node, link);
    T_ASSERT((result->node_key == decrease_pri), "verify priority_queue_entry_decrease() operation");

    /* Update our local priority list as well */
    priority_list[PRIORITY_QUEUE_NODES - 1] = decrease_pri;

    /* Sort the local list in descending order */
    qsort(priority_list, PRIORITY_QUEUE_NODES, sizeof(priority_list[0]), compare_numbers_descending);

    /* Test the maximum operation by comparing max node with local list */
    result = priority_queue_max(pq, struct priority_queue_test_node, link);
    T_ASSERT((result->node_key == priority_list[0]), "(heap (%u) == qsort (%u)) priority queue max node lookup",
        (uint32_t)result->node_key, priority_list[0]);

    /* Remove all remaining elements and verify they match local list */
    for (int i = 0; i < PRIORITY_QUEUE_NODES; i++) {
        result = priority_queue_remove_max(pq, struct priority_queue_test_node, link, cmp_fn);
        T_ASSERT((result->node_key == priority_list[i]), "(heap (%u) == qsort (%u)) priority queue max node removal",
            (uint32_t)result->node_key, priority_list[i]);
    }

    priority_queue_destroy(pq, struct priority_queue_test_node, link, ^(void *n) {
        kfree(n, sizeof(struct priority_queue_test_node));
    });
}

kern_return_t
priority_queue_test(void)
{
    /*
     * Initialize two priority queues:
     * - one that uses the built-in key comparator
     * - one that uses a caller-supplied node comparator
     */
    static struct priority_queue pq;
    static struct priority_queue pq_nodes;

    T_SETUPBEGIN;

    priority_queue_init(&pq, PRIORITY_QUEUE_BUILTIN_KEY | PRIORITY_QUEUE_MAX_HEAP);
    priority_queue_init(&pq_nodes, PRIORITY_QUEUE_GENERIC_KEY | PRIORITY_QUEUE_MAX_HEAP);

    T_SETUPEND;

    priority_queue_test_queue(&pq, PRIORITY_QUEUE_BUILTIN_KEY,
        PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE);

    priority_queue_test_queue(&pq_nodes, PRIORITY_QUEUE_GENERIC_KEY,
        priority_heap_make_comparator(a, b, struct priority_queue_test_node, link, {
        return (a->node_key > b->node_key) ? 1 : ((a->node_key == b->node_key) ? 0 : -1);
    }));

    return KERN_SUCCESS;
}

/*
 * Count the number of bits set in a 64-bit value
 * (delegates to the compiler's popcount builtin).
 */
static int
count_bits(uint64_t number)
{
    return __builtin_popcountll(number);
}
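
/*
 * For example, count_bits(0xFF) == 8. The entropy checks below use
 * count_bits(a ^ b) as the number of bit positions in which two
 * consecutive random values differ.
 */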

kern_return_t
RandomULong_test()
{
    /*
     * Randomness test for RandomULong()
     *
     * This test verifies that:
     * a. RandomULong works
     * b. The generated numbers satisfy the following entropy criteria
     *    over a thousand iterations:
     *    1. mean entropy > 12 bits
     *    2. min entropy > 4 bits
     *    3. no duplicates
     *    4. no incremental/decremental pattern in a window of 3
     *    5. no zero
     *    6. no -1
     *
     * <rdar://problem/22526137> Add test to increase code coverage for /dev/random
     */

#define CONF_MIN_ENTROPY 4
#define CONF_MEAN_ENTROPY 12
#define CONF_ITERATIONS 1000
#define CONF_WINDOW_SIZE 3
#define CONF_WINDOW_TREND_LIMIT ((CONF_WINDOW_SIZE / 2) + (CONF_WINDOW_SIZE & 1))
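
    /*
     * Worked out: with CONF_WINDOW_SIZE == 3 the trend limit is
     * (3 / 2) + (3 & 1) == 1 + 1 == 2, i.e. ceil(window_size / 2),
     * which is the bound checked in TEST 3 below.
     */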

    int i;
    uint32_t min_bit_entropy, max_bit_entropy, bit_entropy;
    uint32_t aggregate_bit_entropy = 0;
    uint32_t mean_bit_entropy = 0;
    uint64_t numbers[CONF_ITERATIONS];
    min_bit_entropy = UINT32_MAX;
    max_bit_entropy = 0;

    /*
     * TEST 1: Number generation and basic validation
     * Check for non-zero (no bits set), -1 (all bits set) and error
     */
    for (i = 0; i < CONF_ITERATIONS; i++) {
        read_random(&numbers[i], sizeof(numbers[i]));
        if (numbers[i] == 0) {
            T_ASSERT_NE_ULLONG(numbers[i], 0, "read_random returned zero value.");
        }
        if (numbers[i] == UINT64_MAX) {
            T_ASSERT_NE_ULLONG(numbers[i], UINT64_MAX, "read_random returned -1.");
        }
    }
    T_PASS("Generated %d non-zero random numbers with at least one bit reset.", CONF_ITERATIONS);

    /*
     * TEST 2: Mean and Min Bit Entropy
     * Check the bit entropy and its mean over the generated numbers.
     */
    for (i = 1; i < CONF_ITERATIONS; i++) {
        bit_entropy = count_bits(numbers[i - 1] ^ numbers[i]);
        if (bit_entropy < min_bit_entropy) {
            min_bit_entropy = bit_entropy;
        }
        if (bit_entropy > max_bit_entropy) {
            max_bit_entropy = bit_entropy;
        }

        if (bit_entropy < CONF_MIN_ENTROPY) {
            T_EXPECT_GE_UINT(bit_entropy, CONF_MIN_ENTROPY,
                "Number of differing bits in consecutive numbers does not satisfy the min criteria.");
        }

        aggregate_bit_entropy += bit_entropy;
    }
    T_PASS("Passed the min bit entropy expectation of %d bits", CONF_MIN_ENTROPY);

    mean_bit_entropy = aggregate_bit_entropy / CONF_ITERATIONS;
    T_EXPECT_GE_UINT(mean_bit_entropy, CONF_MEAN_ENTROPY, "Test criteria for mean number of differing bits.");
    T_PASS("Mean bit entropy criteria satisfied (Required %d, Actual: %d).", CONF_MEAN_ENTROPY, mean_bit_entropy);
    T_LOG("{PERFORMANCE} iterations: %d, min_bit_entropy: %d, mean_bit_entropy: %d, max_bit_entropy: %d", CONF_ITERATIONS,
        min_bit_entropy, mean_bit_entropy, max_bit_entropy);
    T_PERF("min_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), min_bit_entropy, "bits", "minimum bit entropy in RNG. High is better");
    T_PERF("mean_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), mean_bit_entropy, "bits", "mean bit entropy in RNG. High is better");
    T_PERF("max_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), max_bit_entropy, "bits", "max bit entropy in RNG. High is better");

    /*
     * TEST 3: Incremental Pattern Search
     * Check that incremental/decremental pattern does not exist in the given window
     */
    int window_start, window_end, trend;
    window_start = window_end = trend = 0;

    do {
        /*
         * Set the window
         */
        window_end = window_start + CONF_WINDOW_SIZE - 1;
        if (window_end >= CONF_ITERATIONS) {
            window_end = CONF_ITERATIONS - 1;
        }

        trend = 0;
        for (i = window_start; i < window_end; i++) {
            if (numbers[i] < numbers[i + 1]) {
                trend++;
            } else if (numbers[i] > numbers[i + 1]) {
                trend--;
            }
        }
        /*
         * Check that there is no increasing or decreasing trend
         * i.e. trend <= ceil(window_size/2)
         */
        if (trend < 0) {
            trend = -trend;
        }
        if (trend > CONF_WINDOW_TREND_LIMIT) {
            T_ASSERT_LE_INT(trend, CONF_WINDOW_TREND_LIMIT, "Found increasing/decreasing trend in random numbers.");
        }

        /*
         * Move to the next window
         */
        window_start++;
    } while (window_start < (CONF_ITERATIONS - 1));
    T_PASS("Did not find increasing/decreasing trends in a window of %d numbers.", CONF_WINDOW_SIZE);

    /*
     * TEST 4: Find Duplicates
     * Check no duplicate values are generated
     */
    qsort(numbers, CONF_ITERATIONS, sizeof(numbers[0]), compare_numbers_ascending);
    for (i = 1; i < CONF_ITERATIONS; i++) {
        if (numbers[i] == numbers[i - 1]) {
            T_ASSERT_NE_ULLONG(numbers[i], numbers[i - 1], "read_random generated duplicate values.");
        }
    }
    T_PASS("Test did not find any duplicates as expected.");

    return KERN_SUCCESS;
}

/* KCDATA kernel api tests */
static struct kcdata_descriptor test_kc_data; //, test_kc_data2;

struct sample_disk_io_stats {
    uint64_t disk_reads_count;
    uint64_t disk_reads_size;
    uint64_t io_priority_count[4];
    uint64_t io_priority_size;
} __attribute__((packed));

struct kcdata_subtype_descriptor test_disk_io_stats_def[] = {
    {KCS_SUBTYPE_FLAGS_NONE, KC_ST_UINT64, 0 * sizeof(uint64_t), sizeof(uint64_t), "disk_reads_count"},
    {KCS_SUBTYPE_FLAGS_NONE, KC_ST_UINT64, 1 * sizeof(uint64_t), sizeof(uint64_t), "disk_reads_size"},
    {KCS_SUBTYPE_FLAGS_ARRAY, KC_ST_UINT64, 2 * sizeof(uint64_t), KCS_SUBTYPE_PACK_SIZE(4, sizeof(uint64_t)), "io_priority_count"},
    {KCS_SUBTYPE_FLAGS_ARRAY, KC_ST_UINT64, (2 + 4) * sizeof(uint64_t), sizeof(uint64_t), "io_priority_size"},
};
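
/*
 * How the descriptor maps onto struct sample_disk_io_stats (offsets
 * worked out from the initializers above, for the reader's reference):
 *
 *   bytes  0..7   disk_reads_count   (uint64_t)
 *   bytes  8..15  disk_reads_size    (uint64_t)
 *   bytes 16..47  io_priority_count  (uint64_t[4], packed array)
 *   bytes 48..55  io_priority_size   (uint64_t)
 *
 * kcdata_add_type_definition() at the end of kcdata_api_test()
 * registers this layout under the custom type id 0x999.
 */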

kern_return_t
kcdata_api_test()
{
    kern_return_t retval = KERN_SUCCESS;

    /* test for NULL input */
    retval = kcdata_memory_static_init(NULL, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_STACKSHOT, 100, KCFLAG_USE_MEMCOPY);
    T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_memory_static_init with NULL struct");

    /* another negative test with buffer size < 32 bytes */
    char data[30] = "sample_disk_io_stats";
    retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)&data, KCDATA_BUFFER_BEGIN_CRASHINFO, sizeof(data),
        KCFLAG_USE_MEMCOPY);
    T_ASSERT(retval == KERN_RESOURCE_SHORTAGE, "init with 30 bytes failed as expected with KERN_RESOURCE_SHORTAGE");

    /* test with COPYOUT for 0x0 address. Should return KERN_NO_ACCESS */
    retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_CRASHINFO, PAGE_SIZE,
        KCFLAG_USE_COPYOUT);
    T_ASSERT(retval == KERN_NO_ACCESS, "writing to 0x0 returned KERN_NO_ACCESS");

    /* test with successful kcdata_memory_static_init */
    test_kc_data.kcd_length = 0xdeadbeef;
    mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
    T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");

    retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
        KCFLAG_USE_MEMCOPY);

    T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");

    T_ASSERT(test_kc_data.kcd_length == PAGE_SIZE, "kcdata length is set correctly to PAGE_SIZE.");
    T_LOG("addr_begin 0x%llx and end 0x%llx and address 0x%llx", test_kc_data.kcd_addr_begin, test_kc_data.kcd_addr_end, address);
    T_ASSERT(test_kc_data.kcd_addr_begin == address, "kcdata begin address is correct 0x%llx", (uint64_t)address);

    /* verify we have BEGIN and END HEADERS set */
    uint32_t * mem = (uint32_t *)address;
    T_ASSERT(mem[0] == KCDATA_BUFFER_BEGIN_STACKSHOT, "buffer does contain KCDATA_BUFFER_BEGIN_STACKSHOT");
    T_ASSERT(mem[4] == KCDATA_TYPE_BUFFER_END, "KCDATA_TYPE_BUFFER_END is appended as expected");
    T_ASSERT(mem[5] == 0, "size of BUFFER_END tag is zero");

    /* verify kcdata_memory_get_used_bytes() */
    uint64_t bytes_used = 0;
    bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
    T_ASSERT(bytes_used == (2 * sizeof(struct kcdata_item)), "bytes_used api returned expected %llu", bytes_used);
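
    /*
     * Sketch of the buffer layout the assertions above rely on: each
     * kcdata_item header is 16 bytes (four uint32_t words), so with
     * only the two bookkeeping items present:
     *
     *   mem[0..3]  KCDATA_BUFFER_BEGIN_STACKSHOT item
     *   mem[4..7]  KCDATA_TYPE_BUFFER_END item, whose size word mem[5] is 0
     *
     * which is also why bytes_used == 2 * sizeof(struct kcdata_item).
     */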

    /* test for kcdata_get_memory_addr() */

    mach_vm_address_t user_addr = 0;
    /* negative test for NULL user_addr AND/OR kcdata_descriptor */
    retval = kcdata_get_memory_addr(NULL, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
    T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL struct -> KERN_INVALID_ARGUMENT");

    retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), NULL);
    T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL user_addr -> KERN_INVALID_ARGUMENT");

    /* successful case with size 0; this is expected to succeed since a bare item type can serve as a boolean flag */
    retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_USECS_SINCE_EPOCH, 0, &user_addr);
    T_ASSERT(retval == KERN_SUCCESS, "Successfully got kcdata entry for 0 size data");
    T_ASSERT(user_addr == test_kc_data.kcd_addr_end, "0 sized data did not add any extra buffer space");

    /* successful case with valid size. */
    user_addr = 0xdeadbeef;
    retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
    T_ASSERT(retval == KERN_SUCCESS, "kcdata_get_memory_addr with valid values succeeded.");
    T_ASSERT(user_addr > test_kc_data.kcd_addr_begin, "user_addr is in range of buffer");
    T_ASSERT(user_addr < test_kc_data.kcd_addr_end, "user_addr is in range of buffer");

    /* Try creating an item with really large size */
    user_addr = 0xdeadbeef;
    bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
    retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, PAGE_SIZE * 4, &user_addr);
    T_ASSERT(retval == KERN_RESOURCE_SHORTAGE, "Allocating entry with size > buffer -> KERN_RESOURCE_SHORTAGE");
    T_ASSERT(user_addr == 0xdeadbeef, "user_addr remained unaffected with failed kcdata_get_memory_addr");
    T_ASSERT(bytes_used == kcdata_memory_get_used_bytes(&test_kc_data), "The data structure should be unaffected");

    /* verify convenience functions for uint32_with_description */
    retval = kcdata_add_uint32_with_description(&test_kc_data, 0xbdc0ffee, "This is bad coffee");
    T_ASSERT(retval == KERN_SUCCESS, "add uint32 with description succeeded.");

    retval = kcdata_add_uint64_with_description(&test_kc_data, 0xf001badc0ffee, "another 8 byte no.");
    T_ASSERT(retval == KERN_SUCCESS, "add uint64 with desc succeeded.");

    /* verify creating a KCDATA_TYPE_ARRAY here */
    user_addr = 0xdeadbeef;
    bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
    /* save the memory address where the array will be placed */
    struct kcdata_item * item_p = (struct kcdata_item *)test_kc_data.kcd_addr_end;

    retval = kcdata_get_memory_addr_for_array(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), 20, &user_addr);
    T_ASSERT(retval == KERN_SUCCESS, "Array of 20 integers should be possible");
    T_ASSERT(user_addr != 0xdeadbeef, "user_addr is updated as expected");
    T_ASSERT((kcdata_memory_get_used_bytes(&test_kc_data) - bytes_used) >= 20 * sizeof(uint64_t), "memory allocation is in range");
    kcdata_iter_t iter = kcdata_iter(item_p, PAGE_SIZE - kcdata_memory_get_used_bytes(&test_kc_data));
    T_ASSERT(kcdata_iter_array_elem_count(iter) == 20, "array count is 20");

    /* FIXME add tests here for ranges of sizes and counts */

    T_ASSERT(item_p->flags == (((uint64_t)KCDATA_TYPE_MACH_ABSOLUTE_TIME << 32) | 20), "flags are set correctly");

    /* test adding of custom type */

    retval = kcdata_add_type_definition(&test_kc_data, 0x999, data, &test_disk_io_stats_def[0],
        sizeof(test_disk_io_stats_def) / sizeof(struct kcdata_subtype_descriptor));
    T_ASSERT(retval == KERN_SUCCESS, "adding custom type succeeded.");

    return KERN_SUCCESS;
}

/*
 * kern_return_t
 * kcdata_api_assert_tests()
 * {
 *     kern_return_t retval = 0;
 *     void * assert_check_retval = NULL;
 *     test_kc_data2.kcd_length = 0xdeadbeef;
 *     mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
 *     T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");
 *
 *     retval = kcdata_memory_static_init(&test_kc_data2, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
 *         KCFLAG_USE_MEMCOPY);
 *
 *     T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");
 *
 *     retval = T_REGISTER_ASSERT_CHECK("KCDATA_DESC_MAXLEN", &assert_check_retval);
 *     T_ASSERT(retval == KERN_SUCCESS, "registered assert widget");
 *
 *     // this will assert
 *     retval = kcdata_add_uint32_with_description(&test_kc_data2, 0xc0ffee, "really long description string for kcdata");
 *     T_ASSERT(retval == KERN_INVALID_ARGUMENT, "API param check returned KERN_INVALID_ARGUMENT correctly");
 *     T_ASSERT(assert_check_retval == (void *)XT_RET_W_SUCCESS, "assertion handler verified that it was hit");
 *
 *     return KERN_SUCCESS;
 * }
 */

#if defined(__arm__) || defined(__arm64__)

#include <arm/pmap.h>

#define MAX_PMAP_OBJECT_ELEMENT 100000

extern struct vm_object pmap_object_store; /* store pt pages */
extern unsigned long gPhysBase, gPhysSize, first_avail;

/*
 * Define macros to traverse the pmap object structures and extract
 * physical page numbers using information from the low globals only.
 * This emulates how Astris extracts information from a coredump.
 */
#if defined(__arm64__)

static inline uintptr_t
astris_vm_page_unpack_ptr(uintptr_t p)
{
    if (!p) {
        return (uintptr_t)0;
    }

    return (p & lowGlo.lgPmapMemFromArrayMask)
        ? lowGlo.lgPmapMemStartAddr + (p & ~(lowGlo.lgPmapMemFromArrayMask)) * lowGlo.lgPmapMemPagesize
        : lowGlo.lgPmapMemPackedBaseAddr + (p << lowGlo.lgPmapMemPackedShift);
}
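
/*
 * Illustrative decode (no concrete lowglo values assumed): when the
 * from-array bit in lgPmapMemFromArrayMask is set, the remaining bits
 * of p index the vm_pages array, giving
 *     lgPmapMemStartAddr + index * lgPmapMemPagesize;
 * otherwise p is a packed kernel pointer and expands to
 *     lgPmapMemPackedBaseAddr + (p << lgPmapMemPackedShift).
 */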

// assume next pointer is the first element
#define astris_vm_page_queue_next(qc) (astris_vm_page_unpack_ptr(*((uint32_t *)(qc))))

#endif

#if defined(__arm__)

// assume next pointer is the first element
#define astris_vm_page_queue_next(qc) *((uintptr_t *)(qc))

#endif

#define astris_vm_page_queue_first(q) astris_vm_page_queue_next(q)

#define astris_vm_page_queue_end(q, qe) ((q) == (qe))

#define astris_vm_page_queue_iterate(head, elt)                                                              \
    for ((elt) = (uintptr_t)astris_vm_page_queue_first((head)); !astris_vm_page_queue_end((head), (elt));    \
        (elt) = (uintptr_t)astris_vm_page_queue_next(((elt) + (uintptr_t)lowGlo.lgPmapMemChainOffset)))

#define astris_ptoa(x) ((vm_address_t)(x) << lowGlo.lgPageShift)

static inline ppnum_t
astris_vm_page_get_phys_page(uintptr_t m)
{
    return (m >= lowGlo.lgPmapMemStartAddr && m < lowGlo.lgPmapMemEndAddr)
        ? (ppnum_t)((m - lowGlo.lgPmapMemStartAddr) / lowGlo.lgPmapMemPagesize + lowGlo.lgPmapMemFirstppnum)
        : *((ppnum_t *)(m + lowGlo.lgPmapMemPageOffset));
}

kern_return_t
pmap_coredump_test(void)
{
    int iter = 0;
    uintptr_t p;

    T_LOG("Testing coredump info for PMAP.");

    T_ASSERT_GE_ULONG(lowGlo.lgStaticAddr, gPhysBase, NULL);
    T_ASSERT_LE_ULONG(lowGlo.lgStaticAddr + lowGlo.lgStaticSize, first_avail, NULL);
    T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMajorVersion, 3, NULL);
    T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMinorVersion, 0, NULL);
    T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMagic, LOWGLO_LAYOUT_MAGIC, NULL);

    // check the constant values in lowGlo
    T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemQ, ((uint64_t) &(pmap_object_store.memq)), NULL);
    T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPageOffset, offsetof(struct vm_page_with_ppnum, vmp_phys_page), NULL);
    T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemChainOffset, offsetof(struct vm_page, vmp_listq), NULL);
    T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPagesize, sizeof(struct vm_page), NULL);

#if defined(__arm64__)
    T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemFromArrayMask, VM_PACKED_FROM_VM_PAGES_ARRAY, NULL);
    T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedShift, VM_PACKED_POINTER_SHIFT, NULL);
    T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedBaseAddr, VM_MIN_KERNEL_AND_KEXT_ADDRESS, NULL);
#endif

    vm_object_lock_shared(&pmap_object_store);
    astris_vm_page_queue_iterate(lowGlo.lgPmapMemQ, p)
    {
        ppnum_t ppnum = astris_vm_page_get_phys_page(p);
        pmap_paddr_t pa = (pmap_paddr_t)astris_ptoa(ppnum);
        T_ASSERT_GE_ULONG(pa, gPhysBase, NULL);
        T_ASSERT_LT_ULONG(pa, gPhysBase + gPhysSize, NULL);
        iter++;
        T_ASSERT_LT_INT(iter, MAX_PMAP_OBJECT_ELEMENT, NULL);
    }
    vm_object_unlock(&pmap_object_store);

    T_ASSERT_GT_INT(iter, 0, NULL);
    return KERN_SUCCESS;
}
#endif