/*
 * Copyright (c) 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/host.h>
#include <kern/macro_help.h>
#include <kern/sched.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/thread_call.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <tests/ktest.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <kern/kern_cdata.h>
#include <machine/lowglobals.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <kern/priority_queue.h>
#if !(DEVELOPMENT || DEBUG)
#error "Testing is not enabled on RELEASE configurations"
#endif

#include <tests/xnupost.h>
extern boolean_t get_range_bounds(char * c, int64_t * lower, int64_t * upper);
__private_extern__ void qsort(void * a, size_t n, size_t es, int (*cmp)(const void *, const void *));
uint32_t total_post_tests_count = 0;
void xnupost_reset_panic_widgets(void);
/* test declarations */
kern_return_t zalloc_test(void);
kern_return_t RandomULong_test(void);
kern_return_t kcdata_api_test(void);
kern_return_t priority_queue_test(void);

#if defined(__arm__) || defined(__arm64__)
kern_return_t pmap_coredump_test(void);
#endif

extern kern_return_t console_serial_test(void);
extern kern_return_t console_serial_alloc_rel_tests(void);
extern kern_return_t console_serial_parallel_log_tests(void);
extern kern_return_t test_os_log(void);
extern kern_return_t test_os_log_parallel(void);
extern kern_return_t bitmap_post_test(void);

#ifdef __arm64__
extern kern_return_t arm64_munger_test(void);
extern kern_return_t ex_cb_test(void);
#if __ARM_PAN_AVAILABLE__
extern kern_return_t arm64_pan_test(void);
#endif /* __ARM_PAN_AVAILABLE__ */
#endif /* __arm64__ */

extern kern_return_t test_thread_call(void);
struct xnupost_panic_widget xt_panic_widgets = {NULL, NULL, NULL, NULL};
struct xnupost_test kernel_post_tests[] = {
	XNUPOST_TEST_CONFIG_BASIC(zalloc_test),
	XNUPOST_TEST_CONFIG_BASIC(RandomULong_test),
	XNUPOST_TEST_CONFIG_BASIC(test_os_log),
	XNUPOST_TEST_CONFIG_BASIC(test_os_log_parallel),
#ifdef __arm64__
	XNUPOST_TEST_CONFIG_BASIC(arm64_munger_test),
	XNUPOST_TEST_CONFIG_BASIC(ex_cb_test),
#if __ARM_PAN_AVAILABLE__
	XNUPOST_TEST_CONFIG_BASIC(arm64_pan_test),
#endif /* __ARM_PAN_AVAILABLE__ */
#endif /* __arm64__ */
	XNUPOST_TEST_CONFIG_BASIC(kcdata_api_test),
	XNUPOST_TEST_CONFIG_BASIC(console_serial_test),
	XNUPOST_TEST_CONFIG_BASIC(console_serial_alloc_rel_tests),
	XNUPOST_TEST_CONFIG_BASIC(console_serial_parallel_log_tests),
#if defined(__arm__) || defined(__arm64__)
	XNUPOST_TEST_CONFIG_BASIC(pmap_coredump_test),
#endif
	XNUPOST_TEST_CONFIG_BASIC(bitmap_post_test),
	//XNUPOST_TEST_CONFIG_TEST_PANIC(kcdata_api_assert_tests)
	XNUPOST_TEST_CONFIG_BASIC(test_thread_call),
	XNUPOST_TEST_CONFIG_BASIC(priority_queue_test),
};
uint32_t kernel_post_tests_count = sizeof(kernel_post_tests) / sizeof(xnupost_test_data_t);
#define POSTARGS_RUN_TESTS 0x1
#define POSTARGS_CONTROLLER_AVAILABLE 0x2
#define POSTARGS_CUSTOM_TEST_RUNLIST 0x4
uint64_t kernel_post_args = 0x0;

/* static variables to hold state */
static kern_return_t parse_config_retval = KERN_INVALID_CAPABILITY;
static char kernel_post_test_configs[256];
boolean_t xnupost_should_run_test(uint32_t test_num);
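
/*
 * Illustrative boot-args (values follow from the flag definitions above):
 * "kernPOST=0x3" sets POSTARGS_RUN_TESTS | POSTARGS_CONTROLLER_AVAILABLE,
 * and supplying "kernPOST_config=1-4,7" additionally ORs in
 * POSTARGS_CUSTOM_TEST_RUNLIST so that only the listed test numbers run.
 */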
kern_return_t
xnupost_parse_config()
{
	if (parse_config_retval != KERN_INVALID_CAPABILITY) {
		return parse_config_retval;
	}
	PE_parse_boot_argn("kernPOST", &kernel_post_args, sizeof(kernel_post_args));

	if (PE_parse_boot_argn("kernPOST_config", &kernel_post_test_configs[0], sizeof(kernel_post_test_configs)) == TRUE) {
		kernel_post_args |= POSTARGS_CUSTOM_TEST_RUNLIST;
	}

	if (kernel_post_args != 0) {
		parse_config_retval = KERN_SUCCESS;
		goto out;
	}
	parse_config_retval = KERN_NOT_SUPPORTED;
out:
	return parse_config_retval;
}
boolean_t
xnupost_should_run_test(uint32_t test_num)
{
	if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
		int64_t begin = 0, end = 999999;
		char * b = kernel_post_test_configs;
		while (*b) {
			get_range_bounds(b, &begin, &end);
			if (test_num >= begin && test_num <= end) {
				return TRUE;
			}

			/* skip to the next "," */
			while (*b != ',') {
				if (*b == '\0') {
					return FALSE;
				}
				b++;
			}
			/* skip past the ',' */
			b++;
		}
		return FALSE;
	}
	return TRUE;
}
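
/*
 * Worked example (illustrative config string): with
 * kernel_post_test_configs = "1-4,7", xnupost_should_run_test(3) matches the
 * range [1,4] and returns TRUE; xnupost_should_run_test(5) skips past the
 * first range to "7", finds no match, and returns FALSE. The exact range
 * syntax is parsed by get_range_bounds().
 */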
kern_return_t
xnupost_list_tests(xnupost_test_t test_list, uint32_t test_count)
{
	if (KERN_SUCCESS != xnupost_parse_config()) {
		return KERN_FAILURE;
	}

	xnupost_test_t testp;
	for (uint32_t i = 0; i < test_count; i++) {
		testp = &test_list[i];
		if (testp->xt_test_num == 0) {
			testp->xt_test_num = ++total_post_tests_count;
		}
		/* make sure the boot-arg based test run list is honored */
		if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
			testp->xt_config |= XT_CONFIG_IGNORE;
			if (xnupost_should_run_test(testp->xt_test_num)) {
				testp->xt_config &= ~(XT_CONFIG_IGNORE);
				testp->xt_config |= XT_CONFIG_RUN;
				printf("\n[TEST] #%u is marked to be run", testp->xt_test_num);
			}
		}

		printf("\n[TEST] TOC#%u name: %s expected: %d config: %x\n", testp->xt_test_num, testp->xt_name, testp->xt_expected_retval,
		    testp->xt_config);
	}

	return KERN_SUCCESS;
}
kern_return_t
xnupost_run_tests(xnupost_test_t test_list, uint32_t test_count)
{
	uint32_t i = 0;
	int retval = KERN_SUCCESS;

	if ((kernel_post_args & POSTARGS_RUN_TESTS) == 0) {
		printf("No POST boot-arg set.\n");
		return retval;
	}

	T_START;
	xnupost_test_t testp;
	for (; i < test_count; i++) {
		xnupost_reset_panic_widgets();
		testp = &test_list[i];
		T_BEGIN(testp->xt_name);
		testp->xt_begin_time = mach_absolute_time();
		testp->xt_end_time   = testp->xt_begin_time;

		/*
		 * If test is designed to panic and controller
		 * is not available then mark as SKIPPED
		 */
		if ((testp->xt_config & XT_CONFIG_EXPECT_PANIC) && !(kernel_post_args & POSTARGS_CONTROLLER_AVAILABLE)) {
			T_SKIP(
			    "Test expects panic but "
			    "no controller is present");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		if ((testp->xt_config & XT_CONFIG_IGNORE)) {
			T_SKIP("Test is marked as XT_CONFIG_IGNORE");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		testp->xt_func();
		T_END;
		testp->xt_retval   = T_TESTRESULT;
		testp->xt_end_time = mach_absolute_time();
		if (testp->xt_retval == testp->xt_expected_retval) {
			testp->xt_test_actions = XT_ACTION_PASSED;
		} else {
			testp->xt_test_actions = XT_ACTION_FAILED;
		}
	}
	T_FINISH;
	return retval;
}
kern_return_t
kernel_list_tests()
{
	return xnupost_list_tests(kernel_post_tests, kernel_post_tests_count);
}

kern_return_t
kernel_do_post()
{
	return xnupost_run_tests(kernel_post_tests, kernel_post_tests_count);
}
kern_return_t
xnupost_register_panic_widget(xt_panic_widget_func funcp, const char * funcname, void * context, void ** outval)
{
	if (xt_panic_widgets.xtp_context_p != NULL || xt_panic_widgets.xtp_func != NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}

	xt_panic_widgets.xtp_context_p = context;
	xt_panic_widgets.xtp_func      = funcp;
	xt_panic_widgets.xtp_func_name = funcname;
	xt_panic_widgets.xtp_outval_p  = outval;

	return KERN_SUCCESS;
}
void
xnupost_reset_panic_widgets()
{
	bzero(&xt_panic_widgets, sizeof(xt_panic_widgets));
}
kern_return_t
xnupost_process_kdb_stop(const char * panic_s)
{
	xt_panic_return_t retval         = 0;
	struct xnupost_panic_widget * pw = &xt_panic_widgets;
	const char * name                = "unknown";
	if (xt_panic_widgets.xtp_func_name) {
		name = xt_panic_widgets.xtp_func_name;
	}

	/* bail early on if kernPOST is not set */
	if (kernel_post_args == 0) {
		return KERN_INVALID_CAPABILITY;
	}

	if (xt_panic_widgets.xtp_func) {
		T_LOG("%s: Calling out to widget: %s", __func__, xt_panic_widgets.xtp_func_name);
		retval = pw->xtp_func(panic_s, pw->xtp_context_p, pw->xtp_outval_p);
	} else {
		return KERN_INVALID_CAPABILITY;
	}

	switch (retval) {
	case XT_RET_W_SUCCESS:
		T_EXPECT_EQ_INT(retval, XT_RET_W_SUCCESS, "%s reported successful handling. Returning from kdb_stop.", name);
		/* KERN_SUCCESS means return from panic/assertion */
		return KERN_SUCCESS;

	case XT_RET_W_FAIL:
		T_FAIL("%s reported XT_RET_W_FAIL: Returning from kdb_stop", name);
		return KERN_SUCCESS;

	case XT_PANIC_W_FAIL:
		T_FAIL("%s reported XT_PANIC_W_FAIL: Continuing to kdb_stop", name);
		return KERN_FAILURE;

	case XT_PANIC_W_SUCCESS:
		T_EXPECT_EQ_INT(retval, XT_PANIC_W_SUCCESS, "%s reported successful testcase. But continuing to kdb_stop.", name);
		return KERN_FAILURE;

	case XT_PANIC_UNRELATED:
	default:
		T_LOG("UNRELATED: Continuing to kdb_stop.");
		return KERN_FAILURE;
	}
}
xt_panic_return_t
_xt_generic_assert_check(const char * s, void * str_to_match, void ** outval)
{
	xt_panic_return_t ret = XT_PANIC_UNRELATED;

	if (NULL != strnstr(__DECONST(char *, s), (char *)str_to_match, strlen(s))) {
		T_LOG("%s: kdb_stop string: '%s' MATCHED string: '%s'", __func__, s, (char *)str_to_match);
		ret = XT_RET_W_SUCCESS;
	}

	if (outval) {
		*outval = (void *)(uintptr_t)ret;
	}
	return ret;
}
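
/*
 * Usage sketch for the panic-widget hooks (compare the commented-out
 * kcdata_api_assert_tests() further below); illustrative, not a registered
 * test:
 *
 *	void * outval = NULL;
 *	xnupost_register_panic_widget(&_xt_generic_assert_check, "assert_check",
 *	    (void *)"expected assert substring", &outval);
 *	// ...trigger the assert; xnupost_process_kdb_stop() then invokes the
 *	// widget with the panic string and stores its verdict through outval.
 */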
kern_return_t
xnupost_reset_tests(xnupost_test_t test_list, uint32_t test_count)
{
	uint32_t i = 0;
	xnupost_test_t testp;
	for (; i < test_count; i++) {
		testp                  = &test_list[i];
		testp->xt_begin_time   = 0;
		testp->xt_end_time     = 0;
		testp->xt_test_actions = XT_ACTION_NONE;
		testp->xt_retval       = -1;
	}
	return KERN_SUCCESS;
}
kern_return_t
zalloc_test()
{
	zone_t test_zone;
	void * test_ptr;

	T_SETUPBEGIN;
	test_zone = zinit(sizeof(uint64_t), 100 * sizeof(uint64_t), sizeof(uint64_t), "test_uint64_zone");
	T_ASSERT_NOTNULL(test_zone, NULL);

	T_ASSERT_EQ_INT(zone_free_count(test_zone), 0, NULL);
	T_SETUPEND;

	T_ASSERT_NOTNULL(test_ptr = zalloc(test_zone), NULL);

	zfree(test_zone, test_ptr);

	/* A sample report for perfdata */
	T_PERF("num_threads_at_ktest", threads_count, "count", "# of threads in system at zalloc_test");

	return KERN_SUCCESS;
}
/*
 * Function used for comparison by qsort()
 */
static int
compare_numbers_ascending(const void * a, const void * b)
{
	const uint64_t x = *(const uint64_t *)a;
	const uint64_t y = *(const uint64_t *)b;
	if (x < y) {
		return -1;
	} else if (x > y) {
		return 1;
	} else {
		return 0;
	}
}
/*
 * Function used for comparison by qsort()
 */
static int
compare_numbers_descending(const void * a, const void * b)
{
	const uint32_t x = *(const uint32_t *)a;
	const uint32_t y = *(const uint32_t *)b;
	if (x > y) {
		return -1;
	} else if (x < y) {
		return 1;
	} else {
		return 0;
	}
}
/* Node structure for the priority queue tests */
struct priority_queue_test_node {
	struct priority_queue_entry link;
	priority_queue_key_t        node_key;
};
static void
priority_queue_test_queue(struct priority_queue *pq, int type,
    priority_queue_compare_fn_t cmp_fn)
{
	/* Configuration for the test */
#define PRIORITY_QUEUE_NODES 7
	static uint32_t priority_list[] = { 20, 3, 7, 6, 50, 2, 8};
	uint32_t increase_pri = 100;
	uint32_t decrease_pri = 90;
	struct priority_queue_test_node *result;
	uint32_t key = 0;
	boolean_t update_result = false;

	struct priority_queue_test_node *node = NULL;
	/* Add all priorities to the first priority queue */
	for (int i = 0; i < PRIORITY_QUEUE_NODES; i++) {
		node = kalloc(sizeof(struct priority_queue_test_node));
		T_ASSERT_NOTNULL(node, NULL);

		priority_queue_entry_init(&(node->link));
		node->node_key = priority_list[i];
		key = (type == PRIORITY_QUEUE_GENERIC_KEY) ? PRIORITY_QUEUE_KEY_NONE : priority_list[i];
		priority_queue_insert(pq, &(node->link), key, cmp_fn);
	}
	T_ASSERT_NOTNULL(node, NULL);
	key = (type == PRIORITY_QUEUE_GENERIC_KEY) ? node->node_key : priority_queue_entry_key(pq, &(node->link));
	T_ASSERT((key == node->node_key), "verify node stored key correctly");
	/* Test the priority increase operation by updating the last node added (8) */
	T_ASSERT_NOTNULL(node, NULL);
	node->node_key = increase_pri;
	key = (type == PRIORITY_QUEUE_GENERIC_KEY) ? PRIORITY_QUEUE_KEY_NONE : node->node_key;
	update_result = priority_queue_entry_increase(pq, &node->link, key, cmp_fn);
	T_ASSERT((update_result == true), "increase key updated root");
	result = priority_queue_max(pq, struct priority_queue_test_node, link);
	T_ASSERT((result->node_key == increase_pri), "verify priority_queue_entry_increase() operation");
	/* Test the priority decrease operation by updating the last node added */
	T_ASSERT((result == node), NULL);
	node->node_key = decrease_pri;
	key = (type == PRIORITY_QUEUE_GENERIC_KEY) ? PRIORITY_QUEUE_KEY_NONE : node->node_key;
	update_result = priority_queue_entry_decrease(pq, &node->link, key, cmp_fn);
	T_ASSERT((update_result == true), "decrease key updated root");
	result = priority_queue_max(pq, struct priority_queue_test_node, link);
	T_ASSERT((result->node_key == decrease_pri), "verify priority_queue_entry_decrease() operation");
	/* Update our local priority list as well */
	priority_list[PRIORITY_QUEUE_NODES - 1] = decrease_pri;

	/* Sort the local list in descending order */
	qsort(priority_list, PRIORITY_QUEUE_NODES, sizeof(priority_list[0]), compare_numbers_descending);

	/* Test the maximum operation by comparing max node with local list */
	result = priority_queue_max(pq, struct priority_queue_test_node, link);
	T_ASSERT((result->node_key == priority_list[0]), "(heap (%u) == qsort (%u)) priority queue max node lookup",
	    (uint32_t)result->node_key, priority_list[0]);
	/* Remove all remaining elements and verify they match local list */
	for (int i = 0; i < PRIORITY_QUEUE_NODES; i++) {
		result = priority_queue_remove_max(pq, struct priority_queue_test_node, link, cmp_fn);
		T_ASSERT((result->node_key == priority_list[i]), "(heap (%u) == qsort (%u)) priority queue max node removal",
		    (uint32_t)result->node_key, priority_list[i]);
	}

	priority_queue_destroy(pq, struct priority_queue_test_node, link, ^(void *n) {
		kfree(n, sizeof(struct priority_queue_test_node));
	});
}
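
/*
 * Worked example of the cross-check above: after the increase/decrease
 * updates the queue holds keys {20, 3, 7, 6, 50, 2, 90}, and priority_list
 * qsort()ed descending is {90, 50, 20, 7, 6, 3, 2}; successive
 * priority_queue_remove_max() calls must yield exactly that sequence.
 */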
kern_return_t
priority_queue_test(void)
{
	/*
	 * Initialize two priority queues
	 * - One which uses the key comparator
	 * - Other which uses the node comparator
	 */
	static struct priority_queue pq;
	static struct priority_queue pq_nodes;

	T_SETUPBEGIN;

	priority_queue_init(&pq, PRIORITY_QUEUE_BUILTIN_KEY | PRIORITY_QUEUE_MAX_HEAP);
	priority_queue_init(&pq_nodes, PRIORITY_QUEUE_GENERIC_KEY | PRIORITY_QUEUE_MAX_HEAP);

	T_SETUPEND;

	priority_queue_test_queue(&pq, PRIORITY_QUEUE_BUILTIN_KEY,
	    PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE);

	priority_queue_test_queue(&pq_nodes, PRIORITY_QUEUE_GENERIC_KEY,
	    priority_heap_make_comparator(a, b, struct priority_queue_test_node, link, {
		return (a->node_key > b->node_key) ? 1 : ((a->node_key == b->node_key) ? 0 : -1);
	}));

	return KERN_SUCCESS;
}
/*
 * Function to count the number of bits that are set in a number.
 * Uses sideways addition via the compiler's population-count builtin.
 */
static int
count_bits(uint64_t number)
{
	return __builtin_popcountll(number);
}
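
/*
 * Example: count_bits(0xF0F0) == 8, the number of set bits in the argument.
 */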
/*
 * Randomness test for RandomULong()
 *
 * This test verifies that:
 *  a. RandomULong works
 *  b. The generated numbers match the following entropy criteria:
 *     For a thousand iterations, verify:
 *          1. mean entropy > 12 bits
 *          2. min entropy > 4 bits
 *          3. No duplicates
 *          4. No incremental/decremental pattern in a window of 3
 *          5. No zero
 *          6. No -1
 *
 * <rdar://problem/22526137> Add test to increase code coverage for /dev/random
 */

#define CONF_MIN_ENTROPY 4
#define CONF_MEAN_ENTROPY 12
#define CONF_ITERATIONS 1000
#define CONF_WINDOW_SIZE 3
#define CONF_WINDOW_TREND_LIMIT ((CONF_WINDOW_SIZE / 2) + (CONF_WINDOW_SIZE & 1))
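
/*
 * Worked arithmetic: with CONF_WINDOW_SIZE == 3 the limit is
 * (3 / 2) + (3 & 1) == 1 + 1 == 2, i.e. ceil(CONF_WINDOW_SIZE / 2).
 */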
kern_return_t
RandomULong_test()
{
	int i;
	uint32_t min_bit_entropy, max_bit_entropy, bit_entropy;
	uint32_t aggregate_bit_entropy = 0;
	uint32_t mean_bit_entropy      = 0;
	uint64_t numbers[CONF_ITERATIONS];
	min_bit_entropy = UINT32_MAX;
	max_bit_entropy = 0;
	/*
	 * TEST 1: Number generation and basic validation
	 * Check for non-zero (no bits set), -1 (all bits set) and error
	 */
	for (i = 0; i < CONF_ITERATIONS; i++) {
		read_random(&numbers[i], sizeof(numbers[i]));
		if (numbers[i] == 0) {
			T_ASSERT_NE_ULLONG(numbers[i], 0, "read_random returned zero value.");
		}
		if (numbers[i] == UINT64_MAX) {
			T_ASSERT_NE_ULLONG(numbers[i], UINT64_MAX, "read_random returned -1.");
		}
	}
	T_PASS("Generated %d non-zero random numbers with at least one bit reset.", CONF_ITERATIONS);
	/*
	 * TEST 2: Mean and Min Bit Entropy
	 * Check the bit entropy and its mean over the generated numbers.
	 */
	for (i = 1; i < CONF_ITERATIONS; i++) {
		bit_entropy = count_bits(numbers[i - 1] ^ numbers[i]);
		if (bit_entropy < min_bit_entropy) {
			min_bit_entropy = bit_entropy;
		}
		if (bit_entropy > max_bit_entropy) {
			max_bit_entropy = bit_entropy;
		}

		if (bit_entropy < CONF_MIN_ENTROPY) {
			T_EXPECT_GE_UINT(bit_entropy, CONF_MIN_ENTROPY,
			    "Number of differing bits in consecutive numbers does not satisfy the min criteria.");
		}

		aggregate_bit_entropy += bit_entropy;
	}
	T_PASS("Passed the min bit entropy expectation of %d bits", CONF_MIN_ENTROPY);
	mean_bit_entropy = aggregate_bit_entropy / CONF_ITERATIONS;
	T_EXPECT_GE_UINT(mean_bit_entropy, CONF_MEAN_ENTROPY, "Test criteria for mean number of differing bits.");
	T_PASS("Mean bit entropy criteria satisfied (Required %d, Actual: %d).", CONF_MEAN_ENTROPY, mean_bit_entropy);
	T_LOG("{PERFORMANCE} iterations: %d, min_bit_entropy: %d, mean_bit_entropy: %d, max_bit_entropy: %d", CONF_ITERATIONS,
	    min_bit_entropy, mean_bit_entropy, max_bit_entropy);
	T_PERF("min_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), min_bit_entropy, "bits", "minimum bit entropy in RNG. High is better");
	T_PERF("mean_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), mean_bit_entropy, "bits", "mean bit entropy in RNG. High is better");
	T_PERF("max_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), max_bit_entropy, "bits", "max bit entropy in RNG. High is better");
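
	/*
	 * Worked example of the entropy measure used above (illustrative
	 * values): for consecutive samples 0xFF00 and 0x0FF0, the XOR is
	 * 0xF0F0 and count_bits(0xF0F0) == 8, so that pair contributes
	 * 8 differing bits.
	 */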
	/*
	 * TEST 3: Incremental Pattern Search
	 * Check that incremental/decremental pattern does not exist in the given window
	 */
	int window_start, window_end, trend;
	window_start = window_end = trend = 0;

	do {
		/*
		 * Set the window
		 */
		window_end = window_start + CONF_WINDOW_SIZE - 1;
		if (window_end >= CONF_ITERATIONS) {
			window_end = CONF_ITERATIONS - 1;
		}

		trend = 0;
		for (i = window_start; i < window_end; i++) {
			if (numbers[i] < numbers[i + 1]) {
				trend++;
			} else if (numbers[i] > numbers[i + 1]) {
				trend--;
			}
		}
		/*
		 * Check that there is no increasing or decreasing trend
		 * i.e. trend <= ceil(window_size/2)
		 */
		if (trend < 0) {
			trend = -trend;
		}
		if (trend > CONF_WINDOW_TREND_LIMIT) {
			T_ASSERT_LE_INT(trend, CONF_WINDOW_TREND_LIMIT, "Found increasing/decreasing trend in random numbers.");
		}

		/*
		 * Move to the next window
		 */
		window_start++;

	} while (window_start < (CONF_ITERATIONS - 1));
	T_PASS("Did not find increasing/decreasing trends in a window of %d numbers.", CONF_WINDOW_SIZE);
	/*
	 * TEST 4: Find Duplicates
	 * Check no duplicate values are generated
	 */
	qsort(numbers, CONF_ITERATIONS, sizeof(numbers[0]), compare_numbers_ascending);
	for (i = 1; i < CONF_ITERATIONS; i++) {
		if (numbers[i] == numbers[i - 1]) {
			T_ASSERT_NE_ULLONG(numbers[i], numbers[i - 1], "read_random generated duplicate values.");
		}
	}
	T_PASS("Test did not find any duplicates as expected.");

	return KERN_SUCCESS;
}
/* KCDATA kernel api tests */
static struct kcdata_descriptor test_kc_data; //, test_kc_data2;
struct sample_disk_io_stats {
	uint64_t disk_reads_count;
	uint64_t disk_reads_size;
	uint64_t io_priority_count[4];
	uint64_t io_priority_size;
} __attribute__((packed));
struct kcdata_subtype_descriptor test_disk_io_stats_def[] = {
	{KCS_SUBTYPE_FLAGS_NONE, KC_ST_UINT64, 0 * sizeof(uint64_t), sizeof(uint64_t), "disk_reads_count"},
	{KCS_SUBTYPE_FLAGS_NONE, KC_ST_UINT64, 1 * sizeof(uint64_t), sizeof(uint64_t), "disk_reads_size"},
	{KCS_SUBTYPE_FLAGS_ARRAY, KC_ST_UINT64, 2 * sizeof(uint64_t), KCS_SUBTYPE_PACK_SIZE(4, sizeof(uint64_t)), "io_priority_count"},
	{KCS_SUBTYPE_FLAGS_ARRAY, KC_ST_UINT64, (2 + 4) * sizeof(uint64_t), sizeof(uint64_t), "io_priority_size"},
};
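
/*
 * How the descriptors above map onto struct sample_disk_io_stats (packed):
 * disk_reads_count at offset 0 (8 bytes), disk_reads_size at offset 8
 * (8 bytes), io_priority_count[4] at offset 16 (4 * 8 = 32 bytes, described
 * with KCS_SUBTYPE_PACK_SIZE), and io_priority_size at offset 48 (8 bytes),
 * for 56 bytes in total.
 */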
kern_return_t
kcdata_api_test()
{
	kern_return_t retval = KERN_SUCCESS;
	/* test for NULL input */
	retval = kcdata_memory_static_init(NULL, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_STACKSHOT, 100, KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_memory_static_init with NULL struct");

	/* another negative test with buffer size < 32 bytes */
	char data[30] = "sample_disk_io_stats";
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)&data, KCDATA_BUFFER_BEGIN_CRASHINFO, sizeof(data),
	    KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_RESOURCE_SHORTAGE, "init with 30 bytes failed as expected with KERN_RESOURCE_SHORTAGE");
	/* test with COPYOUT for 0x0 address. Should return KERN_NO_ACCESS */
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_CRASHINFO, PAGE_SIZE,
	    KCFLAG_USE_COPYOUT);
	T_ASSERT(retval == KERN_NO_ACCESS, "writing to 0x0 returned KERN_NO_ACCESS");
	/* test with successful kcdata_memory_static_init */
	test_kc_data.kcd_length   = 0xdeadbeef;
	mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
	T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");

	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
	    KCFLAG_USE_MEMCOPY);

	T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");
	T_ASSERT(test_kc_data.kcd_length == PAGE_SIZE, "kcdata length is set correctly to PAGE_SIZE.");
	T_LOG("addr_begin 0x%llx and end 0x%llx and address 0x%llx", test_kc_data.kcd_addr_begin, test_kc_data.kcd_addr_end, address);
	T_ASSERT(test_kc_data.kcd_addr_begin == address, "kcdata begin address is correct 0x%llx", (uint64_t)address);
	/* verify we have BEGIN and END HEADERS set */
	uint32_t * mem = (uint32_t *)address;
	T_ASSERT(mem[0] == KCDATA_BUFFER_BEGIN_STACKSHOT, "buffer does contain KCDATA_BUFFER_BEGIN_STACKSHOT");
	T_ASSERT(mem[4] == KCDATA_TYPE_BUFFER_END, "KCDATA_TYPE_BUFFER_END is appended as expected");
	T_ASSERT(mem[5] == 0, "size of BUFFER_END tag is zero");
	/* verify kcdata_memory_get_used_bytes() */
	uint64_t bytes_used = 0;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	T_ASSERT(bytes_used == (2 * sizeof(struct kcdata_item)), "bytes_used api returned expected %llu", bytes_used);
	/* test for kcdata_get_memory_addr() */

	mach_vm_address_t user_addr = 0;
	/* negative test for NULL user_addr AND/OR kcdata_descriptor */
	retval = kcdata_get_memory_addr(NULL, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL struct -> KERN_INVALID_ARGUMENT");

	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), NULL);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL user_addr -> KERN_INVALID_ARGUMENT");
	/* successful case with size 0. Yes this is expected to succeed as just an item type could be used as boolean */
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_USECS_SINCE_EPOCH, 0, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Successfully got kcdata entry for 0 size data");
	T_ASSERT(user_addr == test_kc_data.kcd_addr_end, "0 sized data did not add any extra buffer space");
	/* successful case with valid size. */
	user_addr = 0xdeadbeef;
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "kcdata_get_memory_addr with valid values succeeded.");
	T_ASSERT(user_addr > test_kc_data.kcd_addr_begin, "user_addr is in range of buffer");
	T_ASSERT(user_addr < test_kc_data.kcd_addr_end, "user_addr is in range of buffer");
	/* Try creating an item with really large size */
	user_addr  = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, PAGE_SIZE * 4, &user_addr);
	T_ASSERT(retval == KERN_RESOURCE_SHORTAGE, "Allocating entry with size > buffer -> KERN_RESOURCE_SHORTAGE");
	T_ASSERT(user_addr == 0xdeadbeef, "user_addr remained unaffected with failed kcdata_get_memory_addr");
	T_ASSERT(bytes_used == kcdata_memory_get_used_bytes(&test_kc_data), "The data structure should be unaffected");
	/* verify convenience functions for uint32_with_description */
	retval = kcdata_add_uint32_with_description(&test_kc_data, 0xbdc0ffee, "This is bad coffee");
	T_ASSERT(retval == KERN_SUCCESS, "add uint32 with description succeeded.");

	retval = kcdata_add_uint64_with_description(&test_kc_data, 0xf001badc0ffee, "another 8 byte no.");
	T_ASSERT(retval == KERN_SUCCESS, "add uint64 with desc succeeded.");
	/* verify creating a KCDATA_TYPE_ARRAY here */
	user_addr  = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	/* save memory address where the array will come up */
	struct kcdata_item * item_p = (struct kcdata_item *)test_kc_data.kcd_addr_end;

	retval = kcdata_get_memory_addr_for_array(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), 20, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Array of 20 integers should be possible");
	T_ASSERT(user_addr != 0xdeadbeef, "user_addr is updated as expected");
	T_ASSERT((kcdata_memory_get_used_bytes(&test_kc_data) - bytes_used) >= 20 * sizeof(uint64_t), "memory allocation is in range");
	kcdata_iter_t iter = kcdata_iter(item_p, PAGE_SIZE - kcdata_memory_get_used_bytes(&test_kc_data));
	T_ASSERT(kcdata_iter_array_elem_count(iter) == 20, "array count is 20");
	/* FIXME add tests here for ranges of sizes and counts */

	T_ASSERT(item_p->flags == (((uint64_t)KCDATA_TYPE_MACH_ABSOLUTE_TIME << 32) | 20), "flags are set correctly");
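
	/*
	 * Array item flags pack the element type into the upper 32 bits and
	 * the element count into the lower 32 bits, so 20 MACH_ABSOLUTE_TIME
	 * elements yield ((uint64_t)KCDATA_TYPE_MACH_ABSOLUTE_TIME << 32) | 20,
	 * which is what the assert above verifies.
	 */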
	/* test adding of custom type */

	retval = kcdata_add_type_definition(&test_kc_data, 0x999, data, &test_disk_io_stats_def[0],
	    sizeof(test_disk_io_stats_def) / sizeof(struct kcdata_subtype_descriptor));
	T_ASSERT(retval == KERN_SUCCESS, "adding custom type succeeded.");

	return KERN_SUCCESS;
}
/*
 * kern_return_t
 * kcdata_api_assert_tests()
 * {
 *	kern_return_t retval       = 0;
 *	void * assert_check_retval = NULL;
 *	test_kc_data2.kcd_length   = 0xdeadbeef;
 *	mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
 *	T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");
 *
 *	retval = kcdata_memory_static_init(&test_kc_data2, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
 *	    KCFLAG_USE_MEMCOPY);
 *
 *	T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");
 *
 *	retval = T_REGISTER_ASSERT_CHECK("KCDATA_DESC_MAXLEN", &assert_check_retval);
 *	T_ASSERT(retval == KERN_SUCCESS, "registered assert widget");
 *
 *	// this will assert
 *	retval = kcdata_add_uint32_with_description(&test_kc_data2, 0xc0ffee, "really long description string for kcdata");
 *	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "API param check returned KERN_INVALID_ARGUMENT correctly");
 *	T_ASSERT(assert_check_retval == (void *)XT_RET_W_SUCCESS, "assertion handler verified that it was hit");
 *
 *	return KERN_SUCCESS;
 * }
 */
#if defined(__arm__) || defined(__arm64__)

#include <arm/pmap.h>

#define MAX_PMAP_OBJECT_ELEMENT 100000

extern struct vm_object pmap_object_store; /* store pt pages */
extern unsigned long gPhysBase, gPhysSize, first_avail;
/*
 * Define macros to traverse the pmap object structures and extract
 * physical page numbers using information from the low globals only.
 * This emulates how Astris extracts information from a coredump.
 */
#if defined(__arm64__)
static inline uintptr_t
astris_vm_page_unpack_ptr(uintptr_t p)
{
	if (!p) {
		return (uintptr_t)0;
	}

	return (p & lowGlo.lgPmapMemFromArrayMask)
	           ? lowGlo.lgPmapMemStartAddr + (p & ~(lowGlo.lgPmapMemFromArrayMask)) * lowGlo.lgPmapMemPagesize
	           : lowGlo.lgPmapMemPackedBaseAddr + (p << lowGlo.lgPmapMemPackedShift);
}
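
/*
 * Worked example (illustrative values): when lgPmapMemFromArrayMask is set
 * in p, the remaining bits index the vm_pages array, giving
 * lgPmapMemStartAddr + index * lgPmapMemPagesize; otherwise p is a packed
 * kernel pointer, recovered as lgPmapMemPackedBaseAddr +
 * (p << lgPmapMemPackedShift). This mirrors the kernel's own packed
 * vm_page pointer scheme using only values published in the low globals.
 */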
// assume next pointer is the first element
#define astris_vm_page_queue_next(qc) (astris_vm_page_unpack_ptr(*((uint32_t *)(qc))))

#endif /* defined(__arm64__) */

#if defined(__arm__)

// assume next pointer is the first element
#define astris_vm_page_queue_next(qc) *((uintptr_t *)(qc))

#endif /* defined(__arm__) */

#define astris_vm_page_queue_first(q) astris_vm_page_queue_next(q)

#define astris_vm_page_queue_end(q, qe) ((q) == (qe))

#define astris_vm_page_queue_iterate(head, elt)                                                           \
	for ((elt) = (uintptr_t)astris_vm_page_queue_first((head)); !astris_vm_page_queue_end((head), (elt)); \
	     (elt) = (uintptr_t)astris_vm_page_queue_next(((elt) + (uintptr_t)lowGlo.lgPmapMemChainOffset)))

#define astris_ptoa(x) ((vm_address_t)(x) << lowGlo.lgPageShift)
static inline ppnum_t
astris_vm_page_get_phys_page(uintptr_t m)
{
	return (m >= lowGlo.lgPmapMemStartAddr && m < lowGlo.lgPmapMemEndAddr)
	           ? (ppnum_t)((m - lowGlo.lgPmapMemStartAddr) / lowGlo.lgPmapMemPagesize + lowGlo.lgPmapMemFirstppnum)
	           : *((ppnum_t *)(m + lowGlo.lgPmapMemPageOffset));
}
kern_return_t
pmap_coredump_test(void)
{
	int iter = 0;
	uintptr_t p;

	T_LOG("Testing coredump info for PMAP.");
	T_ASSERT_GE_ULONG(lowGlo.lgStaticAddr, gPhysBase, NULL);
	T_ASSERT_LE_ULONG(lowGlo.lgStaticAddr + lowGlo.lgStaticSize, first_avail, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMajorVersion, 3, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMinorVersion, 0, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMagic, LOWGLO_LAYOUT_MAGIC, NULL);
	// check the constant values in lowGlo
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemQ, ((uint64_t) & (pmap_object_store.memq)), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPageOffset, offsetof(struct vm_page_with_ppnum, vmp_phys_page), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemChainOffset, offsetof(struct vm_page, vmp_listq), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPagesize, sizeof(struct vm_page), NULL);
#if defined(__arm64__)
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemFromArrayMask, VM_PACKED_FROM_VM_PAGES_ARRAY, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedShift, VM_PACKED_POINTER_SHIFT, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedBaseAddr, VM_MIN_KERNEL_AND_KEXT_ADDRESS, NULL);
#endif
	vm_object_lock_shared(&pmap_object_store);
	astris_vm_page_queue_iterate(lowGlo.lgPmapMemQ, p)
	{
		ppnum_t ppnum   = astris_vm_page_get_phys_page(p);
		pmap_paddr_t pa = (pmap_paddr_t)astris_ptoa(ppnum);
		T_ASSERT_GE_ULONG(pa, gPhysBase, NULL);
		T_ASSERT_LT_ULONG(pa, gPhysBase + gPhysSize, NULL);
		iter++;
		T_ASSERT_LT_INT(iter, MAX_PMAP_OBJECT_ELEMENT, NULL);
	}
	vm_object_unlock(&pmap_object_store);

	T_ASSERT_GT_INT(iter, 0, NULL);
	return KERN_SUCCESS;
}
#endif /* defined(__arm__) || defined(__arm64__) */