/*
 * Copyright (c) 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/host.h>
#include <kern/macro_help.h>
#include <kern/sched.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/thread_call.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <tests/ktest.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <kern/kern_cdata.h>
#include <machine/lowglobals.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <kern/priority_queue.h>

#if !(DEVELOPMENT || DEBUG)
#error "Testing is not enabled on RELEASE configurations"
#endif

#include <tests/xnupost.h>

extern boolean_t get_range_bounds(char * c, int64_t * lower, int64_t * upper);
__private_extern__ void qsort(void * a, size_t n, size_t es, int (*cmp)(const void *, const void *));

uint32_t total_post_tests_count = 0;
void xnupost_reset_panic_widgets(void);

/* test declarations */
kern_return_t zalloc_test(void);
kern_return_t RandomULong_test(void);
kern_return_t kcdata_api_test(void);
kern_return_t priority_queue_test(void);

#if defined(__arm__) || defined(__arm64__)
kern_return_t pmap_coredump_test(void);
#endif

extern kern_return_t console_serial_test(void);
extern kern_return_t console_serial_alloc_rel_tests(void);
extern kern_return_t console_serial_parallel_log_tests(void);
extern kern_return_t test_os_log(void);
extern kern_return_t test_os_log_parallel(void);
extern kern_return_t bitmap_post_test(void);

#ifdef __arm64__
extern kern_return_t arm64_munger_test(void);
extern kern_return_t ex_cb_test(void);
#if __ARM_PAN_AVAILABLE__
extern kern_return_t arm64_pan_test(void);
#endif /* __ARM_PAN_AVAILABLE__ */
#endif /* __arm64__ */

extern kern_return_t test_thread_call(void);

struct xnupost_panic_widget xt_panic_widgets = {NULL, NULL, NULL, NULL};

struct xnupost_test kernel_post_tests[] = {XNUPOST_TEST_CONFIG_BASIC(zalloc_test),
                                           XNUPOST_TEST_CONFIG_BASIC(RandomULong_test),
                                           XNUPOST_TEST_CONFIG_BASIC(test_os_log),
                                           XNUPOST_TEST_CONFIG_BASIC(test_os_log_parallel),
#ifdef __arm64__
                                           XNUPOST_TEST_CONFIG_BASIC(arm64_munger_test),
                                           XNUPOST_TEST_CONFIG_BASIC(ex_cb_test),
#if __ARM_PAN_AVAILABLE__
                                           XNUPOST_TEST_CONFIG_BASIC(arm64_pan_test),
#endif /* __ARM_PAN_AVAILABLE__ */
#endif /* __arm64__ */
                                           XNUPOST_TEST_CONFIG_BASIC(kcdata_api_test),
                                           XNUPOST_TEST_CONFIG_BASIC(console_serial_test),
                                           XNUPOST_TEST_CONFIG_BASIC(console_serial_alloc_rel_tests),
                                           XNUPOST_TEST_CONFIG_BASIC(console_serial_parallel_log_tests),
#if defined(__arm__) || defined(__arm64__)
                                           XNUPOST_TEST_CONFIG_BASIC(pmap_coredump_test),
#endif
                                           XNUPOST_TEST_CONFIG_BASIC(bitmap_post_test),
                                           //XNUPOST_TEST_CONFIG_TEST_PANIC(kcdata_api_assert_tests)
                                           XNUPOST_TEST_CONFIG_BASIC(test_thread_call),
                                           XNUPOST_TEST_CONFIG_BASIC(priority_queue_test)};

uint32_t kernel_post_tests_count = sizeof(kernel_post_tests) / sizeof(xnupost_test_data_t);

#define POSTARGS_RUN_TESTS 0x1
#define POSTARGS_CONTROLLER_AVAILABLE 0x2
#define POSTARGS_CUSTOM_TEST_RUNLIST 0x4
uint64_t kernel_post_args = 0x0;

/* static variables to hold state */
static kern_return_t parse_config_retval = KERN_INVALID_CAPABILITY;
static char kernel_post_test_configs[256];
boolean_t xnupost_should_run_test(uint32_t test_num);

kern_return_t
xnupost_parse_config(void)
{
	if (parse_config_retval != KERN_INVALID_CAPABILITY) {
		return parse_config_retval;
	}
	PE_parse_boot_argn("kernPOST", &kernel_post_args, sizeof(kernel_post_args));

	if (PE_parse_boot_argn("kernPOST_config", &kernel_post_test_configs[0], sizeof(kernel_post_test_configs)) == TRUE) {
		kernel_post_args |= POSTARGS_CUSTOM_TEST_RUNLIST;
	}

	if (kernel_post_args != 0) {
		parse_config_retval = KERN_SUCCESS;
	} else {
		parse_config_retval = KERN_NOT_SUPPORTED;
	}
	return parse_config_retval;
}

boolean_t
xnupost_should_run_test(uint32_t test_num)
{
	if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
		int64_t begin = 0, end = 999999;
		char * b = kernel_post_test_configs;
		while (*b) {
			get_range_bounds(b, &begin, &end);
			if (test_num >= begin && test_num <= end) {
				return TRUE;
			}

			/* skip to the next "," */
			while (*b != ',') {
				if (*b == '\0') {
					return FALSE;
				}
				b++;
			}
			/* skip past the ',' */
			b++;
		}
		return FALSE;
	}
	return TRUE;
}

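/*
 * Illustration (hypothetical boot-args, not from the original source): with
 *   kernPOST_config=2-4,7
 * the first get_range_bounds() call yields [2,4], so tests 2 through 4 are
 * selected; the scan then advances past the ',' and the next call yields
 * [7,7]. Any test number outside the listed ranges keeps XT_CONFIG_IGNORE
 * set. kernPOST itself must also be set for any test to run at all.
 */
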
kern_return_t
xnupost_list_tests(xnupost_test_t test_list, uint32_t test_count)
{
	if (KERN_SUCCESS != xnupost_parse_config()) {
		return KERN_FAILURE;
	}

	xnupost_test_t testp;
	for (uint32_t i = 0; i < test_count; i++) {
		testp = &test_list[i];
		if (testp->xt_test_num == 0) {
			testp->xt_test_num = ++total_post_tests_count;
		}
		/* make sure the boot-arg based test run list is honored */
		if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
			testp->xt_config |= XT_CONFIG_IGNORE;
			if (xnupost_should_run_test(testp->xt_test_num)) {
				testp->xt_config &= ~(XT_CONFIG_IGNORE);
				testp->xt_config |= XT_CONFIG_RUN;
			} else {
				printf("\n[TEST] #%u is marked as ignored", testp->xt_test_num);
			}
		}
		printf("\n[TEST] TOC#%u name: %s expected: %d config: %x\n", testp->xt_test_num, testp->xt_name, testp->xt_expected_retval,
		    testp->xt_config);
	}

	return KERN_SUCCESS;
}

kern_return_t
xnupost_run_tests(xnupost_test_t test_list, uint32_t test_count)
{
	uint32_t i = 0;
	int retval = KERN_SUCCESS;

	if ((kernel_post_args & POSTARGS_RUN_TESTS) == 0) {
		printf("No POST boot-arg set.\n");
		return retval;
	}

	xnupost_test_t testp;
	for (; i < test_count; i++) {
		xnupost_reset_panic_widgets();
		testp = &test_list[i];
		T_BEGIN(testp->xt_name);
		testp->xt_begin_time = mach_absolute_time();
		testp->xt_end_time   = testp->xt_begin_time;

		/*
		 * If test is designed to panic and controller
		 * is not available then mark as SKIPPED
		 */
		if ((testp->xt_config & XT_CONFIG_EXPECT_PANIC) && !(kernel_post_args & POSTARGS_CONTROLLER_AVAILABLE)) {
			T_SKIP(
			    "Test expects panic but "
			    "no controller is present");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		if ((testp->xt_config & XT_CONFIG_IGNORE)) {
			T_SKIP("Test is marked as XT_CONFIG_IGNORE");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		testp->xt_func();
		T_END;
		testp->xt_retval   = T_TESTRESULT;
		testp->xt_end_time = mach_absolute_time();
		if (testp->xt_retval == testp->xt_expected_retval) {
			testp->xt_test_actions = XT_ACTION_PASSED;
		} else {
			testp->xt_test_actions = XT_ACTION_FAILED;
		}
	}
	return retval;
}

kern_return_t
kernel_list_tests(void)
{
	return xnupost_list_tests(kernel_post_tests, kernel_post_tests_count);
}

kern_return_t
kernel_do_post(void)
{
	return xnupost_run_tests(kernel_post_tests, kernel_post_tests_count);
}

kern_return_t
xnupost_register_panic_widget(xt_panic_widget_func funcp, const char * funcname, void * context, void ** outval)
{
	if (xt_panic_widgets.xtp_context_p != NULL || xt_panic_widgets.xtp_func != NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}

	xt_panic_widgets.xtp_context_p = context;
	xt_panic_widgets.xtp_func      = funcp;
	xt_panic_widgets.xtp_func_name = funcname;
	xt_panic_widgets.xtp_outval_p  = outval;

	return KERN_SUCCESS;
}

void
xnupost_reset_panic_widgets(void)
{
	bzero(&xt_panic_widgets, sizeof(xt_panic_widgets));
}

kern_return_t
xnupost_process_kdb_stop(const char * panic_s)
{
	xt_panic_return_t retval         = 0;
	struct xnupost_panic_widget * pw = &xt_panic_widgets;
	const char * name                = "unknown";
	if (xt_panic_widgets.xtp_func_name) {
		name = xt_panic_widgets.xtp_func_name;
	}

	/* bail early on if kernPOST is not set */
	if (kernel_post_args == 0) {
		return KERN_INVALID_CAPABILITY;
	}

	if (xt_panic_widgets.xtp_func) {
		T_LOG("%s: Calling out to widget: %s", __func__, xt_panic_widgets.xtp_func_name);
		retval = pw->xtp_func(panic_s, pw->xtp_context_p, pw->xtp_outval_p);
	} else {
		return KERN_INVALID_CAPABILITY;
	}

	switch (retval) {
	case XT_RET_W_SUCCESS:
		T_EXPECT_EQ_INT(retval, XT_RET_W_SUCCESS, "%s reported successful handling. Returning from kdb_stop.", name);
		/* KERN_SUCCESS means return from panic/assertion */
		return KERN_SUCCESS;

	case XT_RET_W_FAIL:
		T_FAIL("%s reported XT_RET_W_FAIL: Returning from kdb_stop", name);
		return KERN_SUCCESS;

	case XT_PANIC_W_FAIL:
		T_FAIL("%s reported XT_PANIC_W_FAIL: Continuing to kdb_stop", name);
		return KERN_FAILURE;

	case XT_PANIC_W_SUCCESS:
		T_EXPECT_EQ_INT(retval, XT_PANIC_W_SUCCESS, "%s reported successful testcase. But continuing to kdb_stop.", name);
		return KERN_FAILURE;

	case XT_PANIC_UNRELATED:
	default:
		T_LOG("UNRELATED: Continuing to kdb_stop.");
		return KERN_FAILURE;
	}
}

xt_panic_return_t
_xt_generic_assert_check(const char * s, void * str_to_match, void ** outval)
{
	xt_panic_return_t ret = XT_PANIC_UNRELATED;

	if (NULL != strnstr(__DECONST(char *, s), (char *)str_to_match, strlen(s))) {
		T_LOG("%s: kdb_stop string: '%s' MATCHED string: '%s'", __func__, s, (char *)str_to_match);
		ret = XT_RET_W_SUCCESS;
	}

	if (outval) {
		*outval = (void *)(uintptr_t)ret;
	}
	return ret;
}

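/*
 * Usage sketch (illustrative; assumes the T_REGISTER_ASSERT_CHECK macro used
 * later in this file wraps a registration like the one below): a panic test
 * arms the generic matcher so a later assertion containing the given
 * substring counts as the expected outcome.
 *
 *   void * outval = NULL;
 *   xnupost_register_panic_widget(&_xt_generic_assert_check,
 *       "_xt_generic_assert_check", (void *)"KCDATA_DESC_MAXLEN", &outval);
 *
 * xnupost_process_kdb_stop() then invokes the widget with the panic string
 * and, on a match, XT_RET_W_SUCCESS is stored through outval.
 */
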
kern_return_t
xnupost_reset_tests(xnupost_test_t test_list, uint32_t test_count)
{
	uint32_t i = 0;
	xnupost_test_t testp;
	for (; i < test_count; i++) {
		testp                  = &test_list[i];
		testp->xt_begin_time   = 0;
		testp->xt_end_time     = 0;
		testp->xt_test_actions = XT_ACTION_NONE;
		testp->xt_retval       = -1;
	}
	return KERN_SUCCESS;
}

kern_return_t
zalloc_test(void)
{
	zone_t test_zone;
	void * test_ptr;

	test_zone = zinit(sizeof(uint64_t), 100 * sizeof(uint64_t), sizeof(uint64_t), "test_uint64_zone");
	T_ASSERT_NOTNULL(test_zone, NULL);

	T_ASSERT_EQ_INT(zone_free_count(test_zone), 0, NULL);

	T_ASSERT_NOTNULL(test_ptr = zalloc(test_zone), NULL);

	zfree(test_zone, test_ptr);

	/* A sample report for perfdata */
	T_PERF("num_threads_at_ktest", threads_count, "count", "# of threads in system at zalloc_test");

	return KERN_SUCCESS;
}

/*
 * Function used for comparison by qsort()
 */
static int
compare_numbers_ascending(const void * a, const void * b)
{
	const uint64_t x = *(const uint64_t *)a;
	const uint64_t y = *(const uint64_t *)b;
	if (x < y) {
		return -1;
	} else if (x > y) {
		return 1;
	} else {
		return 0;
	}
}

/*
 * Function used for comparison by qsort()
 */
static int
compare_numbers_descending(const void * a, const void * b)
{
	const uint32_t x = *(const uint32_t *)a;
	const uint32_t y = *(const uint32_t *)b;
	if (x > y) {
		return -1;
	} else if (x < y) {
		return 1;
	} else {
		return 0;
	}
}

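/*
 * Example (illustrative): with the ascending comparator above,
 *
 *   uint64_t v[] = { 3, 1, 2 };
 *   qsort(v, 3, sizeof(v[0]), compare_numbers_ascending);  // v is now { 1, 2, 3 }
 *
 * Note the two comparators deliberately use different element widths: the
 * ascending one sorts the uint64_t random-number array below, while the
 * descending one sorts the uint32_t priority list in the heap tests.
 */
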
/* Node structure for the priority queue tests */
struct priority_queue_test_node {
	struct priority_queue_entry link;
	priority_queue_key_t        node_key;
};

static void
priority_queue_test_queue(struct priority_queue *pq, int type,
    priority_queue_compare_fn_t cmp_fn)
{
	/* Configuration for the test */
#define PRIORITY_QUEUE_NODES 7
	static uint32_t priority_list[] = { 20, 3, 7, 6, 50, 2, 8};
	uint32_t increase_pri = 100;
	uint32_t decrease_pri = 90;
	struct priority_queue_test_node *result;
	uint32_t key = 0;
	boolean_t update_result = false;

	struct priority_queue_test_node *node = NULL;
	/* Add all priorities to the first priority queue */
	for (int i = 0; i < PRIORITY_QUEUE_NODES; i++) {
		node = kalloc(sizeof(struct priority_queue_test_node));
		T_ASSERT_NOTNULL(node, NULL);

		priority_queue_entry_init(&(node->link));
		node->node_key = priority_list[i];
		key = (type == PRIORITY_QUEUE_GENERIC_KEY) ? PRIORITY_QUEUE_KEY_NONE : priority_list[i];
		priority_queue_insert(pq, &(node->link), key, cmp_fn);
	}

	T_ASSERT_NOTNULL(node, NULL);
	key = (type == PRIORITY_QUEUE_GENERIC_KEY) ? node->node_key : priority_queue_entry_key(pq, &(node->link));
	T_ASSERT((key == node->node_key), "verify node stored key correctly");

	/* Test the priority increase operation by updating the last node added (8) */
	T_ASSERT_NOTNULL(node, NULL);
	node->node_key = increase_pri;
	key = (type == PRIORITY_QUEUE_GENERIC_KEY) ? PRIORITY_QUEUE_KEY_NONE : node->node_key;
	update_result = priority_queue_entry_increase(pq, &node->link, key, cmp_fn);
	T_ASSERT((update_result == true), "increase key updated root");
	result = priority_queue_max(pq, struct priority_queue_test_node, link);
	T_ASSERT((result->node_key == increase_pri), "verify priority_queue_entry_increase() operation");

	/* Test the priority decrease operation by updating the last node added */
	T_ASSERT((result == node), NULL);
	node->node_key = decrease_pri;
	key = (type == PRIORITY_QUEUE_GENERIC_KEY) ? PRIORITY_QUEUE_KEY_NONE : node->node_key;
	update_result = priority_queue_entry_decrease(pq, &node->link, key, cmp_fn);
	T_ASSERT((update_result == true), "decrease key updated root");
	result = priority_queue_max(pq, struct priority_queue_test_node, link);
	T_ASSERT((result->node_key == decrease_pri), "verify priority_queue_entry_decrease() operation");

	/* Update our local priority list as well */
	priority_list[PRIORITY_QUEUE_NODES - 1] = decrease_pri;

	/* Sort the local list in descending order */
	qsort(priority_list, PRIORITY_QUEUE_NODES, sizeof(priority_list[0]), compare_numbers_descending);

	/* Test the maximum operation by comparing max node with local list */
	result = priority_queue_max(pq, struct priority_queue_test_node, link);
	T_ASSERT((result->node_key == priority_list[0]), "(heap (%u) == qsort (%u)) priority queue max node lookup",
	    (uint32_t)result->node_key, priority_list[0]);

	/* Remove all remaining elements and verify they match local list */
	for (int i = 0; i < PRIORITY_QUEUE_NODES; i++) {
		result = priority_queue_remove_max(pq, struct priority_queue_test_node, link, cmp_fn);
		T_ASSERT((result->node_key == priority_list[i]), "(heap (%u) == qsort (%u)) priority queue max node removal",
		    (uint32_t)result->node_key, priority_list[i]);
	}

	priority_queue_destroy(pq, struct priority_queue_test_node, link, ^(void *n) {
		kfree(n, sizeof(struct priority_queue_test_node));
	});
}

kern_return_t
priority_queue_test(void)
{
	/*
	 * Initialize two priority queues
	 * - One which uses the key comparator
	 * - Other which uses the node comparator
	 */
	static struct priority_queue pq;
	static struct priority_queue pq_nodes;

	priority_queue_init(&pq, PRIORITY_QUEUE_BUILTIN_KEY | PRIORITY_QUEUE_MAX_HEAP);
	priority_queue_init(&pq_nodes, PRIORITY_QUEUE_GENERIC_KEY | PRIORITY_QUEUE_MAX_HEAP);

	priority_queue_test_queue(&pq, PRIORITY_QUEUE_BUILTIN_KEY,
	    PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE);

	priority_queue_test_queue(&pq_nodes, PRIORITY_QUEUE_GENERIC_KEY,
	    priority_heap_make_comparator(a, b, struct priority_queue_test_node, link, {
		return (a->node_key > b->node_key) ? 1 : ((a->node_key == b->node_key) ? 0 : -1);
	}));

	return KERN_SUCCESS;
}

/*
 * Function to count the number of bits that are set in a number.
 * The compiler builtin typically lowers to sideways addition using
 * magic binary numbers when no hardware popcount instruction exists.
 */
static int
count_bits(uint64_t number)
{
	return __builtin_popcountll(number);
}

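/*
 * For reference, a sketch of the sideways-addition technique the comment
 * above refers to (equivalent to the builtin; shown only as illustration):
 *
 *   static int
 *   count_bits_magic(uint64_t v)
 *   {
 *       v = v - ((v >> 1) & 0x5555555555555555ULL);                            // 2-bit sums
 *       v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);  // 4-bit sums
 *       v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;                            // 8-bit sums
 *       return (int)((v * 0x0101010101010101ULL) >> 56);                       // add all bytes
 *   }
 */
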
kern_return_t
RandomULong_test(void)
{
/*
 * Randomness test for RandomULong()
 *
 * This test verifies that:
 *  a. RandomULong works
 *  b. The generated numbers match the following entropy criteria:
 *     For a thousand iterations, verify:
 *          1. mean entropy > 12 bits
 *          2. min entropy > 4 bits
 *          3. No duplicates
 *          4. No incremental/decremental pattern in a window of 3
 *          5. No zero
 *          6. No -1
 *
 * <rdar://problem/22526137> Add test to increase code coverage for /dev/random
 */

#define CONF_MIN_ENTROPY 4
#define CONF_MEAN_ENTROPY 12
#define CONF_ITERATIONS 1000
#define CONF_WINDOW_SIZE 3
#define CONF_WINDOW_TREND_LIMIT ((CONF_WINDOW_SIZE / 2) + (CONF_WINDOW_SIZE & 1)) >> 0

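/*
 * Worked out for CONF_WINDOW_SIZE == 3:
 * ((3 / 2) + (3 & 1)) >> 0 == (1 + 1) == 2, i.e. ceil(window_size / 2).
 * A window of 3 values has 2 adjacent comparisons, so the absolute trend
 * over one window can be at most 2.
 */
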
	int i;
	uint32_t min_bit_entropy, max_bit_entropy, bit_entropy;
	uint32_t aggregate_bit_entropy = 0;
	uint32_t mean_bit_entropy      = 0;
	uint64_t numbers[CONF_ITERATIONS];
	min_bit_entropy = UINT32_MAX;
	max_bit_entropy = 0;

	/*
	 * TEST 1: Number generation and basic validation
	 * Check for non-zero (no bits set), -1 (all bits set) and error
	 */
	for (i = 0; i < CONF_ITERATIONS; i++) {
		read_random(&numbers[i], sizeof(numbers[i]));
		if (numbers[i] == 0) {
			T_ASSERT_NE_ULLONG(numbers[i], 0, "read_random returned zero value.");
		}
		if (numbers[i] == UINT64_MAX) {
			T_ASSERT_NE_ULLONG(numbers[i], UINT64_MAX, "read_random returned -1.");
		}
	}
	T_PASS("Generated %d non-zero random numbers with at least one bit reset.", CONF_ITERATIONS);

	/*
	 * TEST 2: Mean and Min Bit Entropy
	 * Check the bit entropy and its mean over the generated numbers.
	 */
	for (i = 1; i < CONF_ITERATIONS; i++) {
		bit_entropy = count_bits(numbers[i - 1] ^ numbers[i]);
		if (bit_entropy < min_bit_entropy) {
			min_bit_entropy = bit_entropy;
		}
		if (bit_entropy > max_bit_entropy) {
			max_bit_entropy = bit_entropy;
		}

		if (bit_entropy < CONF_MIN_ENTROPY) {
			T_EXPECT_GE_UINT(bit_entropy, CONF_MIN_ENTROPY,
			    "Number of differing bits in consecutive numbers does not satisfy the min criteria.");
		}

		aggregate_bit_entropy += bit_entropy;
	}
	T_PASS("Passed the min bit entropy expectation of %d bits", CONF_MIN_ENTROPY);

	mean_bit_entropy = aggregate_bit_entropy / CONF_ITERATIONS;
	T_EXPECT_GE_UINT(mean_bit_entropy, CONF_MEAN_ENTROPY, "Test criteria for mean number of differing bits.");
	T_PASS("Mean bit entropy criteria satisfied (Required %d, Actual: %d).", CONF_MEAN_ENTROPY, mean_bit_entropy);
	T_LOG("{PERFORMANCE} iterations: %d, min_bit_entropy: %d, mean_bit_entropy: %d, max_bit_entropy: %d", CONF_ITERATIONS,
	    min_bit_entropy, mean_bit_entropy, max_bit_entropy);
	T_PERF("min_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), min_bit_entropy, "bits", "minimum bit entropy in RNG. High is better");
	T_PERF("mean_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), mean_bit_entropy, "bits", "mean bit entropy in RNG. High is better");
	T_PERF("max_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), max_bit_entropy, "bits", "max bit entropy in RNG. High is better");

	/*
	 * TEST 3: Incremental Pattern Search
	 * Check that incremental/decremental pattern does not exist in the given window
	 */
	int window_start, window_end, trend;
	window_start = window_end = trend = 0;

	do {
		/* Set the window */
		window_end = window_start + CONF_WINDOW_SIZE - 1;
		if (window_end >= CONF_ITERATIONS) {
			window_end = CONF_ITERATIONS - 1;
		}

		trend = 0;
		for (i = window_start; i < window_end; i++) {
			if (numbers[i] < numbers[i + 1]) {
				trend++;
			} else if (numbers[i] > numbers[i + 1]) {
				trend--;
			}
		}
		if (trend < 0) {
			trend = -trend;
		}

		/*
		 * Check that there is no increasing or decreasing trend
		 * i.e. trend <= ceil(window_size/2)
		 */
		if (trend > CONF_WINDOW_TREND_LIMIT) {
			T_ASSERT_LE_INT(trend, CONF_WINDOW_TREND_LIMIT, "Found increasing/decreasing trend in random numbers.");
		}

		/* Move to the next window */
		window_start++;
	} while (window_start < (CONF_ITERATIONS - 1));
	T_PASS("Did not find increasing/decreasing trends in a window of %d numbers.", CONF_WINDOW_SIZE);

	/*
	 * TEST 4: Find Duplicates
	 * Check no duplicate values are generated
	 */
	qsort(numbers, CONF_ITERATIONS, sizeof(numbers[0]), compare_numbers_ascending);
	for (i = 1; i < CONF_ITERATIONS; i++) {
		if (numbers[i] == numbers[i - 1]) {
			T_ASSERT_NE_ULLONG(numbers[i], numbers[i - 1], "read_random generated duplicate values.");
		}
	}
	T_PASS("Test did not find any duplicates as expected.");

	return KERN_SUCCESS;
}

/* KCDATA kernel api tests */
static struct kcdata_descriptor test_kc_data; //, test_kc_data2;

struct sample_disk_io_stats {
	uint64_t disk_reads_count;
	uint64_t disk_reads_size;
	uint64_t io_priority_count[4];
	uint64_t io_priority_size;
} __attribute__((packed));

struct kcdata_subtype_descriptor test_disk_io_stats_def[] = {
    {KCS_SUBTYPE_FLAGS_NONE, KC_ST_UINT64, 0 * sizeof(uint64_t), sizeof(uint64_t), "disk_reads_count"},
    {KCS_SUBTYPE_FLAGS_NONE, KC_ST_UINT64, 1 * sizeof(uint64_t), sizeof(uint64_t), "disk_reads_size"},
    {KCS_SUBTYPE_FLAGS_ARRAY, KC_ST_UINT64, 2 * sizeof(uint64_t), KCS_SUBTYPE_PACK_SIZE(4, sizeof(uint64_t)), "io_priority_count"},
    {KCS_SUBTYPE_FLAGS_ARRAY, KC_ST_UINT64, (2 + 4) * sizeof(uint64_t), sizeof(uint64_t), "io_priority_size"},
};

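/*
 * The descriptors above mirror the packed struct layout: disk_reads_count at
 * offset 0, disk_reads_size at offset 8, the four-element io_priority_count
 * array spanning offsets 16..47 (its total size encoded via
 * KCS_SUBTYPE_PACK_SIZE(4, sizeof(uint64_t))), and io_priority_size at
 * offset 48.
 */
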
kern_return_t
kcdata_api_test(void)
{
	kern_return_t retval = KERN_SUCCESS;

	/* test for NULL input */
	retval = kcdata_memory_static_init(NULL, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_STACKSHOT, 100, KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_memory_static_init with NULL struct");

	/* another negative test with buffer size < 32 bytes */
	char data[30] = "sample_disk_io_stats";
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)&data, KCDATA_BUFFER_BEGIN_CRASHINFO, sizeof(data),
	    KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_RESOURCE_SHORTAGE, "init with 30 bytes failed as expected with KERN_RESOURCE_SHORTAGE");

	/* test with COPYOUT for 0x0 address. Should return KERN_NO_ACCESS */
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_CRASHINFO, PAGE_SIZE,
	    KCFLAG_USE_COPYOUT);
	T_ASSERT(retval == KERN_NO_ACCESS, "writing to 0x0 returned KERN_NO_ACCESS");

	/* test with successful kcdata_memory_static_init */
	test_kc_data.kcd_length   = 0xdeadbeef;
	mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
	T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");

	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
	    KCFLAG_USE_MEMCOPY);

	T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");

	T_ASSERT(test_kc_data.kcd_length == PAGE_SIZE, "kcdata length is set correctly to PAGE_SIZE.");
	T_LOG("addr_begin 0x%llx and end 0x%llx and address 0x%llx", test_kc_data.kcd_addr_begin, test_kc_data.kcd_addr_end, address);
	T_ASSERT(test_kc_data.kcd_addr_begin == address, "kcdata begin address is correct 0x%llx", (uint64_t)address);

	/* verify we have BEGIN and END HEADERS set */
	uint32_t * mem = (uint32_t *)address;
	T_ASSERT(mem[0] == KCDATA_BUFFER_BEGIN_STACKSHOT, "buffer does contain KCDATA_BUFFER_BEGIN_STACKSHOT");
	T_ASSERT(mem[4] == KCDATA_TYPE_BUFFER_END, "KCDATA_TYPE_BUFFER_END is appended as expected");
	T_ASSERT(mem[5] == 0, "size of BUFFER_END tag is zero");

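	/*
	 * Why mem[4] and mem[5]: assuming the 16-byte struct kcdata_item header
	 * (four uint32_t words) used by this API, the BEGIN tag occupies
	 * mem[0..3], so the END tag's type word lands at mem[4] and its size
	 * word at mem[5]. The bytes_used check below relies on the same fact:
	 * an empty buffer holds exactly two headers.
	 */
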
	/* verify kcdata_memory_get_used_bytes() */
	uint64_t bytes_used = 0;
	bytes_used          = kcdata_memory_get_used_bytes(&test_kc_data);
	T_ASSERT(bytes_used == (2 * sizeof(struct kcdata_item)), "bytes_used api returned expected %llu", bytes_used);

	/* test for kcdata_get_memory_addr() */

	mach_vm_address_t user_addr = 0;
	/* negative test for NULL user_addr AND/OR kcdata_descriptor */
	retval = kcdata_get_memory_addr(NULL, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL struct -> KERN_INVALID_ARGUMENT");

	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), NULL);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL user_addr -> KERN_INVALID_ARGUMENT");

	/* successful case with size 0. Yes this is expected to succeed as just an item type could be used as boolean */
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_USECS_SINCE_EPOCH, 0, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Successfully got kcdata entry for 0 size data");
	T_ASSERT(user_addr == test_kc_data.kcd_addr_end, "0 sized data did not add any extra buffer space");

	/* successful case with valid size. */
	user_addr = 0xdeadbeef;
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "kcdata_get_memory_addr with valid values succeeded.");
	T_ASSERT(user_addr > test_kc_data.kcd_addr_begin, "user_addr is in range of buffer");
	T_ASSERT(user_addr < test_kc_data.kcd_addr_end, "user_addr is in range of buffer");

	/* Try creating an item with really large size */
	user_addr  = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, PAGE_SIZE * 4, &user_addr);
	T_ASSERT(retval == KERN_RESOURCE_SHORTAGE, "Allocating entry with size > buffer -> KERN_RESOURCE_SHORTAGE");
	T_ASSERT(user_addr == 0xdeadbeef, "user_addr remained unaffected with failed kcdata_get_memory_addr");
	T_ASSERT(bytes_used == kcdata_memory_get_used_bytes(&test_kc_data), "The data structure should be unaffected");

	/* verify convenience functions for uint32_with_description */
	retval = kcdata_add_uint32_with_description(&test_kc_data, 0xbdc0ffee, "This is bad coffee");
	T_ASSERT(retval == KERN_SUCCESS, "add uint32 with description succeeded.");

	retval = kcdata_add_uint64_with_description(&test_kc_data, 0xf001badc0ffee, "another 8 byte no.");
	T_ASSERT(retval == KERN_SUCCESS, "add uint64 with desc succeeded.");

	/* verify creating a KCDATA_TYPE_ARRAY here */
	user_addr  = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	/* save memory address where the array will come up */
	struct kcdata_item * item_p = (struct kcdata_item *)test_kc_data.kcd_addr_end;

	retval = kcdata_get_memory_addr_for_array(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), 20, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Array of 20 integers should be possible");
	T_ASSERT(user_addr != 0xdeadbeef, "user_addr is updated as expected");
	T_ASSERT((kcdata_memory_get_used_bytes(&test_kc_data) - bytes_used) >= 20 * sizeof(uint64_t), "memory allocation is in range");
	kcdata_iter_t iter = kcdata_iter(item_p, PAGE_SIZE - kcdata_memory_get_used_bytes(&test_kc_data));
	T_ASSERT(kcdata_iter_array_elem_count(iter) == 20, "array count is 20");

	/* FIXME add tests here for ranges of sizes and counts */

	T_ASSERT(item_p->flags == (((uint64_t)KCDATA_TYPE_MACH_ABSOLUTE_TIME << 32) | 20), "flags are set correctly");

	/* test adding of custom type */

	retval = kcdata_add_type_definition(&test_kc_data, 0x999, data, &test_disk_io_stats_def[0],
	    sizeof(test_disk_io_stats_def) / sizeof(struct kcdata_subtype_descriptor));
	T_ASSERT(retval == KERN_SUCCESS, "adding custom type succeeded.");

	return KERN_SUCCESS;
}

/*
 * Disabled along with its entry in kernel_post_tests[] and the commented-out
 * test_kc_data2 descriptor above.
 *
kern_return_t
kcdata_api_assert_tests()
{
	kern_return_t retval       = 0;
	void * assert_check_retval = NULL;
	test_kc_data2.kcd_length   = 0xdeadbeef;
	mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
	T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");

	retval = kcdata_memory_static_init(&test_kc_data2, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
	    KCFLAG_USE_MEMCOPY);

	T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");

	retval = T_REGISTER_ASSERT_CHECK("KCDATA_DESC_MAXLEN", &assert_check_retval);
	T_ASSERT(retval == KERN_SUCCESS, "registered assert widget");

	// this will assert
	retval = kcdata_add_uint32_with_description(&test_kc_data2, 0xc0ffee, "really long description string for kcdata");
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "API param check returned KERN_INVALID_ARGUMENT correctly");
	T_ASSERT(assert_check_retval == (void *)XT_RET_W_SUCCESS, "assertion handler verified that it was hit");

	return KERN_SUCCESS;
}
*/

#if defined(__arm__) || defined(__arm64__)

#include <arm/pmap.h>

#define MAX_PMAP_OBJECT_ELEMENT 100000

extern struct vm_object pmap_object_store; /* store pt pages */
extern unsigned long gPhysBase, gPhysSize, first_avail;

/*
 * Define macros to traverse the pmap object structures and extract
 * physical page numbers using information from the low globals only.
 * This emulates how Astris extracts information from a coredump.
 */
#if defined(__arm64__)

static inline uintptr_t
astris_vm_page_unpack_ptr(uintptr_t p)
{
	if (!p) {
		return (uintptr_t)0;
	}

	return (p & lowGlo.lgPmapMemFromArrayMask)
	           ? lowGlo.lgPmapMemStartAddr + (p & ~(lowGlo.lgPmapMemFromArrayMask)) * lowGlo.lgPmapMemPagesize
	           : lowGlo.lgPmapMemPackedBaseAddr + (p << lowGlo.lgPmapMemPackedShift);
}

// assume next pointer is the first element
#define astris_vm_page_queue_next(qc) (astris_vm_page_unpack_ptr(*((uint32_t *)(qc))))

#endif /* defined(__arm64__) */

#if defined(__arm__)

// assume next pointer is the first element
#define astris_vm_page_queue_next(qc) *((uintptr_t *)(qc))

#endif /* defined(__arm__) */

#define astris_vm_page_queue_first(q) astris_vm_page_queue_next(q)

#define astris_vm_page_queue_end(q, qe) ((q) == (qe))

#define astris_vm_page_queue_iterate(head, elt)                                                           \
	for ((elt) = (uintptr_t)astris_vm_page_queue_first((head)); !astris_vm_page_queue_end((head), (elt)); \
	     (elt) = (uintptr_t)astris_vm_page_queue_next(((elt) + (uintptr_t)lowGlo.lgPmapMemChainOffset)))

#define astris_ptoa(x) ((vm_address_t)(x) << lowGlo.lgPageShift)

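/*
 * How the unpacking works (values illustrative): if the packed word has
 * lgPmapMemFromArrayMask set, the remaining bits are an index into the
 * vm_pages array, so the pointer is lgPmapMemStartAddr plus
 * index * lgPmapMemPagesize. Otherwise the word is a kernel pointer that was
 * shifted right by lgPmapMemPackedShift, so unpacking shifts it back up and
 * rebases it at lgPmapMemPackedBaseAddr. astris_ptoa() then turns a physical
 * page number into a physical address using lgPageShift.
 */
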
static inline ppnum_t
astris_vm_page_get_phys_page(uintptr_t m)
{
	return (m >= lowGlo.lgPmapMemStartAddr && m < lowGlo.lgPmapMemEndAddr)
	           ? (ppnum_t)((m - lowGlo.lgPmapMemStartAddr) / lowGlo.lgPmapMemPagesize + lowGlo.lgPmapMemFirstppnum)
	           : *((ppnum_t *)(m + lowGlo.lgPmapMemPageOffset));
}

kern_return_t
pmap_coredump_test(void)
{
	int iter = 0;
	uintptr_t p;

	T_LOG("Testing coredump info for PMAP.");

	T_ASSERT_GE_ULONG(lowGlo.lgStaticAddr, gPhysBase, NULL);
	T_ASSERT_LE_ULONG(lowGlo.lgStaticAddr + lowGlo.lgStaticSize, first_avail, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMajorVersion, 3, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMinorVersion, 0, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMagic, LOWGLO_LAYOUT_MAGIC, NULL);

	// check the constant values in lowGlo
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemQ, ((uint64_t) & (pmap_object_store.memq)), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPageOffset, offsetof(struct vm_page_with_ppnum, vmp_phys_page), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemChainOffset, offsetof(struct vm_page, vmp_listq), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPagesize, sizeof(struct vm_page), NULL);

#if defined(__arm64__)
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemFromArrayMask, VM_PACKED_FROM_VM_PAGES_ARRAY, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedShift, VM_PACKED_POINTER_SHIFT, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedBaseAddr, VM_MIN_KERNEL_AND_KEXT_ADDRESS, NULL);
#endif /* defined(__arm64__) */

	vm_object_lock_shared(&pmap_object_store);
	astris_vm_page_queue_iterate(lowGlo.lgPmapMemQ, p)
	{
		ppnum_t ppnum   = astris_vm_page_get_phys_page(p);
		pmap_paddr_t pa = (pmap_paddr_t)astris_ptoa(ppnum);
		T_ASSERT_GE_ULONG(pa, gPhysBase, NULL);
		T_ASSERT_LT_ULONG(pa, gPhysBase + gPhysSize, NULL);
		iter++;
		T_ASSERT_LT_INT(iter, MAX_PMAP_OBJECT_ELEMENT, NULL);
	}
	vm_object_unlock(&pmap_object_store);

	T_ASSERT_GT_INT(iter, 0, NULL);

	return KERN_SUCCESS;
}
#endif /* defined(__arm__) || defined(__arm64__) */