/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/host.h>
#include <kern/macro_help.h>
#include <kern/sched.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/thread_call.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <tests/ktest.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <kern/kern_cdata.h>
#include <machine/lowglobals.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <kern/priority_queue.h>

#if !(DEVELOPMENT || DEBUG)
#error "Testing is not enabled on RELEASE configurations"
#endif

#include <tests/xnupost.h>
extern boolean_t get_range_bounds(char * c, int64_t * lower, int64_t * upper);

__private_extern__ void qsort(void * a, size_t n, size_t es, int (*cmp)(const void *, const void *));

uint32_t total_post_tests_count = 0;
void xnupost_reset_panic_widgets(void);
/* test declarations */
kern_return_t zalloc_test(void);
kern_return_t RandomULong_test(void);
kern_return_t kcdata_api_test(void);
kern_return_t priority_queue_test(void);
kern_return_t ts_kernel_primitive_test(void);
kern_return_t ts_kernel_sleep_inheritor_test(void);
kern_return_t ts_kernel_gate_test(void);
kern_return_t ts_kernel_turnstile_chain_test(void);
kern_return_t ts_kernel_timingsafe_bcmp_test(void);

extern kern_return_t kprintf_hhx_test(void);

#if defined(__arm__) || defined(__arm64__)
kern_return_t pmap_coredump_test(void);
#endif

extern kern_return_t console_serial_test(void);
extern kern_return_t console_serial_alloc_rel_tests(void);
extern kern_return_t console_serial_parallel_log_tests(void);
extern kern_return_t test_os_log(void);
extern kern_return_t test_os_log_parallel(void);
extern kern_return_t bitmap_post_test(void);

#ifdef __arm64__
extern kern_return_t arm64_munger_test(void);
extern kern_return_t ex_cb_test(void);
#if __ARM_PAN_AVAILABLE__
extern kern_return_t arm64_pan_test(void);
#endif
#if defined(HAS_APPLE_PAC)
extern kern_return_t arm64_ropjop_test(void);
#endif /* defined(HAS_APPLE_PAC) */
#endif /* __arm64__ */

extern kern_return_t test_thread_call(void);
struct xnupost_panic_widget xt_panic_widgets = {.xtp_context_p = NULL,
                                                .xtp_outval_p  = NULL,
                                                .xtp_func_name = NULL,
                                                .xtp_func      = NULL};
struct xnupost_test kernel_post_tests[] = {
    XNUPOST_TEST_CONFIG_BASIC(zalloc_test),
    XNUPOST_TEST_CONFIG_BASIC(RandomULong_test),
    XNUPOST_TEST_CONFIG_BASIC(test_os_log),
    XNUPOST_TEST_CONFIG_BASIC(test_os_log_parallel),
#ifdef __arm64__
    XNUPOST_TEST_CONFIG_BASIC(arm64_munger_test),
    XNUPOST_TEST_CONFIG_BASIC(ex_cb_test),
#if __ARM_PAN_AVAILABLE__
    XNUPOST_TEST_CONFIG_BASIC(arm64_pan_test),
#endif
#if defined(HAS_APPLE_PAC)
    XNUPOST_TEST_CONFIG_BASIC(arm64_ropjop_test),
#endif /* defined(HAS_APPLE_PAC) */
#endif /* __arm64__ */
    XNUPOST_TEST_CONFIG_BASIC(kcdata_api_test),
    XNUPOST_TEST_CONFIG_BASIC(console_serial_test),
    XNUPOST_TEST_CONFIG_BASIC(console_serial_alloc_rel_tests),
    XNUPOST_TEST_CONFIG_BASIC(console_serial_parallel_log_tests),
#if defined(__arm__) || defined(__arm64__)
    XNUPOST_TEST_CONFIG_BASIC(pmap_coredump_test),
#endif
    XNUPOST_TEST_CONFIG_BASIC(bitmap_post_test),
    //XNUPOST_TEST_CONFIG_TEST_PANIC(kcdata_api_assert_tests)
    XNUPOST_TEST_CONFIG_BASIC(test_thread_call),
    XNUPOST_TEST_CONFIG_BASIC(priority_queue_test),
    XNUPOST_TEST_CONFIG_BASIC(ts_kernel_primitive_test),
    XNUPOST_TEST_CONFIG_BASIC(ts_kernel_sleep_inheritor_test),
    XNUPOST_TEST_CONFIG_BASIC(ts_kernel_gate_test),
    XNUPOST_TEST_CONFIG_BASIC(ts_kernel_turnstile_chain_test),
    XNUPOST_TEST_CONFIG_BASIC(ts_kernel_timingsafe_bcmp_test),
    XNUPOST_TEST_CONFIG_BASIC(kprintf_hhx_test),
};

uint32_t kernel_post_tests_count = sizeof(kernel_post_tests) / sizeof(xnupost_test_data_t);
#define POSTARGS_RUN_TESTS 0x1
#define POSTARGS_CONTROLLER_AVAILABLE 0x2
#define POSTARGS_CUSTOM_TEST_RUNLIST 0x4
uint64_t kernel_post_args = 0x0;
/* static variables to hold state */
static kern_return_t parse_config_retval = KERN_INVALID_CAPABILITY;
static char kernel_post_test_configs[256];
boolean_t xnupost_should_run_test(uint32_t test_num);
kern_return_t
xnupost_parse_config()
{
    if (parse_config_retval != KERN_INVALID_CAPABILITY) {
        return parse_config_retval;
    }
    PE_parse_boot_argn("kernPOST", &kernel_post_args, sizeof(kernel_post_args));

    if (PE_parse_boot_argn("kernPOST_config", &kernel_post_test_configs[0], sizeof(kernel_post_test_configs)) == TRUE) {
        kernel_post_args |= POSTARGS_CUSTOM_TEST_RUNLIST;
    }

    if (kernel_post_args != 0) {
        parse_config_retval = KERN_SUCCESS;
        goto out;
    }
    parse_config_retval = KERN_NOT_SUPPORTED;
out:
    return parse_config_retval;
}
boolean_t
xnupost_should_run_test(uint32_t test_num)
{
    if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
        int64_t begin = 0, end = 999999;
        char * b = kernel_post_test_configs;
        while (*b) {
            get_range_bounds(b, &begin, &end);
            if (test_num >= begin && test_num <= end) {
                return TRUE;
            }

            /* skip to the next "," */
            while (*b != ',') {
                if (*b == '\0') {
                    return FALSE;
                }
                b++;
            }
            /* skip past the ',' */
            b++;
        }
        return FALSE;
    }
    return TRUE;
}
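/*
 * Illustrative sketch (hypothetical values, not in the original source):
 * with the parsing above, booting with
 *
 *     kernPOST=0x5 kernPOST_config=1-3,7
 *
 * selects a custom runlist where xnupost_should_run_test() returns TRUE
 * for tests #1..#3 and #7 and FALSE otherwise; the exact range syntax is
 * whatever get_range_bounds() accepts.
 */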
kern_return_t
xnupost_list_tests(xnupost_test_t test_list, uint32_t test_count)
{
    if (KERN_SUCCESS != xnupost_parse_config()) {
        return KERN_FAILURE;
    }

    xnupost_test_t testp;
    for (uint32_t i = 0; i < test_count; i++) {
        testp = &test_list[i];
        if (testp->xt_test_num == 0) {
            testp->xt_test_num = ++total_post_tests_count;
        }
        /* make sure the boot-arg based test run list is honored */
        if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
            testp->xt_config |= XT_CONFIG_IGNORE;
            if (xnupost_should_run_test(testp->xt_test_num)) {
                testp->xt_config &= ~(XT_CONFIG_IGNORE);
                testp->xt_config |= XT_CONFIG_RUN;
                printf("\n[TEST] #%u is marked as ignored", testp->xt_test_num);
            }
        }
        printf("\n[TEST] TOC#%u name: %s expected: %d config: %x\n", testp->xt_test_num, testp->xt_name, testp->xt_expected_retval,
            testp->xt_config);
    }

    return KERN_SUCCESS;
}
kern_return_t
xnupost_run_tests(xnupost_test_t test_list, uint32_t test_count)
{
    uint32_t i = 0;
    int retval = KERN_SUCCESS;

    if ((kernel_post_args & POSTARGS_RUN_TESTS) == 0) {
        printf("No POST boot-arg set.\n");
        return retval;
    }

    xnupost_test_t testp;
    for (; i < test_count; i++) {
        xnupost_reset_panic_widgets();
        testp = &test_list[i];
        T_BEGIN(testp->xt_name);
        testp->xt_begin_time = mach_absolute_time();
        testp->xt_end_time   = testp->xt_begin_time;

        /*
         * If test is designed to panic and controller
         * is not available then mark as SKIPPED
         */
        if ((testp->xt_config & XT_CONFIG_EXPECT_PANIC) && !(kernel_post_args & POSTARGS_CONTROLLER_AVAILABLE)) {
            T_SKIP(
                "Test expects panic but "
                "no controller is present");
            testp->xt_test_actions = XT_ACTION_SKIPPED;
            continue;
        }

        if ((testp->xt_config & XT_CONFIG_IGNORE)) {
            T_SKIP("Test is marked as XT_CONFIG_IGNORE");
            testp->xt_test_actions = XT_ACTION_SKIPPED;
            continue;
        }

        testp->xt_func();
        T_END;
        testp->xt_retval = T_TESTRESULT;
        testp->xt_end_time = mach_absolute_time();
        if (testp->xt_retval == testp->xt_expected_retval) {
            testp->xt_test_actions = XT_ACTION_PASSED;
        } else {
            testp->xt_test_actions = XT_ACTION_FAILED;
        }
    }

    return retval;
}
kern_return_t
kernel_list_tests()
{
    return xnupost_list_tests(kernel_post_tests, kernel_post_tests_count);
}

kern_return_t
kernel_do_post()
{
    return xnupost_run_tests(kernel_post_tests, kernel_post_tests_count);
}
kern_return_t
xnupost_register_panic_widget(xt_panic_widget_func funcp, const char * funcname, void * context, void ** outval)
{
    if (xt_panic_widgets.xtp_context_p != NULL || xt_panic_widgets.xtp_func != NULL) {
        return KERN_RESOURCE_SHORTAGE;
    }

    xt_panic_widgets.xtp_context_p = context;
    xt_panic_widgets.xtp_func      = funcp;
    xt_panic_widgets.xtp_func_name = funcname;
    xt_panic_widgets.xtp_outval_p  = outval;

    return KERN_SUCCESS;
}
void
xnupost_reset_panic_widgets()
{
    bzero(&xt_panic_widgets, sizeof(xt_panic_widgets));
}
kern_return_t
xnupost_process_kdb_stop(const char * panic_s)
{
    xt_panic_return_t retval         = 0;
    struct xnupost_panic_widget * pw = &xt_panic_widgets;
    const char * name = "unknown";
    if (xt_panic_widgets.xtp_func_name) {
        name = xt_panic_widgets.xtp_func_name;
    }

    /* bail early on if kernPOST is not set */
    if (kernel_post_args == 0) {
        return KERN_INVALID_CAPABILITY;
    }

    if (xt_panic_widgets.xtp_func) {
        T_LOG("%s: Calling out to widget: %s", __func__, xt_panic_widgets.xtp_func_name);
        retval = pw->xtp_func(panic_s, pw->xtp_context_p, pw->xtp_outval_p);
    } else {
        return KERN_INVALID_CAPABILITY;
    }

    switch (retval) {
    case XT_RET_W_SUCCESS:
        T_EXPECT_EQ_INT(retval, XT_RET_W_SUCCESS, "%s reported successful handling. Returning from kdb_stop.", name);
        /* KERN_SUCCESS means return from panic/assertion */
        return KERN_SUCCESS;

    case XT_RET_W_FAIL:
        T_FAIL("%s reported XT_RET_W_FAIL: Returning from kdb_stop", name);
        return KERN_SUCCESS;

    case XT_PANIC_W_FAIL:
        T_FAIL("%s reported XT_PANIC_W_FAIL: Continuing to kdb_stop", name);
        return KERN_FAILURE;

    case XT_PANIC_W_SUCCESS:
        T_EXPECT_EQ_INT(retval, XT_PANIC_W_SUCCESS, "%s reported successful testcase. But continuing to kdb_stop.", name);
        return KERN_FAILURE;

    case XT_PANIC_UNRELATED:
    default:
        T_LOG("UNRELATED: Continuing to kdb_stop.");
        return KERN_FAILURE;
    }
}
xt_panic_return_t
_xt_generic_assert_check(const char * s, void * str_to_match, void ** outval)
{
    xt_panic_return_t ret = XT_PANIC_UNRELATED;

    if (NULL != strnstr(__DECONST(char *, s), (char *)str_to_match, strlen(s))) {
        T_LOG("%s: kdb_stop string: '%s' MATCHED string: '%s'", __func__, s, (char *)str_to_match);
        ret = XT_RET_W_SUCCESS;
    }

    if (outval) {
        *outval = (void *)(uintptr_t)ret;
    }
    return ret;
}
kern_return_t
xnupost_reset_tests(xnupost_test_t test_list, uint32_t test_count)
{
    uint32_t i = 0;
    xnupost_test_t testp;
    for (; i < test_count; i++) {
        testp                  = &test_list[i];
        testp->xt_begin_time   = 0;
        testp->xt_end_time     = 0;
        testp->xt_test_actions = XT_ACTION_NONE;
        testp->xt_retval       = -1;
    }
    return KERN_SUCCESS;
}
kern_return_t
zalloc_test(void)
{
    zone_t test_zone;
    void * test_ptr;

    test_zone = zinit(sizeof(uint64_t), 100 * sizeof(uint64_t), sizeof(uint64_t), "test_uint64_zone");
    T_ASSERT_NOTNULL(test_zone, NULL);

    T_ASSERT_EQ_INT(zone_free_count(test_zone), 0, NULL);

    T_ASSERT_NOTNULL(test_ptr = zalloc(test_zone), NULL);

    zfree(test_zone, test_ptr);

    /* A sample report for perfdata */
    T_PERF("num_threads_at_ktest", threads_count, "count", "# of threads in system at zalloc_test");

    return KERN_SUCCESS;
}
/*
 * Function used for comparison by qsort()
 */
static int
compare_numbers_ascending(const void * a, const void * b)
{
    const uint64_t x = *(const uint64_t *)a;
    const uint64_t y = *(const uint64_t *)b;
    if (x < y) {
        return -1;
    } else if (x > y) {
        return 1;
    } else {
        return 0;
    }
}

/*
 * Function used for comparison by qsort()
 */
static int
compare_numbers_descending(const void * a, const void * b)
{
    const uint32_t x = *(const uint32_t *)a;
    const uint32_t y = *(const uint32_t *)b;
    if (x > y) {
        return -1;
    } else if (x < y) {
        return 1;
    } else {
        return 0;
    }
}
/* Node structure for the priority queue tests */
struct priority_queue_test_node {
    struct priority_queue_entry link;
    priority_queue_key_t        node_key;
};
static void
priority_queue_test_queue(struct priority_queue *pq, int type,
    priority_queue_compare_fn_t cmp_fn)
{
    /* Configuration for the test */
#define PRIORITY_QUEUE_NODES 7
    static uint32_t priority_list[] = { 20, 3, 7, 6, 50, 2, 8};
    uint32_t increase_pri = 100;
    uint32_t decrease_pri = 90;
    struct priority_queue_test_node *result;
    uint32_t key = 0;
    boolean_t update_result = false;

    struct priority_queue_test_node *node = NULL;
    /* Add all priorities to the first priority queue */
    for (int i = 0; i < PRIORITY_QUEUE_NODES; i++) {
        node = kalloc(sizeof(struct priority_queue_test_node));
        T_ASSERT_NOTNULL(node, NULL);

        priority_queue_entry_init(&(node->link));
        node->node_key = priority_list[i];
        key = (type == PRIORITY_QUEUE_GENERIC_KEY) ? PRIORITY_QUEUE_KEY_NONE : priority_list[i];
        priority_queue_insert(pq, &(node->link), key, cmp_fn);
    }

    T_ASSERT_NOTNULL(node, NULL);
    key = (type == PRIORITY_QUEUE_GENERIC_KEY) ? node->node_key : priority_queue_entry_key(pq, &(node->link));
    T_ASSERT((key == node->node_key), "verify node stored key correctly");

    /* Test the priority increase operation by updating the last node added (8) */
    T_ASSERT_NOTNULL(node, NULL);
    node->node_key = increase_pri;
    key = (type == PRIORITY_QUEUE_GENERIC_KEY) ? PRIORITY_QUEUE_KEY_NONE : node->node_key;
    update_result = priority_queue_entry_increase(pq, &node->link, key, cmp_fn);
    T_ASSERT((update_result == true), "increase key updated root");
    result = priority_queue_max(pq, struct priority_queue_test_node, link);
    T_ASSERT((result->node_key == increase_pri), "verify priority_queue_entry_increase() operation");

    /* Test the priority decrease operation by updating the last node added */
    T_ASSERT((result == node), NULL);
    node->node_key = decrease_pri;
    key = (type == PRIORITY_QUEUE_GENERIC_KEY) ? PRIORITY_QUEUE_KEY_NONE : node->node_key;
    update_result = priority_queue_entry_decrease(pq, &node->link, key, cmp_fn);
    T_ASSERT((update_result == true), "decrease key updated root");
    result = priority_queue_max(pq, struct priority_queue_test_node, link);
    T_ASSERT((result->node_key == decrease_pri), "verify priority_queue_entry_decrease() operation");

    /* Update our local priority list as well */
    priority_list[PRIORITY_QUEUE_NODES - 1] = decrease_pri;

    /* Sort the local list in descending order */
    qsort(priority_list, PRIORITY_QUEUE_NODES, sizeof(priority_list[0]), compare_numbers_descending);

    /* Test the maximum operation by comparing max node with local list */
    result = priority_queue_max(pq, struct priority_queue_test_node, link);
    T_ASSERT((result->node_key == priority_list[0]), "(heap (%u) == qsort (%u)) priority queue max node lookup",
        (uint32_t)result->node_key, priority_list[0]);

    /* Remove all remaining elements and verify they match local list */
    for (int i = 0; i < PRIORITY_QUEUE_NODES; i++) {
        result = priority_queue_remove_max(pq, struct priority_queue_test_node, link, cmp_fn);
        T_ASSERT((result->node_key == priority_list[i]), "(heap (%u) == qsort (%u)) priority queue max node removal",
            (uint32_t)result->node_key, priority_list[i]);
    }

    priority_queue_destroy(pq, struct priority_queue_test_node, link, ^(void *n) {
        kfree(n, sizeof(struct priority_queue_test_node));
    });
}
kern_return_t
priority_queue_test(void)
{
    /*
     * Initialize two priority queues
     * - One which uses the key comparator
     * - Other which uses the node comparator
     */
    static struct priority_queue pq;
    static struct priority_queue pq_nodes;

    priority_queue_init(&pq, PRIORITY_QUEUE_BUILTIN_KEY | PRIORITY_QUEUE_MAX_HEAP);
    priority_queue_init(&pq_nodes, PRIORITY_QUEUE_GENERIC_KEY | PRIORITY_QUEUE_MAX_HEAP);

    priority_queue_test_queue(&pq, PRIORITY_QUEUE_BUILTIN_KEY,
        PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE);

    priority_queue_test_queue(&pq_nodes, PRIORITY_QUEUE_GENERIC_KEY,
        priority_heap_make_comparator(a, b, struct priority_queue_test_node, link, {
            return (a->node_key > b->node_key) ? 1 : ((a->node_key == b->node_key) ? 0 : -1);
        }));

    return KERN_SUCCESS;
}
/*
 * Function to count number of bits that are set in a number.
 * It uses Side Addition using Magic Binary Numbers
 */
static int
count_bits(uint64_t number)
{
    return __builtin_popcountll(number);
}
/*
 * Randomness test for RandomULong()
 *
 * This test verifies that:
 * a. RandomULong works
 * b. The generated numbers match the following entropy criteria:
 *    For a thousand iterations, verify:
 *    1. mean entropy > 12 bits
 *    2. min entropy > 4 bits
 *    3. No duplicates
 *    4. No incremental/decremental pattern in a window of 3
 *    5. No zero
 *    6. No -1
 *
 * <rdar://problem/22526137> Add test to increase code coverage for /dev/random
 */

#define CONF_MIN_ENTROPY 4
#define CONF_MEAN_ENTROPY 12
#define CONF_ITERATIONS 1000
#define CONF_WINDOW_SIZE 3
#define CONF_WINDOW_TREND_LIMIT ((CONF_WINDOW_SIZE / 2) + (CONF_WINDOW_SIZE & 1)) >> 0
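/*
 * Note (added for clarity): ((w / 2) + (w & 1)) is integer ceil(w / 2),
 * so with CONF_WINDOW_SIZE 3 the trend limit evaluates to 2; the trailing
 * ">> 0" is a no-op in the original expression.
 */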
kern_return_t
RandomULong_test()
{
    uint32_t i;
    uint32_t min_bit_entropy, max_bit_entropy, bit_entropy;
    uint32_t aggregate_bit_entropy = 0;
    uint32_t mean_bit_entropy      = 0;
    uint64_t numbers[CONF_ITERATIONS];
    min_bit_entropy = UINT32_MAX;
    max_bit_entropy = 0;

    /*
     * TEST 1: Number generation and basic validation
     * Check for non-zero (no bits set), -1 (all bits set) and error
     */
    for (i = 0; i < CONF_ITERATIONS; i++) {
        read_random(&numbers[i], sizeof(numbers[i]));
        if (numbers[i] == 0) {
            T_ASSERT_NE_ULLONG(numbers[i], 0, "read_random returned zero value.");
        }
        if (numbers[i] == UINT64_MAX) {
            T_ASSERT_NE_ULLONG(numbers[i], UINT64_MAX, "read_random returned -1.");
        }
    }
    T_PASS("Generated %d non-zero random numbers with at least one bit reset.", CONF_ITERATIONS);
    /*
     * TEST 2: Mean and Min Bit Entropy
     * Check the bit entropy and its mean over the generated numbers.
     */
    for (i = 1; i < CONF_ITERATIONS; i++) {
        bit_entropy = count_bits(numbers[i - 1] ^ numbers[i]);
        if (bit_entropy < min_bit_entropy) {
            min_bit_entropy = bit_entropy;
        }
        if (bit_entropy > max_bit_entropy) {
            max_bit_entropy = bit_entropy;
        }

        if (bit_entropy < CONF_MIN_ENTROPY) {
            T_EXPECT_GE_UINT(bit_entropy, CONF_MIN_ENTROPY,
                "Number of differing bits in consecutive numbers does not satisfy the min criteria.");
        }

        aggregate_bit_entropy += bit_entropy;
    }
    T_PASS("Passed the min bit entropy expectation of %d bits", CONF_MIN_ENTROPY);

    mean_bit_entropy = aggregate_bit_entropy / CONF_ITERATIONS;
    T_EXPECT_GE_UINT(mean_bit_entropy, CONF_MEAN_ENTROPY, "Test criteria for mean number of differing bits.");
    T_PASS("Mean bit entropy criteria satisfied (Required %d, Actual: %d).", CONF_MEAN_ENTROPY, mean_bit_entropy);
    T_LOG("{PERFORMANCE} iterations: %d, min_bit_entropy: %d, mean_bit_entropy: %d, max_bit_entropy: %d", CONF_ITERATIONS,
        min_bit_entropy, mean_bit_entropy, max_bit_entropy);
    T_PERF("min_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), min_bit_entropy, "bits", "minimum bit entropy in RNG. High is better");
    T_PERF("mean_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), mean_bit_entropy, "bits", "mean bit entropy in RNG. High is better");
    T_PERF("max_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), max_bit_entropy, "bits", "max bit entropy in RNG. High is better");
    /*
     * TEST 3: Incremental Pattern Search
     * Check that incremental/decremental pattern does not exist in the given window
     */
    int window_start, window_end, trend;
    window_start = window_end = trend = 0;

    do {
        /*
         * Set the window
         */
        window_end = window_start + CONF_WINDOW_SIZE - 1;
        if (window_end >= CONF_ITERATIONS) {
            window_end = CONF_ITERATIONS - 1;
        }

        trend = 0;
        for (i = window_start; i < window_end; i++) {
            if (numbers[i] < numbers[i + 1]) {
                trend++;
            } else if (numbers[i] > numbers[i + 1]) {
                trend--;
            }
        }

        /*
         * Check that there is no increasing or decreasing trend
         * i.e. trend <= ceil(window_size/2)
         */
        if (trend < 0) {
            trend = -trend;
        }
        if (trend > CONF_WINDOW_TREND_LIMIT) {
            T_ASSERT_LE_INT(trend, CONF_WINDOW_TREND_LIMIT, "Found increasing/decreasing trend in random numbers.");
        }

        /*
         * Move to the next window
         */
        window_start++;
    } while (window_start < (CONF_ITERATIONS - 1));
    T_PASS("Did not find increasing/decreasing trends in a window of %d numbers.", CONF_WINDOW_SIZE);

    /*
     * TEST 4: Find Duplicates
     * Check no duplicate values are generated
     */
    qsort(numbers, CONF_ITERATIONS, sizeof(numbers[0]), compare_numbers_ascending);
    for (i = 1; i < CONF_ITERATIONS; i++) {
        if (numbers[i] == numbers[i - 1]) {
            T_ASSERT_NE_ULLONG(numbers[i], numbers[i - 1], "read_random generated duplicate values.");
        }
    }
    T_PASS("Test did not find any duplicates as expected.");

    return KERN_SUCCESS;
}
/* KCDATA kernel api tests */
static struct kcdata_descriptor test_kc_data; //, test_kc_data2;
struct sample_disk_io_stats {
    uint64_t disk_reads_count;
    uint64_t disk_reads_size;
    uint64_t io_priority_count[4];
    uint64_t io_priority_size;
} __attribute__((packed));
struct kcdata_subtype_descriptor test_disk_io_stats_def[] = {
    {
        .kcs_flags       = KCS_SUBTYPE_FLAGS_NONE,
        .kcs_elem_type   = KC_ST_UINT64,
        .kcs_elem_offset = 0 * sizeof(uint64_t),
        .kcs_elem_size   = sizeof(uint64_t),
        .kcs_name        = "disk_reads_count"
    },
    {
        .kcs_flags       = KCS_SUBTYPE_FLAGS_NONE,
        .kcs_elem_type   = KC_ST_UINT64,
        .kcs_elem_offset = 1 * sizeof(uint64_t),
        .kcs_elem_size   = sizeof(uint64_t),
        .kcs_name        = "disk_reads_size"
    },
    {
        .kcs_flags       = KCS_SUBTYPE_FLAGS_ARRAY,
        .kcs_elem_type   = KC_ST_UINT64,
        .kcs_elem_offset = 2 * sizeof(uint64_t),
        .kcs_elem_size   = KCS_SUBTYPE_PACK_SIZE(4, sizeof(uint64_t)),
        .kcs_name        = "io_priority_count"
    },
    {
        .kcs_flags       = KCS_SUBTYPE_FLAGS_ARRAY,
        .kcs_elem_type   = KC_ST_UINT64,
        .kcs_elem_offset = (2 + 4) * sizeof(uint64_t),
        .kcs_elem_size   = sizeof(uint64_t),
        .kcs_name        = "io_priority_size"
    },
};
kern_return_t
kcdata_api_test(void)
{
    kern_return_t retval = KERN_SUCCESS;

    /* test for NULL input */
    retval = kcdata_memory_static_init(NULL, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_STACKSHOT, 100, KCFLAG_USE_MEMCOPY);
    T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_memory_static_init with NULL struct");

    /* another negative test with buffer size < 32 bytes */
    char data[30] = "sample_disk_io_stats";
    retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)&data, KCDATA_BUFFER_BEGIN_CRASHINFO, sizeof(data),
        KCFLAG_USE_MEMCOPY);
    T_ASSERT(retval == KERN_RESOURCE_SHORTAGE, "init with 30 bytes failed as expected with KERN_RESOURCE_SHORTAGE");

    /* test with COPYOUT for 0x0 address. Should return KERN_NO_ACCESS */
    retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_CRASHINFO, PAGE_SIZE,
        KCFLAG_USE_COPYOUT);
    T_ASSERT(retval == KERN_NO_ACCESS, "writing to 0x0 returned KERN_NO_ACCESS");

    /* test with successful kcdata_memory_static_init */
    test_kc_data.kcd_length   = 0xdeadbeef;
    mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
    T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");

    retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
        KCFLAG_USE_MEMCOPY);

    T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");

    T_ASSERT(test_kc_data.kcd_length == PAGE_SIZE, "kcdata length is set correctly to PAGE_SIZE.");
    T_LOG("addr_begin 0x%llx and end 0x%llx and address 0x%llx", test_kc_data.kcd_addr_begin, test_kc_data.kcd_addr_end, address);
    T_ASSERT(test_kc_data.kcd_addr_begin == address, "kcdata begin address is correct 0x%llx", (uint64_t)address);

    /* verify we have BEGIN and END HEADERS set */
    uint32_t * mem = (uint32_t *)address;
    T_ASSERT(mem[0] == KCDATA_BUFFER_BEGIN_STACKSHOT, "buffer does contain KCDATA_BUFFER_BEGIN_STACKSHOT");
    T_ASSERT(mem[4] == KCDATA_TYPE_BUFFER_END, "KCDATA_TYPE_BUFFER_END is appended as expected");
    T_ASSERT(mem[5] == 0, "size of BUFFER_END tag is zero");

    /* verify kcdata_memory_get_used_bytes() */
    uint64_t bytes_used = 0;
    bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
    T_ASSERT(bytes_used == (2 * sizeof(struct kcdata_item)), "bytes_used api returned expected %llu", bytes_used);

    /* test for kcdata_get_memory_addr() */

    mach_vm_address_t user_addr = 0;
    /* negative test for NULL user_addr AND/OR kcdata_descriptor */
    retval = kcdata_get_memory_addr(NULL, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
    T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL struct -> KERN_INVALID_ARGUMENT");

    retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), NULL);
    T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL user_addr -> KERN_INVALID_ARGUMENT");

    /* successful case with size 0. Yes this is expected to succeed as just a item type could be used as boolean */
    retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_USECS_SINCE_EPOCH, 0, &user_addr);
    T_ASSERT(retval == KERN_SUCCESS, "Successfully got kcdata entry for 0 size data");
    T_ASSERT(user_addr == test_kc_data.kcd_addr_end, "0 sized data did not add any extra buffer space");

    /* successful case with valid size. */
    user_addr = 0xdeadbeef;
    retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
    T_ASSERT(retval == KERN_SUCCESS, "kcdata_get_memory_addr with valid values succeeded.");
    T_ASSERT(user_addr > test_kc_data.kcd_addr_begin, "user_addr is in range of buffer");
    T_ASSERT(user_addr < test_kc_data.kcd_addr_end, "user_addr is in range of buffer");

    /* Try creating an item with really large size */
    user_addr  = 0xdeadbeef;
    bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
    retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, PAGE_SIZE * 4, &user_addr);
    T_ASSERT(retval == KERN_RESOURCE_SHORTAGE, "Allocating entry with size > buffer -> KERN_RESOURCE_SHORTAGE");
    T_ASSERT(user_addr == 0xdeadbeef, "user_addr remained unaffected with failed kcdata_get_memory_addr");
    T_ASSERT(bytes_used == kcdata_memory_get_used_bytes(&test_kc_data), "The data structure should be unaffected");

    /* verify convenience functions for uint32_with_description */
    retval = kcdata_add_uint32_with_description(&test_kc_data, 0xbdc0ffee, "This is bad coffee");
    T_ASSERT(retval == KERN_SUCCESS, "add uint32 with description succeeded.");

    retval = kcdata_add_uint64_with_description(&test_kc_data, 0xf001badc0ffee, "another 8 byte no.");
    T_ASSERT(retval == KERN_SUCCESS, "add uint64 with desc succeeded.");

    /* verify creating an KCDATA_TYPE_ARRAY here */
    user_addr  = 0xdeadbeef;
    bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
    /* save memory address where the array will come up */
    struct kcdata_item * item_p = (struct kcdata_item *)test_kc_data.kcd_addr_end;

    retval = kcdata_get_memory_addr_for_array(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), 20, &user_addr);
    T_ASSERT(retval == KERN_SUCCESS, "Array of 20 integers should be possible");
    T_ASSERT(user_addr != 0xdeadbeef, "user_addr is updated as expected");
    T_ASSERT((kcdata_memory_get_used_bytes(&test_kc_data) - bytes_used) >= 20 * sizeof(uint64_t), "memory allocation is in range");
    kcdata_iter_t iter = kcdata_iter(item_p, PAGE_SIZE - kcdata_memory_get_used_bytes(&test_kc_data));
    T_ASSERT(kcdata_iter_array_elem_count(iter) == 20, "array count is 20");

    /* FIXME add tests here for ranges of sizes and counts */

    T_ASSERT(item_p->flags == (((uint64_t)KCDATA_TYPE_MACH_ABSOLUTE_TIME << 32) | 20), "flags are set correctly");

    /* test adding of custom type */

    retval = kcdata_add_type_definition(&test_kc_data, 0x999, data, &test_disk_io_stats_def[0],
        sizeof(test_disk_io_stats_def) / sizeof(struct kcdata_subtype_descriptor));
    T_ASSERT(retval == KERN_SUCCESS, "adding custom type succeeded.");

    return KERN_SUCCESS;
}
/*
 * kcdata_api_assert_tests()
 * {
 *      kern_return_t retval       = 0;
 *      void * assert_check_retval = NULL;
 *      test_kc_data2.kcd_length   = 0xdeadbeef;
 *      mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
 *      T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");
 *
 *      retval = kcdata_memory_static_init(&test_kc_data2, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
 *          KCFLAG_USE_MEMCOPY);
 *
 *      T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");
 *
 *      retval = T_REGISTER_ASSERT_CHECK("KCDATA_DESC_MAXLEN", &assert_check_retval);
 *      T_ASSERT(retval == KERN_SUCCESS, "registered assert widget");
 *
 *      // this will assert
 *      retval = kcdata_add_uint32_with_description(&test_kc_data2, 0xc0ffee, "really long description string for kcdata");
 *      T_ASSERT(retval == KERN_INVALID_ARGUMENT, "API param check returned KERN_INVALID_ARGUMENT correctly");
 *      T_ASSERT(assert_check_retval == (void *)XT_RET_W_SUCCESS, "assertion handler verified that it was hit");
 *
 *      return KERN_SUCCESS;
 * }
 */
#if defined(__arm__) || defined(__arm64__)

#include <arm/pmap.h>

#define MAX_PMAP_OBJECT_ELEMENT 100000

extern struct vm_object pmap_object_store; /* store pt pages */
extern unsigned long gPhysBase, gPhysSize, first_avail;

/*
 * Define macros to traverse the pmap object structures and extract
 * physical page number with information from low global only.
 * This emulates how Astris extracts information from coredump.
 */
#if defined(__arm64__)

static inline uintptr_t
astris_vm_page_unpack_ptr(uintptr_t p)
{
    if (!p) {
        return (uintptr_t)0;
    }

    return (p & lowGlo.lgPmapMemFromArrayMask)
           ? lowGlo.lgPmapMemStartAddr + (p & ~(lowGlo.lgPmapMemFromArrayMask)) * lowGlo.lgPmapMemPagesize
           : lowGlo.lgPmapMemPackedBaseAddr + (p << lowGlo.lgPmapMemPackedShift);
}
// assume next pointer is the first element
#define astris_vm_page_queue_next(qc) (astris_vm_page_unpack_ptr(*((uint32_t *)(qc))))

#else /* !defined(__arm64__) */

// assume next pointer is the first element
#define astris_vm_page_queue_next(qc) *((uintptr_t *)(qc))

#endif /* defined(__arm64__) */

#define astris_vm_page_queue_first(q) astris_vm_page_queue_next(q)

#define astris_vm_page_queue_end(q, qe) ((q) == (qe))

#define astris_vm_page_queue_iterate(head, elt)                                                           \
    for ((elt) = (uintptr_t)astris_vm_page_queue_first((head)); !astris_vm_page_queue_end((head), (elt)); \
        (elt) = (uintptr_t)astris_vm_page_queue_next(((elt) + (uintptr_t)lowGlo.lgPmapMemChainOffset)))

#define astris_ptoa(x) ((vm_address_t)(x) << lowGlo.lgPageShift)

static inline ppnum_t
astris_vm_page_get_phys_page(uintptr_t m)
{
    return (m >= lowGlo.lgPmapMemStartAddr && m < lowGlo.lgPmapMemEndAddr)
           ? (ppnum_t)((m - lowGlo.lgPmapMemStartAddr) / lowGlo.lgPmapMemPagesize + lowGlo.lgPmapMemFirstppnum)
           : *((ppnum_t *)(m + lowGlo.lgPmapMemPageOffset));
}
kern_return_t
pmap_coredump_test(void)
{
    int iter = 0;
    uintptr_t p;

    T_LOG("Testing coredump info for PMAP.");

    T_ASSERT_GE_ULONG(lowGlo.lgStaticAddr, gPhysBase, NULL);
    T_ASSERT_LE_ULONG(lowGlo.lgStaticAddr + lowGlo.lgStaticSize, first_avail, NULL);
    T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMajorVersion, 3, NULL);
    T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMinorVersion, 0, NULL);
    T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMagic, LOWGLO_LAYOUT_MAGIC, NULL);

    // check the constant values in lowGlo
    T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemQ, ((uint64_t) &(pmap_object_store.memq)), NULL);
    T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPageOffset, offsetof(struct vm_page_with_ppnum, vmp_phys_page), NULL);
    T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemChainOffset, offsetof(struct vm_page, vmp_listq), NULL);
    T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPagesize, sizeof(struct vm_page), NULL);

#if defined(__arm64__)
    T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemFromArrayMask, VM_PACKED_FROM_VM_PAGES_ARRAY, NULL);
    T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedShift, VM_PACKED_POINTER_SHIFT, NULL);
    T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedBaseAddr, VM_MIN_KERNEL_AND_KEXT_ADDRESS, NULL);
#endif

    vm_object_lock_shared(&pmap_object_store);
    astris_vm_page_queue_iterate(lowGlo.lgPmapMemQ, p)
    {
        ppnum_t ppnum   = astris_vm_page_get_phys_page(p);
        pmap_paddr_t pa = (pmap_paddr_t)astris_ptoa(ppnum);
        T_ASSERT_GE_ULONG(pa, gPhysBase, NULL);
        T_ASSERT_LT_ULONG(pa, gPhysBase + gPhysSize, NULL);
        iter++;
        T_ASSERT_LT_INT(iter, MAX_PMAP_OBJECT_ELEMENT, NULL);
    }
    vm_object_unlock(&pmap_object_store);

    T_ASSERT_GT_INT(iter, 0, NULL);
    return KERN_SUCCESS;
}
#endif /* defined(__arm__) || defined(__arm64__) */
struct ts_kern_prim_test_args {
    int *end_barrier;
    int *notify_b;
    int *wait_event_b;
    int before_num;
    int *notify_a;
    int *wait_event_a;
    int after_num;
    int priority_to_check;
};

static void
wait_threads(
    int *var,
    int num)
{
    if (var != NULL) {
        while (os_atomic_load(var, acquire) != num) {
            assert_wait((event_t) var, THREAD_UNINT);
            if (os_atomic_load(var, acquire) != num) {
                (void) thread_block(THREAD_CONTINUE_NULL);
            } else {
                clear_wait(current_thread(), THREAD_AWAKENED);
            }
        }
    }
}

static void
wake_threads(
    int* var)
{
    if (var) {
        os_atomic_inc(var, relaxed);
        thread_wakeup((event_t) var);
    }
}
extern void IOSleep(int);

static void
thread_lock_unlock_kernel_primitive(
    void *args,
    __unused wait_result_t wr)
{
    thread_t thread = current_thread();
    struct ts_kern_prim_test_args *info = (struct ts_kern_prim_test_args *) args;
    int pri;

    thread_lock(thread);
    pri = thread->sched_pri;
    thread_unlock(thread);

    wait_threads(info->wait_event_b, info->before_num);
    wake_threads(info->notify_b);

    tstile_test_prim_lock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

    wake_threads(info->notify_a);
    wait_threads(info->wait_event_a, info->after_num);

    IOSleep(100);

    if (info->priority_to_check) {
        thread_lock(thread);
        pri = thread->sched_pri;
        thread_unlock(thread);
        T_ASSERT(pri == info->priority_to_check, "Priority thread: current sched %d sched wanted %d", pri, info->priority_to_check);
    }

    tstile_test_prim_unlock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

    wake_threads(info->end_barrier);
    thread_terminate_self();
}
kern_return_t
ts_kernel_primitive_test(void)
{
    thread_t owner, thread1, thread2;
    struct ts_kern_prim_test_args targs[2] = {};
    kern_return_t result;
    int end_barrier = 0;
    int owner_locked = 0;
    int waiters_ready = 0;

    T_LOG("Testing turnstile kernel primitive");

    targs[0].notify_b = NULL;
    targs[0].wait_event_b = NULL;
    targs[0].before_num = 0;
    targs[0].notify_a = &owner_locked;
    targs[0].wait_event_a = &waiters_ready;
    targs[0].after_num = 2;
    targs[0].priority_to_check = 90;
    targs[0].end_barrier = &end_barrier;

    // Start owner with priority 80
    result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[0], 80, &owner);
    T_ASSERT(result == KERN_SUCCESS, "Starting owner");

    targs[1].notify_b = &waiters_ready;
    targs[1].wait_event_b = &owner_locked;
    targs[1].before_num = 1;
    targs[1].notify_a = NULL;
    targs[1].wait_event_a = NULL;
    targs[1].after_num = 0;
    targs[1].priority_to_check = 0;
    targs[1].end_barrier = &end_barrier;

    // Start waiters with priority 85 and 90
    result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 85, &thread1);
    T_ASSERT(result == KERN_SUCCESS, "Starting thread1");

    result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 90, &thread2);
    T_ASSERT(result == KERN_SUCCESS, "Starting thread2");

    wait_threads(&end_barrier, 3);

    return KERN_SUCCESS;
}
#define MTX_LOCK 0
#define RW_LOCK 1

#define NUM_THREADS 4

struct synch_test_common {
    unsigned int nthreads;
    thread_t *threads;
    int max_pri;
    int test_done;
};

static kern_return_t
init_synch_test_common(struct synch_test_common *info, unsigned int nthreads)
{
    info->nthreads = nthreads;
    info->threads = kalloc(sizeof(thread_t) * nthreads);
    if (!info->threads) {
        return ENOMEM;
    }

    return KERN_SUCCESS;
}

static void
destroy_synch_test_common(struct synch_test_common *info)
{
    kfree(info->threads, sizeof(thread_t) * info->nthreads);
}
static void
start_threads(thread_continue_t func, struct synch_test_common *info, bool sleep_after_first)
{
    thread_t thread;
    kern_return_t result;
    uint i;
    int priority = 75;

    info->test_done = 0;

    for (i = 0; i < info->nthreads; i++) {
        info->threads[i] = NULL;
    }

    info->max_pri = priority + (info->nthreads - 1) * 5;
    if (info->max_pri > 95) {
        info->max_pri = 95;
    }

    for (i = 0; i < info->nthreads; i++) {
        result = kernel_thread_start_priority((thread_continue_t)func, info, priority, &thread);
        os_atomic_store(&info->threads[i], thread, release);
        T_ASSERT(result == KERN_SUCCESS, "Starting thread %d, priority %d, %p", i, priority, thread);

        priority += 5;

        if (i == 0 && sleep_after_first) {
            IOSleep(100);
        }
    }
}
static unsigned int
get_max_pri(struct synch_test_common * info)
{
    return info->max_pri;
}

static void
wait_all_thread(struct synch_test_common * info)
{
    wait_threads(&info->test_done, info->nthreads);
}

static void
notify_waiter(struct synch_test_common * info)
{
    wake_threads(&info->test_done);
}
static void
wait_for_waiters(struct synch_test_common *info)
{
    uint i;
    thread_t thread;

    for (i = 0; i < info->nthreads; i++) {
        /* wait for the thread pointer to be published */
        while (os_atomic_load(&info->threads[i], acquire) == NULL) {
            IOSleep(10);
        }

        if (info->threads[i] != current_thread()) {
            do {
                thread = os_atomic_load(&info->threads[i], relaxed);
                if (thread == (thread_t) 1) {
                    break;
                }

                if (!(thread->state & TH_RUN)) {
                    break;
                }

                IOSleep(100);

                if (thread->started == FALSE) {
                    continue;
                }
            } while (thread->state & TH_RUN);
        }
    }
}

static void
exclude_current_waiter(struct synch_test_common *info)
{
    uint i;

    for (i = 0; i < info->nthreads; i++) {
        /* wait for the thread pointer to be published */
        while (os_atomic_load(&info->threads[i], acquire) == NULL) {
            IOSleep(10);
        }

        if (os_atomic_load(&info->threads[i], acquire) == current_thread()) {
            os_atomic_store(&info->threads[i], (thread_t)1, release);
            return;
        }
    }
}
struct info_sleep_inheritor_test {
    struct synch_test_common head;
    lck_mtx_t mtx_lock;
    lck_rw_t rw_lock;
    decl_lck_mtx_gate_data(, gate);
    boolean_t gate_closed;
    int prim_type;
    boolean_t work_to_do;
    unsigned int max_pri;
    unsigned int steal_pri;
    int synch_value;
    int synch;
    int value;
    int handoff_failure;
    thread_t thread_inheritor;
};
static void
primitive_lock(struct info_sleep_inheritor_test *info)
{
    switch (info->prim_type) {
    case MTX_LOCK:
        lck_mtx_lock(&info->mtx_lock);
        break;
    case RW_LOCK:
        lck_rw_lock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
        break;
    default:
        panic("invalid type %d", info->prim_type);
    }
}

static void
primitive_unlock(struct info_sleep_inheritor_test *info)
{
    switch (info->prim_type) {
    case MTX_LOCK:
        lck_mtx_unlock(&info->mtx_lock);
        break;
    case RW_LOCK:
        lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
        break;
    default:
        panic("invalid type %d", info->prim_type);
    }
}

static wait_result_t
primitive_sleep_with_inheritor(struct info_sleep_inheritor_test *info)
{
    wait_result_t ret = KERN_SUCCESS;
    switch (info->prim_type) {
    case MTX_LOCK:
        ret = lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor,
            info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
        break;
    case RW_LOCK:
        ret = lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor,
            info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
        break;
    default:
        panic("invalid type %d", info->prim_type);
    }
    return ret;
}
static void
primitive_wakeup_one_with_inheritor(struct info_sleep_inheritor_test *info)
{
    switch (info->prim_type) {
    case MTX_LOCK:
    case RW_LOCK:
        wakeup_one_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED, LCK_WAKE_DEFAULT, &info->thread_inheritor);
        break;
    default:
        panic("invalid type %d", info->prim_type);
    }
}

static void
primitive_wakeup_all_with_inheritor(struct info_sleep_inheritor_test *info)
{
    switch (info->prim_type) {
    case MTX_LOCK:
    case RW_LOCK:
        wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
        break;
    default:
        panic("invalid type %d", info->prim_type);
    }
}

static void
primitive_change_sleep_inheritor(struct info_sleep_inheritor_test *info)
{
    switch (info->prim_type) {
    case MTX_LOCK:
    case RW_LOCK:
        change_sleep_inheritor((event_t) &info->thread_inheritor, info->thread_inheritor);
        break;
    default:
        panic("invalid type %d", info->prim_type);
    }
}
static kern_return_t
primitive_gate_try_close(struct info_sleep_inheritor_test *info)
{
    kern_return_t ret = KERN_SUCCESS;
    switch (info->prim_type) {
    case MTX_LOCK:
        ret = lck_mtx_gate_try_close(&info->mtx_lock, &info->gate);
        break;
    case RW_LOCK:
        ret = lck_rw_gate_try_close(&info->rw_lock, &info->gate);
        break;
    default:
        panic("invalid type %d", info->prim_type);
    }
    return ret;
}

static gate_wait_result_t
primitive_gate_wait(struct info_sleep_inheritor_test *info)
{
    gate_wait_result_t ret = GATE_OPENED;
    switch (info->prim_type) {
    case MTX_LOCK:
        ret = lck_mtx_gate_wait(&info->mtx_lock, &info->gate, LCK_SLEEP_DEFAULT,
            THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
        break;
    case RW_LOCK:
        ret = lck_rw_gate_wait(&info->rw_lock, &info->gate, LCK_SLEEP_DEFAULT,
            THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
        break;
    default:
        panic("invalid type %d", info->prim_type);
    }
    return ret;
}
static void
primitive_gate_open(struct info_sleep_inheritor_test *info)
{
    switch (info->prim_type) {
    case MTX_LOCK:
        lck_mtx_gate_open(&info->mtx_lock, &info->gate);
        break;
    case RW_LOCK:
        lck_rw_gate_open(&info->rw_lock, &info->gate);
        break;
    default:
        panic("invalid type %d", info->prim_type);
    }
}

static void
primitive_gate_close(struct info_sleep_inheritor_test *info)
{
    switch (info->prim_type) {
    case MTX_LOCK:
        lck_mtx_gate_close(&info->mtx_lock, &info->gate);
        break;
    case RW_LOCK:
        lck_rw_gate_close(&info->rw_lock, &info->gate);
        break;
    default:
        panic("invalid type %d", info->prim_type);
    }
}

static void
primitive_gate_steal(struct info_sleep_inheritor_test *info)
{
    switch (info->prim_type) {
    case MTX_LOCK:
        lck_mtx_gate_steal(&info->mtx_lock, &info->gate);
        break;
    case RW_LOCK:
        lck_rw_gate_steal(&info->rw_lock, &info->gate);
        break;
    default:
        panic("invalid type %d", info->prim_type);
    }
}
static kern_return_t
primitive_gate_handoff(struct info_sleep_inheritor_test *info, int flags)
{
    kern_return_t ret = KERN_SUCCESS;
    switch (info->prim_type) {
    case MTX_LOCK:
        ret = lck_mtx_gate_handoff(&info->mtx_lock, &info->gate, flags);
        break;
    case RW_LOCK:
        ret = lck_rw_gate_handoff(&info->rw_lock, &info->gate, flags);
        break;
    default:
        panic("invalid type %d", info->prim_type);
    }
    return ret;
}

static void
primitive_gate_assert(struct info_sleep_inheritor_test *info, int type)
{
    switch (info->prim_type) {
    case MTX_LOCK:
        lck_mtx_gate_assert(&info->mtx_lock, &info->gate, type);
        break;
    case RW_LOCK:
        lck_rw_gate_assert(&info->rw_lock, &info->gate, type);
        break;
    default:
        panic("invalid type %d", info->prim_type);
    }
}
static void
primitive_gate_init(struct info_sleep_inheritor_test *info)
{
    switch (info->prim_type) {
    case MTX_LOCK:
        lck_mtx_gate_init(&info->mtx_lock, &info->gate);
        break;
    case RW_LOCK:
        lck_rw_gate_init(&info->rw_lock, &info->gate);
        break;
    default:
        panic("invalid type %d", info->prim_type);
    }
}

static void
primitive_gate_destroy(struct info_sleep_inheritor_test *info)
{
    switch (info->prim_type) {
    case MTX_LOCK:
        lck_mtx_gate_destroy(&info->mtx_lock, &info->gate);
        break;
    case RW_LOCK:
        lck_rw_gate_destroy(&info->rw_lock, &info->gate);
        break;
    default:
        panic("invalid type %d", info->prim_type);
    }
}
static void
thread_inheritor_like_mutex(
    void *args,
    __unused wait_result_t wr)
{
    wait_result_t wait;

    struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test *) args;
    uint my_pri = current_thread()->sched_pri;

    T_LOG("Started thread pri %d %p", my_pri, current_thread());

    /*
     * spin here to start concurrently
     */
    wake_threads(&info->synch);
    wait_threads(&info->synch, info->synch_value);

    primitive_lock(info);

    if (info->thread_inheritor == NULL) {
        info->thread_inheritor = current_thread();
    } else {
        wait = primitive_sleep_with_inheritor(info);
        T_ASSERT(wait == THREAD_AWAKENED || wait == THREAD_NOT_WAITING, "sleep_with_inheritor return");
    }
    primitive_unlock(info);

    IOSleep(100);
    info->value++;

    primitive_lock(info);

    T_ASSERT(info->thread_inheritor == current_thread(), "thread_inheritor is %p", info->thread_inheritor);
    primitive_wakeup_one_with_inheritor(info);
    T_LOG("woken up %p", info->thread_inheritor);

    if (info->thread_inheritor == NULL) {
        T_ASSERT(info->handoff_failure == 0, "handoff failures");
        info->handoff_failure++;
    } else {
        T_ASSERT(info->thread_inheritor != current_thread(), "thread_inheritor is %p", info->thread_inheritor);
        thread_deallocate(info->thread_inheritor);
    }

    primitive_unlock(info);

    assert(current_thread()->kern_promotion_schedpri == 0);
    notify_waiter((struct synch_test_common *)info);

    thread_terminate_self();
}
static void
thread_just_inheritor_do_work(
    void *args,
    __unused wait_result_t wr)
{
    struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test *) args;
    uint my_pri = current_thread()->sched_pri;
    uint max_pri;

    T_LOG("Started thread pri %d %p", my_pri, current_thread());
    primitive_lock(info);

    if (info->thread_inheritor == NULL) {
        info->thread_inheritor = current_thread();
        primitive_unlock(info);
        T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

        wait_threads(&info->synch, info->synch_value - 1);

        wait_for_waiters((struct synch_test_common *)info);

        max_pri = get_max_pri((struct synch_test_common *) info);
        T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

        os_atomic_store(&info->synch, 0, relaxed);
        primitive_lock(info);
        primitive_wakeup_all_with_inheritor(info);
    } else {
        wake_threads(&info->synch);
        primitive_sleep_with_inheritor(info);
    }

    primitive_unlock(info);

    assert(current_thread()->kern_promotion_schedpri == 0);
    notify_waiter((struct synch_test_common *)info);

    thread_terminate_self();
}
static void
thread_steal_work(
    void *args,
    __unused wait_result_t wr)
{
    struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test *) args;
    uint my_pri = current_thread()->sched_pri;

    T_LOG("Started thread pri %d %p", my_pri, current_thread());
    primitive_lock(info);

    if (info->thread_inheritor == NULL) {
        info->thread_inheritor = current_thread();
        exclude_current_waiter((struct synch_test_common *)info);

        T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
        primitive_unlock(info);

        wait_threads(&info->synch, info->synch_value - 2);

        wait_for_waiters((struct synch_test_common *)info);
        T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
        primitive_lock(info);
        if (info->thread_inheritor == current_thread()) {
            primitive_wakeup_all_with_inheritor(info);
        }
    } else {
        if (info->steal_pri == 0) {
            info->steal_pri = my_pri;
            info->thread_inheritor = current_thread();
            primitive_change_sleep_inheritor(info);
            exclude_current_waiter((struct synch_test_common *)info);

            primitive_unlock(info);

            wait_threads(&info->synch, info->synch_value - 2);

            T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
            wait_for_waiters((struct synch_test_common *)info);

            T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);

            primitive_lock(info);
            primitive_wakeup_all_with_inheritor(info);
        } else {
            if (my_pri > info->steal_pri) {
                info->steal_pri = my_pri;
            }
            wake_threads(&info->synch);
            primitive_sleep_with_inheritor(info);
            exclude_current_waiter((struct synch_test_common *)info);
        }
    }
    primitive_unlock(info);

    assert(current_thread()->kern_promotion_schedpri == 0);
    notify_waiter((struct synch_test_common *)info);

    thread_terminate_self();
}
static void
thread_no_inheritor_work(
    void *args,
    __unused wait_result_t wr)
{
    struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test *) args;
    uint my_pri = current_thread()->sched_pri;

    T_LOG("Started thread pri %d %p", my_pri, current_thread());
    primitive_lock(info);

    info->value--;
    if (info->value == 0) {
        primitive_wakeup_all_with_inheritor(info);
    } else {
        info->thread_inheritor = NULL;
        primitive_sleep_with_inheritor(info);
    }

    primitive_unlock(info);

    assert(current_thread()->kern_promotion_schedpri == 0);
    notify_waiter((struct synch_test_common *)info);

    thread_terminate_self();
}
static void
thread_mtx_work(
    void *args,
    __unused wait_result_t wr)
{
    struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test *) args;
    uint my_pri = current_thread()->sched_pri;
    int i;
    unsigned int rand;
    unsigned int mod_rand;
    uint max_pri;

    T_LOG("Started thread pri %d %p", my_pri, current_thread());

    for (i = 0; i < 10; i++) {
        lck_mtx_lock(&info->mtx_lock);
        if (info->thread_inheritor == NULL) {
            info->thread_inheritor = current_thread();
            lck_mtx_unlock(&info->mtx_lock);

            T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

            wait_threads(&info->synch, info->synch_value - 1);
            wait_for_waiters((struct synch_test_common *)info);
            max_pri = get_max_pri((struct synch_test_common *) info);
            T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

            os_atomic_store(&info->synch, 0, relaxed);

            lck_mtx_lock(&info->mtx_lock);
            info->thread_inheritor = NULL;
            wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
            lck_mtx_unlock(&info->mtx_lock);
            continue;
        }

        read_random(&rand, sizeof(rand));
        mod_rand = rand % 2;

        wake_threads(&info->synch);
        switch (mod_rand) {
        case 0:
            lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor,
                info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
            lck_mtx_unlock(&info->mtx_lock);
            break;
        case 1:
            lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor,
                info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
            break;
        default:
            panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
        }
    }

    /*
     * spin here to stop using the lock as mutex
     */
    wake_threads(&info->synch);
    wait_threads(&info->synch, info->synch_value);

    for (i = 0; i < 10; i++) {
        /* read_random might sleep so read it before acquiring the mtx as spin */
        read_random(&rand, sizeof(rand));

        lck_mtx_lock_spin(&info->mtx_lock);
        if (info->thread_inheritor == NULL) {
            info->thread_inheritor = current_thread();
            lck_mtx_unlock(&info->mtx_lock);

            T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
            wait_for_waiters((struct synch_test_common *)info);
            max_pri = get_max_pri((struct synch_test_common *) info);
            T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

            lck_mtx_lock_spin(&info->mtx_lock);
            info->thread_inheritor = NULL;
            wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
            lck_mtx_unlock(&info->mtx_lock);
            continue;
        }

        mod_rand = rand % 2;
        switch (mod_rand) {
        case 0:
            lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN, (event_t) &info->thread_inheritor,
                info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
            lck_mtx_unlock(&info->mtx_lock);
            break;
        case 1:
            lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN_ALWAYS, (event_t) &info->thread_inheritor,
                info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
            lck_mtx_unlock(&info->mtx_lock);
            break;
        default:
            panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
        }
    }

    assert(current_thread()->kern_promotion_schedpri == 0);
    notify_waiter((struct synch_test_common *)info);

    thread_terminate_self();
}
static void
thread_rw_work(
    void *args,
    __unused wait_result_t wr)
{
    struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test *) args;
    uint my_pri = current_thread()->sched_pri;
    int i;
    lck_rw_type_t type;
    unsigned int rand;
    unsigned int mod_rand;
    uint max_pri;

    T_LOG("Started thread pri %d %p", my_pri, current_thread());

    for (i = 0; i < 10; i++) {
        type = LCK_RW_TYPE_SHARED;
        lck_rw_lock(&info->rw_lock, type);
        if (info->thread_inheritor == NULL) {
            type = LCK_RW_TYPE_EXCLUSIVE;

            if (lck_rw_lock_shared_to_exclusive(&info->rw_lock)) {
                if (info->thread_inheritor == NULL) {
                    info->thread_inheritor = current_thread();
                    lck_rw_unlock(&info->rw_lock, type);
                    wait_threads(&info->synch, info->synch_value - 1);

                    T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
                    wait_for_waiters((struct synch_test_common *)info);
                    max_pri = get_max_pri((struct synch_test_common *) info);
                    T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

                    os_atomic_store(&info->synch, 0, relaxed);

                    lck_rw_lock(&info->rw_lock, type);
                    info->thread_inheritor = NULL;
                    wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
                    lck_rw_unlock(&info->rw_lock, type);
                    continue;
                }
            } else {
                /* shared-to-exclusive upgrade failed and dropped the lock; reacquire it */
                lck_rw_lock(&info->rw_lock, type);
            }
        }

        read_random(&rand, sizeof(rand));
        mod_rand = rand % 4;

        wake_threads(&info->synch);
        switch (mod_rand) {
        case 0:
            lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor,
                info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
            lck_rw_unlock(&info->rw_lock, type);
            break;
        case 1:
            lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor,
                info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
            break;
        case 2:
            lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_SHARED, (event_t) &info->thread_inheritor,
                info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
            lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_SHARED);
            break;
        case 3:
            lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_EXCLUSIVE, (event_t) &info->thread_inheritor,
                info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
            lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
            break;
        default:
            panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
        }
    }

    assert(current_thread()->kern_promotion_schedpri == 0);
    notify_waiter((struct synch_test_common *)info);

    thread_terminate_self();
}
static void
test_sleep_with_wake_all(struct info_sleep_inheritor_test *info, int prim_type)
{
    info->prim_type = prim_type;

    info->synch = 0;
    info->synch_value = info->head.nthreads;

    info->thread_inheritor = NULL;

    start_threads((thread_continue_t)thread_just_inheritor_do_work, (struct synch_test_common *)info, TRUE);
    wait_all_thread((struct synch_test_common *)info);
}

static void
test_sleep_with_wake_one(struct info_sleep_inheritor_test *info, int prim_type)
{
    info->prim_type = prim_type;

    info->synch = 0;
    info->synch_value = info->head.nthreads;
    info->value = 0;
    info->handoff_failure = 0;
    info->thread_inheritor = NULL;

    start_threads((thread_continue_t)thread_inheritor_like_mutex, (struct synch_test_common *)info, FALSE);
    wait_all_thread((struct synch_test_common *)info);

    T_ASSERT(info->value == (int)info->head.nthreads, "value protected by sleep");
    T_ASSERT(info->handoff_failure == 1, "handoff failures");
}
static void
test_change_sleep_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
{
    info->prim_type = prim_type;

    info->thread_inheritor = NULL;
    info->steal_pri = 0;
    info->synch = 0;
    info->synch_value = info->head.nthreads;

    start_threads((thread_continue_t)thread_steal_work, (struct synch_test_common *)info, FALSE);
    wait_all_thread((struct synch_test_common *)info);
}

static void
test_no_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
{
    info->prim_type = prim_type;

    info->synch = 0;
    info->synch_value = info->head.nthreads;

    info->thread_inheritor = NULL;
    info->value = info->head.nthreads;

    start_threads((thread_continue_t)thread_no_inheritor_work, (struct synch_test_common *)info, FALSE);
    wait_all_thread((struct synch_test_common *)info);
}
static void
test_rw_lock(struct info_sleep_inheritor_test *info)
{
    info->thread_inheritor = NULL;
    info->value = info->head.nthreads;
    info->synch = 0;
    info->synch_value = info->head.nthreads;

    start_threads((thread_continue_t)thread_rw_work, (struct synch_test_common *)info, FALSE);
    wait_all_thread((struct synch_test_common *)info);
}

static void
test_mtx_lock(struct info_sleep_inheritor_test *info)
{
    info->thread_inheritor = NULL;
    info->value = info->head.nthreads;
    info->synch = 0;
    info->synch_value = info->head.nthreads;

    start_threads((thread_continue_t)thread_mtx_work, (struct synch_test_common *)info, FALSE);
    wait_all_thread((struct synch_test_common *)info);
}
kern_return_t
ts_kernel_sleep_inheritor_test(void)
{
	struct info_sleep_inheritor_test info = {};

	init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);

	lck_attr_t *lck_attr = lck_attr_alloc_init();
	lck_grp_attr_t *lck_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_t *lck_grp = lck_grp_alloc_init("test sleep_inheritor", lck_grp_attr);

	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
	lck_rw_init(&info.rw_lock, lck_grp, lck_attr);

	/*
	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
	 */
	T_LOG("Testing mtx sleep with inheritor and wake_all_with_inheritor");
	test_sleep_with_wake_all(&info, MTX_LOCK);

	/*
	 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
	 */
	T_LOG("Testing rw sleep with inheritor and wake_all_with_inheritor");
	test_sleep_with_wake_all(&info, RW_LOCK);

	/*
	 * Testing lck_mtx_sleep_with_inheritor and wakeup_one_with_inheritor
	 */
	T_LOG("Testing mtx sleep with inheritor and wake_one_with_inheritor");
	test_sleep_with_wake_one(&info, MTX_LOCK);

	/*
	 * Testing lck_rw_sleep_with_inheritor and wakeup_one_with_inheritor
	 */
	T_LOG("Testing rw sleep with inheritor and wake_one_with_inheritor");
	test_sleep_with_wake_one(&info, RW_LOCK);

	/*
	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
	 * and change_sleep_inheritor
	 */
	T_LOG("Testing change_sleep_inheritor with mtx sleep");
	test_change_sleep_inheritor(&info, MTX_LOCK);

	/*
	 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
	 * and change_sleep_inheritor
	 */
	T_LOG("Testing change_sleep_inheritor with rw sleep");
	test_change_sleep_inheritor(&info, RW_LOCK);

	/*
	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
	 * with inheritor NULL
	 */
	T_LOG("Testing inheritor NULL");
	test_no_inheritor(&info, MTX_LOCK);

	/*
	 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
	 * with inheritor NULL
	 */
	T_LOG("Testing inheritor NULL");
	test_no_inheritor(&info, RW_LOCK);

	/*
	 * Testing mtx locking combinations
	 */
	T_LOG("Testing mtx locking combinations");
	test_mtx_lock(&info);

	/*
	 * Testing rw locking combinations
	 */
	T_LOG("Testing rw locking combinations");
	test_rw_lock(&info);

	destroy_synch_test_common((struct synch_test_common *)&info);

	lck_attr_free(lck_attr);
	lck_grp_attr_free(lck_grp_attr);
	lck_rw_destroy(&info.rw_lock, lck_grp);
	lck_mtx_destroy(&info.mtx_lock, lck_grp);
	lck_grp_free(lck_grp);

	return KERN_SUCCESS;
}
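/*
 * Illustrative sketch (not one of the POST tests): the minimal pattern the
 * tests above exercise. A waiter names the thread that will clear the
 * condition as "inheritor", so that thread inherits the waiter's scheduler
 * push while it sleeps; the owner later wakes the event to drop the push.
 * The example_* names are hypothetical.
 */
static __unused void
example_wait_for_owner(lck_mtx_t *mtx, thread_t *owner)
{
	lck_mtx_lock(mtx);
	while (*owner != NULL) {
		/* drops the mutex while sleeping, pushes on *owner, and
		 * re-acquires the mutex before returning */
		lck_mtx_sleep_with_inheritor(mtx, LCK_SLEEP_DEFAULT,
		    (event_t) owner, *owner,
		    THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
	}
	*owner = current_thread();
	lck_mtx_unlock(mtx);
}

static __unused void
example_release_owner(lck_mtx_t *mtx, thread_t *owner)
{
	lck_mtx_lock(mtx);
	*owner = NULL;
	/* wake every waiter and drop the inherited push */
	wakeup_all_with_inheritor((event_t) owner, THREAD_AWAKENED);
	lck_mtx_unlock(mtx);
}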
static void
thread_gate_aggressive(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test *) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);
	if (info->thread_inheritor == NULL) {
		info->thread_inheritor = current_thread();
		primitive_gate_assert(info, GATE_ASSERT_OPEN);
		primitive_gate_close(info);
		exclude_current_waiter((struct synch_test_common *)info);

		primitive_unlock(info);

		wait_threads(&info->synch, info->synch_value - 2);
		wait_for_waiters((struct synch_test_common *)info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

		primitive_lock(info);
		if (info->thread_inheritor == current_thread()) {
			primitive_gate_open(info);
		}
	} else {
		if (info->steal_pri == 0) {
			info->steal_pri = my_pri;
			info->thread_inheritor = current_thread();
			primitive_gate_steal(info);
			exclude_current_waiter((struct synch_test_common *)info);

			primitive_unlock(info);
			wait_threads(&info->synch, info->synch_value - 2);

			T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);
			T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "gate keeper priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);

			primitive_lock(info);
			primitive_gate_open(info);
		} else {
			if (my_pri > info->steal_pri) {
				info->steal_pri = my_pri;
			}
			wake_threads(&info->synch);
			primitive_gate_wait(info);
			exclude_current_waiter((struct synch_test_common *)info);
		}
	}
	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
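/*
 * Note on the steal path above: the first thread closes the gate and
 * publishes itself as thread_inheritor; a later thread takes over the
 * still-closed gate with primitive_gate_steal() and becomes the keeper.
 * Every remaining thread bumps steal_pri to its own priority before
 * waiting, so the T_ASSERT verifies that the stealing keeper inherited
 * the push of its highest-priority waiter.
 */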
static void
thread_gate_like_mutex(
	void *args,
	__unused wait_result_t wr)
{
	gate_wait_result_t wait;
	kern_return_t ret;
	uint my_pri = current_thread()->sched_pri;

	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test *) args;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	/*
	 * spin here to start concurrently
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	primitive_lock(info);

	if (primitive_gate_try_close(info) != KERN_SUCCESS) {
		wait = primitive_gate_wait(info);
		T_ASSERT(wait == GATE_HANDOFF, "gate_wait return");
	}

	primitive_gate_assert(info, GATE_ASSERT_HELD);

	primitive_unlock(info);

	IOSleep(100);
	info->value++;

	primitive_lock(info);

	ret = primitive_gate_handoff(info, GATE_HANDOFF_DEFAULT);
	if (ret == KERN_NOT_WAITING) {
		T_ASSERT(info->handoff_failure == 0, "handoff failures");
		primitive_gate_handoff(info, GATE_HANDOFF_OPEN_IF_NO_WAITERS);
		info->handoff_failure++;
	}

	primitive_unlock(info);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
static void
thread_just_one_do_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test *) args;
	uint my_pri = current_thread()->sched_pri;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);
check_again:
	if (info->work_to_do) {
		if (primitive_gate_try_close(info) == KERN_SUCCESS) {
			primitive_gate_assert(info, GATE_ASSERT_HELD);
			primitive_unlock(info);

			T_LOG("Thread pri %d acquired the gate %p", my_pri, current_thread());
			wait_threads(&info->synch, info->synch_value - 1);
			wait_for_waiters((struct synch_test_common *)info);
			max_pri = get_max_pri((struct synch_test_common *) info);
			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "gate owner priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
			os_atomic_store(&info->synch, 0, relaxed);

			primitive_lock(info);
			info->work_to_do = FALSE;
			primitive_gate_open(info);
		} else {
			primitive_gate_assert(info, GATE_ASSERT_CLOSED);
			wake_threads(&info->synch);
			primitive_gate_wait(info);
			goto check_again;
		}
	}
	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}
static void
test_gate_push(struct info_sleep_inheritor_test *info, int prim_type)
{
	info->prim_type = prim_type;

	primitive_gate_init(info);
	info->work_to_do = TRUE;

	info->synch_value = NUM_THREADS;

	start_threads((thread_continue_t)thread_just_one_do_work, (struct synch_test_common *) info, TRUE);
	wait_all_thread((struct synch_test_common *)info);

	primitive_gate_destroy(info);
}
static void
test_gate_handoff(struct info_sleep_inheritor_test *info, int prim_type)
{
	info->prim_type = prim_type;

	primitive_gate_init(info);

	info->value = 0;
	info->synch_value = NUM_THREADS;

	info->handoff_failure = 0;

	start_threads((thread_continue_t)thread_gate_like_mutex, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);

	T_ASSERT(info->value == NUM_THREADS, "value protected by gate");
	T_ASSERT(info->handoff_failure == 1, "handoff failures");

	primitive_gate_destroy(info);
}
static void
test_gate_steal(struct info_sleep_inheritor_test *info, int prim_type)
{
	info->prim_type = prim_type;

	primitive_gate_init(info);

	info->synch_value = NUM_THREADS;
	info->thread_inheritor = NULL;
	info->steal_pri = 0;

	start_threads((thread_continue_t)thread_gate_aggressive, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);

	primitive_gate_destroy(info);
}
kern_return_t
ts_kernel_gate_test(void)
{
	struct info_sleep_inheritor_test info = {};

	T_LOG("Testing gate primitive");

	init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);

	lck_attr_t *lck_attr = lck_attr_alloc_init();
	lck_grp_attr_t *lck_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_t *lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);

	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
	lck_rw_init(&info.rw_lock, lck_grp, lck_attr);

	/*
	 * Testing the priority inherited by the keeper
	 * lck_mtx_gate_try_close, lck_mtx_gate_open, lck_mtx_gate_wait
	 */
	T_LOG("Testing gate push, lck");
	test_gate_push(&info, MTX_LOCK);

	T_LOG("Testing gate push, rw");
	test_gate_push(&info, RW_LOCK);

	/*
	 * Testing the handoff
	 * lck_mtx_gate_wait, lck_mtx_gate_handoff
	 */
	T_LOG("Testing gate handoff, lck");
	test_gate_handoff(&info, MTX_LOCK);

	T_LOG("Testing gate handoff, rw");
	test_gate_handoff(&info, RW_LOCK);

	/*
	 * Testing the steal
	 * lck_mtx_gate_close, lck_mtx_gate_wait, lck_mtx_gate_steal, lck_mtx_gate_handoff
	 */
	T_LOG("Testing gate steal, lck");
	test_gate_steal(&info, MTX_LOCK);

	T_LOG("Testing gate steal, rw");
	test_gate_steal(&info, RW_LOCK);

	destroy_synch_test_common((struct synch_test_common *)&info);

	lck_attr_free(lck_attr);
	lck_grp_attr_free(lck_grp_attr);
	lck_rw_destroy(&info.rw_lock, lck_grp);
	lck_mtx_destroy(&info.mtx_lock, lck_grp);
	lck_grp_free(lck_grp);

	return KERN_SUCCESS;
}
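/*
 * Illustrative sketch (not one of the POST tests): the gate-as-mutex
 * pattern exercised by test_gate_handoff. The first thread through closes
 * the gate; contenders wait and receive ownership via handoff, so the
 * keeper inherits the push of every waiter. The example_* names are
 * hypothetical.
 */
static __unused void
example_gate_enter(lck_mtx_t *mtx, gate_t *gate)
{
	lck_mtx_lock(mtx);
	if (lck_mtx_gate_try_close(mtx, gate) != KERN_SUCCESS) {
		/* blocks until the keeper hands the gate off to us;
		 * LCK_SLEEP_DEFAULT re-acquires the mutex before returning */
		lck_mtx_gate_wait(mtx, gate, LCK_SLEEP_DEFAULT,
		    THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
	}
	lck_mtx_unlock(mtx);
}

static __unused void
example_gate_exit(lck_mtx_t *mtx, gate_t *gate)
{
	lck_mtx_lock(mtx);
	/* transfer ownership to a waiter, or open the gate if nobody waits */
	lck_mtx_gate_handoff(mtx, gate, GATE_HANDOFF_OPEN_IF_NO_WAITERS);
	lck_mtx_unlock(mtx);
}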
#define NUM_THREAD_CHAIN 6

struct turnstile_chain_test {
	struct synch_test_common head;
	lck_mtx_t mtx_lock;
	int synch_value;
	int synch;
	int synch2;
	gate_t gates[NUM_THREAD_CHAIN];
};
static void
thread_sleep_gate_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test *) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	uint i;
	kern_return_t ret;
	thread_t inheritor = NULL, woken_up;
	event_t wait_event = NULL, wake_event = NULL;

	T_LOG("Started thread pri %d %p", my_pri, self);

	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */
	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	for (i = 0; i < info->head.nthreads; i = i + 2) {
		// even threads will close a gate
		if (info->head.threads[i] == self) {
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		}
	}

	// wait for all the threads to be ready
	wake_threads(&info->synch2);
	wait_threads(&info->synch2, info->synch_value);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		wait_threads(&info->synch, info->synch_value - 1);
		wait_for_waiters((struct synch_test_common *)info);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
		lck_mtx_unlock(&info->mtx_lock);
	} else {
		for (i = 0; i < info->head.nthreads; i++) {
			if (info->head.threads[i] == self) {
				inheritor = info->head.threads[i - 1];
				wait_event = (event_t) &info->head.threads[i - 1];
				wake_event = (event_t) &info->head.threads[i];
				break;
			}
		}
		assert(wait_event != NULL);

		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);

		if (i % 2 != 0) {
			// odd threads wait on the gate closed by the previous thread
			lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

			ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
			if (ret == KERN_SUCCESS) {
				T_ASSERT(i != (info->head.nthreads - 1), "thread id");
				T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
			} else {
				T_ASSERT(i == (info->head.nthreads - 1), "thread id");
			}

			// i am still the inheritor, wake all to drop inheritership
			ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
			T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
		} else {
			// I previously closed a gate, sleep pushing on the previous thread
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
		}
	}

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
static void
thread_gate_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test *) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	uint i;

	T_LOG("Started thread pri %d %p", my_pri, self);

	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */
	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	for (i = 0; i < info->head.nthreads; i++) {
		if (info->head.threads[i] == self) {
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		}
	}
	assert(i != info->head.nthreads);

	// wait for all the threads to be ready
	wake_threads(&info->synch2);
	wait_threads(&info->synch2, info->synch_value);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
		lck_mtx_unlock(&info->mtx_lock);
	} else {
		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);
		lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
		lck_mtx_unlock(&info->mtx_lock);
	}

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
static void
thread_sleep_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test *) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	uint i;
	event_t wait_event = NULL, wake_event = NULL;
	kern_return_t ret;
	thread_t inheritor = NULL, woken_up = NULL;

	T_LOG("Started thread pri %d %p", my_pri, self);

	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */
	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		ret = wakeup_one_with_inheritor((event_t) &info->head.threads[0], THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
		T_ASSERT(ret == KERN_SUCCESS, "wakeup_one_with_inheritor woke next");
		T_ASSERT(woken_up == info->head.threads[1], "thread woken up");

		// i am still the inheritor, wake all to drop inheritership
		ret = wakeup_all_with_inheritor((event_t) &info->head.threads[0], LCK_WAKE_DEFAULT);
		T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
	} else {
		for (i = 0; i < info->head.nthreads; i++) {
			if (info->head.threads[i] == self) {
				inheritor = info->head.threads[i - 1];
				wait_event = (event_t) &info->head.threads[i - 1];
				wake_event = (event_t) &info->head.threads[i];
				break;
			}
		}

		assert(wait_event != NULL);
		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);

		lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
		if (ret == KERN_SUCCESS) {
			T_ASSERT(i != (info->head.nthreads - 1), "thread id");
			T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
		} else {
			T_ASSERT(i == (info->head.nthreads - 1), "thread id");
		}

		// i am still the inheritor, wake all to drop inheritership
		ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
		T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
	}

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
static void
test_sleep_chain(struct turnstile_chain_test *info)
{
	info->synch = 0;
	info->synch_value = info->head.nthreads;

	start_threads((thread_continue_t)thread_sleep_chain_work, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);
}
static void
test_gate_chain(struct turnstile_chain_test *info)
{
	info->synch = 0;
	info->synch2 = 0;
	info->synch_value = info->head.nthreads;

	start_threads((thread_continue_t)thread_gate_chain_work, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);
}
static void
test_sleep_gate_chain(struct turnstile_chain_test *info)
{
	info->synch = 0;
	info->synch2 = 0;
	info->synch_value = info->head.nthreads;

	start_threads((thread_continue_t)thread_sleep_gate_chain_work, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);
}
kern_return_t
ts_kernel_turnstile_chain_test(void)
{
	struct turnstile_chain_test info = {};
	int i;

	init_synch_test_common((struct synch_test_common *)&info, NUM_THREAD_CHAIN);
	lck_attr_t *lck_attr = lck_attr_alloc_init();
	lck_grp_attr_t *lck_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_t *lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);

	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
	for (i = 0; i < NUM_THREAD_CHAIN; i++) {
		lck_mtx_gate_init(&info.mtx_lock, &info.gates[i]);
	}

	T_LOG("Testing sleep chain, lck");
	test_sleep_chain(&info);

	T_LOG("Testing gate chain, lck");
	test_gate_chain(&info);

	T_LOG("Testing sleep and gate chain, lck");
	test_sleep_gate_chain(&info);

	destroy_synch_test_common((struct synch_test_common *)&info);
	for (i = 0; i < NUM_THREAD_CHAIN; i++) {
		lck_mtx_gate_destroy(&info.mtx_lock, &info.gates[i]);
	}
	lck_attr_free(lck_attr);
	lck_grp_attr_free(lck_grp_attr);
	lck_mtx_destroy(&info.mtx_lock, lck_grp);
	lck_grp_free(lck_grp);

	return KERN_SUCCESS;
}
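/*
 * Note on the chain tests above: each waiter blocks on a primitive owned by
 * the previous thread in the chain, so the turnstiles link into a chain and
 * the scheduler push from the highest-priority waiter must propagate through
 * every hop down to thread 0. That propagation is what the T_ASSERTs on
 * sched_pri == max_pri verify at each step.
 */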
kern_return_t
ts_kernel_timingsafe_bcmp_test(void)
{
	int i;
	int buf_size;
	char *buf;

	// empty
	T_ASSERT(timingsafe_bcmp(NULL, NULL, 0) == 0, NULL);
	T_ASSERT(timingsafe_bcmp("foo", "foo", 0) == 0, NULL);
	T_ASSERT(timingsafe_bcmp("foo", "bar", 0) == 0, NULL);

	// equal
	T_ASSERT(timingsafe_bcmp("foo", "foo", strlen("foo")) == 0, NULL);

	// unequal
	T_ASSERT(timingsafe_bcmp("foo", "bar", strlen("foo")) == 1, NULL);
	T_ASSERT(timingsafe_bcmp("foo", "goo", strlen("foo")) == 1, NULL);
	T_ASSERT(timingsafe_bcmp("foo", "fpo", strlen("foo")) == 1, NULL);
	T_ASSERT(timingsafe_bcmp("foo", "fop", strlen("foo")) == 1, NULL);

	// all possible bitwise differences
	for (i = 1; i < 256; i += 1) {
		unsigned char a = 0;
		unsigned char b = (unsigned char)i;

		T_ASSERT(timingsafe_bcmp(&a, &b, sizeof(a)) == 1, NULL);
	}

	// large
	buf_size = 1024 * 16;
	buf = kalloc(buf_size);
	T_EXPECT_NOTNULL(buf, "kalloc of buf");

	read_random(buf, buf_size);
	T_ASSERT(timingsafe_bcmp(buf, buf, buf_size) == 0, NULL);
	T_ASSERT(timingsafe_bcmp(buf, buf + 1, buf_size - 1) == 1, NULL);
	T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 1, NULL);

	memcpy(buf + 128, buf, 128);
	T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 0, NULL);

	kfree(buf, buf_size);

	return KERN_SUCCESS;
}
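/*
 * Illustrative sketch (not one of the POST tests): as the asserts above
 * show, timingsafe_bcmp returns 0 on equality and 1 on any difference, and
 * its running time depends only on the length, which makes it the right
 * primitive for comparing secrets. example_mac_matches is a hypothetical
 * name used only for illustration.
 */
static __unused boolean_t
example_mac_matches(const uint8_t *expected, const uint8_t *received, size_t len)
{
	/* an early-exit memcmp here would leak, via timing, how long the
	 * matching prefix is */
	return timingsafe_bcmp(expected, received, len) == 0;
}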
kern_return_t
kprintf_hhx_test(void)
{
	printf("POST hhx test %hx%hx%hx%hx %hhx%hhx%hhx%hhx - %llx",
	    (unsigned short)0xfeed, (unsigned short)0xface,
	    (unsigned short)0xabad, (unsigned short)0xcafe,
	    (unsigned char)'h', (unsigned char)'h', (unsigned char)'x',
	    (unsigned char)'!', 0xfeedfacefeedfaceULL);
	return KERN_SUCCESS;
}