/*
 * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/host.h>
#include <kern/macro_help.h>
#include <kern/sched.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/thread_call.h>
#include <kern/zalloc_internal.h>
#include <kern/kalloc.h>
#include <tests/ktest.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <kern/kern_cdata.h>
#include <machine/lowglobals.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_protos.h>
#if !(DEVELOPMENT || DEBUG)
#error "Testing is not enabled on RELEASE configurations"
#endif

#include <tests/xnupost.h>
extern boolean_t get_range_bounds(char * c, int64_t * lower, int64_t * upper);
__private_extern__ void
qsort(void * a, size_t n, size_t es, int (*cmp)(const void *, const void *));

uint32_t total_post_tests_count = 0;
void xnupost_reset_panic_widgets(void);
/* test declarations */
kern_return_t zalloc_test(void);
kern_return_t RandomULong_test(void);
kern_return_t kcdata_api_test(void);
kern_return_t ts_kernel_primitive_test(void);
kern_return_t ts_kernel_sleep_inheritor_test(void);
kern_return_t ts_kernel_gate_test(void);
kern_return_t ts_kernel_turnstile_chain_test(void);
kern_return_t ts_kernel_timingsafe_bcmp_test(void);

extern kern_return_t vfp_state_test(void);

extern kern_return_t kprintf_hhx_test(void);

#if defined(__arm__) || defined(__arm64__)
kern_return_t pmap_coredump_test(void);
#endif

extern kern_return_t console_serial_test(void);
extern kern_return_t console_serial_alloc_rel_tests(void);
extern kern_return_t console_serial_parallel_log_tests(void);
extern kern_return_t test_os_log(void);
extern kern_return_t test_os_log_parallel(void);
extern kern_return_t bitmap_post_test(void);
extern kern_return_t counter_tests(void);

#if defined(__arm64__)
extern kern_return_t arm64_munger_test(void);
extern kern_return_t ex_cb_test(void);
#if __ARM_PAN_AVAILABLE__
extern kern_return_t arm64_pan_test(void);
#endif
#if defined(HAS_APPLE_PAC)
extern kern_return_t arm64_ropjop_test(void);
#endif /* defined(HAS_APPLE_PAC) */
#endif /* __arm64__ */

extern kern_return_t test_thread_call(void);
struct xnupost_panic_widget xt_panic_widgets = {.xtp_context_p = NULL,
	                                        .xtp_outval_p  = NULL,
	                                        .xtp_func_name = NULL,
	                                        .xtp_func      = NULL};
struct xnupost_test kernel_post_tests[] = {
	XNUPOST_TEST_CONFIG_BASIC(zalloc_test),
	XNUPOST_TEST_CONFIG_BASIC(RandomULong_test),
	XNUPOST_TEST_CONFIG_BASIC(test_os_log),
	XNUPOST_TEST_CONFIG_BASIC(test_os_log_parallel),
#if defined(__arm64__)
	XNUPOST_TEST_CONFIG_BASIC(arm64_munger_test),
	XNUPOST_TEST_CONFIG_BASIC(ex_cb_test),
#if __ARM_PAN_AVAILABLE__
	XNUPOST_TEST_CONFIG_BASIC(arm64_pan_test),
#endif
#if defined(HAS_APPLE_PAC)
	XNUPOST_TEST_CONFIG_BASIC(arm64_ropjop_test),
#endif /* defined(HAS_APPLE_PAC) */
#endif /* __arm64__ */
	XNUPOST_TEST_CONFIG_BASIC(kcdata_api_test),
	XNUPOST_TEST_CONFIG_BASIC(console_serial_test),
	XNUPOST_TEST_CONFIG_BASIC(console_serial_alloc_rel_tests),
	XNUPOST_TEST_CONFIG_BASIC(console_serial_parallel_log_tests),
#if defined(__arm__) || defined(__arm64__)
	XNUPOST_TEST_CONFIG_BASIC(pmap_coredump_test),
#endif
	XNUPOST_TEST_CONFIG_BASIC(bitmap_post_test),
	//XNUPOST_TEST_CONFIG_TEST_PANIC(kcdata_api_assert_tests)
	XNUPOST_TEST_CONFIG_BASIC(test_thread_call),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_primitive_test),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_sleep_inheritor_test),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_gate_test),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_turnstile_chain_test),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_timingsafe_bcmp_test),
	XNUPOST_TEST_CONFIG_BASIC(kprintf_hhx_test),
#if defined(__arm64__)
	XNUPOST_TEST_CONFIG_BASIC(vfp_state_test),
#endif
	XNUPOST_TEST_CONFIG_BASIC(vm_tests),
	XNUPOST_TEST_CONFIG_BASIC(counter_tests)};
uint32_t kernel_post_tests_count = sizeof(kernel_post_tests) / sizeof(xnupost_test_data_t);
#define POSTARGS_RUN_TESTS 0x1
#define POSTARGS_CONTROLLER_AVAILABLE 0x2
#define POSTARGS_CUSTOM_TEST_RUNLIST 0x4
uint64_t kernel_post_args = 0x0;
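/*
 * Illustrative note (not from this file): the flags above are parsed out of
 * the "kernPOST" boot-arg below, so a hypothetical invocation that runs the
 * tests with a controller attached and a custom runlist would set all three
 * bits:
 *
 *   kernPOST=0x7 kernPOST_config="2:4,7"
 *
 * where 0x7 == POSTARGS_RUN_TESTS | POSTARGS_CONTROLLER_AVAILABLE |
 * POSTARGS_CUSTOM_TEST_RUNLIST.
 */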
/* static variables to hold state */
static kern_return_t parse_config_retval = KERN_INVALID_CAPABILITY;
static char kernel_post_test_configs[256];
boolean_t xnupost_should_run_test(uint32_t test_num);
kern_return_t
xnupost_parse_config()
{
	if (parse_config_retval != KERN_INVALID_CAPABILITY) {
		return parse_config_retval;
	}
	PE_parse_boot_argn("kernPOST", &kernel_post_args, sizeof(kernel_post_args));

	if (PE_parse_boot_argn("kernPOST_config", &kernel_post_test_configs[0], sizeof(kernel_post_test_configs)) == TRUE) {
		kernel_post_args |= POSTARGS_CUSTOM_TEST_RUNLIST;
	}

	if (kernel_post_args != 0) {
		parse_config_retval = KERN_SUCCESS;
		goto out;
	}
	parse_config_retval = KERN_NOT_SUPPORTED;
out:
	return parse_config_retval;
}
boolean_t
xnupost_should_run_test(uint32_t test_num)
{
	if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
		int64_t begin = 0, end = 999999;
		char * b = kernel_post_test_configs;
		while (*b) {
			get_range_bounds(b, &begin, &end);
			if (test_num >= begin && test_num <= end) {
				return TRUE;
			}

			/* skip to the next "," */
			while (*b != ',') {
				if (*b == '\0') {
					return FALSE;
				}
				b++;
			}
			/* skip past the ',' */
			b++;
		}
		return FALSE;
	}
	return TRUE;
}
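/*
 * Worked example (a sketch, assuming get_range_bounds() accepts a
 * comma-separated list of begin:end ranges; the exact syntax is defined by
 * get_range_bounds(), which lives elsewhere): with
 * kernPOST_config="2:4,7:7" the loop above yields the ranges [2,4] and
 * [7,7], so xnupost_should_run_test(3) and xnupost_should_run_test(7)
 * return TRUE while xnupost_should_run_test(5) returns FALSE.
 */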
kern_return_t
xnupost_list_tests(xnupost_test_t test_list, uint32_t test_count)
{
	if (KERN_SUCCESS != xnupost_parse_config()) {
		return KERN_FAILURE;
	}

	xnupost_test_t testp;
	for (uint32_t i = 0; i < test_count; i++) {
		testp = &test_list[i];
		if (testp->xt_test_num == 0) {
			assert(total_post_tests_count < UINT16_MAX);
			testp->xt_test_num = (uint16_t)++total_post_tests_count;
		}
		/* make sure the boot-arg based test run list is honored */
		if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
			testp->xt_config |= XT_CONFIG_IGNORE;
			if (xnupost_should_run_test(testp->xt_test_num)) {
				testp->xt_config &= ~(XT_CONFIG_IGNORE);
				testp->xt_config |= XT_CONFIG_RUN;
				printf("\n[TEST] #%u is marked as ignored", testp->xt_test_num);
			}
		}
		printf("\n[TEST] TOC#%u name: %s expected: %d config: %x\n", testp->xt_test_num, testp->xt_name, testp->xt_expected_retval,
		    testp->xt_config);
	}

	return KERN_SUCCESS;
}
kern_return_t
xnupost_run_tests(xnupost_test_t test_list, uint32_t test_count)
{
	uint32_t i = 0;
	int retval = KERN_SUCCESS;

	if ((kernel_post_args & POSTARGS_RUN_TESTS) == 0) {
		printf("No POST boot-arg set.\n");
		return retval;
	}

	xnupost_test_t testp;
	for (; i < test_count; i++) {
		xnupost_reset_panic_widgets();
		testp = &test_list[i];
		T_BEGIN(testp->xt_name);
		testp->xt_begin_time = mach_absolute_time();
		testp->xt_end_time   = testp->xt_begin_time;

		/*
		 * If test is designed to panic and controller
		 * is not available then mark as SKIPPED
		 */
		if ((testp->xt_config & XT_CONFIG_EXPECT_PANIC) && !(kernel_post_args & POSTARGS_CONTROLLER_AVAILABLE)) {
			T_SKIP(
				"Test expects panic but "
				"no controller is present");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		if ((testp->xt_config & XT_CONFIG_IGNORE)) {
			T_SKIP("Test is marked as XT_CONFIG_IGNORE");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		testp->xt_func();
		T_END;
		testp->xt_retval   = T_TESTRESULT;
		testp->xt_end_time = mach_absolute_time();
		if (testp->xt_retval == testp->xt_expected_retval) {
			testp->xt_test_actions = XT_ACTION_PASSED;
		} else {
			testp->xt_test_actions = XT_ACTION_FAILED;
		}
	}

	return retval;
}

kern_return_t
kernel_list_tests()
{
	return xnupost_list_tests(kernel_post_tests, kernel_post_tests_count);
}

kern_return_t
kernel_do_post()
{
	return xnupost_run_tests(kernel_post_tests, kernel_post_tests_count);
}
kern_return_t
xnupost_register_panic_widget(xt_panic_widget_func funcp, const char * funcname, void * context, void ** outval)
{
	if (xt_panic_widgets.xtp_context_p != NULL || xt_panic_widgets.xtp_func != NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}

	xt_panic_widgets.xtp_context_p = context;
	xt_panic_widgets.xtp_func      = funcp;
	xt_panic_widgets.xtp_func_name = funcname;
	xt_panic_widgets.xtp_outval_p  = outval;

	return KERN_SUCCESS;
}
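/*
 * Usage sketch (a hypothetical caller, mirroring the commented-out
 * kcdata_api_assert_tests() later in this file): a test that expects to
 * trip an assert registers a widget first, then checks the outval after
 * the panic path has been exercised.
 *
 *   void *outval = NULL;
 *   kern_return_t kr = xnupost_register_panic_widget(
 *       _xt_generic_assert_check, "my_assert_check",
 *       (void *)"substring to match", &outval);
 *   // ...trigger the assert here...
 *   T_ASSERT(outval == (void *)XT_RET_W_SUCCESS, "widget was hit");
 */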
void
xnupost_reset_panic_widgets()
{
	bzero(&xt_panic_widgets, sizeof(xt_panic_widgets));
}
kern_return_t
xnupost_process_kdb_stop(const char * panic_s)
{
	xt_panic_return_t retval         = 0;
	struct xnupost_panic_widget * pw = &xt_panic_widgets;
	const char * name = "unknown";
	if (xt_panic_widgets.xtp_func_name) {
		name = xt_panic_widgets.xtp_func_name;
	}

	/* bail early on if kernPOST is not set */
	if (kernel_post_args == 0) {
		return KERN_INVALID_CAPABILITY;
	}

	if (xt_panic_widgets.xtp_func) {
		T_LOG("%s: Calling out to widget: %s", __func__, xt_panic_widgets.xtp_func_name);
		retval = pw->xtp_func(panic_s, pw->xtp_context_p, pw->xtp_outval_p);
	} else {
		return KERN_INVALID_CAPABILITY;
	}

	switch (retval) {
	case XT_RET_W_SUCCESS:
		T_EXPECT_EQ_INT(retval, XT_RET_W_SUCCESS, "%s reported successful handling. Returning from kdb_stop.", name);
		/* KERN_SUCCESS means return from panic/assertion */
		return KERN_SUCCESS;

	case XT_RET_W_FAIL:
		T_FAIL("%s reported XT_RET_W_FAIL: Returning from kdb_stop", name);
		return KERN_SUCCESS;

	case XT_PANIC_W_FAIL:
		T_FAIL("%s reported XT_PANIC_W_FAIL: Continuing to kdb_stop", name);
		return KERN_FAILURE;

	case XT_PANIC_W_SUCCESS:
		T_EXPECT_EQ_INT(retval, XT_PANIC_W_SUCCESS, "%s reported successful testcase. But continuing to kdb_stop.", name);
		return KERN_FAILURE;

	case XT_PANIC_UNRELATED:
	default:
		T_LOG("UNRELATED: Continuing to kdb_stop.");
		return KERN_FAILURE;
	}
}
xt_panic_return_t
_xt_generic_assert_check(const char * s, void * str_to_match, void ** outval)
{
	xt_panic_return_t ret = XT_PANIC_UNRELATED;

	if (NULL != strnstr(__DECONST(char *, s), (char *)str_to_match, strlen(s))) {
		T_LOG("%s: kdb_stop string: '%s' MATCHED string: '%s'", __func__, s, (char *)str_to_match);
		ret = XT_RET_W_SUCCESS;
	}

	if (outval) {
		*outval = (void *)(uintptr_t)ret;
	}
	return ret;
}
kern_return_t
xnupost_reset_tests(xnupost_test_t test_list, uint32_t test_count)
{
	uint32_t i = 0;
	xnupost_test_t testp;
	for (; i < test_count; i++) {
		testp                  = &test_list[i];
		testp->xt_begin_time   = 0;
		testp->xt_end_time     = 0;
		testp->xt_test_actions = XT_ACTION_NONE;
		testp->xt_retval       = -1;
	}
	return KERN_SUCCESS;
}
kern_return_t
zalloc_test(void)
{
	zone_t test_zone;
	void * test_ptr;

	test_zone = zone_create("test_uint64_zone", sizeof(uint64_t),
	    ZC_DESTRUCTIBLE);
	T_ASSERT_NOTNULL(test_zone, NULL);

	T_ASSERT_EQ_INT(test_zone->z_elems_free, 0, NULL);

	T_ASSERT_NOTNULL(test_ptr = zalloc(test_zone), NULL);

	zfree(test_zone, test_ptr);

	/* A sample report for perfdata */
	T_PERF("num_threads_at_ktest", threads_count, "count", "# of threads in system at zalloc_test");

	zdestroy(test_zone);
	return KERN_SUCCESS;
}
/*
 * Function used for comparison by qsort()
 */
static int
compare_numbers_ascending(const void * a, const void * b)
{
	const uint64_t x = *(const uint64_t *)a;
	const uint64_t y = *(const uint64_t *)b;
	if (x < y) {
		return -1;
	} else if (x > y) {
		return 1;
	} else {
		return 0;
	}
}

/*
 * Function to count number of bits that are set in a number.
 * It uses Side Addition using Magic Binary Numbers
 */
static int
count_bits(uint64_t number)
{
	return __builtin_popcountll(number);
}
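/*
 * For reference, the "Side Addition using Magic Binary Numbers" technique
 * that the comment above names can be written out explicitly. This sketch
 * is illustrative only (the test relies on the compiler builtin) and is
 * equivalent to __builtin_popcountll() for 64-bit inputs:
 */
#if 0 /* illustrative only, not compiled */
static int
count_bits_swar(uint64_t v)
{
	v = v - ((v >> 1) & 0x5555555555555555ULL);                           /* sum adjacent bit pairs */
	v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL); /* sum pairs into nibbles */
	v = (v + (v >> 4)) & 0x0f0f0f0f0f0f0f0fULL;                           /* sum nibbles into bytes */
	return (int)((v * 0x0101010101010101ULL) >> 56);                      /* add all bytes into the top byte */
}
#endif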
/*
 * Randomness test for RandomULong()
 *
 * This test verifies that:
 * a. RandomULong works
 * b. The generated numbers match the following entropy criteria:
 *    For a thousand iterations, verify:
 *    1. mean entropy > 12 bits
 *    2. min entropy > 4 bits
 *    3. No duplicates
 *    4. No incremental/decremental pattern in a window of 3
 *    5. No zero and no -1
 *
 * <rdar://problem/22526137> Add test to increase code coverage for /dev/random
 */

#define CONF_MIN_ENTROPY 4
#define CONF_MEAN_ENTROPY 12
#define CONF_ITERATIONS 1000
#define CONF_WINDOW_SIZE 3
#define CONF_WINDOW_TREND_LIMIT ((CONF_WINDOW_SIZE / 2) + (CONF_WINDOW_SIZE & 1)) >> 0
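/*
 * Note on the arithmetic: CONF_WINDOW_TREND_LIMIT spells out
 * ceil(CONF_WINDOW_SIZE / 2) in integer math. For the window size of 3
 * this is (3 / 2) + (3 & 1) = 1 + 1 = 2; the trailing ">> 0" is a no-op.
 * The trend check in TEST 3 below rejects any window whose trend count
 * exceeds this limit.
 */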
kern_return_t
RandomULong_test(void)
{
	uint32_t i;
	uint32_t min_bit_entropy, max_bit_entropy, bit_entropy;
	uint32_t aggregate_bit_entropy = 0;
	uint32_t mean_bit_entropy      = 0;
	uint64_t numbers[CONF_ITERATIONS];
	min_bit_entropy = UINT32_MAX;
	max_bit_entropy = 0;

	/*
	 * TEST 1: Number generation and basic validation
	 * Check for non-zero (no bits set), -1 (all bits set) and error
	 */
	for (i = 0; i < CONF_ITERATIONS; i++) {
		read_random(&numbers[i], sizeof(numbers[i]));
		if (numbers[i] == 0) {
			T_ASSERT_NE_ULLONG(numbers[i], 0, "read_random returned zero value.");
		}
		if (numbers[i] == UINT64_MAX) {
			T_ASSERT_NE_ULLONG(numbers[i], UINT64_MAX, "read_random returned -1.");
		}
	}
	T_PASS("Generated %d non-zero random numbers with at least one bit reset.", CONF_ITERATIONS);

	/*
	 * TEST 2: Mean and Min Bit Entropy
	 * Check the bit entropy and its mean over the generated numbers.
	 */
	for (i = 1; i < CONF_ITERATIONS; i++) {
		bit_entropy = count_bits(numbers[i - 1] ^ numbers[i]);
		if (bit_entropy < min_bit_entropy) {
			min_bit_entropy = bit_entropy;
		}
		if (bit_entropy > max_bit_entropy) {
			max_bit_entropy = bit_entropy;
		}

		if (bit_entropy < CONF_MIN_ENTROPY) {
			T_EXPECT_GE_UINT(bit_entropy, CONF_MIN_ENTROPY,
			    "Number of differing bits in consecutive numbers does not satisfy the min criteria.");
		}

		aggregate_bit_entropy += bit_entropy;
	}
	T_PASS("Passed the min bit entropy expectation of %d bits", CONF_MIN_ENTROPY);

	mean_bit_entropy = aggregate_bit_entropy / CONF_ITERATIONS;
	T_EXPECT_GE_UINT(mean_bit_entropy, CONF_MEAN_ENTROPY, "Test criteria for mean number of differing bits.");
	T_PASS("Mean bit entropy criteria satisfied (Required %d, Actual: %d).", CONF_MEAN_ENTROPY, mean_bit_entropy);
	T_LOG("{PERFORMANCE} iterations: %d, min_bit_entropy: %d, mean_bit_entropy: %d, max_bit_entropy: %d", CONF_ITERATIONS,
	    min_bit_entropy, mean_bit_entropy, max_bit_entropy);
	T_PERF("min_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), min_bit_entropy, "bits", "minimum bit entropy in RNG. High is better");
	T_PERF("mean_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), mean_bit_entropy, "bits", "mean bit entropy in RNG. High is better");
	T_PERF("max_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), max_bit_entropy, "bits", "max bit entropy in RNG. High is better");

	/*
	 * TEST 3: Incremental Pattern Search
	 * Check that incremental/decremental pattern does not exist in the given window
	 */
	int window_start, window_end, trend;
	window_start = window_end = trend = 0;

	do {
		/*
		 * Set the window
		 */
		window_end = window_start + CONF_WINDOW_SIZE - 1;
		if (window_end >= CONF_ITERATIONS) {
			window_end = CONF_ITERATIONS - 1;
		}

		trend = 0;
		for (i = window_start; i < window_end; i++) {
			if (numbers[i] < numbers[i + 1]) {
				trend++;
			} else if (numbers[i] > numbers[i + 1]) {
				trend--;
			}
		}
		/*
		 * Check that there is no increasing or decreasing trend
		 * i.e. trend <= ceil(window_size/2)
		 */
		if (trend < 0) {
			trend = -trend;
		}
		if (trend > CONF_WINDOW_TREND_LIMIT) {
			T_ASSERT_LE_INT(trend, CONF_WINDOW_TREND_LIMIT, "Found increasing/decreasing trend in random numbers.");
		}

		/*
		 * Move to the next window
		 */
		window_start++;
	} while (window_start < (CONF_ITERATIONS - 1));
	T_PASS("Did not find increasing/decreasing trends in a window of %d numbers.", CONF_WINDOW_SIZE);

	/*
	 * TEST 4: Find Duplicates
	 * Check no duplicate values are generated
	 */
	qsort(numbers, CONF_ITERATIONS, sizeof(numbers[0]), compare_numbers_ascending);
	for (i = 1; i < CONF_ITERATIONS; i++) {
		if (numbers[i] == numbers[i - 1]) {
			T_ASSERT_NE_ULLONG(numbers[i], numbers[i - 1], "read_random generated duplicate values.");
		}
	}
	T_PASS("Test did not find any duplicates as expected.");

	return KERN_SUCCESS;
}
/* KCDATA kernel api tests */
static struct kcdata_descriptor test_kc_data; //, test_kc_data2;

struct sample_disk_io_stats {
	uint64_t disk_reads_count;
	uint64_t disk_reads_size;
	uint64_t io_priority_count[4];
	uint64_t io_priority_size;
} __attribute__((packed));

struct kcdata_subtype_descriptor test_disk_io_stats_def[] = {
	{
		.kcs_flags       = KCS_SUBTYPE_FLAGS_NONE,
		.kcs_elem_type   = KC_ST_UINT64,
		.kcs_elem_offset = 0 * sizeof(uint64_t),
		.kcs_elem_size   = sizeof(uint64_t),
		.kcs_name        = "disk_reads_count"
	},
	{
		.kcs_flags       = KCS_SUBTYPE_FLAGS_NONE,
		.kcs_elem_type   = KC_ST_UINT64,
		.kcs_elem_offset = 1 * sizeof(uint64_t),
		.kcs_elem_size   = sizeof(uint64_t),
		.kcs_name        = "disk_reads_size"
	},
	{
		.kcs_flags       = KCS_SUBTYPE_FLAGS_ARRAY,
		.kcs_elem_type   = KC_ST_UINT64,
		.kcs_elem_offset = 2 * sizeof(uint64_t),
		.kcs_elem_size   = KCS_SUBTYPE_PACK_SIZE(4, sizeof(uint64_t)),
		.kcs_name        = "io_priority_count"
	},
	{
		.kcs_flags       = KCS_SUBTYPE_FLAGS_ARRAY,
		.kcs_elem_type   = KC_ST_UINT64,
		.kcs_elem_offset = (2 + 4) * sizeof(uint64_t),
		.kcs_elem_size   = sizeof(uint64_t),
		.kcs_name        = "io_priority_size"
	},
};
kern_return_t
kcdata_api_test(void)
{
	kern_return_t retval = KERN_SUCCESS;

	/* test for NULL input */
	retval = kcdata_memory_static_init(NULL, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_STACKSHOT, 100, KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_memory_static_init with NULL struct");

	/* another negative test with buffer size < 32 bytes */
	char data[30] = "sample_disk_io_stats";
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)&data, KCDATA_BUFFER_BEGIN_CRASHINFO, sizeof(data),
	    KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "init with 30 bytes failed as expected with KERN_INSUFFICIENT_BUFFER_SIZE");

	/* test with COPYOUT for 0x0 address. Should return KERN_NO_ACCESS */
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_CRASHINFO, PAGE_SIZE,
	    KCFLAG_USE_COPYOUT);
	T_ASSERT(retval == KERN_NO_ACCESS, "writing to 0x0 returned KERN_NO_ACCESS");

	/* test with successful kcdata_memory_static_init */
	test_kc_data.kcd_length   = 0xdeadbeef;
	mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
	T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");

	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
	    KCFLAG_USE_MEMCOPY);

	T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");

	T_ASSERT(test_kc_data.kcd_length == PAGE_SIZE, "kcdata length is set correctly to PAGE_SIZE.");
	T_LOG("addr_begin 0x%llx and end 0x%llx and address 0x%llx", test_kc_data.kcd_addr_begin, test_kc_data.kcd_addr_end, address);
	T_ASSERT(test_kc_data.kcd_addr_begin == address, "kcdata begin address is correct 0x%llx", (uint64_t)address);

	/* verify we have BEGIN and END HEADERS set */
	uint32_t * mem = (uint32_t *)address;
	T_ASSERT(mem[0] == KCDATA_BUFFER_BEGIN_STACKSHOT, "buffer does contain KCDATA_BUFFER_BEGIN_STACKSHOT");
	T_ASSERT(mem[4] == KCDATA_TYPE_BUFFER_END, "KCDATA_TYPE_BUFFER_END is appended as expected");
	T_ASSERT(mem[5] == 0, "size of BUFFER_END tag is zero");

	/* verify kcdata_memory_get_used_bytes() */
	uint64_t bytes_used = 0;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	T_ASSERT(bytes_used == (2 * sizeof(struct kcdata_item)), "bytes_used api returned expected %llu", bytes_used);

	/* test for kcdata_get_memory_addr() */

	mach_vm_address_t user_addr = 0;
	/* negative test for NULL user_addr AND/OR kcdata_descriptor */
	retval = kcdata_get_memory_addr(NULL, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL struct -> KERN_INVALID_ARGUMENT");

	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), NULL);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL user_addr -> KERN_INVALID_ARGUMENT");

	/* successful case with size 0. Yes this is expected to succeed as just an item type could be used as boolean */
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_USECS_SINCE_EPOCH, 0, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Successfully got kcdata entry for 0 size data");
	T_ASSERT(user_addr == test_kc_data.kcd_addr_end, "0 sized data did not add any extra buffer space");

	/* successful case with valid size. */
	user_addr = 0xdeadbeef;
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "kcdata_get_memory_addr with valid values succeeded.");
	T_ASSERT(user_addr > test_kc_data.kcd_addr_begin, "user_addr is in range of buffer");
	T_ASSERT(user_addr < test_kc_data.kcd_addr_end, "user_addr is in range of buffer");

	/* Try creating an item with really large size */
	user_addr  = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, PAGE_SIZE * 4, &user_addr);
	T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "Allocating entry with size > buffer -> KERN_INSUFFICIENT_BUFFER_SIZE");
	T_ASSERT(user_addr == 0xdeadbeef, "user_addr remained unaffected with failed kcdata_get_memory_addr");
	T_ASSERT(bytes_used == kcdata_memory_get_used_bytes(&test_kc_data), "The data structure should be unaffected");

	/* verify convenience functions for uint32_with_description */
	retval = kcdata_add_uint32_with_description(&test_kc_data, 0xbdc0ffee, "This is bad coffee");
	T_ASSERT(retval == KERN_SUCCESS, "add uint32 with description succeeded.");

	retval = kcdata_add_uint64_with_description(&test_kc_data, 0xf001badc0ffee, "another 8 byte no.");
	T_ASSERT(retval == KERN_SUCCESS, "add uint64 with desc succeeded.");

	/* verify creating an KCDATA_TYPE_ARRAY here */
	user_addr  = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	/* save memory address where the array will come up */
	struct kcdata_item * item_p = (struct kcdata_item *)test_kc_data.kcd_addr_end;

	retval = kcdata_get_memory_addr_for_array(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), 20, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Array of 20 integers should be possible");
	T_ASSERT(user_addr != 0xdeadbeef, "user_addr is updated as expected");
	T_ASSERT((kcdata_memory_get_used_bytes(&test_kc_data) - bytes_used) >= 20 * sizeof(uint64_t), "memory allocation is in range");
	kcdata_iter_t iter = kcdata_iter(item_p, (unsigned long)(PAGE_SIZE - kcdata_memory_get_used_bytes(&test_kc_data)));
	T_ASSERT(kcdata_iter_array_elem_count(iter) == 20, "array count is 20");

	/* FIXME add tests here for ranges of sizes and counts */

	T_ASSERT(item_p->flags == (((uint64_t)KCDATA_TYPE_MACH_ABSOLUTE_TIME << 32) | 20), "flags are set correctly");

	/* test adding of custom type */

	retval = kcdata_add_type_definition(&test_kc_data, 0x999, data, &test_disk_io_stats_def[0],
	    sizeof(test_disk_io_stats_def) / sizeof(struct kcdata_subtype_descriptor));
	T_ASSERT(retval == KERN_SUCCESS, "adding custom type succeeded.");

	return KERN_SUCCESS;
}
/*
 * kern_return_t
 * kcdata_api_assert_tests()
 * {
 *      kern_return_t retval       = 0;
 *      void * assert_check_retval = NULL;
 *      test_kc_data2.kcd_length   = 0xdeadbeef;
 *      mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
 *      T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");
 *
 *      retval = kcdata_memory_static_init(&test_kc_data2, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
 *          KCFLAG_USE_MEMCOPY);
 *
 *      T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");
 *
 *      retval = T_REGISTER_ASSERT_CHECK("KCDATA_DESC_MAXLEN", &assert_check_retval);
 *      T_ASSERT(retval == KERN_SUCCESS, "registered assert widget");
 *
 *      // this will assert
 *      retval = kcdata_add_uint32_with_description(&test_kc_data2, 0xc0ffee, "really long description string for kcdata");
 *      T_ASSERT(retval == KERN_INVALID_ARGUMENT, "API param check returned KERN_INVALID_ARGUMENT correctly");
 *      T_ASSERT(assert_check_retval == (void *)XT_RET_W_SUCCESS, "assertion handler verified that it was hit");
 *
 *      return KERN_SUCCESS;
 * }
 */
#if defined(__arm__) || defined(__arm64__)

#include <arm/pmap.h>

#define MAX_PMAP_OBJECT_ELEMENT 100000

extern struct vm_object pmap_object_store; /* store pt pages */
extern unsigned long gPhysBase, gPhysSize, first_avail;

/*
 * Define macros to traverse the pmap object structures and extract
 * physical page number with information from low globals only.
 * This emulates how Astris extracts information from a coredump.
 */
#if defined(__arm64__)

static inline uintptr_t
astris_vm_page_unpack_ptr(uintptr_t p)
{
	if (!p) {
		return (uintptr_t)0;
	}

	return (p & lowGlo.lgPmapMemFromArrayMask)
	       ? lowGlo.lgPmapMemStartAddr + (p & ~(lowGlo.lgPmapMemFromArrayMask)) * lowGlo.lgPmapMemPagesize
	       : lowGlo.lgPmapMemPackedBaseAddr + (p << lowGlo.lgPmapMemPackedShift);
}

// assume next pointer is the first element
#define astris_vm_page_queue_next(qc) (astris_vm_page_unpack_ptr(*((uint32_t *)(qc))))

#endif /* defined(__arm64__) */

#if defined(__arm__)

// assume next pointer is the first element
#define astris_vm_page_queue_next(qc) *((uintptr_t *)(qc))

#endif /* defined(__arm__) */

#define astris_vm_page_queue_first(q) astris_vm_page_queue_next(q)

#define astris_vm_page_queue_end(q, qe) ((q) == (qe))

#define astris_vm_page_queue_iterate(head, elt)                                                           \
	for ((elt) = (uintptr_t)astris_vm_page_queue_first((head)); !astris_vm_page_queue_end((head), (elt)); \
	    (elt) = (uintptr_t)astris_vm_page_queue_next(((elt) + (uintptr_t)lowGlo.lgPmapMemChainOffset)))

#define astris_ptoa(x) ((vm_address_t)(x) << lowGlo.lgPageShift)
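/*
 * Worked example (illustrative numbers, not real values): if
 * lgPmapMemPackedShift were 6 and lgPmapMemPackedBaseAddr were
 * 0xffffff8000000000, a packed pointer p = 0x100 that falls outside the
 * from-array mask would unpack to 0xffffff8000000000 + (0x100 << 6) =
 * 0xffffff8000004000. The same arithmetic is what a coredump consumer
 * such as Astris performs offline, using only the low globals.
 */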
static inline ppnum_t
astris_vm_page_get_phys_page(uintptr_t m)
{
	return (m >= lowGlo.lgPmapMemStartAddr && m < lowGlo.lgPmapMemEndAddr)
	       ? (ppnum_t)((m - lowGlo.lgPmapMemStartAddr) / lowGlo.lgPmapMemPagesize + lowGlo.lgPmapMemFirstppnum)
	       : *((ppnum_t *)(m + lowGlo.lgPmapMemPageOffset));
}
kern_return_t
pmap_coredump_test(void)
{
	int iter = 0;
	uintptr_t p;

	T_LOG("Testing coredump info for PMAP.");

	T_ASSERT_GE_ULONG(lowGlo.lgStaticAddr, gPhysBase, NULL);
	T_ASSERT_LE_ULONG(lowGlo.lgStaticAddr + lowGlo.lgStaticSize, first_avail, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMajorVersion, 3, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMinorVersion, 2, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMagic, LOWGLO_LAYOUT_MAGIC, NULL);

	// check the constant values in lowGlo
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemQ, ((typeof(lowGlo.lgPmapMemQ)) & (pmap_object_store.memq)), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPageOffset, offsetof(struct vm_page_with_ppnum, vmp_phys_page), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemChainOffset, offsetof(struct vm_page, vmp_listq), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPagesize, sizeof(struct vm_page), NULL);

#if defined(__arm64__)
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemFromArrayMask, VM_PAGE_PACKED_FROM_ARRAY, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedShift, VM_PAGE_PACKED_PTR_SHIFT, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedBaseAddr, VM_PAGE_PACKED_PTR_BASE, NULL);
#endif

	vm_object_lock_shared(&pmap_object_store);
	astris_vm_page_queue_iterate(lowGlo.lgPmapMemQ, p)
	{
		ppnum_t ppnum   = astris_vm_page_get_phys_page(p);
		pmap_paddr_t pa = (pmap_paddr_t)astris_ptoa(ppnum);
		T_ASSERT_GE_ULONG(pa, gPhysBase, NULL);
		T_ASSERT_LT_ULONG(pa, gPhysBase + gPhysSize, NULL);
		iter++;
		T_ASSERT_LT_INT(iter, MAX_PMAP_OBJECT_ELEMENT, NULL);
	}
	vm_object_unlock(&pmap_object_store);

	T_ASSERT_GT_INT(iter, 0, NULL);
	return KERN_SUCCESS;
}
#endif /* defined(__arm__) || defined(__arm64__) */
struct ts_kern_prim_test_args {
	int *end_barrier;
	int *notify_b;
	int *wait_event_b;
	int before_num;
	int *notify_a;
	int *wait_event_a;
	int after_num;
	int priority_to_check;
};

static void
wait_threads(
	int* var,
	int num)
{
	if (var != NULL) {
		while (os_atomic_load(var, acquire) != num) {
			assert_wait((event_t) var, THREAD_UNINT);
			if (os_atomic_load(var, acquire) != num) {
				(void) thread_block(THREAD_CONTINUE_NULL);
			} else {
				clear_wait(current_thread(), THREAD_AWAKENED);
			}
		}
	}
}

static void
wake_threads(
	int* var)
{
	if (var) {
		os_atomic_inc(var, relaxed);
		thread_wakeup((event_t) var);
	}
}
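/*
 * wait_threads()/wake_threads() implement a small counting barrier on top
 * of the assert_wait()/thread_block() idiom: a waiter re-checks the
 * counter after assert_wait() to close the race with a concurrent
 * wake_threads(), and cancels the queued wait with clear_wait() when the
 * counter already matches, so it never blocks on a wakeup it already saw.
 */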
extern void IOSleep(int);

static void
thread_lock_unlock_kernel_primitive(
	void *args,
	__unused wait_result_t wr)
{
	thread_t thread = current_thread();
	struct ts_kern_prim_test_args *info = (struct ts_kern_prim_test_args*) args;
	int pri;

	thread_lock(thread);
	pri = thread->sched_pri;
	thread_unlock(thread);

	wait_threads(info->wait_event_b, info->before_num);
	wake_threads(info->notify_b);

	tstile_test_prim_lock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

	wake_threads(info->notify_a);
	wait_threads(info->wait_event_a, info->after_num);

	IOSleep(100);

	if (info->priority_to_check) {
		thread_lock(thread);
		pri = thread->sched_pri;
		thread_unlock(thread);
		T_ASSERT(pri == info->priority_to_check, "Priority thread: current sched %d sched wanted %d", pri, info->priority_to_check);
	}

	tstile_test_prim_unlock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

	wake_threads(info->end_barrier);
	thread_terminate_self();
}
kern_return_t
ts_kernel_primitive_test(void)
{
	thread_t owner, thread1, thread2;
	struct ts_kern_prim_test_args targs[2] = {};
	kern_return_t result;
	int end_barrier = 0;
	int owner_locked = 0;
	int waiters_ready = 0;

	T_LOG("Testing turnstile kernel primitive");

	targs[0].notify_b = NULL;
	targs[0].wait_event_b = NULL;
	targs[0].before_num = 0;
	targs[0].notify_a = &owner_locked;
	targs[0].wait_event_a = &waiters_ready;
	targs[0].after_num = 2;
	targs[0].priority_to_check = 90;
	targs[0].end_barrier = &end_barrier;

	// Start owner with priority 80
	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[0], 80, &owner);
	T_ASSERT(result == KERN_SUCCESS, "Starting owner");

	targs[1].notify_b = &waiters_ready;
	targs[1].wait_event_b = &owner_locked;
	targs[1].before_num = 1;
	targs[1].notify_a = NULL;
	targs[1].wait_event_a = NULL;
	targs[1].after_num = 0;
	targs[1].priority_to_check = 0;
	targs[1].end_barrier = &end_barrier;

	// Start waiters with priority 85 and 90
	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 85, &thread1);
	T_ASSERT(result == KERN_SUCCESS, "Starting thread1");

	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 90, &thread2);
	T_ASSERT(result == KERN_SUCCESS, "Starting thread2");

	wait_threads(&end_barrier, 3);

	thread_deallocate(owner);
	thread_deallocate(thread1);
	thread_deallocate(thread2);

	return KERN_SUCCESS;
}
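/*
 * Scenario recap: the owner takes the turnstile-backed test lock at
 * priority 80, then two waiters at 85 and 90 block on it. The owner's
 * priority_to_check of 90 asserts that the turnstile pushed the highest
 * waiter's priority onto the lock holder before it unlocks.
 */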
#define NUM_THREADS 4

struct synch_test_common {
	unsigned int nthreads;
	thread_t *threads;
	int max_pri;
	int test_done;
};

static kern_return_t
init_synch_test_common(struct synch_test_common *info, unsigned int nthreads)
{
	info->nthreads = nthreads;
	info->threads = kalloc(sizeof(thread_t) * nthreads);
	if (!info->threads) {
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}

static void
destroy_synch_test_common(struct synch_test_common *info)
{
	kfree(info->threads, sizeof(thread_t) * info->nthreads);
}
static void
start_threads(thread_continue_t func, struct synch_test_common *info, bool sleep_after_first)
{
	thread_t thread;
	kern_return_t result;
	uint i;
	int priority = 75;

	info->test_done = 0;

	for (i = 0; i < info->nthreads; i++) {
		info->threads[i] = NULL;
	}

	info->max_pri = priority + (info->nthreads - 1) * 5;
	if (info->max_pri > 95) {
		info->max_pri = 95;
	}

	for (i = 0; i < info->nthreads; i++) {
		result = kernel_thread_start_priority((thread_continue_t)func, info, priority, &thread);
		os_atomic_store(&info->threads[i], thread, release);
		T_ASSERT(result == KERN_SUCCESS, "Starting thread %d, priority %d, %p", i, priority, thread);

		priority += 5;

		if (i == 0 && sleep_after_first) {
			IOSleep(100);
		}
	}
}
static unsigned int
get_max_pri(struct synch_test_common * info)
{
	return info->max_pri;
}

static void
wait_all_thread(struct synch_test_common * info)
{
	wait_threads(&info->test_done, info->nthreads);
}

static void
notify_waiter(struct synch_test_common * info)
{
	wake_threads(&info->test_done);
}
static void
wait_for_waiters(struct synch_test_common *info)
{
	uint i, j;
	thread_t thread;

	for (i = 0; i < info->nthreads; i++) {
		j = 0;
		while (os_atomic_load(&info->threads[i], acquire) == NULL) {
			if (j % 100 == 0) {
				IOSleep(10);
			}
			j++;
		}

		if (info->threads[i] != current_thread()) {
			j = 0;
			do {
				thread = os_atomic_load(&info->threads[i], relaxed);
				if (thread == (thread_t) 1) {
					break;
				}

				if (!(thread->state & TH_RUN)) {
					break;
				}

				if (j % 100 == 0) {
					IOSleep(100);
				}
				j++;

				if (thread->started == FALSE) {
					continue;
				}
			} while (thread->state & TH_RUN);
		}
	}
}
static void
exclude_current_waiter(struct synch_test_common *info)
{
	uint i, j;

	for (i = 0; i < info->nthreads; i++) {
		j = 0;
		while (os_atomic_load(&info->threads[i], acquire) == NULL) {
			if (j % 100 == 0) {
				IOSleep(10);
			}
			j++;
		}

		if (os_atomic_load(&info->threads[i], acquire) == current_thread()) {
			os_atomic_store(&info->threads[i], (thread_t)1, release);
			return;
		}
	}
}
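/*
 * Note: (thread_t)1 is used as a sentinel meaning "this slot's thread has
 * excluded itself from the waiter set"; wait_for_waiters() above checks
 * for it explicitly before touching thread state, so the fake pointer is
 * never dereferenced.
 */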
#define MTX_LOCK 0
#define RW_LOCK 1

struct info_sleep_inheritor_test {
	struct synch_test_common head;
	lck_mtx_t mtx_lock;
	lck_rw_t rw_lock;
	decl_lck_mtx_gate_data(, gate);
	boolean_t gate_closed;
	int prim_type;
	boolean_t work_to_do;
	unsigned int max_pri;
	unsigned int steal_pri;
	int synch_value;
	int synch;
	int value;
	int handoff_failure;
	thread_t thread_inheritor;
};
static void
primitive_lock(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_lock(&info->mtx_lock);
		break;
	case RW_LOCK:
		lck_rw_lock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static void
primitive_unlock(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_unlock(&info->mtx_lock);
		break;
	case RW_LOCK:
		lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static wait_result_t
primitive_sleep_with_inheritor(struct info_sleep_inheritor_test *info)
{
	wait_result_t ret = KERN_SUCCESS;
	switch (info->prim_type) {
	case MTX_LOCK:
		ret = lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor,
		    info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
		break;
	case RW_LOCK:
		ret = lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor,
		    info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}

	return ret;
}

static void
primitive_wakeup_one_with_inheritor(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
	case RW_LOCK:
		wakeup_one_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED, LCK_WAKE_DEFAULT, &info->thread_inheritor);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static void
primitive_wakeup_all_with_inheritor(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
	case RW_LOCK:
		wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static void
primitive_change_sleep_inheritor(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
	case RW_LOCK:
		change_sleep_inheritor((event_t) &info->thread_inheritor, info->thread_inheritor);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static kern_return_t
primitive_gate_try_close(struct info_sleep_inheritor_test *info)
{
	kern_return_t ret = KERN_SUCCESS;
	switch (info->prim_type) {
	case MTX_LOCK:
		ret = lck_mtx_gate_try_close(&info->mtx_lock, &info->gate);
		break;
	case RW_LOCK:
		ret = lck_rw_gate_try_close(&info->rw_lock, &info->gate);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
	return ret;
}

static gate_wait_result_t
primitive_gate_wait(struct info_sleep_inheritor_test *info)
{
	gate_wait_result_t ret = GATE_OPENED;
	switch (info->prim_type) {
	case MTX_LOCK:
		ret = lck_mtx_gate_wait(&info->mtx_lock, &info->gate, LCK_SLEEP_DEFAULT,
		    THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
		break;
	case RW_LOCK:
		ret = lck_rw_gate_wait(&info->rw_lock, &info->gate, LCK_SLEEP_DEFAULT,
		    THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
	return ret;
}

static void
primitive_gate_open(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_gate_open(&info->mtx_lock, &info->gate);
		break;
	case RW_LOCK:
		lck_rw_gate_open(&info->rw_lock, &info->gate);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static void
primitive_gate_close(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_gate_close(&info->mtx_lock, &info->gate);
		break;
	case RW_LOCK:
		lck_rw_gate_close(&info->rw_lock, &info->gate);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static void
primitive_gate_steal(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_gate_steal(&info->mtx_lock, &info->gate);
		break;
	case RW_LOCK:
		lck_rw_gate_steal(&info->rw_lock, &info->gate);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static kern_return_t
primitive_gate_handoff(struct info_sleep_inheritor_test *info, int flags)
{
	kern_return_t ret = KERN_SUCCESS;
	switch (info->prim_type) {
	case MTX_LOCK:
		ret = lck_mtx_gate_handoff(&info->mtx_lock, &info->gate, flags);
		break;
	case RW_LOCK:
		ret = lck_rw_gate_handoff(&info->rw_lock, &info->gate, flags);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
	return ret;
}

static void
primitive_gate_assert(struct info_sleep_inheritor_test *info, int type)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_gate_assert(&info->mtx_lock, &info->gate, type);
		break;
	case RW_LOCK:
		lck_rw_gate_assert(&info->rw_lock, &info->gate, type);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static void
primitive_gate_init(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_gate_init(&info->mtx_lock, &info->gate);
		break;
	case RW_LOCK:
		lck_rw_gate_init(&info->rw_lock, &info->gate);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static void
primitive_gate_destroy(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_gate_destroy(&info->mtx_lock, &info->gate);
		break;
	case RW_LOCK:
		lck_rw_gate_destroy(&info->rw_lock, &info->gate);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}
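/*
 * All primitive_*() helpers above dispatch on info->prim_type so every
 * test body below can be exercised twice, once over a lck_mtx_t
 * (MTX_LOCK) and once over a lck_rw_t (RW_LOCK), without duplicating the
 * test logic.
 */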
static void
thread_inheritor_like_mutex(
	void *args,
	__unused wait_result_t wr)
{
	wait_result_t wait;

	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	/*
	 * spin here to start concurrently
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		info->thread_inheritor = current_thread();
	} else {
		wait = primitive_sleep_with_inheritor(info);
		T_ASSERT(wait == THREAD_AWAKENED || wait == THREAD_NOT_WAITING, "sleep_with_inheritor return");
	}
	primitive_unlock(info);

	IOSleep(100);
	info->value++;

	primitive_lock(info);

	T_ASSERT(info->thread_inheritor == current_thread(), "thread_inheritor is %p", info->thread_inheritor);
	primitive_wakeup_one_with_inheritor(info);
	T_LOG("woken up %p", info->thread_inheritor);

	if (info->thread_inheritor == NULL) {
		T_ASSERT(info->handoff_failure == 0, "handoff failures");
		info->handoff_failure++;
	} else {
		T_ASSERT(info->thread_inheritor != current_thread(), "thread_inheritor is %p", info->thread_inheritor);
		thread_deallocate(info->thread_inheritor);
	}

	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
static void
thread_just_inheritor_do_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		info->thread_inheritor = current_thread();
		primitive_unlock(info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		max_pri = get_max_pri((struct synch_test_common *) info);
		T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

		os_atomic_store(&info->synch, 0, relaxed);
		primitive_lock(info);
		primitive_wakeup_all_with_inheritor(info);
	} else {
		wake_threads(&info->synch);
		primitive_sleep_with_inheritor(info);
	}

	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
static void
thread_steal_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		info->thread_inheritor = current_thread();
		exclude_current_waiter((struct synch_test_common *)info);

		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
		primitive_unlock(info);

		wait_threads(&info->synch, info->synch_value - 2);

		wait_for_waiters((struct synch_test_common *)info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
		primitive_lock(info);
		if (info->thread_inheritor == current_thread()) {
			primitive_wakeup_all_with_inheritor(info);
		}
	} else {
		if (info->steal_pri == 0) {
			info->steal_pri = my_pri;
			info->thread_inheritor = current_thread();
			primitive_change_sleep_inheritor(info);
			exclude_current_waiter((struct synch_test_common *)info);

			primitive_unlock(info);

			wait_threads(&info->synch, info->synch_value - 2);

			T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);

			T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);

			primitive_lock(info);
			primitive_wakeup_all_with_inheritor(info);
		} else {
			if (my_pri > info->steal_pri) {
				info->steal_pri = my_pri;
			}
			wake_threads(&info->synch);
			primitive_sleep_with_inheritor(info);
			exclude_current_waiter((struct synch_test_common *)info);
		}
	}
	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
static void
thread_no_inheritor_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	info->value--;
	if (info->value == 0) {
		primitive_wakeup_all_with_inheritor(info);
	} else {
		info->thread_inheritor = NULL;
		primitive_sleep_with_inheritor(info);
	}

	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
static void
thread_mtx_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	int i;
	unsigned int rand;
	unsigned int mod_rand;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	for (i = 0; i < 10; i++) {
		lck_mtx_lock(&info->mtx_lock);
		if (info->thread_inheritor == NULL) {
			info->thread_inheritor = current_thread();
			lck_mtx_unlock(&info->mtx_lock);

			T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

			wait_threads(&info->synch, info->synch_value - 1);
			wait_for_waiters((struct synch_test_common *)info);
			max_pri = get_max_pri((struct synch_test_common *) info);
			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

			os_atomic_store(&info->synch, 0, relaxed);

			lck_mtx_lock(&info->mtx_lock);
			info->thread_inheritor = NULL;
			wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
			lck_mtx_unlock(&info->mtx_lock);
			continue;
		}

		read_random(&rand, sizeof(rand));
		mod_rand = rand % 2;

		wake_threads(&info->synch);
		switch (mod_rand) {
		case 0:
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor,
			    info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		case 1:
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor,
			    info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			break;
		default:
			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
		}
	}

	/*
	 * spin here to stop using the lock as mutex
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	for (i = 0; i < 10; i++) {
		/* read_random might sleep so read it before acquiring the mtx as spin */
		read_random(&rand, sizeof(rand));

		lck_mtx_lock_spin(&info->mtx_lock);
		if (info->thread_inheritor == NULL) {
			info->thread_inheritor = current_thread();
			lck_mtx_unlock(&info->mtx_lock);

			T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);
			max_pri = get_max_pri((struct synch_test_common *) info);
			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

			lck_mtx_lock_spin(&info->mtx_lock);
			info->thread_inheritor = NULL;
			wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
			lck_mtx_unlock(&info->mtx_lock);
			continue;
		}

		mod_rand = rand % 2;
		switch (mod_rand) {
		case 0:
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN, (event_t) &info->thread_inheritor,
			    info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		case 1:
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN_ALWAYS, (event_t) &info->thread_inheritor,
			    info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		default:
			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
		}
	}

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
static void
thread_rw_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	int i;
	lck_rw_type_t type;
	unsigned int rand;
	unsigned int mod_rand;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	for (i = 0; i < 10; i++) {
		type = LCK_RW_TYPE_SHARED;
		lck_rw_lock(&info->rw_lock, type);
		if (info->thread_inheritor == NULL) {
			type = LCK_RW_TYPE_EXCLUSIVE;

			if (lck_rw_lock_shared_to_exclusive(&info->rw_lock)) {
				if (info->thread_inheritor == NULL) {
					info->thread_inheritor = current_thread();
					lck_rw_unlock(&info->rw_lock, type);
					wait_threads(&info->synch, info->synch_value - 1);

					T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
					wait_for_waiters((struct synch_test_common *)info);
					max_pri = get_max_pri((struct synch_test_common *) info);
					T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

					os_atomic_store(&info->synch, 0, relaxed);

					lck_rw_lock(&info->rw_lock, type);
					info->thread_inheritor = NULL;
					wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
					lck_rw_unlock(&info->rw_lock, type);
					continue;
				}
			} else {
				lck_rw_lock(&info->rw_lock, type);
			}
		}

		read_random(&rand, sizeof(rand));
		mod_rand = rand % 4;

		wake_threads(&info->synch);
		switch (mod_rand) {
		case 0:
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor,
			    info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, type);
			break;
		case 1:
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor,
			    info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			break;
		case 2:
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_SHARED, (event_t) &info->thread_inheritor,
			    info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_SHARED);
			break;
		case 3:
			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_EXCLUSIVE, (event_t) &info->thread_inheritor,
			    info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
			break;
		default:
			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
		}
	}

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
static void
test_sleep_with_wake_all(struct info_sleep_inheritor_test *info, int prim_type)
{
	info->prim_type = prim_type;

	info->synch_value = info->head.nthreads;
	info->synch = 0;
	info->thread_inheritor = NULL;

	start_threads((thread_continue_t)thread_just_inheritor_do_work, (struct synch_test_common *)info, TRUE);
	wait_all_thread((struct synch_test_common *)info);
}

static void
test_sleep_with_wake_one(struct info_sleep_inheritor_test *info, int prim_type)
{
	info->prim_type = prim_type;

	info->synch = 0;
	info->synch_value = info->head.nthreads;
	info->value = 0;
	info->handoff_failure = 0;
	info->thread_inheritor = NULL;

	start_threads((thread_continue_t)thread_inheritor_like_mutex, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);

	T_ASSERT(info->value == (int)info->head.nthreads, "value protected by sleep");
	T_ASSERT(info->handoff_failure == 1, "handoff failures");
}

static void
test_change_sleep_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
{
	info->prim_type = prim_type;

	info->thread_inheritor = NULL;
	info->steal_pri = 0;
	info->synch = 0;
	info->synch_value = info->head.nthreads;

	start_threads((thread_continue_t)thread_steal_work, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);
}

static void
test_no_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
{
	info->prim_type = prim_type;

	info->synch = 0;
	info->synch_value = info->head.nthreads;

	info->thread_inheritor = NULL;
	info->value = info->head.nthreads;

	start_threads((thread_continue_t)thread_no_inheritor_work, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);
}

static void
test_rw_lock(struct info_sleep_inheritor_test *info)
{
	info->thread_inheritor = NULL;
	info->value = info->head.nthreads;
	info->synch = 0;
	info->synch_value = info->head.nthreads;

	start_threads((thread_continue_t)thread_rw_work, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);
}

static void
test_mtx_lock(struct info_sleep_inheritor_test *info)
{
	info->thread_inheritor = NULL;
	info->value = info->head.nthreads;
	info->synch = 0;
	info->synch_value = info->head.nthreads;

	start_threads((thread_continue_t)thread_mtx_work, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);
}
kern_return_t
ts_kernel_sleep_inheritor_test(void)
{
	struct info_sleep_inheritor_test info = {};

	init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);

	lck_attr_t * lck_attr = lck_attr_alloc_init();
	lck_grp_attr_t * lck_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_t * lck_grp = lck_grp_alloc_init("test sleep_inheritor", lck_grp_attr);

	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
	lck_rw_init(&info.rw_lock, lck_grp, lck_attr);

	/*
	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
	 */
	T_LOG("Testing mtx sleep with inheritor and wake_all_with_inheritor");
	test_sleep_with_wake_all(&info, MTX_LOCK);

	/*
	 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
	 */
	T_LOG("Testing rw sleep with inheritor and wake_all_with_inheritor");
	test_sleep_with_wake_all(&info, RW_LOCK);

	/*
	 * Testing lck_mtx_sleep_with_inheritor and wakeup_one_with_inheritor
	 */
	T_LOG("Testing mtx sleep with inheritor and wake_one_with_inheritor");
	test_sleep_with_wake_one(&info, MTX_LOCK);

	/*
	 * Testing lck_rw_sleep_with_inheritor and wakeup_one_with_inheritor
	 */
	T_LOG("Testing rw sleep with inheritor and wake_one_with_inheritor");
	test_sleep_with_wake_one(&info, RW_LOCK);

	/*
	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
	 * and change_sleep_inheritor
	 */
	T_LOG("Testing change_sleep_inheritor with mtx sleep");
	test_change_sleep_inheritor(&info, MTX_LOCK);

	/*
	 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
	 * and change_sleep_inheritor
	 */
	T_LOG("Testing change_sleep_inheritor with rw sleep");
	test_change_sleep_inheritor(&info, RW_LOCK);

	/*
	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
	 * with inheritor NULL
	 */
	T_LOG("Testing inheritor NULL");
	test_no_inheritor(&info, MTX_LOCK);

	/*
	 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
	 * with inheritor NULL
	 */
	T_LOG("Testing inheritor NULL");
	test_no_inheritor(&info, RW_LOCK);

	/*
	 * Testing mtx locking combinations
	 */
	T_LOG("Testing mtx locking combinations");
	test_mtx_lock(&info);

	/*
	 * Testing rw locking combinations
	 */
	T_LOG("Testing rw locking combinations");
	test_rw_lock(&info);

	destroy_synch_test_common((struct synch_test_common *)&info);

	lck_attr_free(lck_attr);
	lck_grp_attr_free(lck_grp_attr);
	lck_rw_destroy(&info.rw_lock, lck_grp);
	lck_mtx_destroy(&info.mtx_lock, lck_grp);
	lck_grp_free(lck_grp);

	return KERN_SUCCESS;
}
static void
thread_gate_aggressive(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test *) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);
	if (info->thread_inheritor == NULL) {
		info->thread_inheritor = current_thread();
		primitive_gate_assert(info, GATE_ASSERT_OPEN);
		primitive_gate_close(info);
		exclude_current_waiter((struct synch_test_common *)info);

		primitive_unlock(info);

		wait_threads(&info->synch, info->synch_value - 2);
		wait_for_waiters((struct synch_test_common *)info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

		primitive_lock(info);
		if (info->thread_inheritor == current_thread()) {
			primitive_gate_open(info);
		}
	} else {
		if (info->steal_pri == 0) {
			info->steal_pri = my_pri;
			info->thread_inheritor = current_thread();
			primitive_gate_steal(info);
			exclude_current_waiter((struct synch_test_common *)info);

			primitive_unlock(info);
			wait_threads(&info->synch, info->synch_value - 2);

			T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);
			T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "gate keeper priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);

			primitive_lock(info);
			primitive_gate_open(info);
		} else {
			if (my_pri > info->steal_pri) {
				info->steal_pri = my_pri;
			}
			wake_threads(&info->synch);
			primitive_gate_wait(info);
			exclude_current_waiter((struct synch_test_common *)info);
		}
	}
	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}

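/*
 * Worker for the gate handoff test: the gate is used like a mutex, so each
 * thread either closes it with try_close or blocks in gate_wait until it
 * receives a GATE_HANDOFF, bumps the protected value, and hands the gate
 * to the next waiter; the last thread finds no waiter, which is the single
 * handoff_failure the test expects.
 */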
static void
thread_gate_like_mutex(
	void *args,
	__unused wait_result_t wr)
{
	gate_wait_result_t wait;
	kern_return_t ret;
	uint my_pri = current_thread()->sched_pri;

	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test *) args;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	/*
	 * spin here to start concurrently
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	primitive_lock(info);

	if (primitive_gate_try_close(info) != KERN_SUCCESS) {
		wait = primitive_gate_wait(info);
		T_ASSERT(wait == GATE_HANDOFF, "gate_wait return");
	}

	primitive_gate_assert(info, GATE_ASSERT_HELD);

	primitive_unlock(info);

	/* critical section protected by the gate */
	IOSleep(100);
	info->value++;

	primitive_lock(info);

	ret = primitive_gate_handoff(info, GATE_HANDOFF_DEFAULT);
	if (ret == KERN_NOT_WAITING) {
		T_ASSERT(info->handoff_failure == 0, "handoff failures");
		primitive_gate_handoff(info, GATE_HANDOFF_OPEN_IF_NO_WAITERS);
		info->handoff_failure++;
	}

	primitive_unlock(info);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}

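/*
 * Worker for the gate push test: one thread closes the gate and performs
 * the work while everybody else blocks in gate_wait, pushing on the gate
 * holder; the holder asserts it runs at the maximum priority of the
 * waiting threads before opening the gate.
 */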
static void
thread_just_one_do_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test *) args;
	uint my_pri = current_thread()->sched_pri;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);
check_again:
	if (info->work_to_do) {
		if (primitive_gate_try_close(info) == KERN_SUCCESS) {
			primitive_gate_assert(info, GATE_ASSERT_HELD);
			primitive_unlock(info);

			T_LOG("Thread pri %d acquired the gate %p", my_pri, current_thread());
			wait_threads(&info->synch, info->synch_value - 1);
			wait_for_waiters((struct synch_test_common *)info);
			max_pri = get_max_pri((struct synch_test_common *) info);
			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "gate owner priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
			os_atomic_store(&info->synch, 0, relaxed);

			primitive_lock(info);
			info->work_to_do = FALSE;
			primitive_gate_open(info);
		} else {
			primitive_gate_assert(info, GATE_ASSERT_CLOSED);
			wake_threads(&info->synch);
			primitive_gate_wait(info);
			goto check_again;
		}
	}
	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}

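/*
 * Drivers for the gate tests: each configures the protected state and the
 * synch counters, spawns NUM_THREADS workers through start_threads(), and
 * destroys the gate once wait_all_thread() reports that every worker has
 * exited.
 */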
static void
test_gate_push(struct info_sleep_inheritor_test *info, int prim_type)
{
	info->prim_type = prim_type;

	primitive_gate_init(info);
	info->work_to_do = TRUE;
	info->synch = 0;
	info->synch_value = NUM_THREADS;

	start_threads((thread_continue_t)thread_just_one_do_work, (struct synch_test_common *) info, TRUE);
	wait_all_thread((struct synch_test_common *)info);

	primitive_gate_destroy(info);
}

static void
test_gate_handoff(struct info_sleep_inheritor_test *info, int prim_type)
{
	info->prim_type = prim_type;

	primitive_gate_init(info);

	info->synch = 0;
	info->synch_value = NUM_THREADS;

	info->value = 0;
	info->handoff_failure = 0;

	start_threads((thread_continue_t)thread_gate_like_mutex, (struct synch_test_common *)info, false);
	wait_all_thread((struct synch_test_common *)info);

	T_ASSERT(info->value == NUM_THREADS, "value protected by gate");
	T_ASSERT(info->handoff_failure == 1, "handoff failures");

	primitive_gate_destroy(info);
}

static void
test_gate_steal(struct info_sleep_inheritor_test *info, int prim_type)
{
	info->prim_type = prim_type;

	primitive_gate_init(info);

	info->synch = 0;
	info->synch_value = NUM_THREADS;
	info->thread_inheritor = NULL;
	info->steal_pri = 0;

	start_threads((thread_continue_t)thread_gate_aggressive, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);

	primitive_gate_destroy(info);
}

kern_return_t
ts_kernel_gate_test(void)
{
	struct info_sleep_inheritor_test info = {};

	T_LOG("Testing gate primitive");

	init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);

	lck_attr_t *lck_attr = lck_attr_alloc_init();
	lck_grp_attr_t *lck_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_t *lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);

	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
	lck_rw_init(&info.rw_lock, lck_grp, lck_attr);

	/*
	 * Testing the priority inherited by the keeper
	 * lck_mtx_gate_try_close, lck_mtx_gate_open, lck_mtx_gate_wait
	 */
	T_LOG("Testing gate push, lck");
	test_gate_push(&info, MTX_LOCK);

	T_LOG("Testing gate push, rw");
	test_gate_push(&info, RW_LOCK);

	/*
	 * Testing the handoff
	 * lck_mtx_gate_wait, lck_mtx_gate_handoff
	 */
	T_LOG("Testing gate handoff, lck");
	test_gate_handoff(&info, MTX_LOCK);

	T_LOG("Testing gate handoff, rw");
	test_gate_handoff(&info, RW_LOCK);

	/*
	 * Testing the steal
	 * lck_mtx_gate_close, lck_mtx_gate_wait, lck_mtx_gate_steal, lck_mtx_gate_handoff
	 */
	T_LOG("Testing gate steal, lck");
	test_gate_steal(&info, MTX_LOCK);

	T_LOG("Testing gate steal, rw");
	test_gate_steal(&info, RW_LOCK);

	destroy_synch_test_common((struct synch_test_common *)&info);

	lck_attr_free(lck_attr);
	lck_grp_attr_free(lck_grp_attr);
	/* tear down the rw lock initialized above as well as the mtx */
	lck_rw_destroy(&info.rw_lock, lck_grp);
	lck_mtx_destroy(&info.mtx_lock, lck_grp);
	lck_grp_free(lck_grp);

	return KERN_SUCCESS;
}

#define NUM_THREAD_CHAIN 6

struct turnstile_chain_test {
	struct synch_test_common head;
	lck_mtx_t mtx_lock;
	int synch_value;
	int synch;
	int synch2;
	gate_t gates[NUM_THREAD_CHAIN];
};

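/*
 * Worker for the mixed sleep/gate chain: even-indexed threads close
 * gates[i] up front; afterwards each odd thread waits on the gate of its
 * even predecessor while each even thread (except threads[0]) sleeps with
 * its predecessor as inheritor, chaining the turnstile push down to
 * threads[0].
 */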
static void
thread_sleep_gate_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test *) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	int i;
	thread_t inheritor = NULL, woken_up;
	event_t wait_event, wake_event;
	kern_return_t ret;

	T_LOG("Started thread pri %d %p", my_pri, self);

	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */
	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	for (i = 0; i < info->head.nthreads; i = i + 2) {
		// even threads will close a gate
		if (info->head.threads[i] == self) {
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
		}
	}

	wake_threads(&info->synch2);
	wait_threads(&info->synch2, info->synch_value);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		wait_threads(&info->synch, info->synch_value - 1);
		wait_for_waiters((struct synch_test_common *)info);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
		lck_mtx_unlock(&info->mtx_lock);
	} else {
		wait_event = NULL;
		wake_event = NULL;
		for (i = 0; i < info->head.nthreads; i++) {
			if (info->head.threads[i] == self) {
				inheritor = info->head.threads[i - 1];
				wait_event = (event_t) &info->head.threads[i - 1];
				wake_event = (event_t) &info->head.threads[i];
				break;
			}
		}
		assert(wait_event != NULL);

		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);

		if (i % 2 != 0) {
			// odd threads wait on the gate closed by the previous even thread
			lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

			ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
			if (ret == KERN_SUCCESS) {
				T_ASSERT(i != (info->head.nthreads - 1), "thread id");
				T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
			} else {
				T_ASSERT(i == (info->head.nthreads - 1), "thread id");
			}

			// i am still the inheritor, wake all to drop inheritership
			ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
			T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
		} else {
			// I previously closed a gate
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
		}
	}

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}

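/*
 * Worker for the pure gate chain: every thread closes its own gate and
 * then waits on the gate of its predecessor, so the push propagates gate
 * by gate down to threads[0], which opens its gate first.
 */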
static void
thread_gate_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test *) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	int i;

	T_LOG("Started thread pri %d %p", my_pri, self);

	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */
	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	for (i = 0; i < info->head.nthreads; i++) {
		if (info->head.threads[i] == self) {
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		}
	}
	assert(i != info->head.nthreads);

	wake_threads(&info->synch2);
	wait_threads(&info->synch2, info->synch_value);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
		lck_mtx_unlock(&info->mtx_lock);
	} else {
		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);
		lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
		lck_mtx_unlock(&info->mtx_lock);
	}

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}

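/*
 * Worker for the sleep chain: every thread except threads[0] sleeps with
 * its predecessor as inheritor, chaining the push down to threads[0],
 * which then wakes the chain back up one thread at a time.
 */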
static void
thread_sleep_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test *) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	event_t wait_event, wake_event;
	int i;
	thread_t inheritor = NULL, woken_up = NULL;
	kern_return_t ret;

	T_LOG("Started thread pri %d %p", my_pri, self);

	/*
	 * Need to use the threads ids, wait for all of them to be populated
	 */
	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		ret = wakeup_one_with_inheritor((event_t) &info->head.threads[0], THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
		T_ASSERT(ret == KERN_SUCCESS, "wakeup_one_with_inheritor woke next");
		T_ASSERT(woken_up == info->head.threads[1], "thread woken up");

		// i am still the inheritor, wake all to drop inheritership
		ret = wakeup_all_with_inheritor((event_t) &info->head.threads[0], LCK_WAKE_DEFAULT);
		T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
	} else {
		wait_event = NULL;
		wake_event = NULL;
		for (i = 0; i < info->head.nthreads; i++) {
			if (info->head.threads[i] == self) {
				inheritor = info->head.threads[i - 1];
				wait_event = (event_t) &info->head.threads[i - 1];
				wake_event = (event_t) &info->head.threads[i];
				break;
			}
		}

		assert(wait_event != NULL);
		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);

		lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
		if (ret == KERN_SUCCESS) {
			T_ASSERT(i != (info->head.nthreads - 1), "thread id");
			T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
		} else {
			T_ASSERT(i == (info->head.nthreads - 1), "thread id");
		}

		// i am still the inheritor, wake all to drop inheritership
		ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
		T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
	}

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}

static void
test_sleep_chain(struct turnstile_chain_test *info)
{
	info->synch = 0;
	info->synch_value = info->head.nthreads;

	start_threads((thread_continue_t)thread_sleep_chain_work, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);
}

static void
test_gate_chain(struct turnstile_chain_test *info)
{
	info->synch = 0;
	info->synch2 = 0;
	info->synch_value = info->head.nthreads;

	start_threads((thread_continue_t)thread_gate_chain_work, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);
}

static void
test_sleep_gate_chain(struct turnstile_chain_test *info)
{
	info->synch = 0;
	info->synch2 = 0;
	info->synch_value = info->head.nthreads;

	start_threads((thread_continue_t)thread_sleep_gate_chain_work, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);
}

kern_return_t
ts_kernel_turnstile_chain_test(void)
{
	struct turnstile_chain_test info = {};
	int i;

	init_synch_test_common((struct synch_test_common *)&info, NUM_THREAD_CHAIN);
	lck_attr_t *lck_attr = lck_attr_alloc_init();
	lck_grp_attr_t *lck_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_t *lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);

	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
	for (i = 0; i < NUM_THREAD_CHAIN; i++) {
		lck_mtx_gate_init(&info.mtx_lock, &info.gates[i]);
	}

	T_LOG("Testing sleep chain, lck");
	test_sleep_chain(&info);

	T_LOG("Testing gate chain, lck");
	test_gate_chain(&info);

	T_LOG("Testing sleep and gate chain, lck");
	test_sleep_gate_chain(&info);

	destroy_synch_test_common((struct synch_test_common *)&info);
	for (i = 0; i < NUM_THREAD_CHAIN; i++) {
		lck_mtx_gate_destroy(&info.mtx_lock, &info.gates[i]);
	}
	lck_attr_free(lck_attr);
	lck_grp_attr_free(lck_grp_attr);
	lck_mtx_destroy(&info.mtx_lock, lck_grp);
	lck_grp_free(lck_grp);

	return KERN_SUCCESS;
}

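/*
 * timingsafe_bcmp() compares two buffers in time that depends only on the
 * length, returning 0 on a match and 1 on any difference; the cases below
 * cover zero length, equal and unequal strings, every single-bit
 * difference in a byte, and large random buffers.
 */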
kern_return_t
ts_kernel_timingsafe_bcmp_test(void)
{
	int i, buf_size;
	char *buf;

	// empty
	T_ASSERT(timingsafe_bcmp(NULL, NULL, 0) == 0, NULL);
	T_ASSERT(timingsafe_bcmp("foo", "foo", 0) == 0, NULL);
	T_ASSERT(timingsafe_bcmp("foo", "bar", 0) == 0, NULL);

	// equal
	T_ASSERT(timingsafe_bcmp("foo", "foo", strlen("foo")) == 0, NULL);

	// unequal
	T_ASSERT(timingsafe_bcmp("foo", "bar", strlen("foo")) == 1, NULL);
	T_ASSERT(timingsafe_bcmp("foo", "goo", strlen("foo")) == 1, NULL);
	T_ASSERT(timingsafe_bcmp("foo", "fpo", strlen("foo")) == 1, NULL);
	T_ASSERT(timingsafe_bcmp("foo", "fop", strlen("foo")) == 1, NULL);

	// all possible bitwise differences
	for (i = 1; i < 256; i += 1) {
		unsigned char a = 0;
		unsigned char b = (unsigned char)i;

		T_ASSERT(timingsafe_bcmp(&a, &b, sizeof(a)) == 1, NULL);
	}

	// large
	buf_size = 1024 * 16;
	buf = kalloc(buf_size);
	T_EXPECT_NOTNULL(buf, "kalloc of buf");

	read_random(buf, buf_size);
	T_ASSERT(timingsafe_bcmp(buf, buf, buf_size) == 0, NULL);
	T_ASSERT(timingsafe_bcmp(buf, buf + 1, buf_size - 1) == 1, NULL);
	T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 1, NULL);

	memcpy(buf + 128, buf, 128);
	T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 0, NULL);

	kfree(buf, buf_size);

	return KERN_SUCCESS;
}

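/*
 * Checks the %hx and %hhx length modifiers in the kernel printf: the
 * arguments should be truncated to 16 and 8 bits respectively before
 * being printed as hex.
 */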
kern_return_t
kprintf_hhx_test(void)
{
	printf("POST hhx test %hx%hx%hx%hx %hhx%hhx%hhx%hhx - %llx",
	    (unsigned short)0xfeed, (unsigned short)0xface,
	    (unsigned short)0xabad, (unsigned short)0xcafe,
	    (unsigned char)'h', (unsigned char)'h', (unsigned char)'x',
	    (unsigned char)'!',
	    0xfeedfaceabadcafeULL);
	return KERN_SUCCESS;
}