/* apple/xnu (xnu-7195.50.7.100.1) - osfmk/tests/kernel_tests.c */

/*
 * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/host.h>
#include <kern/macro_help.h>
#include <kern/sched.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/thread_call.h>
#include <kern/zalloc_internal.h>
#include <kern/kalloc.h>
#include <tests/ktest.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <kern/kern_cdata.h>
#include <machine/lowglobals.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_protos.h>
#include <string.h>

#if !(DEVELOPMENT || DEBUG)
#error "Testing is not enabled on RELEASE configurations"
#endif

#include <tests/xnupost.h>

extern boolean_t get_range_bounds(char * c, int64_t * lower, int64_t * upper);
__private_extern__ void qsort(void * a, size_t n, size_t es, int (*cmp)(const void *, const void *));

uint32_t total_post_tests_count = 0;
void xnupost_reset_panic_widgets(void);

/* test declarations */
kern_return_t zalloc_test(void);
kern_return_t RandomULong_test(void);
kern_return_t kcdata_api_test(void);
kern_return_t ts_kernel_primitive_test(void);
kern_return_t ts_kernel_sleep_inheritor_test(void);
kern_return_t ts_kernel_gate_test(void);
kern_return_t ts_kernel_turnstile_chain_test(void);
kern_return_t ts_kernel_timingsafe_bcmp_test(void);

#if __ARM_VFP__
extern kern_return_t vfp_state_test(void);
#endif

extern kern_return_t kprintf_hhx_test(void);

#if defined(__arm__) || defined(__arm64__)
kern_return_t pmap_coredump_test(void);
#endif

extern kern_return_t console_serial_test(void);
extern kern_return_t console_serial_alloc_rel_tests(void);
extern kern_return_t console_serial_parallel_log_tests(void);
extern kern_return_t test_os_log(void);
extern kern_return_t test_os_log_parallel(void);
extern kern_return_t bitmap_post_test(void);

#ifdef __arm64__
extern kern_return_t arm64_munger_test(void);
extern kern_return_t ex_cb_test(void);
#if __ARM_PAN_AVAILABLE__
extern kern_return_t arm64_pan_test(void);
#endif
#if defined(HAS_APPLE_PAC)
extern kern_return_t arm64_ropjop_test(void);
#endif /* defined(HAS_APPLE_PAC) */
#endif /* __arm64__ */

extern kern_return_t test_thread_call(void);


struct xnupost_panic_widget xt_panic_widgets = {.xtp_context_p = NULL,
    .xtp_outval_p = NULL,
    .xtp_func_name = NULL,
    .xtp_func = NULL};

struct xnupost_test kernel_post_tests[] = {XNUPOST_TEST_CONFIG_BASIC(zalloc_test),
    XNUPOST_TEST_CONFIG_BASIC(RandomULong_test),
    XNUPOST_TEST_CONFIG_BASIC(test_os_log),
    XNUPOST_TEST_CONFIG_BASIC(test_os_log_parallel),
#ifdef __arm64__
    XNUPOST_TEST_CONFIG_BASIC(arm64_munger_test),
    XNUPOST_TEST_CONFIG_BASIC(ex_cb_test),
#if __ARM_PAN_AVAILABLE__
    XNUPOST_TEST_CONFIG_BASIC(arm64_pan_test),
#endif
#if defined(HAS_APPLE_PAC)
    XNUPOST_TEST_CONFIG_BASIC(arm64_ropjop_test),
#endif /* defined(HAS_APPLE_PAC) */
#endif /* __arm64__ */
    XNUPOST_TEST_CONFIG_BASIC(kcdata_api_test),
    XNUPOST_TEST_CONFIG_BASIC(console_serial_test),
    XNUPOST_TEST_CONFIG_BASIC(console_serial_alloc_rel_tests),
    XNUPOST_TEST_CONFIG_BASIC(console_serial_parallel_log_tests),
#if defined(__arm__) || defined(__arm64__)
    XNUPOST_TEST_CONFIG_BASIC(pmap_coredump_test),
#endif
    XNUPOST_TEST_CONFIG_BASIC(bitmap_post_test),
    //XNUPOST_TEST_CONFIG_TEST_PANIC(kcdata_api_assert_tests)
    XNUPOST_TEST_CONFIG_BASIC(test_thread_call),
    XNUPOST_TEST_CONFIG_BASIC(ts_kernel_primitive_test),
    XNUPOST_TEST_CONFIG_BASIC(ts_kernel_sleep_inheritor_test),
    XNUPOST_TEST_CONFIG_BASIC(ts_kernel_gate_test),
    XNUPOST_TEST_CONFIG_BASIC(ts_kernel_turnstile_chain_test),
    XNUPOST_TEST_CONFIG_BASIC(ts_kernel_timingsafe_bcmp_test),
    XNUPOST_TEST_CONFIG_BASIC(kprintf_hhx_test),
#if __ARM_VFP__
    XNUPOST_TEST_CONFIG_BASIC(vfp_state_test),
#endif
    XNUPOST_TEST_CONFIG_BASIC(vm_tests), };

uint32_t kernel_post_tests_count = sizeof(kernel_post_tests) / sizeof(xnupost_test_data_t);
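
/*
 * Editor's note (illustrative, not from the original source): a new POST
 * test is registered by declaring its kern_return_t func(void) above and
 * appending an entry to kernel_post_tests[]; the count above picks it up
 * automatically and xnupost_list_tests() assigns its test number in TOC
 * order. For example, a hypothetical my_widget_test would be added as:
 *
 *     XNUPOST_TEST_CONFIG_BASIC(my_widget_test),
 */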

#define POSTARGS_RUN_TESTS 0x1
#define POSTARGS_CONTROLLER_AVAILABLE 0x2
#define POSTARGS_CUSTOM_TEST_RUNLIST 0x4
uint64_t kernel_post_args = 0x0;

/* static variables to hold state */
static kern_return_t parse_config_retval = KERN_INVALID_CAPABILITY;
static char kernel_post_test_configs[256];
boolean_t xnupost_should_run_test(uint32_t test_num);

kern_return_t
xnupost_parse_config()
{
	if (parse_config_retval != KERN_INVALID_CAPABILITY) {
		return parse_config_retval;
	}
	PE_parse_boot_argn("kernPOST", &kernel_post_args, sizeof(kernel_post_args));

	if (PE_parse_boot_argn("kernPOST_config", &kernel_post_test_configs[0], sizeof(kernel_post_test_configs)) == TRUE) {
		kernel_post_args |= POSTARGS_CUSTOM_TEST_RUNLIST;
	}

	if (kernel_post_args != 0) {
		parse_config_retval = KERN_SUCCESS;
		goto out;
	}
	parse_config_retval = KERN_NOT_SUPPORTED;
out:
	return parse_config_retval;
}
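
/*
 * Illustrative note (assumed semantics, inferred from get_range_bounds()
 * and the comma-skipping loop below): kernPOST_config is a comma-separated
 * list of test numbers and ranges, so boot-args such as
 * "kernPOST=0x1 kernPOST_config=1-5,12" would run only tests #1 through #5
 * and #12, with all other tests marked XT_CONFIG_IGNORE.
 */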

boolean_t
xnupost_should_run_test(uint32_t test_num)
{
	if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
		int64_t begin = 0, end = 999999;
		char * b = kernel_post_test_configs;
		while (*b) {
			get_range_bounds(b, &begin, &end);
			if (test_num >= begin && test_num <= end) {
				return TRUE;
			}

			/* skip to the next "," */
			while (*b != ',') {
				if (*b == '\0') {
					return FALSE;
				}
				b++;
			}
			/* skip past the ',' */
			b++;
		}
		return FALSE;
	}
	return TRUE;
}

kern_return_t
xnupost_list_tests(xnupost_test_t test_list, uint32_t test_count)
{
	if (KERN_SUCCESS != xnupost_parse_config()) {
		return KERN_FAILURE;
	}

	xnupost_test_t testp;
	for (uint32_t i = 0; i < test_count; i++) {
		testp = &test_list[i];
		if (testp->xt_test_num == 0) {
			assert(total_post_tests_count < UINT16_MAX);
			testp->xt_test_num = (uint16_t)++total_post_tests_count;
		}
		/* make sure the boot-arg based test run list is honored */
		if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
			testp->xt_config |= XT_CONFIG_IGNORE;
			if (xnupost_should_run_test(testp->xt_test_num)) {
				testp->xt_config &= ~(XT_CONFIG_IGNORE);
				testp->xt_config |= XT_CONFIG_RUN;
				printf("\n[TEST] #%u is selected by the custom runlist", testp->xt_test_num);
			}
		}
		printf("\n[TEST] TOC#%u name: %s expected: %d config: %x\n", testp->xt_test_num, testp->xt_name, testp->xt_expected_retval,
		    testp->xt_config);
	}

	return KERN_SUCCESS;
}

kern_return_t
xnupost_run_tests(xnupost_test_t test_list, uint32_t test_count)
{
	uint32_t i = 0;
	int retval = KERN_SUCCESS;

	if ((kernel_post_args & POSTARGS_RUN_TESTS) == 0) {
		printf("No POST boot-arg set.\n");
		return retval;
	}

	T_START;
	xnupost_test_t testp;
	for (; i < test_count; i++) {
		xnupost_reset_panic_widgets();
		testp = &test_list[i];
		T_BEGIN(testp->xt_name);
		testp->xt_begin_time = mach_absolute_time();
		testp->xt_end_time = testp->xt_begin_time;

		/*
		 * If a test is designed to panic and the controller
		 * is not available, mark it as SKIPPED.
		 */
		if ((testp->xt_config & XT_CONFIG_EXPECT_PANIC) && !(kernel_post_args & POSTARGS_CONTROLLER_AVAILABLE)) {
			T_SKIP(
				"Test expects panic but "
				"no controller is present");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		if ((testp->xt_config & XT_CONFIG_IGNORE)) {
			T_SKIP("Test is marked as XT_CONFIG_IGNORE");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		testp->xt_func();
		T_END;
		testp->xt_retval = T_TESTRESULT;
		testp->xt_end_time = mach_absolute_time();
		if (testp->xt_retval == testp->xt_expected_retval) {
			testp->xt_test_actions = XT_ACTION_PASSED;
		} else {
			testp->xt_test_actions = XT_ACTION_FAILED;
		}
	}
	T_FINISH;
	return retval;
}

kern_return_t
kernel_list_tests()
{
	return xnupost_list_tests(kernel_post_tests, kernel_post_tests_count);
}

kern_return_t
kernel_do_post()
{
	return xnupost_run_tests(kernel_post_tests, kernel_post_tests_count);
}

kern_return_t
xnupost_register_panic_widget(xt_panic_widget_func funcp, const char * funcname, void * context, void ** outval)
{
	if (xt_panic_widgets.xtp_context_p != NULL || xt_panic_widgets.xtp_func != NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}

	xt_panic_widgets.xtp_context_p = context;
	xt_panic_widgets.xtp_func = funcp;
	xt_panic_widgets.xtp_func_name = funcname;
	xt_panic_widgets.xtp_outval_p = outval;

	return KERN_SUCCESS;
}

void
xnupost_reset_panic_widgets()
{
	bzero(&xt_panic_widgets, sizeof(xt_panic_widgets));
}

kern_return_t
xnupost_process_kdb_stop(const char * panic_s)
{
	xt_panic_return_t retval = 0;
	struct xnupost_panic_widget * pw = &xt_panic_widgets;
	const char * name = "unknown";
	if (xt_panic_widgets.xtp_func_name) {
		name = xt_panic_widgets.xtp_func_name;
	}

	/* bail out early if kernPOST is not set */
	if (kernel_post_args == 0) {
		return KERN_INVALID_CAPABILITY;
	}

	if (xt_panic_widgets.xtp_func) {
		T_LOG("%s: Calling out to widget: %s", __func__, xt_panic_widgets.xtp_func_name);
		retval = pw->xtp_func(panic_s, pw->xtp_context_p, pw->xtp_outval_p);
	} else {
		return KERN_INVALID_CAPABILITY;
	}

	switch (retval) {
	case XT_RET_W_SUCCESS:
		T_EXPECT_EQ_INT(retval, XT_RET_W_SUCCESS, "%s reported successful handling. Returning from kdb_stop.", name);
		/* KERN_SUCCESS means return from panic/assertion */
		return KERN_SUCCESS;

	case XT_RET_W_FAIL:
		T_FAIL("%s reported XT_RET_W_FAIL: Returning from kdb_stop", name);
		return KERN_SUCCESS;

	case XT_PANIC_W_FAIL:
		T_FAIL("%s reported XT_PANIC_W_FAIL: Continuing to kdb_stop", name);
		return KERN_FAILURE;

	case XT_PANIC_W_SUCCESS:
		T_EXPECT_EQ_INT(retval, XT_PANIC_W_SUCCESS, "%s reported successful testcase. But continuing to kdb_stop.", name);
		return KERN_FAILURE;

	case XT_PANIC_UNRELATED:
	default:
		T_LOG("UNRELATED: Continuing to kdb_stop.");
		return KERN_FAILURE;
	}
}
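
/*
 * Sketch of the panic-widget flow (editor's note, based on the handlers
 * above and the commented-out kcdata_api_assert_tests() below): a test that
 * expects an assert registers a widget such as _xt_generic_assert_check()
 * with a string to match; when the assert fires, xnupost_process_kdb_stop()
 * hands the panic string to the widget and, on XT_RET_W_SUCCESS or
 * XT_RET_W_FAIL, returns KERN_SUCCESS so execution resumes instead of
 * stopping in the debugger.
 */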

xt_panic_return_t
_xt_generic_assert_check(const char * s, void * str_to_match, void ** outval)
{
	xt_panic_return_t ret = XT_PANIC_UNRELATED;

	if (NULL != strnstr(__DECONST(char *, s), (char *)str_to_match, strlen(s))) {
		T_LOG("%s: kdb_stop string: '%s' MATCHED string: '%s'", __func__, s, (char *)str_to_match);
		ret = XT_RET_W_SUCCESS;
	}

	if (outval) {
		*outval = (void *)(uintptr_t)ret;
	}
	return ret;
}

kern_return_t
xnupost_reset_tests(xnupost_test_t test_list, uint32_t test_count)
{
	uint32_t i = 0;
	xnupost_test_t testp;
	for (; i < test_count; i++) {
		testp = &test_list[i];
		testp->xt_begin_time = 0;
		testp->xt_end_time = 0;
		testp->xt_test_actions = XT_ACTION_NONE;
		testp->xt_retval = -1;
	}
	return KERN_SUCCESS;
}


kern_return_t
zalloc_test(void)
{
	zone_t test_zone;
	void * test_ptr;

	T_SETUPBEGIN;
	test_zone = zone_create("test_uint64_zone", sizeof(uint64_t),
	    ZC_DESTRUCTIBLE);
	T_ASSERT_NOTNULL(test_zone, NULL);

	T_ASSERT_EQ_INT(test_zone->countfree, 0, NULL);
	T_SETUPEND;

	T_ASSERT_NOTNULL(test_ptr = zalloc(test_zone), NULL);

	zfree(test_zone, test_ptr);

	/* A sample report for perfdata */
	T_PERF("num_threads_at_ktest", threads_count, "count", "# of threads in system at zalloc_test");

	return KERN_SUCCESS;
}

/*
 * Comparison function used by qsort().
 */
static int
compare_numbers_ascending(const void * a, const void * b)
{
	const uint64_t x = *(const uint64_t *)a;
	const uint64_t y = *(const uint64_t *)b;
	if (x < y) {
		return -1;
	} else if (x > y) {
		return 1;
	} else {
		return 0;
	}
}

/*
 * Count the number of bits set in a number, using the compiler's
 * population-count builtin.
 */
static int
count_bits(uint64_t number)
{
	return __builtin_popcountll(number);
}
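
/*
 * Example (editor's note): count_bits(0xFF00 ^ 0x00FF) == 16. XOR-ing two
 * consecutive samples and counting the set bits measures how many bit
 * positions flipped between them, which is the bit-entropy proxy used by
 * RandomULong_test() below.
 */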

kern_return_t
RandomULong_test()
{
/*
 * Randomness test for RandomULong()
 *
 * This test verifies that:
 * a. RandomULong works
 * b. The generated numbers match the following entropy criteria:
 *    For a thousand iterations, verify:
 *    1. mean entropy > 12 bits
 *    2. min entropy > 4 bits
 *    3. No duplicates
 *    4. No incremental/decremental pattern in a window of 3
 *    5. No zero
 *    6. No -1
 *
 * <rdar://problem/22526137> Add test to increase code coverage for /dev/random
 */

#define CONF_MIN_ENTROPY 4
#define CONF_MEAN_ENTROPY 12
#define CONF_ITERATIONS 1000
#define CONF_WINDOW_SIZE 3
#define CONF_WINDOW_TREND_LIMIT ((CONF_WINDOW_SIZE / 2) + (CONF_WINDOW_SIZE & 1)) /* == ceil(CONF_WINDOW_SIZE / 2) */

	int i;
	uint32_t min_bit_entropy, max_bit_entropy, bit_entropy;
	uint32_t aggregate_bit_entropy = 0;
	uint32_t mean_bit_entropy = 0;
	uint64_t numbers[CONF_ITERATIONS];
	min_bit_entropy = UINT32_MAX;
	max_bit_entropy = 0;

	/*
	 * TEST 1: Number generation and basic validation
	 * Check for zero (no bits set), -1 (all bits set) and error
	 */
	for (i = 0; i < CONF_ITERATIONS; i++) {
		read_random(&numbers[i], sizeof(numbers[i]));
		if (numbers[i] == 0) {
			T_ASSERT_NE_ULLONG(numbers[i], 0, "read_random returned zero value.");
		}
		if (numbers[i] == UINT64_MAX) {
			T_ASSERT_NE_ULLONG(numbers[i], UINT64_MAX, "read_random returned -1.");
		}
	}
	T_PASS("Generated %d non-zero random numbers with at least one bit reset.", CONF_ITERATIONS);

	/*
	 * TEST 2: Mean and Min Bit Entropy
	 * Check the bit entropy and its mean over the generated numbers.
	 */
	for (i = 1; i < CONF_ITERATIONS; i++) {
		bit_entropy = count_bits(numbers[i - 1] ^ numbers[i]);
		if (bit_entropy < min_bit_entropy) {
			min_bit_entropy = bit_entropy;
		}
		if (bit_entropy > max_bit_entropy) {
			max_bit_entropy = bit_entropy;
		}

		if (bit_entropy < CONF_MIN_ENTROPY) {
			T_EXPECT_GE_UINT(bit_entropy, CONF_MIN_ENTROPY,
			    "Number of differing bits in consecutive numbers does not satisfy the min criteria.");
		}

		aggregate_bit_entropy += bit_entropy;
	}
	T_PASS("Passed the min bit entropy expectation of %d bits", CONF_MIN_ENTROPY);

	mean_bit_entropy = aggregate_bit_entropy / CONF_ITERATIONS;
	T_EXPECT_GE_UINT(mean_bit_entropy, CONF_MEAN_ENTROPY, "Test criteria for mean number of differing bits.");
	T_PASS("Mean bit entropy criteria satisfied (Required %d, Actual: %d).", CONF_MEAN_ENTROPY, mean_bit_entropy);
	T_LOG("{PERFORMANCE} iterations: %d, min_bit_entropy: %d, mean_bit_entropy: %d, max_bit_entropy: %d", CONF_ITERATIONS,
	    min_bit_entropy, mean_bit_entropy, max_bit_entropy);
	T_PERF("min_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), min_bit_entropy, "bits", "minimum bit entropy in RNG. High is better");
	T_PERF("mean_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), mean_bit_entropy, "bits", "mean bit entropy in RNG. High is better");
	T_PERF("max_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), max_bit_entropy, "bits", "max bit entropy in RNG. High is better");

	/*
	 * TEST 3: Incremental Pattern Search
	 * Check that an incremental/decremental pattern does not exist in the given window
	 */
	int window_start, window_end, trend;
	window_start = window_end = trend = 0;

	do {
		/*
		 * Set the window
		 */
		window_end = window_start + CONF_WINDOW_SIZE - 1;
		if (window_end >= CONF_ITERATIONS) {
			window_end = CONF_ITERATIONS - 1;
		}

		trend = 0;
		for (i = window_start; i < window_end; i++) {
			if (numbers[i] < numbers[i + 1]) {
				trend++;
			} else if (numbers[i] > numbers[i + 1]) {
				trend--;
			}
		}
		/*
		 * Check that there is no increasing or decreasing trend
		 * i.e. trend <= ceil(window_size/2)
		 */
		if (trend < 0) {
			trend = -trend;
		}
		if (trend > CONF_WINDOW_TREND_LIMIT) {
			T_ASSERT_LE_INT(trend, CONF_WINDOW_TREND_LIMIT, "Found increasing/decreasing trend in random numbers.");
		}

		/*
		 * Move to the next window
		 */
		window_start++;
	} while (window_start < (CONF_ITERATIONS - 1));
	T_PASS("Did not find increasing/decreasing trends in a window of %d numbers.", CONF_WINDOW_SIZE);

	/*
	 * TEST 4: Find Duplicates
	 * Check no duplicate values are generated
	 */
	qsort(numbers, CONF_ITERATIONS, sizeof(numbers[0]), compare_numbers_ascending);
	for (i = 1; i < CONF_ITERATIONS; i++) {
		if (numbers[i] == numbers[i - 1]) {
			T_ASSERT_NE_ULLONG(numbers[i], numbers[i - 1], "read_random generated duplicate values.");
		}
	}
	T_PASS("Test did not find any duplicates as expected.");

	return KERN_SUCCESS;
}


/* KCDATA kernel api tests */
static struct kcdata_descriptor test_kc_data;//, test_kc_data2;
struct sample_disk_io_stats {
	uint64_t disk_reads_count;
	uint64_t disk_reads_size;
	uint64_t io_priority_count[4];
	uint64_t io_priority_size;
} __attribute__((packed));

struct kcdata_subtype_descriptor test_disk_io_stats_def[] = {
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_NONE,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 0 * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "disk_reads_count"
	},
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_NONE,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 1 * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "disk_reads_size"
	},
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_ARRAY,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 2 * sizeof(uint64_t),
		.kcs_elem_size = KCS_SUBTYPE_PACK_SIZE(4, sizeof(uint64_t)),
		.kcs_name = "io_priority_count"
	},
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_ARRAY,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = (2 + 4) * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "io_priority_size"
	},
};
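
/*
 * Editor's note: each kcdata_subtype_descriptor above maps one field of
 * struct sample_disk_io_stats (offset, element size, flags) so that a
 * consumer can decode the custom type registered via
 * kcdata_add_type_definition() in kcdata_api_test() below without the C
 * definition; KCS_SUBTYPE_FLAGS_ARRAY marks io_priority_count as a packed
 * 4-element uint64_t array.
 */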

kern_return_t
kcdata_api_test()
{
	kern_return_t retval = KERN_SUCCESS;

	/* test for NULL input */
	retval = kcdata_memory_static_init(NULL, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_STACKSHOT, 100, KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_memory_static_init with NULL struct");

	/* another negative test with buffer size < 32 bytes */
	char data[30] = "sample_disk_io_stats";
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)&data, KCDATA_BUFFER_BEGIN_CRASHINFO, sizeof(data),
	    KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "init with 30 bytes failed as expected with KERN_INSUFFICIENT_BUFFER_SIZE");

	/* test with COPYOUT for 0x0 address. Should return KERN_NO_ACCESS */
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_CRASHINFO, PAGE_SIZE,
	    KCFLAG_USE_COPYOUT);
	T_ASSERT(retval == KERN_NO_ACCESS, "writing to 0x0 returned KERN_NO_ACCESS");

	/* test with successful kcdata_memory_static_init */
	test_kc_data.kcd_length = 0xdeadbeef;
	mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
	T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");

	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
	    KCFLAG_USE_MEMCOPY);

	T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");

	T_ASSERT(test_kc_data.kcd_length == PAGE_SIZE, "kcdata length is set correctly to PAGE_SIZE.");
	T_LOG("addr_begin 0x%llx and end 0x%llx and address 0x%llx", test_kc_data.kcd_addr_begin, test_kc_data.kcd_addr_end, address);
	T_ASSERT(test_kc_data.kcd_addr_begin == address, "kcdata begin address is correct 0x%llx", (uint64_t)address);

	/* verify we have BEGIN and END HEADERS set */
	uint32_t * mem = (uint32_t *)address;
	T_ASSERT(mem[0] == KCDATA_BUFFER_BEGIN_STACKSHOT, "buffer does contain KCDATA_BUFFER_BEGIN_STACKSHOT");
	T_ASSERT(mem[4] == KCDATA_TYPE_BUFFER_END, "KCDATA_TYPE_BUFFER_END is appended as expected");
	T_ASSERT(mem[5] == 0, "size of BUFFER_END tag is zero");

	/* verify kcdata_memory_get_used_bytes() */
	uint64_t bytes_used = 0;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	T_ASSERT(bytes_used == (2 * sizeof(struct kcdata_item)), "bytes_used api returned expected %llu", bytes_used);

	/* test for kcdata_get_memory_addr() */

	mach_vm_address_t user_addr = 0;
	/* negative test for NULL user_addr AND/OR kcdata_descriptor */
	retval = kcdata_get_memory_addr(NULL, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL struct -> KERN_INVALID_ARGUMENT");

	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), NULL);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL user_addr -> KERN_INVALID_ARGUMENT");

	/* successful case with size 0. Yes, this is expected to succeed, since just an item type can be used as a boolean */
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_USECS_SINCE_EPOCH, 0, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Successfully got kcdata entry for 0 size data");
	T_ASSERT(user_addr == test_kc_data.kcd_addr_end, "0 sized data did not add any extra buffer space");

	/* successful case with valid size. */
	user_addr = 0xdeadbeef;
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "kcdata_get_memory_addr with valid values succeeded.");
	T_ASSERT(user_addr > test_kc_data.kcd_addr_begin, "user_addr is in range of buffer");
	T_ASSERT(user_addr < test_kc_data.kcd_addr_end, "user_addr is in range of buffer");

	/* Try creating an item with really large size */
	user_addr = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, PAGE_SIZE * 4, &user_addr);
	T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "Allocating entry with size > buffer -> KERN_INSUFFICIENT_BUFFER_SIZE");
	T_ASSERT(user_addr == 0xdeadbeef, "user_addr remained unaffected with failed kcdata_get_memory_addr");
	T_ASSERT(bytes_used == kcdata_memory_get_used_bytes(&test_kc_data), "The data structure should be unaffected");

	/* verify convenience functions for uint32_with_description */
	retval = kcdata_add_uint32_with_description(&test_kc_data, 0xbdc0ffee, "This is bad coffee");
	T_ASSERT(retval == KERN_SUCCESS, "add uint32 with description succeeded.");

	retval = kcdata_add_uint64_with_description(&test_kc_data, 0xf001badc0ffee, "another 8 byte no.");
	T_ASSERT(retval == KERN_SUCCESS, "add uint64 with desc succeeded.");

	/* verify creating a KCDATA_TYPE_ARRAY here */
	user_addr = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	/* save memory address where the array will come up */
	struct kcdata_item * item_p = (struct kcdata_item *)test_kc_data.kcd_addr_end;

	retval = kcdata_get_memory_addr_for_array(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), 20, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Array of 20 integers should be possible");
	T_ASSERT(user_addr != 0xdeadbeef, "user_addr is updated as expected");
	T_ASSERT((kcdata_memory_get_used_bytes(&test_kc_data) - bytes_used) >= 20 * sizeof(uint64_t), "memory allocation is in range");
	kcdata_iter_t iter = kcdata_iter(item_p, (unsigned long)(PAGE_SIZE - kcdata_memory_get_used_bytes(&test_kc_data)));
	T_ASSERT(kcdata_iter_array_elem_count(iter) == 20, "array count is 20");

	/* FIXME add tests here for ranges of sizes and counts */

	T_ASSERT(item_p->flags == (((uint64_t)KCDATA_TYPE_MACH_ABSOLUTE_TIME << 32) | 20), "flags are set correctly");

	/* test adding of custom type */

	retval = kcdata_add_type_definition(&test_kc_data, 0x999, data, &test_disk_io_stats_def[0],
	    sizeof(test_disk_io_stats_def) / sizeof(struct kcdata_subtype_descriptor));
	T_ASSERT(retval == KERN_SUCCESS, "adding custom type succeeded.");

	return KERN_SUCCESS;
}

/*
 * kern_return_t
 * kcdata_api_assert_tests()
 * {
 *      kern_return_t retval = 0;
 *      void * assert_check_retval = NULL;
 *      test_kc_data2.kcd_length = 0xdeadbeef;
 *      mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
 *      T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");
 *
 *      retval = kcdata_memory_static_init(&test_kc_data2, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
 *          KCFLAG_USE_MEMCOPY);
 *
 *      T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");
 *
 *      retval = T_REGISTER_ASSERT_CHECK("KCDATA_DESC_MAXLEN", &assert_check_retval);
 *      T_ASSERT(retval == KERN_SUCCESS, "registered assert widget");
 *
 *      // this will assert
 *      retval = kcdata_add_uint32_with_description(&test_kc_data2, 0xc0ffee, "really long description string for kcdata");
 *      T_ASSERT(retval == KERN_INVALID_ARGUMENT, "API param check returned KERN_INVALID_ARGUMENT correctly");
 *      T_ASSERT(assert_check_retval == (void *)XT_RET_W_SUCCESS, "assertion handler verified that it was hit");
 *
 *      return KERN_SUCCESS;
 * }
 */

#if defined(__arm__) || defined(__arm64__)

#include <arm/pmap.h>

#define MAX_PMAP_OBJECT_ELEMENT 100000

extern struct vm_object pmap_object_store; /* store pt pages */
extern unsigned long gPhysBase, gPhysSize, first_avail;

/*
 * Define macros to traverse the pmap object structures and extract
 * physical page numbers using information from the low globals only.
 * This emulates how Astris extracts information from a coredump.
 */
#if defined(__arm64__)

static inline uintptr_t
astris_vm_page_unpack_ptr(uintptr_t p)
{
	if (!p) {
		return (uintptr_t)0;
	}

	return (p & lowGlo.lgPmapMemFromArrayMask)
	       ? lowGlo.lgPmapMemStartAddr + (p & ~(lowGlo.lgPmapMemFromArrayMask)) * lowGlo.lgPmapMemPagesize
	       : lowGlo.lgPmapMemPackedBaseAddr + (p << lowGlo.lgPmapMemPackedShift);
}

// assume next pointer is the first element
#define astris_vm_page_queue_next(qc) (astris_vm_page_unpack_ptr(*((uint32_t *)(qc))))

#endif

#if defined(__arm__)

// assume next pointer is the first element
#define astris_vm_page_queue_next(qc) *((uintptr_t *)(qc))

#endif

#define astris_vm_page_queue_first(q) astris_vm_page_queue_next(q)

#define astris_vm_page_queue_end(q, qe) ((q) == (qe))

#define astris_vm_page_queue_iterate(head, elt)                                                           \
	for ((elt) = (uintptr_t)astris_vm_page_queue_first((head)); !astris_vm_page_queue_end((head), (elt)); \
	    (elt) = (uintptr_t)astris_vm_page_queue_next(((elt) + (uintptr_t)lowGlo.lgPmapMemChainOffset)))

#define astris_ptoa(x) ((vm_address_t)(x) << lowGlo.lgPageShift)

static inline ppnum_t
astris_vm_page_get_phys_page(uintptr_t m)
{
	return (m >= lowGlo.lgPmapMemStartAddr && m < lowGlo.lgPmapMemEndAddr)
	       ? (ppnum_t)((m - lowGlo.lgPmapMemStartAddr) / lowGlo.lgPmapMemPagesize + lowGlo.lgPmapMemFirstppnum)
	       : *((ppnum_t *)(m + lowGlo.lgPmapMemPageOffset));
}
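
/*
 * Worked example (editor's note): on arm64 a packed queue pointer has two
 * encodings, and astris_vm_page_unpack_ptr() above reverses both using only
 * low-global values, which is all a coredump consumer has. If the
 * lgPmapMemFromArrayMask bit is set, the remaining bits index the vm_pages
 * array, so the address is lgPmapMemStartAddr + index * lgPmapMemPagesize;
 * otherwise the pointer is an offset that is shifted left by
 * lgPmapMemPackedShift and added to lgPmapMemPackedBaseAddr.
 */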

kern_return_t
pmap_coredump_test(void)
{
	int iter = 0;
	uintptr_t p;

	T_LOG("Testing coredump info for PMAP.");

	T_ASSERT_GE_ULONG(lowGlo.lgStaticAddr, gPhysBase, NULL);
	T_ASSERT_LE_ULONG(lowGlo.lgStaticAddr + lowGlo.lgStaticSize, first_avail, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMajorVersion, 3, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMinorVersion, 2, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMagic, LOWGLO_LAYOUT_MAGIC, NULL);

	// check the constant values in lowGlo
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemQ, ((typeof(lowGlo.lgPmapMemQ)) & (pmap_object_store.memq)), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPageOffset, offsetof(struct vm_page_with_ppnum, vmp_phys_page), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemChainOffset, offsetof(struct vm_page, vmp_listq), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPagesize, sizeof(struct vm_page), NULL);

#if defined(__arm64__)
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemFromArrayMask, VM_PAGE_PACKED_FROM_ARRAY, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedShift, VM_PAGE_PACKED_PTR_SHIFT, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedBaseAddr, VM_PAGE_PACKED_PTR_BASE, NULL);
#endif

	vm_object_lock_shared(&pmap_object_store);
	astris_vm_page_queue_iterate(lowGlo.lgPmapMemQ, p)
	{
		ppnum_t ppnum = astris_vm_page_get_phys_page(p);
		pmap_paddr_t pa = (pmap_paddr_t)astris_ptoa(ppnum);
		T_ASSERT_GE_ULONG(pa, gPhysBase, NULL);
		T_ASSERT_LT_ULONG(pa, gPhysBase + gPhysSize, NULL);
		iter++;
		T_ASSERT_LT_INT(iter, MAX_PMAP_OBJECT_ELEMENT, NULL);
	}
	vm_object_unlock(&pmap_object_store);

	T_ASSERT_GT_INT(iter, 0, NULL);
	return KERN_SUCCESS;
}
#endif

struct ts_kern_prim_test_args {
	int *end_barrier;
	int *notify_b;
	int *wait_event_b;
	int before_num;
	int *notify_a;
	int *wait_event_a;
	int after_num;
	int priority_to_check;
};

static void
wait_threads(
	int* var,
	int num)
{
	if (var != NULL) {
		while (os_atomic_load(var, acquire) != num) {
			assert_wait((event_t) var, THREAD_UNINT);
			if (os_atomic_load(var, acquire) != num) {
				(void) thread_block(THREAD_CONTINUE_NULL);
			} else {
				clear_wait(current_thread(), THREAD_AWAKENED);
			}
		}
	}
}

static void
wake_threads(
	int* var)
{
	if (var) {
		os_atomic_inc(var, relaxed);
		thread_wakeup((event_t) var);
	}
}
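
/*
 * Editor's note: wait_threads()/wake_threads() implement a simple counting
 * barrier over an int. The re-check of the counter after assert_wait() and
 * before thread_block() closes the race where the final wake_threads()
 * increment lands between the first load and the wait registration; in that
 * case the waiter cancels its pending wait with clear_wait() instead of
 * blocking on a wakeup that has already happened.
 */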

extern void IOSleep(int);

static void
thread_lock_unlock_kernel_primitive(
	void *args,
	__unused wait_result_t wr)
{
	thread_t thread = current_thread();
	struct ts_kern_prim_test_args *info = (struct ts_kern_prim_test_args*) args;
	int pri;

	thread_lock(thread);
	pri = thread->sched_pri;
	thread_unlock(thread);

	wait_threads(info->wait_event_b, info->before_num);
	wake_threads(info->notify_b);

	tstile_test_prim_lock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

	wake_threads(info->notify_a);
	wait_threads(info->wait_event_a, info->after_num);

	IOSleep(100);

	if (info->priority_to_check) {
		thread_lock(thread);
		pri = thread->sched_pri;
		thread_unlock(thread);
		T_ASSERT(pri == info->priority_to_check, "Priority thread: current sched %d sched wanted %d", pri, info->priority_to_check);
	}

	tstile_test_prim_unlock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

	wake_threads(info->end_barrier);
	thread_terminate_self();
}
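
/*
 * Editor's note: in ts_kernel_primitive_test() below the owner thread runs
 * this function at priority 80 while holding the turnstile-backed test
 * primitive; priority_to_check = 90 asserts that, while the priority-90
 * waiter is blocked on the primitive, the owner's sched_pri has been pushed
 * to 90 by turnstile priority inheritance.
 */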

kern_return_t
ts_kernel_primitive_test(void)
{
	thread_t owner, thread1, thread2;
	struct ts_kern_prim_test_args targs[2] = {};
	kern_return_t result;
	int end_barrier = 0;
	int owner_locked = 0;
	int waiters_ready = 0;

	T_LOG("Testing turnstile kernel primitive");

	targs[0].notify_b = NULL;
	targs[0].wait_event_b = NULL;
	targs[0].before_num = 0;
	targs[0].notify_a = &owner_locked;
	targs[0].wait_event_a = &waiters_ready;
	targs[0].after_num = 2;
	targs[0].priority_to_check = 90;
	targs[0].end_barrier = &end_barrier;

	// Start owner with priority 80
	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[0], 80, &owner);
	T_ASSERT(result == KERN_SUCCESS, "Starting owner");

	targs[1].notify_b = &waiters_ready;
	targs[1].wait_event_b = &owner_locked;
	targs[1].before_num = 1;
	targs[1].notify_a = NULL;
	targs[1].wait_event_a = NULL;
	targs[1].after_num = 0;
	targs[1].priority_to_check = 0;
	targs[1].end_barrier = &end_barrier;

	// Start waiters with priority 85 and 90
	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 85, &thread1);
	T_ASSERT(result == KERN_SUCCESS, "Starting thread1");

	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 90, &thread2);
	T_ASSERT(result == KERN_SUCCESS, "Starting thread2");

	wait_threads(&end_barrier, 3);

	return KERN_SUCCESS;
}

#define MTX_LOCK 0
#define RW_LOCK 1

#define NUM_THREADS 4

struct synch_test_common {
	unsigned int nthreads;
	thread_t *threads;
	int max_pri;
	int test_done;
};

static kern_return_t
init_synch_test_common(struct synch_test_common *info, unsigned int nthreads)
{
	info->nthreads = nthreads;
	info->threads = kalloc(sizeof(thread_t) * nthreads);
	if (!info->threads) {
		return ENOMEM;
	}

	return KERN_SUCCESS;
}

static void
destroy_synch_test_common(struct synch_test_common *info)
{
	kfree(info->threads, sizeof(thread_t) * info->nthreads);
}

static void
start_threads(thread_continue_t func, struct synch_test_common *info, bool sleep_after_first)
{
	thread_t thread;
	kern_return_t result;
	uint i;
	int priority = 75;

	info->test_done = 0;

	for (i = 0; i < info->nthreads; i++) {
		info->threads[i] = NULL;
	}

	info->max_pri = priority + (info->nthreads - 1) * 5;
	if (info->max_pri > 95) {
		info->max_pri = 95;
	}

	for (i = 0; i < info->nthreads; i++) {
		result = kernel_thread_start_priority((thread_continue_t)func, info, priority, &thread);
		os_atomic_store(&info->threads[i], thread, release);
		T_ASSERT(result == KERN_SUCCESS, "Starting thread %d, priority %d, %p", i, priority, thread);

		priority += 5;

		if (i == 0 && sleep_after_first) {
			IOSleep(100);
		}
	}
}

static unsigned int
get_max_pri(struct synch_test_common * info)
{
	return info->max_pri;
}

static void
wait_all_thread(struct synch_test_common * info)
{
	wait_threads(&info->test_done, info->nthreads);
}

static void
notify_waiter(struct synch_test_common * info)
{
	wake_threads(&info->test_done);
}

static void
wait_for_waiters(struct synch_test_common *info)
{
	uint i, j;
	thread_t thread;

	for (i = 0; i < info->nthreads; i++) {
		j = 0;
		while (os_atomic_load(&info->threads[i], acquire) == NULL) {
			if (j % 100 == 0) {
				IOSleep(10);
			}
			j++;
		}

		if (info->threads[i] != current_thread()) {
			j = 0;
			do {
				thread = os_atomic_load(&info->threads[i], relaxed);
				if (thread == (thread_t) 1) {
					break;
				}

				if (!(thread->state & TH_RUN)) {
					break;
				}

				if (j % 100 == 0) {
					IOSleep(100);
				}
				j++;

				if (thread->started == FALSE) {
					continue;
				}
			} while (thread->state & TH_RUN);
		}
	}
}

static void
exclude_current_waiter(struct synch_test_common *info)
{
	uint i, j;

	for (i = 0; i < info->nthreads; i++) {
		j = 0;
		while (os_atomic_load(&info->threads[i], acquire) == NULL) {
			if (j % 100 == 0) {
				IOSleep(10);
			}
			j++;
		}

		if (os_atomic_load(&info->threads[i], acquire) == current_thread()) {
			os_atomic_store(&info->threads[i], (thread_t)1, release);
			return;
		}
	}
}
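
/*
 * Editor's note: exclude_current_waiter() replaces the calling thread's
 * slot in info->threads[] with the sentinel (thread_t)1, which
 * wait_for_waiters() above treats as "do not wait for this thread"; this
 * lets a thread that will keep running (e.g. the current inheritor) drop
 * itself out of the blocked-waiter scan.
 */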

struct info_sleep_inheritor_test {
	struct synch_test_common head;
	lck_mtx_t mtx_lock;
	lck_rw_t rw_lock;
	decl_lck_mtx_gate_data(, gate);
	boolean_t gate_closed;
	int prim_type;
	boolean_t work_to_do;
	unsigned int max_pri;
	unsigned int steal_pri;
	int synch_value;
	int synch;
	int value;
	int handoff_failure;
	thread_t thread_inheritor;
};

static void
primitive_lock(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_lock(&info->mtx_lock);
		break;
	case RW_LOCK:
		lck_rw_lock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static void
primitive_unlock(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_unlock(&info->mtx_lock);
		break;
	case RW_LOCK:
		lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static wait_result_t
primitive_sleep_with_inheritor(struct info_sleep_inheritor_test *info)
{
	wait_result_t ret = KERN_SUCCESS;
	switch (info->prim_type) {
	case MTX_LOCK:
		ret = lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
		break;
	case RW_LOCK:
		ret = lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}

	return ret;
}

static void
primitive_wakeup_one_with_inheritor(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
	case RW_LOCK:
		wakeup_one_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED, LCK_WAKE_DEFAULT, &info->thread_inheritor);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static void
primitive_wakeup_all_with_inheritor(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
	case RW_LOCK:
		wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
	return;
}

static void
primitive_change_sleep_inheritor(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
	case RW_LOCK:
		change_sleep_inheritor((event_t) &info->thread_inheritor, info->thread_inheritor);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
	return;
}

static kern_return_t
primitive_gate_try_close(struct info_sleep_inheritor_test *info)
{
	kern_return_t ret = KERN_SUCCESS;
	switch (info->prim_type) {
	case MTX_LOCK:
		ret = lck_mtx_gate_try_close(&info->mtx_lock, &info->gate);
		break;
	case RW_LOCK:
		ret = lck_rw_gate_try_close(&info->rw_lock, &info->gate);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
	return ret;
}

static gate_wait_result_t
primitive_gate_wait(struct info_sleep_inheritor_test *info)
{
	gate_wait_result_t ret = GATE_OPENED;
	switch (info->prim_type) {
	case MTX_LOCK:
		ret = lck_mtx_gate_wait(&info->mtx_lock, &info->gate, LCK_SLEEP_DEFAULT, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
		break;
	case RW_LOCK:
		ret = lck_rw_gate_wait(&info->rw_lock, &info->gate, LCK_SLEEP_DEFAULT, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
	return ret;
}

static void
primitive_gate_open(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_gate_open(&info->mtx_lock, &info->gate);
		break;
	case RW_LOCK:
		lck_rw_gate_open(&info->rw_lock, &info->gate);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static void
primitive_gate_close(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_gate_close(&info->mtx_lock, &info->gate);
		break;
	case RW_LOCK:
		lck_rw_gate_close(&info->rw_lock, &info->gate);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static void
primitive_gate_steal(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_gate_steal(&info->mtx_lock, &info->gate);
		break;
	case RW_LOCK:
		lck_rw_gate_steal(&info->rw_lock, &info->gate);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static kern_return_t
primitive_gate_handoff(struct info_sleep_inheritor_test *info, int flags)
{
	kern_return_t ret = KERN_SUCCESS;
	switch (info->prim_type) {
	case MTX_LOCK:
		ret = lck_mtx_gate_handoff(&info->mtx_lock, &info->gate, flags);
		break;
	case RW_LOCK:
		ret = lck_rw_gate_handoff(&info->rw_lock, &info->gate, flags);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
	return ret;
}

static void
primitive_gate_assert(struct info_sleep_inheritor_test *info, int type)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_gate_assert(&info->mtx_lock, &info->gate, type);
		break;
	case RW_LOCK:
		lck_rw_gate_assert(&info->rw_lock, &info->gate, type);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static void
primitive_gate_init(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_gate_init(&info->mtx_lock, &info->gate);
		break;
	case RW_LOCK:
		lck_rw_gate_init(&info->rw_lock, &info->gate);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static void
primitive_gate_destroy(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_gate_destroy(&info->mtx_lock, &info->gate);
		break;
	case RW_LOCK:
		lck_rw_gate_destroy(&info->rw_lock, &info->gate);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}
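
/*
 * Editor's note: the primitive_* wrappers above exist so that every
 * sleep-inheritor and gate scenario in this file can be exercised twice,
 * once with the gate/event protected by a mutex (MTX_LOCK) and once by an
 * exclusive rw-lock (RW_LOCK), without duplicating the test bodies;
 * info->prim_type selects the backing lock at run time.
 */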

static void
thread_inheritor_like_mutex(
	void *args,
	__unused wait_result_t wr)
{
	wait_result_t wait;

	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	/*
	 * spin here to start concurrently
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		info->thread_inheritor = current_thread();
	} else {
		wait = primitive_sleep_with_inheritor(info);
		T_ASSERT(wait == THREAD_AWAKENED || wait == THREAD_NOT_WAITING, "sleep_with_inheritor return");
	}
	primitive_unlock(info);

	IOSleep(100);
	info->value++;

	primitive_lock(info);

	T_ASSERT(info->thread_inheritor == current_thread(), "thread_inheritor is %p", info->thread_inheritor);
	primitive_wakeup_one_with_inheritor(info);
	T_LOG("woken up %p", info->thread_inheritor);

	if (info->thread_inheritor == NULL) {
		T_ASSERT(info->handoff_failure == 0, "handoff failures");
		info->handoff_failure++;
	} else {
		T_ASSERT(info->thread_inheritor != current_thread(), "thread_inheritor is %p", info->thread_inheritor);
		thread_deallocate(info->thread_inheritor);
	}

	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}

static void
thread_just_inheritor_do_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		info->thread_inheritor = current_thread();
		primitive_unlock(info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		max_pri = get_max_pri((struct synch_test_common *) info);
		T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

		os_atomic_store(&info->synch, 0, relaxed);
		primitive_lock(info);
		primitive_wakeup_all_with_inheritor(info);
	} else {
		wake_threads(&info->synch);
		primitive_sleep_with_inheritor(info);
	}

	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}

static void
thread_steal_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		info->thread_inheritor = current_thread();
		exclude_current_waiter((struct synch_test_common *)info);

		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
		primitive_unlock(info);

		wait_threads(&info->synch, info->synch_value - 2);

		wait_for_waiters((struct synch_test_common *)info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
		primitive_lock(info);
		if (info->thread_inheritor == current_thread()) {
			primitive_wakeup_all_with_inheritor(info);
		}
	} else {
		if (info->steal_pri == 0) {
			info->steal_pri = my_pri;
			info->thread_inheritor = current_thread();
			primitive_change_sleep_inheritor(info);
			exclude_current_waiter((struct synch_test_common *)info);

			primitive_unlock(info);

			wait_threads(&info->synch, info->synch_value - 2);

			T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);

			T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);

			primitive_lock(info);
			primitive_wakeup_all_with_inheritor(info);
		} else {
			if (my_pri > info->steal_pri) {
				info->steal_pri = my_pri;
			}
			wake_threads(&info->synch);
			primitive_sleep_with_inheritor(info);
			exclude_current_waiter((struct synch_test_common *)info);
		}
	}
	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}

static void
thread_no_inheritor_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	info->value--;
	if (info->value == 0) {
		primitive_wakeup_all_with_inheritor(info);
	} else {
		info->thread_inheritor = NULL;
		primitive_sleep_with_inheritor(info);
	}

	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}

static void
thread_mtx_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	int i;
	u_int8_t rand;
	unsigned int mod_rand;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	for (i = 0; i < 10; i++) {
		lck_mtx_lock(&info->mtx_lock);
		if (info->thread_inheritor == NULL) {
			info->thread_inheritor = current_thread();
			lck_mtx_unlock(&info->mtx_lock);

			T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

			wait_threads(&info->synch, info->synch_value - 1);
			wait_for_waiters((struct synch_test_common *)info);
			max_pri = get_max_pri((struct synch_test_common *) info);
			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

			os_atomic_store(&info->synch, 0, relaxed);

			lck_mtx_lock(&info->mtx_lock);
			info->thread_inheritor = NULL;
			wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
			lck_mtx_unlock(&info->mtx_lock);
			continue;
		}

		read_random(&rand, sizeof(rand));
		mod_rand = rand % 2;

		wake_threads(&info->synch);
		switch (mod_rand) {
		case 0:
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		case 1:
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			break;
		default:
			panic("rand() mod 2 returned %u (random %u)", mod_rand, rand);
		}
	}

	/*
	 * spin here to stop using the lock as mutex
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	for (i = 0; i < 10; i++) {
		/* read_random might sleep so read it before acquiring the mtx as spin */
		read_random(&rand, sizeof(rand));

		lck_mtx_lock_spin(&info->mtx_lock);
		if (info->thread_inheritor == NULL) {
			info->thread_inheritor = current_thread();
			lck_mtx_unlock(&info->mtx_lock);

			T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);
			max_pri = get_max_pri((struct synch_test_common *) info);
			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

			lck_mtx_lock_spin(&info->mtx_lock);
			info->thread_inheritor = NULL;
			wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
			lck_mtx_unlock(&info->mtx_lock);
			continue;
		}

		mod_rand = rand % 2;
		switch (mod_rand) {
		case 0:
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		case 1:
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN_ALWAYS, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		default:
			panic("rand() mod 2 returned %u (random %u)", mod_rand, rand);
		}
	}
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
1652
1653static void
1654thread_rw_work(
1655 void *args,
1656 __unused wait_result_t wr)
1657{
1658 struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
1659 uint my_pri = current_thread()->sched_pri;
1660 int i;
1661 lck_rw_type_t type;
1662 u_int8_t rand;
1663 unsigned int mod_rand;
1664 uint max_pri;
1665
1666 T_LOG("Started thread pri %d %p", my_pri, current_thread());
1667
1668 for (i = 0; i < 10; i++) {
1669try_again:
1670 type = LCK_RW_TYPE_SHARED;
1671 lck_rw_lock(&info->rw_lock, type);
1672 if (info->thread_inheritor == NULL) {
1673 type = LCK_RW_TYPE_EXCLUSIVE;
1674
1675 if (lck_rw_lock_shared_to_exclusive(&info->rw_lock)) {
1676 if (info->thread_inheritor == NULL) {
1677 info->thread_inheritor = current_thread();
1678 lck_rw_unlock(&info->rw_lock, type);
1679 wait_threads(&info->synch, info->synch_value - 1);
1680
1681 T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
1682 wait_for_waiters((struct synch_test_common *)info);
1683 max_pri = get_max_pri((struct synch_test_common *) info);
1684 T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
1685
1686 os_atomic_store(&info->synch, 0, relaxed);
1687
1688 lck_rw_lock(&info->rw_lock, type);
1689 info->thread_inheritor = NULL;
1690 wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1691 lck_rw_unlock(&info->rw_lock, type);
1692 continue;
1693 }
1694 } else {
1695 goto try_again;
1696 }
1697 }
1698
1699 read_random(&rand, sizeof(rand));
1700 mod_rand = rand % 4;
1701
1702 wake_threads(&info->synch);
1703 switch (mod_rand) {
1704 case 0:
1705 lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1706 lck_rw_unlock(&info->rw_lock, type);
1707 break;
1708 case 1:
1709 lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1710 break;
1711 case 2:
1712 lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_SHARED, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1713 lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_SHARED);
1714 break;
1715 case 3:
1716 lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_EXCLUSIVE, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1717 lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
1718 break;
1719 default:
1720			panic("rand() mod 4 returned %u (random %u)", mod_rand, rand);
1721 }
1722 }
1723
1724 assert(current_thread()->kern_promotion_schedpri == 0);
1725 notify_waiter((struct synch_test_common *)info);
1726
1727 thread_terminate_self();
1728}
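
/*
 * Editor's note on the LCK_SLEEP_* cases above: LCK_SLEEP_DEFAULT
 * re-acquires the rw lock in the mode it was held in, while
 * LCK_SLEEP_SHARED / LCK_SLEEP_EXCLUSIVE force the reacquire mode, so
 * those three cases must unlock with the matching type on return;
 * LCK_SLEEP_UNLOCK returns with the lock already dropped, which is why
 * case 1 has no lck_rw_unlock().
 */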
1729
1730static void
1731test_sleep_with_wake_all(struct info_sleep_inheritor_test *info, int prim_type)
1732{
1733 info->prim_type = prim_type;
1734 info->synch = 0;
1735 info->synch_value = info->head.nthreads;
1736
1737 info->thread_inheritor = NULL;
1738
1739 start_threads((thread_continue_t)thread_just_inheritor_do_work, (struct synch_test_common *)info, TRUE);
1740 wait_all_thread((struct synch_test_common *)info);
1741}
1742
1743static void
1744test_sleep_with_wake_one(struct info_sleep_inheritor_test *info, int prim_type)
1745{
1746 info->prim_type = prim_type;
1747
1748 info->synch = 0;
1749 info->synch_value = info->head.nthreads;
1750 info->value = 0;
1751 info->handoff_failure = 0;
1752 info->thread_inheritor = NULL;
1753
1754 start_threads((thread_continue_t)thread_inheritor_like_mutex, (struct synch_test_common *)info, FALSE);
1755 wait_all_thread((struct synch_test_common *)info);
1756
1757 T_ASSERT(info->value == (int)info->head.nthreads, "value protected by sleep");
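	/*
	 * Exactly one handoff is expected to fail: the last thread to hand
	 * off finds nobody left sleeping on the event (KERN_NOT_WAITING).
	 */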
1758 T_ASSERT(info->handoff_failure == 1, "handoff failures");
1759}
1760
1761static void
1762test_change_sleep_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
1763{
1764 info->prim_type = prim_type;
1765
1766 info->thread_inheritor = NULL;
1767 info->steal_pri = 0;
1768 info->synch = 0;
1769 info->synch_value = info->head.nthreads;
1770
1771 start_threads((thread_continue_t)thread_steal_work, (struct synch_test_common *)info, FALSE);
1772 wait_all_thread((struct synch_test_common *)info);
1773}
1774
1775static void
1776test_no_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
1777{
1778 info->prim_type = prim_type;
1779 info->synch = 0;
1780 info->synch_value = info->head.nthreads;
1781
1782 info->thread_inheritor = NULL;
1783 info->value = info->head.nthreads;
1784
1785 start_threads((thread_continue_t)thread_no_inheritor_work, (struct synch_test_common *)info, FALSE);
1786 wait_all_thread((struct synch_test_common *)info);
1787}
1788
1789static void
1790test_rw_lock(struct info_sleep_inheritor_test *info)
1791{
1792 info->thread_inheritor = NULL;
1793 info->value = info->head.nthreads;
1794 info->synch = 0;
1795 info->synch_value = info->head.nthreads;
1796
1797 start_threads((thread_continue_t)thread_rw_work, (struct synch_test_common *)info, FALSE);
1798 wait_all_thread((struct synch_test_common *)info);
1799}
1800
1801static void
1802test_mtx_lock(struct info_sleep_inheritor_test *info)
1803{
1804 info->thread_inheritor = NULL;
1805 info->value = info->head.nthreads;
1806 info->synch = 0;
1807 info->synch_value = info->head.nthreads;
1808
1809 start_threads((thread_continue_t)thread_mtx_work, (struct synch_test_common *)info, FALSE);
1810 wait_all_thread((struct synch_test_common *)info);
1811}
1812
1813kern_return_t
1814ts_kernel_sleep_inheritor_test(void)
1815{
1816 struct info_sleep_inheritor_test info = {};
1817
1818 init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);
1819
1820 lck_attr_t* lck_attr = lck_attr_alloc_init();
1821 lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
1822 lck_grp_t* lck_grp = lck_grp_alloc_init("test sleep_inheritor", lck_grp_attr);
1823
1824 lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
1825 lck_rw_init(&info.rw_lock, lck_grp, lck_attr);
1826
1827 /*
1828 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
1829 */
1830 T_LOG("Testing mtx sleep with inheritor and wake_all_with_inheritor");
1831 test_sleep_with_wake_all(&info, MTX_LOCK);
1832
1833 /*
1834	 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
1835 */
1836 T_LOG("Testing rw sleep with inheritor and wake_all_with_inheritor");
1837 test_sleep_with_wake_all(&info, RW_LOCK);
1838
1839 /*
1840 * Testing lck_mtx_sleep_with_inheritor and wakeup_one_with_inheritor
1841 */
1842 T_LOG("Testing mtx sleep with inheritor and wake_one_with_inheritor");
1843 test_sleep_with_wake_one(&info, MTX_LOCK);
1844
1845 /*
1846 * Testing lck_rw_sleep_with_inheritor and wakeup_one_with_inheritor
1847 */
1848 T_LOG("Testing rw sleep with inheritor and wake_one_with_inheritor");
1849 test_sleep_with_wake_one(&info, RW_LOCK);
1850
1851 /*
1852 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
1853 * and change_sleep_inheritor
1854 */
1855	T_LOG("Testing change_sleep_inheritor with mtx sleep");
1856 test_change_sleep_inheritor(&info, MTX_LOCK);
1857
1858 /*
1859	 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
1860 * and change_sleep_inheritor
1861 */
1862 T_LOG("Testing change_sleep_inheritor with rw sleep");
1863 test_change_sleep_inheritor(&info, RW_LOCK);
1864
1865 /*
1866 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
1867 * with inheritor NULL
1868 */
1869	T_LOG("Testing inheritor NULL, mtx sleep");
1870 test_no_inheritor(&info, MTX_LOCK);
1871
1872 /*
1873	 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
1874 * with inheritor NULL
1875 */
1876	T_LOG("Testing inheritor NULL, rw sleep");
1877 test_no_inheritor(&info, RW_LOCK);
1878
1879 /*
1880 * Testing mtx locking combinations
1881 */
1882 T_LOG("Testing mtx locking combinations");
1883 test_mtx_lock(&info);
1884
1885 /*
1886 * Testing rw locking combinations
1887 */
1888 T_LOG("Testing rw locking combinations");
1889 test_rw_lock(&info);
1890
1891 destroy_synch_test_common((struct synch_test_common *)&info);
1892
1893 lck_attr_free(lck_attr);
1894 lck_grp_attr_free(lck_grp_attr);
1895 lck_rw_destroy(&info.rw_lock, lck_grp);
1896 lck_mtx_destroy(&info.mtx_lock, lck_grp);
1897 lck_grp_free(lck_grp);
1898
1899 return KERN_SUCCESS;
1900}
1901
1902static void
1903thread_gate_aggressive(
1904 void *args,
1905 __unused wait_result_t wr)
1906{
1907 struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
1908 uint my_pri = current_thread()->sched_pri;
1909
1910 T_LOG("Started thread pri %d %p", my_pri, current_thread());
1911
1912 primitive_lock(info);
1913 if (info->thread_inheritor == NULL) {
1914 info->thread_inheritor = current_thread();
1915 primitive_gate_assert(info, GATE_ASSERT_OPEN);
1916 primitive_gate_close(info);
1917 exclude_current_waiter((struct synch_test_common *)info);
1918
1919 primitive_unlock(info);
1920
1921 wait_threads(&info->synch, info->synch_value - 2);
1922 wait_for_waiters((struct synch_test_common *)info);
1923 T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
1924
1925 primitive_lock(info);
1926 if (info->thread_inheritor == current_thread()) {
1927 primitive_gate_open(info);
1928 }
1929 } else {
1930 if (info->steal_pri == 0) {
1931 info->steal_pri = my_pri;
1932 info->thread_inheritor = current_thread();
1933 primitive_gate_steal(info);
1934 exclude_current_waiter((struct synch_test_common *)info);
1935
1936 primitive_unlock(info);
1937 wait_threads(&info->synch, info->synch_value - 2);
1938
1939 T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
1940 wait_for_waiters((struct synch_test_common *)info);
1941 T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "gate keeper priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);
1942
1943 primitive_lock(info);
1944 primitive_gate_open(info);
1945 } else {
1946 if (my_pri > info->steal_pri) {
1947 info->steal_pri = my_pri;
1948 }
1949 wake_threads(&info->synch);
1950 primitive_gate_wait(info);
1951 exclude_current_waiter((struct synch_test_common *)info);
1952 }
1953 }
1954 primitive_unlock(info);
1955
1956 assert(current_thread()->kern_promotion_schedpri == 0);
1957 notify_waiter((struct synch_test_common *)info);
1958
1959 thread_terminate_self();
1960}
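
/*
 * Editor's note: thread_gate_aggressive() exercises gate stealing. The
 * first thread closes the gate and becomes the keeper; the first
 * contender records its priority and steals the gate instead of waiting,
 * while later contenders wait and push on it. The synch counter waits
 * for nthreads - 2 because the original keeper and the stealer exclude
 * themselves via exclude_current_waiter(). The stealer's T_ASSERT then
 * checks that it runs at the highest priority recorded among itself and
 * the later waiters.
 */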
1961
1962static void
1963thread_gate_like_mutex(
1964 void *args,
1965 __unused wait_result_t wr)
1966{
1967 gate_wait_result_t wait;
1968 kern_return_t ret;
1969 uint my_pri = current_thread()->sched_pri;
1970
1971 struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
1972
1973 T_LOG("Started thread pri %d %p", my_pri, current_thread());
1974
1975 /*
1976	 * busy-wait here so all threads start concurrently
1977 */
1978 wake_threads(&info->synch);
1979 wait_threads(&info->synch, info->synch_value);
1980
1981 primitive_lock(info);
1982
1983 if (primitive_gate_try_close(info) != KERN_SUCCESS) {
1984 wait = primitive_gate_wait(info);
1985 T_ASSERT(wait == GATE_HANDOFF, "gate_wait return");
1986 }
1987
1988 primitive_gate_assert(info, GATE_ASSERT_HELD);
1989
1990 primitive_unlock(info);
1991
1992 IOSleep(100);
1993 info->value++;
1994
1995 primitive_lock(info);
1996
1997 ret = primitive_gate_handoff(info, GATE_HANDOFF_DEFAULT);
1998 if (ret == KERN_NOT_WAITING) {
1999 T_ASSERT(info->handoff_failure == 0, "handoff failures");
2000 primitive_gate_handoff(info, GATE_HANDOFF_OPEN_IF_NO_WAITERS);
2001 info->handoff_failure++;
2002 }
2003
2004 primitive_unlock(info);
2005 notify_waiter((struct synch_test_common *)info);
2006
2007 thread_terminate_self();
2008}
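
/*
 * Editor's note -- the mutex-like gate protocol above, condensed (a
 * sketch reusing the helpers in this file, not a drop-in replacement):
 *
 *	primitive_lock(info);
 *	if (primitive_gate_try_close(info) != KERN_SUCCESS) {
 *		// gate already closed: wait, pushing on the keeper; a
 *		// GATE_HANDOFF result means ownership was handed directly to us
 *		wait = primitive_gate_wait(info);
 *	}
 *	primitive_unlock(info);
 *	// ... critical section protected by the gate, not by the lock ...
 *	primitive_lock(info);
 *	// hand the gate to a waiter; if there is none, open it instead
 *	if (primitive_gate_handoff(info, GATE_HANDOFF_DEFAULT) == KERN_NOT_WAITING)
 *		primitive_gate_handoff(info, GATE_HANDOFF_OPEN_IF_NO_WAITERS);
 *	primitive_unlock(info);
 */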
2009
2010static void
2011thread_just_one_do_work(
2012 void *args,
2013 __unused wait_result_t wr)
2014{
2015 struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
2016 uint my_pri = current_thread()->sched_pri;
2017 uint max_pri;
2018
2019 T_LOG("Started thread pri %d %p", my_pri, current_thread());
2020
2021 primitive_lock(info);
2022check_again:
2023 if (info->work_to_do) {
2024 if (primitive_gate_try_close(info) == KERN_SUCCESS) {
2025 primitive_gate_assert(info, GATE_ASSERT_HELD);
2026 primitive_unlock(info);
2027
2028 T_LOG("Thread pri %d acquired the gate %p", my_pri, current_thread());
2029 wait_threads(&info->synch, info->synch_value - 1);
2030 wait_for_waiters((struct synch_test_common *)info);
2031 max_pri = get_max_pri((struct synch_test_common *) info);
2032 T_ASSERT((uint) current_thread()->sched_pri == max_pri, "gate owner priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
2033 os_atomic_store(&info->synch, 0, relaxed);
2034
2035 primitive_lock(info);
2036 info->work_to_do = FALSE;
2037 primitive_gate_open(info);
2038 } else {
2039 primitive_gate_assert(info, GATE_ASSERT_CLOSED);
2040 wake_threads(&info->synch);
2041 primitive_gate_wait(info);
2042 goto check_again;
2043 }
2044 }
2045 primitive_unlock(info);
2046
2047 assert(current_thread()->kern_promotion_schedpri == 0);
2048 notify_waiter((struct synch_test_common *)info);
2049 thread_terminate_self();
2050}
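
/*
 * Editor's note: thread_just_one_do_work() measures gate "push". While
 * the gate is closed, every thread blocked in primitive_gate_wait()
 * donates its scheduling priority to the keeper, so the keeper's
 * T_ASSERT expects sched_pri to equal the maximum priority across all
 * waiting threads.
 */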
2051
2052static void
2053test_gate_push(struct info_sleep_inheritor_test *info, int prim_type)
2054{
2055 info->prim_type = prim_type;
2056
2057 primitive_gate_init(info);
2058 info->work_to_do = TRUE;
2059 info->synch = 0;
2060 info->synch_value = NUM_THREADS;
2061
2062 start_threads((thread_continue_t)thread_just_one_do_work, (struct synch_test_common *) info, TRUE);
2063 wait_all_thread((struct synch_test_common *)info);
2064
2065 primitive_gate_destroy(info);
2066}
2067
2068static void
2069test_gate_handoff(struct info_sleep_inheritor_test *info, int prim_type)
2070{
2071 info->prim_type = prim_type;
2072
2073 primitive_gate_init(info);
2074
2075 info->synch = 0;
2076 info->synch_value = NUM_THREADS;
2077 info->value = 0;
2078 info->handoff_failure = 0;
2079
2080	start_threads((thread_continue_t)thread_gate_like_mutex, (struct synch_test_common *)info, FALSE);
2081 wait_all_thread((struct synch_test_common *)info);
2082
2083 T_ASSERT(info->value == NUM_THREADS, "value protected by gate");
2084 T_ASSERT(info->handoff_failure == 1, "handoff failures");
2085
2086 primitive_gate_destroy(info);
2087}
2088
2089static void
2090test_gate_steal(struct info_sleep_inheritor_test *info, int prim_type)
2091{
2092 info->prim_type = prim_type;
2093
2094 primitive_gate_init(info);
2095
2096 info->synch = 0;
2097 info->synch_value = NUM_THREADS;
2098 info->thread_inheritor = NULL;
2099 info->steal_pri = 0;
2100
2101 start_threads((thread_continue_t)thread_gate_aggressive, (struct synch_test_common *)info, FALSE);
2102 wait_all_thread((struct synch_test_common *)info);
2103
2104 primitive_gate_destroy(info);
2105}
2106
2107kern_return_t
2108ts_kernel_gate_test(void)
2109{
2110 struct info_sleep_inheritor_test info = {};
2111
2112 T_LOG("Testing gate primitive");
2113
2114 init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);
2115
2116 lck_attr_t* lck_attr = lck_attr_alloc_init();
2117 lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
2118 lck_grp_t* lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);
2119
2120 lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
2121 lck_rw_init(&info.rw_lock, lck_grp, lck_attr);
2122
2123 /*
2124 * Testing the priority inherited by the keeper
2125 * lck_mtx_gate_try_close, lck_mtx_gate_open, lck_mtx_gate_wait
2126 */
2127 T_LOG("Testing gate push, lck");
2128 test_gate_push(&info, MTX_LOCK);
2129
2130 T_LOG("Testing gate push, rw");
2131 test_gate_push(&info, RW_LOCK);
2132
2133 /*
2134 * Testing the handoff
2135 * lck_mtx_gate_wait, lck_mtx_gate_handoff
2136 */
2137 T_LOG("Testing gate handoff, lck");
2138 test_gate_handoff(&info, MTX_LOCK);
2139
2140 T_LOG("Testing gate handoff, rw");
2141 test_gate_handoff(&info, RW_LOCK);
2142
2143 /*
2144 * Testing the steal
2145 * lck_mtx_gate_close, lck_mtx_gate_wait, lck_mtx_gate_steal, lck_mtx_gate_handoff
2146 */
2147 T_LOG("Testing gate steal, lck");
2148 test_gate_steal(&info, MTX_LOCK);
2149
2150 T_LOG("Testing gate steal, rw");
2151 test_gate_steal(&info, RW_LOCK);
2152
2153 destroy_synch_test_common((struct synch_test_common *)&info);
2154
2155 lck_attr_free(lck_attr);
2156 lck_grp_attr_free(lck_grp_attr);
	lck_rw_destroy(&info.rw_lock, lck_grp);
2157	lck_mtx_destroy(&info.mtx_lock, lck_grp);
2158 lck_grp_free(lck_grp);
2159
2160 return KERN_SUCCESS;
2161}
2162
2163#define NUM_THREAD_CHAIN 6
2164
2165struct turnstile_chain_test {
2166 struct synch_test_common head;
2167 lck_mtx_t mtx_lock;
2168 int synch_value;
2169 int synch;
2170 int synch2;
2171 gate_t gates[NUM_THREAD_CHAIN];
2172};
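
/*
 * Editor's note: one gate per thread. The chain tests below link thread
 * i to thread i - 1 through gates[] and/or sleep events, so a priority
 * push entering at the tail must propagate through every turnstile in
 * the chain down to thread 0.
 */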
2173
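/*
 * Editor's note: mixed chain. Even-indexed threads close gates[i] and
 * later sleep with lck_mtx_sleep_with_inheritor() naming the previous
 * thread as inheritor; odd-indexed threads block in lck_mtx_gate_wait()
 * on the gate closed by the even thread before them. The turnstile chain
 * therefore alternates gate and sleep links, and each link must still
 * convey max_pri.
 */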
2174static void
2175thread_sleep_gate_chain_work(
2176 void *args,
2177 __unused wait_result_t wr)
2178{
2179 struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
2180 thread_t self = current_thread();
2181 uint my_pri = self->sched_pri;
2182 uint max_pri;
2183 uint i;
2184 thread_t inheritor = NULL, woken_up;
2185 event_t wait_event, wake_event;
2186 kern_return_t ret;
2187
2188 T_LOG("Started thread pri %d %p", my_pri, self);
2189
2190 /*
2191	 * We need the thread ids below; wait until all of them are populated.
2192 */
2193
2194 while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
2195 IOSleep(10);
2196 }
2197
2198 max_pri = get_max_pri((struct synch_test_common *) info);
2199
2200 for (i = 0; i < info->head.nthreads; i = i + 2) {
2201		// even-indexed threads each close a gate
2202 if (info->head.threads[i] == self) {
2203 lck_mtx_lock(&info->mtx_lock);
2204 lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
2205 lck_mtx_unlock(&info->mtx_lock);
2206 break;
2207 }
2208 }
2209
2210 wake_threads(&info->synch2);
2211 wait_threads(&info->synch2, info->synch_value);
2212
2213 if (self == os_atomic_load(&info->head.threads[0], acquire)) {
2214 wait_threads(&info->synch, info->synch_value - 1);
2215 wait_for_waiters((struct synch_test_common *)info);
2216
2217 T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);
2218
2219 lck_mtx_lock(&info->mtx_lock);
2220 lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
2221 lck_mtx_unlock(&info->mtx_lock);
2222 } else {
2223 wait_event = NULL;
2224 wake_event = NULL;
2225 for (i = 0; i < info->head.nthreads; i++) {
2226 if (info->head.threads[i] == self) {
2227 inheritor = info->head.threads[i - 1];
2228 wait_event = (event_t) &info->head.threads[i - 1];
2229 wake_event = (event_t) &info->head.threads[i];
2230 break;
2231 }
2232 }
2233 assert(wait_event != NULL);
2234
2235 lck_mtx_lock(&info->mtx_lock);
2236 wake_threads(&info->synch);
2237
2238 if (i % 2 != 0) {
2239 lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
2240 T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);
2241
2242 ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
2243 if (ret == KERN_SUCCESS) {
2244 T_ASSERT(i != (info->head.nthreads - 1), "thread id");
2245 T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
2246 } else {
2247 T_ASSERT(i == (info->head.nthreads - 1), "thread id");
2248 }
2249
2250			// I am still the inheritor; wake all to drop the inheritorship
2251 ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
2252 T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
2253 } else {
2254 // I previously closed a gate
2255 lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
2256 T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);
2257
2258 lck_mtx_lock(&info->mtx_lock);
2259 lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
2260 lck_mtx_unlock(&info->mtx_lock);
2261 }
2262 }
2263
2264 assert(current_thread()->kern_promotion_schedpri == 0);
2265 notify_waiter((struct synch_test_common *)info);
2266
2267 thread_terminate_self();
2268}
2269
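/*
 * Editor's note: pure gate chain. Every thread closes its own gates[i];
 * all but thread 0 then block in lck_mtx_gate_wait() on gates[i - 1],
 * pushing on the previous thread, and open their own gate once woken,
 * releasing the next link.
 */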
2270static void
2271thread_gate_chain_work(
2272 void *args,
2273 __unused wait_result_t wr)
2274{
2275 struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
2276 thread_t self = current_thread();
2277 uint my_pri = self->sched_pri;
2278 uint max_pri;
2279 uint i;
2280 T_LOG("Started thread pri %d %p", my_pri, self);
2281
2283	/*
2284	 * We need the thread ids below; wait until all of them are populated.
2285	 */
2286 while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
2287 IOSleep(10);
2288 }
2289
2290 max_pri = get_max_pri((struct synch_test_common *) info);
2291
2292 for (i = 0; i < info->head.nthreads; i++) {
2293 if (info->head.threads[i] == self) {
2294 lck_mtx_lock(&info->mtx_lock);
2295 lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
2296 lck_mtx_unlock(&info->mtx_lock);
2297 break;
2298 }
2299 }
2300 assert(i != info->head.nthreads);
2301
2302 wake_threads(&info->synch2);
2303 wait_threads(&info->synch2, info->synch_value);
2304
2305 if (self == os_atomic_load(&info->head.threads[0], acquire)) {
2306 wait_threads(&info->synch, info->synch_value - 1);
2307
2308 wait_for_waiters((struct synch_test_common *)info);
2309
2310 T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);
2311
2312 lck_mtx_lock(&info->mtx_lock);
2313 lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
2314 lck_mtx_unlock(&info->mtx_lock);
2315 } else {
2316 lck_mtx_lock(&info->mtx_lock);
2317 wake_threads(&info->synch);
2318 lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
2319
2320 T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);
2321
2322 lck_mtx_lock(&info->mtx_lock);
2323 lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
2324 lck_mtx_unlock(&info->mtx_lock);
2325 }
2326
2327 assert(current_thread()->kern_promotion_schedpri == 0);
2328 notify_waiter((struct synch_test_common *)info);
2329
2330 thread_terminate_self();
2331}
2332
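/*
 * Editor's note: pure sleep chain. Thread i sleeps on an event naming
 * thread i - 1 as inheritor, so the push from the highest-priority
 * thread travels link by link down to thread 0; wakeups then cascade
 * back up via wakeup_one_with_inheritor().
 */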
2333static void
2334thread_sleep_chain_work(
2335 void *args,
2336 __unused wait_result_t wr)
2337{
2338 struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
2339 thread_t self = current_thread();
2340 uint my_pri = self->sched_pri;
2341 uint max_pri;
2342 event_t wait_event, wake_event;
2343 uint i;
2344 thread_t inheritor = NULL, woken_up = NULL;
2345 kern_return_t ret;
2346
2347 T_LOG("Started thread pri %d %p", my_pri, self);
2348
2349 /*
2350	 * We need the thread ids below; wait until all of them are populated.
2351 */
2352 while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
2353 IOSleep(10);
2354 }
2355
2356 max_pri = get_max_pri((struct synch_test_common *) info);
2357
2358 if (self == os_atomic_load(&info->head.threads[0], acquire)) {
2359 wait_threads(&info->synch, info->synch_value - 1);
2360
2361 wait_for_waiters((struct synch_test_common *)info);
2362
2363 T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);
2364
2365 ret = wakeup_one_with_inheritor((event_t) &info->head.threads[0], THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
2366 T_ASSERT(ret == KERN_SUCCESS, "wakeup_one_with_inheritor woke next");
2367 T_ASSERT(woken_up == info->head.threads[1], "thread woken up");
2368
2369		// I am still the inheritor; wake all to drop the inheritorship
2370 ret = wakeup_all_with_inheritor((event_t) &info->head.threads[0], LCK_WAKE_DEFAULT);
2371 T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
2372 } else {
2373 wait_event = NULL;
2374 wake_event = NULL;
2375 for (i = 0; i < info->head.nthreads; i++) {
2376 if (info->head.threads[i] == self) {
2377 inheritor = info->head.threads[i - 1];
2378 wait_event = (event_t) &info->head.threads[i - 1];
2379 wake_event = (event_t) &info->head.threads[i];
2380 break;
2381 }
2382 }
2383
2384 assert(wait_event != NULL);
2385 lck_mtx_lock(&info->mtx_lock);
2386 wake_threads(&info->synch);
2387
2388 lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
2389
2390 T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);
2391
2392 ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
2393 if (ret == KERN_SUCCESS) {
2394 T_ASSERT(i != (info->head.nthreads - 1), "thread id");
2395 T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
2396 } else {
2397 T_ASSERT(i == (info->head.nthreads - 1), "thread id");
2398 }
2399
2400		// I am still the inheritor; wake all to drop the inheritorship
2401 ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
2402 T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
2403 }
2404
2405 assert(current_thread()->kern_promotion_schedpri == 0);
2406 notify_waiter((struct synch_test_common *)info);
2407
2408 thread_terminate_self();
2409}
2410
2411static void
2412test_sleep_chain(struct turnstile_chain_test *info)
2413{
2414 info->synch = 0;
2415 info->synch_value = info->head.nthreads;
2416
2417 start_threads((thread_continue_t)thread_sleep_chain_work, (struct synch_test_common *)info, FALSE);
2418 wait_all_thread((struct synch_test_common *)info);
2419}
2420
2421static void
2422test_gate_chain(struct turnstile_chain_test *info)
2423{
2424 info->synch = 0;
2425 info->synch2 = 0;
2426 info->synch_value = info->head.nthreads;
2427
2428 start_threads((thread_continue_t)thread_gate_chain_work, (struct synch_test_common *)info, FALSE);
2429 wait_all_thread((struct synch_test_common *)info);
2430}
2431
2432static void
2433test_sleep_gate_chain(struct turnstile_chain_test *info)
2434{
2435 info->synch = 0;
2436 info->synch2 = 0;
2437 info->synch_value = info->head.nthreads;
2438
2439 start_threads((thread_continue_t)thread_sleep_gate_chain_work, (struct synch_test_common *)info, FALSE);
2440 wait_all_thread((struct synch_test_common *)info);
2441}
2442
2443kern_return_t
2444ts_kernel_turnstile_chain_test(void)
2445{
2446 struct turnstile_chain_test info = {};
2447 int i;
2448
2449 init_synch_test_common((struct synch_test_common *)&info, NUM_THREAD_CHAIN);
2450 lck_attr_t* lck_attr = lck_attr_alloc_init();
2451 lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
2452 lck_grp_t* lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);
2453
2454 lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
2455 for (i = 0; i < NUM_THREAD_CHAIN; i++) {
2456 lck_mtx_gate_init(&info.mtx_lock, &info.gates[i]);
2457 }
2458
2459 T_LOG("Testing sleep chain, lck");
2460 test_sleep_chain(&info);
2461
2462 T_LOG("Testing gate chain, lck");
2463 test_gate_chain(&info);
2464
2465 T_LOG("Testing sleep and gate chain, lck");
2466 test_sleep_gate_chain(&info);
2467
2468 destroy_synch_test_common((struct synch_test_common *)&info);
2469 for (i = 0; i < NUM_THREAD_CHAIN; i++) {
2470 lck_mtx_gate_destroy(&info.mtx_lock, &info.gates[i]);
2471 }
2472 lck_attr_free(lck_attr);
2473 lck_grp_attr_free(lck_grp_attr);
2474 lck_mtx_destroy(&info.mtx_lock, lck_grp);
2475 lck_grp_free(lck_grp);
2476
2477 return KERN_SUCCESS;
2478}
2479
2480kern_return_t
2481ts_kernel_timingsafe_bcmp_test(void)
2482{
2483 int i, buf_size;
2484 char *buf = NULL;
2485
2486 // empty
2487 T_ASSERT(timingsafe_bcmp(NULL, NULL, 0) == 0, NULL);
2488 T_ASSERT(timingsafe_bcmp("foo", "foo", 0) == 0, NULL);
2489 T_ASSERT(timingsafe_bcmp("foo", "bar", 0) == 0, NULL);
2490
2491 // equal
2492 T_ASSERT(timingsafe_bcmp("foo", "foo", strlen("foo")) == 0, NULL);
2493
2494 // unequal
2495 T_ASSERT(timingsafe_bcmp("foo", "bar", strlen("foo")) == 1, NULL);
2496 T_ASSERT(timingsafe_bcmp("foo", "goo", strlen("foo")) == 1, NULL);
2497 T_ASSERT(timingsafe_bcmp("foo", "fpo", strlen("foo")) == 1, NULL);
2498 T_ASSERT(timingsafe_bcmp("foo", "fop", strlen("foo")) == 1, NULL);
2499
2500 // all possible bitwise differences
2501 for (i = 1; i < 256; i += 1) {
2502 unsigned char a = 0;
2503 unsigned char b = (unsigned char)i;
2504
2505 T_ASSERT(timingsafe_bcmp(&a, &b, sizeof(a)) == 1, NULL);
2506 }
2507
2508 // large
2509 buf_size = 1024 * 16;
2510 buf = kalloc(buf_size);
2511 T_EXPECT_NOTNULL(buf, "kalloc of buf");
2512
2513 read_random(buf, buf_size);
2514 T_ASSERT(timingsafe_bcmp(buf, buf, buf_size) == 0, NULL);
2515 T_ASSERT(timingsafe_bcmp(buf, buf + 1, buf_size - 1) == 1, NULL);
2516 T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 1, NULL);
2517
2518 memcpy(buf + 128, buf, 128);
2519 T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 0, NULL);
2520
2521 kfree(buf, buf_size);
2522
2523 return KERN_SUCCESS;
2524}
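
/*
 * Editor's note -- the contract exercised above: timingsafe_bcmp()
 * returns 0 iff the buffers are equal and 1 otherwise, in time that
 * depends only on the length. A minimal constant-time compare looks
 * like this (illustrative sketch, not the kernel's implementation):
 *
 *	int
 *	ct_bcmp(const void *a, const void *b, size_t n)
 *	{
 *		const unsigned char *pa = a, *pb = b;
 *		unsigned char acc = 0;
 *		size_t i;
 *
 *		for (i = 0; i < n; i++) {
 *			acc |= pa[i] ^ pb[i];	// accumulate differences, never branch
 *		}
 *		return acc != 0;
 *	}
 */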
2525
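/*
 * Editor's note: exercises the C99 'h' and 'hh' length modifiers in the
 * kernel's printf: %hx prints an unsigned short, %hhx a single byte (the
 * chars below print as their ASCII codes, e.g. 'h' is 0x68), and %llx a
 * 64-bit value.
 */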
2526kern_return_t
2527kprintf_hhx_test(void)
2528{
2529 printf("POST hhx test %hx%hx%hx%hx %hhx%hhx%hhx%hhx - %llx",
2530 (unsigned short)0xfeed, (unsigned short)0xface,
2531 (unsigned short)0xabad, (unsigned short)0xcafe,
2532 (unsigned char)'h', (unsigned char)'h', (unsigned char)'x',
2533 (unsigned char)'!',
2534 0xfeedfaceULL);
2535 return KERN_SUCCESS;
2536}