/*
 * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/host.h>
#include <kern/macro_help.h>
#include <kern/sched.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/thread_call.h>
#include <kern/zalloc_internal.h>
#include <kern/kalloc.h>
#include <tests/ktest.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <kern/kern_cdata.h>
#include <machine/lowglobals.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_protos.h>
#include <string.h>

#if !(DEVELOPMENT || DEBUG)
#error "Testing is not enabled on RELEASE configurations"
#endif

#include <tests/xnupost.h>

extern boolean_t get_range_bounds(char * c, int64_t * lower, int64_t * upper);
__private_extern__ void qsort(void * a, size_t n, size_t es, int (*cmp)(const void *, const void *));

uint32_t total_post_tests_count = 0;
void xnupost_reset_panic_widgets(void);

/* test declarations */
kern_return_t zalloc_test(void);
kern_return_t RandomULong_test(void);
kern_return_t kcdata_api_test(void);
kern_return_t ts_kernel_primitive_test(void);
kern_return_t ts_kernel_sleep_inheritor_test(void);
kern_return_t ts_kernel_gate_test(void);
kern_return_t ts_kernel_turnstile_chain_test(void);
kern_return_t ts_kernel_timingsafe_bcmp_test(void);

#if __ARM_VFP__
extern kern_return_t vfp_state_test(void);
#endif

extern kern_return_t kprintf_hhx_test(void);

#if defined(__arm__) || defined(__arm64__)
kern_return_t pmap_coredump_test(void);
#endif

extern kern_return_t console_serial_test(void);
extern kern_return_t console_serial_alloc_rel_tests(void);
extern kern_return_t console_serial_parallel_log_tests(void);
extern kern_return_t test_os_log(void);
extern kern_return_t test_os_log_parallel(void);
extern kern_return_t bitmap_post_test(void);
extern kern_return_t counter_tests(void);

#ifdef __arm64__
extern kern_return_t arm64_munger_test(void);
extern kern_return_t ex_cb_test(void);
#if __ARM_PAN_AVAILABLE__
extern kern_return_t arm64_pan_test(void);
#endif
#if defined(HAS_APPLE_PAC)
extern kern_return_t arm64_ropjop_test(void);
#endif /* defined(HAS_APPLE_PAC) */
#endif /* __arm64__ */

extern kern_return_t test_thread_call(void);


struct xnupost_panic_widget xt_panic_widgets = {
	.xtp_context_p = NULL,
	.xtp_outval_p = NULL,
	.xtp_func_name = NULL,
	.xtp_func = NULL
};

struct xnupost_test kernel_post_tests[] = {
	XNUPOST_TEST_CONFIG_BASIC(zalloc_test),
	XNUPOST_TEST_CONFIG_BASIC(RandomULong_test),
	XNUPOST_TEST_CONFIG_BASIC(test_os_log),
	XNUPOST_TEST_CONFIG_BASIC(test_os_log_parallel),
#ifdef __arm64__
	XNUPOST_TEST_CONFIG_BASIC(arm64_munger_test),
	XNUPOST_TEST_CONFIG_BASIC(ex_cb_test),
#if __ARM_PAN_AVAILABLE__
	XNUPOST_TEST_CONFIG_BASIC(arm64_pan_test),
#endif
#if defined(HAS_APPLE_PAC)
	XNUPOST_TEST_CONFIG_BASIC(arm64_ropjop_test),
#endif /* defined(HAS_APPLE_PAC) */
#endif /* __arm64__ */
	XNUPOST_TEST_CONFIG_BASIC(kcdata_api_test),
	XNUPOST_TEST_CONFIG_BASIC(console_serial_test),
	XNUPOST_TEST_CONFIG_BASIC(console_serial_alloc_rel_tests),
	XNUPOST_TEST_CONFIG_BASIC(console_serial_parallel_log_tests),
#if defined(__arm__) || defined(__arm64__)
	XNUPOST_TEST_CONFIG_BASIC(pmap_coredump_test),
#endif
	XNUPOST_TEST_CONFIG_BASIC(bitmap_post_test),
	//XNUPOST_TEST_CONFIG_TEST_PANIC(kcdata_api_assert_tests)
	XNUPOST_TEST_CONFIG_BASIC(test_thread_call),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_primitive_test),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_sleep_inheritor_test),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_gate_test),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_turnstile_chain_test),
	XNUPOST_TEST_CONFIG_BASIC(ts_kernel_timingsafe_bcmp_test),
	XNUPOST_TEST_CONFIG_BASIC(kprintf_hhx_test),
#if __ARM_VFP__
	XNUPOST_TEST_CONFIG_BASIC(vfp_state_test),
#endif
	XNUPOST_TEST_CONFIG_BASIC(vm_tests),
	XNUPOST_TEST_CONFIG_BASIC(counter_tests)
};

uint32_t kernel_post_tests_count = sizeof(kernel_post_tests) / sizeof(xnupost_test_data_t);

#define POSTARGS_RUN_TESTS 0x1
#define POSTARGS_CONTROLLER_AVAILABLE 0x2
#define POSTARGS_CUSTOM_TEST_RUNLIST 0x4
uint64_t kernel_post_args = 0x0;

/* static variables to hold state */
static kern_return_t parse_config_retval = KERN_INVALID_CAPABILITY;
static char kernel_post_test_configs[256];
boolean_t xnupost_should_run_test(uint32_t test_num);

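/*
 * Parse the kernPOST boot-args once and cache the result.
 * As an illustrative (not prescriptive) example, booting with
 *     kernPOST=0x3 kernPOST_config="1-4,12"
 * would set POSTARGS_RUN_TESTS and POSTARGS_CONTROLLER_AVAILABLE, and
 * restrict the run to tests 1-4 and 12 via the custom run list.
 */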
kern_return_t
xnupost_parse_config()
{
	if (parse_config_retval != KERN_INVALID_CAPABILITY) {
		return parse_config_retval;
	}
	PE_parse_boot_argn("kernPOST", &kernel_post_args, sizeof(kernel_post_args));

	if (PE_parse_boot_argn("kernPOST_config", &kernel_post_test_configs[0], sizeof(kernel_post_test_configs)) == TRUE) {
		kernel_post_args |= POSTARGS_CUSTOM_TEST_RUNLIST;
	}

	if (kernel_post_args != 0) {
		parse_config_retval = KERN_SUCCESS;
		goto out;
	}
	parse_config_retval = KERN_NOT_SUPPORTED;
out:
	return parse_config_retval;
}

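/*
 * With no custom run list every test runs; otherwise a test runs only
 * if its number falls inside one of the comma-separated ranges of
 * kernel_post_test_configs, as parsed by get_range_bounds().
 */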
boolean_t
xnupost_should_run_test(uint32_t test_num)
{
	if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
		int64_t begin = 0, end = 999999;
		char * b = kernel_post_test_configs;
		while (*b) {
			get_range_bounds(b, &begin, &end);
			if (test_num >= begin && test_num <= end) {
				return TRUE;
			}

			/* skip to the next "," */
			while (*b != ',') {
				if (*b == '\0') {
					return FALSE;
				}
				b++;
			}
			/* skip past the ',' */
			b++;
		}
		return FALSE;
	}
	return TRUE;
}

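/*
 * Assign table-of-contents numbers to tests that do not have one yet
 * and, when a custom run list is active, mark every test
 * XT_CONFIG_IGNORE except those selected, which are set to
 * XT_CONFIG_RUN.
 */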
kern_return_t
xnupost_list_tests(xnupost_test_t test_list, uint32_t test_count)
{
	if (KERN_SUCCESS != xnupost_parse_config()) {
		return KERN_FAILURE;
	}

	xnupost_test_t testp;
	for (uint32_t i = 0; i < test_count; i++) {
		testp = &test_list[i];
		if (testp->xt_test_num == 0) {
			assert(total_post_tests_count < UINT16_MAX);
			testp->xt_test_num = (uint16_t)++total_post_tests_count;
		}
		/* make sure the boot-arg based test run list is honored */
		if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
			testp->xt_config |= XT_CONFIG_IGNORE;
			if (xnupost_should_run_test(testp->xt_test_num)) {
				testp->xt_config &= ~(XT_CONFIG_IGNORE);
				testp->xt_config |= XT_CONFIG_RUN;
				printf("\n[TEST] #%u is marked to run", testp->xt_test_num);
			}
		}
		printf("\n[TEST] TOC#%u name: %s expected: %d config: %x\n", testp->xt_test_num, testp->xt_name, testp->xt_expected_retval,
		    testp->xt_config);
	}

	return KERN_SUCCESS;
}

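/*
 * Run each test in the list, recording begin/end timestamps and the
 * T_TESTRESULT outcome. Tests that expect a panic are skipped unless a
 * controller is attached, and XT_CONFIG_IGNORE entries are skipped
 * outright.
 */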
kern_return_t
xnupost_run_tests(xnupost_test_t test_list, uint32_t test_count)
{
	uint32_t i = 0;
	int retval = KERN_SUCCESS;

	if ((kernel_post_args & POSTARGS_RUN_TESTS) == 0) {
		printf("No POST boot-arg set.\n");
		return retval;
	}

	T_START;
	xnupost_test_t testp;
	for (; i < test_count; i++) {
		xnupost_reset_panic_widgets();
		testp = &test_list[i];
		T_BEGIN(testp->xt_name);
		testp->xt_begin_time = mach_absolute_time();
		testp->xt_end_time = testp->xt_begin_time;

		/*
		 * If a test is designed to panic and the controller
		 * is not available, mark it as SKIPPED.
		 */
		if ((testp->xt_config & XT_CONFIG_EXPECT_PANIC) && !(kernel_post_args & POSTARGS_CONTROLLER_AVAILABLE)) {
			T_SKIP(
				"Test expects panic but "
				"no controller is present");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		if ((testp->xt_config & XT_CONFIG_IGNORE)) {
			T_SKIP("Test is marked as XT_CONFIG_IGNORE");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		testp->xt_func();
		T_END;
		testp->xt_retval = T_TESTRESULT;
		testp->xt_end_time = mach_absolute_time();
		if (testp->xt_retval == testp->xt_expected_retval) {
			testp->xt_test_actions = XT_ACTION_PASSED;
		} else {
			testp->xt_test_actions = XT_ACTION_FAILED;
		}
	}
	T_FINISH;
	return retval;
}

kern_return_t
kernel_list_tests()
{
	return xnupost_list_tests(kernel_post_tests, kernel_post_tests_count);
}

kern_return_t
kernel_do_post()
{
	return xnupost_run_tests(kernel_post_tests, kernel_post_tests_count);
}

kern_return_t
xnupost_register_panic_widget(xt_panic_widget_func funcp, const char * funcname, void * context, void ** outval)
{
	if (xt_panic_widgets.xtp_context_p != NULL || xt_panic_widgets.xtp_func != NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}

	xt_panic_widgets.xtp_context_p = context;
	xt_panic_widgets.xtp_func = funcp;
	xt_panic_widgets.xtp_func_name = funcname;
	xt_panic_widgets.xtp_outval_p = outval;

	return KERN_SUCCESS;
}

void
xnupost_reset_panic_widgets()
{
	bzero(&xt_panic_widgets, sizeof(xt_panic_widgets));
}

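/*
 * Called on the panic/debugger path. If a panic widget is registered,
 * let it classify the panic string: XT_RET_W_* results return
 * KERN_SUCCESS so execution resumes past the panic, while XT_PANIC_*
 * and unrelated results return KERN_FAILURE and continue into kdb_stop.
 */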
kern_return_t
xnupost_process_kdb_stop(const char * panic_s)
{
	xt_panic_return_t retval = 0;
	struct xnupost_panic_widget * pw = &xt_panic_widgets;
	const char * name = "unknown";
	if (xt_panic_widgets.xtp_func_name) {
		name = xt_panic_widgets.xtp_func_name;
	}

	/* bail out early if kernPOST is not set */
	if (kernel_post_args == 0) {
		return KERN_INVALID_CAPABILITY;
	}

	if (xt_panic_widgets.xtp_func) {
		T_LOG("%s: Calling out to widget: %s", __func__, xt_panic_widgets.xtp_func_name);
		retval = pw->xtp_func(panic_s, pw->xtp_context_p, pw->xtp_outval_p);
	} else {
		return KERN_INVALID_CAPABILITY;
	}

	switch (retval) {
	case XT_RET_W_SUCCESS:
		T_EXPECT_EQ_INT(retval, XT_RET_W_SUCCESS, "%s reported successful handling. Returning from kdb_stop.", name);
		/* KERN_SUCCESS means return from panic/assertion */
		return KERN_SUCCESS;

	case XT_RET_W_FAIL:
		T_FAIL("%s reported XT_RET_W_FAIL: Returning from kdb_stop", name);
		return KERN_SUCCESS;

	case XT_PANIC_W_FAIL:
		T_FAIL("%s reported XT_PANIC_W_FAIL: Continuing to kdb_stop", name);
		return KERN_FAILURE;

	case XT_PANIC_W_SUCCESS:
		T_EXPECT_EQ_INT(retval, XT_PANIC_W_SUCCESS, "%s reported successful testcase. But continuing to kdb_stop.", name);
		return KERN_FAILURE;

	case XT_PANIC_UNRELATED:
	default:
		T_LOG("UNRELATED: Continuing to kdb_stop.");
		return KERN_FAILURE;
	}
}

xt_panic_return_t
_xt_generic_assert_check(const char * s, void * str_to_match, void ** outval)
{
	xt_panic_return_t ret = XT_PANIC_UNRELATED;

	if (NULL != strnstr(__DECONST(char *, s), (char *)str_to_match, strlen(s))) {
		T_LOG("%s: kdb_stop string: '%s' MATCHED string: '%s'", __func__, s, (char *)str_to_match);
		ret = XT_RET_W_SUCCESS;
	}

	if (outval) {
		*outval = (void *)(uintptr_t)ret;
	}
	return ret;
}

kern_return_t
xnupost_reset_tests(xnupost_test_t test_list, uint32_t test_count)
{
	uint32_t i = 0;
	xnupost_test_t testp;
	for (; i < test_count; i++) {
		testp = &test_list[i];
		testp->xt_begin_time = 0;
		testp->xt_end_time = 0;
		testp->xt_test_actions = XT_ACTION_NONE;
		testp->xt_retval = -1;
	}
	return KERN_SUCCESS;
}

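/*
 * Smoke test for zone allocation: create a destructible zone of
 * uint64_t elements, allocate and free a single element, and emit a
 * sample T_PERF datapoint.
 */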
kern_return_t
zalloc_test(void)
{
	zone_t test_zone;
	void * test_ptr;

	T_SETUPBEGIN;
	test_zone = zone_create("test_uint64_zone", sizeof(uint64_t),
	    ZC_DESTRUCTIBLE);
	T_ASSERT_NOTNULL(test_zone, NULL);

	T_ASSERT_EQ_INT(test_zone->z_elems_free, 0, NULL);
	T_SETUPEND;

	T_ASSERT_NOTNULL(test_ptr = zalloc(test_zone), NULL);

	zfree(test_zone, test_ptr);

	/* A sample report for perfdata */
	T_PERF("num_threads_at_ktest", threads_count, "count", "# of threads in system at zalloc_test");

	return KERN_SUCCESS;
}

/*
 * Function used for comparison by qsort()
 */
static int
compare_numbers_ascending(const void * a, const void * b)
{
	const uint64_t x = *(const uint64_t *)a;
	const uint64_t y = *(const uint64_t *)b;
	if (x < y) {
		return -1;
	} else if (x > y) {
		return 1;
	} else {
		return 0;
	}
}

/*
 * Count the number of bits set in a 64-bit value, using the
 * compiler's popcount builtin.
 */
static int
count_bits(uint64_t number)
{
	return __builtin_popcountll(number);
}

kern_return_t
RandomULong_test()
{
/*
 * Randomness test for RandomULong()
 *
 * This test verifies that:
 * a. RandomULong works
 * b. The generated numbers match the following entropy criteria:
 *    For a thousand iterations, verify:
 *    1. mean entropy > 12 bits
 *    2. min entropy > 4 bits
 *    3. No duplicates
 *    4. No incremental/decremental pattern in a window of 3
 *    5. No zero
 *    6. No -1
 *
 * <rdar://problem/22526137> Add test to increase code coverage for /dev/random
 */

#define CONF_MIN_ENTROPY 4
#define CONF_MEAN_ENTROPY 12
#define CONF_ITERATIONS 1000
#define CONF_WINDOW_SIZE 3
#define CONF_WINDOW_TREND_LIMIT ((CONF_WINDOW_SIZE / 2) + (CONF_WINDOW_SIZE & 1)) >> 0

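	/*
	 * Note: CONF_WINDOW_TREND_LIMIT works out to ceil(CONF_WINDOW_SIZE / 2)
	 * (integer half, plus one when the size is odd); the trailing ">> 0"
	 * is a no-op.
	 */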
	int i;
	uint32_t min_bit_entropy, max_bit_entropy, bit_entropy;
	uint32_t aggregate_bit_entropy = 0;
	uint32_t mean_bit_entropy = 0;
	uint64_t numbers[CONF_ITERATIONS];
	min_bit_entropy = UINT32_MAX;
	max_bit_entropy = 0;

	/*
	 * TEST 1: Number generation and basic validation
	 * Check for zero (no bits set), -1 (all bits set) and errors
	 */
	for (i = 0; i < CONF_ITERATIONS; i++) {
		read_random(&numbers[i], sizeof(numbers[i]));
		if (numbers[i] == 0) {
			T_ASSERT_NE_ULLONG(numbers[i], 0, "read_random returned zero value.");
		}
		if (numbers[i] == UINT64_MAX) {
			T_ASSERT_NE_ULLONG(numbers[i], UINT64_MAX, "read_random returned -1.");
		}
	}
	T_PASS("Generated %d non-zero random numbers with at least one bit reset.", CONF_ITERATIONS);

	/*
	 * TEST 2: Mean and Min Bit Entropy
	 * Check the bit entropy and its mean over the generated numbers.
	 */
	for (i = 1; i < CONF_ITERATIONS; i++) {
		bit_entropy = count_bits(numbers[i - 1] ^ numbers[i]);
		if (bit_entropy < min_bit_entropy) {
			min_bit_entropy = bit_entropy;
		}
		if (bit_entropy > max_bit_entropy) {
			max_bit_entropy = bit_entropy;
		}

		if (bit_entropy < CONF_MIN_ENTROPY) {
			T_EXPECT_GE_UINT(bit_entropy, CONF_MIN_ENTROPY,
			    "Number of differing bits in consecutive numbers does not satisfy the min criteria.");
		}

		aggregate_bit_entropy += bit_entropy;
	}
	T_PASS("Passed the min bit entropy expectation of %d bits", CONF_MIN_ENTROPY);

	mean_bit_entropy = aggregate_bit_entropy / CONF_ITERATIONS;
	T_EXPECT_GE_UINT(mean_bit_entropy, CONF_MEAN_ENTROPY, "Test criteria for mean number of differing bits.");
	T_PASS("Mean bit entropy criteria satisfied (Required %d, Actual: %d).", CONF_MEAN_ENTROPY, mean_bit_entropy);
	T_LOG("{PERFORMANCE} iterations: %d, min_bit_entropy: %d, mean_bit_entropy: %d, max_bit_entropy: %d", CONF_ITERATIONS,
	    min_bit_entropy, mean_bit_entropy, max_bit_entropy);
	T_PERF("min_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), min_bit_entropy, "bits", "minimum bit entropy in RNG. High is better");
	T_PERF("mean_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), mean_bit_entropy, "bits", "mean bit entropy in RNG. High is better");
	T_PERF("max_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), max_bit_entropy, "bits", "max bit entropy in RNG. High is better");

	/*
	 * TEST 3: Incremental Pattern Search
	 * Check that no incremental/decremental pattern exists in the given window
	 */
	int window_start, window_end, trend;
	window_start = window_end = trend = 0;

	do {
		/*
		 * Set the window
		 */
		window_end = window_start + CONF_WINDOW_SIZE - 1;
		if (window_end >= CONF_ITERATIONS) {
			window_end = CONF_ITERATIONS - 1;
		}

		trend = 0;
		for (i = window_start; i < window_end; i++) {
			if (numbers[i] < numbers[i + 1]) {
				trend++;
			} else if (numbers[i] > numbers[i + 1]) {
				trend--;
			}
		}
		/*
		 * Check that there is no increasing or decreasing trend,
		 * i.e. trend <= ceil(window_size/2)
		 */
		if (trend < 0) {
			trend = -trend;
		}
		if (trend > CONF_WINDOW_TREND_LIMIT) {
			T_ASSERT_LE_INT(trend, CONF_WINDOW_TREND_LIMIT, "Found increasing/decreasing trend in random numbers.");
		}

		/*
		 * Move to the next window
		 */
		window_start++;
	} while (window_start < (CONF_ITERATIONS - 1));
	T_PASS("Did not find increasing/decreasing trends in a window of %d numbers.", CONF_WINDOW_SIZE);

	/*
	 * TEST 4: Find Duplicates
	 * Check that no duplicate values are generated
	 */
	qsort(numbers, CONF_ITERATIONS, sizeof(numbers[0]), compare_numbers_ascending);
	for (i = 1; i < CONF_ITERATIONS; i++) {
		if (numbers[i] == numbers[i - 1]) {
			T_ASSERT_NE_ULLONG(numbers[i], numbers[i - 1], "read_random generated duplicate values.");
		}
	}
	T_PASS("Test did not find any duplicates, as expected.");

	return KERN_SUCCESS;
}

/* KCDATA kernel api tests */
static struct kcdata_descriptor test_kc_data; //, test_kc_data2;
struct sample_disk_io_stats {
	uint64_t disk_reads_count;
	uint64_t disk_reads_size;
	uint64_t io_priority_count[4];
	uint64_t io_priority_size;
} __attribute__((packed));

struct kcdata_subtype_descriptor test_disk_io_stats_def[] = {
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_NONE,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 0 * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "disk_reads_count"
	},
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_NONE,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 1 * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "disk_reads_size"
	},
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_ARRAY,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 2 * sizeof(uint64_t),
		.kcs_elem_size = KCS_SUBTYPE_PACK_SIZE(4, sizeof(uint64_t)),
		.kcs_name = "io_priority_count"
	},
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_ARRAY,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = (2 + 4) * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "io_priority_size"
	},
};

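/*
 * Exercise the kcdata API end to end: failing init paths, a successful
 * static init, zero-sized and fixed-size entries, an array entry and a
 * custom type definition.
 */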
kern_return_t
kcdata_api_test()
{
	kern_return_t retval = KERN_SUCCESS;

	/* test for NULL input */
	retval = kcdata_memory_static_init(NULL, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_STACKSHOT, 100, KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_memory_static_init with NULL struct");

	/* another negative test with buffer size < 32 bytes */
	char data[30] = "sample_disk_io_stats";
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)&data, KCDATA_BUFFER_BEGIN_CRASHINFO, sizeof(data),
	    KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "init with 30 bytes failed as expected with KERN_INSUFFICIENT_BUFFER_SIZE");

	/* test with COPYOUT for 0x0 address. Should return KERN_NO_ACCESS */
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_CRASHINFO, PAGE_SIZE,
	    KCFLAG_USE_COPYOUT);
	T_ASSERT(retval == KERN_NO_ACCESS, "writing to 0x0 returned KERN_NO_ACCESS");

	/* test with successful kcdata_memory_static_init */
	test_kc_data.kcd_length = 0xdeadbeef;
	mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
	T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");

	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
	    KCFLAG_USE_MEMCOPY);

	T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");

	T_ASSERT(test_kc_data.kcd_length == PAGE_SIZE, "kcdata length is set correctly to PAGE_SIZE.");
	T_LOG("addr_begin 0x%llx and end 0x%llx and address 0x%llx", test_kc_data.kcd_addr_begin, test_kc_data.kcd_addr_end, address);
	T_ASSERT(test_kc_data.kcd_addr_begin == address, "kcdata begin address is correct 0x%llx", (uint64_t)address);

	/* verify we have BEGIN and END HEADERS set */
	uint32_t * mem = (uint32_t *)address;
	T_ASSERT(mem[0] == KCDATA_BUFFER_BEGIN_STACKSHOT, "buffer does contain KCDATA_BUFFER_BEGIN_STACKSHOT");
	T_ASSERT(mem[4] == KCDATA_TYPE_BUFFER_END, "KCDATA_TYPE_BUFFER_END is appended as expected");
	T_ASSERT(mem[5] == 0, "size of BUFFER_END tag is zero");

	/* verify kcdata_memory_get_used_bytes() */
	uint64_t bytes_used = 0;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	T_ASSERT(bytes_used == (2 * sizeof(struct kcdata_item)), "bytes_used api returned expected %llu", bytes_used);

	/* test for kcdata_get_memory_addr() */

	mach_vm_address_t user_addr = 0;
	/* negative test for NULL user_addr AND/OR kcdata_descriptor */
	retval = kcdata_get_memory_addr(NULL, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL struct -> KERN_INVALID_ARGUMENT");

	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), NULL);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL user_addr -> KERN_INVALID_ARGUMENT");

	/* successful case with size 0. This is expected to succeed, since a bare item type can be used as a boolean */
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_USECS_SINCE_EPOCH, 0, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Successfully got kcdata entry for 0 size data");
	T_ASSERT(user_addr == test_kc_data.kcd_addr_end, "0 sized data did not add any extra buffer space");

	/* successful case with valid size. */
	user_addr = 0xdeadbeef;
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "kcdata_get_memory_addr with valid values succeeded.");
	T_ASSERT(user_addr > test_kc_data.kcd_addr_begin, "user_addr is in range of buffer");
	T_ASSERT(user_addr < test_kc_data.kcd_addr_end, "user_addr is in range of buffer");

	/* try creating an item with a really large size */
	user_addr = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, PAGE_SIZE * 4, &user_addr);
	T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "Allocating entry with size > buffer -> KERN_INSUFFICIENT_BUFFER_SIZE");
	T_ASSERT(user_addr == 0xdeadbeef, "user_addr remained unaffected with failed kcdata_get_memory_addr");
	T_ASSERT(bytes_used == kcdata_memory_get_used_bytes(&test_kc_data), "The data structure should be unaffected");

	/* verify convenience functions for uint32_with_description */
	retval = kcdata_add_uint32_with_description(&test_kc_data, 0xbdc0ffee, "This is bad coffee");
	T_ASSERT(retval == KERN_SUCCESS, "add uint32 with description succeeded.");

	retval = kcdata_add_uint64_with_description(&test_kc_data, 0xf001badc0ffee, "another 8 byte no.");
	T_ASSERT(retval == KERN_SUCCESS, "add uint64 with desc succeeded.");

	/* verify creating a KCDATA_TYPE_ARRAY here */
	user_addr = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	/* save the memory address where the array will land */
	struct kcdata_item * item_p = (struct kcdata_item *)test_kc_data.kcd_addr_end;

	retval = kcdata_get_memory_addr_for_array(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), 20, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Array of 20 integers should be possible");
	T_ASSERT(user_addr != 0xdeadbeef, "user_addr is updated as expected");
	T_ASSERT((kcdata_memory_get_used_bytes(&test_kc_data) - bytes_used) >= 20 * sizeof(uint64_t), "memory allocation is in range");
	kcdata_iter_t iter = kcdata_iter(item_p, (unsigned long)(PAGE_SIZE - kcdata_memory_get_used_bytes(&test_kc_data)));
	T_ASSERT(kcdata_iter_array_elem_count(iter) == 20, "array count is 20");

	/* FIXME add tests here for ranges of sizes and counts */

	T_ASSERT(item_p->flags == (((uint64_t)KCDATA_TYPE_MACH_ABSOLUTE_TIME << 32) | 20), "flags are set correctly");

	/* test adding of a custom type */

	retval = kcdata_add_type_definition(&test_kc_data, 0x999, data, &test_disk_io_stats_def[0],
	    sizeof(test_disk_io_stats_def) / sizeof(struct kcdata_subtype_descriptor));
	T_ASSERT(retval == KERN_SUCCESS, "adding custom type succeeded.");

	return KERN_SUCCESS;
}

/*
 * kern_return_t
 * kcdata_api_assert_tests()
 * {
 *      kern_return_t retval = 0;
 *      void * assert_check_retval = NULL;
 *      test_kc_data2.kcd_length = 0xdeadbeef;
 *      mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
 *      T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");
 *
 *      retval = kcdata_memory_static_init(&test_kc_data2, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
 *          KCFLAG_USE_MEMCOPY);
 *
 *      T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");
 *
 *      retval = T_REGISTER_ASSERT_CHECK("KCDATA_DESC_MAXLEN", &assert_check_retval);
 *      T_ASSERT(retval == KERN_SUCCESS, "registered assert widget");
 *
 *      // this will assert
 *      retval = kcdata_add_uint32_with_description(&test_kc_data2, 0xc0ffee, "really long description string for kcdata");
 *      T_ASSERT(retval == KERN_INVALID_ARGUMENT, "API param check returned KERN_INVALID_ARGUMENT correctly");
 *      T_ASSERT(assert_check_retval == (void *)XT_RET_W_SUCCESS, "assertion handler verified that it was hit");
 *
 *      return KERN_SUCCESS;
 * }
 */

#if defined(__arm__) || defined(__arm64__)

#include <arm/pmap.h>

#define MAX_PMAP_OBJECT_ELEMENT 100000

extern struct vm_object pmap_object_store; /* store pt pages */
extern unsigned long gPhysBase, gPhysSize, first_avail;

/*
 * Macros to traverse the pmap object structures and extract
 * physical page numbers using information from the low globals only.
 * This emulates how Astris extracts information from a coredump.
 */
#if defined(__arm64__)

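/*
 * vm_page pointers in the queue are stored packed: either an index into
 * the vm_pages array (when lgPmapMemFromArrayMask is set) or an offset
 * shifted by lgPmapMemPackedShift from lgPmapMemPackedBaseAddr. Unpack
 * them the way a coredump consumer would, using lowGlo fields only.
 */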
static inline uintptr_t
astris_vm_page_unpack_ptr(uintptr_t p)
{
	if (!p) {
		return (uintptr_t)0;
	}

	return (p & lowGlo.lgPmapMemFromArrayMask)
	       ? lowGlo.lgPmapMemStartAddr + (p & ~(lowGlo.lgPmapMemFromArrayMask)) * lowGlo.lgPmapMemPagesize
	       : lowGlo.lgPmapMemPackedBaseAddr + (p << lowGlo.lgPmapMemPackedShift);
}

// assume next pointer is the first element
#define astris_vm_page_queue_next(qc) (astris_vm_page_unpack_ptr(*((uint32_t *)(qc))))

#endif

#if defined(__arm__)

// assume next pointer is the first element
#define astris_vm_page_queue_next(qc) *((uintptr_t *)(qc))

#endif

#define astris_vm_page_queue_first(q) astris_vm_page_queue_next(q)

#define astris_vm_page_queue_end(q, qe) ((q) == (qe))

#define astris_vm_page_queue_iterate(head, elt) \
	for ((elt) = (uintptr_t)astris_vm_page_queue_first((head)); !astris_vm_page_queue_end((head), (elt)); \
	    (elt) = (uintptr_t)astris_vm_page_queue_next(((elt) + (uintptr_t)lowGlo.lgPmapMemChainOffset)))

#define astris_ptoa(x) ((vm_address_t)(x) << lowGlo.lgPageShift)

static inline ppnum_t
astris_vm_page_get_phys_page(uintptr_t m)
{
	return (m >= lowGlo.lgPmapMemStartAddr && m < lowGlo.lgPmapMemEndAddr)
	       ? (ppnum_t)((m - lowGlo.lgPmapMemStartAddr) / lowGlo.lgPmapMemPagesize + lowGlo.lgPmapMemFirstppnum)
	       : *((ppnum_t *)(m + lowGlo.lgPmapMemPageOffset));
}

kern_return_t
pmap_coredump_test(void)
{
	int iter = 0;
	uintptr_t p;

	T_LOG("Testing coredump info for PMAP.");

	T_ASSERT_GE_ULONG(lowGlo.lgStaticAddr, gPhysBase, NULL);
	T_ASSERT_LE_ULONG(lowGlo.lgStaticAddr + lowGlo.lgStaticSize, first_avail, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMajorVersion, 3, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMinorVersion, 2, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMagic, LOWGLO_LAYOUT_MAGIC, NULL);

	// check the constant values in lowGlo
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemQ, ((typeof(lowGlo.lgPmapMemQ)) & (pmap_object_store.memq)), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPageOffset, offsetof(struct vm_page_with_ppnum, vmp_phys_page), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemChainOffset, offsetof(struct vm_page, vmp_listq), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPagesize, sizeof(struct vm_page), NULL);

#if defined(__arm64__)
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemFromArrayMask, VM_PAGE_PACKED_FROM_ARRAY, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedShift, VM_PAGE_PACKED_PTR_SHIFT, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedBaseAddr, VM_PAGE_PACKED_PTR_BASE, NULL);
#endif

	vm_object_lock_shared(&pmap_object_store);
	astris_vm_page_queue_iterate(lowGlo.lgPmapMemQ, p)
	{
		ppnum_t ppnum = astris_vm_page_get_phys_page(p);
		pmap_paddr_t pa = (pmap_paddr_t)astris_ptoa(ppnum);
		T_ASSERT_GE_ULONG(pa, gPhysBase, NULL);
		T_ASSERT_LT_ULONG(pa, gPhysBase + gPhysSize, NULL);
		iter++;
		T_ASSERT_LT_INT(iter, MAX_PMAP_OBJECT_ELEMENT, NULL);
	}
	vm_object_unlock(&pmap_object_store);

	T_ASSERT_GT_INT(iter, 0, NULL);
	return KERN_SUCCESS;
}
#endif

struct ts_kern_prim_test_args {
	int *end_barrier;
	int *notify_b;
	int *wait_event_b;
	int before_num;
	int *notify_a;
	int *wait_event_a;
	int after_num;
	int priority_to_check;
};

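/*
 * Minimal counting barrier built on assert_wait()/thread_wakeup():
 * wait_threads() blocks until *var reaches num, and wake_threads()
 * increments *var and wakes any sleepers.
 */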
static void
wait_threads(
	int* var,
	int num)
{
	if (var != NULL) {
		while (os_atomic_load(var, acquire) != num) {
			assert_wait((event_t) var, THREAD_UNINT);
			if (os_atomic_load(var, acquire) != num) {
				(void) thread_block(THREAD_CONTINUE_NULL);
			} else {
				clear_wait(current_thread(), THREAD_AWAKENED);
			}
		}
	}
}

static void
wake_threads(
	int* var)
{
	if (var) {
		os_atomic_inc(var, relaxed);
		thread_wakeup((event_t) var);
	}
}

extern void IOSleep(int);

static void
thread_lock_unlock_kernel_primitive(
	void *args,
	__unused wait_result_t wr)
{
	thread_t thread = current_thread();
	struct ts_kern_prim_test_args *info = (struct ts_kern_prim_test_args*) args;
	int pri;

	thread_lock(thread);
	pri = thread->sched_pri;
	thread_unlock(thread);

	wait_threads(info->wait_event_b, info->before_num);
	wake_threads(info->notify_b);

	tstile_test_prim_lock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

	wake_threads(info->notify_a);
	wait_threads(info->wait_event_a, info->after_num);

	IOSleep(100);

	if (info->priority_to_check) {
		thread_lock(thread);
		pri = thread->sched_pri;
		thread_unlock(thread);
		T_ASSERT(pri == info->priority_to_check, "Priority thread: current sched %d sched wanted %d", pri, info->priority_to_check);
	}

	tstile_test_prim_unlock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

	wake_threads(info->end_barrier);
	thread_terminate_self();
}

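/*
 * Priority-inheritance scenario: an owner started at priority 80 takes
 * the turnstile test lock, then two waiters started at 85 and 90 block
 * on it. While holding the lock the owner must be boosted to 90, the
 * highest waiter priority (priority_to_check).
 */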
kern_return_t
ts_kernel_primitive_test(void)
{
	thread_t owner, thread1, thread2;
	struct ts_kern_prim_test_args targs[2] = {};
	kern_return_t result;
	int end_barrier = 0;
	int owner_locked = 0;
	int waiters_ready = 0;

	T_LOG("Testing turnstile kernel primitive");

	targs[0].notify_b = NULL;
	targs[0].wait_event_b = NULL;
	targs[0].before_num = 0;
	targs[0].notify_a = &owner_locked;
	targs[0].wait_event_a = &waiters_ready;
	targs[0].after_num = 2;
	targs[0].priority_to_check = 90;
	targs[0].end_barrier = &end_barrier;

	// Start the owner with priority 80
	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[0], 80, &owner);
	T_ASSERT(result == KERN_SUCCESS, "Starting owner");

	targs[1].notify_b = &waiters_ready;
	targs[1].wait_event_b = &owner_locked;
	targs[1].before_num = 1;
	targs[1].notify_a = NULL;
	targs[1].wait_event_a = NULL;
	targs[1].after_num = 0;
	targs[1].priority_to_check = 0;
	targs[1].end_barrier = &end_barrier;

	// Start the waiters with priorities 85 and 90
	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 85, &thread1);
	T_ASSERT(result == KERN_SUCCESS, "Starting thread1");

	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 90, &thread2);
	T_ASSERT(result == KERN_SUCCESS, "Starting thread2");

	wait_threads(&end_barrier, 3);

	return KERN_SUCCESS;
}

#define MTX_LOCK 0
#define RW_LOCK 1

#define NUM_THREADS 4

struct synch_test_common {
	unsigned int nthreads;
	thread_t *threads;
	int max_pri;
	int test_done;
};

static kern_return_t
init_synch_test_common(struct synch_test_common *info, unsigned int nthreads)
{
	info->nthreads = nthreads;
	info->threads = kalloc(sizeof(thread_t) * nthreads);
	if (!info->threads) {
		return ENOMEM;
	}

	return KERN_SUCCESS;
}

static void
destroy_synch_test_common(struct synch_test_common *info)
{
	kfree(info->threads, sizeof(thread_t) * info->nthreads);
}

static void
start_threads(thread_continue_t func, struct synch_test_common *info, bool sleep_after_first)
{
	thread_t thread;
	kern_return_t result;
	uint i;
	int priority = 75;

	info->test_done = 0;

	for (i = 0; i < info->nthreads; i++) {
		info->threads[i] = NULL;
	}

	info->max_pri = priority + (info->nthreads - 1) * 5;
	if (info->max_pri > 95) {
		info->max_pri = 95;
	}

	for (i = 0; i < info->nthreads; i++) {
		result = kernel_thread_start_priority((thread_continue_t)func, info, priority, &thread);
		os_atomic_store(&info->threads[i], thread, release);
		T_ASSERT(result == KERN_SUCCESS, "Starting thread %d, priority %d, %p", i, priority, thread);

		priority += 5;

		if (i == 0 && sleep_after_first) {
			IOSleep(100);
		}
	}
}

static unsigned int
get_max_pri(struct synch_test_common * info)
{
	return info->max_pri;
}

static void
wait_all_thread(struct synch_test_common * info)
{
	wait_threads(&info->test_done, info->nthreads);
}

static void
notify_waiter(struct synch_test_common * info)
{
	wake_threads(&info->test_done);
}

static void
wait_for_waiters(struct synch_test_common *info)
{
	uint i, j;
	thread_t thread;

	for (i = 0; i < info->nthreads; i++) {
		j = 0;
		while (os_atomic_load(&info->threads[i], acquire) == NULL) {
			if (j % 100 == 0) {
				IOSleep(10);
			}
			j++;
		}

		if (info->threads[i] != current_thread()) {
			j = 0;
			do {
				thread = os_atomic_load(&info->threads[i], relaxed);
				if (thread == (thread_t) 1) {
					break;
				}

				if (!(thread->state & TH_RUN)) {
					break;
				}

				if (j % 100 == 0) {
					IOSleep(100);
				}
				j++;

				if (thread->started == FALSE) {
					continue;
				}
			} while (thread->state & TH_RUN);
		}
	}
}

static void
exclude_current_waiter(struct synch_test_common *info)
{
	uint i, j;

	for (i = 0; i < info->nthreads; i++) {
		j = 0;
		while (os_atomic_load(&info->threads[i], acquire) == NULL) {
			if (j % 100 == 0) {
				IOSleep(10);
			}
			j++;
		}

		if (os_atomic_load(&info->threads[i], acquire) == current_thread()) {
			os_atomic_store(&info->threads[i], (thread_t)1, release);
			return;
		}
	}
}

struct info_sleep_inheritor_test {
	struct synch_test_common head;
	lck_mtx_t mtx_lock;
	lck_rw_t rw_lock;
	decl_lck_mtx_gate_data(, gate);
	boolean_t gate_closed;
	int prim_type;
	boolean_t work_to_do;
	unsigned int max_pri;
	unsigned int steal_pri;
	int synch_value;
	int synch;
	int value;
	int handoff_failure;
	thread_t thread_inheritor;
};

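/*
 * The primitive_* helpers below dispatch on info->prim_type so that
 * every scenario can be driven over both a mutex (MTX_LOCK) and an
 * exclusive rw-lock (RW_LOCK) without duplicating the test logic.
 */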
static void
primitive_lock(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_lock(&info->mtx_lock);
		break;
	case RW_LOCK:
		lck_rw_lock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static void
primitive_unlock(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_unlock(&info->mtx_lock);
		break;
	case RW_LOCK:
		lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static wait_result_t
primitive_sleep_with_inheritor(struct info_sleep_inheritor_test *info)
{
	wait_result_t ret = KERN_SUCCESS;
	switch (info->prim_type) {
	case MTX_LOCK:
		ret = lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
		break;
	case RW_LOCK:
		ret = lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}

	return ret;
}

static void
primitive_wakeup_one_with_inheritor(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
	case RW_LOCK:
		wakeup_one_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED, LCK_WAKE_DEFAULT, &info->thread_inheritor);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static void
primitive_wakeup_all_with_inheritor(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
	case RW_LOCK:
		wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
	return;
}

static void
primitive_change_sleep_inheritor(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
	case RW_LOCK:
		change_sleep_inheritor((event_t) &info->thread_inheritor, info->thread_inheritor);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
	return;
}

static kern_return_t
primitive_gate_try_close(struct info_sleep_inheritor_test *info)
{
	kern_return_t ret = KERN_SUCCESS;
	switch (info->prim_type) {
	case MTX_LOCK:
		ret = lck_mtx_gate_try_close(&info->mtx_lock, &info->gate);
		break;
	case RW_LOCK:
		ret = lck_rw_gate_try_close(&info->rw_lock, &info->gate);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
	return ret;
}

static gate_wait_result_t
primitive_gate_wait(struct info_sleep_inheritor_test *info)
{
	gate_wait_result_t ret = GATE_OPENED;
	switch (info->prim_type) {
	case MTX_LOCK:
		ret = lck_mtx_gate_wait(&info->mtx_lock, &info->gate, LCK_SLEEP_DEFAULT, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
		break;
	case RW_LOCK:
		ret = lck_rw_gate_wait(&info->rw_lock, &info->gate, LCK_SLEEP_DEFAULT, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
	return ret;
}

static void
primitive_gate_open(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_gate_open(&info->mtx_lock, &info->gate);
		break;
	case RW_LOCK:
		lck_rw_gate_open(&info->rw_lock, &info->gate);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static void
primitive_gate_close(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_gate_close(&info->mtx_lock, &info->gate);
		break;
	case RW_LOCK:
		lck_rw_gate_close(&info->rw_lock, &info->gate);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static void
primitive_gate_steal(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_gate_steal(&info->mtx_lock, &info->gate);
		break;
	case RW_LOCK:
		lck_rw_gate_steal(&info->rw_lock, &info->gate);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static kern_return_t
primitive_gate_handoff(struct info_sleep_inheritor_test *info, int flags)
{
	kern_return_t ret = KERN_SUCCESS;
	switch (info->prim_type) {
	case MTX_LOCK:
		ret = lck_mtx_gate_handoff(&info->mtx_lock, &info->gate, flags);
		break;
	case RW_LOCK:
		ret = lck_rw_gate_handoff(&info->rw_lock, &info->gate, flags);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
	return ret;
}

static void
primitive_gate_assert(struct info_sleep_inheritor_test *info, int type)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_gate_assert(&info->mtx_lock, &info->gate, type);
		break;
	case RW_LOCK:
		lck_rw_gate_assert(&info->rw_lock, &info->gate, type);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static void
primitive_gate_init(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_gate_init(&info->mtx_lock, &info->gate);
		break;
	case RW_LOCK:
		lck_rw_gate_init(&info->rw_lock, &info->gate);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static void
primitive_gate_destroy(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_gate_destroy(&info->mtx_lock, &info->gate);
		break;
	case RW_LOCK:
		lck_rw_gate_destroy(&info->rw_lock, &info->gate);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

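/*
 * Worker for the wake-one scenario: the first thread through the lock
 * becomes the sleep inheritor and the others sleep pushing on it. Each
 * wakeup hands inheritorship to exactly one waiter, serializing the
 * increments of info->value; exactly one final wakeup is expected to
 * find no waiter left (handoff_failure == 1).
 */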
static void
thread_inheritor_like_mutex(
	void *args,
	__unused wait_result_t wr)
{
	wait_result_t wait;

	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	/*
	 * spin here to start concurrently
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		info->thread_inheritor = current_thread();
	} else {
		wait = primitive_sleep_with_inheritor(info);
		T_ASSERT(wait == THREAD_AWAKENED || wait == THREAD_NOT_WAITING, "sleep_with_inheritor return");
	}
	primitive_unlock(info);

	IOSleep(100);
	info->value++;

	primitive_lock(info);

	T_ASSERT(info->thread_inheritor == current_thread(), "thread_inheritor is %p", info->thread_inheritor);
	primitive_wakeup_one_with_inheritor(info);
	T_LOG("woken up %p", info->thread_inheritor);

	if (info->thread_inheritor == NULL) {
		T_ASSERT(info->handoff_failure == 0, "handoff failures");
		info->handoff_failure++;
	} else {
		T_ASSERT(info->thread_inheritor != current_thread(), "thread_inheritor is %p", info->thread_inheritor);
		thread_deallocate(info->thread_inheritor);
	}

	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}

static void
thread_just_inheritor_do_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		info->thread_inheritor = current_thread();
		primitive_unlock(info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		max_pri = get_max_pri((struct synch_test_common *) info);
		T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

		os_atomic_store(&info->synch, 0, relaxed);
		primitive_lock(info);
		primitive_wakeup_all_with_inheritor(info);
	} else {
		wake_threads(&info->synch);
		primitive_sleep_with_inheritor(info);
	}

	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}

static void
thread_steal_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		info->thread_inheritor = current_thread();
		exclude_current_waiter((struct synch_test_common *)info);

		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
		primitive_unlock(info);

		wait_threads(&info->synch, info->synch_value - 2);

		wait_for_waiters((struct synch_test_common *)info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
		primitive_lock(info);
		if (info->thread_inheritor == current_thread()) {
			primitive_wakeup_all_with_inheritor(info);
		}
	} else {
		if (info->steal_pri == 0) {
			info->steal_pri = my_pri;
			info->thread_inheritor = current_thread();
			primitive_change_sleep_inheritor(info);
			exclude_current_waiter((struct synch_test_common *)info);

			primitive_unlock(info);

			wait_threads(&info->synch, info->synch_value - 2);

			T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);

			T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);

			primitive_lock(info);
			primitive_wakeup_all_with_inheritor(info);
		} else {
			if (my_pri > info->steal_pri) {
				info->steal_pri = my_pri;
			}
			wake_threads(&info->synch);
			primitive_sleep_with_inheritor(info);
			exclude_current_waiter((struct synch_test_common *)info);
		}
	}
	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}

static void
thread_no_inheritor_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	info->value--;
	if (info->value == 0) {
		primitive_wakeup_all_with_inheritor(info);
	} else {
		info->thread_inheritor = NULL;
		primitive_sleep_with_inheritor(info);
	}

	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}

static void
thread_mtx_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	int i;
	u_int8_t rand;
	unsigned int mod_rand;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	for (i = 0; i < 10; i++) {
		lck_mtx_lock(&info->mtx_lock);
		if (info->thread_inheritor == NULL) {
			info->thread_inheritor = current_thread();
			lck_mtx_unlock(&info->mtx_lock);

			T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

			wait_threads(&info->synch, info->synch_value - 1);
			wait_for_waiters((struct synch_test_common *)info);
			max_pri = get_max_pri((struct synch_test_common *) info);
			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

			os_atomic_store(&info->synch, 0, relaxed);

			lck_mtx_lock(&info->mtx_lock);
			info->thread_inheritor = NULL;
			wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
			lck_mtx_unlock(&info->mtx_lock);
			continue;
		}

		read_random(&rand, sizeof(rand));
		mod_rand = rand % 2;

		wake_threads(&info->synch);
		switch (mod_rand) {
		case 0:
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		case 1:
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			break;
		default:
			panic("rand() mod 2 returned %u (random %u)", mod_rand, rand);
		}
	}

	/*
	 * spin here to stop using the lock as a mutex
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	for (i = 0; i < 10; i++) {
		/* read_random might sleep, so read it before acquiring the mtx as spin */
		read_random(&rand, sizeof(rand));

		lck_mtx_lock_spin(&info->mtx_lock);
		if (info->thread_inheritor == NULL) {
			info->thread_inheritor = current_thread();
			lck_mtx_unlock(&info->mtx_lock);

			T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);
			max_pri = get_max_pri((struct synch_test_common *) info);
			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

			lck_mtx_lock_spin(&info->mtx_lock);
			info->thread_inheritor = NULL;
			wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
			lck_mtx_unlock(&info->mtx_lock);
			continue;
		}

		mod_rand = rand % 2;
		switch (mod_rand) {
		case 0:
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		case 1:
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN_ALWAYS, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		default:
			panic("rand() mod 2 returned %u (random %u)", mod_rand, rand);
		}
	}
	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}

1655static void
1656thread_rw_work(
1657 void *args,
1658 __unused wait_result_t wr)
1659{
1660 struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
1661 uint my_pri = current_thread()->sched_pri;
1662 int i;
1663 lck_rw_type_t type;
1664 u_int8_t rand;
1665 unsigned int mod_rand;
1666 uint max_pri;
1667
1668 T_LOG("Started thread pri %d %p", my_pri, current_thread());
1669
1670 for (i = 0; i < 10; i++) {
1671try_again:
1672 type = LCK_RW_TYPE_SHARED;
1673 lck_rw_lock(&info->rw_lock, type);
1674 if (info->thread_inheritor == NULL) {
1675 type = LCK_RW_TYPE_EXCLUSIVE;
1676
1677 if (lck_rw_lock_shared_to_exclusive(&info->rw_lock)) {
1678 if (info->thread_inheritor == NULL) {
1679 info->thread_inheritor = current_thread();
1680 lck_rw_unlock(&info->rw_lock, type);
1681 wait_threads(&info->synch, info->synch_value - 1);
1682
1683 T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
1684 wait_for_waiters((struct synch_test_common *)info);
1685 max_pri = get_max_pri((struct synch_test_common *) info);
1686 T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
1687
1688 os_atomic_store(&info->synch, 0, relaxed);
1689
1690 lck_rw_lock(&info->rw_lock, type);
1691 info->thread_inheritor = NULL;
1692 wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1693 lck_rw_unlock(&info->rw_lock, type);
1694 continue;
1695 }
1696 } else {
1697 goto try_again;
1698 }
1699 }
1700
1701 read_random(&rand, sizeof(rand));
1702 mod_rand = rand % 4;
1703
1704 wake_threads(&info->synch);
1705 switch (mod_rand) {
1706 case 0:
1707 lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1708 lck_rw_unlock(&info->rw_lock, type);
1709 break;
1710 case 1:
1711 lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1712 break;
1713 case 2:
1714 lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_SHARED, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1715 lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_SHARED);
1716 break;
1717 case 3:
1718 lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_EXCLUSIVE, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1719 lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
1720 break;
1721 default:
1722 panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
1723 }
1724 }
1725
1726 assert(current_thread()->kern_promotion_schedpri == 0);
1727 notify_waiter((struct synch_test_common *)info);
1728
1729 thread_terminate_self();
1730}

static void
test_sleep_with_wake_all(struct info_sleep_inheritor_test *info, int prim_type)
{
	info->prim_type = prim_type;
	info->synch = 0;
	info->synch_value = info->head.nthreads;

	info->thread_inheritor = NULL;

	start_threads((thread_continue_t)thread_just_inheritor_do_work, (struct synch_test_common *)info, TRUE);
	wait_all_thread((struct synch_test_common *)info);
}

static void
test_sleep_with_wake_one(struct info_sleep_inheritor_test *info, int prim_type)
{
	info->prim_type = prim_type;

	info->synch = 0;
	info->synch_value = info->head.nthreads;
	info->value = 0;
	info->handoff_failure = 0;
	info->thread_inheritor = NULL;

	start_threads((thread_continue_t)thread_inheritor_like_mutex, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);

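	/*
	 * Each waiter hands the push off to the next one in turn; the last
	 * thread finds no waiters left, so exactly one handoff is expected
	 * to fail.
	 */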
	T_ASSERT(info->value == (int)info->head.nthreads, "value protected by sleep");
	T_ASSERT(info->handoff_failure == 1, "handoff failures");
}

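/*
 * thread_steal_work (defined earlier in this file) drives
 * change_sleep_inheritor(), which re-points the push of the threads already
 * sleeping on an event at a new inheritor thread without waking them.
 */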
static void
test_change_sleep_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
{
	info->prim_type = prim_type;

	info->thread_inheritor = NULL;
	info->steal_pri = 0;
	info->synch = 0;
	info->synch_value = info->head.nthreads;

	start_threads((thread_continue_t)thread_steal_work, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);
}

static void
test_no_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
{
	info->prim_type = prim_type;
	info->synch = 0;
	info->synch_value = info->head.nthreads;

	info->thread_inheritor = NULL;
	info->value = info->head.nthreads;

	start_threads((thread_continue_t)thread_no_inheritor_work, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);
}

static void
test_rw_lock(struct info_sleep_inheritor_test *info)
{
	info->thread_inheritor = NULL;
	info->value = info->head.nthreads;
	info->synch = 0;
	info->synch_value = info->head.nthreads;

	start_threads((thread_continue_t)thread_rw_work, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);
}

static void
test_mtx_lock(struct info_sleep_inheritor_test *info)
{
	info->thread_inheritor = NULL;
	info->value = info->head.nthreads;
	info->synch = 0;
	info->synch_value = info->head.nthreads;

	start_threads((thread_continue_t)thread_mtx_work, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);
}

kern_return_t
ts_kernel_sleep_inheritor_test(void)
{
	struct info_sleep_inheritor_test info = {};

	init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);

	lck_attr_t* lck_attr = lck_attr_alloc_init();
	lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_t* lck_grp = lck_grp_alloc_init("test sleep_inheritor", lck_grp_attr);

	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
	lck_rw_init(&info.rw_lock, lck_grp, lck_attr);

	/*
	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
	 */
	T_LOG("Testing mtx sleep with inheritor and wake_all_with_inheritor");
	test_sleep_with_wake_all(&info, MTX_LOCK);

	/*
	 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
	 */
	T_LOG("Testing rw sleep with inheritor and wake_all_with_inheritor");
	test_sleep_with_wake_all(&info, RW_LOCK);

	/*
	 * Testing lck_mtx_sleep_with_inheritor and wakeup_one_with_inheritor
	 */
	T_LOG("Testing mtx sleep with inheritor and wake_one_with_inheritor");
	test_sleep_with_wake_one(&info, MTX_LOCK);

	/*
	 * Testing lck_rw_sleep_with_inheritor and wakeup_one_with_inheritor
	 */
	T_LOG("Testing rw sleep with inheritor and wake_one_with_inheritor");
	test_sleep_with_wake_one(&info, RW_LOCK);

	/*
	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
	 * and change_sleep_inheritor
	 */
	T_LOG("Testing change_sleep_inheritor with mtx sleep");
	test_change_sleep_inheritor(&info, MTX_LOCK);

	/*
	 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
	 * and change_sleep_inheritor
	 */
	T_LOG("Testing change_sleep_inheritor with rw sleep");
	test_change_sleep_inheritor(&info, RW_LOCK);

	/*
	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
	 * with inheritor NULL
	 */
	T_LOG("Testing inheritor NULL, mtx");
	test_no_inheritor(&info, MTX_LOCK);

	/*
	 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
	 * with inheritor NULL
	 */
	T_LOG("Testing inheritor NULL, rw");
	test_no_inheritor(&info, RW_LOCK);

	/*
	 * Testing mtx locking combinations
	 */
	T_LOG("Testing mtx locking combinations");
	test_mtx_lock(&info);

	/*
	 * Testing rw locking combinations
	 */
	T_LOG("Testing rw locking combinations");
	test_rw_lock(&info);

	destroy_synch_test_common((struct synch_test_common *)&info);

	lck_attr_free(lck_attr);
	lck_grp_attr_free(lck_grp_attr);
	lck_rw_destroy(&info.rw_lock, lck_grp);
	lck_mtx_destroy(&info.mtx_lock, lck_grp);
	lck_grp_free(lck_grp);

	return KERN_SUCCESS;
}

static void
thread_gate_aggressive(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);
	if (info->thread_inheritor == NULL) {
		info->thread_inheritor = current_thread();
		primitive_gate_assert(info, GATE_ASSERT_OPEN);
		primitive_gate_close(info);
		exclude_current_waiter((struct synch_test_common *)info);

		primitive_unlock(info);

		wait_threads(&info->synch, info->synch_value - 2);
		wait_for_waiters((struct synch_test_common *)info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

		primitive_lock(info);
		if (info->thread_inheritor == current_thread()) {
			primitive_gate_open(info);
		}
	} else {
		if (info->steal_pri == 0) {
			info->steal_pri = my_pri;
			info->thread_inheritor = current_thread();
			primitive_gate_steal(info);
			exclude_current_waiter((struct synch_test_common *)info);

			primitive_unlock(info);
			wait_threads(&info->synch, info->synch_value - 2);

			T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);
			T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "gate keeper priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);

			primitive_lock(info);
			primitive_gate_open(info);
		} else {
			if (my_pri > info->steal_pri) {
				info->steal_pri = my_pri;
			}
			wake_threads(&info->synch);
			primitive_gate_wait(info);
			exclude_current_waiter((struct synch_test_common *)info);
		}
	}
	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
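
/*
 * Reading aid for the worker above: the first thread closes the gate, the
 * second thread steals ownership of the closed gate via primitive_gate_steal(),
 * and every later thread waits on the gate, pushing on the thief. The
 * assertion checks that the thief ends up running at the highest priority
 * recorded among the waiters (info->steal_pri).
 */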

static void
thread_gate_like_mutex(
	void *args,
	__unused wait_result_t wr)
{
	gate_wait_result_t wait;
	kern_return_t ret;
	uint my_pri = current_thread()->sched_pri;

	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	/*
	 * Barrier: wait here so that all threads enter the critical
	 * section concurrently.
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	primitive_lock(info);

	if (primitive_gate_try_close(info) != KERN_SUCCESS) {
		wait = primitive_gate_wait(info);
		T_ASSERT(wait == GATE_HANDOFF, "gate_wait return");
	}

	primitive_gate_assert(info, GATE_ASSERT_HELD);

	primitive_unlock(info);

	IOSleep(100);
	info->value++;

	primitive_lock(info);

	ret = primitive_gate_handoff(info, GATE_HANDOFF_DEFAULT);
	if (ret == KERN_NOT_WAITING) {
		T_ASSERT(info->handoff_failure == 0, "handoff failures");
		primitive_gate_handoff(info, GATE_HANDOFF_OPEN_IF_NO_WAITERS);
		info->handoff_failure++;
	}

	primitive_unlock(info);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}
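
/*
 * Illustrative sketch (not part of the test): the mutex-like gate protocol
 * above, written against the lck_mtx_gate_* calls that the primitive_*
 * wrappers dispatch to for MTX_LOCK. The exact signatures of
 * lck_mtx_gate_try_close() and lck_mtx_gate_handoff() are assumed here from
 * the wrappers' usage:
 *
 *	lck_mtx_lock(&mtx);
 *	if (lck_mtx_gate_try_close(&mtx, &gate) != KERN_SUCCESS) {
 *		// gate already closed: wait for the holder to hand it off
 *		gate_wait_result_t res = lck_mtx_gate_wait(&mtx, &gate,
 *		    LCK_SLEEP_DEFAULT, THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
 *		assert(res == GATE_HANDOFF);
 *	}
 *	lck_mtx_unlock(&mtx);
 *	// ... critical section, boosted by the waiters' push ...
 *	lck_mtx_lock(&mtx);
 *	// pass ownership to the next waiter; open the gate if there is none
 *	if (lck_mtx_gate_handoff(&mtx, &gate, GATE_HANDOFF_DEFAULT) == KERN_NOT_WAITING) {
 *		lck_mtx_gate_handoff(&mtx, &gate, GATE_HANDOFF_OPEN_IF_NO_WAITERS);
 *	}
 *	lck_mtx_unlock(&mtx);
 */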

static void
thread_just_one_do_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	primitive_lock(info);
check_again:
	if (info->work_to_do) {
		if (primitive_gate_try_close(info) == KERN_SUCCESS) {
			primitive_gate_assert(info, GATE_ASSERT_HELD);
			primitive_unlock(info);

			T_LOG("Thread pri %d acquired the gate %p", my_pri, current_thread());
			wait_threads(&info->synch, info->synch_value - 1);
			wait_for_waiters((struct synch_test_common *)info);
			max_pri = get_max_pri((struct synch_test_common *) info);
			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "gate owner priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
			os_atomic_store(&info->synch, 0, relaxed);

			primitive_lock(info);
			info->work_to_do = FALSE;
			primitive_gate_open(info);
		} else {
			primitive_gate_assert(info, GATE_ASSERT_CLOSED);
			wake_threads(&info->synch);
			primitive_gate_wait(info);
			goto check_again;
		}
	}
	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);
	thread_terminate_self();
}

static void
test_gate_push(struct info_sleep_inheritor_test *info, int prim_type)
{
	info->prim_type = prim_type;

	primitive_gate_init(info);
	info->work_to_do = TRUE;
	info->synch = 0;
	info->synch_value = NUM_THREADS;

	start_threads((thread_continue_t)thread_just_one_do_work, (struct synch_test_common *) info, TRUE);
	wait_all_thread((struct synch_test_common *)info);

	primitive_gate_destroy(info);
}

static void
test_gate_handoff(struct info_sleep_inheritor_test *info, int prim_type)
{
	info->prim_type = prim_type;

	primitive_gate_init(info);

	info->synch = 0;
	info->synch_value = NUM_THREADS;
	info->value = 0;
	info->handoff_failure = 0;

	start_threads((thread_continue_t)thread_gate_like_mutex, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);

	T_ASSERT(info->value == NUM_THREADS, "value protected by gate");
	T_ASSERT(info->handoff_failure == 1, "handoff failures");

	primitive_gate_destroy(info);
}

static void
test_gate_steal(struct info_sleep_inheritor_test *info, int prim_type)
{
	info->prim_type = prim_type;

	primitive_gate_init(info);

	info->synch = 0;
	info->synch_value = NUM_THREADS;
	info->thread_inheritor = NULL;
	info->steal_pri = 0;

	start_threads((thread_continue_t)thread_gate_aggressive, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);

	primitive_gate_destroy(info);
}

kern_return_t
ts_kernel_gate_test(void)
{
	struct info_sleep_inheritor_test info = {};

	T_LOG("Testing gate primitive");

	init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);

	lck_attr_t* lck_attr = lck_attr_alloc_init();
	lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_t* lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);

	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
	lck_rw_init(&info.rw_lock, lck_grp, lck_attr);

	/*
	 * Testing the priority inherited by the keeper
	 * lck_mtx_gate_try_close, lck_mtx_gate_open, lck_mtx_gate_wait
	 */
	T_LOG("Testing gate push, lck");
	test_gate_push(&info, MTX_LOCK);

	T_LOG("Testing gate push, rw");
	test_gate_push(&info, RW_LOCK);

	/*
	 * Testing the handoff
	 * lck_mtx_gate_wait, lck_mtx_gate_handoff
	 */
	T_LOG("Testing gate handoff, lck");
	test_gate_handoff(&info, MTX_LOCK);

	T_LOG("Testing gate handoff, rw");
	test_gate_handoff(&info, RW_LOCK);

	/*
	 * Testing the steal
	 * lck_mtx_gate_close, lck_mtx_gate_wait, lck_mtx_gate_steal, lck_mtx_gate_handoff
	 */
	T_LOG("Testing gate steal, lck");
	test_gate_steal(&info, MTX_LOCK);

	T_LOG("Testing gate steal, rw");
	test_gate_steal(&info, RW_LOCK);

	destroy_synch_test_common((struct synch_test_common *)&info);

	lck_attr_free(lck_attr);
	lck_grp_attr_free(lck_grp_attr);
	/* the rw lock was initialized above; destroy it before freeing the group */
	lck_rw_destroy(&info.rw_lock, lck_grp);
	lck_mtx_destroy(&info.mtx_lock, lck_grp);
	lck_grp_free(lck_grp);

	return KERN_SUCCESS;
}

#define NUM_THREAD_CHAIN 6

struct turnstile_chain_test {
	struct synch_test_common head;
	lck_mtx_t mtx_lock;
	int synch_value;
	int synch;
	int synch2;
	gate_t gates[NUM_THREAD_CHAIN];
};
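
/*
 * Reading aid for the three chain workers below: thread i > 0 blocks on the
 * gate or event owned by thread i - 1, so the threads form a chain of
 * turnstiles. The push of every blocked thread has to propagate down the
 * chain, and each worker asserts that, once all waiters are queued, it runs
 * at the maximum priority present in the set.
 */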

static void
thread_sleep_gate_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	uint i;
	thread_t inheritor = NULL, woken_up;
	event_t wait_event, wake_event;
	kern_return_t ret;

	T_LOG("Started thread pri %d %p", my_pri, self);

	/*
	 * We need the thread ids below; wait for all of them to be populated.
	 */
	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	for (i = 0; i < info->head.nthreads; i = i + 2) {
		// even threads will close a gate
		if (info->head.threads[i] == self) {
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		}
	}

	wake_threads(&info->synch2);
	wait_threads(&info->synch2, info->synch_value);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		wait_threads(&info->synch, info->synch_value - 1);
		wait_for_waiters((struct synch_test_common *)info);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
		lck_mtx_unlock(&info->mtx_lock);
	} else {
		wait_event = NULL;
		wake_event = NULL;
		for (i = 0; i < info->head.nthreads; i++) {
			if (info->head.threads[i] == self) {
				inheritor = info->head.threads[i - 1];
				wait_event = (event_t) &info->head.threads[i - 1];
				wake_event = (event_t) &info->head.threads[i];
				break;
			}
		}
		assert(wait_event != NULL);

		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);

		if (i % 2 != 0) {
			lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

			ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
			if (ret == KERN_SUCCESS) {
				T_ASSERT(i != (info->head.nthreads - 1), "thread id");
				T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
			} else {
				T_ASSERT(i == (info->head.nthreads - 1), "thread id");
			}

			// I am still the inheritor, wake all to drop the inheritorship
			ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
			T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
		} else {
			// I previously closed a gate
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
		}
	}

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}

static void
thread_gate_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	uint i;

	T_LOG("Started thread pri %d %p", my_pri, self);

	/*
	 * We need the thread ids below; wait for all of them to be populated.
	 */
	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	for (i = 0; i < info->head.nthreads; i++) {
		if (info->head.threads[i] == self) {
			lck_mtx_lock(&info->mtx_lock);
			lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		}
	}
	assert(i != info->head.nthreads);

	wake_threads(&info->synch2);
	wait_threads(&info->synch2, info->synch_value);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
		lck_mtx_unlock(&info->mtx_lock);
	} else {
		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);
		lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		lck_mtx_lock(&info->mtx_lock);
		lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
		lck_mtx_unlock(&info->mtx_lock);
	}

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}

static void
thread_sleep_chain_work(
	void *args,
	__unused wait_result_t wr)
{
	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
	thread_t self = current_thread();
	uint my_pri = self->sched_pri;
	uint max_pri;
	event_t wait_event, wake_event;
	uint i;
	thread_t inheritor = NULL, woken_up = NULL;
	kern_return_t ret;

	T_LOG("Started thread pri %d %p", my_pri, self);

	/*
	 * We need the thread ids below; wait for all of them to be populated.
	 */
	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
		IOSleep(10);
	}

	max_pri = get_max_pri((struct synch_test_common *) info);

	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		ret = wakeup_one_with_inheritor((event_t) &info->head.threads[0], THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
		T_ASSERT(ret == KERN_SUCCESS, "wakeup_one_with_inheritor woke next");
		T_ASSERT(woken_up == info->head.threads[1], "thread woken up");

		// I am still the inheritor, wake all to drop the inheritorship
		ret = wakeup_all_with_inheritor((event_t) &info->head.threads[0], LCK_WAKE_DEFAULT);
		T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
	} else {
		wait_event = NULL;
		wake_event = NULL;
		for (i = 0; i < info->head.nthreads; i++) {
			if (info->head.threads[i] == self) {
				inheritor = info->head.threads[i - 1];
				wait_event = (event_t) &info->head.threads[i - 1];
				wake_event = (event_t) &info->head.threads[i];
				break;
			}
		}

		assert(wait_event != NULL);
		lck_mtx_lock(&info->mtx_lock);
		wake_threads(&info->synch);

		lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);

		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);

		ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
		if (ret == KERN_SUCCESS) {
			T_ASSERT(i != (info->head.nthreads - 1), "thread id");
			T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
		} else {
			T_ASSERT(i == (info->head.nthreads - 1), "thread id");
		}

		// I am still the inheritor, wake all to drop the inheritorship
		ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
		T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
	}

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}

static void
test_sleep_chain(struct turnstile_chain_test *info)
{
	info->synch = 0;
	info->synch_value = info->head.nthreads;

	start_threads((thread_continue_t)thread_sleep_chain_work, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);
}

static void
test_gate_chain(struct turnstile_chain_test *info)
{
	info->synch = 0;
	info->synch2 = 0;
	info->synch_value = info->head.nthreads;

	start_threads((thread_continue_t)thread_gate_chain_work, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);
}

static void
test_sleep_gate_chain(struct turnstile_chain_test *info)
{
	info->synch = 0;
	info->synch2 = 0;
	info->synch_value = info->head.nthreads;

	start_threads((thread_continue_t)thread_sleep_gate_chain_work, (struct synch_test_common *)info, FALSE);
	wait_all_thread((struct synch_test_common *)info);
}

kern_return_t
ts_kernel_turnstile_chain_test(void)
{
	struct turnstile_chain_test info = {};
	int i;

	init_synch_test_common((struct synch_test_common *)&info, NUM_THREAD_CHAIN);
	lck_attr_t* lck_attr = lck_attr_alloc_init();
	lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_t* lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);

	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
	for (i = 0; i < NUM_THREAD_CHAIN; i++) {
		lck_mtx_gate_init(&info.mtx_lock, &info.gates[i]);
	}

	T_LOG("Testing sleep chain, lck");
	test_sleep_chain(&info);

	T_LOG("Testing gate chain, lck");
	test_gate_chain(&info);

	T_LOG("Testing sleep and gate chain, lck");
	test_sleep_gate_chain(&info);

	destroy_synch_test_common((struct synch_test_common *)&info);
	for (i = 0; i < NUM_THREAD_CHAIN; i++) {
		lck_mtx_gate_destroy(&info.mtx_lock, &info.gates[i]);
	}
	lck_attr_free(lck_attr);
	lck_grp_attr_free(lck_grp_attr);
	lck_mtx_destroy(&info.mtx_lock, lck_grp);
	lck_grp_free(lck_grp);

	return KERN_SUCCESS;
}

kern_return_t
ts_kernel_timingsafe_bcmp_test(void)
{
	int i, buf_size;
	char *buf = NULL;

	// empty
	T_ASSERT(timingsafe_bcmp(NULL, NULL, 0) == 0, NULL);
	T_ASSERT(timingsafe_bcmp("foo", "foo", 0) == 0, NULL);
	T_ASSERT(timingsafe_bcmp("foo", "bar", 0) == 0, NULL);

	// equal
	T_ASSERT(timingsafe_bcmp("foo", "foo", strlen("foo")) == 0, NULL);

	// unequal
	T_ASSERT(timingsafe_bcmp("foo", "bar", strlen("foo")) == 1, NULL);
	T_ASSERT(timingsafe_bcmp("foo", "goo", strlen("foo")) == 1, NULL);
	T_ASSERT(timingsafe_bcmp("foo", "fpo", strlen("foo")) == 1, NULL);
	T_ASSERT(timingsafe_bcmp("foo", "fop", strlen("foo")) == 1, NULL);

	// all possible single-byte bitwise differences
	for (i = 1; i < 256; i++) {
		unsigned char a = 0;
		unsigned char b = (unsigned char)i;

		T_ASSERT(timingsafe_bcmp(&a, &b, sizeof(a)) == 1, NULL);
	}

	// large
	buf_size = 1024 * 16;
	buf = kalloc(buf_size);
	T_EXPECT_NOTNULL(buf, "kalloc of buf");

	read_random(buf, buf_size);
	T_ASSERT(timingsafe_bcmp(buf, buf, buf_size) == 0, NULL);
	T_ASSERT(timingsafe_bcmp(buf, buf + 1, buf_size - 1) == 1, NULL);
	T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 1, NULL);

	memcpy(buf + 128, buf, 128);
	T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 0, NULL);

	kfree(buf, buf_size);

	return KERN_SUCCESS;
}
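
/*
 * Note: as exercised above, timingsafe_bcmp() returns 0 when the buffers are
 * equal and 1 when they differ anywhere, and by design its running time
 * depends only on the length argument, not on the position of the first
 * differing byte -- unlike memcmp()/bcmp(), which may return early.
 */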

kern_return_t
kprintf_hhx_test(void)
{
	printf("POST hhx test %hx%hx%hx%hx %hhx%hhx%hhx%hhx - %llx",
	    (unsigned short)0xfeed, (unsigned short)0xface,
	    (unsigned short)0xabad, (unsigned short)0xcafe,
	    (unsigned char)'h', (unsigned char)'h', (unsigned char)'x',
	    (unsigned char)'!',
	    0xfeedfaceULL);
	return KERN_SUCCESS;
}