/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/host.h>
#include <kern/macro_help.h>
#include <kern/sched.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/thread_call.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <tests/ktest.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <kern/kern_cdata.h>
#include <machine/lowglobals.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <kern/priority_queue.h>
#include <string.h>

#if !(DEVELOPMENT || DEBUG)
#error "Testing is not enabled on RELEASE configurations"
#endif

#include <tests/xnupost.h>

extern boolean_t get_range_bounds(char * c, int64_t * lower, int64_t * upper);
__private_extern__ void qsort(void * a, size_t n, size_t es, int (*cmp)(const void *, const void *));

uint32_t total_post_tests_count = 0;
void xnupost_reset_panic_widgets(void);

/* test declarations */
kern_return_t zalloc_test(void);
kern_return_t RandomULong_test(void);
kern_return_t kcdata_api_test(void);
kern_return_t priority_queue_test(void);
kern_return_t ts_kernel_primitive_test(void);
kern_return_t ts_kernel_sleep_inheritor_test(void);
kern_return_t ts_kernel_gate_test(void);
kern_return_t ts_kernel_turnstile_chain_test(void);
kern_return_t ts_kernel_timingsafe_bcmp_test(void);

extern kern_return_t kprintf_hhx_test(void);

#if defined(__arm__) || defined(__arm64__)
kern_return_t pmap_coredump_test(void);
#endif

extern kern_return_t console_serial_test(void);
extern kern_return_t console_serial_alloc_rel_tests(void);
extern kern_return_t console_serial_parallel_log_tests(void);
extern kern_return_t test_os_log(void);
extern kern_return_t test_os_log_parallel(void);
extern kern_return_t bitmap_post_test(void);

#ifdef __arm64__
extern kern_return_t arm64_munger_test(void);
extern kern_return_t ex_cb_test(void);
#if __ARM_PAN_AVAILABLE__
extern kern_return_t arm64_pan_test(void);
#endif
#if defined(HAS_APPLE_PAC)
extern kern_return_t arm64_ropjop_test(void);
#endif /* defined(HAS_APPLE_PAC) */
#endif /* __arm64__ */

extern kern_return_t test_thread_call(void);


struct xnupost_panic_widget xt_panic_widgets = {.xtp_context_p = NULL,
	                                        .xtp_outval_p = NULL,
	                                        .xtp_func_name = NULL,
	                                        .xtp_func = NULL};

struct xnupost_test kernel_post_tests[] = {XNUPOST_TEST_CONFIG_BASIC(zalloc_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(RandomULong_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(test_os_log),
	                                   XNUPOST_TEST_CONFIG_BASIC(test_os_log_parallel),
#ifdef __arm64__
	                                   XNUPOST_TEST_CONFIG_BASIC(arm64_munger_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(ex_cb_test),
#if __ARM_PAN_AVAILABLE__
	                                   XNUPOST_TEST_CONFIG_BASIC(arm64_pan_test),
#endif
#if defined(HAS_APPLE_PAC)
	                                   XNUPOST_TEST_CONFIG_BASIC(arm64_ropjop_test),
#endif /* defined(HAS_APPLE_PAC) */
#endif /* __arm64__ */
	                                   XNUPOST_TEST_CONFIG_BASIC(kcdata_api_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(console_serial_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(console_serial_alloc_rel_tests),
	                                   XNUPOST_TEST_CONFIG_BASIC(console_serial_parallel_log_tests),
#if defined(__arm__) || defined(__arm64__)
	                                   XNUPOST_TEST_CONFIG_BASIC(pmap_coredump_test),
#endif
	                                   XNUPOST_TEST_CONFIG_BASIC(bitmap_post_test),
	                                   //XNUPOST_TEST_CONFIG_TEST_PANIC(kcdata_api_assert_tests)
	                                   XNUPOST_TEST_CONFIG_BASIC(test_thread_call),
	                                   XNUPOST_TEST_CONFIG_BASIC(priority_queue_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_primitive_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_sleep_inheritor_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_gate_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_turnstile_chain_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_timingsafe_bcmp_test),
	                                   XNUPOST_TEST_CONFIG_BASIC(kprintf_hhx_test), };

uint32_t kernel_post_tests_count = sizeof(kernel_post_tests) / sizeof(xnupost_test_data_t);

#define POSTARGS_RUN_TESTS 0x1
#define POSTARGS_CONTROLLER_AVAILABLE 0x2
#define POSTARGS_CUSTOM_TEST_RUNLIST 0x4
uint64_t kernel_post_args = 0x0;

/* static variables to hold state */
static kern_return_t parse_config_retval = KERN_INVALID_CAPABILITY;
static char kernel_post_test_configs[256];
boolean_t xnupost_should_run_test(uint32_t test_num);

kern_return_t
xnupost_parse_config()
{
	if (parse_config_retval != KERN_INVALID_CAPABILITY) {
		return parse_config_retval;
	}
	PE_parse_boot_argn("kernPOST", &kernel_post_args, sizeof(kernel_post_args));

	if (PE_parse_boot_argn("kernPOST_config", &kernel_post_test_configs[0], sizeof(kernel_post_test_configs)) == TRUE) {
		kernel_post_args |= POSTARGS_CUSTOM_TEST_RUNLIST;
	}

	if (kernel_post_args != 0) {
		parse_config_retval = KERN_SUCCESS;
		goto out;
	}
	parse_config_retval = KERN_NOT_SUPPORTED;
out:
	return parse_config_retval;
}
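
/*
 * Example (editor's note; the values are illustrative): booting with
 *
 *   kernPOST=0x3 kernPOST_config="1-4,7"
 *
 * sets POSTARGS_RUN_TESTS | POSTARGS_CONTROLLER_AVAILABLE, and the presence
 * of the config string additionally sets POSTARGS_CUSTOM_TEST_RUNLIST, so
 * only tests 1 through 4 and test 7 are run.
 */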

boolean_t
xnupost_should_run_test(uint32_t test_num)
{
	if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
		int64_t begin = 0, end = 999999;
		char * b = kernel_post_test_configs;
		while (*b) {
			get_range_bounds(b, &begin, &end);
			if (test_num >= begin && test_num <= end) {
				return TRUE;
			}

			/* skip to the next "," */
			while (*b != ',') {
				if (*b == '\0') {
					return FALSE;
				}
				b++;
			}
			/* skip past the ',' */
			b++;
		}
		return FALSE;
	}
	return TRUE;
}
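
/*
 * Worked example (editor's note, assuming get_range_bounds() treats a lone
 * number as a degenerate range): with kernel_post_test_configs = "2-4,7",
 * xnupost_should_run_test(3) matches the range 2-4 and returns TRUE, while
 * xnupost_should_run_test(5) skips past "2-4", fails to match "7", reaches
 * the terminating NUL and returns FALSE.
 */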

kern_return_t
xnupost_list_tests(xnupost_test_t test_list, uint32_t test_count)
{
	if (KERN_SUCCESS != xnupost_parse_config()) {
		return KERN_FAILURE;
	}

	xnupost_test_t testp;
	for (uint32_t i = 0; i < test_count; i++) {
		testp = &test_list[i];
		if (testp->xt_test_num == 0) {
			testp->xt_test_num = ++total_post_tests_count;
		}
		/* make sure the boot-arg based test run list is honored */
		if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
			testp->xt_config |= XT_CONFIG_IGNORE;
			if (xnupost_should_run_test(testp->xt_test_num)) {
				testp->xt_config &= ~(XT_CONFIG_IGNORE);
				testp->xt_config |= XT_CONFIG_RUN;
				printf("\n[TEST] #%u is marked to run", testp->xt_test_num);
			}
		}
		printf("\n[TEST] TOC#%u name: %s expected: %d config: %x\n", testp->xt_test_num, testp->xt_name, testp->xt_expected_retval,
		    testp->xt_config);
	}

	return KERN_SUCCESS;
}

kern_return_t
xnupost_run_tests(xnupost_test_t test_list, uint32_t test_count)
{
	uint32_t i = 0;
	int retval = KERN_SUCCESS;

	if ((kernel_post_args & POSTARGS_RUN_TESTS) == 0) {
		printf("No POST boot-arg set.\n");
		return retval;
	}

	T_START;
	xnupost_test_t testp;
	for (; i < test_count; i++) {
		xnupost_reset_panic_widgets();
		testp = &test_list[i];
		T_BEGIN(testp->xt_name);
		testp->xt_begin_time = mach_absolute_time();
		testp->xt_end_time = testp->xt_begin_time;
		/*
		 * If a test is designed to panic and the controller
		 * is not available, mark it as SKIPPED.
		 */
		if ((testp->xt_config & XT_CONFIG_EXPECT_PANIC) && !(kernel_post_args & POSTARGS_CONTROLLER_AVAILABLE)) {
			T_SKIP(
				"Test expects panic but "
				"no controller is present");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		if ((testp->xt_config & XT_CONFIG_IGNORE)) {
			T_SKIP("Test is marked as XT_CONFIG_IGNORE");
			testp->xt_test_actions = XT_ACTION_SKIPPED;
			continue;
		}

		testp->xt_func();
		T_END;
		testp->xt_retval = T_TESTRESULT;
		testp->xt_end_time = mach_absolute_time();
		if (testp->xt_retval == testp->xt_expected_retval) {
			testp->xt_test_actions = XT_ACTION_PASSED;
		} else {
			testp->xt_test_actions = XT_ACTION_FAILED;
		}
	}
	T_FINISH;
	return retval;
}

kern_return_t
kernel_list_tests()
{
	return xnupost_list_tests(kernel_post_tests, kernel_post_tests_count);
}

kern_return_t
kernel_do_post()
{
	return xnupost_run_tests(kernel_post_tests, kernel_post_tests_count);
}

kern_return_t
xnupost_register_panic_widget(xt_panic_widget_func funcp, const char * funcname, void * context, void ** outval)
{
	if (xt_panic_widgets.xtp_context_p != NULL || xt_panic_widgets.xtp_func != NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}

	xt_panic_widgets.xtp_context_p = context;
	xt_panic_widgets.xtp_func = funcp;
	xt_panic_widgets.xtp_func_name = funcname;
	xt_panic_widgets.xtp_outval_p = outval;

	return KERN_SUCCESS;
}

void
xnupost_reset_panic_widgets()
{
	bzero(&xt_panic_widgets, sizeof(xt_panic_widgets));
}

kern_return_t
xnupost_process_kdb_stop(const char * panic_s)
{
	xt_panic_return_t retval = 0;
	struct xnupost_panic_widget * pw = &xt_panic_widgets;
	const char * name = "unknown";
	if (xt_panic_widgets.xtp_func_name) {
		name = xt_panic_widgets.xtp_func_name;
	}

	/* bail out early if kernPOST is not set */
	if (kernel_post_args == 0) {
		return KERN_INVALID_CAPABILITY;
	}

	if (xt_panic_widgets.xtp_func) {
		T_LOG("%s: Calling out to widget: %s", __func__, xt_panic_widgets.xtp_func_name);
		retval = pw->xtp_func(panic_s, pw->xtp_context_p, pw->xtp_outval_p);
	} else {
		return KERN_INVALID_CAPABILITY;
	}

	switch (retval) {
	case XT_RET_W_SUCCESS:
		T_EXPECT_EQ_INT(retval, XT_RET_W_SUCCESS, "%s reported successful handling. Returning from kdb_stop.", name);
		/* KERN_SUCCESS means return from panic/assertion */
		return KERN_SUCCESS;

	case XT_RET_W_FAIL:
		T_FAIL("%s reported XT_RET_W_FAIL: Returning from kdb_stop", name);
		return KERN_SUCCESS;

	case XT_PANIC_W_FAIL:
		T_FAIL("%s reported XT_PANIC_W_FAIL: Continuing to kdb_stop", name);
		return KERN_FAILURE;

	case XT_PANIC_W_SUCCESS:
		T_EXPECT_EQ_INT(retval, XT_PANIC_W_SUCCESS, "%s reported successful testcase. But continuing to kdb_stop.", name);
		return KERN_FAILURE;

	case XT_PANIC_UNRELATED:
	default:
		T_LOG("UNRELATED: Continuing to kdb_stop.");
		return KERN_FAILURE;
	}
}

xt_panic_return_t
_xt_generic_assert_check(const char * s, void * str_to_match, void ** outval)
{
	xt_panic_return_t ret = XT_PANIC_UNRELATED;

	if (NULL != strnstr(__DECONST(char *, s), (char *)str_to_match, strlen(s))) {
		T_LOG("%s: kdb_stop string: '%s' MATCHED string: '%s'", __func__, s, (char *)str_to_match);
		ret = XT_RET_W_SUCCESS;
	}

	if (outval) {
		*outval = (void *)(uintptr_t)ret;
	}
	return ret;
}

kern_return_t
xnupost_reset_tests(xnupost_test_t test_list, uint32_t test_count)
{
	uint32_t i = 0;
	xnupost_test_t testp;
	for (; i < test_count; i++) {
		testp = &test_list[i];
		testp->xt_begin_time = 0;
		testp->xt_end_time = 0;
		testp->xt_test_actions = XT_ACTION_NONE;
		testp->xt_retval = -1;
	}
	return KERN_SUCCESS;
}


kern_return_t
zalloc_test()
{
	zone_t test_zone;
	void * test_ptr;

	T_SETUPBEGIN;
	test_zone = zinit(sizeof(uint64_t), 100 * sizeof(uint64_t), sizeof(uint64_t), "test_uint64_zone");
	T_ASSERT_NOTNULL(test_zone, NULL);

	T_ASSERT_EQ_INT(zone_free_count(test_zone), 0, NULL);
	T_SETUPEND;

	T_ASSERT_NOTNULL(test_ptr = zalloc(test_zone), NULL);

	zfree(test_zone, test_ptr);

	/* A sample report for perfdata */
	T_PERF("num_threads_at_ktest", threads_count, "count", "# of threads in system at zalloc_test");

	return KERN_SUCCESS;
}

/*
 * Function used for comparison by qsort()
 */
static int
compare_numbers_ascending(const void * a, const void * b)
{
	const uint64_t x = *(const uint64_t *)a;
	const uint64_t y = *(const uint64_t *)b;
	if (x < y) {
		return -1;
	} else if (x > y) {
		return 1;
	} else {
		return 0;
	}
}

/*
 * Function used for comparison by qsort()
 */
static int
compare_numbers_descending(const void * a, const void * b)
{
	const uint32_t x = *(const uint32_t *)a;
	const uint32_t y = *(const uint32_t *)b;
	if (x > y) {
		return -1;
	} else if (x < y) {
		return 1;
	} else {
		return 0;
	}
}

/* Node structure for the priority queue tests */
struct priority_queue_test_node {
	struct priority_queue_entry link;
	priority_queue_key_t node_key;
};

static void
priority_queue_test_queue(struct priority_queue *pq, int type,
    priority_queue_compare_fn_t cmp_fn)
{
	/* Configuration for the test */
#define PRIORITY_QUEUE_NODES 7
	static uint32_t priority_list[] = { 20, 3, 7, 6, 50, 2, 8};
	uint32_t increase_pri = 100;
	uint32_t decrease_pri = 90;
	struct priority_queue_test_node *result;
	uint32_t key = 0;
	boolean_t update_result = false;

	struct priority_queue_test_node *node = NULL;
	/* Add all priorities to the first priority queue */
	for (int i = 0; i < PRIORITY_QUEUE_NODES; i++) {
		node = kalloc(sizeof(struct priority_queue_test_node));
		T_ASSERT_NOTNULL(node, NULL);

		priority_queue_entry_init(&(node->link));
		node->node_key = priority_list[i];
		key = (type == PRIORITY_QUEUE_GENERIC_KEY) ? PRIORITY_QUEUE_KEY_NONE : priority_list[i];
		priority_queue_insert(pq, &(node->link), key, cmp_fn);
	}

	T_ASSERT_NOTNULL(node, NULL);
	key = (type == PRIORITY_QUEUE_GENERIC_KEY) ? node->node_key : priority_queue_entry_key(pq, &(node->link));
	T_ASSERT((key == node->node_key), "verify node stored key correctly");

	/* Test the priority increase operation by updating the last node added (8) */
	T_ASSERT_NOTNULL(node, NULL);
	node->node_key = increase_pri;
	key = (type == PRIORITY_QUEUE_GENERIC_KEY) ? PRIORITY_QUEUE_KEY_NONE : node->node_key;
	update_result = priority_queue_entry_increase(pq, &node->link, key, cmp_fn);
	T_ASSERT((update_result == true), "increase key updated root");
	result = priority_queue_max(pq, struct priority_queue_test_node, link);
	T_ASSERT((result->node_key == increase_pri), "verify priority_queue_entry_increase() operation");


	/* Test the priority decrease operation by updating the last node added */
	T_ASSERT((result == node), NULL);
	node->node_key = decrease_pri;
	key = (type == PRIORITY_QUEUE_GENERIC_KEY) ? PRIORITY_QUEUE_KEY_NONE : node->node_key;
	update_result = priority_queue_entry_decrease(pq, &node->link, key, cmp_fn);
	T_ASSERT((update_result == true), "decrease key updated root");
	result = priority_queue_max(pq, struct priority_queue_test_node, link);
	T_ASSERT((result->node_key == decrease_pri), "verify priority_queue_entry_decrease() operation");

	/* Update our local priority list as well */
	priority_list[PRIORITY_QUEUE_NODES - 1] = decrease_pri;

	/* Sort the local list in descending order */
	qsort(priority_list, PRIORITY_QUEUE_NODES, sizeof(priority_list[0]), compare_numbers_descending);

	/* Test the maximum operation by comparing max node with local list */
	result = priority_queue_max(pq, struct priority_queue_test_node, link);
	T_ASSERT((result->node_key == priority_list[0]), "(heap (%u) == qsort (%u)) priority queue max node lookup",
	    (uint32_t)result->node_key, priority_list[0]);

	/* Remove all remaining elements and verify they match local list */
	for (int i = 0; i < PRIORITY_QUEUE_NODES; i++) {
		result = priority_queue_remove_max(pq, struct priority_queue_test_node, link, cmp_fn);
		T_ASSERT((result->node_key == priority_list[i]), "(heap (%u) == qsort (%u)) priority queue max node removal",
		    (uint32_t)result->node_key, priority_list[i]);
	}

	priority_queue_destroy(pq, struct priority_queue_test_node, link, ^(void *n) {
		kfree(n, sizeof(struct priority_queue_test_node));
	});
}

kern_return_t
priority_queue_test(void)
{
	/*
	 * Initialize two priority queues
	 * - One which uses the key comparator
	 * - Other which uses the node comparator
	 */
	static struct priority_queue pq;
	static struct priority_queue pq_nodes;

	T_SETUPBEGIN;

	priority_queue_init(&pq, PRIORITY_QUEUE_BUILTIN_KEY | PRIORITY_QUEUE_MAX_HEAP);
	priority_queue_init(&pq_nodes, PRIORITY_QUEUE_GENERIC_KEY | PRIORITY_QUEUE_MAX_HEAP);

	T_SETUPEND;

	priority_queue_test_queue(&pq, PRIORITY_QUEUE_BUILTIN_KEY,
	    PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE);

	priority_queue_test_queue(&pq_nodes, PRIORITY_QUEUE_GENERIC_KEY,
	    priority_heap_make_comparator(a, b, struct priority_queue_test_node, link, {
		return (a->node_key > b->node_key) ? 1 : ((a->node_key == b->node_key) ? 0 : -1);
	}));

	return KERN_SUCCESS;
}
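
/*
 * Minimal usage sketch of the priority queue API exercised above (editor's
 * illustration, compiled out; allocation failure handling elided).
 */
#if 0
static void
priority_queue_usage_sketch(void)
{
	static struct priority_queue pq;
	struct priority_queue_test_node *node, *max;

	priority_queue_init(&pq, PRIORITY_QUEUE_BUILTIN_KEY | PRIORITY_QUEUE_MAX_HEAP);

	node = kalloc(sizeof(struct priority_queue_test_node));
	priority_queue_entry_init(&node->link);
	node->node_key = 42;
	priority_queue_insert(&pq, &node->link, 42,
	    PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE);

	/* peek at the maximum, then remove it */
	max = priority_queue_max(&pq, struct priority_queue_test_node, link);
	assert(max == node);
	max = priority_queue_remove_max(&pq, struct priority_queue_test_node, link,
	    PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE);
	kfree(max, sizeof(struct priority_queue_test_node));

	priority_queue_destroy(&pq, struct priority_queue_test_node, link, ^(void *n) {
		kfree(n, sizeof(struct priority_queue_test_node));
	});
}
#endif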

/*
 * Count the number of bits that are set in a number,
 * e.g. count_bits(0xF0F0) == 8.
 */
static int
count_bits(uint64_t number)
{
	return __builtin_popcountll(number);
}

kern_return_t
RandomULong_test()
{
	/*
	 * Randomness test for RandomULong()
	 *
	 * This test verifies that:
	 * a. RandomULong works
	 * b. The generated numbers match the following entropy criteria:
	 *    For a thousand iterations, verify:
	 *    1. mean entropy > 12 bits
	 *    2. min entropy > 4 bits
	 *    3. No Duplicate
	 *    4. No incremental/decremental pattern in a window of 3
	 *    5. No Zero
	 *    6. No -1
	 *
	 * <rdar://problem/22526137> Add test to increase code coverage for /dev/random
	 */

#define CONF_MIN_ENTROPY 4
#define CONF_MEAN_ENTROPY 12
#define CONF_ITERATIONS 1000
#define CONF_WINDOW_SIZE 3
#define CONF_WINDOW_TREND_LIMIT ((CONF_WINDOW_SIZE / 2) + (CONF_WINDOW_SIZE & 1))
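/* For CONF_WINDOW_SIZE == 3 this is (3 / 2) + (3 & 1) == 1 + 1 == 2, i.e. ceil(3 / 2). */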

	int i;
	uint32_t min_bit_entropy, max_bit_entropy, bit_entropy;
	uint32_t aggregate_bit_entropy = 0;
	uint32_t mean_bit_entropy = 0;
	uint64_t numbers[CONF_ITERATIONS];
	min_bit_entropy = UINT32_MAX;
	max_bit_entropy = 0;
	/*
	 * TEST 1: Number generation and basic validation
	 * Check for zero (no bits set), -1 (all bits set) and errors
	 */
	for (i = 0; i < CONF_ITERATIONS; i++) {
		read_random(&numbers[i], sizeof(numbers[i]));
		if (numbers[i] == 0) {
			T_ASSERT_NE_ULLONG(numbers[i], 0, "read_random returned zero value.");
		}
		if (numbers[i] == UINT64_MAX) {
			T_ASSERT_NE_ULLONG(numbers[i], UINT64_MAX, "read_random returned -1.");
		}
	}
	T_PASS("Generated %d non-zero random numbers with at least one bit reset.", CONF_ITERATIONS);

	/*
	 * TEST 2: Mean and Min Bit Entropy
	 * Check the bit entropy and its mean over the generated numbers.
	 */
	for (i = 1; i < CONF_ITERATIONS; i++) {
		bit_entropy = count_bits(numbers[i - 1] ^ numbers[i]);
		if (bit_entropy < min_bit_entropy) {
			min_bit_entropy = bit_entropy;
		}
		if (bit_entropy > max_bit_entropy) {
			max_bit_entropy = bit_entropy;
		}

		if (bit_entropy < CONF_MIN_ENTROPY) {
			T_EXPECT_GE_UINT(bit_entropy, CONF_MIN_ENTROPY,
			    "Number of differing bits in consecutive numbers does not satisfy the min criteria.");
		}

		aggregate_bit_entropy += bit_entropy;
	}
	T_PASS("Passed the min bit entropy expectation of %d bits", CONF_MIN_ENTROPY);

	mean_bit_entropy = aggregate_bit_entropy / CONF_ITERATIONS;
	T_EXPECT_GE_UINT(mean_bit_entropy, CONF_MEAN_ENTROPY, "Test criteria for mean number of differing bits.");
	T_PASS("Mean bit entropy criteria satisfied (Required %d, Actual: %d).", CONF_MEAN_ENTROPY, mean_bit_entropy);
	T_LOG("{PERFORMANCE} iterations: %d, min_bit_entropy: %d, mean_bit_entropy: %d, max_bit_entropy: %d", CONF_ITERATIONS,
	    min_bit_entropy, mean_bit_entropy, max_bit_entropy);
	T_PERF("min_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), min_bit_entropy, "bits", "minimum bit entropy in RNG. High is better");
	T_PERF("mean_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), mean_bit_entropy, "bits", "mean bit entropy in RNG. High is better");
	T_PERF("max_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), max_bit_entropy, "bits", "max bit entropy in RNG. High is better");

	/*
	 * TEST 3: Incremental Pattern Search
	 * Check that no incremental/decremental pattern exists within the given window
	 */
	int window_start, window_end, trend;
	window_start = window_end = trend = 0;

	do {
		/*
		 * Set the window
		 */
		window_end = window_start + CONF_WINDOW_SIZE - 1;
		if (window_end >= CONF_ITERATIONS) {
			window_end = CONF_ITERATIONS - 1;
		}

		trend = 0;
		for (i = window_start; i < window_end; i++) {
			if (numbers[i] < numbers[i + 1]) {
				trend++;
			} else if (numbers[i] > numbers[i + 1]) {
				trend--;
			}
		}
		/*
		 * Check that there is no increasing or decreasing trend
		 * i.e. trend <= ceil(window_size/2)
		 */
		if (trend < 0) {
			trend = -trend;
		}
		if (trend > CONF_WINDOW_TREND_LIMIT) {
			T_ASSERT_LE_INT(trend, CONF_WINDOW_TREND_LIMIT, "Found increasing/decreasing trend in random numbers.");
		}

		/*
		 * Move to the next window
		 */
		window_start++;
	} while (window_start < (CONF_ITERATIONS - 1));
	T_PASS("Did not find increasing/decreasing trends in a window of %d numbers.", CONF_WINDOW_SIZE);

	/*
	 * TEST 4: Find Duplicates
	 * Check no duplicate values are generated
	 */
	qsort(numbers, CONF_ITERATIONS, sizeof(numbers[0]), compare_numbers_ascending);
	for (i = 1; i < CONF_ITERATIONS; i++) {
		if (numbers[i] == numbers[i - 1]) {
			T_ASSERT_NE_ULLONG(numbers[i], numbers[i - 1], "read_random generated duplicate values.");
		}
	}
	T_PASS("Test did not find any duplicates as expected.");

	return KERN_SUCCESS;
}


/* KCDATA kernel api tests */
static struct kcdata_descriptor test_kc_data; //, test_kc_data2;
struct sample_disk_io_stats {
	uint64_t disk_reads_count;
	uint64_t disk_reads_size;
	uint64_t io_priority_count[4];
	uint64_t io_priority_size;
} __attribute__((packed));

struct kcdata_subtype_descriptor test_disk_io_stats_def[] = {
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_NONE,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 0 * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "disk_reads_count"
	},
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_NONE,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 1 * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "disk_reads_size"
	},
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_ARRAY,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = 2 * sizeof(uint64_t),
		.kcs_elem_size = KCS_SUBTYPE_PACK_SIZE(4, sizeof(uint64_t)),
		.kcs_name = "io_priority_count"
	},
	{
		.kcs_flags = KCS_SUBTYPE_FLAGS_ARRAY,
		.kcs_elem_type = KC_ST_UINT64,
		.kcs_elem_offset = (2 + 4) * sizeof(uint64_t),
		.kcs_elem_size = sizeof(uint64_t),
		.kcs_name = "io_priority_size"
	},
};
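
/*
 * Layout note (editor's): the kcs_elem_offset values above mirror the packed
 * struct sample_disk_io_stats exactly:
 *   disk_reads_count     at byte  0
 *   disk_reads_size      at byte  8
 *   io_priority_count[4] at bytes 16..47
 *   io_priority_size     at byte 48
 */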

kern_return_t
kcdata_api_test()
{
	kern_return_t retval = KERN_SUCCESS;

	/* test for NULL input */
	retval = kcdata_memory_static_init(NULL, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_STACKSHOT, 100, KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_memory_static_init with NULL struct");

	/* another negative test with buffer size < 32 bytes */
	char data[30] = "sample_disk_io_stats";
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)&data, KCDATA_BUFFER_BEGIN_CRASHINFO, sizeof(data),
	    KCFLAG_USE_MEMCOPY);
	T_ASSERT(retval == KERN_RESOURCE_SHORTAGE, "init with 30 bytes failed as expected with KERN_RESOURCE_SHORTAGE");

	/* test with COPYOUT for 0x0 address. Should return KERN_NO_ACCESS */
	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_CRASHINFO, PAGE_SIZE,
	    KCFLAG_USE_COPYOUT);
	T_ASSERT(retval == KERN_NO_ACCESS, "writing to 0x0 returned KERN_NO_ACCESS");

	/* test with successful kcdata_memory_static_init */
	test_kc_data.kcd_length = 0xdeadbeef;
	mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
	T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");

	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
	    KCFLAG_USE_MEMCOPY);

	T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");

	T_ASSERT(test_kc_data.kcd_length == PAGE_SIZE, "kcdata length is set correctly to PAGE_SIZE.");
	T_LOG("addr_begin 0x%llx and end 0x%llx and address 0x%llx", test_kc_data.kcd_addr_begin, test_kc_data.kcd_addr_end, address);
	T_ASSERT(test_kc_data.kcd_addr_begin == address, "kcdata begin address is correct 0x%llx", (uint64_t)address);

	/* verify we have BEGIN and END HEADERS set */
	uint32_t * mem = (uint32_t *)address;
	T_ASSERT(mem[0] == KCDATA_BUFFER_BEGIN_STACKSHOT, "buffer does contain KCDATA_BUFFER_BEGIN_STACKSHOT");
	T_ASSERT(mem[4] == KCDATA_TYPE_BUFFER_END, "KCDATA_TYPE_BUFFER_END is appended as expected");
	T_ASSERT(mem[5] == 0, "size of BUFFER_END tag is zero");

	/* verify kcdata_memory_get_used_bytes() */
	uint64_t bytes_used = 0;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	T_ASSERT(bytes_used == (2 * sizeof(struct kcdata_item)), "bytes_used api returned expected %llu", bytes_used);

	/* test for kcdata_get_memory_addr() */

	mach_vm_address_t user_addr = 0;
	/* negative test for NULL user_addr AND/OR kcdata_descriptor */
	retval = kcdata_get_memory_addr(NULL, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL struct -> KERN_INVALID_ARGUMENT");

	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), NULL);
	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL user_addr -> KERN_INVALID_ARGUMENT");

	/* successful case with size 0. This is expected to succeed since a bare item type can be used as a boolean flag */
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_USECS_SINCE_EPOCH, 0, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Successfully got kcdata entry for 0 size data");
	T_ASSERT(user_addr == test_kc_data.kcd_addr_end, "0 sized data did not add any extra buffer space");

	/* successful case with valid size. */
	user_addr = 0xdeadbeef;
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "kcdata_get_memory_addr with valid values succeeded.");
	T_ASSERT(user_addr > test_kc_data.kcd_addr_begin, "user_addr is in range of buffer");
	T_ASSERT(user_addr < test_kc_data.kcd_addr_end, "user_addr is in range of buffer");

	/* Try creating an item with really large size */
	user_addr = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, PAGE_SIZE * 4, &user_addr);
	T_ASSERT(retval == KERN_RESOURCE_SHORTAGE, "Allocating entry with size > buffer -> KERN_RESOURCE_SHORTAGE");
	T_ASSERT(user_addr == 0xdeadbeef, "user_addr remained unaffected with failed kcdata_get_memory_addr");
	T_ASSERT(bytes_used == kcdata_memory_get_used_bytes(&test_kc_data), "The data structure should be unaffected");

	/* verify convenience functions for uint32_with_description */
	retval = kcdata_add_uint32_with_description(&test_kc_data, 0xbdc0ffee, "This is bad coffee");
	T_ASSERT(retval == KERN_SUCCESS, "add uint32 with description succeeded.");

	retval = kcdata_add_uint64_with_description(&test_kc_data, 0xf001badc0ffee, "another 8 byte no.");
	T_ASSERT(retval == KERN_SUCCESS, "add uint64 with desc succeeded.");

	/* verify creating an KCDATA_TYPE_ARRAY here */
	user_addr = 0xdeadbeef;
	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
	/* save memory address where the array will come up */
	struct kcdata_item * item_p = (struct kcdata_item *)test_kc_data.kcd_addr_end;

	retval = kcdata_get_memory_addr_for_array(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), 20, &user_addr);
	T_ASSERT(retval == KERN_SUCCESS, "Array of 20 integers should be possible");
	T_ASSERT(user_addr != 0xdeadbeef, "user_addr is updated as expected");
	T_ASSERT((kcdata_memory_get_used_bytes(&test_kc_data) - bytes_used) >= 20 * sizeof(uint64_t), "memory allocation is in range");
	kcdata_iter_t iter = kcdata_iter(item_p, PAGE_SIZE - kcdata_memory_get_used_bytes(&test_kc_data));
	T_ASSERT(kcdata_iter_array_elem_count(iter) == 20, "array count is 20");

	/* FIXME add tests here for ranges of sizes and counts */

	T_ASSERT(item_p->flags == (((uint64_t)KCDATA_TYPE_MACH_ABSOLUTE_TIME << 32) | 20), "flags are set correctly");

	/* test adding of custom type */

	retval = kcdata_add_type_definition(&test_kc_data, 0x999, data, &test_disk_io_stats_def[0],
	    sizeof(test_disk_io_stats_def) / sizeof(struct kcdata_subtype_descriptor));
	T_ASSERT(retval == KERN_SUCCESS, "adding custom type succeeded.");

	return KERN_SUCCESS;
}
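
/*
 * Minimal kcdata usage sketch distilled from the test above (editor's
 * illustration, compiled out): initialize a buffer, reserve space for one
 * item, and copy the payload into the returned address.
 */
#if 0
static void
kcdata_usage_sketch(void)
{
	struct kcdata_descriptor kcd;
	mach_vm_address_t buf = (mach_vm_address_t)kalloc(PAGE_SIZE);
	mach_vm_address_t item_addr = 0;
	uint64_t now = mach_absolute_time();

	kcdata_memory_static_init(&kcd, buf, KCDATA_BUFFER_BEGIN_STACKSHOT,
	    PAGE_SIZE, KCFLAG_USE_MEMCOPY);
	kcdata_get_memory_addr(&kcd, KCDATA_TYPE_MACH_ABSOLUTE_TIME,
	    sizeof(now), &item_addr);
	memcpy((void *)(uintptr_t)item_addr, &now, sizeof(now));
	kfree((void *)(uintptr_t)buf, PAGE_SIZE);
}
#endif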

/*
 * kern_return_t
 * kcdata_api_assert_tests()
 * {
 *      kern_return_t retval = 0;
 *      void * assert_check_retval = NULL;
 *      test_kc_data2.kcd_length = 0xdeadbeef;
 *      mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
 *      T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");
 *
 *      retval = kcdata_memory_static_init(&test_kc_data2, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
 *          KCFLAG_USE_MEMCOPY);
 *
 *      T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");
 *
 *      retval = T_REGISTER_ASSERT_CHECK("KCDATA_DESC_MAXLEN", &assert_check_retval);
 *      T_ASSERT(retval == KERN_SUCCESS, "registered assert widget");
 *
 *      // this will assert
 *      retval = kcdata_add_uint32_with_description(&test_kc_data2, 0xc0ffee, "really long description string for kcdata");
 *      T_ASSERT(retval == KERN_INVALID_ARGUMENT, "API param check returned KERN_INVALID_ARGUMENT correctly");
 *      T_ASSERT(assert_check_retval == (void *)XT_RET_W_SUCCESS, "assertion handler verified that it was hit");
 *
 *      return KERN_SUCCESS;
 * }
 */

#if defined(__arm__) || defined(__arm64__)

#include <arm/pmap.h>

#define MAX_PMAP_OBJECT_ELEMENT 100000

extern struct vm_object pmap_object_store; /* store pt pages */
extern unsigned long gPhysBase, gPhysSize, first_avail;

/*
 * Define macros to traverse the pmap object structures and extract
 * physical page numbers using information from the low globals only.
 * This emulates how Astris extracts information from a coredump.
 */
#if defined(__arm64__)

static inline uintptr_t
astris_vm_page_unpack_ptr(uintptr_t p)
{
	if (!p) {
		return (uintptr_t)0;
	}

	return (p & lowGlo.lgPmapMemFromArrayMask)
	       ? lowGlo.lgPmapMemStartAddr + (p & ~(lowGlo.lgPmapMemFromArrayMask)) * lowGlo.lgPmapMemPagesize
	       : lowGlo.lgPmapMemPackedBaseAddr + (p << lowGlo.lgPmapMemPackedShift);
}
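
/*
 * Decode example (editor's note; the constants are hypothetical): if
 * lgPmapMemFromArrayMask were 0x80000000, a packed value 0x80000005 has the
 * mask bit set, so it unpacks as an index into the vm_pages array:
 * lgPmapMemStartAddr + 5 * lgPmapMemPagesize. A value without the mask bit
 * is a pointer packed by shifting, and unpacks to
 * lgPmapMemPackedBaseAddr + (p << lgPmapMemPackedShift).
 */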

// assume next pointer is the first element
#define astris_vm_page_queue_next(qc) (astris_vm_page_unpack_ptr(*((uint32_t *)(qc))))

#endif

#if defined(__arm__)

// assume next pointer is the first element
#define astris_vm_page_queue_next(qc) *((uintptr_t *)(qc))

#endif

#define astris_vm_page_queue_first(q) astris_vm_page_queue_next(q)

#define astris_vm_page_queue_end(q, qe) ((q) == (qe))

#define astris_vm_page_queue_iterate(head, elt)                                                           \
	for ((elt) = (uintptr_t)astris_vm_page_queue_first((head)); !astris_vm_page_queue_end((head), (elt)); \
	    (elt) = (uintptr_t)astris_vm_page_queue_next(((elt) + (uintptr_t)lowGlo.lgPmapMemChainOffset)))

#define astris_ptoa(x) ((vm_address_t)(x) << lowGlo.lgPageShift)

static inline ppnum_t
astris_vm_page_get_phys_page(uintptr_t m)
{
	return (m >= lowGlo.lgPmapMemStartAddr && m < lowGlo.lgPmapMemEndAddr)
	       ? (ppnum_t)((m - lowGlo.lgPmapMemStartAddr) / lowGlo.lgPmapMemPagesize + lowGlo.lgPmapMemFirstppnum)
	       : *((ppnum_t *)(m + lowGlo.lgPmapMemPageOffset));
}

kern_return_t
pmap_coredump_test(void)
{
	int iter = 0;
	uintptr_t p;

	T_LOG("Testing coredump info for PMAP.");

	T_ASSERT_GE_ULONG(lowGlo.lgStaticAddr, gPhysBase, NULL);
	T_ASSERT_LE_ULONG(lowGlo.lgStaticAddr + lowGlo.lgStaticSize, first_avail, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMajorVersion, 3, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMinorVersion, 0, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMagic, LOWGLO_LAYOUT_MAGIC, NULL);

	// check the constant values in lowGlo
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemQ, ((uint64_t) &(pmap_object_store.memq)), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPageOffset, offsetof(struct vm_page_with_ppnum, vmp_phys_page), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemChainOffset, offsetof(struct vm_page, vmp_listq), NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPagesize, sizeof(struct vm_page), NULL);

#if defined(__arm64__)
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemFromArrayMask, VM_PACKED_FROM_VM_PAGES_ARRAY, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedShift, VM_PACKED_POINTER_SHIFT, NULL);
	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedBaseAddr, VM_MIN_KERNEL_AND_KEXT_ADDRESS, NULL);
#endif

	vm_object_lock_shared(&pmap_object_store);
	astris_vm_page_queue_iterate(lowGlo.lgPmapMemQ, p)
	{
		ppnum_t ppnum = astris_vm_page_get_phys_page(p);
		pmap_paddr_t pa = (pmap_paddr_t)astris_ptoa(ppnum);
		T_ASSERT_GE_ULONG(pa, gPhysBase, NULL);
		T_ASSERT_LT_ULONG(pa, gPhysBase + gPhysSize, NULL);
		iter++;
		T_ASSERT_LT_INT(iter, MAX_PMAP_OBJECT_ELEMENT, NULL);
	}
	vm_object_unlock(&pmap_object_store);

	T_ASSERT_GT_INT(iter, 0, NULL);
	return KERN_SUCCESS;
}
#endif

struct ts_kern_prim_test_args {
	int *end_barrier;
	int *notify_b;
	int *wait_event_b;
	int before_num;
	int *notify_a;
	int *wait_event_a;
	int after_num;
	int priority_to_check;
};

static void
wait_threads(
	int* var,
	int num)
{
	if (var != NULL) {
		while (os_atomic_load(var, acquire) != num) {
			assert_wait((event_t) var, THREAD_UNINT);
			if (os_atomic_load(var, acquire) != num) {
				(void) thread_block(THREAD_CONTINUE_NULL);
			} else {
				clear_wait(current_thread(), THREAD_AWAKENED);
			}
		}
	}
}

static void
wake_threads(
	int* var)
{
	if (var) {
		os_atomic_inc(var, relaxed);
		thread_wakeup((event_t) var);
	}
}
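
/*
 * wait_threads()/wake_threads() form a simple counting barrier (editor's
 * note). Typical pairing, as used by the tests below:
 *
 *   int barrier = 0;
 *   // each of N worker threads, when done:
 *   wake_threads(&barrier);
 *   // coordinator blocks until all N have checked in:
 *   wait_threads(&barrier, N);
 */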

extern void IOSleep(int);

static void
thread_lock_unlock_kernel_primitive(
	void *args,
	__unused wait_result_t wr)
{
	thread_t thread = current_thread();
	struct ts_kern_prim_test_args *info = (struct ts_kern_prim_test_args*) args;
	int pri;

	thread_lock(thread);
	pri = thread->sched_pri;
	thread_unlock(thread);

	wait_threads(info->wait_event_b, info->before_num);
	wake_threads(info->notify_b);

	tstile_test_prim_lock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

	wake_threads(info->notify_a);
	wait_threads(info->wait_event_a, info->after_num);

	IOSleep(100);

	if (info->priority_to_check) {
		thread_lock(thread);
		pri = thread->sched_pri;
		thread_unlock(thread);
		T_ASSERT(pri == info->priority_to_check, "Priority thread: current sched %d sched wanted %d", pri, info->priority_to_check);
	}

	tstile_test_prim_unlock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);

	wake_threads(info->end_barrier);
	thread_terminate_self();
}

kern_return_t
ts_kernel_primitive_test(void)
{
	thread_t owner, thread1, thread2;
	struct ts_kern_prim_test_args targs[2] = {};
	kern_return_t result;
	int end_barrier = 0;
	int owner_locked = 0;
	int waiters_ready = 0;

	T_LOG("Testing turnstile kernel primitive");

	targs[0].notify_b = NULL;
	targs[0].wait_event_b = NULL;
	targs[0].before_num = 0;
	targs[0].notify_a = &owner_locked;
	targs[0].wait_event_a = &waiters_ready;
	targs[0].after_num = 2;
	targs[0].priority_to_check = 90;
	targs[0].end_barrier = &end_barrier;

	// Start owner with priority 80
	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[0], 80, &owner);
	T_ASSERT(result == KERN_SUCCESS, "Starting owner");

	targs[1].notify_b = &waiters_ready;
	targs[1].wait_event_b = &owner_locked;
	targs[1].before_num = 1;
	targs[1].notify_a = NULL;
	targs[1].wait_event_a = NULL;
	targs[1].after_num = 0;
	targs[1].priority_to_check = 0;
	targs[1].end_barrier = &end_barrier;

	// Start waiters with priority 85 and 90
	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 85, &thread1);
	T_ASSERT(result == KERN_SUCCESS, "Starting thread1");

	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 90, &thread2);
	T_ASSERT(result == KERN_SUCCESS, "Starting thread2");

	wait_threads(&end_barrier, 3);

	return KERN_SUCCESS;
}
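
/*
 * Expected sequence for the test above (editor's note): the owner (priority
 * 80) takes the turnstile-backed test lock and bumps owner_locked; the two
 * waiters (85 and 90) then block on the same lock, and the turnstile pushes
 * the highest waiter priority onto the owner. That push is what
 * targs[0].priority_to_check == 90 verifies before the owner unlocks.
 */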

#define MTX_LOCK 0
#define RW_LOCK 1

#define NUM_THREADS 4

struct synch_test_common {
	unsigned int nthreads;
	thread_t *threads;
	int max_pri;
	int test_done;
};

static kern_return_t
init_synch_test_common(struct synch_test_common *info, unsigned int nthreads)
{
	info->nthreads = nthreads;
	info->threads = kalloc(sizeof(thread_t) * nthreads);
	if (!info->threads) {
		return KERN_RESOURCE_SHORTAGE;
	}

	return KERN_SUCCESS;
}

static void
destroy_synch_test_common(struct synch_test_common *info)
{
	kfree(info->threads, sizeof(thread_t) * info->nthreads);
}

static void
start_threads(thread_continue_t func, struct synch_test_common *info, bool sleep_after_first)
{
	thread_t thread;
	kern_return_t result;
	uint i;
	int priority = 75;

	info->test_done = 0;

	for (i = 0; i < info->nthreads; i++) {
		info->threads[i] = NULL;
	}

	info->max_pri = priority + (info->nthreads - 1) * 5;
	if (info->max_pri > 95) {
		info->max_pri = 95;
	}

	for (i = 0; i < info->nthreads; i++) {
		result = kernel_thread_start_priority((thread_continue_t)func, info, priority, &thread);
		os_atomic_store(&info->threads[i], thread, release);
		T_ASSERT(result == KERN_SUCCESS, "Starting thread %d, priority %d, %p", i, priority, thread);

		priority += 5;

		if (i == 0 && sleep_after_first) {
			IOSleep(100);
		}
	}
}

static unsigned int
get_max_pri(struct synch_test_common * info)
{
	return info->max_pri;
}

static void
wait_all_thread(struct synch_test_common * info)
{
	wait_threads(&info->test_done, info->nthreads);
}

static void
notify_waiter(struct synch_test_common * info)
{
	wake_threads(&info->test_done);
}

static void
wait_for_waiters(struct synch_test_common *info)
{
	uint i, j;
	thread_t thread;

	for (i = 0; i < info->nthreads; i++) {
		j = 0;
		while (os_atomic_load(&info->threads[i], acquire) == NULL) {
			if (j % 100 == 0) {
				IOSleep(10);
			}
			j++;
		}

		if (info->threads[i] != current_thread()) {
			j = 0;
			do {
				thread = os_atomic_load(&info->threads[i], relaxed);
				if (thread == (thread_t) 1) {
					break;
				}

				if (!(thread->state & TH_RUN)) {
					break;
				}

				if (j % 100 == 0) {
					IOSleep(100);
				}
				j++;

				if (thread->started == FALSE) {
					continue;
				}
			} while (thread->state & TH_RUN);
		}
	}
}

static void
exclude_current_waiter(struct synch_test_common *info)
{
	uint i, j;

	for (i = 0; i < info->nthreads; i++) {
		j = 0;
		while (os_atomic_load(&info->threads[i], acquire) == NULL) {
			if (j % 100 == 0) {
				IOSleep(10);
			}
			j++;
		}

		if (os_atomic_load(&info->threads[i], acquire) == current_thread()) {
			os_atomic_store(&info->threads[i], (thread_t)1, release);
			return;
		}
	}
}

struct info_sleep_inheritor_test {
	struct synch_test_common head;
	lck_mtx_t mtx_lock;
	lck_rw_t rw_lock;
	decl_lck_mtx_gate_data(, gate);
	boolean_t gate_closed;
	int prim_type;
	boolean_t work_to_do;
	unsigned int max_pri;
	unsigned int steal_pri;
	int synch_value;
	int synch;
	int value;
	int handoff_failure;
	thread_t thread_inheritor;
};

static void
primitive_lock(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_lock(&info->mtx_lock);
		break;
	case RW_LOCK:
		lck_rw_lock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static void
primitive_unlock(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_unlock(&info->mtx_lock);
		break;
	case RW_LOCK:
		lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static wait_result_t
primitive_sleep_with_inheritor(struct info_sleep_inheritor_test *info)
{
	wait_result_t ret = KERN_SUCCESS;
	switch (info->prim_type) {
	case MTX_LOCK:
		ret = lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
		break;
	case RW_LOCK:
		ret = lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}

	return ret;
}
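
/*
 * The sleep-with-inheritor idiom wrapped above (editor's sketch): the caller
 * holds the lock, names the thread that should inherit its priority while it
 * sleeps, and is woken by one of the wakeup*_with_inheritor() calls. With
 * LCK_SLEEP_DEFAULT the lock is dropped for the wait and re-acquired before
 * returning:
 *
 *   primitive_lock(info);
 *   while (!condition) {
 *           primitive_sleep_with_inheritor(info);
 *   }
 *   primitive_unlock(info);
 */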

static void
primitive_wakeup_one_with_inheritor(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
	case RW_LOCK:
		wakeup_one_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED, LCK_WAKE_DEFAULT, &info->thread_inheritor);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static void
primitive_wakeup_all_with_inheritor(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
	case RW_LOCK:
		wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
	return;
}

static void
primitive_change_sleep_inheritor(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
	case RW_LOCK:
		change_sleep_inheritor((event_t) &info->thread_inheritor, info->thread_inheritor);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
	return;
}

static kern_return_t
primitive_gate_try_close(struct info_sleep_inheritor_test *info)
{
	kern_return_t ret = KERN_SUCCESS;
	switch (info->prim_type) {
	case MTX_LOCK:
		ret = lck_mtx_gate_try_close(&info->mtx_lock, &info->gate);
		break;
	case RW_LOCK:
		ret = lck_rw_gate_try_close(&info->rw_lock, &info->gate);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
	return ret;
}

static gate_wait_result_t
primitive_gate_wait(struct info_sleep_inheritor_test *info)
{
	gate_wait_result_t ret = GATE_OPENED;
	switch (info->prim_type) {
	case MTX_LOCK:
		ret = lck_mtx_gate_wait(&info->mtx_lock, &info->gate, LCK_SLEEP_DEFAULT, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
		break;
	case RW_LOCK:
		ret = lck_rw_gate_wait(&info->rw_lock, &info->gate, LCK_SLEEP_DEFAULT, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
	return ret;
}

static void
primitive_gate_open(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_gate_open(&info->mtx_lock, &info->gate);
		break;
	case RW_LOCK:
		lck_rw_gate_open(&info->rw_lock, &info->gate);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static void
primitive_gate_close(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_gate_close(&info->mtx_lock, &info->gate);
		break;
	case RW_LOCK:
		lck_rw_gate_close(&info->rw_lock, &info->gate);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static void
primitive_gate_steal(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_gate_steal(&info->mtx_lock, &info->gate);
		break;
	case RW_LOCK:
		lck_rw_gate_steal(&info->rw_lock, &info->gate);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static kern_return_t
primitive_gate_handoff(struct info_sleep_inheritor_test *info, int flags)
{
	kern_return_t ret = KERN_SUCCESS;
	switch (info->prim_type) {
	case MTX_LOCK:
		ret = lck_mtx_gate_handoff(&info->mtx_lock, &info->gate, flags);
		break;
	case RW_LOCK:
		ret = lck_rw_gate_handoff(&info->rw_lock, &info->gate, flags);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
	return ret;
}

static void
primitive_gate_assert(struct info_sleep_inheritor_test *info, int type)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_gate_assert(&info->mtx_lock, &info->gate, type);
		break;
	case RW_LOCK:
		lck_rw_gate_assert(&info->rw_lock, &info->gate, type);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static void
primitive_gate_init(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_gate_init(&info->mtx_lock, &info->gate);
		break;
	case RW_LOCK:
		lck_rw_gate_init(&info->rw_lock, &info->gate);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}

static void
primitive_gate_destroy(struct info_sleep_inheritor_test *info)
{
	switch (info->prim_type) {
	case MTX_LOCK:
		lck_mtx_gate_destroy(&info->mtx_lock, &info->gate);
		break;
	case RW_LOCK:
		lck_rw_gate_destroy(&info->rw_lock, &info->gate);
		break;
	default:
		panic("invalid type %d", info->prim_type);
	}
}
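
/*
 * Gate protocol exercised by the tests below (editor's sketch, compiled
 * out): one thread closes the gate and does the protected work; the others
 * wait on it and are released when it is opened (or handed off).
 */
#if 0
static void
gate_usage_sketch(struct info_sleep_inheritor_test *info)
{
	primitive_lock(info);
	if (primitive_gate_try_close(info) == KERN_SUCCESS) {
		/* holder: do the work, then let the waiters through */
		primitive_gate_open(info);
	} else {
		/* waiter: sleeps, boosting the holder, until the gate opens */
		(void) primitive_gate_wait(info);
	}
	primitive_unlock(info);
}
#endif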

static void
thread_inheritor_like_mutex(
	void *args,
	__unused wait_result_t wr)
{
	wait_result_t wait;

	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	/*
	 * spin here to start concurrently
	 */
	wake_threads(&info->synch);
	wait_threads(&info->synch, info->synch_value);

	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		info->thread_inheritor = current_thread();
	} else {
		wait = primitive_sleep_with_inheritor(info);
		T_ASSERT(wait == THREAD_AWAKENED || wait == THREAD_NOT_WAITING, "sleep_with_inheritor return");
	}
	primitive_unlock(info);

	IOSleep(100);
	info->value++;

	primitive_lock(info);

	T_ASSERT(info->thread_inheritor == current_thread(), "thread_inheritor is %p", info->thread_inheritor);
	primitive_wakeup_one_with_inheritor(info);
	T_LOG("woken up %p", info->thread_inheritor);

	if (info->thread_inheritor == NULL) {
		T_ASSERT(info->handoff_failure == 0, "handoff failures");
		info->handoff_failure++;
	} else {
		T_ASSERT(info->thread_inheritor != current_thread(), "thread_inheritor is %p", info->thread_inheritor);
		thread_deallocate(info->thread_inheritor);
	}

	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}

static void
thread_just_inheritor_do_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		info->thread_inheritor = current_thread();
		primitive_unlock(info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

		wait_threads(&info->synch, info->synch_value - 1);

		wait_for_waiters((struct synch_test_common *)info);

		max_pri = get_max_pri((struct synch_test_common *) info);
		T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

		os_atomic_store(&info->synch, 0, relaxed);
		primitive_lock(info);
		primitive_wakeup_all_with_inheritor(info);
	} else {
		wake_threads(&info->synch);
		primitive_sleep_with_inheritor(info);
	}

	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}

static void
thread_steal_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	if (info->thread_inheritor == NULL) {
		info->thread_inheritor = current_thread();
		exclude_current_waiter((struct synch_test_common *)info);

		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
		primitive_unlock(info);

		wait_threads(&info->synch, info->synch_value - 2);

		wait_for_waiters((struct synch_test_common *)info);
		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
		primitive_lock(info);
		if (info->thread_inheritor == current_thread()) {
			primitive_wakeup_all_with_inheritor(info);
		}
	} else {
		if (info->steal_pri == 0) {
			info->steal_pri = my_pri;
			info->thread_inheritor = current_thread();
			primitive_change_sleep_inheritor(info);
			exclude_current_waiter((struct synch_test_common *)info);

			primitive_unlock(info);

			wait_threads(&info->synch, info->synch_value - 2);

			T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
			wait_for_waiters((struct synch_test_common *)info);

			T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);

			primitive_lock(info);
			primitive_wakeup_all_with_inheritor(info);
		} else {
			if (my_pri > info->steal_pri) {
				info->steal_pri = my_pri;
			}
			wake_threads(&info->synch);
			primitive_sleep_with_inheritor(info);
			exclude_current_waiter((struct synch_test_common *)info);
		}
	}
	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}

static void
thread_no_inheritor_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());
	primitive_lock(info);

	info->value--;
	if (info->value == 0) {
		primitive_wakeup_all_with_inheritor(info);
	} else {
		info->thread_inheritor = NULL;
		primitive_sleep_with_inheritor(info);
	}

	primitive_unlock(info);

	assert(current_thread()->kern_promotion_schedpri == 0);
	notify_waiter((struct synch_test_common *)info);

	thread_terminate_self();
}

static void
thread_mtx_work(
	void *args,
	__unused wait_result_t wr)
{
	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
	uint my_pri = current_thread()->sched_pri;
	int i;
	u_int8_t rand;
	unsigned int mod_rand;
	uint max_pri;

	T_LOG("Started thread pri %d %p", my_pri, current_thread());

	for (i = 0; i < 10; i++) {
		lck_mtx_lock(&info->mtx_lock);
		if (info->thread_inheritor == NULL) {
			info->thread_inheritor = current_thread();
			lck_mtx_unlock(&info->mtx_lock);

			T_LOG("Thread pri %d first to run %p", my_pri, current_thread());

			wait_threads(&info->synch, info->synch_value - 1);
			wait_for_waiters((struct synch_test_common *)info);
			max_pri = get_max_pri((struct synch_test_common *) info);
			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);

			os_atomic_store(&info->synch, 0, relaxed);

			lck_mtx_lock(&info->mtx_lock);
			info->thread_inheritor = NULL;
			wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
			lck_mtx_unlock(&info->mtx_lock);
			continue;
		}

		read_random(&rand, sizeof(rand));
		mod_rand = rand % 2;

		wake_threads(&info->synch);
		switch (mod_rand) {
		case 0:
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			lck_mtx_unlock(&info->mtx_lock);
			break;
		case 1:
			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
			break;
		default:
1717 panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
1718 }
1719 }
1720
1721 /*
1722 * busy-wait here so every thread has stopped using the lock as a regular mutex before the spin-lock pass below
1723 */
1724 wake_threads(&info->synch);
1725 wait_threads(&info->synch, info->synch_value);
1726
1727 for (i = 0; i < 10; i++) {
1728 /* read_random might sleep so read it before acquiring the mtx as spin */
1729 read_random(&rand, sizeof(rand));
1730
1731 lck_mtx_lock_spin(&info->mtx_lock);
1732 if (info->thread_inheritor == NULL) {
1733 info->thread_inheritor = current_thread();
1734 lck_mtx_unlock(&info->mtx_lock);
1735
1736 T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
1737 wait_for_waiters((struct synch_test_common *)info);
1738 max_pri = get_max_pri((struct synch_test_common *) info);
1739 T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
1740
1741 lck_mtx_lock_spin(&info->mtx_lock);
1742 info->thread_inheritor = NULL;
1743 wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1744 lck_mtx_unlock(&info->mtx_lock);
1745 continue;
1746 }
1747
1748 mod_rand = rand % 2;
1749 switch (mod_rand) {
1750 case 0:
1751 lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1752 lck_mtx_unlock(&info->mtx_lock);
1753 break;
1754 case 1:
1755 lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN_ALWAYS, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1756 lck_mtx_unlock(&info->mtx_lock);
1757 break;
1758 default:
1759 panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
1760 }
1761 }
1762 assert(current_thread()->kern_promotion_schedpri == 0);
1763 notify_waiter((struct synch_test_common *)info);
1764
1765 thread_terminate_self();
1766 }
1767
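/*
 * Worker for test_rw_lock(): same scheme as thread_mtx_work(), but on the
 * rw lock; the inheritor claims its role by upgrading from shared to
 * exclusive, and the sleepers cycle through the LCK_SLEEP_* flags accepted
 * by lck_rw_sleep_with_inheritor().
 */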
1768 static void
1769 thread_rw_work(
1770 void *args,
1771 __unused wait_result_t wr)
1772 {
1773 struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
1774 uint my_pri = current_thread()->sched_pri;
1775 int i;
1776 lck_rw_type_t type;
1777 u_int8_t rand;
1778 unsigned int mod_rand;
1779 uint max_pri;
1780
1781 T_LOG("Started thread pri %d %p", my_pri, current_thread());
1782
1783 for (i = 0; i < 10; i++) {
1784 try_again:
1785 type = LCK_RW_TYPE_SHARED;
1786 lck_rw_lock(&info->rw_lock, type);
1787 if (info->thread_inheritor == NULL) {
1788 type = LCK_RW_TYPE_EXCLUSIVE;
1789
1790 if (lck_rw_lock_shared_to_exclusive(&info->rw_lock)) {
1791 if (info->thread_inheritor == NULL) {
1792 info->thread_inheritor = current_thread();
1793 lck_rw_unlock(&info->rw_lock, type);
1794 wait_threads(&info->synch, info->synch_value - 1);
1795
1796 T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
1797 wait_for_waiters((struct synch_test_common *)info);
1798 max_pri = get_max_pri((struct synch_test_common *) info);
1799 T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
1800
1801 os_atomic_store(&info->synch, 0, relaxed);
1802
1803 lck_rw_lock(&info->rw_lock, type);
1804 info->thread_inheritor = NULL;
1805 wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1806 lck_rw_unlock(&info->rw_lock, type);
1807 continue;
1808 }
1809 } else {
1810 goto try_again;
1811 }
1812 }
1813
1814 read_random(&rand, sizeof(rand));
1815 mod_rand = rand % 4;
1816
1817 wake_threads(&info->synch);
1818 switch (mod_rand) {
1819 case 0:
1820 lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1821 lck_rw_unlock(&info->rw_lock, type);
1822 break;
1823 case 1:
1824 lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1825 break;
1826 case 2:
1827 lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_SHARED, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1828 lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_SHARED);
1829 break;
1830 case 3:
1831 lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_EXCLUSIVE, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1832 lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
1833 break;
1834 default:
1835 panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
1836 }
1837 }
1838
1839 assert(current_thread()->kern_promotion_schedpri == 0);
1840 notify_waiter((struct synch_test_common *)info);
1841
1842 thread_terminate_self();
1843 }
1844
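/*
 * One thread becomes the inheritor for all the waiters, collects their push,
 * then wakes them all at once.
 */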
1845 static void
1846 test_sleep_with_wake_all(struct info_sleep_inheritor_test *info, int prim_type)
1847 {
1848 info->prim_type = prim_type;
1849 info->synch = 0;
1850 info->synch_value = info->head.nthreads;
1851
1852 info->thread_inheritor = NULL;
1853
1854 start_threads((thread_continue_t)thread_just_inheritor_do_work, (struct synch_test_common *)info, TRUE);
1855 wait_all_thread((struct synch_test_common *)info);
1856 }
1857
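/*
 * The wait channel is handed off thread to thread like a mutex; exactly one
 * handoff should find no waiter, hence the handoff_failure == 1 assert.
 */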
1858 static void
1859 test_sleep_with_wake_one(struct info_sleep_inheritor_test *info, int prim_type)
1860 {
1861 info->prim_type = prim_type;
1862
1863 info->synch = 0;
1864 info->synch_value = info->head.nthreads;
1865 info->value = 0;
1866 info->handoff_failure = 0;
1867 info->thread_inheritor = NULL;
1868
1869 start_threads((thread_continue_t)thread_inheritor_like_mutex, (struct synch_test_common *)info, FALSE);
1870 wait_all_thread((struct synch_test_common *)info);
1871
1872 T_ASSERT(info->value == (int)info->head.nthreads, "value protected by sleep");
1873 T_ASSERT(info->handoff_failure == 1, "handoff failures");
1874 }
1875
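/*
 * One thread steals the inheritorship through change_sleep_inheritor() and
 * verifies it received the highest waiter priority recorded in steal_pri.
 */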
1876 static void
1877 test_change_sleep_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
1878 {
1879 info->prim_type = prim_type;
1880
1881 info->thread_inheritor = NULL;
1882 info->steal_pri = 0;
1883 info->synch = 0;
1884 info->synch_value = info->head.nthreads;
1885
1886 start_threads((thread_continue_t)thread_steal_work, (struct synch_test_common *)info, FALSE);
1887 wait_all_thread((struct synch_test_common *)info);
1888 }
1889
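/*
 * All waiters sleep with a NULL inheritor, so no push should be donated
 * anywhere (checked by the kern_promotion_schedpri == 0 assert on exit).
 */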
1890 static void
1891 test_no_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
1892 {
1893 info->prim_type = prim_type;
1894 info->synch = 0;
1895 info->synch_value = info->head.nthreads;
1896
1897 info->thread_inheritor = NULL;
1898 info->value = info->head.nthreads;
1899
1900 start_threads((thread_continue_t)thread_no_inheritor_work, (struct synch_test_common *)info, FALSE);
1901 wait_all_thread((struct synch_test_common *)info);
1902 }
1903
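/*
 * The next two drivers stress the full matrix of sleep flags on the rw lock
 * and on the mutex (see thread_rw_work() and thread_mtx_work() above).
 */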
1904 static void
1905 test_rw_lock(struct info_sleep_inheritor_test *info)
1906 {
1907 info->thread_inheritor = NULL;
1908 info->value = info->head.nthreads;
1909 info->synch = 0;
1910 info->synch_value = info->head.nthreads;
1911
1912 start_threads((thread_continue_t)thread_rw_work, (struct synch_test_common *)info, FALSE);
1913 wait_all_thread((struct synch_test_common *)info);
1914 }
1915
1916 static void
1917 test_mtx_lock(struct info_sleep_inheritor_test *info)
1918 {
1919 info->thread_inheritor = NULL;
1920 info->value = info->head.nthreads;
1921 info->synch = 0;
1922 info->synch_value = info->head.nthreads;
1923
1924 start_threads((thread_continue_t)thread_mtx_work, (struct synch_test_common *)info, FALSE);
1925 wait_all_thread((struct synch_test_common *)info);
1926 }
1927
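/*
 * For reference, the usage pattern these tests exercise is roughly the
 * following (a sketch, not part of the test):
 *
 *      lck_mtx_lock(&mtx);
 *      while (!condition) {
 *              // push our priority onto the thread expected to wake us
 *              lck_mtx_sleep_with_inheritor(&mtx, LCK_SLEEP_DEFAULT,
 *                  (event_t) &channel, owner,
 *                  THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
 *      }
 *      ...
 *      wakeup_all_with_inheritor((event_t) &channel, THREAD_AWAKENED);
 *      lck_mtx_unlock(&mtx);
 */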
1928 kern_return_t
1929 ts_kernel_sleep_inheritor_test(void)
1930 {
1931 struct info_sleep_inheritor_test info = {};
1932
1933 init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);
1934
1935 lck_attr_t* lck_attr = lck_attr_alloc_init();
1936 lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
1937 lck_grp_t* lck_grp = lck_grp_alloc_init("test sleep_inheritor", lck_grp_attr);
1938
1939 lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
1940 lck_rw_init(&info.rw_lock, lck_grp, lck_attr);
1941
1942 /*
1943 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
1944 */
1945 T_LOG("Testing mtx sleep with inheritor and wake_all_with_inheritor");
1946 test_sleep_with_wake_all(&info, MTX_LOCK);
1947
1948 /*
1949 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
1950 */
1951 T_LOG("Testing rw sleep with inheritor and wake_all_with_inheritor");
1952 test_sleep_with_wake_all(&info, RW_LOCK);
1953
1954 /*
1955 * Testing lck_mtx_sleep_with_inheritor and wakeup_one_with_inheritor
1956 */
1957 T_LOG("Testing mtx sleep with inheritor and wake_one_with_inheritor");
1958 test_sleep_with_wake_one(&info, MTX_LOCK);
1959
1960 /*
1961 * Testing lck_rw_sleep_with_inheritor and wakeup_one_with_inheritor
1962 */
1963 T_LOG("Testing rw sleep with inheritor and wake_one_with_inheritor");
1964 test_sleep_with_wake_one(&info, RW_LOCK);
1965
1966 /*
1967 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
1968 * and change_sleep_inheritor
1969 */
1970 T_LOG("Testing change_sleep_inheritor with mxt sleep");
1971 test_change_sleep_inheritor(&info, MTX_LOCK);
1972
1973 /*
1974 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
1975 * and change_sleep_inheritor
1976 */
1977 T_LOG("Testing change_sleep_inheritor with rw sleep");
1978 test_change_sleep_inheritor(&info, RW_LOCK);
1979
1980 /*
1981 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
1982 * with inheritor NULL
1983 */
1984 T_LOG("Testing inheritor NULL");
1985 test_no_inheritor(&info, MTX_LOCK);
1986
1987 /*
1988 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
1989 * with inheritor NULL
1990 */
1991 T_LOG("Testing inheritor NULL");
1992 test_no_inheritor(&info, RW_LOCK);
1993
1994 /*
1995 * Testing mtx locking combinations
1996 */
1997 T_LOG("Testing mtx locking combinations");
1998 test_mtx_lock(&info);
1999
2000 /*
2001 * Testing rw locking combinations
2002 */
2003 T_LOG("Testing rw locking combinations");
2004 test_rw_lock(&info);
2005
2006 destroy_synch_test_common((struct synch_test_common *)&info);
2007
2008 lck_attr_free(lck_attr);
2009 lck_grp_attr_free(lck_grp_attr);
2010 lck_rw_destroy(&info.rw_lock, lck_grp);
2011 lck_mtx_destroy(&info.mtx_lock, lck_grp);
2012 lck_grp_free(lck_grp);
2013
2014 return KERN_SUCCESS;
2015 }
2016
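/*
 * Worker for test_gate_steal(): the first thread closes the gate, a second
 * one steals it with primitive_gate_steal() and asserts it inherited the
 * highest priority among the remaining threads, which all wait on the gate.
 */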
2017 static void
2018 thread_gate_aggressive(
2019 void *args,
2020 __unused wait_result_t wr)
2021 {
2022 struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
2023 uint my_pri = current_thread()->sched_pri;
2024
2025 T_LOG("Started thread pri %d %p", my_pri, current_thread());
2026
2027 primitive_lock(info);
2028 if (info->thread_inheritor == NULL) {
2029 info->thread_inheritor = current_thread();
2030 primitive_gate_assert(info, GATE_ASSERT_OPEN);
2031 primitive_gate_close(info);
2032 exclude_current_waiter((struct synch_test_common *)info);
2033
2034 primitive_unlock(info);
2035
2036 wait_threads(&info->synch, info->synch_value - 2);
2037 wait_for_waiters((struct synch_test_common *)info);
2038 T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
2039
2040 primitive_lock(info);
2041 if (info->thread_inheritor == current_thread()) {
2042 primitive_gate_open(info);
2043 }
2044 } else {
2045 if (info->steal_pri == 0) {
2046 info->steal_pri = my_pri;
2047 info->thread_inheritor = current_thread();
2048 primitive_gate_steal(info);
2049 exclude_current_waiter((struct synch_test_common *)info);
2050
2051 primitive_unlock(info);
2052 wait_threads(&info->synch, info->synch_value - 2);
2053
2054 T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
2055 wait_for_waiters((struct synch_test_common *)info);
2056 T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "gate keeper priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);
2057
2058 primitive_lock(info);
2059 primitive_gate_open(info);
2060 } else {
2061 if (my_pri > info->steal_pri) {
2062 info->steal_pri = my_pri;
2063 }
2064 wake_threads(&info->synch);
2065 primitive_gate_wait(info);
2066 exclude_current_waiter((struct synch_test_common *)info);
2067 }
2068 }
2069 primitive_unlock(info);
2070
2071 assert(current_thread()->kern_promotion_schedpri == 0);
2072 notify_waiter((struct synch_test_common *)info);
2073
2074 thread_terminate_self();
2075 }
2076
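/*
 * Worker for test_gate_handoff(): the gate is used like a mutex and handed
 * off from holder to holder; only the last holder should find no waiter
 * (KERN_NOT_WAITING), which is counted in handoff_failure.
 */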
2077 static void
2078 thread_gate_like_mutex(
2079 void *args,
2080 __unused wait_result_t wr)
2081 {
2082 gate_wait_result_t wait;
2083 kern_return_t ret;
2084 uint my_pri = current_thread()->sched_pri;
2085
2086 struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
2087
2088 T_LOG("Started thread pri %d %p", my_pri, current_thread());
2089
2090 /*
2091 * busy-wait here so all the threads start concurrently
2092 */
2093 wake_threads(&info->synch);
2094 wait_threads(&info->synch, info->synch_value);
2095
2096 primitive_lock(info);
2097
2098 if (primitive_gate_try_close(info) != KERN_SUCCESS) {
2099 wait = primitive_gate_wait(info);
2100 T_ASSERT(wait == GATE_HANDOFF, "gate_wait return");
2101 }
2102
2103 primitive_gate_assert(info, GATE_ASSERT_HELD);
2104
2105 primitive_unlock(info);
2106
2107 IOSleep(100);
2108 info->value++;
2109
2110 primitive_lock(info);
2111
2112 ret = primitive_gate_handoff(info, GATE_HANDOFF_DEFAULT);
2113 if (ret == KERN_NOT_WAITING) {
2114 T_ASSERT(info->handoff_failure == 0, "handoff failures");
2115 primitive_gate_handoff(info, GATE_HANDOFF_OPEN_IF_NO_WAITERS);
2116 info->handoff_failure++;
2117 }
2118
2119 primitive_unlock(info);
2120 notify_waiter((struct synch_test_common *)info);
2121
2122 thread_terminate_self();
2123 }
2124
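/*
 * Worker for test_gate_push(): a single thread closes the gate and does the
 * work while everybody else waits on it; the holder checks that it runs at
 * the maximum priority of the threads pushing on the gate.
 */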
2125 static void
2126 thread_just_one_do_work(
2127 void *args,
2128 __unused wait_result_t wr)
2129 {
2130 struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
2131 uint my_pri = current_thread()->sched_pri;
2132 uint max_pri;
2133
2134 T_LOG("Started thread pri %d %p", my_pri, current_thread());
2135
2136 primitive_lock(info);
2137 check_again:
2138 if (info->work_to_do) {
2139 if (primitive_gate_try_close(info) == KERN_SUCCESS) {
2140 primitive_gate_assert(info, GATE_ASSERT_HELD);
2141 primitive_unlock(info);
2142
2143 T_LOG("Thread pri %d acquired the gate %p", my_pri, current_thread());
2144 wait_threads(&info->synch, info->synch_value - 1);
2145 wait_for_waiters((struct synch_test_common *)info);
2146 max_pri = get_max_pri((struct synch_test_common *) info);
2147 T_ASSERT((uint) current_thread()->sched_pri == max_pri, "gate owner priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
2148 os_atomic_store(&info->synch, 0, relaxed);
2149
2150 primitive_lock(info);
2151 info->work_to_do = FALSE;
2152 primitive_gate_open(info);
2153 } else {
2154 primitive_gate_assert(info, GATE_ASSERT_CLOSED);
2155 wake_threads(&info->synch);
2156 primitive_gate_wait(info);
2157 goto check_again;
2158 }
2159 }
2160 primitive_unlock(info);
2161
2162 assert(current_thread()->kern_promotion_schedpri == 0);
2163 notify_waiter((struct synch_test_common *)info);
2164 thread_terminate_self();
2165 }
2166
2167 static void
2168 test_gate_push(struct info_sleep_inheritor_test *info, int prim_type)
2169 {
2170 info->prim_type = prim_type;
2171
2172 primitive_gate_init(info);
2173 info->work_to_do = TRUE;
2174 info->synch = 0;
2175 info->synch_value = NUM_THREADS;
2176
2177 start_threads((thread_continue_t)thread_just_one_do_work, (struct synch_test_common *) info, TRUE);
2178 wait_all_thread((struct synch_test_common *)info);
2179
2180 primitive_gate_destroy(info);
2181 }
2182
2183 static void
2184 test_gate_handoff(struct info_sleep_inheritor_test *info, int prim_type)
2185 {
2186 info->prim_type = prim_type;
2187
2188 primitive_gate_init(info);
2189
2190 info->synch = 0;
2191 info->synch_value = NUM_THREADS;
2192 info->value = 0;
2193 info->handoff_failure = 0;
2194
2195 start_threads((thread_continue_t)thread_gate_like_mutex, (struct synch_test_common *)info, FALSE);
2196 wait_all_thread((struct synch_test_common *)info);
2197
2198 T_ASSERT(info->value == NUM_THREADS, "value protected by gate");
2199 T_ASSERT(info->handoff_failure == 1, "handoff failures");
2200
2201 primitive_gate_destroy(info);
2202 }
2203
2204 static void
2205 test_gate_steal(struct info_sleep_inheritor_test *info, int prim_type)
2206 {
2207 info->prim_type = prim_type;
2208
2209 primitive_gate_init(info);
2210
2211 info->synch = 0;
2212 info->synch_value = NUM_THREADS;
2213 info->thread_inheritor = NULL;
2214 info->steal_pri = 0;
2215
2216 start_threads((thread_continue_t)thread_gate_aggressive, (struct synch_test_common *)info, FALSE);
2217 wait_all_thread((struct synch_test_common *)info);
2218
2219 primitive_gate_destroy(info);
2220 }
2221
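/*
 * For reference, the gate pattern exercised below is roughly the following
 * (a sketch of the lck_mtx_gate_* flavor; the tests also run the rw
 * variants):
 *
 *      lck_mtx_lock(&mtx);
 *      if (lck_mtx_gate_try_close(&mtx, &gate) != KERN_SUCCESS) {
 *              // blocks, pushing our priority onto the holder
 *              lck_mtx_gate_wait(&mtx, &gate, LCK_SLEEP_DEFAULT,
 *                  THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
 *      }
 *      ...critical section...
 *      if (lck_mtx_gate_handoff(&mtx, &gate, GATE_HANDOFF_DEFAULT) ==
 *          KERN_NOT_WAITING) {
 *              // nobody to hand off to: open the gate instead
 *              lck_mtx_gate_handoff(&mtx, &gate,
 *                  GATE_HANDOFF_OPEN_IF_NO_WAITERS);
 *      }
 *      lck_mtx_unlock(&mtx);
 */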
2222 kern_return_t
2223 ts_kernel_gate_test(void)
2224 {
2225 struct info_sleep_inheritor_test info = {};
2226
2227 T_LOG("Testing gate primitive");
2228
2229 init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);
2230
2231 lck_attr_t* lck_attr = lck_attr_alloc_init();
2232 lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
2233 lck_grp_t* lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);
2234
2235 lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
2236 lck_rw_init(&info.rw_lock, lck_grp, lck_attr);
2237
2238 /*
2239 * Testing the priority inherited by the keeper
2240 * lck_mtx_gate_try_close, lck_mtx_gate_open, lck_mtx_gate_wait
2241 */
2242 T_LOG("Testing gate push, lck");
2243 test_gate_push(&info, MTX_LOCK);
2244
2245 T_LOG("Testing gate push, rw");
2246 test_gate_push(&info, RW_LOCK);
2247
2248 /*
2249 * Testing the handoff
2250 * lck_mtx_gate_wait, lck_mtx_gate_handoff
2251 */
2252 T_LOG("Testing gate handoff, lck");
2253 test_gate_handoff(&info, MTX_LOCK);
2254
2255 T_LOG("Testing gate handoff, rw");
2256 test_gate_handoff(&info, RW_LOCK);
2257
2258 /*
2259 * Testing the steal
2260 * lck_mtx_gate_close, lck_mtx_gate_wait, lck_mtx_gate_steal, lck_mtx_gate_handoff
2261 */
2262 T_LOG("Testing gate steal, lck");
2263 test_gate_steal(&info, MTX_LOCK);
2264
2265 T_LOG("Testing gate steal, rw");
2266 test_gate_steal(&info, RW_LOCK);
2267
2268 destroy_synch_test_common((struct synch_test_common *)&info);
2269
2270 lck_attr_free(lck_attr);
2271 lck_grp_attr_free(lck_grp_attr);
lck_rw_destroy(&info.rw_lock, lck_grp);
2272 lck_mtx_destroy(&info.mtx_lock, lck_grp);
2273 lck_grp_free(lck_grp);
2274
2275 return KERN_SUCCESS;
2276 }
2277
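/*
 * Turnstile chain tests: NUM_THREAD_CHAIN threads are linked so that
 * thread[i] blocks on thread[i-1] (through a sleep event, a gate, or a mix
 * of the two), and every link verifies that the priority of the highest
 * waiter was pushed through the whole chain.
 */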
2278 #define NUM_THREAD_CHAIN 6
2279
2280 struct turnstile_chain_test {
2281 struct synch_test_common head;
2282 lck_mtx_t mtx_lock;
2283 int synch_value;
2284 int synch;
2285 int synch2;
2286 gate_t gates[NUM_THREAD_CHAIN];
2287 };
2288
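/*
 * Chain link mixing the two primitives: even-indexed threads close a gate
 * and sleep with their predecessor as inheritor, odd-indexed threads wait
 * on the gate closed by their predecessor.
 */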
2289 static void
2290 thread_sleep_gate_chain_work(
2291 void *args,
2292 __unused wait_result_t wr)
2293 {
2294 struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
2295 thread_t self = current_thread();
2296 uint my_pri = self->sched_pri;
2297 uint max_pri;
2298 uint i;
2299 thread_t inheritor = NULL, woken_up;
2300 event_t wait_event, wake_event;
2301 kern_return_t ret;
2302
2303 T_LOG("Started thread pri %d %p", my_pri, self);
2304
2305 /*
2306 * The thread ids are needed below; wait until all of them have been populated
2307 */
2308
2309 while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
2310 IOSleep(10);
2311 }
2312
2313 max_pri = get_max_pri((struct synch_test_common *) info);
2314
2315 for (i = 0; i < info->head.nthreads; i = i + 2) {
2316 // even-indexed threads each close their gate
2317 if (info->head.threads[i] == self) {
2318 lck_mtx_lock(&info->mtx_lock);
2319 lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
2320 lck_mtx_unlock(&info->mtx_lock);
2321 break;
2322 }
2323 }
2324
2325 wake_threads(&info->synch2);
2326 wait_threads(&info->synch2, info->synch_value);
2327
2328 if (self == os_atomic_load(&info->head.threads[0], acquire)) {
2329 wait_threads(&info->synch, info->synch_value - 1);
2330 wait_for_waiters((struct synch_test_common *)info);
2331
2332 T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);
2333
2334 lck_mtx_lock(&info->mtx_lock);
2335 lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
2336 lck_mtx_unlock(&info->mtx_lock);
2337 } else {
2338 wait_event = NULL;
2339 wake_event = NULL;
2340 for (i = 0; i < info->head.nthreads; i++) {
2341 if (info->head.threads[i] == self) {
2342 inheritor = info->head.threads[i - 1];
2343 wait_event = (event_t) &info->head.threads[i - 1];
2344 wake_event = (event_t) &info->head.threads[i];
2345 break;
2346 }
2347 }
2348 assert(wait_event != NULL);
2349
2350 lck_mtx_lock(&info->mtx_lock);
2351 wake_threads(&info->synch);
2352
2353 if (i % 2 != 0) {
2354 lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
2355 T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);
2356
2357 ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
2358 if (ret == KERN_SUCCESS) {
2359 T_ASSERT(i != (info->head.nthreads - 1), "thread id");
2360 T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
2361 } else {
2362 T_ASSERT(i == (info->head.nthreads - 1), "thread id");
2363 }
2364
2365 // I am still the inheritor; wake all waiters to drop the inheritance
2366 ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
2367 T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
2368 } else {
2369 // I previously closed a gate
2370 lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
2371 T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);
2372
2373 lck_mtx_lock(&info->mtx_lock);
2374 lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
2375 lck_mtx_unlock(&info->mtx_lock);
2376 }
2377 }
2378
2379 assert(current_thread()->kern_promotion_schedpri == 0);
2380 notify_waiter((struct synch_test_common *)info);
2381
2382 thread_terminate_self();
2383 }
2384
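/*
 * Chain link built out of gates only: every thread closes gates[i] and waits
 * on its predecessor's gate; opening gates[0] unwinds the whole chain.
 */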
2385 static void
2386 thread_gate_chain_work(
2387 void *args,
2388 __unused wait_result_t wr)
2389 {
2390 struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
2391 thread_t self = current_thread();
2392 uint my_pri = self->sched_pri;
2393 uint max_pri;
2394 uint i;
2395 T_LOG("Started thread pri %d %p", my_pri, self);
2396
2397
2398 /*
2399 * The thread ids are needed below; wait until all of them have been populated
2400 */
2401 while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
2402 IOSleep(10);
2403 }
2404
2405 max_pri = get_max_pri((struct synch_test_common *) info);
2406
2407 for (i = 0; i < info->head.nthreads; i++) {
2408 if (info->head.threads[i] == self) {
2409 lck_mtx_lock(&info->mtx_lock);
2410 lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
2411 lck_mtx_unlock(&info->mtx_lock);
2412 break;
2413 }
2414 }
2415 assert(i != info->head.nthreads);
2416
2417 wake_threads(&info->synch2);
2418 wait_threads(&info->synch2, info->synch_value);
2419
2420 if (self == os_atomic_load(&info->head.threads[0], acquire)) {
2421 wait_threads(&info->synch, info->synch_value - 1);
2422
2423 wait_for_waiters((struct synch_test_common *)info);
2424
2425 T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);
2426
2427 lck_mtx_lock(&info->mtx_lock);
2428 lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
2429 lck_mtx_unlock(&info->mtx_lock);
2430 } else {
2431 lck_mtx_lock(&info->mtx_lock);
2432 wake_threads(&info->synch);
2433 lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
2434
2435 T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);
2436
2437 lck_mtx_lock(&info->mtx_lock);
2438 lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
2439 lck_mtx_unlock(&info->mtx_lock);
2440 }
2441
2442 assert(current_thread()->kern_promotion_schedpri == 0);
2443 notify_waiter((struct synch_test_common *)info);
2444
2445 thread_terminate_self();
2446 }
2447
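/*
 * Chain link built out of sleep events only: thread[i] sleeps with
 * thread[i-1] as inheritor and, once woken, wakes thread[i+1] without
 * transferring the push (LCK_WAKE_DO_NOT_TRANSFER_PUSH).
 */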
2448 static void
2449 thread_sleep_chain_work(
2450 void *args,
2451 __unused wait_result_t wr)
2452 {
2453 struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
2454 thread_t self = current_thread();
2455 uint my_pri = self->sched_pri;
2456 uint max_pri;
2457 event_t wait_event, wake_event;
2458 uint i;
2459 thread_t inheritor = NULL, woken_up = NULL;
2460 kern_return_t ret;
2461
2462 T_LOG("Started thread pri %d %p", my_pri, self);
2463
2464 /*
2465 * The thread ids are needed below; wait until all of them have been populated
2466 */
2467 while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
2468 IOSleep(10);
2469 }
2470
2471 max_pri = get_max_pri((struct synch_test_common *) info);
2472
2473 if (self == os_atomic_load(&info->head.threads[0], acquire)) {
2474 wait_threads(&info->synch, info->synch_value - 1);
2475
2476 wait_for_waiters((struct synch_test_common *)info);
2477
2478 T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);
2479
2480 ret = wakeup_one_with_inheritor((event_t) &info->head.threads[0], THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
2481 T_ASSERT(ret == KERN_SUCCESS, "wakeup_one_with_inheritor woke next");
2482 T_ASSERT(woken_up == info->head.threads[1], "thread woken up");
2483
2484 // I am still the inheritor; wake all waiters to drop the inheritance
2485 ret = wakeup_all_with_inheritor((event_t) &info->head.threads[0], LCK_WAKE_DEFAULT);
2486 T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
2487 } else {
2488 wait_event = NULL;
2489 wake_event = NULL;
2490 for (i = 0; i < info->head.nthreads; i++) {
2491 if (info->head.threads[i] == self) {
2492 inheritor = info->head.threads[i - 1];
2493 wait_event = (event_t) &info->head.threads[i - 1];
2494 wake_event = (event_t) &info->head.threads[i];
2495 break;
2496 }
2497 }
2498
2499 assert(wait_event != NULL);
2500 lck_mtx_lock(&info->mtx_lock);
2501 wake_threads(&info->synch);
2502
2503 lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
2504
2505 T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);
2506
2507 ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
2508 if (ret == KERN_SUCCESS) {
2509 T_ASSERT(i != (info->head.nthreads - 1), "thread id");
2510 T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
2511 } else {
2512 T_ASSERT(i == (info->head.nthreads - 1), "thread id");
2513 }
2514
2515 // I am still the inheritor; wake all waiters to drop the inheritance
2516 ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
2517 T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
2518 }
2519
2520 assert(current_thread()->kern_promotion_schedpri == 0);
2521 notify_waiter((struct synch_test_common *)info);
2522
2523 thread_terminate_self();
2524 }
2525
2526 static void
2527 test_sleep_chain(struct turnstile_chain_test *info)
2528 {
2529 info->synch = 0;
2530 info->synch_value = info->head.nthreads;
2531
2532 start_threads((thread_continue_t)thread_sleep_chain_work, (struct synch_test_common *)info, FALSE);
2533 wait_all_thread((struct synch_test_common *)info);
2534 }
2535
2536 static void
2537 test_gate_chain(struct turnstile_chain_test *info)
2538 {
2539 info->synch = 0;
2540 info->synch2 = 0;
2541 info->synch_value = info->head.nthreads;
2542
2543 start_threads((thread_continue_t)thread_gate_chain_work, (struct synch_test_common *)info, FALSE);
2544 wait_all_thread((struct synch_test_common *)info);
2545 }
2546
2547 static void
2548 test_sleep_gate_chain(struct turnstile_chain_test *info)
2549 {
2550 info->synch = 0;
2551 info->synch2 = 0;
2552 info->synch_value = info->head.nthreads;
2553
2554 start_threads((thread_continue_t)thread_sleep_gate_chain_work, (struct synch_test_common *)info, FALSE);
2555 wait_all_thread((struct synch_test_common *)info);
2556 }
2557
2558 kern_return_t
2559 ts_kernel_turnstile_chain_test(void)
2560 {
2561 struct turnstile_chain_test info = {};
2562 int i;
2563
2564 init_synch_test_common((struct synch_test_common *)&info, NUM_THREAD_CHAIN);
2565 lck_attr_t* lck_attr = lck_attr_alloc_init();
2566 lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
2567 lck_grp_t* lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);
2568
2569 lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
2570 for (i = 0; i < NUM_THREAD_CHAIN; i++) {
2571 lck_mtx_gate_init(&info.mtx_lock, &info.gates[i]);
2572 }
2573
2574 T_LOG("Testing sleep chain, lck");
2575 test_sleep_chain(&info);
2576
2577 T_LOG("Testing gate chain, lck");
2578 test_gate_chain(&info);
2579
2580 T_LOG("Testing sleep and gate chain, lck");
2581 test_sleep_gate_chain(&info);
2582
2583 destroy_synch_test_common((struct synch_test_common *)&info);
2584 for (i = 0; i < NUM_THREAD_CHAIN; i++) {
2585 lck_mtx_gate_destroy(&info.mtx_lock, &info.gates[i]);
2586 }
2587 lck_attr_free(lck_attr);
2588 lck_grp_attr_free(lck_grp_attr);
2589 lck_mtx_destroy(&info.mtx_lock, lck_grp);
2590 lck_grp_free(lck_grp);
2591
2592 return KERN_SUCCESS;
2593 }
2594
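/*
 * timingsafe_bcmp() returns 0 when the buffers match and 1 otherwise, and
 * its running time is meant to depend only on the length, never on where
 * the first difference occurs.
 */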
2595 kern_return_t
2596 ts_kernel_timingsafe_bcmp_test(void)
2597 {
2598 int i, buf_size;
2599 char *buf = NULL;
2600
2601 // empty
2602 T_ASSERT(timingsafe_bcmp(NULL, NULL, 0) == 0, NULL);
2603 T_ASSERT(timingsafe_bcmp("foo", "foo", 0) == 0, NULL);
2604 T_ASSERT(timingsafe_bcmp("foo", "bar", 0) == 0, NULL);
2605
2606 // equal
2607 T_ASSERT(timingsafe_bcmp("foo", "foo", strlen("foo")) == 0, NULL);
2608
2609 // unequal
2610 T_ASSERT(timingsafe_bcmp("foo", "bar", strlen("foo")) == 1, NULL);
2611 T_ASSERT(timingsafe_bcmp("foo", "goo", strlen("foo")) == 1, NULL);
2612 T_ASSERT(timingsafe_bcmp("foo", "fpo", strlen("foo")) == 1, NULL);
2613 T_ASSERT(timingsafe_bcmp("foo", "fop", strlen("foo")) == 1, NULL);
2614
2615 // all 255 possible single-byte bit-difference patterns
2616 for (i = 1; i < 256; i += 1) {
2617 unsigned char a = 0;
2618 unsigned char b = (unsigned char)i;
2619
2620 T_ASSERT(timingsafe_bcmp(&a, &b, sizeof(a)) == 1, NULL);
2621 }
2622
2623 // large
2624 buf_size = 1024 * 16;
2625 buf = kalloc(buf_size);
2626 T_EXPECT_NOTNULL(buf, "kalloc of buf");
2627
2628 read_random(buf, buf_size);
2629 T_ASSERT(timingsafe_bcmp(buf, buf, buf_size) == 0, NULL);
2630 T_ASSERT(timingsafe_bcmp(buf, buf + 1, buf_size - 1) == 1, NULL);
2631 T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 1, NULL);
2632
2633 memcpy(buf + 128, buf, 128);
2634 T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 0, NULL);
2635
2636 kfree(buf, buf_size);
2637
2638 return KERN_SUCCESS;
2639 }
2640
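/*
 * Sanity-checks that the kernel printf accepts the %hx and %hhx length
 * modifiers and prints the expected short/char values.
 */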
2641 kern_return_t
2642 kprintf_hhx_test(void)
2643 {
2644 printf("POST hhx test %hx%hx%hx%hx %hhx%hhx%hhx%hhx - %llx",
2645 (unsigned short)0xfeed, (unsigned short)0xface,
2646 (unsigned short)0xabad, (unsigned short)0xcafe,
2647 (unsigned char)'h', (unsigned char)'h', (unsigned char)'x',
2648 (unsigned char)'!',
2649 0xfeedfaceULL);
2650 return KERN_SUCCESS;
2651 }