1 /*
2 * Copyright (c) 2011-2018 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System Copyright (c) 1991,1990,1989,1988,1987 Carnegie
33 * Mellon University All Rights Reserved.
34 *
35 * Permission to use, copy, modify and distribute this software and its
36 * documentation is hereby granted, provided that both the copyright notice
37 * and this permission notice appear in all copies of the software,
38 * derivative works or modified versions, and any portions thereof, and that
39 * both notices appear in supporting documentation.
40 *
41 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.
42 * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
43 * WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
44 *
45 * Carnegie Mellon requests users of this software to return to
46 *
47 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
48 * School of Computer Science Carnegie Mellon University Pittsburgh PA
49 * 15213-3890
50 *
51 * any improvements or extensions that they make and grant Carnegie Mellon the
52 * rights to redistribute these changes.
53 */
54
55 #include <mach_ldebug.h>
56
57 #define LOCK_PRIVATE 1
58
59 #include <vm/pmap.h>
60 #include <kern/kalloc.h>
61 #include <kern/locks.h>
62 #include <kern/misc_protos.h>
63 #include <kern/thread.h>
64 #include <kern/processor.h>
65 #include <kern/sched_prim.h>
66 #include <kern/debug.h>
67 #include <string.h>
68 #include <tests/xnupost.h>
69
70 #if MACH_KDB
71 #include <ddb/db_command.h>
72 #include <ddb/db_output.h>
73 #include <ddb/db_sym.h>
74 #include <ddb/db_print.h>
75 #endif /* MACH_KDB */
76
77 #include <sys/kdebug.h>
78 #include <sys/munge.h>
79 #include <machine/cpu_capabilities.h>
80 #include <arm/cpu_data_internal.h>
81 #include <arm/pmap.h>
82
83 kern_return_t arm64_lock_test(void);
84 kern_return_t arm64_munger_test(void);
85 kern_return_t ex_cb_test(void);
86 kern_return_t arm64_pan_test(void);
87 kern_return_t arm64_late_pan_test(void);
88 #if defined(HAS_APPLE_PAC)
89 #include <ptrauth.h>
90 kern_return_t arm64_ropjop_test(void);
91 #endif
92 #if HAS_TWO_STAGE_SPR_LOCK
93 kern_return_t arm64_spr_lock_test(void);
94 extern void arm64_msr_lock_test(uint64_t);
95 #endif
96
97 // exception handler ignores this fault address during PAN test
98 #if __ARM_PAN_AVAILABLE__
99 const uint64_t pan_ro_value = 0xFEEDB0B0DEADBEEF;
100 vm_offset_t pan_test_addr = 0;
101 vm_offset_t pan_ro_addr = 0;
102 volatile int pan_exception_level = 0;
103 volatile char pan_fault_value = 0;
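/*
 * Expected handler behaviour, inferred from the assertions in
 * arm64_pan_test() below (a sketch, not a description of the actual fault
 * handler code): faults on pan_test_addr / pan_ro_addr are treated as
 * test-induced, the byte read at the expected address is recorded in
 * pan_fault_value, pan_exception_level is incremented on each entry, and
 * PAN is cleared so the faulting access can be rerun.
 */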
104 #endif
105
106 #include <libkern/OSAtomic.h>
107 #define LOCK_TEST_ITERATIONS 50
108 static hw_lock_data_t lt_hw_lock;
109 static lck_spin_t lt_lck_spin_t;
110 static lck_mtx_t lt_mtx;
111 static lck_rw_t lt_rwlock;
112 static volatile uint32_t lt_counter = 0;
113 static volatile int lt_spinvolatile;
114 static volatile uint32_t lt_max_holders = 0;
115 static volatile uint32_t lt_upgrade_holders = 0;
116 static volatile uint32_t lt_max_upgrade_holders = 0;
117 static volatile uint32_t lt_num_holders = 0;
118 static volatile uint32_t lt_done_threads;
119 static volatile uint32_t lt_target_done_threads;
120 static volatile uint32_t lt_cpu_bind_id = 0;
121
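/*
 * Harness pattern (a sketch of what lt_test_locks() below actually does;
 * all names are from this file):
 *
 *	lt_reset();
 *	lt_target_done_threads = 3;
 *	lt_start_lock_thread(lt_grab_mutex);	// three identical workers
 *	lt_start_lock_thread(lt_grab_mutex);
 *	lt_start_lock_thread(lt_grab_mutex);
 *	lt_wait_for_lock_test_threads();
 *	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
 *
 * Each worker runs its lock/unlock body LOCK_TEST_ITERATIONS times and
 * atomically increments lt_done_threads when it finishes; lt_max_holders
 * records the largest number of threads observed holding a blocking lock
 * at once.
 */
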
122 static void
123 lt_note_another_blocking_lock_holder()
124 {
125 hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
126 lt_num_holders++;
127 lt_max_holders = (lt_max_holders < lt_num_holders) ? lt_num_holders : lt_max_holders;
128 hw_lock_unlock(&lt_hw_lock);
129 }
130
131 static void
132 lt_note_blocking_lock_release()
133 {
134 hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
135 lt_num_holders--;
136 hw_lock_unlock(&lt_hw_lock);
137 }
138
139 static void
140 lt_spin_a_little_bit()
141 {
142 uint32_t i;
143
144 for (i = 0; i < 10000; i++) {
145 lt_spinvolatile++;
146 }
147 }
148
149 static void
150 lt_sleep_a_little_bit()
151 {
152 delay(100);
153 }
154
155 static void
156 lt_grab_mutex()
157 {
158 lck_mtx_lock(&lt_mtx);
159 lt_note_another_blocking_lock_holder();
160 lt_sleep_a_little_bit();
161 lt_counter++;
162 lt_note_blocking_lock_release();
163 lck_mtx_unlock(&lt_mtx);
164 }
165
166 static void
167 lt_grab_mutex_with_try()
168 {
169 while (0 == lck_mtx_try_lock(&lt_mtx)) {
170 ;
171 }
172 lt_note_another_blocking_lock_holder();
173 lt_sleep_a_little_bit();
174 lt_counter++;
175 lt_note_blocking_lock_release();
176 lck_mtx_unlock(&lt_mtx);
177 }
178
179 static void
180 lt_grab_rw_exclusive()
181 {
182 lck_rw_lock_exclusive(&lt_rwlock);
183 lt_note_another_blocking_lock_holder();
184 lt_sleep_a_little_bit();
185 lt_counter++;
186 lt_note_blocking_lock_release();
187 lck_rw_done(&lt_rwlock);
188 }
189
190 static void
191 lt_grab_rw_exclusive_with_try()
192 {
193 while (0 == lck_rw_try_lock_exclusive(&lt_rwlock)) {
194 lt_sleep_a_little_bit();
195 }
196
197 lt_note_another_blocking_lock_holder();
198 lt_sleep_a_little_bit();
199 lt_counter++;
200 lt_note_blocking_lock_release();
201 lck_rw_done(&lt_rwlock);
202 }
203
204 /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
205 * static void
206 * lt_grab_rw_shared()
207 * {
208 * lck_rw_lock_shared(&lt_rwlock);
209 * lt_counter++;
210 *
211 * lt_note_another_blocking_lock_holder();
212 * lt_sleep_a_little_bit();
213 * lt_note_blocking_lock_release();
214 *
215 * lck_rw_done(&lt_rwlock);
216 * }
217 */
218
219 /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
220 * static void
221 * lt_grab_rw_shared_with_try()
222 * {
223 * while(0 == lck_rw_try_lock_shared(&lt_rwlock));
224 * lt_counter++;
225 *
226 * lt_note_another_blocking_lock_holder();
227 * lt_sleep_a_little_bit();
228 * lt_note_blocking_lock_release();
229 *
230 * lck_rw_done(&lt_rwlock);
231 * }
232 */
233
234 static void
235 lt_upgrade_downgrade_rw()
236 {
237 boolean_t upgraded, success;
238
239 success = lck_rw_try_lock_shared(&lt_rwlock);
240 if (!success) {
241 lck_rw_lock_shared(&lt_rwlock);
242 }
243
244 lt_note_another_blocking_lock_holder();
245 lt_sleep_a_little_bit();
246 lt_note_blocking_lock_release();
247
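/*
 * Attempt the shared-to-exclusive upgrade.  If another thread wins the
 * upgrade race, lck_rw_lock_shared_to_exclusive() drops the shared hold
 * and returns FALSE, so the lock is reacquired exclusive from scratch
 * below.
 */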
248 upgraded = lck_rw_lock_shared_to_exclusive(&lt_rwlock);
249 if (!upgraded) {
250 success = lck_rw_try_lock_exclusive(&lt_rwlock);
251
252 if (!success) {
253 lck_rw_lock_exclusive(&lt_rwlock);
254 }
255 }
256
257 lt_upgrade_holders++;
258 if (lt_upgrade_holders > lt_max_upgrade_holders) {
259 lt_max_upgrade_holders = lt_upgrade_holders;
260 }
261
262 lt_counter++;
263 lt_sleep_a_little_bit();
264
265 lt_upgrade_holders--;
266
267 lck_rw_lock_exclusive_to_shared(&lt_rwlock);
268
269 lt_spin_a_little_bit();
270 lck_rw_done(&lt_rwlock);
271 }
272
273 const int limit = 1000000;
274 static int lt_stress_local_counters[MAX_CPUS];
275
276 static void
277 lt_stress_hw_lock()
278 {
279 int local_counter = 0;
280
281 uint cpuid = current_processor()->cpu_id;
282
283 kprintf("%s>cpu %d starting\n", __FUNCTION__, cpuid);
284
285 hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
286 lt_counter++;
287 local_counter++;
288 hw_lock_unlock(&lt_hw_lock);
289
290 while (lt_counter < lt_target_done_threads) {
291 ;
292 }
293
294 kprintf("%s>cpu %d started\n", __FUNCTION__, cpuid);
295
296 while (lt_counter < limit) {
297 hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
298 if (lt_counter < limit) {
299 lt_counter++;
300 local_counter++;
301 }
302 hw_lock_unlock(&lt_hw_lock);
303 }
304
305 lt_stress_local_counters[cpuid] = local_counter;
306
307 kprintf("%s>final counter %d cpu %d incremented the counter %d times\n", __FUNCTION__, lt_counter, cpuid, local_counter);
308 }
309
310 static void
311 lt_grab_hw_lock()
312 {
313 hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
314 lt_counter++;
315 lt_spin_a_little_bit();
316 hw_lock_unlock(&lt_hw_lock);
317 }
318
319 static void
320 lt_grab_hw_lock_with_try()
321 {
322 while (0 == hw_lock_try(&lt_hw_lock, LCK_GRP_NULL)) {
323 ;
324 }
325 lt_counter++;
326 lt_spin_a_little_bit();
327 hw_lock_unlock(&lt_hw_lock);
328 }
329
330 static void
331 lt_grab_hw_lock_with_to()
332 {
333 while (0 == hw_lock_to(&lt_hw_lock, LockTimeOut, LCK_GRP_NULL)) {
334 mp_enable_preemption();
335 }
336 lt_counter++;
337 lt_spin_a_little_bit();
338 hw_lock_unlock(&lt_hw_lock);
339 }
340
341 static void
342 lt_grab_spin_lock()
343 {
344 lck_spin_lock(&lt_lck_spin_t);
345 lt_counter++;
346 lt_spin_a_little_bit();
347 lck_spin_unlock(&lt_lck_spin_t);
348 }
349
350 static void
351 lt_grab_spin_lock_with_try()
352 {
353 while (0 == lck_spin_try_lock(&lt_lck_spin_t)) {
354 ;
355 }
356 lt_counter++;
357 lt_spin_a_little_bit();
358 lck_spin_unlock(&lt_lck_spin_t);
359 }
360
361 static volatile boolean_t lt_thread_lock_grabbed;
362 static volatile boolean_t lt_thread_lock_success;
363
364 static void
365 lt_reset()
366 {
367 lt_counter = 0;
368 lt_max_holders = 0;
369 lt_num_holders = 0;
370 lt_max_upgrade_holders = 0;
371 lt_upgrade_holders = 0;
372 lt_done_threads = 0;
373 lt_target_done_threads = 0;
374 lt_cpu_bind_id = 0;
375
376 OSMemoryBarrier();
377 }
378
379 static void
380 lt_trylock_hw_lock_with_to()
381 {
382 OSMemoryBarrier();
383 while (!lt_thread_lock_grabbed) {
384 lt_sleep_a_little_bit();
385 OSMemoryBarrier();
386 }
387 lt_thread_lock_success = hw_lock_to(&lt_hw_lock, 100, LCK_GRP_NULL);
388 OSMemoryBarrier();
389 mp_enable_preemption();
390 }
391
392 static void
393 lt_trylock_spin_try_lock()
394 {
395 OSMemoryBarrier();
396 while (!lt_thread_lock_grabbed) {
397 lt_sleep_a_little_bit();
398 OSMemoryBarrier();
399 }
400 lt_thread_lock_success = lck_spin_try_lock(&lt_lck_spin_t);
401 OSMemoryBarrier();
402 }
403
404 static void
405 lt_trylock_thread(void *arg, wait_result_t wres __unused)
406 {
407 void (*func)(void) = (void (*)(void))arg;
408
409 func();
410
411 OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
412 }
413
414 static void
415 lt_start_trylock_thread(thread_continue_t func)
416 {
417 thread_t thread;
418 kern_return_t kr;
419
420 kr = kernel_thread_start(lt_trylock_thread, func, &thread);
421 assert(kr == KERN_SUCCESS);
422
423 thread_deallocate(thread);
424 }
425
426 static void
427 lt_wait_for_lock_test_threads()
428 {
429 OSMemoryBarrier();
430 /* Spin to reduce dependencies */
431 while (lt_done_threads < lt_target_done_threads) {
432 lt_sleep_a_little_bit();
433 OSMemoryBarrier();
434 }
435 OSMemoryBarrier();
436 }
437
438 static kern_return_t
439 lt_test_trylocks()
440 {
441 boolean_t success;
442 extern unsigned int real_ncpus;
443
444 /*
445 * First mtx try lock succeeds, second fails.
446 */
447 success = lck_mtx_try_lock(&lt_mtx);
448 T_ASSERT_NOTNULL(success, "First mtx try lock");
449 success = lck_mtx_try_lock(&lt_mtx);
450 T_ASSERT_NULL(success, "Second mtx try lock for a locked mtx");
451 lck_mtx_unlock(&lt_mtx);
452
453 /*
454 * After regular grab, can't try lock.
455 */
456 lck_mtx_lock(&lt_mtx);
457 success = lck_mtx_try_lock(&lt_mtx);
458 T_ASSERT_NULL(success, "try lock should fail after regular lck_mtx_lock");
459 lck_mtx_unlock(&lt_mtx);
460
461 /*
462 * Two shared try locks on a previously unheld rwlock succeed, and a
463 * subsequent exclusive attempt fails.
464 */
465 success = lck_rw_try_lock_shared(&lt_rwlock);
466 T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
467 success = lck_rw_try_lock_shared(&lt_rwlock);
468 T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
469 success = lck_rw_try_lock_exclusive(&lt_rwlock);
470 T_ASSERT_NULL(success, "exclusive lock attempt on previously held lock should fail");
471 lck_rw_done(&lt_rwlock);
472 lck_rw_done(&lt_rwlock);
473
474 /*
475 * After regular shared grab, can trylock
476 * for shared but not for exclusive.
477 */
478 lck_rw_lock_shared(&lt_rwlock);
479 success = lck_rw_try_lock_shared(&lt_rwlock);
480 T_ASSERT_NOTNULL(success, "After regular shared grab another shared try lock should succeed.");
481 success = lck_rw_try_lock_exclusive(&lt_rwlock);
482 T_ASSERT_NULL(success, "After regular shared grab an exclusive lock attempt should fail.");
483 lck_rw_done(&lt_rwlock);
484 lck_rw_done(&lt_rwlock);
485
486 /*
487 * An exclusive try lock succeeds, subsequent shared and exclusive
488 * attempts fail.
489 */
490 success = lck_rw_try_lock_exclusive(&lt_rwlock);
491 T_ASSERT_NOTNULL(success, "An exclusive try lock should succeed");
492 success = lck_rw_try_lock_shared(&lt_rwlock);
493 T_ASSERT_NULL(success, "try lock in shared mode attempt after an exclusive grab should fail");
494 success = lck_rw_try_lock_exclusive(&lt_rwlock);
495 T_ASSERT_NULL(success, "try lock in exclusive mode attempt after an exclusive grab should fail");
496 lck_rw_done(&lt_rwlock);
497
498 /*
499 * After regular exclusive grab, neither kind of trylock succeeds.
500 */
501 lck_rw_lock_exclusive(&lt_rwlock);
502 success = lck_rw_try_lock_shared(&lt_rwlock);
503 T_ASSERT_NULL(success, "After regular exclusive grab, shared trylock should not succeed");
504 success = lck_rw_try_lock_exclusive(&lt_rwlock);
505 T_ASSERT_NULL(success, "After regular exclusive grab, exclusive trylock should not succeed");
506 lck_rw_done(&lt_rwlock);
507
508 /*
509 * First spin lock attempts succeed, second attempts fail.
510 */
511 success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
512 T_ASSERT_NOTNULL(success, "First spin lock attempts should succeed");
513 success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
514 T_ASSERT_NULL(success, "Second attempt to spin lock should fail");
515 hw_lock_unlock(&lt_hw_lock);
516
517 hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
518 success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
519 T_ASSERT_NULL(success, "After taking spin lock, trylock attempt should fail");
520 hw_lock_unlock(&lt_hw_lock);
521
522 lt_reset();
523 lt_thread_lock_grabbed = false;
524 lt_thread_lock_success = true;
525 lt_target_done_threads = 1;
526 OSMemoryBarrier();
527 lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
528 success = hw_lock_to(&lt_hw_lock, 100, LCK_GRP_NULL);
529 T_ASSERT_NOTNULL(success, "First spin lock with timeout should succeed");
530 if (real_ncpus == 1) {
531 mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
532 }
533 OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
534 lt_wait_for_lock_test_threads();
535 T_ASSERT_NULL(lt_thread_lock_success, "Second spin lock with timeout should fail and timeout");
536 if (real_ncpus == 1) {
537 mp_disable_preemption(); /* don't double-enable when we unlock */
538 }
539 hw_lock_unlock(&lt_hw_lock);
540
541 lt_reset();
542 lt_thread_lock_grabbed = false;
543 lt_thread_lock_success = true;
544 lt_target_done_threads = 1;
545 OSMemoryBarrier();
546 lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
547 hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
548 if (real_ncpus == 1) {
549 mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
550 }
551 OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
552 lt_wait_for_lock_test_threads();
553 T_ASSERT_NULL(lt_thread_lock_success, "after taking a spin lock, lock attempt with timeout should fail");
554 if (real_ncpus == 1) {
555 mp_disable_preemption(); /* don't double-enable when we unlock */
556 }
557 hw_lock_unlock(&lt_hw_lock);
558
559 success = lck_spin_try_lock(&lt_lck_spin_t);
560 T_ASSERT_NOTNULL(success, "spin trylock of previously unheld lock should succeed");
561 success = lck_spin_try_lock(&lt_lck_spin_t);
562 T_ASSERT_NULL(success, "spin trylock attempt of previously held lock (with trylock) should fail");
563 lck_spin_unlock(&lt_lck_spin_t);
564
565 lt_reset();
566 lt_thread_lock_grabbed = false;
567 lt_thread_lock_success = true;
568 lt_target_done_threads = 1;
569 lt_start_trylock_thread(lt_trylock_spin_try_lock);
570 lck_spin_lock(&lt_lck_spin_t);
571 if (real_ncpus == 1) {
572 mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
573 }
574 OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
575 lt_wait_for_lock_test_threads();
576 T_ASSERT_NULL(lt_thread_lock_success, "spin trylock attempt of previously held lock should fail");
577 if (real_ncpus == 1) {
578 mp_disable_preemption(); /* don't double-enable when we unlock */
579 }
580 lck_spin_unlock(&lt_lck_spin_t);
581
582 return KERN_SUCCESS;
583 }
584
585 static void
586 lt_thread(void *arg, wait_result_t wres __unused)
587 {
588 void (*func)(void) = (void (*)(void))arg;
589 uint32_t i;
590
591 for (i = 0; i < LOCK_TEST_ITERATIONS; i++) {
592 func();
593 }
594
595 OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
596 }
597
598 static void
599 lt_bound_thread(void *arg, wait_result_t wres __unused)
600 {
601 void (*func)(void) = (void (*)(void))arg;
602
603 int cpuid = OSIncrementAtomic((volatile SInt32 *)&lt_cpu_bind_id);
604
605 processor_t processor = processor_list;
606 while ((processor != NULL) && (processor->cpu_id != cpuid)) {
607 processor = processor->processor_list;
608 }
609
610 if (processor != NULL) {
611 thread_bind(processor);
612 }
613
614 thread_block(THREAD_CONTINUE_NULL);
615
616 func();
617
618 OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
619 }
620
621 static void
622 lt_start_lock_thread(thread_continue_t func)
623 {
624 thread_t thread;
625 kern_return_t kr;
626
627 kr = kernel_thread_start(lt_thread, func, &thread);
628 assert(kr == KERN_SUCCESS);
629
630 thread_deallocate(thread);
631 }
632
633
634 static void
635 lt_start_lock_thread_bound(thread_continue_t func)
636 {
637 thread_t thread;
638 kern_return_t kr;
639
640 kr = kernel_thread_start(lt_bound_thread, func, &thread);
641 assert(kr == KERN_SUCCESS);
642
643 thread_deallocate(thread);
644 }
645
646 static kern_return_t
647 lt_test_locks()
648 {
649 kern_return_t kr = KERN_SUCCESS;
650 lck_grp_attr_t *lga = lck_grp_attr_alloc_init();
651 lck_grp_t *lg = lck_grp_alloc_init("lock test", lga);
652
653 lck_mtx_init(&lt_mtx, lg, LCK_ATTR_NULL);
654 lck_rw_init(&lt_rwlock, lg, LCK_ATTR_NULL);
655 lck_spin_init(&lt_lck_spin_t, lg, LCK_ATTR_NULL);
656 hw_lock_init(&lt_hw_lock);
657
658 T_LOG("Testing locks.");
659
660 /* Try locks (custom) */
661 lt_reset();
662
663 T_LOG("Running try lock test.");
664 kr = lt_test_trylocks();
665 T_EXPECT_NULL(kr, "try lock test failed.");
666
667 /* Uncontended mutex */
668 T_LOG("Running uncontended mutex test.");
669 lt_reset();
670 lt_target_done_threads = 1;
671 lt_start_lock_thread(lt_grab_mutex);
672 lt_wait_for_lock_test_threads();
673 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
674 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
675
676 /* Contended mutex */
677 T_LOG("Running contended mutex test.");
678 lt_reset();
679 lt_target_done_threads = 3;
680 lt_start_lock_thread(lt_grab_mutex);
681 lt_start_lock_thread(lt_grab_mutex);
682 lt_start_lock_thread(lt_grab_mutex);
683 lt_wait_for_lock_test_threads();
684 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
685 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
686
687 /* Contended mutex: try locks */
688 T_LOG("Running contended mutex trylock test.");
689 lt_reset();
690 lt_target_done_threads = 3;
691 lt_start_lock_thread(lt_grab_mutex_with_try);
692 lt_start_lock_thread(lt_grab_mutex_with_try);
693 lt_start_lock_thread(lt_grab_mutex_with_try);
694 lt_wait_for_lock_test_threads();
695 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
696 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
697
698 /* Uncontended exclusive rwlock */
699 T_LOG("Running uncontended exclusive rwlock test.");
700 lt_reset();
701 lt_target_done_threads = 1;
702 lt_start_lock_thread(lt_grab_rw_exclusive);
703 lt_wait_for_lock_test_threads();
704 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
705 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
706
707 /* Uncontended shared rwlock */
708
709 /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
710 * T_LOG("Running uncontended shared rwlock test.");
711 * lt_reset();
712 * lt_target_done_threads = 1;
713 * lt_start_lock_thread(lt_grab_rw_shared);
714 * lt_wait_for_lock_test_threads();
715 * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
716 * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
717 */
718
719 /* Contended exclusive rwlock */
720 T_LOG("Running contended exclusive rwlock test.");
721 lt_reset();
722 lt_target_done_threads = 3;
723 lt_start_lock_thread(lt_grab_rw_exclusive);
724 lt_start_lock_thread(lt_grab_rw_exclusive);
725 lt_start_lock_thread(lt_grab_rw_exclusive);
726 lt_wait_for_lock_test_threads();
727 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
728 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
729
730 /* One shared, two exclusive */
731 /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
732 * T_LOG("Running test with one shared and two exclusive rw lock threads.");
733 * lt_reset();
734 * lt_target_done_threads = 3;
735 * lt_start_lock_thread(lt_grab_rw_shared);
736 * lt_start_lock_thread(lt_grab_rw_exclusive);
737 * lt_start_lock_thread(lt_grab_rw_exclusive);
738 * lt_wait_for_lock_test_threads();
739 * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
740 * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
741 */
742
743 /* Four shared */
744 /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
745 * T_LOG("Running test with four shared holders.");
746 * lt_reset();
747 * lt_target_done_threads = 4;
748 * lt_start_lock_thread(lt_grab_rw_shared);
749 * lt_start_lock_thread(lt_grab_rw_shared);
750 * lt_start_lock_thread(lt_grab_rw_shared);
751 * lt_start_lock_thread(lt_grab_rw_shared);
752 * lt_wait_for_lock_test_threads();
753 * T_EXPECT_LE_UINT(lt_max_holders, 4, NULL);
754 */
755
756 /* Three doing upgrades and downgrades */
757 T_LOG("Running test with threads upgrading and downgrading.");
758 lt_reset();
759 lt_target_done_threads = 3;
760 lt_start_lock_thread(lt_upgrade_downgrade_rw);
761 lt_start_lock_thread(lt_upgrade_downgrade_rw);
762 lt_start_lock_thread(lt_upgrade_downgrade_rw);
763 lt_wait_for_lock_test_threads();
764 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
765 T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
766 T_EXPECT_EQ_UINT(lt_max_upgrade_holders, 1, NULL);
767
768 /* Uncontended - exclusive trylocks */
769 T_LOG("Running test with single thread doing exclusive rwlock trylocks.");
770 lt_reset();
771 lt_target_done_threads = 1;
772 lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
773 lt_wait_for_lock_test_threads();
774 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
775 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
776
777 /* Uncontended - shared trylocks */
778 /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
779 * T_LOG("Running test with single thread doing shared rwlock trylocks.");
780 * lt_reset();
781 * lt_target_done_threads = 1;
782 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
783 * lt_wait_for_lock_test_threads();
784 * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
785 * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
786 */
787
788 /* Three doing exclusive trylocks */
789 T_LOG("Running test with threads doing exclusive rwlock trylocks.");
790 lt_reset();
791 lt_target_done_threads = 3;
792 lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
793 lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
794 lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
795 lt_wait_for_lock_test_threads();
796 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
797 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
798
799 /* Three doing shared trylocks */
800 /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
801 * T_LOG("Running test with threads doing shared rwlock trylocks.");
802 * lt_reset();
803 * lt_target_done_threads = 3;
804 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
805 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
806 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
807 * lt_wait_for_lock_test_threads();
808 * T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
809 * T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
810 */
811
812 /* Three doing various trylocks */
813 /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
814 * T_LOG("Running test with threads doing mixed rwlock trylocks.");
815 * lt_reset();
816 * lt_target_done_threads = 4;
817 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
818 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
819 * lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
820 * lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
821 * lt_wait_for_lock_test_threads();
822 * T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
823 * T_EXPECT_LE_UINT(lt_max_holders, 2, NULL);
824 */
825
826 /* HW locks */
827 T_LOG("Running test with hw_lock_lock()");
828 lt_reset();
829 lt_target_done_threads = 3;
830 lt_start_lock_thread(lt_grab_hw_lock);
831 lt_start_lock_thread(lt_grab_hw_lock);
832 lt_start_lock_thread(lt_grab_hw_lock);
833 lt_wait_for_lock_test_threads();
834 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
835
836 /* HW locks stress test */
837 T_LOG("Running HW locks stress test with hw_lock_lock()");
838 extern unsigned int real_ncpus;
839 lt_reset();
840 lt_target_done_threads = real_ncpus;
841 for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
842 lt_start_lock_thread_bound(lt_stress_hw_lock);
843 }
844 lt_wait_for_lock_test_threads();
845 bool starvation = false;
846 uint total_local_count = 0;
847 for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
848 starvation = starvation || (lt_stress_local_counters[processor->cpu_id] < 10);
849 total_local_count += lt_stress_local_counters[processor->cpu_id];
850 }
851 if (total_local_count != lt_counter) {
852 T_FAIL("Lock failure\n");
853 } else if (starvation) {
854 T_FAIL("Lock starvation found\n");
855 } else {
856 T_PASS("HW locks stress test with hw_lock_lock()");
857 }
858
859
860 /* HW locks: trylocks */
861 T_LOG("Running test with hw_lock_try()");
862 lt_reset();
863 lt_target_done_threads = 3;
864 lt_start_lock_thread(lt_grab_hw_lock_with_try);
865 lt_start_lock_thread(lt_grab_hw_lock_with_try);
866 lt_start_lock_thread(lt_grab_hw_lock_with_try);
867 lt_wait_for_lock_test_threads();
868 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
869
870 /* HW locks: with timeout */
871 T_LOG("Running test with hw_lock_to()");
872 lt_reset();
873 lt_target_done_threads = 3;
874 lt_start_lock_thread(lt_grab_hw_lock_with_to);
875 lt_start_lock_thread(lt_grab_hw_lock_with_to);
876 lt_start_lock_thread(lt_grab_hw_lock_with_to);
877 lt_wait_for_lock_test_threads();
878 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
879
880 /* Spin locks */
881 T_LOG("Running test with lck_spin_lock()");
882 lt_reset();
883 lt_target_done_threads = 3;
884 lt_start_lock_thread(lt_grab_spin_lock);
885 lt_start_lock_thread(lt_grab_spin_lock);
886 lt_start_lock_thread(lt_grab_spin_lock);
887 lt_wait_for_lock_test_threads();
888 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
889
890 /* Spin locks: trylocks */
891 T_LOG("Running test with lck_spin_try_lock()");
892 lt_reset();
893 lt_target_done_threads = 3;
894 lt_start_lock_thread(lt_grab_spin_lock_with_try);
895 lt_start_lock_thread(lt_grab_spin_lock_with_try);
896 lt_start_lock_thread(lt_grab_spin_lock_with_try);
897 lt_wait_for_lock_test_threads();
898 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
899
900 return KERN_SUCCESS;
901 }
902
903 #define MT_MAX_ARGS 8
904 #define MT_INITIAL_VALUE 0xfeedbeef
905 #define MT_W_VAL (0x00000000feedbeefULL) /* Drop in zeros */
906 #define MT_S_VAL (0xfffffffffeedbeefULL) /* High bit is 1, so sign-extends as negative */
907 #define MT_L_VAL (((uint64_t)MT_INITIAL_VALUE) | (((uint64_t)MT_INITIAL_VALUE) << 32)) /* Two back-to-back */
908
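/*
 * Worked example (illustrative only): a munger rewrites the argument buffer
 * in place, expanding 32-bit words into 64-bit values.  With every input
 * word preset to MT_INITIAL_VALUE, munge_wl (3 words in, 2 values out in
 * the table below) turns
 *
 *	{ 0xfeedbeef, 0xfeedbeef, 0xfeedbeef }
 *
 * into
 *
 *	{ 0x00000000feedbeefULL,	// 'w': one word, zero-extended (MT_W_VAL)
 *	  0xfeedbeeffeedbeefULL }	// 'l': two words packed (MT_L_VAL)
 *
 * which is what mt_test_mungers() compares against mt_expected[].
 */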
909 typedef void (*sy_munge_t)(void*);
910
911 #define MT_FUNC(x) #x, x
912 struct munger_test {
913 const char *mt_name;
914 sy_munge_t mt_func;
915 uint32_t mt_in_words;
916 uint32_t mt_nout;
917 uint64_t mt_expected[MT_MAX_ARGS];
918 } munger_tests[] = {
919 {MT_FUNC(munge_w), 1, 1, {MT_W_VAL}},
920 {MT_FUNC(munge_ww), 2, 2, {MT_W_VAL, MT_W_VAL}},
921 {MT_FUNC(munge_www), 3, 3, {MT_W_VAL, MT_W_VAL, MT_W_VAL}},
922 {MT_FUNC(munge_wwww), 4, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
923 {MT_FUNC(munge_wwwww), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
924 {MT_FUNC(munge_wwwwww), 6, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
925 {MT_FUNC(munge_wwwwwww), 7, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
926 {MT_FUNC(munge_wwwwwwww), 8, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
927 {MT_FUNC(munge_wl), 3, 2, {MT_W_VAL, MT_L_VAL}},
928 {MT_FUNC(munge_wwl), 4, 3, {MT_W_VAL, MT_W_VAL, MT_L_VAL}},
929 {MT_FUNC(munge_wwlll), 8, 5, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
930 {MT_FUNC(munge_wlw), 4, 3, {MT_W_VAL, MT_L_VAL, MT_W_VAL}},
931 {MT_FUNC(munge_wlwwwll), 10, 7, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
932 {MT_FUNC(munge_wlwwwllw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
933 {MT_FUNC(munge_wlwwlwlw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
934 {MT_FUNC(munge_wll), 5, 3, {MT_W_VAL, MT_L_VAL, MT_L_VAL}},
935 {MT_FUNC(munge_wlll), 7, 4, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
936 {MT_FUNC(munge_wllwwll), 11, 7, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
937 {MT_FUNC(munge_wwwlw), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
938 {MT_FUNC(munge_wwwlww), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
939 {MT_FUNC(munge_wwwl), 5, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
940 {MT_FUNC(munge_wwwwlw), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
941 {MT_FUNC(munge_wwwwl), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
942 {MT_FUNC(munge_wwwwwl), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
943 {MT_FUNC(munge_wwwwwlww), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
944 {MT_FUNC(munge_wwwwwllw), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
945 {MT_FUNC(munge_wwwwwlll), 11, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
946 {MT_FUNC(munge_wwwwwwl), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
947 {MT_FUNC(munge_wwwwwwlw), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
948 {MT_FUNC(munge_wwwwwwll), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
949 {MT_FUNC(munge_wsw), 3, 3, {MT_W_VAL, MT_S_VAL, MT_W_VAL}},
950 {MT_FUNC(munge_wws), 3, 3, {MT_W_VAL, MT_W_VAL, MT_S_VAL}},
951 {MT_FUNC(munge_wwwsw), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_S_VAL, MT_W_VAL}},
952 {MT_FUNC(munge_llllll), 12, 6, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
953 {MT_FUNC(munge_l), 2, 1, {MT_L_VAL}},
954 {MT_FUNC(munge_lw), 3, 2, {MT_L_VAL, MT_W_VAL}},
955 {MT_FUNC(munge_lwww), 5, 4, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
956 {MT_FUNC(munge_lwwwwwww), 9, 8, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
957 {MT_FUNC(munge_wlwwwl), 8, 6, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
958 {MT_FUNC(munge_wwlwwwl), 9, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}
959 };
960
961 #define MT_TEST_COUNT (sizeof(munger_tests) / sizeof(struct munger_test))
962
963 static void
964 mt_reset(uint32_t in_words, size_t total_size, uint32_t *data)
965 {
966 uint32_t i;
967
968 for (i = 0; i < in_words; i++) {
969 data[i] = MT_INITIAL_VALUE;
970 }
971
972 if (in_words * sizeof(uint32_t) < total_size) {
973 bzero(&data[in_words], total_size - in_words * sizeof(uint32_t));
974 }
975 }
976
977 static void
978 mt_test_mungers()
979 {
980 uint64_t data[MT_MAX_ARGS];
981 uint32_t i, j;
982
983 for (i = 0; i < MT_TEST_COUNT; i++) {
984 struct munger_test *test = &munger_tests[i];
985 int pass = 1;
986
987 T_LOG("Testing %s", test->mt_name);
988
989 mt_reset(test->mt_in_words, sizeof(data), (uint32_t*)data);
990 test->mt_func(data);
991
992 for (j = 0; j < test->mt_nout; j++) {
993 if (data[j] != test->mt_expected[j]) {
994 T_FAIL("Index %d: expected %llx, got %llx.", j, test->mt_expected[j], data[j]);
995 pass = 0;
996 }
997 }
998 if (pass) {
999 T_PASS(test->mt_name);
1000 }
1001 }
1002 }
1003
1004 /* Exception Callback Test */
1005 static ex_cb_action_t
1006 excb_test_action(
1007 ex_cb_class_t cb_class,
1008 void *refcon,
1009 const ex_cb_state_t *state
1010 )
1011 {
1012 ex_cb_state_t *context = (ex_cb_state_t *)refcon;
1013
1014 if ((NULL == refcon) || (NULL == state)) {
1015 return EXCB_ACTION_TEST_FAIL;
1016 }
1017
1018 context->far = state->far;
1019
1020 switch (cb_class) {
1021 case EXCB_CLASS_TEST1:
1022 return EXCB_ACTION_RERUN;
1023 case EXCB_CLASS_TEST2:
1024 return EXCB_ACTION_NONE;
1025 default:
1026 return EXCB_ACTION_TEST_FAIL;
1027 }
1028 }
1029
1030
1031 kern_return_t
1032 ex_cb_test()
1033 {
1034 const vm_offset_t far1 = 0xdead0001;
1035 const vm_offset_t far2 = 0xdead0002;
1036 kern_return_t kr;
1037 ex_cb_state_t test_context_1 = {0xdeadbeef};
1038 ex_cb_state_t test_context_2 = {0xdeadbeef};
1039 ex_cb_action_t action;
1040
1041 T_LOG("Testing Exception Callback.");
1042
1043 T_LOG("Running registration test.");
1044
1045 kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
1046 T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST1 exception callback");
1047 kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
1048 T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST2 exception callback");
1049
1050 kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
1051 T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST2 exception callback");
1052 kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
1053 T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST1 exception callback");
1054
1055 T_LOG("Running invocation test.");
1056
1057 action = ex_cb_invoke(EXCB_CLASS_TEST1, far1);
1058 T_ASSERT(EXCB_ACTION_RERUN == action, NULL);
1059 T_ASSERT(far1 == test_context_1.far, NULL);
1060
1061 action = ex_cb_invoke(EXCB_CLASS_TEST2, far2);
1062 T_ASSERT(EXCB_ACTION_NONE == action, NULL);
1063 T_ASSERT(far2 == test_context_2.far, NULL);
1064
1065 action = ex_cb_invoke(EXCB_CLASS_TEST3, 0);
1066 T_ASSERT(EXCB_ACTION_NONE == action, NULL);
1067
1068 return KERN_SUCCESS;
1069 }
1070
1071 #if defined(HAS_APPLE_PAC)
1072
1073 /*
1074 *
1075 * arm64_ropjop_test - basic xnu ROP/JOP test plan
1076 *
1077 * - assert ROP/JOP configured and running status match
1078 * - assert all AppleMode ROP/JOP features enabled
1079 * - ensure ROP/JOP keys are set and diversified
1080 * - sign a KVA (the address of a local variable), assert it was signed (changed)
1081 * - authenticate the newly signed KVA
1082 * - assert the authed KVA is the original KVA
1083 * - corrupt a signed ptr, auth it, ensure auth failed
1084 * - assert the auth result of the corrupted pointer carries the AuthIB failure code
1085 *
1086 */
1087
1088 kern_return_t
1089 arm64_ropjop_test()
1090 {
1091 T_LOG("Testing ROP/JOP");
1092
1093 /* how is ROP/JOP configured */
1094 boolean_t config_rop_enabled = TRUE;
1095 boolean_t config_jop_enabled = !(BootArgs->bootFlags & kBootFlagsDisableJOP);
1096
1097
1098 /* assert all AppleMode ROP/JOP features enabled */
1099 uint64_t apctl = __builtin_arm_rsr64(ARM64_REG_APCTL_EL1);
1100 #if __APSTS_SUPPORTED__
1101 uint64_t apsts = __builtin_arm_rsr64(ARM64_REG_APSTS_EL1);
1102 T_ASSERT(apsts & APSTS_EL1_MKEYVld, NULL);
1103 #else
1104 T_ASSERT(apctl & APCTL_EL1_MKEYVld, NULL);
1105 #endif /* __APSTS_SUPPORTED__ */
1106 T_ASSERT(apctl & APCTL_EL1_AppleMode, NULL);
1107 T_ASSERT(apctl & APCTL_EL1_KernKeyEn, NULL);
1108
1109 /* ROP/JOP keys enabled current status */
1110 bool status_jop_enabled, status_rop_enabled;
1111 #if __APSTS_SUPPORTED__ /* H13+ */
1112 // TODO: update unit test to understand ROP/JOP enabled config for H13+
1113 status_jop_enabled = status_rop_enabled = apctl & APCTL_EL1_EnAPKey1;
1114 #elif __APCFG_SUPPORTED__ /* H12 */
1115 uint64_t apcfg_el1 = __builtin_arm_rsr64(APCFG_EL1);
1116 status_jop_enabled = status_rop_enabled = apcfg_el1 & APCFG_EL1_ELXENKEY;
1117 #else /* !__APCFG_SUPPORTED__ H11 */
1118 uint64_t sctlr_el1 = __builtin_arm_rsr64("SCTLR_EL1");
1119 status_jop_enabled = sctlr_el1 & SCTLR_PACIA_ENABLED;
1120 status_rop_enabled = sctlr_el1 & SCTLR_PACIB_ENABLED;
1121 #endif /* __APSTS_SUPPORTED__ */
1122
1123 /* assert configured and running status match */
1124 T_ASSERT(config_rop_enabled == status_rop_enabled, NULL);
1125 T_ASSERT(config_jop_enabled == status_jop_enabled, NULL);
1126
1127
1128 if (config_jop_enabled) {
1129 /* jop key */
1130 uint64_t apiakey_hi = __builtin_arm_rsr64(ARM64_REG_APIAKEYHI_EL1);
1131 uint64_t apiakey_lo = __builtin_arm_rsr64(ARM64_REG_APIAKEYLO_EL1);
1132
1133 /* ensure JOP key is set and diversified */
1134 T_EXPECT(apiakey_hi != KERNEL_ROP_ID && apiakey_lo != KERNEL_ROP_ID, NULL);
1135 T_EXPECT(apiakey_hi != 0 && apiakey_lo != 0, NULL);
1136 }
1137
1138 if (config_rop_enabled) {
1139 /* rop key */
1140 uint64_t apibkey_hi = __builtin_arm_rsr64(ARM64_REG_APIBKEYHI_EL1);
1141 uint64_t apibkey_lo = __builtin_arm_rsr64(ARM64_REG_APIBKEYLO_EL1);
1142
1143 /* ensure ROP key is set and diversified */
1144 T_EXPECT(apibkey_hi != KERNEL_ROP_ID && apibkey_lo != KERNEL_ROP_ID, NULL);
1145 T_EXPECT(apibkey_hi != 0 && apibkey_lo != 0, NULL);
1146
1147 /* sign a KVA (here, the address of a local variable) */
1148 uint64_t kva_signed = (uint64_t) ptrauth_sign_unauthenticated((void *)&config_rop_enabled, ptrauth_key_asib, 0);
1149
1150 /* assert it was signed (changed) */
1151 T_EXPECT(kva_signed != (uint64_t)&config_rop_enabled, NULL);
1152
1153 /* authenticate the newly signed KVA */
1154 uint64_t kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_signed, ptrauth_key_asib, 0);
1155
1156 /* assert the authed KVA is the original KVA */
1157 T_EXPECT(kva_authed == (uint64_t)&config_rop_enabled, NULL);
1158
1159 /* corrupt a signed ptr, auth it, ensure auth failed */
1160 uint64_t kva_corrupted = kva_signed ^ 1;
1161
1162 /* authenticate the corrupted pointer */
1163 kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_corrupted, ptrauth_key_asib, 0);
1164
1165 /* when AuthIB fails, bits 62:61 are set to 2'b10 */
1166 uint64_t auth_fail_mask = 3ULL << 61;
1167 uint64_t authib_fail = 2ULL << 61;
1168
1169 /* assert the failed authIB of corrupted pointer is tagged */
1170 T_EXPECT((kva_authed & auth_fail_mask) == authib_fail, NULL);
1171 }
1172
1173 return KERN_SUCCESS;
1174 }
1175 #endif /* defined(HAS_APPLE_PAC) */
1176
1177 #if __ARM_PAN_AVAILABLE__
1178
1179 struct pan_test_thread_args {
1180 volatile bool join;
1181 };
1182
1183 static void
1184 arm64_pan_test_thread(void *arg, wait_result_t __unused wres)
1185 {
1186 T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);
1187
1188 struct pan_test_thread_args *args = arg;
1189
1190 for (processor_t p = processor_list; p != NULL; p = p->processor_list) {
1191 thread_bind(p);
1192 thread_block(THREAD_CONTINUE_NULL);
1193 kprintf("Running PAN test on cpu %d\n", p->cpu_id);
1194 arm64_pan_test();
1195 }
1196
1197 /* unbind thread from specific cpu */
1198 thread_bind(PROCESSOR_NULL);
1199 thread_block(THREAD_CONTINUE_NULL);
1200
1201 while (!args->join) {
1202 ;
1203 }
1204
1205 thread_wakeup(args);
1206 }
1207
1208 kern_return_t
1209 arm64_late_pan_test()
1210 {
1211 thread_t thread;
1212 kern_return_t kr;
1213
1214 struct pan_test_thread_args args;
1215 args.join = false;
1216
1217 kr = kernel_thread_start(arm64_pan_test_thread, &args, &thread);
1218 assert(kr == KERN_SUCCESS);
1219
1220 thread_deallocate(thread);
1221
1222 assert_wait(&args, THREAD_UNINT);
1223 args.join = true;
1224 thread_block(THREAD_CONTINUE_NULL);
1225 return KERN_SUCCESS;
1226 }
1227
1228 kern_return_t
1229 arm64_pan_test()
1230 {
1231 vm_offset_t priv_addr = _COMM_PAGE_SIGNATURE;
1232
1233 T_LOG("Testing PAN.");
1234
1235
1236 T_ASSERT((__builtin_arm_rsr("SCTLR_EL1") & SCTLR_PAN_UNCHANGED) == 0, "SCTLR_EL1.SPAN must be cleared");
1237
1238 T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);
1239
1240 pan_exception_level = 0;
1241 pan_fault_value = 0xDE;
1242 // convert priv_addr to one that is accessible from user mode
1243 pan_test_addr = priv_addr + _COMM_HIGH_PAGE64_BASE_ADDRESS -
1244 _COMM_PAGE_START_ADDRESS;
1245
1246 // Below should trigger a PAN exception as pan_test_addr is accessible
1247 // in user mode
1248 // The exception handler, upon recognizing the fault address is pan_test_addr,
1249 // will disable PAN and rerun this instruction successfully
1250 T_ASSERT(*(char *)pan_test_addr == *(char *)priv_addr, NULL);
1251
1252 T_ASSERT(pan_exception_level == 2, NULL);
1253
1254 T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);
1255
1256 T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);
1257
1258 pan_exception_level = 0;
1259 pan_fault_value = 0xAD;
1260 pan_ro_addr = (vm_offset_t) &pan_ro_value;
1261
1262 // Force a permission fault while PAN is disabled to make sure PAN is
1263 // re-enabled during the exception handler.
1264 *((volatile uint64_t*)pan_ro_addr) = 0xFEEDFACECAFECAFE;
1265
1266 T_ASSERT(pan_exception_level == 2, NULL);
1267
1268 T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);
1269
1270 T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);
1271
1272 pan_test_addr = 0;
1273 pan_ro_addr = 0;
1274
1275 __builtin_arm_wsr("pan", 1);
1276
1277 return KERN_SUCCESS;
1278 }
1279 #endif /* __ARM_PAN_AVAILABLE__ */
1280
1281
1282 kern_return_t
1283 arm64_lock_test()
1284 {
1285 return lt_test_locks();
1286 }
1287
1288 kern_return_t
1289 arm64_munger_test()
1290 {
1291 mt_test_mungers();
1292 return 0;
1293 }
1294
1295
1296 #if HAS_TWO_STAGE_SPR_LOCK
1297
1298 #define STR1(x) #x
1299 #define STR(x) STR1(x)
1300
1301 volatile vm_offset_t spr_lock_test_addr;
1302 volatile uint32_t spr_lock_exception_esr;
1303
1304 kern_return_t
1305 arm64_spr_lock_test()
1306 {
1307 processor_t p;
1308
1309 for (p = processor_list; p != NULL; p = p->processor_list) {
1310 thread_bind(p);
1311 thread_block(THREAD_CONTINUE_NULL);
1312 T_LOG("Running SPR lock test on cpu %d\n", p->cpu_id);
1313
1314 uint64_t orig_value = __builtin_arm_rsr64(STR(ARM64_REG_HID8));
1315 spr_lock_test_addr = (vm_offset_t)VM_KERNEL_STRIP_PTR(arm64_msr_lock_test);
1316 spr_lock_exception_esr = 0;
1317 arm64_msr_lock_test(~orig_value);
1318 T_EXPECT(spr_lock_exception_esr != 0, "MSR write generated synchronous abort");
1319
1320 uint64_t new_value = __builtin_arm_rsr64(STR(ARM64_REG_HID8));
1321 T_EXPECT(orig_value == new_value, "MSR write did not succeed");
1322
1323 spr_lock_test_addr = 0;
1324 }
1325
1326 /* unbind thread from specific cpu */
1327 thread_bind(PROCESSOR_NULL);
1328 thread_block(THREAD_CONTINUE_NULL);
1329
1330 T_PASS("Done running SPR lock tests");
1331
1332 return KERN_SUCCESS;
1333 }
1334
1335 #endif /* HAS_TWO_STAGE_SPR_LOCK */