/*
 * Copyright (c) 2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System Copyright (c) 1991,1990,1989,1988,1987 Carnegie
 * Mellon University All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright notice
 * and this permission notice appear in all copies of the software,
 * derivative works or modified versions, and any portions thereof, and that
 * both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.
 * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
 * WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 * School of Computer Science Carnegie Mellon University Pittsburgh PA
 * 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon the
 * rights to redistribute these changes.
 */

#include <mach_ldebug.h>

#define LOCK_PRIVATE 1

#include <vm/pmap.h>
#include <kern/kalloc.h>
#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/xpr.h>
#include <kern/debug.h>
#include <string.h>
#include <tests/xnupost.h>

#if MACH_KDB
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
#include <ddb/db_print.h>
#endif /* MACH_KDB */

#include <sys/kdebug.h>
#include <sys/munge.h>
#include <machine/cpu_capabilities.h>
#include <arm/cpu_data_internal.h>
#include <arm/pmap.h>

kern_return_t arm64_lock_test(void);
kern_return_t arm64_munger_test(void);
kern_return_t ex_cb_test(void);
kern_return_t arm64_pan_test(void);

// exception handler ignores this fault address during PAN test
#if __ARM_PAN_AVAILABLE__
const uint64_t pan_ro_value = 0xFEEDB0B0DEADBEEF;
vm_offset_t pan_test_addr = 0;
vm_offset_t pan_ro_addr = 0;
volatile int pan_exception_level = 0;
volatile char pan_fault_value = 0;
#endif

#include <libkern/OSAtomic.h>
#define LOCK_TEST_ITERATIONS 50
static hw_lock_data_t lt_hw_lock;
static lck_spin_t lt_lck_spin_t;
static lck_mtx_t lt_mtx;
static lck_rw_t lt_rwlock;
static volatile uint32_t lt_counter = 0;
static volatile int lt_spinvolatile;
static volatile uint32_t lt_max_holders = 0;
static volatile uint32_t lt_upgrade_holders = 0;
static volatile uint32_t lt_max_upgrade_holders = 0;
static volatile uint32_t lt_num_holders = 0;
static volatile uint32_t lt_done_threads;
static volatile uint32_t lt_target_done_threads;
static volatile uint32_t lt_cpu_bind_id = 0;

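/*
 * Bookkeeping helpers for the blocking-lock tests below. Each holder
 * bumps lt_num_holders under lt_hw_lock and records the high-water mark
 * in lt_max_holders, so a test can assert mutual exclusion afterwards
 * (e.g. lt_max_holders == 1 for a mutex).
 */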
static void
lt_note_another_blocking_lock_holder()
{
        hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
        lt_num_holders++;
        lt_max_holders = (lt_max_holders < lt_num_holders) ? lt_num_holders : lt_max_holders;
        hw_lock_unlock(&lt_hw_lock);
}

static void
lt_note_blocking_lock_release()
{
        hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
        lt_num_holders--;
        hw_lock_unlock(&lt_hw_lock);
}

static void
lt_spin_a_little_bit()
{
        uint32_t i;

        for (i = 0; i < 10000; i++) {
                lt_spinvolatile++;
        }
}

static void
lt_sleep_a_little_bit()
{
        delay(100);
}

static void
lt_grab_mutex()
{
        lck_mtx_lock(&lt_mtx);
        lt_note_another_blocking_lock_holder();
        lt_sleep_a_little_bit();
        lt_counter++;
        lt_note_blocking_lock_release();
        lck_mtx_unlock(&lt_mtx);
}

static void
lt_grab_mutex_with_try()
{
        while (0 == lck_mtx_try_lock(&lt_mtx)) {
                ;
        }
        lt_note_another_blocking_lock_holder();
        lt_sleep_a_little_bit();
        lt_counter++;
        lt_note_blocking_lock_release();
        lck_mtx_unlock(&lt_mtx);
}

static void
lt_grab_rw_exclusive()
{
        lck_rw_lock_exclusive(&lt_rwlock);
        lt_note_another_blocking_lock_holder();
        lt_sleep_a_little_bit();
        lt_counter++;
        lt_note_blocking_lock_release();
        lck_rw_done(&lt_rwlock);
}

static void
lt_grab_rw_exclusive_with_try()
{
        while (0 == lck_rw_try_lock_exclusive(&lt_rwlock)) {
                lt_sleep_a_little_bit();
        }

        lt_note_another_blocking_lock_holder();
        lt_sleep_a_little_bit();
        lt_counter++;
        lt_note_blocking_lock_release();
        lck_rw_done(&lt_rwlock);
}

/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
 * static void
 * lt_grab_rw_shared()
 * {
 *      lck_rw_lock_shared(&lt_rwlock);
 *      lt_counter++;
 *
 *      lt_note_another_blocking_lock_holder();
 *      lt_sleep_a_little_bit();
 *      lt_note_blocking_lock_release();
 *
 *      lck_rw_done(&lt_rwlock);
 * }
 */

/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
 * static void
 * lt_grab_rw_shared_with_try()
 * {
 *      while(0 == lck_rw_try_lock_shared(&lt_rwlock));
 *      lt_counter++;
 *
 *      lt_note_another_blocking_lock_holder();
 *      lt_sleep_a_little_bit();
 *      lt_note_blocking_lock_release();
 *
 *      lck_rw_done(&lt_rwlock);
 * }
 */

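/*
 * Exercises the shared -> exclusive upgrade path. Note that when
 * lck_rw_lock_shared_to_exclusive() fails it drops the shared hold
 * entirely, so the loser must re-acquire the lock in exclusive mode
 * from scratch; lt_max_upgrade_holders should therefore never exceed 1.
 */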
static void
lt_upgrade_downgrade_rw()
{
        boolean_t upgraded, success;

        success = lck_rw_try_lock_shared(&lt_rwlock);
        if (!success) {
                lck_rw_lock_shared(&lt_rwlock);
        }

        lt_note_another_blocking_lock_holder();
        lt_sleep_a_little_bit();
        lt_note_blocking_lock_release();

        upgraded = lck_rw_lock_shared_to_exclusive(&lt_rwlock);
        if (!upgraded) {
                success = lck_rw_try_lock_exclusive(&lt_rwlock);

                if (!success) {
                        lck_rw_lock_exclusive(&lt_rwlock);
                }
        }

        lt_upgrade_holders++;
        if (lt_upgrade_holders > lt_max_upgrade_holders) {
                lt_max_upgrade_holders = lt_upgrade_holders;
        }

        lt_counter++;
        lt_sleep_a_little_bit();

        lt_upgrade_holders--;

        lck_rw_lock_exclusive_to_shared(&lt_rwlock);

        lt_spin_a_little_bit();
        lck_rw_done(&lt_rwlock);
}

const int limit = 1000000;
static int lt_stress_local_counters[MAX_CPUS];

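/*
 * Stress body run by one thread bound to each CPU: all threads hammer
 * lt_hw_lock until the shared counter reaches 'limit'. Each thread also
 * keeps a per-CPU count of its own increments; the caller uses those to
 * check both consistency (the locals must sum to lt_counter) and
 * fairness (no CPU starved below a small floor).
 */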
static void
lt_stress_hw_lock()
{
        int local_counter = 0;

        uint cpuid = current_processor()->cpu_id;

        kprintf("%s>cpu %d starting\n", __FUNCTION__, cpuid);

        hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
        lt_counter++;
        local_counter++;
        hw_lock_unlock(&lt_hw_lock);

        while (lt_counter < lt_target_done_threads) {
                ;
        }

        kprintf("%s>cpu %d started\n", __FUNCTION__, cpuid);

        while (lt_counter < limit) {
                hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
                if (lt_counter < limit) {
                        lt_counter++;
                        local_counter++;
                }
                hw_lock_unlock(&lt_hw_lock);
        }

        lt_stress_local_counters[cpuid] = local_counter;

        kprintf("%s>final counter %d cpu %d incremented the counter %d times\n", __FUNCTION__, lt_counter, cpuid, local_counter);
}

static void
lt_grab_hw_lock()
{
        hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
        lt_counter++;
        lt_spin_a_little_bit();
        hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_hw_lock_with_try()
{
        while (0 == hw_lock_try(&lt_hw_lock, LCK_GRP_NULL)) {
                ;
        }
        lt_counter++;
        lt_spin_a_little_bit();
        hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_hw_lock_with_to()
{
        while (0 == hw_lock_to(&lt_hw_lock, LockTimeOut, LCK_GRP_NULL)) {
                mp_enable_preemption();
        }
        lt_counter++;
        lt_spin_a_little_bit();
        hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_spin_lock()
{
        lck_spin_lock(&lt_lck_spin_t);
        lt_counter++;
        lt_spin_a_little_bit();
        lck_spin_unlock(&lt_lck_spin_t);
}

static void
lt_grab_spin_lock_with_try()
{
        while (0 == lck_spin_try_lock(&lt_lck_spin_t)) {
                ;
        }
        lt_counter++;
        lt_spin_a_little_bit();
        lck_spin_unlock(&lt_lck_spin_t);
}

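/*
 * Handshake state for the contended-trylock tests: the main thread sets
 * lt_thread_lock_grabbed once it holds the lock under test, and the
 * helper thread publishes its (expected-to-fail) acquire result in
 * lt_thread_lock_success. OSMemoryBarrier() orders the updates on both
 * sides.
 */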
static volatile boolean_t lt_thread_lock_grabbed;
static volatile boolean_t lt_thread_lock_success;

static void
lt_reset()
{
        lt_counter = 0;
        lt_max_holders = 0;
        lt_num_holders = 0;
        lt_max_upgrade_holders = 0;
        lt_upgrade_holders = 0;
        lt_done_threads = 0;
        lt_target_done_threads = 0;
        lt_cpu_bind_id = 0;

        OSMemoryBarrier();
}

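/*
 * Helper-thread bodies: wait for the main thread's signal, then attempt
 * the lock with a short timeout (or a trylock) and record the result.
 * A timed-out hw_lock_to() leaves preemption disabled, hence the
 * mp_enable_preemption() on the way out (compare the retry loop in
 * lt_grab_hw_lock_with_to() above).
 */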
static void
lt_trylock_hw_lock_with_to()
{
        OSMemoryBarrier();
        while (!lt_thread_lock_grabbed) {
                lt_sleep_a_little_bit();
                OSMemoryBarrier();
        }
        lt_thread_lock_success = hw_lock_to(&lt_hw_lock, 100, LCK_GRP_NULL);
        OSMemoryBarrier();
        mp_enable_preemption();
}

static void
lt_trylock_spin_try_lock()
{
        OSMemoryBarrier();
        while (!lt_thread_lock_grabbed) {
                lt_sleep_a_little_bit();
                OSMemoryBarrier();
        }
        lt_thread_lock_success = lck_spin_try_lock(&lt_lck_spin_t);
        OSMemoryBarrier();
}

static void
lt_trylock_thread(void *arg, wait_result_t wres __unused)
{
        void (*func)(void) = (void (*)(void))arg;

        func();

        OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_start_trylock_thread(thread_continue_t func)
{
        thread_t thread;
        kern_return_t kr;

        kr = kernel_thread_start(lt_trylock_thread, func, &thread);
        assert(kr == KERN_SUCCESS);

        thread_deallocate(thread);
}

static void
lt_wait_for_lock_test_threads()
{
        OSMemoryBarrier();
        /* Spin to reduce dependencies */
        while (lt_done_threads < lt_target_done_threads) {
                lt_sleep_a_little_bit();
                OSMemoryBarrier();
        }
        OSMemoryBarrier();
}

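/*
 * Trylock matrix: verifies that each lock type's trylock succeeds on an
 * unheld lock and fails on a held one, across the shared/exclusive
 * rwlock combinations, then uses a helper thread to show that timed and
 * try acquires fail while another thread holds the lock. The
 * real_ncpus == 1 special-casing re-enables preemption so the helper
 * thread can run (and time out) on a uniprocessor.
 */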
static kern_return_t
lt_test_trylocks()
{
        boolean_t success;
        extern unsigned int real_ncpus;

        /*
         * First mtx try lock succeeds, second fails.
         */
        success = lck_mtx_try_lock(&lt_mtx);
        T_ASSERT_NOTNULL(success, "First mtx try lock");
        success = lck_mtx_try_lock(&lt_mtx);
        T_ASSERT_NULL(success, "Second mtx try lock for a locked mtx");
        lck_mtx_unlock(&lt_mtx);

        /*
         * After regular grab, can't try lock.
         */
        lck_mtx_lock(&lt_mtx);
        success = lck_mtx_try_lock(&lt_mtx);
        T_ASSERT_NULL(success, "try lock should fail after regular lck_mtx_lock");
        lck_mtx_unlock(&lt_mtx);

        /*
         * Two shared try locks on a previously unheld rwlock succeed, and a
         * subsequent exclusive attempt fails.
         */
        success = lck_rw_try_lock_shared(&lt_rwlock);
        T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
        success = lck_rw_try_lock_shared(&lt_rwlock);
        T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
        success = lck_rw_try_lock_exclusive(&lt_rwlock);
        T_ASSERT_NULL(success, "exclusive lock attempt on previously held lock should fail");
        lck_rw_done(&lt_rwlock);
        lck_rw_done(&lt_rwlock);

        /*
         * After regular shared grab, can trylock
         * for shared but not for exclusive.
         */
        lck_rw_lock_shared(&lt_rwlock);
        success = lck_rw_try_lock_shared(&lt_rwlock);
        T_ASSERT_NOTNULL(success, "After regular shared grab another shared try lock should succeed.");
        success = lck_rw_try_lock_exclusive(&lt_rwlock);
        T_ASSERT_NULL(success, "After regular shared grab an exclusive lock attempt should fail.");
        lck_rw_done(&lt_rwlock);
        lck_rw_done(&lt_rwlock);

        /*
         * An exclusive try lock succeeds, subsequent shared and exclusive
         * attempts fail.
         */
        success = lck_rw_try_lock_exclusive(&lt_rwlock);
        T_ASSERT_NOTNULL(success, "An exclusive try lock should succeed");
        success = lck_rw_try_lock_shared(&lt_rwlock);
        T_ASSERT_NULL(success, "try lock in shared mode attempt after an exclusive grab should fail");
        success = lck_rw_try_lock_exclusive(&lt_rwlock);
        T_ASSERT_NULL(success, "try lock in exclusive mode attempt after an exclusive grab should fail");
        lck_rw_done(&lt_rwlock);

        /*
         * After regular exclusive grab, neither kind of trylock succeeds.
         */
        lck_rw_lock_exclusive(&lt_rwlock);
        success = lck_rw_try_lock_shared(&lt_rwlock);
        T_ASSERT_NULL(success, "After regular exclusive grab, shared trylock should not succeed");
        success = lck_rw_try_lock_exclusive(&lt_rwlock);
        T_ASSERT_NULL(success, "After regular exclusive grab, exclusive trylock should not succeed");
        lck_rw_done(&lt_rwlock);

        /*
         * First spin lock attempts succeed, second attempts fail.
         */
        success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
        T_ASSERT_NOTNULL(success, "First spin lock attempts should succeed");
        success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
        T_ASSERT_NULL(success, "Second attempt to spin lock should fail");
        hw_lock_unlock(&lt_hw_lock);

        hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
        success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
        T_ASSERT_NULL(success, "After taking spin lock, trylock attempt should fail");
        hw_lock_unlock(&lt_hw_lock);

        lt_reset();
        lt_thread_lock_grabbed = false;
        lt_thread_lock_success = true;
        lt_target_done_threads = 1;
        OSMemoryBarrier();
        lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
        success = hw_lock_to(&lt_hw_lock, 100, LCK_GRP_NULL);
        T_ASSERT_NOTNULL(success, "First spin lock with timeout should succeed");
        if (real_ncpus == 1) {
                mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
        }
        OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
        lt_wait_for_lock_test_threads();
        T_ASSERT_NULL(lt_thread_lock_success, "Second spin lock with timeout should fail and timeout");
        if (real_ncpus == 1) {
                mp_disable_preemption(); /* don't double-enable when we unlock */
        }
        hw_lock_unlock(&lt_hw_lock);

        lt_reset();
        lt_thread_lock_grabbed = false;
        lt_thread_lock_success = true;
        lt_target_done_threads = 1;
        OSMemoryBarrier();
        lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
        hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
        if (real_ncpus == 1) {
                mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
        }
        OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
        lt_wait_for_lock_test_threads();
        T_ASSERT_NULL(lt_thread_lock_success, "after taking a spin lock, lock attempt with timeout should fail");
        if (real_ncpus == 1) {
                mp_disable_preemption(); /* don't double-enable when we unlock */
        }
        hw_lock_unlock(&lt_hw_lock);

        success = lck_spin_try_lock(&lt_lck_spin_t);
        T_ASSERT_NOTNULL(success, "spin trylock of previously unheld lock should succeed");
        success = lck_spin_try_lock(&lt_lck_spin_t);
        T_ASSERT_NULL(success, "spin trylock attempt of previously held lock (with trylock) should fail");
        lck_spin_unlock(&lt_lck_spin_t);

        lt_reset();
        lt_thread_lock_grabbed = false;
        lt_thread_lock_success = true;
        lt_target_done_threads = 1;
        lt_start_trylock_thread(lt_trylock_spin_try_lock);
        lck_spin_lock(&lt_lck_spin_t);
        if (real_ncpus == 1) {
                mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
        }
        OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
        lt_wait_for_lock_test_threads();
        T_ASSERT_NULL(lt_thread_lock_success, "spin trylock attempt of previously held lock should fail");
        if (real_ncpus == 1) {
                mp_disable_preemption(); /* don't double-enable when we unlock */
        }
        lck_spin_unlock(&lt_lck_spin_t);

        return KERN_SUCCESS;
}

static void
lt_thread(void *arg, wait_result_t wres __unused)
{
        void (*func)(void) = (void (*)(void))arg;
        uint32_t i;

        for (i = 0; i < LOCK_TEST_ITERATIONS; i++) {
                func();
        }

        OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

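/*
 * Like lt_thread(), but first binds itself to a distinct CPU (chosen by
 * atomically taking the next lt_cpu_bind_id) so the hw-lock stress test
 * gets exactly one worker per processor.
 */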
static void
lt_bound_thread(void *arg, wait_result_t wres __unused)
{
        void (*func)(void) = (void (*)(void))arg;

        int cpuid = OSIncrementAtomic((volatile SInt32 *)&lt_cpu_bind_id);

        processor_t processor = processor_list;
        while ((processor != NULL) && (processor->cpu_id != cpuid)) {
                processor = processor->processor_list;
        }

        if (processor != NULL) {
                thread_bind(processor);
        }

        thread_block(THREAD_CONTINUE_NULL);

        func();

        OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_start_lock_thread(thread_continue_t func)
{
        thread_t thread;
        kern_return_t kr;

        kr = kernel_thread_start(lt_thread, func, &thread);
        assert(kr == KERN_SUCCESS);

        thread_deallocate(thread);
}


static void
lt_start_lock_thread_bound(thread_continue_t func)
{
        thread_t thread;
        kern_return_t kr;

        kr = kernel_thread_start(lt_bound_thread, func, &thread);
        assert(kr == KERN_SUCCESS);

        thread_deallocate(thread);
}

static kern_return_t
lt_test_locks()
{
        kern_return_t kr = KERN_SUCCESS;
        lck_grp_attr_t *lga = lck_grp_attr_alloc_init();
        lck_grp_t *lg = lck_grp_alloc_init("lock test", lga);

        lck_mtx_init(&lt_mtx, lg, LCK_ATTR_NULL);
        lck_rw_init(&lt_rwlock, lg, LCK_ATTR_NULL);
        lck_spin_init(&lt_lck_spin_t, lg, LCK_ATTR_NULL);
        hw_lock_init(&lt_hw_lock);

        T_LOG("Testing locks.");

        /* Try locks (custom) */
        lt_reset();

        T_LOG("Running try lock test.");
        kr = lt_test_trylocks();
        T_EXPECT_NULL(kr, "try lock test failed.");

        /* Uncontended mutex */
        T_LOG("Running uncontended mutex test.");
        lt_reset();
        lt_target_done_threads = 1;
        lt_start_lock_thread(lt_grab_mutex);
        lt_wait_for_lock_test_threads();
        T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
        T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

        /* Contended mutex */
669 T_LOG("Running contended mutex test.");
670 lt_reset();
671 lt_target_done_threads = 3;
672 lt_start_lock_thread(lt_grab_mutex);
673 lt_start_lock_thread(lt_grab_mutex);
674 lt_start_lock_thread(lt_grab_mutex);
675 lt_wait_for_lock_test_threads();
676 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
677 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
678
679 /* Contended mutex: try locks*/
680 T_LOG("Running contended mutex trylock test.");
681 lt_reset();
682 lt_target_done_threads = 3;
683 lt_start_lock_thread(lt_grab_mutex_with_try);
684 lt_start_lock_thread(lt_grab_mutex_with_try);
685 lt_start_lock_thread(lt_grab_mutex_with_try);
686 lt_wait_for_lock_test_threads();
687 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
688 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
689
690 /* Uncontended exclusive rwlock */
691 T_LOG("Running uncontended exclusive rwlock test.");
692 lt_reset();
693 lt_target_done_threads = 1;
694 lt_start_lock_thread(lt_grab_rw_exclusive);
695 lt_wait_for_lock_test_threads();
696 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
697 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
698
699 /* Uncontended shared rwlock */
700
701 /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
702 * T_LOG("Running uncontended shared rwlock test.");
703 * lt_reset();
704 * lt_target_done_threads = 1;
705 * lt_start_lock_thread(lt_grab_rw_shared);
706 * lt_wait_for_lock_test_threads();
707 * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
708 * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
709 */
710
711 /* Contended exclusive rwlock */
712 T_LOG("Running contended exclusive rwlock test.");
713 lt_reset();
714 lt_target_done_threads = 3;
715 lt_start_lock_thread(lt_grab_rw_exclusive);
716 lt_start_lock_thread(lt_grab_rw_exclusive);
717 lt_start_lock_thread(lt_grab_rw_exclusive);
718 lt_wait_for_lock_test_threads();
719 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
720 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
721
722 /* One shared, two exclusive */
723 /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
724 * T_LOG("Running test with one shared and two exclusive rw lock threads.");
725 * lt_reset();
726 * lt_target_done_threads = 3;
727 * lt_start_lock_thread(lt_grab_rw_shared);
728 * lt_start_lock_thread(lt_grab_rw_exclusive);
729 * lt_start_lock_thread(lt_grab_rw_exclusive);
730 * lt_wait_for_lock_test_threads();
731 * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
732 * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
733 */
734
735 /* Four shared */
736 /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
737 * T_LOG("Running test with four shared holders.");
738 * lt_reset();
739 * lt_target_done_threads = 4;
740 * lt_start_lock_thread(lt_grab_rw_shared);
741 * lt_start_lock_thread(lt_grab_rw_shared);
742 * lt_start_lock_thread(lt_grab_rw_shared);
743 * lt_start_lock_thread(lt_grab_rw_shared);
744 * lt_wait_for_lock_test_threads();
745 * T_EXPECT_LE_UINT(lt_max_holders, 4, NULL);
746 */
747
748 /* Three doing upgrades and downgrades */
749 T_LOG("Running test with threads upgrading and downgrading.");
750 lt_reset();
751 lt_target_done_threads = 3;
752 lt_start_lock_thread(lt_upgrade_downgrade_rw);
753 lt_start_lock_thread(lt_upgrade_downgrade_rw);
754 lt_start_lock_thread(lt_upgrade_downgrade_rw);
755 lt_wait_for_lock_test_threads();
756 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
757 T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
758 T_EXPECT_EQ_UINT(lt_max_upgrade_holders, 1, NULL);
759
760 /* Uncontended - exclusive trylocks */
761 T_LOG("Running test with single thread doing exclusive rwlock trylocks.");
762 lt_reset();
763 lt_target_done_threads = 1;
764 lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
765 lt_wait_for_lock_test_threads();
766 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
767 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
768
769 /* Uncontended - shared trylocks */
770 /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
771 * T_LOG("Running test with single thread doing shared rwlock trylocks.");
772 * lt_reset();
773 * lt_target_done_threads = 1;
774 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
775 * lt_wait_for_lock_test_threads();
776 * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
777 * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
778 */
779
780 /* Three doing exclusive trylocks */
781 T_LOG("Running test with threads doing exclusive rwlock trylocks.");
782 lt_reset();
783 lt_target_done_threads = 3;
784 lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
785 lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
786 lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
787 lt_wait_for_lock_test_threads();
788 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
789 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
790
791 /* Three doing shared trylocks */
792 /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
793 * T_LOG("Running test with threads doing shared rwlock trylocks.");
794 * lt_reset();
795 * lt_target_done_threads = 3;
796 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
797 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
798 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
799 * lt_wait_for_lock_test_threads();
800 * T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
801 * T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
802 */
803
804 /* Three doing various trylocks */
805 /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
806 * T_LOG("Running test with threads doing mixed rwlock trylocks.");
807 * lt_reset();
808 * lt_target_done_threads = 4;
809 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
810 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
811 * lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
812 * lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
813 * lt_wait_for_lock_test_threads();
814 * T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
815 * T_EXPECT_LE_UINT(lt_max_holders, 2, NULL);
816 */
817
818 /* HW locks */
819 T_LOG("Running test with hw_lock_lock()");
820 lt_reset();
821 lt_target_done_threads = 3;
822 lt_start_lock_thread(lt_grab_hw_lock);
823 lt_start_lock_thread(lt_grab_hw_lock);
824 lt_start_lock_thread(lt_grab_hw_lock);
825 lt_wait_for_lock_test_threads();
826 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
827
828 /* HW locks stress test */
829 T_LOG("Running HW locks stress test with hw_lock_lock()");
830 extern unsigned int real_ncpus;
831 lt_reset();
832 lt_target_done_threads = real_ncpus;
833 for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
834 lt_start_lock_thread_bound(lt_stress_hw_lock);
835 }
836 lt_wait_for_lock_test_threads();
837 bool starvation = false;
838 uint total_local_count = 0;
839 for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
840 starvation = starvation || (lt_stress_local_counters[processor->cpu_id] < 10);
841 total_local_count += lt_stress_local_counters[processor->cpu_id];
842 }
843 if (total_local_count != lt_counter) {
844 T_FAIL("Lock failure\n");
845 } else if (starvation) {
846 T_FAIL("Lock starvation found\n");
847 } else {
848 T_PASS("HW locks stress test with hw_lock_lock()");
849 }
850
851
852 /* HW locks: trylocks */
853 T_LOG("Running test with hw_lock_try()");
854 lt_reset();
855 lt_target_done_threads = 3;
856 lt_start_lock_thread(lt_grab_hw_lock_with_try);
857 lt_start_lock_thread(lt_grab_hw_lock_with_try);
858 lt_start_lock_thread(lt_grab_hw_lock_with_try);
859 lt_wait_for_lock_test_threads();
860 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
861
862 /* HW locks: with timeout */
863 T_LOG("Running test with hw_lock_to()");
864 lt_reset();
865 lt_target_done_threads = 3;
866 lt_start_lock_thread(lt_grab_hw_lock_with_to);
867 lt_start_lock_thread(lt_grab_hw_lock_with_to);
868 lt_start_lock_thread(lt_grab_hw_lock_with_to);
869 lt_wait_for_lock_test_threads();
870 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
871
872 /* Spin locks */
873 T_LOG("Running test with lck_spin_lock()");
874 lt_reset();
875 lt_target_done_threads = 3;
876 lt_start_lock_thread(lt_grab_spin_lock);
877 lt_start_lock_thread(lt_grab_spin_lock);
878 lt_start_lock_thread(lt_grab_spin_lock);
879 lt_wait_for_lock_test_threads();
880 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
881
882 /* Spin locks: trylocks */
883 T_LOG("Running test with lck_spin_try_lock()");
884 lt_reset();
885 lt_target_done_threads = 3;
886 lt_start_lock_thread(lt_grab_spin_lock_with_try);
887 lt_start_lock_thread(lt_grab_spin_lock_with_try);
888 lt_start_lock_thread(lt_grab_spin_lock_with_try);
889 lt_wait_for_lock_test_threads();
890 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
891
892 return KERN_SUCCESS;
893 }
894
895 #define MT_MAX_ARGS 8
896 #define MT_INITIAL_VALUE 0xfeedbeef
897 #define MT_W_VAL (0x00000000feedbeefULL) /* Drop in zeros */
898 #define MT_S_VAL (0xfffffffffeedbeefULL) /* High bit is 1, so sign-extends as negative */
899 #define MT_L_VAL (((uint64_t)MT_INITIAL_VALUE) | (((uint64_t)MT_INITIAL_VALUE) << 32)) /* Two back-to-back */
900
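/*
 * Argument-munger tests. The syscall mungers rewrite an array of 32-bit
 * user-supplied words in place into 64-bit argument slots: 'w' words
 * are zero-extended, 's' words are sign-extended, and 'l' longs are
 * assembled from two adjacent words. Each table entry below seeds the
 * buffer with mt_in_words copies of MT_INITIAL_VALUE and checks the
 * mt_nout widened outputs against the MT_*_VAL patterns.
 */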
typedef void (*sy_munge_t)(void*);

#define MT_FUNC(x) #x, x
struct munger_test {
        const char *mt_name;
        sy_munge_t mt_func;
        uint32_t mt_in_words;
        uint32_t mt_nout;
        uint64_t mt_expected[MT_MAX_ARGS];
} munger_tests[] = {
        {MT_FUNC(munge_w), 1, 1, {MT_W_VAL}},
        {MT_FUNC(munge_ww), 2, 2, {MT_W_VAL, MT_W_VAL}},
        {MT_FUNC(munge_www), 3, 3, {MT_W_VAL, MT_W_VAL, MT_W_VAL}},
        {MT_FUNC(munge_wwww), 4, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
        {MT_FUNC(munge_wwwww), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
        {MT_FUNC(munge_wwwwww), 6, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
        {MT_FUNC(munge_wwwwwww), 7, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
        {MT_FUNC(munge_wwwwwwww), 8, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
        {MT_FUNC(munge_wl), 3, 2, {MT_W_VAL, MT_L_VAL}},
        {MT_FUNC(munge_wwl), 4, 3, {MT_W_VAL, MT_W_VAL, MT_L_VAL}},
        {MT_FUNC(munge_wwlll), 8, 5, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
        {MT_FUNC(munge_wlw), 4, 3, {MT_W_VAL, MT_L_VAL, MT_W_VAL}},
        {MT_FUNC(munge_wlwwwll), 10, 7, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
        {MT_FUNC(munge_wlwwwllw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
        {MT_FUNC(munge_wlwwlwlw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
        {MT_FUNC(munge_wll), 5, 3, {MT_W_VAL, MT_L_VAL, MT_L_VAL}},
        {MT_FUNC(munge_wlll), 7, 4, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
        {MT_FUNC(munge_wllwwll), 11, 7, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
        {MT_FUNC(munge_wwwlw), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
        {MT_FUNC(munge_wwwlww), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
        {MT_FUNC(munge_wwwl), 5, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
        {MT_FUNC(munge_wwwwlw), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
        {MT_FUNC(munge_wwwwl), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
        {MT_FUNC(munge_wwwwwl), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
        {MT_FUNC(munge_wwwwwlww), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
        {MT_FUNC(munge_wwwwwllw), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
        {MT_FUNC(munge_wwwwwlll), 11, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
        {MT_FUNC(munge_wwwwwwl), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
        {MT_FUNC(munge_wwwwwwlw), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
        {MT_FUNC(munge_wwwwwwll), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
        {MT_FUNC(munge_wsw), 3, 3, {MT_W_VAL, MT_S_VAL, MT_W_VAL}},
        {MT_FUNC(munge_wws), 3, 3, {MT_W_VAL, MT_W_VAL, MT_S_VAL}},
        {MT_FUNC(munge_wwwsw), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_S_VAL, MT_W_VAL}},
        {MT_FUNC(munge_llllll), 12, 6, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
        {MT_FUNC(munge_l), 2, 1, {MT_L_VAL}},
        {MT_FUNC(munge_lw), 3, 2, {MT_L_VAL, MT_W_VAL}},
        {MT_FUNC(munge_lwww), 5, 4, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
        {MT_FUNC(munge_lwwwwwww), 9, 8, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
        {MT_FUNC(munge_wlwwwl), 8, 6, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
        {MT_FUNC(munge_wwlwwwl), 9, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}
};

#define MT_TEST_COUNT (sizeof(munger_tests) / sizeof(struct munger_test))

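/*
 * Worked example for the table above: munge_wl declares one word and
 * one long, so mt_in_words is 3 (w, l-low, l-high) and mt_nout is 2.
 * Starting from three 0xfeedbeef words, the munged buffer reads back as
 * two 64-bit values: 0x00000000feedbeef (MT_W_VAL) and
 * 0xfeedbeeffeedbeef (MT_L_VAL).
 */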
static void
mt_reset(uint32_t in_words, size_t total_size, uint32_t *data)
{
        uint32_t i;

        for (i = 0; i < in_words; i++) {
                data[i] = MT_INITIAL_VALUE;
        }

        if (in_words * sizeof(uint32_t) < total_size) {
                bzero(&data[in_words], total_size - in_words * sizeof(uint32_t));
        }
}

static void
mt_test_mungers()
{
        uint64_t data[MT_MAX_ARGS];
        uint32_t i, j;

        for (i = 0; i < MT_TEST_COUNT; i++) {
                struct munger_test *test = &munger_tests[i];
                int pass = 1;

                T_LOG("Testing %s", test->mt_name);

                mt_reset(test->mt_in_words, sizeof(data), (uint32_t*)data);
                test->mt_func(data);

                for (j = 0; j < test->mt_nout; j++) {
                        if (data[j] != test->mt_expected[j]) {
                                T_FAIL("Index %d: expected %llx, got %llx.", j, test->mt_expected[j], data[j]);
                                pass = 0;
                        }
                }
                if (pass) {
                        T_PASS(test->mt_name);
                }
        }
}

/* Exception Callback Test */
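/*
 * Test callback registered for the EXCB_CLASS_TEST* classes: it records
 * the faulting address (state->far) into the per-class context passed
 * as refcon, then returns a class-specific action so ex_cb_test() can
 * verify both the dispatch and the returned action.
 */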
static ex_cb_action_t
excb_test_action(
        ex_cb_class_t           cb_class,
        void                    *refcon,
        const ex_cb_state_t     *state
        )
{
        ex_cb_state_t *context = (ex_cb_state_t *)refcon;

        if ((NULL == refcon) || (NULL == state)) {
                return EXCB_ACTION_TEST_FAIL;
        }

        context->far = state->far;

        switch (cb_class) {
        case EXCB_CLASS_TEST1:
                return EXCB_ACTION_RERUN;
        case EXCB_CLASS_TEST2:
                return EXCB_ACTION_NONE;
        default:
                return EXCB_ACTION_TEST_FAIL;
        }
}


kern_return_t
ex_cb_test()
{
        const vm_offset_t far1 = 0xdead0001;
        const vm_offset_t far2 = 0xdead0002;
        kern_return_t kr;
        ex_cb_state_t test_context_1 = {0xdeadbeef};
        ex_cb_state_t test_context_2 = {0xdeadbeef};
        ex_cb_action_t action;

        T_LOG("Testing Exception Callback.");

        T_LOG("Running registration test.");

        kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
        T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST1 exception callback");
        kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
        T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST2 exception callback");

        kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
        T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST2 exception callback");
        kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
        T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST1 exception callback");

        T_LOG("Running invocation test.");

        action = ex_cb_invoke(EXCB_CLASS_TEST1, far1);
        T_ASSERT(EXCB_ACTION_RERUN == action, NULL);
        T_ASSERT(far1 == test_context_1.far, NULL);

        action = ex_cb_invoke(EXCB_CLASS_TEST2, far2);
        T_ASSERT(EXCB_ACTION_NONE == action, NULL);
        T_ASSERT(far2 == test_context_2.far, NULL);

        action = ex_cb_invoke(EXCB_CLASS_TEST3, 0);
        T_ASSERT(EXCB_ACTION_NONE == action, NULL);

        return KERN_SUCCESS;
}


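/*
 * PAN (Privileged Access Never) test. The kernel deliberately reads a
 * comm-page alias that is also user-accessible, which should raise a
 * PAN fault. The exception handler cooperates: on recognizing
 * pan_test_addr it records the value read in pan_fault_value and the
 * nesting depth in pan_exception_level, disables PAN, and reruns the
 * faulting load. A second fault, a write to the read-only pan_ro_addr
 * taken while PAN is off, checks that PAN is re-enabled across the
 * exception.
 */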
#if __ARM_PAN_AVAILABLE__
kern_return_t
arm64_pan_test()
{
        vm_offset_t priv_addr = _COMM_PAGE_SIGNATURE;

        T_LOG("Testing PAN.");

        T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);

        pan_exception_level = 0;
        pan_fault_value = 0xDE;
        // convert priv_addr to one that is accessible from user mode
        pan_test_addr = priv_addr + _COMM_HIGH_PAGE64_BASE_ADDRESS -
            _COMM_PAGE_START_ADDRESS;

        // Below should trigger a PAN exception as pan_test_addr is accessible
        // in user mode.
        // The exception handler, upon recognizing the fault address is pan_test_addr,
        // will disable PAN and rerun this instruction successfully.
        T_ASSERT(*(char *)pan_test_addr == *(char *)priv_addr, NULL);

        T_ASSERT(pan_exception_level == 2, NULL);

        T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

        T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

        pan_exception_level = 0;
        pan_fault_value = 0xAD;
        pan_ro_addr = (vm_offset_t) &pan_ro_value;

        // Force a permission fault while PAN is disabled to make sure PAN is
        // re-enabled during the exception handler.
        *((volatile uint64_t*)pan_ro_addr) = 0xFEEDFACECAFECAFE;

        T_ASSERT(pan_exception_level == 2, NULL);

        T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

        T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

        pan_test_addr = 0;
        pan_ro_addr = 0;

        __builtin_arm_wsr("pan", 1);
        return KERN_SUCCESS;
}
#endif


kern_return_t
arm64_lock_test()
{
        return lt_test_locks();
}

kern_return_t
arm64_munger_test()
{
        mt_test_mungers();
        return 0;
}