/*
 * Copyright (c) 2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System Copyright (c) 1991,1990,1989,1988,1987 Carnegie
 * Mellon University All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright notice
 * and this permission notice appear in all copies of the software,
 * derivative works or modified versions, and any portions thereof, and that
 * both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.
 * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
 * WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 * School of Computer Science Carnegie Mellon University Pittsburgh PA
 * 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon the
 * rights to redistribute these changes.
 */

#include <mach_ldebug.h>

#define LOCK_PRIVATE 1

#include <vm/pmap.h>
#include <kern/kalloc.h>
#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/xpr.h>
#include <kern/debug.h>
#include <string.h>
#include <tests/xnupost.h>

#if MACH_KDB
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
#include <ddb/db_print.h>
#endif /* MACH_KDB */

#include <sys/kdebug.h>
#include <sys/munge.h>
#include <machine/cpu_capabilities.h>
#include <arm/cpu_data_internal.h>
#include <arm/pmap.h>

kern_return_t arm64_lock_test(void);
kern_return_t arm64_munger_test(void);
kern_return_t ex_cb_test(void);
kern_return_t arm64_pan_test(void);

// exception handler ignores this fault address during PAN test
#if __ARM_PAN_AVAILABLE__
const uint64_t pan_ro_value = 0xFEEDB0B0DEADBEEF;
vm_offset_t pan_test_addr = 0;
vm_offset_t pan_ro_addr = 0;
volatile int pan_exception_level = 0;
volatile char pan_fault_value = 0;
#endif

#include <libkern/OSAtomic.h>
#define LOCK_TEST_ITERATIONS 50
static hw_lock_data_t lt_hw_lock;
static lck_spin_t lt_lck_spin_t;
static lck_mtx_t lt_mtx;
static lck_rw_t lt_rwlock;
static volatile uint32_t lt_counter = 0;
static volatile int lt_spinvolatile;
static volatile uint32_t lt_max_holders = 0;
static volatile uint32_t lt_upgrade_holders = 0;
static volatile uint32_t lt_max_upgrade_holders = 0;
static volatile uint32_t lt_num_holders = 0;
static volatile uint32_t lt_done_threads;
static volatile uint32_t lt_target_done_threads;
static volatile uint32_t lt_cpu_bind_id = 0;

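/*
 * Bookkeeping for the blocking-lock tests: holders register and deregister
 * under lt_hw_lock so that lt_max_holders records the largest number of
 * simultaneous holders observed.
 */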
static void
lt_note_another_blocking_lock_holder()
{
    hw_lock_lock(&lt_hw_lock);
    lt_num_holders++;
    lt_max_holders = (lt_max_holders < lt_num_holders) ? lt_num_holders : lt_max_holders;
    hw_lock_unlock(&lt_hw_lock);
}

static void
lt_note_blocking_lock_release()
{
    hw_lock_lock(&lt_hw_lock);
    lt_num_holders--;
    hw_lock_unlock(&lt_hw_lock);
}

static void
lt_spin_a_little_bit()
{
    uint32_t i;

    for (i = 0; i < 10000; i++) {
        lt_spinvolatile++;
    }
}

static void
lt_sleep_a_little_bit()
{
    delay(100);
}

static void
lt_grab_mutex()
{
    lck_mtx_lock(&lt_mtx);
    lt_note_another_blocking_lock_holder();
    lt_sleep_a_little_bit();
    lt_counter++;
    lt_note_blocking_lock_release();
    lck_mtx_unlock(&lt_mtx);
}

static void
lt_grab_mutex_with_try()
{
    while(0 == lck_mtx_try_lock(&lt_mtx));
    lt_note_another_blocking_lock_holder();
    lt_sleep_a_little_bit();
    lt_counter++;
    lt_note_blocking_lock_release();
    lck_mtx_unlock(&lt_mtx);

}

static void
lt_grab_rw_exclusive()
{
    lck_rw_lock_exclusive(&lt_rwlock);
    lt_note_another_blocking_lock_holder();
    lt_sleep_a_little_bit();
    lt_counter++;
    lt_note_blocking_lock_release();
    lck_rw_done(&lt_rwlock);
}

static void
lt_grab_rw_exclusive_with_try()
{
    while(0 == lck_rw_try_lock_exclusive(&lt_rwlock)) {
        lt_sleep_a_little_bit();
    }

    lt_note_another_blocking_lock_holder();
    lt_sleep_a_little_bit();
    lt_counter++;
    lt_note_blocking_lock_release();
    lck_rw_done(&lt_rwlock);
}

/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
static void
lt_grab_rw_shared()
{
    lck_rw_lock_shared(&lt_rwlock);
    lt_counter++;

    lt_note_another_blocking_lock_holder();
    lt_sleep_a_little_bit();
    lt_note_blocking_lock_release();

    lck_rw_done(&lt_rwlock);
}
*/

/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
static void
lt_grab_rw_shared_with_try()
{
    while(0 == lck_rw_try_lock_shared(&lt_rwlock));
    lt_counter++;

    lt_note_another_blocking_lock_holder();
    lt_sleep_a_little_bit();
    lt_note_blocking_lock_release();

    lck_rw_done(&lt_rwlock);
}
*/

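/*
 * Acquire the rwlock shared, attempt an upgrade to exclusive (reacquiring
 * exclusive directly if the upgrade fails and drops the lock), then
 * downgrade back to shared before releasing.
 */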
static void
lt_upgrade_downgrade_rw()
{
    boolean_t upgraded, success;

    success = lck_rw_try_lock_shared(&lt_rwlock);
    if (!success) {
        lck_rw_lock_shared(&lt_rwlock);
    }

    lt_note_another_blocking_lock_holder();
    lt_sleep_a_little_bit();
    lt_note_blocking_lock_release();

    upgraded = lck_rw_lock_shared_to_exclusive(&lt_rwlock);
    if (!upgraded) {
        success = lck_rw_try_lock_exclusive(&lt_rwlock);

        if (!success) {
            lck_rw_lock_exclusive(&lt_rwlock);
        }
    }

    lt_upgrade_holders++;
    if (lt_upgrade_holders > lt_max_upgrade_holders) {
        lt_max_upgrade_holders = lt_upgrade_holders;
    }

    lt_counter++;
    lt_sleep_a_little_bit();

    lt_upgrade_holders--;

    lck_rw_lock_exclusive_to_shared(&lt_rwlock);

    lt_spin_a_little_bit();
    lck_rw_done(&lt_rwlock);
}

const int limit = 1000000;
static int lt_stress_local_counters[MAX_CPUS];

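/*
 * Stress test body: once all bound threads have checked in, each spins
 * taking lt_hw_lock under splsched() and incrementing the shared counter
 * until it reaches 'limit', recording its own contribution in
 * lt_stress_local_counters[] for the starvation check in lt_test_locks().
 */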
static void
lt_stress_hw_lock()
{
    int local_counter = 0;

    uint cpuid = current_processor()->cpu_id;

    kprintf("%s>cpu %d starting\n", __FUNCTION__, cpuid);

    hw_lock_lock(&lt_hw_lock);
    lt_counter++;
    local_counter++;
    hw_lock_unlock(&lt_hw_lock);

    while (lt_counter < lt_target_done_threads) {
        ;
    }

    kprintf("%s>cpu %d started\n", __FUNCTION__, cpuid);

    while (lt_counter < limit) {
        spl_t s = splsched();
        hw_lock_lock(&lt_hw_lock);
        if (lt_counter < limit) {
            lt_counter++;
            local_counter++;
        }
        hw_lock_unlock(&lt_hw_lock);
        splx(s);
    }

    lt_stress_local_counters[cpuid] = local_counter;

    kprintf("%s>final counter %d cpu %d incremented the counter %d times\n", __FUNCTION__, lt_counter, cpuid, local_counter);
}

static void
lt_grab_hw_lock()
{
    hw_lock_lock(&lt_hw_lock);
    lt_counter++;
    lt_spin_a_little_bit();
    hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_hw_lock_with_try()
{
    while(0 == hw_lock_try(&lt_hw_lock));
    lt_counter++;
    lt_spin_a_little_bit();
    hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_hw_lock_with_to()
{
    while(0 == hw_lock_to(&lt_hw_lock, LockTimeOut))
        mp_enable_preemption();
    lt_counter++;
    lt_spin_a_little_bit();
    hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_spin_lock()
{
    lck_spin_lock(&lt_lck_spin_t);
    lt_counter++;
    lt_spin_a_little_bit();
    lck_spin_unlock(&lt_lck_spin_t);
}

static void
lt_grab_spin_lock_with_try()
{
    while(0 == lck_spin_try_lock(&lt_lck_spin_t));
    lt_counter++;
    lt_spin_a_little_bit();
    lck_spin_unlock(&lt_lck_spin_t);
}

static volatile boolean_t lt_thread_lock_grabbed;
static volatile boolean_t lt_thread_lock_success;

static void
lt_reset()
{
    lt_counter = 0;
    lt_max_holders = 0;
    lt_num_holders = 0;
    lt_max_upgrade_holders = 0;
    lt_upgrade_holders = 0;
    lt_done_threads = 0;
    lt_target_done_threads = 0;
    lt_cpu_bind_id = 0;

    OSMemoryBarrier();
}

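/*
 * Helper threads for the trylock tests: wait until the main test thread
 * signals (via lt_thread_lock_grabbed) that it holds the lock under test,
 * then record in lt_thread_lock_success whether a timed or try acquisition
 * succeeds.
 */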
static void
lt_trylock_hw_lock_with_to()
{
    OSMemoryBarrier();
    while (!lt_thread_lock_grabbed) {
        lt_sleep_a_little_bit();
        OSMemoryBarrier();
    }
    lt_thread_lock_success = hw_lock_to(&lt_hw_lock, 100);
    OSMemoryBarrier();
    mp_enable_preemption();
}

static void
lt_trylock_spin_try_lock()
{
    OSMemoryBarrier();
    while (!lt_thread_lock_grabbed) {
        lt_sleep_a_little_bit();
        OSMemoryBarrier();
    }
    lt_thread_lock_success = lck_spin_try_lock(&lt_lck_spin_t);
    OSMemoryBarrier();
}

static void
lt_trylock_thread(void *arg, wait_result_t wres __unused)
{
    void (*func)(void) = (void(*)(void))arg;

    func();

    OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_start_trylock_thread(thread_continue_t func)
{
    thread_t thread;
    kern_return_t kr;

    kr = kernel_thread_start(lt_trylock_thread, func, &thread);
    assert(kr == KERN_SUCCESS);

    thread_deallocate(thread);
}

static void
lt_wait_for_lock_test_threads()
{
    OSMemoryBarrier();
    /* Spin to reduce dependencies */
    while (lt_done_threads < lt_target_done_threads) {
        lt_sleep_a_little_bit();
        OSMemoryBarrier();
    }
    OSMemoryBarrier();
}

static kern_return_t
lt_test_trylocks()
{
    boolean_t success;
    extern unsigned int real_ncpus;

    /*
     * First mtx try lock succeeds, second fails.
     */
    success = lck_mtx_try_lock(&lt_mtx);
    T_ASSERT_NOTNULL(success, "First mtx try lock");
    success = lck_mtx_try_lock(&lt_mtx);
    T_ASSERT_NULL(success, "Second mtx try lock for a locked mtx");
    lck_mtx_unlock(&lt_mtx);

    /*
     * After regular grab, can't try lock.
     */
    lck_mtx_lock(&lt_mtx);
    success = lck_mtx_try_lock(&lt_mtx);
    T_ASSERT_NULL(success, "try lock should fail after regular lck_mtx_lock");
    lck_mtx_unlock(&lt_mtx);

    /*
     * Two shared try locks on a previously unheld rwlock succeed, and a
     * subsequent exclusive attempt fails.
     */
    success = lck_rw_try_lock_shared(&lt_rwlock);
    T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
    success = lck_rw_try_lock_shared(&lt_rwlock);
    T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
    success = lck_rw_try_lock_exclusive(&lt_rwlock);
    T_ASSERT_NULL(success, "exclusive lock attempt on previously held lock should fail");
    lck_rw_done(&lt_rwlock);
    lck_rw_done(&lt_rwlock);

    /*
     * After regular shared grab, can trylock
     * for shared but not for exclusive.
     */
    lck_rw_lock_shared(&lt_rwlock);
    success = lck_rw_try_lock_shared(&lt_rwlock);
    T_ASSERT_NOTNULL(success, "After regular shared grab another shared try lock should succeed.");
    success = lck_rw_try_lock_exclusive(&lt_rwlock);
    T_ASSERT_NULL(success, "After regular shared grab an exclusive lock attempt should fail.");
    lck_rw_done(&lt_rwlock);
    lck_rw_done(&lt_rwlock);

    /*
     * An exclusive try lock succeeds, subsequent shared and exclusive
     * attempts fail.
     */
    success = lck_rw_try_lock_exclusive(&lt_rwlock);
    T_ASSERT_NOTNULL(success, "An exclusive try lock should succeed");
    success = lck_rw_try_lock_shared(&lt_rwlock);
    T_ASSERT_NULL(success, "try lock in shared mode attempt after an exclusive grab should fail");
    success = lck_rw_try_lock_exclusive(&lt_rwlock);
    T_ASSERT_NULL(success, "try lock in exclusive mode attempt after an exclusive grab should fail");
    lck_rw_done(&lt_rwlock);

    /*
     * After regular exclusive grab, neither kind of trylock succeeds.
     */
    lck_rw_lock_exclusive(&lt_rwlock);
    success = lck_rw_try_lock_shared(&lt_rwlock);
    T_ASSERT_NULL(success, "After regular exclusive grab, shared trylock should not succeed");
    success = lck_rw_try_lock_exclusive(&lt_rwlock);
    T_ASSERT_NULL(success, "After regular exclusive grab, exclusive trylock should not succeed");
    lck_rw_done(&lt_rwlock);

    /*
     * First spin lock attempts succeed, second attempts fail.
     */
    success = hw_lock_try(&lt_hw_lock);
    T_ASSERT_NOTNULL(success, "First spin lock attempts should succeed");
    success = hw_lock_try(&lt_hw_lock);
    T_ASSERT_NULL(success, "Second attempt to spin lock should fail");
    hw_lock_unlock(&lt_hw_lock);

    hw_lock_lock(&lt_hw_lock);
    success = hw_lock_try(&lt_hw_lock);
    T_ASSERT_NULL(success, "After taking spin lock, trylock attempt should fail");
    hw_lock_unlock(&lt_hw_lock);

    lt_reset();
    lt_thread_lock_grabbed = false;
    lt_thread_lock_success = true;
    lt_target_done_threads = 1;
    OSMemoryBarrier();
    lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
    success = hw_lock_to(&lt_hw_lock, 100);
    T_ASSERT_NOTNULL(success, "First spin lock with timeout should succeed");
    if (real_ncpus == 1) {
        mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
    }
    OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
    lt_wait_for_lock_test_threads();
    T_ASSERT_NULL(lt_thread_lock_success, "Second spin lock with timeout should fail and timeout");
    if (real_ncpus == 1) {
        mp_disable_preemption(); /* don't double-enable when we unlock */
    }
    hw_lock_unlock(&lt_hw_lock);

    lt_reset();
    lt_thread_lock_grabbed = false;
    lt_thread_lock_success = true;
    lt_target_done_threads = 1;
    OSMemoryBarrier();
    lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
    hw_lock_lock(&lt_hw_lock);
    if (real_ncpus == 1) {
        mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
    }
    OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
    lt_wait_for_lock_test_threads();
    T_ASSERT_NULL(lt_thread_lock_success, "after taking a spin lock, lock attempt with timeout should fail");
    if (real_ncpus == 1) {
        mp_disable_preemption(); /* don't double-enable when we unlock */
    }
    hw_lock_unlock(&lt_hw_lock);

    success = lck_spin_try_lock(&lt_lck_spin_t);
    T_ASSERT_NOTNULL(success, "spin trylock of previously unheld lock should succeed");
    success = lck_spin_try_lock(&lt_lck_spin_t);
    T_ASSERT_NULL(success, "spin trylock attempt of previously held lock (with trylock) should fail");
    lck_spin_unlock(&lt_lck_spin_t);

    lt_reset();
    lt_thread_lock_grabbed = false;
    lt_thread_lock_success = true;
    lt_target_done_threads = 1;
    lt_start_trylock_thread(lt_trylock_spin_try_lock);
    lck_spin_lock(&lt_lck_spin_t);
    if (real_ncpus == 1) {
        mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
    }
    OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
    lt_wait_for_lock_test_threads();
    T_ASSERT_NULL(lt_thread_lock_success, "spin trylock attempt of previously held lock should fail");
    if (real_ncpus == 1) {
        mp_disable_preemption(); /* don't double-enable when we unlock */
    }
    lck_spin_unlock(&lt_lck_spin_t);

    return KERN_SUCCESS;
}

static void
lt_thread(void *arg, wait_result_t wres __unused)
{
    void (*func)(void) = (void(*)(void)) arg;
    uint32_t i;

    for (i = 0; i < LOCK_TEST_ITERATIONS; i++) {
        func();
    }

    OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

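/*
 * Variant of lt_thread() that binds itself to a specific CPU (claimed by
 * atomically incrementing lt_cpu_bind_id) before running the test function
 * once.
 */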
static void
lt_bound_thread(void *arg, wait_result_t wres __unused)
{
    void (*func)(void) = (void(*)(void)) arg;

    int cpuid = OSIncrementAtomic((volatile SInt32 *)&lt_cpu_bind_id);

    processor_t processor = processor_list;
    while ((processor != NULL) && (processor->cpu_id != cpuid)) {
        processor = processor->processor_list;
    }

    if (processor != NULL) {
        thread_bind(processor);
    }

    thread_block(THREAD_CONTINUE_NULL);

    func();

    OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_start_lock_thread(thread_continue_t func)
{
    thread_t thread;
    kern_return_t kr;

    kr = kernel_thread_start(lt_thread, func, &thread);
    assert(kr == KERN_SUCCESS);

    thread_deallocate(thread);
}


static void
lt_start_lock_thread_bound(thread_continue_t func)
{
    thread_t thread;
    kern_return_t kr;

    kr = kernel_thread_start(lt_bound_thread, func, &thread);
    assert(kr == KERN_SUCCESS);

    thread_deallocate(thread);
}

static kern_return_t
lt_test_locks()
{
    kern_return_t kr = KERN_SUCCESS;
    lck_grp_attr_t *lga = lck_grp_attr_alloc_init();
    lck_grp_t *lg = lck_grp_alloc_init("lock test", lga);

    lck_mtx_init(&lt_mtx, lg, LCK_ATTR_NULL);
    lck_rw_init(&lt_rwlock, lg, LCK_ATTR_NULL);
    lck_spin_init(&lt_lck_spin_t, lg, LCK_ATTR_NULL);
    hw_lock_init(&lt_hw_lock);

    T_LOG("Testing locks.");

    /* Try locks (custom) */
    lt_reset();

    T_LOG("Running try lock test.");
    kr = lt_test_trylocks();
    T_EXPECT_NULL(kr, "try lock test failed.");

    /* Uncontended mutex */
    T_LOG("Running uncontended mutex test.");
    lt_reset();
    lt_target_done_threads = 1;
    lt_start_lock_thread(lt_grab_mutex);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

    /* Contended mutex */
    T_LOG("Running contended mutex test.");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_mutex);
    lt_start_lock_thread(lt_grab_mutex);
    lt_start_lock_thread(lt_grab_mutex);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

    /* Contended mutex: try locks*/
    T_LOG("Running contended mutex trylock test.");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_mutex_with_try);
    lt_start_lock_thread(lt_grab_mutex_with_try);
    lt_start_lock_thread(lt_grab_mutex_with_try);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

    /* Uncontended exclusive rwlock */
    T_LOG("Running uncontended exclusive rwlock test.");
    lt_reset();
    lt_target_done_threads = 1;
    lt_start_lock_thread(lt_grab_rw_exclusive);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

    /* Uncontended shared rwlock */

    /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
    T_LOG("Running uncontended shared rwlock test.");
    lt_reset();
    lt_target_done_threads = 1;
    lt_start_lock_thread(lt_grab_rw_shared);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
    */

    /* Contended exclusive rwlock */
    T_LOG("Running contended exclusive rwlock test.");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_rw_exclusive);
    lt_start_lock_thread(lt_grab_rw_exclusive);
    lt_start_lock_thread(lt_grab_rw_exclusive);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

    /* One shared, two exclusive */
    /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
    T_LOG("Running test with one shared and two exclusive rw lock threads.");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_rw_shared);
    lt_start_lock_thread(lt_grab_rw_exclusive);
    lt_start_lock_thread(lt_grab_rw_exclusive);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
    */

    /* Four shared */
    /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
    T_LOG("Running test with four shared holders.");
    lt_reset();
    lt_target_done_threads = 4;
    lt_start_lock_thread(lt_grab_rw_shared);
    lt_start_lock_thread(lt_grab_rw_shared);
    lt_start_lock_thread(lt_grab_rw_shared);
    lt_start_lock_thread(lt_grab_rw_shared);
    lt_wait_for_lock_test_threads();
    T_EXPECT_LE_UINT(lt_max_holders, 4, NULL);
    */

    /* Three doing upgrades and downgrades */
    T_LOG("Running test with threads upgrading and downgrading.");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_upgrade_downgrade_rw);
    lt_start_lock_thread(lt_upgrade_downgrade_rw);
    lt_start_lock_thread(lt_upgrade_downgrade_rw);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
    T_EXPECT_EQ_UINT(lt_max_upgrade_holders, 1, NULL);

    /* Uncontended - exclusive trylocks */
    T_LOG("Running test with single thread doing exclusive rwlock trylocks.");
    lt_reset();
    lt_target_done_threads = 1;
    lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

    /* Uncontended - shared trylocks */
    /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
    T_LOG("Running test with single thread doing shared rwlock trylocks.");
    lt_reset();
    lt_target_done_threads = 1;
    lt_start_lock_thread(lt_grab_rw_shared_with_try);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
    */

    /* Three doing exclusive trylocks */
    T_LOG("Running test with threads doing exclusive rwlock trylocks.");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
    lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
    lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

    /* Three doing shared trylocks */
    /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
    T_LOG("Running test with threads doing shared rwlock trylocks.");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_rw_shared_with_try);
    lt_start_lock_thread(lt_grab_rw_shared_with_try);
    lt_start_lock_thread(lt_grab_rw_shared_with_try);
    lt_wait_for_lock_test_threads();
    T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
    */

    /* Three doing various trylocks */
    /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
    T_LOG("Running test with threads doing mixed rwlock trylocks.");
    lt_reset();
    lt_target_done_threads = 4;
    lt_start_lock_thread(lt_grab_rw_shared_with_try);
    lt_start_lock_thread(lt_grab_rw_shared_with_try);
    lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
    lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
    lt_wait_for_lock_test_threads();
    T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_LE_UINT(lt_max_holders, 2, NULL);
    */

    /* HW locks */
    T_LOG("Running test with hw_lock_lock()");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_hw_lock);
    lt_start_lock_thread(lt_grab_hw_lock);
    lt_start_lock_thread(lt_grab_hw_lock);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

    /* HW locks stress test */
    T_LOG("Running HW locks stress test with hw_lock_lock()");
    extern unsigned int real_ncpus;
    lt_reset();
    lt_target_done_threads = real_ncpus;
    for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
        lt_start_lock_thread_bound(lt_stress_hw_lock);
    }
    lt_wait_for_lock_test_threads();
    bool starvation = false;
    uint total_local_count = 0;
    for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
        starvation = starvation || (lt_stress_local_counters[processor->cpu_id] < 10);
        total_local_count += lt_stress_local_counters[processor->cpu_id];
    }
    if (total_local_count != lt_counter) {
        T_FAIL("Lock failure\n");
    } else if (starvation) {
        T_FAIL("Lock starvation found\n");
    } else {
        T_PASS("HW locks stress test with hw_lock_lock()");
    }


    /* HW locks: trylocks */
    T_LOG("Running test with hw_lock_try()");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_hw_lock_with_try);
    lt_start_lock_thread(lt_grab_hw_lock_with_try);
    lt_start_lock_thread(lt_grab_hw_lock_with_try);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

    /* HW locks: with timeout */
    T_LOG("Running test with hw_lock_to()");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_hw_lock_with_to);
    lt_start_lock_thread(lt_grab_hw_lock_with_to);
    lt_start_lock_thread(lt_grab_hw_lock_with_to);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

    /* Spin locks */
    T_LOG("Running test with lck_spin_lock()");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_spin_lock);
    lt_start_lock_thread(lt_grab_spin_lock);
    lt_start_lock_thread(lt_grab_spin_lock);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

    /* Spin locks: trylocks */
    T_LOG("Running test with lck_spin_try_lock()");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_spin_lock_with_try);
    lt_start_lock_thread(lt_grab_spin_lock_with_try);
    lt_start_lock_thread(lt_grab_spin_lock_with_try);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

    return KERN_SUCCESS;
}

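/*
 * Munger tests. Each sy_munge_* routine expands a packed array of 32-bit
 * syscall arguments in place into 64-bit values; the table below records,
 * per routine, how many 32-bit words go in, how many 64-bit values come out,
 * and the expected results ('w' words zero-extend, 's' words sign-extend,
 * 'l' longs are taken as two back-to-back words).
 */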
#define MT_MAX_ARGS 8
#define MT_INITIAL_VALUE 0xfeedbeef
#define MT_W_VAL (0x00000000feedbeefULL) /* Drop in zeros */
#define MT_S_VAL (0xfffffffffeedbeefULL) /* High bit is 1, so sign-extends as negative */
#define MT_L_VAL (((uint64_t)MT_INITIAL_VALUE) | (((uint64_t)MT_INITIAL_VALUE) << 32)) /* Two back-to-back */

typedef void (*sy_munge_t)(void*);

#define MT_FUNC(x) #x, x
struct munger_test {
    const char *mt_name;
    sy_munge_t mt_func;
    uint32_t mt_in_words;
    uint32_t mt_nout;
    uint64_t mt_expected[MT_MAX_ARGS];
} munger_tests[] = {
    {MT_FUNC(munge_w), 1, 1, {MT_W_VAL}},
    {MT_FUNC(munge_ww), 2, 2, {MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_www), 3, 3, {MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwww), 4, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwww), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwww), 6, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwwww), 7, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwwwww), 8, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wl), 3, 2, {MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwl), 4, 3, {MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwlll), 8, 5, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wlw), 4, 3, {MT_W_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wlwwwll), 10, 7, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wlwwwllw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wlwwlwlw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wll), 5, 3, {MT_W_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wlll), 7, 4, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wllwwll), 11, 7, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwlw), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwlww), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwl), 5, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwwlw), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwl), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwwwl), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwwwlww), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwwllw), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwwlll), 11, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwwwwl), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwwwwlw), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwwwll), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wsw), 3, 3, {MT_W_VAL, MT_S_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wws), 3, 3, {MT_W_VAL, MT_W_VAL, MT_S_VAL}},
    {MT_FUNC(munge_wwwsw), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_S_VAL, MT_W_VAL}},
    {MT_FUNC(munge_llllll), 12, 6, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_l), 2, 1, {MT_L_VAL}},
    {MT_FUNC(munge_lw), 3, 2, {MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_lwww), 5, 4, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_lwwwwwww), 9, 8, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wlwwwl), 8, 6, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwlwwwl), 9, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}
};

#define MT_TEST_COUNT (sizeof(munger_tests) / sizeof(struct munger_test))

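/* Fill the first in_words 32-bit slots with MT_INITIAL_VALUE and zero the remainder of the buffer. */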
static void
mt_reset(uint32_t in_words, size_t total_size, uint32_t *data)
{
    uint32_t i;

    for (i = 0; i < in_words; i++) {
        data[i] = MT_INITIAL_VALUE;
    }

    if (in_words * sizeof(uint32_t) < total_size) {
        bzero(&data[in_words], total_size - in_words * sizeof(uint32_t));
    }
}

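/* Run each munger_tests[] entry: reset the input buffer, munge it in place, and compare the output against mt_expected. */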
static void
mt_test_mungers()
{
    uint64_t data[MT_MAX_ARGS];
    uint32_t i, j;

    for (i = 0; i < MT_TEST_COUNT; i++) {
        struct munger_test *test = &munger_tests[i];
        int pass = 1;

        T_LOG("Testing %s", test->mt_name);

        mt_reset(test->mt_in_words, sizeof(data), (uint32_t*)data);
        test->mt_func(data);

        for (j = 0; j < test->mt_nout; j++) {
            if (data[j] != test->mt_expected[j]) {
                T_FAIL("Index %d: expected %llx, got %llx.", j, test->mt_expected[j], data[j]);
                pass = 0;
            }
        }
        if (pass) {
            T_PASS(test->mt_name);
        }
    }
}

/* Exception Callback Test */
static ex_cb_action_t excb_test_action(
    ex_cb_class_t cb_class,
    void *refcon,
    const ex_cb_state_t *state
    )
{
    ex_cb_state_t *context = (ex_cb_state_t *)refcon;

    if ((NULL == refcon) || (NULL == state))
    {
        return EXCB_ACTION_TEST_FAIL;
    }

    context->far = state->far;

    switch (cb_class)
    {
        case EXCB_CLASS_TEST1:
            return EXCB_ACTION_RERUN;
        case EXCB_CLASS_TEST2:
            return EXCB_ACTION_NONE;
        default:
            return EXCB_ACTION_TEST_FAIL;
    }
}


kern_return_t
ex_cb_test()
{
    const vm_offset_t far1 = 0xdead0001;
    const vm_offset_t far2 = 0xdead0002;
    kern_return_t kr;
    ex_cb_state_t test_context_1 = {0xdeadbeef};
    ex_cb_state_t test_context_2 = {0xdeadbeef};
    ex_cb_action_t action;

    T_LOG("Testing Exception Callback.");

    T_LOG("Running registration test.");

    kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
    T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST1 exception callback");
    kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
    T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST2 exception callback");

    kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
    T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST2 exception callback");
    kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
    T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST1 exception callback");

    T_LOG("Running invocation test.");

    action = ex_cb_invoke(EXCB_CLASS_TEST1, far1);
    T_ASSERT(EXCB_ACTION_RERUN == action, NULL);
    T_ASSERT(far1 == test_context_1.far, NULL);

    action = ex_cb_invoke(EXCB_CLASS_TEST2, far2);
    T_ASSERT(EXCB_ACTION_NONE == action, NULL);
    T_ASSERT(far2 == test_context_2.far, NULL);

    action = ex_cb_invoke(EXCB_CLASS_TEST3, 0);
    T_ASSERT(EXCB_ACTION_NONE == action, NULL);

    return KERN_SUCCESS;
}


#if __ARM_PAN_AVAILABLE__
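/*
 * PAN (Privileged Access Never) test. Reads a user-accessible comm page
 * address so that the resulting PAN fault is handled (the handler recognizes
 * pan_test_addr, disables PAN and reruns the access), then forces a
 * permission fault on a read-only kernel address to verify that PAN is
 * re-enabled on the exception path.
 */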
kern_return_t
arm64_pan_test()
{
    vm_offset_t priv_addr = _COMM_PAGE_SIGNATURE;

    T_LOG("Testing PAN.");

    T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);

    pan_exception_level = 0;
    pan_fault_value = 0xDE;
    // convert priv_addr to one that is accessible from user mode
    pan_test_addr = priv_addr + _COMM_HIGH_PAGE64_BASE_ADDRESS -
        _COMM_PAGE_START_ADDRESS;

    // Below should trigger a PAN exception as pan_test_addr is accessible
    // in user mode
    // The exception handler, upon recognizing the fault address is pan_test_addr,
    // will disable PAN and rerun this instruction successfully
    T_ASSERT(*(char *)pan_test_addr == *(char *)priv_addr, NULL);

    T_ASSERT(pan_exception_level == 2, NULL);

    T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

    T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

    pan_exception_level = 0;
    pan_fault_value = 0xAD;
    pan_ro_addr = (vm_offset_t) &pan_ro_value;

    // Force a permission fault while PAN is disabled to make sure PAN is
    // re-enabled during the exception handler.
    *((volatile uint64_t*)pan_ro_addr) = 0xFEEDFACECAFECAFE;

    T_ASSERT(pan_exception_level == 2, NULL);

    T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

    T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

    pan_test_addr = 0;
    pan_ro_addr = 0;

    __builtin_arm_wsr("pan", 1);
    return KERN_SUCCESS;
}
#endif


kern_return_t
arm64_lock_test()
{
    return lt_test_locks();
}

kern_return_t
arm64_munger_test()
{
    mt_test_mungers();
    return 0;
}