/*
 * Copyright (c) 2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System Copyright (c) 1991,1990,1989,1988,1987 Carnegie
 * Mellon University All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright notice
 * and this permission notice appear in all copies of the software,
 * derivative works or modified versions, and any portions thereof, and that
 * both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.
 * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
 * WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 * School of Computer Science Carnegie Mellon University Pittsburgh PA
 * 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon the
 * rights to redistribute these changes.
 */

#include <mach_ldebug.h>

#define LOCK_PRIVATE 1

#include <kern/kalloc.h>
#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/xpr.h>
#include <kern/debug.h>
#include <string.h>
#include <tests/xnupost.h>

#if MACH_KDB
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
#include <ddb/db_print.h>
#endif /* MACH_KDB */

#include <sys/kdebug.h>
#include <sys/munge.h>
#include <machine/cpu_capabilities.h>
#include <arm/cpu_data_internal.h>

kern_return_t arm64_lock_test(void);
kern_return_t arm64_munger_test(void);
kern_return_t ex_cb_test(void);
kern_return_t arm64_pan_test(void);

// exception handler ignores this fault address during PAN test
#if __ARM_PAN_AVAILABLE__
const uint64_t pan_ro_value = 0xFEEDB0B0DEADBEEF;
vm_offset_t pan_test_addr = 0;
vm_offset_t pan_ro_addr = 0;
volatile int pan_exception_level = 0;
volatile char pan_fault_value = 0;
#endif

#include <libkern/OSAtomic.h>
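/*
 * Shared state for the lock tests. lt_counter counts successful
 * critical-section entries, lt_num_holders / lt_max_holders track how many
 * threads hold a blocking lock at once, and lt_done_threads /
 * lt_target_done_threads let the main thread wait for worker completion.
 */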
#define LOCK_TEST_ITERATIONS 50
static hw_lock_data_t lt_hw_lock;
static lck_spin_t lt_lck_spin_t;
static lck_mtx_t lt_mtx;
static lck_rw_t lt_rwlock;
static volatile uint32_t lt_counter = 0;
static volatile int lt_spinvolatile;
static volatile uint32_t lt_max_holders = 0;
static volatile uint32_t lt_upgrade_holders = 0;
static volatile uint32_t lt_max_upgrade_holders = 0;
static volatile uint32_t lt_num_holders = 0;
static volatile uint32_t lt_done_threads;
static volatile uint32_t lt_target_done_threads;
static volatile uint32_t lt_cpu_bind_id = 0;

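/*
 * Bookkeeping for blocking-lock holders: bump/drop lt_num_holders under
 * lt_hw_lock and track the high-water mark (lt_max_holders) that the
 * assertions in lt_test_locks() check.
 */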
static void
lt_note_another_blocking_lock_holder()
{
	hw_lock_lock(&lt_hw_lock);
	lt_num_holders++;
	lt_max_holders = (lt_max_holders < lt_num_holders) ? lt_num_holders : lt_max_holders;
	hw_lock_unlock(&lt_hw_lock);
}

static void
lt_note_blocking_lock_release()
{
	hw_lock_lock(&lt_hw_lock);
	lt_num_holders--;
	hw_lock_unlock(&lt_hw_lock);
}

static void
lt_spin_a_little_bit()
{
	uint32_t i;

	for (i = 0; i < 10000; i++) {
		lt_spinvolatile++;
	}
}

static void
lt_sleep_a_little_bit()
{
	delay(100);
}

static void
lt_grab_mutex()
{
	lck_mtx_lock(&lt_mtx);
	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_mtx_unlock(&lt_mtx);
}

static void
lt_grab_mutex_with_try()
{
	while(0 == lck_mtx_try_lock(&lt_mtx));
	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_mtx_unlock(&lt_mtx);

}

static void
lt_grab_rw_exclusive()
{
	lck_rw_lock_exclusive(&lt_rwlock);
	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_rw_done(&lt_rwlock);
}

static void
lt_grab_rw_exclusive_with_try()
{
	while(0 == lck_rw_try_lock_exclusive(&lt_rwlock)) {
		lt_sleep_a_little_bit();
	}

	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_rw_done(&lt_rwlock);
}

/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
static void
lt_grab_rw_shared()
{
	lck_rw_lock_shared(&lt_rwlock);
	lt_counter++;

	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_note_blocking_lock_release();

	lck_rw_done(&lt_rwlock);
}
*/

/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
static void
lt_grab_rw_shared_with_try()
{
	while(0 == lck_rw_try_lock_shared(&lt_rwlock));
	lt_counter++;

	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_note_blocking_lock_release();

	lck_rw_done(&lt_rwlock);
}
*/

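/*
 * Take the rwlock shared, attempt an upgrade to exclusive (falling back to a
 * fresh exclusive acquisition if the upgrade fails), then downgrade back to
 * shared before releasing; lt_max_upgrade_holders should never exceed 1.
 */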
static void
lt_upgrade_downgrade_rw()
{
	boolean_t upgraded, success;

	success = lck_rw_try_lock_shared(&lt_rwlock);
	if (!success) {
		lck_rw_lock_shared(&lt_rwlock);
	}

	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_note_blocking_lock_release();

	upgraded = lck_rw_lock_shared_to_exclusive(&lt_rwlock);
	if (!upgraded) {
		success = lck_rw_try_lock_exclusive(&lt_rwlock);

		if (!success) {
			lck_rw_lock_exclusive(&lt_rwlock);
		}
	}

	lt_upgrade_holders++;
	if (lt_upgrade_holders > lt_max_upgrade_holders) {
		lt_max_upgrade_holders = lt_upgrade_holders;
	}

	lt_counter++;
	lt_sleep_a_little_bit();

	lt_upgrade_holders--;

	lck_rw_lock_exclusive_to_shared(&lt_rwlock);

	lt_spin_a_little_bit();
	lck_rw_done(&lt_rwlock);
}

const int limit = 1000000;
static int lt_stress_local_counters[MAX_CPUS];

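/*
 * Stress body run by one bound thread per CPU: once all threads have checked
 * in, each repeatedly takes lt_hw_lock at splsched() and increments the
 * shared counter until it reaches 'limit', keeping a per-CPU count so the
 * caller can detect both lost updates and starvation.
 */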
static void
lt_stress_hw_lock()
{
	int local_counter = 0;

	uint cpuid = current_processor()->cpu_id;

	kprintf("%s>cpu %d starting\n", __FUNCTION__, cpuid);

	hw_lock_lock(&lt_hw_lock);
	lt_counter++;
	local_counter++;
	hw_lock_unlock(&lt_hw_lock);

	while (lt_counter < lt_target_done_threads) {
		;
	}

	kprintf("%s>cpu %d started\n", __FUNCTION__, cpuid);

	while (lt_counter < limit) {
		spl_t s = splsched();
		hw_lock_lock(&lt_hw_lock);
		if (lt_counter < limit) {
			lt_counter++;
			local_counter++;
		}
		hw_lock_unlock(&lt_hw_lock);
		splx(s);
	}

	lt_stress_local_counters[cpuid] = local_counter;

	kprintf("%s>final counter %d cpu %d incremented the counter %d times\n", __FUNCTION__, lt_counter, cpuid, local_counter);
}

static void
lt_grab_hw_lock()
{
	hw_lock_lock(&lt_hw_lock);
	lt_counter++;
	lt_spin_a_little_bit();
	hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_hw_lock_with_try()
{
	while(0 == hw_lock_try(&lt_hw_lock));
	lt_counter++;
	lt_spin_a_little_bit();
	hw_lock_unlock(&lt_hw_lock);
}

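/*
 * Acquire lt_hw_lock with a timeout, re-enabling preemption after each
 * timed-out attempt before retrying.
 */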
static void
lt_grab_hw_lock_with_to()
{
	while(0 == hw_lock_to(&lt_hw_lock, LockTimeOut))
		mp_enable_preemption();
	lt_counter++;
	lt_spin_a_little_bit();
	hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_spin_lock()
{
	lck_spin_lock(&lt_lck_spin_t);
	lt_counter++;
	lt_spin_a_little_bit();
	lck_spin_unlock(&lt_lck_spin_t);
}

static void
lt_grab_spin_lock_with_try()
{
	while(0 == lck_spin_try_lock(&lt_lck_spin_t));
	lt_counter++;
	lt_spin_a_little_bit();
	lck_spin_unlock(&lt_lck_spin_t);
}

static volatile boolean_t lt_thread_lock_grabbed;
static volatile boolean_t lt_thread_lock_success;

static void
lt_reset()
{
	lt_counter = 0;
	lt_max_holders = 0;
	lt_num_holders = 0;
	lt_max_upgrade_holders = 0;
	lt_upgrade_holders = 0;
	lt_done_threads = 0;
	lt_target_done_threads = 0;
	lt_cpu_bind_id = 0;

	OSMemoryBarrier();
}

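/*
 * Helpers run on a separate thread by lt_test_trylocks(): each spins until
 * the main thread signals (via lt_thread_lock_grabbed) that it holds the
 * lock, then records whether its own timed or try acquisition succeeded in
 * lt_thread_lock_success.
 */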
static void
lt_trylock_hw_lock_with_to()
{
	OSMemoryBarrier();
	while (!lt_thread_lock_grabbed) {
		lt_sleep_a_little_bit();
		OSMemoryBarrier();
	}
	lt_thread_lock_success = hw_lock_to(&lt_hw_lock, 100);
	OSMemoryBarrier();
	mp_enable_preemption();
}

static void
lt_trylock_spin_try_lock()
{
	OSMemoryBarrier();
	while (!lt_thread_lock_grabbed) {
		lt_sleep_a_little_bit();
		OSMemoryBarrier();
	}
	lt_thread_lock_success = lck_spin_try_lock(&lt_lck_spin_t);
	OSMemoryBarrier();
}

static void
lt_trylock_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void(*)(void))arg;

	func();

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_start_trylock_thread(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_trylock_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}

static void
lt_wait_for_lock_test_threads()
{
	OSMemoryBarrier();
	/* Spin to reduce dependencies */
	while (lt_done_threads < lt_target_done_threads) {
		lt_sleep_a_little_bit();
		OSMemoryBarrier();
	}
	OSMemoryBarrier();
}

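/*
 * Single-threaded try-lock checks: a try of an unheld mutex, rwlock, spin
 * lock or hw lock succeeds, and a second try of the now-held lock fails.
 * For the timeout and cross-thread cases the main thread holds the lock
 * while a helper started via lt_start_trylock_thread() attempts it and is
 * expected to fail.
 */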
static kern_return_t
lt_test_trylocks()
{
	boolean_t success;
	extern unsigned int real_ncpus;

	/*
	 * First mtx try lock succeeds, second fails.
	 */
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NOTNULL(success, "First mtx try lock");
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NULL(success, "Second mtx try lock for a locked mtx");
	lck_mtx_unlock(&lt_mtx);

	/*
	 * After regular grab, can't try lock.
	 */
	lck_mtx_lock(&lt_mtx);
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NULL(success, "try lock should fail after regular lck_mtx_lock");
	lck_mtx_unlock(&lt_mtx);

	/*
	 * Two shared try locks on a previously unheld rwlock succeed, and a
	 * subsequent exclusive attempt fails.
	 */
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "exclusive lock attempt on previously held lock should fail");
	lck_rw_done(&lt_rwlock);
	lck_rw_done(&lt_rwlock);

	/*
	 * After regular shared grab, can trylock
	 * for shared but not for exclusive.
	 */
	lck_rw_lock_shared(&lt_rwlock);
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "After regular shared grab another shared try lock should succeed.");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular shared grab an exclusive lock attempt should fail.");
	lck_rw_done(&lt_rwlock);
	lck_rw_done(&lt_rwlock);

	/*
	 * An exclusive try lock succeeds, subsequent shared and exclusive
	 * attempts fail.
	 */
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "An exclusive try lock should succeed");
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NULL(success, "try lock in shared mode attempt after an exclusive grab should fail");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "try lock in exclusive mode attempt after an exclusive grab should fail");
	lck_rw_done(&lt_rwlock);

	/*
	 * After regular exclusive grab, neither kind of trylock succeeds.
	 */
	lck_rw_lock_exclusive(&lt_rwlock);
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular exclusive grab, shared trylock should not succeed");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular exclusive grab, exclusive trylock should not succeed");
	lck_rw_done(&lt_rwlock);

	/*
	 * First spin lock attempts succeed, second attempts fail.
	 */
	success = hw_lock_try(&lt_hw_lock);
	T_ASSERT_NOTNULL(success, "First spin lock attempts should succeed");
	success = hw_lock_try(&lt_hw_lock);
	T_ASSERT_NULL(success, "Second attempt to spin lock should fail");
	hw_lock_unlock(&lt_hw_lock);

	hw_lock_lock(&lt_hw_lock);
	success = hw_lock_try(&lt_hw_lock);
	T_ASSERT_NULL(success, "After taking spin lock, trylock attempt should fail");
	hw_lock_unlock(&lt_hw_lock);

	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	OSMemoryBarrier();
	lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
	success = hw_lock_to(&lt_hw_lock, 100);
	T_ASSERT_NOTNULL(success, "First spin lock with timeout should succeed");
	if (real_ncpus == 1) {
		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
	}
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "Second spin lock with timeout should fail and timeout");
	if (real_ncpus == 1) {
		mp_disable_preemption(); /* don't double-enable when we unlock */
	}
	hw_lock_unlock(&lt_hw_lock);

	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	OSMemoryBarrier();
	lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
	hw_lock_lock(&lt_hw_lock);
	if (real_ncpus == 1) {
		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
	}
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "after taking a spin lock, lock attempt with timeout should fail");
	if (real_ncpus == 1) {
		mp_disable_preemption(); /* don't double-enable when we unlock */
	}
	hw_lock_unlock(&lt_hw_lock);

	success = lck_spin_try_lock(&lt_lck_spin_t);
	T_ASSERT_NOTNULL(success, "spin trylock of previously unheld lock should succeed");
	success = lck_spin_try_lock(&lt_lck_spin_t);
	T_ASSERT_NULL(success, "spin trylock attempt of previously held lock (with trylock) should fail");
	lck_spin_unlock(&lt_lck_spin_t);

	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	lt_start_trylock_thread(lt_trylock_spin_try_lock);
	lck_spin_lock(&lt_lck_spin_t);
	if (real_ncpus == 1) {
		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
	}
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "spin trylock attempt of previously held lock should fail");
	if (real_ncpus == 1) {
		mp_disable_preemption(); /* don't double-enable when we unlock */
	}
	lck_spin_unlock(&lt_lck_spin_t);

	return KERN_SUCCESS;
}

static void
lt_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void(*)(void)) arg;
	uint32_t i;

	for (i = 0; i < LOCK_TEST_ITERATIONS; i++) {
		func();
	}

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

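/*
 * Worker that binds itself to the processor chosen by an atomically assigned
 * index before running its test function once; used by the hw lock stress
 * test to place one thread on each CPU.
 */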
static void
lt_bound_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void(*)(void)) arg;

	int cpuid = OSIncrementAtomic((volatile SInt32 *)&lt_cpu_bind_id);

	processor_t processor = processor_list;
	while ((processor != NULL) && (processor->cpu_id != cpuid)) {
		processor = processor->processor_list;
	}

	if (processor != NULL) {
		thread_bind(processor);
	}

	thread_block(THREAD_CONTINUE_NULL);

	func();

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_start_lock_thread(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}


static void
lt_start_lock_thread_bound(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_bound_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}

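/*
 * Top-level lock test: initializes the test locks from a dedicated lock
 * group, runs the try-lock checks, then the uncontended, contended, trylock,
 * upgrade/downgrade and stress scenarios, asserting on the shared counters
 * after each one.
 */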
static kern_return_t
lt_test_locks()
{
	kern_return_t kr = KERN_SUCCESS;
	lck_grp_attr_t *lga = lck_grp_attr_alloc_init();
	lck_grp_t *lg = lck_grp_alloc_init("lock test", lga);

	lck_mtx_init(&lt_mtx, lg, LCK_ATTR_NULL);
	lck_rw_init(&lt_rwlock, lg, LCK_ATTR_NULL);
	lck_spin_init(&lt_lck_spin_t, lg, LCK_ATTR_NULL);
	hw_lock_init(&lt_hw_lock);

	T_LOG("Testing locks.");

	/* Try locks (custom) */
	lt_reset();

	T_LOG("Running try lock test.");
	kr = lt_test_trylocks();
	T_EXPECT_NULL(kr, "try lock test failed.");

	/* Uncontended mutex */
	T_LOG("Running uncontended mutex test.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_mutex);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Contended mutex */
	T_LOG("Running contended mutex test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_mutex);
	lt_start_lock_thread(lt_grab_mutex);
	lt_start_lock_thread(lt_grab_mutex);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Contended mutex: try locks */
	T_LOG("Running contended mutex trylock test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Uncontended exclusive rwlock */
	T_LOG("Running uncontended exclusive rwlock test.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Uncontended shared rwlock */

	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	T_LOG("Running uncontended shared rwlock test.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_rw_shared);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	*/

	/* Contended exclusive rwlock */
	T_LOG("Running contended exclusive rwlock test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* One shared, two exclusive */
	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	T_LOG("Running test with one shared and two exclusive rw lock threads.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_rw_shared);
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	*/

	/* Four shared */
	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	T_LOG("Running test with four shared holders.");
	lt_reset();
	lt_target_done_threads = 4;
	lt_start_lock_thread(lt_grab_rw_shared);
	lt_start_lock_thread(lt_grab_rw_shared);
	lt_start_lock_thread(lt_grab_rw_shared);
	lt_start_lock_thread(lt_grab_rw_shared);
	lt_wait_for_lock_test_threads();
	T_EXPECT_LE_UINT(lt_max_holders, 4, NULL);
	*/

	/* Three doing upgrades and downgrades */
	T_LOG("Running test with threads upgrading and downgrading.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
	T_EXPECT_EQ_UINT(lt_max_upgrade_holders, 1, NULL);

	/* Uncontended - exclusive trylocks */
	T_LOG("Running test with single thread doing exclusive rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Uncontended - shared trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	T_LOG("Running test with single thread doing shared rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_rw_shared_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	*/

	/* Three doing exclusive trylocks */
	T_LOG("Running test with threads doing exclusive rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Three doing shared trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	T_LOG("Running test with threads doing shared rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_rw_shared_with_try);
	lt_start_lock_thread(lt_grab_rw_shared_with_try);
	lt_start_lock_thread(lt_grab_rw_shared_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
	*/

	/* Three doing various trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	T_LOG("Running test with threads doing mixed rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 4;
	lt_start_lock_thread(lt_grab_rw_shared_with_try);
	lt_start_lock_thread(lt_grab_rw_shared_with_try);
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_LE_UINT(lt_max_holders, 2, NULL);
	*/

	/* HW locks */
	T_LOG("Running test with hw_lock_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* HW locks stress test */
	T_LOG("Running HW locks stress test with hw_lock_lock()");
	extern unsigned int real_ncpus;
	lt_reset();
	lt_target_done_threads = real_ncpus;
	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
		lt_start_lock_thread_bound(lt_stress_hw_lock);
	}
	lt_wait_for_lock_test_threads();
	bool starvation = false;
	uint total_local_count = 0;
	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
		starvation = starvation || (lt_stress_local_counters[processor->cpu_id] < 10);
		total_local_count += lt_stress_local_counters[processor->cpu_id];
	}
	if (total_local_count != lt_counter) {
		T_FAIL("Lock failure\n");
	} else if (starvation) {
		T_FAIL("Lock starvation found\n");
	} else {
		T_PASS("HW locks stress test with hw_lock_lock()");
	}


	/* HW locks: trylocks */
	T_LOG("Running test with hw_lock_try()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* HW locks: with timeout */
	T_LOG("Running test with hw_lock_to()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* Spin locks */
	T_LOG("Running test with lck_spin_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* Spin locks: trylocks */
	T_LOG("Running test with lck_spin_try_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	return KERN_SUCCESS;
}

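/*
 * Munger tests. Argument mungers (see sys/munge.h) rewrite, in place, an
 * array of 32-bit user arguments into the 64-bit layout the kernel expects:
 * 'w' words are zero-extended, 's' words are sign-extended, and 'l' longs
 * are built from two adjacent 32-bit words. Each entry lists the input word
 * count, the expected output count, and the expected 64-bit values.
 * For example, munge_wl() consumes three input words
 *     { 0xfeedbeef, 0xfeedbeef, 0xfeedbeef }
 * and should produce the two 64-bit arguments
 *     { 0x00000000feedbeef, 0xfeedbeeffeedbeef }.
 */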
#define MT_MAX_ARGS 8
#define MT_INITIAL_VALUE 0xfeedbeef
#define MT_W_VAL (0x00000000feedbeefULL) /* Drop in zeros */
#define MT_S_VAL (0xfffffffffeedbeefULL) /* High bit is 1, so sign-extends as negative */
#define MT_L_VAL (((uint64_t)MT_INITIAL_VALUE) | (((uint64_t)MT_INITIAL_VALUE) << 32)) /* Two back-to-back */

typedef void (*sy_munge_t)(void*);

#define MT_FUNC(x) #x, x
struct munger_test {
	const char *mt_name;
	sy_munge_t mt_func;
	uint32_t mt_in_words;
	uint32_t mt_nout;
	uint64_t mt_expected[MT_MAX_ARGS];
} munger_tests[] = {
	{MT_FUNC(munge_w), 1, 1, {MT_W_VAL}},
	{MT_FUNC(munge_ww), 2, 2, {MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_www), 3, 3, {MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwww), 4, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwww), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwww), 6, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwww), 7, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwwww), 8, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wl), 3, 2, {MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwl), 4, 3, {MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwlll), 8, 5, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wlw), 4, 3, {MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wlwwwll), 10, 7, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wlwwwllw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wlwwlwlw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wll), 5, 3, {MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wlll), 7, 4, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wllwwll), 11, 7, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwlw), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwlww), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwl), 5, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwlw), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwl), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwl), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwlww), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwllw), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwlll), 11, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwwl), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwwlw), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwwll), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wsw), 3, 3, {MT_W_VAL, MT_S_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wws), 3, 3, {MT_W_VAL, MT_W_VAL, MT_S_VAL}},
	{MT_FUNC(munge_wwwsw), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_S_VAL, MT_W_VAL}},
	{MT_FUNC(munge_llllll), 12, 6, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_l), 2, 1, {MT_L_VAL}},
	{MT_FUNC(munge_lw), 3, 2, {MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_lwww), 5, 4, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_lwwwwwww), 9, 8, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wlwwwl), 8, 6, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwlwwwl), 9, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}
};

#define MT_TEST_COUNT (sizeof(munger_tests) / sizeof(struct munger_test))

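/*
 * Fill the first in_words 32-bit slots of the argument buffer with the
 * sentinel value and zero the remainder of the buffer.
 */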
static void
mt_reset(uint32_t in_words, size_t total_size, uint32_t *data)
{
	uint32_t i;

	for (i = 0; i < in_words; i++) {
		data[i] = MT_INITIAL_VALUE;
	}

	if (in_words * sizeof(uint32_t) < total_size) {
		bzero(&data[in_words], total_size - in_words * sizeof(uint32_t));
	}
}

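/*
 * Run every entry in munger_tests: reset the buffer, invoke the munger in
 * place, and compare each 64-bit output slot against mt_expected.
 */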
static void
mt_test_mungers()
{
	uint64_t data[MT_MAX_ARGS];
	uint32_t i, j;

	for (i = 0; i < MT_TEST_COUNT; i++) {
		struct munger_test *test = &munger_tests[i];
		int pass = 1;

		T_LOG("Testing %s", test->mt_name);

		mt_reset(test->mt_in_words, sizeof(data), (uint32_t*)data);
		test->mt_func(data);

		for (j = 0; j < test->mt_nout; j++) {
			if (data[j] != test->mt_expected[j]) {
				T_FAIL("Index %d: expected %llx, got %llx.", j, test->mt_expected[j], data[j]);
				pass = 0;
			}
		}
		if (pass) {
			T_PASS(test->mt_name);
		}
	}
}

/* Exception Callback Test */
static ex_cb_action_t excb_test_action(
	ex_cb_class_t cb_class,
	void *refcon,
	const ex_cb_state_t *state
	)
{
	ex_cb_state_t *context = (ex_cb_state_t *)refcon;

	if ((NULL == refcon) || (NULL == state))
	{
		return EXCB_ACTION_TEST_FAIL;
	}

	context->far = state->far;

	switch (cb_class)
	{
	case EXCB_CLASS_TEST1:
		return EXCB_ACTION_RERUN;
	case EXCB_CLASS_TEST2:
		return EXCB_ACTION_NONE;
	default:
		return EXCB_ACTION_TEST_FAIL;
	}
}


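/*
 * Exception callback test: verifies that each test class can be registered
 * exactly once, that invoking a registered class runs the callback (which
 * records the fault address and returns its class-specific action), and
 * that an unregistered class falls back to EXCB_ACTION_NONE.
 */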
kern_return_t
ex_cb_test()
{
	const vm_offset_t far1 = 0xdead0001;
	const vm_offset_t far2 = 0xdead0002;
	kern_return_t kr;
	ex_cb_state_t test_context_1 = {0xdeadbeef};
	ex_cb_state_t test_context_2 = {0xdeadbeef};
	ex_cb_action_t action;

	T_LOG("Testing Exception Callback.");

	T_LOG("Running registration test.");

	kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
	T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST1 exception callback");
	kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
	T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST2 exception callback");

	kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
	T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST2 exception callback");
	kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
	T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST1 exception callback");

	T_LOG("Running invocation test.");

	action = ex_cb_invoke(EXCB_CLASS_TEST1, far1);
	T_ASSERT(EXCB_ACTION_RERUN == action, NULL);
	T_ASSERT(far1 == test_context_1.far, NULL);

	action = ex_cb_invoke(EXCB_CLASS_TEST2, far2);
	T_ASSERT(EXCB_ACTION_NONE == action, NULL);
	T_ASSERT(far2 == test_context_2.far, NULL);

	action = ex_cb_invoke(EXCB_CLASS_TEST3, 0);
	T_ASSERT(EXCB_ACTION_NONE == action, NULL);

	return KERN_SUCCESS;
}

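/*
 * PAN (Privileged Access Never) test: reads a user-accessible alias of a
 * commpage address, which should fault with PAN enabled; the exception
 * handler recognizes pan_test_addr, disables PAN and reruns the access.
 * A subsequent store to a read-only kernel variable then forces a
 * permission fault while PAN is disabled, exercising PAN re-enablement in
 * the exception handler. PAN is restored before returning.
 */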
#if __ARM_PAN_AVAILABLE__
kern_return_t
arm64_pan_test()
{
	vm_offset_t priv_addr = _COMM_PAGE_SIGNATURE;

	T_LOG("Testing PAN.");

	T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);

	pan_exception_level = 0;
	pan_fault_value = 0xDE;
	// convert priv_addr to one that is accessible from user mode
	pan_test_addr = priv_addr + _COMM_HIGH_PAGE64_BASE_ADDRESS -
		_COMM_PAGE_START_ADDRESS;

	// Below should trigger a PAN exception as pan_test_addr is accessible
	// in user mode
	// The exception handler, upon recognizing the fault address is pan_test_addr,
	// will disable PAN and rerun this instruction successfully
	T_ASSERT(*(char *)pan_test_addr == *(char *)priv_addr, NULL);

	T_ASSERT(pan_exception_level == 2, NULL);

	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

	T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

	pan_exception_level = 0;
	pan_fault_value = 0xAD;
	pan_ro_addr = (vm_offset_t) &pan_ro_value;

	// Force a permission fault while PAN is disabled to make sure PAN is
	// re-enabled during the exception handler.
	*((volatile uint64_t*)pan_ro_addr) = 0xFEEDFACECAFECAFE;

	T_ASSERT(pan_exception_level == 2, NULL);

	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

	T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

	pan_test_addr = 0;
	pan_ro_addr = 0;

	__builtin_arm_wsr("pan", 1);
	return KERN_SUCCESS;
}
#endif


kern_return_t
arm64_lock_test()
{
	return lt_test_locks();
}

kern_return_t
arm64_munger_test()
{
	mt_test_mungers();
	return 0;
}