/*
 * Copyright (c) 2011-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System Copyright (c) 1991,1990,1989,1988,1987 Carnegie
 * Mellon University All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright notice
 * and this permission notice appear in all copies of the software,
 * derivative works or modified versions, and any portions thereof, and that
 * both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.
 * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
 * WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 * School of Computer Science Carnegie Mellon University Pittsburgh PA
 * 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon the
 * rights to redistribute these changes.
 */

#include <mach_ldebug.h>

#define LOCK_PRIVATE 1

#include <vm/pmap.h>
#include <kern/kalloc.h>
#include <kern/cpu_number.h>
#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>
#include <string.h>
#include <tests/xnupost.h>

#if MACH_KDB
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
#include <ddb/db_print.h>
#endif /* MACH_KDB */

#include <sys/kdebug.h>
#include <sys/munge.h>
#include <machine/cpu_capabilities.h>
#include <arm/cpu_data_internal.h>
#include <arm/pmap.h>

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
#include <arm64/amcc_rorgn.h>
#endif // defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)

kern_return_t arm64_lock_test(void);
kern_return_t arm64_munger_test(void);
kern_return_t ex_cb_test(void);
kern_return_t arm64_pan_test(void);
kern_return_t arm64_late_pan_test(void);
#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
kern_return_t arm64_ropjop_test(void);
#endif
#if defined(KERNEL_INTEGRITY_CTRR)
kern_return_t ctrr_test(void);
kern_return_t ctrr_test_cpu(void);
#endif
#if HAS_TWO_STAGE_SPR_LOCK
kern_return_t arm64_spr_lock_test(void);
extern void arm64_msr_lock_test(uint64_t);
#endif

// exception handler ignores this fault address during PAN test
#if __ARM_PAN_AVAILABLE__
const uint64_t pan_ro_value = 0xFEEDB0B0DEADBEEF;
vm_offset_t pan_test_addr = 0;
vm_offset_t pan_ro_addr = 0;
volatile int pan_exception_level = 0;
volatile char pan_fault_value = 0;
#endif

#include <libkern/OSAtomic.h>
#define LOCK_TEST_ITERATIONS 50
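/*
 * Shared state for the lock tests below: lt_counter counts total lock
 * acquisitions across all test threads, lt_num_holders/lt_max_holders
 * track how many threads hold a blocking lock simultaneously (expected
 * to peak at 1 for exclusive locks), and lt_done_threads is compared
 * against lt_target_done_threads to detect test completion.
 */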
static hw_lock_data_t lt_hw_lock;
static lck_spin_t lt_lck_spin_t;
static lck_mtx_t lt_mtx;
static lck_rw_t lt_rwlock;
static volatile uint32_t lt_counter = 0;
static volatile int lt_spinvolatile;
static volatile uint32_t lt_max_holders = 0;
static volatile uint32_t lt_upgrade_holders = 0;
static volatile uint32_t lt_max_upgrade_holders = 0;
static volatile uint32_t lt_num_holders = 0;
static volatile uint32_t lt_done_threads;
static volatile uint32_t lt_target_done_threads;
static volatile uint32_t lt_cpu_bind_id = 0;

static void
lt_note_another_blocking_lock_holder()
{
	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	lt_num_holders++;
	lt_max_holders = (lt_max_holders < lt_num_holders) ? lt_num_holders : lt_max_holders;
	hw_lock_unlock(&lt_hw_lock);
}

static void
lt_note_blocking_lock_release()
{
	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	lt_num_holders--;
	hw_lock_unlock(&lt_hw_lock);
}

static void
lt_spin_a_little_bit()
{
	uint32_t i;

	for (i = 0; i < 10000; i++) {
		lt_spinvolatile++;
	}
}

static void
lt_sleep_a_little_bit()
{
	delay(100);
}

static void
lt_grab_mutex()
{
	lck_mtx_lock(&lt_mtx);
	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_mtx_unlock(&lt_mtx);
}

static void
lt_grab_mutex_with_try()
{
	while (0 == lck_mtx_try_lock(&lt_mtx)) {
		;
	}
	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_mtx_unlock(&lt_mtx);
}

static void
lt_grab_rw_exclusive()
{
	lck_rw_lock_exclusive(&lt_rwlock);
	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_rw_done(&lt_rwlock);
}

static void
lt_grab_rw_exclusive_with_try()
{
	while (0 == lck_rw_try_lock_exclusive(&lt_rwlock)) {
		lt_sleep_a_little_bit();
	}

	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_rw_done(&lt_rwlock);
}

/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
 * static void
 * lt_grab_rw_shared()
 * {
 *      lck_rw_lock_shared(&lt_rwlock);
 *      lt_counter++;
 *
 *      lt_note_another_blocking_lock_holder();
 *      lt_sleep_a_little_bit();
 *      lt_note_blocking_lock_release();
 *
 *      lck_rw_done(&lt_rwlock);
 * }
 */

/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
 * static void
 * lt_grab_rw_shared_with_try()
 * {
 *      while(0 == lck_rw_try_lock_shared(&lt_rwlock));
 *      lt_counter++;
 *
 *      lt_note_another_blocking_lock_holder();
 *      lt_sleep_a_little_bit();
 *      lt_note_blocking_lock_release();
 *
 *      lck_rw_done(&lt_rwlock);
 * }
 */

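/*
 * Exercise shared -> exclusive upgrades and exclusive -> shared
 * downgrades. Note that lck_rw_lock_shared_to_exclusive() drops the
 * shared hold when the upgrade fails, which is why the lock must be
 * re-acquired in exclusive mode before proceeding.
 */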
static void
lt_upgrade_downgrade_rw()
{
	boolean_t upgraded, success;

	success = lck_rw_try_lock_shared(&lt_rwlock);
	if (!success) {
		lck_rw_lock_shared(&lt_rwlock);
	}

	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_note_blocking_lock_release();

	upgraded = lck_rw_lock_shared_to_exclusive(&lt_rwlock);
	if (!upgraded) {
		success = lck_rw_try_lock_exclusive(&lt_rwlock);

		if (!success) {
			lck_rw_lock_exclusive(&lt_rwlock);
		}
	}

	lt_upgrade_holders++;
	if (lt_upgrade_holders > lt_max_upgrade_holders) {
		lt_max_upgrade_holders = lt_upgrade_holders;
	}

	lt_counter++;
	lt_sleep_a_little_bit();

	lt_upgrade_holders--;

	lck_rw_lock_exclusive_to_shared(&lt_rwlock);

	lt_spin_a_little_bit();
	lck_rw_done(&lt_rwlock);
}

#if __AMP__
const int limit = 1000000;
static int lt_stress_local_counters[MAX_CPUS];

lck_ticket_t lt_ticket_lock;
lck_grp_t lt_ticket_grp;

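/*
 * Ticket lock stress body, run once per CPU. Each thread takes the lock
 * once and spins until every participant has done the same, then hammers
 * the lock until the shared counter reaches `limit`. The caller sums the
 * per-CPU local counters to check both consistency (sum == lt_counter)
 * and fairness (no CPU starved below a minimum count).
 */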
static void
lt_stress_ticket_lock()
{
	int local_counter = 0;

	uint cpuid = cpu_number();

	kprintf("%s>cpu %d starting\n", __FUNCTION__, cpuid);

	lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
	lt_counter++;
	local_counter++;
	lck_ticket_unlock(&lt_ticket_lock);

	while (lt_counter < lt_target_done_threads) {
		;
	}

	kprintf("%s>cpu %d started\n", __FUNCTION__, cpuid);

	while (lt_counter < limit) {
		lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
		if (lt_counter < limit) {
			lt_counter++;
			local_counter++;
		}
		lck_ticket_unlock(&lt_ticket_lock);
	}

	lt_stress_local_counters[cpuid] = local_counter;

	kprintf("%s>final counter %d cpu %d incremented the counter %d times\n", __FUNCTION__, lt_counter, cpuid, local_counter);
}
#endif

static void
lt_grab_hw_lock()
{
	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	lt_counter++;
	lt_spin_a_little_bit();
	hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_hw_lock_with_try()
{
	while (0 == hw_lock_try(&lt_hw_lock, LCK_GRP_NULL)) {
		;
	}
	lt_counter++;
	lt_spin_a_little_bit();
	hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_hw_lock_with_to()
{
	while (0 == hw_lock_to(&lt_hw_lock, LockTimeOut, LCK_GRP_NULL)) {
		mp_enable_preemption();
	}
	lt_counter++;
	lt_spin_a_little_bit();
	hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_spin_lock()
{
	lck_spin_lock(&lt_lck_spin_t);
	lt_counter++;
	lt_spin_a_little_bit();
	lck_spin_unlock(&lt_lck_spin_t);
}

static void
lt_grab_spin_lock_with_try()
{
	while (0 == lck_spin_try_lock(&lt_lck_spin_t)) {
		;
	}
	lt_counter++;
	lt_spin_a_little_bit();
	lck_spin_unlock(&lt_lck_spin_t);
}

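/*
 * Handshake state for the trylock tests: the spawned thread spins until
 * lt_thread_lock_grabbed is set by the main thread (which holds the lock
 * under test), then records the outcome of its own lock attempt in
 * lt_thread_lock_success.
 */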
static volatile boolean_t lt_thread_lock_grabbed;
static volatile boolean_t lt_thread_lock_success;

static void
lt_reset()
{
	lt_counter = 0;
	lt_max_holders = 0;
	lt_num_holders = 0;
	lt_max_upgrade_holders = 0;
	lt_upgrade_holders = 0;
	lt_done_threads = 0;
	lt_target_done_threads = 0;
	lt_cpu_bind_id = 0;

	OSMemoryBarrier();
}

static void
lt_trylock_hw_lock_with_to()
{
	OSMemoryBarrier();
	while (!lt_thread_lock_grabbed) {
		lt_sleep_a_little_bit();
		OSMemoryBarrier();
	}
	lt_thread_lock_success = hw_lock_to(&lt_hw_lock, 100, LCK_GRP_NULL);
	OSMemoryBarrier();
	mp_enable_preemption();
}

static void
lt_trylock_spin_try_lock()
{
	OSMemoryBarrier();
	while (!lt_thread_lock_grabbed) {
		lt_sleep_a_little_bit();
		OSMemoryBarrier();
	}
	lt_thread_lock_success = lck_spin_try_lock(&lt_lck_spin_t);
	OSMemoryBarrier();
}

static void
lt_trylock_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void (*)(void))arg;

	func();

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_start_trylock_thread(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_trylock_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}

static void
lt_wait_for_lock_test_threads()
{
	OSMemoryBarrier();
	/* Spin to reduce dependencies */
	while (lt_done_threads < lt_target_done_threads) {
		lt_sleep_a_little_bit();
		OSMemoryBarrier();
	}
	OSMemoryBarrier();
}

static kern_return_t
lt_test_trylocks()
{
	boolean_t success;
	extern unsigned int real_ncpus;

	/*
	 * First mtx try lock succeeds, second fails.
	 */
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NOTNULL(success, "First mtx try lock");
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NULL(success, "Second mtx try lock for a locked mtx");
	lck_mtx_unlock(&lt_mtx);

	/*
	 * After regular grab, can't try lock.
	 */
	lck_mtx_lock(&lt_mtx);
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NULL(success, "try lock should fail after regular lck_mtx_lock");
	lck_mtx_unlock(&lt_mtx);

	/*
	 * Two shared try locks on a previously unheld rwlock succeed, and a
	 * subsequent exclusive attempt fails.
	 */
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "exclusive lock attempt on previously held lock should fail");
	lck_rw_done(&lt_rwlock);
	lck_rw_done(&lt_rwlock);

	/*
	 * After regular shared grab, can trylock
	 * for shared but not for exclusive.
	 */
	lck_rw_lock_shared(&lt_rwlock);
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "After regular shared grab another shared try lock should succeed.");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular shared grab an exclusive lock attempt should fail.");
	lck_rw_done(&lt_rwlock);
	lck_rw_done(&lt_rwlock);

	/*
	 * An exclusive try lock succeeds, subsequent shared and exclusive
	 * attempts fail.
	 */
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "An exclusive try lock should succeed");
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NULL(success, "try lock in shared mode attempt after an exclusive grab should fail");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "try lock in exclusive mode attempt after an exclusive grab should fail");
	lck_rw_done(&lt_rwlock);

	/*
	 * After regular exclusive grab, neither kind of trylock succeeds.
	 */
	lck_rw_lock_exclusive(&lt_rwlock);
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular exclusive grab, shared trylock should not succeed");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular exclusive grab, exclusive trylock should not succeed");
	lck_rw_done(&lt_rwlock);

	/*
	 * First spin lock attempts succeed, second attempts fail.
	 */
	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
	T_ASSERT_NOTNULL(success, "First spin lock attempts should succeed");
	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
	T_ASSERT_NULL(success, "Second attempt to spin lock should fail");
	hw_lock_unlock(&lt_hw_lock);

	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
	T_ASSERT_NULL(success, "After taking spin lock, trylock attempt should fail");
	hw_lock_unlock(&lt_hw_lock);

	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	OSMemoryBarrier();
	lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
	success = hw_lock_to(&lt_hw_lock, 100, LCK_GRP_NULL);
	T_ASSERT_NOTNULL(success, "First spin lock with timeout should succeed");
	if (real_ncpus == 1) {
		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
	}
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "Second spin lock with timeout should fail and timeout");
	if (real_ncpus == 1) {
		mp_disable_preemption(); /* don't double-enable when we unlock */
	}
	hw_lock_unlock(&lt_hw_lock);

	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	OSMemoryBarrier();
	lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	if (real_ncpus == 1) {
		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
	}
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "after taking a spin lock, lock attempt with timeout should fail");
	if (real_ncpus == 1) {
		mp_disable_preemption(); /* don't double-enable when we unlock */
	}
	hw_lock_unlock(&lt_hw_lock);

	success = lck_spin_try_lock(&lt_lck_spin_t);
	T_ASSERT_NOTNULL(success, "spin trylock of previously unheld lock should succeed");
	success = lck_spin_try_lock(&lt_lck_spin_t);
	T_ASSERT_NULL(success, "spin trylock attempt of previously held lock (with trylock) should fail");
	lck_spin_unlock(&lt_lck_spin_t);

	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	lt_start_trylock_thread(lt_trylock_spin_try_lock);
	lck_spin_lock(&lt_lck_spin_t);
	if (real_ncpus == 1) {
		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
	}
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "spin trylock attempt of previously held lock should fail");
	if (real_ncpus == 1) {
		mp_disable_preemption(); /* don't double-enable when we unlock */
	}
	lck_spin_unlock(&lt_lck_spin_t);

	return KERN_SUCCESS;
}

static void
lt_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void (*)(void))arg;
	uint32_t i;

	for (i = 0; i < LOCK_TEST_ITERATIONS; i++) {
		func();
	}

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_start_lock_thread(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}

#if __AMP__
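/*
 * Thread bodies for the AMP stress tests. lt_bound_thread binds each new
 * thread to the next CPU id (claimed via an atomic increment of
 * lt_cpu_bind_id); lt_e_thread/lt_p_thread instead constrain themselves
 * to the efficiency or performance cluster via sched_flags.
 */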
static void
lt_bound_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void (*)(void))arg;

	int cpuid = OSIncrementAtomic((volatile SInt32 *)&lt_cpu_bind_id);

	processor_t processor = processor_list;
	while ((processor != NULL) && (processor->cpu_id != cpuid)) {
		processor = processor->processor_list;
	}

	if (processor != NULL) {
		thread_bind(processor);
	}

	thread_block(THREAD_CONTINUE_NULL);

	func();

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_e_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void (*)(void))arg;

	thread_t thread = current_thread();

	spl_t s = splsched();
	thread_lock(thread);
	thread->sched_flags |= TH_SFLAG_ECORE_ONLY;
	thread_unlock(thread);
	splx(s);

	thread_block(THREAD_CONTINUE_NULL);

	func();

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_p_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void (*)(void))arg;

	thread_t thread = current_thread();

	spl_t s = splsched();
	thread_lock(thread);
	thread->sched_flags |= TH_SFLAG_PCORE_ONLY;
	thread_unlock(thread);
	splx(s);

	thread_block(THREAD_CONTINUE_NULL);

	func();

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_start_lock_thread_e(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_e_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}

static void
lt_start_lock_thread_p(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_p_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}

static void
lt_start_lock_thread_bound(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_bound_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}
#endif

static kern_return_t
lt_test_locks()
{
	kern_return_t kr = KERN_SUCCESS;
	lck_grp_attr_t *lga = lck_grp_attr_alloc_init();
	lck_grp_t *lg = lck_grp_alloc_init("lock test", lga);

	lck_mtx_init(&lt_mtx, lg, LCK_ATTR_NULL);
	lck_rw_init(&lt_rwlock, lg, LCK_ATTR_NULL);
	lck_spin_init(&lt_lck_spin_t, lg, LCK_ATTR_NULL);
	hw_lock_init(&lt_hw_lock);

	T_LOG("Testing locks.");

	/* Try locks (custom) */
	lt_reset();

	T_LOG("Running try lock test.");
	kr = lt_test_trylocks();
	T_EXPECT_NULL(kr, "try lock test failed.");

	/* Uncontended mutex */
	T_LOG("Running uncontended mutex test.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_mutex);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Contended mutex */
	T_LOG("Running contended mutex test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_mutex);
	lt_start_lock_thread(lt_grab_mutex);
	lt_start_lock_thread(lt_grab_mutex);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Contended mutex: try locks */
	T_LOG("Running contended mutex trylock test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Uncontended exclusive rwlock */
	T_LOG("Running uncontended exclusive rwlock test.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Uncontended shared rwlock */

	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	 * T_LOG("Running uncontended shared rwlock test.");
	 * lt_reset();
	 * lt_target_done_threads = 1;
	 * lt_start_lock_thread(lt_grab_rw_shared);
	 * lt_wait_for_lock_test_threads();
	 * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	 */

	/* Contended exclusive rwlock */
	T_LOG("Running contended exclusive rwlock test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* One shared, two exclusive */
	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	 * T_LOG("Running test with one shared and two exclusive rw lock threads.");
	 * lt_reset();
	 * lt_target_done_threads = 3;
	 * lt_start_lock_thread(lt_grab_rw_shared);
	 * lt_start_lock_thread(lt_grab_rw_exclusive);
	 * lt_start_lock_thread(lt_grab_rw_exclusive);
	 * lt_wait_for_lock_test_threads();
	 * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	 */

	/* Four shared */
	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	 * T_LOG("Running test with four shared holders.");
	 * lt_reset();
	 * lt_target_done_threads = 4;
	 * lt_start_lock_thread(lt_grab_rw_shared);
	 * lt_start_lock_thread(lt_grab_rw_shared);
	 * lt_start_lock_thread(lt_grab_rw_shared);
	 * lt_start_lock_thread(lt_grab_rw_shared);
	 * lt_wait_for_lock_test_threads();
	 * T_EXPECT_LE_UINT(lt_max_holders, 4, NULL);
	 */

	/* Three doing upgrades and downgrades */
	T_LOG("Running test with threads upgrading and downgrading.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
	T_EXPECT_EQ_UINT(lt_max_upgrade_holders, 1, NULL);

	/* Uncontended - exclusive trylocks */
	T_LOG("Running test with single thread doing exclusive rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Uncontended - shared trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	 * T_LOG("Running test with single thread doing shared rwlock trylocks.");
	 * lt_reset();
	 * lt_target_done_threads = 1;
	 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 * lt_wait_for_lock_test_threads();
	 * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	 */

	/* Three doing exclusive trylocks */
	T_LOG("Running test with threads doing exclusive rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Three doing shared trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	 * T_LOG("Running test with threads doing shared rwlock trylocks.");
	 * lt_reset();
	 * lt_target_done_threads = 3;
	 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 * lt_wait_for_lock_test_threads();
	 * T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 * T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
	 */

	/* Three doing various trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	 * T_LOG("Running test with threads doing mixed rwlock trylocks.");
	 * lt_reset();
	 * lt_target_done_threads = 4;
	 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 * lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	 * lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	 * lt_wait_for_lock_test_threads();
	 * T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 * T_EXPECT_LE_UINT(lt_max_holders, 2, NULL);
	 */

	/* HW locks */
	T_LOG("Running test with hw_lock_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

#if __AMP__
	/* Ticket locks stress test */
	T_LOG("Running Ticket locks stress test with lck_ticket_lock()");
	extern unsigned int real_ncpus;
	lck_grp_init(&lt_ticket_grp, "ticket lock stress", LCK_GRP_ATTR_NULL);
	lck_ticket_init(&lt_ticket_lock, &lt_ticket_grp);
	lt_reset();
	lt_target_done_threads = real_ncpus;
	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
		lt_start_lock_thread_bound(lt_stress_ticket_lock);
	}
	lt_wait_for_lock_test_threads();
	bool starvation = false;
	uint total_local_count = 0;
	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
		starvation = starvation || (lt_stress_local_counters[processor->cpu_id] < 10);
		total_local_count += lt_stress_local_counters[processor->cpu_id];
	}
	if (total_local_count != lt_counter) {
		T_FAIL("Lock failure\n");
	} else if (starvation) {
		T_FAIL("Lock starvation found\n");
	} else {
		T_PASS("Ticket locks stress test with lck_ticket_lock()");
	}

	/* AMP ticket locks stress test */
	T_LOG("Running AMP Ticket locks stress test bound to clusters with lck_ticket_lock()");
	lt_reset();
	lt_target_done_threads = real_ncpus;
	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
		processor_set_t pset = processor->processor_set;
		if (pset->pset_cluster_type == PSET_AMP_P) {
			lt_start_lock_thread_p(lt_stress_ticket_lock);
		} else if (pset->pset_cluster_type == PSET_AMP_E) {
			lt_start_lock_thread_e(lt_stress_ticket_lock);
		} else {
			lt_start_lock_thread(lt_stress_ticket_lock);
		}
	}
	lt_wait_for_lock_test_threads();
#endif

	/* HW locks: trylocks */
	T_LOG("Running test with hw_lock_try()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* HW locks: with timeout */
	T_LOG("Running test with hw_lock_to()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* Spin locks */
	T_LOG("Running test with lck_spin_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* Spin locks: trylocks */
	T_LOG("Running test with lck_spin_try_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	return KERN_SUCCESS;
}

#define MT_MAX_ARGS 8
#define MT_INITIAL_VALUE 0xfeedbeef
#define MT_W_VAL (0x00000000feedbeefULL) /* Drop in zeros */
#define MT_S_VAL (0xfffffffffeedbeefULL) /* High bit is 1, so sign-extends as negative */
#define MT_L_VAL (((uint64_t)MT_INITIAL_VALUE) | (((uint64_t)MT_INITIAL_VALUE) << 32)) /* Two back-to-back */

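/*
 * Mungers rewrite a packed array of 32-bit user syscall arguments into
 * 64-bit kernel arguments in place. For example, munge_wl consumes three
 * input words {w, l_lo, l_hi} and produces two 64-bit outputs: a
 * zero-extended word (MT_W_VAL) followed by the reassembled long
 * (MT_L_VAL); 's' arguments are sign-extended instead (MT_S_VAL).
 */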
typedef void (*sy_munge_t)(void*);

#define MT_FUNC(x) #x, x
struct munger_test {
	const char *mt_name;
	sy_munge_t mt_func;
	uint32_t mt_in_words;
	uint32_t mt_nout;
	uint64_t mt_expected[MT_MAX_ARGS];
} munger_tests[] = {
	{MT_FUNC(munge_w), 1, 1, {MT_W_VAL}},
	{MT_FUNC(munge_ww), 2, 2, {MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_www), 3, 3, {MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwww), 4, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwww), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwww), 6, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwww), 7, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwwww), 8, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wl), 3, 2, {MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwl), 4, 3, {MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwlll), 8, 5, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wlw), 4, 3, {MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wlwwwll), 10, 7, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wlwwwllw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wlwwlwlw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wll), 5, 3, {MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wlll), 7, 4, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wllwwll), 11, 7, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwlw), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwlww), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwlwww), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwl), 5, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwlw), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwllww), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwl), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwl), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwlww), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwllw), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwlll), 11, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwwl), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwwlw), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwwll), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wsw), 3, 3, {MT_W_VAL, MT_S_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wws), 3, 3, {MT_W_VAL, MT_W_VAL, MT_S_VAL}},
	{MT_FUNC(munge_wwwsw), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_S_VAL, MT_W_VAL}},
	{MT_FUNC(munge_llllll), 12, 6, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_l), 2, 1, {MT_L_VAL}},
	{MT_FUNC(munge_lw), 3, 2, {MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_lwww), 5, 4, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_lwwwwwww), 9, 8, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wlwwwl), 8, 6, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwlwwwl), 9, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}
};

#define MT_TEST_COUNT (sizeof(munger_tests) / sizeof(struct munger_test))

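/*
 * Seed the first in_words 32-bit slots with MT_INITIAL_VALUE and zero the
 * rest of the buffer, so any output slot a munger fails to write is
 * detectable in the comparison against mt_expected.
 */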
static void
mt_reset(uint32_t in_words, size_t total_size, uint32_t *data)
{
	uint32_t i;

	for (i = 0; i < in_words; i++) {
		data[i] = MT_INITIAL_VALUE;
	}

	if (in_words * sizeof(uint32_t) < total_size) {
		bzero(&data[in_words], total_size - in_words * sizeof(uint32_t));
	}
}

static void
mt_test_mungers()
{
	uint64_t data[MT_MAX_ARGS];
	uint32_t i, j;

	for (i = 0; i < MT_TEST_COUNT; i++) {
		struct munger_test *test = &munger_tests[i];
		int pass = 1;

		T_LOG("Testing %s", test->mt_name);

		mt_reset(test->mt_in_words, sizeof(data), (uint32_t*)data);
		test->mt_func(data);

		for (j = 0; j < test->mt_nout; j++) {
			if (data[j] != test->mt_expected[j]) {
				T_FAIL("Index %d: expected %llx, got %llx.", j, test->mt_expected[j], data[j]);
				pass = 0;
			}
		}
		if (pass) {
			T_PASS(test->mt_name);
		}
	}
}

/* Exception Callback Test */
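/*
 * The registered callback stashes the faulting address into the
 * caller-provided context (refcon) and returns a class-specific action,
 * letting the test below verify both dispatch (correct class -> correct
 * action) and the refcon/state plumbing.
 */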
static ex_cb_action_t
excb_test_action(
	ex_cb_class_t           cb_class,
	void                    *refcon,
	const ex_cb_state_t     *state
	)
{
	ex_cb_state_t *context = (ex_cb_state_t *)refcon;

	if ((NULL == refcon) || (NULL == state)) {
		return EXCB_ACTION_TEST_FAIL;
	}

	context->far = state->far;

	switch (cb_class) {
	case EXCB_CLASS_TEST1:
		return EXCB_ACTION_RERUN;
	case EXCB_CLASS_TEST2:
		return EXCB_ACTION_NONE;
	default:
		return EXCB_ACTION_TEST_FAIL;
	}
}


kern_return_t
ex_cb_test()
{
	const vm_offset_t far1 = 0xdead0001;
	const vm_offset_t far2 = 0xdead0002;
	kern_return_t kr;
	ex_cb_state_t test_context_1 = {0xdeadbeef};
	ex_cb_state_t test_context_2 = {0xdeadbeef};
	ex_cb_action_t action;

	T_LOG("Testing Exception Callback.");

	T_LOG("Running registration test.");

	kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
	T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST1 exception callback");
	kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
	T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST2 exception callback");

	kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
	T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST2 exception callback");
	kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
	T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST1 exception callback");

	T_LOG("Running invocation test.");

	action = ex_cb_invoke(EXCB_CLASS_TEST1, far1);
	T_ASSERT(EXCB_ACTION_RERUN == action, NULL);
	T_ASSERT(far1 == test_context_1.far, NULL);

	action = ex_cb_invoke(EXCB_CLASS_TEST2, far2);
	T_ASSERT(EXCB_ACTION_NONE == action, NULL);
	T_ASSERT(far2 == test_context_2.far, NULL);

	action = ex_cb_invoke(EXCB_CLASS_TEST3, 0);
	T_ASSERT(EXCB_ACTION_NONE == action, NULL);

	return KERN_SUCCESS;
}

#if defined(HAS_APPLE_PAC)

/*
 *
 * arm64_ropjop_test - basic xnu ROP/JOP test plan
 *
 * - assert ROP/JOP configured and running status match
 * - assert all AppleMode ROP/JOP features enabled
 * - ensure ROP/JOP keys are set and diversified
 * - sign a KVA (the address of this function), assert it was signed (changed)
 * - authenticate the newly signed KVA
 * - assert the authed KVA is the original KVA
 * - corrupt a signed ptr, auth it, ensure auth failed
 * - assert the failed authIB of the corrupted pointer is tagged
 *
 */

kern_return_t
arm64_ropjop_test()
{
	T_LOG("Testing ROP/JOP");

	/* how is ROP/JOP configured */
	boolean_t config_rop_enabled = TRUE;
	boolean_t config_jop_enabled = TRUE;


	/* assert all AppleMode ROP/JOP features enabled */
	uint64_t apctl = __builtin_arm_rsr64(ARM64_REG_APCTL_EL1);
#if __APSTS_SUPPORTED__
	uint64_t apsts = __builtin_arm_rsr64(ARM64_REG_APSTS_EL1);
	T_EXPECT(apsts & APSTS_EL1_MKEYVld, NULL);
#else
	T_EXPECT(apctl & APCTL_EL1_MKEYVld, NULL);
#endif /* __APSTS_SUPPORTED__ */
	T_EXPECT(apctl & APCTL_EL1_AppleMode, NULL);

	bool kernkeyen = apctl & APCTL_EL1_KernKeyEn;
#if HAS_APCTL_EL1_USERKEYEN
	bool userkeyen = apctl & APCTL_EL1_UserKeyEn;
#else
	bool userkeyen = false;
#endif
	/* for KernKey to work as a diversifier, it must be enabled at exactly one of {EL0, EL1/2} */
	T_EXPECT(kernkeyen || userkeyen, "KernKey is enabled");
	T_EXPECT(!(kernkeyen && userkeyen), "KernKey is not simultaneously enabled at userspace and kernel space");

	/* ROP/JOP keys enabled current status */
	bool status_jop_enabled, status_rop_enabled;
#if __APSTS_SUPPORTED__ /* H13+ */
	status_jop_enabled = status_rop_enabled = apctl & APCTL_EL1_EnAPKey1;
#elif __APCFG_SUPPORTED__ /* H12 */
	uint64_t apcfg_el1 = __builtin_arm_rsr64(APCFG_EL1);
	status_jop_enabled = status_rop_enabled = apcfg_el1 & APCFG_EL1_ELXENKEY;
#else /* !__APCFG_SUPPORTED__ H11 */
	uint64_t sctlr_el1 = __builtin_arm_rsr64("SCTLR_EL1");
	status_jop_enabled = sctlr_el1 & SCTLR_PACIA_ENABLED;
	status_rop_enabled = sctlr_el1 & SCTLR_PACIB_ENABLED;
#endif /* __APSTS_SUPPORTED__ */

	/* assert configured and running status match */
	T_EXPECT(config_rop_enabled == status_rop_enabled, NULL);
	T_EXPECT(config_jop_enabled == status_jop_enabled, NULL);


	if (config_jop_enabled) {
		/* jop key */
		uint64_t apiakey_hi = __builtin_arm_rsr64(ARM64_REG_APIAKEYHI_EL1);
		uint64_t apiakey_lo = __builtin_arm_rsr64(ARM64_REG_APIAKEYLO_EL1);

		/* ensure JOP key is set and diversified */
		T_EXPECT(apiakey_hi != KERNEL_ROP_ID && apiakey_lo != KERNEL_ROP_ID, NULL);
		T_EXPECT(apiakey_hi != 0 && apiakey_lo != 0, NULL);
	}

	if (config_rop_enabled) {
		/* rop key */
		uint64_t apibkey_hi = __builtin_arm_rsr64(ARM64_REG_APIBKEYHI_EL1);
		uint64_t apibkey_lo = __builtin_arm_rsr64(ARM64_REG_APIBKEYLO_EL1);

		/* ensure ROP key is set and diversified */
		T_EXPECT(apibkey_hi != KERNEL_ROP_ID && apibkey_lo != KERNEL_ROP_ID, NULL);
		T_EXPECT(apibkey_hi != 0 && apibkey_lo != 0, NULL);

		/* sign a KVA (the address of this function) */
		uint64_t kva_signed = (uint64_t) ptrauth_sign_unauthenticated((void *)&config_rop_enabled, ptrauth_key_asib, 0);

		/* assert it was signed (changed) */
		T_EXPECT(kva_signed != (uint64_t)&config_rop_enabled, NULL);

		/* authenticate the newly signed KVA */
		uint64_t kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_signed, ptrauth_key_asib, 0);

		/* assert the authed KVA is the original KVA */
		T_EXPECT(kva_authed == (uint64_t)&config_rop_enabled, NULL);

		/* corrupt a signed ptr, auth it, ensure auth failed */
		uint64_t kva_corrupted = kva_signed ^ 1;

		/* authenticate the corrupted pointer */
		kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_corrupted, ptrauth_key_asib, 0);

		/* when AuthIB fails, the error code 2'b10 is placed in bits 62:61 */
		uint64_t auth_fail_mask = 3ULL << 61;
		uint64_t authib_fail = 2ULL << 61;

		/* assert the failed authIB of corrupted pointer is tagged */
		T_EXPECT((kva_authed & auth_fail_mask) == authib_fail, NULL);
	}

	return KERN_SUCCESS;
}
#endif /* defined(HAS_APPLE_PAC) */

#if __ARM_PAN_AVAILABLE__

struct pan_test_thread_args {
	volatile bool join;
};

static void
arm64_pan_test_thread(void *arg, wait_result_t __unused wres)
{
	T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);

	struct pan_test_thread_args *args = arg;

	for (processor_t p = processor_list; p != NULL; p = p->processor_list) {
		thread_bind(p);
		thread_block(THREAD_CONTINUE_NULL);
		kprintf("Running PAN test on cpu %d\n", p->cpu_id);
		arm64_pan_test();
	}

	/* unbind thread from specific cpu */
	thread_bind(PROCESSOR_NULL);
	thread_block(THREAD_CONTINUE_NULL);

	while (!args->join) {
		;
	}

	thread_wakeup(args);
}

kern_return_t
arm64_late_pan_test()
{
	thread_t thread;
	kern_return_t kr;

	struct pan_test_thread_args args;
	args.join = false;

	kr = kernel_thread_start(arm64_pan_test_thread, &args, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);

	assert_wait(&args, THREAD_UNINT);
	args.join = true;
	thread_block(THREAD_CONTINUE_NULL);
	return KERN_SUCCESS;
}

static bool
arm64_pan_test_pan_enabled_fault_handler(arm_saved_state_t * state)
{
	bool retval = false;
	uint32_t esr = get_saved_state_esr(state);
	esr_exception_class_t class = ESR_EC(esr);
	fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));
	uint32_t cpsr = get_saved_state_cpsr(state);
	uint64_t far = get_saved_state_far(state);

	if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) &&
	    (cpsr & PSR64_PAN) &&
	    ((esr & ISS_DA_WNR) ? mmu_kvtop_wpreflight(far) : mmu_kvtop(far))) {
		++pan_exception_level;
		// read the user-accessible value to make sure
		// pan is enabled and produces a 2nd fault from
		// the exception handler
		if (pan_exception_level == 1) {
			ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, far);
			pan_fault_value = *(volatile char *)far;
			ml_expect_fault_end();
			__builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
		}
		// this fault address is used for PAN test
		// disable PAN and rerun
		mask_saved_state_cpsr(state, 0, PSR64_PAN);

		retval = true;
	}

	return retval;
}

static bool
arm64_pan_test_pan_disabled_fault_handler(arm_saved_state_t * state)
{
	bool retval = false;
	uint32_t esr = get_saved_state_esr(state);
	esr_exception_class_t class = ESR_EC(esr);
	fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));
	uint32_t cpsr = get_saved_state_cpsr(state);

	if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) &&
	    !(cpsr & PSR64_PAN)) {
		++pan_exception_level;
		// On an exception taken from a PAN-disabled context, verify
		// that PAN is re-enabled for the exception handler and that
		// accessing the test address produces a PAN fault.
		ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr);
		pan_fault_value = *(volatile char *)pan_test_addr;
		ml_expect_fault_end();
		__builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
		add_saved_state_pc(state, 4);

		retval = true;
	}

	return retval;
}

kern_return_t
arm64_pan_test()
{
	bool values_match = false;
	vm_offset_t priv_addr = _COMM_PAGE_SIGNATURE;

	T_LOG("Testing PAN.");


	T_ASSERT((__builtin_arm_rsr("SCTLR_EL1") & SCTLR_PAN_UNCHANGED) == 0, "SCTLR_EL1.SPAN must be cleared");

	T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);

	pan_exception_level = 0;
	pan_fault_value = 0xDE;
	// convert priv_addr to one that is accessible from user mode
	pan_test_addr = priv_addr + _COMM_HIGH_PAGE64_BASE_ADDRESS -
	    _COMM_PAGE_START_ADDRESS;

	// Context-switch with PAN disabled is prohibited; prevent test logging from
	// triggering a voluntary context switch.
	mp_disable_preemption();

	// Below should trigger a PAN exception as pan_test_addr is accessible
	// in user mode
	// The exception handler, upon recognizing the fault address is pan_test_addr,
	// will disable PAN and rerun this instruction successfully
	ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr);
	values_match = (*(volatile char *)pan_test_addr == *(volatile char *)priv_addr);
	ml_expect_fault_end();
	T_ASSERT(values_match, NULL);

	T_ASSERT(pan_exception_level == 2, NULL);

	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

	T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

	pan_exception_level = 0;
	pan_fault_value = 0xAD;
	pan_ro_addr = (vm_offset_t) &pan_ro_value;

	// Force a permission fault while PAN is disabled to make sure PAN is
	// re-enabled during the exception handler.
	ml_expect_fault_begin(arm64_pan_test_pan_disabled_fault_handler, pan_ro_addr);
	*((volatile uint64_t*)pan_ro_addr) = 0xFEEDFACECAFECAFE;
	ml_expect_fault_end();

	T_ASSERT(pan_exception_level == 2, NULL);

	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

	T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

	pan_test_addr = 0;
	pan_ro_addr = 0;

	__builtin_arm_wsr("pan", 1);

	mp_enable_preemption();

	return KERN_SUCCESS;
}
#endif /* __ARM_PAN_AVAILABLE__ */


kern_return_t
arm64_lock_test()
{
	return lt_test_locks();
}

kern_return_t
arm64_munger_test()
{
	mt_test_mungers();
	return 0;
}

#if defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST)
SECURITY_READ_ONLY_LATE(uint64_t) ctrr_ro_test;
uint64_t ctrr_nx_test = 0xd65f03c0; /* RET */
volatile uint64_t ctrr_exception_esr;
vm_offset_t ctrr_test_va;
vm_offset_t ctrr_test_page;

kern_return_t
ctrr_test(void)
{
	processor_t p;
	boolean_t ctrr_disable = FALSE;

	PE_parse_boot_argn("-unsafe_kernel_text", &ctrr_disable, sizeof(ctrr_disable));

#if CONFIG_CSR_FROM_DT
	if (csr_unsafe_kernel_text) {
		ctrr_disable = TRUE;
	}
#endif /* CONFIG_CSR_FROM_DT */

	if (ctrr_disable) {
		T_LOG("Skipping CTRR test when -unsafe_kernel_text boot-arg present");
		return KERN_SUCCESS;
	}

	T_LOG("Running CTRR test.");

	for (p = processor_list; p != NULL; p = p->processor_list) {
		thread_bind(p);
		thread_block(THREAD_CONTINUE_NULL);
		T_LOG("Running CTRR test on cpu %d\n", p->cpu_id);
		ctrr_test_cpu();
	}

	/* unbind thread from specific cpu */
	thread_bind(PROCESSOR_NULL);
	thread_block(THREAD_CONTINUE_NULL);

	return KERN_SUCCESS;
}

static bool
ctrr_test_ro_fault_handler(arm_saved_state_t * state)
{
	bool retval = false;
	uint32_t esr = get_saved_state_esr(state);
	esr_exception_class_t class = ESR_EC(esr);
	fault_status_t fsc = ISS_DA_FSC(ESR_ISS(esr));

	if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
		ctrr_exception_esr = esr;
		add_saved_state_pc(state, 4);
		retval = true;
	}

	return retval;
}

static bool
ctrr_test_nx_fault_handler(arm_saved_state_t * state)
{
	bool retval = false;
	uint32_t esr = get_saved_state_esr(state);
	esr_exception_class_t class = ESR_EC(esr);
	fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));

	if ((class == ESR_EC_IABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
		ctrr_exception_esr = esr;
		/* return to the instruction immediately after the call to NX page */
		set_saved_state_pc(state, get_saved_state_lr(state));
		retval = true;
	}

	return retval;
}

/* test CTRR on a cpu, caller to bind thread to desired cpu */
/* ctrr_test_page was reserved during bootstrap process */
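/*
 * Test plan: map an alias of a CTRR-protected page at ctrr_test_page with
 * RW permissions and verify a write still takes a permission fault (CTRR
 * overrides the PTE), then map a page outside the region as RX and verify
 * that instruction fetch faults, since execute permission at EL1 is only
 * granted inside the CTRR region.
 */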
kern_return_t
ctrr_test_cpu(void)
{
	ppnum_t ro_pn, nx_pn;
	uint64_t *ctrr_ro_test_ptr;
	void (*ctrr_nx_test_ptr)(void);
	kern_return_t kr;
	uint64_t prot = 0;
	extern vm_offset_t virtual_space_start;

	/* ctrr read only region = [rorgn_begin_va, rorgn_end_va) */

	vm_offset_t rorgn_begin_va = phystokv(ctrr_begin);
	vm_offset_t rorgn_end_va = phystokv(ctrr_end) + 1;
	vm_offset_t ro_test_va = (vm_offset_t)&ctrr_ro_test;
	vm_offset_t nx_test_va = (vm_offset_t)&ctrr_nx_test;

	T_EXPECT(rorgn_begin_va <= ro_test_va && ro_test_va < rorgn_end_va, "Expect ro_test_va to be inside the CTRR region");
	T_EXPECT((nx_test_va < rorgn_begin_va) ^ (nx_test_va >= rorgn_end_va), "Expect nx_test_va to be outside the CTRR region");

	ro_pn = pmap_find_phys(kernel_pmap, ro_test_va);
	nx_pn = pmap_find_phys(kernel_pmap, nx_test_va);
	T_EXPECT(ro_pn && nx_pn, "Expect ro page number and nx page number to be non zero");

	T_LOG("test virtual page: %p, ctrr_ro_test: %p, ctrr_nx_test: %p, ro_pn: %x, nx_pn: %x ",
	    (void *)ctrr_test_page, &ctrr_ro_test, &ctrr_nx_test, ro_pn, nx_pn);

	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
	T_EXPECT(~prot & ARM_TTE_VALID, "Expect ctrr_test_page to be unmapped");

	T_LOG("Read only region test mapping virtual page %p to CTRR RO page number %d", ctrr_test_page, ro_pn);
	kr = pmap_enter(kernel_pmap, ctrr_test_page, ro_pn,
	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
	T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RW mapping to succeed");

	// assert entire mmu prot path (Hierarchical protection model) is NOT RO
	// fetch effective block level protections from table/block entries
	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
	T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RWNA && (prot & ARM_PTE_PNX), "Mapping is EL1 RWNX");

	ctrr_test_va = ctrr_test_page + (ro_test_va & PAGE_MASK);
	ctrr_ro_test_ptr = (void *)ctrr_test_va;

	T_LOG("Read only region test writing to %p to provoke data abort", ctrr_ro_test_ptr);

	// should cause data abort
	ml_expect_fault_begin(ctrr_test_ro_fault_handler, ctrr_test_va);
	*ctrr_ro_test_ptr = 1;
	ml_expect_fault_end();

	// ensure write permission fault at expected level
	// data abort handler will set ctrr_exception_esr when ctrr_test_va takes a permission fault

	T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_DABORT_EL1, "Data Abort from EL1 expected");
	T_EXPECT(ISS_DA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");
	T_EXPECT(ESR_ISS(ctrr_exception_esr) & ISS_DA_WNR, "Write Fault Expected");

	ctrr_test_va = 0;
	ctrr_exception_esr = 0;
	pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);

	T_LOG("No execute test mapping virtual page %p to CTRR PXN page number %d", ctrr_test_page, nx_pn);

	kr = pmap_enter(kernel_pmap, ctrr_test_page, nx_pn,
	    VM_PROT_READ | VM_PROT_EXECUTE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
	T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RX mapping to succeed");

	// assert entire mmu prot path (Hierarchical protection model) is NOT XN
	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
	T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RONA && (~prot & ARM_PTE_PNX), "Mapping is EL1 ROX");

	ctrr_test_va = ctrr_test_page + (nx_test_va & PAGE_MASK);
#if __has_feature(ptrauth_calls)
	ctrr_nx_test_ptr = ptrauth_sign_unauthenticated((void *)ctrr_test_va, ptrauth_key_function_pointer, 0);
#else
	ctrr_nx_test_ptr = (void *)ctrr_test_va;
#endif

	T_LOG("No execute test calling ctrr_nx_test_ptr(): %p to provoke instruction abort", ctrr_nx_test_ptr);

	// should cause prefetch abort
	ml_expect_fault_begin(ctrr_test_nx_fault_handler, ctrr_test_va);
	ctrr_nx_test_ptr();
	ml_expect_fault_end();

	// TODO: ensure execute permission fault at expected level
	T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_IABORT_EL1, "Instruction abort from EL1 Expected");
	T_EXPECT(ISS_DA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");

	ctrr_test_va = 0;
	ctrr_exception_esr = 0;

	pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);

	T_LOG("Expect no faults when reading CTRR region to verify correct programming of CTRR limits");
	for (vm_offset_t addr = rorgn_begin_va; addr < rorgn_end_va; addr += 8) {
		volatile uint64_t x = *(uint64_t *)addr;
		(void) x; /* read for side effect only */
	}

	return KERN_SUCCESS;
}
#endif /* defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST) */

#if HAS_TWO_STAGE_SPR_LOCK

#define STR1(x) #x
#define STR(x) STR1(x)

volatile vm_offset_t spr_lock_test_addr;
volatile uint32_t spr_lock_exception_esr;

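/*
 * Verify the two-stage SPR lock on each CPU: once locked, an MSR write to
 * a guarded implementation-defined register (HID8 here) should take a
 * synchronous abort (recorded in spr_lock_exception_esr by the fault
 * handler, keyed off spr_lock_test_addr) and leave the register unchanged.
 */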
kern_return_t
arm64_spr_lock_test()
{
	processor_t p;

	for (p = processor_list; p != NULL; p = p->processor_list) {
		thread_bind(p);
		thread_block(THREAD_CONTINUE_NULL);
		T_LOG("Running SPR lock test on cpu %d\n", p->cpu_id);

		uint64_t orig_value = __builtin_arm_rsr64(STR(ARM64_REG_HID8));
		spr_lock_test_addr = (vm_offset_t)VM_KERNEL_STRIP_PTR(arm64_msr_lock_test);
		spr_lock_exception_esr = 0;
		arm64_msr_lock_test(~orig_value);
		T_EXPECT(spr_lock_exception_esr != 0, "MSR write generated synchronous abort");

		uint64_t new_value = __builtin_arm_rsr64(STR(ARM64_REG_HID8));
		T_EXPECT(orig_value == new_value, "MSR write did not succeed");

		spr_lock_test_addr = 0;
	}

	/* unbind thread from specific cpu */
	thread_bind(PROCESSOR_NULL);
	thread_block(THREAD_CONTINUE_NULL);

	T_PASS("Done running SPR lock tests");

	return KERN_SUCCESS;
}

#endif /* HAS_TWO_STAGE_SPR_LOCK */