/*
 * Copyright (c) 2011-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System Copyright (c) 1991,1990,1989,1988,1987 Carnegie
 * Mellon University All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright notice
 * and this permission notice appear in all copies of the software,
 * derivative works or modified versions, and any portions thereof, and that
 * both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.
 * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
 * WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 * School of Computer Science Carnegie Mellon University Pittsburgh PA
 * 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon the
 * rights to redistribute these changes.
 */

#include <mach_ldebug.h>

#define LOCK_PRIVATE 1

#include <vm/pmap.h>
#include <kern/kalloc.h>
#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>
#include <string.h>
#include <tests/xnupost.h>

#if MACH_KDB
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
#include <ddb/db_print.h>
#endif /* MACH_KDB */

#include <sys/kdebug.h>
#include <sys/munge.h>
#include <machine/cpu_capabilities.h>
#include <arm/cpu_data_internal.h>
#include <arm/pmap.h>

kern_return_t arm64_lock_test(void);
kern_return_t arm64_munger_test(void);
kern_return_t ex_cb_test(void);
kern_return_t arm64_pan_test(void);
kern_return_t arm64_late_pan_test(void);
#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
kern_return_t arm64_ropjop_test(void);
#endif
#if HAS_TWO_STAGE_SPR_LOCK
kern_return_t arm64_spr_lock_test(void);
extern void arm64_msr_lock_test(uint64_t);
#endif

// exception handler ignores this fault address during PAN test
#if __ARM_PAN_AVAILABLE__
const uint64_t pan_ro_value = 0xFEEDB0B0DEADBEEF;
vm_offset_t pan_test_addr = 0;
vm_offset_t pan_ro_addr = 0;
volatile int pan_exception_level = 0;
volatile char pan_fault_value = 0;
#endif

#include <libkern/OSAtomic.h>
#define LOCK_TEST_ITERATIONS 50
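/*
 * Shared state for the lock tests: lt_counter counts completed critical
 * sections, lt_max_holders records the largest number of simultaneous
 * holders observed, and lt_done_threads / lt_target_done_threads are used
 * to wait for all test threads to finish.
 */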
static hw_lock_data_t lt_hw_lock;
static lck_spin_t lt_lck_spin_t;
static lck_mtx_t lt_mtx;
static lck_rw_t lt_rwlock;
static volatile uint32_t lt_counter = 0;
static volatile int lt_spinvolatile;
static volatile uint32_t lt_max_holders = 0;
static volatile uint32_t lt_upgrade_holders = 0;
static volatile uint32_t lt_max_upgrade_holders = 0;
static volatile uint32_t lt_num_holders = 0;
static volatile uint32_t lt_done_threads;
static volatile uint32_t lt_target_done_threads;
static volatile uint32_t lt_cpu_bind_id = 0;

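/*
 * Helpers used while holding a blocking lock (mutex or rwlock): they adjust
 * lt_num_holders under lt_hw_lock and record the high-water mark in
 * lt_max_holders, which the tests later compare against the expected
 * maximum number of concurrent holders.
 */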
static void
lt_note_another_blocking_lock_holder()
{
    hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
    lt_num_holders++;
    lt_max_holders = (lt_max_holders < lt_num_holders) ? lt_num_holders : lt_max_holders;
    hw_lock_unlock(&lt_hw_lock);
}

static void
lt_note_blocking_lock_release()
{
    hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
    lt_num_holders--;
    hw_lock_unlock(&lt_hw_lock);
}

static void
lt_spin_a_little_bit()
{
    uint32_t i;

    for (i = 0; i < 10000; i++) {
        lt_spinvolatile++;
    }
}

static void
lt_sleep_a_little_bit()
{
    delay(100);
}

static void
lt_grab_mutex()
{
    lck_mtx_lock(&lt_mtx);
    lt_note_another_blocking_lock_holder();
    lt_sleep_a_little_bit();
    lt_counter++;
    lt_note_blocking_lock_release();
    lck_mtx_unlock(&lt_mtx);
}

static void
lt_grab_mutex_with_try()
{
    while (0 == lck_mtx_try_lock(&lt_mtx)) {
        ;
    }
    lt_note_another_blocking_lock_holder();
    lt_sleep_a_little_bit();
    lt_counter++;
    lt_note_blocking_lock_release();
    lck_mtx_unlock(&lt_mtx);
}

static void
lt_grab_rw_exclusive()
{
    lck_rw_lock_exclusive(&lt_rwlock);
    lt_note_another_blocking_lock_holder();
    lt_sleep_a_little_bit();
    lt_counter++;
    lt_note_blocking_lock_release();
    lck_rw_done(&lt_rwlock);
}

static void
lt_grab_rw_exclusive_with_try()
{
    while (0 == lck_rw_try_lock_exclusive(&lt_rwlock)) {
        lt_sleep_a_little_bit();
    }

    lt_note_another_blocking_lock_holder();
    lt_sleep_a_little_bit();
    lt_counter++;
    lt_note_blocking_lock_release();
    lck_rw_done(&lt_rwlock);
}

/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
 * static void
 * lt_grab_rw_shared()
 * {
 *      lck_rw_lock_shared(&lt_rwlock);
 *      lt_counter++;
 *
 *      lt_note_another_blocking_lock_holder();
 *      lt_sleep_a_little_bit();
 *      lt_note_blocking_lock_release();
 *
 *      lck_rw_done(&lt_rwlock);
 * }
 */

/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
 * static void
 * lt_grab_rw_shared_with_try()
 * {
 *      while(0 == lck_rw_try_lock_shared(&lt_rwlock));
 *      lt_counter++;
 *
 *      lt_note_another_blocking_lock_holder();
 *      lt_sleep_a_little_bit();
 *      lt_note_blocking_lock_release();
 *
 *      lck_rw_done(&lt_rwlock);
 * }
 */

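/*
 * Take the rwlock shared, attempt to upgrade to exclusive (falling back to
 * an exclusive trylock or a blocking exclusive grab if the upgrade fails),
 * then downgrade back to shared before releasing. lt_max_upgrade_holders
 * should never exceed 1, since only one thread can hold the lock exclusively.
 */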
static void
lt_upgrade_downgrade_rw()
{
    boolean_t upgraded, success;

    success = lck_rw_try_lock_shared(&lt_rwlock);
    if (!success) {
        lck_rw_lock_shared(&lt_rwlock);
    }

    lt_note_another_blocking_lock_holder();
    lt_sleep_a_little_bit();
    lt_note_blocking_lock_release();

    upgraded = lck_rw_lock_shared_to_exclusive(&lt_rwlock);
    if (!upgraded) {
        success = lck_rw_try_lock_exclusive(&lt_rwlock);

        if (!success) {
            lck_rw_lock_exclusive(&lt_rwlock);
        }
    }

    lt_upgrade_holders++;
    if (lt_upgrade_holders > lt_max_upgrade_holders) {
        lt_max_upgrade_holders = lt_upgrade_holders;
    }

    lt_counter++;
    lt_sleep_a_little_bit();

    lt_upgrade_holders--;

    lck_rw_lock_exclusive_to_shared(&lt_rwlock);

    lt_spin_a_little_bit();
    lck_rw_done(&lt_rwlock);
}


static void
lt_grab_hw_lock()
{
    hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
    lt_counter++;
    lt_spin_a_little_bit();
    hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_hw_lock_with_try()
{
    while (0 == hw_lock_try(&lt_hw_lock, LCK_GRP_NULL)) {
        ;
    }
    lt_counter++;
    lt_spin_a_little_bit();
    hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_hw_lock_with_to()
{
    while (0 == hw_lock_to(&lt_hw_lock, LockTimeOut, LCK_GRP_NULL)) {
        mp_enable_preemption();
    }
    lt_counter++;
    lt_spin_a_little_bit();
    hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_spin_lock()
{
    lck_spin_lock(&lt_lck_spin_t);
    lt_counter++;
    lt_spin_a_little_bit();
    lck_spin_unlock(&lt_lck_spin_t);
}

static void
lt_grab_spin_lock_with_try()
{
    while (0 == lck_spin_try_lock(&lt_lck_spin_t)) {
        ;
    }
    lt_counter++;
    lt_spin_a_little_bit();
    lck_spin_unlock(&lt_lck_spin_t);
}

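/*
 * Handshake state for the trylock-contention tests: the main test thread
 * sets lt_thread_lock_grabbed once it holds the lock under test, and the
 * helper thread stores its try/timeout result in lt_thread_lock_success.
 */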
static volatile boolean_t lt_thread_lock_grabbed;
static volatile boolean_t lt_thread_lock_success;

static void
lt_reset()
{
    lt_counter = 0;
    lt_max_holders = 0;
    lt_num_holders = 0;
    lt_max_upgrade_holders = 0;
    lt_upgrade_holders = 0;
    lt_done_threads = 0;
    lt_target_done_threads = 0;
    lt_cpu_bind_id = 0;

    OSMemoryBarrier();
}

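/*
 * Helper-thread bodies: wait until the main thread signals that it holds
 * the lock, then attempt the lock with a timeout (hw_lock_to) or a trylock
 * (lck_spin_try_lock) and record whether the attempt succeeded.
 */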
static void
lt_trylock_hw_lock_with_to()
{
    OSMemoryBarrier();
    while (!lt_thread_lock_grabbed) {
        lt_sleep_a_little_bit();
        OSMemoryBarrier();
    }
    lt_thread_lock_success = hw_lock_to(&lt_hw_lock, 100, LCK_GRP_NULL);
    OSMemoryBarrier();
    mp_enable_preemption();
}

static void
lt_trylock_spin_try_lock()
{
    OSMemoryBarrier();
    while (!lt_thread_lock_grabbed) {
        lt_sleep_a_little_bit();
        OSMemoryBarrier();
    }
    lt_thread_lock_success = lck_spin_try_lock(&lt_lck_spin_t);
    OSMemoryBarrier();
}

static void
lt_trylock_thread(void *arg, wait_result_t wres __unused)
{
    void (*func)(void) = (void (*)(void))arg;

    func();

    OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

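/* Spawn a detached kernel thread that runs func once and then increments
 * lt_done_threads so lt_wait_for_lock_test_threads() can observe completion. */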
static void
lt_start_trylock_thread(thread_continue_t func)
{
    thread_t thread;
    kern_return_t kr;

    kr = kernel_thread_start(lt_trylock_thread, func, &thread);
    assert(kr == KERN_SUCCESS);

    thread_deallocate(thread);
}

static void
lt_wait_for_lock_test_threads()
{
    OSMemoryBarrier();
    /* Spin to reduce dependencies */
    while (lt_done_threads < lt_target_done_threads) {
        lt_sleep_a_little_bit();
        OSMemoryBarrier();
    }
    OSMemoryBarrier();
}

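/*
 * Exercise the try-lock and timed-lock variants of the mutex, rwlock,
 * hw_lock, and spin lock APIs, checking that a second acquisition attempt
 * on an already-held lock fails (or times out) as expected. On single-CPU
 * configurations, preemption is briefly re-enabled so the helper thread can
 * run and time out.
 */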
static kern_return_t
lt_test_trylocks()
{
    boolean_t success;
    extern unsigned int real_ncpus;

    /*
     * First mtx try lock succeeds, second fails.
     */
    success = lck_mtx_try_lock(&lt_mtx);
    T_ASSERT_NOTNULL(success, "First mtx try lock");
    success = lck_mtx_try_lock(&lt_mtx);
    T_ASSERT_NULL(success, "Second mtx try lock for a locked mtx");
    lck_mtx_unlock(&lt_mtx);

    /*
     * After regular grab, can't try lock.
     */
    lck_mtx_lock(&lt_mtx);
    success = lck_mtx_try_lock(&lt_mtx);
    T_ASSERT_NULL(success, "try lock should fail after regular lck_mtx_lock");
    lck_mtx_unlock(&lt_mtx);

    /*
     * Two shared try locks on a previously unheld rwlock succeed, and a
     * subsequent exclusive attempt fails.
     */
    success = lck_rw_try_lock_shared(&lt_rwlock);
    T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
    success = lck_rw_try_lock_shared(&lt_rwlock);
    T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
    success = lck_rw_try_lock_exclusive(&lt_rwlock);
    T_ASSERT_NULL(success, "exclusive lock attempt on previously held lock should fail");
    lck_rw_done(&lt_rwlock);
    lck_rw_done(&lt_rwlock);

    /*
     * After regular shared grab, can trylock
     * for shared but not for exclusive.
     */
    lck_rw_lock_shared(&lt_rwlock);
    success = lck_rw_try_lock_shared(&lt_rwlock);
    T_ASSERT_NOTNULL(success, "After regular shared grab another shared try lock should succeed.");
    success = lck_rw_try_lock_exclusive(&lt_rwlock);
    T_ASSERT_NULL(success, "After regular shared grab an exclusive lock attempt should fail.");
    lck_rw_done(&lt_rwlock);
    lck_rw_done(&lt_rwlock);

    /*
     * An exclusive try lock succeeds, subsequent shared and exclusive
     * attempts fail.
     */
    success = lck_rw_try_lock_exclusive(&lt_rwlock);
    T_ASSERT_NOTNULL(success, "An exclusive try lock should succeed");
    success = lck_rw_try_lock_shared(&lt_rwlock);
    T_ASSERT_NULL(success, "try lock in shared mode attempt after an exclusive grab should fail");
    success = lck_rw_try_lock_exclusive(&lt_rwlock);
    T_ASSERT_NULL(success, "try lock in exclusive mode attempt after an exclusive grab should fail");
    lck_rw_done(&lt_rwlock);

    /*
     * After regular exclusive grab, neither kind of trylock succeeds.
     */
    lck_rw_lock_exclusive(&lt_rwlock);
    success = lck_rw_try_lock_shared(&lt_rwlock);
    T_ASSERT_NULL(success, "After regular exclusive grab, shared trylock should not succeed");
    success = lck_rw_try_lock_exclusive(&lt_rwlock);
    T_ASSERT_NULL(success, "After regular exclusive grab, exclusive trylock should not succeed");
    lck_rw_done(&lt_rwlock);

    /*
     * First spin lock attempts succeed, second attempts fail.
     */
    success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
    T_ASSERT_NOTNULL(success, "First spin lock attempts should succeed");
    success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
    T_ASSERT_NULL(success, "Second attempt to spin lock should fail");
    hw_lock_unlock(&lt_hw_lock);

    hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
    success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
    T_ASSERT_NULL(success, "After taking spin lock, trylock attempt should fail");
    hw_lock_unlock(&lt_hw_lock);

    lt_reset();
    lt_thread_lock_grabbed = false;
    lt_thread_lock_success = true;
    lt_target_done_threads = 1;
    OSMemoryBarrier();
    lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
    success = hw_lock_to(&lt_hw_lock, 100, LCK_GRP_NULL);
    T_ASSERT_NOTNULL(success, "First spin lock with timeout should succeed");
    if (real_ncpus == 1) {
        mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
    }
    OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
    lt_wait_for_lock_test_threads();
    T_ASSERT_NULL(lt_thread_lock_success, "Second spin lock with timeout should fail and timeout");
    if (real_ncpus == 1) {
        mp_disable_preemption(); /* don't double-enable when we unlock */
    }
    hw_lock_unlock(&lt_hw_lock);

    lt_reset();
    lt_thread_lock_grabbed = false;
    lt_thread_lock_success = true;
    lt_target_done_threads = 1;
    OSMemoryBarrier();
    lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
    hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
    if (real_ncpus == 1) {
        mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
    }
    OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
    lt_wait_for_lock_test_threads();
    T_ASSERT_NULL(lt_thread_lock_success, "after taking a spin lock, lock attempt with timeout should fail");
    if (real_ncpus == 1) {
        mp_disable_preemption(); /* don't double-enable when we unlock */
    }
    hw_lock_unlock(&lt_hw_lock);

    success = lck_spin_try_lock(&lt_lck_spin_t);
    T_ASSERT_NOTNULL(success, "spin trylock of previously unheld lock should succeed");
    success = lck_spin_try_lock(&lt_lck_spin_t);
    T_ASSERT_NULL(success, "spin trylock attempt of previously held lock (with trylock) should fail");
    lck_spin_unlock(&lt_lck_spin_t);

    lt_reset();
    lt_thread_lock_grabbed = false;
    lt_thread_lock_success = true;
    lt_target_done_threads = 1;
    lt_start_trylock_thread(lt_trylock_spin_try_lock);
    lck_spin_lock(&lt_lck_spin_t);
    if (real_ncpus == 1) {
        mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
    }
    OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
    lt_wait_for_lock_test_threads();
    T_ASSERT_NULL(lt_thread_lock_success, "spin trylock attempt of previously held lock should fail");
    if (real_ncpus == 1) {
        mp_disable_preemption(); /* don't double-enable when we unlock */
    }
    lck_spin_unlock(&lt_lck_spin_t);

    return KERN_SUCCESS;
}

static void
lt_thread(void *arg, wait_result_t wres __unused)
{
    void (*func)(void) = (void (*)(void))arg;
    uint32_t i;

    for (i = 0; i < LOCK_TEST_ITERATIONS; i++) {
        func();
    }

    OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_start_lock_thread(thread_continue_t func)
{
    thread_t thread;
    kern_return_t kr;

    kr = kernel_thread_start(lt_thread, func, &thread);
    assert(kr == KERN_SUCCESS);

    thread_deallocate(thread);
}


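/*
 * Top-level lock test: initializes the test locks, then runs each scenario
 * (trylocks, uncontended and contended mutexes, rwlocks, upgrades and
 * downgrades, hw locks, and spin locks) with LOCK_TEST_ITERATIONS
 * acquisitions per thread, checking the final counter values and the
 * holder high-water marks.
 */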
static kern_return_t
lt_test_locks()
{
    kern_return_t kr = KERN_SUCCESS;
    lck_grp_attr_t *lga = lck_grp_attr_alloc_init();
    lck_grp_t *lg = lck_grp_alloc_init("lock test", lga);

    lck_mtx_init(&lt_mtx, lg, LCK_ATTR_NULL);
    lck_rw_init(&lt_rwlock, lg, LCK_ATTR_NULL);
    lck_spin_init(&lt_lck_spin_t, lg, LCK_ATTR_NULL);
    hw_lock_init(&lt_hw_lock);

    T_LOG("Testing locks.");

    /* Try locks (custom) */
    lt_reset();

    T_LOG("Running try lock test.");
    kr = lt_test_trylocks();
    T_EXPECT_NULL(kr, "try lock test failed.");

    /* Uncontended mutex */
    T_LOG("Running uncontended mutex test.");
    lt_reset();
    lt_target_done_threads = 1;
    lt_start_lock_thread(lt_grab_mutex);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

    /* Contended mutex */
606 T_LOG("Running contended mutex test.");
607 lt_reset();
608 lt_target_done_threads = 3;
609 lt_start_lock_thread(lt_grab_mutex);
610 lt_start_lock_thread(lt_grab_mutex);
611 lt_start_lock_thread(lt_grab_mutex);
612 lt_wait_for_lock_test_threads();
613 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
614 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
615
616 /* Contended mutex: try locks*/
617 T_LOG("Running contended mutex trylock test.");
618 lt_reset();
619 lt_target_done_threads = 3;
620 lt_start_lock_thread(lt_grab_mutex_with_try);
621 lt_start_lock_thread(lt_grab_mutex_with_try);
622 lt_start_lock_thread(lt_grab_mutex_with_try);
623 lt_wait_for_lock_test_threads();
624 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
625 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
626
627 /* Uncontended exclusive rwlock */
628 T_LOG("Running uncontended exclusive rwlock test.");
629 lt_reset();
630 lt_target_done_threads = 1;
631 lt_start_lock_thread(lt_grab_rw_exclusive);
632 lt_wait_for_lock_test_threads();
633 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
634 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
635
636 /* Uncontended shared rwlock */
637
638 /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
639 * T_LOG("Running uncontended shared rwlock test.");
640 * lt_reset();
641 * lt_target_done_threads = 1;
642 * lt_start_lock_thread(lt_grab_rw_shared);
643 * lt_wait_for_lock_test_threads();
644 * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
645 * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
646 */
647
648 /* Contended exclusive rwlock */
649 T_LOG("Running contended exclusive rwlock test.");
650 lt_reset();
651 lt_target_done_threads = 3;
652 lt_start_lock_thread(lt_grab_rw_exclusive);
653 lt_start_lock_thread(lt_grab_rw_exclusive);
654 lt_start_lock_thread(lt_grab_rw_exclusive);
655 lt_wait_for_lock_test_threads();
656 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
657 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
658
659 /* One shared, two exclusive */
660 /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
661 * T_LOG("Running test with one shared and two exclusive rw lock threads.");
662 * lt_reset();
663 * lt_target_done_threads = 3;
664 * lt_start_lock_thread(lt_grab_rw_shared);
665 * lt_start_lock_thread(lt_grab_rw_exclusive);
666 * lt_start_lock_thread(lt_grab_rw_exclusive);
667 * lt_wait_for_lock_test_threads();
668 * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
669 * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
670 */
671
672 /* Four shared */
673 /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
674 * T_LOG("Running test with four shared holders.");
675 * lt_reset();
676 * lt_target_done_threads = 4;
677 * lt_start_lock_thread(lt_grab_rw_shared);
678 * lt_start_lock_thread(lt_grab_rw_shared);
679 * lt_start_lock_thread(lt_grab_rw_shared);
680 * lt_start_lock_thread(lt_grab_rw_shared);
681 * lt_wait_for_lock_test_threads();
682 * T_EXPECT_LE_UINT(lt_max_holders, 4, NULL);
683 */
684
685 /* Three doing upgrades and downgrades */
686 T_LOG("Running test with threads upgrading and downgrading.");
687 lt_reset();
688 lt_target_done_threads = 3;
689 lt_start_lock_thread(lt_upgrade_downgrade_rw);
690 lt_start_lock_thread(lt_upgrade_downgrade_rw);
691 lt_start_lock_thread(lt_upgrade_downgrade_rw);
692 lt_wait_for_lock_test_threads();
693 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
694 T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
695 T_EXPECT_EQ_UINT(lt_max_upgrade_holders, 1, NULL);
696
697 /* Uncontended - exclusive trylocks */
698 T_LOG("Running test with single thread doing exclusive rwlock trylocks.");
699 lt_reset();
700 lt_target_done_threads = 1;
701 lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
702 lt_wait_for_lock_test_threads();
703 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
704 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
705
706 /* Uncontended - shared trylocks */
707 /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
708 * T_LOG("Running test with single thread doing shared rwlock trylocks.");
709 * lt_reset();
710 * lt_target_done_threads = 1;
711 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
712 * lt_wait_for_lock_test_threads();
713 * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
714 * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
715 */
716
717 /* Three doing exclusive trylocks */
718 T_LOG("Running test with threads doing exclusive rwlock trylocks.");
719 lt_reset();
720 lt_target_done_threads = 3;
721 lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
722 lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
723 lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
724 lt_wait_for_lock_test_threads();
725 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
726 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
727
728 /* Three doing shared trylocks */
729 /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
730 * T_LOG("Running test with threads doing shared rwlock trylocks.");
731 * lt_reset();
732 * lt_target_done_threads = 3;
733 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
734 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
735 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
736 * lt_wait_for_lock_test_threads();
737 * T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
738 * T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
739 */
740
741 /* Three doing various trylocks */
742 /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
743 * T_LOG("Running test with threads doing mixed rwlock trylocks.");
744 * lt_reset();
745 * lt_target_done_threads = 4;
746 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
747 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
748 * lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
749 * lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
750 * lt_wait_for_lock_test_threads();
751 * T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
752 * T_EXPECT_LE_UINT(lt_max_holders, 2, NULL);
753 */
754
755 /* HW locks */
756 T_LOG("Running test with hw_lock_lock()");
757 lt_reset();
758 lt_target_done_threads = 3;
759 lt_start_lock_thread(lt_grab_hw_lock);
760 lt_start_lock_thread(lt_grab_hw_lock);
761 lt_start_lock_thread(lt_grab_hw_lock);
762 lt_wait_for_lock_test_threads();
763 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
764
765
766 /* HW locks: trylocks */
767 T_LOG("Running test with hw_lock_try()");
768 lt_reset();
769 lt_target_done_threads = 3;
770 lt_start_lock_thread(lt_grab_hw_lock_with_try);
771 lt_start_lock_thread(lt_grab_hw_lock_with_try);
772 lt_start_lock_thread(lt_grab_hw_lock_with_try);
773 lt_wait_for_lock_test_threads();
774 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
775
776 /* HW locks: with timeout */
777 T_LOG("Running test with hw_lock_to()");
778 lt_reset();
779 lt_target_done_threads = 3;
780 lt_start_lock_thread(lt_grab_hw_lock_with_to);
781 lt_start_lock_thread(lt_grab_hw_lock_with_to);
782 lt_start_lock_thread(lt_grab_hw_lock_with_to);
783 lt_wait_for_lock_test_threads();
784 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
785
786 /* Spin locks */
787 T_LOG("Running test with lck_spin_lock()");
788 lt_reset();
789 lt_target_done_threads = 3;
790 lt_start_lock_thread(lt_grab_spin_lock);
791 lt_start_lock_thread(lt_grab_spin_lock);
792 lt_start_lock_thread(lt_grab_spin_lock);
793 lt_wait_for_lock_test_threads();
794 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
795
796 /* Spin locks: trylocks */
797 T_LOG("Running test with lck_spin_try_lock()");
798 lt_reset();
799 lt_target_done_threads = 3;
800 lt_start_lock_thread(lt_grab_spin_lock_with_try);
801 lt_start_lock_thread(lt_grab_spin_lock_with_try);
802 lt_start_lock_thread(lt_grab_spin_lock_with_try);
803 lt_wait_for_lock_test_threads();
804 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
805
806 return KERN_SUCCESS;
807 }
808
809 #define MT_MAX_ARGS 8
810 #define MT_INITIAL_VALUE 0xfeedbeef
811 #define MT_W_VAL (0x00000000feedbeefULL) /* Drop in zeros */
812 #define MT_S_VAL (0xfffffffffeedbeefULL) /* High bit is 1, so sign-extends as negative */
813 #define MT_L_VAL (((uint64_t)MT_INITIAL_VALUE) | (((uint64_t)MT_INITIAL_VALUE) << 32)) /* Two back-to-back */
814
815 typedef void (*sy_munge_t)(void*);
816
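/*
 * Table-driven munger tests. Each entry names a munger routine, the number
 * of 32-bit input words it consumes, the number of 64-bit output arguments
 * it produces, and the expected output pattern ('w' = zero-extended word,
 * 's' = sign-extended word, 'l' = 64-bit long built from two words).
 */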
#define MT_FUNC(x) #x, x
struct munger_test {
    const char *mt_name;
    sy_munge_t mt_func;
    uint32_t mt_in_words;
    uint32_t mt_nout;
    uint64_t mt_expected[MT_MAX_ARGS];
} munger_tests[] = {
    {MT_FUNC(munge_w), 1, 1, {MT_W_VAL}},
    {MT_FUNC(munge_ww), 2, 2, {MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_www), 3, 3, {MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwww), 4, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwww), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwww), 6, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwwww), 7, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwwwww), 8, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wl), 3, 2, {MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwl), 4, 3, {MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwlll), 8, 5, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wlw), 4, 3, {MT_W_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wlwwwll), 10, 7, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wlwwwllw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wlwwlwlw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wll), 5, 3, {MT_W_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wlll), 7, 4, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wllwwll), 11, 7, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwlw), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwlww), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwl), 5, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwwlw), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwl), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwwwl), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwwwlww), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwwllw), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwwlll), 11, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwwwwl), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwwwwlw), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwwwll), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wsw), 3, 3, {MT_W_VAL, MT_S_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wws), 3, 3, {MT_W_VAL, MT_W_VAL, MT_S_VAL}},
    {MT_FUNC(munge_wwwsw), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_S_VAL, MT_W_VAL}},
    {MT_FUNC(munge_llllll), 12, 6, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_l), 2, 1, {MT_L_VAL}},
    {MT_FUNC(munge_lw), 3, 2, {MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_lwww), 5, 4, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_lwwwwwww), 9, 8, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wlwwwl), 8, 6, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwlwwwl), 9, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}
};

#define MT_TEST_COUNT (sizeof(munger_tests) / sizeof(struct munger_test))

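/* Fill the first in_words 32-bit slots of the argument buffer with
 * MT_INITIAL_VALUE and zero the remainder, so each munger starts from a
 * known input pattern. */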
static void
mt_reset(uint32_t in_words, size_t total_size, uint32_t *data)
{
    uint32_t i;

    for (i = 0; i < in_words; i++) {
        data[i] = MT_INITIAL_VALUE;
    }

    if (in_words * sizeof(uint32_t) < total_size) {
        bzero(&data[in_words], total_size - in_words * sizeof(uint32_t));
    }
}

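/* Run every munger in munger_tests against a freshly reset buffer and
 * compare each 64-bit output slot with the expected value. */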
static void
mt_test_mungers()
{
    uint64_t data[MT_MAX_ARGS];
    uint32_t i, j;

    for (i = 0; i < MT_TEST_COUNT; i++) {
        struct munger_test *test = &munger_tests[i];
        int pass = 1;

        T_LOG("Testing %s", test->mt_name);

        mt_reset(test->mt_in_words, sizeof(data), (uint32_t*)data);
        test->mt_func(data);

        for (j = 0; j < test->mt_nout; j++) {
            if (data[j] != test->mt_expected[j]) {
                T_FAIL("Index %d: expected %llx, got %llx.", j, test->mt_expected[j], data[j]);
                pass = 0;
            }
        }
        if (pass) {
            T_PASS(test->mt_name);
        }
    }
}

/* Exception Callback Test */
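/*
 * Test callback registered for the EXCB_CLASS_TEST* classes: it records the
 * faulting address into the caller-supplied context and returns a
 * class-specific action so ex_cb_test() can verify dispatch.
 */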
static ex_cb_action_t
excb_test_action(
    ex_cb_class_t           cb_class,
    void                    *refcon,
    const ex_cb_state_t     *state
    )
{
    ex_cb_state_t *context = (ex_cb_state_t *)refcon;

    if ((NULL == refcon) || (NULL == state)) {
        return EXCB_ACTION_TEST_FAIL;
    }

    context->far = state->far;

    switch (cb_class) {
    case EXCB_CLASS_TEST1:
        return EXCB_ACTION_RERUN;
    case EXCB_CLASS_TEST2:
        return EXCB_ACTION_NONE;
    default:
        return EXCB_ACTION_TEST_FAIL;
    }
}


kern_return_t
ex_cb_test()
{
    const vm_offset_t far1 = 0xdead0001;
    const vm_offset_t far2 = 0xdead0002;
    kern_return_t kr;
    ex_cb_state_t test_context_1 = {0xdeadbeef};
    ex_cb_state_t test_context_2 = {0xdeadbeef};
    ex_cb_action_t action;

    T_LOG("Testing Exception Callback.");

    T_LOG("Running registration test.");

    kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
    T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST1 exception callback");
    kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
    T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST2 exception callback");

    kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
    T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST2 exception callback");
    kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
    T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST1 exception callback");

    T_LOG("Running invocation test.");

    action = ex_cb_invoke(EXCB_CLASS_TEST1, far1);
    T_ASSERT(EXCB_ACTION_RERUN == action, NULL);
    T_ASSERT(far1 == test_context_1.far, NULL);

    action = ex_cb_invoke(EXCB_CLASS_TEST2, far2);
    T_ASSERT(EXCB_ACTION_NONE == action, NULL);
    T_ASSERT(far2 == test_context_2.far, NULL);

    action = ex_cb_invoke(EXCB_CLASS_TEST3, 0);
    T_ASSERT(EXCB_ACTION_NONE == action, NULL);

    return KERN_SUCCESS;
}

#if defined(HAS_APPLE_PAC)

/*
 *
 * arm64_ropjop_test - basic xnu ROP/JOP test plan
 *
 * - assert ROP/JOP configured and running status match
 * - assert all AppleMode ROP/JOP features enabled
 * - ensure ROP/JOP keys are set and diversified
 * - sign a KVA (a kernel virtual address), assert it was signed (changed)
 * - authenticate the newly signed KVA
 * - assert the authed KVA is the original KVA
 * - corrupt a signed ptr, auth it, ensure auth failed
 * - assert the failed authIB of corrupted pointer is tagged
 *
 */

kern_return_t
arm64_ropjop_test()
{
    T_LOG("Testing ROP/JOP");

    /* how is ROP/JOP configured */
    boolean_t config_rop_enabled = TRUE;
    boolean_t config_jop_enabled = !(BootArgs->bootFlags & kBootFlagsDisableJOP);


    /* assert all AppleMode ROP/JOP features enabled */
    uint64_t apctl = __builtin_arm_rsr64(ARM64_REG_APCTL_EL1);
#if __APSTS_SUPPORTED__
    uint64_t apsts = __builtin_arm_rsr64(ARM64_REG_APSTS_EL1);
    T_ASSERT(apsts & APSTS_EL1_MKEYVld, NULL);
#else
    T_ASSERT(apctl & APCTL_EL1_MKEYVld, NULL);
#endif /* __APSTS_SUPPORTED__ */
    T_ASSERT(apctl & APCTL_EL1_AppleMode, NULL);
    T_ASSERT(apctl & APCTL_EL1_KernKeyEn, NULL);

    /* ROP/JOP keys enabled current status */
    bool status_jop_enabled, status_rop_enabled;
#if __APSTS_SUPPORTED__ /* H13+ */
    // TODO: update unit test to understand ROP/JOP enabled config for H13+
    status_jop_enabled = status_rop_enabled = apctl & APCTL_EL1_EnAPKey1;
#elif __APCFG_SUPPORTED__ /* H12 */
    uint64_t apcfg_el1 = __builtin_arm_rsr64(APCFG_EL1);
    status_jop_enabled = status_rop_enabled = apcfg_el1 & APCFG_EL1_ELXENKEY;
#else /* !__APCFG_SUPPORTED__ H11 */
    uint64_t sctlr_el1 = __builtin_arm_rsr64("SCTLR_EL1");
    status_jop_enabled = sctlr_el1 & SCTLR_PACIA_ENABLED;
    status_rop_enabled = sctlr_el1 & SCTLR_PACIB_ENABLED;
#endif /* __APSTS_SUPPORTED__ */

    /* assert configured and running status match */
    T_ASSERT(config_rop_enabled == status_rop_enabled, NULL);
    T_ASSERT(config_jop_enabled == status_jop_enabled, NULL);


    if (config_jop_enabled) {
        /* jop key */
        uint64_t apiakey_hi = __builtin_arm_rsr64(ARM64_REG_APIAKEYHI_EL1);
        uint64_t apiakey_lo = __builtin_arm_rsr64(ARM64_REG_APIAKEYLO_EL1);

        /* ensure JOP key is set and diversified */
        T_EXPECT(apiakey_hi != KERNEL_ROP_ID && apiakey_lo != KERNEL_ROP_ID, NULL);
        T_EXPECT(apiakey_hi != 0 && apiakey_lo != 0, NULL);
    }

    if (config_rop_enabled) {
        /* rop key */
        uint64_t apibkey_hi = __builtin_arm_rsr64(ARM64_REG_APIBKEYHI_EL1);
        uint64_t apibkey_lo = __builtin_arm_rsr64(ARM64_REG_APIBKEYLO_EL1);

        /* ensure ROP key is set and diversified */
        T_EXPECT(apibkey_hi != KERNEL_ROP_ID && apibkey_lo != KERNEL_ROP_ID, NULL);
        T_EXPECT(apibkey_hi != 0 && apibkey_lo != 0, NULL);

        /* sign a KVA (here, the kernel address of a local variable) */
        uint64_t kva_signed = (uint64_t) ptrauth_sign_unauthenticated((void *)&config_rop_enabled, ptrauth_key_asib, 0);

        /* assert it was signed (changed) */
        T_EXPECT(kva_signed != (uint64_t)&config_rop_enabled, NULL);

        /* authenticate the newly signed KVA */
        uint64_t kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_signed, ptrauth_key_asib, 0);

        /* assert the authed KVA is the original KVA */
        T_EXPECT(kva_authed == (uint64_t)&config_rop_enabled, NULL);

        /* corrupt a signed ptr, auth it, ensure auth failed */
        uint64_t kva_corrupted = kva_signed ^ 1;

        /* authenticate the corrupted pointer */
        kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_corrupted, ptrauth_key_asib, 0);

        /* when AuthIB fails, bits 63:62 will be set to 2'b10 */
        uint64_t auth_fail_mask = 3ULL << 61;
        uint64_t authib_fail = 2ULL << 61;

        /* assert the failed authIB of corrupted pointer is tagged */
        T_EXPECT((kva_authed & auth_fail_mask) == authib_fail, NULL);
    }

    return KERN_SUCCESS;
}
#endif /* defined(HAS_APPLE_PAC) */

#if __ARM_PAN_AVAILABLE__

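/*
 * Late PAN test: arm64_late_pan_test() spawns a kernel thread that binds to
 * each processor in turn and reruns arm64_pan_test() there, so PAN behavior
 * is validated on every CPU rather than only on the boot CPU.
 */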
struct pan_test_thread_args {
    volatile bool join;
};

static void
arm64_pan_test_thread(void *arg, wait_result_t __unused wres)
{
    T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);

    struct pan_test_thread_args *args = arg;

    for (processor_t p = processor_list; p != NULL; p = p->processor_list) {
        thread_bind(p);
        thread_block(THREAD_CONTINUE_NULL);
        kprintf("Running PAN test on cpu %d\n", p->cpu_id);
        arm64_pan_test();
    }

    /* unbind thread from specific cpu */
    thread_bind(PROCESSOR_NULL);
    thread_block(THREAD_CONTINUE_NULL);

    while (!args->join) {
        ;
    }

    thread_wakeup(args);
}

kern_return_t
arm64_late_pan_test()
{
    thread_t thread;
    kern_return_t kr;

    struct pan_test_thread_args args;
    args.join = false;

    kr = kernel_thread_start(arm64_pan_test_thread, &args, &thread);
    assert(kr == KERN_SUCCESS);

    thread_deallocate(thread);

    assert_wait(&args, THREAD_UNINT);
    args.join = true;
    thread_block(THREAD_CONTINUE_NULL);
    return KERN_SUCCESS;
}

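/*
 * Core PAN test: with PAN enabled, a kernel load from a user-accessible
 * alias of the comm page (pan_test_addr) must fault; the exception handler
 * recognizes the address, disables PAN, and reruns the access. A store to
 * the read-only pan_ro_addr then forces a permission fault while PAN is
 * disabled, checking that the exception path re-enables PAN.
 * pan_exception_level counts how many times the handler ran.
 */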
kern_return_t
arm64_pan_test()
{
    vm_offset_t priv_addr = _COMM_PAGE_SIGNATURE;

    T_LOG("Testing PAN.");


    T_ASSERT((__builtin_arm_rsr("SCTLR_EL1") & SCTLR_PAN_UNCHANGED) == 0, "SCTLR_EL1.SPAN must be cleared");

    T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);

    pan_exception_level = 0;
    pan_fault_value = 0xDE;
    // convert priv_addr to one that is accessible from user mode
    pan_test_addr = priv_addr + _COMM_HIGH_PAGE64_BASE_ADDRESS -
        _COMM_PAGE_START_ADDRESS;

    // Below should trigger a PAN exception as pan_test_addr is accessible
    // in user mode
    // The exception handler, upon recognizing the fault address is pan_test_addr,
    // will disable PAN and rerun this instruction successfully
    T_ASSERT(*(char *)pan_test_addr == *(char *)priv_addr, NULL);

    T_ASSERT(pan_exception_level == 2, NULL);

    T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

    T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

    pan_exception_level = 0;
    pan_fault_value = 0xAD;
    pan_ro_addr = (vm_offset_t) &pan_ro_value;

    // Force a permission fault while PAN is disabled to make sure PAN is
    // re-enabled during the exception handler.
    *((volatile uint64_t*)pan_ro_addr) = 0xFEEDFACECAFECAFE;

    T_ASSERT(pan_exception_level == 2, NULL);

    T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

    T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

    pan_test_addr = 0;
    pan_ro_addr = 0;

    __builtin_arm_wsr("pan", 1);

    return KERN_SUCCESS;
}
#endif /* __ARM_PAN_AVAILABLE__ */


kern_return_t
arm64_lock_test()
{
    return lt_test_locks();
}

kern_return_t
arm64_munger_test()
{
    mt_test_mungers();
    return 0;
}


#if HAS_TWO_STAGE_SPR_LOCK

#define STR1(x) #x
#define STR(x) STR1(x)

volatile vm_offset_t spr_lock_test_addr;
volatile uint32_t spr_lock_exception_esr;

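/*
 * SPR lock test: on each CPU, read an implementation-defined register
 * (ARM64_REG_HID8), record the address of arm64_msr_lock_test in
 * spr_lock_test_addr so the exception handler can recognize the expected
 * fault, then attempt to write the register. The write should raise a
 * synchronous abort (captured in spr_lock_exception_esr) and leave the
 * register value unchanged.
 */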
kern_return_t
arm64_spr_lock_test()
{
    processor_t p;

    for (p = processor_list; p != NULL; p = p->processor_list) {
        thread_bind(p);
        thread_block(THREAD_CONTINUE_NULL);
        T_LOG("Running SPR lock test on cpu %d\n", p->cpu_id);

        uint64_t orig_value = __builtin_arm_rsr64(STR(ARM64_REG_HID8));
        spr_lock_test_addr = (vm_offset_t)VM_KERNEL_STRIP_PTR(arm64_msr_lock_test);
        spr_lock_exception_esr = 0;
        arm64_msr_lock_test(~orig_value);
        T_EXPECT(spr_lock_exception_esr != 0, "MSR write generated synchronous abort");

        uint64_t new_value = __builtin_arm_rsr64(STR(ARM64_REG_HID8));
        T_EXPECT(orig_value == new_value, "MSR write did not succeed");

        spr_lock_test_addr = 0;
    }

    /* unbind thread from specific cpu */
    thread_bind(PROCESSOR_NULL);
    thread_block(THREAD_CONTINUE_NULL);

    T_PASS("Done running SPR lock tests");

    return KERN_SUCCESS;
}

#endif /* HAS_TWO_STAGE_SPR_LOCK */