]>
Commit | Line | Data |
---|---|---|
5ba3f43e | 1 | /* |
cb323159 | 2 | * Copyright (c) 2011-2018 Apple Inc. All rights reserved. |
5ba3f43e A |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
14 | * | |
15 | * Please obtain a copy of the License at | |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
25 | * | |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ | |
27 | */ | |
28 | /* | |
29 | * @OSF_COPYRIGHT@ | |
30 | */ | |
31 | /* | |
32 | * Mach Operating System Copyright (c) 1991,1990,1989,1988,1987 Carnegie | |
33 | * Mellon University All Rights Reserved. | |
0a7de745 | 34 | * |
5ba3f43e A |
35 | * Permission to use, copy, modify and distribute this software and its |
36 | * documentation is hereby granted, provided that both the copyright notice | |
37 | * and this permission notice appear in all copies of the software, | |
38 | * derivative works or modified versions, and any portions thereof, and that | |
39 | * both notices appear in supporting documentation. | |
0a7de745 | 40 | * |
5ba3f43e A |
41 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. |
42 | * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES | |
43 | * WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. | |
0a7de745 | 44 | * |
5ba3f43e | 45 | * Carnegie Mellon requests users of this software to return to |
0a7de745 | 46 | * |
5ba3f43e A |
47 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU |
48 | * School of Computer Science Carnegie Mellon University Pittsburgh PA | |
49 | * 15213-3890 | |
0a7de745 | 50 | * |
5ba3f43e A |
51 | * any improvements or extensions that they make and grant Carnegie Mellon the |
52 | * rights to redistribute these changes. | |
53 | */ | |
54 | ||
55 | #include <mach_ldebug.h> | |
56 | ||
57 | #define LOCK_PRIVATE 1 | |
58 | ||
d9a64523 | 59 | #include <vm/pmap.h> |
5ba3f43e | 60 | #include <kern/kalloc.h> |
f427ee49 | 61 | #include <kern/cpu_number.h> |
5ba3f43e A |
62 | #include <kern/locks.h> |
63 | #include <kern/misc_protos.h> | |
64 | #include <kern/thread.h> | |
65 | #include <kern/processor.h> | |
66 | #include <kern/sched_prim.h> | |
5ba3f43e A |
67 | #include <kern/debug.h> |
68 | #include <string.h> | |
69 | #include <tests/xnupost.h> | |
70 | ||
0a7de745 | 71 | #if MACH_KDB |
5ba3f43e A |
72 | #include <ddb/db_command.h> |
73 | #include <ddb/db_output.h> | |
74 | #include <ddb/db_sym.h> | |
75 | #include <ddb/db_print.h> | |
0a7de745 | 76 | #endif /* MACH_KDB */ |
5ba3f43e A |
77 | |
78 | #include <sys/kdebug.h> | |
79 | #include <sys/munge.h> | |
80 | #include <machine/cpu_capabilities.h> | |
81 | #include <arm/cpu_data_internal.h> | |
d9a64523 | 82 | #include <arm/pmap.h> |
5ba3f43e | 83 | |
f427ee49 A |
84 | #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) |
85 | #include <arm64/amcc_rorgn.h> | |
86 | #endif // defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) | |
87 | ||
5ba3f43e A |
88 | kern_return_t arm64_lock_test(void); |
89 | kern_return_t arm64_munger_test(void); | |
90 | kern_return_t ex_cb_test(void); | |
91 | kern_return_t arm64_pan_test(void); | |
cb323159 A |
92 | kern_return_t arm64_late_pan_test(void); |
93 | #if defined(HAS_APPLE_PAC) | |
94 | #include <ptrauth.h> | |
95 | kern_return_t arm64_ropjop_test(void); | |
96 | #endif | |
c6bf4f31 A |
97 | #if defined(KERNEL_INTEGRITY_CTRR) |
98 | kern_return_t ctrr_test(void); | |
99 | kern_return_t ctrr_test_cpu(void); | |
100 | #endif | |
cb323159 A |
101 | #if HAS_TWO_STAGE_SPR_LOCK |
102 | kern_return_t arm64_spr_lock_test(void); | |
103 | extern void arm64_msr_lock_test(uint64_t); | |
104 | #endif | |
5ba3f43e A |
105 | |
106 | // exception handler ignores this fault address during PAN test | |
107 | #if __ARM_PAN_AVAILABLE__ | |
cc8bc92a A |
108 | const uint64_t pan_ro_value = 0xFEEDB0B0DEADBEEF; |
109 | vm_offset_t pan_test_addr = 0; | |
110 | vm_offset_t pan_ro_addr = 0; | |
111 | volatile int pan_exception_level = 0; | |
112 | volatile char pan_fault_value = 0; | |
5ba3f43e A |
113 | #endif |
114 | ||
115 | #include <libkern/OSAtomic.h> | |
116 | #define LOCK_TEST_ITERATIONS 50 | |
0a7de745 A |
117 | static hw_lock_data_t lt_hw_lock; |
118 | static lck_spin_t lt_lck_spin_t; | |
119 | static lck_mtx_t lt_mtx; | |
120 | static lck_rw_t lt_rwlock; | |
5ba3f43e | 121 | static volatile uint32_t lt_counter = 0; |
0a7de745 | 122 | static volatile int lt_spinvolatile; |
5ba3f43e A |
123 | static volatile uint32_t lt_max_holders = 0; |
124 | static volatile uint32_t lt_upgrade_holders = 0; | |
125 | static volatile uint32_t lt_max_upgrade_holders = 0; | |
126 | static volatile uint32_t lt_num_holders = 0; | |
127 | static volatile uint32_t lt_done_threads; | |
128 | static volatile uint32_t lt_target_done_threads; | |
129 | static volatile uint32_t lt_cpu_bind_id = 0; | |
130 | ||
131 | static void | |
0a7de745 | 132 | lt_note_another_blocking_lock_holder() |
5ba3f43e | 133 | { |
0a7de745 | 134 | hw_lock_lock(<_hw_lock, LCK_GRP_NULL); |
5ba3f43e A |
135 | lt_num_holders++; |
136 | lt_max_holders = (lt_max_holders < lt_num_holders) ? lt_num_holders : lt_max_holders; | |
137 | hw_lock_unlock(<_hw_lock); | |
138 | } | |
139 | ||
140 | static void | |
0a7de745 | 141 | lt_note_blocking_lock_release() |
5ba3f43e | 142 | { |
0a7de745 | 143 | hw_lock_lock(<_hw_lock, LCK_GRP_NULL); |
5ba3f43e A |
144 | lt_num_holders--; |
145 | hw_lock_unlock(<_hw_lock); | |
146 | } | |
147 | ||
148 | static void | |
0a7de745 | 149 | lt_spin_a_little_bit() |
5ba3f43e A |
150 | { |
151 | uint32_t i; | |
0a7de745 | 152 | |
5ba3f43e A |
153 | for (i = 0; i < 10000; i++) { |
154 | lt_spinvolatile++; | |
155 | } | |
156 | } | |
157 | ||
158 | static void | |
0a7de745 | 159 | lt_sleep_a_little_bit() |
5ba3f43e A |
160 | { |
161 | delay(100); | |
162 | } | |
163 | ||
164 | static void | |
0a7de745 | 165 | lt_grab_mutex() |
5ba3f43e A |
166 | { |
167 | lck_mtx_lock(<_mtx); | |
168 | lt_note_another_blocking_lock_holder(); | |
169 | lt_sleep_a_little_bit(); | |
170 | lt_counter++; | |
171 | lt_note_blocking_lock_release(); | |
172 | lck_mtx_unlock(<_mtx); | |
173 | } | |
174 | ||
175 | static void | |
176 | lt_grab_mutex_with_try() | |
177 | { | |
0a7de745 A |
178 | while (0 == lck_mtx_try_lock(<_mtx)) { |
179 | ; | |
180 | } | |
5ba3f43e A |
181 | lt_note_another_blocking_lock_holder(); |
182 | lt_sleep_a_little_bit(); | |
183 | lt_counter++; | |
184 | lt_note_blocking_lock_release(); | |
185 | lck_mtx_unlock(<_mtx); | |
5ba3f43e A |
186 | } |
187 | ||
188 | static void | |
189 | lt_grab_rw_exclusive() | |
190 | { | |
191 | lck_rw_lock_exclusive(<_rwlock); | |
192 | lt_note_another_blocking_lock_holder(); | |
193 | lt_sleep_a_little_bit(); | |
194 | lt_counter++; | |
195 | lt_note_blocking_lock_release(); | |
196 | lck_rw_done(<_rwlock); | |
197 | } | |
198 | ||
199 | static void | |
200 | lt_grab_rw_exclusive_with_try() | |
201 | { | |
0a7de745 | 202 | while (0 == lck_rw_try_lock_exclusive(<_rwlock)) { |
5ba3f43e A |
203 | lt_sleep_a_little_bit(); |
204 | } | |
205 | ||
206 | lt_note_another_blocking_lock_holder(); | |
207 | lt_sleep_a_little_bit(); | |
208 | lt_counter++; | |
209 | lt_note_blocking_lock_release(); | |
210 | lck_rw_done(<_rwlock); | |
211 | } | |
212 | ||
213 | /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840) | |
0a7de745 A |
214 | * static void |
215 | * lt_grab_rw_shared() | |
216 | * { | |
217 | * lck_rw_lock_shared(<_rwlock); | |
218 | * lt_counter++; | |
219 | * | |
220 | * lt_note_another_blocking_lock_holder(); | |
221 | * lt_sleep_a_little_bit(); | |
222 | * lt_note_blocking_lock_release(); | |
223 | * | |
224 | * lck_rw_done(<_rwlock); | |
225 | * } | |
226 | */ | |
5ba3f43e A |
227 | |
228 | /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840) | |
0a7de745 A |
229 | * static void |
230 | * lt_grab_rw_shared_with_try() | |
231 | * { | |
232 | * while(0 == lck_rw_try_lock_shared(<_rwlock)); | |
233 | * lt_counter++; | |
234 | * | |
235 | * lt_note_another_blocking_lock_holder(); | |
236 | * lt_sleep_a_little_bit(); | |
237 | * lt_note_blocking_lock_release(); | |
238 | * | |
239 | * lck_rw_done(<_rwlock); | |
240 | * } | |
241 | */ | |
5ba3f43e A |
242 | |
243 | static void | |
0a7de745 | 244 | lt_upgrade_downgrade_rw() |
5ba3f43e A |
245 | { |
246 | boolean_t upgraded, success; | |
247 | ||
248 | success = lck_rw_try_lock_shared(<_rwlock); | |
249 | if (!success) { | |
250 | lck_rw_lock_shared(<_rwlock); | |
251 | } | |
252 | ||
253 | lt_note_another_blocking_lock_holder(); | |
254 | lt_sleep_a_little_bit(); | |
255 | lt_note_blocking_lock_release(); | |
0a7de745 | 256 | |
5ba3f43e A |
257 | upgraded = lck_rw_lock_shared_to_exclusive(<_rwlock); |
258 | if (!upgraded) { | |
259 | success = lck_rw_try_lock_exclusive(<_rwlock); | |
260 | ||
261 | if (!success) { | |
262 | lck_rw_lock_exclusive(<_rwlock); | |
263 | } | |
264 | } | |
265 | ||
266 | lt_upgrade_holders++; | |
267 | if (lt_upgrade_holders > lt_max_upgrade_holders) { | |
268 | lt_max_upgrade_holders = lt_upgrade_holders; | |
269 | } | |
270 | ||
271 | lt_counter++; | |
272 | lt_sleep_a_little_bit(); | |
273 | ||
274 | lt_upgrade_holders--; | |
0a7de745 | 275 | |
5ba3f43e A |
276 | lck_rw_lock_exclusive_to_shared(<_rwlock); |
277 | ||
278 | lt_spin_a_little_bit(); | |
279 | lck_rw_done(<_rwlock); | |
280 | } | |
281 | ||
c6bf4f31 A |
282 | #if __AMP__ |
283 | const int limit = 1000000; | |
284 | static int lt_stress_local_counters[MAX_CPUS]; | |
285 | ||
286 | lck_ticket_t lt_ticket_lock; | |
f427ee49 | 287 | lck_grp_t lt_ticket_grp; |
c6bf4f31 A |
288 | |
289 | static void | |
290 | lt_stress_ticket_lock() | |
291 | { | |
292 | int local_counter = 0; | |
293 | ||
f427ee49 | 294 | uint cpuid = cpu_number(); |
c6bf4f31 A |
295 | |
296 | kprintf("%s>cpu %d starting\n", __FUNCTION__, cpuid); | |
297 | ||
f427ee49 | 298 | lck_ticket_lock(<_ticket_lock, <_ticket_grp); |
c6bf4f31 A |
299 | lt_counter++; |
300 | local_counter++; | |
301 | lck_ticket_unlock(<_ticket_lock); | |
302 | ||
303 | while (lt_counter < lt_target_done_threads) { | |
304 | ; | |
305 | } | |
306 | ||
307 | kprintf("%s>cpu %d started\n", __FUNCTION__, cpuid); | |
308 | ||
309 | while (lt_counter < limit) { | |
f427ee49 | 310 | lck_ticket_lock(<_ticket_lock, <_ticket_grp); |
c6bf4f31 A |
311 | if (lt_counter < limit) { |
312 | lt_counter++; | |
313 | local_counter++; | |
314 | } | |
315 | lck_ticket_unlock(<_ticket_lock); | |
316 | } | |
317 | ||
318 | lt_stress_local_counters[cpuid] = local_counter; | |
319 | ||
320 | kprintf("%s>final counter %d cpu %d incremented the counter %d times\n", __FUNCTION__, lt_counter, cpuid, local_counter); | |
321 | } | |
322 | #endif | |
5ba3f43e A |
323 | |
324 | static void | |
0a7de745 | 325 | lt_grab_hw_lock() |
5ba3f43e | 326 | { |
0a7de745 | 327 | hw_lock_lock(<_hw_lock, LCK_GRP_NULL); |
5ba3f43e A |
328 | lt_counter++; |
329 | lt_spin_a_little_bit(); | |
330 | hw_lock_unlock(<_hw_lock); | |
331 | } | |
332 | ||
333 | static void | |
334 | lt_grab_hw_lock_with_try() | |
335 | { | |
0a7de745 A |
336 | while (0 == hw_lock_try(<_hw_lock, LCK_GRP_NULL)) { |
337 | ; | |
338 | } | |
5ba3f43e A |
339 | lt_counter++; |
340 | lt_spin_a_little_bit(); | |
341 | hw_lock_unlock(<_hw_lock); | |
342 | } | |
343 | ||
344 | static void | |
345 | lt_grab_hw_lock_with_to() | |
346 | { | |
0a7de745 | 347 | while (0 == hw_lock_to(<_hw_lock, LockTimeOut, LCK_GRP_NULL)) { |
5ba3f43e | 348 | mp_enable_preemption(); |
0a7de745 | 349 | } |
5ba3f43e A |
350 | lt_counter++; |
351 | lt_spin_a_little_bit(); | |
352 | hw_lock_unlock(<_hw_lock); | |
353 | } | |
354 | ||
355 | static void | |
0a7de745 | 356 | lt_grab_spin_lock() |
5ba3f43e A |
357 | { |
358 | lck_spin_lock(<_lck_spin_t); | |
359 | lt_counter++; | |
360 | lt_spin_a_little_bit(); | |
361 | lck_spin_unlock(<_lck_spin_t); | |
362 | } | |
363 | ||
364 | static void | |
0a7de745 | 365 | lt_grab_spin_lock_with_try() |
5ba3f43e | 366 | { |
0a7de745 A |
367 | while (0 == lck_spin_try_lock(<_lck_spin_t)) { |
368 | ; | |
369 | } | |
5ba3f43e A |
370 | lt_counter++; |
371 | lt_spin_a_little_bit(); | |
372 | lck_spin_unlock(<_lck_spin_t); | |
373 | } | |
374 | ||
375 | static volatile boolean_t lt_thread_lock_grabbed; | |
376 | static volatile boolean_t lt_thread_lock_success; | |
377 | ||
378 | static void | |
379 | lt_reset() | |
380 | { | |
381 | lt_counter = 0; | |
382 | lt_max_holders = 0; | |
383 | lt_num_holders = 0; | |
384 | lt_max_upgrade_holders = 0; | |
385 | lt_upgrade_holders = 0; | |
386 | lt_done_threads = 0; | |
387 | lt_target_done_threads = 0; | |
388 | lt_cpu_bind_id = 0; | |
389 | ||
390 | OSMemoryBarrier(); | |
391 | } | |
392 | ||
393 | static void | |
394 | lt_trylock_hw_lock_with_to() | |
395 | { | |
396 | OSMemoryBarrier(); | |
397 | while (!lt_thread_lock_grabbed) { | |
398 | lt_sleep_a_little_bit(); | |
399 | OSMemoryBarrier(); | |
400 | } | |
0a7de745 | 401 | lt_thread_lock_success = hw_lock_to(<_hw_lock, 100, LCK_GRP_NULL); |
5ba3f43e A |
402 | OSMemoryBarrier(); |
403 | mp_enable_preemption(); | |
404 | } | |
405 | ||
406 | static void | |
407 | lt_trylock_spin_try_lock() | |
408 | { | |
409 | OSMemoryBarrier(); | |
410 | while (!lt_thread_lock_grabbed) { | |
411 | lt_sleep_a_little_bit(); | |
412 | OSMemoryBarrier(); | |
413 | } | |
414 | lt_thread_lock_success = lck_spin_try_lock(<_lck_spin_t); | |
415 | OSMemoryBarrier(); | |
416 | } | |
417 | ||
418 | static void | |
419 | lt_trylock_thread(void *arg, wait_result_t wres __unused) | |
420 | { | |
0a7de745 | 421 | void (*func)(void) = (void (*)(void))arg; |
5ba3f43e A |
422 | |
423 | func(); | |
424 | ||
425 | OSIncrementAtomic((volatile SInt32*) <_done_threads); | |
426 | } | |
427 | ||
428 | static void | |
429 | lt_start_trylock_thread(thread_continue_t func) | |
430 | { | |
431 | thread_t thread; | |
432 | kern_return_t kr; | |
433 | ||
434 | kr = kernel_thread_start(lt_trylock_thread, func, &thread); | |
435 | assert(kr == KERN_SUCCESS); | |
436 | ||
437 | thread_deallocate(thread); | |
438 | } | |
439 | ||
440 | static void | |
441 | lt_wait_for_lock_test_threads() | |
442 | { | |
443 | OSMemoryBarrier(); | |
444 | /* Spin to reduce dependencies */ | |
445 | while (lt_done_threads < lt_target_done_threads) { | |
446 | lt_sleep_a_little_bit(); | |
447 | OSMemoryBarrier(); | |
448 | } | |
449 | OSMemoryBarrier(); | |
450 | } | |
451 | ||
452 | static kern_return_t | |
453 | lt_test_trylocks() | |
454 | { | |
0a7de745 | 455 | boolean_t success; |
a39ff7e2 | 456 | extern unsigned int real_ncpus; |
0a7de745 A |
457 | |
458 | /* | |
5ba3f43e A |
459 | * First mtx try lock succeeds, second fails. |
460 | */ | |
461 | success = lck_mtx_try_lock(<_mtx); | |
462 | T_ASSERT_NOTNULL(success, "First mtx try lock"); | |
463 | success = lck_mtx_try_lock(<_mtx); | |
464 | T_ASSERT_NULL(success, "Second mtx try lock for a locked mtx"); | |
465 | lck_mtx_unlock(<_mtx); | |
466 | ||
467 | /* | |
468 | * After regular grab, can't try lock. | |
469 | */ | |
470 | lck_mtx_lock(<_mtx); | |
471 | success = lck_mtx_try_lock(<_mtx); | |
472 | T_ASSERT_NULL(success, "try lock should fail after regular lck_mtx_lock"); | |
473 | lck_mtx_unlock(<_mtx); | |
474 | ||
475 | /* | |
0a7de745 | 476 | * Two shared try locks on a previously unheld rwlock suceed, and a |
5ba3f43e A |
477 | * subsequent exclusive attempt fails. |
478 | */ | |
479 | success = lck_rw_try_lock_shared(<_rwlock); | |
480 | T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed"); | |
481 | success = lck_rw_try_lock_shared(<_rwlock); | |
482 | T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed"); | |
483 | success = lck_rw_try_lock_exclusive(<_rwlock); | |
484 | T_ASSERT_NULL(success, "exclusive lock attempt on previously held lock should fail"); | |
485 | lck_rw_done(<_rwlock); | |
486 | lck_rw_done(<_rwlock); | |
487 | ||
488 | /* | |
489 | * After regular shared grab, can trylock | |
490 | * for shared but not for exclusive. | |
491 | */ | |
492 | lck_rw_lock_shared(<_rwlock); | |
493 | success = lck_rw_try_lock_shared(<_rwlock); | |
494 | T_ASSERT_NOTNULL(success, "After regular shared grab another shared try lock should succeed."); | |
495 | success = lck_rw_try_lock_exclusive(<_rwlock); | |
496 | T_ASSERT_NULL(success, "After regular shared grab an exclusive lock attempt should fail."); | |
497 | lck_rw_done(<_rwlock); | |
498 | lck_rw_done(<_rwlock); | |
499 | ||
500 | /* | |
501 | * An exclusive try lock succeeds, subsequent shared and exclusive | |
502 | * attempts fail. | |
503 | */ | |
504 | success = lck_rw_try_lock_exclusive(<_rwlock); | |
505 | T_ASSERT_NOTNULL(success, "An exclusive try lock should succeed"); | |
506 | success = lck_rw_try_lock_shared(<_rwlock); | |
507 | T_ASSERT_NULL(success, "try lock in shared mode attempt after an exclusive grab should fail"); | |
508 | success = lck_rw_try_lock_exclusive(<_rwlock); | |
509 | T_ASSERT_NULL(success, "try lock in exclusive mode attempt after an exclusive grab should fail"); | |
510 | lck_rw_done(<_rwlock); | |
511 | ||
512 | /* | |
513 | * After regular exclusive grab, neither kind of trylock succeeds. | |
514 | */ | |
515 | lck_rw_lock_exclusive(<_rwlock); | |
516 | success = lck_rw_try_lock_shared(<_rwlock); | |
517 | T_ASSERT_NULL(success, "After regular exclusive grab, shared trylock should not succeed"); | |
518 | success = lck_rw_try_lock_exclusive(<_rwlock); | |
519 | T_ASSERT_NULL(success, "After regular exclusive grab, exclusive trylock should not succeed"); | |
520 | lck_rw_done(<_rwlock); | |
521 | ||
0a7de745 | 522 | /* |
5ba3f43e A |
523 | * First spin lock attempts succeed, second attempts fail. |
524 | */ | |
0a7de745 | 525 | success = hw_lock_try(<_hw_lock, LCK_GRP_NULL); |
5ba3f43e | 526 | T_ASSERT_NOTNULL(success, "First spin lock attempts should succeed"); |
0a7de745 | 527 | success = hw_lock_try(<_hw_lock, LCK_GRP_NULL); |
5ba3f43e A |
528 | T_ASSERT_NULL(success, "Second attempt to spin lock should fail"); |
529 | hw_lock_unlock(<_hw_lock); | |
0a7de745 A |
530 | |
531 | hw_lock_lock(<_hw_lock, LCK_GRP_NULL); | |
532 | success = hw_lock_try(<_hw_lock, LCK_GRP_NULL); | |
5ba3f43e A |
533 | T_ASSERT_NULL(success, "After taking spin lock, trylock attempt should fail"); |
534 | hw_lock_unlock(<_hw_lock); | |
535 | ||
536 | lt_reset(); | |
537 | lt_thread_lock_grabbed = false; | |
538 | lt_thread_lock_success = true; | |
539 | lt_target_done_threads = 1; | |
540 | OSMemoryBarrier(); | |
541 | lt_start_trylock_thread(lt_trylock_hw_lock_with_to); | |
0a7de745 | 542 | success = hw_lock_to(<_hw_lock, 100, LCK_GRP_NULL); |
5ba3f43e | 543 | T_ASSERT_NOTNULL(success, "First spin lock with timeout should succeed"); |
a39ff7e2 A |
544 | if (real_ncpus == 1) { |
545 | mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */ | |
546 | } | |
5ba3f43e A |
547 | OSIncrementAtomic((volatile SInt32*)<_thread_lock_grabbed); |
548 | lt_wait_for_lock_test_threads(); | |
549 | T_ASSERT_NULL(lt_thread_lock_success, "Second spin lock with timeout should fail and timeout"); | |
a39ff7e2 A |
550 | if (real_ncpus == 1) { |
551 | mp_disable_preemption(); /* don't double-enable when we unlock */ | |
552 | } | |
5ba3f43e A |
553 | hw_lock_unlock(<_hw_lock); |
554 | ||
555 | lt_reset(); | |
556 | lt_thread_lock_grabbed = false; | |
557 | lt_thread_lock_success = true; | |
558 | lt_target_done_threads = 1; | |
559 | OSMemoryBarrier(); | |
560 | lt_start_trylock_thread(lt_trylock_hw_lock_with_to); | |
0a7de745 | 561 | hw_lock_lock(<_hw_lock, LCK_GRP_NULL); |
a39ff7e2 A |
562 | if (real_ncpus == 1) { |
563 | mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */ | |
564 | } | |
5ba3f43e A |
565 | OSIncrementAtomic((volatile SInt32*)<_thread_lock_grabbed); |
566 | lt_wait_for_lock_test_threads(); | |
567 | T_ASSERT_NULL(lt_thread_lock_success, "after taking a spin lock, lock attempt with timeout should fail"); | |
a39ff7e2 A |
568 | if (real_ncpus == 1) { |
569 | mp_disable_preemption(); /* don't double-enable when we unlock */ | |
570 | } | |
5ba3f43e A |
571 | hw_lock_unlock(<_hw_lock); |
572 | ||
573 | success = lck_spin_try_lock(<_lck_spin_t); | |
574 | T_ASSERT_NOTNULL(success, "spin trylock of previously unheld lock should succeed"); | |
575 | success = lck_spin_try_lock(<_lck_spin_t); | |
576 | T_ASSERT_NULL(success, "spin trylock attempt of previously held lock (with trylock) should fail"); | |
577 | lck_spin_unlock(<_lck_spin_t); | |
578 | ||
579 | lt_reset(); | |
580 | lt_thread_lock_grabbed = false; | |
581 | lt_thread_lock_success = true; | |
582 | lt_target_done_threads = 1; | |
583 | lt_start_trylock_thread(lt_trylock_spin_try_lock); | |
584 | lck_spin_lock(<_lck_spin_t); | |
a39ff7e2 A |
585 | if (real_ncpus == 1) { |
586 | mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */ | |
587 | } | |
5ba3f43e A |
588 | OSIncrementAtomic((volatile SInt32*)<_thread_lock_grabbed); |
589 | lt_wait_for_lock_test_threads(); | |
590 | T_ASSERT_NULL(lt_thread_lock_success, "spin trylock attempt of previously held lock should fail"); | |
a39ff7e2 A |
591 | if (real_ncpus == 1) { |
592 | mp_disable_preemption(); /* don't double-enable when we unlock */ | |
593 | } | |
5ba3f43e A |
594 | lck_spin_unlock(<_lck_spin_t); |
595 | ||
596 | return KERN_SUCCESS; | |
597 | } | |
598 | ||
599 | static void | |
0a7de745 | 600 | lt_thread(void *arg, wait_result_t wres __unused) |
5ba3f43e | 601 | { |
0a7de745 | 602 | void (*func)(void) = (void (*)(void))arg; |
5ba3f43e A |
603 | uint32_t i; |
604 | ||
605 | for (i = 0; i < LOCK_TEST_ITERATIONS; i++) { | |
606 | func(); | |
607 | } | |
608 | ||
609 | OSIncrementAtomic((volatile SInt32*) <_done_threads); | |
610 | } | |
611 | ||
5ba3f43e A |
612 | static void |
613 | lt_start_lock_thread(thread_continue_t func) | |
614 | { | |
615 | thread_t thread; | |
616 | kern_return_t kr; | |
617 | ||
618 | kr = kernel_thread_start(lt_thread, func, &thread); | |
619 | assert(kr == KERN_SUCCESS); | |
620 | ||
621 | thread_deallocate(thread); | |
622 | } | |
623 | ||
c6bf4f31 A |
624 | #if __AMP__ |
625 | static void | |
626 | lt_bound_thread(void *arg, wait_result_t wres __unused) | |
627 | { | |
628 | void (*func)(void) = (void (*)(void))arg; | |
629 | ||
630 | int cpuid = OSIncrementAtomic((volatile SInt32 *)<_cpu_bind_id); | |
631 | ||
632 | processor_t processor = processor_list; | |
633 | while ((processor != NULL) && (processor->cpu_id != cpuid)) { | |
634 | processor = processor->processor_list; | |
635 | } | |
636 | ||
637 | if (processor != NULL) { | |
638 | thread_bind(processor); | |
639 | } | |
640 | ||
641 | thread_block(THREAD_CONTINUE_NULL); | |
642 | ||
643 | func(); | |
644 | ||
645 | OSIncrementAtomic((volatile SInt32*) <_done_threads); | |
646 | } | |
647 | ||
648 | static void | |
649 | lt_e_thread(void *arg, wait_result_t wres __unused) | |
650 | { | |
651 | void (*func)(void) = (void (*)(void))arg; | |
652 | ||
653 | thread_t thread = current_thread(); | |
654 | ||
655 | spl_t s = splsched(); | |
656 | thread_lock(thread); | |
657 | thread->sched_flags |= TH_SFLAG_ECORE_ONLY; | |
658 | thread_unlock(thread); | |
659 | splx(s); | |
660 | ||
661 | thread_block(THREAD_CONTINUE_NULL); | |
662 | ||
663 | func(); | |
664 | ||
665 | OSIncrementAtomic((volatile SInt32*) <_done_threads); | |
666 | } | |
667 | ||
668 | static void | |
669 | lt_p_thread(void *arg, wait_result_t wres __unused) | |
670 | { | |
671 | void (*func)(void) = (void (*)(void))arg; | |
672 | ||
673 | thread_t thread = current_thread(); | |
674 | ||
675 | spl_t s = splsched(); | |
676 | thread_lock(thread); | |
677 | thread->sched_flags |= TH_SFLAG_PCORE_ONLY; | |
678 | thread_unlock(thread); | |
679 | splx(s); | |
680 | ||
681 | thread_block(THREAD_CONTINUE_NULL); | |
682 | ||
683 | func(); | |
684 | ||
685 | OSIncrementAtomic((volatile SInt32*) <_done_threads); | |
686 | } | |
687 | ||
688 | static void | |
689 | lt_start_lock_thread_e(thread_continue_t func) | |
690 | { | |
691 | thread_t thread; | |
692 | kern_return_t kr; | |
693 | ||
694 | kr = kernel_thread_start(lt_e_thread, func, &thread); | |
695 | assert(kr == KERN_SUCCESS); | |
696 | ||
697 | thread_deallocate(thread); | |
698 | } | |
699 | ||
700 | static void | |
701 | lt_start_lock_thread_p(thread_continue_t func) | |
702 | { | |
703 | thread_t thread; | |
704 | kern_return_t kr; | |
705 | ||
706 | kr = kernel_thread_start(lt_p_thread, func, &thread); | |
707 | assert(kr == KERN_SUCCESS); | |
708 | ||
709 | thread_deallocate(thread); | |
710 | } | |
711 | ||
712 | static void | |
713 | lt_start_lock_thread_bound(thread_continue_t func) | |
714 | { | |
715 | thread_t thread; | |
716 | kern_return_t kr; | |
717 | ||
718 | kr = kernel_thread_start(lt_bound_thread, func, &thread); | |
719 | assert(kr == KERN_SUCCESS); | |
720 | ||
721 | thread_deallocate(thread); | |
722 | } | |
723 | #endif | |
5ba3f43e | 724 | |
5ba3f43e A |
725 | static kern_return_t |
726 | lt_test_locks() | |
727 | { | |
728 | kern_return_t kr = KERN_SUCCESS; | |
729 | lck_grp_attr_t *lga = lck_grp_attr_alloc_init(); | |
730 | lck_grp_t *lg = lck_grp_alloc_init("lock test", lga); | |
731 | ||
732 | lck_mtx_init(<_mtx, lg, LCK_ATTR_NULL); | |
733 | lck_rw_init(<_rwlock, lg, LCK_ATTR_NULL); | |
734 | lck_spin_init(<_lck_spin_t, lg, LCK_ATTR_NULL); | |
735 | hw_lock_init(<_hw_lock); | |
736 | ||
737 | T_LOG("Testing locks."); | |
738 | ||
739 | /* Try locks (custom) */ | |
740 | lt_reset(); | |
741 | ||
742 | T_LOG("Running try lock test."); | |
743 | kr = lt_test_trylocks(); | |
744 | T_EXPECT_NULL(kr, "try lock test failed."); | |
745 | ||
746 | /* Uncontended mutex */ | |
747 | T_LOG("Running uncontended mutex test."); | |
748 | lt_reset(); | |
749 | lt_target_done_threads = 1; | |
750 | lt_start_lock_thread(lt_grab_mutex); | |
751 | lt_wait_for_lock_test_threads(); | |
752 | T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL); | |
753 | T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL); | |
754 | ||
755 | /* Contended mutex:try locks*/ | |
756 | T_LOG("Running contended mutex test."); | |
757 | lt_reset(); | |
758 | lt_target_done_threads = 3; | |
759 | lt_start_lock_thread(lt_grab_mutex); | |
760 | lt_start_lock_thread(lt_grab_mutex); | |
761 | lt_start_lock_thread(lt_grab_mutex); | |
762 | lt_wait_for_lock_test_threads(); | |
763 | T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL); | |
764 | T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL); | |
765 | ||
766 | /* Contended mutex: try locks*/ | |
767 | T_LOG("Running contended mutex trylock test."); | |
768 | lt_reset(); | |
769 | lt_target_done_threads = 3; | |
770 | lt_start_lock_thread(lt_grab_mutex_with_try); | |
771 | lt_start_lock_thread(lt_grab_mutex_with_try); | |
772 | lt_start_lock_thread(lt_grab_mutex_with_try); | |
773 | lt_wait_for_lock_test_threads(); | |
774 | T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL); | |
775 | T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL); | |
776 | ||
777 | /* Uncontended exclusive rwlock */ | |
778 | T_LOG("Running uncontended exclusive rwlock test."); | |
779 | lt_reset(); | |
780 | lt_target_done_threads = 1; | |
781 | lt_start_lock_thread(lt_grab_rw_exclusive); | |
782 | lt_wait_for_lock_test_threads(); | |
783 | T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL); | |
784 | T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL); | |
785 | ||
786 | /* Uncontended shared rwlock */ | |
787 | ||
788 | /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840) | |
0a7de745 A |
789 | * T_LOG("Running uncontended shared rwlock test."); |
790 | * lt_reset(); | |
791 | * lt_target_done_threads = 1; | |
792 | * lt_start_lock_thread(lt_grab_rw_shared); | |
793 | * lt_wait_for_lock_test_threads(); | |
794 | * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL); | |
795 | * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL); | |
796 | */ | |
5ba3f43e A |
797 | |
798 | /* Contended exclusive rwlock */ | |
799 | T_LOG("Running contended exclusive rwlock test."); | |
800 | lt_reset(); | |
801 | lt_target_done_threads = 3; | |
802 | lt_start_lock_thread(lt_grab_rw_exclusive); | |
803 | lt_start_lock_thread(lt_grab_rw_exclusive); | |
804 | lt_start_lock_thread(lt_grab_rw_exclusive); | |
805 | lt_wait_for_lock_test_threads(); | |
806 | T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL); | |
807 | T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL); | |
808 | ||
809 | /* One shared, two exclusive */ | |
810 | /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840) | |
0a7de745 A |
811 | * T_LOG("Running test with one shared and two exclusive rw lock threads."); |
812 | * lt_reset(); | |
813 | * lt_target_done_threads = 3; | |
814 | * lt_start_lock_thread(lt_grab_rw_shared); | |
815 | * lt_start_lock_thread(lt_grab_rw_exclusive); | |
816 | * lt_start_lock_thread(lt_grab_rw_exclusive); | |
817 | * lt_wait_for_lock_test_threads(); | |
818 | * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL); | |
819 | * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL); | |
820 | */ | |
5ba3f43e A |
821 | |
822 | /* Four shared */ | |
823 | /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840) | |
0a7de745 A |
824 | * T_LOG("Running test with four shared holders."); |
825 | * lt_reset(); | |
826 | * lt_target_done_threads = 4; | |
827 | * lt_start_lock_thread(lt_grab_rw_shared); | |
828 | * lt_start_lock_thread(lt_grab_rw_shared); | |
829 | * lt_start_lock_thread(lt_grab_rw_shared); | |
830 | * lt_start_lock_thread(lt_grab_rw_shared); | |
831 | * lt_wait_for_lock_test_threads(); | |
832 | * T_EXPECT_LE_UINT(lt_max_holders, 4, NULL); | |
833 | */ | |
5ba3f43e A |
834 | |
835 | /* Three doing upgrades and downgrades */ | |
836 | T_LOG("Running test with threads upgrading and downgrading."); | |
837 | lt_reset(); | |
838 | lt_target_done_threads = 3; | |
839 | lt_start_lock_thread(lt_upgrade_downgrade_rw); | |
840 | lt_start_lock_thread(lt_upgrade_downgrade_rw); | |
841 | lt_start_lock_thread(lt_upgrade_downgrade_rw); | |
842 | lt_wait_for_lock_test_threads(); | |
843 | T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL); | |
844 | T_EXPECT_LE_UINT(lt_max_holders, 3, NULL); | |
845 | T_EXPECT_EQ_UINT(lt_max_upgrade_holders, 1, NULL); | |
846 | ||
847 | /* Uncontended - exclusive trylocks */ | |
848 | T_LOG("Running test with single thread doing exclusive rwlock trylocks."); | |
849 | lt_reset(); | |
850 | lt_target_done_threads = 1; | |
851 | lt_start_lock_thread(lt_grab_rw_exclusive_with_try); | |
852 | lt_wait_for_lock_test_threads(); | |
853 | T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL); | |
854 | T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL); | |
855 | ||
856 | /* Uncontended - shared trylocks */ | |
857 | /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840) | |
0a7de745 A |
858 | * T_LOG("Running test with single thread doing shared rwlock trylocks."); |
859 | * lt_reset(); | |
860 | * lt_target_done_threads = 1; | |
861 | * lt_start_lock_thread(lt_grab_rw_shared_with_try); | |
862 | * lt_wait_for_lock_test_threads(); | |
863 | * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL); | |
864 | * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL); | |
865 | */ | |
5ba3f43e A |
866 | |
867 | /* Three doing exclusive trylocks */ | |
868 | T_LOG("Running test with threads doing exclusive rwlock trylocks."); | |
869 | lt_reset(); | |
870 | lt_target_done_threads = 3; | |
871 | lt_start_lock_thread(lt_grab_rw_exclusive_with_try); | |
872 | lt_start_lock_thread(lt_grab_rw_exclusive_with_try); | |
873 | lt_start_lock_thread(lt_grab_rw_exclusive_with_try); | |
874 | lt_wait_for_lock_test_threads(); | |
875 | T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL); | |
876 | T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL); | |
877 | ||
878 | /* Three doing shared trylocks */ | |
879 | /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840) | |
0a7de745 A |
880 | * T_LOG("Running test with threads doing shared rwlock trylocks."); |
881 | * lt_reset(); | |
882 | * lt_target_done_threads = 3; | |
883 | * lt_start_lock_thread(lt_grab_rw_shared_with_try); | |
884 | * lt_start_lock_thread(lt_grab_rw_shared_with_try); | |
885 | * lt_start_lock_thread(lt_grab_rw_shared_with_try); | |
886 | * lt_wait_for_lock_test_threads(); | |
887 | * T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL); | |
888 | * T_EXPECT_LE_UINT(lt_max_holders, 3, NULL); | |
889 | */ | |
5ba3f43e A |
890 | |
891 | /* Three doing various trylocks */ | |
892 | /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840) | |
0a7de745 A |
893 | * T_LOG("Running test with threads doing mixed rwlock trylocks."); |
894 | * lt_reset(); | |
895 | * lt_target_done_threads = 4; | |
896 | * lt_start_lock_thread(lt_grab_rw_shared_with_try); | |
897 | * lt_start_lock_thread(lt_grab_rw_shared_with_try); | |
898 | * lt_start_lock_thread(lt_grab_rw_exclusive_with_try); | |
899 | * lt_start_lock_thread(lt_grab_rw_exclusive_with_try); | |
900 | * lt_wait_for_lock_test_threads(); | |
901 | * T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL); | |
902 | * T_EXPECT_LE_UINT(lt_max_holders, 2, NULL); | |
903 | */ | |
5ba3f43e A |
904 | |
905 | /* HW locks */ | |
906 | T_LOG("Running test with hw_lock_lock()"); | |
907 | lt_reset(); | |
908 | lt_target_done_threads = 3; | |
909 | lt_start_lock_thread(lt_grab_hw_lock); | |
910 | lt_start_lock_thread(lt_grab_hw_lock); | |
911 | lt_start_lock_thread(lt_grab_hw_lock); | |
912 | lt_wait_for_lock_test_threads(); | |
913 | T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL); | |
914 | ||
c6bf4f31 A |
915 | #if __AMP__ |
916 | /* Ticket locks stress test */ | |
917 | T_LOG("Running Ticket locks stress test with lck_ticket_lock()"); | |
918 | extern unsigned int real_ncpus; | |
f427ee49 A |
919 | lck_grp_init(<_ticket_grp, "ticket lock stress", LCK_GRP_ATTR_NULL); |
920 | lck_ticket_init(<_ticket_lock, <_ticket_grp); | |
c6bf4f31 A |
921 | lt_reset(); |
922 | lt_target_done_threads = real_ncpus; | |
923 | for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) { | |
924 | lt_start_lock_thread_bound(lt_stress_ticket_lock); | |
925 | } | |
926 | lt_wait_for_lock_test_threads(); | |
927 | bool starvation = false; | |
928 | uint total_local_count = 0; | |
929 | for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) { | |
930 | starvation = starvation || (lt_stress_local_counters[processor->cpu_id] < 10); | |
931 | total_local_count += lt_stress_local_counters[processor->cpu_id]; | |
932 | } | |
933 | if (total_local_count != lt_counter) { | |
934 | T_FAIL("Lock failure\n"); | |
935 | } else if (starvation) { | |
936 | T_FAIL("Lock starvation found\n"); | |
937 | } else { | |
938 | T_PASS("Ticket locks stress test with lck_ticket_lock()"); | |
939 | } | |
940 | ||
941 | /* AMP ticket locks stress test */ | |
942 | T_LOG("Running AMP Ticket locks stress test bound to clusters with lck_ticket_lock()"); | |
943 | lt_reset(); | |
944 | lt_target_done_threads = real_ncpus; | |
945 | for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) { | |
946 | processor_set_t pset = processor->processor_set; | |
947 | if (pset->pset_cluster_type == PSET_AMP_P) { | |
948 | lt_start_lock_thread_p(lt_stress_ticket_lock); | |
949 | } else if (pset->pset_cluster_type == PSET_AMP_E) { | |
950 | lt_start_lock_thread_e(lt_stress_ticket_lock); | |
951 | } else { | |
952 | lt_start_lock_thread(lt_stress_ticket_lock); | |
953 | } | |
954 | } | |
955 | lt_wait_for_lock_test_threads(); | |
956 | #endif | |
5ba3f43e A |
957 | |
958 | /* HW locks: trylocks */ | |
959 | T_LOG("Running test with hw_lock_try()"); | |
960 | lt_reset(); | |
961 | lt_target_done_threads = 3; | |
962 | lt_start_lock_thread(lt_grab_hw_lock_with_try); | |
963 | lt_start_lock_thread(lt_grab_hw_lock_with_try); | |
964 | lt_start_lock_thread(lt_grab_hw_lock_with_try); | |
965 | lt_wait_for_lock_test_threads(); | |
966 | T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL); | |
967 | ||
968 | /* HW locks: with timeout */ | |
969 | T_LOG("Running test with hw_lock_to()"); | |
970 | lt_reset(); | |
971 | lt_target_done_threads = 3; | |
972 | lt_start_lock_thread(lt_grab_hw_lock_with_to); | |
973 | lt_start_lock_thread(lt_grab_hw_lock_with_to); | |
974 | lt_start_lock_thread(lt_grab_hw_lock_with_to); | |
975 | lt_wait_for_lock_test_threads(); | |
976 | T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL); | |
977 | ||
978 | /* Spin locks */ | |
979 | T_LOG("Running test with lck_spin_lock()"); | |
980 | lt_reset(); | |
981 | lt_target_done_threads = 3; | |
982 | lt_start_lock_thread(lt_grab_spin_lock); | |
983 | lt_start_lock_thread(lt_grab_spin_lock); | |
984 | lt_start_lock_thread(lt_grab_spin_lock); | |
985 | lt_wait_for_lock_test_threads(); | |
986 | T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL); | |
987 | ||
988 | /* Spin locks: trylocks */ | |
989 | T_LOG("Running test with lck_spin_try_lock()"); | |
990 | lt_reset(); | |
991 | lt_target_done_threads = 3; | |
992 | lt_start_lock_thread(lt_grab_spin_lock_with_try); | |
993 | lt_start_lock_thread(lt_grab_spin_lock_with_try); | |
994 | lt_start_lock_thread(lt_grab_spin_lock_with_try); | |
995 | lt_wait_for_lock_test_threads(); | |
996 | T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL); | |
997 | ||
998 | return KERN_SUCCESS; | |
999 | } | |
1000 | ||
0a7de745 A |
1001 | #define MT_MAX_ARGS 8 |
1002 | #define MT_INITIAL_VALUE 0xfeedbeef | |
1003 | #define MT_W_VAL (0x00000000feedbeefULL) /* Drop in zeros */ | |
1004 | #define MT_S_VAL (0xfffffffffeedbeefULL) /* High bit is 1, so sign-extends as negative */ | |
1005 | #define MT_L_VAL (((uint64_t)MT_INITIAL_VALUE) | (((uint64_t)MT_INITIAL_VALUE) << 32)) /* Two back-to-back */ | |
5ba3f43e A |
1006 | |
1007 | typedef void (*sy_munge_t)(void*); | |
1008 | ||
1009 | #define MT_FUNC(x) #x, x | |
1010 | struct munger_test { | |
0a7de745 A |
1011 | const char *mt_name; |
1012 | sy_munge_t mt_func; | |
1013 | uint32_t mt_in_words; | |
1014 | uint32_t mt_nout; | |
1015 | uint64_t mt_expected[MT_MAX_ARGS]; | |
5ba3f43e | 1016 | } munger_tests[] = { |
0a7de745 A |
1017 | {MT_FUNC(munge_w), 1, 1, {MT_W_VAL}}, |
1018 | {MT_FUNC(munge_ww), 2, 2, {MT_W_VAL, MT_W_VAL}}, | |
1019 | {MT_FUNC(munge_www), 3, 3, {MT_W_VAL, MT_W_VAL, MT_W_VAL}}, | |
1020 | {MT_FUNC(munge_wwww), 4, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}}, | |
1021 | {MT_FUNC(munge_wwwww), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}}, | |
1022 | {MT_FUNC(munge_wwwwww), 6, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}}, | |
1023 | {MT_FUNC(munge_wwwwwww), 7, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}}, | |
1024 | {MT_FUNC(munge_wwwwwwww), 8, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}}, | |
1025 | {MT_FUNC(munge_wl), 3, 2, {MT_W_VAL, MT_L_VAL}}, | |
1026 | {MT_FUNC(munge_wwl), 4, 3, {MT_W_VAL, MT_W_VAL, MT_L_VAL}}, | |
1027 | {MT_FUNC(munge_wwlll), 8, 5, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}}, | |
1028 | {MT_FUNC(munge_wlw), 4, 3, {MT_W_VAL, MT_L_VAL, MT_W_VAL}}, | |
1029 | {MT_FUNC(munge_wlwwwll), 10, 7, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}}, | |
1030 | {MT_FUNC(munge_wlwwwllw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}}, | |
1031 | {MT_FUNC(munge_wlwwlwlw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}}, | |
1032 | {MT_FUNC(munge_wll), 5, 3, {MT_W_VAL, MT_L_VAL, MT_L_VAL}}, | |
1033 | {MT_FUNC(munge_wlll), 7, 4, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}}, | |
1034 | {MT_FUNC(munge_wllwwll), 11, 7, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}}, | |
1035 | {MT_FUNC(munge_wwwlw), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}}, | |
1036 | {MT_FUNC(munge_wwwlww), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}}, | |
f427ee49 | 1037 | {MT_FUNC(munge_wwwlwww), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}}, |
0a7de745 A |
1038 | {MT_FUNC(munge_wwwl), 5, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}, |
1039 | {MT_FUNC(munge_wwwwlw), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}}, | |
f427ee49 | 1040 | {MT_FUNC(munge_wwwwllww), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}}, |
0a7de745 A |
1041 | {MT_FUNC(munge_wwwwl), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}, |
1042 | {MT_FUNC(munge_wwwwwl), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}, | |
1043 | {MT_FUNC(munge_wwwwwlww), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}}, | |
1044 | {MT_FUNC(munge_wwwwwllw), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}}, | |
1045 | {MT_FUNC(munge_wwwwwlll), 11, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}}, | |
1046 | {MT_FUNC(munge_wwwwwwl), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}, | |
1047 | {MT_FUNC(munge_wwwwwwlw), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}}, | |
1048 | {MT_FUNC(munge_wwwwwwll), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}}, | |
1049 | {MT_FUNC(munge_wsw), 3, 3, {MT_W_VAL, MT_S_VAL, MT_W_VAL}}, | |
1050 | {MT_FUNC(munge_wws), 3, 3, {MT_W_VAL, MT_W_VAL, MT_S_VAL}}, | |
1051 | {MT_FUNC(munge_wwwsw), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_S_VAL, MT_W_VAL}}, | |
1052 | {MT_FUNC(munge_llllll), 12, 6, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}}, | |
1053 | {MT_FUNC(munge_l), 2, 1, {MT_L_VAL}}, | |
1054 | {MT_FUNC(munge_lw), 3, 2, {MT_L_VAL, MT_W_VAL}}, | |
1055 | {MT_FUNC(munge_lwww), 5, 4, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}}, | |
1056 | {MT_FUNC(munge_lwwwwwww), 9, 8, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}}, | |
1057 | {MT_FUNC(munge_wlwwwl), 8, 6, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}, | |
1058 | {MT_FUNC(munge_wwlwwwl), 9, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}} | |
5ba3f43e A |
1059 | }; |
1060 | ||
1061 | #define MT_TEST_COUNT (sizeof(munger_tests) / sizeof(struct munger_test)) | |
1062 | ||
1063 | static void | |
0a7de745 | 1064 | mt_reset(uint32_t in_words, size_t total_size, uint32_t *data) |
5ba3f43e A |
1065 | { |
1066 | uint32_t i; | |
1067 | ||
1068 | for (i = 0; i < in_words; i++) { | |
1069 | data[i] = MT_INITIAL_VALUE; | |
1070 | } | |
1071 | ||
1072 | if (in_words * sizeof(uint32_t) < total_size) { | |
1073 | bzero(&data[in_words], total_size - in_words * sizeof(uint32_t)); | |
1074 | } | |
1075 | } | |
1076 | ||
1077 | static void | |
1078 | mt_test_mungers() | |
1079 | { | |
1080 | uint64_t data[MT_MAX_ARGS]; | |
1081 | uint32_t i, j; | |
1082 | ||
1083 | for (i = 0; i < MT_TEST_COUNT; i++) { | |
1084 | struct munger_test *test = &munger_tests[i]; | |
1085 | int pass = 1; | |
1086 | ||
1087 | T_LOG("Testing %s", test->mt_name); | |
1088 | ||
1089 | mt_reset(test->mt_in_words, sizeof(data), (uint32_t*)data); | |
1090 | test->mt_func(data); | |
1091 | ||
1092 | for (j = 0; j < test->mt_nout; j++) { | |
1093 | if (data[j] != test->mt_expected[j]) { | |
1094 | T_FAIL("Index %d: expected %llx, got %llx.", j, test->mt_expected[j], data[j]); | |
1095 | pass = 0; | |
1096 | } | |
1097 | } | |
1098 | if (pass) { | |
1099 | T_PASS(test->mt_name); | |
1100 | } | |
1101 | } | |
1102 | } | |
1103 | ||
1104 | /* Exception Callback Test */ | |
0a7de745 A |
1105 | static ex_cb_action_t |
1106 | excb_test_action( | |
1107 | ex_cb_class_t cb_class, | |
1108 | void *refcon, | |
1109 | const ex_cb_state_t *state | |
5ba3f43e A |
1110 | ) |
1111 | { | |
1112 | ex_cb_state_t *context = (ex_cb_state_t *)refcon; | |
1113 | ||
0a7de745 | 1114 | if ((NULL == refcon) || (NULL == state)) { |
5ba3f43e A |
1115 | return EXCB_ACTION_TEST_FAIL; |
1116 | } | |
1117 | ||
1118 | context->far = state->far; | |
1119 | ||
0a7de745 A |
1120 | switch (cb_class) { |
1121 | case EXCB_CLASS_TEST1: | |
1122 | return EXCB_ACTION_RERUN; | |
1123 | case EXCB_CLASS_TEST2: | |
1124 | return EXCB_ACTION_NONE; | |
1125 | default: | |
1126 | return EXCB_ACTION_TEST_FAIL; | |
5ba3f43e A |
1127 | } |
1128 | } | |
1129 | ||
1130 | ||
1131 | kern_return_t | |
1132 | ex_cb_test() | |
1133 | { | |
1134 | const vm_offset_t far1 = 0xdead0001; | |
1135 | const vm_offset_t far2 = 0xdead0002; | |
1136 | kern_return_t kr; | |
1137 | ex_cb_state_t test_context_1 = {0xdeadbeef}; | |
1138 | ex_cb_state_t test_context_2 = {0xdeadbeef}; | |
1139 | ex_cb_action_t action; | |
1140 | ||
1141 | T_LOG("Testing Exception Callback."); | |
0a7de745 | 1142 | |
5ba3f43e A |
1143 | T_LOG("Running registration test."); |
1144 | ||
1145 | kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1); | |
1146 | T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST1 exception callback"); | |
1147 | kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2); | |
1148 | T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST2 exception callback"); | |
1149 | ||
1150 | kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2); | |
1151 | T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST2 exception callback"); | |
1152 | kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1); | |
1153 | T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST1 exception callback"); | |
1154 | ||
1155 | T_LOG("Running invocation test."); | |
1156 | ||
1157 | action = ex_cb_invoke(EXCB_CLASS_TEST1, far1); | |
1158 | T_ASSERT(EXCB_ACTION_RERUN == action, NULL); | |
1159 | T_ASSERT(far1 == test_context_1.far, NULL); | |
1160 | ||
1161 | action = ex_cb_invoke(EXCB_CLASS_TEST2, far2); | |
1162 | T_ASSERT(EXCB_ACTION_NONE == action, NULL); | |
1163 | T_ASSERT(far2 == test_context_2.far, NULL); | |
1164 | ||
1165 | action = ex_cb_invoke(EXCB_CLASS_TEST3, 0); | |
1166 | T_ASSERT(EXCB_ACTION_NONE == action, NULL); | |
1167 | ||
1168 | return KERN_SUCCESS; | |
1169 | } | |
1170 | ||
cb323159 A |
1171 | #if defined(HAS_APPLE_PAC) |
1172 | ||
cb323159 A |
1173 | |
1174 | kern_return_t | |
1175 | arm64_ropjop_test() | |
1176 | { | |
1177 | T_LOG("Testing ROP/JOP"); | |
1178 | ||
1179 | /* how is ROP/JOP configured */ | |
1180 | boolean_t config_rop_enabled = TRUE; | |
f427ee49 | 1181 | boolean_t config_jop_enabled = TRUE; |
cb323159 A |
1182 | |
1183 | ||
cb323159 A |
1184 | if (config_jop_enabled) { |
1185 | /* jop key */ | |
1186 | uint64_t apiakey_hi = __builtin_arm_rsr64(ARM64_REG_APIAKEYHI_EL1); | |
1187 | uint64_t apiakey_lo = __builtin_arm_rsr64(ARM64_REG_APIAKEYLO_EL1); | |
1188 | ||
cb323159 A |
1189 | T_EXPECT(apiakey_hi != 0 && apiakey_lo != 0, NULL); |
1190 | } | |
1191 | ||
1192 | if (config_rop_enabled) { | |
1193 | /* rop key */ | |
1194 | uint64_t apibkey_hi = __builtin_arm_rsr64(ARM64_REG_APIBKEYHI_EL1); | |
1195 | uint64_t apibkey_lo = __builtin_arm_rsr64(ARM64_REG_APIBKEYLO_EL1); | |
1196 | ||
cb323159 A |
1197 | T_EXPECT(apibkey_hi != 0 && apibkey_lo != 0, NULL); |
1198 | ||
1199 | /* sign a KVA (the address of this function) */ | |
1200 | uint64_t kva_signed = (uint64_t) ptrauth_sign_unauthenticated((void *)&config_rop_enabled, ptrauth_key_asib, 0); | |
1201 | ||
1202 | /* assert it was signed (changed) */ | |
1203 | T_EXPECT(kva_signed != (uint64_t)&config_rop_enabled, NULL); | |
1204 | ||
1205 | /* authenticate the newly signed KVA */ | |
1206 | uint64_t kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_signed, ptrauth_key_asib, 0); | |
1207 | ||
1208 | /* assert the authed KVA is the original KVA */ | |
1209 | T_EXPECT(kva_authed == (uint64_t)&config_rop_enabled, NULL); | |
1210 | ||
1211 | /* corrupt a signed ptr, auth it, ensure auth failed */ | |
1212 | uint64_t kva_corrupted = kva_signed ^ 1; | |
1213 | ||
1214 | /* authenticate the corrupted pointer */ | |
1215 | kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_corrupted, ptrauth_key_asib, 0); | |
1216 | ||
1217 | /* when AuthIB fails, bits 63:62 will be set to 2'b10 */ | |
1218 | uint64_t auth_fail_mask = 3ULL << 61; | |
1219 | uint64_t authib_fail = 2ULL << 61; | |
1220 | ||
1221 | /* assert the failed authIB of corrupted pointer is tagged */ | |
1222 | T_EXPECT((kva_authed & auth_fail_mask) == authib_fail, NULL); | |
1223 | } | |
1224 | ||
1225 | return KERN_SUCCESS; | |
1226 | } | |
1227 | #endif /* defined(HAS_APPLE_PAC) */ | |
d9a64523 | 1228 | |
5ba3f43e | 1229 | #if __ARM_PAN_AVAILABLE__ |
cb323159 A |
1230 | |
1231 | struct pan_test_thread_args { | |
1232 | volatile bool join; | |
1233 | }; | |
1234 | ||
1235 | static void | |
1236 | arm64_pan_test_thread(void *arg, wait_result_t __unused wres) | |
1237 | { | |
1238 | T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL); | |
1239 | ||
1240 | struct pan_test_thread_args *args = arg; | |
1241 | ||
1242 | for (processor_t p = processor_list; p != NULL; p = p->processor_list) { | |
1243 | thread_bind(p); | |
1244 | thread_block(THREAD_CONTINUE_NULL); | |
1245 | kprintf("Running PAN test on cpu %d\n", p->cpu_id); | |
1246 | arm64_pan_test(); | |
1247 | } | |
1248 | ||
1249 | /* unbind thread from specific cpu */ | |
1250 | thread_bind(PROCESSOR_NULL); | |
1251 | thread_block(THREAD_CONTINUE_NULL); | |
1252 | ||
1253 | while (!args->join) { | |
1254 | ; | |
1255 | } | |
1256 | ||
1257 | thread_wakeup(args); | |
1258 | } | |
1259 | ||
1260 | kern_return_t | |
1261 | arm64_late_pan_test() | |
1262 | { | |
1263 | thread_t thread; | |
1264 | kern_return_t kr; | |
1265 | ||
1266 | struct pan_test_thread_args args; | |
1267 | args.join = false; | |
1268 | ||
1269 | kr = kernel_thread_start(arm64_pan_test_thread, &args, &thread); | |
1270 | assert(kr == KERN_SUCCESS); | |
1271 | ||
1272 | thread_deallocate(thread); | |
1273 | ||
1274 | assert_wait(&args, THREAD_UNINT); | |
1275 | args.join = true; | |
1276 | thread_block(THREAD_CONTINUE_NULL); | |
1277 | return KERN_SUCCESS; | |
1278 | } | |
1279 | ||
f427ee49 A |
1280 | static bool |
1281 | arm64_pan_test_pan_enabled_fault_handler(arm_saved_state_t * state) | |
1282 | { | |
1283 | bool retval = false; | |
1284 | uint32_t esr = get_saved_state_esr(state); | |
1285 | esr_exception_class_t class = ESR_EC(esr); | |
1286 | fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr)); | |
1287 | uint32_t cpsr = get_saved_state_cpsr(state); | |
1288 | uint64_t far = get_saved_state_far(state); | |
1289 | ||
1290 | if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) && | |
1291 | (cpsr & PSR64_PAN) && | |
1292 | ((esr & ISS_DA_WNR) ? mmu_kvtop_wpreflight(far) : mmu_kvtop(far))) { | |
1293 | ++pan_exception_level; | |
1294 | // read the user-accessible value to make sure | |
1295 | // pan is enabled and produces a 2nd fault from | |
1296 | // the exception handler | |
1297 | if (pan_exception_level == 1) { | |
1298 | ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, far); | |
1299 | pan_fault_value = *(volatile char *)far; | |
1300 | ml_expect_fault_end(); | |
1301 | __builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context | |
1302 | } | |
1303 | // this fault address is used for PAN test | |
1304 | // disable PAN and rerun | |
1305 | mask_saved_state_cpsr(state, 0, PSR64_PAN); | |
1306 | ||
1307 | retval = true; | |
1308 | } | |
1309 | ||
1310 | return retval; | |
1311 | } | |
1312 | ||
1313 | static bool | |
1314 | arm64_pan_test_pan_disabled_fault_handler(arm_saved_state_t * state) | |
1315 | { | |
1316 | bool retval = false; | |
1317 | uint32_t esr = get_saved_state_esr(state); | |
1318 | esr_exception_class_t class = ESR_EC(esr); | |
1319 | fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr)); | |
1320 | uint32_t cpsr = get_saved_state_cpsr(state); | |
1321 | ||
1322 | if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) && | |
1323 | !(cpsr & PSR64_PAN)) { | |
1324 | ++pan_exception_level; | |
1325 | // On an exception taken from a PAN-disabled context, verify | |
1326 | // that PAN is re-enabled for the exception handler and that | |
1327 | // accessing the test address produces a PAN fault. | |
1328 | ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr); | |
1329 | pan_fault_value = *(volatile char *)pan_test_addr; | |
1330 | ml_expect_fault_end(); | |
1331 | __builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context | |
1332 | add_saved_state_pc(state, 4); | |
1333 | ||
1334 | retval = true; | |
1335 | } | |
1336 | ||
1337 | return retval; | |
1338 | } | |
1339 | ||
5ba3f43e A |
1340 | kern_return_t |
1341 | arm64_pan_test() | |
1342 | { | |
f427ee49 | 1343 | bool values_match = false; |
5ba3f43e A |
1344 | vm_offset_t priv_addr = _COMM_PAGE_SIGNATURE; |
1345 | ||
1346 | T_LOG("Testing PAN."); | |
1347 | ||
cb323159 A |
1348 | |
1349 | T_ASSERT((__builtin_arm_rsr("SCTLR_EL1") & SCTLR_PAN_UNCHANGED) == 0, "SCTLR_EL1.SPAN must be cleared"); | |
1350 | ||
5ba3f43e A |
1351 | T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL); |
1352 | ||
cc8bc92a A |
1353 | pan_exception_level = 0; |
1354 | pan_fault_value = 0xDE; | |
5ba3f43e | 1355 | // convert priv_addr to one that is accessible from user mode |
5c9f4661 | 1356 | pan_test_addr = priv_addr + _COMM_HIGH_PAGE64_BASE_ADDRESS - |
0a7de745 | 1357 | _COMM_PAGE_START_ADDRESS; |
5ba3f43e | 1358 | |
f427ee49 A |
1359 | // Context-switch with PAN disabled is prohibited; prevent test logging from |
1360 | // triggering a voluntary context switch. | |
1361 | mp_disable_preemption(); | |
1362 | ||
5c9f4661 | 1363 | // Below should trigger a PAN exception as pan_test_addr is accessible |
5ba3f43e A |
1364 | // in user mode |
1365 | // The exception handler, upon recognizing the fault address is pan_test_addr, | |
1366 | // will disable PAN and rerun this instruction successfully | |
f427ee49 A |
1367 | ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr); |
1368 | values_match = (*(volatile char *)pan_test_addr == *(volatile char *)priv_addr); | |
1369 | ml_expect_fault_end(); | |
1370 | T_ASSERT(values_match, NULL); | |
cc8bc92a A |
1371 | |
	T_ASSERT(pan_exception_level == 2, NULL);

	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

	T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

	pan_exception_level = 0;
	pan_fault_value = 0xAD;
	pan_ro_addr = (vm_offset_t) &pan_ro_value;

	// Force a permission fault while PAN is disabled to make sure PAN is
	// re-enabled during the exception handler.
	ml_expect_fault_begin(arm64_pan_test_pan_disabled_fault_handler, pan_ro_addr);
	*((volatile uint64_t*)pan_ro_addr) = 0xFEEDFACECAFECAFE;
	ml_expect_fault_end();

	T_ASSERT(pan_exception_level == 2, NULL);

	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

	T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

	pan_test_addr = 0;
	pan_ro_addr = 0;

	__builtin_arm_wsr("pan", 1);

	mp_enable_preemption();

	return KERN_SUCCESS;
}
#endif /* __ARM_PAN_AVAILABLE__ */


kern_return_t
arm64_lock_test()
{
	return lt_test_locks();
}

kern_return_t
arm64_munger_test()
{
	mt_test_mungers();
	return KERN_SUCCESS;
}

#if defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST)
SECURITY_READ_ONLY_LATE(uint64_t) ctrr_ro_test;
uint64_t ctrr_nx_test = 0xd65f03c0; /* RET */
volatile uint64_t ctrr_exception_esr;
vm_offset_t ctrr_test_va;
vm_offset_t ctrr_test_page;

kern_return_t
ctrr_test(void)
{
	processor_t p;
	boolean_t ctrr_disable = FALSE;

	PE_parse_boot_argn("-unsafe_kernel_text", &ctrr_disable, sizeof(ctrr_disable));

#if CONFIG_CSR_FROM_DT
	if (csr_unsafe_kernel_text) {
		ctrr_disable = TRUE;
	}
#endif /* CONFIG_CSR_FROM_DT */

	if (ctrr_disable) {
		T_LOG("Skipping CTRR test when -unsafe_kernel_text boot-arg present");
		return KERN_SUCCESS;
	}

	T_LOG("Running CTRR test.");

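	// thread_bind() only takes effect at the next context switch, so
	// block explicitly to migrate this thread onto processor p before
	// running the per-CPU test body.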
	for (p = processor_list; p != NULL; p = p->processor_list) {
		thread_bind(p);
		thread_block(THREAD_CONTINUE_NULL);
		T_LOG("Running CTRR test on cpu %d\n", p->cpu_id);
		ctrr_test_cpu();
	}

	/* unbind thread from specific cpu */
	thread_bind(PROCESSOR_NULL);
	thread_block(THREAD_CONTINUE_NULL);

	return KERN_SUCCESS;
}

static bool
ctrr_test_ro_fault_handler(arm_saved_state_t * state)
{
	bool retval = false;
	uint32_t esr = get_saved_state_esr(state);
	esr_exception_class_t class = ESR_EC(esr);
	fault_status_t fsc = ISS_DA_FSC(ESR_ISS(esr));

	if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
		ctrr_exception_esr = esr;
		add_saved_state_pc(state, 4);
		retval = true;
	}

	return retval;
}

static bool
ctrr_test_nx_fault_handler(arm_saved_state_t * state)
{
	bool retval = false;
	uint32_t esr = get_saved_state_esr(state);
	esr_exception_class_t class = ESR_EC(esr);
	fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));

	if ((class == ESR_EC_IABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
		ctrr_exception_esr = esr;
		/* return to the instruction immediately after the call into the NX page */
		set_saved_state_pc(state, get_saved_state_lr(state));
		retval = true;
	}

	return retval;
}

/* test CTRR on a cpu; the caller must bind the thread to the desired cpu */
/* ctrr_test_page was reserved during the bootstrap process */
kern_return_t
ctrr_test_cpu(void)
{
	ppnum_t ro_pn, nx_pn;
	uint64_t *ctrr_ro_test_ptr;
	void (*ctrr_nx_test_ptr)(void);
	kern_return_t kr;
	uint64_t prot = 0;
	extern vm_offset_t virtual_space_start;

	/* ctrr read-only region = [rorgn_begin_va, rorgn_end_va) */

	vm_offset_t rorgn_begin_va = phystokv(ctrr_begin);
	vm_offset_t rorgn_end_va = phystokv(ctrr_end) + 1;
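	// ctrr_begin and ctrr_end bound the protected region inclusively by
	// physical address; adding 1 to the VA of the last byte yields the
	// exclusive upper bound used below.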
	vm_offset_t ro_test_va = (vm_offset_t)&ctrr_ro_test;
	vm_offset_t nx_test_va = (vm_offset_t)&ctrr_nx_test;

	T_EXPECT(rorgn_begin_va <= ro_test_va && ro_test_va < rorgn_end_va, "Expect ro_test_va to be inside the CTRR region");
	T_EXPECT((nx_test_va < rorgn_begin_va) ^ (nx_test_va >= rorgn_end_va), "Expect nx_test_va to be outside the CTRR region");
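	// The XOR holds only when exactly one bound check is true, i.e. when
	// nx_test_va lies on one side of the region; an address inside the
	// region would make both comparisons false.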
	ro_pn = pmap_find_phys(kernel_pmap, ro_test_va);
	nx_pn = pmap_find_phys(kernel_pmap, nx_test_va);
	T_EXPECT(ro_pn && nx_pn, "Expect ro page number and nx page number to be non-zero");

	T_LOG("test virtual page: %p, ctrr_ro_test: %p, ctrr_nx_test: %p, ro_pn: %x, nx_pn: %x ",
	    (void *)ctrr_test_page, &ctrr_ro_test, &ctrr_nx_test, ro_pn, nx_pn);

	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
	T_EXPECT(~prot & ARM_TTE_VALID, "Expect ctrr_test_page to be unmapped");

	T_LOG("Read only region test mapping virtual page %p to CTRR RO page number %d", (void *)ctrr_test_page, ro_pn);
	kr = pmap_enter(kernel_pmap, ctrr_test_page, ro_pn,
	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
	T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RW mapping to succeed");

	// Assert that the entire MMU protection path (hierarchical protection
	// model) is NOT read-only: the effective block-level protections
	// fetched from the table/block entries must grant EL1 write access, so
	// any subsequent write fault can only come from CTRR.
	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
	T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RWNA && (prot & ARM_PTE_PNX), "Mapping is EL1 RWNX");

	ctrr_test_va = ctrr_test_page + (ro_test_va & PAGE_MASK);
	ctrr_ro_test_ptr = (void *)ctrr_test_va;

	T_LOG("Read only region test writing to %p to provoke data abort", ctrr_ro_test_ptr);

	// should cause a data abort
	ml_expect_fault_begin(ctrr_test_ro_fault_handler, ctrr_test_va);
	*ctrr_ro_test_ptr = 1;
	ml_expect_fault_end();

	// ensure the write permission fault was taken at the expected level;
	// the data abort handler sets ctrr_exception_esr when ctrr_test_va
	// takes a permission fault
	T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_DABORT_EL1, "Data Abort from EL1 expected");
	T_EXPECT(ISS_DA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");
	T_EXPECT(ESR_ISS(ctrr_exception_esr) & ISS_DA_WNR, "Write Fault Expected");

	ctrr_test_va = 0;
	ctrr_exception_esr = 0;
	pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);

	T_LOG("No execute test mapping virtual page %p to CTRR PXN page number %d", (void *)ctrr_test_page, nx_pn);

	kr = pmap_enter(kernel_pmap, ctrr_test_page, nx_pn,
	    VM_PROT_READ | VM_PROT_EXECUTE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
	T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RX mapping to succeed");

	// Assert that the entire MMU protection path is NOT execute-never: the
	// mapping must be executable at EL1, so any subsequent instruction
	// abort can only come from CTRR.
	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
	T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RONA && (~prot & ARM_PTE_PNX), "Mapping is EL1 ROX");

	ctrr_test_va = ctrr_test_page + (nx_test_va & PAGE_MASK);
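	// Under pointer authentication (arm64e), a function pointer must carry
	// a valid PAC signature before it can be called, so sign the freshly
	// constructed code address.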
#if __has_feature(ptrauth_calls)
	ctrr_nx_test_ptr = ptrauth_sign_unauthenticated((void *)ctrr_test_va, ptrauth_key_function_pointer, 0);
#else
	ctrr_nx_test_ptr = (void *)ctrr_test_va;
#endif

	T_LOG("No execute test calling ctrr_nx_test_ptr(): %p to provoke instruction abort", ctrr_nx_test_ptr);

	// should cause a prefetch abort
	ml_expect_fault_begin(ctrr_test_nx_fault_handler, ctrr_test_va);
	ctrr_nx_test_ptr();
	ml_expect_fault_end();

	// ensure the execute permission fault was taken at the expected level
	T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_IABORT_EL1, "Instruction abort from EL1 Expected");
	T_EXPECT(ISS_IA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");

	ctrr_test_va = 0;
	ctrr_exception_esr = 0;

	pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);

	T_LOG("Expect no faults when reading the CTRR region to verify correct programming of the CTRR limits");
	for (vm_offset_t addr = rorgn_begin_va; addr < rorgn_end_va; addr += 8) {
		volatile uint64_t x = *(uint64_t *)addr;
		(void) x; /* read for side effect only */
	}

	return KERN_SUCCESS;
}
#endif /* defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST) */


#if HAS_TWO_STAGE_SPR_LOCK

#define STR1(x) #x
#define STR(x) STR1(x)
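// Two-level stringification: STR(ARM64_REG_HID8) first expands the register
// macro to its underlying system-register token, then turns that token into
// the string literal that __builtin_arm_rsr64() requires.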

volatile vm_offset_t spr_lock_test_addr;
volatile uint32_t spr_lock_exception_esr;

kern_return_t
arm64_spr_lock_test()
{
	processor_t p;

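	// On each CPU: read HID8, attempt to write back its complement through
	// the locked MSR path, and expect a synchronous abort (recorded by the
	// fault handler in spr_lock_exception_esr) that leaves the register
	// unchanged.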
	for (p = processor_list; p != NULL; p = p->processor_list) {
		thread_bind(p);
		thread_block(THREAD_CONTINUE_NULL);
		T_LOG("Running SPR lock test on cpu %d\n", p->cpu_id);

		uint64_t orig_value = __builtin_arm_rsr64(STR(ARM64_REG_HID8));
		spr_lock_test_addr = (vm_offset_t)VM_KERNEL_STRIP_PTR(arm64_msr_lock_test);
		spr_lock_exception_esr = 0;
		arm64_msr_lock_test(~orig_value);
		T_EXPECT(spr_lock_exception_esr != 0, "MSR write generated synchronous abort");

		uint64_t new_value = __builtin_arm_rsr64(STR(ARM64_REG_HID8));
		T_EXPECT(orig_value == new_value, "MSR write did not succeed");

		spr_lock_test_addr = 0;
	}

	/* unbind thread from specific cpu */
	thread_bind(PROCESSOR_NULL);
	thread_block(THREAD_CONTINUE_NULL);

	T_PASS("Done running SPR lock tests");

	return KERN_SUCCESS;
}

#endif /* HAS_TWO_STAGE_SPR_LOCK */
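
/*
 * A minimal sketch of how these entry points are typically registered with
 * the xnupost runner; the actual table lives in a separate file (for example
 * osfmk/tests/kernel_tests.c in recent xnu releases), so treat the entries
 * below as illustrative rather than authoritative:
 *
 *     XNUPOST_TEST_CONFIG_BASIC(arm64_lock_test),
 *     XNUPOST_TEST_CONFIG_BASIC(arm64_munger_test),
 * #if __ARM_PAN_AVAILABLE__
 *     XNUPOST_TEST_CONFIG_BASIC(arm64_pan_test),
 * #endif
 */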