/*
 * Copyright (c) 2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System Copyright (c) 1991,1990,1989,1988,1987 Carnegie
 * Mellon University All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright notice
 * and this permission notice appear in all copies of the software,
 * derivative works or modified versions, and any portions thereof, and that
 * both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.
 * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
 * WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science Carnegie Mellon University Pittsburgh PA
 *  15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon the
 * rights to redistribute these changes.
 */

#include <mach_ldebug.h>

#define LOCK_PRIVATE 1

#include <kern/kalloc.h>
#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/xpr.h>
#include <kern/debug.h>
#include <string.h>
#include <tests/xnupost.h>

#if MACH_KDB
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
#include <ddb/db_print.h>
#endif /* MACH_KDB */

#include <sys/kdebug.h>
#include <sys/munge.h>
#include <machine/cpu_capabilities.h>
#include <arm/cpu_data_internal.h>

kern_return_t arm64_lock_test(void);
kern_return_t arm64_munger_test(void);
kern_return_t ex_cb_test(void);
kern_return_t arm64_pan_test(void);

// exception handler ignores this fault address during PAN test
#if __ARM_PAN_AVAILABLE__
const uint64_t pan_ro_value = 0xFEEDB0B0DEADBEEF;
vm_offset_t pan_test_addr = 0;
vm_offset_t pan_ro_addr = 0;
volatile int pan_exception_level = 0;
volatile char pan_fault_value = 0;
#endif

#include <libkern/OSAtomic.h>
#define LOCK_TEST_ITERATIONS 50
static hw_lock_data_t lt_hw_lock;
static lck_spin_t lt_lck_spin_t;
static lck_mtx_t lt_mtx;
static lck_rw_t lt_rwlock;
static volatile uint32_t lt_counter = 0;
static volatile int lt_spinvolatile;
static volatile uint32_t lt_max_holders = 0;
static volatile uint32_t lt_upgrade_holders = 0;
static volatile uint32_t lt_max_upgrade_holders = 0;
static volatile uint32_t lt_num_holders = 0;
static volatile uint32_t lt_done_threads;
static volatile uint32_t lt_target_done_threads;
static volatile uint32_t lt_cpu_bind_id = 0;
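
/*
 * lt_counter totals successful critical-section entries across all test
 * threads; lt_num_holders/lt_max_holders track how many threads are
 * inside a blocking lock at once (lt_max_holders must stay at 1 for the
 * exclusive-lock tests); lt_done_threads/lt_target_done_threads form
 * the join protocol polled by lt_wait_for_lock_test_threads().
 */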

static void
lt_note_another_blocking_lock_holder()
{
	hw_lock_lock(&lt_hw_lock);
	lt_num_holders++;
	lt_max_holders = (lt_max_holders < lt_num_holders) ? lt_num_holders : lt_max_holders;
	hw_lock_unlock(&lt_hw_lock);
}

static void
lt_note_blocking_lock_release()
{
	hw_lock_lock(&lt_hw_lock);
	lt_num_holders--;
	hw_lock_unlock(&lt_hw_lock);
}
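
/*
 * The holder counters are updated under lt_hw_lock so the
 * read-modify-write of lt_num_holders and the max computation stay
 * atomic with respect to the other test threads; a bare volatile
 * increment would race.
 */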

static void
lt_spin_a_little_bit()
{
	uint32_t i;

	for (i = 0; i < 10000; i++) {
		lt_spinvolatile++;
	}
}

static void
lt_sleep_a_little_bit()
{
	delay(100);
}

static void
lt_grab_mutex()
{
	lck_mtx_lock(&lt_mtx);
	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_mtx_unlock(&lt_mtx);
}

static void
lt_grab_mutex_with_try()
{
	while (0 == lck_mtx_try_lock(&lt_mtx)) {
		;
	}
	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_mtx_unlock(&lt_mtx);
}

static void
lt_grab_rw_exclusive()
{
	lck_rw_lock_exclusive(&lt_rwlock);
	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_rw_done(&lt_rwlock);
}

static void
lt_grab_rw_exclusive_with_try()
{
	while (0 == lck_rw_try_lock_exclusive(&lt_rwlock)) {
		lt_sleep_a_little_bit();
	}

	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_rw_done(&lt_rwlock);
}

/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
static void
lt_grab_rw_shared()
{
	lck_rw_lock_shared(&lt_rwlock);
	lt_counter++;

	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_note_blocking_lock_release();

	lck_rw_done(&lt_rwlock);
}
*/

/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
static void
lt_grab_rw_shared_with_try()
{
	while (0 == lck_rw_try_lock_shared(&lt_rwlock)) {
		;
	}
	lt_counter++;

	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_note_blocking_lock_release();

	lck_rw_done(&lt_rwlock);
}
*/

static void
lt_upgrade_downgrade_rw()
{
	boolean_t upgraded, success;

	success = lck_rw_try_lock_shared(&lt_rwlock);
	if (!success) {
		lck_rw_lock_shared(&lt_rwlock);
	}

	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_note_blocking_lock_release();

	upgraded = lck_rw_lock_shared_to_exclusive(&lt_rwlock);
	if (!upgraded) {
		success = lck_rw_try_lock_exclusive(&lt_rwlock);

		if (!success) {
			lck_rw_lock_exclusive(&lt_rwlock);
		}
	}

	lt_upgrade_holders++;
	if (lt_upgrade_holders > lt_max_upgrade_holders) {
		lt_max_upgrade_holders = lt_upgrade_holders;
	}

	lt_counter++;
	lt_sleep_a_little_bit();

	lt_upgrade_holders--;

	lck_rw_lock_exclusive_to_shared(&lt_rwlock);

	lt_spin_a_little_bit();
	lck_rw_done(&lt_rwlock);
}
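
/*
 * lck_rw_lock_shared_to_exclusive() drops the shared hold when the
 * upgrade loses the race to another upgrader, which is why the failure
 * path above must re-acquire the lock in exclusive mode from scratch.
 */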

const int limit = 1000000;
static int lt_stress_local_counters[MAX_CPUS];

static void
lt_stress_hw_lock()
{
	int local_counter = 0;

	uint cpuid = current_processor()->cpu_id;

	kprintf("%s>cpu %d starting\n", __FUNCTION__, cpuid);

	hw_lock_lock(&lt_hw_lock);
	lt_counter++;
	local_counter++;
	hw_lock_unlock(&lt_hw_lock);

	while (lt_counter < lt_target_done_threads) {
		;
	}

	kprintf("%s>cpu %d started\n", __FUNCTION__, cpuid);

	while (lt_counter < limit) {
		spl_t s = splsched();
		hw_lock_lock(&lt_hw_lock);
		if (lt_counter < limit) {
			lt_counter++;
			local_counter++;
		}
		hw_lock_unlock(&lt_hw_lock);
		splx(s);
	}

	lt_stress_local_counters[cpuid] = local_counter;

	kprintf("%s>final counter %d cpu %d incremented the counter %d times\n", __FUNCTION__, lt_counter, cpuid, local_counter);
}
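
/*
 * splsched()/splx() bracket each acquisition so the CPU cannot take an
 * interrupt while spinning with the lock held, and the per-CPU
 * local_counter is published to lt_stress_local_counters so
 * lt_test_locks() can later check the increments for fairness.
 */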

static void
lt_grab_hw_lock()
{
	hw_lock_lock(&lt_hw_lock);
	lt_counter++;
	lt_spin_a_little_bit();
	hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_hw_lock_with_try()
{
	while (0 == hw_lock_try(&lt_hw_lock)) {
		;
	}
	lt_counter++;
	lt_spin_a_little_bit();
	hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_hw_lock_with_to()
{
	while (0 == hw_lock_to(&lt_hw_lock, LockTimeOut)) {
		mp_enable_preemption();
	}
	lt_counter++;
	lt_spin_a_little_bit();
	hw_lock_unlock(&lt_hw_lock);
}
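
/*
 * hw_lock_to() returns 0 on timeout; the retry loop re-enables
 * preemption after each failed attempt (the lock-take path leaves
 * preemption disabled, as the companion lt_trylock_hw_lock_with_to()
 * also compensates for), then spins again.
 */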

static void
lt_grab_spin_lock()
{
	lck_spin_lock(&lt_lck_spin_t);
	lt_counter++;
	lt_spin_a_little_bit();
	lck_spin_unlock(&lt_lck_spin_t);
}

static void
lt_grab_spin_lock_with_try()
{
	while (0 == lck_spin_try_lock(&lt_lck_spin_t)) {
		;
	}
	lt_counter++;
	lt_spin_a_little_bit();
	lck_spin_unlock(&lt_lck_spin_t);
}

static volatile boolean_t lt_thread_lock_grabbed;
static volatile boolean_t lt_thread_lock_success;

static void
lt_reset()
{
	lt_counter = 0;
	lt_max_holders = 0;
	lt_num_holders = 0;
	lt_max_upgrade_holders = 0;
	lt_upgrade_holders = 0;
	lt_done_threads = 0;
	lt_target_done_threads = 0;
	lt_cpu_bind_id = 0;

	OSMemoryBarrier();
}

static void
lt_trylock_hw_lock_with_to()
{
	OSMemoryBarrier();
	while (!lt_thread_lock_grabbed) {
		lt_sleep_a_little_bit();
		OSMemoryBarrier();
	}
	lt_thread_lock_success = hw_lock_to(&lt_hw_lock, 100);
	OSMemoryBarrier();
	mp_enable_preemption();
}

static void
lt_trylock_spin_try_lock()
{
	OSMemoryBarrier();
	while (!lt_thread_lock_grabbed) {
		lt_sleep_a_little_bit();
		OSMemoryBarrier();
	}
	lt_thread_lock_success = lck_spin_try_lock(&lt_lck_spin_t);
	OSMemoryBarrier();
}
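
/*
 * Handshake used by the trylock-failure tests: lt_test_trylocks() takes
 * the lock under test, then sets lt_thread_lock_grabbed; the spawned
 * thread above spins on that flag, attempts its trylock (which should
 * fail), and reports the result through lt_thread_lock_success. The
 * OSMemoryBarrier() calls order the flag and result accesses between
 * the two threads.
 */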

static void
lt_trylock_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void (*)(void))arg;

	func();

	OSIncrementAtomic((volatile SInt32 *)&lt_done_threads);
}

static void
lt_start_trylock_thread(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_trylock_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}

static void
lt_wait_for_lock_test_threads()
{
	OSMemoryBarrier();
	/* Spin to reduce dependencies */
	while (lt_done_threads < lt_target_done_threads) {
		lt_sleep_a_little_bit();
		OSMemoryBarrier();
	}
	OSMemoryBarrier();
}

static kern_return_t
lt_test_trylocks()
{
	boolean_t success;

	/*
	 * First mtx try lock succeeds, second fails.
	 */
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NOTNULL(success, "First mtx try lock");
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NULL(success, "Second mtx try lock for a locked mtx");
	lck_mtx_unlock(&lt_mtx);

	/*
	 * After regular grab, can't try lock.
	 */
	lck_mtx_lock(&lt_mtx);
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NULL(success, "try lock should fail after regular lck_mtx_lock");
	lck_mtx_unlock(&lt_mtx);

	/*
	 * Two shared try locks on a previously unheld rwlock succeed, and a
	 * subsequent exclusive attempt fails.
	 */
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "exclusive lock attempt on previously held lock should fail");
	lck_rw_done(&lt_rwlock);
	lck_rw_done(&lt_rwlock);

	/*
	 * After regular shared grab, can trylock
	 * for shared but not for exclusive.
	 */
	lck_rw_lock_shared(&lt_rwlock);
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "After regular shared grab another shared try lock should succeed.");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular shared grab an exclusive lock attempt should fail.");
	lck_rw_done(&lt_rwlock);
	lck_rw_done(&lt_rwlock);

	/*
	 * An exclusive try lock succeeds, subsequent shared and exclusive
	 * attempts fail.
	 */
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "An exclusive try lock should succeed");
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NULL(success, "try lock in shared mode attempt after an exclusive grab should fail");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "try lock in exclusive mode attempt after an exclusive grab should fail");
	lck_rw_done(&lt_rwlock);

	/*
	 * After regular exclusive grab, neither kind of trylock succeeds.
	 */
	lck_rw_lock_exclusive(&lt_rwlock);
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular exclusive grab, shared trylock should not succeed");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular exclusive grab, exclusive trylock should not succeed");
	lck_rw_done(&lt_rwlock);

	/*
	 * First spin lock attempts succeed, second attempts fail.
	 */
	success = hw_lock_try(&lt_hw_lock);
	T_ASSERT_NOTNULL(success, "First spin lock attempts should succeed");
	success = hw_lock_try(&lt_hw_lock);
	T_ASSERT_NULL(success, "Second attempt to spin lock should fail");
	hw_lock_unlock(&lt_hw_lock);

	hw_lock_lock(&lt_hw_lock);
	success = hw_lock_try(&lt_hw_lock);
	T_ASSERT_NULL(success, "After taking spin lock, trylock attempt should fail");
	hw_lock_unlock(&lt_hw_lock);

	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	OSMemoryBarrier();
	lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
	success = hw_lock_to(&lt_hw_lock, 100);
	T_ASSERT_NOTNULL(success, "First spin lock with timeout should succeed");
	OSIncrementAtomic((volatile SInt32 *)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "Second spin lock with timeout should fail and timeout");
	hw_lock_unlock(&lt_hw_lock);

	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	OSMemoryBarrier();
	lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
	hw_lock_lock(&lt_hw_lock);
	OSIncrementAtomic((volatile SInt32 *)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "after taking a spin lock, lock attempt with timeout should fail");
	hw_lock_unlock(&lt_hw_lock);

	success = lck_spin_try_lock(&lt_lck_spin_t);
	T_ASSERT_NOTNULL(success, "spin trylock of previously unheld lock should succeed");
	success = lck_spin_try_lock(&lt_lck_spin_t);
	T_ASSERT_NULL(success, "spin trylock attempt of previously held lock (with trylock) should fail");
	lck_spin_unlock(&lt_lck_spin_t);

	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	lt_start_trylock_thread(lt_trylock_spin_try_lock);
	lck_spin_lock(&lt_lck_spin_t);
	OSIncrementAtomic((volatile SInt32 *)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "spin trylock attempt of previously held lock should fail");
	lck_spin_unlock(&lt_lck_spin_t);

	return KERN_SUCCESS;
}

static void
lt_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void (*)(void))arg;
	uint32_t i;

	for (i = 0; i < LOCK_TEST_ITERATIONS; i++) {
		func();
	}

	OSIncrementAtomic((volatile SInt32 *)&lt_done_threads);
}

static void
lt_bound_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void (*)(void))arg;

	int cpuid = OSIncrementAtomic((volatile SInt32 *)&lt_cpu_bind_id);

	processor_t processor = processor_list;
	while ((processor != NULL) && (processor->cpu_id != cpuid)) {
		processor = processor->processor_list;
	}

	if (processor != NULL) {
		thread_bind(processor);
	}

	thread_block(THREAD_CONTINUE_NULL);

	func();

	OSIncrementAtomic((volatile SInt32 *)&lt_done_threads);
}
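
/*
 * thread_bind() only marks the thread's bound processor; the
 * thread_block() that follows forces a reschedule so the thread is
 * actually running on its claimed CPU (handed out by the atomic
 * increment of lt_cpu_bind_id) before the test body executes.
 */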

static void
lt_start_lock_thread(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}

static void
lt_start_lock_thread_bound(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_bound_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}

static kern_return_t
lt_test_locks()
{
	kern_return_t kr = KERN_SUCCESS;
	lck_grp_attr_t *lga = lck_grp_attr_alloc_init();
	lck_grp_t *lg = lck_grp_alloc_init("lock test", lga);

	lck_mtx_init(&lt_mtx, lg, LCK_ATTR_NULL);
	lck_rw_init(&lt_rwlock, lg, LCK_ATTR_NULL);
	lck_spin_init(&lt_lck_spin_t, lg, LCK_ATTR_NULL);
	hw_lock_init(&lt_hw_lock);

	T_LOG("Testing locks.");

	/* Try locks (custom) */
	lt_reset();

	T_LOG("Running try lock test.");
	kr = lt_test_trylocks();
	T_EXPECT_NULL(kr, "try lock test failed.");

	/* Uncontended mutex */
	T_LOG("Running uncontended mutex test.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_mutex);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Contended mutex */
	T_LOG("Running contended mutex test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_mutex);
	lt_start_lock_thread(lt_grab_mutex);
	lt_start_lock_thread(lt_grab_mutex);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Contended mutex: try locks */
	T_LOG("Running contended mutex trylock test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Uncontended exclusive rwlock */
	T_LOG("Running uncontended exclusive rwlock test.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Uncontended shared rwlock */

	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	T_LOG("Running uncontended shared rwlock test.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_rw_shared);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	*/

	/* Contended exclusive rwlock */
	T_LOG("Running contended exclusive rwlock test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* One shared, two exclusive */
	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	T_LOG("Running test with one shared and two exclusive rw lock threads.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_rw_shared);
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	*/

	/* Four shared */
	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	T_LOG("Running test with four shared holders.");
	lt_reset();
	lt_target_done_threads = 4;
	lt_start_lock_thread(lt_grab_rw_shared);
	lt_start_lock_thread(lt_grab_rw_shared);
	lt_start_lock_thread(lt_grab_rw_shared);
	lt_start_lock_thread(lt_grab_rw_shared);
	lt_wait_for_lock_test_threads();
	T_EXPECT_LE_UINT(lt_max_holders, 4, NULL);
	*/

	/* Three doing upgrades and downgrades */
	T_LOG("Running test with threads upgrading and downgrading.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
	T_EXPECT_EQ_UINT(lt_max_upgrade_holders, 1, NULL);

	/* Uncontended - exclusive trylocks */
	T_LOG("Running test with single thread doing exclusive rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Uncontended - shared trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	T_LOG("Running test with single thread doing shared rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_rw_shared_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	*/

	/* Three doing exclusive trylocks */
	T_LOG("Running test with threads doing exclusive rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Three doing shared trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	T_LOG("Running test with threads doing shared rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_rw_shared_with_try);
	lt_start_lock_thread(lt_grab_rw_shared_with_try);
	lt_start_lock_thread(lt_grab_rw_shared_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
	*/

	/* Three doing various trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	T_LOG("Running test with threads doing mixed rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 4;
	lt_start_lock_thread(lt_grab_rw_shared_with_try);
	lt_start_lock_thread(lt_grab_rw_shared_with_try);
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_LE_UINT(lt_max_holders, 2, NULL);
	*/

	/* HW locks */
	T_LOG("Running test with hw_lock_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* HW locks stress test */
	T_LOG("Running HW locks stress test with hw_lock_lock()");
	extern unsigned int real_ncpus;
	lt_reset();
	lt_target_done_threads = real_ncpus;
	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
		lt_start_lock_thread_bound(lt_stress_hw_lock);
	}
	lt_wait_for_lock_test_threads();
	bool starvation = false;
	uint total_local_count = 0;
	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
		starvation = starvation || (lt_stress_local_counters[processor->cpu_id] < 10);
		total_local_count += lt_stress_local_counters[processor->cpu_id];
	}
	if (total_local_count != lt_counter) {
		T_FAIL("Lock failure\n");
	} else if (starvation) {
		T_FAIL("Lock starvation found\n");
	} else {
		T_PASS("HW locks stress test with hw_lock_lock()");
	}
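
	/*
	 * Verification for the stress test above: the per-CPU counters must
	 * sum to exactly lt_counter (every increment happened under the
	 * lock, so none may be lost or double-counted), and any CPU that
	 * managed fewer than 10 increments is flagged as starved, i.e. the
	 * lock was not granted fairly across CPUs.
	 */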

	/* HW locks: trylocks */
	T_LOG("Running test with hw_lock_try()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* HW locks: with timeout */
	T_LOG("Running test with hw_lock_to()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* Spin locks */
	T_LOG("Running test with lck_spin_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* Spin locks: trylocks */
	T_LOG("Running test with lck_spin_try_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	return KERN_SUCCESS;
}

#define MT_MAX_ARGS 8
#define MT_INITIAL_VALUE 0xfeedbeef
#define MT_W_VAL (0x00000000feedbeefULL) /* Drop in zeros */
#define MT_S_VAL (0xfffffffffeedbeefULL) /* High bit is 1, so sign-extends as negative */
#define MT_L_VAL (((uint64_t)MT_INITIAL_VALUE) | (((uint64_t)MT_INITIAL_VALUE) << 32)) /* Two back-to-back */
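
/*
 * Munger naming, as implied by the MT_*_VAL definitions above: each
 * letter describes one argument in order -- 'w' is a 32-bit word
 * zero-extended to 64 bits (MT_W_VAL), 's' is a 32-bit value
 * sign-extended to 64 bits (MT_S_VAL), and 'l' is a 64-bit long
 * assembled from two adjacent 32-bit input words (MT_L_VAL).
 * munge_wl, for example, consumes three input words and produces two
 * 64-bit arguments.
 */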
typedef void (*sy_munge_t)(void*);

#define MT_FUNC(x) #x, x
struct munger_test {
	const char *mt_name;
	sy_munge_t mt_func;
	uint32_t mt_in_words;
	uint32_t mt_nout;
	uint64_t mt_expected[MT_MAX_ARGS];
} munger_tests[] = {
	{MT_FUNC(munge_w), 1, 1, {MT_W_VAL}},
	{MT_FUNC(munge_ww), 2, 2, {MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_www), 3, 3, {MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwww), 4, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwww), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwww), 6, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwww), 7, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwwww), 8, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wl), 3, 2, {MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwl), 4, 3, {MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwlll), 8, 5, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wlw), 4, 3, {MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wlwwwll), 10, 7, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wlwwwllw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wlwwlwlw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wll), 5, 3, {MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wlll), 7, 4, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wllwwll), 11, 7, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwlw), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwlww), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwl), 5, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwlw), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwl), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwl), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwlww), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwllw), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwlll), 11, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwwl), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwwlw), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwwll), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wsw), 3, 3, {MT_W_VAL, MT_S_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wws), 3, 3, {MT_W_VAL, MT_W_VAL, MT_S_VAL}},
	{MT_FUNC(munge_wwwsw), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_S_VAL, MT_W_VAL}},
	{MT_FUNC(munge_llllll), 12, 6, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_l), 2, 1, {MT_L_VAL}},
	{MT_FUNC(munge_lw), 3, 2, {MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_lwww), 5, 4, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_lwwwwwww), 9, 8, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wlwwwl), 8, 6, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwlwwwl), 9, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}
};
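
/*
 * Worked example for a mixed signature: munge_wl starts from three
 * 32-bit input words, each holding MT_INITIAL_VALUE (0xfeedbeef), and
 * must leave {0x00000000feedbeef, 0xfeedbeeffeedbeef} -- the 'w' is the
 * first word zero-extended, the 'l' glues the next two words together.
 */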

#define MT_TEST_COUNT (sizeof(munger_tests) / sizeof(struct munger_test))

static void
mt_reset(uint32_t in_words, size_t total_size, uint32_t *data)
{
	uint32_t i;

	for (i = 0; i < in_words; i++) {
		data[i] = MT_INITIAL_VALUE;
	}

	if (in_words * sizeof(uint32_t) < total_size) {
		bzero(&data[in_words], total_size - in_words * sizeof(uint32_t));
	}
}
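
/*
 * mt_reset() seeds the first in_words 32-bit slots with the sentinel
 * pattern and zeroes the rest of the buffer, giving every munger a
 * known in-place starting state: the munger then expands those packed
 * 32-bit inputs into the 64-bit output slots that mt_test_mungers()
 * compares against mt_expected.
 */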

static void
mt_test_mungers()
{
	uint64_t data[MT_MAX_ARGS];
	uint32_t i, j;

	for (i = 0; i < MT_TEST_COUNT; i++) {
		struct munger_test *test = &munger_tests[i];
		int pass = 1;

		T_LOG("Testing %s", test->mt_name);

		mt_reset(test->mt_in_words, sizeof(data), (uint32_t *)data);
		test->mt_func(data);

		for (j = 0; j < test->mt_nout; j++) {
			if (data[j] != test->mt_expected[j]) {
				T_FAIL("Index %d: expected %llx, got %llx.", j, test->mt_expected[j], data[j]);
				pass = 0;
			}
		}
		if (pass) {
			T_PASS(test->mt_name);
		}
	}
}

/* Exception Callback Test */
static ex_cb_action_t
excb_test_action(
	ex_cb_class_t cb_class,
	void *refcon,
	const ex_cb_state_t *state
	)
{
	ex_cb_state_t *context = (ex_cb_state_t *)refcon;

	if ((NULL == refcon) || (NULL == state)) {
		return EXCB_ACTION_TEST_FAIL;
	}

	context->far = state->far;

	switch (cb_class) {
	case EXCB_CLASS_TEST1:
		return EXCB_ACTION_RERUN;
	case EXCB_CLASS_TEST2:
		return EXCB_ACTION_NONE;
	default:
		return EXCB_ACTION_TEST_FAIL;
	}
}
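
/*
 * The test callback exercises the ex_cb contract: refcon is handed back
 * as the context pointer supplied to ex_cb_register(), state->far
 * carries the fault address passed to ex_cb_invoke(), and the returned
 * action (RERUN for TEST1, NONE for TEST2) is what ex_cb_test() asserts
 * on per class.
 */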

kern_return_t
ex_cb_test()
{
	const vm_offset_t far1 = 0xdead0001;
	const vm_offset_t far2 = 0xdead0002;
	kern_return_t kr;
	ex_cb_state_t test_context_1 = {0xdeadbeef};
	ex_cb_state_t test_context_2 = {0xdeadbeef};
	ex_cb_action_t action;

	T_LOG("Testing Exception Callback.");

	T_LOG("Running registration test.");

	kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
	T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST1 exception callback");
	kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
	T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST2 exception callback");

	kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
	T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST2 exception callback");
	kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
	T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST1 exception callback");

	T_LOG("Running invocation test.");

	action = ex_cb_invoke(EXCB_CLASS_TEST1, far1);
	T_ASSERT(EXCB_ACTION_RERUN == action, NULL);
	T_ASSERT(far1 == test_context_1.far, NULL);

	action = ex_cb_invoke(EXCB_CLASS_TEST2, far2);
	T_ASSERT(EXCB_ACTION_NONE == action, NULL);
	T_ASSERT(far2 == test_context_2.far, NULL);

	action = ex_cb_invoke(EXCB_CLASS_TEST3, 0);
	T_ASSERT(EXCB_ACTION_NONE == action, NULL);

	return KERN_SUCCESS;
}

#if __ARM_PAN_AVAILABLE__
kern_return_t
arm64_pan_test()
{
	vm_offset_t priv_addr = _COMM_PAGE_SIGNATURE;

	T_LOG("Testing PAN.");

	T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);

	pan_exception_level = 0;
	pan_fault_value = 0xDE;
	// convert priv_addr to one that is accessible from user mode
	pan_test_addr = priv_addr + _COMM_HIGH_PAGE64_BASE_ADDRESS -
	    _COMM_PAGE_START_ADDRESS;

	// Below should trigger a PAN exception as pan_test_addr is accessible
	// in user mode.
	// The exception handler, upon recognizing the fault address is
	// pan_test_addr, will disable PAN and rerun this instruction
	// successfully.
	T_ASSERT(*(char *)pan_test_addr == *(char *)priv_addr, NULL);

	T_ASSERT(pan_exception_level == 2, NULL);

	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

	T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

	pan_exception_level = 0;
	pan_fault_value = 0xAD;
	pan_ro_addr = (vm_offset_t)&pan_ro_value;

	// Force a permission fault while PAN is disabled to make sure PAN is
	// re-enabled during the exception handler.
	*((volatile uint64_t *)pan_ro_addr) = 0xFEEDFACECAFECAFE;

	T_ASSERT(pan_exception_level == 2, NULL);

	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

	T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

	pan_test_addr = 0;
	pan_ro_addr = 0;

	__builtin_arm_wsr("pan", 1);
	return KERN_SUCCESS;
}
#endif
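
/*
 * Recap of arm64_pan_test(): reading the commpage signature through its
 * user-accessible alias must fault while PAN is enabled; the exception
 * handler, which ignores pan_test_addr faults (see the globals at the
 * top of this file), records pan_fault_value and pan_exception_level,
 * disables PAN, and reruns the access. The write to pan_ro_addr then
 * forces a permission fault while PAN is off, per the comment above, to
 * make sure the handler path re-enables PAN, and the test finishes by
 * restoring PAN itself via __builtin_arm_wsr("pan", 1).
 */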

kern_return_t
arm64_lock_test()
{
	return lt_test_locks();
}

kern_return_t
arm64_munger_test()
{
	mt_test_mungers();
	return KERN_SUCCESS;
}
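
/*
 * These entry points are declared at the top of the file and, by
 * assumption (this is how <tests/xnupost.h> consumers are normally
 * wired up), are invoked from the xnupost kernel test table rather than
 * called directly.
 */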