/*
 * Copyright (c) 2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System Copyright (c) 1991,1990,1989,1988,1987 Carnegie
 * Mellon University All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright notice
 * and this permission notice appear in all copies of the software,
 * derivative works or modified versions, and any portions thereof, and that
 * both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.
 * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
 * WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science Carnegie Mellon University Pittsburgh PA
 *  15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon the
 * rights to redistribute these changes.
 */
54 | ||
55 | #include <mach_ldebug.h> | |
56 | ||
57 | #define LOCK_PRIVATE 1 | |
58 | ||
59 | #include <kern/kalloc.h> | |
60 | #include <kern/locks.h> | |
61 | #include <kern/misc_protos.h> | |
62 | #include <kern/thread.h> | |
63 | #include <kern/processor.h> | |
64 | #include <kern/sched_prim.h> | |
65 | #include <kern/xpr.h> | |
66 | #include <kern/debug.h> | |
67 | #include <string.h> | |
68 | #include <tests/xnupost.h> | |
69 | ||
70 | #if MACH_KDB | |
71 | #include <ddb/db_command.h> | |
72 | #include <ddb/db_output.h> | |
73 | #include <ddb/db_sym.h> | |
74 | #include <ddb/db_print.h> | |
75 | #endif /* MACH_KDB */ | |
76 | ||
77 | #include <sys/kdebug.h> | |
78 | #include <sys/munge.h> | |
79 | #include <machine/cpu_capabilities.h> | |
80 | #include <arm/cpu_data_internal.h> | |
81 | ||
82 | extern boolean_t arm_pan_enabled; | |
83 | kern_return_t arm64_lock_test(void); | |
84 | kern_return_t arm64_munger_test(void); | |
85 | kern_return_t ex_cb_test(void); | |
86 | kern_return_t arm64_pan_test(void); | |
87 | ||
88 | // exception handler ignores this fault address during PAN test | |
89 | #if __ARM_PAN_AVAILABLE__ | |
90 | vm_offset_t pan_test_addr; | |
91 | #endif | |
92 | ||
93 | #include <libkern/OSAtomic.h> | |
94 | #define LOCK_TEST_ITERATIONS 50 | |
95 | static hw_lock_data_t lt_hw_lock; | |
96 | static lck_spin_t lt_lck_spin_t; | |
97 | static lck_mtx_t lt_mtx; | |
98 | static lck_rw_t lt_rwlock; | |
99 | static volatile uint32_t lt_counter = 0; | |
100 | static volatile int lt_spinvolatile; | |
101 | static volatile uint32_t lt_max_holders = 0; | |
102 | static volatile uint32_t lt_upgrade_holders = 0; | |
103 | static volatile uint32_t lt_max_upgrade_holders = 0; | |
104 | static volatile uint32_t lt_num_holders = 0; | |
105 | static volatile uint32_t lt_done_threads; | |
106 | static volatile uint32_t lt_target_done_threads; | |
107 | static volatile uint32_t lt_cpu_bind_id = 0; | |
108 | ||
static void
lt_note_another_blocking_lock_holder()
{
	hw_lock_lock(&lt_hw_lock);
	lt_num_holders++;
	lt_max_holders = (lt_max_holders < lt_num_holders) ? lt_num_holders : lt_max_holders;
	hw_lock_unlock(&lt_hw_lock);
}

static void
lt_note_blocking_lock_release()
{
	hw_lock_lock(&lt_hw_lock);
	lt_num_holders--;
	hw_lock_unlock(&lt_hw_lock);
}

static void
lt_spin_a_little_bit()
{
	uint32_t i;

	for (i = 0; i < 10000; i++) {
		lt_spinvolatile++;
	}
}

static void
lt_sleep_a_little_bit()
{
	delay(100);
}

static void
lt_grab_mutex()
{
	lck_mtx_lock(&lt_mtx);
	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_mtx_unlock(&lt_mtx);
}

static void
lt_grab_mutex_with_try()
{
	while(0 == lck_mtx_try_lock(&lt_mtx));
	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_mtx_unlock(&lt_mtx);
}
164 | ||
165 | static void | |
166 | lt_grab_rw_exclusive() | |
167 | { | |
168 | lck_rw_lock_exclusive(<_rwlock); | |
169 | lt_note_another_blocking_lock_holder(); | |
170 | lt_sleep_a_little_bit(); | |
171 | lt_counter++; | |
172 | lt_note_blocking_lock_release(); | |
173 | lck_rw_done(<_rwlock); | |
174 | } | |
175 | ||
176 | static void | |
177 | lt_grab_rw_exclusive_with_try() | |
178 | { | |
179 | while(0 == lck_rw_try_lock_exclusive(<_rwlock)) { | |
180 | lt_sleep_a_little_bit(); | |
181 | } | |
182 | ||
183 | lt_note_another_blocking_lock_holder(); | |
184 | lt_sleep_a_little_bit(); | |
185 | lt_counter++; | |
186 | lt_note_blocking_lock_release(); | |
187 | lck_rw_done(<_rwlock); | |
188 | } | |
189 | ||
190 | /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840) | |
191 | static void | |
192 | lt_grab_rw_shared() | |
193 | { | |
194 | lck_rw_lock_shared(<_rwlock); | |
195 | lt_counter++; | |
196 | ||
197 | lt_note_another_blocking_lock_holder(); | |
198 | lt_sleep_a_little_bit(); | |
199 | lt_note_blocking_lock_release(); | |
200 | ||
201 | lck_rw_done(<_rwlock); | |
202 | } | |
203 | */ | |
204 | ||
205 | /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840) | |
206 | static void | |
207 | lt_grab_rw_shared_with_try() | |
208 | { | |
209 | while(0 == lck_rw_try_lock_shared(<_rwlock)); | |
210 | lt_counter++; | |
211 | ||
212 | lt_note_another_blocking_lock_holder(); | |
213 | lt_sleep_a_little_bit(); | |
214 | lt_note_blocking_lock_release(); | |
215 | ||
216 | lck_rw_done(<_rwlock); | |
217 | } | |
218 | */ | |
219 | ||
static void
lt_upgrade_downgrade_rw()
{
	boolean_t upgraded, success;

	success = lck_rw_try_lock_shared(&lt_rwlock);
	if (!success) {
		lck_rw_lock_shared(&lt_rwlock);
	}

	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_note_blocking_lock_release();

	upgraded = lck_rw_lock_shared_to_exclusive(&lt_rwlock);
	if (!upgraded) {
		success = lck_rw_try_lock_exclusive(&lt_rwlock);

		if (!success) {
			lck_rw_lock_exclusive(&lt_rwlock);
		}
	}

	lt_upgrade_holders++;
	if (lt_upgrade_holders > lt_max_upgrade_holders) {
		lt_max_upgrade_holders = lt_upgrade_holders;
	}

	lt_counter++;
	lt_sleep_a_little_bit();

	lt_upgrade_holders--;

	lck_rw_lock_exclusive_to_shared(&lt_rwlock);

	lt_spin_a_little_bit();
	lck_rw_done(&lt_rwlock);
}

const int limit = 1000000;
static int lt_stress_local_counters[MAX_CPUS];

static void
lt_stress_hw_lock()
{
	int local_counter = 0;

	uint cpuid = current_processor()->cpu_id;

	kprintf("%s>cpu %d starting\n", __FUNCTION__, cpuid);

	hw_lock_lock(&lt_hw_lock);
	lt_counter++;
	local_counter++;
	hw_lock_unlock(&lt_hw_lock);

	while (lt_counter < lt_target_done_threads) {
		;
	}

	kprintf("%s>cpu %d started\n", __FUNCTION__, cpuid);

	while (lt_counter < limit) {
		spl_t s = splsched();
		hw_lock_lock(&lt_hw_lock);
		if (lt_counter < limit) {
			lt_counter++;
			local_counter++;
		}
		hw_lock_unlock(&lt_hw_lock);
		splx(s);
	}

	lt_stress_local_counters[cpuid] = local_counter;

	kprintf("%s>final counter %d cpu %d incremented the counter %d times\n", __FUNCTION__, lt_counter, cpuid, local_counter);
}
297 | ||
298 | static void | |
299 | lt_grab_hw_lock() | |
300 | { | |
301 | hw_lock_lock(<_hw_lock); | |
302 | lt_counter++; | |
303 | lt_spin_a_little_bit(); | |
304 | hw_lock_unlock(<_hw_lock); | |
305 | } | |
306 | ||
307 | static void | |
308 | lt_grab_hw_lock_with_try() | |
309 | { | |
310 | while(0 == hw_lock_try(<_hw_lock)); | |
311 | lt_counter++; | |
312 | lt_spin_a_little_bit(); | |
313 | hw_lock_unlock(<_hw_lock); | |
314 | } | |
315 | ||
316 | static void | |
317 | lt_grab_hw_lock_with_to() | |
318 | { | |
319 | while(0 == hw_lock_to(<_hw_lock, LockTimeOut)) | |
320 | mp_enable_preemption(); | |
321 | lt_counter++; | |
322 | lt_spin_a_little_bit(); | |
323 | hw_lock_unlock(<_hw_lock); | |
324 | } | |
325 | ||
326 | static void | |
327 | lt_grab_spin_lock() | |
328 | { | |
329 | lck_spin_lock(<_lck_spin_t); | |
330 | lt_counter++; | |
331 | lt_spin_a_little_bit(); | |
332 | lck_spin_unlock(<_lck_spin_t); | |
333 | } | |
334 | ||
335 | static void | |
336 | lt_grab_spin_lock_with_try() | |
337 | { | |
338 | while(0 == lck_spin_try_lock(<_lck_spin_t)); | |
339 | lt_counter++; | |
340 | lt_spin_a_little_bit(); | |
341 | lck_spin_unlock(<_lck_spin_t); | |
342 | } | |
343 | ||
344 | static volatile boolean_t lt_thread_lock_grabbed; | |
345 | static volatile boolean_t lt_thread_lock_success; | |
346 | ||
347 | static void | |
348 | lt_reset() | |
349 | { | |
350 | lt_counter = 0; | |
351 | lt_max_holders = 0; | |
352 | lt_num_holders = 0; | |
353 | lt_max_upgrade_holders = 0; | |
354 | lt_upgrade_holders = 0; | |
355 | lt_done_threads = 0; | |
356 | lt_target_done_threads = 0; | |
357 | lt_cpu_bind_id = 0; | |
358 | ||
359 | OSMemoryBarrier(); | |
360 | } | |
361 | ||
362 | static void | |
363 | lt_trylock_hw_lock_with_to() | |
364 | { | |
365 | OSMemoryBarrier(); | |
366 | while (!lt_thread_lock_grabbed) { | |
367 | lt_sleep_a_little_bit(); | |
368 | OSMemoryBarrier(); | |
369 | } | |
370 | lt_thread_lock_success = hw_lock_to(<_hw_lock, 100); | |
371 | OSMemoryBarrier(); | |
372 | mp_enable_preemption(); | |
373 | } | |
374 | ||
375 | static void | |
376 | lt_trylock_spin_try_lock() | |
377 | { | |
378 | OSMemoryBarrier(); | |
379 | while (!lt_thread_lock_grabbed) { | |
380 | lt_sleep_a_little_bit(); | |
381 | OSMemoryBarrier(); | |
382 | } | |
383 | lt_thread_lock_success = lck_spin_try_lock(<_lck_spin_t); | |
384 | OSMemoryBarrier(); | |
385 | } | |
386 | ||
387 | static void | |
388 | lt_trylock_thread(void *arg, wait_result_t wres __unused) | |
389 | { | |
390 | void (*func)(void) = (void(*)(void))arg; | |
391 | ||
392 | func(); | |
393 | ||
394 | OSIncrementAtomic((volatile SInt32*) <_done_threads); | |
395 | } | |
396 | ||
397 | static void | |
398 | lt_start_trylock_thread(thread_continue_t func) | |
399 | { | |
400 | thread_t thread; | |
401 | kern_return_t kr; | |
402 | ||
403 | kr = kernel_thread_start(lt_trylock_thread, func, &thread); | |
404 | assert(kr == KERN_SUCCESS); | |
405 | ||
406 | thread_deallocate(thread); | |
407 | } | |
408 | ||
409 | static void | |
410 | lt_wait_for_lock_test_threads() | |
411 | { | |
412 | OSMemoryBarrier(); | |
413 | /* Spin to reduce dependencies */ | |
414 | while (lt_done_threads < lt_target_done_threads) { | |
415 | lt_sleep_a_little_bit(); | |
416 | OSMemoryBarrier(); | |
417 | } | |
418 | OSMemoryBarrier(); | |
419 | } | |
420 | ||
static kern_return_t
lt_test_trylocks()
{
	boolean_t success;

	/*
	 * First mtx try lock succeeds, second fails.
	 */
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NOTNULL(success, "First mtx try lock");
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NULL(success, "Second mtx try lock for a locked mtx");
	lck_mtx_unlock(&lt_mtx);

	/*
	 * After regular grab, can't try lock.
	 */
	lck_mtx_lock(&lt_mtx);
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NULL(success, "try lock should fail after regular lck_mtx_lock");
	lck_mtx_unlock(&lt_mtx);

	/*
	 * Two shared try locks on a previously unheld rwlock succeed, and a
	 * subsequent exclusive attempt fails.
	 */
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "exclusive lock attempt on previously held lock should fail");
	lck_rw_done(&lt_rwlock);
	lck_rw_done(&lt_rwlock);

	/*
	 * After regular shared grab, can trylock
	 * for shared but not for exclusive.
	 */
	lck_rw_lock_shared(&lt_rwlock);
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "After regular shared grab another shared try lock should succeed.");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular shared grab an exclusive lock attempt should fail.");
	lck_rw_done(&lt_rwlock);
	lck_rw_done(&lt_rwlock);

	/*
	 * An exclusive try lock succeeds, subsequent shared and exclusive
	 * attempts fail.
	 */
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "An exclusive try lock should succeed");
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NULL(success, "try lock in shared mode attempt after an exclusive grab should fail");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "try lock in exclusive mode attempt after an exclusive grab should fail");
	lck_rw_done(&lt_rwlock);

	/*
	 * After regular exclusive grab, neither kind of trylock succeeds.
	 */
	lck_rw_lock_exclusive(&lt_rwlock);
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular exclusive grab, shared trylock should not succeed");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular exclusive grab, exclusive trylock should not succeed");
	lck_rw_done(&lt_rwlock);

	/*
	 * First spin lock attempt succeeds, second attempt fails.
	 */
	success = hw_lock_try(&lt_hw_lock);
	T_ASSERT_NOTNULL(success, "First spin lock attempt should succeed");
	success = hw_lock_try(&lt_hw_lock);
	T_ASSERT_NULL(success, "Second attempt to spin lock should fail");
	hw_lock_unlock(&lt_hw_lock);

	hw_lock_lock(&lt_hw_lock);
	success = hw_lock_try(&lt_hw_lock);
	T_ASSERT_NULL(success, "After taking spin lock, trylock attempt should fail");
	hw_lock_unlock(&lt_hw_lock);

	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	OSMemoryBarrier();
	lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
	success = hw_lock_to(&lt_hw_lock, 100);
	T_ASSERT_NOTNULL(success, "First spin lock with timeout should succeed");
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "Second spin lock with timeout should fail and timeout");
	hw_lock_unlock(&lt_hw_lock);

	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	OSMemoryBarrier();
	lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
	hw_lock_lock(&lt_hw_lock);
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "after taking a spin lock, lock attempt with timeout should fail");
	hw_lock_unlock(&lt_hw_lock);

	success = lck_spin_try_lock(&lt_lck_spin_t);
	T_ASSERT_NOTNULL(success, "spin trylock of previously unheld lock should succeed");
	success = lck_spin_try_lock(&lt_lck_spin_t);
	T_ASSERT_NULL(success, "spin trylock attempt of previously held lock (with trylock) should fail");
	lck_spin_unlock(&lt_lck_spin_t);

	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	lt_start_trylock_thread(lt_trylock_spin_try_lock);
	lck_spin_lock(&lt_lck_spin_t);
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "spin trylock attempt of previously held lock should fail");
	lck_spin_unlock(&lt_lck_spin_t);

	return KERN_SUCCESS;
}
548 | ||
549 | static void | |
550 | lt_thread(void *arg, wait_result_t wres __unused) | |
551 | { | |
552 | void (*func)(void) = (void(*)(void)) arg; | |
553 | uint32_t i; | |
554 | ||
555 | for (i = 0; i < LOCK_TEST_ITERATIONS; i++) { | |
556 | func(); | |
557 | } | |
558 | ||
559 | OSIncrementAtomic((volatile SInt32*) <_done_threads); | |
560 | } | |
561 | ||
static void
lt_bound_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void(*)(void)) arg;

	int cpuid = OSIncrementAtomic((volatile SInt32 *)&lt_cpu_bind_id);

	processor_t processor = processor_list;
	while ((processor != NULL) && (processor->cpu_id != cpuid)) {
		processor = processor->processor_list;
	}

	if (processor != NULL) {
		thread_bind(processor);
	}

	thread_block(THREAD_CONTINUE_NULL);

	func();

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_start_lock_thread(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}

static void
lt_start_lock_thread_bound(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_bound_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}

static kern_return_t
lt_test_locks()
{
	kern_return_t kr = KERN_SUCCESS;
	lck_grp_attr_t *lga = lck_grp_attr_alloc_init();
	lck_grp_t *lg = lck_grp_alloc_init("lock test", lga);

	lck_mtx_init(&lt_mtx, lg, LCK_ATTR_NULL);
	lck_rw_init(&lt_rwlock, lg, LCK_ATTR_NULL);
	lck_spin_init(&lt_lck_spin_t, lg, LCK_ATTR_NULL);
	hw_lock_init(&lt_hw_lock);

	T_LOG("Testing locks.");

	/* Try locks (custom) */
	lt_reset();

	T_LOG("Running try lock test.");
	kr = lt_test_trylocks();
	T_EXPECT_NULL(kr, "try lock test failed.");

	/* Uncontended mutex */
	T_LOG("Running uncontended mutex test.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_mutex);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Contended mutex */
	T_LOG("Running contended mutex test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_mutex);
	lt_start_lock_thread(lt_grab_mutex);
	lt_start_lock_thread(lt_grab_mutex);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Contended mutex: try locks */
	T_LOG("Running contended mutex trylock test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Uncontended exclusive rwlock */
	T_LOG("Running uncontended exclusive rwlock test.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Uncontended shared rwlock */

	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	T_LOG("Running uncontended shared rwlock test.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_rw_shared);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	*/

	/* Contended exclusive rwlock */
	T_LOG("Running contended exclusive rwlock test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* One shared, two exclusive */
	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	T_LOG("Running test with one shared and two exclusive rw lock threads.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_rw_shared);
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	*/

	/* Four shared */
	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	T_LOG("Running test with four shared holders.");
	lt_reset();
	lt_target_done_threads = 4;
	lt_start_lock_thread(lt_grab_rw_shared);
	lt_start_lock_thread(lt_grab_rw_shared);
	lt_start_lock_thread(lt_grab_rw_shared);
	lt_start_lock_thread(lt_grab_rw_shared);
	lt_wait_for_lock_test_threads();
	T_EXPECT_LE_UINT(lt_max_holders, 4, NULL);
	*/

	/* Three doing upgrades and downgrades */
	T_LOG("Running test with threads upgrading and downgrading.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
	T_EXPECT_EQ_UINT(lt_max_upgrade_holders, 1, NULL);

	/* Uncontended - exclusive trylocks */
	T_LOG("Running test with single thread doing exclusive rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Uncontended - shared trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	T_LOG("Running test with single thread doing shared rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_rw_shared_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	*/

	/* Three doing exclusive trylocks */
	T_LOG("Running test with threads doing exclusive rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Three doing shared trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	T_LOG("Running test with threads doing shared rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_rw_shared_with_try);
	lt_start_lock_thread(lt_grab_rw_shared_with_try);
	lt_start_lock_thread(lt_grab_rw_shared_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
	*/

	/* Three doing various trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	T_LOG("Running test with threads doing mixed rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 4;
	lt_start_lock_thread(lt_grab_rw_shared_with_try);
	lt_start_lock_thread(lt_grab_rw_shared_with_try);
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_LE_UINT(lt_max_holders, 2, NULL);
	*/

	/* HW locks */
	T_LOG("Running test with hw_lock_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* HW locks stress test */
	T_LOG("Running HW locks stress test with hw_lock_lock()");
	extern unsigned int real_ncpus;
	lt_reset();
	lt_target_done_threads = real_ncpus;
	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
		lt_start_lock_thread_bound(lt_stress_hw_lock);
	}
	lt_wait_for_lock_test_threads();
	bool starvation = false;
	uint total_local_count = 0;
	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
		starvation = starvation || (lt_stress_local_counters[processor->cpu_id] < 10);
		total_local_count += lt_stress_local_counters[processor->cpu_id];
	}
	if (total_local_count != lt_counter) {
		T_FAIL("Lock failure\n");
	} else if (starvation) {
		T_FAIL("Lock starvation found\n");
	} else {
		T_PASS("HW locks stress test with hw_lock_lock()");
	}

	/* HW locks: trylocks */
	T_LOG("Running test with hw_lock_try()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* HW locks: with timeout */
	T_LOG("Running test with hw_lock_to()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* Spin locks */
	T_LOG("Running test with lck_spin_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* Spin locks: trylocks */
	T_LOG("Running test with lck_spin_try_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	return KERN_SUCCESS;
}

#define MT_MAX_ARGS 8
#define MT_INITIAL_VALUE 0xfeedbeef
#define MT_W_VAL (0x00000000feedbeefULL) /* Drop in zeros */
#define MT_S_VAL (0xfffffffffeedbeefULL) /* High bit is 1, so sign-extends as negative */
#define MT_L_VAL (((uint64_t)MT_INITIAL_VALUE) | (((uint64_t)MT_INITIAL_VALUE) << 32)) /* Two back-to-back */

typedef void (*sy_munge_t)(void*);

#define MT_FUNC(x) #x, x
struct munger_test {
	const char *mt_name;
	sy_munge_t mt_func;
	uint32_t mt_in_words;
	uint32_t mt_nout;
	uint64_t mt_expected[MT_MAX_ARGS];
} munger_tests[] = {
	{MT_FUNC(munge_w), 1, 1, {MT_W_VAL}},
	{MT_FUNC(munge_ww), 2, 2, {MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_www), 3, 3, {MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwww), 4, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwww), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwww), 6, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwww), 7, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwwww), 8, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wl), 3, 2, {MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwl), 4, 3, {MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwlll), 8, 5, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wlw), 4, 3, {MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wlwwwll), 10, 7, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wlwwwllw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wlwwlwlw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wll), 5, 3, {MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wlll), 7, 4, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wllwwll), 11, 7, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwlw), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwlww), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwl), 5, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwlw), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwl), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwl), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwlww), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwllw), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwlll), 11, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwwl), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwwlw), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwwll), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wsw), 3, 3, {MT_W_VAL, MT_S_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wws), 3, 3, {MT_W_VAL, MT_W_VAL, MT_S_VAL}},
	{MT_FUNC(munge_wwwsw), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_S_VAL, MT_W_VAL}},
	{MT_FUNC(munge_llllll), 12, 6, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_l), 2, 1, {MT_L_VAL}},
	{MT_FUNC(munge_lw), 3, 2, {MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_lwww), 5, 4, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_lwwwwwww), 9, 8, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wlwwwl), 8, 6, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwlwwwl), 9, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}
};

#define MT_TEST_COUNT (sizeof(munger_tests) / sizeof(struct munger_test))

static void
mt_reset(uint32_t in_words, size_t total_size, uint32_t *data)
{
	uint32_t i;

	for (i = 0; i < in_words; i++) {
		data[i] = MT_INITIAL_VALUE;
	}

	if (in_words * sizeof(uint32_t) < total_size) {
		bzero(&data[in_words], total_size - in_words * sizeof(uint32_t));
	}
}

static void
mt_test_mungers()
{
	uint64_t data[MT_MAX_ARGS];
	uint32_t i, j;

	for (i = 0; i < MT_TEST_COUNT; i++) {
		struct munger_test *test = &munger_tests[i];
		int pass = 1;

		T_LOG("Testing %s", test->mt_name);

		mt_reset(test->mt_in_words, sizeof(data), (uint32_t*)data);
		test->mt_func(data);

		for (j = 0; j < test->mt_nout; j++) {
			if (data[j] != test->mt_expected[j]) {
				T_FAIL("Index %d: expected %llx, got %llx.", j, test->mt_expected[j], data[j]);
				pass = 0;
			}
		}
		if (pass) {
			T_PASS(test->mt_name);
		}
	}
}
967 | ||
968 | /* Exception Callback Test */ | |
969 | static ex_cb_action_t excb_test_action( | |
970 | ex_cb_class_t cb_class, | |
971 | void *refcon, | |
972 | const ex_cb_state_t *state | |
973 | ) | |
974 | { | |
975 | ex_cb_state_t *context = (ex_cb_state_t *)refcon; | |
976 | ||
977 | if ((NULL == refcon) || (NULL == state)) | |
978 | { | |
979 | return EXCB_ACTION_TEST_FAIL; | |
980 | } | |
981 | ||
982 | context->far = state->far; | |
983 | ||
984 | switch (cb_class) | |
985 | { | |
986 | case EXCB_CLASS_TEST1: | |
987 | return EXCB_ACTION_RERUN; | |
988 | case EXCB_CLASS_TEST2: | |
989 | return EXCB_ACTION_NONE; | |
990 | default: | |
991 | return EXCB_ACTION_TEST_FAIL; | |
992 | } | |
993 | } | |
994 | ||
995 | ||
kern_return_t
ex_cb_test()
{
	const vm_offset_t far1 = 0xdead0001;
	const vm_offset_t far2 = 0xdead0002;
	kern_return_t kr;
	ex_cb_state_t test_context_1 = {0xdeadbeef};
	ex_cb_state_t test_context_2 = {0xdeadbeef};
	ex_cb_action_t action;

	T_LOG("Testing Exception Callback.");

	T_LOG("Running registration test.");

	kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
	T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST1 exception callback");
	kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
	T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST2 exception callback");

	kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
	T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST2 exception callback");
	kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
	T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST1 exception callback");

	T_LOG("Running invocation test.");

	action = ex_cb_invoke(EXCB_CLASS_TEST1, far1);
	T_ASSERT(EXCB_ACTION_RERUN == action, NULL);
	T_ASSERT(far1 == test_context_1.far, NULL);

	action = ex_cb_invoke(EXCB_CLASS_TEST2, far2);
	T_ASSERT(EXCB_ACTION_NONE == action, NULL);
	T_ASSERT(far2 == test_context_2.far, NULL);

	action = ex_cb_invoke(EXCB_CLASS_TEST3, 0);
	T_ASSERT(EXCB_ACTION_NONE == action, NULL);

	return KERN_SUCCESS;
}

#if __ARM_PAN_AVAILABLE__
kern_return_t
arm64_pan_test()
{
	unsigned long last_pan_config;
	vm_offset_t priv_addr = _COMM_PAGE_SIGNATURE;

	T_LOG("Testing PAN.");

	last_pan_config = __builtin_arm_rsr("pan");
	if (!last_pan_config) {
		T_ASSERT(!arm_pan_enabled, "PAN is not enabled even though it is configured to be");
		__builtin_arm_wsr("pan", 1);
	}

	T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);

	// convert priv_addr to one that is accessible from user mode
	pan_test_addr = priv_addr + _COMM_PAGE64_BASE_ADDRESS -
		_COMM_PAGE_START_ADDRESS;

	// Below should trigger a PAN exception as pan_test_addr is accessible
	// in user mode
	// The exception handler, upon recognizing the fault address is pan_test_addr,
	// will disable PAN and rerun this instruction successfully
	T_ASSERT(*(char *)pan_test_addr == *(char *)priv_addr, NULL);
	pan_test_addr = 0;

	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

	// restore previous PAN config value
	if (last_pan_config)
		__builtin_arm_wsr("pan", 1);

	return KERN_SUCCESS;
}
#endif
1073 | ||
1074 | ||
1075 | kern_return_t | |
1076 | arm64_lock_test() | |
1077 | { | |
1078 | return lt_test_locks(); | |
1079 | } | |
1080 | ||
1081 | kern_return_t | |
1082 | arm64_munger_test() | |
1083 | { | |
1084 | mt_test_mungers(); | |
1085 | return 0; | |
1086 | } | |
1087 |