/*
 * Copyright (c) 2007 Apple Inc. All Rights Reserved.
 * 
 * @APPLE_LICENSE_HEADER_START@
 * 
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 * 
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 * 
 * @APPLE_LICENSE_HEADER_END@
 */

/***********************************************************************
* objc-lockdebug.mm
* Error-checking locks for debugging.
**********************************************************************/

#include "objc-private.h"

#if LOCKDEBUG && !TARGET_OS_WIN32

#include <unordered_map>

/***********************************************************************
* Thread-local bool set during _objc_atfork_prepare().
* That function is allowed to break some lock ordering rules.
**********************************************************************/

static tls_key_t fork_prepare_tls;

void
lockdebug_setInForkPrepare(bool inForkPrepare)
{
    INIT_ONCE_PTR(fork_prepare_tls, tls_create(nil), (void)0);
    tls_set(fork_prepare_tls, (void*)inForkPrepare);
}

static bool
inForkPrepare()
{
    INIT_ONCE_PTR(fork_prepare_tls, tls_create(nil), (void)0);
    return (bool)tls_get(fork_prepare_tls);
}
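
// Expected use (illustrative; the actual fork handlers live elsewhere in
// the runtime): _objc_atfork_prepare() brackets its lock acquisitions with
// this flag so that the ordering checks in setLock() below give it a pass:
//
//   lockdebug_setInForkPrepare(true);
//   // ... acquire every runtime lock, in whatever order is convenient ...
//   lockdebug_setInForkPrepare(false);
//   lockdebug_assert_all_locks_locked();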


/***********************************************************************
* Lock order graph.
* "lock X precedes lock Y" means that X must be acquired first.
* This property is transitive.
**********************************************************************/

struct lockorder {
    const void *l;
    std::vector<const lockorder *> predecessors;

    mutable std::unordered_map<const lockorder *, bool> memo;

    lockorder(const void *newl) : l(newl) { }
};

static std::unordered_map<const void*, lockorder *> lockOrderList;
// not mutex_t because we don't want lock debugging on this lock
static mutex_tt<false> lockOrderLock;

static bool
lockPrecedesLock(const lockorder *oldlock, const lockorder *newlock)
{
    auto memoed = newlock->memo.find(oldlock);
    if (memoed != newlock->memo.end()) {
        return memoed->second;
    }

    bool result = false;
    for (const auto *pre : newlock->predecessors) {
        if (oldlock == pre || lockPrecedesLock(oldlock, pre)) {
            result = true;
            break;
        }
    }

    newlock->memo[oldlock] = result;
    return result;
}
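
// Transitivity falls out of the recursive walk above: if A precedes B and
// B precedes C, then a query for (A, C) finds A via C's predecessor B.
// Illustration with hypothetical locks:
//
//   lockdebug_lock_precedes_lock(&A, &B);
//   lockdebug_lock_precedes_lock(&B, &C);
//   // lockPrecedesLock(A's node, C's node) now returns true, and the
//   // answer is memoized in C's `memo` map so later queries are cheap.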

static bool
lockPrecedesLock(const void *oldlock, const void *newlock)
{
    mutex_tt<false>::locker lock(lockOrderLock);

    auto oldorder = lockOrderList.find(oldlock);
    auto neworder = lockOrderList.find(newlock);
    if (neworder == lockOrderList.end() || oldorder == lockOrderList.end()) {
        return false;
    }
    return lockPrecedesLock(oldorder->second, neworder->second);
}

static bool
lockUnorderedWithLock(const void *oldlock, const void *newlock)
{
    mutex_tt<false>::locker lock(lockOrderLock);

    auto oldorder = lockOrderList.find(oldlock);
    auto neworder = lockOrderList.find(newlock);
    if (neworder == lockOrderList.end() || oldorder == lockOrderList.end()) {
        return true;
    }

    if (lockPrecedesLock(oldorder->second, neworder->second) ||
        lockPrecedesLock(neworder->second, oldorder->second))
    {
        return false;
    }

    return true;
}

void lockdebug_lock_precedes_lock(const void *oldlock, const void *newlock)
{
    if (lockPrecedesLock(newlock, oldlock)) {
        _objc_fatal("contradiction in lock order declaration");
    }

    mutex_tt<false>::locker lock(lockOrderLock);

    auto oldorder = lockOrderList.find(oldlock);
    auto neworder = lockOrderList.find(newlock);
    if (oldorder == lockOrderList.end()) {
        lockOrderList[oldlock] = new lockorder(oldlock);
        oldorder = lockOrderList.find(oldlock);
    }
    if (neworder == lockOrderList.end()) {
        lockOrderList[newlock] = new lockorder(newlock);
        neworder = lockOrderList.find(newlock);
    }

    neworder->second->predecessors.push_back(oldorder->second);
}
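
// Usage sketch (lock names hypothetical; the real declarations live with
// the runtime's lock definitions). Each required ordering is declared
// once, typically during startup:
//
//   lockdebug_lock_precedes_lock(&outerLock, &innerLock);
//   // Declaring the reverse edge later is caught as a contradiction:
//   // lockdebug_lock_precedes_lock(&innerLock, &outerLock);  // fatal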


/***********************************************************************
* Recording - per-thread list of mutexes and monitors held
**********************************************************************/

enum class lockkind {
    MUTEX = 1, MONITOR = 2, RDLOCK = 3, WRLOCK = 4, RECURSIVE = 5
};

#define MUTEX     lockkind::MUTEX
#define MONITOR   lockkind::MONITOR
#define RDLOCK    lockkind::RDLOCK
#define WRLOCK    lockkind::WRLOCK
#define RECURSIVE lockkind::RECURSIVE

struct lockcount {
    lockkind k;  // the kind of lock it is (MUTEX, MONITOR, etc)
    int i;       // the lock's nest count
};

using objc_lock_list = std::unordered_map<const void *, lockcount>;


// Thread-local list of locks owned by a thread.
// Used by lock ownership checks.
static tls_key_t lock_tls;

// Global list of all locks.
// Used by fork() safety check.
// This can't be a static struct because of C++ initialization order problems.
static objc_lock_list& AllLocks() {
    static objc_lock_list *locks;
    INIT_ONCE_PTR(locks, new objc_lock_list, (void)0);
    return *locks;
}


static void
destroyLocks(void *value)
{
    auto locks = (objc_lock_list *)value;
    // fixme complain about any still-held locks?
    if (locks) delete locks;
}

static objc_lock_list&
ownedLocks()
{
    // Use a dedicated tls key to prevent differences vs non-debug in
    // usage of objc's other tls keys (required for some unit tests).
    INIT_ONCE_PTR(lock_tls, tls_create(&destroyLocks), (void)0);

    auto locks = (objc_lock_list *)tls_get(lock_tls);
    if (!locks) {
        locks = new objc_lock_list;
        tls_set(lock_tls, locks);
    }

    return *locks;
}
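
// Note: each thread's objc_lock_list is created lazily on first use and is
// freed at thread exit by destroyLocks(), the tls destructor registered
// above.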

static bool
hasLock(objc_lock_list& locks, const void *lock, lockkind kind)
{
    auto iter = locks.find(lock);
    if (iter != locks.end() && iter->second.k == kind) return true;
    return false;
}


static const char *sym(const void *lock)
{
    Dl_info info;
    int ok = dladdr(lock, &info);
    if (ok && info.dli_sname && info.dli_sname[0]) return info.dli_sname;
    else return "??";
}

static void
setLock(objc_lock_list& locks, const void *lock, lockkind kind)
{
    // Check if we already own this lock.
    auto iter = locks.find(lock);
    if (iter != locks.end() && iter->second.k == kind) {
        iter->second.i++;
        return;
    }

    // Newly-acquired lock. Verify lock ordering.
    // Locks not in AllLocks are exempt (e.g. @synchronized locks).
    if (&locks != &AllLocks() && AllLocks().find(lock) != AllLocks().end()) {
        for (auto& oldlock : locks) {
            if (AllLocks().find(oldlock.first) == AllLocks().end()) {
                // oldlock is exempt
                continue;
            }

            if (lockPrecedesLock(lock, oldlock.first)) {
                _objc_fatal("lock %p (%s) incorrectly acquired before %p (%s)",
                            oldlock.first, sym(oldlock.first), lock, sym(lock));
            }
            if (!inForkPrepare() &&
                lockUnorderedWithLock(lock, oldlock.first))
            {
                // _objc_atfork_prepare is allowed to acquire
                // otherwise-unordered locks, but nothing else may.
                _objc_fatal("lock %p (%s) acquired before %p (%s) "
                            "with no defined lock order",
                            oldlock.first, sym(oldlock.first), lock, sym(lock));
            }
        }
    }

    locks[lock] = lockcount{kind, 1};
}
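
// Worked example (hypothetical locks A and B, both registered in
// AllLocks(), with "A precedes B" declared): a thread that already holds
// B and then acquires A dies with "incorrectly acquired before". If no
// order is declared in either direction, the acquisition dies with "no
// defined lock order" instead, unless the thread is inside
// _objc_atfork_prepare().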

static void
clearLock(objc_lock_list& locks, const void *lock, lockkind kind)
{
    auto iter = locks.find(lock);
    if (iter != locks.end()) {
        auto& l = iter->second;
        if (l.k == kind) {
            if (--l.i == 0) {
                locks.erase(iter);
            }
            return;
        }
    }

    _objc_fatal("lock not found!");
}


/***********************************************************************
* fork() safety checking
**********************************************************************/

void
lockdebug_remember_mutex(mutex_t *lock)
{
    setLock(AllLocks(), lock, MUTEX);
}

void
lockdebug_remember_recursive_mutex(recursive_mutex_t *lock)
{
    setLock(AllLocks(), lock, RECURSIVE);
}

void
lockdebug_remember_monitor(monitor_t *lock)
{
    setLock(AllLocks(), lock, MONITOR);
}

void
lockdebug_remember_rwlock(rwlock_t *lock)
{
    setLock(AllLocks(), lock, WRLOCK);
}

void
lockdebug_assert_all_locks_locked()
{
    auto& owned = ownedLocks();

    for (const auto& l : AllLocks()) {
        if (!hasLock(owned, l.first, l.second.k)) {
            _objc_fatal("lock %p:%d is incorrectly not owned",
                        l.first, l.second.k);
        }
    }
}

void
lockdebug_assert_no_locks_locked()
{
    auto& owned = ownedLocks();

    for (const auto& l : AllLocks()) {
        if (hasLock(owned, l.first, l.second.k)) {
            _objc_fatal("lock %p:%d is incorrectly owned", l.first, l.second.k);
        }
    }
}
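
// Intended use (illustrative): the runtime's fork handlers call
// lockdebug_assert_no_locks_locked() before _objc_atfork_prepare() takes
// every registered lock, lockdebug_assert_all_locks_locked() once it holds
// them all, and lockdebug_assert_no_locks_locked() again after
// _objc_atfork_parent() / _objc_atfork_child() release them.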


/***********************************************************************
* Mutex checking
**********************************************************************/

void
lockdebug_mutex_lock(mutex_t *lock)
{
    auto& locks = ownedLocks();

    if (hasLock(locks, lock, MUTEX)) {
        _objc_fatal("deadlock: relocking mutex");
    }
    setLock(locks, lock, MUTEX);
}

// try-lock success is the only case with lockdebug effects.
// try-lock when already locked is OK (will fail)
// try-lock failure does nothing.
void
lockdebug_mutex_try_lock_success(mutex_t *lock)
{
    auto& locks = ownedLocks();
    setLock(locks, lock, MUTEX);
}

void
lockdebug_mutex_unlock(mutex_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, MUTEX)) {
        _objc_fatal("unlocking unowned mutex");
    }
    clearLock(locks, lock, MUTEX);
}


void
lockdebug_mutex_assert_locked(mutex_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, MUTEX)) {
        _objc_fatal("mutex incorrectly not locked");
    }
}

void
lockdebug_mutex_assert_unlocked(mutex_t *lock)
{
    auto& locks = ownedLocks();

    if (hasLock(locks, lock, MUTEX)) {
        _objc_fatal("mutex incorrectly locked");
    }
}
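
// Sketch of a typical caller (simplified; the real wrapper is mutex_tt in
// objc-os.h, and the underlying lock primitive may differ):
//
//   void lock() {
//       lockdebug_mutex_lock(this);    // record + deadlock check first
//       os_unfair_lock_lock(&mLock);   // then take the real lock
//   }
//   void unlock() {
//       lockdebug_mutex_unlock(this);  // verify ownership first
//       os_unfair_lock_unlock(&mLock);
//   }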


/***********************************************************************
* Recursive mutex checking
**********************************************************************/

void
lockdebug_recursive_mutex_lock(recursive_mutex_t *lock)
{
    auto& locks = ownedLocks();
    setLock(locks, lock, RECURSIVE);
}

void
lockdebug_recursive_mutex_unlock(recursive_mutex_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, RECURSIVE)) {
        _objc_fatal("unlocking unowned recursive mutex");
    }
    clearLock(locks, lock, RECURSIVE);
}


void
lockdebug_recursive_mutex_assert_locked(recursive_mutex_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, RECURSIVE)) {
        _objc_fatal("recursive mutex incorrectly not locked");
    }
}

void
lockdebug_recursive_mutex_assert_unlocked(recursive_mutex_t *lock)
{
    auto& locks = ownedLocks();

    if (hasLock(locks, lock, RECURSIVE)) {
        _objc_fatal("recursive mutex incorrectly locked");
    }
}


/***********************************************************************
* Monitor checking
**********************************************************************/

void
lockdebug_monitor_enter(monitor_t *lock)
{
    auto& locks = ownedLocks();

    if (hasLock(locks, lock, MONITOR)) {
        _objc_fatal("deadlock: relocking monitor");
    }
    setLock(locks, lock, MONITOR);
}

void
lockdebug_monitor_leave(monitor_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, MONITOR)) {
        _objc_fatal("unlocking unowned monitor");
    }
    clearLock(locks, lock, MONITOR);
}

void
lockdebug_monitor_wait(monitor_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, MONITOR)) {
        _objc_fatal("waiting in unowned monitor");
    }
}
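
// Note: the underlying wait releases and reacquires the mutex while the
// thread is blocked, but the ownership record is left in place for the
// duration; the thread holds the monitor again by the time the wait
// returns, so the recorded state is accurate whenever this thread's
// code runs.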


void
lockdebug_monitor_assert_locked(monitor_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, MONITOR)) {
        _objc_fatal("monitor incorrectly not locked");
    }
}

void
lockdebug_monitor_assert_unlocked(monitor_t *lock)
{
    auto& locks = ownedLocks();

    if (hasLock(locks, lock, MONITOR)) {
        _objc_fatal("monitor incorrectly held");
    }
}


/***********************************************************************
* rwlock checking
**********************************************************************/

void
lockdebug_rwlock_read(rwlock_t *lock)
{
    auto& locks = ownedLocks();

    if (hasLock(locks, lock, RDLOCK)) {
        // Recursive rwlock read is bad (may deadlock vs pending writer)
        _objc_fatal("recursive rwlock read");
    }
    if (hasLock(locks, lock, WRLOCK)) {
        _objc_fatal("deadlock: read after write for rwlock");
    }
    setLock(locks, lock, RDLOCK);
}

// try-read success is the only case with lockdebug effects.
// try-read when already reading is OK (won't deadlock)
// try-read when already writing is OK (will fail)
// try-read failure does nothing.
void
lockdebug_rwlock_try_read_success(rwlock_t *lock)
{
    auto& locks = ownedLocks();
    setLock(locks, lock, RDLOCK);
}

void
lockdebug_rwlock_unlock_read(rwlock_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, RDLOCK)) {
        _objc_fatal("un-reading unowned rwlock");
    }
    clearLock(locks, lock, RDLOCK);
}


void
lockdebug_rwlock_write(rwlock_t *lock)
{
    auto& locks = ownedLocks();

    if (hasLock(locks, lock, RDLOCK)) {
        // Lock promotion not allowed (may deadlock)
        _objc_fatal("deadlock: write after read for rwlock");
    }
    if (hasLock(locks, lock, WRLOCK)) {
        _objc_fatal("recursive rwlock write");
    }
    setLock(locks, lock, WRLOCK);
}
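
// Why promotion deadlocks: acquiring the write lock waits for every
// reader to release, including this thread's own read lock, which it
// cannot release while blocked. Recursive reads (rejected in
// lockdebug_rwlock_read above) can deadlock similarly when a queued
// writer blocks new readers.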

// try-write success is the only case with lockdebug effects.
// try-write when already reading is OK (will fail)
// try-write when already writing is OK (will fail)
// try-write failure does nothing.
void
lockdebug_rwlock_try_write_success(rwlock_t *lock)
{
    auto& locks = ownedLocks();
    setLock(locks, lock, WRLOCK);
}

void
lockdebug_rwlock_unlock_write(rwlock_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, WRLOCK)) {
        _objc_fatal("un-writing unowned rwlock");
    }
    clearLock(locks, lock, WRLOCK);
}


void
lockdebug_rwlock_assert_reading(rwlock_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, RDLOCK)) {
        _objc_fatal("rwlock incorrectly not reading");
    }
}

void
lockdebug_rwlock_assert_writing(rwlock_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, WRLOCK)) {
        _objc_fatal("rwlock incorrectly not writing");
    }
}

void
lockdebug_rwlock_assert_locked(rwlock_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, RDLOCK) && !hasLock(locks, lock, WRLOCK)) {
        _objc_fatal("rwlock incorrectly neither reading nor writing");
    }
}

void
lockdebug_rwlock_assert_unlocked(rwlock_t *lock)
{
    auto& locks = ownedLocks();

    if (hasLock(locks, lock, RDLOCK) || hasLock(locks, lock, WRLOCK)) {
        _objc_fatal("rwlock incorrectly not unlocked");
    }
}


#endif