/*
 * Copyright (c) 2007 Apple Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/***********************************************************************
* objc-lockdebug.mm
* Error-checking locks for debugging.
**********************************************************************/

#include "objc-private.h"

#if DEBUG && !TARGET_OS_WIN32

#include <unordered_map>


/***********************************************************************
* Thread-local bool set during _objc_atfork_prepare().
* That function is allowed to break some lock ordering rules.
**********************************************************************/

static tls_key_t fork_prepare_tls;

void
lockdebug_setInForkPrepare(bool inForkPrepare)
{
    INIT_ONCE_PTR(fork_prepare_tls, tls_create(nil), (void)0);
    tls_set(fork_prepare_tls, (void*)inForkPrepare);
}

static bool
inForkPrepare()
{
    INIT_ONCE_PTR(fork_prepare_tls, tls_create(nil), (void)0);
    return (bool)tls_get(fork_prepare_tls);
}
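
// Illustrative sketch (hedged; the real fork handler lives elsewhere in the
// runtime): the prepare-side handler is expected to bracket its bulk lock
// acquisition so that setLock()'s "no defined lock order" check is relaxed
// only on that path:
//
//     lockdebug_setInForkPrepare(true);
//     // ... acquire every runtime lock, in whatever order is convenient ...
//     lockdebug_setInForkPrepare(false);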



/***********************************************************************
* Lock order graph.
* "lock X precedes lock Y" means that X must be acquired first.
* This property is transitive.
**********************************************************************/
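// Illustrative sketch with hypothetical lock names: after declaring
//
//     lockdebug_lock_precedes_lock(&classLock, &selectorLock);
//     lockdebug_lock_precedes_lock(&selectorLock, &cacheLock);
//
// classLock must be acquired before selectorLock and, by transitivity,
// before cacheLock; acquiring cacheLock and then classLock on the same
// thread is reported as an ordering violation.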

struct lockorder {
    const void *l;
    std::vector<const lockorder *> predecessors;
};

static std::unordered_map<const void*, lockorder> lockOrderList;

static bool
lockPrecedesLock(const lockorder& oldlock, const lockorder& newlock)
{
    for (const auto *pre : newlock.predecessors) {
        if (&oldlock == pre) return true;
        if (lockPrecedesLock(oldlock, *pre)) return true;
    }
    return false;
}

static bool
lockPrecedesLock(const void *oldlock, const void *newlock)
{
    auto oldorder = lockOrderList.find(oldlock);
    auto neworder = lockOrderList.find(newlock);
    if (neworder == lockOrderList.end() || oldorder == lockOrderList.end()) {
        return false;
    }
    return lockPrecedesLock(oldorder->second, neworder->second);
}

static bool
lockUnorderedWithLock(const void *oldlock, const void *newlock)
{
    auto oldorder = lockOrderList.find(oldlock);
    auto neworder = lockOrderList.find(newlock);
    if (neworder == lockOrderList.end() || oldorder == lockOrderList.end()) {
        return true;
    }

    if (lockPrecedesLock(oldorder->second, neworder->second) ||
        lockPrecedesLock(neworder->second, oldorder->second))
    {
        return false;
    }

    return true;
}

void lockdebug_lock_precedes_lock(const void *oldlock, const void *newlock)
{
    if (lockPrecedesLock(newlock, oldlock)) {
        _objc_fatal("contradiction in lock order declaration");
    }

    auto oldorder = lockOrderList.find(oldlock);
    auto neworder = lockOrderList.find(newlock);
    if (oldorder == lockOrderList.end()) {
        lockOrderList[oldlock] = lockorder{oldlock, {}};
        oldorder = lockOrderList.find(oldlock);
    }
    if (neworder == lockOrderList.end()) {
        lockOrderList[newlock] = lockorder{newlock, {}};
        neworder = lockOrderList.find(newlock);
    }

    neworder->second.predecessors.push_back(&oldorder->second);
}


/***********************************************************************
* Recording - per-thread list of mutexes and monitors held
**********************************************************************/

enum class lockkind {
    MUTEX = 1, MONITOR = 2, RDLOCK = 3, WRLOCK = 4, RECURSIVE = 5
};

#define MUTEX     lockkind::MUTEX
#define MONITOR   lockkind::MONITOR
#define RDLOCK    lockkind::RDLOCK
#define WRLOCK    lockkind::WRLOCK
#define RECURSIVE lockkind::RECURSIVE

struct lockcount {
    lockkind k;  // the kind of lock it is (MUTEX, MONITOR, etc.)
    int i;       // the lock's nest count
};

using objc_lock_list = std::unordered_map<const void *, lockcount>;
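
// Illustrative sketch of this bookkeeping (hypothetical lock `r`; not code
// that runs anywhere): nested acquisition bumps the nest count instead of
// adding a second entry.
//
//     recursive_mutex_t r;
//     r.lock();      // this thread's list: { &r -> {RECURSIVE, 1} }
//     r.lock();      // this thread's list: { &r -> {RECURSIVE, 2} }
//     r.unlock();    // this thread's list: { &r -> {RECURSIVE, 1} }
//     r.unlock();    // entry removed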


// Thread-local list of locks owned by a thread.
// Used by lock ownership checks.
static tls_key_t lock_tls;

// Global list of all locks.
// Used by fork() safety check.
// This can't be a static struct because of C++ initialization order problems.
static objc_lock_list& AllLocks() {
    static objc_lock_list *locks;
    INIT_ONCE_PTR(locks, new objc_lock_list, (void)0);
    return *locks;
}


static void
destroyLocks(void *value)
{
    auto locks = (objc_lock_list *)value;
    // fixme complain about any still-held locks?
    if (locks) delete locks;
}

static objc_lock_list&
ownedLocks()
{
    // Use a dedicated tls key to prevent differences vs non-debug in
    // usage of objc's other tls keys (required for some unit tests).
    INIT_ONCE_PTR(lock_tls, tls_create(&destroyLocks), (void)0);

    auto locks = (objc_lock_list *)tls_get(lock_tls);
    if (!locks) {
        locks = new objc_lock_list;
        tls_set(lock_tls, locks);
    }

    return *locks;
}

static bool
hasLock(objc_lock_list& locks, const void *lock, lockkind kind)
{
    auto iter = locks.find(lock);
    if (iter != locks.end() && iter->second.k == kind) return true;
    return false;
}


static const char *sym(const void *lock)
{
    Dl_info info;
    int ok = dladdr(lock, &info);
    if (ok && info.dli_sname && info.dli_sname[0]) return info.dli_sname;
    else return "??";
}

static void
setLock(objc_lock_list& locks, const void *lock, lockkind kind)
{
    // Check if we already own this lock.
    auto iter = locks.find(lock);
    if (iter != locks.end() && iter->second.k == kind) {
        iter->second.i++;
        return;
    }

    // Newly-acquired lock. Verify lock ordering.
    // Locks not in AllLocks are exempt (e.g. @synchronized locks).
    if (&locks != &AllLocks() && AllLocks().find(lock) != AllLocks().end()) {
        for (auto& oldlock : locks) {
            if (lockPrecedesLock(lock, oldlock.first)) {
                _objc_fatal("lock %p (%s) incorrectly acquired before %p (%s)",
                            oldlock.first, sym(oldlock.first), lock, sym(lock));
            }
            if (!inForkPrepare() &&
                lockUnorderedWithLock(lock, oldlock.first))
            {
                // _objc_atfork_prepare is allowed to acquire
                // otherwise-unordered locks, but nothing else may.
                _objc_fatal("lock %p (%s) acquired before %p (%s) "
                            "with no defined lock order",
                            oldlock.first, sym(oldlock.first), lock, sym(lock));
            }
        }
    }

    locks[lock] = lockcount{kind, 1};
}
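
// Illustrative sketch of the two failures setLock() reports, using
// hypothetical registered locks A, B, C, D where only "A precedes B" was
// declared:
//
//     lockdebug_lock_precedes_lock(&A, &B);
//     B.lock();
//     A.lock();    // fatal: A incorrectly acquired after B
//
//     C.lock();
//     D.lock();    // fatal outside fork prepare: no defined lock order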

static void
clearLock(objc_lock_list& locks, const void *lock, lockkind kind)
{
    auto iter = locks.find(lock);
    if (iter != locks.end()) {
        auto& l = iter->second;
        if (l.k == kind) {
            if (--l.i == 0) {
                locks.erase(iter);
            }
            return;
        }
    }

    _objc_fatal("lock not found!");
}


/***********************************************************************
* fork() safety checking
**********************************************************************/

void
lockdebug_remember_mutex(mutex_t *lock)
{
    setLock(AllLocks(), lock, MUTEX);
}

void
lockdebug_remember_recursive_mutex(recursive_mutex_t *lock)
{
    setLock(AllLocks(), lock, RECURSIVE);
}

void
lockdebug_remember_monitor(monitor_t *lock)
{
    setLock(AllLocks(), lock, MONITOR);
}

void
lockdebug_remember_rwlock(rwlock_t *lock)
{
    setLock(AllLocks(), lock, WRLOCK);
}

void
lockdebug_assert_all_locks_locked()
{
    auto& owned = ownedLocks();

    for (const auto& l : AllLocks()) {
        if (!hasLock(owned, l.first, l.second.k)) {
            _objc_fatal("lock %p:%d is incorrectly not owned",
                        l.first, (int)l.second.k);
        }
    }
}

void
lockdebug_assert_no_locks_locked()
{
    auto& owned = ownedLocks();

    for (const auto& l : AllLocks()) {
        if (hasLock(owned, l.first, l.second.k)) {
            _objc_fatal("lock %p:%d is incorrectly owned",
                        l.first, (int)l.second.k);
        }
    }
}
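
// Illustrative sketch of how these asserts are meant to be used around
// fork() (hedged; the actual pthread_atfork handlers live elsewhere in the
// runtime):
//
//     // prepare handler: after acquiring every remembered lock
//     lockdebug_assert_all_locks_locked();
//
//     // parent and child handlers: after releasing every remembered lock
//     lockdebug_assert_no_locks_locked();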


/***********************************************************************
* Mutex checking
**********************************************************************/

void
lockdebug_mutex_lock(mutex_t *lock)
{
    auto& locks = ownedLocks();

    if (hasLock(locks, lock, MUTEX)) {
        _objc_fatal("deadlock: relocking mutex");
    }
    setLock(locks, lock, MUTEX);
}

// try-lock success is the only case with lockdebug effects.
// try-lock when already locked is OK (will fail)
// try-lock failure does nothing.
void
lockdebug_mutex_try_lock_success(mutex_t *lock)
{
    auto& locks = ownedLocks();
    setLock(locks, lock, MUTEX);
}
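
// Illustrative sketch (hypothetical wrapper, not part of this file): a
// try-lock caller is expected to report only the successful acquisition.
//
//     bool tryLockWithDebug(mutex_t *m) {
//         if (/* underlying try-lock succeeded */) {
//             lockdebug_mutex_try_lock_success(m);
//             return true;
//         }
//         return false;   // failure: no lockdebug bookkeeping
//     }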

void
lockdebug_mutex_unlock(mutex_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, MUTEX)) {
        _objc_fatal("unlocking unowned mutex");
    }
    clearLock(locks, lock, MUTEX);
}


void
lockdebug_mutex_assert_locked(mutex_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, MUTEX)) {
        _objc_fatal("mutex incorrectly not locked");
    }
}

void
lockdebug_mutex_assert_unlocked(mutex_t *lock)
{
    auto& locks = ownedLocks();

    if (hasLock(locks, lock, MUTEX)) {
        _objc_fatal("mutex incorrectly locked");
    }
}


/***********************************************************************
* Recursive mutex checking
**********************************************************************/

void
lockdebug_recursive_mutex_lock(recursive_mutex_t *lock)
{
    auto& locks = ownedLocks();
    setLock(locks, lock, RECURSIVE);
}

void
lockdebug_recursive_mutex_unlock(recursive_mutex_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, RECURSIVE)) {
        _objc_fatal("unlocking unowned recursive mutex");
    }
    clearLock(locks, lock, RECURSIVE);
}


void
lockdebug_recursive_mutex_assert_locked(recursive_mutex_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, RECURSIVE)) {
        _objc_fatal("recursive mutex incorrectly not locked");
    }
}

void
lockdebug_recursive_mutex_assert_unlocked(recursive_mutex_t *lock)
{
    auto& locks = ownedLocks();

    if (hasLock(locks, lock, RECURSIVE)) {
        _objc_fatal("recursive mutex incorrectly locked");
    }
}


/***********************************************************************
* Monitor checking
**********************************************************************/

void
lockdebug_monitor_enter(monitor_t *lock)
{
    auto& locks = ownedLocks();

    if (hasLock(locks, lock, MONITOR)) {
        _objc_fatal("deadlock: relocking monitor");
    }
    setLock(locks, lock, MONITOR);
}

void
lockdebug_monitor_leave(monitor_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, MONITOR)) {
        _objc_fatal("unlocking unowned monitor");
    }
    clearLock(locks, lock, MONITOR);
}

void
lockdebug_monitor_wait(monitor_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, MONITOR)) {
        _objc_fatal("waiting in unowned monitor");
    }
}


void
lockdebug_monitor_assert_locked(monitor_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, MONITOR)) {
        _objc_fatal("monitor incorrectly not locked");
    }
}

void
lockdebug_monitor_assert_unlocked(monitor_t *lock)
{
    auto& locks = ownedLocks();

    if (hasLock(locks, lock, MONITOR)) {
        _objc_fatal("monitor incorrectly held");
    }
}


/***********************************************************************
* rwlock checking
**********************************************************************/

void
lockdebug_rwlock_read(rwlock_t *lock)
{
    auto& locks = ownedLocks();

    if (hasLock(locks, lock, RDLOCK)) {
        // Recursive rwlock read is bad (may deadlock vs pending writer)
        _objc_fatal("recursive rwlock read");
    }
    if (hasLock(locks, lock, WRLOCK)) {
        _objc_fatal("deadlock: read after write for rwlock");
    }
    setLock(locks, lock, RDLOCK);
}
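
// Why the recursive read above is fatal (sketch of the classic deadlock;
// the thread numbering is illustrative):
//
//     Thread 1: lock->read();     // holds the read lock
//     Thread 2: lock->write();    // blocks; may queue ahead of new readers
//     Thread 1: lock->read();     // blocks behind the queued writer: deadlock
//
// Whether new readers queue behind a waiting writer is up to the rwlock
// implementation, so lockdebug rejects the pattern outright.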

// try-read success is the only case with lockdebug effects.
// try-read when already reading is OK (won't deadlock)
// try-read when already writing is OK (will fail)
// try-read failure does nothing.
void
lockdebug_rwlock_try_read_success(rwlock_t *lock)
{
    auto& locks = ownedLocks();
    setLock(locks, lock, RDLOCK);
}

void
lockdebug_rwlock_unlock_read(rwlock_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, RDLOCK)) {
        _objc_fatal("un-reading unowned rwlock");
    }
    clearLock(locks, lock, RDLOCK);
}


void
lockdebug_rwlock_write(rwlock_t *lock)
{
    auto& locks = ownedLocks();

    if (hasLock(locks, lock, RDLOCK)) {
        // Lock promotion not allowed (may deadlock)
        _objc_fatal("deadlock: write after read for rwlock");
    }
    if (hasLock(locks, lock, WRLOCK)) {
        _objc_fatal("recursive rwlock write");
    }
    setLock(locks, lock, WRLOCK);
}

// try-write success is the only case with lockdebug effects.
// try-write when already reading is OK (will fail)
// try-write when already writing is OK (will fail)
// try-write failure does nothing.
void
lockdebug_rwlock_try_write_success(rwlock_t *lock)
{
    auto& locks = ownedLocks();
    setLock(locks, lock, WRLOCK);
}

void
lockdebug_rwlock_unlock_write(rwlock_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, WRLOCK)) {
        _objc_fatal("un-writing unowned rwlock");
    }
    clearLock(locks, lock, WRLOCK);
}


void
lockdebug_rwlock_assert_reading(rwlock_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, RDLOCK)) {
        _objc_fatal("rwlock incorrectly not reading");
    }
}

void
lockdebug_rwlock_assert_writing(rwlock_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, WRLOCK)) {
        _objc_fatal("rwlock incorrectly not writing");
    }
}

void
lockdebug_rwlock_assert_locked(rwlock_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, RDLOCK) && !hasLock(locks, lock, WRLOCK)) {
        _objc_fatal("rwlock incorrectly neither reading nor writing");
    }
}

void
lockdebug_rwlock_assert_unlocked(rwlock_t *lock)
{
    auto& locks = ownedLocks();

    if (hasLock(locks, lock, RDLOCK) || hasLock(locks, lock, WRLOCK)) {
        _objc_fatal("rwlock incorrectly not unlocked");
    }
}


#endif