/*
 * Copyright (c) 2007 Apple Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/***********************************************************************
* Error-checking locks for debugging.
**********************************************************************/
#include "objc-private.h"

#if LOCKDEBUG && !TARGET_OS_WIN32

#include <algorithm>      // std::find, used by the fork() safety checks below
#include <unordered_map>
/***********************************************************************
* Thread-local bool set during _objc_atfork_prepare().
* That function is allowed to break some lock ordering rules.
**********************************************************************/
static tls_key_t fork_prepare_tls;
void
lockdebug_setInForkPrepare(bool inForkPrepare)
{
    INIT_ONCE_PTR(fork_prepare_tls, tls_create(nil), (void)0);
    tls_set(fork_prepare_tls, (void*)inForkPrepare);
}

static bool
inForkPrepare()
{
    INIT_ONCE_PTR(fork_prepare_tls, tls_create(nil), (void)0);
    return (bool)tls_get(fork_prepare_tls);
}
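
// Illustrative sketch, not code from this file: the fork handler is expected
// to set this flag around its lock acquisitions so that taking every runtime
// lock at once does not trip the ordering checks below. The handler name and
// body are assumptions about the caller.
//
//   void _objc_atfork_prepare(void)
//   {
//       lockdebug_setInForkPrepare(true);
//       // ... acquire all runtime locks, in any order ...
//       lockdebug_assert_all_locks_locked();
//       lockdebug_setInForkPrepare(false);
//   }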
/***********************************************************************
* Lock order graph.
* "lock X precedes lock Y" means that X must be acquired first.
* This property is transitive.
**********************************************************************/
struct lockorder {
    const void *l;
    std::vector<const lockorder *> predecessors;

    // Memoized results of earlier precedence queries against this node.
    mutable std::unordered_map<const lockorder *, bool> memo;

    lockorder(const void *newl) : l(newl) { }
};
static std::unordered_map<const void*, lockorder *> lockOrderList;
// not mutex_t because we don't want lock debugging on this lock
static mutex_tt<false> lockOrderLock;
static bool
lockPrecedesLock(const lockorder *oldlock, const lockorder *newlock)
{
    auto memoed = newlock->memo.find(oldlock);
    if (memoed != newlock->memo.end()) {
        return memoed->second;
    }

    // Search newlock's predecessors, transitively, for oldlock.
    bool result = false;
    for (const auto *pre : newlock->predecessors) {
        if (oldlock == pre || lockPrecedesLock(oldlock, pre)) {
            result = true;
            break;
        }
    }

    newlock->memo[oldlock] = result;
    return result;
}
static bool
lockPrecedesLock(const void *oldlock, const void *newlock)
{
    mutex_tt<false>::locker lock(lockOrderLock);

    auto oldorder = lockOrderList.find(oldlock);
    auto neworder = lockOrderList.find(newlock);
    if (neworder == lockOrderList.end() || oldorder == lockOrderList.end()) {
        return false;
    }
    return lockPrecedesLock(oldorder->second, neworder->second);
}
static bool
lockUnorderedWithLock(const void *oldlock, const void *newlock)
{
    mutex_tt<false>::locker lock(lockOrderLock);

    auto oldorder = lockOrderList.find(oldlock);
    auto neworder = lockOrderList.find(newlock);
    if (neworder == lockOrderList.end() || oldorder == lockOrderList.end()) {
        return true;
    }

    if (lockPrecedesLock(oldorder->second, neworder->second) ||
        lockPrecedesLock(neworder->second, oldorder->second))
    {
        return false;
    }

    return true;
}
void lockdebug_lock_precedes_lock(const void *oldlock, const void *newlock)
{
    if (lockPrecedesLock(newlock, oldlock)) {
        _objc_fatal("contradiction in lock order declaration");
    }

    mutex_tt<false>::locker lock(lockOrderLock);

    auto oldorder = lockOrderList.find(oldlock);
    auto neworder = lockOrderList.find(newlock);
    if (oldorder == lockOrderList.end()) {
        lockOrderList[oldlock] = new lockorder(oldlock);
        oldorder = lockOrderList.find(oldlock);
    }
    if (neworder == lockOrderList.end()) {
        lockOrderList[newlock] = new lockorder(newlock);
        neworder = lockOrderList.find(newlock);
    }

    neworder->second->predecessors.push_back(oldorder->second);
}
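
// Illustrative sketch: declaring an order A -> B -> C makes
// lockPrecedesLock(&A, &C) true by transitivity, so acquiring A while C is
// already held is reported as an ordering violation. The lock names A, B, C
// are hypothetical.
//
//   lockdebug_lock_precedes_lock(&A, &B);   // A must be acquired before B
//   lockdebug_lock_precedes_lock(&B, &C);   // B must be acquired before C
//   // lockPrecedesLock(&A, &C) is now true by transitivity.
//   // lockdebug_lock_precedes_lock(&C, &A) would be a fatal contradiction.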
/***********************************************************************
* Recording - per-thread list of mutexes and monitors held
**********************************************************************/
enum class lockkind {
    MUTEX = 1, MONITOR = 2, RDLOCK = 3, WRLOCK = 4, RECURSIVE = 5
};
#define MUTEX     lockkind::MUTEX
#define MONITOR   lockkind::MONITOR
#define RDLOCK    lockkind::RDLOCK
#define WRLOCK    lockkind::WRLOCK
#define RECURSIVE lockkind::RECURSIVE
struct lockcount {
    lockkind k;  // the kind of lock it is (MUTEX, MONITOR, etc)
    int i;       // the lock's nest count
};

using objc_lock_list = std::unordered_map<const void *, lockcount>;
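
// Illustrative: after a thread locks the same recursive mutex twice, its
// ownedLocks() map holds a single entry along the lines of
//   { &someRecursiveLock : lockcount{RECURSIVE, 2} }
// and clearLock() removes the entry once the nest count drops back to zero.
// (someRecursiveLock is a hypothetical lock name.)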
// Thread-local list of locks owned by a thread.
// Used by lock ownership checks.
static tls_key_t lock_tls;
// Global list of all locks.
// Used by fork() safety check.
// This can't be a static struct because of C++ initialization order problems.
static objc_lock_list& AllLocks() {
    static objc_lock_list *locks;
    INIT_ONCE_PTR(locks, new objc_lock_list, (void)0);
    return *locks;
}
static void
destroyLocks(void *value)
{
    auto locks = (objc_lock_list *)value;
    // fixme complain about any still-held locks?
    if (locks) delete locks;
}

static objc_lock_list&
ownedLocks()
{
    // Use a dedicated tls key to prevent differences vs non-debug in
    // usage of objc's other tls keys (required for some unit tests).
    INIT_ONCE_PTR(lock_tls, tls_create(&destroyLocks), (void)0);

    auto locks = (objc_lock_list *)tls_get(lock_tls);
    if (!locks) {
        locks = new objc_lock_list;
        tls_set(lock_tls, locks);
    }

    return *locks;
}
static bool
hasLock(objc_lock_list& locks, const void *lock, lockkind kind)
{
    auto iter = locks.find(lock);
    if (iter != locks.end() && iter->second.k == kind) return true;
    return false;
}

// Best-effort symbolication of a lock address for diagnostic messages.
static const char *sym(const void *lock)
{
    Dl_info info;
    int ok = dladdr(lock, &info);
    if (ok && info.dli_sname && info.dli_sname[0]) return info.dli_sname;
    else return "??";
}
static void
setLock(objc_lock_list& locks, const void *lock, lockkind kind)
{
    // Check if we already own this lock.
    auto iter = locks.find(lock);
    if (iter != locks.end() && iter->second.k == kind) {
        iter->second.i++;
        return;
    }

    // Newly-acquired lock. Verify lock ordering.
    // Locks not in AllLocks are exempt (i.e. @synchronized locks)
    if (&locks != &AllLocks() && AllLocks().find(lock) != AllLocks().end()) {
        for (auto& oldlock : locks) {
            if (AllLocks().find(oldlock.first) == AllLocks().end()) {
                // oldlock is exempt
                continue;
            }

            if (lockPrecedesLock(lock, oldlock.first)) {
                _objc_fatal("lock %p (%s) incorrectly acquired before %p (%s)",
                            oldlock.first, sym(oldlock.first), lock, sym(lock));
            }
            if (!inForkPrepare() &&
                lockUnorderedWithLock(lock, oldlock.first))
            {
                // _objc_atfork_prepare is allowed to acquire
                // otherwise-unordered locks, but nothing else may.
                _objc_fatal("lock %p (%s) acquired before %p (%s) "
                            "with no defined lock order",
                            oldlock.first, sym(oldlock.first), lock, sym(lock));
            }
        }
    }

    locks[lock] = lockcount{kind, 1};
}
static void
clearLock(objc_lock_list& locks, const void *lock, lockkind kind)
{
    auto iter = locks.find(lock);
    if (iter != locks.end()) {
        auto& l = iter->second;
        if (l.k == kind) {
            // Drop one level of nesting; forget the lock at zero.
            if (--l.i == 0) locks.erase(iter);
            return;
        }
    }

    _objc_fatal("lock not found!");
}
/***********************************************************************
* fork() safety checking
**********************************************************************/
void
lockdebug_remember_mutex(mutex_t *lock)
{
    setLock(AllLocks(), lock, MUTEX);
}

void
lockdebug_remember_recursive_mutex(recursive_mutex_t *lock)
{
    setLock(AllLocks(), lock, RECURSIVE);
}

void
lockdebug_remember_monitor(monitor_t *lock)
{
    setLock(AllLocks(), lock, MONITOR);
}
void
lockdebug_assert_all_locks_locked()
{
    auto& owned = ownedLocks();

    for (const auto& l : AllLocks()) {
        if (!hasLock(owned, l.first, l.second.k)) {
            _objc_fatal("lock %p:%d is incorrectly not owned",
                        l.first, l.second.k);
        }
    }
}

void
lockdebug_assert_no_locks_locked()
{
    lockdebug_assert_no_locks_locked_except({});
}
void lockdebug_assert_no_locks_locked_except(std::initializer_list<void *> canBeLocked)
{
    auto& owned = ownedLocks();

    for (const auto& l : AllLocks()) {
        if (std::find(canBeLocked.begin(), canBeLocked.end(), l.first) != canBeLocked.end())
            continue;

        if (hasLock(owned, l.first, l.second.k)) {
            _objc_fatal("lock %p:%d is incorrectly owned", l.first, l.second.k);
        }
    }
}
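
// Illustrative sketch of how these checks are expected to be used by the
// atfork handlers elsewhere in the runtime (handler names are assumptions):
//
//   _objc_atfork_prepare():  acquire every registered lock, then call
//                            lockdebug_assert_all_locks_locked().
//   _objc_atfork_parent() /
//   _objc_atfork_child():    release every registered lock, then call
//                            lockdebug_assert_no_locks_locked().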
/***********************************************************************
* Mutex checking
**********************************************************************/
void
lockdebug_mutex_lock(mutex_t *lock)
{
    auto& locks = ownedLocks();

    if (hasLock(locks, lock, MUTEX)) {
        _objc_fatal("deadlock: relocking mutex");
    }
    setLock(locks, lock, MUTEX);
}

// try-lock success is the only case with lockdebug effects.
// try-lock when already locked is OK (will fail).
// try-lock failure does nothing.
void
lockdebug_mutex_try_lock_success(mutex_t *lock)
{
    auto& locks = ownedLocks();
    setLock(locks, lock, MUTEX);
}
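
// Illustrative call pattern (a sketch, not the actual lock implementation):
// the owning lock class reports only try-lock *successes* to lockdebug.
// The member and function names below are assumptions.
//
//   bool tryLock()
//   {
//       if (os_unfair_lock_trylock(&mLock)) {
//           lockdebug_mutex_try_lock_success(this);
//           return true;
//       }
//       return false;   // failure: no lockdebug bookkeeping
//   }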
void
lockdebug_mutex_unlock(mutex_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, MUTEX)) {
        _objc_fatal("unlocking unowned mutex");
    }
    clearLock(locks, lock, MUTEX);
}

void
lockdebug_mutex_assert_locked(mutex_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, MUTEX)) {
        _objc_fatal("mutex incorrectly not locked");
    }
}

void
lockdebug_mutex_assert_unlocked(mutex_t *lock)
{
    auto& locks = ownedLocks();

    if (hasLock(locks, lock, MUTEX)) {
        _objc_fatal("mutex incorrectly locked");
    }
}
/***********************************************************************
* Recursive mutex checking
**********************************************************************/
void
lockdebug_recursive_mutex_lock(recursive_mutex_t *lock)
{
    auto& locks = ownedLocks();
    setLock(locks, lock, RECURSIVE);
}

void
lockdebug_recursive_mutex_unlock(recursive_mutex_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, RECURSIVE)) {
        _objc_fatal("unlocking unowned recursive mutex");
    }
    clearLock(locks, lock, RECURSIVE);
}

void
lockdebug_recursive_mutex_assert_locked(recursive_mutex_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, RECURSIVE)) {
        _objc_fatal("recursive mutex incorrectly not locked");
    }
}

void
lockdebug_recursive_mutex_assert_unlocked(recursive_mutex_t *lock)
{
    auto& locks = ownedLocks();

    if (hasLock(locks, lock, RECURSIVE)) {
        _objc_fatal("recursive mutex incorrectly locked");
    }
}
/***********************************************************************
* Monitor checking
**********************************************************************/
void
lockdebug_monitor_enter(monitor_t *lock)
{
    auto& locks = ownedLocks();

    if (hasLock(locks, lock, MONITOR)) {
        _objc_fatal("deadlock: relocking monitor");
    }
    setLock(locks, lock, MONITOR);
}

void
lockdebug_monitor_leave(monitor_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, MONITOR)) {
        _objc_fatal("unlocking unowned monitor");
    }
    clearLock(locks, lock, MONITOR);
}

void
lockdebug_monitor_wait(monitor_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, MONITOR)) {
        _objc_fatal("waiting in unowned monitor");
    }
}

void
lockdebug_monitor_assert_locked(monitor_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, MONITOR)) {
        _objc_fatal("monitor incorrectly not locked");
    }
}

void
lockdebug_monitor_assert_unlocked(monitor_t *lock)
{
    auto& locks = ownedLocks();

    if (hasLock(locks, lock, MONITOR)) {
        _objc_fatal("monitor incorrectly held");
    }
}