/*
 * Copyright (c) 2007 Apple Inc.  All Rights Reserved.
 * 
 * @APPLE_LICENSE_HEADER_START@
 * 
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 * 
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 * 
 * @APPLE_LICENSE_HEADER_END@
 */
 
/***********************************************************************
* Error-checking locks for debugging.
**********************************************************************/
 
#include "objc-private.h"

#if LOCKDEBUG  &&  !TARGET_OS_WIN32

#include <unordered_map>
#include <vector>
 
  36 /***********************************************************************
 
  37 * Thread-local bool set during _objc_atfork_prepare().
 
  38 * That function is allowed to break some lock ordering rules.
 
  39 **********************************************************************/
 
  41 static tls_key_t fork_prepare_tls;
 
  44 lockdebug_setInForkPrepare(bool inForkPrepare)
 
  46     INIT_ONCE_PTR(fork_prepare_tls, tls_create(nil), (void)0);
 
  47     tls_set(fork_prepare_tls, (void*)inForkPrepare);
 
  53     INIT_ONCE_PTR(fork_prepare_tls, tls_create(nil), (void)0);
 
  54     return (bool)tls_get(fork_prepare_tls);
 
  59 /***********************************************************************
 
  61 * "lock X precedes lock Y" means that X must be acquired first.
 
  62 * This property is transitive.
 
  63 **********************************************************************/
 
  67     std::vector<const lockorder *> predecessors;
 
  69     mutable std::unordered_map<const lockorder *, bool> memo;
 
  71     lockorder(const void *newl) : l(newl) { }
 
  74 static std::unordered_map<const void*, lockorder *> lockOrderList;
 
  75 // not mutex_t because we don't want lock debugging on this lock
 
  76 static mutex_tt<false> lockOrderLock;
 
  79 lockPrecedesLock(const lockorder *oldlock, const lockorder *newlock)
 
  81     auto memoed = newlock->memo.find(oldlock);
 
  82     if (memoed != newlock->memo.end()) {
 
  83         return memoed->second;
 
  87     for (const auto *pre : newlock->predecessors) {
 
  88         if (oldlock == pre  ||  lockPrecedesLock(oldlock, pre)) {
 
  94     newlock->memo[oldlock] = result;
 
  99 lockPrecedesLock(const void *oldlock, const void *newlock)
 
 101     mutex_tt<false>::locker lock(lockOrderLock);
 
 103     auto oldorder = lockOrderList.find(oldlock);
 
 104     auto neworder = lockOrderList.find(newlock);
 
 105     if (neworder == lockOrderList.end() || oldorder == lockOrderList.end()) {
 
 108     return lockPrecedesLock(oldorder->second, neworder->second);
 
 112 lockUnorderedWithLock(const void *oldlock, const void *newlock)
 
 114     mutex_tt<false>::locker lock(lockOrderLock);
 
 116     auto oldorder = lockOrderList.find(oldlock);
 
 117     auto neworder = lockOrderList.find(newlock);
 
 118     if (neworder == lockOrderList.end() || oldorder == lockOrderList.end()) {
 
 122     if (lockPrecedesLock(oldorder->second, neworder->second) ||
 
 123         lockPrecedesLock(neworder->second, oldorder->second))
 
 131 void lockdebug_lock_precedes_lock(const void *oldlock, const void *newlock)
 
 133     if (lockPrecedesLock(newlock, oldlock)) {
 
 134         _objc_fatal("contradiction in lock order declaration");
 
 137     mutex_tt<false>::locker lock(lockOrderLock);
 
 139     auto oldorder = lockOrderList.find(oldlock);
 
 140     auto neworder = lockOrderList.find(newlock);
 
 141     if (oldorder == lockOrderList.end()) {
 
 142         lockOrderList[oldlock] = new lockorder(oldlock);
 
 143         oldorder = lockOrderList.find(oldlock);
 
 145     if (neworder == lockOrderList.end()) {
 
 146         lockOrderList[newlock] = new lockorder(newlock);
 
 147         neworder = lockOrderList.find(newlock);
 
 150     neworder->second->predecessors.push_back(oldorder->second);
 
 154 /***********************************************************************
 
 155 * Recording - per-thread list of mutexes and monitors held
 
 156 **********************************************************************/
 
 158 enum class lockkind {
 
 159     MUTEX = 1, MONITOR = 2, RDLOCK = 3, WRLOCK = 4, RECURSIVE = 5
 
 162 #define MUTEX     lockkind::MUTEX
 
 163 #define MONITOR   lockkind::MONITOR
 
 164 #define RDLOCK    lockkind::RDLOCK
 
 165 #define WRLOCK    lockkind::WRLOCK
 
 166 #define RECURSIVE lockkind::RECURSIVE
 
 169     lockkind k;  // the kind of lock it is (MUTEX, MONITOR, etc)
 
 170     int i;       // the lock's nest count
 
 173 using objc_lock_list = std::unordered_map<const void *, lockcount>;
 
 176 // Thread-local list of locks owned by a thread.
 
 177 // Used by lock ownership checks.
 
 178 static tls_key_t lock_tls;
 
 180 // Global list of all locks.
 
 181 // Used by fork() safety check.
 
 182 // This can't be a static struct because of C++ initialization order problems.
 
 183 static objc_lock_list& AllLocks() {
 
 184     static objc_lock_list *locks;
 
 185     INIT_ONCE_PTR(locks, new objc_lock_list, (void)0);
 
 191 destroyLocks(void *value)
 
 193     auto locks = (objc_lock_list *)value;
 
 194     // fixme complain about any still-held locks?
 
 195     if (locks) delete locks;
 
 198 static objc_lock_list&
 
 201     // Use a dedicated tls key to prevent differences vs non-debug in 
 
 202     // usage of objc's other tls keys (required for some unit tests).
 
 203     INIT_ONCE_PTR(lock_tls, tls_create(&destroyLocks), (void)0);
 
 205     auto locks = (objc_lock_list *)tls_get(lock_tls);
 
 207         locks = new objc_lock_list;
 
 208         tls_set(lock_tls, locks);
 
 215 hasLock(objc_lock_list& locks, const void *lock, lockkind kind)
 
 217     auto iter = locks.find(lock);
 
 218     if (iter != locks.end() && iter->second.k == kind) return true;
 
 223 static const char *sym(const void *lock)
 
 226     int ok = dladdr(lock, &info);
 
 227     if (ok && info.dli_sname && info.dli_sname[0]) return info.dli_sname;
 
 232 setLock(objc_lock_list& locks, const void *lock, lockkind kind)
 
 234     // Check if we already own this lock.
 
 235     auto iter = locks.find(lock);
 
 236     if (iter != locks.end() && iter->second.k == kind) {
 
 241     // Newly-acquired lock. Verify lock ordering.
 
 242     // Locks not in AllLocks are exempt (i.e. @synchronize locks)
 
 243     if (&locks != &AllLocks() && AllLocks().find(lock) != AllLocks().end()) {
 
 244         for (auto& oldlock : locks) {
 
 245             if (AllLocks().find(oldlock.first) == AllLocks().end()) {
 
 250             if (lockPrecedesLock(lock, oldlock.first)) {
 
 251                 _objc_fatal("lock %p (%s) incorrectly acquired before %p (%s)",
 
 252                             oldlock.first, sym(oldlock.first), lock, sym(lock));
 
 254             if (!inForkPrepare() &&
 
 255                 lockUnorderedWithLock(lock, oldlock.first))
 
 257                 // _objc_atfork_prepare is allowed to acquire
 
 258                 // otherwise-unordered locks, but nothing else may.
 
 259                 _objc_fatal("lock %p (%s) acquired before %p (%s) "
 
 260                             "with no defined lock order",
 
 261                             oldlock.first, sym(oldlock.first), lock, sym(lock));
 
 266     locks[lock] = lockcount{kind, 1};
 
 270 clearLock(objc_lock_list& locks, const void *lock, lockkind kind)
 
 272     auto iter = locks.find(lock);
 
 273     if (iter != locks.end()) {
 
 274         auto& l = iter->second;
 
 283     _objc_fatal("lock not found!");
 
 287 /***********************************************************************
 
 288 * fork() safety checking
 
 289 **********************************************************************/
 
 292 lockdebug_remember_mutex(mutex_t *lock)
 
 294     setLock(AllLocks(), lock, MUTEX);
 
 298 lockdebug_remember_recursive_mutex(recursive_mutex_t *lock)
 
 300     setLock(AllLocks(), lock, RECURSIVE);
 
 304 lockdebug_remember_monitor(monitor_t *lock)
 
 306     setLock(AllLocks(), lock, MONITOR);
 
 310 lockdebug_assert_all_locks_locked()
 
 312     auto& owned = ownedLocks();
 
 314     for (const auto& l : AllLocks()) {
 
 315         if (!hasLock(owned, l.first, l.second.k)) {
 
 316             _objc_fatal("lock %p:%d is incorrectly not owned",
 
 317                         l.first, l.second.k);
 
 323 lockdebug_assert_no_locks_locked()
 
 325     auto& owned = ownedLocks();
 
 327     for (const auto& l : AllLocks()) {
 
 328         if (hasLock(owned, l.first, l.second.k)) {
 
 329             _objc_fatal("lock %p:%d is incorrectly owned", l.first, l.second.k);
 
 335 /***********************************************************************
 
 337 **********************************************************************/
 
 340 lockdebug_mutex_lock(mutex_t *lock)
 
 342     auto& locks = ownedLocks();
 
 344     if (hasLock(locks, lock, MUTEX)) {
 
 345         _objc_fatal("deadlock: relocking mutex");
 
 347     setLock(locks, lock, MUTEX);
 
 350 // try-lock success is the only case with lockdebug effects.
 
 351 // try-lock when already locked is OK (will fail)
 
 352 // try-lock failure does nothing.
 
 354 lockdebug_mutex_try_lock_success(mutex_t *lock)
 
 356     auto& locks = ownedLocks();
 
 357     setLock(locks, lock, MUTEX);
 
 361 lockdebug_mutex_unlock(mutex_t *lock)
 
 363     auto& locks = ownedLocks();
 
 365     if (!hasLock(locks, lock, MUTEX)) {
 
 366         _objc_fatal("unlocking unowned mutex");
 
 368     clearLock(locks, lock, MUTEX);
 
 373 lockdebug_mutex_assert_locked(mutex_t *lock)
 
 375     auto& locks = ownedLocks();
 
 377     if (!hasLock(locks, lock, MUTEX)) {
 
 378         _objc_fatal("mutex incorrectly not locked");
 
 383 lockdebug_mutex_assert_unlocked(mutex_t *lock)
 
 385     auto& locks = ownedLocks();
 
 387     if (hasLock(locks, lock, MUTEX)) {
 
 388         _objc_fatal("mutex incorrectly locked");
 
 393 /***********************************************************************
 
 394 * Recursive mutex checking
 
 395 **********************************************************************/
 
 398 lockdebug_recursive_mutex_lock(recursive_mutex_t *lock)
 
 400     auto& locks = ownedLocks();
 
 401     setLock(locks, lock, RECURSIVE);
 
 405 lockdebug_recursive_mutex_unlock(recursive_mutex_t *lock)
 
 407     auto& locks = ownedLocks();
 
 409     if (!hasLock(locks, lock, RECURSIVE)) {
 
 410         _objc_fatal("unlocking unowned recursive mutex");
 
 412     clearLock(locks, lock, RECURSIVE);
 
 417 lockdebug_recursive_mutex_assert_locked(recursive_mutex_t *lock)
 
 419     auto& locks = ownedLocks();
 
 421     if (!hasLock(locks, lock, RECURSIVE)) {
 
 422         _objc_fatal("recursive mutex incorrectly not locked");
 
 427 lockdebug_recursive_mutex_assert_unlocked(recursive_mutex_t *lock)
 
 429     auto& locks = ownedLocks();
 
 431     if (hasLock(locks, lock, RECURSIVE)) {
 
 432         _objc_fatal("recursive mutex incorrectly locked");
 
 437 /***********************************************************************
 
 439 **********************************************************************/
 
 442 lockdebug_monitor_enter(monitor_t *lock)
 
 444     auto& locks = ownedLocks();
 
 446     if (hasLock(locks, lock, MONITOR)) {
 
 447         _objc_fatal("deadlock: relocking monitor");
 
 449     setLock(locks, lock, MONITOR);
 
 453 lockdebug_monitor_leave(monitor_t *lock)
 
 455     auto& locks = ownedLocks();
 
 457     if (!hasLock(locks, lock, MONITOR)) {
 
 458         _objc_fatal("unlocking unowned monitor");
 
 460     clearLock(locks, lock, MONITOR);
 
 464 lockdebug_monitor_wait(monitor_t *lock)
 
 466     auto& locks = ownedLocks();
 
 468     if (!hasLock(locks, lock, MONITOR)) {
 
 469         _objc_fatal("waiting in unowned monitor");
 
 475 lockdebug_monitor_assert_locked(monitor_t *lock)
 
 477     auto& locks = ownedLocks();
 
 479     if (!hasLock(locks, lock, MONITOR)) {
 
 480         _objc_fatal("monitor incorrectly not locked");
 
 485 lockdebug_monitor_assert_unlocked(monitor_t *lock)
 
 487     auto& locks = ownedLocks();
 
 489     if (hasLock(locks, lock, MONITOR)) {
 
 490         _objc_fatal("monitor incorrectly held");