/*
 * Copyright (c) 2007 Apple Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/***********************************************************************
* Error-checking locks for debugging.
**********************************************************************/
29 #include "objc-private.h"
31 #if DEBUG && !TARGET_OS_WIN32
/***********************************************************************
* Recording - per-thread list of mutexes and monitors held
**********************************************************************/
38 void *l; // the lock itself
39 int k; // the kind of lock it is (MUTEX, MONITOR, etc)
40 int i; // the lock's nest count
49 typedef struct _objc_lock_list {
55 static tls_key_t lock_tls;
58 destroyLocks(void *value)
60 _objc_lock_list *locks = (_objc_lock_list *)value;
61 // fixme complain about any still-held locks?
62 if (locks) free(locks);
65 static struct _objc_lock_list *
68 _objc_lock_list *locks;
70 // Use a dedicated tls key to prevent differences vs non-debug in
71 // usage of objc's other tls keys (required for some unit tests).
72 INIT_ONCE_PTR(lock_tls, tls_create(&destroyLocks), (void)0);
74 locks = (_objc_lock_list *)tls_get(lock_tls);
79 locks = (_objc_lock_list *)calloc(1, sizeof(_objc_lock_list) + sizeof(lockcount) * 16);
80 locks->allocated = 16;
82 tls_set(lock_tls, locks);
86 if (locks->allocated == locks->used) {
90 _objc_lock_list *oldlocks = locks;
91 locks = (_objc_lock_list *)calloc(1, sizeof(_objc_lock_list) + 2 * oldlocks->used * sizeof(lockcount));
92 locks->used = oldlocks->used;
93 locks->allocated = oldlocks->used * 2;
94 memcpy(locks->list, oldlocks->list, locks->used * sizeof(lockcount));
95 tls_set(lock_tls, locks);
104 hasLock(_objc_lock_list *locks, void *lock, int kind)
107 if (!locks) return NO;
109 for (i = 0; i < locks->used; i++) {
110 if (locks->list[i].l == lock && locks->list[i].k == kind) return YES;
117 setLock(_objc_lock_list *locks, void *lock, int kind)
120 for (i = 0; i < locks->used; i++) {
121 if (locks->list[i].l == lock && locks->list[i].k == kind) {
127 locks->list[locks->used].l = lock;
128 locks->list[locks->used].i = 1;
129 locks->list[locks->used].k = kind;
134 clearLock(_objc_lock_list *locks, void *lock, int kind)
137 for (i = 0; i < locks->used; i++) {
138 if (locks->list[i].l == lock && locks->list[i].k == kind) {
139 if (--locks->list[i].i == 0) {
140 locks->list[i].l = NULL;
141 locks->list[i] = locks->list[--locks->used];
147 _objc_fatal("lock not found!");
/***********************************************************************
* Mutex checking
**********************************************************************/
156 lockdebug_mutex_lock(mutex_t *lock)
158 _objc_lock_list *locks = getLocks(YES);
160 if (hasLock(locks, lock, MUTEX)) {
161 _objc_fatal("deadlock: relocking mutex");
163 setLock(locks, lock, MUTEX);
166 // try-lock success is the only case with lockdebug effects.
167 // try-lock when already locked is OK (will fail)
168 // try-lock failure does nothing.
170 lockdebug_mutex_try_lock_success(mutex_t *lock)
172 _objc_lock_list *locks = getLocks(YES);
173 setLock(locks, lock, MUTEX);
177 lockdebug_mutex_unlock(mutex_t *lock)
179 _objc_lock_list *locks = getLocks(NO);
181 if (!hasLock(locks, lock, MUTEX)) {
182 _objc_fatal("unlocking unowned mutex");
184 clearLock(locks, lock, MUTEX);
189 lockdebug_mutex_assert_locked(mutex_t *lock)
191 _objc_lock_list *locks = getLocks(NO);
193 if (!hasLock(locks, lock, MUTEX)) {
194 _objc_fatal("mutex incorrectly not locked");
199 lockdebug_mutex_assert_unlocked(mutex_t *lock)
201 _objc_lock_list *locks = getLocks(NO);
203 if (hasLock(locks, lock, MUTEX)) {
204 _objc_fatal("mutex incorrectly locked");
/***********************************************************************
* Recursive mutex checking
**********************************************************************/
214 lockdebug_recursive_mutex_lock(recursive_mutex_tt<true> *lock)
216 _objc_lock_list *locks = getLocks(YES);
217 setLock(locks, lock, RECURSIVE);
221 lockdebug_recursive_mutex_unlock(recursive_mutex_tt<true> *lock)
223 _objc_lock_list *locks = getLocks(NO);
225 if (!hasLock(locks, lock, RECURSIVE)) {
226 _objc_fatal("unlocking unowned recursive mutex");
228 clearLock(locks, lock, RECURSIVE);
233 lockdebug_recursive_mutex_assert_locked(recursive_mutex_tt<true> *lock)
235 _objc_lock_list *locks = getLocks(NO);
237 if (!hasLock(locks, lock, RECURSIVE)) {
238 _objc_fatal("recursive mutex incorrectly not locked");
243 lockdebug_recursive_mutex_assert_unlocked(recursive_mutex_tt<true> *lock)
245 _objc_lock_list *locks = getLocks(NO);
247 if (hasLock(locks, lock, RECURSIVE)) {
248 _objc_fatal("recursive mutex incorrectly locked");
/***********************************************************************
* Monitor checking
**********************************************************************/
258 lockdebug_monitor_enter(monitor_t *lock)
260 _objc_lock_list *locks = getLocks(YES);
262 if (hasLock(locks, lock, MONITOR)) {
263 _objc_fatal("deadlock: relocking monitor");
265 setLock(locks, lock, MONITOR);
269 lockdebug_monitor_leave(monitor_t *lock)
271 _objc_lock_list *locks = getLocks(NO);
273 if (!hasLock(locks, lock, MONITOR)) {
274 _objc_fatal("unlocking unowned monitor");
276 clearLock(locks, lock, MONITOR);
280 lockdebug_monitor_wait(monitor_t *lock)
282 _objc_lock_list *locks = getLocks(NO);
284 if (!hasLock(locks, lock, MONITOR)) {
285 _objc_fatal("waiting in unowned monitor");
291 lockdebug_monitor_assert_locked(monitor_t *lock)
293 _objc_lock_list *locks = getLocks(NO);
295 if (!hasLock(locks, lock, MONITOR)) {
296 _objc_fatal("monitor incorrectly not locked");
301 lockdebug_monitor_assert_unlocked(monitor_t *lock)
303 _objc_lock_list *locks = getLocks(NO);
305 if (hasLock(locks, lock, MONITOR)) {
306 _objc_fatal("monitor incorrectly held");
/***********************************************************************
* rwlock checking
**********************************************************************/
316 lockdebug_rwlock_read(rwlock_tt<true> *lock)
318 _objc_lock_list *locks = getLocks(YES);
320 if (hasLock(locks, lock, RDLOCK)) {
321 // Recursive rwlock read is bad (may deadlock vs pending writer)
322 _objc_fatal("recursive rwlock read");
324 if (hasLock(locks, lock, WRLOCK)) {
325 _objc_fatal("deadlock: read after write for rwlock");
327 setLock(locks, lock, RDLOCK);
330 // try-read success is the only case with lockdebug effects.
331 // try-read when already reading is OK (won't deadlock)
332 // try-read when already writing is OK (will fail)
333 // try-read failure does nothing.
335 lockdebug_rwlock_try_read_success(rwlock_tt<true> *lock)
337 _objc_lock_list *locks = getLocks(YES);
338 setLock(locks, lock, RDLOCK);
342 lockdebug_rwlock_unlock_read(rwlock_tt<true> *lock)
344 _objc_lock_list *locks = getLocks(NO);
346 if (!hasLock(locks, lock, RDLOCK)) {
347 _objc_fatal("un-reading unowned rwlock");
349 clearLock(locks, lock, RDLOCK);
354 lockdebug_rwlock_write(rwlock_tt<true> *lock)
356 _objc_lock_list *locks = getLocks(YES);
358 if (hasLock(locks, lock, RDLOCK)) {
359 // Lock promotion not allowed (may deadlock)
360 _objc_fatal("deadlock: write after read for rwlock");
362 if (hasLock(locks, lock, WRLOCK)) {
363 _objc_fatal("recursive rwlock write");
365 setLock(locks, lock, WRLOCK);
368 // try-write success is the only case with lockdebug effects.
369 // try-write when already reading is OK (will fail)
370 // try-write when already writing is OK (will fail)
371 // try-write failure does nothing.
373 lockdebug_rwlock_try_write_success(rwlock_tt<true> *lock)
375 _objc_lock_list *locks = getLocks(YES);
376 setLock(locks, lock, WRLOCK);
380 lockdebug_rwlock_unlock_write(rwlock_tt<true> *lock)
382 _objc_lock_list *locks = getLocks(NO);
384 if (!hasLock(locks, lock, WRLOCK)) {
385 _objc_fatal("un-writing unowned rwlock");
387 clearLock(locks, lock, WRLOCK);
392 lockdebug_rwlock_assert_reading(rwlock_tt<true> *lock)
394 _objc_lock_list *locks = getLocks(NO);
396 if (!hasLock(locks, lock, RDLOCK)) {
397 _objc_fatal("rwlock incorrectly not reading");
402 lockdebug_rwlock_assert_writing(rwlock_tt<true> *lock)
404 _objc_lock_list *locks = getLocks(NO);
406 if (!hasLock(locks, lock, WRLOCK)) {
407 _objc_fatal("rwlock incorrectly not writing");
412 lockdebug_rwlock_assert_locked(rwlock_tt<true> *lock)
414 _objc_lock_list *locks = getLocks(NO);
416 if (!hasLock(locks, lock, RDLOCK) && !hasLock(locks, lock, WRLOCK)) {
417 _objc_fatal("rwlock incorrectly neither reading nor writing");
422 lockdebug_rwlock_assert_unlocked(rwlock_tt<true> *lock)
424 _objc_lock_list *locks = getLocks(NO);
426 if (hasLock(locks, lock, RDLOCK) || hasLock(locks, lock, WRLOCK)) {
427 _objc_fatal("rwlock incorrectly not unlocked");