/*
 * Copyright (c) 2007 Apple Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/***********************************************************************
* Error-checking locks for debugging.
**********************************************************************/

#include "objc-private.h"

#if !defined(NDEBUG) && !TARGET_OS_WIN32

/***********************************************************************
* Recording - per-thread list of mutexes and monitors held
**********************************************************************/

// One held lock: the lock pointer, its kind, and its nest count.
typedef struct {
    void *l; // the lock itself
    int k;   // the kind of lock it is (MUTEX, MONITOR, etc)
    int i;   // the lock's nest count
} lockcount;

// Lock kinds recorded in lockcount.k.
// (Only distinctness of the values matters.)
#define MUTEX     1
#define MONITOR   2
#define RDLOCK    3
#define WRLOCK    4
#define RECURSIVE 5

typedef struct _objc_lock_list {
    int allocated;       // capacity of list[]
    int used;            // number of entries in use
    lockcount list[0];   // the held locks, allocated inline
} _objc_lock_list;

static tls_key_t lock_tls;

static void
destroyLocks(void *value)
{
    _objc_lock_list *locks = (_objc_lock_list *)value;
    // fixme complain about any still-held locks?
    if (locks) _free_internal(locks);
}

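// Return the calling thread's lock list.
// If create is NO, return NULL when the thread has no list yet.
// If create is YES, allocate the list on first use and grow it
// (by doubling) when it is full.
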
static struct _objc_lock_list *
getLocks(BOOL create)
{
    _objc_lock_list *locks;

    // Use a dedicated tls key to prevent differences vs non-debug in
    // usage of objc's other tls keys (required for some unit tests).
    INIT_ONCE_PTR(lock_tls, tls_create(&destroyLocks), (void)0);

    locks = (_objc_lock_list *)tls_get(lock_tls);
    if (!locks) {
        if (!create) {
            return NULL;
        } else {
            locks = (_objc_lock_list *)_calloc_internal(1, sizeof(_objc_lock_list) + sizeof(lockcount) * 16);
            locks->allocated = 16;
            locks->used = 0;
            tls_set(lock_tls, locks);
        }
    }

    if (locks->allocated == locks->used) {
        if (!create) {
            return locks;
        } else {
            _objc_lock_list *oldlocks = locks;
            locks = (_objc_lock_list *)_calloc_internal(1, sizeof(_objc_lock_list) + 2 * oldlocks->used * sizeof(lockcount));
            locks->used = oldlocks->used;
            locks->allocated = oldlocks->used * 2;
            memcpy(locks->list, oldlocks->list, locks->used * sizeof(lockcount));
            tls_set(lock_tls, locks);
            _free_internal(oldlocks);
        }
    }

    return locks;
}

static BOOL
hasLock(_objc_lock_list *locks, void *lock, int kind)
{
    int i;
    if (!locks) return NO;

    for (i = 0; i < locks->used; i++) {
        if (locks->list[i].l == lock && locks->list[i].k == kind) return YES;
    }
    return NO;
}

static void
setLock(_objc_lock_list *locks, void *lock, int kind)
{
    int i;
    for (i = 0; i < locks->used; i++) {
        if (locks->list[i].l == lock && locks->list[i].k == kind) {
            // Already held: bump the nest count.
            locks->list[i].i++;
            return;
        }
    }

    // Not held yet: append a new entry.
    // getLocks(YES) has already ensured there is room.
    locks->list[locks->used].l = lock;
    locks->list[locks->used].i = 1;
    locks->list[locks->used].k = kind;
    locks->used++;
}

static void
clearLock(_objc_lock_list *locks, void *lock, int kind)
{
    int i;
    for (i = 0; i < locks->used; i++) {
        if (locks->list[i].l == lock && locks->list[i].k == kind) {
            // Decrement the nest count; when it reaches zero, remove the
            // entry by moving the last entry into its slot.
            if (--locks->list[i].i == 0) {
                locks->list[i].l = NULL;
                locks->list[i] = locks->list[--locks->used];
            }
            return;
        }
    }

    _objc_fatal("lock not found!");
}

/***********************************************************************
* Mutex checking
**********************************************************************/

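// Note: `name` is assumed to be the stringified lock expression passed
// in by the calling macro (e.g. "&someLock"); name+1 skips the leading
// '&' when printing diagnostics.
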
int
_mutex_lock_debug(mutex_t *lock, const char *name)
{
    _objc_lock_list *locks = getLocks(YES);

    if (hasLock(locks, lock, MUTEX)) {
        _objc_fatal("deadlock: relocking mutex %s\n", name+1);
    }
    setLock(locks, lock, MUTEX);

    return _mutex_lock_nodebug(lock);
}

int
_mutex_try_lock_debug(mutex_t *lock, const char *name)
{
    _objc_lock_list *locks = getLocks(YES);

    // attempting to relock in try_lock is OK
    int result = _mutex_try_lock_nodebug(lock);
    if (result) {
        setLock(locks, lock, MUTEX);
    }
    return result;
}

int
_mutex_unlock_debug(mutex_t *lock, const char *name)
{
    _objc_lock_list *locks = getLocks(NO);

    if (!hasLock(locks, lock, MUTEX)) {
        _objc_fatal("unlocking unowned mutex %s\n", name+1);
    }
    clearLock(locks, lock, MUTEX);

    return _mutex_unlock_nodebug(lock);
}

void
_mutex_assert_locked_debug(mutex_t *lock, const char *name)
{
    _objc_lock_list *locks = getLocks(NO);

    if (!hasLock(locks, lock, MUTEX)) {
        _objc_fatal("mutex %s incorrectly not held\n", name+1);
    }
}

void
_mutex_assert_unlocked_debug(mutex_t *lock, const char *name)
{
    _objc_lock_list *locks = getLocks(NO);

    if (hasLock(locks, lock, MUTEX)) {
        _objc_fatal("mutex %s incorrectly held\n", name+1);
    }
}

/***********************************************************************
* Recursive mutex checking
**********************************************************************/

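// Recursive mutexes may legally be relocked by the owning thread, so
// locking only records the lock (bumping its nest count) instead of
// reporting a deadlock.
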
int
_recursive_mutex_lock_debug(recursive_mutex_t *lock, const char *name)
{
    _objc_lock_list *locks = getLocks(YES);

    setLock(locks, lock, RECURSIVE);

    return _recursive_mutex_lock_nodebug(lock);
}

int
_recursive_mutex_try_lock_debug(recursive_mutex_t *lock, const char *name)
{
    _objc_lock_list *locks = getLocks(YES);

    int result = _recursive_mutex_try_lock_nodebug(lock);
    if (result) {
        setLock(locks, lock, RECURSIVE);
    }
    return result;
}

int
_recursive_mutex_unlock_debug(recursive_mutex_t *lock, const char *name)
{
    _objc_lock_list *locks = getLocks(NO);

    if (!hasLock(locks, lock, RECURSIVE)) {
        _objc_fatal("unlocking unowned recursive mutex %s\n", name+1);
    }
    clearLock(locks, lock, RECURSIVE);

    return _recursive_mutex_unlock_nodebug(lock);
}

void
_recursive_mutex_assert_locked_debug(recursive_mutex_t *lock, const char *name)
{
    _objc_lock_list *locks = getLocks(NO);

    if (!hasLock(locks, lock, RECURSIVE)) {
        _objc_fatal("recursive mutex %s incorrectly not held\n", name+1);
    }
}

void
_recursive_mutex_assert_unlocked_debug(recursive_mutex_t *lock, const char *name)
{
    _objc_lock_list *locks = getLocks(NO);

    if (hasLock(locks, lock, RECURSIVE)) {
        _objc_fatal("recursive mutex %s incorrectly held\n", name+1);
    }
}

/***********************************************************************
* Monitor checking
**********************************************************************/

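// Monitors are treated like non-recursive mutexes: re-entering an owned
// monitor is reported as a deadlock, and exiting or waiting on a monitor
// that this thread does not hold is fatal.
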
int
_monitor_enter_debug(monitor_t *lock, const char *name)
{
    _objc_lock_list *locks = getLocks(YES);

    if (hasLock(locks, lock, MONITOR)) {
        _objc_fatal("deadlock: relocking monitor %s\n", name+1);
    }
    setLock(locks, lock, MONITOR);

    return _monitor_enter_nodebug(lock);
}

int
_monitor_exit_debug(monitor_t *lock, const char *name)
{
    _objc_lock_list *locks = getLocks(NO);

    if (!hasLock(locks, lock, MONITOR)) {
        _objc_fatal("unlocking unowned monitor %s\n", name+1);
    }
    clearLock(locks, lock, MONITOR);

    return _monitor_exit_nodebug(lock);
}

int
_monitor_wait_debug(monitor_t *lock, const char *name)
{
    _objc_lock_list *locks = getLocks(NO);

    if (!hasLock(locks, lock, MONITOR)) {
        _objc_fatal("waiting in unowned monitor %s\n", name+1);
    }

    return _monitor_wait_nodebug(lock);
}

void
_monitor_assert_locked_debug(monitor_t *lock, const char *name)
{
    _objc_lock_list *locks = getLocks(NO);

    if (!hasLock(locks, lock, MONITOR)) {
        _objc_fatal("monitor %s incorrectly not held\n", name+1);
    }
}

void
_monitor_assert_unlocked_debug(monitor_t *lock, const char *name)
{
    _objc_lock_list *locks = getLocks(NO);

    if (hasLock(locks, lock, MONITOR)) {
        _objc_fatal("monitor %s incorrectly held\n", name+1);
    }
}

/***********************************************************************
* Read-write lock checking
**********************************************************************/

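// Read ownership (RDLOCK) and write ownership (WRLOCK) are tracked as
// separate kinds. Recursive reads, recursive writes, and read/write
// promotion or demotion on the same thread are all reported as errors
// because they can deadlock against other threads.
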
void
_rwlock_read_debug(rwlock_t *lock, const char *name)
{
    _objc_lock_list *locks = getLocks(YES);

    if (hasLock(locks, lock, RDLOCK)) {
        // Recursive rwlock read is bad (may deadlock vs pending writer)
        _objc_fatal("recursive rwlock read %s\n", name+1);
    }
    if (hasLock(locks, lock, WRLOCK)) {
        _objc_fatal("deadlock: read after write for rwlock %s\n", name+1);
    }
    setLock(locks, lock, RDLOCK);

    _rwlock_read_nodebug(lock);
}

int
_rwlock_try_read_debug(rwlock_t *lock, const char *name)
{
    _objc_lock_list *locks = getLocks(YES);

    // try-read when already reading is OK (won't deadlock against writer)
    // try-read when already writing is OK (will fail)
    int result = _rwlock_try_read_nodebug(lock);
    if (result) {
        setLock(locks, lock, RDLOCK);
    }
    return result;
}

void
_rwlock_unlock_read_debug(rwlock_t *lock, const char *name)
{
    _objc_lock_list *locks = getLocks(NO);

    if (!hasLock(locks, lock, RDLOCK)) {
        _objc_fatal("un-reading unowned rwlock %s\n", name+1);
    }
    clearLock(locks, lock, RDLOCK);

    _rwlock_unlock_read_nodebug(lock);
}

void
_rwlock_write_debug(rwlock_t *lock, const char *name)
{
    _objc_lock_list *locks = getLocks(YES);

    if (hasLock(locks, lock, RDLOCK)) {
        // Lock promotion not allowed (may deadlock)
        _objc_fatal("deadlock: write after read for rwlock %s\n", name+1);
    }
    if (hasLock(locks, lock, WRLOCK)) {
        _objc_fatal("recursive rwlock write %s\n", name+1);
    }
    setLock(locks, lock, WRLOCK);

    _rwlock_write_nodebug(lock);
}

int
_rwlock_try_write_debug(rwlock_t *lock, const char *name)
{
    _objc_lock_list *locks = getLocks(YES);

    // try-write when already reading is OK (will fail)
    // try-write when already writing is OK (will fail)
    int result = _rwlock_try_write_nodebug(lock);
    if (result) {
        setLock(locks, lock, WRLOCK);
    }
    return result;
}

void
_rwlock_unlock_write_debug(rwlock_t *lock, const char *name)
{
    _objc_lock_list *locks = getLocks(NO);

    if (!hasLock(locks, lock, WRLOCK)) {
        _objc_fatal("un-writing unowned rwlock %s\n", name+1);
    }
    clearLock(locks, lock, WRLOCK);

    _rwlock_unlock_write_nodebug(lock);
}

void
_rwlock_assert_reading_debug(rwlock_t *lock, const char *name)
{
    _objc_lock_list *locks = getLocks(NO);

    if (!hasLock(locks, lock, RDLOCK)) {
        _objc_fatal("rwlock %s incorrectly not reading\n", name+1);
    }
}

void
_rwlock_assert_writing_debug(rwlock_t *lock, const char *name)
{
    _objc_lock_list *locks = getLocks(NO);

    if (!hasLock(locks, lock, WRLOCK)) {
        _objc_fatal("rwlock %s incorrectly not writing\n", name+1);
    }
}

void
_rwlock_assert_locked_debug(rwlock_t *lock, const char *name)
{
    _objc_lock_list *locks = getLocks(NO);

    if (!hasLock(locks, lock, RDLOCK) && !hasLock(locks, lock, WRLOCK)) {
        _objc_fatal("rwlock %s incorrectly neither reading nor writing\n",
                    name+1);
    }
}

void
_rwlock_assert_unlocked_debug(rwlock_t *lock, const char *name)
{
    _objc_lock_list *locks = getLocks(NO);

    if (hasLock(locks, lock, RDLOCK) || hasLock(locks, lock, WRLOCK)) {
        _objc_fatal("rwlock %s incorrectly not unlocked\n", name+1);
    }
}

#endif