2 * Copyright (c) 2007 Apple Inc. All Rights Reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
/***********************************************************************
* Error-checking locks for debugging.
**********************************************************************/
29 #include "objc-private.h"
31 #if !defined(NDEBUG) && !TARGET_OS_WIN32
/***********************************************************************
* Recording - per-thread list of mutexes and monitors held
**********************************************************************/
// One recorded lock acquisition in a thread's lock list.
typedef struct {
    void *l;  // the lock itself
    int k;    // the kind of lock it is (MUTEX, MONITOR, etc)
    int i;    // the lock's nest count
} lockcount;

// Lock kinds recorded in lockcount.k.
// NOTE(review): exact original values were lost in extraction; any
// distinct values work since they are only compared for equality.
#define MUTEX     1
#define MONITOR   2
#define RDLOCK    3
#define WRLOCK    4
#define RECURSIVE 5
49 typedef struct _objc_lock_list {
55 static struct _objc_lock_list *
58 _objc_pthread_data *data;
59 _objc_lock_list *locks;
61 data = _objc_fetch_pthread_data(create);
62 if (!data && !create) return NULL;
64 locks = data->lockList;
69 locks = _calloc_internal(1, sizeof(_objc_lock_list) + sizeof(lockcount) * 16);
70 locks->allocated = 16;
72 data->lockList = locks;
76 if (locks->allocated == locks->used) {
80 data->lockList = _calloc_internal(1, sizeof(_objc_lock_list) + 2 * locks->used * sizeof(lockcount));
81 data->lockList->used = locks->used;
82 data->lockList->allocated = locks->used * 2;
83 memcpy(data->lockList->list, locks->list, locks->used * sizeof(lockcount));
84 _free_internal(locks);
85 locks = data->lockList;
93 hasLock(_objc_lock_list *locks, void *lock, int kind)
96 if (!locks) return NO;
98 for (i = 0; i < locks->used; i++) {
99 if (locks->list[i].l == lock && locks->list[i].k == kind) return YES;
106 setLock(_objc_lock_list *locks, void *lock, int kind)
109 for (i = 0; i < locks->used; i++) {
110 if (locks->list[i].l == lock && locks->list[i].k == kind) {
116 locks->list[locks->used].l = lock;
117 locks->list[locks->used].i = 1;
118 locks->list[locks->used].k = kind;
123 clearLock(_objc_lock_list *locks, void *lock, int kind)
126 for (i = 0; i < locks->used; i++) {
127 if (locks->list[i].l == lock && locks->list[i].k == kind) {
128 if (--locks->list[i].i == 0) {
129 locks->list[i].l = NULL;
130 locks->list[i] = locks->list[--locks->used];
136 _objc_fatal("lock not found!");
139 __private_extern__ void
140 _destroyLockList(struct _objc_lock_list *locks)
142 // fixme complain about any still-held locks?
143 if (locks) _free_internal(locks);
/***********************************************************************
* Mutex checking
**********************************************************************/
151 __private_extern__ int
152 _mutex_lock_debug(mutex_t *lock, const char *name)
154 _objc_lock_list *locks = getLocks(YES);
156 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
157 if (hasLock(locks, lock, MUTEX)) {
158 _objc_fatal("deadlock: relocking mutex %s\n", name+1);
160 setLock(locks, lock, MUTEX);
163 return _mutex_lock_nodebug(lock);
166 __private_extern__ int
167 _mutex_try_lock_debug(mutex_t *lock, const char *name)
169 _objc_lock_list *locks = getLocks(YES);
171 // attempting to relock in try_lock is OK
172 int result = _mutex_try_lock_nodebug(lock);
174 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
176 setLock(locks, lock, MUTEX);
182 __private_extern__ int
183 _mutex_unlock_debug(mutex_t *lock, const char *name)
185 _objc_lock_list *locks = getLocks(NO);
187 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
188 if (!hasLock(locks, lock, MUTEX)) {
189 _objc_fatal("unlocking unowned mutex %s\n", name+1);
191 clearLock(locks, lock, MUTEX);
194 return _mutex_unlock_nodebug(lock);
197 __private_extern__ void
198 _mutex_assert_locked_debug(mutex_t *lock, const char *name)
200 _objc_lock_list *locks = getLocks(NO);
202 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
203 if (!hasLock(locks, lock, MUTEX)) {
204 _objc_fatal("mutex %s incorrectly not held\n",name+1);
210 __private_extern__ void
211 _mutex_assert_unlocked_debug(mutex_t *lock, const char *name)
213 _objc_lock_list *locks = getLocks(NO);
215 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
216 if (hasLock(locks, lock, MUTEX)) {
217 _objc_fatal("mutex %s incorrectly held\n", name+1);
/***********************************************************************
* Recursive mutex checking
**********************************************************************/
227 __private_extern__ int
228 _recursive_mutex_lock_debug(recursive_mutex_t *lock, const char *name)
230 _objc_lock_list *locks = getLocks(YES);
232 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
233 setLock(locks, lock, RECURSIVE);
236 return _recursive_mutex_lock_nodebug(lock);
239 __private_extern__ int
240 _recursive_mutex_try_lock_debug(recursive_mutex_t *lock, const char *name)
242 _objc_lock_list *locks = getLocks(YES);
244 int result = _recursive_mutex_try_lock_nodebug(lock);
246 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
248 setLock(locks, lock, RECURSIVE);
254 __private_extern__ int
255 _recursive_mutex_unlock_debug(recursive_mutex_t *lock, const char *name)
257 _objc_lock_list *locks = getLocks(NO);
259 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
260 if (!hasLock(locks, lock, RECURSIVE)) {
261 _objc_fatal("unlocking unowned recursive mutex %s\n", name+1);
263 clearLock(locks, lock, RECURSIVE);
266 return _recursive_mutex_unlock_nodebug(lock);
269 __private_extern__ void
270 _recursive_mutex_assert_locked_debug(recursive_mutex_t *lock, const char *name)
272 _objc_lock_list *locks = getLocks(NO);
274 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
275 if (!hasLock(locks, lock, RECURSIVE)) {
276 _objc_fatal("recursive mutex %s incorrectly not held\n",name+1);
282 __private_extern__ void
283 _recursive_mutex_assert_unlocked_debug(recursive_mutex_t *lock, const char *name)
285 _objc_lock_list *locks = getLocks(NO);
287 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
288 if (hasLock(locks, lock, RECURSIVE)) {
289 _objc_fatal("recursive mutex %s incorrectly held\n", name+1);
/***********************************************************************
* Monitor checking
**********************************************************************/
299 __private_extern__ int
300 _monitor_enter_debug(monitor_t *lock, const char *name)
302 _objc_lock_list *locks = getLocks(YES);
304 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
305 if (hasLock(locks, lock, MONITOR)) {
306 _objc_fatal("deadlock: relocking monitor %s\n", name+1);
308 setLock(locks, lock, MONITOR);
311 return _monitor_enter_nodebug(lock);
314 __private_extern__ int
315 _monitor_exit_debug(monitor_t *lock, const char *name)
317 _objc_lock_list *locks = getLocks(NO);
319 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
320 if (!hasLock(locks, lock, MONITOR)) {
321 _objc_fatal("unlocking unowned monitor%s\n", name+1);
323 clearLock(locks, lock, MONITOR);
326 return _monitor_exit_nodebug(lock);
329 __private_extern__ int
330 _monitor_wait_debug(monitor_t *lock, const char *name)
332 _objc_lock_list *locks = getLocks(NO);
334 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
335 if (!hasLock(locks, lock, MONITOR)) {
336 _objc_fatal("waiting in unowned monitor%s\n", name+1);
340 return _monitor_wait_nodebug(lock);
343 __private_extern__ void
344 _monitor_assert_locked_debug(monitor_t *lock, const char *name)
346 _objc_lock_list *locks = getLocks(NO);
348 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
349 if (!hasLock(locks, lock, MONITOR)) {
350 _objc_fatal("monitor %s incorrectly not held\n",name+1);
355 __private_extern__ void
356 _monitor_assert_unlocked_debug(monitor_t *lock, const char *name)
358 _objc_lock_list *locks = getLocks(NO);
360 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
361 if (hasLock(locks, lock, MONITOR)) {
362 _objc_fatal("monitor %s incorrectly held\n", name+1);
/***********************************************************************
* rwlock checking
**********************************************************************/
372 __private_extern__ void
373 _rwlock_read_debug(rwlock_t *lock, const char *name)
375 _objc_lock_list *locks = getLocks(YES);
377 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
378 if (hasLock(locks, lock, RDLOCK)) {
379 // Recursive rwlock read is bad (may deadlock vs pending writer)
380 _objc_fatal("recursive rwlock read %s\n", name+1);
382 if (hasLock(locks, lock, WRLOCK)) {
383 _objc_fatal("deadlock: read after write for rwlock %s\n", name+1);
385 setLock(locks, lock, RDLOCK);
388 _rwlock_read_nodebug(lock);
391 __private_extern__ int
392 _rwlock_try_read_debug(rwlock_t *lock, const char *name)
394 _objc_lock_list *locks = getLocks(YES);
396 // try-read when already reading is OK (won't deadlock against writer)
397 // try-read when already writing is OK (will fail)
398 int result = _rwlock_try_read_nodebug(lock);
400 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
402 setLock(locks, lock, RDLOCK);
408 __private_extern__ void
409 _rwlock_unlock_read_debug(rwlock_t *lock, const char *name)
411 _objc_lock_list *locks = getLocks(NO);
413 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
414 if (!hasLock(locks, lock, RDLOCK)) {
415 _objc_fatal("un-reading unowned rwlock %s\n", name+1);
417 clearLock(locks, lock, RDLOCK);
420 _rwlock_unlock_read_nodebug(lock);
423 __private_extern__ void
424 _rwlock_write_debug(rwlock_t *lock, const char *name)
426 _objc_lock_list *locks = getLocks(YES);
428 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
429 if (hasLock(locks, lock, RDLOCK)) {
430 // Lock promotion not allowed (may deadlock)
431 _objc_fatal("deadlock: write after read for rwlock %s\n", name+1);
433 if (hasLock(locks, lock, WRLOCK)) {
434 _objc_fatal("recursive rwlock write %s\n", name+1);
436 setLock(locks, lock, WRLOCK);
439 _rwlock_write_nodebug(lock);
443 __private_extern__ int
444 _rwlock_try_write_debug(rwlock_t *lock, const char *name)
446 _objc_lock_list *locks = getLocks(YES);
448 // try-write when already reading is OK (will fail)
449 // try-write when already writing is OK (will fail)
450 int result = _rwlock_try_write_nodebug(lock);
452 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
454 setLock(locks, lock, WRLOCK);
460 __private_extern__ void
461 _rwlock_unlock_write_debug(rwlock_t *lock, const char *name)
463 _objc_lock_list *locks = getLocks(NO);
465 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
466 if (!hasLock(locks, lock, WRLOCK)) {
467 _objc_fatal("un-writing unowned rwlock %s\n", name+1);
469 clearLock(locks, lock, WRLOCK);
472 _rwlock_unlock_write_nodebug(lock);
476 __private_extern__ void
477 _rwlock_assert_reading_debug(rwlock_t *lock, const char *name)
479 _objc_lock_list *locks = getLocks(NO);
481 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
482 if (!hasLock(locks, lock, RDLOCK)) {
483 _objc_fatal("rwlock %s incorrectly not reading\n", name+1);
488 __private_extern__ void
489 _rwlock_assert_writing_debug(rwlock_t *lock, const char *name)
491 _objc_lock_list *locks = getLocks(NO);
493 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
494 if (!hasLock(locks, lock, WRLOCK)) {
495 _objc_fatal("rwlock %s incorrectly not writing\n", name+1);
500 __private_extern__ void
501 _rwlock_assert_locked_debug(rwlock_t *lock, const char *name)
503 _objc_lock_list *locks = getLocks(NO);
505 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
506 if (!hasLock(locks, lock, RDLOCK) && !hasLock(locks, lock, WRLOCK)) {
507 _objc_fatal("rwlock %s incorrectly neither reading nor writing\n",
513 __private_extern__ void
514 _rwlock_assert_unlocked_debug(rwlock_t *lock, const char *name)
516 _objc_lock_list *locks = getLocks(NO);
518 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
519 if (hasLock(locks, lock, RDLOCK) || hasLock(locks, lock, WRLOCK)) {
520 _objc_fatal("rwlock %s incorrectly not unlocked\n", name+1);