]> git.saurik.com Git - apple/objc4.git/blob - runtime/objc-lockdebug.mm
objc4-532.tar.gz
[apple/objc4.git] / runtime / objc-lockdebug.mm
1 /*
2 * Copyright (c) 2007 Apple Inc. All Rights Reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
/***********************************************************************
* objc-lockdebug.mm
* Error-checking locks for debugging.
**********************************************************************/
28
29 #include "objc-private.h"
30
31 #if !defined(NDEBUG) && !TARGET_OS_WIN32
32
33 /***********************************************************************
34 * Recording - per-thread list of mutexes and monitors held
35 **********************************************************************/
36
// One held lock: its identity, its kind, and its recursion depth.
typedef struct {
    void *l;    // the lock itself
    int k;      // the kind of lock it is (MUTEX, MONITOR, etc)
    int i;      // the lock's nest count
} lockcount;

#define MUTEX 1
#define MONITOR 2
#define RDLOCK 3
#define WRLOCK 4
#define RECURSIVE 5

// Per-thread table of locks currently held.
// `list` is a C99 flexible array member (was the non-standard
// zero-length array `list[0]`); allocate with
//   sizeof(_objc_lock_list) + n * sizeof(lockcount).
typedef struct _objc_lock_list {
    int allocated;      // capacity of list[]
    int used;           // number of valid entries in list[]
    lockcount list[];   // trailing storage for `allocated` entries
} _objc_lock_list;
54
// TLS key under which each thread's _objc_lock_list is stored.
static tls_key_t lock_tls;

// TLS destructor: frees the dying thread's lock list.
// `value` is that thread's _objc_lock_list (may be NULL).
static void
destroyLocks(void *value)
{
    _objc_lock_list *locks = (_objc_lock_list *)value;
    // fixme complain about any still-held locks?
    if (locks) _free_internal(locks);
}
64
// Return the calling thread's lock list.
// create == YES: allocate the list on first use and grow it when full,
//   so the result always has room for at least one more entry.
// create == NO: return the list as-is; may be NULL, may be full.
static struct _objc_lock_list *
getLocks(BOOL create)
{
    _objc_lock_list *locks;

    // Use a dedicated tls key to prevent differences vs non-debug in
    // usage of objc's other tls keys (required for some unit tests).
    INIT_ONCE_PTR(lock_tls, tls_create(&destroyLocks), (void)0);

    locks = (_objc_lock_list *)tls_get(lock_tls);
    if (!locks) {
        if (!create) {
            return NULL;
        } else {
            // First use on this thread: start with room for 16 entries.
            locks = (_objc_lock_list *)_calloc_internal(1, sizeof(_objc_lock_list) + sizeof(lockcount) * 16);
            locks->allocated = 16;
            locks->used = 0;
            tls_set(lock_tls, locks);
        }
    }

    if (locks->allocated == locks->used) {
        if (!create) {
            return locks;
        } else {
            // Full: allocate a list with double the capacity, copy the
            // entries over, publish it in TLS, then free the old list.
            _objc_lock_list *oldlocks = locks;
            locks = (_objc_lock_list *)_calloc_internal(1, sizeof(_objc_lock_list) + 2 * oldlocks->used * sizeof(lockcount));
            locks->used = oldlocks->used;
            locks->allocated = oldlocks->used * 2;
            memcpy(locks->list, oldlocks->list, locks->used * sizeof(lockcount));
            tls_set(lock_tls, locks);
            _free_internal(oldlocks);
        }
    }

    return locks;
}
102
103 static BOOL
104 hasLock(_objc_lock_list *locks, void *lock, int kind)
105 {
106 int i;
107 if (!locks) return NO;
108
109 for (i = 0; i < locks->used; i++) {
110 if (locks->list[i].l == lock && locks->list[i].k == kind) return YES;
111 }
112 return NO;
113 }
114
115
116 static void
117 setLock(_objc_lock_list *locks, void *lock, int kind)
118 {
119 int i;
120 for (i = 0; i < locks->used; i++) {
121 if (locks->list[i].l == lock && locks->list[i].k == kind) {
122 locks->list[i].i++;
123 return;
124 }
125 }
126
127 locks->list[locks->used].l = lock;
128 locks->list[locks->used].i = 1;
129 locks->list[locks->used].k = kind;
130 locks->used++;
131 }
132
// Record one release of `lock`. Decrements the nest count; when it
// reaches zero the entry is removed by moving the last entry into its
// slot (entry order is not preserved). Fatal if the lock is not held.
static void
clearLock(_objc_lock_list *locks, void *lock, int kind)
{
    int i;
    for (i = 0; i < locks->used; i++) {
        if (locks->list[i].l == lock && locks->list[i].k == kind) {
            if (--locks->list[i].i == 0) {
                locks->list[i].l = NULL;
                // Swap-remove: copy the last entry over this slot.
                locks->list[i] = locks->list[--locks->used];
            }
            return;
        }
    }

    _objc_fatal("lock not found!");
}
149
150
151 /***********************************************************************
152 * Mutex checking
153 **********************************************************************/
154
155 int
156 _mutex_lock_debug(mutex_t *lock, const char *name)
157 {
158 _objc_lock_list *locks = getLocks(YES);
159
160 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
161 if (hasLock(locks, lock, MUTEX)) {
162 _objc_fatal("deadlock: relocking mutex %s\n", name+1);
163 }
164 setLock(locks, lock, MUTEX);
165 }
166
167 return _mutex_lock_nodebug(lock);
168 }
169
170 int
171 _mutex_try_lock_debug(mutex_t *lock, const char *name)
172 {
173 _objc_lock_list *locks = getLocks(YES);
174
175 // attempting to relock in try_lock is OK
176 int result = _mutex_try_lock_nodebug(lock);
177
178 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
179 if (result) {
180 setLock(locks, lock, MUTEX);
181 }
182 }
183 return result;
184 }
185
186 int
187 _mutex_unlock_debug(mutex_t *lock, const char *name)
188 {
189 _objc_lock_list *locks = getLocks(NO);
190
191 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
192 if (!hasLock(locks, lock, MUTEX)) {
193 _objc_fatal("unlocking unowned mutex %s\n", name+1);
194 }
195 clearLock(locks, lock, MUTEX);
196 }
197
198 return _mutex_unlock_nodebug(lock);
199 }
200
201 void
202 _mutex_assert_locked_debug(mutex_t *lock, const char *name)
203 {
204 _objc_lock_list *locks = getLocks(NO);
205
206 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
207 if (!hasLock(locks, lock, MUTEX)) {
208 _objc_fatal("mutex %s incorrectly not held\n",name+1);
209 }
210 }
211 }
212
213
214 void
215 _mutex_assert_unlocked_debug(mutex_t *lock, const char *name)
216 {
217 _objc_lock_list *locks = getLocks(NO);
218
219 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
220 if (hasLock(locks, lock, MUTEX)) {
221 _objc_fatal("mutex %s incorrectly held\n", name+1);
222 }
223 }
224 }
225
226
227 /***********************************************************************
228 * Recursive mutex checking
229 **********************************************************************/
230
231 int
232 _recursive_mutex_lock_debug(recursive_mutex_t *lock, const char *name)
233 {
234 _objc_lock_list *locks = getLocks(YES);
235
236 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
237 setLock(locks, lock, RECURSIVE);
238 }
239
240 return _recursive_mutex_lock_nodebug(lock);
241 }
242
243 int
244 _recursive_mutex_try_lock_debug(recursive_mutex_t *lock, const char *name)
245 {
246 _objc_lock_list *locks = getLocks(YES);
247
248 int result = _recursive_mutex_try_lock_nodebug(lock);
249
250 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
251 if (result) {
252 setLock(locks, lock, RECURSIVE);
253 }
254 }
255 return result;
256 }
257
258 int
259 _recursive_mutex_unlock_debug(recursive_mutex_t *lock, const char *name)
260 {
261 _objc_lock_list *locks = getLocks(NO);
262
263 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
264 if (!hasLock(locks, lock, RECURSIVE)) {
265 _objc_fatal("unlocking unowned recursive mutex %s\n", name+1);
266 }
267 clearLock(locks, lock, RECURSIVE);
268 }
269
270 return _recursive_mutex_unlock_nodebug(lock);
271 }
272
273 void
274 _recursive_mutex_assert_locked_debug(recursive_mutex_t *lock, const char *name)
275 {
276 _objc_lock_list *locks = getLocks(NO);
277
278 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
279 if (!hasLock(locks, lock, RECURSIVE)) {
280 _objc_fatal("recursive mutex %s incorrectly not held\n",name+1);
281 }
282 }
283 }
284
285
286 void
287 _recursive_mutex_assert_unlocked_debug(recursive_mutex_t *lock, const char *name)
288 {
289 _objc_lock_list *locks = getLocks(NO);
290
291 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
292 if (hasLock(locks, lock, RECURSIVE)) {
293 _objc_fatal("recursive mutex %s incorrectly held\n", name+1);
294 }
295 }
296 }
297
298
299 /***********************************************************************
300 * Monitor checking
301 **********************************************************************/
302
303 int
304 _monitor_enter_debug(monitor_t *lock, const char *name)
305 {
306 _objc_lock_list *locks = getLocks(YES);
307
308 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
309 if (hasLock(locks, lock, MONITOR)) {
310 _objc_fatal("deadlock: relocking monitor %s\n", name+1);
311 }
312 setLock(locks, lock, MONITOR);
313 }
314
315 return _monitor_enter_nodebug(lock);
316 }
317
318 int
319 _monitor_exit_debug(monitor_t *lock, const char *name)
320 {
321 _objc_lock_list *locks = getLocks(NO);
322
323 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
324 if (!hasLock(locks, lock, MONITOR)) {
325 _objc_fatal("unlocking unowned monitor%s\n", name+1);
326 }
327 clearLock(locks, lock, MONITOR);
328 }
329
330 return _monitor_exit_nodebug(lock);
331 }
332
333 int
334 _monitor_wait_debug(monitor_t *lock, const char *name)
335 {
336 _objc_lock_list *locks = getLocks(NO);
337
338 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
339 if (!hasLock(locks, lock, MONITOR)) {
340 _objc_fatal("waiting in unowned monitor%s\n", name+1);
341 }
342 }
343
344 return _monitor_wait_nodebug(lock);
345 }
346
347 void
348 _monitor_assert_locked_debug(monitor_t *lock, const char *name)
349 {
350 _objc_lock_list *locks = getLocks(NO);
351
352 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
353 if (!hasLock(locks, lock, MONITOR)) {
354 _objc_fatal("monitor %s incorrectly not held\n",name+1);
355 }
356 }
357 }
358
359 void
360 _monitor_assert_unlocked_debug(monitor_t *lock, const char *name)
361 {
362 _objc_lock_list *locks = getLocks(NO);
363
364 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
365 if (hasLock(locks, lock, MONITOR)) {
366 _objc_fatal("monitor %s incorrectly held\n", name+1);
367 }
368 }
369 }
370
371
372 /***********************************************************************
373 * rwlock checking
374 **********************************************************************/
375
376 void
377 _rwlock_read_debug(rwlock_t *lock, const char *name)
378 {
379 _objc_lock_list *locks = getLocks(YES);
380
381 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
382 if (hasLock(locks, lock, RDLOCK)) {
383 // Recursive rwlock read is bad (may deadlock vs pending writer)
384 _objc_fatal("recursive rwlock read %s\n", name+1);
385 }
386 if (hasLock(locks, lock, WRLOCK)) {
387 _objc_fatal("deadlock: read after write for rwlock %s\n", name+1);
388 }
389 setLock(locks, lock, RDLOCK);
390 }
391
392 _rwlock_read_nodebug(lock);
393 }
394
395 int
396 _rwlock_try_read_debug(rwlock_t *lock, const char *name)
397 {
398 _objc_lock_list *locks = getLocks(YES);
399
400 // try-read when already reading is OK (won't deadlock against writer)
401 // try-read when already writing is OK (will fail)
402 int result = _rwlock_try_read_nodebug(lock);
403
404 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
405 if (result) {
406 setLock(locks, lock, RDLOCK);
407 }
408 }
409 return result;
410 }
411
412 void
413 _rwlock_unlock_read_debug(rwlock_t *lock, const char *name)
414 {
415 _objc_lock_list *locks = getLocks(NO);
416
417 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
418 if (!hasLock(locks, lock, RDLOCK)) {
419 _objc_fatal("un-reading unowned rwlock %s\n", name+1);
420 }
421 clearLock(locks, lock, RDLOCK);
422 }
423
424 _rwlock_unlock_read_nodebug(lock);
425 }
426
427 void
428 _rwlock_write_debug(rwlock_t *lock, const char *name)
429 {
430 _objc_lock_list *locks = getLocks(YES);
431
432 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
433 if (hasLock(locks, lock, RDLOCK)) {
434 // Lock promotion not allowed (may deadlock)
435 _objc_fatal("deadlock: write after read for rwlock %s\n", name+1);
436 }
437 if (hasLock(locks, lock, WRLOCK)) {
438 _objc_fatal("recursive rwlock write %s\n", name+1);
439 }
440 setLock(locks, lock, WRLOCK);
441 }
442
443 _rwlock_write_nodebug(lock);
444 }
445
446
447 int
448 _rwlock_try_write_debug(rwlock_t *lock, const char *name)
449 {
450 _objc_lock_list *locks = getLocks(YES);
451
452 // try-write when already reading is OK (will fail)
453 // try-write when already writing is OK (will fail)
454 int result = _rwlock_try_write_nodebug(lock);
455
456 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
457 if (result) {
458 setLock(locks, lock, WRLOCK);
459 }
460 }
461 return result;
462 }
463
464 void
465 _rwlock_unlock_write_debug(rwlock_t *lock, const char *name)
466 {
467 _objc_lock_list *locks = getLocks(NO);
468
469 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
470 if (!hasLock(locks, lock, WRLOCK)) {
471 _objc_fatal("un-writing unowned rwlock %s\n", name+1);
472 }
473 clearLock(locks, lock, WRLOCK);
474 }
475
476 _rwlock_unlock_write_nodebug(lock);
477 }
478
479
480 void
481 _rwlock_assert_reading_debug(rwlock_t *lock, const char *name)
482 {
483 _objc_lock_list *locks = getLocks(NO);
484
485 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
486 if (!hasLock(locks, lock, RDLOCK)) {
487 _objc_fatal("rwlock %s incorrectly not reading\n", name+1);
488 }
489 }
490 }
491
492 void
493 _rwlock_assert_writing_debug(rwlock_t *lock, const char *name)
494 {
495 _objc_lock_list *locks = getLocks(NO);
496
497 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
498 if (!hasLock(locks, lock, WRLOCK)) {
499 _objc_fatal("rwlock %s incorrectly not writing\n", name+1);
500 }
501 }
502 }
503
504 void
505 _rwlock_assert_locked_debug(rwlock_t *lock, const char *name)
506 {
507 _objc_lock_list *locks = getLocks(NO);
508
509 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
510 if (!hasLock(locks, lock, RDLOCK) && !hasLock(locks, lock, WRLOCK)) {
511 _objc_fatal("rwlock %s incorrectly neither reading nor writing\n",
512 name+1);
513 }
514 }
515 }
516
517 void
518 _rwlock_assert_unlocked_debug(rwlock_t *lock, const char *name)
519 {
520 _objc_lock_list *locks = getLocks(NO);
521
522 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
523 if (hasLock(locks, lock, RDLOCK) || hasLock(locks, lock, WRLOCK)) {
524 _objc_fatal("rwlock %s incorrectly not unlocked\n", name+1);
525 }
526 }
527 }
528
529
530 #endif