/* Source: apple/objc4 (release objc4-437.1) — runtime/objc-lockdebug.m
   (retrieved via git.saurik.com gitweb mirror) */
1 /*
2 * Copyright (c) 2007 Apple Inc. All Rights Reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 /***********************************************************************
25 * objc-lock.m
26 * Error-checking locks for debugging.
27 **********************************************************************/
28
29 #include "objc-private.h"
30
31 #if !defined(NDEBUG) && !TARGET_OS_WIN32
32
33 /***********************************************************************
34 * Recording - per-thread list of mutexes and monitors held
35 **********************************************************************/
36
// One lock held by the current thread.
typedef struct {
    void *l;    // the lock itself
    int k;      // the kind of lock it is (MUTEX, MONITOR, etc)
    int i;      // the lock's nest count
} lockcount;

// Lock kinds recorded in lockcount.k.
// (enum instead of #define: debugger-visible, scoped, same values.)
enum {
    MUTEX     = 1,
    MONITOR   = 2,
    RDLOCK    = 3,
    WRLOCK    = 4,
    RECURSIVE = 5
};

// Growable per-thread list of held locks. Allocated with room for
// `allocated` trailing entries; `used` of them are live.
typedef struct _objc_lock_list {
    int allocated;
    int used;
    lockcount list[];   // C99 flexible array member (was list[0], a GNU extension)
} _objc_lock_list;
54
/***********************************************************************
* getLocks
* Returns the calling thread's list of held locks (per-thread state).
* create == NO:  may return NULL if the thread has no list yet.
* create == YES: allocates the list if needed, and doubles its capacity
*                when full, so the caller can always record one more lock.
**********************************************************************/
static struct _objc_lock_list *
getLocks(BOOL create)
{
    _objc_pthread_data *data;
    _objc_lock_list *locks;

    // Fetch (or create) this thread's objc pthread data.
    // NOTE(review): assumes _objc_fetch_pthread_data(YES) never returns
    // NULL — otherwise data->lockList below would crash. Confirm upstream.
    data = _objc_fetch_pthread_data(create);
    if (!data && !create) return NULL;

    locks = data->lockList;
    if (!locks) {
        if (!create) {
            return NULL;
        } else {
            // First use on this thread: start with 16 zero-filled entries.
            locks = _calloc_internal(1, sizeof(_objc_lock_list) + sizeof(lockcount) * 16);
            locks->allocated = 16;
            locks->used = 0;
            data->lockList = locks;
        }
    }

    if (locks->allocated == locks->used) {
        if (!create) {
            // Full, but the caller won't append; return as-is.
            return locks;
        } else {
            // Grow by doubling: allocate new list, copy live entries,
            // free the old list.
            data->lockList = _calloc_internal(1, sizeof(_objc_lock_list) + 2 * locks->used * sizeof(lockcount));
            data->lockList->used = locks->used;
            data->lockList->allocated = locks->used * 2;
            memcpy(data->lockList->list, locks->list, locks->used * sizeof(lockcount));
            _free_internal(locks);
            locks = data->lockList;
        }
    }

    return locks;
}
91
92 static BOOL
93 hasLock(_objc_lock_list *locks, void *lock, int kind)
94 {
95 int i;
96 if (!locks) return NO;
97
98 for (i = 0; i < locks->used; i++) {
99 if (locks->list[i].l == lock && locks->list[i].k == kind) return YES;
100 }
101 return NO;
102 }
103
104
105 static void
106 setLock(_objc_lock_list *locks, void *lock, int kind)
107 {
108 int i;
109 for (i = 0; i < locks->used; i++) {
110 if (locks->list[i].l == lock && locks->list[i].k == kind) {
111 locks->list[i].i++;
112 return;
113 }
114 }
115
116 locks->list[locks->used].l = lock;
117 locks->list[locks->used].i = 1;
118 locks->list[locks->used].k = kind;
119 locks->used++;
120 }
121
122 static void
123 clearLock(_objc_lock_list *locks, void *lock, int kind)
124 {
125 int i;
126 for (i = 0; i < locks->used; i++) {
127 if (locks->list[i].l == lock && locks->list[i].k == kind) {
128 if (--locks->list[i].i == 0) {
129 locks->list[i].l = NULL;
130 locks->list[i] = locks->list[--locks->used];
131 }
132 return;
133 }
134 }
135
136 _objc_fatal("lock not found!");
137 }
138
139 __private_extern__ void
140 _destroyLockList(struct _objc_lock_list *locks)
141 {
142 // fixme complain about any still-held locks?
143 if (locks) _free_internal(locks);
144 }
145
146
147 /***********************************************************************
148 * Mutex checking
149 **********************************************************************/
150
151 __private_extern__ int
152 _mutex_lock_debug(mutex_t *lock, const char *name)
153 {
154 _objc_lock_list *locks = getLocks(YES);
155
156 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
157 if (hasLock(locks, lock, MUTEX)) {
158 _objc_fatal("deadlock: relocking mutex %s\n", name+1);
159 }
160 setLock(locks, lock, MUTEX);
161 }
162
163 return _mutex_lock_nodebug(lock);
164 }
165
166 __private_extern__ int
167 _mutex_try_lock_debug(mutex_t *lock, const char *name)
168 {
169 _objc_lock_list *locks = getLocks(YES);
170
171 // attempting to relock in try_lock is OK
172 int result = _mutex_try_lock_nodebug(lock);
173
174 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
175 if (result) {
176 setLock(locks, lock, MUTEX);
177 }
178 }
179 return result;
180 }
181
182 __private_extern__ int
183 _mutex_unlock_debug(mutex_t *lock, const char *name)
184 {
185 _objc_lock_list *locks = getLocks(NO);
186
187 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
188 if (!hasLock(locks, lock, MUTEX)) {
189 _objc_fatal("unlocking unowned mutex %s\n", name+1);
190 }
191 clearLock(locks, lock, MUTEX);
192 }
193
194 return _mutex_unlock_nodebug(lock);
195 }
196
197 __private_extern__ void
198 _mutex_assert_locked_debug(mutex_t *lock, const char *name)
199 {
200 _objc_lock_list *locks = getLocks(NO);
201
202 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
203 if (!hasLock(locks, lock, MUTEX)) {
204 _objc_fatal("mutex %s incorrectly not held\n",name+1);
205 }
206 }
207 }
208
209
210 __private_extern__ void
211 _mutex_assert_unlocked_debug(mutex_t *lock, const char *name)
212 {
213 _objc_lock_list *locks = getLocks(NO);
214
215 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
216 if (hasLock(locks, lock, MUTEX)) {
217 _objc_fatal("mutex %s incorrectly held\n", name+1);
218 }
219 }
220 }
221
222
223 /***********************************************************************
224 * Recursive mutex checking
225 **********************************************************************/
226
227 __private_extern__ int
228 _recursive_mutex_lock_debug(recursive_mutex_t *lock, const char *name)
229 {
230 _objc_lock_list *locks = getLocks(YES);
231
232 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
233 setLock(locks, lock, RECURSIVE);
234 }
235
236 return _recursive_mutex_lock_nodebug(lock);
237 }
238
239 __private_extern__ int
240 _recursive_mutex_try_lock_debug(recursive_mutex_t *lock, const char *name)
241 {
242 _objc_lock_list *locks = getLocks(YES);
243
244 int result = _recursive_mutex_try_lock_nodebug(lock);
245
246 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
247 if (result) {
248 setLock(locks, lock, RECURSIVE);
249 }
250 }
251 return result;
252 }
253
254 __private_extern__ int
255 _recursive_mutex_unlock_debug(recursive_mutex_t *lock, const char *name)
256 {
257 _objc_lock_list *locks = getLocks(NO);
258
259 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
260 if (!hasLock(locks, lock, RECURSIVE)) {
261 _objc_fatal("unlocking unowned recursive mutex %s\n", name+1);
262 }
263 clearLock(locks, lock, RECURSIVE);
264 }
265
266 return _recursive_mutex_unlock_nodebug(lock);
267 }
268
269 __private_extern__ void
270 _recursive_mutex_assert_locked_debug(recursive_mutex_t *lock, const char *name)
271 {
272 _objc_lock_list *locks = getLocks(NO);
273
274 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
275 if (!hasLock(locks, lock, RECURSIVE)) {
276 _objc_fatal("recursive mutex %s incorrectly not held\n",name+1);
277 }
278 }
279 }
280
281
282 __private_extern__ void
283 _recursive_mutex_assert_unlocked_debug(recursive_mutex_t *lock, const char *name)
284 {
285 _objc_lock_list *locks = getLocks(NO);
286
287 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
288 if (hasLock(locks, lock, RECURSIVE)) {
289 _objc_fatal("recursive mutex %s incorrectly held\n", name+1);
290 }
291 }
292 }
293
294
295 /***********************************************************************
296 * Monitor checking
297 **********************************************************************/
298
299 __private_extern__ int
300 _monitor_enter_debug(monitor_t *lock, const char *name)
301 {
302 _objc_lock_list *locks = getLocks(YES);
303
304 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
305 if (hasLock(locks, lock, MONITOR)) {
306 _objc_fatal("deadlock: relocking monitor %s\n", name+1);
307 }
308 setLock(locks, lock, MONITOR);
309 }
310
311 return _monitor_enter_nodebug(lock);
312 }
313
314 __private_extern__ int
315 _monitor_exit_debug(monitor_t *lock, const char *name)
316 {
317 _objc_lock_list *locks = getLocks(NO);
318
319 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
320 if (!hasLock(locks, lock, MONITOR)) {
321 _objc_fatal("unlocking unowned monitor%s\n", name+1);
322 }
323 clearLock(locks, lock, MONITOR);
324 }
325
326 return _monitor_exit_nodebug(lock);
327 }
328
329 __private_extern__ int
330 _monitor_wait_debug(monitor_t *lock, const char *name)
331 {
332 _objc_lock_list *locks = getLocks(NO);
333
334 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
335 if (!hasLock(locks, lock, MONITOR)) {
336 _objc_fatal("waiting in unowned monitor%s\n", name+1);
337 }
338 }
339
340 return _monitor_wait_nodebug(lock);
341 }
342
343 __private_extern__ void
344 _monitor_assert_locked_debug(monitor_t *lock, const char *name)
345 {
346 _objc_lock_list *locks = getLocks(NO);
347
348 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
349 if (!hasLock(locks, lock, MONITOR)) {
350 _objc_fatal("monitor %s incorrectly not held\n",name+1);
351 }
352 }
353 }
354
355 __private_extern__ void
356 _monitor_assert_unlocked_debug(monitor_t *lock, const char *name)
357 {
358 _objc_lock_list *locks = getLocks(NO);
359
360 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
361 if (hasLock(locks, lock, MONITOR)) {
362 _objc_fatal("monitor %s incorrectly held\n", name+1);
363 }
364 }
365 }
366
367
368 /***********************************************************************
369 * rwlock checking
370 **********************************************************************/
371
372 __private_extern__ void
373 _rwlock_read_debug(rwlock_t *lock, const char *name)
374 {
375 _objc_lock_list *locks = getLocks(YES);
376
377 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
378 if (hasLock(locks, lock, RDLOCK)) {
379 // Recursive rwlock read is bad (may deadlock vs pending writer)
380 _objc_fatal("recursive rwlock read %s\n", name+1);
381 }
382 if (hasLock(locks, lock, WRLOCK)) {
383 _objc_fatal("deadlock: read after write for rwlock %s\n", name+1);
384 }
385 setLock(locks, lock, RDLOCK);
386 }
387
388 _rwlock_read_nodebug(lock);
389 }
390
391 __private_extern__ int
392 _rwlock_try_read_debug(rwlock_t *lock, const char *name)
393 {
394 _objc_lock_list *locks = getLocks(YES);
395
396 // try-read when already reading is OK (won't deadlock against writer)
397 // try-read when already writing is OK (will fail)
398 int result = _rwlock_try_read_nodebug(lock);
399
400 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
401 if (result) {
402 setLock(locks, lock, RDLOCK);
403 }
404 }
405 return result;
406 }
407
408 __private_extern__ void
409 _rwlock_unlock_read_debug(rwlock_t *lock, const char *name)
410 {
411 _objc_lock_list *locks = getLocks(NO);
412
413 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
414 if (!hasLock(locks, lock, RDLOCK)) {
415 _objc_fatal("un-reading unowned rwlock %s\n", name+1);
416 }
417 clearLock(locks, lock, RDLOCK);
418 }
419
420 _rwlock_unlock_read_nodebug(lock);
421 }
422
423 __private_extern__ void
424 _rwlock_write_debug(rwlock_t *lock, const char *name)
425 {
426 _objc_lock_list *locks = getLocks(YES);
427
428 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
429 if (hasLock(locks, lock, RDLOCK)) {
430 // Lock promotion not allowed (may deadlock)
431 _objc_fatal("deadlock: write after read for rwlock %s\n", name+1);
432 }
433 if (hasLock(locks, lock, WRLOCK)) {
434 _objc_fatal("recursive rwlock write %s\n", name+1);
435 }
436 setLock(locks, lock, WRLOCK);
437 }
438
439 _rwlock_write_nodebug(lock);
440 }
441
442
443 __private_extern__ int
444 _rwlock_try_write_debug(rwlock_t *lock, const char *name)
445 {
446 _objc_lock_list *locks = getLocks(YES);
447
448 // try-write when already reading is OK (will fail)
449 // try-write when already writing is OK (will fail)
450 int result = _rwlock_try_write_nodebug(lock);
451
452 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
453 if (result) {
454 setLock(locks, lock, WRLOCK);
455 }
456 }
457 return result;
458 }
459
460 __private_extern__ void
461 _rwlock_unlock_write_debug(rwlock_t *lock, const char *name)
462 {
463 _objc_lock_list *locks = getLocks(NO);
464
465 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
466 if (!hasLock(locks, lock, WRLOCK)) {
467 _objc_fatal("un-writing unowned rwlock %s\n", name+1);
468 }
469 clearLock(locks, lock, WRLOCK);
470 }
471
472 _rwlock_unlock_write_nodebug(lock);
473 }
474
475
476 __private_extern__ void
477 _rwlock_assert_reading_debug(rwlock_t *lock, const char *name)
478 {
479 _objc_lock_list *locks = getLocks(NO);
480
481 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
482 if (!hasLock(locks, lock, RDLOCK)) {
483 _objc_fatal("rwlock %s incorrectly not reading\n", name+1);
484 }
485 }
486 }
487
488 __private_extern__ void
489 _rwlock_assert_writing_debug(rwlock_t *lock, const char *name)
490 {
491 _objc_lock_list *locks = getLocks(NO);
492
493 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
494 if (!hasLock(locks, lock, WRLOCK)) {
495 _objc_fatal("rwlock %s incorrectly not writing\n", name+1);
496 }
497 }
498 }
499
500 __private_extern__ void
501 _rwlock_assert_locked_debug(rwlock_t *lock, const char *name)
502 {
503 _objc_lock_list *locks = getLocks(NO);
504
505 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
506 if (!hasLock(locks, lock, RDLOCK) && !hasLock(locks, lock, WRLOCK)) {
507 _objc_fatal("rwlock %s incorrectly neither reading nor writing\n",
508 name+1);
509 }
510 }
511 }
512
513 __private_extern__ void
514 _rwlock_assert_unlocked_debug(rwlock_t *lock, const char *name)
515 {
516 _objc_lock_list *locks = getLocks(NO);
517
518 if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
519 if (hasLock(locks, lock, RDLOCK) || hasLock(locks, lock, WRLOCK)) {
520 _objc_fatal("rwlock %s incorrectly not unlocked\n", name+1);
521 }
522 }
523 }
524
525
526 #endif