/*
 * Copyright (c) 2007 Apple Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/***********************************************************************
* objc-lockdebug.mm
* Error-checking locks for debugging.
**********************************************************************/

#include "objc-private.h"

#if DEBUG && !TARGET_OS_WIN32

/***********************************************************************
* Recording - per-thread list of mutexes and monitors held
**********************************************************************/

typedef struct {
    void *l;  // the lock itself
    int k;    // the kind of lock it is (MUTEX, MONITOR, etc)
    int i;    // the lock's nest count
} lockcount;

#define MUTEX 1
#define MONITOR 2
#define RDLOCK 3
#define WRLOCK 4
#define RECURSIVE 5

typedef struct _objc_lock_list {
    int allocated;
    int used;
    lockcount list[0];
} _objc_lock_list;
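
// Layout sketch: the lockcount entries live in a variable-length trailer
// after the struct header, so getLocks() below allocates
// sizeof(_objc_lock_list) + allocated * sizeof(lockcount) bytes.
// For example, a thread holding mutex M once and recursive mutex R twice
// would record:
//     used = 2
//     list[0] = { &M, MUTEX,     1 }
//     list[1] = { &R, RECURSIVE, 2 }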

static tls_key_t lock_tls;

static void
destroyLocks(void *value)
{
    _objc_lock_list *locks = (_objc_lock_list *)value;
    // fixme complain about any still-held locks?
    if (locks) free(locks);
}

static struct _objc_lock_list *
getLocks(BOOL create)
{
    _objc_lock_list *locks;

    // Use a dedicated tls key to prevent differences vs non-debug in
    // usage of objc's other tls keys (required for some unit tests).
    INIT_ONCE_PTR(lock_tls, tls_create(&destroyLocks), (void)0);

    locks = (_objc_lock_list *)tls_get(lock_tls);
    if (!locks) {
        if (!create) {
            return NULL;
        } else {
            locks = (_objc_lock_list *)calloc(1, sizeof(_objc_lock_list) + sizeof(lockcount) * 16);
            locks->allocated = 16;
            locks->used = 0;
            tls_set(lock_tls, locks);
        }
    }

    if (locks->allocated == locks->used) {
        if (!create) {
            return locks;
        } else {
            _objc_lock_list *oldlocks = locks;
            locks = (_objc_lock_list *)calloc(1, sizeof(_objc_lock_list) + 2 * oldlocks->used * sizeof(lockcount));
            locks->used = oldlocks->used;
            locks->allocated = oldlocks->used * 2;
            memcpy(locks->list, oldlocks->list, locks->used * sizeof(lockcount));
            tls_set(lock_tls, locks);
            free(oldlocks);
        }
    }

    return locks;
}
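
// Growth sketch: the list starts with 16 slots and doubles whenever it
// fills, so recording the Nth simultaneously-held lock is amortized O(1).
// Unlock and assert paths pass create=NO; the NULL they may get back is
// tolerated by hasLock(), which treats it as "no locks held".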

static BOOL
hasLock(_objc_lock_list *locks, void *lock, int kind)
{
    int i;
    if (!locks) return NO;

    for (i = 0; i < locks->used; i++) {
        if (locks->list[i].l == lock && locks->list[i].k == kind) return YES;
    }
    return NO;
}


static void
setLock(_objc_lock_list *locks, void *lock, int kind)
{
    int i;
    for (i = 0; i < locks->used; i++) {
        if (locks->list[i].l == lock && locks->list[i].k == kind) {
            locks->list[i].i++;
            return;
        }
    }

    locks->list[locks->used].l = lock;
    locks->list[locks->used].i = 1;
    locks->list[locks->used].k = kind;
    locks->used++;
}

static void
clearLock(_objc_lock_list *locks, void *lock, int kind)
{
    int i;
    for (i = 0; i < locks->used; i++) {
        if (locks->list[i].l == lock && locks->list[i].k == kind) {
            if (--locks->list[i].i == 0) {
                locks->list[i].l = NULL;
                locks->list[i] = locks->list[--locks->used];
            }
            return;
        }
    }

    _objc_fatal("lock not found!");
}
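
// Removal note: when an entry's nest count reaches zero, clearLock()
// overwrites it with the last element, keeping removal O(1). The list is
// therefore unordered, which is fine because hasLock() scans linearly.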


/***********************************************************************
* Mutex checking
**********************************************************************/

#if !TARGET_OS_SIMULATOR
// Non-simulator platforms have lock debugging built into os_unfair_lock.


void
lockdebug_mutex_lock(mutex_t *lock)
{
    // empty
}

void
lockdebug_mutex_unlock(mutex_t *lock)
{
    // empty
}

void
lockdebug_mutex_assert_locked(mutex_t *lock)
{
    os_unfair_lock_assert_owner((os_unfair_lock *)lock);
}

void
lockdebug_mutex_assert_unlocked(mutex_t *lock)
{
    os_unfair_lock_assert_not_owner((os_unfair_lock *)lock);
}
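
// Sketch of the built-in checking (assumed os_unfair_lock behavior, not
// from this file): the lock word records its owner, so the assertions
// above can trap in place:
//
//     os_unfair_lock l = OS_UNFAIR_LOCK_INIT;
//     os_unfair_lock_lock(&l);
//     os_unfair_lock_assert_owner(&l);     // OK
//     os_unfair_lock_unlock(&l);
//     os_unfair_lock_assert_owner(&l);     // traps: caller is not the owner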


// !TARGET_OS_SIMULATOR
#else
// TARGET_OS_SIMULATOR

// Simulator platforms have no built-in lock debugging in os_unfair_lock.


void
lockdebug_mutex_lock(mutex_t *lock)
{
    _objc_lock_list *locks = getLocks(YES);

    if (hasLock(locks, lock, MUTEX)) {
        _objc_fatal("deadlock: relocking mutex");
    }
    setLock(locks, lock, MUTEX);
}

// try-lock success is the only case with lockdebug effects.
// try-lock when already locked is OK (will fail)
// try-lock failure does nothing.
void
lockdebug_mutex_try_lock_success(mutex_t *lock)
{
    _objc_lock_list *locks = getLocks(YES);
    setLock(locks, lock, MUTEX);
}
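
// Usage sketch (hypothetical caller; assumes the mutex wrapper's try-lock
// invokes the hook above only when the underlying trylock succeeds):
//
//     mutex_t m;
//     if (m.tryLock()) {    // success: recorded via setLock()
//         // ... critical section ...
//         m.unlock();       // removed via clearLock()
//     }
//     // failure changes no lockdebug state, so no spurious
//     // "unlocking unowned mutex" can follow.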

void
lockdebug_mutex_unlock(mutex_t *lock)
{
    _objc_lock_list *locks = getLocks(NO);

    if (!hasLock(locks, lock, MUTEX)) {
        _objc_fatal("unlocking unowned mutex");
    }
    clearLock(locks, lock, MUTEX);
}


void
lockdebug_mutex_assert_locked(mutex_t *lock)
{
    _objc_lock_list *locks = getLocks(NO);

    if (!hasLock(locks, lock, MUTEX)) {
        _objc_fatal("mutex incorrectly not locked");
    }
}

void
lockdebug_mutex_assert_unlocked(mutex_t *lock)
{
    _objc_lock_list *locks = getLocks(NO);

    if (hasLock(locks, lock, MUTEX)) {
        _objc_fatal("mutex incorrectly locked");
    }
}


// TARGET_OS_SIMULATOR
#endif

/***********************************************************************
* Recursive mutex checking
**********************************************************************/

void
lockdebug_recursive_mutex_lock(recursive_mutex_tt<true> *lock)
{
    _objc_lock_list *locks = getLocks(YES);
    setLock(locks, lock, RECURSIVE);
}

void
lockdebug_recursive_mutex_unlock(recursive_mutex_tt<true> *lock)
{
    _objc_lock_list *locks = getLocks(NO);

    if (!hasLock(locks, lock, RECURSIVE)) {
        _objc_fatal("unlocking unowned recursive mutex");
    }
    clearLock(locks, lock, RECURSIVE);
}


void
lockdebug_recursive_mutex_assert_locked(recursive_mutex_tt<true> *lock)
{
    _objc_lock_list *locks = getLocks(NO);

    if (!hasLock(locks, lock, RECURSIVE)) {
        _objc_fatal("recursive mutex incorrectly not locked");
    }
}

void
lockdebug_recursive_mutex_assert_unlocked(recursive_mutex_tt<true> *lock)
{
    _objc_lock_list *locks = getLocks(NO);

    if (hasLock(locks, lock, RECURSIVE)) {
        _objc_fatal("recursive mutex incorrectly locked");
    }
}
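
// Sketch (illustrative): unlike lockdebug_mutex_lock(), the recursive
// variant performs no relocking check because re-entry is legal; the nest
// count in setLock()/clearLock() only has to balance:
//
//     recursive_mutex_tt<true> m;   // hypothetical caller
//     m.lock();     // nest count 1
//     m.lock();     // nest count 2 (no deadlock fatal)
//     m.unlock();   // nest count 1
//     m.unlock();   // entry removed
//     m.unlock();   // fatal: "unlocking unowned recursive mutex"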


/***********************************************************************
* Monitor checking
**********************************************************************/

void
lockdebug_monitor_enter(monitor_t *lock)
{
    _objc_lock_list *locks = getLocks(YES);

    if (hasLock(locks, lock, MONITOR)) {
        _objc_fatal("deadlock: relocking monitor");
    }
    setLock(locks, lock, MONITOR);
}

void
lockdebug_monitor_leave(monitor_t *lock)
{
    _objc_lock_list *locks = getLocks(NO);

    if (!hasLock(locks, lock, MONITOR)) {
        _objc_fatal("unlocking unowned monitor");
    }
    clearLock(locks, lock, MONITOR);
}

void
lockdebug_monitor_wait(monitor_t *lock)
{
    _objc_lock_list *locks = getLocks(NO);

    if (!hasLock(locks, lock, MONITOR)) {
        _objc_fatal("waiting in unowned monitor");
    }
}
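
// Note that lockdebug_monitor_wait() checks ownership but does not clear
// the entry: a monitor wait releases the underlying mutex while blocked
// and reacquires it before returning, so from this thread's bookkeeping
// the monitor remains held across the call.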


void
lockdebug_monitor_assert_locked(monitor_t *lock)
{
    _objc_lock_list *locks = getLocks(NO);

    if (!hasLock(locks, lock, MONITOR)) {
        _objc_fatal("monitor incorrectly not locked");
    }
}

void
lockdebug_monitor_assert_unlocked(monitor_t *lock)
{
    _objc_lock_list *locks = getLocks(NO);

    if (hasLock(locks, lock, MONITOR)) {
        _objc_fatal("monitor incorrectly held");
    }
}


/***********************************************************************
* rwlock checking
**********************************************************************/

void
lockdebug_rwlock_read(rwlock_tt<true> *lock)
{
    _objc_lock_list *locks = getLocks(YES);

    if (hasLock(locks, lock, RDLOCK)) {
        // Recursive rwlock read is bad (may deadlock vs pending writer)
        _objc_fatal("recursive rwlock read");
    }
    if (hasLock(locks, lock, WRLOCK)) {
        _objc_fatal("deadlock: read after write for rwlock");
    }
    setLock(locks, lock, RDLOCK);
}
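
// Sketch of the deadlock the recursive-read check prevents (illustrative,
// assuming a writer-priority rwlock):
//
//     thread 1: lock.read();    // first read succeeds
//     thread 2: lock.write();   // blocks, queued ahead of new readers
//     thread 1: lock.read();    // blocks behind thread 2 -> deadlock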

// try-read success is the only case with lockdebug effects.
// try-read when already reading is OK (won't deadlock)
// try-read when already writing is OK (will fail)
// try-read failure does nothing.
void
lockdebug_rwlock_try_read_success(rwlock_tt<true> *lock)
{
    _objc_lock_list *locks = getLocks(YES);
    setLock(locks, lock, RDLOCK);
}

void
lockdebug_rwlock_unlock_read(rwlock_tt<true> *lock)
{
    _objc_lock_list *locks = getLocks(NO);

    if (!hasLock(locks, lock, RDLOCK)) {
        _objc_fatal("un-reading unowned rwlock");
    }
    clearLock(locks, lock, RDLOCK);
}


void
lockdebug_rwlock_write(rwlock_tt<true> *lock)
{
    _objc_lock_list *locks = getLocks(YES);

    if (hasLock(locks, lock, RDLOCK)) {
        // Lock promotion not allowed (may deadlock)
        _objc_fatal("deadlock: write after read for rwlock");
    }
    if (hasLock(locks, lock, WRLOCK)) {
        _objc_fatal("recursive rwlock write");
    }
    setLock(locks, lock, WRLOCK);
}
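
// Sketch of the promotion deadlock checked above (illustrative): two
// readers that both try to upgrade wait on each other forever:
//
//     thread 1: lock.read();
//     thread 2: lock.read();
//     thread 1: lock.write();   // waits for thread 2's read to end
//     thread 2: lock.write();   // waits for thread 1's read to end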

// try-write success is the only case with lockdebug effects.
// try-write when already reading is OK (will fail)
// try-write when already writing is OK (will fail)
// try-write failure does nothing.
void
lockdebug_rwlock_try_write_success(rwlock_tt<true> *lock)
{
    _objc_lock_list *locks = getLocks(YES);
    setLock(locks, lock, WRLOCK);
}

void
lockdebug_rwlock_unlock_write(rwlock_tt<true> *lock)
{
    _objc_lock_list *locks = getLocks(NO);

    if (!hasLock(locks, lock, WRLOCK)) {
        _objc_fatal("un-writing unowned rwlock");
    }
    clearLock(locks, lock, WRLOCK);
}


void
lockdebug_rwlock_assert_reading(rwlock_tt<true> *lock)
{
    _objc_lock_list *locks = getLocks(NO);

    if (!hasLock(locks, lock, RDLOCK)) {
        _objc_fatal("rwlock incorrectly not reading");
    }
}

void
lockdebug_rwlock_assert_writing(rwlock_tt<true> *lock)
{
    _objc_lock_list *locks = getLocks(NO);

    if (!hasLock(locks, lock, WRLOCK)) {
        _objc_fatal("rwlock incorrectly not writing");
    }
}

void
lockdebug_rwlock_assert_locked(rwlock_tt<true> *lock)
{
    _objc_lock_list *locks = getLocks(NO);

    if (!hasLock(locks, lock, RDLOCK) && !hasLock(locks, lock, WRLOCK)) {
        _objc_fatal("rwlock incorrectly neither reading nor writing");
    }
}

void
lockdebug_rwlock_assert_unlocked(rwlock_tt<true> *lock)
{
    _objc_lock_list *locks = getLocks(NO);

    if (hasLock(locks, lock, RDLOCK) || hasLock(locks, lock, WRLOCK)) {
        _objc_fatal("rwlock incorrectly not unlocked");
    }
}


#endif