// apple/objc4 (objc4-551.1) — runtime/objc-sync.mm
1 /*
2 * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 #include "objc-private.h"
25 #include "objc-sync.h"
26
27 //
28 // Allocate a lock only when needed. Since few locks are needed at any point
29 // in time, keep them on a single list.
30 //
31
32
// One lock node per synchronized object. Nodes live on per-bucket
// singly-linked lists (sDataLists below) and are never freed; a node
// whose threadCount has dropped to 0 may be reused for a different object.
typedef struct SyncData {
    struct SyncData* nextData;      // next node in this bucket's list
    id object;                      // object this node currently locks
    int threadCount;  // number of THREADS using this block
    recursive_mutex_t mutex;        // the recursive lock behind @synchronized
} SyncData;

// Per-thread record of one in-use SyncData plus this thread's recursion depth.
typedef struct {
    SyncData *data;
    unsigned int lockCount;  // number of times THIS THREAD locked this block
} SyncCacheItem;

// Growable per-thread array of SyncCacheItems (allocated by fetch_cache).
typedef struct SyncCache {
    unsigned int allocated;     // capacity of list[]
    unsigned int used;          // number of valid entries in list[]
    SyncCacheItem list[0];      // trailing variable-length storage
                                // (GCC/Clang zero-length array idiom;
                                // C99 flexible array member would be list[])
} SyncCache;
50
51 /*
52 Fast cache: two fixed pthread keys store a single SyncCacheItem.
53 This avoids malloc of the SyncCache for threads that only synchronize
54 a single object at a time.
55 SYNC_DATA_DIRECT_KEY == SyncCacheItem.data
56 SYNC_COUNT_DIRECT_KEY == SyncCacheItem.lockCount
57 */
58
typedef struct {
    SyncData *data;     // head of this bucket's SyncData list
    spinlock_t lock;    // guards list mutation and node reuse in this bucket

    // Pad the struct to a full cache line so adjacent buckets' locks
    // do not false-share.
    char align[64 - sizeof (spinlock_t) - sizeof (SyncData *)];
} SyncList __attribute__((aligned(64)));
// aligned to put locks on separate cache lines

// Use multiple parallel lists to decrease contention among unrelated objects.
#define COUNT 16
// Hash an object address to a bucket index; the >> 5 presumably discards
// low alignment bits so nearby allocations spread across buckets.
#define HASH(obj) ((((uintptr_t)(obj)) >> 5) & (COUNT - 1))
#define LOCK_FOR_OBJ(obj) sDataLists[HASH(obj)].lock
#define LIST_FOR_OBJ(obj) sDataLists[HASH(obj)].data
static SyncList sDataLists[COUNT];


// What the caller of id2data() intends to do with the returned lock.
enum usage { ACQUIRE, RELEASE, CHECK };
76
77 static SyncCache *fetch_cache(BOOL create)
78 {
79 _objc_pthread_data *data;
80
81 data = _objc_fetch_pthread_data(create);
82 if (!data) return NULL;
83
84 if (!data->syncCache) {
85 if (!create) {
86 return NULL;
87 } else {
88 int count = 4;
89 data->syncCache = (SyncCache *)
90 calloc(1, sizeof(SyncCache) + count*sizeof(SyncCacheItem));
91 data->syncCache->allocated = count;
92 }
93 }
94
95 // Make sure there's at least one open slot in the list.
96 if (data->syncCache->allocated == data->syncCache->used) {
97 data->syncCache->allocated *= 2;
98 data->syncCache = (SyncCache *)
99 realloc(data->syncCache, sizeof(SyncCache)
100 + data->syncCache->allocated * sizeof(SyncCacheItem));
101 }
102
103 return data->syncCache;
104 }
105
106
// Free a thread's SyncCache at thread teardown. Accepts NULL;
// free(NULL) is already a no-op, so no guard is needed.
void _destroySyncCache(struct SyncCache *cache)
{
    free(cache);
}
111
112
// Map an object to its SyncData lock node.
// Lookup order: (1) per-thread single-entry fast cache in two TLS slots,
// (2) per-thread SyncCache array, (3) the object's global bucket list,
// under that bucket's spinlock.
// ACQUIRE bumps this thread's recursion count, reusing or creating a node
// on first acquisition; RELEASE decrements it and evicts the thread-cache
// entry when it reaches zero; CHECK is a pure lookup with no count changes.
// Returns NULL when RELEASE or CHECK find no node for 'object'.
static SyncData* id2data(id object, enum usage why)
{
    spinlock_t *lockp = &LOCK_FOR_OBJ(object);
    SyncData **listp = &LIST_FOR_OBJ(object);
    SyncData* result = NULL;

#if SUPPORT_DIRECT_THREAD_KEYS
    // Check per-thread single-entry fast cache for matching object
    BOOL fastCacheOccupied = NO;
    SyncData *data = (SyncData *)tls_get_direct(SYNC_DATA_DIRECT_KEY);
    if (data) {
        fastCacheOccupied = YES;

        if (data->object == object) {
            // Found a match in fast cache.
            uintptr_t lockCount;

            result = data;
            lockCount = (uintptr_t)tls_get_direct(SYNC_COUNT_DIRECT_KEY);
            // A cached entry must describe a lock this thread still holds;
            // anything else means the cache bookkeeping is corrupt.
            require_action_string(result->threadCount > 0, fastcache_done,
                                  result = NULL, "id2data fastcache is buggy");
            require_action_string(lockCount > 0, fastcache_done,
                                  result = NULL, "id2data fastcache is buggy");

            switch(why) {
            case ACQUIRE: {
                // Recursive acquisition by this thread: only the TLS
                // recursion count changes.
                lockCount++;
                tls_set_direct(SYNC_COUNT_DIRECT_KEY, (void*)lockCount);
                break;
            }
            case RELEASE:
                lockCount--;
                tls_set_direct(SYNC_COUNT_DIRECT_KEY, (void*)lockCount);
                if (lockCount == 0) {
                    // remove from fast cache
                    tls_set_direct(SYNC_DATA_DIRECT_KEY, NULL);
                    // atomic because may collide with concurrent ACQUIRE
                    OSAtomicDecrement32Barrier(&result->threadCount);
                }
                break;
            case CHECK:
                // do nothing
                break;
            }

        fastcache_done:
            return result;
        }
    }
#endif

    // Check per-thread cache of already-owned locks for matching object
    SyncCache *cache = fetch_cache(NO);
    if (cache) {
        unsigned int i;
        for (i = 0; i < cache->used; i++) {
            SyncCacheItem *item = &cache->list[i];
            if (item->data->object != object) continue;

            // Found a match.
            result = item->data;
            require_action_string(result->threadCount > 0, cache_done,
                                  result = NULL, "id2data cache is buggy");
            require_action_string(item->lockCount > 0, cache_done,
                                  result = NULL, "id2data cache is buggy");

            switch(why) {
            case ACQUIRE:
                item->lockCount++;
                break;
            case RELEASE:
                item->lockCount--;
                if (item->lockCount == 0) {
                    // remove from per-thread cache
                    // (order doesn't matter, so swap in the last entry)
                    cache->list[i] = cache->list[--cache->used];
                    // atomic because may collide with concurrent ACQUIRE
                    OSAtomicDecrement32Barrier(&result->threadCount);
                }
                break;
            case CHECK:
                // do nothing
                break;
            }

        cache_done:
            return result;
        }
    }

    // Thread cache didn't find anything.
    // Walk in-use list looking for matching object
    // Spinlock prevents multiple threads from creating multiple
    // locks for the same new object.
    // We could keep the nodes in some hash table if we find that there are
    // more than 20 or so distinct locks active, but we don't do that now.

    spinlock_lock(lockp);

    {
        SyncData* p;
        SyncData* firstUnused = NULL;
        for (p = *listp; p != NULL; p = p->nextData) {
            if ( p->object == object ) {
                result = p;
                // atomic because may collide with concurrent RELEASE
                OSAtomicIncrement32Barrier(&result->threadCount);
                goto done;
            }
            // Remember the first reusable node (no thread is using it).
            if ( (firstUnused == NULL) && (p->threadCount == 0) )
                firstUnused = p;
        }

        // no SyncData currently associated with object
        if ( (why == RELEASE) || (why == CHECK) )
            goto done;

        // an unused one was found, use it
        // (safe to repurpose: threadCount == 0 and we hold this
        // bucket's spinlock)
        if ( firstUnused != NULL ) {
            result = firstUnused;
            result->object = object;
            result->threadCount = 1;
            goto done;
        }
    }

    // malloc a new SyncData and add to list.
    // XXX calling malloc with a global lock held is bad practice,
    // might be worth releasing the lock, mallocing, and searching again.
    // But since we never free these guys we won't be stuck in malloc very often.
    result = (SyncData*)calloc(sizeof(SyncData), 1);
    result->object = object;
    result->threadCount = 1;
    recursive_mutex_init(&result->mutex);
    // Push onto the front of this bucket's list.
    result->nextData = *listp;
    *listp = result;

done:
    spinlock_unlock(lockp);
    if (result) {
        // Only new ACQUIRE should get here.
        // All RELEASE and CHECK and recursive ACQUIRE are
        // handled by the per-thread caches above.

        require_string(result != NULL, really_done, "id2data is buggy");
        require_action_string(why == ACQUIRE, really_done,
                              result = NULL, "id2data is buggy");
        require_action_string(result->object == object, really_done,
                              result = NULL, "id2data is buggy");

#if SUPPORT_DIRECT_THREAD_KEYS
        if (!fastCacheOccupied) {
            // Save in fast thread cache
            tls_set_direct(SYNC_DATA_DIRECT_KEY, result);
            tls_set_direct(SYNC_COUNT_DIRECT_KEY, (void*)1);
        } else
#endif
        {
            // Save in thread cache
            // (fetch_cache(YES) guarantees at least one open slot)
            if (!cache) cache = fetch_cache(YES);
            cache->list[cache->used].data = result;
            cache->list[cache->used].lockCount = 1;
            cache->used++;
        }
    }

really_done:
    return result;
}
281
282
// Empty function that exists only as a debugger breakpoint target:
// objc_sync_enter calls it on every @synchronized(nil), so setting a
// breakpoint on objc_sync_nil catches all nil-sync call sites.
BREAKPOINT_FUNCTION(
    void objc_sync_nil(void)
);
286
287
288 // Begin synchronizing on 'obj'.
289 // Allocates recursive mutex associated with 'obj' if needed.
290 // Returns OBJC_SYNC_SUCCESS once lock is acquired.
291 int objc_sync_enter(id obj)
292 {
293 int result = OBJC_SYNC_SUCCESS;
294
295 if (obj) {
296 SyncData* data = id2data(obj, ACQUIRE);
297 require_action_string(data != NULL, done, result = OBJC_SYNC_NOT_INITIALIZED, "id2data failed");
298
299 result = recursive_mutex_lock(&data->mutex);
300 require_noerr_string(result, done, "mutex_lock failed");
301 } else {
302 // @synchronized(nil) does nothing
303 if (DebugNilSync) {
304 _objc_inform("NIL SYNC DEBUG: @synchronized(nil); set a breakpoint on objc_sync_nil to debug");
305 }
306 objc_sync_nil();
307 }
308
309 done:
310 return result;
311 }
312
313
314 // End synchronizing on 'obj'.
315 // Returns OBJC_SYNC_SUCCESS or OBJC_SYNC_NOT_OWNING_THREAD_ERROR
316 int objc_sync_exit(id obj)
317 {
318 int result = OBJC_SYNC_SUCCESS;
319
320 if (obj) {
321 SyncData* data = id2data(obj, RELEASE);
322 require_action_string(data != NULL, done, result = OBJC_SYNC_NOT_OWNING_THREAD_ERROR, "id2data failed");
323
324 result = recursive_mutex_unlock(&data->mutex);
325 require_noerr_string(result, done, "mutex_unlock failed");
326 } else {
327 // @synchronized(nil) does nothing
328 }
329
330 done:
331 if ( result == RECURSIVE_MUTEX_NOT_LOCKED )
332 result = OBJC_SYNC_NOT_OWNING_THREAD_ERROR;
333
334 return result;
335 }
336