/*
 * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
24 #include "objc-private.h"
25 #include "objc-sync.h"
28 // Allocate a lock only when needed. Since few locks are needed at any point
29 // in time, keep them on a single list.
33 typedef struct SyncData {
34 struct SyncData* nextData;
36 int threadCount; // number of THREADS using this block
37 recursive_mutex_t mutex;
42 unsigned int lockCount; // number of times THIS THREAD locked this block
45 typedef struct SyncCache {
46 unsigned int allocated;
48 SyncCacheItem list[0];
/*
  Fast cache: two fixed pthread keys store a single SyncCacheItem.
  This avoids malloc of the SyncCache for threads that only synchronize
  a single object at a time.
      SYNC_DATA_DIRECT_KEY  == SyncCacheItem.data
      SYNC_COUNT_DIRECT_KEY == SyncCacheItem.lockCount
*/
63 char align[64 - sizeof (OSSpinLock) - sizeof (SyncData *)];
64 } SyncList __attribute__((aligned(64)));
65 // aligned to put locks on separate cache lines
67 // Use multiple parallel lists to decrease contention among unrelated objects.
69 #define HASH(obj) ((((uintptr_t)(obj)) >> 5) & (COUNT - 1))
70 #define LOCK_FOR_OBJ(obj) sDataLists[HASH(obj)].lock
71 #define LIST_FOR_OBJ(obj) sDataLists[HASH(obj)].data
72 static SyncList sDataLists[COUNT];
75 enum usage { ACQUIRE, RELEASE, CHECK };
77 static SyncCache *fetch_cache(BOOL create)
79 _objc_pthread_data *data;
81 data = _objc_fetch_pthread_data(create);
82 if (!data) return NULL;
84 if (!data->syncCache) {
89 data->syncCache = (SyncCache *)
90 calloc(1, sizeof(SyncCache) + count*sizeof(SyncCacheItem));
91 data->syncCache->allocated = count;
95 // Make sure there's at least one open slot in the list.
96 if (data->syncCache->allocated == data->syncCache->used) {
97 data->syncCache->allocated *= 2;
98 data->syncCache = (SyncCache *)
99 realloc(data->syncCache, sizeof(SyncCache)
100 + data->syncCache->allocated * sizeof(SyncCacheItem));
103 return data->syncCache;
// Tear down a thread's SyncCache when its pthread data is destroyed.
// Takes ownership of 'cache'; accepts NULL. free(NULL) is a no-op, so the
// former 'if (cache)' guard was redundant and has been dropped.
void _destroySyncCache(struct SyncCache *cache)
{
    free(cache);
}
113 static SyncData* id2data(id object, enum usage why)
115 OSSpinLock *lockp = &LOCK_FOR_OBJ(object);
116 SyncData **listp = &LIST_FOR_OBJ(object);
117 SyncData* result = NULL;
119 #if SUPPORT_DIRECT_THREAD_KEYS
120 // Check per-thread single-entry fast cache for matching object
121 BOOL fastCacheOccupied = NO;
122 SyncData *data = (SyncData *)tls_get_direct(SYNC_DATA_DIRECT_KEY);
124 fastCacheOccupied = YES;
126 if (data->object == object) {
127 // Found a match in fast cache.
131 lockCount = (uintptr_t)tls_get_direct(SYNC_COUNT_DIRECT_KEY);
132 require_action_string(result->threadCount > 0, fastcache_done,
133 result = NULL, "id2data fastcache is buggy");
134 require_action_string(lockCount > 0, fastcache_done,
135 result = NULL, "id2data fastcache is buggy");
140 tls_set_direct(SYNC_COUNT_DIRECT_KEY, (void*)lockCount);
145 tls_set_direct(SYNC_COUNT_DIRECT_KEY, (void*)lockCount);
146 if (lockCount == 0) {
147 // remove from fast cache
148 tls_set_direct(SYNC_DATA_DIRECT_KEY, NULL);
149 // atomic because may collide with concurrent ACQUIRE
150 OSAtomicDecrement32Barrier(&result->threadCount);
164 // Check per-thread cache of already-owned locks for matching object
165 SyncCache *cache = fetch_cache(NO);
168 for (i = 0; i < cache->used; i++) {
169 SyncCacheItem *item = &cache->list[i];
170 if (item->data->object != object) continue;
174 require_action_string(result->threadCount > 0, cache_done,
175 result = NULL, "id2data cache is buggy");
176 require_action_string(item->lockCount > 0, cache_done,
177 result = NULL, "id2data cache is buggy");
185 if (item->lockCount == 0) {
186 // remove from per-thread cache
187 cache->list[i] = cache->list[--cache->used];
188 // atomic because may collide with concurrent ACQUIRE
189 OSAtomicDecrement32Barrier(&result->threadCount);
202 // Thread cache didn't find anything.
203 // Walk in-use list looking for matching object
204 // Spinlock prevents multiple threads from creating multiple
205 // locks for the same new object.
206 // We could keep the nodes in some hash table if we find that there are
207 // more than 20 or so distinct locks active, but we don't do that now.
209 OSSpinLockLock(lockp);
213 SyncData* firstUnused = NULL;
214 for (p = *listp; p != NULL; p = p->nextData) {
215 if ( p->object == object ) {
217 // atomic because may collide with concurrent RELEASE
218 OSAtomicIncrement32Barrier(&result->threadCount);
221 if ( (firstUnused == NULL) && (p->threadCount == 0) )
225 // no SyncData currently associated with object
226 if ( (why == RELEASE) || (why == CHECK) )
229 // an unused one was found, use it
230 if ( firstUnused != NULL ) {
231 result = firstUnused;
232 result->object = object;
233 result->threadCount = 1;
238 // malloc a new SyncData and add to list.
239 // XXX calling malloc with a global lock held is bad practice,
240 // might be worth releasing the lock, mallocing, and searching again.
241 // But since we never free these guys we won't be stuck in malloc very often.
242 result = (SyncData*)calloc(sizeof(SyncData), 1);
243 result->object = object;
244 result->threadCount = 1;
245 recursive_mutex_init(&result->mutex);
246 result->nextData = *listp;
250 OSSpinLockUnlock(lockp);
252 // Only new ACQUIRE should get here.
253 // All RELEASE and CHECK and recursive ACQUIRE are
254 // handled by the per-thread caches above.
256 require_string(result != NULL, really_done, "id2data is buggy");
257 require_action_string(why == ACQUIRE, really_done,
258 result = NULL, "id2data is buggy");
259 require_action_string(result->object == object, really_done,
260 result = NULL, "id2data is buggy");
262 #if SUPPORT_DIRECT_THREAD_KEYS
263 if (!fastCacheOccupied) {
264 // Save in fast thread cache
265 tls_set_direct(SYNC_DATA_DIRECT_KEY, result);
266 tls_set_direct(SYNC_COUNT_DIRECT_KEY, (void*)1);
270 // Save in thread cache
271 if (!cache) cache = fetch_cache(YES);
272 cache->list[cache->used].data = result;
273 cache->list[cache->used].lockCount = 1;
284 void objc_sync_nil(void)
288 // Begin synchronizing on 'obj'.
289 // Allocates recursive mutex associated with 'obj' if needed.
290 // Returns OBJC_SYNC_SUCCESS once lock is acquired.
291 int objc_sync_enter(id obj)
293 int result = OBJC_SYNC_SUCCESS;
296 SyncData* data = id2data(obj, ACQUIRE);
297 require_action_string(data != NULL, done, result = OBJC_SYNC_NOT_INITIALIZED, "id2data failed");
299 result = recursive_mutex_lock(&data->mutex);
300 require_noerr_string(result, done, "mutex_lock failed");
302 // @synchronized(nil) does nothing
304 _objc_inform("NIL SYNC DEBUG: @synchronized(nil); set a breakpoint on objc_sync_nil to debug");
314 // End synchronizing on 'obj'.
315 // Returns OBJC_SYNC_SUCCESS or OBJC_SYNC_NOT_OWNING_THREAD_ERROR
316 int objc_sync_exit(id obj)
318 int result = OBJC_SYNC_SUCCESS;
321 SyncData* data = id2data(obj, RELEASE);
322 require_action_string(data != NULL, done, result = OBJC_SYNC_NOT_OWNING_THREAD_ERROR, "id2data failed");
324 result = recursive_mutex_unlock(&data->mutex);
325 require_noerr_string(result, done, "mutex_unlock failed");
327 // @synchronized(nil) does nothing
331 if ( result == RECURSIVE_MUTEX_NOT_LOCKED )
332 result = OBJC_SYNC_NOT_OWNING_THREAD_ERROR;