]> git.saurik.com Git - apple/objc4.git/blob - runtime/objc-os.h
objc4-437.1.tar.gz
[apple/objc4.git] / runtime / objc-os.h
1 /*
2 * Copyright (c) 2007 Apple Inc. All Rights Reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 /***********************************************************************
25 * objc-os.h
26 * OS portability layer.
27 **********************************************************************/
28
29 #ifndef _OBJC_OS_H
30 #define _OBJC_OS_H
31
32 #include <TargetConditionals.h>
33
34 #if TARGET_OS_MAC
35
36 # include <stdio.h>
37 # include <stdlib.h>
38 # include <stdint.h>
39 # include <stdarg.h>
40 # include <string.h>
41 # include <ctype.h>
42 # include <errno.h>
43 # include <dlfcn.h>
44 # include <fcntl.h>
45 # include <assert.h>
46 # include <limits.h>
47 # include <syslog.h>
48 # include <unistd.h>
49 # include <pthread.h>
50 # include <crt_externs.h>
51 # include <AssertMacros.h>
52 # include <Block_private.h>
53 # include <AvailabilityMacros.h>
54 # include <TargetConditionals.h>
55 # include <sys/mman.h>
56 # include <sys/time.h>
57 # include <sys/stat.h>
58 # include <sys/param.h>
59 # include <mach/mach.h>
60 # include <mach-o/dyld.h>
61 # include <mach-o/ldsyms.h>
62 # include <mach-o/loader.h>
63 # include <mach-o/getsect.h>
64 # include <mach-o/dyld_priv.h>
65 # include <malloc/malloc.h>
66 # include <libkern/OSAtomic.h>
67 # include <libkern/OSCacheControl.h>
68 # include <System/pthread_machdep.h>
69 # include "objc-probes.h" // generated dtrace probe definitions.
70
71 # if __cplusplus
72 # include <vector>
73 # include <algorithm>
74 using namespace std;
75 # include <ext/hash_map>
76 using namespace __gnu_cxx;
77 # endif
78
79 #elif TARGET_OS_WIN32
80
81 # define WINVER 0x0501 // target Windows XP and later
82 # define _WIN32_WINNT 0x0501 // target Windows XP and later
83 # define WIN32_LEAN_AND_MEAN
84 // hack: windef.h typedefs BOOL as int
85 # define BOOL WINBOOL
86 # include <windows.h>
87 # undef BOOL
88
89 # include <stdio.h>
90 # include <stdlib.h>
91 # include <stdint.h>
92 # include <stdarg.h>
93 # include <string.h>
94 # include <assert.h>
95 # include <malloc.h>
96 # include <AvailabilityMacros.h>
97
98 # if __cplusplus
99 # include <vector>
100 # include <algorithm>
101 using namespace std;
102 # include <hash_map>
103 using namespace stdext;
104 # define __BEGIN_DECLS extern "C" {
105 # define __END_DECLS }
106 # else
107 # define __BEGIN_DECLS /*empty*/
108 # define __END_DECLS /*empty*/
109 # endif
110
111 # define __private_extern__
112 # define __attribute__(x)
113 # define inline __inline
114
115 /* stub out dtrace probes */
116 # define OBJC_RUNTIME_OBJC_EXCEPTION_RETHROW() do {} while(0)
117 # define OBJC_RUNTIME_OBJC_EXCEPTION_THROW(arg0) do {} while(0)
118
119 #else
120 # error unknown OS
121 #endif
122
123
124 #include <objc/objc.h>
125 #include <objc/objc-api.h>
126
127 __BEGIN_DECLS
128
129 extern void _objc_fatal(const char *fmt, ...) __attribute__((noreturn, format (printf, 1, 2)));
130
// One-time lazy initialization of a pointer-sized variable.
// `create` is evaluated eagerly; the winner of the race installs its
// value with a barrier CAS and jumps past `delete`. A loser runs
// `delete` to dispose of the value it built in vain.
// Uses 0/NULL as the "not yet initialized" sentinel, so `create` must
// not legitimately produce NULL.
#define INIT_ONCE_PTR(var, create, delete) \
    do { \
        if (var) break; \
        typeof(var) v = create; \
        while (!var) { \
            if (OSAtomicCompareAndSwapPtrBarrier(0, v, (void**)&var)) { \
                goto done; \
            } \
        } \
        delete; \
        done:; \
    } while (0)
143
// One-time lazy initialization of a 32-bit variable.
// Same protocol as INIT_ONCE_PTR: 0 is the uninitialized sentinel, the
// CAS winner keeps its `create` result, losers run `delete` on theirs.
#define INIT_ONCE_32(var, create, delete) \
    do { \
        if (var) break; \
        typeof(var) v = create; \
        while (!var) { \
            if (OSAtomicCompareAndSwap32Barrier(0, v, (volatile int32_t *)&var)) { \
                goto done; \
            } \
        } \
        delete; \
        done:; \
    } while (0)
156
157
// Thread keys reserved by libc for our use.
// Keys [0..4] are used by autozone.
#if defined(__PTK_FRAMEWORK_OBJC_KEY5)
// Direct keys index libc's per-thread slots without going through
// pthread_getspecific (see tls_get_direct / tls_set_direct below).
# define TLS_DIRECT_KEY        ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY5)
# define SYNC_DATA_DIRECT_KEY  ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY6)
# define SYNC_COUNT_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY7)
// define DIRECT_4_KEY         ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY8)
// define DIRECT_5_KEY         ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY9)
#else
// No reserved slots on this libc; fall back to ordinary pthread TSD.
# define NO_DIRECT_THREAD_KEYS 1
#endif
169
170
171 #if TARGET_OS_WIN32
172
173 // Compiler compatibility
174
175 // OS compatibility
176
// Map BSD/Darwin libc names onto their MSVC equivalents.
#define strdup _strdup

// No setuid/setgid concept here; report "not tainted".
#define issetugid() 0

#define MIN(x, y) ((x) < (y) ? (x) : (y))

// BSD-style memory ops in terms of the standard C ones.
static __inline void bcopy(const void *src, void *dst, size_t size) { memcpy(dst, src, size); }
static __inline void bzero(void *dst, size_t size) { memset(dst, 0, size); }

// Allocating sprintf; implemented elsewhere in the Windows port.
int asprintf(char **dstp, const char *format, ...);

// Malloc-zone shims: there is only one "zone" on Windows, represented
// by the dummy value (malloc_zone_t)-1; all calls forward to the CRT heap.
typedef void * malloc_zone_t;

static __inline malloc_zone_t malloc_default_zone(void) { return (malloc_zone_t)-1; }
static __inline void *malloc_zone_malloc(malloc_zone_t z, size_t size) { return malloc(size); }
static __inline void *malloc_zone_calloc(malloc_zone_t z, size_t size, size_t count) { return calloc(size, count); }
static __inline void *malloc_zone_realloc(malloc_zone_t z, void *p, size_t size) { return realloc(p, size); }
static __inline void malloc_zone_free(malloc_zone_t z, void *p) { free(p); }
static __inline malloc_zone_t malloc_zone_from_ptr(const void *p) { return (malloc_zone_t)-1; }
// _msize reports the CRT allocation size for p.
static __inline size_t malloc_size(const void *p) { return _msize((void*)p); /* fixme invalid pointer check? */ }
197
198
199 // AssertMacros
200
// Minimal stand-ins for <AssertMacros.h>: on failure, run the action
// (if any) and jump to the `dest` label; the message is ignored.
#define require_action_string(cond, dest, act, msg) do { if (!(cond)) { { act; } goto dest; } } while (0)
#define require_noerr_string(err, dest, msg) do { if (err) goto dest; } while (0)
#define require_string(cond, dest, msg) do { if (!(cond)) goto dest; } while (0)
204
205
206 // OSAtomic
207
// Map the Darwin OSAtomic CAS/increment/decrement API onto Win32
// Interlocked* primitives. Each returns like its Darwin namesake:
// CAS variants return true if the swap happened.

static __inline BOOL OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst)
{
    // fixme barrier is overkill
    long original = InterlockedCompareExchange(dst, newl, oldl);
    return (original == oldl);
}

static __inline BOOL OSAtomicCompareAndSwapPtrBarrier(void *oldp, void *newp, void * volatile *dst)
{
    void *original = InterlockedCompareExchangePointer(dst, newp, oldp);
    return (original == oldp);
}

static __inline BOOL OSAtomicCompareAndSwap32Barrier(int32_t oldl, int32_t newl, int32_t volatile *dst)
{
    long original = InterlockedCompareExchange((volatile long *)dst, newl, oldl);
    return (original == oldl);
}

// Returns the new (post-decrement) value, like the Darwin original.
static __inline int32_t OSAtomicDecrement32Barrier(volatile int32_t *dst)
{
    return InterlockedDecrement((volatile long *)dst);
}

// Returns the new (post-increment) value.
static __inline int32_t OSAtomicIncrement32Barrier(volatile int32_t *dst)
{
    return InterlockedIncrement((volatile long *)dst);
}
236
237
238 // Internal data types
239
// Thread identity is the Win32 thread ID, which is directly comparable.
typedef DWORD objc_thread_t; // thread ID
static __inline int thread_equal(objc_thread_t t1, objc_thread_t t2) {
    return t1 == t2;
}
static __inline objc_thread_t thread_self(void) {
    return GetCurrentThreadId();
}
247
// TLS key: a Win32 TLS slot plus the destructor the caller registered.
// NOTE: the destructor is only recorded - per the fixme below, nothing
// invokes it on thread exit yet.
typedef struct {
    DWORD key;
    void (*dtor)(void *);
} tls_key_t;
static __inline void tls_create(tls_key_t *k, void (*dtor)(void*)) {
    // fixme need dtor registry for DllMain to call on thread detach
    k->key = TlsAlloc();
    k->dtor = dtor;
}
static __inline void *tls_get(tls_key_t k) {
    return TlsGetValue(k.key);
}
static __inline void tls_set(tls_key_t k, void *value) {
    TlsSetValue(k.key, value);
}
263
264 typedef struct {
265 CRITICAL_SECTION *lock;
266 } mutex_t;
267 #define MUTEX_INITIALIZER {0};
268 extern void mutex_init(mutex_t *m);
269 static __inline int _mutex_lock_nodebug(mutex_t *m) {
270 // fixme error check
271 if (!m->lock) {
272 mutex_init(m);
273 }
274 EnterCriticalSection(m->lock);
275 return 0;
276 }
277 static __inline int _mutex_try_lock_nodebug(mutex_t *m) {
278 // fixme error check
279 if (!m->lock) {
280 mutex_init(m);
281 }
282 return TryEnterCriticalSection(m->lock);
283 }
284 static __inline int _mutex_unlock_nodebug(mutex_t *m) {
285 // fixme error check
286 LeaveCriticalSection(m->lock);
287 return 0;
288 }
289
290
// OSSpinLock substitute: a full mutex stands in for the Darwin spinlock.
// NOTE(review): these expand to mutex_lock/mutex_unlock, which this
// header does not declare (only the *_nodebug variants appear here) -
// presumably declared elsewhere in the runtime; confirm.
typedef mutex_t OSSpinLock;
#define OSSpinLockLock(l) mutex_lock(l)
#define OSSpinLockUnlock(l) mutex_unlock(l)
#define OS_SPINLOCK_INIT MUTEX_INITIALIZER
295
296
// Recursive mutex backed by a Win32 mutex HANDLE (Win32 mutexes are
// recursive for their owning thread). Must be set up by
// recursive_mutex_init before first use (the asserts check this).
// NOTE(review): `{0};` below carries a stray trailing semicolon into
// the macro expansion; harmless in most uses but sloppy.
typedef struct {
    HANDLE mutex;
} recursive_mutex_t;
#define RECURSIVE_MUTEX_INITIALIZER {0};
#define RECURSIVE_MUTEX_NOT_LOCKED 1
extern void recursive_mutex_init(recursive_mutex_t *m);
// Lock. WAIT_OBJECT_0 == 0, so a successful wait returns 0.
static __inline int _recursive_mutex_lock_nodebug(recursive_mutex_t *m) {
    assert(m->mutex);
    return WaitForSingleObject(m->mutex, INFINITE);
}
// Try-lock. Nonzero if the mutex was acquired without blocking.
static __inline int _recursive_mutex_try_lock_nodebug(recursive_mutex_t *m) {
    assert(m->mutex);
    return (WAIT_OBJECT_0 == WaitForSingleObject(m->mutex, 0));
}
// Unlock. Returns 0, or RECURSIVE_MUTEX_NOT_LOCKED if the calling
// thread does not own the mutex.
static __inline int _recursive_mutex_unlock_nodebug(recursive_mutex_t *m) {
    assert(m->mutex);
    return ReleaseMutex(m->mutex) ? 0 : RECURSIVE_MUTEX_NOT_LOCKED;
}
315
316
317 /*
318 typedef HANDLE mutex_t;
319 static inline void mutex_init(HANDLE *m) { *m = CreateMutex(NULL, FALSE, NULL); }
320 static inline void _mutex_lock(mutex_t *m) { WaitForSingleObject(*m, INFINITE); }
321 static inline int mutex_try_lock(mutex_t *m) { return WaitForSingleObject(*m, 0) == WAIT_OBJECT_0; }
322 static inline void _mutex_unlock(mutex_t *m) { ReleaseMutex(*m); }
323 */
324
// based on http://www.cs.wustl.edu/~schmidt/win32-cv-1.html
// Vista-only CONDITION_VARIABLE would be better
//
// Condition-variable emulation for pre-Vista Win32, following Schmidt &
// Pyarali's scheme: a semaphore parks waiters, an auto-reset event lets
// the last waiter of a broadcast hand control back to the broadcaster,
// and a critical section guards the bookkeeping fields.
typedef struct {
    HANDLE mutex;
    HANDLE waiters; // semaphore for those in cond_wait()
    HANDLE waitersDone; // auto-reset event after everyone gets a broadcast
    CRITICAL_SECTION waitCountLock; // guards waitCount and didBroadcast
    unsigned int waitCount;
    int didBroadcast;
} monitor_t;
#define MONITOR_INITIALIZER { 0 }
#define MONITOR_NOT_ENTERED 1
extern int monitor_init(monitor_t *c);

// Enter the monitor, lazily initializing it on first use.
// Returns 0 (WAIT_OBJECT_0) on success, or monitor_init's error.
static inline int _monitor_enter_nodebug(monitor_t *c) {
    if (!c->mutex) {
        int err = monitor_init(c);
        if (err) return err;
    }
    return WaitForSingleObject(c->mutex, INFINITE);
}
// Leave the monitor. MONITOR_NOT_ENTERED if the caller doesn't own it.
static inline int _monitor_exit_nodebug(monitor_t *c) {
    if (!ReleaseMutex(c->mutex)) return MONITOR_NOT_ENTERED;
    else return 0;
}
// Wait for a notification. The monitor mutex must be held; it is
// released while waiting and reacquired before returning.
static inline int _monitor_wait_nodebug(monitor_t *c) {
    int last;
    // Register as a waiter.
    EnterCriticalSection(&c->waitCountLock);
    c->waitCount++;
    LeaveCriticalSection(&c->waitCountLock);

    // Atomically release the mutex and park on the waiters semaphore.
    SignalObjectAndWait(c->mutex, c->waiters, INFINITE, FALSE);

    // Deregister; note whether we're the last waiter of a broadcast.
    EnterCriticalSection(&c->waitCountLock);
    c->waitCount--;
    last = c->didBroadcast && c->waitCount == 0;
    LeaveCriticalSection(&c->waitCountLock);

    if (last) {
        // tell broadcaster that all waiters have awoken
        SignalObjectAndWait(c->waitersDone, c->mutex, INFINITE, FALSE);
    } else {
        WaitForSingleObject(c->mutex, INFINITE);
    }

    // fixme error checking
    return 0;
}
// Wake one waiter, if any.
static inline int monitor_notify(monitor_t *c) {
    int haveWaiters;

    EnterCriticalSection(&c->waitCountLock);
    haveWaiters = c->waitCount > 0;
    LeaveCriticalSection(&c->waitCountLock);

    if (haveWaiters) {
        ReleaseSemaphore(c->waiters, 1, 0);
    }

    // fixme error checking
    return 0;
}
// Wake every current waiter, then wait until all have resumed.
static inline int monitor_notifyAll(monitor_t *c) {
    EnterCriticalSection(&c->waitCountLock);
    if (c->waitCount == 0) {
        LeaveCriticalSection(&c->waitCountLock);
        return 0;
    }
    // Release every parked waiter at once.
    c->didBroadcast = 1;
    ReleaseSemaphore(c->waiters, c->waitCount, 0);
    LeaveCriticalSection(&c->waitCountLock);

    // fairness: wait for everyone to move from waiters to mutex
    WaitForSingleObject(c->waitersDone, INFINITE);
    // not under waitCountLock, but still under mutex
    c->didBroadcast = 0;

    // fixme error checking
    return 0;
}
405
406
// fixme no rwlock yet

// Read-write lock substitute: a plain mutex, so readers do not run
// concurrently on Windows.
#define rwlock_t mutex_t
#define rwlock_init(r) mutex_init(r)
#define _rwlock_read_nodebug(m) _mutex_lock_nodebug(m)
#define _rwlock_write_nodebug(m) _mutex_lock_nodebug(m)
#define _rwlock_try_read_nodebug(m) _mutex_try_lock_nodebug(m)
#define _rwlock_try_write_nodebug(m) _mutex_try_lock_nodebug(m)
#define _rwlock_unlock_read_nodebug(m) _mutex_unlock_nodebug(m)
#define _rwlock_unlock_write_nodebug(m) _mutex_unlock_nodebug(m)
417
418
// Per-image bookkeeping for the Windows loader path: pointers into the
// image's ObjC metadata plus their counts.
// NOTE(review): field meanings inferred from names; confirm against the
// Windows image-loading code that populates this.
typedef struct {
    struct objc_module **modules;
    size_t moduleCount;
    struct old_protocol **protocols;
    size_t protocolCount;
    void *imageinfo;
    size_t imageinfoBytes;
    SEL *selrefs;
    size_t selrefCount;
    struct objc_class **clsrefs;
    size_t clsrefCount;
} os_header_info;

// An image's "header" is its PE DOS header on Windows.
typedef IMAGE_DOS_HEADER headerType;
// fixme YES bundle? NO bundle? sometimes?
#define headerIsBundle(hi) YES
// __ImageBase is the linker-provided base address of this module.
OBJC_EXTERN IMAGE_DOS_HEADER __ImageBase;
#define libobjc_header ((headerType *)&__ImageBase)
437
438 // Prototypes
439
440
441 #elif TARGET_OS_MAC
442
443
444 // OS headers
445
446
447 // Compiler compatibility
448
449 // OS compatibility
450
451 // Internal data types
452
// Thread identity, represented by the underlying pthread handle.
typedef pthread_t objc_thread_t;

// Nonzero if t1 and t2 name the same thread.
static __inline int thread_equal(objc_thread_t t1, objc_thread_t t2)
{
    return pthread_equal(t1, t2);
}

// Identity of the calling thread.
static __inline objc_thread_t thread_self(void)
{
    return pthread_self();
}
461
462
// Thread-local storage, layered directly over pthread TSD.
typedef pthread_key_t tls_key_t;

// Allocate a TLS key. dtor (may be NULL) runs at thread exit on any
// non-NULL stored value, per pthread_key_create semantics.
static inline void tls_create(tls_key_t *k, void (*dtor)(void*))
{
    pthread_key_create(k, dtor);
}

// This thread's value for key k (NULL if never set).
static inline void *tls_get(tls_key_t k)
{
    return pthread_getspecific(k);
}

// Bind value to key k for the calling thread.
static inline void tls_set(tls_key_t k, void *value)
{
    pthread_setspecific(k, value);
}
474
#ifndef NO_DIRECT_THREAD_KEYS
// Fast-path TLS read for the reserved direct keys: uses libc's direct
// TSD access when available, otherwise falls back to pthreads.
// Only SYNC_DATA_DIRECT_KEY and SYNC_COUNT_DIRECT_KEY are permitted
// (asserted below).
static inline void *tls_get_direct(tls_key_t k)
{
    assert(k == SYNC_DATA_DIRECT_KEY ||
           k == SYNC_COUNT_DIRECT_KEY);

    if (_pthread_has_direct_tsd()) {
        return _pthread_getspecific_direct(k);
    } else {
        return pthread_getspecific(k);
    }
}
// Fast-path TLS write for the reserved direct keys; same restrictions
// as tls_get_direct.
static inline void tls_set_direct(tls_key_t k, void *value)
{
    assert(k == SYNC_DATA_DIRECT_KEY ||
           k == SYNC_COUNT_DIRECT_KEY);

    if (_pthread_has_direct_tsd()) {
        _pthread_setspecific_direct(k, value);
    } else {
        pthread_setspecific(k, value);
    }
}
#endif
499
500
501 typedef pthread_mutex_t mutex_t;
502 #define MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER;
503
504 extern int DebuggerMode;
505 extern void gdb_objc_debuggerModeFailure(void);
506 extern BOOL isManagedDuringDebugger(void *lock);
507 extern BOOL isLockedDuringDebugger(mutex_t *lock);
508
// Lock a mutex. In debugger mode, a debugger-managed lock is not
// actually taken: it must already be held on the debugger's behalf,
// otherwise debugger mode is aborted via gdb_objc_debuggerModeFailure.
static inline int _mutex_lock_nodebug(mutex_t *m) {
    if (DebuggerMode && isManagedDuringDebugger(m)) {
        if (! isLockedDuringDebugger(m)) {
            gdb_objc_debuggerModeFailure();
        }
        return 0;
    }
    return pthread_mutex_lock(m);
}
// Try-lock. Returns nonzero on success - note this is the INVERSE of
// the pthread_mutex_trylock return convention.
static inline int _mutex_try_lock_nodebug(mutex_t *m) {
    if (DebuggerMode && isManagedDuringDebugger(m)) {
        if (! isLockedDuringDebugger(m)) {
            gdb_objc_debuggerModeFailure();
        }
        return 1;
    }
    return !pthread_mutex_trylock(m);
}
// Unlock. A debugger-managed lock is left untouched.
static inline int _mutex_unlock_nodebug(mutex_t *m) {
    if (DebuggerMode && isManagedDuringDebugger(m)) {
        return 0;
    }
    return pthread_mutex_unlock(m);
}
533
534
// Recursive mutex: a heap-allocated pthread mutex set up by
// recursive_mutex_init (defined elsewhere; presumably configured
// PTHREAD_MUTEX_RECURSIVE - confirm there). The asserts require init
// before first use.
typedef struct {
    pthread_mutex_t *mutex;
} recursive_mutex_t;
#define RECURSIVE_MUTEX_INITIALIZER {0};
#define RECURSIVE_MUTEX_NOT_LOCKED EPERM
extern void recursive_mutex_init(recursive_mutex_t *m);

// Lock; debugger-managed locks follow the same protocol as
// _mutex_lock_nodebug.
// NOTE(review): casts recursive_mutex_t* to mutex_t* for the debugger
// check - apparently the debugger tables key on the lock's address;
// confirm.
static inline int _recursive_mutex_lock_nodebug(recursive_mutex_t *m) {
    assert(m->mutex);
    if (DebuggerMode && isManagedDuringDebugger(m)) {
        if (! isLockedDuringDebugger((mutex_t *)m)) {
            gdb_objc_debuggerModeFailure();
        }
        return 0;
    }
    return pthread_mutex_lock(m->mutex);
}
// Try-lock. Nonzero on success (inverse of pthread_mutex_trylock).
static inline int _recursive_mutex_try_lock_nodebug(recursive_mutex_t *m) {
    assert(m->mutex);
    if (DebuggerMode && isManagedDuringDebugger(m)) {
        if (! isLockedDuringDebugger((mutex_t *)m)) {
            gdb_objc_debuggerModeFailure();
        }
        return 1;
    }
    return !pthread_mutex_trylock(m->mutex);
}
// Unlock; debugger-managed locks are left untouched.
static inline int _recursive_mutex_unlock_nodebug(recursive_mutex_t *m) {
    assert(m->mutex);
    if (DebuggerMode && isManagedDuringDebugger(m)) {
        return 0;
    }
    return pthread_mutex_unlock(m->mutex);
}
569
570
// Monitor: a mutex paired with a condition variable.
typedef struct {
    pthread_mutex_t mutex;
    pthread_cond_t cond;
} monitor_t;
#define MONITOR_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER }
#define MONITOR_NOT_ENTERED EPERM

// Initialize both halves; on condvar failure the mutex is torn back
// down so nothing is left half-built. Returns 0 or an errno value.
static inline int monitor_init(monitor_t *c) {
    int err = pthread_mutex_init(&c->mutex, NULL);
    if (err == 0) {
        err = pthread_cond_init(&c->cond, NULL);
        if (err != 0) pthread_mutex_destroy(&c->mutex);
    }
    return err;
}
// Acquire the monitor's mutex. Monitors are never debugger-managed
// (asserted).
static inline int _monitor_enter_nodebug(monitor_t *c) {
    assert(!isManagedDuringDebugger(c));
    return pthread_mutex_lock(&c->mutex);
}
// Release the monitor's mutex.
static inline int _monitor_exit_nodebug(monitor_t *c) {
    return pthread_mutex_unlock(&c->mutex);
}
// Block on the condition; the mutex must be held, and is reacquired
// before returning.
static inline int _monitor_wait_nodebug(monitor_t *c) {
    return pthread_cond_wait(&c->cond, &c->mutex);
}
// Wake one waiter.
static inline int monitor_notify(monitor_t *c) {
    return pthread_cond_signal(&c->cond);
}
// Wake every waiter.
static inline int monitor_notifyAll(monitor_t *c) {
    return pthread_cond_broadcast(&c->cond);
}
604
605
// semaphore_create formatted for INIT_ONCE use
// (returns the semaphore by value and aborts the process on failure,
// so the result can be assigned unconditionally).
static inline semaphore_t create_semaphore(void)
{
    semaphore_t sem;
    kern_return_t k;
    k = semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);
    if (k) _objc_fatal("semaphore_create failed (0x%x)", k);
    return sem;
}
615
616
/* Custom read-write lock
   - reader is atomic add/subtract
   - writer is pthread mutex plus atomic add/subtract
   - fairness: new readers wait if a writer wants in
   - fairness: when writer completes, readers (probably) precede new writer

   state: xxxxxxxx xxxxxxxx yyyyyyyy yyyyyyyz
   x: blocked reader count
   y: active reader count
   z: readers allowed flag
*/
typedef struct {
    volatile int32_t state;       // packed reader counts + "readers allowed" bit (see above)
    semaphore_t readersDone;      // last reader out signals the waiting writer here
    semaphore_t writerDone;       // exiting writer signals once per blocked reader
    pthread_mutex_t writerMutex;  // serializes writers against each other
} rwlock_t;

extern BOOL isReadingDuringDebugger(rwlock_t *lock);
extern BOOL isWritingDuringDebugger(rwlock_t *lock);

// Initialize: state == 1 means "readers allowed, none blocked or active".
static inline void rwlock_init(rwlock_t *l)
{
    l->state = 1;
    l->readersDone = create_semaphore();
    l->writerDone = create_semaphore();
    l->writerMutex = (mutex_t)MUTEX_INITIALIZER;
}
645
// Acquire a read lock by CAS-ing the packed state word in a retry loop.
static inline void _rwlock_read_nodebug(rwlock_t *l)
{
    if (DebuggerMode && isManagedDuringDebugger(l)) {
        // Debugger mode: the lock must already be readable on the
        // debugger's behalf, else debugger mode is aborted.
        if (! isReadingDuringDebugger(l)) {
            gdb_objc_debuggerModeFailure();
        }
        return;
    }
    while (1) {
        // Increment "blocked readers" or "active readers" count.
        int32_t old = l->state;
        if (old % 2 == 1) {  // low bit set: readers allowed
            // Readers OK. Increment active reader count.
            if (OSAtomicCompareAndSwap32Barrier(old, old + 2, &l->state)) {
                // Success. Read lock acquired.
                return;
            } else {
                // CAS failed (writer or another reader). Redo from start.
            }
        }
        else {
            // Readers not OK. Increment blocked reader count
            // (+0x10000 bumps the high-half counter; note the
            // non-barrier CAS - nothing is being protected yet).
            if (OSAtomicCompareAndSwap32(old, old + 0x10000, &l->state)) {
                // Success. Wait for writer to complete, then retry.
                semaphore_wait(l->writerDone);
            } else {
                // CAS failed (writer or another reader). Redo from start.
            }
        }
    }
}
677
// Release a read lock; the last reader out wakes a waiting writer.
static inline void _rwlock_unlock_read_nodebug(rwlock_t *l)
{
    if (DebuggerMode && isManagedDuringDebugger(l)) {
        return;
    }
    // Decrement "active readers" count.
    int32_t newState = OSAtomicAdd32Barrier(-2, &l->state);
    if ((newState & 0xffff) == 0) {
        // No active readers, and readers OK flag is clear.
        // We're the last reader out and there's a writer waiting. Wake it.
        semaphore_signal(l->readersDone);
    }
}
691
692
// Try to acquire a read lock without blocking. Returns 1 on success.
// Fails immediately against a writer, but retries the CAS up to 16
// times against interference from other readers.
static inline int _rwlock_try_read_nodebug(rwlock_t *l)
{
    int i;
    if (DebuggerMode && isManagedDuringDebugger(l)) {
        if (! isReadingDuringDebugger(l)) {
            gdb_objc_debuggerModeFailure();
        }
        return 1;
    }
    for (i = 0; i < 16; i++) {
        int32_t old = l->state;
        if (old % 2 != 1) {
            // Readers not OK. Fail.
            return 0;
        } else {
            // Readers OK.
            if (OSAtomicCompareAndSwap32Barrier(old, old + 2, &l->state)) {
                // Success. Read lock acquired.
                return 1;
            } else {
                // CAS failed (writer or another reader). Redo from start.
                // trylock will fail against writer,
                // but retry a few times against reader.
            }
        }
    }

    // Too many retries. Give up.
    return 0;
}
723
724
// Acquire the write lock: queue behind other writers on writerMutex,
// shut out new readers, then wait for active readers to drain.
static inline void _rwlock_write_nodebug(rwlock_t *l)
{
    if (DebuggerMode && isManagedDuringDebugger(l)) {
        if (! isWritingDuringDebugger(l)) {
            gdb_objc_debuggerModeFailure();
        }
        return;
    }

    // Only one writer allowed at a time.
    pthread_mutex_lock(&l->writerMutex);

    // Clear "readers OK" bit and "blocked readers" count.
    // What remains in newState is the active reader count.
    int32_t newState = OSAtomicAnd32(0x0000fffe, (uint32_t *)&l->state);

    if (newState == 0) {
        // No "active readers". Success.
        OSMemoryBarrier();
    } else {
        // Wait for "active readers" to complete
        // (the last one out signals readersDone - see
        // _rwlock_unlock_read_nodebug).
        semaphore_wait(l->readersDone);
    }
}
748
// Release the write lock: restore "readers OK", wake every reader that
// blocked while we held the lock, then admit the next writer.
static inline void _rwlock_unlock_write_nodebug(rwlock_t *l)
{
    if (DebuggerMode && isManagedDuringDebugger(l)) {
        return;
    }

    // Reinstate "readers OK" bit and clear reader counts.
    int32_t oldState;
    do {
        oldState = l->state;
    } while (!OSAtomicCompareAndSwap32Barrier(oldState, 0x1, &l->state));

    // Unblock any "blocked readers" that arrived while we held the lock
    // (their count was captured in oldState's high 16 bits).
    oldState = oldState >> 16;
    while (oldState--) {
        semaphore_signal(l->writerDone);
    }

    // Allow a new writer.
    pthread_mutex_unlock(&l->writerMutex);
}
770
// Try to acquire the write lock without blocking. Returns 1 on success.
// Fails if another writer holds writerMutex or any readers are active.
static inline int _rwlock_try_write_nodebug(rwlock_t *l)
{
    if (DebuggerMode && isManagedDuringDebugger(l)) {
        if (! isWritingDuringDebugger(l)) {
            gdb_objc_debuggerModeFailure();
        }
        return 1;
    }

    if (pthread_mutex_trylock(&l->writerMutex)) {
        // Some other writer is in the way - fail
        return 0;
    }

    // Similar to _rwlock_write_nodebug, but less intrusive with readers active

    int32_t oldState, newState;
    oldState = l->state;
    newState = oldState & 0x0000fffe;
    if (newState != 0) {
        // Readers active. Give up.
        pthread_mutex_unlock(&l->writerMutex);
        return 0;
    }
    if (!OSAtomicCompareAndSwap32Barrier(oldState, newState, &l->state)) {
        // CAS failed (reader interrupted). Give up.
        pthread_mutex_unlock(&l->writerMutex);
        return 0;
    }

    return 1;
}
803
804
// Mach-O structure aliases: pick the 32-bit or 64-bit variants so the
// rest of the runtime is width-agnostic.
#ifndef __LP64__
typedef struct mach_header headerType;
typedef struct segment_command segmentType;
typedef struct section sectionType;
#else
typedef struct mach_header_64 headerType;
typedef struct segment_command_64 segmentType;
typedef struct section_64 sectionType;
#endif
// An image is a bundle iff its Mach-O filetype says so.
#define headerIsBundle(hi) (hi->mhdr->filetype == MH_BUNDLE)
// libobjc's own Mach-O header, via the linker-provided symbol.
#define libobjc_header ((headerType *)&_mh_dylib_header)

// Per-image bookkeeping cached alongside each loaded header.
typedef struct {
    Dl_info dl_info;                        // dladdr() info for the image
    const segmentType * objcSegmentHeader;  // presumably the __OBJC segment - confirm in loader code
    const segmentType * dataSegmentHeader;  // presumably the __DATA segment - confirm in loader code
    ptrdiff_t image_slide;                  // presumably load addr minus link addr (dyld slide) - confirm
#if !__OBJC2__
    struct old_protocol **proto_refs;
#endif
} os_header_info;

// Prototypes

/* Secure /tmp usage */
extern int secure_open(const char *filename, int flags, uid_t euid);
831
832
833 #else
834
835
836 #error unknown OS
837
838
839 #endif
840
841 __END_DECLS
842
843 #endif