/*
 * Copyright (c) 2007 Apple Inc. All Rights Reserved.
 * 
 * @APPLE_LICENSE_HEADER_START@
 * 
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 * 
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 * 
 * @APPLE_LICENSE_HEADER_END@
 */
/***********************************************************************
* OS portability layer.
**********************************************************************/
#include <TargetConditionals.h>

# ifndef __STDC_LIMIT_MACROS
# define __STDC_LIMIT_MACROS
# endif
# include <crt_externs.h>
# include <AssertMacros.h>
# include <AvailabilityMacros.h>
# include <TargetConditionals.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <sys/stat.h>
# include <sys/param.h>
# include <mach/mach.h>
# include <mach-o/dyld.h>
# include <mach-o/ldsyms.h>
# include <mach-o/loader.h>
# include <mach-o/getsect.h>
# include <mach-o/dyld_priv.h>
# include <malloc/malloc.h>
# include <libkern/OSAtomic.h>
# include <libkern/OSCacheControl.h>
# include <System/pthread_machdep.h>
# include "objc-probes.h"  // generated dtrace probe definitions.
#define ARR_SPINLOCK_INIT 0
// XXX -- Careful: OSSpinLock isn't volatile, but should be
typedef volatile int ARRSpinLock;
__attribute__((always_inline))
static inline void ARRSpinLockLock(ARRSpinLock *l)
{
    int y;
again:
    if (__builtin_expect(__sync_lock_test_and_set(l, 1), 0) == 0) {
        return;
    }
    for (y = 1000; y; y--) {
#if defined(__i386__) || defined(__x86_64__)
        asm("pause");
#endif
        if (*l == 0) goto again;
    }
    thread_switch(THREAD_NULL, SWITCH_OPTION_DEPRESS, 1);
    goto again;
}
__attribute__((always_inline))
static inline void ARRSpinLockUnlock(ARRSpinLock *l)
{
    __sync_lock_release(l);
}

#define OSSpinLock ARRSpinLock
#define OSSpinLockTry(l) __sync_bool_compare_and_swap(l, 0, 1)
#define OSSpinLockLock(l) ARRSpinLockLock(l)
#define OSSpinLockUnlock(l) ARRSpinLockUnlock(l)
#undef OS_SPINLOCK_INIT
#define OS_SPINLOCK_INIT ARR_SPINLOCK_INIT
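/* Usage sketch (illustrative only; not part of the original header).
   With the remapping above, callers keep spelling OSSpinLock while the
   volatile ARRSpinLock replacement does the work. The lock variable name
   below is hypothetical.

       static OSSpinLock demoLock = OS_SPINLOCK_INIT;

       OSSpinLockLock(&demoLock);
       // ... short critical section ...
       OSSpinLockUnlock(&demoLock);
*/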
#if !TARGET_OS_IPHONE
#   include <CrashReporterClient.h>
#else
    // CrashReporterClient not yet available on iOS
    extern const char *CRSetCrashLogMessage(const char *msg);
    extern const char *CRGetCrashLogMessage(void);
    extern const char *CRSetCrashLogMessage2(const char *msg);
#endif
#if TARGET_IPHONE_SIMULATOR
    // getsectiondata() and getsegmentdata() are unavailable
#   define getsectiondata(m, s, n, c) objc_getsectiondata(m, s, n, c)
#   define getsegmentdata(m, s, c) objc_getsegmentdata(m, s, c)
    extern uint8_t *objc_getsectiondata(const struct mach_header *mh, const char *segname, const char *sectname, unsigned long *outSize);
    extern uint8_t *objc_getsegmentdata(const struct mach_header *mh, const char *segname, unsigned long *outSize);
#endif
# include <algorithm>
# include <ext/hash_map>
  using namespace __gnu_cxx;

# define PRIVATE_EXTERN __attribute__((visibility("hidden")))
# undef __private_extern__
# define __private_extern__ use_PRIVATE_EXTERN_instead
# undef private_extern
# define private_extern use_PRIVATE_EXTERN_instead
/* Use this for functions that are intended to be breakpoint hooks.
   If you do not, the compiler may optimize them away.
   BREAKPOINT_FUNCTION( void stop_on_error(void) ); */
# define BREAKPOINT_FUNCTION(prototype)                             \
    __attribute__((noinline, visibility("hidden")))                 \
    prototype { asm(""); }
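/* Usage sketch (illustrative only; not part of the original header).
   Declare a hook with the macro, call it at the point of interest, and set
   a debugger breakpoint on its symbol; the asm("") body keeps the call
   from being optimized away. The function and condition names below are
   hypothetical.

       BREAKPOINT_FUNCTION( void stop_on_error(void) );
       ...
       if (badness) stop_on_error();
*/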
#elif TARGET_OS_WIN32

# define WINVER 0x0501        // target Windows XP and later
# define _WIN32_WINNT 0x0501  // target Windows XP and later
# define WIN32_LEAN_AND_MEAN
  // hack: windef.h typedefs BOOL as int
# define BOOL WINBOOL
# include <windows.h>
# include <AvailabilityMacros.h>
# include <algorithm>
  using namespace stdext;
# define __BEGIN_DECLS extern "C" {
# define __END_DECLS }
# else
# define __BEGIN_DECLS /*empty*/
# define __END_DECLS /*empty*/
# endif
# define PRIVATE_EXTERN
# define __attribute__(x)
# define inline __inline

/* Use this for functions that are intended to be breakpoint hooks.
   If you do not, the compiler may optimize them away.
   BREAKPOINT_FUNCTION( void MyBreakpointFunction(void) ); */
# define BREAKPOINT_FUNCTION(prototype) \
    __declspec(noinline) prototype { __asm { } }

/* stub out dtrace probes */
# define OBJC_RUNTIME_OBJC_EXCEPTION_RETHROW() do {} while(0)
# define OBJC_RUNTIME_OBJC_EXCEPTION_THROW(arg0) do {} while(0)
#include <objc/objc.h>
#include <objc/objc-api.h>

extern void _objc_fatal(const char *fmt, ...) __attribute__((noreturn, format (printf, 1, 2)));
#define INIT_ONCE_PTR(var, create, delete)                              \
    do {                                                                \
        if (var) break;                                                 \
        typeof(var) v = create;                                         \
        while (!var) {                                                  \
            if (OSAtomicCompareAndSwapPtrBarrier(0, (void*)v, (void**)&var)){ \
                goto done;                                              \
            }                                                           \
        }                                                               \
        delete;                                                         \
    done:;                                                              \
    } while (0)

#define INIT_ONCE_32(var, create, delete)                               \
    do {                                                                \
        if (var) break;                                                 \
        typeof(var) v = create;                                         \
        while (!var) {                                                  \
            if (OSAtomicCompareAndSwap32Barrier(0, v, (volatile int32_t *)&var)) { \
                goto done;                                              \
            }                                                           \
        }                                                               \
        delete;                                                         \
    done:;                                                              \
    } while (0)
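/* Usage sketch (illustrative only; not part of the original header).
   The create expression builds a candidate value, the compare-and-swap
   publishes it only if var is still unset, and the delete expression runs
   when another thread won the race (v names the losing candidate). The
   names cachePtr, make_cache(), and free_cache() are hypothetical.

       static struct cache_t *cachePtr;
       INIT_ONCE_PTR(cachePtr, make_cache(), free_cache(v));
*/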
// Thread keys reserved by libc for our use.
// Keys [0..4] are used by autozone.
#if defined(__PTK_FRAMEWORK_OBJC_KEY5)
#   define SUPPORT_DIRECT_THREAD_KEYS 1
#   define TLS_DIRECT_KEY        ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY5)
#   define SYNC_DATA_DIRECT_KEY  ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY6)
#   define SYNC_COUNT_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY7)
#   define AUTORELEASE_POOL_KEY  ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY8)
# if SUPPORT_RETURN_AUTORELEASE
#   define AUTORELEASE_POOL_RECLAIM_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY9)
# endif
#else
#   define SUPPORT_DIRECT_THREAD_KEYS 0
#endif
// Compiler compatibility

#define strdup _strdup

#define issetugid() 0

#define MIN(x, y) ((x) < (y) ? (x) : (y))
static __inline void bcopy(const void *src, void *dst, size_t size) { memcpy(dst, src, size); }
static __inline void bzero(void *dst, size_t size) { memset(dst, 0, size); }

int asprintf(char **dstp, const char *format, ...);
typedef void * malloc_zone_t;

static __inline malloc_zone_t malloc_default_zone(void) { return (malloc_zone_t)-1; }
static __inline void *malloc_zone_malloc(malloc_zone_t z, size_t size) { return malloc(size); }
static __inline void *malloc_zone_calloc(malloc_zone_t z, size_t size, size_t count) { return calloc(size, count); }
static __inline void *malloc_zone_realloc(malloc_zone_t z, void *p, size_t size) { return realloc(p, size); }
static __inline void malloc_zone_free(malloc_zone_t z, void *p) { free(p); }
static __inline malloc_zone_t malloc_zone_from_ptr(const void *p) { return (malloc_zone_t)-1; }
static __inline size_t malloc_size(const void *p) { return _msize((void*)p); /* fixme invalid pointer check? */ }
#define require_action_string(cond, dest, act, msg) do { if (!(cond)) { { act; } goto dest; } } while (0)
#define require_noerr_string(err, dest, msg) do { if (err) goto dest; } while (0)
#define require_string(cond, dest, msg) do { if (!(cond)) goto dest; } while (0)
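/* Usage sketch (illustrative only; not part of the original header).
   These stubs keep only the control flow of the AssertMacros originals:
   on failure the optional action runs and control jumps to the label;
   the message is ignored. The names fd and fail below are hypothetical.

       int fd = open("/dev/null", O_RDONLY);
       require_string(fd >= 0, fail, "open failed");
       // ... use fd ...
       close(fd);
       return 0;
   fail:
       return -1;
*/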
static __inline BOOL OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst) 
{ 
    // fixme barrier is overkill
    long original = InterlockedCompareExchange(dst, newl, oldl);
    return (original == oldl);
}

static __inline BOOL OSAtomicCompareAndSwapPtrBarrier(void *oldp, void *newp, void * volatile *dst) 
{ 
    void *original = InterlockedCompareExchangePointer(dst, newp, oldp);
    return (original == oldp);
}

static __inline BOOL OSAtomicCompareAndSwap32Barrier(int32_t oldl, int32_t newl, int32_t volatile *dst) 
{ 
    long original = InterlockedCompareExchange((volatile long *)dst, newl, oldl);
    return (original == oldl);
}

static __inline int32_t OSAtomicDecrement32Barrier(volatile int32_t *dst)
{ 
    return InterlockedDecrement((volatile long *)dst);
}

static __inline int32_t OSAtomicIncrement32Barrier(volatile int32_t *dst)
{ 
    return InterlockedIncrement((volatile long *)dst);
}
// Internal data types

typedef DWORD objc_thread_t;  // thread ID
static __inline int thread_equal(objc_thread_t t1, objc_thread_t t2) { 
    return t1 == t2; 
}
static __inline objc_thread_t thread_self(void) { 
    return GetCurrentThreadId(); 
}

typedef struct {
    DWORD key;
    void (*dtor)(void *);
} tls_key_t;
static __inline tls_key_t tls_create(void (*dtor)(void*)) { 
    // fixme need dtor registry for DllMain to call on thread detach
    tls_key_t k;
    k.key = TlsAlloc();
    k.dtor = dtor;
    return k;
}
static __inline void *tls_get(tls_key_t k) { 
    return TlsGetValue(k.key); 
}
static __inline void tls_set(tls_key_t k, void *value) { 
    TlsSetValue(k.key, value); 
}
typedef struct {
    CRITICAL_SECTION *lock;
} mutex_t;
#define MUTEX_INITIALIZER {0};
extern void mutex_init(mutex_t *m);
static __inline int _mutex_lock_nodebug(mutex_t *m) { 
    // fixme error check
    if (!m->lock) {
        mutex_init(m);
    }
    EnterCriticalSection(m->lock); 
    return 0;
}
static __inline int _mutex_try_lock_nodebug(mutex_t *m) { 
    // fixme error check
    if (!m->lock) {
        mutex_init(m);
    }
    return TryEnterCriticalSection(m->lock); 
}
static __inline int _mutex_unlock_nodebug(mutex_t *m) { 
    // fixme error check
    LeaveCriticalSection(m->lock); 
    return 0;
}

typedef mutex_t OSSpinLock;
#define OSSpinLockLock(l) mutex_lock(l)
#define OSSpinLockUnlock(l) mutex_unlock(l)
#define OS_SPINLOCK_INIT MUTEX_INITIALIZER
typedef struct {
    HANDLE mutex;
} recursive_mutex_t;
#define RECURSIVE_MUTEX_INITIALIZER {0};
#define RECURSIVE_MUTEX_NOT_LOCKED 1
extern void recursive_mutex_init(recursive_mutex_t *m);
static __inline int _recursive_mutex_lock_nodebug(recursive_mutex_t *m) { 
    assert(m->mutex);
    return WaitForSingleObject(m->mutex, INFINITE); 
}
static __inline int _recursive_mutex_try_lock_nodebug(recursive_mutex_t *m) { 
    assert(m->mutex);
    return (WAIT_OBJECT_0 == WaitForSingleObject(m->mutex, 0)); 
}
static __inline int _recursive_mutex_unlock_nodebug(recursive_mutex_t *m) { 
    assert(m->mutex);
    return ReleaseMutex(m->mutex) ? 0 : RECURSIVE_MUTEX_NOT_LOCKED;
}
typedef HANDLE mutex_t;
static inline void mutex_init(HANDLE *m) { *m = CreateMutex(NULL, FALSE, NULL); }
static inline void _mutex_lock(mutex_t *m) { WaitForSingleObject(*m, INFINITE); }
static inline int mutex_try_lock(mutex_t *m) { return WaitForSingleObject(*m, 0) == WAIT_OBJECT_0; }
static inline void _mutex_unlock(mutex_t *m) { ReleaseMutex(*m); }
// based on http://www.cs.wustl.edu/~schmidt/win32-cv-1.html
// Vista-only CONDITION_VARIABLE would be better
typedef struct {
    HANDLE mutex;
    HANDLE waiters;                  // semaphore for those in cond_wait()
    HANDLE waitersDone;              // auto-reset event after everyone gets a broadcast
    CRITICAL_SECTION waitCountLock;  // guards waitCount and didBroadcast
    unsigned int waitCount;
    int didBroadcast;
} monitor_t;
#define MONITOR_INITIALIZER { 0 }
#define MONITOR_NOT_ENTERED 1
extern int monitor_init(monitor_t *c);
static inline int _monitor_enter_nodebug(monitor_t *c) {
    if (!c->mutex) {
        int err = monitor_init(c);
        if (err) return err;
    }
    return WaitForSingleObject(c->mutex, INFINITE);
}

static inline int _monitor_exit_nodebug(monitor_t *c) {
    if (!ReleaseMutex(c->mutex)) return MONITOR_NOT_ENTERED;
    else return 0;
}

static inline int _monitor_wait_nodebug(monitor_t *c) { 
    int last;
    EnterCriticalSection(&c->waitCountLock);
    c->waitCount++;
    LeaveCriticalSection(&c->waitCountLock);

    SignalObjectAndWait(c->mutex, c->waiters, INFINITE, FALSE);

    EnterCriticalSection(&c->waitCountLock);
    c->waitCount--;
    last = c->didBroadcast  &&  c->waitCount == 0;
    LeaveCriticalSection(&c->waitCountLock);

    if (last) {
        // tell broadcaster that all waiters have awoken
        SignalObjectAndWait(c->waitersDone, c->mutex, INFINITE, FALSE);
    } else {
        WaitForSingleObject(c->mutex, INFINITE);
    }

    // fixme error checking
    return 0;
}
static inline int monitor_notify(monitor_t *c) 
{ 
    int haveWaiters;

    EnterCriticalSection(&c->waitCountLock);
    haveWaiters = c->waitCount > 0;
    LeaveCriticalSection(&c->waitCountLock);

    if (haveWaiters) {
        ReleaseSemaphore(c->waiters, 1, 0);
    }

    // fixme error checking
    return 0;
}
static inline int monitor_notifyAll(monitor_t *c) 
{ 
    EnterCriticalSection(&c->waitCountLock);
    if (c->waitCount == 0) {
        LeaveCriticalSection(&c->waitCountLock);
        return 0;
    }
    c->didBroadcast = 1;
    ReleaseSemaphore(c->waiters, c->waitCount, 0);
    LeaveCriticalSection(&c->waitCountLock);

    // fairness: wait for everyone to move from waiters to mutex
    WaitForSingleObject(c->waitersDone, INFINITE);
    // not under waitCountLock, but still under mutex
    c->didBroadcast = 0;

    // fixme error checking
    return 0;
}
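/* Usage sketch (illustrative only; not part of the original header).
   The monitor pairs a Win32 mutex with a semaphore and a waiters-done
   event, in the style of the article cited above. A hypothetical
   producer/consumer hand-off (the monitor and ready flag are not part of
   this header):

       monitor_t m = MONITOR_INITIALIZER;
       int ready = 0;

       // consumer
       _monitor_enter_nodebug(&m);
       while (!ready) _monitor_wait_nodebug(&m);
       _monitor_exit_nodebug(&m);

       // producer
       _monitor_enter_nodebug(&m);
       ready = 1;
       monitor_notify(&m);
       _monitor_exit_nodebug(&m);
*/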
// fixme no rwlock yet

#define rwlock_t mutex_t
#define rwlock_init(r) mutex_init(r)
#define _rwlock_read_nodebug(m) _mutex_lock_nodebug(m)
#define _rwlock_write_nodebug(m) _mutex_lock_nodebug(m)
#define _rwlock_try_read_nodebug(m) _mutex_try_lock_nodebug(m)
#define _rwlock_try_write_nodebug(m) _mutex_try_lock_nodebug(m)
#define _rwlock_unlock_read_nodebug(m) _mutex_unlock_nodebug(m)
#define _rwlock_unlock_write_nodebug(m) _mutex_unlock_nodebug(m)
    struct objc_module **modules;
    struct old_protocol **protocols;
    size_t protocolCount;
    size_t imageinfoBytes;
    struct objc_class **clsrefs;

typedef IMAGE_DOS_HEADER headerType;
// fixme YES bundle? NO bundle? sometimes?
#define headerIsBundle(hi) YES
OBJC_EXTERN IMAGE_DOS_HEADER __ImageBase;
#define libobjc_header ((headerType *)&__ImageBase)
#include <mach-o/loader.h>
#ifndef __LP64__
#   define SEGMENT_CMD LC_SEGMENT
#else
#   define SEGMENT_CMD LC_SEGMENT_64
#endif

#ifndef VM_MEMORY_OBJC_DISPATCHERS
#   define VM_MEMORY_OBJC_DISPATCHERS 0
#endif
// Compiler compatibility

// Internal data types

typedef pthread_t objc_thread_t;

static __inline int thread_equal(objc_thread_t t1, objc_thread_t t2) { 
    return pthread_equal(t1, t2); 
}
static __inline objc_thread_t thread_self(void) { 
    return pthread_self(); 
}
typedef pthread_key_t tls_key_t;

static inline tls_key_t tls_create(void (*dtor)(void*)) { 
    tls_key_t k;
    pthread_key_create(&k, dtor); 
    return k;
}
static inline void *tls_get(tls_key_t k) { 
    return pthread_getspecific(k); 
}
static inline void tls_set(tls_key_t k, void *value) { 
    pthread_setspecific(k, value); 
}
#if SUPPORT_DIRECT_THREAD_KEYS
static inline void *tls_get_direct(tls_key_t k) 
{ 
    assert(k == SYNC_DATA_DIRECT_KEY  ||
           k == SYNC_COUNT_DIRECT_KEY);

    if (_pthread_has_direct_tsd()) {
        return _pthread_getspecific_direct(k);
    } else {
        return pthread_getspecific(k);
    }
}
static inline void tls_set_direct(tls_key_t k, void *value) 
{ 
    assert(k == SYNC_DATA_DIRECT_KEY  ||
           k == SYNC_COUNT_DIRECT_KEY);

    if (_pthread_has_direct_tsd()) {
        _pthread_setspecific_direct(k, value);
    } else {
        pthread_setspecific(k, value);
    }
}
#endif
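/* Usage sketch (illustrative only; not part of the original header).
   The direct accessors are only valid for the reserved keys asserted
   above; other keys must go through tls_get()/tls_set(). The data pointer
   and allocate_sync_data() helper below are hypothetical.

       void *data = tls_get_direct(SYNC_DATA_DIRECT_KEY);
       if (!data) {
           data = allocate_sync_data();
           tls_set_direct(SYNC_DATA_DIRECT_KEY, data);
       }
*/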
typedef pthread_mutex_t mutex_t;
#define MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER;

extern int DebuggerMode;
extern void gdb_objc_debuggerModeFailure(void);
extern BOOL isManagedDuringDebugger(void *lock);
extern BOOL isLockedDuringDebugger(void *lock);
static inline int _mutex_lock_nodebug(mutex_t *m) {
    if (DebuggerMode  &&  isManagedDuringDebugger(m)) {
        if (! isLockedDuringDebugger(m)) {
            gdb_objc_debuggerModeFailure();
        }
        return 0;
    }
    return pthread_mutex_lock(m);
}
static inline int _mutex_try_lock_nodebug(mutex_t *m) {
    if (DebuggerMode  &&  isManagedDuringDebugger(m)) {
        if (! isLockedDuringDebugger(m)) {
            gdb_objc_debuggerModeFailure();
        }
        return 1;
    }
    return !pthread_mutex_trylock(m);
}
static inline int _mutex_unlock_nodebug(mutex_t *m) {
    if (DebuggerMode  &&  isManagedDuringDebugger(m)) {
        return 0;
    }
    return pthread_mutex_unlock(m);
}
typedef struct { 
    pthread_mutex_t *mutex;
} recursive_mutex_t;
#define RECURSIVE_MUTEX_INITIALIZER {0};
#define RECURSIVE_MUTEX_NOT_LOCKED EPERM
extern void recursive_mutex_init(recursive_mutex_t *m);
static inline int _recursive_mutex_lock_nodebug(recursive_mutex_t *m) {
    assert(m->mutex);
    if (DebuggerMode  &&  isManagedDuringDebugger(m)) {
        if (! isLockedDuringDebugger((mutex_t *)m)) {
            gdb_objc_debuggerModeFailure();
        }
        return 0;
    }
    return pthread_mutex_lock(m->mutex);
}
static inline int _recursive_mutex_try_lock_nodebug(recursive_mutex_t *m) {
    assert(m->mutex);
    if (DebuggerMode  &&  isManagedDuringDebugger(m)) {
        if (! isLockedDuringDebugger((mutex_t *)m)) {
            gdb_objc_debuggerModeFailure();
        }
        return 1;
    }
    return !pthread_mutex_trylock(m->mutex);
}
static inline int _recursive_mutex_unlock_nodebug(recursive_mutex_t *m) {
    assert(m->mutex);
    if (DebuggerMode  &&  isManagedDuringDebugger(m)) {
        return 0;
    }
    return pthread_mutex_unlock(m->mutex);
}
typedef struct {
    pthread_mutex_t mutex;
    pthread_cond_t cond;
} monitor_t;

#define MONITOR_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER }
#define MONITOR_NOT_ENTERED EPERM
static inline int monitor_init(monitor_t *c) {
    int err = pthread_mutex_init(&c->mutex, NULL);
    if (err) return err;
    err = pthread_cond_init(&c->cond, NULL);
    if (err) {
        pthread_mutex_destroy(&c->mutex);
        return err;
    }
    return 0;
}
static inline int _monitor_enter_nodebug(monitor_t *c) {
    assert(!isManagedDuringDebugger(c));
    return pthread_mutex_lock(&c->mutex);
}
static inline int _monitor_exit_nodebug(monitor_t *c) {
    return pthread_mutex_unlock(&c->mutex);
}
static inline int _monitor_wait_nodebug(monitor_t *c) { 
    return pthread_cond_wait(&c->cond, &c->mutex);
}
static inline int monitor_notify(monitor_t *c) { 
    return pthread_cond_signal(&c->cond);
}
static inline int monitor_notifyAll(monitor_t *c) { 
    return pthread_cond_broadcast(&c->cond);
}
// semaphore_create formatted for INIT_ONCE use
static inline semaphore_t create_semaphore(void)
{
    semaphore_t sem;
    kern_return_t k;
    k = semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);
    if (k) _objc_fatal("semaphore_create failed (0x%x)", k);
    return sem;
}
/* Custom read-write lock
   - reader is atomic add/subtract
   - writer is pthread mutex plus atomic add/subtract
   - fairness: new readers wait if a writer wants in
   - fairness: when writer completes, readers (probably) precede new writer

   state: xxxxxxxx xxxxxxxx yyyyyyyy yyyyyyyz
       x: blocked reader count
       y: active reader count
       z: readers allowed flag
*/
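/* Worked example of the state encoding above (illustrative only; not part
   of the original header). Bit 0 is the "readers allowed" flag, so readers
   change the active count in steps of 2 without disturbing it, and blocked
   readers are counted in the high 16 bits in steps of 0x10000.

       0x00000001   idle: readers allowed, no readers
       0x00000005   readers allowed, two active readers (1 + 2*2)
       0x00020000   writer holds the lock, two readers blocked
*/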
typedef struct {
    volatile int32_t state;
    semaphore_t readersDone;
    semaphore_t writerDone;
    pthread_mutex_t writerMutex;
} rwlock_t;
extern BOOL isReadingDuringDebugger(rwlock_t *lock);
extern BOOL isWritingDuringDebugger(rwlock_t *lock);
static inline void rwlock_init(rwlock_t *l)
{
    l->state = 0x1;
    l->readersDone = create_semaphore();
    l->writerDone = create_semaphore();
    l->writerMutex = (mutex_t)MUTEX_INITIALIZER;
}
static inline void _rwlock_read_nodebug(rwlock_t *l)
{
    if (DebuggerMode  &&  isManagedDuringDebugger(l)) {
        if (! isReadingDuringDebugger(l)) {
            gdb_objc_debuggerModeFailure();
        }
        return;
    }
    while (1) {
        // Increment "blocked readers" or "active readers" count.
        int32_t old = l->state;
        if (old % 2 == 1) {
            // Readers OK. Increment active reader count.
            if (OSAtomicCompareAndSwap32Barrier(old, old + 2, &l->state)) {
                // Success. Read lock acquired.
                return;
            } else {
                // CAS failed (writer or another reader). Redo from start.
            }
        } else {
            // Readers not OK. Increment blocked reader count.
            if (OSAtomicCompareAndSwap32(old, old + 0x10000, &l->state)) {
                // Success. Wait for writer to complete, then retry.
                semaphore_wait(l->writerDone);
            } else {
                // CAS failed (writer or another reader). Redo from start.
            }
        }
    }
}
static inline void _rwlock_unlock_read_nodebug(rwlock_t *l)
{
    if (DebuggerMode  &&  isManagedDuringDebugger(l)) {
        return;
    }
    // Decrement "active readers" count.
    int32_t newState = OSAtomicAdd32Barrier(-2, &l->state);
    if ((newState & 0xffff) == 0) {
        // No active readers, and readers OK flag is clear.
        // We're the last reader out and there's a writer waiting. Wake it.
        semaphore_signal(l->readersDone);
    }
}
static inline int _rwlock_try_read_nodebug(rwlock_t *l)
{
    int i;
    if (DebuggerMode  &&  isManagedDuringDebugger(l)) {
        if (! isReadingDuringDebugger(l)) {
            gdb_objc_debuggerModeFailure();
        }
        return 1;
    }
    for (i = 0; i < 16; i++) {
        int32_t old = l->state;
        if (old % 2 != 1) {
            // Readers not OK. Fail.
            return 0;
        }
        if (OSAtomicCompareAndSwap32Barrier(old, old + 2, &l->state)) {
            // Success. Read lock acquired.
            return 1;
        }
        // CAS failed (writer or another reader). Redo from start.
        // trylock will fail against writer,
        // but retry a few times against reader.
    }
    // Too many retries. Give up.
    return 0;
}
static inline void _rwlock_write_nodebug(rwlock_t *l)
{
    if (DebuggerMode  &&  isManagedDuringDebugger(l)) {
        if (! isWritingDuringDebugger(l)) {
            gdb_objc_debuggerModeFailure();
        }
        return;
    }

    // Only one writer allowed at a time.
    pthread_mutex_lock(&l->writerMutex);

    // Clear "readers OK" bit and "blocked readers" count.
    int32_t newState = OSAtomicAnd32(0x0000fffe, (uint32_t *)&l->state);

    if (newState == 0) {
        // No "active readers". Success.
    } else {
        // Wait for "active readers" to complete.
        semaphore_wait(l->readersDone);
    }
}
static inline void _rwlock_unlock_write_nodebug(rwlock_t *l)
{
    if (DebuggerMode  &&  isManagedDuringDebugger(l)) {
        return;
    }

    // Reinstate "readers OK" bit and clear reader counts.
    int32_t oldState;
    do {
        oldState = l->state;
    } while (!OSAtomicCompareAndSwap32Barrier(oldState, 0x1, &l->state));

    // Unblock any "blocked readers" that arrived while we held the lock
    oldState = oldState >> 16;
    while (oldState--) {
        semaphore_signal(l->writerDone);
    }

    // Allow a new writer.
    pthread_mutex_unlock(&l->writerMutex);
}
static inline int _rwlock_try_write_nodebug(rwlock_t *l)
{
    if (DebuggerMode  &&  isManagedDuringDebugger(l)) {
        if (! isWritingDuringDebugger(l)) {
            gdb_objc_debuggerModeFailure();
        }
        return 1;
    }

    if (pthread_mutex_trylock(&l->writerMutex)) {
        // Some other writer is in the way - fail
        return 0;
    }

    // Similar to _rwlock_write_nodebug, but less intrusive with readers active

    int32_t oldState, newState;
    oldState = l->state;
    newState = oldState & 0x0000fffe;
    if (newState != 0) {
        // Readers active. Give up.
        pthread_mutex_unlock(&l->writerMutex);
        return 0;
    }
    if (!OSAtomicCompareAndSwap32Barrier(oldState, newState, &l->state)) {
        // CAS failed (reader interrupted). Give up.
        pthread_mutex_unlock(&l->writerMutex);
        return 0;
    }

    return 1;
}
#ifndef __LP64__
typedef struct mach_header headerType;
typedef struct segment_command segmentType;
typedef struct section sectionType;
#else
typedef struct mach_header_64 headerType;
typedef struct segment_command_64 segmentType;
typedef struct section_64 sectionType;
#endif
#define headerIsBundle(hi) (hi->mhdr->filetype == MH_BUNDLE)
#define libobjc_header ((headerType *)&_mh_dylib_header)
    struct old_protocol **proto_refs;
/* Secure /tmp usage */
extern int secure_open(const char *filename, int flags, uid_t euid);
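/* Usage sketch (illustrative only; not part of the original header).
   secure_open() is declared here as a hardened replacement for open() on
   files in shared temporary directories; the euid argument suggests it
   validates ownership before use, though that behavior is an assumption.
   The path below is hypothetical.

       int fd = secure_open("/tmp/objc-demo.log", O_WRONLY | O_CREAT, geteuid());
       if (fd >= 0) {
           // ... write, then close(fd) ...
       }
*/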