2 * Copyright (c) 2007 Apple Inc. All Rights Reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
24 /***********************************************************************
26 * OS portability layer.
27 **********************************************************************/
32 #include <TargetConditionals.h>
36 # ifndef __STDC_LIMIT_MACROS
37 # define __STDC_LIMIT_MACROS
54 # include <crt_externs.h>
55 # include <AssertMacros.h>
57 # include <AvailabilityMacros.h>
58 # include <TargetConditionals.h>
59 # include <sys/mman.h>
60 # include <sys/time.h>
61 # include <sys/stat.h>
62 # include <sys/param.h>
63 # include <mach/mach.h>
64 # include <mach-o/dyld.h>
65 # include <mach-o/ldsyms.h>
66 # include <mach-o/loader.h>
67 # include <mach-o/getsect.h>
68 # include <mach-o/dyld_priv.h>
69 # include <malloc/malloc.h>
70 # include <libkern/OSAtomic.h>
71 # include <libkern/OSCacheControl.h>
72 # include <System/pthread_machdep.h>
73 # include "objc-probes.h" // generated dtrace probe definitions.
76 #if defined(__i386__) || defined(__x86_64__)
79 // Not for arm on iOS because it hurts uniprocessor performance.
// Custom spinlock replacing OSSpinLock on i386/x86_64.
// NOTE(review): several original lines are missing from this extraction
// (function braces, the spin counter declaration, the `again:` label and
// the pause/return/goto statements) -- restore them before compiling.
81 #define ARR_SPINLOCK_INIT 0
82 // XXX -- Careful: OSSpinLock isn't volatile, but should be
83 typedef volatile int ARRSpinLock
;
// Acquire: atomic test-and-set; on contention, spin briefly, then depress
// thread priority via thread_switch() and retry.
84 __attribute__((always_inline
))
85 static inline void ARRSpinLockLock(ARRSpinLock
*l
)
89 if (__builtin_expect(__sync_lock_test_and_set(l
, 1), 0) == 0) {
92 for (y
= 1000; y
; y
--) {
93 #if defined(__i386__) || defined(__x86_64__)
96 if (*l
== 0) goto again
;
98 thread_switch(THREAD_NULL
, SWITCH_OPTION_DEPRESS
, 1);
// Release: publish 0 with release semantics.
101 __attribute__((always_inline
))
102 static inline void ARRSpinLockUnlock(ARRSpinLock
*l
)
104 __sync_lock_release(l
);
// Non-blocking acquire attempt; nonzero on success.
106 __attribute__((always_inline
))
107 static inline int ARRSpinLockTry(ARRSpinLock
*l
)
109 return __sync_bool_compare_and_swap(l
, 0, 1);
// Route the OSSpinLock* API names to the ARRSpinLock implementation.
112 #define OSSpinLock ARRSpinLock
113 #define OSSpinLockTry(l) ARRSpinLockTry(l)
114 #define OSSpinLockLock(l) ARRSpinLockLock(l)
115 #define OSSpinLockUnlock(l) ARRSpinLockUnlock(l)
116 #undef OS_SPINLOCK_INIT
117 #define OS_SPINLOCK_INIT ARR_SPINLOCK_INIT
// Crash-log message hooks: real CrashReporterClient on Mac, bare externs on iOS.
122 #if !TARGET_OS_IPHONE
123 # include <CrashReporterClient.h>
125 // CrashReporterClient not yet available on iOS
127 extern const char *CRSetCrashLogMessage(const char *msg
);
128 extern const char *CRGetCrashLogMessage(void);
129 extern const char *CRSetCrashLogMessage2(const char *msg
);
// Simulator lacks getsectiondata()/getsegmentdata(); remap to local copies.
133 #if TARGET_IPHONE_SIMULATOR
134 // getsectiondata() and getsegmentdata() are unavailable
136 # define getsectiondata(m, s, n, c) objc_getsectiondata(m, s, n, c)
137 # define getsegmentdata(m, s, c) objc_getsegmentdata(m, s, c)
138 extern uint8_t *objc_getsectiondata(const struct mach_header
*mh
, const char *segname
, const char *sectname
, unsigned long *outSize
);
139 extern uint8_t * objc_getsegmentdata(const struct mach_header
*mh
, const char *segname
, unsigned long *outSize
);
145 # include <algorithm>
146 # include <functional>
// Symbol-visibility helpers; __private_extern__ is deliberately poisoned.
150 # define PRIVATE_EXTERN __attribute__((visibility("hidden")))
151 # undef __private_extern__
152 # define __private_extern__ use_PRIVATE_EXTERN_instead
153 # undef private_extern
154 # define private_extern use_PRIVATE_EXTERN_instead
156 /* Use this for functions that are intended to be breakpoint hooks.
157 If you do not, the compiler may optimize them away.
158 BREAKPOINT_FUNCTION( void stop_on_error(void) ); */
159 # define BREAKPOINT_FUNCTION(prototype) \
160 OBJC_EXTERN __attribute__((noinline, visibility("hidden"))) \
161 prototype { asm(""); }
// Windows build configuration.
163 #elif TARGET_OS_WIN32
165 # define WINVER 0x0501 // target Windows XP and later
166 # define _WIN32_WINNT 0x0501 // target Windows XP and later
167 # define WIN32_LEAN_AND_MEAN
168 // hack: windef.h typedefs BOOL as int
169 # define BOOL WINBOOL
170 # include <windows.h>
180 # include <AvailabilityMacros.h>
184 # include <algorithm>
185 # include <functional>
187 # define __BEGIN_DECLS extern "C" {
188 # define __END_DECLS }
190 # define __BEGIN_DECLS /*empty*/
191 # define __END_DECLS /*empty*/
194 # define PRIVATE_EXTERN
195 # define __attribute__(x)
196 # define inline __inline
198 /* Use this for functions that are intended to be breakpoint hooks.
199 If you do not, the compiler may optimize them away.
200 BREAKPOINT_FUNCTION( void MyBreakpointFunction(void) ); */
201 # define BREAKPOINT_FUNCTION(prototype) \
202 __declspec(noinline) prototype { __asm { } }
204 /* stub out dtrace probes */
205 # define OBJC_RUNTIME_OBJC_EXCEPTION_RETHROW() do {} while(0)
206 # define OBJC_RUNTIME_OBJC_EXCEPTION_THROW(arg0) do {} while(0)
213 #include <objc/objc.h>
214 #include <objc/objc-api.h>
// Fatal-error reporter; never returns, printf-style format checking.
218 extern void _objc_fatal(const char *fmt
, ...) __attribute__((noreturn
, format (printf
, 1, 2)));
// One-time lazy initialization via compare-and-swap; `delete` disposes the
// losing thread's freshly created value.
// NOTE(review): the macro bodies are truncated in this extraction
// (continuation lines are missing) -- restore before compiling.
220 #define INIT_ONCE_PTR(var, create, delete) \
223 typeof(var) v = create; \
225 if (OSAtomicCompareAndSwapPtrBarrier(0, (void*)v, (void**)&var)){ \
233 #define INIT_ONCE_32(var, create, delete) \
236 typeof(var) v = create; \
238 if (OSAtomicCompareAndSwap32Barrier(0, v, (volatile int32_t *)&var)) { \
247 // Thread keys reserved by libc for our use.
248 // Keys [0..4] are used by autozone.
249 #if defined(__PTK_FRAMEWORK_OBJC_KEY5)
250 # define SUPPORT_DIRECT_THREAD_KEYS 1
251 # define TLS_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY5)
252 # define SYNC_DATA_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY6)
253 # define SYNC_COUNT_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY7)
254 # define AUTORELEASE_POOL_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY8)
255 # if SUPPORT_RETURN_AUTORELEASE
256 # define AUTORELEASE_POOL_RECLAIM_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY9)
259 # define SUPPORT_DIRECT_THREAD_KEYS 0
265 // Compiler compatibility
// MSVC/Windows compatibility shims.
269 #define strdup _strdup
271 #define issetugid() 0
273 #define MIN(x, y) ((x) < (y) ? (x) : (y))
/* BSD bcopy() shim for Win32: forwards to memcpy().
   Note bcopy's (src, dst) argument order is the reverse of memcpy's. */
static __inline
void bcopy(const void *src, void *dst, size_t len)
{
    memcpy(dst, src, len);
}
/* BSD bzero() shim for Win32: zero-fills `len` bytes at `dst` via memset(). */
static __inline
void bzero(void *dst, size_t len)
{
    memset(dst, 0, len);
}
// asprintf() is a BSD/GNU extension absent from the MSVC CRT; declared here,
// implemented elsewhere in the runtime. Allocates *dstp; caller must free it.
278 int asprintf(char **dstp
, const char *format
, ...);
/* Minimal malloc_zone_* shims for platforms without Apple's malloc zones.
   There is exactly one notional "zone", represented by the sentinel value
   (malloc_zone_t)-1; every call ignores the zone argument and forwards
   straight to the C heap. */
typedef void * malloc_zone_t;

static __inline malloc_zone_t malloc_default_zone(void)
{
    return (malloc_zone_t)-1;   /* sentinel for the single default zone */
}
static __inline void *malloc_zone_malloc(malloc_zone_t z, size_t size)
{
    return malloc(size);
}
static __inline void *malloc_zone_calloc(malloc_zone_t z, size_t size, size_t count)
{
    return calloc(size, count);
}
static __inline void *malloc_zone_realloc(malloc_zone_t z, void *p, size_t size)
{
    return realloc(p, size);
}
static __inline void malloc_zone_free(malloc_zone_t z, void *p)
{
    free(p);
}
static __inline malloc_zone_t malloc_zone_from_ptr(const void *p)
{
    /* every pointer nominally belongs to the single default zone */
    return (malloc_zone_t)-1;
}
/* malloc_size() shim: the MSVC CRT's _msize() reports a heap block's size.
   fixme invalid pointer check? */
static __inline
size_t malloc_size(const void *p)
{
    return _msize((void *)p);
}
// Stand-ins for AssertMacros.h require_* on Windows: the message is dropped
// and control transfers to `dest` on failure.
293 #define require_action_string(cond, dest, act, msg) do { if (!(cond)) { { act; } goto dest; } } while (0)
294 #define require_noerr_string(err, dest, msg) do { if (err) goto dest; } while (0)
295 #define require_string(cond, dest, msg) do { if (!(cond)) goto dest; } while (0)
// libkern OSAtomic emulation over Win32 Interlocked* primitives.
// Each returns true iff the swap hit the expected old value.
// NOTE(review): the function braces are missing from this extraction.
300 static __inline BOOL
OSAtomicCompareAndSwapLong(long oldl
, long newl
, long volatile *dst
)
302 // fixme barrier is overkill
303 long original
= InterlockedCompareExchange(dst
, newl
, oldl
);
304 return (original
== oldl
);
307 static __inline BOOL
OSAtomicCompareAndSwapPtrBarrier(void *oldp
, void *newp
, void * volatile *dst
)
309 void *original
= InterlockedCompareExchangePointer(dst
, newp
, oldp
);
310 return (original
== oldp
);
313 static __inline BOOL
OSAtomicCompareAndSwap32Barrier(int32_t oldl
, int32_t newl
, int32_t volatile *dst
)
315 long original
= InterlockedCompareExchange((volatile long *)dst
, newl
, oldl
);
316 return (original
== oldl
);
// Atomic +/-1 with full barrier; returns the new value.
319 static __inline
int32_t OSAtomicDecrement32Barrier(volatile int32_t *dst
)
321 return InterlockedDecrement((volatile long *)dst
);
324 static __inline
int32_t OSAtomicIncrement32Barrier(volatile int32_t *dst
)
326 return InterlockedIncrement((volatile long *)dst
);
330 // Internal data types
// Windows thread identity and thread-local storage shims over the Win32 API.
// NOTE(review): the bodies of thread_equal() and tls_create() are truncated
// in this extraction (returns and the tls_key_t struct header are missing).
332 typedef DWORD objc_thread_t
; // thread ID
333 static __inline
int thread_equal(objc_thread_t t1
, objc_thread_t t2
) {
336 static __inline objc_thread_t
thread_self(void) {
337 return GetCurrentThreadId();
// Per-key destructor; Windows has no pthread-style TLS destructors.
342 void (*dtor
)(void *);
344 static __inline tls_key_t
tls_create(void (*dtor
)(void*)) {
345 // fixme need dtor registry for DllMain to call on thread detach
351 static __inline
void *tls_get(tls_key_t k
) {
352 return TlsGetValue(k
.key
);
354 static __inline
void tls_set(tls_key_t k
, void *value
) {
355 TlsSetValue(k
.key
, value
);
// Windows mutex built on a lazily-created CRITICAL_SECTION.
// NOTE(review): the struct header and parts of the lock functions are
// missing from this extraction.
359 CRITICAL_SECTION
*lock
;
361 #define MUTEX_INITIALIZER {0};
362 extern void mutex_init(mutex_t
*m
);
363 static __inline
int _mutex_lock_nodebug(mutex_t
*m
) {
368 EnterCriticalSection(m
->lock
);
371 static __inline
bool _mutex_try_lock_nodebug(mutex_t
*m
) {
376 return TryEnterCriticalSection(m
->lock
);
378 static __inline
int _mutex_unlock_nodebug(mutex_t
*m
) {
380 LeaveCriticalSection(m
->lock
);
// No true spinlock on Windows: fall back to the mutex.
385 typedef mutex_t OSSpinLock
;
386 #define OSSpinLockLock(l) mutex_lock(l)
387 #define OSSpinLockUnlock(l) mutex_unlock(l)
388 #define OS_SPINLOCK_INIT MUTEX_INITIALIZER
396 extern void recursive_mutex_init(recursive_mutex_t
*m
);
397 static __inline
int _recursive_mutex_lock_nodebug(recursive_mutex_t
*m
) {
399 return WaitForSingleObject(m
->mutex
, INFINITE
);
401 static __inline
bool _recursive_mutex_try_lock_nodebug(recursive_mutex_t
*m
) {
403 return (WAIT_OBJECT_0
== WaitForSingleObject(m
->mutex
, 0));
405 static __inline
int _recursive_mutex_unlock_nodebug(recursive_mutex_t
*m
) {
407 return ReleaseMutex(m
->mutex
) ? 0 : RECURSIVE_MUTEX_NOT_LOCKED
;
412 typedef HANDLE mutex_t;
413 static inline void mutex_init(HANDLE *m) { *m = CreateMutex(NULL, FALSE, NULL); }
414 static inline void _mutex_lock(mutex_t *m) { WaitForSingleObject(*m, INFINITE); }
415 static inline bool mutex_try_lock(mutex_t *m) { return WaitForSingleObject(*m, 0) == WAIT_OBJECT_0; }
416 static inline void _mutex_unlock(mutex_t *m) { ReleaseMutex(*m); }
419 // based on http://www.cs.wustl.edu/~schmidt/win32-cv-1.html
420 // Vista-only CONDITION_VARIABLE would be better
// Monitor (mutex + condition variable) emulated with Win32 primitives.
// NOTE(review): this extraction is missing the struct header, the mutex and
// didBroadcast fields, local declarations, braces, and several statements.
423 HANDLE waiters
; // semaphore for those in cond_wait()
424 HANDLE waitersDone
; // auto-reset event after everyone gets a broadcast
425 CRITICAL_SECTION waitCountLock
; // guards waitCount and didBroadcast
426 unsigned int waitCount
;
429 #define MONITOR_INITIALIZER { 0 }
430 #define MONITOR_NOT_ENTERED 1
431 extern int monitor_init(monitor_t
*c
);
// Enter: lazily initialize, then take the monitor mutex.
433 static inline int _monitor_enter_nodebug(monitor_t
*c
) {
435 int err
= monitor_init(c
);
438 return WaitForSingleObject(c
->mutex
, INFINITE
);
440 static inline int _monitor_exit_nodebug(monitor_t
*c
) {
441 if (!ReleaseMutex(c
->mutex
)) return MONITOR_NOT_ENTERED
;
// Wait: register as a waiter, atomically release the mutex and block on the
// waiters semaphore, then (after broadcast bookkeeping) retake the mutex.
444 static inline int _monitor_wait_nodebug(monitor_t
*c
) {
446 EnterCriticalSection(&c
->waitCountLock
);
448 LeaveCriticalSection(&c
->waitCountLock
);
450 SignalObjectAndWait(c
->mutex
, c
->waiters
, INFINITE
, FALSE
);
452 EnterCriticalSection(&c
->waitCountLock
);
454 last
= c
->didBroadcast
&& c
->waitCount
== 0;
455 LeaveCriticalSection(&c
->waitCountLock
);
458 // tell broadcaster that all waiters have awoken
459 SignalObjectAndWait(c
->waitersDone
, c
->mutex
, INFINITE
, FALSE
);
461 WaitForSingleObject(c
->mutex
, INFINITE
);
464 // fixme error checking
// Notify one waiter, if any.
467 static inline int monitor_notify(monitor_t
*c
) {
470 EnterCriticalSection(&c
->waitCountLock
);
471 haveWaiters
= c
->waitCount
> 0;
472 LeaveCriticalSection(&c
->waitCountLock
);
475 ReleaseSemaphore(c
->waiters
, 1, 0);
478 // fixme error checking
// Broadcast: wake every current waiter, then wait for all of them to move
// from the waiters semaphore back onto the mutex.
481 static inline int monitor_notifyAll(monitor_t
*c
) {
482 EnterCriticalSection(&c
->waitCountLock
);
483 if (c
->waitCount
== 0) {
484 LeaveCriticalSection(&c
->waitCountLock
);
488 ReleaseSemaphore(c
->waiters
, c
->waitCount
, 0);
489 LeaveCriticalSection(&c
->waitCountLock
);
491 // fairness: wait for everyone to move from waiters to mutex
492 WaitForSingleObject(c
->waitersDone
, INFINITE
);
493 // not under waitCountLock, but still under mutex
496 // fixme error checking
501 // fixme no rwlock yet
// No reader/writer lock on Windows: alias every rwlock operation to the
// plain mutex (readers serialize against each other, which is correct but
// slower than a real rwlock).
503 #define rwlock_t mutex_t
504 #define rwlock_init(r) mutex_init(r)
505 #define _rwlock_read_nodebug(m) _mutex_lock_nodebug(m)
506 #define _rwlock_write_nodebug(m) _mutex_lock_nodebug(m)
507 #define _rwlock_try_read_nodebug(m) _mutex_try_lock_nodebug(m)
508 #define _rwlock_try_write_nodebug(m) _mutex_try_lock_nodebug(m)
509 #define _rwlock_unlock_read_nodebug(m) _mutex_unlock_nodebug(m)
510 #define _rwlock_unlock_write_nodebug(m) _mutex_unlock_nodebug(m)
// On Windows the "image header" is the PE DOS header of this DLL.
513 typedef IMAGE_DOS_HEADER headerType
;
514 // fixme YES bundle? NO bundle? sometimes?
515 #define headerIsBundle(hi) YES
516 OBJC_EXTERN IMAGE_DOS_HEADER __ImageBase
;
517 #define libobjc_header ((headerType *)&__ImageBase)
// Mach-O segment command width depends on pointer size.
526 #include <mach-o/loader.h>
528 # define SEGMENT_CMD LC_SEGMENT
530 # define SEGMENT_CMD LC_SEGMENT_64
533 #ifndef VM_MEMORY_OBJC_DISPATCHERS
534 # define VM_MEMORY_OBJC_DISPATCHERS 0
538 // Compiler compatibility
542 // Internal data types
// Darwin thread identity and TLS over pthreads.
// NOTE(review): closing braces and tls_create()'s key declaration/return
// are missing from this extraction.
544 typedef pthread_t objc_thread_t
;
546 static __inline
int thread_equal(objc_thread_t t1
, objc_thread_t t2
) {
547 return pthread_equal(t1
, t2
);
549 static __inline objc_thread_t
thread_self(void) {
550 return pthread_self();
554 typedef pthread_key_t tls_key_t
;
// Allocate a TLS key whose per-thread value is destroyed by `dtor`.
556 static inline tls_key_t
tls_create(void (*dtor
)(void*)) {
558 pthread_key_create(&k
, dtor
);
561 static inline void *tls_get(tls_key_t k
) {
562 return pthread_getspecific(k
);
564 static inline void tls_set(tls_key_t k
, void *value
) {
565 pthread_setspecific(k
, value
);
// Fast-path TLS using libc-reserved "direct" pthread keys.
// NOTE(review): many lines are missing from this extraction (function
// braces, the tls_base() signature, the direct get/set store/load lines,
// and several #if/#else/#endif directives).
568 #if SUPPORT_DIRECT_THREAD_KEYS
// Only the keys reserved above may be used with the direct accessors.
571 static bool is_valid_direct_key(tls_key_t k
) {
572 return ( k
== SYNC_DATA_DIRECT_KEY
573 || k
== SYNC_COUNT_DIRECT_KEY
574 || k
== AUTORELEASE_POOL_KEY
575 # if SUPPORT_RETURN_AUTORELEASE
576 || k
== AUTORELEASE_POOL_RECLAIM_KEY
584 // rdar://9162780 _pthread_get/setspecific_direct are inefficient
585 // copied from libdispatch
// tls_base(): address of the thread's TSD array, read straight from the
// CPU's thread register (ARMv6 shown; other arches elsewhere).
587 __attribute__((always_inline
)) __attribute__((const))
592 #if defined(__arm__) && defined(_ARM_ARCH_6)
593 __asm__("mrc p15, 0, %[p], c13, c0, 3" : [p
] "=&r" (p
));
594 return (void**)(p
& ~0x3ul
);
596 #error tls_base not implemented
// Direct set: index into the TSD array without a function call.
600 __attribute__((always_inline
))
602 tls_set_direct(void **tsdb
, tls_key_t k
, void *v
)
604 assert(is_valid_direct_key(k
));
608 #define tls_set_direct(k, v) \
609 tls_set_direct(tls_base(), (k), (v))
611 __attribute__((always_inline
))
613 tls_get_direct(void **tsdb
, tls_key_t k
)
615 assert(is_valid_direct_key(k
));
619 #define tls_get_direct(k) \
620 tls_get_direct(tls_base(), (k))
// Portable fallback: use pthread's direct-TSD entry points when available,
// otherwise plain pthread_getspecific/pthread_setspecific.
626 static inline void *tls_get_direct(tls_key_t k
)
628 assert(is_valid_direct_key(k
));
630 if (_pthread_has_direct_tsd()) {
631 return _pthread_getspecific_direct(k
);
633 return pthread_getspecific(k
);
636 static inline void tls_set_direct(tls_key_t k
, void *value
)
638 assert(is_valid_direct_key(k
));
640 if (_pthread_has_direct_tsd()) {
641 _pthread_setspecific_direct(k
, value
);
643 pthread_setspecific(k
, value
);
650 // SUPPORT_DIRECT_THREAD_KEYS
// Darwin mutexes over pthreads, with debugger-mode hooks: while the
// debugger has the runtime suspended, locks it manages must already be
// held, otherwise gdb_objc_debuggerModeFailure() aborts debugger mode.
// NOTE(review): braces and some statements are missing from this extraction.
654 typedef pthread_mutex_t mutex_t
;
655 #define MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER;
657 extern int DebuggerMode
;
658 extern void gdb_objc_debuggerModeFailure(void);
659 extern BOOL
isManagedDuringDebugger(void *lock
);
660 extern BOOL
isLockedDuringDebugger(void *lock
);
662 static inline int _mutex_lock_nodebug(mutex_t
*m
) {
663 if (DebuggerMode
&& isManagedDuringDebugger(m
)) {
664 if (! isLockedDuringDebugger(m
)) {
665 gdb_objc_debuggerModeFailure();
669 return pthread_mutex_lock(m
);
671 static inline bool _mutex_try_lock_nodebug(mutex_t
*m
) {
672 if (DebuggerMode
&& isManagedDuringDebugger(m
)) {
673 if (! isLockedDuringDebugger(m
)) {
674 gdb_objc_debuggerModeFailure();
678 return !pthread_mutex_trylock(m
);
680 static inline int _mutex_unlock_nodebug(mutex_t
*m
) {
681 if (DebuggerMode
&& isManagedDuringDebugger(m
)) {
684 return pthread_mutex_unlock(m
);
// Recursive mutex: a separately allocated PTHREAD_MUTEX_RECURSIVE mutex.
689 pthread_mutex_t
*mutex
;
691 #define RECURSIVE_MUTEX_INITIALIZER {0};
692 #define RECURSIVE_MUTEX_NOT_LOCKED EPERM
693 extern void recursive_mutex_init(recursive_mutex_t
*m
);
695 static inline int _recursive_mutex_lock_nodebug(recursive_mutex_t
*m
) {
697 if (DebuggerMode
&& isManagedDuringDebugger(m
)) {
698 if (! isLockedDuringDebugger((mutex_t
*)m
)) {
699 gdb_objc_debuggerModeFailure();
703 return pthread_mutex_lock(m
->mutex
);
705 static inline bool _recursive_mutex_try_lock_nodebug(recursive_mutex_t
*m
) {
707 if (DebuggerMode
&& isManagedDuringDebugger(m
)) {
708 if (! isLockedDuringDebugger((mutex_t
*)m
)) {
709 gdb_objc_debuggerModeFailure();
713 return !pthread_mutex_trylock(m
->mutex
);
715 static inline int _recursive_mutex_unlock_nodebug(recursive_mutex_t
*m
) {
717 if (DebuggerMode
&& isManagedDuringDebugger(m
)) {
720 return pthread_mutex_unlock(m
->mutex
);
// Monitor = pthread mutex + condition variable.
// NOTE(review): the struct header, the cond field, braces, and error-path
// statements are missing from this extraction.
725 pthread_mutex_t mutex
;
728 #define MONITOR_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER }
729 #define MONITOR_NOT_ENTERED EPERM
// Initialize both members; tear down the mutex if cond init fails.
731 static inline int monitor_init(monitor_t
*c
) {
732 int err
= pthread_mutex_init(&c
->mutex
, NULL
);
734 err
= pthread_cond_init(&c
->cond
, NULL
);
736 pthread_mutex_destroy(&c
->mutex
);
// Monitors are never handed to the debugger-mode machinery.
741 static inline int _monitor_enter_nodebug(monitor_t
*c
) {
742 assert(!isManagedDuringDebugger(c
));
743 return pthread_mutex_lock(&c
->mutex
);
745 static inline int _monitor_exit_nodebug(monitor_t
*c
) {
746 return pthread_mutex_unlock(&c
->mutex
);
748 static inline int _monitor_wait_nodebug(monitor_t
*c
) {
749 return pthread_cond_wait(&c
->cond
, &c
->mutex
);
751 static inline int monitor_notify(monitor_t
*c
) {
752 return pthread_cond_signal(&c
->cond
);
754 static inline int monitor_notifyAll(monitor_t
*c
) {
755 return pthread_cond_broadcast(&c
->cond
);
759 // semaphore_create formatted for INIT_ONCE use
// Wraps mach semaphore_create(); any failure is fatal.
760 static inline semaphore_t
create_semaphore(void)
764 k
= semaphore_create(mach_task_self(), &sem
, SYNC_POLICY_FIFO
, 0);
765 if (k
) _objc_fatal("semaphore_create failed (0x%x)", k
);
770 /* Custom read-write lock
771 - reader is atomic add/subtract
772 - writer is pthread mutex plus atomic add/subtract
773 - fairness: new readers wait if a writer wants in
774 - fairness: when writer completes, readers (probably) precede new writer
776 state: xxxxxxxx xxxxxxxx yyyyyyyy yyyyyyyz
777 x: blocked reader count
778 y: active reader count
779 z: readers allowed flag
// Current implementation simply wraps pthread_rwlock_t; the comment above
// describes an earlier hand-rolled design.
// NOTE(review): the rwlock_t struct header, braces, and several assert
// lines are missing from this extraction.
782 pthread_rwlock_t rwl
;
785 extern BOOL
isReadingDuringDebugger(rwlock_t
*lock
);
786 extern BOOL
isWritingDuringDebugger(rwlock_t
*lock
);
788 static inline void rwlock_init(rwlock_t
*l
)
790 int err __unused
= pthread_rwlock_init(&l
->rwl
, NULL
);
// Debugger mode: reads/writes are only legal if the debugger already
// recorded the lock in the matching state; otherwise abort debugger mode.
794 static inline void _rwlock_read_nodebug(rwlock_t
*l
)
796 if (DebuggerMode
&& isManagedDuringDebugger(l
)) {
797 if (! isReadingDuringDebugger(l
)) {
798 gdb_objc_debuggerModeFailure();
802 int err __unused
= pthread_rwlock_rdlock(&l
->rwl
);
806 static inline void _rwlock_unlock_read_nodebug(rwlock_t
*l
)
808 if (DebuggerMode
&& isManagedDuringDebugger(l
)) {
811 int err __unused
= pthread_rwlock_unlock(&l
->rwl
);
816 static inline bool _rwlock_try_read_nodebug(rwlock_t
*l
)
818 if (DebuggerMode
&& isManagedDuringDebugger(l
)) {
819 if (! isReadingDuringDebugger(l
)) {
820 gdb_objc_debuggerModeFailure();
824 int err
= pthread_rwlock_tryrdlock(&l
->rwl
);
825 assert(err
== 0 || err
== EBUSY
);
830 static inline void _rwlock_write_nodebug(rwlock_t
*l
)
832 if (DebuggerMode
&& isManagedDuringDebugger(l
)) {
833 if (! isWritingDuringDebugger(l
)) {
834 gdb_objc_debuggerModeFailure();
838 int err __unused
= pthread_rwlock_wrlock(&l
->rwl
);
842 static inline void _rwlock_unlock_write_nodebug(rwlock_t
*l
)
844 if (DebuggerMode
&& isManagedDuringDebugger(l
)) {
847 int err __unused
= pthread_rwlock_unlock(&l
->rwl
);
851 static inline bool _rwlock_try_write_nodebug(rwlock_t
*l
)
853 if (DebuggerMode
&& isManagedDuringDebugger(l
)) {
854 if (! isWritingDuringDebugger(l
)) {
855 gdb_objc_debuggerModeFailure();
859 int err
= pthread_rwlock_trywrlock(&l
->rwl
);
860 assert(err
== 0 || err
== EBUSY
);
// Mach-O structure aliases: 32-bit vs 64-bit variants.
// NOTE(review): the #if __LP64__ / #else / #endif directives separating the
// two typedef sets are missing from this extraction -- as written these
// typedefs conflict; restore the guards before compiling.
866 typedef struct mach_header headerType
;
867 typedef struct segment_command segmentType
;
868 typedef struct section sectionType
;
870 typedef struct mach_header_64 headerType
;
871 typedef struct segment_command_64 segmentType
;
872 typedef struct section_64 sectionType
;
874 #define headerIsBundle(hi) (hi->mhdr->filetype == MH_BUNDLE)
875 #define libobjc_header ((headerType *)&_mh_dylib_header)
879 /* Secure /tmp usage */
// Open `filename` only if it is safely owned by `euid` (no symlink games).
880 extern int secure_open(const char *filename
, int flags
, uid_t euid
);