/*
 * Copyright (c) 2007 Apple Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/***********************************************************************
* OS portability layer.
**********************************************************************/
#include <TargetConditionals.h>
#include "objc-config.h"

#ifdef __LP64__
#   define WORD_SHIFT 3UL
#   define WORD_MASK 7UL
#else
#   define WORD_SHIFT 2UL
#   define WORD_MASK 3UL
#endif
// Round a size or offset up to the next word boundary.
static inline uint32_t word_align(uint32_t x) {
    return (x + WORD_MASK) & ~WORD_MASK;
}
static inline size_t word_align(size_t x) {
    return (x + WORD_MASK) & ~WORD_MASK;
}
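/* Illustrative sketch (not part of the original header): word_align()
   rounds a byte count up to the next pointer-sized boundary. With the
   LP64 definitions above, WORD_MASK is 7, so:
       word_align((size_t)1)   // == 8
       word_align((size_t)13)  // == 16
       word_align((size_t)16)  // == 16 (already aligned)
*/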
// Mix-in for classes that must not be copied.
class nocopy_t {
  private:
    nocopy_t(const nocopy_t&) = delete;
    const nocopy_t& operator=(const nocopy_t&) = delete;
  protected:
    nocopy_t() { }
    ~nocopy_t() { }
};
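/* Illustrative sketch (not part of the original header): a class inherits
   nocopy_t to delete its copy operations. The class name is hypothetical.
       class ExampleRegistry : private nocopy_t {
       public:
           ExampleRegistry() { }
       };
       // ExampleRegistry a, b;
       // ExampleRegistry c = a;   // error: copy constructor is deleted
       // b = a;                   // error: copy assignment is deleted
*/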
#if TARGET_OS_MAC

#   define OS_UNFAIR_LOCK_INLINE 1

#   ifndef __STDC_LIMIT_MACROS
#       define __STDC_LIMIT_MACROS
#   endif
#   include <crt_externs.h>
#   include <Availability.h>
#   include <TargetConditionals.h>
#   include <sys/mman.h>
#   include <sys/time.h>
#   include <sys/stat.h>
#   include <sys/param.h>
#   include <sys/reason.h>
#   include <mach/mach.h>
#   include <mach/vm_param.h>
#   include <mach/mach_time.h>
#   include <mach-o/dyld.h>
#   include <mach-o/ldsyms.h>
#   include <mach-o/loader.h>
#   include <mach-o/getsect.h>
#   include <mach-o/dyld_priv.h>
#   include <malloc/malloc.h>
#   include <os/lock_private.h>
#   include <libkern/OSAtomic.h>
#   include <libkern/OSCacheControl.h>
#   include <System/pthread_machdep.h>
#   include "objc-probes.h"  // generated dtrace probe definitions.
// Some libc functions call objc_msgSend()
// so we can't use them without deadlocks.
void syslog(int, const char *, ...) UNAVAILABLE_ATTRIBUTE;
void vsyslog(int, const char *, va_list) UNAVAILABLE_ATTRIBUTE;
#define ALWAYS_INLINE inline __attribute__((always_inline))
#define NEVER_INLINE inline __attribute__((noinline))

#define fastpath(x) (__builtin_expect(bool(x), 1))
#define slowpath(x) (__builtin_expect(bool(x), 0))
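/* Illustrative sketch (not part of the original header): fastpath()/slowpath()
   wrap __builtin_expect so the compiler lays out the common case as the
   fall-through path. A hypothetical allocation helper:
       void *example_alloc(size_t size) {
           void *p = malloc(size);
           if (slowpath(!p)) _objc_fatal("out of memory");
           return p;
       }
*/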
// Add and subtract with carry, for multi-word arithmetic.
static ALWAYS_INLINE uintptr_t
addc(uintptr_t lhs, uintptr_t rhs, uintptr_t carryin, uintptr_t *carryout)
{
    return __builtin_addcl(lhs, rhs, carryin, carryout);
}

static ALWAYS_INLINE uintptr_t
subc(uintptr_t lhs, uintptr_t rhs, uintptr_t carryin, uintptr_t *carryout)
{
    return __builtin_subcl(lhs, rhs, carryin, carryout);
}
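/* Illustrative sketch (not part of the original header): addc()/subc() expose
   the carry chain, so two machine words can be added as one double-width
   value. The variable names are hypothetical.
       uintptr_t carry;
       uintptr_t lo = addc(a_lo, b_lo, 0, &carry);      // low word, carry out
       uintptr_t hi = addc(a_hi, b_hi, carry, &carry);  // high word, carry in
*/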
#if __arm64__

static ALWAYS_INLINE uintptr_t
LoadExclusive(uintptr_t *src)
{
    uintptr_t result;
    asm("ldxr %x0, [%x1]"
        : "=r" (result)
        : "r" (src), "m" (*src));
    return result;
}

static ALWAYS_INLINE bool
StoreExclusive(uintptr_t *dst, uintptr_t oldvalue __unused, uintptr_t value)
{
    uint32_t result;
    asm("stxr %w0, %x2, [%x3]"
        : "=r" (result), "=m" (*dst)
        : "r" (value), "r" (dst));
    return !result;
}

static ALWAYS_INLINE bool
StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue __unused, uintptr_t value)
{
    uint32_t result;
    asm("stlxr %w0, %x2, [%x3]"
        : "=r" (result), "=m" (*dst)
        : "r" (value), "r" (dst));
    return !result;
}

static ALWAYS_INLINE void
ClearExclusive(uintptr_t *dst)
{
    // pretend it writes to *dst for instruction ordering purposes
    asm("clrex" : "=m" (*dst));
}
#elif __arm__

static ALWAYS_INLINE uintptr_t
LoadExclusive(uintptr_t *src)
{
    return *src;
}

static ALWAYS_INLINE bool
StoreExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
{
    return OSAtomicCompareAndSwapPtr((void *)oldvalue, (void *)value,
                                     (void **)dst);
}

static ALWAYS_INLINE bool
StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
{
    return OSAtomicCompareAndSwapPtrBarrier((void *)oldvalue, (void *)value,
                                            (void **)dst);
}

static ALWAYS_INLINE void
ClearExclusive(uintptr_t *dst __unused)
{
}
#elif __x86_64__  ||  __i386__

static ALWAYS_INLINE uintptr_t
LoadExclusive(uintptr_t *src)
{
    return *src;
}

static ALWAYS_INLINE bool
StoreExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
{
    return __sync_bool_compare_and_swap((void **)dst, (void *)oldvalue, (void *)value);
}

static ALWAYS_INLINE bool
StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
{
    return StoreExclusive(dst, oldvalue, value);
}

static ALWAYS_INLINE void
ClearExclusive(uintptr_t *dst __unused)
{
}
#else
#   error unknown architecture
#endif
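/* Illustrative sketch (not part of the original header): the portable pattern
   for these primitives is a load / modify / store-conditional retry loop.
   `flags` is a hypothetical word being updated atomically.
       uintptr_t old, bits;
       do {
           old = LoadExclusive(&flags);
           bits = old | 0x1;
           if (bits == old) {
               ClearExclusive(&flags);   // nothing to store; drop the reservation
               break;
           }
       } while (!StoreReleaseExclusive(&flags, old, bits));
*/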
#if !TARGET_OS_IPHONE
#   include <CrashReporterClient.h>
#else
    // CrashReporterClient not yet available on iOS
    extern const char *CRSetCrashLogMessage(const char *msg);
    extern const char *CRGetCrashLogMessage(void);
#endif
#   include <algorithm>
#   include <functional>

#   define PRIVATE_EXTERN __attribute__((visibility("hidden")))
#   undef __private_extern__
#   define __private_extern__ use_PRIVATE_EXTERN_instead
#   undef private_extern
#   define private_extern use_PRIVATE_EXTERN_instead
/* Use this for functions that are intended to be breakpoint hooks.
   If you do not, the compiler may optimize them away.
   BREAKPOINT_FUNCTION( void stop_on_error(void) ); */
#   define BREAKPOINT_FUNCTION(prototype)                             \
    OBJC_EXTERN __attribute__((noinline, used, visibility("hidden"))) \
    prototype { asm(""); }
#elif TARGET_OS_WIN32

#   define WINVER 0x0501        // target Windows XP and later
#   define _WIN32_WINNT 0x0501  // target Windows XP and later
#   define WIN32_LEAN_AND_MEAN
    // hack: windef.h typedefs BOOL as int
#   define BOOL WINBOOL
#   include <windows.h>
#   include <Availability.h>

#   ifdef __cplusplus
#   include <algorithm>
#   include <functional>
#   define __BEGIN_DECLS extern "C" {
#   define __END_DECLS   }
#   else
#   define __BEGIN_DECLS /*empty*/
#   define __END_DECLS   /*empty*/
#   endif

#   define PRIVATE_EXTERN
#   define __attribute__(x)
#   define inline __inline
/* Use this for functions that are intended to be breakpoint hooks.
   If you do not, the compiler may optimize them away.
   BREAKPOINT_FUNCTION( void MyBreakpointFunction(void) ); */
#   define BREAKPOINT_FUNCTION(prototype) \
    __declspec(noinline) prototype { __asm { } }

/* stub out dtrace probes */
#   define OBJC_RUNTIME_OBJC_EXCEPTION_RETHROW() do {} while(0)
#   define OBJC_RUNTIME_OBJC_EXCEPTION_THROW(arg0) do {} while(0)
#endif

#include <objc/objc.h>
#include <objc/objc-api.h>

extern void _objc_fatal(const char *fmt, ...)
    __attribute__((noreturn, format (printf, 1, 2)));
extern void _objc_fatal_with_reason(uint64_t reason, uint64_t flags,
                                    const char *fmt, ...)
    __attribute__((noreturn, format (printf, 3, 4)));
// Initialize `var` exactly once: build a value with `create`, publish it with
// an atomic compare-and-swap, and dispose of the losing thread's copy with
// `delete`.
#define INIT_ONCE_PTR(var, create, delete)                              \
        typeof(var) v = create;                                         \
        if (OSAtomicCompareAndSwapPtrBarrier(0, (void*)v, (void**)&var)){

#define INIT_ONCE_32(var, create, delete)                               \
        typeof(var) v = create;                                         \
        if (OSAtomicCompareAndSwap32Barrier(0, v, (volatile int32_t *)&var)) {
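/* Illustrative sketch (not part of the original header): INIT_ONCE_PTR
   lazily publishes a pointer with a compare-and-swap; only one thread's
   value wins, and the `delete` expression cleans up a losing copy (the
   macro binds the freshly created value to `v`). Names are hypothetical.
       static ExampleTable *gTable;
       INIT_ONCE_PTR(gTable, example_table_create(), example_table_free(v));
*/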
// Thread keys reserved by libc for our use.
#if defined(__PTK_FRAMEWORK_OBJC_KEY0)
#   define SUPPORT_DIRECT_THREAD_KEYS 1
#   define TLS_DIRECT_KEY        ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY0)
#   define SYNC_DATA_DIRECT_KEY  ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY1)
#   define SYNC_COUNT_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY2)
#   define AUTORELEASE_POOL_KEY  ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY3)
#   if SUPPORT_RETURN_AUTORELEASE
#   define RETURN_DISPOSITION_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY4)
#   endif
#   if SUPPORT_QOS_HACK
#   define QOS_KEY               ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY5)
#   endif
#else
#   define SUPPORT_DIRECT_THREAD_KEYS 0
#endif
#if TARGET_OS_WIN32

// Compiler compatibility

#define strdup _strdup

#define issetugid() 0

#define MIN(x, y) ((x) < (y) ? (x) : (y))

static __inline void bcopy(const void *src, void *dst, size_t size) { memcpy(dst, src, size); }
static __inline void bzero(void *dst, size_t size) { memset(dst, 0, size); }

int asprintf(char **dstp, const char *format, ...);
typedef void * malloc_zone_t;

static __inline malloc_zone_t malloc_default_zone(void) { return (malloc_zone_t)-1; }
static __inline void *malloc_zone_malloc(malloc_zone_t z, size_t size) { return malloc(size); }
static __inline void *malloc_zone_calloc(malloc_zone_t z, size_t size, size_t count) { return calloc(size, count); }
static __inline void *malloc_zone_realloc(malloc_zone_t z, void *p, size_t size) { return realloc(p, size); }
static __inline void malloc_zone_free(malloc_zone_t z, void *p) { free(p); }
static __inline malloc_zone_t malloc_zone_from_ptr(const void *p) { return (malloc_zone_t)-1; }
static __inline size_t malloc_size(const void *p) { return _msize((void*)p); /* fixme invalid pointer check? */ }
static __inline BOOL OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst)
{
    // fixme barrier is overkill
    long original = InterlockedCompareExchange(dst, newl, oldl);
    return (original == oldl);
}

static __inline BOOL OSAtomicCompareAndSwapPtrBarrier(void *oldp, void *newp, void * volatile *dst)
{
    void *original = InterlockedCompareExchangePointer(dst, newp, oldp);
    return (original == oldp);
}

static __inline BOOL OSAtomicCompareAndSwap32Barrier(int32_t oldl, int32_t newl, int32_t volatile *dst)
{
    long original = InterlockedCompareExchange((volatile long *)dst, newl, oldl);
    return (original == oldl);
}

static __inline int32_t OSAtomicDecrement32Barrier(volatile int32_t *dst)
{
    return InterlockedDecrement((volatile long *)dst);
}

static __inline int32_t OSAtomicIncrement32Barrier(volatile int32_t *dst)
{
    return InterlockedIncrement((volatile long *)dst);
}
// Internal data types

typedef DWORD objc_thread_t;  // thread ID
static __inline int thread_equal(objc_thread_t t1, objc_thread_t t2) {
    return t1 == t2;
}
static __inline objc_thread_t thread_self(void) {
    return GetCurrentThreadId();
}

typedef struct {
    DWORD key;
    void (*dtor)(void *);
} tls_key_t;
static __inline tls_key_t tls_create(void (*dtor)(void*)) {
    // fixme need dtor registry for DllMain to call on thread detach
    tls_key_t k;
    k.key = TlsAlloc();
    k.dtor = dtor;
    return k;
}
static __inline void *tls_get(tls_key_t k) {
    return TlsGetValue(k.key);
}
static __inline void tls_set(tls_key_t k, void *value) {
    TlsSetValue(k.key, value);
}
typedef struct {
    CRITICAL_SECTION *lock;
} mutex_t;
#define MUTEX_INITIALIZER {0};
extern void mutex_init(mutex_t *m);
static __inline int _mutex_lock_nodebug(mutex_t *m) {
    EnterCriticalSection(m->lock);
    return 0;
}
static __inline bool _mutex_try_lock_nodebug(mutex_t *m) {
    return TryEnterCriticalSection(m->lock);
}
static __inline int _mutex_unlock_nodebug(mutex_t *m) {
    LeaveCriticalSection(m->lock);
    return 0;
}

typedef mutex_t spinlock_t;
#define spinlock_lock(l) mutex_lock(l)
#define spinlock_unlock(l) mutex_unlock(l)
#define SPINLOCK_INITIALIZER MUTEX_INITIALIZER
typedef struct {
    HANDLE mutex;
} recursive_mutex_t;
#define RECURSIVE_MUTEX_INITIALIZER {0};
#define RECURSIVE_MUTEX_NOT_LOCKED 1
extern void recursive_mutex_init(recursive_mutex_t *m);
static __inline int _recursive_mutex_lock_nodebug(recursive_mutex_t *m) {
    return WaitForSingleObject(m->mutex, INFINITE);
}
static __inline bool _recursive_mutex_try_lock_nodebug(recursive_mutex_t *m) {
    return (WAIT_OBJECT_0 == WaitForSingleObject(m->mutex, 0));
}
static __inline int _recursive_mutex_unlock_nodebug(recursive_mutex_t *m) {
    return ReleaseMutex(m->mutex) ? 0 : RECURSIVE_MUTEX_NOT_LOCKED;
}
/*
typedef HANDLE mutex_t;
static inline void mutex_init(HANDLE *m) { *m = CreateMutex(NULL, FALSE, NULL); }
static inline void _mutex_lock(mutex_t *m) { WaitForSingleObject(*m, INFINITE); }
static inline bool mutex_try_lock(mutex_t *m) { return WaitForSingleObject(*m, 0) == WAIT_OBJECT_0; }
static inline void _mutex_unlock(mutex_t *m) { ReleaseMutex(*m); }
*/
// based on http://www.cs.wustl.edu/~schmidt/win32-cv-1.html
// Vista-only CONDITION_VARIABLE would be better
typedef struct {
    HANDLE mutex;
    HANDLE waiters;                  // semaphore for those in cond_wait()
    HANDLE waitersDone;              // auto-reset event after everyone gets a broadcast
    CRITICAL_SECTION waitCountLock;  // guards waitCount and didBroadcast
    unsigned int waitCount;
    int didBroadcast;
} monitor_t;
#define MONITOR_INITIALIZER { 0 }
#define MONITOR_NOT_ENTERED 1
extern int monitor_init(monitor_t *c);

static inline int _monitor_enter_nodebug(monitor_t *c) {
    if (!c->mutex) {
        int err = monitor_init(c);
        if (err) return err;
    }
    return WaitForSingleObject(c->mutex, INFINITE);
}
static inline int _monitor_leave_nodebug(monitor_t *c) {
    if (!ReleaseMutex(c->mutex)) return MONITOR_NOT_ENTERED;
    else return 0;
}
static inline int _monitor_wait_nodebug(monitor_t *c) {
    int last;
    EnterCriticalSection(&c->waitCountLock);
    c->waitCount++;
    LeaveCriticalSection(&c->waitCountLock);

    SignalObjectAndWait(c->mutex, c->waiters, INFINITE, FALSE);

    EnterCriticalSection(&c->waitCountLock);
    c->waitCount--;
    last = c->didBroadcast  &&  c->waitCount == 0;
    LeaveCriticalSection(&c->waitCountLock);

    if (last) {
        // tell broadcaster that all waiters have awoken
        SignalObjectAndWait(c->waitersDone, c->mutex, INFINITE, FALSE);
    } else {
        WaitForSingleObject(c->mutex, INFINITE);
    }

    // fixme error checking
    return 0;
}
static inline int monitor_notify(monitor_t *c) {
    int haveWaiters;

    EnterCriticalSection(&c->waitCountLock);
    haveWaiters = c->waitCount > 0;
    LeaveCriticalSection(&c->waitCountLock);

    if (haveWaiters) {
        ReleaseSemaphore(c->waiters, 1, 0);
    }

    // fixme error checking
    return 0;
}
static inline int monitor_notifyAll(monitor_t *c) {
    EnterCriticalSection(&c->waitCountLock);
    if (c->waitCount == 0) {
        LeaveCriticalSection(&c->waitCountLock);
        return 0;
    }
    c->didBroadcast = 1;
    ReleaseSemaphore(c->waiters, c->waitCount, 0);
    LeaveCriticalSection(&c->waitCountLock);

    // fairness: wait for everyone to move from waiters to mutex
    WaitForSingleObject(c->waitersDone, INFINITE);
    // not under waitCountLock, but still under mutex
    c->didBroadcast = 0;

    // fixme error checking
    return 0;
}
// fixme no rwlock yet

typedef IMAGE_DOS_HEADER headerType;
// fixme YES bundle? NO bundle? sometimes?
#define headerIsBundle(hi) YES
OBJC_EXTERN IMAGE_DOS_HEADER __ImageBase;
#define libobjc_header ((headerType *)&__ImageBase)
#elif TARGET_OS_MAC

#include <mach-o/loader.h>
#ifndef __LP64__
#   define SEGMENT_CMD LC_SEGMENT
#else
#   define SEGMENT_CMD LC_SEGMENT_64
#endif

#ifndef VM_MEMORY_OBJC_DISPATCHERS
#   define VM_MEMORY_OBJC_DISPATCHERS 0
#endif
// Compiler compatibility

static inline uint64_t nanoseconds() {
    return mach_absolute_time();
}
// Internal data types

typedef pthread_t objc_thread_t;

static __inline int thread_equal(objc_thread_t t1, objc_thread_t t2) {
    return pthread_equal(t1, t2);
}
static __inline objc_thread_t thread_self(void) {
    return pthread_self();
}

typedef pthread_key_t tls_key_t;

static inline tls_key_t tls_create(void (*dtor)(void*)) {
    tls_key_t k;
    pthread_key_create(&k, dtor);
    return k;
}
static inline void *tls_get(tls_key_t k) {
    return pthread_getspecific(k);
}
static inline void tls_set(tls_key_t k, void *value) {
    pthread_setspecific(k, value);
}
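/* Illustrative sketch (not part of the original header): typical lifecycle of
   a thread-local slot. The key name is hypothetical.
       static tls_key_t exampleKey;
       exampleKey = tls_create(&free);      // destructor runs at thread exit
       tls_set(exampleKey, malloc(16));
       void *buf = tls_get(exampleKey);
*/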
#if SUPPORT_DIRECT_THREAD_KEYS

static bool is_valid_direct_key(tls_key_t k) {
    return (   k == SYNC_DATA_DIRECT_KEY
            || k == SYNC_COUNT_DIRECT_KEY
            || k == AUTORELEASE_POOL_KEY
#   if SUPPORT_RETURN_AUTORELEASE
            || k == RETURN_DISPOSITION_KEY
#   endif
#   if SUPPORT_QOS_HACK
            || k == QOS_KEY
#   endif
               );
}

#if __arm__

// rdar://9162780  _pthread_get/setspecific_direct are inefficient
// copied from libdispatch

__attribute__((const))
static ALWAYS_INLINE void**
tls_base(void)
{
    uintptr_t p;
#if defined(__arm__) && defined(_ARM_ARCH_6)
    __asm__("mrc p15, 0, %[p], c13, c0, 3" : [p] "=&r" (p));
    return (void**)(p & ~0x3ul);
#else
#error tls_base not implemented
#endif
}
static ALWAYS_INLINE void
tls_set_direct(void **tsdb, tls_key_t k, void *v)
{
    assert(is_valid_direct_key(k));

    tsdb[k] = v;
}
#define tls_set_direct(k, v) \
        tls_set_direct(tls_base(), (k), (v))

static ALWAYS_INLINE void *
tls_get_direct(void **tsdb, tls_key_t k)
{
    assert(is_valid_direct_key(k));

    return tsdb[k];
}
#define tls_get_direct(k) \
        tls_get_direct(tls_base(), (k))
#else

static inline void *tls_get_direct(tls_key_t k)
{
    assert(is_valid_direct_key(k));

    if (_pthread_has_direct_tsd()) {
        return _pthread_getspecific_direct(k);
    } else {
        return pthread_getspecific(k);
    }
}
static inline void tls_set_direct(tls_key_t k, void *value)
{
    assert(is_valid_direct_key(k));

    if (_pthread_has_direct_tsd()) {
        _pthread_setspecific_direct(k, value);
    } else {
        pthread_setspecific(k, value);
    }
}

#endif

// SUPPORT_DIRECT_THREAD_KEYS
#endif
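/* Illustrative sketch (not part of the original header): the direct-key path
   reads a reserved slot straight out of the thread's TSD area when available,
   falling back to pthread TLS otherwise. For example, per-thread autorelease
   pool state can be fetched and stored with:
       void *hotPage = tls_get_direct(AUTORELEASE_POOL_KEY);
       tls_set_direct(AUTORELEASE_POOL_KEY, hotPage);
*/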
static inline pthread_t pthread_self_direct()
{
    return (pthread_t)
        _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_SELF);
}

static inline mach_port_t mach_thread_self_direct()
{
    return (mach_port_t)(uintptr_t)
        _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
}

static inline pthread_priority_t pthread_self_priority_direct()
{
    pthread_priority_t pri = (pthread_priority_t)
        _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS);
    return pri & ~_PTHREAD_PRIORITY_FLAGS_MASK;
}
template <bool Debug> class mutex_tt;
template <bool Debug> class monitor_tt;
template <bool Debug> class rwlock_tt;
template <bool Debug> class recursive_mutex_tt;

using spinlock_t = mutex_tt<DEBUG>;
using mutex_t = mutex_tt<DEBUG>;
using monitor_t = monitor_tt<DEBUG>;
using rwlock_t = rwlock_tt<DEBUG>;
using recursive_mutex_t = recursive_mutex_tt<DEBUG>;

// Use fork_unsafe_lock to get a lock that isn't
// acquired and released around fork().
// All fork-safe locks are checked in debug builds.
struct fork_unsafe_lock_t { };
extern const fork_unsafe_lock_t fork_unsafe_lock;

#include "objc-lockdebug.h"
template <bool Debug>
class mutex_tt : nocopy_t {
    os_unfair_lock mLock;

  public:
    mutex_tt() : mLock(OS_UNFAIR_LOCK_INIT) {
        lockdebug_remember_mutex(this);
    }

    mutex_tt(const fork_unsafe_lock_t unsafe) : mLock(OS_UNFAIR_LOCK_INIT) { }

    void lock() {
        lockdebug_mutex_lock(this);

        os_unfair_lock_lock_with_options_inline
            (&mLock, OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION);
    }

    void unlock() {
        lockdebug_mutex_unlock(this);

        os_unfair_lock_unlock_inline(&mLock);
    }

    void forceReset() {
        lockdebug_mutex_unlock(this);

        bzero(&mLock, sizeof(mLock));
        mLock = os_unfair_lock OS_UNFAIR_LOCK_INIT;
    }

    void assertLocked() {
        lockdebug_mutex_assert_locked(this);
    }

    void assertUnlocked() {
        lockdebug_mutex_assert_unlocked(this);
    }

    // Address-ordered lock discipline for a pair of locks.

    static void lockTwo(mutex_tt *lock1, mutex_tt *lock2) {
        if (lock1 < lock2) {
            lock1->lock();
            lock2->lock();
        } else {
            lock2->lock();
            if (lock2 != lock1) lock1->lock();
        }
    }

    static void unlockTwo(mutex_tt *lock1, mutex_tt *lock2) {
        lock1->unlock();
        if (lock2 != lock1) lock2->unlock();
    }
};
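/* Illustrative sketch (not part of the original header): lockTwo()/unlockTwo()
   let callers take two mutexes without deadlocking, because every caller
   acquires them in the same address order. Lock names are hypothetical.
       mutex_t *a = &oldTableLock, *b = &newTableLock;
       mutex_t::lockTwo(a, b);     // safe even if another caller passes
                                   // the same locks in the opposite order
       // ... operate on both structures ...
       mutex_t::unlockTwo(a, b);
*/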
template <bool Debug>
class recursive_mutex_tt : nocopy_t {
    pthread_mutex_t mLock;

  public:
    recursive_mutex_tt() : mLock(PTHREAD_RECURSIVE_MUTEX_INITIALIZER) {
        lockdebug_remember_recursive_mutex(this);
    }

    recursive_mutex_tt(const fork_unsafe_lock_t unsafe)
        : mLock(PTHREAD_RECURSIVE_MUTEX_INITIALIZER)
    { }

    void lock()
    {
        lockdebug_recursive_mutex_lock(this);

        int err = pthread_mutex_lock(&mLock);
        if (err) _objc_fatal("pthread_mutex_lock failed (%d)", err);
    }

    void unlock()
    {
        lockdebug_recursive_mutex_unlock(this);

        int err = pthread_mutex_unlock(&mLock);
        if (err) _objc_fatal("pthread_mutex_unlock failed (%d)", err);
    }

    void forceReset()
    {
        lockdebug_recursive_mutex_unlock(this);

        bzero(&mLock, sizeof(mLock));
        mLock = pthread_mutex_t PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
    }

    bool tryUnlock()
    {
        int err = pthread_mutex_unlock(&mLock);
        if (err == 0) {
            lockdebug_recursive_mutex_unlock(this);
            return true;
        } else if (err == EPERM) {
            return false;
        } else {
            _objc_fatal("pthread_mutex_unlock failed (%d)", err);
        }
    }

    void assertLocked() {
        lockdebug_recursive_mutex_assert_locked(this);
    }

    void assertUnlocked() {
        lockdebug_recursive_mutex_assert_unlocked(this);
    }
};
template <bool Debug>
class monitor_tt {
    pthread_mutex_t mutex;
    pthread_cond_t cond;

  public:
    monitor_tt()
        : mutex(PTHREAD_MUTEX_INITIALIZER), cond(PTHREAD_COND_INITIALIZER)
    {
        lockdebug_remember_monitor(this);
    }

    monitor_tt(const fork_unsafe_lock_t unsafe)
        : mutex(PTHREAD_MUTEX_INITIALIZER), cond(PTHREAD_COND_INITIALIZER)
    { }

    void enter()
    {
        lockdebug_monitor_enter(this);

        int err = pthread_mutex_lock(&mutex);
        if (err) _objc_fatal("pthread_mutex_lock failed (%d)", err);
    }

    void leave()
    {
        lockdebug_monitor_leave(this);

        int err = pthread_mutex_unlock(&mutex);
        if (err) _objc_fatal("pthread_mutex_unlock failed (%d)", err);
    }

    void wait()
    {
        lockdebug_monitor_wait(this);

        int err = pthread_cond_wait(&cond, &mutex);
        if (err) _objc_fatal("pthread_cond_wait failed (%d)", err);
    }

    void notify()
    {
        int err = pthread_cond_signal(&cond);
        if (err) _objc_fatal("pthread_cond_signal failed (%d)", err);
    }

    void notifyAll()
    {
        int err = pthread_cond_broadcast(&cond);
        if (err) _objc_fatal("pthread_cond_broadcast failed (%d)", err);
    }

    void forceReset()
    {
        lockdebug_monitor_leave(this);

        bzero(&mutex, sizeof(mutex));
        bzero(&cond, sizeof(cond));
        mutex = pthread_mutex_t PTHREAD_MUTEX_INITIALIZER;
        cond = pthread_cond_t PTHREAD_COND_INITIALIZER;
    }

    void assertLocked()
    {
        lockdebug_monitor_assert_locked(this);
    }

    void assertUnlocked()
    {
        lockdebug_monitor_assert_unlocked(this);
    }
};
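/* Illustrative sketch (not part of the original header): monitor_tt bundles a
   mutex with a condition variable. A hypothetical producer/consumer pair:
       // consumer
       gMonitor.enter();
       while (!workAvailable) gMonitor.wait();   // re-checks under the lock
       // ... take work ...
       gMonitor.leave();

       // producer
       gMonitor.enter();
       workAvailable = true;
       gMonitor.notify();   // or notifyAll() to wake every waiter
       gMonitor.leave();
*/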
// semaphore_create formatted for INIT_ONCE use
static inline semaphore_t create_semaphore(void)
{
    semaphore_t sem;
    kern_return_t k;
    k = semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);
    if (k) _objc_fatal("semaphore_create failed (0x%x)", k);
    return sem;
}
#if SUPPORT_QOS_HACK
// Override QOS class to avoid priority inversion in rwlocks
// <rdar://17697862> do a qos override before taking rw lock in objc

#include <pthread/workqueue_private.h>
extern pthread_priority_t BackgroundPriority;
extern pthread_priority_t MainPriority;

static inline void qosStartOverride()
{
    uintptr_t overrideRefCount = (uintptr_t)tls_get_direct(QOS_KEY);
    if (overrideRefCount > 0) {
        // If there is a qos override, increment the refcount and continue
        tls_set_direct(QOS_KEY, (void *)(overrideRefCount + 1));
    }
    else {
        pthread_priority_t currentPriority = pthread_self_priority_direct();
        // Check if override is needed. Only override if we are background qos
        if (currentPriority != 0  &&  currentPriority <= BackgroundPriority) {
            int res __unused = _pthread_override_qos_class_start_direct(mach_thread_self_direct(), MainPriority);
            // Once we override, we set the reference count in the tsd
            // to know when to end the override
            tls_set_direct(QOS_KEY, (void *)1);
        }
    }
}

static inline void qosEndOverride()
{
    uintptr_t overrideRefCount = (uintptr_t)tls_get_direct(QOS_KEY);
    if (overrideRefCount == 0) return;

    if (overrideRefCount == 1) {
        int res __unused = _pthread_override_qos_class_end_direct(mach_thread_self_direct());
    }

    // decrement refcount
    tls_set_direct(QOS_KEY, (void *)(overrideRefCount - 1));
}

// not SUPPORT_QOS_HACK
#else

static inline void qosStartOverride() { }
static inline void qosEndOverride() { }

// not SUPPORT_QOS_HACK
#endif
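/* Illustrative sketch (not part of the original header): qosStartOverride()
   and qosEndOverride() are reference-counted and must be paired around the
   region that could invert priorities, e.g. while a background-QOS thread
   holds a lock that higher-priority threads also take:
       qosStartOverride();
       // ... acquire and use the contended lock ...
       qosEndOverride();
*/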
template <bool Debug>
class rwlock_tt : nocopy_t {
    pthread_rwlock_t mLock;

  public:
    rwlock_tt() : mLock(PTHREAD_RWLOCK_INITIALIZER) {
        lockdebug_remember_rwlock(this);
    }

    rwlock_tt(const fork_unsafe_lock_t unsafe)
        : mLock(PTHREAD_RWLOCK_INITIALIZER)
    { }

    void read()
    {
        lockdebug_rwlock_read(this);

        qosStartOverride();
        int err = pthread_rwlock_rdlock(&mLock);
        if (err) _objc_fatal("pthread_rwlock_rdlock failed (%d)", err);
    }

    void unlockRead()
    {
        lockdebug_rwlock_unlock_read(this);

        int err = pthread_rwlock_unlock(&mLock);
        if (err) _objc_fatal("pthread_rwlock_unlock failed (%d)", err);
        qosEndOverride();
    }

    bool tryRead()
    {
        qosStartOverride();
        int err = pthread_rwlock_tryrdlock(&mLock);
        if (err == 0) {
            lockdebug_rwlock_try_read_success(this);
            return true;
        } else if (err == EBUSY) {
            qosEndOverride();
            return false;
        } else {
            _objc_fatal("pthread_rwlock_tryrdlock failed (%d)", err);
        }
    }

    void write()
    {
        lockdebug_rwlock_write(this);

        qosStartOverride();
        int err = pthread_rwlock_wrlock(&mLock);
        if (err) _objc_fatal("pthread_rwlock_wrlock failed (%d)", err);
    }

    void unlockWrite()
    {
        lockdebug_rwlock_unlock_write(this);

        int err = pthread_rwlock_unlock(&mLock);
        if (err) _objc_fatal("pthread_rwlock_unlock failed (%d)", err);
        qosEndOverride();
    }

    bool tryWrite()
    {
        qosStartOverride();
        int err = pthread_rwlock_trywrlock(&mLock);
        if (err == 0) {
            lockdebug_rwlock_try_write_success(this);
            return true;
        } else if (err == EBUSY) {
            qosEndOverride();
            return false;
        } else {
            _objc_fatal("pthread_rwlock_trywrlock failed (%d)", err);
        }
    }

    void forceReset()
    {
        lockdebug_rwlock_unlock_write(this);

        bzero(&mLock, sizeof(mLock));
        mLock = pthread_rwlock_t PTHREAD_RWLOCK_INITIALIZER;
    }

    void assertReading() {
        lockdebug_rwlock_assert_reading(this);
    }

    void assertWriting() {
        lockdebug_rwlock_assert_writing(this);
    }

    void assertLocked() {
        lockdebug_rwlock_assert_locked(this);
    }

    void assertUnlocked() {
        lockdebug_rwlock_assert_unlocked(this);
    }
};
// Mach-O header, segment, and section types for the current word size.
#ifndef __LP64__
typedef struct mach_header headerType;
typedef struct segment_command segmentType;
typedef struct section sectionType;
#else
typedef struct mach_header_64 headerType;
typedef struct segment_command_64 segmentType;
typedef struct section_64 sectionType;
#endif
#define headerIsBundle(hi) (hi->mhdr()->filetype == MH_BUNDLE)
#define libobjc_header ((headerType *)&_mh_dylib_header)

#endif
/* Secure /tmp usage */
extern int secure_open(const char *filename, int flags, uid_t euid);
static inline void *
memdup(const void *mem, size_t len)
{
    void *dup = malloc(len);
    memcpy(dup, mem, len);
    return dup;
}
// strdup that doesn't copy read-only memory
static inline char *
strdupIfMutable(const char *str)
{
    size_t size = strlen(str) + 1;
    if (_dyld_is_memory_immutable(str, size)) {
        return (char *)str;
    } else {
        return (char *)memdup(str, size);
    }
}
// free strdupIfMutable() result
static inline void
freeIfMutable(char *str)
{
    size_t size = strlen(str) + 1;
    if (_dyld_is_memory_immutable(str, size)) {
        // nothing
    } else {
        free(str);
    }
}
// nil-checking unsigned strdup
static inline uint8_t *
ustrdupMaybeNil(const uint8_t *str)
{
    if (!str) return nil;
    return (uint8_t *)strdupIfMutable((char *)str);
}
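/* Illustrative sketch (not part of the original header): strdupIfMutable()
   and freeIfMutable() must be paired, because the "copy" may really be the
   original immutable string. The variable names are hypothetical.
       char *name = strdupIfMutable(rawName);  // no allocation if rawName is immutable
       // ... use name ...
       freeIfMutable(name);                    // frees only if it was actually copied
       // plain free(name) here would be wrong for the immutable case
*/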
// OS version checking:
//
// DYLD_OS_VERSION(mac, ios, tv, watch)
// sdkIsOlderThan(mac, ios, tv, watch)
// sdkIsAtLeast(mac, ios, tv, watch)
//
// This version order matches OBJC_AVAILABLE.

#if TARGET_OS_OSX
#   define DYLD_OS_VERSION(x, i, t, w) DYLD_MACOSX_VERSION_##x
#   define sdkVersion() dyld_get_program_sdk_version()

#elif TARGET_OS_IOS
#   define DYLD_OS_VERSION(x, i, t, w) DYLD_IOS_VERSION_##i
#   define sdkVersion() dyld_get_program_sdk_version()

#elif TARGET_OS_TV
    // dyld does not currently have distinct constants for tvOS
#   define DYLD_OS_VERSION(x, i, t, w) DYLD_IOS_VERSION_##t
#   define sdkVersion() dyld_get_program_sdk_version()

#elif TARGET_OS_WATCH
#   define DYLD_OS_VERSION(x, i, t, w) DYLD_WATCHOS_VERSION_##w
    // watchOS has its own API for compatibility reasons
#   define sdkVersion() dyld_get_program_sdk_watch_os_version()

#else
#   error unknown OS
#endif

#define sdkIsOlderThan(x, i, t, w) \
            (sdkVersion() < DYLD_OS_VERSION(x, i, t, w))
#define sdkIsAtLeast(x, i, t, w) \
            (sdkVersion() >= DYLD_OS_VERSION(x, i, t, w))
// Allow bare 0 to be used in DYLD_OS_VERSION() and sdkIsOlderThan()
#define DYLD_MACOSX_VERSION_0 0
#define DYLD_IOS_VERSION_0 0
#define DYLD_TVOS_VERSION_0 0
#define DYLD_WATCHOS_VERSION_0 0

// Pretty-print a DYLD_*_VERSION_* constant.
#define SDK_FORMAT "%hu.%hhu.%hhu"
#define FORMAT_SDK(v) \
    (unsigned short)(((uint32_t)(v))>>16), \
    (unsigned char)(((uint32_t)(v))>>8), \
    (unsigned char)(((uint32_t)(v))>>0)
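/* Illustrative sketch (not part of the original header): gating behavior on
   the SDK a program was built against, and printing that SDK version with
   SDK_FORMAT/FORMAT_SDK. The dyld version constants are assumed to exist
   in <mach-o/dyld_priv.h>.
       if (sdkIsAtLeast(10_12, 10_0, 10_0, 3_0)) {
           // behavior for apps built with a modern SDK
       }
       printf("built with SDK " SDK_FORMAT "\n", FORMAT_SDK(sdkVersion()));
*/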
// fork() safety requires careful tracking of all locks.
// Our custom lock types check this in debug builds.
// Disallow direct use of all other lock types.
typedef __darwin_pthread_mutex_t pthread_mutex_t UNAVAILABLE_ATTRIBUTE;
typedef __darwin_pthread_rwlock_t pthread_rwlock_t UNAVAILABLE_ATTRIBUTE;
typedef int32_t OSSpinLock UNAVAILABLE_ATTRIBUTE;
typedef struct os_unfair_lock_s os_unfair_lock UNAVAILABLE_ATTRIBUTE;