2 * Copyright (c) 2007 Apple Inc. All Rights Reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
24 /***********************************************************************
26 * OS portability layer.
27 **********************************************************************/
33 #include <TargetConditionals.h>
34 #include "objc-config.h"
35 #include "objc-private.h"
// Pointer-word size parameters. WORD_MASK + 1 == 1 << WORD_SHIFT == sizeof(void *).
// (The #if/#else structure was elided by extraction; restored so the two
// variants no longer conflict.)
#if __LP64__
#   define WORD_SHIFT 3UL
#   define WORD_MASK 7UL
#else
#   define WORD_SHIFT 2UL
#   define WORD_MASK 3UL
#endif

// Round x up to the nearest multiple of the pointer word size.
static inline uint32_t word_align(uint32_t x) {
    return (x + WORD_MASK) & ~WORD_MASK;
}
50 static inline size_t word_align(size_t x
) {
51 return (x
+ WORD_MASK
) & ~WORD_MASK
;
// Round x up to the nearest multiple of 16 bytes.
static inline size_t align16(size_t x) {
    return (x + (size_t)15) & ~(size_t)15;
}
// Mix-in for classes that must not be copied.
struct nocopy_t {
    nocopy_t(const nocopy_t&) = delete;
    const nocopy_t& operator=(const nocopy_t&) = delete;
  protected:
    // Only derived classes may construct/destroy.
    constexpr nocopy_t() = default;
    ~nocopy_t() = default;
};
// Version of std::atomic that does not allow implicit conversions
// to/from the wrapped type, and requires an explicit memory order
// be passed to load() and store().
template <typename T>
struct explicit_atomic : public std::atomic<T> {
    explicit explicit_atomic(T initial) noexcept : std::atomic<T>(std::move(initial)) {}
    operator T() const = delete;

    T load(std::memory_order order) const noexcept {
        return std::atomic<T>::load(order);
    }
    void store(T desired, std::memory_order order) noexcept {
        std::atomic<T>::store(desired, order);
    }

    // Convert a normal pointer to an atomic pointer. This is a
    // somewhat dodgy thing to do, but if the atomic type is lock
    // free and the same size as the non-atomic type, we know the
    // representations are the same, and the compiler generates good
    // code.
    static explicit_atomic<T> *from_pointer(T *ptr) {
        static_assert(sizeof(explicit_atomic<T> *) == sizeof(T *),
                      "Size of atomic must match size of original");
        explicit_atomic<T> *atomic = (explicit_atomic<T> *)ptr;
        ASSERT(atomic->is_lock_free());
        return atomic;
    }
};
98 # define OS_UNFAIR_LOCK_INLINE 1
100 # ifndef __STDC_LIMIT_MACROS
101 # define __STDC_LIMIT_MACROS
117 # include <pthread.h>
118 # include <crt_externs.h>
120 # include <Availability.h>
121 # include <TargetConditionals.h>
122 # include <sys/mman.h>
123 # include <sys/time.h>
124 # include <sys/stat.h>
125 # include <sys/param.h>
126 # include <sys/reason.h>
127 # include <mach/mach.h>
128 # include <mach/vm_param.h>
129 # include <mach/mach_time.h>
130 # include <mach-o/dyld.h>
131 # include <mach-o/ldsyms.h>
132 # include <mach-o/loader.h>
133 # include <mach-o/getsect.h>
134 # include <mach-o/dyld_priv.h>
135 # include <malloc/malloc.h>
136 # include <os/lock_private.h>
137 # include <libkern/OSAtomic.h>
138 # include <libkern/OSCacheControl.h>
139 # include <System/pthread_machdep.h>
140 # include "objc-probes.h" // generated dtrace probe definitions.
142 // Some libc functions call objc_msgSend()
143 // so we can't use them without deadlocks.
144 void syslog(int, const char *, ...) UNAVAILABLE_ATTRIBUTE
;
145 void vsyslog(int, const char *, va_list) UNAVAILABLE_ATTRIBUTE
;
// Force (or forbid) inlining regardless of optimizer heuristics.
#define ALWAYS_INLINE inline __attribute__((always_inline))
#define NEVER_INLINE __attribute__((noinline))

// Branch-prediction hints for hot paths.
#define fastpath(x) (__builtin_expect(bool(x), 1))
#define slowpath(x) (__builtin_expect(bool(x), 0))
155 static ALWAYS_INLINE
uintptr_t
156 addc(uintptr_t lhs
, uintptr_t rhs
, uintptr_t carryin
, uintptr_t *carryout
)
158 return __builtin_addcl(lhs
, rhs
, carryin
, carryout
);
161 static ALWAYS_INLINE
uintptr_t
162 subc(uintptr_t lhs
, uintptr_t rhs
, uintptr_t carryin
, uintptr_t *carryout
)
164 return __builtin_subcl(lhs
, rhs
, carryin
, carryout
);
167 #if __arm64__ && !__arm64e__
171 LoadExclusive(uintptr_t *src
)
173 return __builtin_arm_ldrex(src
);
178 StoreExclusive(uintptr_t *dst
, uintptr_t oldvalue __unused
, uintptr_t value
)
180 return !__builtin_arm_strex(value
, dst
);
186 StoreReleaseExclusive(uintptr_t *dst
, uintptr_t oldvalue __unused
, uintptr_t value
)
188 return !__builtin_arm_stlex(value
, dst
);
193 ClearExclusive(uintptr_t *dst __unused
)
195 __builtin_arm_clrex();
202 LoadExclusive(uintptr_t *src
)
204 return __c11_atomic_load((_Atomic(uintptr_t) *)src
, __ATOMIC_RELAXED
);
209 StoreExclusive(uintptr_t *dst
, uintptr_t oldvalue
, uintptr_t value
)
211 return __c11_atomic_compare_exchange_weak((_Atomic(uintptr_t) *)dst
, &oldvalue
, value
, __ATOMIC_RELAXED
, __ATOMIC_RELAXED
);
217 StoreReleaseExclusive(uintptr_t *dst
, uintptr_t oldvalue
, uintptr_t value
)
219 return __c11_atomic_compare_exchange_weak((_Atomic(uintptr_t) *)dst
, &oldvalue
, value
, __ATOMIC_RELEASE
, __ATOMIC_RELAXED
);
224 ClearExclusive(uintptr_t *dst __unused
)
231 #if !TARGET_OS_IPHONE
232 # include <CrashReporterClient.h>
234 // CrashReporterClient not yet available on iOS
236 extern const char *CRSetCrashLogMessage(const char *msg
);
237 extern const char *CRGetCrashLogMessage(void);
243 # include <algorithm>
244 # include <functional>
248 # define PRIVATE_EXTERN __attribute__((visibility("hidden")))
249 # undef __private_extern__
250 # define __private_extern__ use_PRIVATE_EXTERN_instead
251 # undef private_extern
252 # define private_extern use_PRIVATE_EXTERN_instead
/* Use this for functions that are intended to be breakpoint hooks.
   If you do not, the compiler may optimize them away.
   BREAKPOINT_FUNCTION( void stop_on_error(void) ); */
#define BREAKPOINT_FUNCTION(prototype)                             \
    OBJC_EXTERN __attribute__((noinline, used, visibility("hidden"))) \
    prototype { asm(""); }
261 #elif TARGET_OS_WIN32
263 # define WINVER 0x0501 // target Windows XP and later
264 # define _WIN32_WINNT 0x0501 // target Windows XP and later
265 # define WIN32_LEAN_AND_MEAN
266 // hack: windef.h typedefs BOOL as int
267 # define BOOL WINBOOL
268 # include <windows.h>
278 # include <Availability.h>
282 # include <algorithm>
283 # include <functional>
285 # define __BEGIN_DECLS extern "C" {
286 # define __END_DECLS }
288 # define __BEGIN_DECLS /*empty*/
289 # define __END_DECLS /*empty*/
292 # define PRIVATE_EXTERN
293 # define __attribute__(x)
294 # define inline __inline
296 /* Use this for functions that are intended to be breakpoint hooks.
297 If you do not, the compiler may optimize them away.
298 BREAKPOINT_FUNCTION( void MyBreakpointFunction(void) ); */
299 # define BREAKPOINT_FUNCTION(prototype) \
300 __declspec(noinline) prototype { __asm { } }
302 /* stub out dtrace probes */
303 # define OBJC_RUNTIME_OBJC_EXCEPTION_RETHROW() do {} while(0)
304 # define OBJC_RUNTIME_OBJC_EXCEPTION_THROW(arg0) do {} while(0)
311 #include <objc/objc.h>
312 #include <objc/objc-api.h>
// Abort the process with a formatted message (printf-checked).
extern void _objc_fatal(const char *fmt, ...)
    __attribute__((noreturn, cold, format (printf, 1, 2)));
// Abort with an os_reason code and flags in addition to the message.
extern void _objc_fatal_with_reason(uint64_t reason, uint64_t flags,
                                    const char *fmt, ...)
    __attribute__((noreturn, cold, format (printf, 3, 4)));
320 #define INIT_ONCE_PTR(var, create, delete) \
323 typeof(var) v = create; \
325 if (OSAtomicCompareAndSwapPtrBarrier(0, (void*)v, (void**)&var)){ \
333 #define INIT_ONCE_32(var, create, delete) \
336 typeof(var) v = create; \
338 if (OSAtomicCompareAndSwap32Barrier(0, v, (volatile int32_t *)&var)) { \
// Thread keys reserved by libc for our use.
// (The #if/#else/#endif structure was elided by extraction; restored.)
#if defined(__PTK_FRAMEWORK_OBJC_KEY0)
#   define SUPPORT_DIRECT_THREAD_KEYS 1
#   define TLS_DIRECT_KEY        ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY0)
#   define SYNC_DATA_DIRECT_KEY  ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY1)
#   define SYNC_COUNT_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY2)
#   define AUTORELEASE_POOL_KEY  ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY3)
#   if SUPPORT_RETURN_AUTORELEASE
#       define RETURN_DISPOSITION_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY4)
#   endif
#else
#   define SUPPORT_DIRECT_THREAD_KEYS 0
#endif
// Compiler compatibility

// Map BSD/POSIX names onto their Windows equivalents.
#define strdup _strdup

#define issetugid() 0

// NOTE: evaluates its arguments more than once; avoid side effects.
#define MIN(x, y) ((x) < (y) ? (x) : (y))
// BSD bcopy() shim: note the (source, destination, length) argument order,
// the reverse of memcpy().
static __inline void bcopy(const void *src, void *dst, size_t size) { memcpy(dst, src, size); }
// BSD bzero() shim: zero-fill a buffer via memset().
static __inline void bzero(void *dst, size_t size) { memset(dst, 0, size); }
377 int asprintf(char **dstp
, const char *format
, ...);
// Minimal malloc-zone shims for platforms without malloc zones.
// The "default zone" is a dummy non-NULL sentinel; every zone call
// ignores the zone and forwards to the plain C heap functions.
typedef void * malloc_zone_t;

static __inline malloc_zone_t malloc_default_zone(void) { return (malloc_zone_t)-1; }
static __inline void *malloc_zone_malloc(malloc_zone_t z, size_t size) { return malloc(size); }
static __inline void *malloc_zone_calloc(malloc_zone_t z, size_t size, size_t count) { return calloc(size, count); }
static __inline void *malloc_zone_realloc(malloc_zone_t z, void *p, size_t size) { return realloc(p, size); }
static __inline void malloc_zone_free(malloc_zone_t z, void *p) { free(p); }
386 static __inline malloc_zone_t
malloc_zone_from_ptr(const void *p
) { return (malloc_zone_t
)-1; }
387 static __inline
size_t malloc_size(const void *p
) { return _msize((void*)p
); /* fixme invalid pointer check? */ }
392 static __inline BOOL
OSAtomicCompareAndSwapLong(long oldl
, long newl
, long volatile *dst
)
394 // fixme barrier is overkill
395 long original
= InterlockedCompareExchange(dst
, newl
, oldl
);
396 return (original
== oldl
);
399 static __inline BOOL
OSAtomicCompareAndSwapPtrBarrier(void *oldp
, void *newp
, void * volatile *dst
)
401 void *original
= InterlockedCompareExchangePointer(dst
, newp
, oldp
);
402 return (original
== oldp
);
405 static __inline BOOL
OSAtomicCompareAndSwap32Barrier(int32_t oldl
, int32_t newl
, int32_t volatile *dst
)
407 long original
= InterlockedCompareExchange((volatile long *)dst
, newl
, oldl
);
408 return (original
== oldl
);
411 static __inline
int32_t OSAtomicDecrement32Barrier(volatile int32_t *dst
)
413 return InterlockedDecrement((volatile long *)dst
);
416 static __inline
int32_t OSAtomicIncrement32Barrier(volatile int32_t *dst
)
418 return InterlockedIncrement((volatile long *)dst
);
422 // Internal data types
424 typedef DWORD objc_thread_t
; // thread ID
425 static __inline
int thread_equal(objc_thread_t t1
, objc_thread_t t2
) {
428 static __inline objc_thread_t
objc_thread_self(void) {
429 return GetCurrentThreadId();
434 void (*dtor
)(void *);
436 static __inline tls_key_t
tls_create(void (*dtor
)(void*)) {
437 // fixme need dtor registry for DllMain to call on thread detach
443 static __inline
void *tls_get(tls_key_t k
) {
444 return TlsGetValue(k
.key
);
446 static __inline
void tls_set(tls_key_t k
, void *value
) {
447 TlsSetValue(k
.key
, value
);
451 CRITICAL_SECTION
*lock
;
453 #define MUTEX_INITIALIZER {0};
454 extern void mutex_init(mutex_t
*m
);
455 static __inline
int _mutex_lock_nodebug(mutex_t
*m
) {
460 EnterCriticalSection(m
->lock
);
463 static __inline
bool _mutex_try_lock_nodebug(mutex_t
*m
) {
468 return TryEnterCriticalSection(m
->lock
);
470 static __inline
int _mutex_unlock_nodebug(mutex_t
*m
) {
472 LeaveCriticalSection(m
->lock
);
477 typedef mutex_t spinlock_t
;
478 #define spinlock_lock(l) mutex_lock(l)
479 #define spinlock_unlock(l) mutex_unlock(l)
480 #define SPINLOCK_INITIALIZER MUTEX_INITIALIZER
486 #define RECURSIVE_MUTEX_INITIALIZER {0};
487 #define RECURSIVE_MUTEX_NOT_LOCKED 1
488 extern void recursive_mutex_init(recursive_mutex_t
*m
);
489 static __inline
int _recursive_mutex_lock_nodebug(recursive_mutex_t
*m
) {
491 return WaitForSingleObject(m
->mutex
, INFINITE
);
493 static __inline
bool _recursive_mutex_try_lock_nodebug(recursive_mutex_t
*m
) {
495 return (WAIT_OBJECT_0
== WaitForSingleObject(m
->mutex
, 0));
497 static __inline
int _recursive_mutex_unlock_nodebug(recursive_mutex_t
*m
) {
499 return ReleaseMutex(m
->mutex
) ? 0 : RECURSIVE_MUTEX_NOT_LOCKED
;
504 typedef HANDLE mutex_t;
505 static inline void mutex_init(HANDLE *m) { *m = CreateMutex(NULL, FALSE, NULL); }
506 static inline void _mutex_lock(mutex_t *m) { WaitForSingleObject(*m, INFINITE); }
507 static inline bool mutex_try_lock(mutex_t *m) { return WaitForSingleObject(*m, 0) == WAIT_OBJECT_0; }
508 static inline void _mutex_unlock(mutex_t *m) { ReleaseMutex(*m); }
511 // based on http://www.cs.wustl.edu/~schmidt/win32-cv-1.html
512 // Vista-only CONDITION_VARIABLE would be better
515 HANDLE waiters
; // semaphore for those in cond_wait()
516 HANDLE waitersDone
; // auto-reset event after everyone gets a broadcast
517 CRITICAL_SECTION waitCountLock
; // guards waitCount and didBroadcast
518 unsigned int waitCount
;
521 #define MONITOR_INITIALIZER { 0 }
522 #define MONITOR_NOT_ENTERED 1
523 extern int monitor_init(monitor_t
*c
);
525 static inline int _monitor_enter_nodebug(monitor_t
*c
) {
527 int err
= monitor_init(c
);
530 return WaitForSingleObject(c
->mutex
, INFINITE
);
532 static inline int _monitor_leave_nodebug(monitor_t
*c
) {
533 if (!ReleaseMutex(c
->mutex
)) return MONITOR_NOT_ENTERED
;
536 static inline int _monitor_wait_nodebug(monitor_t
*c
) {
538 EnterCriticalSection(&c
->waitCountLock
);
540 LeaveCriticalSection(&c
->waitCountLock
);
542 SignalObjectAndWait(c
->mutex
, c
->waiters
, INFINITE
, FALSE
);
544 EnterCriticalSection(&c
->waitCountLock
);
546 last
= c
->didBroadcast
&& c
->waitCount
== 0;
547 LeaveCriticalSection(&c
->waitCountLock
);
550 // tell broadcaster that all waiters have awoken
551 SignalObjectAndWait(c
->waitersDone
, c
->mutex
, INFINITE
, FALSE
);
553 WaitForSingleObject(c
->mutex
, INFINITE
);
556 // fixme error checking
559 static inline int monitor_notify(monitor_t
*c
) {
562 EnterCriticalSection(&c
->waitCountLock
);
563 haveWaiters
= c
->waitCount
> 0;
564 LeaveCriticalSection(&c
->waitCountLock
);
567 ReleaseSemaphore(c
->waiters
, 1, 0);
570 // fixme error checking
573 static inline int monitor_notifyAll(monitor_t
*c
) {
574 EnterCriticalSection(&c
->waitCountLock
);
575 if (c
->waitCount
== 0) {
576 LeaveCriticalSection(&c
->waitCountLock
);
580 ReleaseSemaphore(c
->waiters
, c
->waitCount
, 0);
581 LeaveCriticalSection(&c
->waitCountLock
);
583 // fairness: wait for everyone to move from waiters to mutex
584 WaitForSingleObject(c
->waitersDone
, INFINITE
);
585 // not under waitCountLock, but still under mutex
588 // fixme error checking
593 typedef IMAGE_DOS_HEADER headerType
;
594 // fixme YES bundle? NO bundle? sometimes?
595 #define headerIsBundle(hi) YES
596 OBJC_EXTERN IMAGE_DOS_HEADER __ImageBase
;
597 #define libobjc_header ((headerType *)&__ImageBase)
606 #include <mach-o/loader.h>
// Pick the load-command constant matching the pointer size.
// (The #if/#else/#endif structure was elided by extraction; restored.)
#ifndef __LP64__
#   define SEGMENT_CMD LC_SEGMENT
#else
#   define SEGMENT_CMD LC_SEGMENT_64
#endif

// Older SDKs may not define this VM tag.
#ifndef VM_MEMORY_OBJC_DISPATCHERS
#   define VM_MEMORY_OBJC_DISPATCHERS 0
#endif
618 // Compiler compatibility
622 static inline uint64_t nanoseconds() {
623 return clock_gettime_nsec_np(CLOCK_MONOTONIC_RAW
);
// Internal data types

typedef pthread_t objc_thread_t;

// Whether two thread handles name the same thread.
static __inline int thread_equal(objc_thread_t t1, objc_thread_t t2) {
    return pthread_equal(t1, t2);
}
typedef pthread_key_t tls_key_t;

// Create a TLS key whose value is destroyed by dtor at thread exit.
// (Error result of pthread_key_create is ignored, matching the original.)
static inline tls_key_t tls_create(void (*dtor)(void*)) {
    tls_key_t k;
    pthread_key_create(&k, dtor);
    return k;
}

static inline void *tls_get(tls_key_t k) {
    return pthread_getspecific(k);
}

static inline void tls_set(tls_key_t k, void *value) {
    pthread_setspecific(k, value);
}
648 #if SUPPORT_DIRECT_THREAD_KEYS
650 static inline bool is_valid_direct_key(tls_key_t k
) {
651 return ( k
== SYNC_DATA_DIRECT_KEY
652 || k
== SYNC_COUNT_DIRECT_KEY
653 || k
== AUTORELEASE_POOL_KEY
654 || k
== _PTHREAD_TSD_SLOT_PTHREAD_SELF
655 # if SUPPORT_RETURN_AUTORELEASE
656 || k
== RETURN_DISPOSITION_KEY
661 static inline void *tls_get_direct(tls_key_t k
)
663 ASSERT(is_valid_direct_key(k
));
665 if (_pthread_has_direct_tsd()) {
666 return _pthread_getspecific_direct(k
);
668 return pthread_getspecific(k
);
671 static inline void tls_set_direct(tls_key_t k
, void *value
)
673 ASSERT(is_valid_direct_key(k
));
675 if (_pthread_has_direct_tsd()) {
676 _pthread_setspecific_direct(k
, value
);
678 pthread_setspecific(k
, value
);
682 __attribute__((const))
683 static inline pthread_t
objc_thread_self()
685 return (pthread_t
)tls_get_direct(_PTHREAD_TSD_SLOT_PTHREAD_SELF
);
688 __attribute__((const))
689 static inline pthread_t
objc_thread_self()
691 return pthread_self();
693 #endif // SUPPORT_DIRECT_THREAD_KEYS
696 template <bool Debug
> class mutex_tt
;
697 template <bool Debug
> class monitor_tt
;
698 template <bool Debug
> class recursive_mutex_tt
;
706 using spinlock_t
= mutex_tt
<LOCKDEBUG
>;
707 using mutex_t
= mutex_tt
<LOCKDEBUG
>;
708 using monitor_t
= monitor_tt
<LOCKDEBUG
>;
709 using recursive_mutex_t
= recursive_mutex_tt
<LOCKDEBUG
>;
// Use fork_unsafe_lock to get a lock that isn't
// acquired and released around fork().
// All fork-safe locks are checked in debug builds.
struct fork_unsafe_lock_t {
    constexpr fork_unsafe_lock_t() = default;
};
extern const fork_unsafe_lock_t fork_unsafe_lock;
719 #include "objc-lockdebug.h"
721 template <bool Debug
>
722 class mutex_tt
: nocopy_t
{
723 os_unfair_lock mLock
;
725 constexpr mutex_tt() : mLock(OS_UNFAIR_LOCK_INIT
) {
726 lockdebug_remember_mutex(this);
729 constexpr mutex_tt(const fork_unsafe_lock_t unsafe
) : mLock(OS_UNFAIR_LOCK_INIT
) { }
732 lockdebug_mutex_lock(this);
734 // <rdar://problem/50384154>
735 uint32_t opts
= OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION
| OS_UNFAIR_LOCK_ADAPTIVE_SPIN
;
736 os_unfair_lock_lock_with_options_inline
737 (&mLock
, (os_unfair_lock_options_t
)opts
);
741 lockdebug_mutex_unlock(this);
743 os_unfair_lock_unlock_inline(&mLock
);
747 lockdebug_mutex_unlock(this);
749 bzero(&mLock
, sizeof(mLock
));
750 mLock
= os_unfair_lock OS_UNFAIR_LOCK_INIT
;
753 void assertLocked() {
754 lockdebug_mutex_assert_locked(this);
757 void assertUnlocked() {
758 lockdebug_mutex_assert_unlocked(this);
762 // Address-ordered lock discipline for a pair of locks.
764 static void lockTwo(mutex_tt
*lock1
, mutex_tt
*lock2
) {
770 if (lock2
!= lock1
) lock1
->lock();
774 static void unlockTwo(mutex_tt
*lock1
, mutex_tt
*lock2
) {
776 if (lock2
!= lock1
) lock2
->unlock();
779 // Scoped lock and unlock
780 class locker
: nocopy_t
{
783 locker(mutex_tt
& newLock
)
784 : lock(newLock
) { lock
.lock(); }
785 ~locker() { lock
.unlock(); }
788 // Either scoped lock and unlock, or NOP.
789 class conditional_locker
: nocopy_t
{
793 conditional_locker(mutex_tt
& newLock
, bool shouldLock
)
794 : lock(newLock
), didLock(shouldLock
)
796 if (shouldLock
) lock
.lock();
798 ~conditional_locker() { if (didLock
) lock
.unlock(); }
802 using mutex_locker_t
= mutex_tt
<LOCKDEBUG
>::locker
;
803 using conditional_mutex_locker_t
= mutex_tt
<LOCKDEBUG
>::conditional_locker
;
806 template <bool Debug
>
807 class recursive_mutex_tt
: nocopy_t
{
808 os_unfair_recursive_lock mLock
;
811 constexpr recursive_mutex_tt() : mLock(OS_UNFAIR_RECURSIVE_LOCK_INIT
) {
812 lockdebug_remember_recursive_mutex(this);
815 constexpr recursive_mutex_tt(const fork_unsafe_lock_t unsafe
)
816 : mLock(OS_UNFAIR_RECURSIVE_LOCK_INIT
)
821 lockdebug_recursive_mutex_lock(this);
822 os_unfair_recursive_lock_lock(&mLock
);
827 lockdebug_recursive_mutex_unlock(this);
829 os_unfair_recursive_lock_unlock(&mLock
);
834 lockdebug_recursive_mutex_unlock(this);
836 bzero(&mLock
, sizeof(mLock
));
837 mLock
= os_unfair_recursive_lock OS_UNFAIR_RECURSIVE_LOCK_INIT
;
842 if (os_unfair_recursive_lock_trylock(&mLock
)) {
843 lockdebug_recursive_mutex_lock(this);
851 if (os_unfair_recursive_lock_tryunlock4objc(&mLock
)) {
852 lockdebug_recursive_mutex_unlock(this);
858 void assertLocked() {
859 lockdebug_recursive_mutex_assert_locked(this);
862 void assertUnlocked() {
863 lockdebug_recursive_mutex_assert_unlocked(this);
868 template <bool Debug
>
870 pthread_mutex_t mutex
;
874 constexpr monitor_tt()
875 : mutex(PTHREAD_MUTEX_INITIALIZER
), cond(PTHREAD_COND_INITIALIZER
)
877 lockdebug_remember_monitor(this);
880 monitor_tt(const fork_unsafe_lock_t unsafe
)
881 : mutex(PTHREAD_MUTEX_INITIALIZER
), cond(PTHREAD_COND_INITIALIZER
)
886 lockdebug_monitor_enter(this);
888 int err
= pthread_mutex_lock(&mutex
);
889 if (err
) _objc_fatal("pthread_mutex_lock failed (%d)", err
);
894 lockdebug_monitor_leave(this);
896 int err
= pthread_mutex_unlock(&mutex
);
897 if (err
) _objc_fatal("pthread_mutex_unlock failed (%d)", err
);
902 lockdebug_monitor_wait(this);
904 int err
= pthread_cond_wait(&cond
, &mutex
);
905 if (err
) _objc_fatal("pthread_cond_wait failed (%d)", err
);
910 int err
= pthread_cond_signal(&cond
);
911 if (err
) _objc_fatal("pthread_cond_signal failed (%d)", err
);
916 int err
= pthread_cond_broadcast(&cond
);
917 if (err
) _objc_fatal("pthread_cond_broadcast failed (%d)", err
);
922 lockdebug_monitor_leave(this);
924 bzero(&mutex
, sizeof(mutex
));
925 bzero(&cond
, sizeof(cond
));
926 mutex
= pthread_mutex_t PTHREAD_MUTEX_INITIALIZER
;
927 cond
= pthread_cond_t PTHREAD_COND_INITIALIZER
;
932 lockdebug_monitor_assert_locked(this);
935 void assertUnlocked()
937 lockdebug_monitor_assert_unlocked(this);
942 // semaphore_create formatted for INIT_ONCE use
943 static inline semaphore_t
create_semaphore(void)
947 k
= semaphore_create(mach_task_self(), &sem
, SYNC_POLICY_FIFO
, 0);
948 if (k
) _objc_fatal("semaphore_create failed (0x%x)", k
);
// Mach-O structure aliases sized to match the pointer width.
// (The #if/#else/#endif structure was elided by extraction; restored so
// the two sets of typedefs no longer conflict.)
#ifndef __LP64__
typedef struct mach_header headerType;
typedef struct segment_command segmentType;
typedef struct section sectionType;
#else
typedef struct mach_header_64 headerType;
typedef struct segment_command_64 segmentType;
typedef struct section_64 sectionType;
#endif
#define headerIsBundle(hi) (hi->mhdr()->filetype == MH_BUNDLE)
#define libobjc_header ((headerType *)&_mh_dylib_header)
/* Secure /tmp usage */
// Open filename with the given flags, refusing paths not owned by euid.
extern int secure_open(const char *filename, int flags, uid_t euid);
// Copy len bytes of mem into a freshly malloc'd buffer.
// Caller owns (and must free()) the result; returns NULL on allocation
// failure instead of invoking UB via memcpy into NULL.
static inline void *
memdup(const void *mem, size_t len)
{
    void *dup = malloc(len);
    if (dup) memcpy(dup, mem, len);
    return dup;
}
// strdup that doesn't copy read-only memory
// NOTE(review): signature line and the immutable-branch return were elided
// by extraction; reconstructed — freeIfMutable() must be used to release
// the result. TODO confirm against upstream.
static inline char *
strdupIfMutable(const char *str)
{
    size_t size = strlen(str) + 1;
    if (_dyld_is_memory_immutable(str, size)) {
        // Immutable strings can be returned as-is; freeIfMutable() skips them.
        return (char *)str;
    } else {
        return (char *)memdup(str, size);
    }
}
// free strdupIfMutable() result
// NOTE(review): signature line and branch bodies were elided by extraction;
// reconstructed as the mirror of strdupIfMutable() — TODO confirm.
static inline void
freeIfMutable(char *str)
{
    size_t size = strlen(str) + 1;
    if (_dyld_is_memory_immutable(str, size)) {
        // nothing to free: strdupIfMutable() returned the original pointer
    } else {
        free(str);
    }
}
1012 // nil-checking unsigned strdup
1013 static inline uint8_t *
1014 ustrdupMaybeNil(const uint8_t *str
)
1016 if (!str
) return nil
;
1017 return (uint8_t *)strdupIfMutable((char *)str
);
1020 // OS version checking:
1023 // DYLD_OS_VERSION(mac, ios, tv, watch, bridge)
1024 // sdkIsOlderThan(mac, ios, tv, watch, bridge)
1025 // sdkIsAtLeast(mac, ios, tv, watch, bridge)
1027 // This version order matches OBJC_AVAILABLE.
1030 # define DYLD_OS_VERSION(x, i, t, w, b) DYLD_MACOSX_VERSION_##x
1031 # define sdkVersion() dyld_get_program_sdk_version()
1034 # define DYLD_OS_VERSION(x, i, t, w, b) DYLD_IOS_VERSION_##i
1035 # define sdkVersion() dyld_get_program_sdk_version()
1038 // dyld does not currently have distinct constants for tvOS
1039 # define DYLD_OS_VERSION(x, i, t, w, b) DYLD_IOS_VERSION_##t
1040 # define sdkVersion() dyld_get_program_sdk_version()
1042 #elif TARGET_OS_BRIDGE
1043 # if TARGET_OS_WATCH
1044 # error bridgeOS 1.0 not supported
1046 // fixme don't need bridgeOS versioning yet
1047 # define DYLD_OS_VERSION(x, i, t, w, b) DYLD_IOS_VERSION_##t
1048 # define sdkVersion() dyld_get_program_sdk_bridge_os_version()
1050 #elif TARGET_OS_WATCH
1051 # define DYLD_OS_VERSION(x, i, t, w, b) DYLD_WATCHOS_VERSION_##w
1052 // watchOS has its own API for compatibility reasons
1053 # define sdkVersion() dyld_get_program_sdk_watch_os_version()
// True if the app was built against an SDK older than the given
// per-platform versions.
#define sdkIsOlderThan(x, i, t, w, b) \
            (sdkVersion() < DYLD_OS_VERSION(x, i, t, w, b))
// True if the app was built against an SDK at or above the given
// per-platform versions.
#define sdkIsAtLeast(x, i, t, w, b) \
            (sdkVersion() >= DYLD_OS_VERSION(x, i, t, w, b))
// Allow bare 0 to be used in DYLD_OS_VERSION() and sdkIsOlderThan()
#define DYLD_MACOSX_VERSION_0 0
#define DYLD_IOS_VERSION_0 0
#define DYLD_TVOS_VERSION_0 0
#define DYLD_WATCHOS_VERSION_0 0
#define DYLD_BRIDGEOS_VERSION_0 0
// Pretty-print a DYLD_*_VERSION_* constant (packed as 0xMMMMmmpp).
#define SDK_FORMAT "%hu.%hhu.%hhu"
#define FORMAT_SDK(v) \
    (unsigned short)(((uint32_t)(v))>>16),  \
    (unsigned char)(((uint32_t)(v))>>8),  \
    (unsigned char)(((uint32_t)(v))>>0)
1079 #ifndef __BUILDING_OBJCDT__
1080 // fork() safety requires careful tracking of all locks.
1081 // Our custom lock types check this in debug builds.
1082 // Disallow direct use of all other lock types.
1083 typedef __darwin_pthread_mutex_t pthread_mutex_t UNAVAILABLE_ATTRIBUTE
;
1084 typedef __darwin_pthread_rwlock_t pthread_rwlock_t UNAVAILABLE_ATTRIBUTE
;
1085 typedef int32_t OSSpinLock UNAVAILABLE_ATTRIBUTE
;
1086 typedef struct os_unfair_lock_s os_unfair_lock UNAVAILABLE_ATTRIBUTE
;