/*
 * Copyright (c) 2007 Apple Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/***********************************************************************
* objc-os.h
* OS portability layer.
**********************************************************************/

#ifndef _OBJC_OS_H
#define _OBJC_OS_H

#include <atomic>
#include <TargetConditionals.h>
#include "objc-config.h"
#include "objc-private.h"

#ifdef __LP64__
#   define WORD_SHIFT 3UL
#   define WORD_MASK 7UL
#   define WORD_BITS 64
#else
#   define WORD_SHIFT 2UL
#   define WORD_MASK 3UL
#   define WORD_BITS 32
#endif

static inline uint32_t word_align(uint32_t x) {
    return (x + WORD_MASK) & ~WORD_MASK;
}
static inline size_t word_align(size_t x) {
    return (x + WORD_MASK) & ~WORD_MASK;
}
static inline size_t align16(size_t x) {
    return (x + size_t(15)) & ~size_t(15);
}
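
// Examples (illustrative): with the LP64 values above (WORD_MASK == 7),
// word_align(13) == 16 and word_align(16) == 16; align16(17) == 32 on
// either word size.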

// Mix-in for classes that must not be copied.
class nocopy_t {
  private:
    nocopy_t(const nocopy_t&) = delete;
    const nocopy_t& operator=(const nocopy_t&) = delete;
  protected:
    constexpr nocopy_t() = default;
    ~nocopy_t() = default;
};
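
// Usage sketch (illustrative): derive from nocopy_t (privately by default
// for classes) to delete the subclass's copy operations, e.g.
//   class side_table_registry_t : nocopy_t { /* uncopyable */ };
// side_table_registry_t is a hypothetical name; mutex_tt and
// recursive_mutex_tt below are real users of this mix-in.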

// Version of std::atomic that does not allow implicit conversions
// to/from the wrapped type, and requires an explicit memory order
// be passed to load() and store().
template <typename T>
struct explicit_atomic : public std::atomic<T> {
    explicit explicit_atomic(T initial) noexcept : std::atomic<T>(std::move(initial)) {}
    operator T() const = delete;

    T load(std::memory_order order) const noexcept {
        return std::atomic<T>::load(order);
    }
    void store(T desired, std::memory_order order) noexcept {
        std::atomic<T>::store(desired, order);
    }

    // Convert a normal pointer to an atomic pointer. This is a
    // somewhat dodgy thing to do, but if the atomic type is lock
    // free and the same size as the non-atomic type, we know the
    // representations are the same, and the compiler generates good
    // code.
    static explicit_atomic<T> *from_pointer(T *ptr) {
        static_assert(sizeof(explicit_atomic<T> *) == sizeof(T *),
                      "Size of atomic must match size of original");
        explicit_atomic<T> *atomic = (explicit_atomic<T> *)ptr;
        ASSERT(atomic->is_lock_free());
        return atomic;
    }
};
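
// Usage sketch (illustrative): wrap a field so every access spells out
// its memory ordering explicitly.
//   explicit_atomic<uintptr_t> bits{0};
//   uintptr_t v = bits.load(std::memory_order_relaxed);
//   bits.store(v | 1, std::memory_order_release);
// `bits` is a hypothetical field, not something defined in this header.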

namespace objc {
static inline uintptr_t mask16ShiftBits(uint16_t mask)
{
    // Returns how far 0xffff must be shifted right to reproduce mask.
    uintptr_t maskShift = __builtin_clz(mask) - 16;
    ASSERT((0xffff >> maskShift) == mask);
    return maskShift;
}
}
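
// Example (illustrative): for mask == 0x7fff, __builtin_clz(0x7fff) == 17
// (on a 32-bit int), so mask16ShiftBits(0x7fff) == 1 and 0xffff >> 1 == 0x7fff.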

#if TARGET_OS_MAC

#   define OS_UNFAIR_LOCK_INLINE 1

#   ifndef __STDC_LIMIT_MACROS
#       define __STDC_LIMIT_MACROS
#   endif

#   include <stdio.h>
#   include <stdlib.h>
#   include <stdint.h>
#   include <stdarg.h>
#   include <string.h>
#   include <ctype.h>
#   include <errno.h>
#   include <dlfcn.h>
#   include <fcntl.h>
#   include <assert.h>
#   include <limits.h>
#   include <syslog.h>
#   include <unistd.h>
#   include <pthread.h>
#   include <crt_externs.h>
#   undef check
#   include <Availability.h>
#   include <TargetConditionals.h>
#   include <sys/mman.h>
#   include <sys/time.h>
#   include <sys/stat.h>
#   include <sys/param.h>
#   include <sys/reason.h>
#   include <mach/mach.h>
#   include <mach/vm_param.h>
#   include <mach/mach_time.h>
#   include <mach-o/dyld.h>
#   include <mach-o/ldsyms.h>
#   include <mach-o/loader.h>
#   include <mach-o/getsect.h>
#   include <mach-o/dyld_priv.h>
#   include <malloc/malloc.h>
#   include <os/lock_private.h>
#   include <libkern/OSAtomic.h>
#   include <libkern/OSCacheControl.h>
#   include <System/pthread_machdep.h>
#   include "objc-probes.h"  // generated dtrace probe definitions.

// Some libc functions call objc_msgSend()
// so we can't use them without deadlocks.
void syslog(int, const char *, ...) UNAVAILABLE_ATTRIBUTE;
void vsyslog(int, const char *, va_list) UNAVAILABLE_ATTRIBUTE;


#define ALWAYS_INLINE inline __attribute__((always_inline))
#define NEVER_INLINE __attribute__((noinline))

#define fastpath(x) (__builtin_expect(bool(x), 1))
#define slowpath(x) (__builtin_expect(bool(x), 0))
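
// Usage sketch (illustrative): annotate the expected outcome of a branch so
// the compiler keeps the common path hot and moves the rare path out of line.
//   if (slowpath(obj == nil)) return nil;     // rare case
//   if (fastpath(cache_hit)) return result;   // common case
// `obj`, `cache_hit`, and `result` are hypothetical names.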


static ALWAYS_INLINE uintptr_t
addc(uintptr_t lhs, uintptr_t rhs, uintptr_t carryin, uintptr_t *carryout)
{
    return __builtin_addcl(lhs, rhs, carryin, carryout);
}

static ALWAYS_INLINE uintptr_t
subc(uintptr_t lhs, uintptr_t rhs, uintptr_t carryin, uintptr_t *carryout)
{
    return __builtin_subcl(lhs, rhs, carryin, carryout);
}

#if __arm64__ && !__arm64e__

static ALWAYS_INLINE
uintptr_t
LoadExclusive(uintptr_t *src)
{
    return __builtin_arm_ldrex(src);
}

static ALWAYS_INLINE
bool
StoreExclusive(uintptr_t *dst, uintptr_t *oldvalue, uintptr_t value)
{
    if (slowpath(__builtin_arm_strex(value, dst))) {
        *oldvalue = LoadExclusive(dst);
        return false;
    }
    return true;
}


static ALWAYS_INLINE
bool
StoreReleaseExclusive(uintptr_t *dst, uintptr_t *oldvalue, uintptr_t value)
{
    if (slowpath(__builtin_arm_stlex(value, dst))) {
        *oldvalue = LoadExclusive(dst);
        return false;
    }
    return true;
}

static ALWAYS_INLINE
void
ClearExclusive(uintptr_t *dst __unused)
{
    __builtin_arm_clrex();
}

#else

static ALWAYS_INLINE
uintptr_t
LoadExclusive(uintptr_t *src)
{
    return __c11_atomic_load((_Atomic(uintptr_t) *)src, __ATOMIC_RELAXED);
}

static ALWAYS_INLINE
bool
StoreExclusive(uintptr_t *dst, uintptr_t *oldvalue, uintptr_t value)
{
    return __c11_atomic_compare_exchange_weak((_Atomic(uintptr_t) *)dst, oldvalue, value, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
}


static ALWAYS_INLINE
bool
StoreReleaseExclusive(uintptr_t *dst, uintptr_t *oldvalue, uintptr_t value)
{
    return __c11_atomic_compare_exchange_weak((_Atomic(uintptr_t) *)dst, oldvalue, value, __ATOMIC_RELEASE, __ATOMIC_RELAXED);
}

static ALWAYS_INLINE
void
ClearExclusive(uintptr_t *dst __unused)
{
}

#endif
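
// Typical usage sketch (illustrative): a read-modify-write retry loop.
//   uintptr_t oldval = LoadExclusive(&word);      // `word` is hypothetical
//   uintptr_t newval;
//   do {
//       newval = oldval | SOME_FLAG;              // SOME_FLAG is hypothetical
//   } while (slowpath(!StoreReleaseExclusive(&word, &oldval, newval)));
// On failure the store refreshes *oldvalue, so the loop recomputes from the
// fresh value; ClearExclusive() abandons a reservation when no store follows.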


#if !TARGET_OS_IPHONE
#   include <CrashReporterClient.h>
#else
    // CrashReporterClient not yet available on iOS
    __BEGIN_DECLS
    extern const char *CRSetCrashLogMessage(const char *msg);
    extern const char *CRGetCrashLogMessage(void);
    __END_DECLS
#endif

#   if __cplusplus
#       include <vector>
#       include <algorithm>
#       include <functional>
        using namespace std;
#   endif

#   define PRIVATE_EXTERN __attribute__((visibility("hidden")))
#   undef __private_extern__
#   define __private_extern__ use_PRIVATE_EXTERN_instead
#   undef private_extern
#   define private_extern use_PRIVATE_EXTERN_instead

/* Use this for functions that are intended to be breakpoint hooks.
   If you do not, the compiler may optimize them away.
   BREAKPOINT_FUNCTION( void stop_on_error(void) ); */
#   define BREAKPOINT_FUNCTION(prototype)                             \
    OBJC_EXTERN __attribute__((noinline, used, visibility("hidden"))) \
    prototype { asm(""); }

#elif TARGET_OS_WIN32

#   define WINVER 0x0501        // target Windows XP and later
#   define _WIN32_WINNT 0x0501  // target Windows XP and later
#   define WIN32_LEAN_AND_MEAN
    // hack: windef.h typedefs BOOL as int
#   define BOOL WINBOOL
#   include <windows.h>
#   undef BOOL

#   include <stdio.h>
#   include <stdlib.h>
#   include <stdint.h>
#   include <stdarg.h>
#   include <string.h>
#   include <assert.h>
#   include <malloc.h>
#   include <Availability.h>

#   if __cplusplus
#       include <vector>
#       include <algorithm>
#       include <functional>
        using namespace std;
#       define __BEGIN_DECLS extern "C" {
#       define __END_DECLS   }
#   else
#       define __BEGIN_DECLS /*empty*/
#       define __END_DECLS   /*empty*/
#   endif

#   define PRIVATE_EXTERN
#   define __attribute__(x)
#   define inline __inline

/* Use this for functions that are intended to be breakpoint hooks.
   If you do not, the compiler may optimize them away.
   BREAKPOINT_FUNCTION( void MyBreakpointFunction(void) ); */
#   define BREAKPOINT_FUNCTION(prototype) \
    __declspec(noinline) prototype { __asm { } }

/* stub out dtrace probes */
#   define OBJC_RUNTIME_OBJC_EXCEPTION_RETHROW() do {} while(0)
#   define OBJC_RUNTIME_OBJC_EXCEPTION_THROW(arg0) do {} while(0)

#else
#   error unknown OS
#endif


#include <objc/objc.h>
#include <objc/objc-api.h>

extern void _objc_fatal(const char *fmt, ...)
    __attribute__((noreturn, cold, format (printf, 1, 2)));
extern void _objc_fatal_with_reason(uint64_t reason, uint64_t flags,
                                    const char *fmt, ...)
    __attribute__((noreturn, cold, format (printf, 3, 4)));

#define INIT_ONCE_PTR(var, create, delete)                              \
    do {                                                                \
        if (var) break;                                                 \
        typeof(var) v = create;                                         \
        while (!var) {                                                  \
            if (OSAtomicCompareAndSwapPtrBarrier(0, (void*)v, (void**)&var)){ \
                goto done;                                              \
            }                                                           \
        }                                                               \
        delete;                                                         \
    done:;                                                              \
    } while (0)

#define INIT_ONCE_32(var, create, delete)                               \
    do {                                                                \
        if (var) break;                                                 \
        typeof(var) v = create;                                         \
        while (!var) {                                                  \
            if (OSAtomicCompareAndSwap32Barrier(0, v, (volatile int32_t *)&var)) { \
                goto done;                                              \
            }                                                           \
        }                                                               \
        delete;                                                         \
    done:;                                                              \
    } while (0)
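
// Usage sketch (illustrative): lazily publish a single shared value, with
// the losing thread cleaning up its extra copy via the `delete` argument
// (which may refer to the macro-local temporary `v`).
//   static malloc_zone_t *lazy_zone;   // hypothetical global
//   INIT_ONCE_PTR(lazy_zone,
//                 malloc_create_zone(vm_page_size, 0),
//                 malloc_destroy_zone(v));
// create_semaphore() later in this file is likewise "formatted for
// INIT_ONCE use".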


// Thread keys reserved by libc for our use.
#if defined(__PTK_FRAMEWORK_OBJC_KEY0)
#   define SUPPORT_DIRECT_THREAD_KEYS 1
#   define TLS_DIRECT_KEY        ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY0)
#   define SYNC_DATA_DIRECT_KEY  ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY1)
#   define SYNC_COUNT_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY2)
#   define AUTORELEASE_POOL_KEY  ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY3)
#   if SUPPORT_RETURN_AUTORELEASE
#       define RETURN_DISPOSITION_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY4)
#   endif
#else
#   define SUPPORT_DIRECT_THREAD_KEYS 0
#endif


#if TARGET_OS_WIN32

// Compiler compatibility

// OS compatibility

#define strdup _strdup

#define issetugid() 0

#define MIN(x, y) ((x) < (y) ? (x) : (y))

static __inline void bcopy(const void *src, void *dst, size_t size) { memcpy(dst, src, size); }
static __inline void bzero(void *dst, size_t size) { memset(dst, 0, size); }

int asprintf(char **dstp, const char *format, ...);

typedef void * malloc_zone_t;

static __inline malloc_zone_t malloc_default_zone(void) { return (malloc_zone_t)-1; }
static __inline void *malloc_zone_malloc(malloc_zone_t z, size_t size) { return malloc(size); }
static __inline void *malloc_zone_calloc(malloc_zone_t z, size_t size, size_t count) { return calloc(size, count); }
static __inline void *malloc_zone_realloc(malloc_zone_t z, void *p, size_t size) { return realloc(p, size); }
static __inline void malloc_zone_free(malloc_zone_t z, void *p) { free(p); }
static __inline malloc_zone_t malloc_zone_from_ptr(const void *p) { return (malloc_zone_t)-1; }
static __inline size_t malloc_size(const void *p) { return _msize((void*)p); /* fixme invalid pointer check? */ }

// OSAtomic

static __inline BOOL OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst)
{
    // fixme barrier is overkill
    long original = InterlockedCompareExchange(dst, newl, oldl);
    return (original == oldl);
}

static __inline BOOL OSAtomicCompareAndSwapPtrBarrier(void *oldp, void *newp, void * volatile *dst)
{
    void *original = InterlockedCompareExchangePointer(dst, newp, oldp);
    return (original == oldp);
}

static __inline BOOL OSAtomicCompareAndSwap32Barrier(int32_t oldl, int32_t newl, int32_t volatile *dst)
{
    long original = InterlockedCompareExchange((volatile long *)dst, newl, oldl);
    return (original == oldl);
}

static __inline int32_t OSAtomicDecrement32Barrier(volatile int32_t *dst)
{
    return InterlockedDecrement((volatile long *)dst);
}

static __inline int32_t OSAtomicIncrement32Barrier(volatile int32_t *dst)
{
    return InterlockedIncrement((volatile long *)dst);
}


// Internal data types

typedef DWORD objc_thread_t;  // thread ID
static __inline int thread_equal(objc_thread_t t1, objc_thread_t t2) {
    return t1 == t2;
}
static __inline objc_thread_t objc_thread_self(void) {
    return GetCurrentThreadId();
}

typedef struct {
    DWORD key;
    void (*dtor)(void *);
} tls_key_t;
static __inline tls_key_t tls_create(void (*dtor)(void*)) {
    // fixme need dtor registry for DllMain to call on thread detach
    tls_key_t k;
    k.key = TlsAlloc();
    k.dtor = dtor;
    return k;
}
static __inline void *tls_get(tls_key_t k) {
    return TlsGetValue(k.key);
}
static __inline void tls_set(tls_key_t k, void *value) {
    TlsSetValue(k.key, value);
}

typedef struct {
    CRITICAL_SECTION *lock;
} mutex_t;
#define MUTEX_INITIALIZER {0};
extern void mutex_init(mutex_t *m);
static __inline int _mutex_lock_nodebug(mutex_t *m) {
    // fixme error check
    if (!m->lock) {
        mutex_init(m);
    }
    EnterCriticalSection(m->lock);
    return 0;
}
static __inline bool _mutex_try_lock_nodebug(mutex_t *m) {
    // fixme error check
    if (!m->lock) {
        mutex_init(m);
    }
    return TryEnterCriticalSection(m->lock);
}
static __inline int _mutex_unlock_nodebug(mutex_t *m) {
    // fixme error check
    LeaveCriticalSection(m->lock);
    return 0;
}


typedef mutex_t spinlock_t;
#define spinlock_lock(l) mutex_lock(l)
#define spinlock_unlock(l) mutex_unlock(l)
#define SPINLOCK_INITIALIZER MUTEX_INITIALIZER


typedef struct {
    HANDLE mutex;
} recursive_mutex_t;
#define RECURSIVE_MUTEX_INITIALIZER {0};
#define RECURSIVE_MUTEX_NOT_LOCKED 1
extern void recursive_mutex_init(recursive_mutex_t *m);
static __inline int _recursive_mutex_lock_nodebug(recursive_mutex_t *m) {
    ASSERT(m->mutex);
    return WaitForSingleObject(m->mutex, INFINITE);
}
static __inline bool _recursive_mutex_try_lock_nodebug(recursive_mutex_t *m) {
    ASSERT(m->mutex);
    return (WAIT_OBJECT_0 == WaitForSingleObject(m->mutex, 0));
}
static __inline int _recursive_mutex_unlock_nodebug(recursive_mutex_t *m) {
    ASSERT(m->mutex);
    return ReleaseMutex(m->mutex) ? 0 : RECURSIVE_MUTEX_NOT_LOCKED;
}


/*
typedef HANDLE mutex_t;
static inline void mutex_init(HANDLE *m) { *m = CreateMutex(NULL, FALSE, NULL); }
static inline void _mutex_lock(mutex_t *m) { WaitForSingleObject(*m, INFINITE); }
static inline bool mutex_try_lock(mutex_t *m) { return WaitForSingleObject(*m, 0) == WAIT_OBJECT_0; }
static inline void _mutex_unlock(mutex_t *m) { ReleaseMutex(*m); }
*/

// based on http://www.cs.wustl.edu/~schmidt/win32-cv-1.html
// Vista-only CONDITION_VARIABLE would be better
typedef struct {
    HANDLE mutex;
    HANDLE waiters;      // semaphore for those in cond_wait()
    HANDLE waitersDone;  // auto-reset event after everyone gets a broadcast
    CRITICAL_SECTION waitCountLock;  // guards waitCount and didBroadcast
    unsigned int waitCount;
    int didBroadcast;
} monitor_t;
#define MONITOR_INITIALIZER { 0 }
#define MONITOR_NOT_ENTERED 1
extern int monitor_init(monitor_t *c);

static inline int _monitor_enter_nodebug(monitor_t *c) {
    if (!c->mutex) {
        int err = monitor_init(c);
        if (err) return err;
    }
    return WaitForSingleObject(c->mutex, INFINITE);
}
static inline int _monitor_leave_nodebug(monitor_t *c) {
    if (!ReleaseMutex(c->mutex)) return MONITOR_NOT_ENTERED;
    else return 0;
}
static inline int _monitor_wait_nodebug(monitor_t *c) {
    int last;
    EnterCriticalSection(&c->waitCountLock);
    c->waitCount++;
    LeaveCriticalSection(&c->waitCountLock);

    SignalObjectAndWait(c->mutex, c->waiters, INFINITE, FALSE);

    EnterCriticalSection(&c->waitCountLock);
    c->waitCount--;
    last = c->didBroadcast && c->waitCount == 0;
    LeaveCriticalSection(&c->waitCountLock);

    if (last) {
        // tell broadcaster that all waiters have awoken
        SignalObjectAndWait(c->waitersDone, c->mutex, INFINITE, FALSE);
    } else {
        WaitForSingleObject(c->mutex, INFINITE);
    }

    // fixme error checking
    return 0;
}
static inline int monitor_notify(monitor_t *c) {
    int haveWaiters;

    EnterCriticalSection(&c->waitCountLock);
    haveWaiters = c->waitCount > 0;
    LeaveCriticalSection(&c->waitCountLock);

    if (haveWaiters) {
        ReleaseSemaphore(c->waiters, 1, 0);
    }

    // fixme error checking
    return 0;
}
static inline int monitor_notifyAll(monitor_t *c) {
    EnterCriticalSection(&c->waitCountLock);
    if (c->waitCount == 0) {
        LeaveCriticalSection(&c->waitCountLock);
        return 0;
    }
    c->didBroadcast = 1;
    ReleaseSemaphore(c->waiters, c->waitCount, 0);
    LeaveCriticalSection(&c->waitCountLock);

    // fairness: wait for everyone to move from waiters to mutex
    WaitForSingleObject(c->waitersDone, INFINITE);
    // not under waitCountLock, but still under mutex
    c->didBroadcast = 0;

    // fixme error checking
    return 0;
}


typedef IMAGE_DOS_HEADER headerType;
// fixme YES bundle? NO bundle? sometimes?
#define headerIsBundle(hi) YES
OBJC_EXTERN IMAGE_DOS_HEADER __ImageBase;
#define libobjc_header ((headerType *)&__ImageBase)

// Prototypes


#elif TARGET_OS_MAC


// OS headers
#include <mach-o/loader.h>
#ifndef __LP64__
#   define SEGMENT_CMD LC_SEGMENT
#else
#   define SEGMENT_CMD LC_SEGMENT_64
#endif

#ifndef VM_MEMORY_OBJC_DISPATCHERS
#   define VM_MEMORY_OBJC_DISPATCHERS 0
#endif


// Compiler compatibility

// OS compatibility

static inline uint64_t nanoseconds() {
    return clock_gettime_nsec_np(CLOCK_MONOTONIC_RAW);
}

// Internal data types

typedef pthread_t objc_thread_t;

static __inline int thread_equal(objc_thread_t t1, objc_thread_t t2) {
    return pthread_equal(t1, t2);
}

typedef pthread_key_t tls_key_t;

static inline tls_key_t tls_create(void (*dtor)(void*)) {
    tls_key_t k;
    pthread_key_create(&k, dtor);
    return k;
}
static inline void *tls_get(tls_key_t k) {
    return pthread_getspecific(k);
}
static inline void tls_set(tls_key_t k, void *value) {
    pthread_setspecific(k, value);
}
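
// Usage sketch (illustrative): a per-thread buffer owned through a key.
//   static tls_key_t buf_key;                 // hypothetical key
//   buf_key = tls_create(&free);              // destructor runs at thread exit
//   tls_set(buf_key, calloc(1, 256));
//   void *buf = tls_get(buf_key);
// The _direct variants below are the same idea restricted to the reserved
// __PTK_FRAMEWORK_OBJC_KEY* slots.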

#if SUPPORT_DIRECT_THREAD_KEYS

static inline bool is_valid_direct_key(tls_key_t k) {
    return (   k == SYNC_DATA_DIRECT_KEY
            || k == SYNC_COUNT_DIRECT_KEY
            || k == AUTORELEASE_POOL_KEY
            || k == _PTHREAD_TSD_SLOT_PTHREAD_SELF
#   if SUPPORT_RETURN_AUTORELEASE
            || k == RETURN_DISPOSITION_KEY
#   endif
            );
}

static inline void *tls_get_direct(tls_key_t k)
{
    ASSERT(is_valid_direct_key(k));

    if (_pthread_has_direct_tsd()) {
        return _pthread_getspecific_direct(k);
    } else {
        return pthread_getspecific(k);
    }
}
static inline void tls_set_direct(tls_key_t k, void *value)
{
    ASSERT(is_valid_direct_key(k));

    if (_pthread_has_direct_tsd()) {
        _pthread_setspecific_direct(k, value);
    } else {
        pthread_setspecific(k, value);
    }
}

__attribute__((const))
static inline pthread_t objc_thread_self()
{
    return (pthread_t)tls_get_direct(_PTHREAD_TSD_SLOT_PTHREAD_SELF);
}
#else
__attribute__((const))
static inline pthread_t objc_thread_self()
{
    return pthread_self();
}
#endif // SUPPORT_DIRECT_THREAD_KEYS


template <bool Debug> class mutex_tt;
template <bool Debug> class monitor_tt;
template <bool Debug> class recursive_mutex_tt;

#if DEBUG
#   define LOCKDEBUG 1
#else
#   define LOCKDEBUG 0
#endif

using spinlock_t = mutex_tt<LOCKDEBUG>;
using mutex_t = mutex_tt<LOCKDEBUG>;
using monitor_t = monitor_tt<LOCKDEBUG>;
using recursive_mutex_t = recursive_mutex_tt<LOCKDEBUG>;

// Use fork_unsafe_lock to get a lock that isn't
// acquired and released around fork().
// All fork-safe locks are checked in debug builds.
struct fork_unsafe_lock_t {
    constexpr fork_unsafe_lock_t() = default;
};
extern const fork_unsafe_lock_t fork_unsafe_lock;

#include "objc-lockdebug.h"

template <bool Debug>
class mutex_tt : nocopy_t {
    os_unfair_lock mLock;
  public:
    constexpr mutex_tt() : mLock(OS_UNFAIR_LOCK_INIT) {
        lockdebug_remember_mutex(this);
    }

    constexpr mutex_tt(__unused const fork_unsafe_lock_t unsafe) : mLock(OS_UNFAIR_LOCK_INIT) { }

    void lock() {
        lockdebug_mutex_lock(this);

        // <rdar://problem/50384154>
        uint32_t opts = OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION | OS_UNFAIR_LOCK_ADAPTIVE_SPIN;
        os_unfair_lock_lock_with_options_inline
            (&mLock, (os_unfair_lock_options_t)opts);
    }

    void unlock() {
        lockdebug_mutex_unlock(this);

        os_unfair_lock_unlock_inline(&mLock);
    }

    void forceReset() {
        lockdebug_mutex_unlock(this);

        bzero(&mLock, sizeof(mLock));
        mLock = os_unfair_lock OS_UNFAIR_LOCK_INIT;
    }

    void assertLocked() {
        lockdebug_mutex_assert_locked(this);
    }

    void assertUnlocked() {
        lockdebug_mutex_assert_unlocked(this);
    }


    // Address-ordered lock discipline for a pair of locks.

    static void lockTwo(mutex_tt *lock1, mutex_tt *lock2) {
        if ((uintptr_t)lock1 < (uintptr_t)lock2) {
            lock1->lock();
            lock2->lock();
        } else {
            lock2->lock();
            if (lock2 != lock1) lock1->lock();
        }
    }

    static void unlockTwo(mutex_tt *lock1, mutex_tt *lock2) {
        lock1->unlock();
        if (lock2 != lock1) lock2->unlock();
    }

    // Scoped lock and unlock
    class locker : nocopy_t {
        mutex_tt& lock;
      public:
        locker(mutex_tt& newLock)
            : lock(newLock) { lock.lock(); }
        ~locker() { lock.unlock(); }
    };

    // Either scoped lock and unlock, or NOP.
    class conditional_locker : nocopy_t {
        mutex_tt& lock;
        bool didLock;
      public:
        conditional_locker(mutex_tt& newLock, bool shouldLock)
            : lock(newLock), didLock(shouldLock)
        {
            if (shouldLock) lock.lock();
        }
        ~conditional_locker() { if (didLock) lock.unlock(); }
    };
};

using mutex_locker_t = mutex_tt<LOCKDEBUG>::locker;
using conditional_mutex_locker_t = mutex_tt<LOCKDEBUG>::conditional_locker;

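// Usage sketch (illustrative): RAII locking for a critical section.
//   static mutex_t exampleLock;                // hypothetical lock
//   void updateState() {
//       mutex_locker_t lock(exampleLock);      // unlocked at end of scope
//       // ... mutate protected state ...
//   }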

template <bool Debug>
class recursive_mutex_tt : nocopy_t {
    os_unfair_recursive_lock mLock;

  public:
    constexpr recursive_mutex_tt() : mLock(OS_UNFAIR_RECURSIVE_LOCK_INIT) {
        lockdebug_remember_recursive_mutex(this);
    }

    constexpr recursive_mutex_tt(__unused const fork_unsafe_lock_t unsafe)
        : mLock(OS_UNFAIR_RECURSIVE_LOCK_INIT)
    { }

    void lock()
    {
        lockdebug_recursive_mutex_lock(this);
        os_unfair_recursive_lock_lock(&mLock);
    }

    void unlock()
    {
        lockdebug_recursive_mutex_unlock(this);

        os_unfair_recursive_lock_unlock(&mLock);
    }

    void forceReset()
    {
        lockdebug_recursive_mutex_unlock(this);

        bzero(&mLock, sizeof(mLock));
        mLock = os_unfair_recursive_lock OS_UNFAIR_RECURSIVE_LOCK_INIT;
    }

    bool tryLock()
    {
        if (os_unfair_recursive_lock_trylock(&mLock)) {
            lockdebug_recursive_mutex_lock(this);
            return true;
        }
        return false;
    }

    bool tryUnlock()
    {
        if (os_unfair_recursive_lock_tryunlock4objc(&mLock)) {
            lockdebug_recursive_mutex_unlock(this);
            return true;
        }
        return false;
    }

    void assertLocked() {
        lockdebug_recursive_mutex_assert_locked(this);
    }

    void assertUnlocked() {
        lockdebug_recursive_mutex_assert_unlocked(this);
    }
};


template <bool Debug>
class monitor_tt {
    pthread_mutex_t mutex;
    pthread_cond_t cond;

  public:
    constexpr monitor_tt()
        : mutex(PTHREAD_MUTEX_INITIALIZER), cond(PTHREAD_COND_INITIALIZER)
    {
        lockdebug_remember_monitor(this);
    }

    monitor_tt(__unused const fork_unsafe_lock_t unsafe)
        : mutex(PTHREAD_MUTEX_INITIALIZER), cond(PTHREAD_COND_INITIALIZER)
    { }

    void enter()
    {
        lockdebug_monitor_enter(this);

        int err = pthread_mutex_lock(&mutex);
        if (err) _objc_fatal("pthread_mutex_lock failed (%d)", err);
    }

    void leave()
    {
        lockdebug_monitor_leave(this);

        int err = pthread_mutex_unlock(&mutex);
        if (err) _objc_fatal("pthread_mutex_unlock failed (%d)", err);
    }

    void wait()
    {
        lockdebug_monitor_wait(this);

        int err = pthread_cond_wait(&cond, &mutex);
        if (err) _objc_fatal("pthread_cond_wait failed (%d)", err);
    }

    void notify()
    {
        int err = pthread_cond_signal(&cond);
        if (err) _objc_fatal("pthread_cond_signal failed (%d)", err);
    }

    void notifyAll()
    {
        int err = pthread_cond_broadcast(&cond);
        if (err) _objc_fatal("pthread_cond_broadcast failed (%d)", err);
    }

    void forceReset()
    {
        lockdebug_monitor_leave(this);

        bzero(&mutex, sizeof(mutex));
        bzero(&cond, sizeof(cond));
        mutex = pthread_mutex_t PTHREAD_MUTEX_INITIALIZER;
        cond = pthread_cond_t PTHREAD_COND_INITIALIZER;
    }

    void assertLocked()
    {
        lockdebug_monitor_assert_locked(this);
    }

    void assertUnlocked()
    {
        lockdebug_monitor_assert_unlocked(this);
    }
};

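// Usage sketch (illustrative): the classic monitor pattern.
//   static monitor_t gate;                     // hypothetical monitor
//   // consumer:
//   gate.enter();
//   while (!workAvailable) gate.wait();        // wait() releases and re-acquires
//   // ... consume ...
//   gate.leave();
//   // producer:
//   gate.enter(); workAvailable = true; gate.notify(); gate.leave();
// `workAvailable` is a hypothetical flag protected by the monitor.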

// semaphore_create formatted for INIT_ONCE use
static inline semaphore_t create_semaphore(void)
{
    semaphore_t sem;
    kern_return_t k;
    k = semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);
    if (k) _objc_fatal("semaphore_create failed (0x%x)", k);
    return sem;
}


#ifndef __LP64__
typedef struct mach_header headerType;
typedef struct segment_command segmentType;
typedef struct section sectionType;
#else
typedef struct mach_header_64 headerType;
typedef struct segment_command_64 segmentType;
typedef struct section_64 sectionType;
#endif
#define headerIsBundle(hi) (hi->mhdr()->filetype == MH_BUNDLE)
#define libobjc_header ((headerType *)&_mh_dylib_header)

// Prototypes

/* Secure /tmp usage */
extern int secure_open(const char *filename, int flags, uid_t euid);


#else


#error unknown OS


#endif


static inline void *
memdup(const void *mem, size_t len)
{
    void *dup = malloc(len);
    memcpy(dup, mem, len);
    return dup;
}

// strdup that doesn't copy read-only memory
static inline char *
strdupIfMutable(const char *str)
{
    size_t size = strlen(str) + 1;
    if (_dyld_is_memory_immutable(str, size)) {
        return (char *)str;
    } else {
        return (char *)memdup(str, size);
    }
}

// free strdupIfMutable() result
static inline void
freeIfMutable(char *str)
{
    size_t size = strlen(str) + 1;
    if (_dyld_is_memory_immutable(str, size)) {
        // nothing
    } else {
        free(str);
    }
}

// nil-checking unsigned strdup
static inline uint8_t *
ustrdupMaybeNil(const uint8_t *str)
{
    if (!str) return nil;
    return (uint8_t *)strdupIfMutable((char *)str);
}

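// Usage sketch (illustrative): strdupIfMutable() and freeIfMutable() must be
// paired, because the "copy" may simply be the original immutable pointer,
// which plain free() must never see.
//   char *name = strdupIfMutable(input);   // `input` is a hypothetical string
//   // ... use name ...
//   freeIfMutable(name);
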
// OS version checking:
//
// sdkIsAtLeast(mac, ios, tv, watch, bridge)
//
// This version order matches OBJC_AVAILABLE.
//
// NOTE: prefer dyld_program_sdk_at_least when possible
#define sdkIsAtLeast(x, i, t, w, b)                                          \
    (dyld_program_sdk_at_least(dyld_platform_version_macOS_ ## x)     ||    \
     dyld_program_sdk_at_least(dyld_platform_version_iOS_ ## i)       ||    \
     dyld_program_sdk_at_least(dyld_platform_version_tvOS_ ## t)      ||    \
     dyld_program_sdk_at_least(dyld_platform_version_watchOS_ ## w)   ||    \
     dyld_program_sdk_at_least(dyld_platform_version_bridgeOS_ ## b))


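// Usage sketch (illustrative; the version suffixes here are examples, not a
// recommendation): gate a behavior on the SDK the program was built against.
//   if (sdkIsAtLeast(10_13, 11_0, 11_0, 4_0, 2_0)) {
//       // behavior for programs built with newer SDKs
//   }
// The five arguments are pasted onto dyld_platform_version_{macOS,iOS,tvOS,
// watchOS,bridgeOS}_ in that order.
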
#ifndef __BUILDING_OBJCDT__
// fork() safety requires careful tracking of all locks.
// Our custom lock types check this in debug builds.
// Disallow direct use of all other lock types.
typedef __darwin_pthread_mutex_t pthread_mutex_t UNAVAILABLE_ATTRIBUTE;
typedef __darwin_pthread_rwlock_t pthread_rwlock_t UNAVAILABLE_ATTRIBUTE;
typedef int32_t OSSpinLock UNAVAILABLE_ATTRIBUTE;
typedef struct os_unfair_lock_s os_unfair_lock UNAVAILABLE_ATTRIBUTE;
#endif

#endif