// apple/objc4 (objc4-781.2), runtime/objc-os.h
// (source obtained via the git.saurik.com mirror's blob view)
1 /*
2 * Copyright (c) 2007 Apple Inc. All Rights Reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 /***********************************************************************
25 * objc-os.h
26 * OS portability layer.
27 **********************************************************************/
28
29 #ifndef _OBJC_OS_H
30 #define _OBJC_OS_H
31
32 #include <atomic>
33 #include <TargetConditionals.h>
34 #include "objc-config.h"
35 #include "objc-private.h"
36
37 #ifdef __LP64__
38 # define WORD_SHIFT 3UL
39 # define WORD_MASK 7UL
40 # define WORD_BITS 64
41 #else
42 # define WORD_SHIFT 2UL
43 # define WORD_MASK 3UL
44 # define WORD_BITS 32
45 #endif
46
47 static inline uint32_t word_align(uint32_t x) {
48 return (x + WORD_MASK) & ~WORD_MASK;
49 }
50 static inline size_t word_align(size_t x) {
51 return (x + WORD_MASK) & ~WORD_MASK;
52 }
// Round x up to the next multiple of 16 bytes.
static inline size_t align16(size_t x) {
    const size_t mask = size_t(15);
    return (x + mask) & ~mask;
}
56
// Mix-in base class that makes derived classes non-copyable.
class nocopy_t {
  protected:
    // Derived classes may still be default-constructed and destroyed.
    constexpr nocopy_t() = default;
    ~nocopy_t() = default;

  private:
    // Copying is forbidden.
    nocopy_t(const nocopy_t&) = delete;
    const nocopy_t& operator=(const nocopy_t&) = delete;
};
66
// Version of std::atomic that does not allow implicit conversions
// to/from the wrapped type, and requires an explicit memory order
// be passed to load() and store().
template <typename T>
struct explicit_atomic : public std::atomic<T> {
    explicit explicit_atomic(T initial) noexcept : std::atomic<T>(std::move(initial)) {}
    operator T() const = delete;

    T load(std::memory_order order) const noexcept {
        return std::atomic<T>::load(order);
    }
    void store(T desired, std::memory_order order) noexcept {
        std::atomic<T>::store(desired, order);
    }

    // Convert a normal pointer to an atomic pointer. This is a
    // somewhat dodgy thing to do, but if the atomic type is lock
    // free and the same size as the non-atomic type, we know the
    // representations are the same, and the compiler generates good
    // code.
    static explicit_atomic<T> *from_pointer(T *ptr) {
        // Fix: compare the object sizes, not the pointer sizes.
        // The previous form, sizeof(explicit_atomic<T> *) == sizeof(T *),
        // compares two pointer types and is trivially true for every T,
        // so it never enforced the invariant the message describes.
        static_assert(sizeof(explicit_atomic<T>) == sizeof(T),
                      "Size of atomic must match size of original");
        explicit_atomic<T> *atomic = (explicit_atomic<T> *)ptr;
        ASSERT(atomic->is_lock_free());
        return atomic;
    }
};
95
96 #if TARGET_OS_MAC
97
98 # define OS_UNFAIR_LOCK_INLINE 1
99
100 # ifndef __STDC_LIMIT_MACROS
101 # define __STDC_LIMIT_MACROS
102 # endif
103
104 # include <stdio.h>
105 # include <stdlib.h>
106 # include <stdint.h>
107 # include <stdarg.h>
108 # include <string.h>
109 # include <ctype.h>
110 # include <errno.h>
111 # include <dlfcn.h>
112 # include <fcntl.h>
113 # include <assert.h>
114 # include <limits.h>
115 # include <syslog.h>
116 # include <unistd.h>
117 # include <pthread.h>
118 # include <crt_externs.h>
119 # undef check
120 # include <Availability.h>
121 # include <TargetConditionals.h>
122 # include <sys/mman.h>
123 # include <sys/time.h>
124 # include <sys/stat.h>
125 # include <sys/param.h>
126 # include <sys/reason.h>
127 # include <mach/mach.h>
128 # include <mach/vm_param.h>
129 # include <mach/mach_time.h>
130 # include <mach-o/dyld.h>
131 # include <mach-o/ldsyms.h>
132 # include <mach-o/loader.h>
133 # include <mach-o/getsect.h>
134 # include <mach-o/dyld_priv.h>
135 # include <malloc/malloc.h>
136 # include <os/lock_private.h>
137 # include <libkern/OSAtomic.h>
138 # include <libkern/OSCacheControl.h>
139 # include <System/pthread_machdep.h>
140 # include "objc-probes.h" // generated dtrace probe definitions.
141
142 // Some libc functions call objc_msgSend()
143 // so we can't use them without deadlocks.
144 void syslog(int, const char *, ...) UNAVAILABLE_ATTRIBUTE;
145 void vsyslog(int, const char *, va_list) UNAVAILABLE_ATTRIBUTE;
146
147
148 #define ALWAYS_INLINE inline __attribute__((always_inline))
149 #define NEVER_INLINE __attribute__((noinline))
150
151 #define fastpath(x) (__builtin_expect(bool(x), 1))
152 #define slowpath(x) (__builtin_expect(bool(x), 0))
153
154
// Full-width add with carry-in; *carryout receives the carry-out bit.
// Wraps the clang __builtin_addcl intrinsic (unsigned long operands).
static ALWAYS_INLINE uintptr_t
addc(uintptr_t lhs, uintptr_t rhs, uintptr_t carryin, uintptr_t *carryout)
{
    return __builtin_addcl(lhs, rhs, carryin, carryout);
}
160
// Full-width subtract with borrow-in; *carryout receives the borrow-out bit.
// Wraps the clang __builtin_subcl intrinsic (unsigned long operands).
static ALWAYS_INLINE uintptr_t
subc(uintptr_t lhs, uintptr_t rhs, uintptr_t carryin, uintptr_t *carryout)
{
    return __builtin_subcl(lhs, rhs, carryin, carryout);
}
166
#if __arm64__ && !__arm64e__

// Load-exclusive / store-exclusive primitives for the runtime's CAS
// retry loops. On plain arm64 they map directly onto the hardware
// ldxr/stxr monitor instructions.

// Begin an exclusive monitor on *src and return its value (ldxr).
static ALWAYS_INLINE
uintptr_t
LoadExclusive(uintptr_t *src)
{
    return __builtin_arm_ldrex(src);
}

// Store-exclusive (stxr). Returns true on success. `oldvalue` is unused
// here: the hardware monitor, not a value compare, decides success.
static ALWAYS_INLINE
bool
StoreExclusive(uintptr_t *dst, uintptr_t oldvalue __unused, uintptr_t value)
{
    return !__builtin_arm_strex(value, dst);
}


// Store-release-exclusive (stlex): as above, plus release ordering.
static ALWAYS_INLINE
bool
StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue __unused, uintptr_t value)
{
    return !__builtin_arm_stlex(value, dst);
}

// Abandon the exclusive monitor without storing (clrex). Call after a
// LoadExclusive that will not be followed by a StoreExclusive.
static ALWAYS_INLINE
void
ClearExclusive(uintptr_t *dst __unused)
{
    __builtin_arm_clrex();
}

#else

// Targets without usable LL/SC (including arm64e): emulate the
// exclusive pair with a C11 relaxed load plus a weak compare-exchange.
// Unlike the hardware path, `oldvalue` matters here.

static ALWAYS_INLINE
uintptr_t
LoadExclusive(uintptr_t *src)
{
    return __c11_atomic_load((_Atomic(uintptr_t) *)src, __ATOMIC_RELAXED);
}

// Weak CAS from oldvalue to value, relaxed ordering. May fail
// spuriously, just like a real stxr; callers loop.
static ALWAYS_INLINE
bool
StoreExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
{
    return __c11_atomic_compare_exchange_weak((_Atomic(uintptr_t) *)dst, &oldvalue, value, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
}


// Weak CAS with release ordering on success, relaxed on failure.
static ALWAYS_INLINE
bool
StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
{
    return __c11_atomic_compare_exchange_weak((_Atomic(uintptr_t) *)dst, &oldvalue, value, __ATOMIC_RELEASE, __ATOMIC_RELAXED);
}

// Nothing to clear in the CAS emulation; kept for interface symmetry.
static ALWAYS_INLINE
void
ClearExclusive(uintptr_t *dst __unused)
{
}

#endif
229
230
231 #if !TARGET_OS_IPHONE
232 # include <CrashReporterClient.h>
233 #else
234 // CrashReporterClient not yet available on iOS
235 __BEGIN_DECLS
236 extern const char *CRSetCrashLogMessage(const char *msg);
237 extern const char *CRGetCrashLogMessage(void);
238 __END_DECLS
239 #endif
240
241 # if __cplusplus
242 # include <vector>
243 # include <algorithm>
244 # include <functional>
245 using namespace std;
246 # endif
247
248 # define PRIVATE_EXTERN __attribute__((visibility("hidden")))
249 # undef __private_extern__
250 # define __private_extern__ use_PRIVATE_EXTERN_instead
251 # undef private_extern
252 # define private_extern use_PRIVATE_EXTERN_instead
253
254 /* Use this for functions that are intended to be breakpoint hooks.
255 If you do not, the compiler may optimize them away.
256 BREAKPOINT_FUNCTION( void stop_on_error(void) ); */
257 # define BREAKPOINT_FUNCTION(prototype) \
258 OBJC_EXTERN __attribute__((noinline, used, visibility("hidden"))) \
259 prototype { asm(""); }
260
261 #elif TARGET_OS_WIN32
262
263 # define WINVER 0x0501 // target Windows XP and later
264 # define _WIN32_WINNT 0x0501 // target Windows XP and later
265 # define WIN32_LEAN_AND_MEAN
266 // hack: windef.h typedefs BOOL as int
267 # define BOOL WINBOOL
268 # include <windows.h>
269 # undef BOOL
270
271 # include <stdio.h>
272 # include <stdlib.h>
273 # include <stdint.h>
274 # include <stdarg.h>
275 # include <string.h>
276 # include <assert.h>
277 # include <malloc.h>
278 # include <Availability.h>
279
280 # if __cplusplus
281 # include <vector>
282 # include <algorithm>
283 # include <functional>
284 using namespace std;
285 # define __BEGIN_DECLS extern "C" {
286 # define __END_DECLS }
287 # else
288 # define __BEGIN_DECLS /*empty*/
289 # define __END_DECLS /*empty*/
290 # endif
291
292 # define PRIVATE_EXTERN
293 # define __attribute__(x)
294 # define inline __inline
295
296 /* Use this for functions that are intended to be breakpoint hooks.
297 If you do not, the compiler may optimize them away.
298 BREAKPOINT_FUNCTION( void MyBreakpointFunction(void) ); */
299 # define BREAKPOINT_FUNCTION(prototype) \
300 __declspec(noinline) prototype { __asm { } }
301
302 /* stub out dtrace probes */
303 # define OBJC_RUNTIME_OBJC_EXCEPTION_RETHROW() do {} while(0)
304 # define OBJC_RUNTIME_OBJC_EXCEPTION_THROW(arg0) do {} while(0)
305
306 #else
307 # error unknown OS
308 #endif
309
310
311 #include <objc/objc.h>
312 #include <objc/objc-api.h>
313
314 extern void _objc_fatal(const char *fmt, ...)
315 __attribute__((noreturn, cold, format (printf, 1, 2)));
316 extern void _objc_fatal_with_reason(uint64_t reason, uint64_t flags,
317 const char *fmt, ...)
318 __attribute__((noreturn, cold, format (printf, 3, 4)));
319
/* One-time lazy initialization of a pointer-sized global `var`.
   Evaluates `create` to build a candidate, publishes it with a barrier
   CAS, and runs `delete` on the candidate if another thread won the
   race.  NOTE(review): `create` is evaluated (and its result destroyed)
   even when another thread initializes var concurrently. */
#define INIT_ONCE_PTR(var, create, delete) \
    do { \
        if (var) break; \
        typeof(var) v = create; \
        while (!var) { \
            if (OSAtomicCompareAndSwapPtrBarrier(0, (void*)v, (void**)&var)){ \
                goto done; \
            } \
        } \
        delete; \
    done:; \
    } while (0)

/* Same one-time-init pattern for a 32-bit global `var`. */
#define INIT_ONCE_32(var, create, delete) \
    do { \
        if (var) break; \
        typeof(var) v = create; \
        while (!var) { \
            if (OSAtomicCompareAndSwap32Barrier(0, v, (volatile int32_t *)&var)) { \
                goto done; \
            } \
        } \
        delete; \
    done:; \
    } while (0)
345
346
347 // Thread keys reserved by libc for our use.
348 #if defined(__PTK_FRAMEWORK_OBJC_KEY0)
349 # define SUPPORT_DIRECT_THREAD_KEYS 1
350 # define TLS_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY0)
351 # define SYNC_DATA_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY1)
352 # define SYNC_COUNT_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY2)
353 # define AUTORELEASE_POOL_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY3)
354 # if SUPPORT_RETURN_AUTORELEASE
355 # define RETURN_DISPOSITION_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY4)
356 # endif
357 #else
358 # define SUPPORT_DIRECT_THREAD_KEYS 0
359 #endif
360
361
362 #if TARGET_OS_WIN32
363
364 // Compiler compatibility
365
366 // OS compatibility
367
// Map BSD/Darwin libc names onto MSVC equivalents.
#define strdup _strdup

// Windows has no setuid concept; report "not set-user-ID".
#define issetugid() 0

#define MIN(x, y) ((x) < (y) ? (x) : (y))

// BSD bcopy/bzero in terms of memcpy/memset (note bcopy's src-first
// argument order, opposite of memcpy).
static __inline void bcopy(const void *src, void *dst, size_t size) { memcpy(dst, src, size); }
static __inline void bzero(void *dst, size_t size) { memset(dst, 0, size); }

int asprintf(char **dstp, const char *format, ...);

// Malloc zones collapse onto the single CRT heap; the zone value is a
// dummy token ((malloc_zone_t)-1) that every function ignores.
typedef void * malloc_zone_t;

static __inline malloc_zone_t malloc_default_zone(void) { return (malloc_zone_t)-1; }
static __inline void *malloc_zone_malloc(malloc_zone_t z, size_t size) { return malloc(size); }
static __inline void *malloc_zone_calloc(malloc_zone_t z, size_t size, size_t count) { return calloc(size, count); }
static __inline void *malloc_zone_realloc(malloc_zone_t z, void *p, size_t size) { return realloc(p, size); }
static __inline void malloc_zone_free(malloc_zone_t z, void *p) { free(p); }
static __inline malloc_zone_t malloc_zone_from_ptr(const void *p) { return (malloc_zone_t)-1; }
static __inline size_t malloc_size(const void *p) { return _msize((void*)p); /* fixme invalid pointer check? */ }
388
389
// OSAtomic shims over Win32 Interlocked* primitives. Interlocked calls
// are full barriers, which satisfies (and exceeds) these contracts.

static __inline BOOL OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst)
{
    // fixme barrier is overkill
    long original = InterlockedCompareExchange(dst, newl, oldl);
    return (original == oldl);
}

static __inline BOOL OSAtomicCompareAndSwapPtrBarrier(void *oldp, void *newp, void * volatile *dst)
{
    void *original = InterlockedCompareExchangePointer(dst, newp, oldp);
    return (original == oldp);
}

static __inline BOOL OSAtomicCompareAndSwap32Barrier(int32_t oldl, int32_t newl, int32_t volatile *dst)
{
    long original = InterlockedCompareExchange((volatile long *)dst, newl, oldl);
    return (original == oldl);
}

// Returns the new (post-operation) value, matching OSAtomic semantics.
static __inline int32_t OSAtomicDecrement32Barrier(volatile int32_t *dst)
{
    return InterlockedDecrement((volatile long *)dst);
}

static __inline int32_t OSAtomicIncrement32Barrier(volatile int32_t *dst)
{
    return InterlockedIncrement((volatile long *)dst);
}
420
421
// Internal data types

// Threads are identified by Win32 thread ID.
typedef DWORD objc_thread_t; // thread ID
static __inline int thread_equal(objc_thread_t t1, objc_thread_t t2) {
    return t1 == t2;
}
static __inline objc_thread_t objc_thread_self(void) {
    return GetCurrentThreadId();
}

// A TLS key plus its destructor. TlsAlloc has no destructor support,
// so the dtor is carried alongside the key (and currently never runs;
// see the fixme below).
typedef struct {
    DWORD key;
    void (*dtor)(void *);
} tls_key_t;
static __inline tls_key_t tls_create(void (*dtor)(void*)) {
    // fixme need dtor registry for DllMain to call on thread detach
    tls_key_t k;
    k.key = TlsAlloc();
    k.dtor = dtor;
    return k;
}
static __inline void *tls_get(tls_key_t k) {
    return TlsGetValue(k.key);
}
static __inline void tls_set(tls_key_t k, void *value) {
    TlsSetValue(k.key, value);
}
449
// Mutex backed by a lazily-allocated CRITICAL_SECTION.
// MUTEX_INITIALIZER leaves `lock` NULL; the first lock/trylock calls
// mutex_init to allocate it.
typedef struct {
    CRITICAL_SECTION *lock;
} mutex_t;
#define MUTEX_INITIALIZER {0};
extern void mutex_init(mutex_t *m);
static __inline int _mutex_lock_nodebug(mutex_t *m) {
    // fixme error check
    if (!m->lock) {
        mutex_init(m);
    }
    EnterCriticalSection(m->lock);
    return 0;
}
static __inline bool _mutex_try_lock_nodebug(mutex_t *m) {
    // fixme error check
    if (!m->lock) {
        mutex_init(m);
    }
    return TryEnterCriticalSection(m->lock);
}
static __inline int _mutex_unlock_nodebug(mutex_t *m) {
    // fixme error check
    LeaveCriticalSection(m->lock);
    return 0;
}


// Spinlocks are plain mutexes on this platform.
typedef mutex_t spinlock_t;
#define spinlock_lock(l) mutex_lock(l)
#define spinlock_unlock(l) mutex_unlock(l)
#define SPINLOCK_INITIALIZER MUTEX_INITIALIZER
481
482
// Recursive mutex backed by a Win32 mutex HANDLE.
// No lazy init here, unlike mutex_t above: the ASSERTs require
// recursive_mutex_init to have run before first use.
typedef struct {
    HANDLE mutex;
} recursive_mutex_t;
#define RECURSIVE_MUTEX_INITIALIZER {0};
#define RECURSIVE_MUTEX_NOT_LOCKED 1
extern void recursive_mutex_init(recursive_mutex_t *m);
static __inline int _recursive_mutex_lock_nodebug(recursive_mutex_t *m) {
    ASSERT(m->mutex);
    return WaitForSingleObject(m->mutex, INFINITE);
}
static __inline bool _recursive_mutex_try_lock_nodebug(recursive_mutex_t *m) {
    ASSERT(m->mutex);
    return (WAIT_OBJECT_0 == WaitForSingleObject(m->mutex, 0));
}
static __inline int _recursive_mutex_unlock_nodebug(recursive_mutex_t *m) {
    ASSERT(m->mutex);
    // ReleaseMutex fails if this thread does not hold the mutex.
    return ReleaseMutex(m->mutex) ? 0 : RECURSIVE_MUTEX_NOT_LOCKED;
}
501
502
503 /*
504 typedef HANDLE mutex_t;
505 static inline void mutex_init(HANDLE *m) { *m = CreateMutex(NULL, FALSE, NULL); }
506 static inline void _mutex_lock(mutex_t *m) { WaitForSingleObject(*m, INFINITE); }
507 static inline bool mutex_try_lock(mutex_t *m) { return WaitForSingleObject(*m, 0) == WAIT_OBJECT_0; }
508 static inline void _mutex_unlock(mutex_t *m) { ReleaseMutex(*m); }
509 */
510
// based on http://www.cs.wustl.edu/~schmidt/win32-cv-1.html
// Vista-only CONDITION_VARIABLE would be better
typedef struct {
    HANDLE mutex;
    HANDLE waiters;      // semaphore for those in cond_wait()
    HANDLE waitersDone;  // auto-reset event after everyone gets a broadcast
    CRITICAL_SECTION waitCountLock; // guards waitCount and didBroadcast
    unsigned int waitCount;
    int didBroadcast;
} monitor_t;
#define MONITOR_INITIALIZER { 0 }
#define MONITOR_NOT_ENTERED 1
extern int monitor_init(monitor_t *c);

// Enter the monitor's mutex, lazily initializing on first use.
static inline int _monitor_enter_nodebug(monitor_t *c) {
    if (!c->mutex) {
        int err = monitor_init(c);
        if (err) return err;
    }
    return WaitForSingleObject(c->mutex, INFINITE);
}
static inline int _monitor_leave_nodebug(monitor_t *c) {
    if (!ReleaseMutex(c->mutex)) return MONITOR_NOT_ENTERED;
    else return 0;
}
// Wait: register as a waiter, atomically release the mutex and block on
// the waiters semaphore, then re-acquire the mutex on wakeup. During a
// broadcast, the last waiter to wake signals waitersDone so the
// broadcaster can finish.
static inline int _monitor_wait_nodebug(monitor_t *c) {
    int last;
    EnterCriticalSection(&c->waitCountLock);
    c->waitCount++;
    LeaveCriticalSection(&c->waitCountLock);

    // Atomically release the mutex and wait on the semaphore.
    SignalObjectAndWait(c->mutex, c->waiters, INFINITE, FALSE);

    EnterCriticalSection(&c->waitCountLock);
    c->waitCount--;
    last = c->didBroadcast && c->waitCount == 0;
    LeaveCriticalSection(&c->waitCountLock);

    if (last) {
        // tell broadcaster that all waiters have awoken
        SignalObjectAndWait(c->waitersDone, c->mutex, INFINITE, FALSE);
    } else {
        WaitForSingleObject(c->mutex, INFINITE);
    }

    // fixme error checking
    return 0;
}
// Wake one waiter, if any.
static inline int monitor_notify(monitor_t *c) {
    int haveWaiters;

    EnterCriticalSection(&c->waitCountLock);
    haveWaiters = c->waitCount > 0;
    LeaveCriticalSection(&c->waitCountLock);

    if (haveWaiters) {
        ReleaseSemaphore(c->waiters, 1, 0);
    }

    // fixme error checking
    return 0;
}
// Wake every waiter, then block until all have left the semaphore.
static inline int monitor_notifyAll(monitor_t *c) {
    EnterCriticalSection(&c->waitCountLock);
    if (c->waitCount == 0) {
        LeaveCriticalSection(&c->waitCountLock);
        return 0;
    }
    c->didBroadcast = 1;
    ReleaseSemaphore(c->waiters, c->waitCount, 0);
    LeaveCriticalSection(&c->waitCountLock);

    // fairness: wait for everyone to move from waiters to mutex
    WaitForSingleObject(c->waitersDone, INFINITE);
    // not under waitCountLock, but still under mutex
    c->didBroadcast = 0;

    // fixme error checking
    return 0;
}
591
592
593 typedef IMAGE_DOS_HEADER headerType;
594 // fixme YES bundle? NO bundle? sometimes?
595 #define headerIsBundle(hi) YES
596 OBJC_EXTERN IMAGE_DOS_HEADER __ImageBase;
597 #define libobjc_header ((headerType *)&__ImageBase)
598
599 // Prototypes
600
601
602 #elif TARGET_OS_MAC
603
604
605 // OS headers
606 #include <mach-o/loader.h>
607 #ifndef __LP64__
608 # define SEGMENT_CMD LC_SEGMENT
609 #else
610 # define SEGMENT_CMD LC_SEGMENT_64
611 #endif
612
613 #ifndef VM_MEMORY_OBJC_DISPATCHERS
614 # define VM_MEMORY_OBJC_DISPATCHERS 0
615 #endif
616
617
618 // Compiler compatibility
619
620 // OS compatibility
621
// Monotonic time in nanoseconds (CLOCK_MONOTONIC_RAW: not wall-clock
// time, and unaffected by clock slewing).
static inline uint64_t nanoseconds() {
    return clock_gettime_nsec_np(CLOCK_MONOTONIC_RAW);
}
625
626 // Internal data types
627
// Threads are identified by pthread_t.
typedef pthread_t objc_thread_t;

static __inline int thread_equal(objc_thread_t t1, objc_thread_t t2) {
    return pthread_equal(t1, t2);
}

// Thread-local storage via ordinary pthread keys.
typedef pthread_key_t tls_key_t;

static inline tls_key_t tls_create(void (*dtor)(void*)) {
    // NOTE(review): pthread_key_create's return value is ignored; an
    // undetected failure would hand back an uninitialized key.
    tls_key_t k;
    pthread_key_create(&k, dtor);
    return k;
}
static inline void *tls_get(tls_key_t k) {
    return pthread_getspecific(k);
}
static inline void tls_set(tls_key_t k, void *value) {
    pthread_setspecific(k, value);
}
647
#if SUPPORT_DIRECT_THREAD_KEYS

// Only the reserved libc keys (and the pthread-self slot) are allowed
// to use the fast direct-TSD accessors below.
static inline bool is_valid_direct_key(tls_key_t k) {
    return (   k == SYNC_DATA_DIRECT_KEY
            || k == SYNC_COUNT_DIRECT_KEY
            || k == AUTORELEASE_POOL_KEY
            || k == _PTHREAD_TSD_SLOT_PTHREAD_SELF
# if SUPPORT_RETURN_AUTORELEASE
            || k == RETURN_DISPOSITION_KEY
# endif
            );
}

// Read a direct key: inline TSD access when the pthread library
// supports it, otherwise fall back to pthread_getspecific.
static inline void *tls_get_direct(tls_key_t k)
{
    ASSERT(is_valid_direct_key(k));

    if (_pthread_has_direct_tsd()) {
        return _pthread_getspecific_direct(k);
    } else {
        return pthread_getspecific(k);
    }
}
// Write a direct key; same fast/slow split as tls_get_direct.
static inline void tls_set_direct(tls_key_t k, void *value)
{
    ASSERT(is_valid_direct_key(k));

    if (_pthread_has_direct_tsd()) {
        _pthread_setspecific_direct(k, value);
    } else {
        pthread_setspecific(k, value);
    }
}

// Current thread via the pthread-self TSD slot, bypassing the
// pthread_self() call. Marked const: the answer never changes for a
// given thread.
__attribute__((const))
static inline pthread_t objc_thread_self()
{
    return (pthread_t)tls_get_direct(_PTHREAD_TSD_SLOT_PTHREAD_SELF);
}
#else
__attribute__((const))
static inline pthread_t objc_thread_self()
{
    return pthread_self();
}
#endif // SUPPORT_DIRECT_THREAD_KEYS
694
695
696 template <bool Debug> class mutex_tt;
697 template <bool Debug> class monitor_tt;
698 template <bool Debug> class recursive_mutex_tt;
699
700 #if DEBUG
701 # define LOCKDEBUG 1
702 #else
703 # define LOCKDEBUG 0
704 #endif
705
706 using spinlock_t = mutex_tt<LOCKDEBUG>;
707 using mutex_t = mutex_tt<LOCKDEBUG>;
708 using monitor_t = monitor_tt<LOCKDEBUG>;
709 using recursive_mutex_t = recursive_mutex_tt<LOCKDEBUG>;
710
711 // Use fork_unsafe_lock to get a lock that isn't
712 // acquired and released around fork().
713 // All fork-safe locks are checked in debug builds.
714 struct fork_unsafe_lock_t {
715 constexpr fork_unsafe_lock_t() = default;
716 };
717 extern const fork_unsafe_lock_t fork_unsafe_lock;
718
719 #include "objc-lockdebug.h"
720
// Non-recursive mutex over os_unfair_lock. When Debug is set, every
// operation is cross-checked by the lockdebug machinery.
template <bool Debug>
class mutex_tt : nocopy_t {
    os_unfair_lock mLock;
  public:
    constexpr mutex_tt() : mLock(OS_UNFAIR_LOCK_INIT) {
        lockdebug_remember_mutex(this);
    }

    // fork_unsafe variant: not registered with lockdebug, so it is not
    // among the locks handled around fork().
    constexpr mutex_tt(const fork_unsafe_lock_t unsafe) : mLock(OS_UNFAIR_LOCK_INIT) { }

    void lock() {
        lockdebug_mutex_lock(this);

        // <rdar://problem/50384154>
        uint32_t opts = OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION | OS_UNFAIR_LOCK_ADAPTIVE_SPIN;
        os_unfair_lock_lock_with_options_inline
            (&mLock, (os_unfair_lock_options_t)opts);
    }

    void unlock() {
        lockdebug_mutex_unlock(this);

        os_unfair_lock_unlock_inline(&mLock);
    }

    // Reinitialize unconditionally (used to reset locks, e.g. in the
    // child after fork(), regardless of prior state).
    void forceReset() {
        lockdebug_mutex_unlock(this);

        bzero(&mLock, sizeof(mLock));
        mLock = os_unfair_lock OS_UNFAIR_LOCK_INIT;
    }

    void assertLocked() {
        lockdebug_mutex_assert_locked(this);
    }

    void assertUnlocked() {
        lockdebug_mutex_assert_unlocked(this);
    }


    // Address-ordered lock discipline for a pair of locks.
    // Also tolerates lock1 == lock2 (locks/unlocks only once).

    static void lockTwo(mutex_tt *lock1, mutex_tt *lock2) {
        if (lock1 < lock2) {
            lock1->lock();
            lock2->lock();
        } else {
            lock2->lock();
            if (lock2 != lock1) lock1->lock();
        }
    }

    static void unlockTwo(mutex_tt *lock1, mutex_tt *lock2) {
        lock1->unlock();
        if (lock2 != lock1) lock2->unlock();
    }

    // Scoped lock and unlock
    class locker : nocopy_t {
        mutex_tt& lock;
      public:
        locker(mutex_tt& newLock)
            : lock(newLock) { lock.lock(); }
        ~locker() { lock.unlock(); }
    };

    // Either scoped lock and unlock, or NOP.
    class conditional_locker : nocopy_t {
        mutex_tt& lock;
        bool didLock;
      public:
        conditional_locker(mutex_tt& newLock, bool shouldLock)
            : lock(newLock), didLock(shouldLock)
        {
            if (shouldLock) lock.lock();
        }
        ~conditional_locker() { if (didLock) lock.unlock(); }
    };
};
801
802 using mutex_locker_t = mutex_tt<LOCKDEBUG>::locker;
803 using conditional_mutex_locker_t = mutex_tt<LOCKDEBUG>::conditional_locker;
804
805
// Recursive mutex over os_unfair_recursive_lock, with lockdebug
// cross-checking when Debug is set.
template <bool Debug>
class recursive_mutex_tt : nocopy_t {
    os_unfair_recursive_lock mLock;

  public:
    constexpr recursive_mutex_tt() : mLock(OS_UNFAIR_RECURSIVE_LOCK_INIT) {
        lockdebug_remember_recursive_mutex(this);
    }

    // fork_unsafe variant: not registered with lockdebug.
    constexpr recursive_mutex_tt(const fork_unsafe_lock_t unsafe)
        : mLock(OS_UNFAIR_RECURSIVE_LOCK_INIT)
    { }

    void lock()
    {
        lockdebug_recursive_mutex_lock(this);
        os_unfair_recursive_lock_lock(&mLock);
    }

    void unlock()
    {
        lockdebug_recursive_mutex_unlock(this);

        os_unfair_recursive_lock_unlock(&mLock);
    }

    // Reinitialize unconditionally (e.g. in the child after fork()).
    void forceReset()
    {
        lockdebug_recursive_mutex_unlock(this);

        bzero(&mLock, sizeof(mLock));
        mLock = os_unfair_recursive_lock OS_UNFAIR_RECURSIVE_LOCK_INIT;
    }

    bool tryLock()
    {
        if (os_unfair_recursive_lock_trylock(&mLock)) {
            lockdebug_recursive_mutex_lock(this);
            return true;
        }
        return false;
    }

    // NOTE(review): tryunlock4objc presumably fails (returns false) when
    // the calling thread does not hold the lock -- verify against
    // os/lock_private.h before relying on that.
    bool tryUnlock()
    {
        if (os_unfair_recursive_lock_tryunlock4objc(&mLock)) {
            lockdebug_recursive_mutex_unlock(this);
            return true;
        }
        return false;
    }

    void assertLocked() {
        lockdebug_recursive_mutex_assert_locked(this);
    }

    void assertUnlocked() {
        lockdebug_recursive_mutex_assert_unlocked(this);
    }
};
866
867
// Monitor (mutex + condition variable) over pthread primitives.
// Any pthread failure is fatal to the process.
template <bool Debug>
class monitor_tt {
    pthread_mutex_t mutex;
    pthread_cond_t cond;

  public:
    constexpr monitor_tt()
        : mutex(PTHREAD_MUTEX_INITIALIZER), cond(PTHREAD_COND_INITIALIZER)
    {
        lockdebug_remember_monitor(this);
    }

    // fork_unsafe variant: not registered with lockdebug.
    monitor_tt(const fork_unsafe_lock_t unsafe)
        : mutex(PTHREAD_MUTEX_INITIALIZER), cond(PTHREAD_COND_INITIALIZER)
    { }

    void enter()
    {
        lockdebug_monitor_enter(this);

        int err = pthread_mutex_lock(&mutex);
        if (err) _objc_fatal("pthread_mutex_lock failed (%d)", err);
    }

    void leave()
    {
        lockdebug_monitor_leave(this);

        int err = pthread_mutex_unlock(&mutex);
        if (err) _objc_fatal("pthread_mutex_unlock failed (%d)", err);
    }

    // Caller must have enter()ed the monitor first.
    void wait()
    {
        lockdebug_monitor_wait(this);

        int err = pthread_cond_wait(&cond, &mutex);
        if (err) _objc_fatal("pthread_cond_wait failed (%d)", err);
    }

    void notify()
    {
        int err = pthread_cond_signal(&cond);
        if (err) _objc_fatal("pthread_cond_signal failed (%d)", err);
    }

    void notifyAll()
    {
        int err = pthread_cond_broadcast(&cond);
        if (err) _objc_fatal("pthread_cond_broadcast failed (%d)", err);
    }

    // Reinitialize unconditionally (e.g. in the child after fork()).
    void forceReset()
    {
        lockdebug_monitor_leave(this);

        bzero(&mutex, sizeof(mutex));
        bzero(&cond, sizeof(cond));
        mutex = pthread_mutex_t PTHREAD_MUTEX_INITIALIZER;
        cond = pthread_cond_t PTHREAD_COND_INITIALIZER;
    }

    void assertLocked()
    {
        lockdebug_monitor_assert_locked(this);
    }

    void assertUnlocked()
    {
        lockdebug_monitor_assert_unlocked(this);
    }
};
940
941
// semaphore_create formatted for INIT_ONCE use
// Creates a FIFO mach semaphore with an initial count of 0; failure is
// fatal, so the returned semaphore is always valid.
static inline semaphore_t create_semaphore(void)
{
    semaphore_t sem;
    kern_return_t k;
    k = semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);
    if (k) _objc_fatal("semaphore_create failed (0x%x)", k);
    return sem;
}
951
952
953 #ifndef __LP64__
954 typedef struct mach_header headerType;
955 typedef struct segment_command segmentType;
956 typedef struct section sectionType;
957 #else
958 typedef struct mach_header_64 headerType;
959 typedef struct segment_command_64 segmentType;
960 typedef struct section_64 sectionType;
961 #endif
962 #define headerIsBundle(hi) (hi->mhdr()->filetype == MH_BUNDLE)
963 #define libobjc_header ((headerType *)&_mh_dylib_header)
964
965 // Prototypes
966
967 /* Secure /tmp usage */
968 extern int secure_open(const char *filename, int flags, uid_t euid);
969
970
971 #else
972
973
974 #error unknown OS
975
976
977 #endif
978
979
// Return a malloc'd copy of `len` bytes starting at `mem`.
// Caller owns the result and must free() it.
// Returns NULL if the allocation fails.
// Fix: check malloc before copying -- memcpy into a NULL destination
// is undefined behavior (CERT MEM32-C); previously an OOM here was UB
// rather than a defined NULL return.
static inline void *
memdup(const void *mem, size_t len)
{
    void *dup = malloc(len);
    if (!dup) return NULL;
    memcpy(dup, mem, len);
    return dup;
}
987
// strdup that doesn't copy read-only memory
// If `str` (including its terminator) lives in immutable memory, return
// it unchanged; otherwise return a heap copy via memdup. Release the
// result with freeIfMutable(), not free().
static inline char *
strdupIfMutable(const char *str)
{
    size_t size = strlen(str) + 1;
    if (_dyld_is_memory_immutable(str, size)) {
        return (char *)str;
    } else {
        return (char *)memdup(str, size);
    }
}
999
// free strdupIfMutable() result
// Frees only heap copies; immutable strings were returned unchanged by
// strdupIfMutable and must not be passed to free().
static inline void
freeIfMutable(char *str)
{
    size_t size = strlen(str) + 1;
    if (_dyld_is_memory_immutable(str, size)) {
        // nothing
    } else {
        free(str);
    }
}
1011
// nil-checking unsigned strdup
// Returns nil for nil input; otherwise defers to strdupIfMutable
// (so the result may alias the input -- free with freeIfMutable()).
static inline uint8_t *
ustrdupMaybeNil(const uint8_t *str)
{
    if (!str) return nil;
    return (uint8_t *)strdupIfMutable((char *)str);
}
1019
1020 // OS version checking:
1021 //
1022 // sdkVersion()
1023 // DYLD_OS_VERSION(mac, ios, tv, watch, bridge)
1024 // sdkIsOlderThan(mac, ios, tv, watch, bridge)
1025 // sdkIsAtLeast(mac, ios, tv, watch, bridge)
1026 //
1027 // This version order matches OBJC_AVAILABLE.
1028
1029 #if TARGET_OS_OSX
1030 # define DYLD_OS_VERSION(x, i, t, w, b) DYLD_MACOSX_VERSION_##x
1031 # define sdkVersion() dyld_get_program_sdk_version()
1032
1033 #elif TARGET_OS_IOS
1034 # define DYLD_OS_VERSION(x, i, t, w, b) DYLD_IOS_VERSION_##i
1035 # define sdkVersion() dyld_get_program_sdk_version()
1036
1037 #elif TARGET_OS_TV
1038 // dyld does not currently have distinct constants for tvOS
1039 # define DYLD_OS_VERSION(x, i, t, w, b) DYLD_IOS_VERSION_##t
1040 # define sdkVersion() dyld_get_program_sdk_version()
1041
1042 #elif TARGET_OS_BRIDGE
1043 # if TARGET_OS_WATCH
1044 # error bridgeOS 1.0 not supported
1045 # endif
1046 // fixme don't need bridgeOS versioning yet
1047 # define DYLD_OS_VERSION(x, i, t, w, b) DYLD_IOS_VERSION_##t
1048 # define sdkVersion() dyld_get_program_sdk_bridge_os_version()
1049
1050 #elif TARGET_OS_WATCH
1051 # define DYLD_OS_VERSION(x, i, t, w, b) DYLD_WATCHOS_VERSION_##w
1052 // watchOS has its own API for compatibility reasons
1053 # define sdkVersion() dyld_get_program_sdk_watch_os_version()
1054
1055 #else
1056 # error unknown OS
1057 #endif
1058
1059
1060 #define sdkIsOlderThan(x, i, t, w, b) \
1061 (sdkVersion() < DYLD_OS_VERSION(x, i, t, w, b))
1062 #define sdkIsAtLeast(x, i, t, w, b) \
1063 (sdkVersion() >= DYLD_OS_VERSION(x, i, t, w, b))
1064
1065 // Allow bare 0 to be used in DYLD_OS_VERSION() and sdkIsOlderThan()
1066 #define DYLD_MACOSX_VERSION_0 0
1067 #define DYLD_IOS_VERSION_0 0
1068 #define DYLD_TVOS_VERSION_0 0
1069 #define DYLD_WATCHOS_VERSION_0 0
1070 #define DYLD_BRIDGEOS_VERSION_0 0
1071
1072 // Pretty-print a DYLD_*_VERSION_* constant.
1073 #define SDK_FORMAT "%hu.%hhu.%hhu"
1074 #define FORMAT_SDK(v) \
1075 (unsigned short)(((uint32_t)(v))>>16), \
1076 (unsigned char)(((uint32_t)(v))>>8), \
1077 (unsigned char)(((uint32_t)(v))>>0)
1078
1079 // fork() safety requires careful tracking of all locks.
1080 // Our custom lock types check this in debug builds.
1081 // Disallow direct use of all other lock types.
1082 typedef __darwin_pthread_mutex_t pthread_mutex_t UNAVAILABLE_ATTRIBUTE;
1083 typedef __darwin_pthread_rwlock_t pthread_rwlock_t UNAVAILABLE_ATTRIBUTE;
1084 typedef int32_t OSSpinLock UNAVAILABLE_ATTRIBUTE;
1085 typedef struct os_unfair_lock_s os_unfair_lock UNAVAILABLE_ATTRIBUTE;
1086
1087
1088 #endif