]> git.saurik.com Git - apple/objc4.git/blob - runtime/objc-os.h
objc4-750.tar.gz
[apple/objc4.git] / runtime / objc-os.h
1 /*
2 * Copyright (c) 2007 Apple Inc. All Rights Reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 /***********************************************************************
25 * objc-os.h
26 * OS portability layer.
27 **********************************************************************/
28
29 #ifndef _OBJC_OS_H
30 #define _OBJC_OS_H
31
32 #include <TargetConditionals.h>
33 #include "objc-config.h"
34
35 #ifdef __LP64__
36 # define WORD_SHIFT 3UL
37 # define WORD_MASK 7UL
38 # define WORD_BITS 64
39 #else
40 # define WORD_SHIFT 2UL
41 # define WORD_MASK 3UL
42 # define WORD_BITS 32
43 #endif
44
45 static inline uint32_t word_align(uint32_t x) {
46 return (x + WORD_MASK) & ~WORD_MASK;
47 }
48 static inline size_t word_align(size_t x) {
49 return (x + WORD_MASK) & ~WORD_MASK;
50 }
51
52
53 // Mix-in for classes that must not be copied.
class nocopy_t {
  private:
    // Deleted copy operations make every subclass non-copyable.
    nocopy_t(const nocopy_t&) = delete;
    const nocopy_t& operator=(const nocopy_t&) = delete;
  protected:
    // Construction and destruction only through subclasses;
    // never instantiated on its own.
    constexpr nocopy_t() = default;
    ~nocopy_t() = default;
};
62
63
64 #if TARGET_OS_MAC
65
66 # define OS_UNFAIR_LOCK_INLINE 1
67
68 # ifndef __STDC_LIMIT_MACROS
69 # define __STDC_LIMIT_MACROS
70 # endif
71
72 # include <stdio.h>
73 # include <stdlib.h>
74 # include <stdint.h>
75 # include <stdarg.h>
76 # include <string.h>
77 # include <ctype.h>
78 # include <errno.h>
79 # include <dlfcn.h>
80 # include <fcntl.h>
81 # include <assert.h>
82 # include <limits.h>
83 # include <syslog.h>
84 # include <unistd.h>
85 # include <pthread.h>
86 # include <crt_externs.h>
87 # undef check
88 # include <Availability.h>
89 # include <TargetConditionals.h>
90 # include <sys/mman.h>
91 # include <sys/time.h>
92 # include <sys/stat.h>
93 # include <sys/param.h>
94 # include <sys/reason.h>
95 # include <mach/mach.h>
96 # include <mach/vm_param.h>
97 # include <mach/mach_time.h>
98 # include <mach-o/dyld.h>
99 # include <mach-o/ldsyms.h>
100 # include <mach-o/loader.h>
101 # include <mach-o/getsect.h>
102 # include <mach-o/dyld_priv.h>
103 # include <malloc/malloc.h>
104 # include <os/lock_private.h>
105 # include <libkern/OSAtomic.h>
106 # include <libkern/OSCacheControl.h>
107 # include <System/pthread_machdep.h>
108 # include "objc-probes.h" // generated dtrace probe definitions.
109
110 // Some libc functions call objc_msgSend()
111 // so we can't use them without deadlocks.
112 void syslog(int, const char *, ...) UNAVAILABLE_ATTRIBUTE;
113 void vsyslog(int, const char *, va_list) UNAVAILABLE_ATTRIBUTE;
114
115
116 #define ALWAYS_INLINE inline __attribute__((always_inline))
117 #define NEVER_INLINE inline __attribute__((noinline))
118
119 #define fastpath(x) (__builtin_expect(bool(x), 1))
120 #define slowpath(x) (__builtin_expect(bool(x), 0))
121
122
// Add-with-carry on pointer-sized words: returns lhs + rhs + carryin and
// stores the carry-out bit through carryout. Thin wrapper over clang's
// __builtin_addcl (unsigned long flavor).
static ALWAYS_INLINE uintptr_t
addc(uintptr_t lhs, uintptr_t rhs, uintptr_t carryin, uintptr_t *carryout)
{
    return __builtin_addcl(lhs, rhs, carryin, carryout);
}

// Subtract-with-borrow counterpart of addc(): returns lhs - rhs - carryin
// and stores the borrow-out bit through carryout.
static ALWAYS_INLINE uintptr_t
subc(uintptr_t lhs, uintptr_t rhs, uintptr_t carryin, uintptr_t *carryout)
{
    return __builtin_subcl(lhs, rhs, carryin, carryout);
}
134
135
136 #if __arm64__
137
138 // Pointer-size register prefix for inline asm
139 # if __LP64__
140 # define p "x" // true arm64
141 # else
142 # define p "w" // arm64_32
143 # endif
144
// Load-exclusive: LDXR starts a load/store-exclusive pair. The hardware
// exclusive monitor armed here is what a later STXR/STLXR tests.
// "p" is the x/w register-prefix macro defined above (LP64 vs arm64_32).
static ALWAYS_INLINE
uintptr_t
LoadExclusive(uintptr_t *src)
{
    uintptr_t result;
    asm("ldxr %" p "0, [%x1]"
        : "=r" (result)
        : "r" (src), "m" (*src));
    return result;
}

// Store-exclusive: STXR succeeds only if the exclusive monitor from
// LoadExclusive() is still held. oldvalue is unused on arm64 because the
// hardware reservation, not a value comparison, decides success.
static ALWAYS_INLINE
bool
StoreExclusive(uintptr_t *dst, uintptr_t oldvalue __unused, uintptr_t value)
{
    uint32_t result;
    asm("stxr %w0, %" p "2, [%x3]"
        : "=&r" (result), "=m" (*dst)
        : "r" (value), "r" (dst));
    // STXR writes 0 to its status register on success.
    return !result;
}


// Same as StoreExclusive() but with release semantics (STLXR): prior
// memory accesses become visible before the store.
static ALWAYS_INLINE
bool
StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue __unused, uintptr_t value)
{
    uint32_t result;
    asm("stlxr %w0, %" p "2, [%x3]"
        : "=&r" (result), "=m" (*dst)
        : "r" (value), "r" (dst));
    return !result;
}

// Drop any outstanding exclusive reservation without storing.
static ALWAYS_INLINE
void
ClearExclusive(uintptr_t *dst)
{
    // pretend it writes to *dst for instruction ordering purposes
    asm("clrex" : "=m" (*dst));
}
186
187 #undef p
188
189 #elif __arm__
190
// 32-bit ARM has no LDREX emulation here: the exclusive pair is emulated
// with a plain load plus compare-and-swap in StoreExclusive(), so unlike
// arm64 the oldvalue parameter is significant on this path.
static ALWAYS_INLINE
uintptr_t
LoadExclusive(uintptr_t *src)
{
    return *src;
}

// Succeeds only if *dst still equals oldvalue. No memory barrier.
static ALWAYS_INLINE
bool
StoreExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
{
    return OSAtomicCompareAndSwapPtr((void *)oldvalue, (void *)value,
                                     (void **)dst);
}

// Same as StoreExclusive() plus a full memory barrier.
static ALWAYS_INLINE
bool
StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
{
    return OSAtomicCompareAndSwapPtrBarrier((void *)oldvalue, (void *)value,
                                            (void **)dst);
}

// Nothing to clear: no hardware reservation exists in the CAS emulation.
static ALWAYS_INLINE
void
ClearExclusive(uintptr_t *dst __unused)
{
}
219
220
221 #elif __x86_64__ || __i386__
222
// x86 has no load-linked instruction; a plain load plus the CAS in
// StoreExclusive() provides the same protocol. oldvalue is significant
// on this path (value comparison, not a hardware reservation).
static ALWAYS_INLINE
uintptr_t
LoadExclusive(uintptr_t *src)
{
    return *src;
}

// Full-barrier compare-and-swap: succeeds only if *dst still holds oldvalue.
static ALWAYS_INLINE
bool
StoreExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
{

    return __sync_bool_compare_and_swap((void **)dst, (void *)oldvalue, (void *)value);
}

// The CAS above is already a full barrier on x86, so release == plain.
static ALWAYS_INLINE
bool
StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
{
    return StoreExclusive(dst, oldvalue, value);
}

// Nothing to clear: no reservation exists in the CAS emulation.
static ALWAYS_INLINE
void
ClearExclusive(uintptr_t *dst __unused)
{
}
250
251
252 #else
253 # error unknown architecture
254 #endif
255
256
257 #if !TARGET_OS_IPHONE
258 # include <CrashReporterClient.h>
259 #else
260 // CrashReporterClient not yet available on iOS
261 __BEGIN_DECLS
262 extern const char *CRSetCrashLogMessage(const char *msg);
263 extern const char *CRGetCrashLogMessage(void);
264 __END_DECLS
265 #endif
266
267 # if __cplusplus
268 # include <vector>
269 # include <algorithm>
270 # include <functional>
271 using namespace std;
272 # endif
273
274 # define PRIVATE_EXTERN __attribute__((visibility("hidden")))
275 # undef __private_extern__
276 # define __private_extern__ use_PRIVATE_EXTERN_instead
277 # undef private_extern
278 # define private_extern use_PRIVATE_EXTERN_instead
279
280 /* Use this for functions that are intended to be breakpoint hooks.
281 If you do not, the compiler may optimize them away.
282 BREAKPOINT_FUNCTION( void stop_on_error(void) ); */
283 # define BREAKPOINT_FUNCTION(prototype) \
284 OBJC_EXTERN __attribute__((noinline, used, visibility("hidden"))) \
285 prototype { asm(""); }
286
287 #elif TARGET_OS_WIN32
288
289 # define WINVER 0x0501 // target Windows XP and later
290 # define _WIN32_WINNT 0x0501 // target Windows XP and later
291 # define WIN32_LEAN_AND_MEAN
292 // hack: windef.h typedefs BOOL as int
293 # define BOOL WINBOOL
294 # include <windows.h>
295 # undef BOOL
296
297 # include <stdio.h>
298 # include <stdlib.h>
299 # include <stdint.h>
300 # include <stdarg.h>
301 # include <string.h>
302 # include <assert.h>
303 # include <malloc.h>
304 # include <Availability.h>
305
306 # if __cplusplus
307 # include <vector>
308 # include <algorithm>
309 # include <functional>
310 using namespace std;
311 # define __BEGIN_DECLS extern "C" {
312 # define __END_DECLS }
313 # else
314 # define __BEGIN_DECLS /*empty*/
315 # define __END_DECLS /*empty*/
316 # endif
317
318 # define PRIVATE_EXTERN
319 # define __attribute__(x)
320 # define inline __inline
321
322 /* Use this for functions that are intended to be breakpoint hooks.
323 If you do not, the compiler may optimize them away.
324 BREAKPOINT_FUNCTION( void MyBreakpointFunction(void) ); */
325 # define BREAKPOINT_FUNCTION(prototype) \
326 __declspec(noinline) prototype { __asm { } }
327
328 /* stub out dtrace probes */
329 # define OBJC_RUNTIME_OBJC_EXCEPTION_RETHROW() do {} while(0)
330 # define OBJC_RUNTIME_OBJC_EXCEPTION_THROW(arg0) do {} while(0)
331
332 #else
333 # error unknown OS
334 #endif
335
336
337 #include <objc/objc.h>
338 #include <objc/objc-api.h>
339
340 extern void _objc_fatal(const char *fmt, ...)
341 __attribute__((noreturn, format (printf, 1, 2)));
342 extern void _objc_fatal_with_reason(uint64_t reason, uint64_t flags,
343 const char *fmt, ...)
344 __attribute__((noreturn, format (printf, 3, 4)));
345
346 #define INIT_ONCE_PTR(var, create, delete) \
347 do { \
348 if (var) break; \
349 typeof(var) v = create; \
350 while (!var) { \
351 if (OSAtomicCompareAndSwapPtrBarrier(0, (void*)v, (void**)&var)){ \
352 goto done; \
353 } \
354 } \
355 delete; \
356 done:; \
357 } while (0)
358
359 #define INIT_ONCE_32(var, create, delete) \
360 do { \
361 if (var) break; \
362 typeof(var) v = create; \
363 while (!var) { \
364 if (OSAtomicCompareAndSwap32Barrier(0, v, (volatile int32_t *)&var)) { \
365 goto done; \
366 } \
367 } \
368 delete; \
369 done:; \
370 } while (0)
371
372
373 // Thread keys reserved by libc for our use.
374 #if defined(__PTK_FRAMEWORK_OBJC_KEY0)
375 # define SUPPORT_DIRECT_THREAD_KEYS 1
376 # define TLS_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY0)
377 # define SYNC_DATA_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY1)
378 # define SYNC_COUNT_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY2)
379 # define AUTORELEASE_POOL_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY3)
380 # if SUPPORT_RETURN_AUTORELEASE
381 # define RETURN_DISPOSITION_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY4)
382 # endif
383 #else
384 # define SUPPORT_DIRECT_THREAD_KEYS 0
385 #endif
386
387
388 #if TARGET_OS_WIN32
389
390 // Compiler compatibility
391
392 // OS compatibility
393
394 #define strdup _strdup
395
396 #define issetugid() 0
397
398 #define MIN(x, y) ((x) < (y) ? (x) : (y))
399
// BSD bcopy() shim. Note the (src, dst) argument order, reversed from
// memcpy's. BSD bcopy is specified to handle overlapping regions, so
// forward to memmove — memcpy is undefined behavior for overlap.
static __inline void bcopy(const void *src, void *dst, size_t size) { memmove(dst, src, size); }
// BSD bzero() shim: zero-fill the first `size` bytes at dst.
static __inline void bzero(void *dst, size_t size)
{
    memset(dst, 0, size);
}
402
403 int asprintf(char **dstp, const char *format, ...);
404
// Minimal malloc_zone_* shims for Windows: there is only one "zone",
// represented by the sentinel (malloc_zone_t)-1, and every call forwards
// to the CRT heap, ignoring the zone argument.
typedef void * malloc_zone_t;

static __inline malloc_zone_t malloc_default_zone(void) { return (malloc_zone_t)-1; }
static __inline void *malloc_zone_malloc(malloc_zone_t z, size_t size) { return malloc(size); }
// NOTE(review): parameters here are (size, count) while macOS
// malloc_zone_calloc takes (num_items, size); harmless because calloc
// multiplies its arguments either way — confirm against callers.
static __inline void *malloc_zone_calloc(malloc_zone_t z, size_t size, size_t count) { return calloc(size, count); }
static __inline void *malloc_zone_realloc(malloc_zone_t z, void *p, size_t size) { return realloc(p, size); }
static __inline void malloc_zone_free(malloc_zone_t z, void *p) { free(p); }
static __inline malloc_zone_t malloc_zone_from_ptr(const void *p) { return (malloc_zone_t)-1; }
static __inline size_t malloc_size(const void *p) { return _msize((void*)p); /* fixme invalid pointer check? */ }
414
415
416 // OSAtomic
417
// OSAtomic compatibility shims built on Win32 Interlocked* primitives.
// Interlocked operations are always full barriers on Windows, so the
// non-barrier variant below is stronger than its name requires.
static __inline BOOL OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst)
{
    // fixme barrier is overkill
    long original = InterlockedCompareExchange(dst, newl, oldl);
    return (original == oldl);
}

// Pointer-sized CAS with full barrier.
static __inline BOOL OSAtomicCompareAndSwapPtrBarrier(void *oldp, void *newp, void * volatile *dst)
{
    void *original = InterlockedCompareExchangePointer(dst, newp, oldp);
    return (original == oldp);
}

// 32-bit CAS with full barrier. Relies on Windows' long being 32 bits.
static __inline BOOL OSAtomicCompareAndSwap32Barrier(int32_t oldl, int32_t newl, int32_t volatile *dst)
{
    long original = InterlockedCompareExchange((volatile long *)dst, newl, oldl);
    return (original == oldl);
}

// Atomically decrement and return the new value.
static __inline int32_t OSAtomicDecrement32Barrier(volatile int32_t *dst)
{
    return InterlockedDecrement((volatile long *)dst);
}

// Atomically increment and return the new value.
static __inline int32_t OSAtomicIncrement32Barrier(volatile int32_t *dst)
{
    return InterlockedIncrement((volatile long *)dst);
}
446
447
448 // Internal data types
449
typedef DWORD objc_thread_t; // thread ID
// Thread IDs are plain integers on Windows; direct comparison suffices.
static __inline int thread_equal(objc_thread_t t1, objc_thread_t t2) {
    return t1 == t2;
}
static __inline objc_thread_t thread_self(void) {
    return GetCurrentThreadId();
}

// Thread-local-storage key plus its destructor. The dtor is recorded but
// never called by this layer (see fixme below).
typedef struct {
    DWORD key;
    void (*dtor)(void *);
} tls_key_t;
static __inline tls_key_t tls_create(void (*dtor)(void*)) {
    // fixme need dtor registry for DllMain to call on thread detach
    // NOTE(review): TlsAlloc() can fail with TLS_OUT_OF_INDEXES; the
    // result is not checked here — confirm callers tolerate that.
    tls_key_t k;
    k.key = TlsAlloc();
    k.dtor = dtor;
    return k;
}
static __inline void *tls_get(tls_key_t k) {
    return TlsGetValue(k.key);
}
static __inline void tls_set(tls_key_t k, void *value) {
    TlsSetValue(k.key, value);
}
475
476 typedef struct {
477 CRITICAL_SECTION *lock;
478 } mutex_t;
479 #define MUTEX_INITIALIZER {0};
480 extern void mutex_init(mutex_t *m);
481 static __inline int _mutex_lock_nodebug(mutex_t *m) {
482 // fixme error check
483 if (!m->lock) {
484 mutex_init(m);
485 }
486 EnterCriticalSection(m->lock);
487 return 0;
488 }
489 static __inline bool _mutex_try_lock_nodebug(mutex_t *m) {
490 // fixme error check
491 if (!m->lock) {
492 mutex_init(m);
493 }
494 return TryEnterCriticalSection(m->lock);
495 }
496 static __inline int _mutex_unlock_nodebug(mutex_t *m) {
497 // fixme error check
498 LeaveCriticalSection(m->lock);
499 return 0;
500 }
501
502
503 typedef mutex_t spinlock_t;
504 #define spinlock_lock(l) mutex_lock(l)
505 #define spinlock_unlock(l) mutex_unlock(l)
506 #define SPINLOCK_INITIALIZER MUTEX_INITIALIZER
507
508
509 typedef struct {
510 HANDLE mutex;
511 } recursive_mutex_t;
512 #define RECURSIVE_MUTEX_INITIALIZER {0};
513 #define RECURSIVE_MUTEX_NOT_LOCKED 1
514 extern void recursive_mutex_init(recursive_mutex_t *m);
515 static __inline int _recursive_mutex_lock_nodebug(recursive_mutex_t *m) {
516 assert(m->mutex);
517 return WaitForSingleObject(m->mutex, INFINITE);
518 }
519 static __inline bool _recursive_mutex_try_lock_nodebug(recursive_mutex_t *m) {
520 assert(m->mutex);
521 return (WAIT_OBJECT_0 == WaitForSingleObject(m->mutex, 0));
522 }
523 static __inline int _recursive_mutex_unlock_nodebug(recursive_mutex_t *m) {
524 assert(m->mutex);
525 return ReleaseMutex(m->mutex) ? 0 : RECURSIVE_MUTEX_NOT_LOCKED;
526 }
527
528
529 /*
530 typedef HANDLE mutex_t;
531 static inline void mutex_init(HANDLE *m) { *m = CreateMutex(NULL, FALSE, NULL); }
532 static inline void _mutex_lock(mutex_t *m) { WaitForSingleObject(*m, INFINITE); }
533 static inline bool mutex_try_lock(mutex_t *m) { return WaitForSingleObject(*m, 0) == WAIT_OBJECT_0; }
534 static inline void _mutex_unlock(mutex_t *m) { ReleaseMutex(*m); }
535 */
536
537 // based on http://www.cs.wustl.edu/~schmidt/win32-cv-1.html
538 // Vista-only CONDITION_VARIABLE would be better
// Condition-variable emulation for pre-Vista Windows (see URL above).
typedef struct {
    HANDLE mutex;
    HANDLE waiters; // semaphore for those in cond_wait()
    HANDLE waitersDone; // auto-reset event after everyone gets a broadcast
    CRITICAL_SECTION waitCountLock; // guards waitCount and didBroadcast
    unsigned int waitCount;
    int didBroadcast;
} monitor_t;
#define MONITOR_INITIALIZER { 0 }
#define MONITOR_NOT_ENTERED 1
extern int monitor_init(monitor_t *c);

// Acquire the monitor mutex, lazily initializing on first use.
static inline int _monitor_enter_nodebug(monitor_t *c) {
    if (!c->mutex) {
        int err = monitor_init(c);
        if (err) return err;
    }
    return WaitForSingleObject(c->mutex, INFINITE);
}
// Release the monitor mutex. MONITOR_NOT_ENTERED if not owned.
static inline int _monitor_leave_nodebug(monitor_t *c) {
    if (!ReleaseMutex(c->mutex)) return MONITOR_NOT_ENTERED;
    else return 0;
}
// Wait: atomically release the mutex and sleep on the waiters semaphore,
// then re-acquire the mutex before returning. The last waiter of a
// broadcast signals waitersDone so notifyAll() can proceed.
static inline int _monitor_wait_nodebug(monitor_t *c) {
    int last;
    EnterCriticalSection(&c->waitCountLock);
    c->waitCount++;
    LeaveCriticalSection(&c->waitCountLock);

    // Atomic release-and-wait prevents a lost wakeup between unlocking
    // the mutex and blocking on the semaphore.
    SignalObjectAndWait(c->mutex, c->waiters, INFINITE, FALSE);

    EnterCriticalSection(&c->waitCountLock);
    c->waitCount--;
    last = c->didBroadcast && c->waitCount == 0;
    LeaveCriticalSection(&c->waitCountLock);

    if (last) {
        // tell broadcaster that all waiters have awoken
        SignalObjectAndWait(c->waitersDone, c->mutex, INFINITE, FALSE);
    } else {
        WaitForSingleObject(c->mutex, INFINITE);
    }

    // fixme error checking
    return 0;
}
// Wake one waiter, if any. Caller should hold the monitor mutex.
static inline int monitor_notify(monitor_t *c) {
    int haveWaiters;

    EnterCriticalSection(&c->waitCountLock);
    haveWaiters = c->waitCount > 0;
    LeaveCriticalSection(&c->waitCountLock);

    if (haveWaiters) {
        ReleaseSemaphore(c->waiters, 1, 0);
    }

    // fixme error checking
    return 0;
}
// Wake all waiters and block until every one has left the semaphore.
static inline int monitor_notifyAll(monitor_t *c) {
    EnterCriticalSection(&c->waitCountLock);
    if (c->waitCount == 0) {
        LeaveCriticalSection(&c->waitCountLock);
        return 0;
    }
    c->didBroadcast = 1;
    ReleaseSemaphore(c->waiters, c->waitCount, 0);
    LeaveCriticalSection(&c->waitCountLock);

    // fairness: wait for everyone to move from waiters to mutex
    WaitForSingleObject(c->waitersDone, INFINITE);
    // not under waitCountLock, but still under mutex
    c->didBroadcast = 0;

    // fixme error checking
    return 0;
}
617
618
619 typedef IMAGE_DOS_HEADER headerType;
620 // fixme YES bundle? NO bundle? sometimes?
621 #define headerIsBundle(hi) YES
622 OBJC_EXTERN IMAGE_DOS_HEADER __ImageBase;
623 #define libobjc_header ((headerType *)&__ImageBase)
624
625 // Prototypes
626
627
628 #elif TARGET_OS_MAC
629
630
631 // OS headers
632 #include <mach-o/loader.h>
633 #ifndef __LP64__
634 # define SEGMENT_CMD LC_SEGMENT
635 #else
636 # define SEGMENT_CMD LC_SEGMENT_64
637 #endif
638
639 #ifndef VM_MEMORY_OBJC_DISPATCHERS
640 # define VM_MEMORY_OBJC_DISPATCHERS 0
641 #endif
642
643
644 // Compiler compatibility
645
646 // OS compatibility
647
// Current time from mach_absolute_time().
// NOTE(review): despite the name, mach_absolute_time() returns ticks in
// Mach timebase units, which equal nanoseconds only on some hardware —
// confirm callers don't require true nanoseconds without scaling.
static inline uint64_t nanoseconds() {
    return mach_absolute_time();
}
651
652 // Internal data types
653
typedef pthread_t objc_thread_t;

// Nonzero if t1 and t2 refer to the same thread.
static __inline int thread_equal(objc_thread_t t1, objc_thread_t t2) {
    return pthread_equal(t1, t2);
}
// The calling thread's handle.
static __inline objc_thread_t thread_self(void) {
    return pthread_self();
}
662
663
typedef pthread_key_t tls_key_t;

// Allocate a TLS key whose per-thread value is destroyed by dtor at
// thread exit (dtor may be NULL).
// NOTE(review): pthread_key_create's return value is unchecked; key
// exhaustion would go unnoticed — confirm this is acceptable here.
static inline tls_key_t tls_create(void (*dtor)(void*)) {
    tls_key_t k;
    pthread_key_create(&k, dtor);
    return k;
}
// Read the calling thread's value for key k (NULL if never set).
static inline void *tls_get(tls_key_t k) {
    return pthread_getspecific(k);
}
// Set the calling thread's value for key k.
static inline void tls_set(tls_key_t k, void *value) {
    pthread_setspecific(k, value);
}
677
678 #if SUPPORT_DIRECT_THREAD_KEYS
679
680 #if DEBUG
// Debug-only sanity check: k must be one of the libc-reserved direct TSD
// keys defined above, because tls_get_direct()/tls_set_direct() bypass
// pthread bookkeeping entirely.
static bool is_valid_direct_key(tls_key_t k) {
    return ( k == SYNC_DATA_DIRECT_KEY
             || k == SYNC_COUNT_DIRECT_KEY
             || k == AUTORELEASE_POOL_KEY
# if SUPPORT_RETURN_AUTORELEASE
             || k == RETURN_DISPOSITION_KEY
# endif
             );
}
690 #endif
691
// Fast TLS read for a reserved direct key: uses the inlined pthread TSD
// slot when available, otherwise falls back to pthread_getspecific().
static inline void *tls_get_direct(tls_key_t k)
{
    assert(is_valid_direct_key(k));

    if (_pthread_has_direct_tsd()) {
        return _pthread_getspecific_direct(k);
    } else {
        return pthread_getspecific(k);
    }
}
// Fast TLS write for a reserved direct key; see tls_get_direct().
static inline void tls_set_direct(tls_key_t k, void *value)
{
    assert(is_valid_direct_key(k));

    if (_pthread_has_direct_tsd()) {
        _pthread_setspecific_direct(k, value);
    } else {
        pthread_setspecific(k, value);
    }
}
712
713 // SUPPORT_DIRECT_THREAD_KEYS
714 #endif
715
716
// pthread_self() without the function call: reads the pthread_t straight
// out of the thread's direct TSD slot.
static inline pthread_t pthread_self_direct()
{
    return (pthread_t)
        _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_SELF);
}

// mach_thread_self() without the Mach call (and without the port
// reference that mach_thread_self() would take): reads the cached port
// from the direct TSD slot.
static inline mach_port_t mach_thread_self_direct()
{
    return (mach_port_t)(uintptr_t)
        _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
}
728
729
730 template <bool Debug> class mutex_tt;
731 template <bool Debug> class monitor_tt;
732 template <bool Debug> class recursive_mutex_tt;
733
734 #if DEBUG
735 # define LOCKDEBUG 1
736 #else
737 # define LOCKDEBUG 0
738 #endif
739
740 using spinlock_t = mutex_tt<LOCKDEBUG>;
741 using mutex_t = mutex_tt<LOCKDEBUG>;
742 using monitor_t = monitor_tt<LOCKDEBUG>;
743 using recursive_mutex_t = recursive_mutex_tt<LOCKDEBUG>;
744
745 // Use fork_unsafe_lock to get a lock that isn't
746 // acquired and released around fork().
747 // All fork-safe locks are checked in debug builds.
struct fork_unsafe_lock_t {
    // Empty tag type: passing an instance to a lock constructor opts
    // that lock out of fork() handling and lockdebug registration.
    constexpr fork_unsafe_lock_t() = default;
};
751 extern const fork_unsafe_lock_t fork_unsafe_lock;
752
753 #include "objc-lockdebug.h"
754
// Non-recursive mutex over os_unfair_lock. The Debug parameter selects
// whether the lockdebug_* bookkeeping (fork-safety and ownership checks)
// is compiled in; with Debug==false those calls compile away.
template <bool Debug>
class mutex_tt : nocopy_t {
    os_unfair_lock mLock;
 public:
    constexpr mutex_tt() : mLock(OS_UNFAIR_LOCK_INIT) {
        lockdebug_remember_mutex(this);
    }

    // fork-unsafe variant: skips lockdebug registration (see fork_unsafe_lock_t).
    constexpr mutex_tt(const fork_unsafe_lock_t unsafe) : mLock(OS_UNFAIR_LOCK_INIT) { }

    void lock() {
        lockdebug_mutex_lock(this);

        // Data-synchronization option: spin briefly before blocking.
        os_unfair_lock_lock_with_options_inline
            (&mLock, OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION);
    }

    void unlock() {
        lockdebug_mutex_unlock(this);

        os_unfair_lock_unlock_inline(&mLock);
    }

    // Reinitialize unconditionally. Used in the fork() child, where the
    // lock may be "held" by a thread that no longer exists.
    void forceReset() {
        lockdebug_mutex_unlock(this);

        bzero(&mLock, sizeof(mLock));
        mLock = os_unfair_lock OS_UNFAIR_LOCK_INIT;
    }

    void assertLocked() {
        lockdebug_mutex_assert_locked(this);
    }

    void assertUnlocked() {
        lockdebug_mutex_assert_unlocked(this);
    }


    // Address-ordered lock discipline for a pair of locks.

    // Lock both, always in ascending address order, to avoid deadlock
    // when two threads lock the same pair. Handles lock1 == lock2.
    static void lockTwo(mutex_tt *lock1, mutex_tt *lock2) {
        if (lock1 < lock2) {
            lock1->lock();
            lock2->lock();
        } else {
            lock2->lock();
            if (lock2 != lock1) lock1->lock();
        }
    }

    static void unlockTwo(mutex_tt *lock1, mutex_tt *lock2) {
        lock1->unlock();
        if (lock2 != lock1) lock2->unlock();
    }

    // Scoped lock and unlock
    class locker : nocopy_t {
        mutex_tt& lock;
      public:
        locker(mutex_tt& newLock)
            : lock(newLock) { lock.lock(); }
        ~locker() { lock.unlock(); }
    };

    // Either scoped lock and unlock, or NOP.
    class conditional_locker : nocopy_t {
        mutex_tt& lock;
        bool didLock;
      public:
        conditional_locker(mutex_tt& newLock, bool shouldLock)
            : lock(newLock), didLock(shouldLock)
        {
            if (shouldLock) lock.lock();
        }
        ~conditional_locker() { if (didLock) lock.unlock(); }
    };
};
833
834 using mutex_locker_t = mutex_tt<LOCKDEBUG>::locker;
835 using conditional_mutex_locker_t = mutex_tt<LOCKDEBUG>::conditional_locker;
836
837
// Recursive mutex over os_unfair_recursive_lock; same Debug/lockdebug
// scheme as mutex_tt above.
template <bool Debug>
class recursive_mutex_tt : nocopy_t {
    os_unfair_recursive_lock mLock;

  public:
    constexpr recursive_mutex_tt() : mLock(OS_UNFAIR_RECURSIVE_LOCK_INIT) {
        lockdebug_remember_recursive_mutex(this);
    }

    // fork-unsafe variant: skips lockdebug registration.
    constexpr recursive_mutex_tt(const fork_unsafe_lock_t unsafe)
        : mLock(OS_UNFAIR_RECURSIVE_LOCK_INIT)
    { }

    void lock()
    {
        lockdebug_recursive_mutex_lock(this);
        os_unfair_recursive_lock_lock(&mLock);
    }

    void unlock()
    {
        lockdebug_recursive_mutex_unlock(this);

        os_unfair_recursive_lock_unlock(&mLock);
    }

    // Reinitialize unconditionally (fork() child); see mutex_tt::forceReset.
    void forceReset()
    {
        lockdebug_recursive_mutex_unlock(this);

        bzero(&mLock, sizeof(mLock));
        mLock = os_unfair_recursive_lock OS_UNFAIR_RECURSIVE_LOCK_INIT;
    }

    // Unlock only if the calling thread owns the lock; returns whether
    // an unlock happened.
    bool tryUnlock()
    {
        if (os_unfair_recursive_lock_tryunlock4objc(&mLock)) {
            lockdebug_recursive_mutex_unlock(this);
            return true;
        }
        return false;
    }

    void assertLocked() {
        lockdebug_recursive_mutex_assert_locked(this);
    }

    void assertUnlocked() {
        lockdebug_recursive_mutex_assert_unlocked(this);
    }
};
889
890
// Mutex + condition variable pair (classic monitor), built on pthreads.
// Any pthread failure is fatal — these locks guard runtime-critical state.
template <bool Debug>
class monitor_tt {
    pthread_mutex_t mutex;
    pthread_cond_t cond;

  public:
    constexpr monitor_tt()
        : mutex(PTHREAD_MUTEX_INITIALIZER), cond(PTHREAD_COND_INITIALIZER)
    {
        lockdebug_remember_monitor(this);
    }

    // fork-unsafe variant: skips lockdebug registration.
    monitor_tt(const fork_unsafe_lock_t unsafe)
        : mutex(PTHREAD_MUTEX_INITIALIZER), cond(PTHREAD_COND_INITIALIZER)
    { }

    void enter()
    {
        lockdebug_monitor_enter(this);

        int err = pthread_mutex_lock(&mutex);
        if (err) _objc_fatal("pthread_mutex_lock failed (%d)", err);
    }

    void leave()
    {
        lockdebug_monitor_leave(this);

        int err = pthread_mutex_unlock(&mutex);
        if (err) _objc_fatal("pthread_mutex_unlock failed (%d)", err);
    }

    // Caller must have enter()ed; releases the mutex while blocked and
    // re-acquires it before returning.
    void wait()
    {
        lockdebug_monitor_wait(this);

        int err = pthread_cond_wait(&cond, &mutex);
        if (err) _objc_fatal("pthread_cond_wait failed (%d)", err);
    }

    void notify()
    {
        int err = pthread_cond_signal(&cond);
        if (err) _objc_fatal("pthread_cond_signal failed (%d)", err);
    }

    void notifyAll()
    {
        int err = pthread_cond_broadcast(&cond);
        if (err) _objc_fatal("pthread_cond_broadcast failed (%d)", err);
    }

    // Reinitialize unconditionally (fork() child); see mutex_tt::forceReset.
    void forceReset()
    {
        lockdebug_monitor_leave(this);

        bzero(&mutex, sizeof(mutex));
        bzero(&cond, sizeof(cond));
        mutex = pthread_mutex_t PTHREAD_MUTEX_INITIALIZER;
        cond = pthread_cond_t PTHREAD_COND_INITIALIZER;
    }

    void assertLocked()
    {
        lockdebug_monitor_assert_locked(this);
    }

    void assertUnlocked()
    {
        lockdebug_monitor_assert_unlocked(this);
    }
};
963
964
965 // semaphore_create formatted for INIT_ONCE use
// Create a Mach semaphore with FIFO wakeup order and an initial count of
// zero. Failure is fatal. Shaped as an expression for INIT_ONCE_PTR use.
static inline semaphore_t create_semaphore(void)
{
    semaphore_t sem;
    kern_return_t k;
    k = semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);
    if (k) _objc_fatal("semaphore_create failed (0x%x)", k);
    return sem;
}
974
975
976 #ifndef __LP64__
977 typedef struct mach_header headerType;
978 typedef struct segment_command segmentType;
979 typedef struct section sectionType;
980 #else
981 typedef struct mach_header_64 headerType;
982 typedef struct segment_command_64 segmentType;
983 typedef struct section_64 sectionType;
984 #endif
985 #define headerIsBundle(hi) (hi->mhdr()->filetype == MH_BUNDLE)
986 #define libobjc_header ((headerType *)&_mh_dylib_header)
987
988 // Prototypes
989
990 /* Secure /tmp usage */
991 extern int secure_open(const char *filename, int flags, uid_t euid);
992
993
994 #else
995
996
997 #error unknown OS
998
999
1000 #endif
1001
1002
// Return a heap-allocated copy of the `len` bytes at `mem`.
// Caller owns the result and must free() it.
// Returns NULL if allocation fails (previously an allocation failure fed
// NULL straight into memcpy — undefined behavior).
static inline void *
memdup(const void *mem, size_t len)
{
    void *dup = malloc(len);
    if (!dup) return NULL;
    memcpy(dup, mem, len);
    return dup;
}
1010
1011 // strdup that doesn't copy read-only memory
// Duplicate str, unless dyld says the bytes are immutable (e.g. mapped
// read-only from a binary) — then the original pointer is returned
// unchanged and shared. Pair results with freeIfMutable(), never free().
static inline char *
strdupIfMutable(const char *str)
{
    size_t size = strlen(str) + 1;
    if (_dyld_is_memory_immutable(str, size)) {
        return (char *)str;
    } else {
        return (char *)memdup(str, size);
    }
}
1022
1023 // free strdupIfMutable() result
// Release a string obtained from strdupIfMutable(): free it only if it
// was actually copied (i.e. the bytes are not dyld-immutable).
static inline void
freeIfMutable(char *str)
{
    size_t size = strlen(str) + 1;
    if (_dyld_is_memory_immutable(str, size)) {
        // nothing
    } else {
        free(str);
    }
}
1034
1035 // nil-checking unsigned strdup
// strdupIfMutable() for unsigned strings, tolerating nil input
// (returns nil). Release with freeIfMutable().
static inline uint8_t *
ustrdupMaybeNil(const uint8_t *str)
{
    if (!str) return nil;
    return (uint8_t *)strdupIfMutable((char *)str);
}
1042
1043 // OS version checking:
1044 //
1045 // sdkVersion()
1046 // DYLD_OS_VERSION(mac, ios, tv, watch, bridge)
1047 // sdkIsOlderThan(mac, ios, tv, watch, bridge)
1048 // sdkIsAtLeast(mac, ios, tv, watch, bridge)
1049 //
1050 // This version order matches OBJC_AVAILABLE.
1051
1052 #if TARGET_OS_OSX
1053 # define DYLD_OS_VERSION(x, i, t, w, b) DYLD_MACOSX_VERSION_##x
1054 # define sdkVersion() dyld_get_program_sdk_version()
1055
1056 #elif TARGET_OS_IOS
1057 # define DYLD_OS_VERSION(x, i, t, w, b) DYLD_IOS_VERSION_##i
1058 # define sdkVersion() dyld_get_program_sdk_version()
1059
1060 #elif TARGET_OS_TV
1061 // dyld does not currently have distinct constants for tvOS
1062 # define DYLD_OS_VERSION(x, i, t, w, b) DYLD_IOS_VERSION_##t
1063 # define sdkVersion() dyld_get_program_sdk_version()
1064
1065 #elif TARGET_OS_BRIDGE
1066 # if TARGET_OS_WATCH
1067 # error bridgeOS 1.0 not supported
1068 # endif
1069 // fixme don't need bridgeOS versioning yet
1070 # define DYLD_OS_VERSION(x, i, t, w, b) DYLD_IOS_VERSION_##t
1071 # define sdkVersion() dyld_get_program_sdk_bridge_os_version()
1072
1073 #elif TARGET_OS_WATCH
1074 # define DYLD_OS_VERSION(x, i, t, w, b) DYLD_WATCHOS_VERSION_##w
1075 // watchOS has its own API for compatibility reasons
1076 # define sdkVersion() dyld_get_program_sdk_watch_os_version()
1077
1078 #else
1079 # error unknown OS
1080 #endif
1081
1082
1083 #define sdkIsOlderThan(x, i, t, w, b) \
1084 (sdkVersion() < DYLD_OS_VERSION(x, i, t, w, b))
1085 #define sdkIsAtLeast(x, i, t, w, b) \
1086 (sdkVersion() >= DYLD_OS_VERSION(x, i, t, w, b))
1087
1088 // Allow bare 0 to be used in DYLD_OS_VERSION() and sdkIsOlderThan()
1089 #define DYLD_MACOSX_VERSION_0 0
1090 #define DYLD_IOS_VERSION_0 0
1091 #define DYLD_TVOS_VERSION_0 0
1092 #define DYLD_WATCHOS_VERSION_0 0
1093 #define DYLD_BRIDGEOS_VERSION_0 0
1094
1095 // Pretty-print a DYLD_*_VERSION_* constant.
1096 #define SDK_FORMAT "%hu.%hhu.%hhu"
1097 #define FORMAT_SDK(v) \
1098 (unsigned short)(((uint32_t)(v))>>16), \
1099 (unsigned char)(((uint32_t)(v))>>8), \
1100 (unsigned char)(((uint32_t)(v))>>0)
1101
1102 // fork() safety requires careful tracking of all locks.
1103 // Our custom lock types check this in debug builds.
1104 // Disallow direct use of all other lock types.
1105 typedef __darwin_pthread_mutex_t pthread_mutex_t UNAVAILABLE_ATTRIBUTE;
1106 typedef __darwin_pthread_rwlock_t pthread_rwlock_t UNAVAILABLE_ATTRIBUTE;
1107 typedef int32_t OSSpinLock UNAVAILABLE_ATTRIBUTE;
1108 typedef struct os_unfair_lock_s os_unfair_lock UNAVAILABLE_ATTRIBUTE;
1109
1110
1111 #endif