/*
 * Copyright (c) 2007 Apple Inc. All Rights Reserved.
 * 
 * @APPLE_LICENSE_HEADER_START@
 * 
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 * 
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 * 
 * @APPLE_LICENSE_HEADER_END@
 */

/***********************************************************************
* objc-os.h
* OS portability layer.
**********************************************************************/

#ifndef _OBJC_OS_H
#define _OBJC_OS_H

#include <TargetConditionals.h>
#include "objc-config.h"

#ifdef __LP64__
#   define WORD_SHIFT 3UL
#   define WORD_MASK 7UL
#   define WORD_BITS 64
#else
#   define WORD_SHIFT 2UL
#   define WORD_MASK 3UL
#   define WORD_BITS 32
#endif

static inline uint32_t word_align(uint32_t x) {
    return (x + WORD_MASK) & ~WORD_MASK;
}
static inline size_t word_align(size_t x) {
    return (x + WORD_MASK) & ~WORD_MASK;
}
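
// word_align rounds a byte count up to the next pointer-size boundary.
// For example, on LP64 (WORD_MASK == 7): word_align(13) == 16 and
// word_align(16) == 16. On 32-bit (WORD_MASK == 3): word_align(13) == 16
// and word_align(6) == 8.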

#if TARGET_OS_MAC

#   ifndef __STDC_LIMIT_MACROS
#       define __STDC_LIMIT_MACROS
#   endif

#   include <stdio.h>
#   include <stdlib.h>
#   include <stdint.h>
#   include <stdarg.h>
#   include <string.h>
#   include <ctype.h>
#   include <errno.h>
#   include <dlfcn.h>
#   include <fcntl.h>
#   include <assert.h>
#   include <limits.h>
#   include <syslog.h>
#   include <unistd.h>
#   include <pthread.h>
#   include <crt_externs.h>
#   include <AssertMacros.h>
#   undef check
#   include <Availability.h>
#   include <TargetConditionals.h>
#   include <sys/mman.h>
#   include <sys/time.h>
#   include <sys/stat.h>
#   include <sys/param.h>
#   include <mach/mach.h>
#   include <mach/vm_param.h>
#   include <mach-o/dyld.h>
#   include <mach-o/ldsyms.h>
#   include <mach-o/loader.h>
#   include <mach-o/getsect.h>
#   include <mach-o/dyld_priv.h>
#   include <malloc/malloc.h>
#   include <os/lock_private.h>
#   include <libkern/OSAtomic.h>
#   include <libkern/OSCacheControl.h>
#   include <System/pthread_machdep.h>
#   include "objc-probes.h"  // generated dtrace probe definitions.

// Some libc functions call objc_msgSend(), 
// so we can't use them without deadlocks.
void syslog(int, const char *, ...) UNAVAILABLE_ATTRIBUTE;
void vsyslog(int, const char *, va_list) UNAVAILABLE_ATTRIBUTE;


#define ALWAYS_INLINE inline __attribute__((always_inline))
#define NEVER_INLINE inline __attribute__((noinline))


static ALWAYS_INLINE uintptr_t 
addc(uintptr_t lhs, uintptr_t rhs, uintptr_t carryin, uintptr_t *carryout)
{
    return __builtin_addcl(lhs, rhs, carryin, carryout);
}

static ALWAYS_INLINE uintptr_t 
subc(uintptr_t lhs, uintptr_t rhs, uintptr_t carryin, uintptr_t *carryout)
{
    return __builtin_subcl(lhs, rhs, carryin, carryout);
}


#if __arm64__

static ALWAYS_INLINE
uintptr_t 
LoadExclusive(uintptr_t *src)
{
    uintptr_t result;
    asm("ldxr %x0, [%x1]" 
        : "=r" (result) 
        : "r" (src), "m" (*src));
    return result;
}

static ALWAYS_INLINE
bool 
StoreExclusive(uintptr_t *dst, uintptr_t oldvalue __unused, uintptr_t value)
{
    uint32_t result;
    asm("stxr %w0, %x2, [%x3]" 
        : "=r" (result), "=m" (*dst) 
        : "r" (value), "r" (dst));
    return !result;
}


static ALWAYS_INLINE
bool 
StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue __unused, uintptr_t value)
{
    uint32_t result;
    asm("stlxr %w0, %x2, [%x3]" 
        : "=r" (result), "=m" (*dst) 
        : "r" (value), "r" (dst));
    return !result;
}


#elif __arm__

static ALWAYS_INLINE
uintptr_t 
LoadExclusive(uintptr_t *src)
{
    return *src;
}

static ALWAYS_INLINE
bool 
StoreExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
{
    return OSAtomicCompareAndSwapPtr((void *)oldvalue, (void *)value, 
                                     (void **)dst);
}

static ALWAYS_INLINE
bool 
StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
{
    return OSAtomicCompareAndSwapPtrBarrier((void *)oldvalue, (void *)value, 
                                            (void **)dst);
}


#elif __x86_64__ || __i386__

static ALWAYS_INLINE
uintptr_t 
LoadExclusive(uintptr_t *src)
{
    return *src;
}

static ALWAYS_INLINE
bool 
StoreExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
{
    return __sync_bool_compare_and_swap((void **)dst, (void *)oldvalue, (void *)value);
}

static ALWAYS_INLINE
bool 
StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
{
    return StoreExclusive(dst, oldvalue, value);
}

#else
#   error unknown architecture
#endif
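
// Usage sketch (an assumption about callers, not code from this header):
// LoadExclusive and StoreExclusive are meant to be paired in a retry loop.
// An atomic increment of a word could be written roughly as:
//
//     uintptr_t oldv, newv;
//     do {
//         oldv = LoadExclusive(&word);
//         newv = oldv + 1;
//     } while (!StoreExclusive(&word, oldv, newv));
//
// On arm64 the exclusive monitor makes the store fail if another thread
// touched the word; on arm and x86 the same loop degenerates to a plain
// load plus a compare-and-swap against oldv. StoreReleaseExclusive behaves
// the same but with release ordering on a successful store.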


#define spinlock_t os_lock_handoff_s
#define spinlock_trylock(l) os_lock_trylock(l)
#define spinlock_lock(l) os_lock_lock(l)
#define spinlock_unlock(l) os_lock_unlock(l)
#define SPINLOCK_INITIALIZER OS_LOCK_HANDOFF_INIT


#if !TARGET_OS_IPHONE
#   include <CrashReporterClient.h>
#else
    // CrashReporterClient not yet available on iOS
    __BEGIN_DECLS
    extern const char *CRSetCrashLogMessage(const char *msg);
    extern const char *CRGetCrashLogMessage(void);
    extern const char *CRSetCrashLogMessage2(const char *msg);
    __END_DECLS
#endif

#   if __cplusplus
#       include <vector>
#       include <algorithm>
#       include <functional>
        using namespace std;
#   endif

#   define PRIVATE_EXTERN __attribute__((visibility("hidden")))
#   undef __private_extern__
#   define __private_extern__ use_PRIVATE_EXTERN_instead
#   undef private_extern
#   define private_extern use_PRIVATE_EXTERN_instead

/* Use this for functions that are intended to be breakpoint hooks.
   If you do not, the compiler may optimize them away.
   BREAKPOINT_FUNCTION( void stop_on_error(void) ); */
#   define BREAKPOINT_FUNCTION(prototype)                             \
    OBJC_EXTERN __attribute__((noinline, used, visibility("hidden"))) \
    prototype { asm(""); }

#elif TARGET_OS_WIN32

#   define WINVER 0x0501        // target Windows XP and later
#   define _WIN32_WINNT 0x0501  // target Windows XP and later
#   define WIN32_LEAN_AND_MEAN
    // hack: windef.h typedefs BOOL as int
#   define BOOL WINBOOL
#   include <windows.h>
#   undef BOOL

#   include <stdio.h>
#   include <stdlib.h>
#   include <stdint.h>
#   include <stdarg.h>
#   include <string.h>
#   include <assert.h>
#   include <malloc.h>
#   include <Availability.h>

#   if __cplusplus
#       include <vector>
#       include <algorithm>
#       include <functional>
        using namespace std;
#       define __BEGIN_DECLS extern "C" {
#       define __END_DECLS }
#   else
#       define __BEGIN_DECLS /*empty*/
#       define __END_DECLS /*empty*/
#   endif

#   define PRIVATE_EXTERN
#   define __attribute__(x)
#   define inline __inline

/* Use this for functions that are intended to be breakpoint hooks.
   If you do not, the compiler may optimize them away.
   BREAKPOINT_FUNCTION( void MyBreakpointFunction(void) ); */
#   define BREAKPOINT_FUNCTION(prototype) \
    __declspec(noinline) prototype { __asm { } }

/* stub out dtrace probes */
#   define OBJC_RUNTIME_OBJC_EXCEPTION_RETHROW() do {} while(0)
#   define OBJC_RUNTIME_OBJC_EXCEPTION_THROW(arg0) do {} while(0)

#else
#   error unknown OS
#endif


#include <objc/objc.h>
#include <objc/objc-api.h>

__BEGIN_DECLS

extern void _objc_fatal(const char *fmt, ...) __attribute__((noreturn, format (printf, 1, 2)));

#define INIT_ONCE_PTR(var, create, delete)                              \
    do {                                                                \
        if (var) break;                                                 \
        typeof(var) v = create;                                         \
        while (!var) {                                                  \
            if (OSAtomicCompareAndSwapPtrBarrier(0, (void*)v, (void**)&var)) { \
                goto done;                                              \
            }                                                           \
        }                                                               \
        delete;                                                         \
    done:;                                                              \
    } while (0)

#define INIT_ONCE_32(var, create, delete)                               \
    do {                                                                \
        if (var) break;                                                 \
        typeof(var) v = create;                                         \
        while (!var) {                                                  \
            if (OSAtomicCompareAndSwap32Barrier(0, v, (volatile int32_t *)&var)) { \
                goto done;                                              \
            }                                                           \
        }                                                               \
        delete;                                                         \
    done:;                                                              \
    } while (0)
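
// Usage sketch (hypothetical names, not from this header): INIT_ONCE_PTR
// lazily publishes a value with a compare-and-swap, and hands the losing
// thread's copy to the `delete` expression; `v` is the macro's local that
// holds the freshly created value:
//
//     static SomeTable *table;
//     INIT_ONCE_PTR(table, make_table(), free_table(v));
//
// INIT_ONCE_32 is the same pattern for 32-bit values such as mach ports.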


// Thread keys reserved by libc for our use.
#if defined(__PTK_FRAMEWORK_OBJC_KEY0)
#   define SUPPORT_DIRECT_THREAD_KEYS 1
#   define TLS_DIRECT_KEY        ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY0)
#   define SYNC_DATA_DIRECT_KEY  ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY1)
#   define SYNC_COUNT_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY2)
#   define AUTORELEASE_POOL_KEY  ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY3)
#   if SUPPORT_RETURN_AUTORELEASE
#       define AUTORELEASE_POOL_RECLAIM_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY4)
#   endif
#   if SUPPORT_QOS_HACK
#       define QOS_KEY           ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY5)
#   endif
#else
#   define SUPPORT_DIRECT_THREAD_KEYS 0
#endif


#if TARGET_OS_WIN32

// Compiler compatibility

// OS compatibility

#define strdup _strdup

#define issetugid() 0

#define MIN(x, y) ((x) < (y) ? (x) : (y))

static __inline void bcopy(const void *src, void *dst, size_t size) { memcpy(dst, src, size); }
static __inline void bzero(void *dst, size_t size) { memset(dst, 0, size); }

int asprintf(char **dstp, const char *format, ...);

typedef void * malloc_zone_t;

static __inline malloc_zone_t malloc_default_zone(void) { return (malloc_zone_t)-1; }
static __inline void *malloc_zone_malloc(malloc_zone_t z, size_t size) { return malloc(size); }
static __inline void *malloc_zone_calloc(malloc_zone_t z, size_t size, size_t count) { return calloc(size, count); }
static __inline void *malloc_zone_realloc(malloc_zone_t z, void *p, size_t size) { return realloc(p, size); }
static __inline void malloc_zone_free(malloc_zone_t z, void *p) { free(p); }
static __inline malloc_zone_t malloc_zone_from_ptr(const void *p) { return (malloc_zone_t)-1; }
static __inline size_t malloc_size(const void *p) { return _msize((void*)p); /* fixme invalid pointer check? */ }


// AssertMacros

#define require_action_string(cond, dest, act, msg) do { if (!(cond)) { { act; } goto dest; } } while (0)
#define require_noerr_string(err, dest, msg) do { if (err) goto dest; } while (0)
#define require_string(cond, dest, msg) do { if (!(cond)) goto dest; } while (0)


// OSAtomic

static __inline BOOL OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst)
{
    // fixme barrier is overkill
    long original = InterlockedCompareExchange(dst, newl, oldl);
    return (original == oldl);
}

static __inline BOOL OSAtomicCompareAndSwapPtrBarrier(void *oldp, void *newp, void * volatile *dst)
{
    void *original = InterlockedCompareExchangePointer(dst, newp, oldp);
    return (original == oldp);
}

static __inline BOOL OSAtomicCompareAndSwap32Barrier(int32_t oldl, int32_t newl, int32_t volatile *dst)
{
    long original = InterlockedCompareExchange((volatile long *)dst, newl, oldl);
    return (original == oldl);
}

static __inline int32_t OSAtomicDecrement32Barrier(volatile int32_t *dst)
{
    return InterlockedDecrement((volatile long *)dst);
}

static __inline int32_t OSAtomicIncrement32Barrier(volatile int32_t *dst)
{
    return InterlockedIncrement((volatile long *)dst);
}


// Internal data types

typedef DWORD objc_thread_t;  // thread ID
static __inline int thread_equal(objc_thread_t t1, objc_thread_t t2) {
    return t1 == t2;
}
static __inline objc_thread_t thread_self(void) {
    return GetCurrentThreadId();
}

typedef struct {
    DWORD key;
    void (*dtor)(void *);
} tls_key_t;
static __inline tls_key_t tls_create(void (*dtor)(void*)) {
    // fixme need dtor registry for DllMain to call on thread detach
    tls_key_t k;
    k.key = TlsAlloc();
    k.dtor = dtor;
    return k;
}
static __inline void *tls_get(tls_key_t k) {
    return TlsGetValue(k.key);
}
static __inline void tls_set(tls_key_t k, void *value) {
    TlsSetValue(k.key, value);
}

typedef struct {
    CRITICAL_SECTION *lock;
} mutex_t;
#define MUTEX_INITIALIZER {0};
extern void mutex_init(mutex_t *m);
static __inline int _mutex_lock_nodebug(mutex_t *m) {
    // fixme error check
    if (!m->lock) {
        mutex_init(m);
    }
    EnterCriticalSection(m->lock);
    return 0;
}
static __inline bool _mutex_try_lock_nodebug(mutex_t *m) {
    // fixme error check
    if (!m->lock) {
        mutex_init(m);
    }
    return TryEnterCriticalSection(m->lock);
}
static __inline int _mutex_unlock_nodebug(mutex_t *m) {
    // fixme error check
    LeaveCriticalSection(m->lock);
    return 0;
}


typedef mutex_t spinlock_t;
#define spinlock_lock(l) mutex_lock(l)
#define spinlock_unlock(l) mutex_unlock(l)
#define SPINLOCK_INITIALIZER MUTEX_INITIALIZER


typedef struct {
    HANDLE mutex;
} recursive_mutex_t;
#define RECURSIVE_MUTEX_INITIALIZER {0};
#define RECURSIVE_MUTEX_NOT_LOCKED 1
extern void recursive_mutex_init(recursive_mutex_t *m);
static __inline int _recursive_mutex_lock_nodebug(recursive_mutex_t *m) {
    assert(m->mutex);
    return WaitForSingleObject(m->mutex, INFINITE);
}
static __inline bool _recursive_mutex_try_lock_nodebug(recursive_mutex_t *m) {
    assert(m->mutex);
    return (WAIT_OBJECT_0 == WaitForSingleObject(m->mutex, 0));
}
static __inline int _recursive_mutex_unlock_nodebug(recursive_mutex_t *m) {
    assert(m->mutex);
    return ReleaseMutex(m->mutex) ? 0 : RECURSIVE_MUTEX_NOT_LOCKED;
}


/*
typedef HANDLE mutex_t;
static inline void mutex_init(HANDLE *m) { *m = CreateMutex(NULL, FALSE, NULL); }
static inline void _mutex_lock(mutex_t *m) { WaitForSingleObject(*m, INFINITE); }
static inline bool mutex_try_lock(mutex_t *m) { return WaitForSingleObject(*m, 0) == WAIT_OBJECT_0; }
static inline void _mutex_unlock(mutex_t *m) { ReleaseMutex(*m); }
*/

// based on http://www.cs.wustl.edu/~schmidt/win32-cv-1.html
// Vista-only CONDITION_VARIABLE would be better
typedef struct {
    HANDLE mutex;
    HANDLE waiters;      // semaphore for those in cond_wait()
    HANDLE waitersDone;  // auto-reset event after everyone gets a broadcast
    CRITICAL_SECTION waitCountLock;  // guards waitCount and didBroadcast
    unsigned int waitCount;
    int didBroadcast;
} monitor_t;
#define MONITOR_INITIALIZER { 0 }
#define MONITOR_NOT_ENTERED 1
extern int monitor_init(monitor_t *c);

static inline int _monitor_enter_nodebug(monitor_t *c) {
    if (!c->mutex) {
        int err = monitor_init(c);
        if (err) return err;
    }
    return WaitForSingleObject(c->mutex, INFINITE);
}
static inline int _monitor_exit_nodebug(monitor_t *c) {
    if (!ReleaseMutex(c->mutex)) return MONITOR_NOT_ENTERED;
    else return 0;
}
static inline int _monitor_wait_nodebug(monitor_t *c) {
    int last;
    EnterCriticalSection(&c->waitCountLock);
    c->waitCount++;
    LeaveCriticalSection(&c->waitCountLock);

    SignalObjectAndWait(c->mutex, c->waiters, INFINITE, FALSE);

    EnterCriticalSection(&c->waitCountLock);
    c->waitCount--;
    last = c->didBroadcast && c->waitCount == 0;
    LeaveCriticalSection(&c->waitCountLock);

    if (last) {
        // tell broadcaster that all waiters have awoken
        SignalObjectAndWait(c->waitersDone, c->mutex, INFINITE, FALSE);
    } else {
        WaitForSingleObject(c->mutex, INFINITE);
    }

    // fixme error checking
    return 0;
}
static inline int monitor_notify(monitor_t *c) {
    int haveWaiters;

    EnterCriticalSection(&c->waitCountLock);
    haveWaiters = c->waitCount > 0;
    LeaveCriticalSection(&c->waitCountLock);

    if (haveWaiters) {
        ReleaseSemaphore(c->waiters, 1, 0);
    }

    // fixme error checking
    return 0;
}
static inline int monitor_notifyAll(monitor_t *c) {
    EnterCriticalSection(&c->waitCountLock);
    if (c->waitCount == 0) {
        LeaveCriticalSection(&c->waitCountLock);
        return 0;
    }
    c->didBroadcast = 1;
    ReleaseSemaphore(c->waiters, c->waitCount, 0);
    LeaveCriticalSection(&c->waitCountLock);

    // fairness: wait for everyone to move from waiters to mutex
    WaitForSingleObject(c->waitersDone, INFINITE);
    // not under waitCountLock, but still under mutex
    c->didBroadcast = 0;

    // fixme error checking
    return 0;
}
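
// Typical monitor usage (a sketch with hypothetical names, not code from
// this header): as with pthread condition variables, waits belong inside a
// predicate loop because wakeups may be spurious or the condition may be
// consumed by another thread first:
//
//     _monitor_enter_nodebug(&m);
//     while (!condition) _monitor_wait_nodebug(&m);
//     ...                         // condition now holds, mutex still held
//     monitor_notify(&m);         // or monitor_notifyAll(&m)
//     _monitor_exit_nodebug(&m);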


// fixme no rwlock yet

#define rwlock_t mutex_t
#define rwlock_init(r) mutex_init(r)
#define _rwlock_read_nodebug(m) _mutex_lock_nodebug(m)
#define _rwlock_write_nodebug(m) _mutex_lock_nodebug(m)
#define _rwlock_try_read_nodebug(m) _mutex_try_lock_nodebug(m)
#define _rwlock_try_write_nodebug(m) _mutex_try_lock_nodebug(m)
#define _rwlock_unlock_read_nodebug(m) _mutex_unlock_nodebug(m)
#define _rwlock_unlock_write_nodebug(m) _mutex_unlock_nodebug(m)


typedef IMAGE_DOS_HEADER headerType;
// fixme YES bundle? NO bundle? sometimes?
#define headerIsBundle(hi) YES
OBJC_EXTERN IMAGE_DOS_HEADER __ImageBase;
#define libobjc_header ((headerType *)&__ImageBase)

// Prototypes


#elif TARGET_OS_MAC


// OS headers
#include <mach-o/loader.h>
#ifndef __LP64__
#   define SEGMENT_CMD LC_SEGMENT
#else
#   define SEGMENT_CMD LC_SEGMENT_64
#endif

#ifndef VM_MEMORY_OBJC_DISPATCHERS
#   define VM_MEMORY_OBJC_DISPATCHERS 0
#endif


// Compiler compatibility

// OS compatibility

// Internal data types

typedef pthread_t objc_thread_t;

static __inline int thread_equal(objc_thread_t t1, objc_thread_t t2) {
    return pthread_equal(t1, t2);
}
static __inline objc_thread_t thread_self(void) {
    return pthread_self();
}


typedef pthread_key_t tls_key_t;

static inline tls_key_t tls_create(void (*dtor)(void*)) {
    tls_key_t k;
    pthread_key_create(&k, dtor);
    return k;
}
static inline void *tls_get(tls_key_t k) {
    return pthread_getspecific(k);
}
static inline void tls_set(tls_key_t k, void *value) {
    pthread_setspecific(k, value);
}

#if SUPPORT_DIRECT_THREAD_KEYS

#if !NDEBUG
static bool is_valid_direct_key(tls_key_t k) {
    return (   k == SYNC_DATA_DIRECT_KEY
            || k == SYNC_COUNT_DIRECT_KEY
            || k == AUTORELEASE_POOL_KEY
#   if SUPPORT_RETURN_AUTORELEASE
            || k == AUTORELEASE_POOL_RECLAIM_KEY
#   endif
#   if SUPPORT_QOS_HACK
            || k == QOS_KEY
#   endif
            );
}
#endif

#if __arm__

// rdar://9162780 _pthread_get/setspecific_direct are inefficient
// copied from libdispatch

__attribute__((const))
static ALWAYS_INLINE void**
tls_base(void)
{
    uintptr_t p;
#if defined(__arm__) && defined(_ARM_ARCH_6)
    __asm__("mrc p15, 0, %[p], c13, c0, 3" : [p] "=&r" (p));
    return (void**)(p & ~0x3ul);
#else
#error tls_base not implemented
#endif
}


static ALWAYS_INLINE void
tls_set_direct(void **tsdb, tls_key_t k, void *v)
{
    assert(is_valid_direct_key(k));

    tsdb[k] = v;
}
#define tls_set_direct(k, v) \
        tls_set_direct(tls_base(), (k), (v))


static ALWAYS_INLINE void *
tls_get_direct(void **tsdb, tls_key_t k)
{
    assert(is_valid_direct_key(k));

    return tsdb[k];
}
#define tls_get_direct(k) \
        tls_get_direct(tls_base(), (k))

// arm
#else
// not arm

static inline void *tls_get_direct(tls_key_t k)
{
    assert(is_valid_direct_key(k));

    if (_pthread_has_direct_tsd()) {
        return _pthread_getspecific_direct(k);
    } else {
        return pthread_getspecific(k);
    }
}
static inline void tls_set_direct(tls_key_t k, void *value)
{
    assert(is_valid_direct_key(k));

    if (_pthread_has_direct_tsd()) {
        _pthread_setspecific_direct(k, value);
    } else {
        pthread_setspecific(k, value);
    }
}

// not arm
#endif
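
// Sketch of how the direct keys are meant to be used (an assumption about
// callers elsewhere in the runtime, not code from this header): hot paths
// read and write thread-local state without going through
// pthread_getspecific, e.g.
//
//     void *page = tls_get_direct(AUTORELEASE_POOL_KEY);
//     tls_set_direct(AUTORELEASE_POOL_KEY, newPage);
//
// On non-arm targets this silently falls back to pthread_get/setspecific
// when direct TSD is unavailable.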

// SUPPORT_DIRECT_THREAD_KEYS
#endif


static inline pthread_t pthread_self_direct()
{
    return (pthread_t)
        _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_SELF);
}

static inline mach_port_t mach_thread_self_direct()
{
    return (mach_port_t)(uintptr_t)
        _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
}

#if SUPPORT_QOS_HACK
static inline pthread_priority_t pthread_self_priority_direct()
{
    pthread_priority_t pri = (pthread_priority_t)
        _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS);
    return pri & ~_PTHREAD_PRIORITY_FLAGS_MASK;
}
#endif


typedef pthread_mutex_t mutex_t;
#define MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER;

static inline int _mutex_lock_nodebug(mutex_t *m) {
    return pthread_mutex_lock(m);
}
static inline bool _mutex_try_lock_nodebug(mutex_t *m) {
    return !pthread_mutex_trylock(m);
}
static inline int _mutex_unlock_nodebug(mutex_t *m) {
    return pthread_mutex_unlock(m);
}


typedef struct {
    pthread_mutex_t *mutex;
} recursive_mutex_t;
#define RECURSIVE_MUTEX_INITIALIZER {0};
#define RECURSIVE_MUTEX_NOT_LOCKED EPERM
extern void recursive_mutex_init(recursive_mutex_t *m);

static inline int _recursive_mutex_lock_nodebug(recursive_mutex_t *m) {
    assert(m->mutex);
    return pthread_mutex_lock(m->mutex);
}
static inline bool _recursive_mutex_try_lock_nodebug(recursive_mutex_t *m) {
    assert(m->mutex);
    return !pthread_mutex_trylock(m->mutex);
}
static inline int _recursive_mutex_unlock_nodebug(recursive_mutex_t *m) {
    assert(m->mutex);
    return pthread_mutex_unlock(m->mutex);
}


typedef struct {
    pthread_mutex_t mutex;
    pthread_cond_t cond;
} monitor_t;
#define MONITOR_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER }
#define MONITOR_NOT_ENTERED EPERM

static inline int monitor_init(monitor_t *c) {
    int err = pthread_mutex_init(&c->mutex, NULL);
    if (err) return err;
    err = pthread_cond_init(&c->cond, NULL);
    if (err) {
        pthread_mutex_destroy(&c->mutex);
        return err;
    }
    return 0;
}
static inline int _monitor_enter_nodebug(monitor_t *c) {
    return pthread_mutex_lock(&c->mutex);
}
static inline int _monitor_exit_nodebug(monitor_t *c) {
    return pthread_mutex_unlock(&c->mutex);
}
static inline int _monitor_wait_nodebug(monitor_t *c) {
    return pthread_cond_wait(&c->cond, &c->mutex);
}
static inline int monitor_notify(monitor_t *c) {
    return pthread_cond_signal(&c->cond);
}
static inline int monitor_notifyAll(monitor_t *c) {
    return pthread_cond_broadcast(&c->cond);
}


// semaphore_create formatted for INIT_ONCE use
static inline semaphore_t create_semaphore(void)
{
    semaphore_t sem;
    kern_return_t k;
    k = semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);
    if (k) _objc_fatal("semaphore_create failed (0x%x)", k);
    return sem;
}
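
// Intended pairing with INIT_ONCE_32 (a sketch with a hypothetical variable
// name; the semaphore that loses the race is destroyed):
//
//     static semaphore_t sem;
//     INIT_ONCE_32(sem, create_semaphore(),
//                  semaphore_destroy(mach_task_self(), v));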


#if SUPPORT_QOS_HACK
// Override QOS class to avoid priority inversion in rwlocks
// <rdar://17697862> do a qos override before taking rw lock in objc

#include <pthread/workqueue_private.h>
extern pthread_priority_t BackgroundPriority;
extern pthread_priority_t MainPriority;

static inline void qosStartOverride()
{
    uintptr_t overrideRefCount = (uintptr_t)tls_get_direct(QOS_KEY);
    if (overrideRefCount > 0) {
        // If there is a qos override, increment the refcount and continue
        tls_set_direct(QOS_KEY, (void *)(overrideRefCount + 1));
    }
    else {
        pthread_priority_t currentPriority = pthread_self_priority_direct();
        // Check if override is needed. Only override if we are background qos
        if (currentPriority <= BackgroundPriority) {
            int res __unused = _pthread_override_qos_class_start_direct(mach_thread_self_direct(), MainPriority);
            assert(res == 0);
            // Once we override, we set the reference count in the tsd
            // to know when to end the override
            tls_set_direct(QOS_KEY, (void *)1);
        }
    }
}

static inline void qosEndOverride()
{
    uintptr_t overrideRefCount = (uintptr_t)tls_get_direct(QOS_KEY);
    if (overrideRefCount == 0) return;

    if (overrideRefCount == 1) {
        // end the override
        int res __unused = _pthread_override_qos_class_end_direct(mach_thread_self_direct());
        assert(res == 0);
    }

    // decrement refcount
    tls_set_direct(QOS_KEY, (void *)(overrideRefCount - 1));
}

// SUPPORT_QOS_HACK
#else
// not SUPPORT_QOS_HACK

static inline void qosStartOverride() { }
static inline void qosEndOverride() { }

// not SUPPORT_QOS_HACK
#endif

/* Read-write lock.
   A thin wrapper around pthread_rwlock_t. Acquiring the lock starts a QOS
   override and releasing it ends the override (see qosStartOverride and
   qosEndOverride above), so a background-QOS thread that holds the lock is
   boosted instead of priority-inverting higher-QOS waiters. */
typedef struct {
    pthread_rwlock_t rwl;
} rwlock_t;

static inline void rwlock_init(rwlock_t *l)
{
    int err __unused = pthread_rwlock_init(&l->rwl, NULL);
    assert(err == 0);
}

static inline void _rwlock_read_nodebug(rwlock_t *l)
{
    qosStartOverride();
    int err __unused = pthread_rwlock_rdlock(&l->rwl);
    assert(err == 0);
}

static inline void _rwlock_unlock_read_nodebug(rwlock_t *l)
{
    int err __unused = pthread_rwlock_unlock(&l->rwl);
    assert(err == 0);
    qosEndOverride();
}


static inline bool _rwlock_try_read_nodebug(rwlock_t *l)
{
    qosStartOverride();
    int err = pthread_rwlock_tryrdlock(&l->rwl);
    assert(err == 0 || err == EBUSY || err == EAGAIN);
    if (err == 0) {
        return true;
    } else {
        qosEndOverride();
        return false;
    }
}


static inline void _rwlock_write_nodebug(rwlock_t *l)
{
    qosStartOverride();
    int err __unused = pthread_rwlock_wrlock(&l->rwl);
    assert(err == 0);
}

static inline void _rwlock_unlock_write_nodebug(rwlock_t *l)
{
    int err __unused = pthread_rwlock_unlock(&l->rwl);
    assert(err == 0);
    qosEndOverride();
}

static inline bool _rwlock_try_write_nodebug(rwlock_t *l)
{
    qosStartOverride();
    int err = pthread_rwlock_trywrlock(&l->rwl);
    assert(err == 0 || err == EBUSY);
    if (err == 0) {
        return true;
    } else {
        qosEndOverride();
        return false;
    }
}


#ifndef __LP64__
typedef struct mach_header headerType;
typedef struct segment_command segmentType;
typedef struct section sectionType;
#else
typedef struct mach_header_64 headerType;
typedef struct segment_command_64 segmentType;
typedef struct section_64 sectionType;
#endif
#define headerIsBundle(hi) (hi->mhdr->filetype == MH_BUNDLE)
#define libobjc_header ((headerType *)&_mh_dylib_header)

// Prototypes

/* Secure /tmp usage */
extern int secure_open(const char *filename, int flags, uid_t euid);


#else


#error unknown OS


#endif

__END_DECLS

#endif