/*
 * Copyright (c) 2007 Apple Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/***********************************************************************
* objc-os.h
* OS portability layer.
**********************************************************************/

#ifndef _OBJC_OS_H
#define _OBJC_OS_H

#include <TargetConditionals.h>
#include "objc-config.h"

#ifdef __LP64__
# define WORD_SHIFT 3UL
# define WORD_MASK 7UL
# define WORD_BITS 64
#else
# define WORD_SHIFT 2UL
# define WORD_MASK 3UL
# define WORD_BITS 32
#endif

static inline uint32_t word_align(uint32_t x) {
    return (x + WORD_MASK) & ~WORD_MASK;
}
static inline size_t word_align(size_t x) {
    return (x + WORD_MASK) & ~WORD_MASK;
}
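
// Worked example (illustrative only): word_align() rounds up to the next
// pointer-sized boundary using the mask arithmetic above. On LP64
// (WORD_MASK == 7), word_align(10) == 16; on 32-bit targets
// (WORD_MASK == 3), word_align(10) == 12. Values that are already
// aligned are returned unchanged, e.g. word_align(16) == 16.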


// Mix-in for classes that must not be copied.
class nocopy_t {
private:
    nocopy_t(const nocopy_t&) = delete;
    const nocopy_t& operator=(const nocopy_t&) = delete;
protected:
    nocopy_t() { }
    ~nocopy_t() { }
};
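
// Usage sketch (illustrative; the class name is hypothetical): privately
// inheriting from nocopy_t deletes the copy operations while leaving the
// derived class default-constructible and destructible.
//
//   class example_registry_t : nocopy_t {
//       // example_registry_t r2 = r1;   // error: copy constructor deleted
//       // r2 = r1;                      // error: copy assignment deleted
//   };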


#if TARGET_OS_MAC

# ifndef __STDC_LIMIT_MACROS
# define __STDC_LIMIT_MACROS
# endif

# include <stdio.h>
# include <stdlib.h>
# include <stdint.h>
# include <stdarg.h>
# include <string.h>
# include <ctype.h>
# include <errno.h>
# include <dlfcn.h>
# include <fcntl.h>
# include <assert.h>
# include <limits.h>
# include <syslog.h>
# include <unistd.h>
# include <pthread.h>
# include <crt_externs.h>
# undef check
# include <Availability.h>
# include <TargetConditionals.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <sys/stat.h>
# include <sys/param.h>
# include <mach/mach.h>
# include <mach/vm_param.h>
# include <mach/mach_time.h>
# include <mach-o/dyld.h>
# include <mach-o/ldsyms.h>
# include <mach-o/loader.h>
# include <mach-o/getsect.h>
# include <mach-o/dyld_priv.h>
# include <malloc/malloc.h>
# include <os/lock_private.h>
# include <libkern/OSAtomic.h>
# include <libkern/OSCacheControl.h>
# include <System/pthread_machdep.h>
# include "objc-probes.h" // generated dtrace probe definitions.

// Some libc functions call objc_msgSend()
// so we can't use them without deadlocks.
void syslog(int, const char *, ...) UNAVAILABLE_ATTRIBUTE;
void vsyslog(int, const char *, va_list) UNAVAILABLE_ATTRIBUTE;


#define ALWAYS_INLINE inline __attribute__((always_inline))
#define NEVER_INLINE inline __attribute__((noinline))



static ALWAYS_INLINE uintptr_t
addc(uintptr_t lhs, uintptr_t rhs, uintptr_t carryin, uintptr_t *carryout)
{
    return __builtin_addcl(lhs, rhs, carryin, carryout);
}

static ALWAYS_INLINE uintptr_t
subc(uintptr_t lhs, uintptr_t rhs, uintptr_t carryin, uintptr_t *carryout)
{
    return __builtin_subcl(lhs, rhs, carryin, carryout);
}


#if __arm64__

static ALWAYS_INLINE
uintptr_t
LoadExclusive(uintptr_t *src)
{
    uintptr_t result;
    asm("ldxr %x0, [%x1]"
        : "=r" (result)
        : "r" (src), "m" (*src));
    return result;
}

static ALWAYS_INLINE
bool
StoreExclusive(uintptr_t *dst, uintptr_t oldvalue __unused, uintptr_t value)
{
    uint32_t result;
    asm("stxr %w0, %x2, [%x3]"
        : "=r" (result), "=m" (*dst)
        : "r" (value), "r" (dst));
    return !result;
}


static ALWAYS_INLINE
bool
StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue __unused, uintptr_t value)
{
    uint32_t result;
    asm("stlxr %w0, %x2, [%x3]"
        : "=r" (result), "=m" (*dst)
        : "r" (value), "r" (dst));
    return !result;
}


#elif __arm__

static ALWAYS_INLINE
uintptr_t
LoadExclusive(uintptr_t *src)
{
    return *src;
}

static ALWAYS_INLINE
bool
StoreExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
{
    return OSAtomicCompareAndSwapPtr((void *)oldvalue, (void *)value,
                                     (void **)dst);
}

static ALWAYS_INLINE
bool
StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
{
    return OSAtomicCompareAndSwapPtrBarrier((void *)oldvalue, (void *)value,
                                            (void **)dst);
}


#elif __x86_64__ || __i386__

static ALWAYS_INLINE
uintptr_t
LoadExclusive(uintptr_t *src)
{
    return *src;
}

static ALWAYS_INLINE
bool
StoreExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
{

    return __sync_bool_compare_and_swap((void **)dst, (void *)oldvalue, (void *)value);
}

static ALWAYS_INLINE
bool
StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
{
    return StoreExclusive(dst, oldvalue, value);
}

#else
# error unknown architecture
#endif
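
// Usage sketch (illustrative; the function name is hypothetical): the
// exclusive load/store pair is meant for retry loops of the form below.
// StoreExclusive() returns false if *p may have changed since the matching
// LoadExclusive(), whether that is implemented with ldxr/stxr (arm64) or
// with a compare-and-swap against the loaded value (arm, x86).
//
//   static ALWAYS_INLINE uintptr_t
//   atomic_increment_example(uintptr_t *p)
//   {
//       uintptr_t old;
//       do {
//           old = LoadExclusive(p);
//       } while (!StoreExclusive(p, old, old + 1));
//       return old + 1;
//   }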


class spinlock_t {
    os_lock_handoff_s mLock;
public:
    spinlock_t() : mLock(OS_LOCK_HANDOFF_INIT) { }

    void lock() { os_lock_lock(&mLock); }
    void unlock() { os_lock_unlock(&mLock); }
    bool trylock() { return os_lock_trylock(&mLock); }


    // Address-ordered lock discipline for a pair of locks.

    static void lockTwo(spinlock_t *lock1, spinlock_t *lock2) {
        if (lock1 > lock2) {
            lock1->lock();
            lock2->lock();
        } else {
            lock2->lock();
            if (lock2 != lock1) lock1->lock();
        }
    }

    static void unlockTwo(spinlock_t *lock1, spinlock_t *lock2) {
        lock1->unlock();
        if (lock2 != lock1) lock2->unlock();
    }
};
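
// Usage sketch (illustrative): lockTwo()/unlockTwo() always acquire the pair
// in a consistent address-based order, so two threads locking the same two
// locks with the arguments swapped cannot deadlock against each other.
// Passing the same lock twice is safe; it is acquired only once.
//
//   spinlock_t *a = ..., *b = ...;
//   spinlock_t::lockTwo(a, b);
//   // ... critical section protected by both locks ...
//   spinlock_t::unlockTwo(a, b);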


#if !TARGET_OS_IPHONE
# include <CrashReporterClient.h>
#else
// CrashReporterClient not yet available on iOS
__BEGIN_DECLS
extern const char *CRSetCrashLogMessage(const char *msg);
extern const char *CRGetCrashLogMessage(void);
extern const char *CRSetCrashLogMessage2(const char *msg);
__END_DECLS
#endif

# if __cplusplus
# include <vector>
# include <algorithm>
# include <functional>
using namespace std;
# endif

# define PRIVATE_EXTERN __attribute__((visibility("hidden")))
# undef __private_extern__
# define __private_extern__ use_PRIVATE_EXTERN_instead
# undef private_extern
# define private_extern use_PRIVATE_EXTERN_instead

/* Use this for functions that are intended to be breakpoint hooks.
   If you do not, the compiler may optimize them away.
   BREAKPOINT_FUNCTION( void stop_on_error(void) ); */
# define BREAKPOINT_FUNCTION(prototype) \
    OBJC_EXTERN __attribute__((noinline, used, visibility("hidden"))) \
    prototype { asm(""); }

#elif TARGET_OS_WIN32

# define WINVER 0x0501 // target Windows XP and later
# define _WIN32_WINNT 0x0501 // target Windows XP and later
# define WIN32_LEAN_AND_MEAN
// hack: windef.h typedefs BOOL as int
# define BOOL WINBOOL
# include <windows.h>
# undef BOOL

# include <stdio.h>
# include <stdlib.h>
# include <stdint.h>
# include <stdarg.h>
# include <string.h>
# include <assert.h>
# include <malloc.h>
# include <Availability.h>

# if __cplusplus
# include <vector>
# include <algorithm>
# include <functional>
using namespace std;
# define __BEGIN_DECLS extern "C" {
# define __END_DECLS }
# else
# define __BEGIN_DECLS /*empty*/
# define __END_DECLS /*empty*/
# endif

# define PRIVATE_EXTERN
# define __attribute__(x)
# define inline __inline

/* Use this for functions that are intended to be breakpoint hooks.
   If you do not, the compiler may optimize them away.
   BREAKPOINT_FUNCTION( void MyBreakpointFunction(void) ); */
# define BREAKPOINT_FUNCTION(prototype) \
    __declspec(noinline) prototype { __asm { } }

/* stub out dtrace probes */
# define OBJC_RUNTIME_OBJC_EXCEPTION_RETHROW() do {} while(0)
# define OBJC_RUNTIME_OBJC_EXCEPTION_THROW(arg0) do {} while(0)

#else
# error unknown OS
#endif


#include <objc/objc.h>
#include <objc/objc-api.h>

extern void _objc_fatal(const char *fmt, ...) __attribute__((noreturn, format (printf, 1, 2)));

#define INIT_ONCE_PTR(var, create, delete) \
    do { \
        if (var) break; \
        typeof(var) v = create; \
        while (!var) { \
            if (OSAtomicCompareAndSwapPtrBarrier(0, (void*)v, (void**)&var)){ \
                goto done; \
            } \
        } \
        delete; \
    done:; \
    } while (0)

#define INIT_ONCE_32(var, create, delete) \
    do { \
        if (var) break; \
        typeof(var) v = create; \
        while (!var) { \
            if (OSAtomicCompareAndSwap32Barrier(0, v, (volatile int32_t *)&var)) { \
                goto done; \
            } \
        } \
        delete; \
    done:; \
    } while (0)
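
// Usage sketch (illustrative; the variable name is hypothetical): the
// INIT_ONCE macros lazily initialize a shared variable exactly once.
// Several racing threads may each evaluate `create`, but only one result is
// published by the compare-and-swap; the losers dispose of their copy via
// `delete`, which sees the unpublished value in the local `v`. For example,
// on OS X a lazily created malloc zone could be published like this:
//
//   static malloc_zone_t *ExampleZone;
//   INIT_ONCE_PTR(ExampleZone,
//                 malloc_create_zone(vm_page_size, 0),  // create
//                 malloc_destroy_zone(v));              // discard the losing copy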


// Thread keys reserved by libc for our use.
#if defined(__PTK_FRAMEWORK_OBJC_KEY0)
# define SUPPORT_DIRECT_THREAD_KEYS 1
# define TLS_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY0)
# define SYNC_DATA_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY1)
# define SYNC_COUNT_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY2)
# define AUTORELEASE_POOL_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY3)
# if SUPPORT_RETURN_AUTORELEASE
# define RETURN_DISPOSITION_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY4)
# endif
# if SUPPORT_QOS_HACK
# define QOS_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY5)
# endif
#else
# define SUPPORT_DIRECT_THREAD_KEYS 0
#endif


#if TARGET_OS_WIN32

// Compiler compatibility

// OS compatibility

#define strdup _strdup

#define issetugid() 0

#define MIN(x, y) ((x) < (y) ? (x) : (y))

static __inline void bcopy(const void *src, void *dst, size_t size) { memcpy(dst, src, size); }
static __inline void bzero(void *dst, size_t size) { memset(dst, 0, size); }

int asprintf(char **dstp, const char *format, ...);

typedef void * malloc_zone_t;

static __inline malloc_zone_t malloc_default_zone(void) { return (malloc_zone_t)-1; }
static __inline void *malloc_zone_malloc(malloc_zone_t z, size_t size) { return malloc(size); }
static __inline void *malloc_zone_calloc(malloc_zone_t z, size_t size, size_t count) { return calloc(size, count); }
static __inline void *malloc_zone_realloc(malloc_zone_t z, void *p, size_t size) { return realloc(p, size); }
static __inline void malloc_zone_free(malloc_zone_t z, void *p) { free(p); }
static __inline malloc_zone_t malloc_zone_from_ptr(const void *p) { return (malloc_zone_t)-1; }
static __inline size_t malloc_size(const void *p) { return _msize((void*)p); /* fixme invalid pointer check? */ }


// OSAtomic

static __inline BOOL OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst)
{
    // fixme barrier is overkill
    long original = InterlockedCompareExchange(dst, newl, oldl);
    return (original == oldl);
}

static __inline BOOL OSAtomicCompareAndSwapPtrBarrier(void *oldp, void *newp, void * volatile *dst)
{
    void *original = InterlockedCompareExchangePointer(dst, newp, oldp);
    return (original == oldp);
}

static __inline BOOL OSAtomicCompareAndSwap32Barrier(int32_t oldl, int32_t newl, int32_t volatile *dst)
{
    long original = InterlockedCompareExchange((volatile long *)dst, newl, oldl);
    return (original == oldl);
}

static __inline int32_t OSAtomicDecrement32Barrier(volatile int32_t *dst)
{
    return InterlockedDecrement((volatile long *)dst);
}

static __inline int32_t OSAtomicIncrement32Barrier(volatile int32_t *dst)
{
    return InterlockedIncrement((volatile long *)dst);
}


// Internal data types

typedef DWORD objc_thread_t; // thread ID
static __inline int thread_equal(objc_thread_t t1, objc_thread_t t2) {
    return t1 == t2;
}
static __inline objc_thread_t thread_self(void) {
    return GetCurrentThreadId();
}

typedef struct {
    DWORD key;
    void (*dtor)(void *);
} tls_key_t;
static __inline tls_key_t tls_create(void (*dtor)(void*)) {
    // fixme need dtor registry for DllMain to call on thread detach
    tls_key_t k;
    k.key = TlsAlloc();
    k.dtor = dtor;
    return k;
}
static __inline void *tls_get(tls_key_t k) {
    return TlsGetValue(k.key);
}
static __inline void tls_set(tls_key_t k, void *value) {
    TlsSetValue(k.key, value);
}

typedef struct {
    CRITICAL_SECTION *lock;
} mutex_t;
#define MUTEX_INITIALIZER {0};
extern void mutex_init(mutex_t *m);
static __inline int _mutex_lock_nodebug(mutex_t *m) {
    // fixme error check
    if (!m->lock) {
        mutex_init(m);
    }
    EnterCriticalSection(m->lock);
    return 0;
}
static __inline bool _mutex_try_lock_nodebug(mutex_t *m) {
    // fixme error check
    if (!m->lock) {
        mutex_init(m);
    }
    return TryEnterCriticalSection(m->lock);
}
static __inline int _mutex_unlock_nodebug(mutex_t *m) {
    // fixme error check
    LeaveCriticalSection(m->lock);
    return 0;
}


typedef mutex_t spinlock_t;
#define spinlock_lock(l) mutex_lock(l)
#define spinlock_unlock(l) mutex_unlock(l)
#define SPINLOCK_INITIALIZER MUTEX_INITIALIZER


typedef struct {
    HANDLE mutex;
} recursive_mutex_t;
#define RECURSIVE_MUTEX_INITIALIZER {0};
#define RECURSIVE_MUTEX_NOT_LOCKED 1
extern void recursive_mutex_init(recursive_mutex_t *m);
static __inline int _recursive_mutex_lock_nodebug(recursive_mutex_t *m) {
    assert(m->mutex);
    return WaitForSingleObject(m->mutex, INFINITE);
}
static __inline bool _recursive_mutex_try_lock_nodebug(recursive_mutex_t *m) {
    assert(m->mutex);
    return (WAIT_OBJECT_0 == WaitForSingleObject(m->mutex, 0));
}
static __inline int _recursive_mutex_unlock_nodebug(recursive_mutex_t *m) {
    assert(m->mutex);
    return ReleaseMutex(m->mutex) ? 0 : RECURSIVE_MUTEX_NOT_LOCKED;
}


/*
typedef HANDLE mutex_t;
static inline void mutex_init(HANDLE *m) { *m = CreateMutex(NULL, FALSE, NULL); }
static inline void _mutex_lock(mutex_t *m) { WaitForSingleObject(*m, INFINITE); }
static inline bool mutex_try_lock(mutex_t *m) { return WaitForSingleObject(*m, 0) == WAIT_OBJECT_0; }
static inline void _mutex_unlock(mutex_t *m) { ReleaseMutex(*m); }
*/

// based on http://www.cs.wustl.edu/~schmidt/win32-cv-1.html
// Vista-only CONDITION_VARIABLE would be better
typedef struct {
    HANDLE mutex;
    HANDLE waiters; // semaphore for those in cond_wait()
    HANDLE waitersDone; // auto-reset event after everyone gets a broadcast
    CRITICAL_SECTION waitCountLock; // guards waitCount and didBroadcast
    unsigned int waitCount;
    int didBroadcast;
} monitor_t;
#define MONITOR_INITIALIZER { 0 }
#define MONITOR_NOT_ENTERED 1
extern int monitor_init(monitor_t *c);

static inline int _monitor_enter_nodebug(monitor_t *c) {
    if (!c->mutex) {
        int err = monitor_init(c);
        if (err) return err;
    }
    return WaitForSingleObject(c->mutex, INFINITE);
}
static inline int _monitor_leave_nodebug(monitor_t *c) {
    if (!ReleaseMutex(c->mutex)) return MONITOR_NOT_ENTERED;
    else return 0;
}
static inline int _monitor_wait_nodebug(monitor_t *c) {
    int last;
    EnterCriticalSection(&c->waitCountLock);
    c->waitCount++;
    LeaveCriticalSection(&c->waitCountLock);

    SignalObjectAndWait(c->mutex, c->waiters, INFINITE, FALSE);

    EnterCriticalSection(&c->waitCountLock);
    c->waitCount--;
    last = c->didBroadcast && c->waitCount == 0;
    LeaveCriticalSection(&c->waitCountLock);

    if (last) {
        // tell broadcaster that all waiters have awoken
        SignalObjectAndWait(c->waitersDone, c->mutex, INFINITE, FALSE);
    } else {
        WaitForSingleObject(c->mutex, INFINITE);
    }

    // fixme error checking
    return 0;
}
static inline int monitor_notify(monitor_t *c) {
    int haveWaiters;

    EnterCriticalSection(&c->waitCountLock);
    haveWaiters = c->waitCount > 0;
    LeaveCriticalSection(&c->waitCountLock);

    if (haveWaiters) {
        ReleaseSemaphore(c->waiters, 1, 0);
    }

    // fixme error checking
    return 0;
}
static inline int monitor_notifyAll(monitor_t *c) {
    EnterCriticalSection(&c->waitCountLock);
    if (c->waitCount == 0) {
        LeaveCriticalSection(&c->waitCountLock);
        return 0;
    }
    c->didBroadcast = 1;
    ReleaseSemaphore(c->waiters, c->waitCount, 0);
    LeaveCriticalSection(&c->waitCountLock);

    // fairness: wait for everyone to move from waiters to mutex
    WaitForSingleObject(c->waitersDone, INFINITE);
    // not under waitCountLock, but still under mutex
    c->didBroadcast = 0;

    // fixme error checking
    return 0;
}
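
// Usage sketch (illustrative; the monitor and the work_available flag are
// hypothetical): this emulates a mutex plus condition variable. A waiter
// re-checks its predicate in a loop because a wakeup only means the state
// may have changed, not that it did.
//
//   monitor_t m = MONITOR_INITIALIZER;
//
//   // consumer
//   _monitor_enter_nodebug(&m);
//   while (!work_available) _monitor_wait_nodebug(&m);
//   // ... consume work while still inside the monitor ...
//   _monitor_leave_nodebug(&m);
//
//   // producer
//   _monitor_enter_nodebug(&m);
//   work_available = 1;
//   monitor_notify(&m);        // or monitor_notifyAll(&m)
//   _monitor_leave_nodebug(&m);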


// fixme no rwlock yet


typedef IMAGE_DOS_HEADER headerType;
// fixme YES bundle? NO bundle? sometimes?
#define headerIsBundle(hi) YES
OBJC_EXTERN IMAGE_DOS_HEADER __ImageBase;
#define libobjc_header ((headerType *)&__ImageBase)

// Prototypes


#elif TARGET_OS_MAC


// OS headers
#include <mach-o/loader.h>
#ifndef __LP64__
# define SEGMENT_CMD LC_SEGMENT
#else
# define SEGMENT_CMD LC_SEGMENT_64
#endif

#ifndef VM_MEMORY_OBJC_DISPATCHERS
# define VM_MEMORY_OBJC_DISPATCHERS 0
#endif


// Compiler compatibility

// OS compatibility

static inline uint64_t nanoseconds() {
    return mach_absolute_time();
}

// Internal data types

typedef pthread_t objc_thread_t;

static __inline int thread_equal(objc_thread_t t1, objc_thread_t t2) {
    return pthread_equal(t1, t2);
}
static __inline objc_thread_t thread_self(void) {
    return pthread_self();
}


typedef pthread_key_t tls_key_t;

static inline tls_key_t tls_create(void (*dtor)(void*)) {
    tls_key_t k;
    pthread_key_create(&k, dtor);
    return k;
}
static inline void *tls_get(tls_key_t k) {
    return pthread_getspecific(k);
}
static inline void tls_set(tls_key_t k, void *value) {
    pthread_setspecific(k, value);
}
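
// Usage sketch (illustrative; the key and state type are hypothetical):
// these thin pthread wrappers create a per-thread slot whose destructor runs
// at thread exit for any thread that left a non-NULL value in the slot.
//
//   static tls_key_t example_key;
//   // once, during initialization:
//   example_key = tls_create(&free);
//   // per thread:
//   tls_set(example_key, calloc(1, sizeof(example_state_t)));
//   example_state_t *state = (example_state_t *)tls_get(example_key);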

#if SUPPORT_DIRECT_THREAD_KEYS

#if DEBUG
static bool is_valid_direct_key(tls_key_t k) {
    return ( k == SYNC_DATA_DIRECT_KEY
             || k == SYNC_COUNT_DIRECT_KEY
             || k == AUTORELEASE_POOL_KEY
# if SUPPORT_RETURN_AUTORELEASE
             || k == RETURN_DISPOSITION_KEY
# endif
# if SUPPORT_QOS_HACK
             || k == QOS_KEY
# endif
             );
}
#endif

#if __arm__

// rdar://9162780 _pthread_get/setspecific_direct are inefficient
// copied from libdispatch

__attribute__((const))
static ALWAYS_INLINE void**
tls_base(void)
{
    uintptr_t p;
#if defined(__arm__) && defined(_ARM_ARCH_6)
    __asm__("mrc p15, 0, %[p], c13, c0, 3" : [p] "=&r" (p));
    return (void**)(p & ~0x3ul);
#else
#error tls_base not implemented
#endif
}


static ALWAYS_INLINE void
tls_set_direct(void **tsdb, tls_key_t k, void *v)
{
    assert(is_valid_direct_key(k));

    tsdb[k] = v;
}
#define tls_set_direct(k, v) \
    tls_set_direct(tls_base(), (k), (v))


static ALWAYS_INLINE void *
tls_get_direct(void **tsdb, tls_key_t k)
{
    assert(is_valid_direct_key(k));

    return tsdb[k];
}
#define tls_get_direct(k) \
    tls_get_direct(tls_base(), (k))

// arm
#else
// not arm

static inline void *tls_get_direct(tls_key_t k)
{
    assert(is_valid_direct_key(k));

    if (_pthread_has_direct_tsd()) {
        return _pthread_getspecific_direct(k);
    } else {
        return pthread_getspecific(k);
    }
}
static inline void tls_set_direct(tls_key_t k, void *value)
{
    assert(is_valid_direct_key(k));

    if (_pthread_has_direct_tsd()) {
        _pthread_setspecific_direct(k, value);
    } else {
        pthread_setspecific(k, value);
    }
}

// not arm
#endif

// SUPPORT_DIRECT_THREAD_KEYS
#endif
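
// Usage sketch (illustrative): with direct keys the slot index is fixed at
// compile time (one of the __PTK_FRAMEWORK_OBJC_KEY* values reserved above),
// so reads and writes can bypass pthread_getspecific() on the fast path.
//
//   #if SUPPORT_DIRECT_THREAD_KEYS
//   void *head = tls_get_direct(AUTORELEASE_POOL_KEY);
//   tls_set_direct(AUTORELEASE_POOL_KEY, head);
//   #endif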


static inline pthread_t pthread_self_direct()
{
    return (pthread_t)
        _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_SELF);
}

static inline mach_port_t mach_thread_self_direct()
{
    return (mach_port_t)(uintptr_t)
        _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
}

#if SUPPORT_QOS_HACK
static inline pthread_priority_t pthread_self_priority_direct()
{
    pthread_priority_t pri = (pthread_priority_t)
        _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS);
    return pri & ~_PTHREAD_PRIORITY_FLAGS_MASK;
}
#endif


template <bool Debug> class mutex_tt;
template <bool Debug> class monitor_tt;
template <bool Debug> class rwlock_tt;
template <bool Debug> class recursive_mutex_tt;

#include "objc-lockdebug.h"

template <bool Debug>
class mutex_tt : nocopy_t {
    pthread_mutex_t mLock;

public:
    mutex_tt() : mLock(PTHREAD_MUTEX_INITIALIZER) { }

    void lock()
    {
        lockdebug_mutex_lock(this);

        int err = pthread_mutex_lock(&mLock);
        if (err) _objc_fatal("pthread_mutex_lock failed (%d)", err);
    }

    bool tryLock()
    {
        int err = pthread_mutex_trylock(&mLock);
        if (err == 0) {
            lockdebug_mutex_try_lock_success(this);
            return true;
        } else if (err == EBUSY) {
            return false;
        } else {
            _objc_fatal("pthread_mutex_trylock failed (%d)", err);
        }
    }

    void unlock()
    {
        lockdebug_mutex_unlock(this);

        int err = pthread_mutex_unlock(&mLock);
        if (err) _objc_fatal("pthread_mutex_unlock failed (%d)", err);
    }


    void assertLocked() {
        lockdebug_mutex_assert_locked(this);
    }

    void assertUnlocked() {
        lockdebug_mutex_assert_unlocked(this);
    }
};

using mutex_t = mutex_tt<DEBUG>;
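
// Usage sketch (illustrative; the lock name and functions are hypothetical):
// mutex_t wraps a pthread mutex; the lockdebug_* hooks let debug builds
// verify usage. Callers lock and unlock explicitly and may assert ownership
// in code that requires the lock to be held.
//
//   mutex_t exampleLock;
//
//   void example_write(void) {
//       exampleLock.lock();
//       // ... mutate state guarded by exampleLock ...
//       exampleLock.unlock();
//   }
//
//   void example_requires_lock(void) {
//       exampleLock.assertLocked();
//       // ... caller must already hold exampleLock ...
//   }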


template <bool Debug>
class recursive_mutex_tt : nocopy_t {
    pthread_mutex_t mLock;

public:
    recursive_mutex_tt() : mLock(PTHREAD_RECURSIVE_MUTEX_INITIALIZER) { }

    void lock()
    {
        lockdebug_recursive_mutex_lock(this);

        int err = pthread_mutex_lock(&mLock);
        if (err) _objc_fatal("pthread_mutex_lock failed (%d)", err);
    }

    bool tryLock()
    {
        int err = pthread_mutex_trylock(&mLock);
        if (err == 0) {
            lockdebug_recursive_mutex_lock(this);
            return true;
        } else if (err == EBUSY) {
            return false;
        } else {
            _objc_fatal("pthread_mutex_trylock failed (%d)", err);
        }
    }


    void unlock()
    {
        lockdebug_recursive_mutex_unlock(this);

        int err = pthread_mutex_unlock(&mLock);
        if (err) _objc_fatal("pthread_mutex_unlock failed (%d)", err);
    }

    bool tryUnlock()
    {
        int err = pthread_mutex_unlock(&mLock);
        if (err == 0) {
            lockdebug_recursive_mutex_unlock(this);
            return true;
        } else if (err == EPERM) {
            return false;
        } else {
            _objc_fatal("pthread_mutex_unlock failed (%d)", err);
        }
    }


    void assertLocked() {
        lockdebug_recursive_mutex_assert_locked(this);
    }

    void assertUnlocked() {
        lockdebug_recursive_mutex_assert_unlocked(this);
    }
};

using recursive_mutex_t = recursive_mutex_tt<DEBUG>;


template <bool Debug>
class monitor_tt {
    pthread_mutex_t mutex;
    pthread_cond_t cond;

public:
    monitor_tt()
        : mutex(PTHREAD_MUTEX_INITIALIZER), cond(PTHREAD_COND_INITIALIZER) { }

    void enter()
    {
        lockdebug_monitor_enter(this);

        int err = pthread_mutex_lock(&mutex);
        if (err) _objc_fatal("pthread_mutex_lock failed (%d)", err);
    }

    void leave()
    {
        lockdebug_monitor_leave(this);

        int err = pthread_mutex_unlock(&mutex);
        if (err) _objc_fatal("pthread_mutex_unlock failed (%d)", err);
    }

    void wait()
    {
        lockdebug_monitor_wait(this);

        int err = pthread_cond_wait(&cond, &mutex);
        if (err) _objc_fatal("pthread_cond_wait failed (%d)", err);
    }

    void notify()
    {
        int err = pthread_cond_signal(&cond);
        if (err) _objc_fatal("pthread_cond_signal failed (%d)", err);
    }

    void notifyAll()
    {
        int err = pthread_cond_broadcast(&cond);
        if (err) _objc_fatal("pthread_cond_broadcast failed (%d)", err);
    }

    void assertLocked()
    {
        lockdebug_monitor_assert_locked(this);
    }

    void assertUnlocked()
    {
        lockdebug_monitor_assert_unlocked(this);
    }
};

using monitor_t = monitor_tt<DEBUG>;
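
// Usage sketch (illustrative; the monitor and predicate are hypothetical):
// wait() atomically releases the monitor's mutex and blocks on its condition
// variable, so the usual pattern is enter / re-check a predicate around
// wait / leave.
//
//   monitor_t exampleMonitor;
//
//   // waiting side
//   exampleMonitor.enter();
//   while (!example_ready) exampleMonitor.wait();
//   exampleMonitor.leave();
//
//   // signaling side
//   exampleMonitor.enter();
//   example_ready = true;
//   exampleMonitor.notify();   // or notifyAll() to wake every waiter
//   exampleMonitor.leave();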


// semaphore_create formatted for INIT_ONCE use
static inline semaphore_t create_semaphore(void)
{
    semaphore_t sem;
    kern_return_t k;
    k = semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);
    if (k) _objc_fatal("semaphore_create failed (0x%x)", k);
    return sem;
}
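
// Usage sketch (an assumption about the intended pairing; the variable name
// is hypothetical): because create_semaphore() yields the new semaphore as a
// single expression, it can serve as the `create` argument of the INIT_ONCE
// macros above, with the losing thread destroying its spare semaphore in the
// `delete` clause.
//
//   static semaphore_t ExampleSem;
//   INIT_ONCE_32(ExampleSem,
//                create_semaphore(),
//                semaphore_destroy(mach_task_self(), v));
//   semaphore_wait(ExampleSem);
//   // ...
//   semaphore_signal(ExampleSem);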


#if SUPPORT_QOS_HACK
// Override QOS class to avoid priority inversion in rwlocks
// <rdar://17697862> do a qos override before taking rw lock in objc

#include <pthread/workqueue_private.h>
extern pthread_priority_t BackgroundPriority;
extern pthread_priority_t MainPriority;

static inline void qosStartOverride()
{
    uintptr_t overrideRefCount = (uintptr_t)tls_get_direct(QOS_KEY);
    if (overrideRefCount > 0) {
        // If there is a qos override, increment the refcount and continue
        tls_set_direct(QOS_KEY, (void *)(overrideRefCount + 1));
    }
    else {
        pthread_priority_t currentPriority = pthread_self_priority_direct();
        // Check if override is needed. Only override if we are background qos
        if (currentPriority != 0 && currentPriority <= BackgroundPriority) {
            int res __unused = _pthread_override_qos_class_start_direct(mach_thread_self_direct(), MainPriority);
            assert(res == 0);
            // Once we override, we set the reference count in the tsd
            // to know when to end the override
            tls_set_direct(QOS_KEY, (void *)1);
        }
    }
}

static inline void qosEndOverride()
{
    uintptr_t overrideRefCount = (uintptr_t)tls_get_direct(QOS_KEY);
    if (overrideRefCount == 0) return;

    if (overrideRefCount == 1) {
        // end the override
        int res __unused = _pthread_override_qos_class_end_direct(mach_thread_self_direct());
        assert(res == 0);
    }

    // decrement refcount
    tls_set_direct(QOS_KEY, (void *)(overrideRefCount - 1));
}

// SUPPORT_QOS_HACK
#else
// not SUPPORT_QOS_HACK

static inline void qosStartOverride() { }
static inline void qosEndOverride() { }

// not SUPPORT_QOS_HACK
#endif


template <bool Debug>
class rwlock_tt : nocopy_t {
    pthread_rwlock_t mLock;

public:
    rwlock_tt() : mLock(PTHREAD_RWLOCK_INITIALIZER) { }

    void read()
    {
        lockdebug_rwlock_read(this);

        qosStartOverride();
        int err = pthread_rwlock_rdlock(&mLock);
        if (err) _objc_fatal("pthread_rwlock_rdlock failed (%d)", err);
    }

    void unlockRead()
    {
        lockdebug_rwlock_unlock_read(this);

        int err = pthread_rwlock_unlock(&mLock);
        if (err) _objc_fatal("pthread_rwlock_unlock failed (%d)", err);
        qosEndOverride();
    }

    bool tryRead()
    {
        qosStartOverride();
        int err = pthread_rwlock_tryrdlock(&mLock);
        if (err == 0) {
            lockdebug_rwlock_try_read_success(this);
            return true;
        } else if (err == EBUSY) {
            qosEndOverride();
            return false;
        } else {
            _objc_fatal("pthread_rwlock_tryrdlock failed (%d)", err);
        }
    }

    void write()
    {
        lockdebug_rwlock_write(this);

        qosStartOverride();
        int err = pthread_rwlock_wrlock(&mLock);
        if (err) _objc_fatal("pthread_rwlock_wrlock failed (%d)", err);
    }

    void unlockWrite()
    {
        lockdebug_rwlock_unlock_write(this);

        int err = pthread_rwlock_unlock(&mLock);
        if (err) _objc_fatal("pthread_rwlock_unlock failed (%d)", err);
        qosEndOverride();
    }

    bool tryWrite()
    {
        qosStartOverride();
        int err = pthread_rwlock_trywrlock(&mLock);
        if (err == 0) {
            lockdebug_rwlock_try_write_success(this);
            return true;
        } else if (err == EBUSY) {
            qosEndOverride();
            return false;
        } else {
            _objc_fatal("pthread_rwlock_trywrlock failed (%d)", err);
        }
    }


    void assertReading() {
        lockdebug_rwlock_assert_reading(this);
    }

    void assertWriting() {
        lockdebug_rwlock_assert_writing(this);
    }

    void assertLocked() {
        lockdebug_rwlock_assert_locked(this);
    }

    void assertUnlocked() {
        lockdebug_rwlock_assert_unlocked(this);
    }
};

using rwlock_t = rwlock_tt<DEBUG>;
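
// Usage sketch (illustrative; the lock name and functions are hypothetical):
// readers may overlap with each other but not with a writer. Each
// read()/write() also starts a QOS override (when SUPPORT_QOS_HACK is set)
// that the matching unlock ends.
//
//   rwlock_t exampleLock;
//
//   void example_lookup(void) {
//       exampleLock.read();
//       // ... read shared state ...
//       exampleLock.unlockRead();
//   }
//
//   void example_update(void) {
//       exampleLock.write();
//       // ... modify shared state ...
//       exampleLock.unlockWrite();
//   }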


#ifndef __LP64__
typedef struct mach_header headerType;
typedef struct segment_command segmentType;
typedef struct section sectionType;
#else
typedef struct mach_header_64 headerType;
typedef struct segment_command_64 segmentType;
typedef struct section_64 sectionType;
#endif
#define headerIsBundle(hi) (hi->mhdr->filetype == MH_BUNDLE)
#define libobjc_header ((headerType *)&_mh_dylib_header)

// Prototypes

/* Secure /tmp usage */
extern int secure_open(const char *filename, int flags, uid_t euid);


#else


#error unknown OS


#endif


static inline void *
memdup(const void *mem, size_t len)
{
    void *dup = malloc(len);
    memcpy(dup, mem, len);
    return dup;
}

// unsigned strdup
static inline uint8_t *
ustrdup(const uint8_t *str)
{
    return (uint8_t *)strdup((char *)str);
}

// nil-checking strdup
static inline uint8_t *
strdupMaybeNil(const uint8_t *str)
{
    if (!str) return nil;
    return (uint8_t *)strdup((char *)str);
}

// nil-checking unsigned strdup
static inline uint8_t *
ustrdupMaybeNil(const uint8_t *str)
{
    if (!str) return nil;
    return (uint8_t *)strdup((char *)str);
}

#endif