]> git.saurik.com Git - apple/objc4.git/blob - runtime/objc-os.h
f6685709e66f167a8c27d45f3390f60b86175f4c
[apple/objc4.git] / runtime / objc-os.h
1 /*
2 * Copyright (c) 2007 Apple Inc. All Rights Reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 /***********************************************************************
25 * objc-os.h
26 * OS portability layer.
27 **********************************************************************/
28
29 #ifndef _OBJC_OS_H
30 #define _OBJC_OS_H
31
32 #include <TargetConditionals.h>
33
34 #if TARGET_OS_MAC
35
36 # ifndef __STDC_LIMIT_MACROS
37 # define __STDC_LIMIT_MACROS
38 # endif
39
40 # include <stdio.h>
41 # include <stdlib.h>
42 # include <stdint.h>
43 # include <stdarg.h>
44 # include <string.h>
45 # include <ctype.h>
46 # include <errno.h>
47 # include <dlfcn.h>
48 # include <fcntl.h>
49 # include <assert.h>
50 # include <limits.h>
51 # include <syslog.h>
52 # include <unistd.h>
53 # include <pthread.h>
54 # include <crt_externs.h>
55 # include <AssertMacros.h>
56 # undef check
57 # include <AvailabilityMacros.h>
58 # include <TargetConditionals.h>
59 # include <sys/mman.h>
60 # include <sys/time.h>
61 # include <sys/stat.h>
62 # include <sys/param.h>
63 # include <mach/mach.h>
64 # include <mach-o/dyld.h>
65 # include <mach-o/ldsyms.h>
66 # include <mach-o/loader.h>
67 # include <mach-o/getsect.h>
68 # include <mach-o/dyld_priv.h>
69 # include <malloc/malloc.h>
70 # include <libkern/OSAtomic.h>
71 # include <libkern/OSCacheControl.h>
72 # include <System/pthread_machdep.h>
73 # include "objc-probes.h" // generated dtrace probe definitions.
74
75
76 #if defined(__i386__) || defined(__x86_64__)
77
78 // Inlined spinlock.
79 // Not for arm on iOS because it hurts uniprocessor performance.
80
#define ARR_SPINLOCK_INIT 0
// XXX -- Careful: OSSpinLock isn't volatile, but should be
typedef volatile int ARRSpinLock;

// Acquire the spinlock.  Fast path: one atomic test-and-set.
// On contention: spin up to 1000 iterations reading the lock word
// (test-and-test-and-set keeps the cache line shared while waiting),
// then yield via thread_switch() with depressed priority and retry.
__attribute__((always_inline))
static inline void ARRSpinLockLock(ARRSpinLock *l)
{
    unsigned y;
again:
    // __sync_lock_test_and_set returns the previous value; 0 means we won.
    if (__builtin_expect(__sync_lock_test_and_set(l, 1), 0) == 0) {
        return;
    }
    for (y = 1000; y; y--) {
#if defined(__i386__) || defined(__x86_64__)
        asm("pause");   // spin-wait hint to the CPU
#endif
        // Only re-attempt the atomic once the lock word looks free.
        if (*l == 0) goto again;
    }
    // Still contended: briefly depress this thread's priority and retry.
    thread_switch(THREAD_NULL, SWITCH_OPTION_DEPRESS, 1);
    goto again;
}

// Release the lock (store 0 with release semantics).
__attribute__((always_inline))
static inline void ARRSpinLockUnlock(ARRSpinLock *l)
{
    __sync_lock_release(l);
}

// Non-blocking acquire; returns nonzero on success.
__attribute__((always_inline))
static inline int ARRSpinLockTry(ARRSpinLock *l)
{
    return __sync_bool_compare_and_swap(l, 0, 1);
}

// Substitute the inlined spinlock for OSSpinLock on x86.
#define OSSpinLock ARRSpinLock
#define OSSpinLockTry(l) ARRSpinLockTry(l)
#define OSSpinLockLock(l) ARRSpinLockLock(l)
#define OSSpinLockUnlock(l) ARRSpinLockUnlock(l)
#undef OS_SPINLOCK_INIT
#define OS_SPINLOCK_INIT ARR_SPINLOCK_INIT
119 #endif
120
121
122 #if !TARGET_OS_IPHONE
123 # include <CrashReporterClient.h>
124 #else
125 // CrashReporterClient not yet available on iOS
126 __BEGIN_DECLS
127 extern const char *CRSetCrashLogMessage(const char *msg);
128 extern const char *CRGetCrashLogMessage(void);
129 extern const char *CRSetCrashLogMessage2(const char *msg);
130 __END_DECLS
131 #endif
132
133 #if TARGET_IPHONE_SIMULATOR
134 // getsectiondata() and getsegmentdata() are unavailable
135 __BEGIN_DECLS
136 # define getsectiondata(m, s, n, c) objc_getsectiondata(m, s, n, c)
137 # define getsegmentdata(m, s, c) objc_getsegmentdata(m, s, c)
138 extern uint8_t *objc_getsectiondata(const struct mach_header *mh, const char *segname, const char *sectname, unsigned long *outSize);
139 extern uint8_t * objc_getsegmentdata(const struct mach_header *mh, const char *segname, unsigned long *outSize);
140 __END_DECLS
141 #endif
142
143 # if __cplusplus
144 # include <vector>
145 # include <algorithm>
146 # include <functional>
147 using namespace std;
148 # endif
149
150 # define PRIVATE_EXTERN __attribute__((visibility("hidden")))
151 # undef __private_extern__
152 # define __private_extern__ use_PRIVATE_EXTERN_instead
153 # undef private_extern
154 # define private_extern use_PRIVATE_EXTERN_instead
155
156 /* Use this for functions that are intended to be breakpoint hooks.
157 If you do not, the compiler may optimize them away.
158 BREAKPOINT_FUNCTION( void stop_on_error(void) ); */
159 # define BREAKPOINT_FUNCTION(prototype) \
160 OBJC_EXTERN __attribute__((noinline, visibility("hidden"))) \
161 prototype { asm(""); }
162
163 #elif TARGET_OS_WIN32
164
165 # define WINVER 0x0501 // target Windows XP and later
166 # define _WIN32_WINNT 0x0501 // target Windows XP and later
167 # define WIN32_LEAN_AND_MEAN
168 // hack: windef.h typedefs BOOL as int
169 # define BOOL WINBOOL
170 # include <windows.h>
171 # undef BOOL
172
173 # include <stdio.h>
174 # include <stdlib.h>
175 # include <stdint.h>
176 # include <stdarg.h>
177 # include <string.h>
178 # include <assert.h>
179 # include <malloc.h>
180 # include <AvailabilityMacros.h>
181
182 # if __cplusplus
183 # include <vector>
184 # include <algorithm>
185 # include <functional>
186 using namespace std;
187 # define __BEGIN_DECLS extern "C" {
188 # define __END_DECLS }
189 # else
190 # define __BEGIN_DECLS /*empty*/
191 # define __END_DECLS /*empty*/
192 # endif
193
194 # define PRIVATE_EXTERN
195 # define __attribute__(x)
196 # define inline __inline
197
198 /* Use this for functions that are intended to be breakpoint hooks.
199 If you do not, the compiler may optimize them away.
200 BREAKPOINT_FUNCTION( void MyBreakpointFunction(void) ); */
201 # define BREAKPOINT_FUNCTION(prototype) \
202 __declspec(noinline) prototype { __asm { } }
203
204 /* stub out dtrace probes */
205 # define OBJC_RUNTIME_OBJC_EXCEPTION_RETHROW() do {} while(0)
206 # define OBJC_RUNTIME_OBJC_EXCEPTION_THROW(arg0) do {} while(0)
207
208 #else
209 # error unknown OS
210 #endif
211
212
213 #include <objc/objc.h>
214 #include <objc/objc-api.h>
215
216 __BEGIN_DECLS
217
218 extern void _objc_fatal(const char *fmt, ...) __attribute__((noreturn, format (printf, 1, 2)));
219
// Race-safe one-time lazy initialization of a pointer-sized variable.
// Every contending thread evaluates `create`; exactly one CAS winner
// publishes its value, and losers run `destroy` on their extra copy.
// NOTE: each expansion emits a `done:` label, so each of these macros
// may appear at most once per enclosing function, and `destroy` must
// not itself jump to `done`.
// (Parameter renamed from `delete`, which is a C++ keyword; this header
// is also compiled as C++.)
#define INIT_ONCE_PTR(var, create, destroy)                             \
    do {                                                                \
        if (var) break;                                                 \
        typeof(var) v = create;                                         \
        while (!var) {                                                  \
            if (OSAtomicCompareAndSwapPtrBarrier(0, (void*)v,           \
                                                 (void**)&var))         \
            {                                                           \
                goto done;                                              \
            }                                                           \
        }                                                               \
        destroy;                                                        \
    done:;                                                              \
    } while (0)

// Same protocol for a 32-bit variable.
#define INIT_ONCE_32(var, create, destroy)                              \
    do {                                                                \
        if (var) break;                                                 \
        typeof(var) v = create;                                         \
        while (!var) {                                                  \
            if (OSAtomicCompareAndSwap32Barrier(0, v,                   \
                                                (volatile int32_t *)&var)) \
            {                                                           \
                goto done;                                              \
            }                                                           \
        }                                                               \
        destroy;                                                        \
    done:;                                                              \
    } while (0)
245
246
247 // Thread keys reserved by libc for our use.
248 // Keys [0..4] are used by autozone.
249 #if defined(__PTK_FRAMEWORK_OBJC_KEY5)
250 # define SUPPORT_DIRECT_THREAD_KEYS 1
251 # define TLS_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY5)
252 # define SYNC_DATA_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY6)
253 # define SYNC_COUNT_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY7)
254 # define AUTORELEASE_POOL_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY8)
255 # if SUPPORT_RETURN_AUTORELEASE
256 # define AUTORELEASE_POOL_RECLAIM_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY9)
257 # endif
258 #else
259 # define SUPPORT_DIRECT_THREAD_KEYS 0
260 #endif
261
262
263 #if TARGET_OS_WIN32
264
265 // Compiler compatibility
266
267 // OS compatibility
268
// OS compatibility shims for the Windows build.

#define strdup _strdup

#define issetugid() 0

// NOTE: evaluates both arguments twice; avoid side effects in x and y.
#define MIN(x, y) ((x) < (y) ? (x) : (y))

// BSD-style memory helpers expressed via the C library.
static __inline void bcopy(const void *from, void *to, size_t len)
{
    memcpy(to, from, len);
}
static __inline void bzero(void *region, size_t len)
{
    memset(region, 0, len);
}

int asprintf(char **dstp, const char *format, ...);
279
// Minimal malloc-zone shim: Windows has no malloc zones, so every
// "zone" is the same opaque sentinel and all calls forward to the CRT heap.
typedef void * malloc_zone_t;

// Sentinel (-1) stands in for the single default zone.
static __inline malloc_zone_t malloc_default_zone(void) { return (malloc_zone_t)-1; }
static __inline void *malloc_zone_malloc(malloc_zone_t z, size_t size) { return malloc(size); }
// NOTE(review): parameter order here is (size, count), forwarded as
// calloc(size, count); the OS X zone API orders these the other way
// around, but the total allocation size is the same -- confirm callers.
static __inline void *malloc_zone_calloc(malloc_zone_t z, size_t size, size_t count) { return calloc(size, count); }
static __inline void *malloc_zone_realloc(malloc_zone_t z, void *p, size_t size) { return realloc(p, size); }
static __inline void malloc_zone_free(malloc_zone_t z, void *p) { free(p); }
static __inline malloc_zone_t malloc_zone_from_ptr(const void *p) { return (malloc_zone_t)-1; }
// _msize is the MSVC CRT's equivalent of malloc_size.
static __inline size_t malloc_size(const void *p) { return _msize((void*)p); /* fixme invalid pointer check? */ }
289
290
291 // AssertMacros
292
293 #define require_action_string(cond, dest, act, msg) do { if (!(cond)) { { act; } goto dest; } } while (0)
294 #define require_noerr_string(err, dest, msg) do { if (err) goto dest; } while (0)
295 #define require_string(cond, dest, msg) do { if (!(cond)) goto dest; } while (0)
296
297
// OSAtomic compatibility: map the Mac OSAtomic* calls onto Win32
// Interlocked* primitives.  Each returns whether the swap happened
// (compare-and-swap) or the updated value (increment/decrement).

static __inline BOOL OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst)
{
    // fixme barrier is overkill
    // (Interlocked* always issues a full barrier, so this non-barrier
    // variant is stronger than its name requires.)
    long original = InterlockedCompareExchange(dst, newl, oldl);
    return (original == oldl);
}

static __inline BOOL OSAtomicCompareAndSwapPtrBarrier(void *oldp, void *newp, void * volatile *dst)
{
    void *original = InterlockedCompareExchangePointer(dst, newp, oldp);
    return (original == oldp);
}

static __inline BOOL OSAtomicCompareAndSwap32Barrier(int32_t oldl, int32_t newl, int32_t volatile *dst)
{
    long original = InterlockedCompareExchange((volatile long *)dst, newl, oldl);
    return (original == oldl);
}

static __inline int32_t OSAtomicDecrement32Barrier(volatile int32_t *dst)
{
    return InterlockedDecrement((volatile long *)dst);
}

static __inline int32_t OSAtomicIncrement32Barrier(volatile int32_t *dst)
{
    return InterlockedIncrement((volatile long *)dst);
}
328
329
// Internal data types

// Threads are identified by their Win32 thread ID.
typedef DWORD objc_thread_t; // thread ID
static __inline int thread_equal(objc_thread_t t1, objc_thread_t t2) {
    return t1 == t2;
}
static __inline objc_thread_t thread_self(void) {
    return GetCurrentThreadId();
}

// TLS key plus destructor.  Win32 TLS has no per-key destructor, so the
// dtor is stored alongside the key; note that nothing here ever invokes
// it (see the fixme in tls_create).
typedef struct {
    DWORD key;
    void (*dtor)(void *);
} tls_key_t;
static __inline tls_key_t tls_create(void (*dtor)(void*)) {
    // fixme need dtor registry for DllMain to call on thread detach
    tls_key_t k;
    k.key = TlsAlloc();
    k.dtor = dtor;
    return k;
}
static __inline void *tls_get(tls_key_t k) {
    return TlsGetValue(k.key);
}
static __inline void tls_set(tls_key_t k, void *value) {
    TlsSetValue(k.key, value);
}
357
358 typedef struct {
359 CRITICAL_SECTION *lock;
360 } mutex_t;
361 #define MUTEX_INITIALIZER {0};
362 extern void mutex_init(mutex_t *m);
363 static __inline int _mutex_lock_nodebug(mutex_t *m) {
364 // fixme error check
365 if (!m->lock) {
366 mutex_init(m);
367 }
368 EnterCriticalSection(m->lock);
369 return 0;
370 }
371 static __inline bool _mutex_try_lock_nodebug(mutex_t *m) {
372 // fixme error check
373 if (!m->lock) {
374 mutex_init(m);
375 }
376 return TryEnterCriticalSection(m->lock);
377 }
378 static __inline int _mutex_unlock_nodebug(mutex_t *m) {
379 // fixme error check
380 LeaveCriticalSection(m->lock);
381 return 0;
382 }
383
384
385 typedef mutex_t OSSpinLock;
386 #define OSSpinLockLock(l) mutex_lock(l)
387 #define OSSpinLockUnlock(l) mutex_unlock(l)
388 #define OS_SPINLOCK_INIT MUTEX_INITIALIZER
389
390
391 typedef struct {
392 HANDLE mutex;
393 } recursive_mutex_t;
394 #define RECURSIVE_MUTEX_INITIALIZER {0};
395 #define RECURSIVE_MUTEX_NOT_LOCKED 1
396 extern void recursive_mutex_init(recursive_mutex_t *m);
397 static __inline int _recursive_mutex_lock_nodebug(recursive_mutex_t *m) {
398 assert(m->mutex);
399 return WaitForSingleObject(m->mutex, INFINITE);
400 }
401 static __inline bool _recursive_mutex_try_lock_nodebug(recursive_mutex_t *m) {
402 assert(m->mutex);
403 return (WAIT_OBJECT_0 == WaitForSingleObject(m->mutex, 0));
404 }
405 static __inline int _recursive_mutex_unlock_nodebug(recursive_mutex_t *m) {
406 assert(m->mutex);
407 return ReleaseMutex(m->mutex) ? 0 : RECURSIVE_MUTEX_NOT_LOCKED;
408 }
409
410
411 /*
412 typedef HANDLE mutex_t;
413 static inline void mutex_init(HANDLE *m) { *m = CreateMutex(NULL, FALSE, NULL); }
414 static inline void _mutex_lock(mutex_t *m) { WaitForSingleObject(*m, INFINITE); }
415 static inline bool mutex_try_lock(mutex_t *m) { return WaitForSingleObject(*m, 0) == WAIT_OBJECT_0; }
416 static inline void _mutex_unlock(mutex_t *m) { ReleaseMutex(*m); }
417 */
418
// based on http://www.cs.wustl.edu/~schmidt/win32-cv-1.html
// Vista-only CONDITION_VARIABLE would be better
// Monitor = mutex + condition variable, emulated with pre-Vista Win32
// primitives using the classic Schmidt scheme referenced above.
typedef struct {
    HANDLE mutex;
    HANDLE waiters; // semaphore for those in cond_wait()
    HANDLE waitersDone; // auto-reset event after everyone gets a broadcast
    CRITICAL_SECTION waitCountLock; // guards waitCount and didBroadcast
    unsigned int waitCount;
    int didBroadcast;
} monitor_t;
#define MONITOR_INITIALIZER { 0 }
#define MONITOR_NOT_ENTERED 1
extern int monitor_init(monitor_t *c);

// Enter the monitor, lazily initializing it on first use.
// NOTE(review): the lazy-init test is racy if two threads enter an
// uninitialized monitor concurrently -- pre-existing behavior.
static inline int _monitor_enter_nodebug(monitor_t *c) {
    if (!c->mutex) {
        int err = monitor_init(c);
        if (err) return err;
    }
    return WaitForSingleObject(c->mutex, INFINITE);
}
// Leave the monitor; MONITOR_NOT_ENTERED if the caller did not hold it.
static inline int _monitor_exit_nodebug(monitor_t *c) {
    if (!ReleaseMutex(c->mutex)) return MONITOR_NOT_ENTERED;
    else return 0;
}
// Wait: atomically release the mutex and sleep on the waiters semaphore,
// then re-acquire the mutex before returning.  The last waiter to wake
// from a broadcast signals waitersDone so the broadcaster can proceed.
static inline int _monitor_wait_nodebug(monitor_t *c) {
    int last;
    EnterCriticalSection(&c->waitCountLock);
    c->waitCount++;
    LeaveCriticalSection(&c->waitCountLock);

    // Atomic release-and-wait prevents a lost wakeup between
    // dropping the mutex and blocking on the semaphore.
    SignalObjectAndWait(c->mutex, c->waiters, INFINITE, FALSE);

    EnterCriticalSection(&c->waitCountLock);
    c->waitCount--;
    last = c->didBroadcast && c->waitCount == 0;
    LeaveCriticalSection(&c->waitCountLock);

    if (last) {
        // tell broadcaster that all waiters have awoken
        SignalObjectAndWait(c->waitersDone, c->mutex, INFINITE, FALSE);
    } else {
        WaitForSingleObject(c->mutex, INFINITE);
    }

    // fixme error checking
    return 0;
}
// Wake one waiter, if any.
static inline int monitor_notify(monitor_t *c) {
    int haveWaiters;

    EnterCriticalSection(&c->waitCountLock);
    haveWaiters = c->waitCount > 0;
    LeaveCriticalSection(&c->waitCountLock);

    if (haveWaiters) {
        ReleaseSemaphore(c->waiters, 1, 0);
    }

    // fixme error checking
    return 0;
}
// Wake every current waiter and block until they have all resumed.
static inline int monitor_notifyAll(monitor_t *c) {
    EnterCriticalSection(&c->waitCountLock);
    if (c->waitCount == 0) {
        LeaveCriticalSection(&c->waitCountLock);
        return 0;
    }
    c->didBroadcast = 1;
    ReleaseSemaphore(c->waiters, c->waitCount, 0);
    LeaveCriticalSection(&c->waitCountLock);

    // fairness: wait for everyone to move from waiters to mutex
    WaitForSingleObject(c->waitersDone, INFINITE);
    // not under waitCountLock, but still under mutex
    c->didBroadcast = 0;

    // fixme error checking
    return 0;
}
499
500
// fixme no rwlock yet
// Windows fallback: degrade the read-write lock to a plain mutex.
// Correct (readers serialize) but loses reader concurrency.

#define rwlock_t mutex_t
#define rwlock_init(r) mutex_init(r)
#define _rwlock_read_nodebug(m) _mutex_lock_nodebug(m)
#define _rwlock_write_nodebug(m) _mutex_lock_nodebug(m)
#define _rwlock_try_read_nodebug(m) _mutex_try_lock_nodebug(m)
#define _rwlock_try_write_nodebug(m) _mutex_try_lock_nodebug(m)
#define _rwlock_unlock_read_nodebug(m) _mutex_unlock_nodebug(m)
#define _rwlock_unlock_write_nodebug(m) _mutex_unlock_nodebug(m)


// Image headers are PE (DOS header) on Windows.
typedef IMAGE_DOS_HEADER headerType;
// fixme YES bundle? NO bundle? sometimes?
#define headerIsBundle(hi) YES
// __ImageBase is the linker-provided base address of this module.
OBJC_EXTERN IMAGE_DOS_HEADER __ImageBase;
#define libobjc_header ((headerType *)&__ImageBase)
518
519 // Prototypes
520
521
522 #elif TARGET_OS_MAC
523
524
525 // OS headers
526 #include <mach-o/loader.h>
527 #ifndef __LP64__
528 # define SEGMENT_CMD LC_SEGMENT
529 #else
530 # define SEGMENT_CMD LC_SEGMENT_64
531 #endif
532
533 #ifndef VM_MEMORY_OBJC_DISPATCHERS
534 # define VM_MEMORY_OBJC_DISPATCHERS 0
535 #endif
536
537
538 // Compiler compatibility
539
540 // OS compatibility
541
542 // Internal data types
543
// Thread identity: a pthread handle.
typedef pthread_t objc_thread_t;

// Nonzero iff both handles name the same thread.
static __inline int thread_equal(objc_thread_t a, objc_thread_t b) {
    return pthread_equal(a, b);
}

// Handle of the calling thread.
static __inline objc_thread_t thread_self(void) {
    return pthread_self();
}
552
553
// Thread-local storage via pthread-specific data.
typedef pthread_key_t tls_key_t;

// Allocate a key whose per-thread value is passed to `dtor` at thread
// exit.  (Creation failure is ignored, matching prior behavior.)
static inline tls_key_t tls_create(void (*dtor)(void*)) {
    tls_key_t result;
    pthread_key_create(&result, dtor);
    return result;
}
// Read this thread's value for key `k` (NULL if never set).
static inline void *tls_get(tls_key_t k) {
    return pthread_getspecific(k);
}
// Set this thread's value for key `k`.
static inline void tls_set(tls_key_t k, void *value) {
    pthread_setspecific(k, value);
}
567
#if SUPPORT_DIRECT_THREAD_KEYS

// Debug-only validator: direct-indexed TLS may only be used with the
// reserved keys listed here.
// NOTE(review): `#if !NDEBUG` assumes NDEBUG is defined as a non-empty
// integer (e.g. -DNDEBUG=1); a bare `#define NDEBUG` would not
// preprocess cleanly here -- confirm against the build settings.
#if !NDEBUG
static bool is_valid_direct_key(tls_key_t k) {
    return (   k == SYNC_DATA_DIRECT_KEY
            || k == SYNC_COUNT_DIRECT_KEY
            || k == AUTORELEASE_POOL_KEY
#   if SUPPORT_RETURN_AUTORELEASE
            || k == AUTORELEASE_POOL_RECLAIM_KEY
#   endif
            );
}
#endif

#if __arm__

// rdar://9162780 _pthread_get/setspecific_direct are inefficient
// copied from libdispatch

// Base of the thread-specific-data array, read from the CP15 thread-ID
// register (c13/c0/3) with the low flag bits masked off.
__attribute__((always_inline)) __attribute__((const))
static inline void**
tls_base(void)
{
    uintptr_t p;
#if defined(__arm__) && defined(_ARM_ARCH_6)
    __asm__("mrc p15, 0, %[p], c13, c0, 3" : [p] "=&r" (p));
    return (void**)(p & ~0x3ul);
#else
#error tls_base not implemented
#endif
}

// Store directly into the TSD array.  The macro below rebinds this
// two-argument helper to the public one-argument form: callers write
// tls_set_direct(k, v) and the tls_base() lookup is inserted here.
__attribute__((always_inline))
static inline void
tls_set_direct(void **tsdb, tls_key_t k, void *v)
{
    assert(is_valid_direct_key(k));

    tsdb[k] = v;
}
#define tls_set_direct(k, v) \
    tls_set_direct(tls_base(), (k), (v))

// Load directly from the TSD array; same macro trick as above.
__attribute__((always_inline))
static inline void *
tls_get_direct(void **tsdb, tls_key_t k)
{
    assert(is_valid_direct_key(k));

    return tsdb[k];
}
#define tls_get_direct(k) \
    tls_get_direct(tls_base(), (k))

// arm
#else
// not arm

// Non-arm: use libc's direct TSD accessors when available, falling
// back to the portable pthread calls otherwise.
static inline void *tls_get_direct(tls_key_t k)
{
    assert(is_valid_direct_key(k));

    if (_pthread_has_direct_tsd()) {
        return _pthread_getspecific_direct(k);
    } else {
        return pthread_getspecific(k);
    }
}
static inline void tls_set_direct(tls_key_t k, void *value)
{
    assert(is_valid_direct_key(k));

    if (_pthread_has_direct_tsd()) {
        _pthread_setspecific_direct(k, value);
    } else {
        pthread_setspecific(k, value);
    }
}

// not arm
#endif

// SUPPORT_DIRECT_THREAD_KEYS
#endif
652
653
// Plain pthread mutex.
typedef pthread_mutex_t mutex_t;
// No trailing semicolon: the use site supplies it.  (The previous
// definition's trailing ';' expanded `mutex_t m = MUTEX_INITIALIZER;`
// into a stray empty declaration and broke the macro inside
// initializer lists.)
#define MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
656
657 extern int DebuggerMode;
658 extern void gdb_objc_debuggerModeFailure(void);
659 extern BOOL isManagedDuringDebugger(void *lock);
660 extern BOOL isLockedDuringDebugger(void *lock);
661
// Lock, honoring debugger mode: while the debugger has the runtime
// frozen, locks it manages are not actually taken -- but it is a fatal
// error if the debugger does not already hold them.
static inline int _mutex_lock_nodebug(mutex_t *m) {
    if (DebuggerMode && isManagedDuringDebugger(m)) {
        if (! isLockedDuringDebugger(m)) {
            gdb_objc_debuggerModeFailure();
        }
        return 0;
    }
    return pthread_mutex_lock(m);
}
// Try-lock; true iff acquired.  Debugger-managed locks always report
// success without being touched (fatal if the debugger lacks them).
static inline bool _mutex_try_lock_nodebug(mutex_t *m) {
    if (DebuggerMode && isManagedDuringDebugger(m)) {
        if (! isLockedDuringDebugger(m)) {
            gdb_objc_debuggerModeFailure();
        }
        return true;
    }
    return !pthread_mutex_trylock(m);
}
// Unlock; a no-op for debugger-managed locks.
static inline int _mutex_unlock_nodebug(mutex_t *m) {
    if (DebuggerMode && isManagedDuringDebugger(m)) {
        return 0;
    }
    return pthread_mutex_unlock(m);
}
686
687
688 typedef struct {
689 pthread_mutex_t *mutex;
690 } recursive_mutex_t;
691 #define RECURSIVE_MUTEX_INITIALIZER {0};
692 #define RECURSIVE_MUTEX_NOT_LOCKED EPERM
693 extern void recursive_mutex_init(recursive_mutex_t *m);
694
695 static inline int _recursive_mutex_lock_nodebug(recursive_mutex_t *m) {
696 assert(m->mutex);
697 if (DebuggerMode && isManagedDuringDebugger(m)) {
698 if (! isLockedDuringDebugger((mutex_t *)m)) {
699 gdb_objc_debuggerModeFailure();
700 }
701 return 0;
702 }
703 return pthread_mutex_lock(m->mutex);
704 }
705 static inline bool _recursive_mutex_try_lock_nodebug(recursive_mutex_t *m) {
706 assert(m->mutex);
707 if (DebuggerMode && isManagedDuringDebugger(m)) {
708 if (! isLockedDuringDebugger((mutex_t *)m)) {
709 gdb_objc_debuggerModeFailure();
710 }
711 return true;
712 }
713 return !pthread_mutex_trylock(m->mutex);
714 }
715 static inline int _recursive_mutex_unlock_nodebug(recursive_mutex_t *m) {
716 assert(m->mutex);
717 if (DebuggerMode && isManagedDuringDebugger(m)) {
718 return 0;
719 }
720 return pthread_mutex_unlock(m->mutex);
721 }
722
723
// Monitor: a mutex paired with a condition variable.
typedef struct {
    pthread_mutex_t mutex;
    pthread_cond_t cond;
} monitor_t;
#define MONITOR_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER }
#define MONITOR_NOT_ENTERED EPERM

// Initialize both halves.  If the condition variable cannot be built,
// tear the mutex back down so no half-initialized monitor escapes.
// Returns 0 on success or an errno value.
static inline int monitor_init(monitor_t *c) {
    int err = pthread_mutex_init(&c->mutex, NULL);
    if (err) return err;
    err = pthread_cond_init(&c->cond, NULL);
    if (err == 0) return 0;
    pthread_mutex_destroy(&c->mutex);
    return err;
}
741 static inline int _monitor_enter_nodebug(monitor_t *c) {
742 assert(!isManagedDuringDebugger(c));
743 return pthread_mutex_lock(&c->mutex);
744 }
745 static inline int _monitor_exit_nodebug(monitor_t *c) {
746 return pthread_mutex_unlock(&c->mutex);
747 }
748 static inline int _monitor_wait_nodebug(monitor_t *c) {
749 return pthread_cond_wait(&c->cond, &c->mutex);
750 }
751 static inline int monitor_notify(monitor_t *c) {
752 return pthread_cond_signal(&c->cond);
753 }
754 static inline int monitor_notifyAll(monitor_t *c) {
755 return pthread_cond_broadcast(&c->cond);
756 }
757
758
// semaphore_create formatted for INIT_ONCE use
// Returns a new Mach semaphore with initial value 0 and FIFO wakeup
// policy; any creation failure is fatal, so the result needs no checking.
static inline semaphore_t create_semaphore(void)
{
    semaphore_t sem;
    kern_return_t k;
    k = semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);
    if (k) _objc_fatal("semaphore_create failed (0x%x)", k);
    return sem;
}
768
769
/* Read-write lock, a thin wrapper around pthread_rwlock.
   NOTE(review): an earlier version was a custom lock built from atomic
   reader counts; the old "state: xxxx..." layout description that lived
   here no longer matches this pthread-based implementation.
   Like mutex_t above, every operation first honors DebuggerMode. */
typedef struct {
    pthread_rwlock_t rwl;
} rwlock_t;

// Debugger-mode queries for read/write holds on a managed lock.
extern BOOL isReadingDuringDebugger(rwlock_t *lock);
extern BOOL isWritingDuringDebugger(rwlock_t *lock);

// Initialize; failure is treated as a programming error (asserted).
static inline void rwlock_init(rwlock_t *l)
{
    int err __unused = pthread_rwlock_init(&l->rwl, NULL);
    assert(err == 0);
}

// Acquire for reading.  Debugger-managed locks are not actually taken,
// but must already be read-held by the debugger (else fatal).
static inline void _rwlock_read_nodebug(rwlock_t *l)
{
    if (DebuggerMode && isManagedDuringDebugger(l)) {
        if (! isReadingDuringDebugger(l)) {
            gdb_objc_debuggerModeFailure();
        }
        return;
    }
    int err __unused = pthread_rwlock_rdlock(&l->rwl);
    assert(err == 0);
}

// Release a read hold; a no-op for debugger-managed locks.
static inline void _rwlock_unlock_read_nodebug(rwlock_t *l)
{
    if (DebuggerMode && isManagedDuringDebugger(l)) {
        return;
    }
    int err __unused = pthread_rwlock_unlock(&l->rwl);
    assert(err == 0);
}


// Try to acquire for reading; true on success.  Debugger-managed locks
// always report success (fatal if the debugger lacks the read hold).
static inline bool _rwlock_try_read_nodebug(rwlock_t *l)
{
    if (DebuggerMode && isManagedDuringDebugger(l)) {
        if (! isReadingDuringDebugger(l)) {
            gdb_objc_debuggerModeFailure();
        }
        return true;
    }
    int err = pthread_rwlock_tryrdlock(&l->rwl);
    assert(err == 0 || err == EBUSY);
    return (err == 0);
}


// Acquire for writing (exclusive); debugger-mode rules as above.
static inline void _rwlock_write_nodebug(rwlock_t *l)
{
    if (DebuggerMode && isManagedDuringDebugger(l)) {
        if (! isWritingDuringDebugger(l)) {
            gdb_objc_debuggerModeFailure();
        }
        return;
    }
    int err __unused = pthread_rwlock_wrlock(&l->rwl);
    assert(err == 0);
}

// Release a write hold; a no-op for debugger-managed locks.
static inline void _rwlock_unlock_write_nodebug(rwlock_t *l)
{
    if (DebuggerMode && isManagedDuringDebugger(l)) {
        return;
    }
    int err __unused = pthread_rwlock_unlock(&l->rwl);
    assert(err == 0);
}

// Try to acquire for writing; true on success (debugger-mode as above).
static inline bool _rwlock_try_write_nodebug(rwlock_t *l)
{
    if (DebuggerMode && isManagedDuringDebugger(l)) {
        if (! isWritingDuringDebugger(l)) {
            gdb_objc_debuggerModeFailure();
        }
        return true;
    }
    int err = pthread_rwlock_trywrlock(&l->rwl);
    assert(err == 0 || err == EBUSY);
    return (err == 0);
}
863
864
// Mach-O structure types matching the current architecture's pointer size.
#ifndef __LP64__
typedef struct mach_header headerType;
typedef struct segment_command segmentType;
typedef struct section sectionType;
#else
typedef struct mach_header_64 headerType;
typedef struct segment_command_64 segmentType;
typedef struct section_64 sectionType;
#endif
// Macro argument now parenthesized: the old `hi->mhdr` form misparsed
// arguments such as `flag ? hi1 : hi2`.
#define headerIsBundle(hi) ((hi)->mhdr->filetype == MH_BUNDLE)
#define libobjc_header ((headerType *)&_mh_dylib_header)

// Prototypes

/* Secure /tmp usage */
extern int secure_open(const char *filename, int flags, uid_t euid);
881
882
883 #else
884
885
886 #error unknown OS
887
888
889 #endif
890
891 __END_DECLS
892
893 #endif