/* Scrape artifact: gitweb header from git.saurik.com — apple/objc4.git,
   tag objc4-493.11, path runtime/objc-os.h. */
1 /*
2 * Copyright (c) 2007 Apple Inc. All Rights Reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 /***********************************************************************
25 * objc-os.h
26 * OS portability layer.
27 **********************************************************************/
28
29 #ifndef _OBJC_OS_H
30 #define _OBJC_OS_H
31
32 #include <TargetConditionals.h>
33
34 #if TARGET_OS_MAC
35
36 # ifndef __STDC_LIMIT_MACROS
37 # define __STDC_LIMIT_MACROS
38 # endif
39
40 # include <stdio.h>
41 # include <stdlib.h>
42 # include <stdint.h>
43 # include <stdarg.h>
44 # include <string.h>
45 # include <ctype.h>
46 # include <errno.h>
47 # include <dlfcn.h>
48 # include <fcntl.h>
49 # include <assert.h>
50 # include <limits.h>
51 # include <syslog.h>
52 # include <unistd.h>
53 # include <pthread.h>
54 # include <crt_externs.h>
55 # include <AssertMacros.h>
56 # undef check
57 # include <AvailabilityMacros.h>
58 # include <TargetConditionals.h>
59 # include <sys/mman.h>
60 # include <sys/time.h>
61 # include <sys/stat.h>
62 # include <sys/param.h>
63 # include <mach/mach.h>
64 # include <mach-o/dyld.h>
65 # include <mach-o/ldsyms.h>
66 # include <mach-o/loader.h>
67 # include <mach-o/getsect.h>
68 # include <mach-o/dyld_priv.h>
69 # include <malloc/malloc.h>
70 # include <libkern/OSAtomic.h>
71 # include <libkern/OSCacheControl.h>
72 # include <System/pthread_machdep.h>
73 # include "objc-probes.h" // generated dtrace probe definitions.
74
75 #define ARR_SPINLOCK_INIT 0
76 // XXX -- Careful: OSSpinLock isn't volatile, but should be
77 typedef volatile int ARRSpinLock;
// Acquire the spinlock, spinning briefly and then yielding the CPU.
__attribute__((always_inline))
static inline void ARRSpinLockLock(ARRSpinLock *l)
{
    unsigned y;
again:
    // Atomic test-and-set (acquire): old value 0 means the lock was free
    // and is now ours. __builtin_expect biases codegen toward success.
    if (__builtin_expect(__sync_lock_test_and_set(l, 1), 0) == 0) {
        return;
    }
    // Contended: poll (non-atomically) for release, up to 1000 iterations.
    for (y = 1000; y; y--) {
#if defined(__i386__) || defined(__x86_64__)
        asm("pause");   // ease pipeline/power cost of the spin on x86
#endif
        if (*l == 0) goto again;   // looks free; retry the test-and-set
    }
    // Still held after spinning: depress priority briefly (Mach
    // thread_switch, 1 unit) so the holder can run, then retry.
    thread_switch(THREAD_NULL, SWITCH_OPTION_DEPRESS, 1);
    goto again;
}
95 __attribute__((always_inline))
96 static inline void ARRSpinLockUnlock(ARRSpinLock *l)
97 {
98 __sync_lock_release(l);
99 }
100
101 #define OSSpinLock ARRSpinLock
102 #define OSSpinLockTry(l) __sync_bool_compare_and_swap(l, 0, 1)
103 #define OSSpinLockLock(l) ARRSpinLockLock(l)
104 #define OSSpinLockUnlock(l) ARRSpinLockUnlock(l)
105 #undef OS_SPINLOCK_INIT
106 #define OS_SPINLOCK_INIT ARR_SPINLOCK_INIT
107
108 #if !TARGET_OS_IPHONE
109 # include <CrashReporterClient.h>
110 #else
111 // CrashReporterClient not yet available on iOS
112 __BEGIN_DECLS
113 extern const char *CRSetCrashLogMessage(const char *msg);
114 extern const char *CRGetCrashLogMessage(void);
115 extern const char *CRSetCrashLogMessage2(const char *msg);
116 __END_DECLS
117 #endif
118
119 #if TARGET_IPHONE_SIMULATOR
120 // getsectiondata() and getsegmentdata() are unavailable
121 __BEGIN_DECLS
122 # define getsectiondata(m, s, n, c) objc_getsectiondata(m, s, n, c)
123 # define getsegmentdata(m, s, c) objc_getsegmentdata(m, s, c)
124 extern uint8_t *objc_getsectiondata(const struct mach_header *mh, const char *segname, const char *sectname, unsigned long *outSize);
125 extern uint8_t * objc_getsegmentdata(const struct mach_header *mh, const char *segname, unsigned long *outSize);
126 __END_DECLS
127 #endif
128
129 # if __cplusplus
130 # include <vector>
131 # include <algorithm>
132 using namespace std;
133 # include <ext/hash_map>
134 using namespace __gnu_cxx;
135 # endif
136
137 # define PRIVATE_EXTERN __attribute__((visibility("hidden")))
138 # undef __private_extern__
139 # define __private_extern__ use_PRIVATE_EXTERN_instead
140 # undef private_extern
141 # define private_extern use_PRIVATE_EXTERN_instead
142
143 /* Use this for functions that are intended to be breakpoint hooks.
144 If you do not, the compiler may optimize them away.
145 BREAKPOINT_FUNCTION( void stop_on_error(void) ); */
146 # define BREAKPOINT_FUNCTION(prototype) \
147 __attribute__((noinline, visibility("hidden"))) \
148 prototype { asm(""); }
149
150 #elif TARGET_OS_WIN32
151
152 # define WINVER 0x0501 // target Windows XP and later
153 # define _WIN32_WINNT 0x0501 // target Windows XP and later
154 # define WIN32_LEAN_AND_MEAN
155 // hack: windef.h typedefs BOOL as int
156 # define BOOL WINBOOL
157 # include <windows.h>
158 # undef BOOL
159
160 # include <stdio.h>
161 # include <stdlib.h>
162 # include <stdint.h>
163 # include <stdarg.h>
164 # include <string.h>
165 # include <assert.h>
166 # include <malloc.h>
167 # include <AvailabilityMacros.h>
168
169 # if __cplusplus
170 # include <vector>
171 # include <algorithm>
172 using namespace std;
173 # include <hash_map>
174 using namespace stdext;
175 # define __BEGIN_DECLS extern "C" {
176 # define __END_DECLS }
177 # else
178 # define __BEGIN_DECLS /*empty*/
179 # define __END_DECLS /*empty*/
180 # endif
181
182 # define PRIVATE_EXTERN
183 # define __attribute__(x)
184 # define inline __inline
185
186 /* Use this for functions that are intended to be breakpoint hooks.
187 If you do not, the compiler may optimize them away.
188 BREAKPOINT_FUNCTION( void MyBreakpointFunction(void) ); */
189 # define BREAKPOINT_FUNCTION(prototype) \
190 __declspec(noinline) prototype { __asm { } }
191
192 /* stub out dtrace probes */
193 # define OBJC_RUNTIME_OBJC_EXCEPTION_RETHROW() do {} while(0)
194 # define OBJC_RUNTIME_OBJC_EXCEPTION_THROW(arg0) do {} while(0)
195
196 #else
197 # error unknown OS
198 #endif
199
200
201 #include <objc/objc.h>
202 #include <objc/objc-api.h>
203
204 __BEGIN_DECLS
205
206 extern void _objc_fatal(const char *fmt, ...) __attribute__((noreturn, format (printf, 1, 2)));
207
208 #define INIT_ONCE_PTR(var, create, delete) \
209 do { \
210 if (var) break; \
211 typeof(var) v = create; \
212 while (!var) { \
213 if (OSAtomicCompareAndSwapPtrBarrier(0, (void*)v, (void**)&var)){ \
214 goto done; \
215 } \
216 } \
217 delete; \
218 done:; \
219 } while (0)
220
221 #define INIT_ONCE_32(var, create, delete) \
222 do { \
223 if (var) break; \
224 typeof(var) v = create; \
225 while (!var) { \
226 if (OSAtomicCompareAndSwap32Barrier(0, v, (volatile int32_t *)&var)) { \
227 goto done; \
228 } \
229 } \
230 delete; \
231 done:; \
232 } while (0)
233
234
235 // Thread keys reserved by libc for our use.
236 // Keys [0..4] are used by autozone.
237 #if defined(__PTK_FRAMEWORK_OBJC_KEY5)
238 # define SUPPORT_DIRECT_THREAD_KEYS 1
239 # define TLS_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY5)
240 # define SYNC_DATA_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY6)
241 # define SYNC_COUNT_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY7)
242 # define AUTORELEASE_POOL_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY8)
243 # if SUPPORT_RETURN_AUTORELEASE
244 # define AUTORELEASE_POOL_RECLAIM_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY9)
245 # endif
246 #else
247 # define SUPPORT_DIRECT_THREAD_KEYS 0
248 #endif
249
250
251 #if TARGET_OS_WIN32
252
253 // Compiler compatibility
254
255 // OS compatibility
256
257 #define strdup _strdup
258
259 #define issetugid() 0
260
261 #define MIN(x, y) ((x) < (y) ? (x) : (y))
262
// BSD-style byte utilities implemented with the C standard library.
// Note bcopy's (src, dst) argument order — opposite to memcpy's.
static __inline void bcopy(const void *src, void *dst, size_t size) { memcpy(dst, src, size); }
static __inline void bzero(void *dst, size_t size) { memset(dst, 0, size); }
265
266 int asprintf(char **dstp, const char *format, ...);
267
// malloc-zone compatibility shims: Windows has no malloc zones, so a zone
// is an opaque non-NULL token and every call falls through to the CRT heap.
typedef void * malloc_zone_t;

// The single "default zone" token; never dereferenced.
static __inline malloc_zone_t malloc_default_zone(void) { return (malloc_zone_t)-1; }
static __inline void *malloc_zone_malloc(malloc_zone_t z, size_t size) { return malloc(size); }
// Parameter order matches Apple's malloc_zone_calloc(zone, num_items, size).
// (The previous names had the two counts swapped, though calloc's product
// made the behavior identical.)
static __inline void *malloc_zone_calloc(malloc_zone_t z, size_t num_items, size_t size) { return calloc(num_items, size); }
static __inline void *malloc_zone_realloc(malloc_zone_t z, void *p, size_t size) { return realloc(p, size); }
static __inline void malloc_zone_free(malloc_zone_t z, void *p) { free(p); }
static __inline malloc_zone_t malloc_zone_from_ptr(const void *p) { return (malloc_zone_t)-1; }
static __inline size_t malloc_size(const void *p) { return _msize((void*)p); /* fixme invalid pointer check? */ }
277
278
279 // AssertMacros
280
281 #define require_action_string(cond, dest, act, msg) do { if (!(cond)) { { act; } goto dest; } } while (0)
282 #define require_noerr_string(err, dest, msg) do { if (err) goto dest; } while (0)
283 #define require_string(cond, dest, msg) do { if (!(cond)) goto dest; } while (0)
284
285
286 // OSAtomic
287
// OSAtomic compatibility shims built on Win32 Interlocked operations
// (which are full barriers).

// Compare-and-swap for long: true iff *dst was oldl and is now newl.
static __inline BOOL OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst)
{
    // fixme barrier is overkill
    long original = InterlockedCompareExchange(dst, newl, oldl);
    return (original == oldl);
}

// Compare-and-swap for pointers, with barrier.
static __inline BOOL OSAtomicCompareAndSwapPtrBarrier(void *oldp, void *newp, void * volatile *dst)
{
    void *original = InterlockedCompareExchangePointer(dst, newp, oldp);
    return (original == oldp);
}

// Compare-and-swap for int32_t, with barrier.
static __inline BOOL OSAtomicCompareAndSwap32Barrier(int32_t oldl, int32_t newl, int32_t volatile *dst)
{
    long original = InterlockedCompareExchange((volatile long *)dst, newl, oldl);
    return (original == oldl);
}

// Atomic decrement; returns the resulting value.
static __inline int32_t OSAtomicDecrement32Barrier(volatile int32_t *dst)
{
    return InterlockedDecrement((volatile long *)dst);
}

// Atomic increment; returns the resulting value.
static __inline int32_t OSAtomicIncrement32Barrier(volatile int32_t *dst)
{
    return InterlockedIncrement((volatile long *)dst);
}
316
317
318 // Internal data types
319
// A thread is identified by its Win32 thread ID.
typedef DWORD objc_thread_t; // thread ID
static __inline int thread_equal(objc_thread_t t1, objc_thread_t t2) {
    return t1 == t2;
}
static __inline objc_thread_t thread_self(void) {
    return GetCurrentThreadId();
}
327
// Thread-local storage key: a Win32 TLS slot plus the destructor that
// should run on thread detach (not yet wired up — see fixme below).
typedef struct {
    DWORD key;
    void (*dtor)(void *);
} tls_key_t;
// Allocate a new TLS key with the given per-thread destructor.
static __inline tls_key_t tls_create(void (*dtor)(void*)) {
    // fixme need dtor registry for DllMain to call on thread detach
    tls_key_t k;
    k.key = TlsAlloc();
    k.dtor = dtor;
    return k;
}
// Read the calling thread's value for key k.
static __inline void *tls_get(tls_key_t k) {
    return TlsGetValue(k.key);
}
// Set the calling thread's value for key k.
static __inline void tls_set(tls_key_t k, void *value) {
    TlsSetValue(k.key, value);
}
345
// Mutex: a lazily-created Win32 critical section.
typedef struct {
    CRITICAL_SECTION *lock;
} mutex_t;
#define MUTEX_INITIALIZER {0};
extern void mutex_init(mutex_t *m);
// Lock, creating the critical section on first use.
// NOTE(review): the lazy mutex_init below is itself unsynchronized; two
// threads hitting an uninitialized mutex concurrently could both init it.
static __inline int _mutex_lock_nodebug(mutex_t *m) {
    // fixme error check
    if (!m->lock) {
        mutex_init(m);
    }
    EnterCriticalSection(m->lock);
    return 0;
}
// Try-lock; returns nonzero if the lock was acquired.
static __inline int _mutex_try_lock_nodebug(mutex_t *m) {
    // fixme error check
    if (!m->lock) {
        mutex_init(m);
    }
    return TryEnterCriticalSection(m->lock);
}
// Unlock. Caller must hold the lock.
static __inline int _mutex_unlock_nodebug(mutex_t *m) {
    // fixme error check
    LeaveCriticalSection(m->lock);
    return 0;
}
371
372
373 typedef mutex_t OSSpinLock;
374 #define OSSpinLockLock(l) mutex_lock(l)
375 #define OSSpinLockUnlock(l) mutex_unlock(l)
376 #define OS_SPINLOCK_INIT MUTEX_INITIALIZER
377
378
// Recursive lock: a Win32 mutex HANDLE (re-enterable by its owner).
typedef struct {
    HANDLE mutex;
} recursive_mutex_t;
#define RECURSIVE_MUTEX_INITIALIZER {0};
#define RECURSIVE_MUTEX_NOT_LOCKED 1
extern void recursive_mutex_init(recursive_mutex_t *m);
// Lock (blocking). Must have been set up by recursive_mutex_init.
static __inline int _recursive_mutex_lock_nodebug(recursive_mutex_t *m) {
    assert(m->mutex);
    return WaitForSingleObject(m->mutex, INFINITE);
}
// Try-lock; returns nonzero if the lock was acquired.
static __inline int _recursive_mutex_try_lock_nodebug(recursive_mutex_t *m) {
    assert(m->mutex);
    return (WAIT_OBJECT_0 == WaitForSingleObject(m->mutex, 0));
}
// Unlock; returns RECURSIVE_MUTEX_NOT_LOCKED if ReleaseMutex fails
// (e.g. the caller did not own the mutex).
static __inline int _recursive_mutex_unlock_nodebug(recursive_mutex_t *m) {
    assert(m->mutex);
    return ReleaseMutex(m->mutex) ? 0 : RECURSIVE_MUTEX_NOT_LOCKED;
}
397
398
399 /*
400 typedef HANDLE mutex_t;
401 static inline void mutex_init(HANDLE *m) { *m = CreateMutex(NULL, FALSE, NULL); }
402 static inline void _mutex_lock(mutex_t *m) { WaitForSingleObject(*m, INFINITE); }
403 static inline int mutex_try_lock(mutex_t *m) { return WaitForSingleObject(*m, 0) == WAIT_OBJECT_0; }
404 static inline void _mutex_unlock(mutex_t *m) { ReleaseMutex(*m); }
405 */
406
407 // based on http://www.cs.wustl.edu/~schmidt/win32-cv-1.html
408 // Vista-only CONDITION_VARIABLE would be better
// Monitor (mutex + condition variable) emulated with Win32 primitives.
typedef struct {
    HANDLE mutex;
    HANDLE waiters;      // semaphore for those in cond_wait()
    HANDLE waitersDone;  // auto-reset event after everyone gets a broadcast
    CRITICAL_SECTION waitCountLock;  // guards waitCount and didBroadcast
    unsigned int waitCount;
    int didBroadcast;
} monitor_t;
#define MONITOR_INITIALIZER { 0 }
#define MONITOR_NOT_ENTERED 1
extern int monitor_init(monitor_t *c);

// Enter the monitor, lazily initializing it on first use.
static inline int _monitor_enter_nodebug(monitor_t *c) {
    if (!c->mutex) {
        int err = monitor_init(c);
        if (err) return err;
    }
    return WaitForSingleObject(c->mutex, INFINITE);
}
// Leave the monitor; fails if ReleaseMutex fails (caller not the owner).
static inline int _monitor_exit_nodebug(monitor_t *c) {
    if (!ReleaseMutex(c->mutex)) return MONITOR_NOT_ENTERED;
    else return 0;
}
// Wait: release the mutex and block on the waiters semaphore in one atomic
// step, then re-acquire the mutex before returning.
static inline int _monitor_wait_nodebug(monitor_t *c) {
    int last;
    // Register as a waiter before releasing the mutex.
    EnterCriticalSection(&c->waitCountLock);
    c->waitCount++;
    LeaveCriticalSection(&c->waitCountLock);

    // Atomically release the mutex and wait to be notified.
    SignalObjectAndWait(c->mutex, c->waiters, INFINITE, FALSE);

    EnterCriticalSection(&c->waitCountLock);
    c->waitCount--;
    // The last waiter woken by a broadcast must wake the broadcaster.
    last = c->didBroadcast && c->waitCount == 0;
    LeaveCriticalSection(&c->waitCountLock);

    if (last) {
        // tell broadcaster that all waiters have awoken
        SignalObjectAndWait(c->waitersDone, c->mutex, INFINITE, FALSE);
    } else {
        WaitForSingleObject(c->mutex, INFINITE);
    }

    // fixme error checking
    return 0;
}
// Wake one waiter, if any are blocked in _monitor_wait_nodebug.
static inline int monitor_notify(monitor_t *c) {
    int haveWaiters;

    EnterCriticalSection(&c->waitCountLock);
    haveWaiters = c->waitCount > 0;
    LeaveCriticalSection(&c->waitCountLock);

    if (haveWaiters) {
        ReleaseSemaphore(c->waiters, 1, 0);
    }

    // fixme error checking
    return 0;
}
// Wake every current waiter, then wait until all of them have resumed.
static inline int monitor_notifyAll(monitor_t *c) {
    EnterCriticalSection(&c->waitCountLock);
    if (c->waitCount == 0) {
        LeaveCriticalSection(&c->waitCountLock);
        return 0;
    }
    // Release all current waiters in one shot.
    c->didBroadcast = 1;
    ReleaseSemaphore(c->waiters, c->waitCount, 0);
    LeaveCriticalSection(&c->waitCountLock);

    // fairness: wait for everyone to move from waiters to mutex
    WaitForSingleObject(c->waitersDone, INFINITE);
    // not under waitCountLock, but still under mutex
    c->didBroadcast = 0;

    // fixme error checking
    return 0;
}
487
488
489 // fixme no rwlock yet
490
491 #define rwlock_t mutex_t
492 #define rwlock_init(r) mutex_init(r)
493 #define _rwlock_read_nodebug(m) _mutex_lock_nodebug(m)
494 #define _rwlock_write_nodebug(m) _mutex_lock_nodebug(m)
495 #define _rwlock_try_read_nodebug(m) _mutex_try_lock_nodebug(m)
496 #define _rwlock_try_write_nodebug(m) _mutex_try_lock_nodebug(m)
497 #define _rwlock_unlock_read_nodebug(m) _mutex_unlock_nodebug(m)
498 #define _rwlock_unlock_write_nodebug(m) _mutex_unlock_nodebug(m)
499
500
501 typedef struct {
502 struct objc_module **modules;
503 size_t moduleCount;
504 struct old_protocol **protocols;
505 size_t protocolCount;
506 void *imageinfo;
507 size_t imageinfoBytes;
508 SEL *selrefs;
509 size_t selrefCount;
510 struct objc_class **clsrefs;
511 size_t clsrefCount;
512 TCHAR *moduleName;
513 } os_header_info;
514
515 typedef IMAGE_DOS_HEADER headerType;
516 // fixme YES bundle? NO bundle? sometimes?
517 #define headerIsBundle(hi) YES
518 OBJC_EXTERN IMAGE_DOS_HEADER __ImageBase;
519 #define libobjc_header ((headerType *)&__ImageBase)
520
521 // Prototypes
522
523
524 #elif TARGET_OS_MAC
525
526
527 // OS headers
528 #include <mach-o/loader.h>
529 #ifndef __LP64__
530 # define SEGMENT_CMD LC_SEGMENT
531 #else
532 # define SEGMENT_CMD LC_SEGMENT_64
533 #endif
534
535 #ifndef VM_MEMORY_OBJC_DISPATCHERS
536 # define VM_MEMORY_OBJC_DISPATCHERS 0
537 #endif
538
539
540 // Compiler compatibility
541
542 // OS compatibility
543
544 // Internal data types
545
// A thread is identified by its pthread handle.
typedef pthread_t objc_thread_t;

static __inline int thread_equal(objc_thread_t t1, objc_thread_t t2) {
    return pthread_equal(t1, t2);
}
static __inline objc_thread_t thread_self(void) {
    return pthread_self();
}
554
555
// Thread-local storage keys are plain pthread keys.
typedef pthread_key_t tls_key_t;

// Create a key whose per-thread value is destroyed with dtor at thread exit.
// NOTE(review): pthread_key_create's return value is ignored here.
static inline tls_key_t tls_create(void (*dtor)(void*)) {
    tls_key_t k;
    pthread_key_create(&k, dtor);
    return k;
}
static inline void *tls_get(tls_key_t k) {
    return pthread_getspecific(k);
}
static inline void tls_set(tls_key_t k, void *value) {
    pthread_setspecific(k, value);
}
569
#if SUPPORT_DIRECT_THREAD_KEYS
// Fast-path TLS accessors for the reserved __PTK_FRAMEWORK_OBJC keys.
// Only the @synchronized keys are expected here (enforced by the asserts).
static inline void *tls_get_direct(tls_key_t k)
{
    assert(k == SYNC_DATA_DIRECT_KEY ||
           k == SYNC_COUNT_DIRECT_KEY);

    if (_pthread_has_direct_tsd()) {
        // Read the TSD slot directly, bypassing pthread_getspecific.
        return _pthread_getspecific_direct(k);
    } else {
        return pthread_getspecific(k);
    }
}
static inline void tls_set_direct(tls_key_t k, void *value)
{
    assert(k == SYNC_DATA_DIRECT_KEY ||
           k == SYNC_COUNT_DIRECT_KEY);

    if (_pthread_has_direct_tsd()) {
        _pthread_setspecific_direct(k, value);
    } else {
        pthread_setspecific(k, value);
    }
}
#endif
594
595
596 typedef pthread_mutex_t mutex_t;
597 #define MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER;
598
599 extern int DebuggerMode;
600 extern void gdb_objc_debuggerModeFailure(void);
601 extern BOOL isManagedDuringDebugger(void *lock);
602 extern BOOL isLockedDuringDebugger(void *lock);
603
// Lock m. In debugger mode, a debugger-managed lock must already be held
// by the stopped process; otherwise the debugger session has failed.
static inline int _mutex_lock_nodebug(mutex_t *m) {
    if (DebuggerMode && isManagedDuringDebugger(m)) {
        if (! isLockedDuringDebugger(m)) {
            gdb_objc_debuggerModeFailure();
        }
        return 0;
    }
    return pthread_mutex_lock(m);
}
// Try-lock; returns nonzero on success.
static inline int _mutex_try_lock_nodebug(mutex_t *m) {
    if (DebuggerMode && isManagedDuringDebugger(m)) {
        if (! isLockedDuringDebugger(m)) {
            gdb_objc_debuggerModeFailure();
        }
        return 1;
    }
    return !pthread_mutex_trylock(m);
}
// Unlock; no-op for debugger-managed locks while in debugger mode.
static inline int _mutex_unlock_nodebug(mutex_t *m) {
    if (DebuggerMode && isManagedDuringDebugger(m)) {
        return 0;
    }
    return pthread_mutex_unlock(m);
}
628
629
// Recursive mutex: a pointer to a pthread mutex initialized out-of-line
// by recursive_mutex_init.
typedef struct {
    pthread_mutex_t *mutex;
} recursive_mutex_t;
#define RECURSIVE_MUTEX_INITIALIZER {0};
#define RECURSIVE_MUTEX_NOT_LOCKED EPERM
extern void recursive_mutex_init(recursive_mutex_t *m);

// Lock. In debugger mode, fails the session if the managed lock is not
// already held.
static inline int _recursive_mutex_lock_nodebug(recursive_mutex_t *m) {
    assert(m->mutex);
    if (DebuggerMode && isManagedDuringDebugger(m)) {
        if (! isLockedDuringDebugger((mutex_t *)m)) {
            gdb_objc_debuggerModeFailure();
        }
        return 0;
    }
    return pthread_mutex_lock(m->mutex);
}
// Try-lock; returns nonzero on success.
static inline int _recursive_mutex_try_lock_nodebug(recursive_mutex_t *m) {
    assert(m->mutex);
    if (DebuggerMode && isManagedDuringDebugger(m)) {
        if (! isLockedDuringDebugger((mutex_t *)m)) {
            gdb_objc_debuggerModeFailure();
        }
        return 1;
    }
    return !pthread_mutex_trylock(m->mutex);
}
// Unlock; no-op for debugger-managed locks while in debugger mode.
static inline int _recursive_mutex_unlock_nodebug(recursive_mutex_t *m) {
    assert(m->mutex);
    if (DebuggerMode && isManagedDuringDebugger(m)) {
        return 0;
    }
    return pthread_mutex_unlock(m->mutex);
}
664
665
// Monitor: a pthread mutex + condition variable pair.
typedef struct {
    pthread_mutex_t mutex;
    pthread_cond_t cond;
} monitor_t;
#define MONITOR_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER }
#define MONITOR_NOT_ENTERED EPERM

// Initialize both halves; tear the mutex down again if cond init fails.
static inline int monitor_init(monitor_t *c) {
    int err = pthread_mutex_init(&c->mutex, NULL);
    if (err) return err;
    err = pthread_cond_init(&c->cond, NULL);
    if (err) {
        pthread_mutex_destroy(&c->mutex);
        return err;
    }
    return 0;
}
static inline int _monitor_enter_nodebug(monitor_t *c) {
    // Monitors are never debugger-managed.
    assert(!isManagedDuringDebugger(c));
    return pthread_mutex_lock(&c->mutex);
}
static inline int _monitor_exit_nodebug(monitor_t *c) {
    return pthread_mutex_unlock(&c->mutex);
}
// Caller must hold the monitor's mutex.
static inline int _monitor_wait_nodebug(monitor_t *c) {
    return pthread_cond_wait(&c->cond, &c->mutex);
}
static inline int monitor_notify(monitor_t *c) {
    return pthread_cond_signal(&c->cond);
}
static inline int monitor_notifyAll(monitor_t *c) {
    return pthread_cond_broadcast(&c->cond);
}
699
700
701 // semaphore_create formatted for INIT_ONCE use
702 static inline semaphore_t create_semaphore(void)
703 {
704 semaphore_t sem;
705 kern_return_t k;
706 k = semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);
707 if (k) _objc_fatal("semaphore_create failed (0x%x)", k);
708 return sem;
709 }
710
711
712 /* Custom read-write lock
713 - reader is atomic add/subtract
714 - writer is pthread mutex plus atomic add/subtract
715 - fairness: new readers wait if a writer wants in
716 - fairness: when writer completes, readers (probably) precede new writer
717
718 state: xxxxxxxx xxxxxxxx yyyyyyyy yyyyyyyz
719 x: blocked reader count
720 y: active reader count
721 z: readers allowed flag
722 */
typedef struct {
    volatile int32_t state;       // packed reader counts + readers-allowed flag (see layout above)
    semaphore_t readersDone;      // signaled by the last reader out while a writer waits
    semaphore_t writerDone;       // signaled once per blocked reader when the writer exits
    pthread_mutex_t writerMutex;  // serializes writers
} rwlock_t;

extern BOOL isReadingDuringDebugger(rwlock_t *lock);
extern BOOL isWritingDuringDebugger(rwlock_t *lock);

// Initial state 1 == no readers active or blocked, readers-allowed flag set.
static inline void rwlock_init(rwlock_t *l)
{
    l->state = 1;
    l->readersDone = create_semaphore();
    l->writerDone = create_semaphore();
    l->writerMutex = (mutex_t)MUTEX_INITIALIZER;
}
740
// Acquire the lock for reading (blocking).
static inline void _rwlock_read_nodebug(rwlock_t *l)
{
    if (DebuggerMode && isManagedDuringDebugger(l)) {
        if (! isReadingDuringDebugger(l)) {
            gdb_objc_debuggerModeFailure();
        }
        return;
    }
    while (1) {
        // Increment "blocked readers" or "active readers" count.
        int32_t old = l->state;
        if (old % 2 == 1) {
            // Readers OK. Increment active reader count.
            if (OSAtomicCompareAndSwap32Barrier(old, old + 2, &l->state)) {
                // Success. Read lock acquired.
                return;
            } else {
                // CAS failed (writer or another reader). Redo from start.
            }
        }
        else {
            // Readers not OK. Increment blocked reader count.
            if (OSAtomicCompareAndSwap32(old, old + 0x10000, &l->state)) {
                // Success. Wait for writer to complete, then retry.
                semaphore_wait(l->writerDone);
            } else {
                // CAS failed (writer or another reader). Redo from start.
            }
        }
    }
}
772
// Drop a read lock; wake a waiting writer if we were the last reader out.
static inline void _rwlock_unlock_read_nodebug(rwlock_t *l)
{
    if (DebuggerMode && isManagedDuringDebugger(l)) {
        return;
    }
    // Decrement "active readers" count.
    int32_t newState = OSAtomicAdd32Barrier(-2, &l->state);
    if ((newState & 0xffff) == 0) {
        // No active readers, and readers OK flag is clear.
        // We're the last reader out and there's a writer waiting. Wake it.
        semaphore_signal(l->readersDone);
    }
}
786
787
// Try to acquire a read lock without blocking; returns 1 on success.
static inline int _rwlock_try_read_nodebug(rwlock_t *l)
{
    int i;
    if (DebuggerMode && isManagedDuringDebugger(l)) {
        if (! isReadingDuringDebugger(l)) {
            gdb_objc_debuggerModeFailure();
        }
        return 1;
    }
    // Retry a bounded number of times so CAS contention with other
    // readers doesn't cause a spurious failure.
    for (i = 0; i < 16; i++) {
        int32_t old = l->state;
        if (old % 2 != 1) {
            // Readers not OK. Fail.
            return 0;
        } else {
            // Readers OK.
            if (OSAtomicCompareAndSwap32Barrier(old, old + 2, &l->state)) {
                // Success. Read lock acquired.
                return 1;
            } else {
                // CAS failed (writer or another reader). Redo from start.
                // trylock will fail against writer,
                // but retry a few times against reader.
            }
        }
    }

    // Too many retries. Give up.
    return 0;
}
818
819
// Acquire the write lock (blocking).
static inline void _rwlock_write_nodebug(rwlock_t *l)
{
    if (DebuggerMode && isManagedDuringDebugger(l)) {
        if (! isWritingDuringDebugger(l)) {
            gdb_objc_debuggerModeFailure();
        }
        return;
    }

    // Only one writer allowed at a time.
    pthread_mutex_lock(&l->writerMutex);

    // Clear "readers OK" bit and "blocked readers" count.
    int32_t newState = OSAtomicAnd32(0x0000fffe, (uint32_t *)&l->state);

    if (newState == 0) {
        // No "active readers". Success.
        OSMemoryBarrier();
    } else {
        // Wait for "active readers" to complete.
        semaphore_wait(l->readersDone);
    }
}
843
// Release the write lock and unblock readers that queued up behind us.
static inline void _rwlock_unlock_write_nodebug(rwlock_t *l)
{
    if (DebuggerMode && isManagedDuringDebugger(l)) {
        return;
    }

    // Reinstate "readers OK" bit and clear reader counts.
    int32_t oldState;
    do {
        oldState = l->state;
    } while (!OSAtomicCompareAndSwap32Barrier(oldState, 0x1, &l->state));

    // Unblock any "blocked readers" that arrived while we held the lock
    oldState = oldState >> 16;
    while (oldState--) {
        semaphore_signal(l->writerDone);
    }

    // Allow a new writer.
    pthread_mutex_unlock(&l->writerMutex);
}
865
// Try to acquire the write lock without blocking; returns 1 on success.
static inline int _rwlock_try_write_nodebug(rwlock_t *l)
{
    if (DebuggerMode && isManagedDuringDebugger(l)) {
        if (! isWritingDuringDebugger(l)) {
            gdb_objc_debuggerModeFailure();
        }
        return 1;
    }

    if (pthread_mutex_trylock(&l->writerMutex)) {
        // Some other writer is in the way - fail
        return 0;
    }

    // Similar to _rwlock_write_nodebug, but less intrusive with readers active

    int32_t oldState, newState;
    oldState = l->state;
    newState = oldState & 0x0000fffe;
    if (newState != 0) {
        // Readers active. Give up.
        pthread_mutex_unlock(&l->writerMutex);
        return 0;
    }
    if (!OSAtomicCompareAndSwap32Barrier(oldState, newState, &l->state)) {
        // CAS failed (reader interrupted). Give up.
        pthread_mutex_unlock(&l->writerMutex);
        return 0;
    }

    return 1;
}
898
899
900 #ifndef __LP64__
901 typedef struct mach_header headerType;
902 typedef struct segment_command segmentType;
903 typedef struct section sectionType;
904 #else
905 typedef struct mach_header_64 headerType;
906 typedef struct segment_command_64 segmentType;
907 typedef struct section_64 sectionType;
908 #endif
909 #define headerIsBundle(hi) (hi->mhdr->filetype == MH_BUNDLE)
910 #define libobjc_header ((headerType *)&_mh_dylib_header)
911
912 typedef struct {
913 Dl_info dl_info;
914 #if !__OBJC2__
915 struct old_protocol **proto_refs;
916 #endif
917 } os_header_info;
918
919 // Prototypes
920
921 /* Secure /tmp usage */
922 extern int secure_open(const char *filename, int flags, uid_t euid);
923
924
925 #else
926
927
928 #error unknown OS
929
930
931 #endif
932
933 __END_DECLS
934
935 #endif