2 * Copyright (c) 2007 Apple Inc. All Rights Reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
24 /***********************************************************************
26 * OS portability layer.
27 **********************************************************************/
32 #include <TargetConditionals.h>
50 # include <crt_externs.h>
51 # include <AssertMacros.h>
52 # include <Block_private.h>
53 # include <AvailabilityMacros.h>
54 # include <TargetConditionals.h>
55 # include <sys/mman.h>
56 # include <sys/time.h>
57 # include <sys/stat.h>
58 # include <sys/param.h>
59 # include <mach/mach.h>
60 # include <mach-o/dyld.h>
61 # include <mach-o/ldsyms.h>
62 # include <mach-o/loader.h>
63 # include <mach-o/getsect.h>
64 # include <mach-o/dyld_priv.h>
65 # include <malloc/malloc.h>
66 # include <libkern/OSAtomic.h>
67 # include <libkern/OSCacheControl.h>
68 # include <System/pthread_machdep.h>
69 # include "objc-probes.h" // generated dtrace probe definitions.
75 # include <ext/hash_map>
76 using namespace __gnu_cxx
;
81 # define WINVER 0x0501 // target Windows XP and later
82 # define _WIN32_WINNT 0x0501 // target Windows XP and later
83 # define WIN32_LEAN_AND_MEAN
84 // hack: windef.h typedefs BOOL as int
96 # include <AvailabilityMacros.h>
100 # include <algorithm>
103 using namespace stdext
;
104 # define __BEGIN_DECLS extern "C" {
105 # define __END_DECLS }
107 # define __BEGIN_DECLS /*empty*/
108 # define __END_DECLS /*empty*/
111 # define __private_extern__
112 # define __attribute__(x)
113 # define inline __inline
115 /* stub out dtrace probes */
116 # define OBJC_RUNTIME_OBJC_EXCEPTION_RETHROW() do {} while(0)
117 # define OBJC_RUNTIME_OBJC_EXCEPTION_THROW(arg0) do {} while(0)
124 #include <objc/objc.h>
125 #include <objc/objc-api.h>
129 extern void _objc_fatal(const char *fmt
, ...) __attribute__((noreturn
, format (printf
, 1, 2)));
131 #define INIT_ONCE_PTR(var, create, delete) \
134 typeof(var) v = create; \
136 if (OSAtomicCompareAndSwapPtrBarrier(0, v, (void**)&var)) { \
144 #define INIT_ONCE_32(var, create, delete) \
147 typeof(var) v = create; \
149 if (OSAtomicCompareAndSwap32Barrier(0, v, (volatile int32_t *)&var)) { \
158 // Thread keys reserved by libc for our use.
159 // Keys [0..4] are used by autozone.
160 #if defined(__PTK_FRAMEWORK_OBJC_KEY5)
161 # define TLS_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY5)
162 # define SYNC_DATA_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY6)
163 # define SYNC_COUNT_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY7)
164 // define DIRECT_4_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY8)
165 // define DIRECT_5_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY9)
167 # define NO_DIRECT_THREAD_KEYS 1
173 // Compiler compatibility
177 #define strdup _strdup
179 #define issetugid() 0
181 #define MIN(x, y) ((x) < (y) ? (x) : (y))
// bcopy() shim for platforms without <strings.h>.
// POSIX specifies that bcopy handles overlapping src/dst, so forward to
// memmove; the previous memcpy forward is undefined behavior on overlap.
static __inline void bcopy(const void *src, void *dst, size_t size)
{
    memmove(dst, src, size);
}
/* bzero() shim for platforms without <strings.h>: zero-fill via memset. */
static __inline void bzero(void *dst, size_t size)
{
    memset(dst, 0, size);
}
186 int asprintf(char **dstp
, const char *format
, ...);
/* Minimal malloc_zone_* compatibility layer for platforms without
 * <malloc/malloc.h>. Zones are not real here: every call forwards to the
 * plain C heap, and the "default zone" is the sentinel value (void *)-1. */
typedef void * malloc_zone_t;

static __inline malloc_zone_t malloc_default_zone(void)
{
    /* Sentinel; no actual zone object exists on this platform. */
    return (malloc_zone_t)-1;
}

static __inline void *malloc_zone_malloc(malloc_zone_t z, size_t size)
{
    return malloc(size);
}

static __inline void *malloc_zone_calloc(malloc_zone_t z, size_t size, size_t count)
{
    return calloc(size, count);
}

static __inline void *malloc_zone_realloc(malloc_zone_t z, void *p, size_t size)
{
    return realloc(p, size);
}

static __inline void malloc_zone_free(malloc_zone_t z, void *p)
{
    free(p);
}

static __inline malloc_zone_t malloc_zone_from_ptr(const void *p)
{
    /* All pointers are reported as belonging to the sentinel default zone. */
    return (malloc_zone_t)-1;
}

/* Declared in <malloc.h> on Windows; declared here so malloc_size() does not
 * depend on an implicit function declaration (invalid since C99). */
extern size_t _msize(void *memblock);

static __inline size_t malloc_size(const void *p)
{
    return _msize((void*)p); /* fixme invalid pointer check? */
}
201 #define require_action_string(cond, dest, act, msg) do { if (!(cond)) { { act; } goto dest; } } while (0)
202 #define require_noerr_string(err, dest, msg) do { if (err) goto dest; } while (0)
203 #define require_string(cond, dest, msg) do { if (!(cond)) goto dest; } while (0)
208 static __inline BOOL
OSAtomicCompareAndSwapLong(long oldl
, long newl
, long volatile *dst
)
210 // fixme barrier is overkill
211 long original
= InterlockedCompareExchange(dst
, newl
, oldl
);
212 return (original
== oldl
);
215 static __inline BOOL
OSAtomicCompareAndSwapPtrBarrier(void *oldp
, void *newp
, void * volatile *dst
)
217 void *original
= InterlockedCompareExchangePointer(dst
, newp
, oldp
);
218 return (original
== oldp
);
221 static __inline BOOL
OSAtomicCompareAndSwap32Barrier(int32_t oldl
, int32_t newl
, int32_t volatile *dst
)
223 long original
= InterlockedCompareExchange((volatile long *)dst
, newl
, oldl
);
224 return (original
== oldl
);
// OSAtomic shim on Win32: atomically decrement *dst, returning the new value.
static __inline int32_t OSAtomicDecrement32Barrier(volatile int32_t *dst)
{
    return InterlockedDecrement((volatile long *)dst);
}
// OSAtomic shim on Win32: atomically increment *dst, returning the new value.
static __inline int32_t OSAtomicIncrement32Barrier(volatile int32_t *dst)
{
    return InterlockedIncrement((volatile long *)dst);
}
238 // Internal data types
240 typedef DWORD objc_thread_t
; // thread ID
241 static __inline
int thread_equal(objc_thread_t t1
, objc_thread_t t2
) {
244 static __inline objc_thread_t
thread_self(void) {
245 return GetCurrentThreadId();
250 void (*dtor
)(void *);
252 static __inline
void tls_create(tls_key_t
*k
, void (*dtor
)(void*)) {
253 // fixme need dtor registry for DllMain to call on thread detach
257 static __inline
void *tls_get(tls_key_t k
) {
258 return TlsGetValue(k
.key
);
260 static __inline
void tls_set(tls_key_t k
, void *value
) {
261 TlsSetValue(k
.key
, value
);
265 CRITICAL_SECTION
*lock
;
267 #define MUTEX_INITIALIZER {0};
268 extern void mutex_init(mutex_t
*m
);
269 static __inline
int _mutex_lock_nodebug(mutex_t
*m
) {
274 EnterCriticalSection(m
->lock
);
277 static __inline
int _mutex_try_lock_nodebug(mutex_t
*m
) {
282 return TryEnterCriticalSection(m
->lock
);
284 static __inline
int _mutex_unlock_nodebug(mutex_t
*m
) {
286 LeaveCriticalSection(m
->lock
);
291 typedef mutex_t OSSpinLock
;
292 #define OSSpinLockLock(l) mutex_lock(l)
293 #define OSSpinLockUnlock(l) mutex_unlock(l)
294 #define OS_SPINLOCK_INIT MUTEX_INITIALIZER
300 #define RECURSIVE_MUTEX_INITIALIZER {0};
301 #define RECURSIVE_MUTEX_NOT_LOCKED 1
302 extern void recursive_mutex_init(recursive_mutex_t
*m
);
303 static __inline
int _recursive_mutex_lock_nodebug(recursive_mutex_t
*m
) {
305 return WaitForSingleObject(m
->mutex
, INFINITE
);
307 static __inline
int _recursive_mutex_try_lock_nodebug(recursive_mutex_t
*m
) {
309 return (WAIT_OBJECT_0
== WaitForSingleObject(m
->mutex
, 0));
311 static __inline
int _recursive_mutex_unlock_nodebug(recursive_mutex_t
*m
) {
313 return ReleaseMutex(m
->mutex
) ? 0 : RECURSIVE_MUTEX_NOT_LOCKED
;
318 typedef HANDLE mutex_t;
319 static inline void mutex_init(HANDLE *m) { *m = CreateMutex(NULL, FALSE, NULL); }
320 static inline void _mutex_lock(mutex_t *m) { WaitForSingleObject(*m, INFINITE); }
321 static inline int mutex_try_lock(mutex_t *m) { return WaitForSingleObject(*m, 0) == WAIT_OBJECT_0; }
322 static inline void _mutex_unlock(mutex_t *m) { ReleaseMutex(*m); }
325 // based on http://www.cs.wustl.edu/~schmidt/win32-cv-1.html
326 // Vista-only CONDITION_VARIABLE would be better
329 HANDLE waiters
; // semaphore for those in cond_wait()
330 HANDLE waitersDone
; // auto-reset event after everyone gets a broadcast
331 CRITICAL_SECTION waitCountLock
; // guards waitCount and didBroadcast
332 unsigned int waitCount
;
335 #define MONITOR_INITIALIZER { 0 }
336 #define MONITOR_NOT_ENTERED 1
337 extern int monitor_init(monitor_t
*c
);
339 static inline int _monitor_enter_nodebug(monitor_t
*c
) {
341 int err
= monitor_init(c
);
344 return WaitForSingleObject(c
->mutex
, INFINITE
);
346 static inline int _monitor_exit_nodebug(monitor_t
*c
) {
347 if (!ReleaseMutex(c
->mutex
)) return MONITOR_NOT_ENTERED
;
350 static inline int _monitor_wait_nodebug(monitor_t
*c
) {
352 EnterCriticalSection(&c
->waitCountLock
);
354 LeaveCriticalSection(&c
->waitCountLock
);
356 SignalObjectAndWait(c
->mutex
, c
->waiters
, INFINITE
, FALSE
);
358 EnterCriticalSection(&c
->waitCountLock
);
360 last
= c
->didBroadcast
&& c
->waitCount
== 0;
361 LeaveCriticalSection(&c
->waitCountLock
);
364 // tell broadcaster that all waiters have awoken
365 SignalObjectAndWait(c
->waitersDone
, c
->mutex
, INFINITE
, FALSE
);
367 WaitForSingleObject(c
->mutex
, INFINITE
);
370 // fixme error checking
373 static inline int monitor_notify(monitor_t
*c
) {
376 EnterCriticalSection(&c
->waitCountLock
);
377 haveWaiters
= c
->waitCount
> 0;
378 LeaveCriticalSection(&c
->waitCountLock
);
381 ReleaseSemaphore(c
->waiters
, 1, 0);
384 // fixme error checking
387 static inline int monitor_notifyAll(monitor_t
*c
) {
388 EnterCriticalSection(&c
->waitCountLock
);
389 if (c
->waitCount
== 0) {
390 LeaveCriticalSection(&c
->waitCountLock
);
394 ReleaseSemaphore(c
->waiters
, c
->waitCount
, 0);
395 LeaveCriticalSection(&c
->waitCountLock
);
397 // fairness: wait for everyone to move from waiters to mutex
398 WaitForSingleObject(c
->waitersDone
, INFINITE
);
399 // not under waitCountLock, but still under mutex
402 // fixme error checking
407 // fixme no rwlock yet
409 #define rwlock_t mutex_t
410 #define rwlock_init(r) mutex_init(r)
411 #define _rwlock_read_nodebug(m) _mutex_lock_nodebug(m)
412 #define _rwlock_write_nodebug(m) _mutex_lock_nodebug(m)
413 #define _rwlock_try_read_nodebug(m) _mutex_try_lock_nodebug(m)
414 #define _rwlock_try_write_nodebug(m) _mutex_try_lock_nodebug(m)
415 #define _rwlock_unlock_read_nodebug(m) _mutex_unlock_nodebug(m)
416 #define _rwlock_unlock_write_nodebug(m) _mutex_unlock_nodebug(m)
420 struct objc_module
**modules
;
422 struct old_protocol
**protocols
;
423 size_t protocolCount
;
425 size_t imageinfoBytes
;
428 struct objc_class
**clsrefs
;
432 typedef IMAGE_DOS_HEADER headerType
;
433 // fixme YES bundle? NO bundle? sometimes?
434 #define headerIsBundle(hi) YES
435 OBJC_EXTERN IMAGE_DOS_HEADER __ImageBase
;
436 #define libobjc_header ((headerType *)&__ImageBase)
447 // Compiler compatibility
451 // Internal data types
// A thread identifier; pthreads on this branch.
typedef pthread_t objc_thread_t;

// Returns nonzero if t1 and t2 name the same thread.
static __inline int thread_equal(objc_thread_t t1, objc_thread_t t2) {
    return pthread_equal(t1, t2);
}

// The calling thread's identifier.
static __inline objc_thread_t thread_self(void) {
    return pthread_self();
}
// Thread-local storage key, backed by pthread thread-specific data.
typedef pthread_key_t tls_key_t;

// Create a TLS key. dtor (may be NULL) runs at thread exit on non-NULL values.
static inline void tls_create(tls_key_t *k, void (*dtor)(void*)) {
    pthread_key_create(k, dtor);
}

// The calling thread's value for key k (NULL if never set).
static inline void *tls_get(tls_key_t k) {
    return pthread_getspecific(k);
}

// Set the calling thread's value for key k.
static inline void tls_set(tls_key_t k, void *value) {
    pthread_setspecific(k, value);
}
475 #ifndef NO_DIRECT_THREAD_KEYS
476 static inline void *tls_get_direct(tls_key_t k
)
478 assert(k
== SYNC_DATA_DIRECT_KEY
||
479 k
== SYNC_COUNT_DIRECT_KEY
);
481 if (_pthread_has_direct_tsd()) {
482 return _pthread_getspecific_direct(k
);
484 return pthread_getspecific(k
);
487 static inline void tls_set_direct(tls_key_t k
, void *value
)
489 assert(k
== SYNC_DATA_DIRECT_KEY
||
490 k
== SYNC_COUNT_DIRECT_KEY
);
492 if (_pthread_has_direct_tsd()) {
493 _pthread_setspecific_direct(k
, value
);
495 pthread_setspecific(k
, value
);
501 typedef pthread_mutex_t mutex_t
;
502 #define MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER;
504 extern int DebuggerMode
;
505 extern void gdb_objc_debuggerModeFailure(void);
506 extern BOOL
isManagedDuringDebugger(void *lock
);
507 extern BOOL
isLockedDuringDebugger(mutex_t
*lock
);
509 static inline int _mutex_lock_nodebug(mutex_t
*m
) {
510 if (DebuggerMode
&& isManagedDuringDebugger(m
)) {
511 if (! isLockedDuringDebugger(m
)) {
512 gdb_objc_debuggerModeFailure();
516 return pthread_mutex_lock(m
);
518 static inline int _mutex_try_lock_nodebug(mutex_t
*m
) {
519 if (DebuggerMode
&& isManagedDuringDebugger(m
)) {
520 if (! isLockedDuringDebugger(m
)) {
521 gdb_objc_debuggerModeFailure();
525 return !pthread_mutex_trylock(m
);
527 static inline int _mutex_unlock_nodebug(mutex_t
*m
) {
528 if (DebuggerMode
&& isManagedDuringDebugger(m
)) {
531 return pthread_mutex_unlock(m
);
536 pthread_mutex_t
*mutex
;
538 #define RECURSIVE_MUTEX_INITIALIZER {0};
539 #define RECURSIVE_MUTEX_NOT_LOCKED EPERM
540 extern void recursive_mutex_init(recursive_mutex_t
*m
);
542 static inline int _recursive_mutex_lock_nodebug(recursive_mutex_t
*m
) {
544 if (DebuggerMode
&& isManagedDuringDebugger(m
)) {
545 if (! isLockedDuringDebugger((mutex_t
*)m
)) {
546 gdb_objc_debuggerModeFailure();
550 return pthread_mutex_lock(m
->mutex
);
552 static inline int _recursive_mutex_try_lock_nodebug(recursive_mutex_t
*m
) {
554 if (DebuggerMode
&& isManagedDuringDebugger(m
)) {
555 if (! isLockedDuringDebugger((mutex_t
*)m
)) {
556 gdb_objc_debuggerModeFailure();
560 return !pthread_mutex_trylock(m
->mutex
);
562 static inline int _recursive_mutex_unlock_nodebug(recursive_mutex_t
*m
) {
564 if (DebuggerMode
&& isManagedDuringDebugger(m
)) {
567 return pthread_mutex_unlock(m
->mutex
);
572 pthread_mutex_t mutex
;
575 #define MONITOR_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER }
576 #define MONITOR_NOT_ENTERED EPERM
578 static inline int monitor_init(monitor_t
*c
) {
579 int err
= pthread_mutex_init(&c
->mutex
, NULL
);
581 err
= pthread_cond_init(&c
->cond
, NULL
);
583 pthread_mutex_destroy(&c
->mutex
);
588 static inline int _monitor_enter_nodebug(monitor_t
*c
) {
589 assert(!isManagedDuringDebugger(c
));
590 return pthread_mutex_lock(&c
->mutex
);
592 static inline int _monitor_exit_nodebug(monitor_t
*c
) {
593 return pthread_mutex_unlock(&c
->mutex
);
595 static inline int _monitor_wait_nodebug(monitor_t
*c
) {
596 return pthread_cond_wait(&c
->cond
, &c
->mutex
);
598 static inline int monitor_notify(monitor_t
*c
) {
599 return pthread_cond_signal(&c
->cond
);
601 static inline int monitor_notifyAll(monitor_t
*c
) {
602 return pthread_cond_broadcast(&c
->cond
);
606 // semaphore_create formatted for INIT_ONCE use
607 static inline semaphore_t
create_semaphore(void)
611 k
= semaphore_create(mach_task_self(), &sem
, SYNC_POLICY_FIFO
, 0);
612 if (k
) _objc_fatal("semaphore_create failed (0x%x)", k
);
617 /* Custom read-write lock
618 - reader is atomic add/subtract
619 - writer is pthread mutex plus atomic add/subtract
620 - fairness: new readers wait if a writer wants in
621 - fairness: when writer completes, readers (probably) precede new writer
623 state: xxxxxxxx xxxxxxxx yyyyyyyy yyyyyyyz
624 x: blocked reader count
625 y: active reader count
626 z: readers allowed flag
629 volatile int32_t state
;
630 semaphore_t readersDone
;
631 semaphore_t writerDone
;
632 pthread_mutex_t writerMutex
;
635 extern BOOL
isReadingDuringDebugger(rwlock_t
*lock
);
636 extern BOOL
isWritingDuringDebugger(rwlock_t
*lock
);
638 static inline void rwlock_init(rwlock_t
*l
)
641 l
->readersDone
= create_semaphore();
642 l
->writerDone
= create_semaphore();
643 l
->writerMutex
= (mutex_t
)MUTEX_INITIALIZER
;
646 static inline void _rwlock_read_nodebug(rwlock_t
*l
)
648 if (DebuggerMode
&& isManagedDuringDebugger(l
)) {
649 if (! isReadingDuringDebugger(l
)) {
650 gdb_objc_debuggerModeFailure();
655 // Increment "blocked readers" or "active readers" count.
656 int32_t old
= l
->state
;
658 // Readers OK. Increment active reader count.
659 if (OSAtomicCompareAndSwap32Barrier(old
, old
+ 2, &l
->state
)) {
660 // Success. Read lock acquired.
663 // CAS failed (writer or another reader). Redo from start.
667 // Readers not OK. Increment blocked reader count.
668 if (OSAtomicCompareAndSwap32(old
, old
+ 0x10000, &l
->state
)) {
669 // Success. Wait for writer to complete, then retry.
670 semaphore_wait(l
->writerDone
);
672 // CAS failed (writer or another reader). Redo from start.
678 static inline void _rwlock_unlock_read_nodebug(rwlock_t
*l
)
680 if (DebuggerMode
&& isManagedDuringDebugger(l
)) {
683 // Decrement "active readers" count.
684 int32_t newState
= OSAtomicAdd32Barrier(-2, &l
->state
);
685 if ((newState
& 0xffff) == 0) {
686 // No active readers, and readers OK flag is clear.
687 // We're the last reader out and there's a writer waiting. Wake it.
688 semaphore_signal(l
->readersDone
);
693 static inline int _rwlock_try_read_nodebug(rwlock_t
*l
)
696 if (DebuggerMode
&& isManagedDuringDebugger(l
)) {
697 if (! isReadingDuringDebugger(l
)) {
698 gdb_objc_debuggerModeFailure();
702 for (i
= 0; i
< 16; i
++) {
703 int32_t old
= l
->state
;
705 // Readers not OK. Fail.
709 if (OSAtomicCompareAndSwap32Barrier(old
, old
+ 2, &l
->state
)) {
710 // Success. Read lock acquired.
713 // CAS failed (writer or another reader). Redo from start.
714 // trylock will fail against writer,
715 // but retry a few times against reader.
720 // Too many retries. Give up.
725 static inline void _rwlock_write_nodebug(rwlock_t
*l
)
727 if (DebuggerMode
&& isManagedDuringDebugger(l
)) {
728 if (! isWritingDuringDebugger(l
)) {
729 gdb_objc_debuggerModeFailure();
734 // Only one writer allowed at a time.
735 pthread_mutex_lock(&l
->writerMutex
);
737 // Clear "readers OK" bit and "blocked readers" count.
738 int32_t newState
= OSAtomicAnd32(0x0000fffe, (uint32_t *)&l
->state
);
741 // No "active readers". Success.
744 // Wait for "active readers" to complete.
745 semaphore_wait(l
->readersDone
);
749 static inline void _rwlock_unlock_write_nodebug(rwlock_t
*l
)
751 if (DebuggerMode
&& isManagedDuringDebugger(l
)) {
755 // Reinstate "readers OK" bit and clear reader counts.
759 } while (!OSAtomicCompareAndSwap32Barrier(oldState
, 0x1, &l
->state
));
761 // Unblock any "blocked readers" that arrived while we held the lock
762 oldState
= oldState
>> 16;
764 semaphore_signal(l
->writerDone
);
767 // Allow a new writer.
768 pthread_mutex_unlock(&l
->writerMutex
);
771 static inline int _rwlock_try_write_nodebug(rwlock_t
*l
)
773 if (DebuggerMode
&& isManagedDuringDebugger(l
)) {
774 if (! isWritingDuringDebugger(l
)) {
775 gdb_objc_debuggerModeFailure();
780 if (pthread_mutex_trylock(&l
->writerMutex
)) {
781 // Some other writer is in the way - fail
785 // Similar to _rwlock_write_nodebug, but less intrusive with readers active
787 int32_t oldState
, newState
;
789 newState
= oldState
& 0x0000fffe;
791 // Readers active. Give up.
792 pthread_mutex_unlock(&l
->writerMutex
);
795 if (!OSAtomicCompareAndSwap32Barrier(oldState
, newState
, &l
->state
)) {
796 // CAS failed (reader interupted). Give up.
797 pthread_mutex_unlock(&l
->writerMutex
);
806 typedef struct mach_header headerType
;
807 typedef struct segment_command segmentType
;
808 typedef struct section sectionType
;
810 typedef struct mach_header_64 headerType
;
811 typedef struct segment_command_64 segmentType
;
812 typedef struct section_64 sectionType
;
814 #define headerIsBundle(hi) (hi->mhdr->filetype == MH_BUNDLE)
815 #define libobjc_header ((headerType *)&_mh_dylib_header)
819 const segmentType
* objcSegmentHeader
;
820 const segmentType
* dataSegmentHeader
;
821 ptrdiff_t image_slide
;
823 struct old_protocol
**proto_refs
;
829 /* Secure /tmp usage */
830 extern int secure_open(const char *filename
, int flags
, uid_t euid
);