-#endif /* __APPLE_API_PRIVATE */
-
-#if !defined(MACH_KERNEL_PRIVATE)
-
-typedef struct __mutex__ mutex_t;
-
-#endif /* MACH_KERNEL_PRIVATE */
-
-extern mutex_t *mutex_alloc(
-                        etap_event_t    tag);
-
-extern void mutex_free(
-                        mutex_t         *mutex);
-
-extern void mutex_lock(
-                        mutex_t         *mutex);
-
-extern void mutex_unlock(
-                        mutex_t         *mutex);
-
-extern boolean_t mutex_try(
-                        mutex_t         *mutex);
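
A minimal usage sketch of the mutex interface declared above (not part of the original header); the ETAP_NO_TRACE tag value and the kernel context in which this would compile are assumptions:

    /* Hypothetical illustration only: allocate, use, and free a mutex.
     * ETAP_NO_TRACE is assumed to be a valid etap_event_t tag value. */
    static void
    example_mutex_usage(void)
    {
            mutex_t *m = mutex_alloc(ETAP_NO_TRACE);

            mutex_lock(m);
            /* ... critical section ... */
            mutex_unlock(m);

            if (mutex_try(m)) {     /* non-blocking attempt, TRUE if acquired */
                    mutex_unlock(m);
            }

            mutex_free(m);
    }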
-
-#ifdef __APPLE_API_PRIVATE
-
-#ifdef MACH_KERNEL_PRIVATE
-
-/*
- * The general lock structure. Provides for multiple readers,
- * upgrading from read to write, and sleeping until the lock
- * can be gained.
- *
- * On some architectures, assembly language code in the 'inline'
- * program fiddles the lock structures. It must be changed in
- * concert with the structure layout.
- *
- * Only the "interlock" field is used for hardware exclusion;
- * other fields are modified with normal instructions after
- * acquiring the interlock bit.
- */
-
-typedef struct {
-        decl_simple_lock_data(,interlock) /* "hardware" interlock field */
-        volatile unsigned int
-                read_count:16,          /* No. of accepted readers */
-                want_upgrade:1,         /* Read-to-write upgrade waiting */
-                want_write:1,           /* Writer is waiting, or
-                                           locked for write */
-                waiting:1,              /* Someone is sleeping on lock */
-                can_sleep:1;            /* Can attempts to lock go to sleep? */
-#if ETAP_LOCK_TRACE
-        union {                         /* Must be overlaid on the event_tablep */
-                struct event_table_chain event_table_chain;
-                struct {
-                        event_table_t     event_tablep; /* ptr to event table entry */
-                        start_data_node_t start_list;   /* linked list of start times
-                                                           and pcs */
-                } s;
-        } u;
-#endif /* ETAP_LOCK_TRACE */
-#if ETAP_LOCK_ACCUMULATE
-        cbuff_entry_t   cbuff_write;    /* write cumulative buffer entry */
-        cbuff_entry_t   cbuff_read;     /* read cumulative buffer entry */
-#endif /* ETAP_LOCK_ACCUMULATE */
-} lock_t;
-
-/* Sleep locks must work even if no multiprocessing */
-
-/*
- * Complex lock operations
- */
-
-#if ETAP
-/*
- * Locks have a pointer into an event_table entry that names the
- * corresponding lock event and controls whether it is being traced.
- * Initially this pointer is into a read-only table event_table_init[].
- * Once dynamic allocation becomes possible a modifiable copy of the table
- * is allocated and pointers are set to within this copy. The pointers
- * that were already in place at that point need to be switched to point
- * into the copy. To do this we overlay the event_table_chain structure
- * onto sufficiently-big elements of the various lock structures so we
- * can sweep down this list switching the pointers. The assumption is
- * that we will not want to enable tracing before this is done (which is
- * after all during kernel bootstrap, before any user tasks are launched).
- *
- * This is admittedly rather ugly but so were the alternatives:
- * - record the event_table pointers in a statically-allocated array
- * (dynamic allocation not yet being available) -- but there were
- * over 8000 of them;
- * - add a new link field to each lock structure;
- * - change pointers to array indices -- this adds quite a bit of
- * arithmetic to every lock operation that might be traced.
- */
-#define lock_event_table(lockp) ((lockp)->u.s.event_tablep)
-#define lock_start_hold_time(lockp) ((lockp)->u.s.start_hold_time)
-#endif /* ETAP_LOCK_TRACE */
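
A compact, self-contained sketch (not from the original sources) of the pointer-switching sweep described in the comment above, using simplified stand-in types rather than the real event_table_chain/event_table_t definitions:

    /* Hypothetical illustration only: once a writable copy of the event table
     * exists, walk the chain of registered locks and re-aim each pointer at
     * the matching entry in the copy. */
    struct event_table_entry {
            int event;                   /* placeholder for the real entry contents */
            int trace_enabled;
    };

    struct chain_node {
            struct chain_node        *next;          /* links each registered lock      */
            struct event_table_entry *event_tablep;  /* points into the read-only table */
    };

    static void
    switch_event_table_pointers(struct chain_node *head,
                                struct event_table_entry *ro_table,
                                struct event_table_entry *rw_copy)
    {
            struct chain_node *c;

            for (c = head; c != NULL; c = c->next)
                    c->event_tablep = &rw_copy[c->event_tablep - ro_table];
    }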
-
-extern void lock_init (lock_t*,
-                        boolean_t,
-                        etap_event_t,
-                        etap_event_t);
-
-#endif /* MACH_KERNEL_PRIVATE */
-
-extern unsigned int LockTimeOut; /* Standard lock timeout value */
-
-#endif /* __APPLE_API_PRIVATE */
-
-#if !defined(MACH_KERNEL_PRIVATE)
-
-typedef struct __lock__ lock_t;
-extern lock_t *lock_alloc(boolean_t, etap_event_t, etap_event_t);
-void lock_free(lock_t *);
-
-#endif /* MACH_KERNEL_PRIVATE */
-
-extern void lock_write (lock_t*);
-extern void lock_read (lock_t*);
-extern void lock_done (lock_t*);
-extern void lock_write_to_read (lock_t*);
-
-#define lock_read_done(l) lock_done(l)
-#define lock_write_done(l) lock_done(l)
-
-extern boolean_t lock_read_to_write (lock_t*); /* vm_map is only user */
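
A minimal usage sketch of the read-write lock interface above (not part of the original header). The return convention of lock_read_to_write(), TRUE meaning the upgrade failed and no lock is held on return, is assumed from the historical Mach implementation rather than stated in this header:

    /* Hypothetical illustration only: shared read, upgrade to write, downgrade
     * back to read.  Return-value semantics of lock_read_to_write() are an
     * assumption (TRUE == upgrade failed, read hold already dropped). */
    static void
    example_rw_lock_usage(lock_t *l)
    {
            lock_read(l);                   /* one of possibly many readers */
            /* ... inspect shared state ... */

            if (lock_read_to_write(l)) {
                    /* Upgrade failed and the read hold is gone; start over. */
                    lock_write(l);
            }
            /* ... modify shared state under the exclusive write hold ... */

            lock_write_to_read(l);          /* downgrade to a read hold */
            lock_read_done(l);              /* same as lock_done(l) */
    }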