/*
 * Copyright (c) 2004-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef _I386_LOCKS_H_
#define _I386_LOCKS_H_

#include <sys/appleapiopts.h>
#include <kern/kern_types.h>

#ifdef MACH_KERNEL_PRIVATE

#include <i386/hw_lock_types.h>

extern unsigned int LcksOpts;
#if DEVELOPMENT || DEBUG
extern unsigned int LckDisablePreemptCheck;
#endif /* DEVELOPMENT || DEBUG */

#define enaLkDeb     0x00000001  /* Request debug in default attribute */
#define enaLkStat    0x00000002  /* Request statistic in default attribute */
#define disLkRWPrio  0x00000004  /* Disable RW lock priority promotion */
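/*
 * Illustrative sketch, not part of the original header: how the LcksOpts
 * option bits above might be consulted when choosing default lock behavior.
 * The helper name is hypothetical; only LcksOpts and the option bits above
 * come from this file.
 */
static inline unsigned int
lck_opts_debug_requested_sketch(void)
{
	/* nonzero when the options request debug in the default attribute */
	return LcksOpts & enaLkDeb;
}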
#endif /* MACH_KERNEL_PRIVATE */
#if defined(MACH_KERNEL_PRIVATE)

typedef struct {
	volatile uintptr_t  interlock;
#if MACH_LDEBUG
	unsigned long       lck_spin_pad[9];  /* XXX - usimple_lock_data_t */
#endif
} lck_spin_t;

#define LCK_SPIN_TAG_DESTROYED  0x00002007  /* lock marked as Destroyed */
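/*
 * Illustrative sketch, not part of the original header: a destroyed spin
 * lock is assumed to carry LCK_SPIN_TAG_DESTROYED in its interlock word so
 * that later misuse can be detected.  The helper name, and the assumption
 * about where the tag is stored, are hypothetical.
 */
static inline int
lck_spin_is_destroyed_sketch(lck_spin_t *lck)
{
	return lck->interlock == LCK_SPIN_TAG_DESTROYED;
}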
#else /* MACH_KERNEL_PRIVATE */

#ifdef KERNEL_PRIVATE
typedef struct {
	unsigned long  opaque[10];
} lck_spin_t;
#else /* KERNEL_PRIVATE */
typedef struct __lck_spin_t__ lck_spin_t;
#endif /* KERNEL_PRIVATE */

#endif /* MACH_KERNEL_PRIVATE */
#ifdef MACH_KERNEL_PRIVATE
/* The definition of this structure, including the layout of the
 * state bitfield, is tailored to the asm implementation in i386_lock.s
 */
typedef struct _lck_mtx_ {
	union {
		struct {
			volatile uintptr_t  lck_mtx_owner;
			uint32_t            lck_mtx_state;
			/* Pad field used as a canary, initialized to ~0 */
			uint32_t            lck_mtx_pad32;
		};
		struct {
			struct _lck_mtx_ext_  *lck_mtx_ptr;
			uint32_t              lck_mtx_tag;
			uint32_t              lck_mtx_pad32_2;
		};
	};
} lck_mtx_t;
/* This pattern must subsume the interlocked, mlocked and spin bits */
#define LCK_MTX_TAG_INDIRECT   0x07ff1007  /* lock marked as Indirect */
#define LCK_MTX_TAG_DESTROYED  0x07fe2007  /* lock marked as Destroyed */
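/*
 * Illustrative sketch, not part of the original header: because both tag
 * values keep the low state bits set (the interlocked/mlocked/spin pattern
 * noted above), a fast path that expects those bits to be clear treats an
 * indirect or destroyed mutex as busy and drops into the slow path, which
 * can then test for the tags explicitly.  The helper name is hypothetical.
 */
static inline int
lck_mtx_state_is_tagged_sketch(uint32_t state)
{
	return (state == LCK_MTX_TAG_INDIRECT) || (state == LCK_MTX_TAG_DESTROYED);
}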
/* Adaptive spin before blocking */
extern uint64_t MutexSpin;

extern int  lck_mtx_lock_spinwait_x86(lck_mtx_t *mutex);
extern void lck_mtx_lock_wait_x86(lck_mtx_t *mutex);
extern void lck_mtx_lock_acquire_x86(lck_mtx_t *mutex);
extern void lck_mtx_unlock_wakeup_x86(lck_mtx_t *mutex, int prior_lock_state);

extern void lck_mtx_lock_mark_destroyed(lck_mtx_t *mutex);
extern int  lck_mtx_lock_grab_mutex(lck_mtx_t *mutex);
extern void hw_lock_byte_init(volatile uint8_t *lock_byte);
extern void hw_lock_byte_lock(volatile uint8_t *lock_byte);
extern void hw_lock_byte_unlock(volatile uint8_t *lock_byte);
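/*
 * Illustrative sketch, not part of the original header: the expected
 * init / lock / unlock sequence for the byte-lock primitives declared
 * above.  The wrapper is hypothetical; only the hw_lock_byte_* routines
 * come from this file.
 */
static inline void
hw_lock_byte_usage_sketch(volatile uint8_t *lock_byte)
{
	hw_lock_byte_init(lock_byte);    /* byte starts out unlocked */
	hw_lock_byte_lock(lock_byte);    /* spin until the byte is acquired */
	/* ... critical section ... */
	hw_lock_byte_unlock(lock_byte);  /* release */
}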
typedef struct {
	unsigned int  type;
	unsigned int  pad4;
	vm_offset_t   pc;
	vm_offset_t   thread;
} lck_mtx_deb_t;

#define MUTEX_TAG  0x4d4d

typedef struct {
	unsigned int  lck_mtx_stat_data;
} lck_mtx_stat_t;
typedef struct _lck_mtx_ext_ {
	lck_mtx_t         lck_mtx;
	struct _lck_grp_  *lck_mtx_grp;
	unsigned int      lck_mtx_attr;
	unsigned int      lck_mtx_pad1;
	lck_mtx_deb_t     lck_mtx_deb;
	uint64_t          lck_mtx_stat;
	unsigned int      lck_mtx_pad2[2];
} lck_mtx_ext_t;
#define LCK_MTX_ATTR_DEBUG   0x1
#define LCK_MTX_ATTR_DEBUGb  0
#define LCK_MTX_ATTR_STAT    0x2
#define LCK_MTX_ATTR_STATb   1
#define LCK_MTX_EVENT(lck)        ((event_t)(((unsigned int*)(lck))+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)))
#define LCK_EVENT_TO_MUTEX(event) ((lck_mtx_t *)(uintptr_t)(((unsigned int *)(event)) - ((sizeof(lck_mtx_t)-1)/sizeof(unsigned int))))
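/*
 * Illustrative sketch, not part of the original header: LCK_MTX_EVENT and
 * LCK_EVENT_TO_MUTEX are intended to be inverses, so a waiter woken on the
 * event derived from a mutex can recover the mutex itself.  The helper name
 * is hypothetical.
 */
static inline lck_mtx_t *
lck_mtx_event_roundtrip_sketch(lck_mtx_t *lck)
{
	event_t event = LCK_MTX_EVENT(lck);  /* event used for wait/wakeup */
	return LCK_EVENT_TO_MUTEX(event);    /* recovers the original mutex */
}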
#else /* MACH_KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE
typedef struct {
	unsigned long  opaque[2];
} lck_mtx_t;

typedef struct {
	unsigned long  opaque[10];
} lck_mtx_ext_t;
#else
#ifdef KERNEL_PRIVATE
typedef struct {
	unsigned long  opaque[2];
} lck_mtx_t;

typedef struct {
	unsigned long  opaque[10];
} lck_mtx_ext_t;
#else
typedef struct __lck_mtx_t__      lck_mtx_t;
typedef struct __lck_mtx_ext_t__  lck_mtx_ext_t;
#endif /* KERNEL_PRIVATE */
#endif /* XNU_KERNEL_PRIVATE */
#endif /* MACH_KERNEL_PRIVATE */
#ifdef MACH_KERNEL_PRIVATE
#pragma pack(1)  /* Make sure the structure stays as we defined it */
typedef union _lck_rw_t_internal_ {
	struct {
		volatile uint16_t  lck_rw_shared_count;  /* No. of accepted readers */
		volatile uint8_t   lck_rw_interlock;     /* Interlock byte */
		volatile uint8_t
			lck_rw_priv_excl:1,     /* Writers prioritized if set */
			lck_rw_want_upgrade:1,  /* Read-to-write upgrade waiting */
			lck_rw_want_write:1,    /* Writer waiting or locked for write */
			lck_r_waiting:1,        /* Reader is sleeping on lock */
			lck_w_waiting:1,        /* Writer is sleeping on lock */
			lck_rw_can_sleep:1,     /* Can attempts to lock go to sleep? */
			lck_rw_padb6:2;         /* padding */
		uint32_t           lck_rw_tag;           /* This can be obsoleted when stats are in */
		thread_t           lck_rw_owner;         /* Unused */
	};
	struct {
		uint32_t           data;                 /* Single word for count, ilk, and bitfields */
		uint32_t           lck_rw_pad4;
		uint32_t           lck_rw_pad8;
		uint32_t           lck_rw_pad12;
	};
} lck_rw_t;
#pragma pack()
#define LCK_RW_SHARED_SHIFT      0
#define LCK_RW_INTERLOCK_BIT     16
#define LCK_RW_PRIV_EXCL_BIT     24
#define LCK_RW_WANT_UPGRADE_BIT  25
#define LCK_RW_WANT_EXCL_BIT     26
#define LCK_RW_R_WAITING_BIT     27
#define LCK_RW_W_WAITING_BIT     28
#define LCK_RW_CAN_SLEEP_BIT     29
#define LCK_RW_INTERLOCK      (1 << LCK_RW_INTERLOCK_BIT)
#define LCK_RW_WANT_UPGRADE   (1 << LCK_RW_WANT_UPGRADE_BIT)
#define LCK_RW_WANT_EXCL      (1 << LCK_RW_WANT_EXCL_BIT)
#define LCK_RW_R_WAITING      (1 << LCK_RW_R_WAITING_BIT)
#define LCK_RW_W_WAITING      (1 << LCK_RW_W_WAITING_BIT)
#define LCK_RW_PRIV_EXCL      (1 << LCK_RW_PRIV_EXCL_BIT)
#define LCK_RW_TAG_VALID      (1 << LCK_RW_TAG_VALID_BIT)
#define LCK_RW_SHARED_MASK    (0xffff << LCK_RW_SHARED_SHIFT)
#define LCK_RW_SHARED_READER  (1 << LCK_RW_SHARED_SHIFT)

#define LCK_RW_WANT_WRITE     LCK_RW_WANT_EXCL
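/*
 * Illustrative sketch, not part of the original header: how the single
 * 32-bit "data" view of the lock decomposes using the masks above -- the
 * low 16 bits carry the shared (reader) count, the interlock sits at bit
 * 16, and the state flags occupy bits 24-29.  The helper names are
 * hypothetical.
 */
static inline uint32_t
lck_rw_shared_count_sketch(uint32_t data)
{
	return (data & LCK_RW_SHARED_MASK) >> LCK_RW_SHARED_SHIFT;
}

static inline int
lck_rw_writer_pending_sketch(uint32_t data)
{
	return (data & (LCK_RW_WANT_EXCL | LCK_RW_WANT_UPGRADE)) != 0;
}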
#define LCK_RW_ATTR_DEBUG        0x1
#define LCK_RW_ATTR_DEBUGb       0
#define LCK_RW_ATTR_STAT         0x2
#define LCK_RW_ATTR_STATb        1
#define LCK_RW_ATTR_READ_PRI     0x3
#define LCK_RW_ATTR_READ_PRIb    2
#define LCK_RW_ATTR_DIS_THREAD   0x40000000
#define LCK_RW_ATTR_DIS_THREADb  30
#define LCK_RW_ATTR_DIS_MYLOCK   0x10000000
#define LCK_RW_ATTR_DIS_MYLOCKb  28

#define LCK_RW_TAG_DESTROYED     0x00002007  /* lock marked as Destroyed */
#define RW_LOCK_READER_EVENT(x)   ((event_t) (((unsigned char*) (x)) + (offsetof(lck_rw_t, lck_rw_tag))))
#define RW_LOCK_WRITER_EVENT(x)   ((event_t) (((unsigned char*) (x)) + (offsetof(lck_rw_t, lck_rw_pad8))))
#define READ_EVENT_TO_RWLOCK(x)   ((lck_rw_t *)(((unsigned char*)(x) - (offsetof(lck_rw_t, lck_rw_tag)))))
#define WRITE_EVENT_TO_RWLOCK(x)  ((lck_rw_t *)(((unsigned char*)(x) - (offsetof(lck_rw_t, lck_rw_pad8)))))
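/*
 * Illustrative sketch, not part of the original header: readers and writers
 * block on two distinct addresses inside the lock (derived from lck_rw_tag
 * and lck_rw_pad8 above), and the two *_EVENT_TO_RWLOCK macros are the
 * inverse mapping from a wakeup event back to the lock.  The helper name is
 * hypothetical.
 */
static inline lck_rw_t *
lck_rw_reader_event_roundtrip_sketch(lck_rw_t *lck)
{
	event_t event = RW_LOCK_READER_EVENT(lck);  /* address readers sleep on */
	return READ_EVENT_TO_RWLOCK(event);         /* maps back to the lock */
}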
#if defined(LOCK_PRIVATE)

#define disable_preemption_for_thread(t) ((cpu_data_t GS_RELATIVE *)0UL)->cpu_preemption_level++

#define LCK_MTX_THREAD_TO_STATE(t)  ((uintptr_t)t)
#define PLATFORM_LCK_ILOCK          0

#define LOCK_SNOOP_SPINS    1000
#define LOCK_PRETEST        1

/* Spinlock panic deadline, in mach_absolute_time units (ns on i386) */
#define LOCK_PANIC_TIMEOUT  0xf00000  /* 250 ms (huge) */

#endif // LOCK_PRIVATE
#else /* MACH_KERNEL_PRIVATE */
#ifdef KERNEL_PRIVATE
#pragma pack(1)
typedef struct {
	uint32_t  opaque[3];
	uint32_t  opaque4;
} lck_rw_t;
#pragma pack()
#else
typedef struct __lck_rw_t__ lck_rw_t;
#endif /* KERNEL_PRIVATE */
#endif /* MACH_KERNEL_PRIVATE */
#ifdef MACH_KERNEL_PRIVATE

extern void kernel_preempt_check(void);

#endif /* MACH_KERNEL_PRIVATE */

#endif /* _I386_LOCKS_H_ */