/*
 * Copyright (c) 2004-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _I386_LOCKS_H_
#define _I386_LOCKS_H_

#include <sys/appleapiopts.h>
#include <kern/kern_types.h>

#ifdef MACH_KERNEL_PRIVATE

#include <i386/hw_lock_types.h>

extern unsigned int	LcksOpts;
#if DEVELOPMENT || DEBUG
extern unsigned int	LckDisablePreemptCheck;
#endif

#define enaLkDeb	0x00000001	/* Request debug in default attribute */
#define enaLkStat	0x00000002	/* Request statistic in default attribute */
#define disLkRWPrio	0x00000004	/* Disable RW lock priority promotion */
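
/*
 * Illustrative sketch (not from this file): LcksOpts is the boot-time lock
 * options word; the bits above request the debug or statistics variants of
 * locks created with the default lock attribute.  A hypothetical caller-side
 * test might look like:
 *
 *	if (LcksOpts & enaLkDeb)
 *		... set up the lock with debugging enabled ...
 */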

#endif /* MACH_KERNEL_PRIVATE */

#if defined(MACH_KERNEL_PRIVATE)
typedef struct {
	volatile uintptr_t	interlock;
#if MACH_LDEBUG
	unsigned long		lck_spin_pad[9];	/* XXX - usimple_lock_data_t */
#endif
} lck_spin_t;

#define LCK_SPIN_TAG_DESTROYED	0x00002007	/* lock marked as Destroyed */

#else /* MACH_KERNEL_PRIVATE */
#ifdef KERNEL_PRIVATE
typedef struct {
	unsigned long	opaque[10];
} lck_spin_t;
#else /* KERNEL_PRIVATE */
typedef struct __lck_spin_t__ lck_spin_t;
#endif
#endif
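
/*
 * Sizing note (illustrative, inferred from the layouts above): the exported
 * opaque view reserves 10 words so it is at least as large as the
 * MACH_KERNEL_PRIVATE layout in its largest configuration (the interlock
 * word plus the 9-word MACH_LDEBUG pad), keeping the kext-visible structure
 * size stable regardless of kernel debug settings.
 */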

#ifdef MACH_KERNEL_PRIVATE
/* The definition of this structure, including the layout of the
 * state bitfield, is tailored to the asm implementation in i386_lock.s
 */
typedef struct _lck_mtx_ {
	union {
		struct {
			volatile uintptr_t	lck_mtx_owner;
			union {
				struct {
					volatile uint32_t
						lck_mtx_waiters:16,
						lck_mtx_pri:8,
						lck_mtx_ilocked:1,
						lck_mtx_mlocked:1,
						lck_mtx_promoted:1,
						lck_mtx_spin:1,
						lck_mtx_is_ext:1,
						lck_mtx_pad3:3;
				};
				uint32_t	lck_mtx_state;
			};
			/* Pad field used as a canary, initialized to ~0 */
			uint32_t		lck_mtx_pad32;
		};
		struct {
			struct _lck_mtx_ext_	*lck_mtx_ptr;
			uint32_t		lck_mtx_tag;
			uint32_t		lck_mtx_pad32_2;
		};
	};
} lck_mtx_t;

/* This pattern must subsume the interlocked, mlocked and spin bits */
#define LCK_MTX_TAG_INDIRECT	0x07ff1007	/* lock marked as Indirect  */
#define LCK_MTX_TAG_DESTROYED	0x07fe2007	/* lock marked as Destroyed */
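
/*
 * Illustrative use of the tag (assumed from the union above, not taken from
 * this file): lck_mtx_tag overlays lck_mtx_state, so a mutex that has been
 * converted to the extended (indirect) form can be recognized and followed
 * roughly like this:
 *
 *	if (mutex->lck_mtx_tag == LCK_MTX_TAG_INDIRECT)
 *		mutex = &mutex->lck_mtx_ptr->lck_mtx;	// operate on the lck_mtx_ext_t copy
 */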

/* Adaptive spin before blocking */
extern uint64_t	MutexSpin;
extern int	lck_mtx_lock_spinwait_x86(lck_mtx_t *mutex);
extern void	lck_mtx_lock_wait_x86(lck_mtx_t *mutex);
extern void	lck_mtx_lock_acquire_x86(lck_mtx_t *mutex);
extern void	lck_mtx_unlock_wakeup_x86(lck_mtx_t *mutex, int prior_lock_state);

extern void	lck_mtx_lock_mark_destroyed(lck_mtx_t *mutex);
extern int	lck_mtx_lock_grab_mutex(lck_mtx_t *mutex);
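
/*
 * Slow-path sketch (assumed ordering, not stated in this file): on contention
 * the lock path first spins for a bounded interval governed by MutexSpin
 * (lck_mtx_lock_spinwait_x86); if the owner does not release the mutex in
 * that window, the caller blocks in lck_mtx_lock_wait_x86 and finishes
 * ownership hand-off in lck_mtx_lock_acquire_x86 once woken.  The unlock
 * path passes the prior lock state to lck_mtx_unlock_wakeup_x86 when waiters
 * must be woken.
 */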

extern void	hw_lock_byte_init(volatile uint8_t *lock_byte);
extern void	hw_lock_byte_lock(volatile uint8_t *lock_byte);
extern void	hw_lock_byte_unlock(volatile uint8_t *lock_byte);
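
/*
 * Minimal usage sketch (illustrative only; `my_byte_lock` is a hypothetical
 * caller-owned byte used as a spin lock):
 *
 *	volatile uint8_t my_byte_lock;
 *
 *	hw_lock_byte_init(&my_byte_lock);
 *	hw_lock_byte_lock(&my_byte_lock);	// spins until the byte is owned
 *	... critical section ...
 *	hw_lock_byte_unlock(&my_byte_lock);
 */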

typedef struct {
	unsigned int	type;
	unsigned int	pad4;
	vm_offset_t	pc;
	vm_offset_t	thread;
} lck_mtx_deb_t;

#define MUTEX_TAG	0x4d4d

typedef struct {
	unsigned int	lck_mtx_stat_data;
} lck_mtx_stat_t;

typedef struct _lck_mtx_ext_ {
	lck_mtx_t		lck_mtx;
	struct _lck_grp_	*lck_mtx_grp;
	unsigned int		lck_mtx_attr;
	unsigned int		lck_mtx_pad1;
	lck_mtx_deb_t		lck_mtx_deb;
	uint64_t		lck_mtx_stat;
	unsigned int		lck_mtx_pad2[2];
} lck_mtx_ext_t;

#define LCK_MTX_ATTR_DEBUG	0x1
#define LCK_MTX_ATTR_DEBUGb	0
#define LCK_MTX_ATTR_STAT	0x2
#define LCK_MTX_ATTR_STATb	1

#define LCK_MTX_EVENT(lck)		((event_t)(((unsigned int*)(lck))+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)))
#define LCK_EVENT_TO_MUTEX(event)	((lck_mtx_t *)(uintptr_t)(((unsigned int *)(event)) - ((sizeof(lck_mtx_t)-1)/sizeof(unsigned int))))
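
/*
 * Note (illustrative, derived from the macro arithmetic above): the wait
 * event for a mutex is the address of the last 32-bit word inside the
 * lck_mtx_t itself, so the two macros are exact inverses:
 *
 *	LCK_EVENT_TO_MUTEX(LCK_MTX_EVENT(mutex)) == mutex
 */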

#else /* MACH_KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE
typedef struct {
	unsigned long	opaque[2];
} lck_mtx_t;

typedef struct {
	unsigned long	opaque[10];
} lck_mtx_ext_t;
#else
#ifdef KERNEL_PRIVATE
typedef struct {
	unsigned long	opaque[2];
} lck_mtx_t;

typedef struct {
	unsigned long	opaque[10];
} lck_mtx_ext_t;

#else
typedef struct __lck_mtx_t__		lck_mtx_t;
typedef struct __lck_mtx_ext_t__	lck_mtx_ext_t;
#endif
#endif
#endif

#ifdef MACH_KERNEL_PRIVATE
#pragma pack(1)		/* Make sure the structure stays as we defined it */
typedef union _lck_rw_t_internal_ {
	struct {
		volatile uint16_t	lck_rw_shared_count;	/* No. of accepted readers */
		volatile uint8_t	lck_rw_interlock;	/* Interlock byte */
		volatile uint8_t
					lck_rw_priv_excl:1,	/* Writers prioritized if set */
					lck_rw_want_upgrade:1,	/* Read-to-write upgrade waiting */
					lck_rw_want_write:1,	/* Writer waiting or locked for write */
					lck_r_waiting:1,	/* Reader is sleeping on lock */
					lck_w_waiting:1,	/* Writer is sleeping on lock */
					lck_rw_can_sleep:1,	/* Can attempts to lock go to sleep? */
					lck_rw_padb6:2;		/* padding */
		uint32_t		lck_rw_tag;		/* This can be obsoleted when stats are in */
		thread_t		lck_rw_owner;		/* Unused */
	};
	struct {
		uint32_t	data;			/* Single word for count, ilk, and bitfields */
		uint32_t	lck_rw_pad4;
		uint32_t	lck_rw_pad8;
		uint32_t	lck_rw_pad12;
	};
} lck_rw_t;
#pragma pack()

#define LCK_RW_SHARED_SHIFT	0
#define LCK_RW_INTERLOCK_BIT	16
#define LCK_RW_PRIV_EXCL_BIT	24
#define LCK_RW_WANT_UPGRADE_BIT	25
#define LCK_RW_WANT_EXCL_BIT	26
#define LCK_RW_R_WAITING_BIT	27
#define LCK_RW_W_WAITING_BIT	28
#define LCK_RW_CAN_SLEEP_BIT	29

#define LCK_RW_INTERLOCK	(1 << LCK_RW_INTERLOCK_BIT)
#define LCK_RW_WANT_UPGRADE	(1 << LCK_RW_WANT_UPGRADE_BIT)
#define LCK_RW_WANT_EXCL	(1 << LCK_RW_WANT_EXCL_BIT)
#define LCK_RW_R_WAITING	(1 << LCK_RW_R_WAITING_BIT)
#define LCK_RW_W_WAITING	(1 << LCK_RW_W_WAITING_BIT)
#define LCK_RW_PRIV_EXCL	(1 << LCK_RW_PRIV_EXCL_BIT)
#define LCK_RW_TAG_VALID	(1 << LCK_RW_TAG_VALID_BIT)
#define LCK_RW_SHARED_MASK	(0xffff << LCK_RW_SHARED_SHIFT)
#define LCK_RW_SHARED_READER	(1 << LCK_RW_SHARED_SHIFT)

#define LCK_RW_WANT_WRITE	LCK_RW_WANT_EXCL
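
/*
 * Layout note (illustrative, derived from the packed union above): the bit
 * numbers index into the 32-bit `data` word, which on little-endian x86
 * overlays the first four bytes of the structure:
 *
 *	bits  0-15	lck_rw_shared_count
 *	bits 16-23	lck_rw_interlock
 *	bits 24-29	priv_excl, want_upgrade, want_write (EXCL),
 *			r_waiting, w_waiting, can_sleep
 *
 * For example, (data & LCK_RW_SHARED_MASK) >> LCK_RW_SHARED_SHIFT recovers
 * the reader count, and adding LCK_RW_SHARED_READER to `data` admits one
 * more reader.
 */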


#define LCK_RW_ATTR_DEBUG	0x1
#define LCK_RW_ATTR_DEBUGb	0
#define LCK_RW_ATTR_STAT	0x2
#define LCK_RW_ATTR_STATb	1
#define LCK_RW_ATTR_READ_PRI	0x3
#define LCK_RW_ATTR_READ_PRIb	2
#define LCK_RW_ATTR_DIS_THREAD	0x40000000
#define LCK_RW_ATTR_DIS_THREADb	30
#define LCK_RW_ATTR_DIS_MYLOCK	0x10000000
#define LCK_RW_ATTR_DIS_MYLOCKb	28

#define LCK_RW_TAG_DESTROYED	0x00002007	/* lock marked as Destroyed */

#define RW_LOCK_READER_EVENT(x)		((event_t)(((unsigned char *)(x)) + (offsetof(lck_rw_t, lck_rw_tag))))
#define RW_LOCK_WRITER_EVENT(x)		((event_t)(((unsigned char *)(x)) + (offsetof(lck_rw_t, lck_rw_pad8))))
#define READ_EVENT_TO_RWLOCK(x)		((lck_rw_t *)(((unsigned char *)(x)) - (offsetof(lck_rw_t, lck_rw_tag))))
#define WRITE_EVENT_TO_RWLOCK(x)	((lck_rw_t *)(((unsigned char *)(x)) - (offsetof(lck_rw_t, lck_rw_pad8))))
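
/*
 * Note (illustrative, derived from the macros above): readers and writers
 * sleep on two distinct addresses inside the same lck_rw_t (the lck_rw_tag
 * and lck_rw_pad8 words respectively), which lets the unlock path wake only
 * the class of waiters it intends to.  Each *_EVENT_TO_RWLOCK macro undoes
 * the corresponding offset, so for example:
 *
 *	READ_EVENT_TO_RWLOCK(RW_LOCK_READER_EVENT(lck)) == lck
 */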

#if LOCK_PRIVATE

#define disable_preemption_for_thread(t)	((cpu_data_t GS_RELATIVE *)0UL)->cpu_preemption_level++
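
/*
 * Note (illustrative, assumed from the GS_RELATIVE cast above): the access
 * through a zero base pointer is %gs-relative, so this increments the
 * cpu_preemption_level field of the current processor's cpu_data_t without
 * first computing a pointer to it; the thread argument is unused on this
 * architecture.
 */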

#define LCK_MTX_THREAD_TO_STATE(t)	((uintptr_t)t)
#define PLATFORM_LCK_ILOCK		0

#define LOCK_SNOOP_SPINS	1000
#define LOCK_PRETEST		1

/* Spinlock panic deadline, in mach_absolute_time units (ns on i386) */
#define LOCK_PANIC_TIMEOUT	0xf000000	/* 250 ms (huge) */

#endif // LOCK_PRIVATE

#else
#ifdef KERNEL_PRIVATE
#pragma pack(1)
typedef struct {
	uint32_t	opaque[3];
	uint32_t	opaque4;
} lck_rw_t;
#pragma pack()
#else
typedef struct __lck_rw_t__ lck_rw_t;
#endif
#endif

#ifdef MACH_KERNEL_PRIVATE

extern void kernel_preempt_check(void);

#endif /* MACH_KERNEL_PRIVATE */

#endif /* _I386_LOCKS_H_ */