/*
 * Copyright (c) 2004-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _I386_LOCKS_H_
#define _I386_LOCKS_H_

#include <sys/appleapiopts.h>
#include <kern/kern_types.h>
#include <kern/assert.h>

#ifdef MACH_KERNEL_PRIVATE

#include <i386/hw_lock_types.h>

extern unsigned int LcksOpts;
#if DEVELOPMENT || DEBUG
extern unsigned int LckDisablePreemptCheck;
#endif

#define enaLkDeb        0x00000001      /* Request debug in default attribute */
#define enaLkStat       0x00000002      /* Request statistics in default attribute */
#define disLkRWPrio     0x00000004      /* Disable RW lock priority promotion */
#define enaLkTimeStat   0x00000008      /* Request time statistics in default attribute */

#endif /* MACH_KERNEL_PRIVATE */

#if defined(MACH_KERNEL_PRIVATE)
typedef struct {
	volatile uintptr_t      interlock;
#if MACH_LDEBUG
	unsigned long           lck_spin_pad[9];        /* XXX - usimple_lock_data_t */
#endif
} lck_spin_t;

#define LCK_SPIN_TAG_DESTROYED          0x00002007      /* lock marked as Destroyed */

#else /* MACH_KERNEL_PRIVATE */
#ifdef KERNEL_PRIVATE
typedef struct {
	unsigned long           opaque[10];
} lck_spin_t;
#else /* KERNEL_PRIVATE */
typedef struct __lck_spin_t__   lck_spin_t;
#endif
#endif
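
/*
 * Illustrative sketch only (not part of the original header): typical use of
 * the lck_spin_t defined above goes through the lck_spin_* KPI declared in
 * <kern/locks.h>. The group/attribute setup and the names example_grp /
 * example_spin below are assumptions for illustration, not code from xnu.
 */
#if 0 /* example, compiled out */
#include <kern/locks.h>

static lck_grp_t  *example_grp;         /* hypothetical lock group */
static lck_spin_t *example_spin;        /* hypothetical spin lock */

static void
example_spin_usage(void)
{
	/* Allocate a lock group and a spin lock belonging to it. */
	example_grp  = lck_grp_alloc_init("example", LCK_GRP_ATTR_NULL);
	example_spin = lck_spin_alloc_init(example_grp, LCK_ATTR_NULL);

	lck_spin_lock(example_spin);
	/* ... short, non-blocking critical section ... */
	lck_spin_unlock(example_spin);

	lck_spin_free(example_spin, example_grp);
}
#endif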

#ifdef MACH_KERNEL_PRIVATE
/* The definition of this structure, including the layout of the
 * state bitfield, is tailored to the asm implementation in i386_lock.s
 */
typedef struct _lck_mtx_ {
	union {
		struct {
			volatile uintptr_t      lck_mtx_owner;
			union {
				struct {
					volatile uint32_t
					    lck_mtx_waiters:16,
					    lck_mtx_pri:8,              // unused
					    lck_mtx_ilocked:1,
					    lck_mtx_mlocked:1,
					    lck_mtx_promoted:1,         // unused
					    lck_mtx_spin:1,
					    lck_mtx_is_ext:1,
					    lck_mtx_pad3:3;
				};
				uint32_t        lck_mtx_state;
			};
			/* Pad field used as a canary, initialized to ~0 */
			uint32_t        lck_mtx_pad32;
		};
		struct {
			struct _lck_mtx_ext_    *lck_mtx_ptr;
			uint32_t                lck_mtx_tag;
			uint32_t                lck_mtx_pad32_2;
		};
	};
} lck_mtx_t;

#define LCK_MTX_WAITERS_MSK             0x0000ffff
#define LCK_MTX_WAITER                  0x00000001
#define LCK_MTX_PRIORITY_MSK            0x00ff0000
#define LCK_MTX_ILOCKED_MSK             0x01000000
#define LCK_MTX_MLOCKED_MSK             0x02000000
#define LCK_MTX_SPIN_MSK                0x08000000

/* This pattern must subsume the interlocked, mlocked and spin bits */
#define LCK_MTX_TAG_INDIRECT            0x07ff1007      /* lock marked as Indirect */
#define LCK_MTX_TAG_DESTROYED           0x07fe2007      /* lock marked as Destroyed */
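
/*
 * Layout note added for illustration (assuming the LSB-first bitfield
 * allocation clang/gcc use on x86, applied to the bitfield above):
 *
 *   LCK_MTX_WAITERS_MSK   0x0000ffff   bits  0..15   lck_mtx_waiters
 *   LCK_MTX_PRIORITY_MSK  0x00ff0000   bits 16..23   lck_mtx_pri (unused)
 *   LCK_MTX_ILOCKED_MSK   0x01000000   bit  24       lck_mtx_ilocked
 *   LCK_MTX_MLOCKED_MSK   0x02000000   bit  25       lck_mtx_mlocked
 *   LCK_MTX_SPIN_MSK      0x08000000   bit  27       lck_mtx_spin
 */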

/* Adaptive spin before blocking */
extern uint64_t MutexSpin;

typedef enum lck_mtx_spinwait_ret_type {
	LCK_MTX_SPINWAIT_ACQUIRED = 0,
	LCK_MTX_SPINWAIT_SPUN = 1,
	LCK_MTX_SPINWAIT_NO_SPIN = 2,
} lck_mtx_spinwait_ret_type_t;

extern lck_mtx_spinwait_ret_type_t lck_mtx_lock_spinwait_x86(lck_mtx_t *mutex);
struct turnstile;
extern void lck_mtx_lock_wait_x86(lck_mtx_t *mutex, struct turnstile **ts);
extern void lck_mtx_lock_acquire_x86(lck_mtx_t *mutex);

extern void lck_mtx_lock_slow(lck_mtx_t *lock);
extern boolean_t lck_mtx_try_lock_slow(lck_mtx_t *lock);
extern void lck_mtx_unlock_slow(lck_mtx_t *lock);
extern void lck_mtx_lock_spin_slow(lck_mtx_t *lock);
extern boolean_t lck_mtx_try_lock_spin_slow(lck_mtx_t *lock);
extern void hw_lock_byte_init(volatile uint8_t *lock_byte);
extern void hw_lock_byte_lock(volatile uint8_t *lock_byte);
extern void hw_lock_byte_unlock(volatile uint8_t *lock_byte);

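/*
 * Illustrative sketch only: the hw_lock_byte_* routines declared above
 * operate on a caller-supplied byte. The variable name below is a
 * hypothetical example, not from xnu.
 */
#if 0 /* example, compiled out */
static volatile uint8_t example_byte_lock;

static void
example_byte_lock_usage(void)
{
	hw_lock_byte_init(&example_byte_lock);

	hw_lock_byte_lock(&example_byte_lock);
	/* ... critical section protected by the byte lock ... */
	hw_lock_byte_unlock(&example_byte_lock);
}
#endif
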
typedef struct {
	unsigned int            type;
	unsigned int            pad4;
	vm_offset_t             pc;
	vm_offset_t             thread;
} lck_mtx_deb_t;

#define MUTEX_TAG       0x4d4d

typedef struct {
	unsigned int            lck_mtx_stat_data;
} lck_mtx_stat_t;

typedef struct _lck_mtx_ext_ {
	lck_mtx_t               lck_mtx;
	struct _lck_grp_        *lck_mtx_grp;
	unsigned int            lck_mtx_attr;
	unsigned int            lck_mtx_pad1;
	lck_mtx_deb_t           lck_mtx_deb;
	uint64_t                lck_mtx_stat;
	unsigned int            lck_mtx_pad2[2];
} lck_mtx_ext_t;

#define LCK_MTX_ATTR_DEBUG      0x1
#define LCK_MTX_ATTR_DEBUGb     0
#define LCK_MTX_ATTR_STAT       0x2
#define LCK_MTX_ATTR_STATb      1

#define LCK_MTX_EVENT(lck)              ((event_t)(((unsigned int*)(lck))+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)))
#define LCK_EVENT_TO_MUTEX(event)       ((lck_mtx_t *)(uintptr_t)(((unsigned int *)(event)) - ((sizeof(lck_mtx_t)-1)/sizeof(unsigned int))))
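
/*
 * Note added for illustration (assuming the LP64 layout above, where
 * sizeof(lck_mtx_t) == 16): LCK_MTX_EVENT() yields the address of the last
 * 32-bit word of the mutex (lck_mtx_pad32 / lck_mtx_pad32_2), which serves
 * as the wait event for the lock; LCK_EVENT_TO_MUTEX() inverts the mapping.
 */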

#else /* MACH_KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE
typedef struct {
	unsigned long           opaque[2];
} lck_mtx_t;

typedef struct {
	unsigned long           opaque[10];
} lck_mtx_ext_t;
#else
#ifdef KERNEL_PRIVATE
typedef struct {
	unsigned long           opaque[2];
} lck_mtx_t;

typedef struct {
	unsigned long           opaque[10];
} lck_mtx_ext_t;

#else
typedef struct __lck_mtx_t__            lck_mtx_t;
typedef struct __lck_mtx_ext_t__        lck_mtx_ext_t;
#endif
#endif
#endif

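/*
 * Illustrative sketch only (not part of this header): whichever definition
 * of lck_mtx_t is visible, clients use the lck_mtx_* KPI declared in
 * <kern/locks.h>. 'example_mtx' below is a hypothetical mutex assumed to
 * have been initialized elsewhere (e.g. via lck_mtx_alloc_init()).
 */
#if 0 /* example, compiled out */
#include <kern/locks.h>

extern lck_mtx_t *example_mtx;          /* hypothetical, initialized elsewhere */

static void
example_mutex_usage(void)
{
	/* Opportunistic attempt that never blocks. */
	if (lck_mtx_try_lock(example_mtx)) {
		/* ... critical section ... */
		lck_mtx_unlock(example_mtx);
		return;
	}

	/* Otherwise block until the mutex becomes available. */
	lck_mtx_lock(example_mtx);
	/* ... critical section; sleeping is permitted while a mutex is held ... */
	lck_mtx_unlock(example_mtx);
}
#endif
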
#ifdef MACH_KERNEL_PRIVATE
typedef union _lck_rw_t_internal_ {
	struct {
		volatile uint16_t       lck_rw_shared_count;    /* No. of accepted readers */
		volatile uint8_t        lck_rw_interlock;       /* Interlock byte */
		volatile uint8_t
		    lck_rw_priv_excl:1,                         /* Writers prioritized if set */
		    lck_rw_want_upgrade:1,                      /* Read-to-write upgrade waiting */
		    lck_rw_want_write:1,                        /* Writer waiting or locked for write */
		    lck_r_waiting:1,                            /* Reader is sleeping on lock */
		    lck_w_waiting:1,                            /* Writer is sleeping on lock */
		    lck_rw_can_sleep:1,                         /* Can attempts to lock go to sleep? */
		    lck_rw_padb6:2;                             /* padding */
		uint32_t                lck_rw_tag;             /* This can be obsoleted when stats are in */
		thread_t                lck_rw_owner;           /* Unused */
	};
	struct {
		uint32_t                data;                   /* Single word for count, ilk, and bitfields */
		uint32_t                lck_rw_pad4;
		uint32_t                lck_rw_pad8;
		uint32_t                lck_rw_pad12;
	};
} lck_rw_t;
#define LCK_RW_T_SIZE           16

static_assert(sizeof(lck_rw_t) == LCK_RW_T_SIZE);

#define LCK_RW_SHARED_SHIFT     0
#define LCK_RW_INTERLOCK_BIT    16
#define LCK_RW_PRIV_EXCL_BIT    24
#define LCK_RW_WANT_UPGRADE_BIT 25
#define LCK_RW_WANT_EXCL_BIT    26
#define LCK_RW_R_WAITING_BIT    27
#define LCK_RW_W_WAITING_BIT    28
#define LCK_RW_CAN_SLEEP_BIT    29

#define LCK_RW_INTERLOCK        (1 << LCK_RW_INTERLOCK_BIT)
#define LCK_RW_WANT_UPGRADE     (1 << LCK_RW_WANT_UPGRADE_BIT)
#define LCK_RW_WANT_EXCL        (1 << LCK_RW_WANT_EXCL_BIT)
#define LCK_RW_R_WAITING        (1 << LCK_RW_R_WAITING_BIT)
#define LCK_RW_W_WAITING        (1 << LCK_RW_W_WAITING_BIT)
#define LCK_RW_PRIV_EXCL        (1 << LCK_RW_PRIV_EXCL_BIT)
#define LCK_RW_TAG_VALID        (1 << LCK_RW_TAG_VALID_BIT)
#define LCK_RW_SHARED_MASK      (0xffff << LCK_RW_SHARED_SHIFT)
#define LCK_RW_SHARED_READER    (1 << LCK_RW_SHARED_SHIFT)
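
/*
 * Note added for illustration (assuming little-endian x86 and the union
 * layout above): the 32-bit 'data' word overlays lck_rw_shared_count
 * (bits 0..15), lck_rw_interlock (bits 16..23), and the flag byte
 * (bits 24..31), which is how the *_BIT values above line up with the
 * bitfield members (priv_excl = 24, want_upgrade = 25, want_write = 26,
 * r_waiting = 27, w_waiting = 28, can_sleep = 29).
 */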

#define LCK_RW_WANT_WRITE       LCK_RW_WANT_EXCL


#define LCK_RW_ATTR_DEBUG       0x1
#define LCK_RW_ATTR_DEBUGb      0
#define LCK_RW_ATTR_STAT        0x2
#define LCK_RW_ATTR_STATb       1
#define LCK_RW_ATTR_READ_PRI    0x3
#define LCK_RW_ATTR_READ_PRIb   2
#define LCK_RW_ATTR_DIS_THREAD  0x40000000
#define LCK_RW_ATTR_DIS_THREADb 30
#define LCK_RW_ATTR_DIS_MYLOCK  0x10000000
#define LCK_RW_ATTR_DIS_MYLOCKb 28

#define LCK_RW_TAG_DESTROYED    0x00002007      /* lock marked as Destroyed */

#define RW_LOCK_READER_EVENT(x)         ((event_t) (((unsigned char*) (x)) + (offsetof(lck_rw_t, lck_rw_tag))))
#define RW_LOCK_WRITER_EVENT(x)         ((event_t) (((unsigned char*) (x)) + (offsetof(lck_rw_t, lck_rw_pad8))))
#define READ_EVENT_TO_RWLOCK(x)         ((lck_rw_t *)(((unsigned char*)(x) - (offsetof(lck_rw_t, lck_rw_tag)))))
#define WRITE_EVENT_TO_RWLOCK(x)        ((lck_rw_t *)(((unsigned char*)(x) - (offsetof(lck_rw_t, lck_rw_pad8)))))

#if LOCK_PRIVATE

#define disable_preemption_for_thread(t)        ((cpu_data_t GS_RELATIVE *)0UL)->cpu_preemption_level++
#define preemption_disabled_for_thread(t)       (((cpu_data_t GS_RELATIVE *)0UL)->cpu_preemption_level > 0)
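
/*
 * Note added for illustration: these macros reach the current CPU's
 * cpu_data_t through a %gs-relative access (the GS_RELATIVE address-space
 * qualifier applied to a 0 base), so they bump or test cpu_preemption_level
 * for the executing CPU and ignore the thread argument 't'.
 */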

#define LCK_MTX_THREAD_TO_STATE(t)      ((uintptr_t)t)
#define PLATFORM_LCK_ILOCK              0

#define LOCK_SNOOP_SPINS        1000
#define LOCK_PRETEST            1

/* hw_lock_lock static panic deadline, in timebase units. hw_lock_to() uses
 * LockTimeoutTSC computed at startup
 */
#define LOCK_PANIC_TIMEOUT      0xf000000       /* 251e6 TSC ticks */

#endif // LOCK_PRIVATE

#else
#ifdef KERNEL_PRIVATE
#pragma pack(1)
typedef struct {
	uint32_t                opaque[3];
	uint32_t                opaque4;
} lck_rw_t;
#pragma pack()
#else
typedef struct __lck_rw_t__     lck_rw_t;
#endif
#endif
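
/*
 * Illustrative sketch only (not part of this header): reader/writer locks
 * are normally used through the lck_rw_* KPI declared in <kern/locks.h>.
 * The names below are hypothetical.
 */
#if 0 /* example, compiled out */
#include <kern/locks.h>

static lck_grp_t *example_grp;          /* hypothetical lock group */
static lck_rw_t  *example_rw;           /* hypothetical rw lock */

static void
example_rw_usage(void)
{
	example_grp = lck_grp_alloc_init("example", LCK_GRP_ATTR_NULL);
	example_rw  = lck_rw_alloc_init(example_grp, LCK_ATTR_NULL);

	/* Multiple readers may hold the lock concurrently. */
	lck_rw_lock_shared(example_rw);
	/* ... read-only access ... */
	lck_rw_unlock_shared(example_rw);

	/* Writers get exclusive access. */
	lck_rw_lock_exclusive(example_rw);
	/* ... modify shared state ... */
	lck_rw_unlock_exclusive(example_rw);

	lck_rw_free(example_rw, example_grp);
}
#endif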

#ifdef MACH_KERNEL_PRIVATE

extern void kernel_preempt_check(void);

#endif /* MACH_KERNEL_PRIVATE */
#endif /* _I386_LOCKS_H_ */