/*
 * Copyright (c) 2007-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _ARM_LOCKS_H_
#define _ARM_LOCKS_H_

#include <kern/kern_types.h>
#ifdef MACH_KERNEL_PRIVATE
#include <arm/hw_lock_types.h>
#endif


#ifdef MACH_KERNEL_PRIVATE

#define enaLkDeb        0x00000001      /* Request debug in default attribute */
#define enaLkStat       0x00000002      /* Request statistics in default attribute */
#define disLkRWPrio     0x00000004      /* Disable RW lock priority promotion */
#define enaLkTimeStat   0x00000008      /* Request time statistics in default attribute */

#define disLkType       0x80000000      /* Disable type checking */
#define disLktypeb      0
#define disLkThread     0x40000000      /* Disable ownership checking */
#define disLkThreadb    1
#define enaLkExtStck    0x20000000      /* Enable extended backtrace */
#define enaLkExtStckb   2
#define disLkMyLck      0x10000000      /* Disable recursive lock detection */
#define disLkMyLckb     3

#endif

#ifdef MACH_KERNEL_PRIVATE
typedef struct {
        struct hslock   hwlock;
        uintptr_t       type;
} lck_spin_t;

#define lck_spin_data   hwlock.lock_data

#define LCK_SPIN_TAG_DESTROYED  0xdead  /* lock marked as Destroyed */

#define LCK_SPIN_TYPE           0x00000011

#else
#ifdef KERNEL_PRIVATE

typedef struct {
        uintptr_t       opaque[2];
} lck_spin_t;

#else
typedef struct __lck_spin_t__ lck_spin_t;
#endif  // KERNEL_PRIVATE
#endif  // MACH_KERNEL_PRIVATE

#ifdef MACH_KERNEL_PRIVATE
typedef struct _lck_mtx_ {
        union {
                uintptr_t       lck_mtx_data;   /* Thread pointer plus lock bits */
                uintptr_t       lck_mtx_tag;    /* Tag for type */
        };                                      /* arm: 4  arm64: 8 */
        union {
                struct {
                        uint16_t        lck_mtx_waiters;        /* Number of waiters */
                        uint8_t         lck_mtx_pri;            /* unused */
                        uint8_t         lck_mtx_type;           /* Type */
                };
                struct {
                        struct _lck_mtx_ext_    *lck_mtx_ptr;   /* Indirect pointer */
                };
        };                                      /* arm: 4  arm64: 8 */
} lck_mtx_t;                                    /* arm: 8  arm64: 16 */

/* Shared between mutex and read-write locks */
#define LCK_ILOCK_BIT           0
#define ARM_LCK_WAITERS_BIT     1
#define LCK_ILOCK               (1 << LCK_ILOCK_BIT)
#define ARM_LCK_WAITERS         (1 << ARM_LCK_WAITERS_BIT)

#define LCK_MTX_TYPE            0x22            /* lock type */

#define LCK_MTX_TAG_INDIRECT    0x00001007      /* lock marked as Indirect */
#define LCK_MTX_TAG_DESTROYED   0x00002007      /* lock marked as Destroyed */
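
/*
 * Illustrative sketch (not part of the original header), assuming the tag/union
 * layout documented above: the first word of lck_mtx_t is either an owner thread
 * pointer plus low lock bits, or a tag. When the tag equals LCK_MTX_TAG_INDIRECT,
 * the second word is a pointer to the extended lck_mtx_ext_t. The helper name
 * mtx_get_ext_sketch() is hypothetical.
 *
 *     static struct _lck_mtx_ext_ *
 *     mtx_get_ext_sketch(lck_mtx_t *lck)
 *     {
 *             if (lck->lck_mtx_tag == LCK_MTX_TAG_INDIRECT) {
 *                     return lck->lck_mtx_ptr;       // indirect (extended) mutex
 *             }
 *             return NULL;                           // ordinary inline mutex
 *     }
 */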

#define LCK_FRAMES_MAX  8

extern uint64_t MutexSpin;
extern uint64_t low_MutexSpin;
extern int64_t  high_MutexSpin;

typedef struct {
        unsigned int    type;
        vm_offset_t     stack[LCK_FRAMES_MAX];
        vm_offset_t     thread;
} lck_mtx_deb_t;

#define MUTEX_TAG       0x4d4d

typedef struct {
        unsigned int    lck_mtx_stat_data;
} lck_mtx_stat_t;

typedef struct _lck_mtx_ext_ {
        lck_mtx_t               lck_mtx;        /* arm: 12  arm64: 24 */
        struct _lck_grp_        *lck_mtx_grp;   /* arm: 4   arm64: 8 */
        unsigned int            lck_mtx_attr;   /* arm: 4   arm64: 4 */
        lck_mtx_stat_t          lck_mtx_stat;   /* arm: 4   arm64: 4 */
        lck_mtx_deb_t           lck_mtx_deb;    /* arm: 40  arm64: 80 */
} lck_mtx_ext_t;                                /* arm: 64  arm64: 120 */

#define LCK_MTX_ATTR_DEBUG      0x1
#define LCK_MTX_ATTR_DEBUGb     31
#define LCK_MTX_ATTR_STAT       0x2
#define LCK_MTX_ATTR_STATb      30

#define LCK_MTX_EVENT(lck)        ((event_t)(((unsigned int*)(lck)) + ((sizeof(lck_mtx_t) - 1) / sizeof(unsigned int))))
#define LCK_EVENT_TO_MUTEX(event) ((lck_mtx_t *)(uintptr_t)(((unsigned int *)(event)) - ((sizeof(lck_mtx_t) - 1) / sizeof(unsigned int))))
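
/*
 * Illustrative note (not part of the original header): LCK_MTX_EVENT resolves to
 * the address of the last 32-bit word of the mutex, so waiting threads block on
 * an address inside the lock itself, and LCK_EVENT_TO_MUTEX inverts that offset.
 * A minimal round-trip sketch:
 *
 *     lck_mtx_t  m;
 *     event_t    ev = LCK_MTX_EVENT(&m);
 *     lck_mtx_t *mp = LCK_EVENT_TO_MUTEX(ev);
 *     assert(mp == &m);          // the event <-> mutex mapping is invertible
 */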

#else
#ifdef KERNEL_PRIVATE
typedef struct {
        uintptr_t       opaque[2];
} lck_mtx_t;

typedef struct {
#if defined(__arm64__)
        unsigned long   opaque[16];
#else /* __arm__ */
        unsigned int    opaque[16];
#endif
} lck_mtx_ext_t;

#else
typedef struct __lck_mtx_t__ lck_mtx_t;
#endif
#endif

#ifdef MACH_KERNEL_PRIVATE

typedef union {
        struct {
                uint16_t        shared_count;   /* Number of granted shared requests */
                uint16_t        interlock:      1,      /* Interlock */
                    priv_excl:                  1,      /* priority for Writer */
                    want_upgrade:               1,      /* Read-to-write upgrade waiting */
                    want_excl:                  1,      /* Writer is waiting, or locked for write */
                    r_waiting:                  1,      /* Someone is sleeping on lock */
                    w_waiting:                  1,      /* Writer is sleeping on lock */
                    can_sleep:                  1,      /* Can attempts to lock go to sleep? */
                    _pad2:                      8,      /* padding */
                    tag_valid:                  1;      /* Field is actually a tag, not a bitfield */
#if __arm64__
                uint32_t        _pad4;
#endif
        };
        struct {
                uint32_t        data;           /* Single word version of bitfields and shared count */
#if __arm64__
                uint32_t        lck_rw_pad4;
#endif
        };
} lck_rw_word_t;

typedef struct {
        lck_rw_word_t   word;
        thread_t        lck_rw_owner;
} lck_rw_t;                                     /* arm: 8  arm64: 16 */

#define lck_rw_shared_count     word.shared_count
#define lck_rw_interlock        word.interlock
#define lck_rw_priv_excl        word.priv_excl
#define lck_rw_want_upgrade     word.want_upgrade
#define lck_rw_want_excl        word.want_excl
#define lck_r_waiting           word.r_waiting
#define lck_w_waiting           word.w_waiting
#define lck_rw_can_sleep        word.can_sleep
#define lck_rw_data             word.data
// tag and data reference the same memory. When the tag_valid bit is set,
// the data word should be treated as a tag instead of a bitfield.
#define lck_rw_tag_valid        word.tag_valid
#define lck_rw_tag              word.data

#define LCK_RW_SHARED_READER_OFFSET     0
#define LCK_RW_INTERLOCK_BIT            16
#define LCK_RW_PRIV_EXCL_BIT            17
#define LCK_RW_WANT_UPGRADE_BIT         18
#define LCK_RW_WANT_EXCL_BIT            19
#define LCK_RW_R_WAITING_BIT            20
#define LCK_RW_W_WAITING_BIT            21
#define LCK_RW_CAN_SLEEP_BIT            22
// 23-30
#define LCK_RW_TAG_VALID_BIT            31

#define LCK_RW_INTERLOCK        (1U << LCK_RW_INTERLOCK_BIT)
#define LCK_RW_R_WAITING        (1U << LCK_RW_R_WAITING_BIT)
#define LCK_RW_W_WAITING        (1U << LCK_RW_W_WAITING_BIT)
#define LCK_RW_WANT_UPGRADE     (1U << LCK_RW_WANT_UPGRADE_BIT)
#define LCK_RW_WANT_EXCL        (1U << LCK_RW_WANT_EXCL_BIT)
#define LCK_RW_TAG_VALID        (1U << LCK_RW_TAG_VALID_BIT)
#define LCK_RW_PRIV_EXCL        (1U << LCK_RW_PRIV_EXCL_BIT)
#define LCK_RW_SHARED_MASK      (0xffff << LCK_RW_SHARED_READER_OFFSET)
#define LCK_RW_SHARED_READER    (0x1 << LCK_RW_SHARED_READER_OFFSET)

#define LCK_RW_TAG_DESTROYED    ((LCK_RW_TAG_VALID | 0xdddddeadu))      /* lock marked as Destroyed */
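
/*
 * Illustrative sketch (not part of the original header): the bit and mask defines
 * above mirror the lck_rw_word_t bitfield layout, so a single 32-bit load of
 * lck_rw_data can be decoded without touching individual bitfields. The helper
 * name rw_word_describe_sketch() is hypothetical.
 *
 *     static void
 *     rw_word_describe_sketch(uint32_t data)
 *     {
 *             uint32_t readers   = data & LCK_RW_SHARED_MASK;          // bits 0-15: shared holders
 *             bool     want_excl = (data & LCK_RW_WANT_EXCL) != 0;     // writer holds or waits
 *             bool     upgrading = (data & LCK_RW_WANT_UPGRADE) != 0;  // read-to-write upgrade pending
 *             bool     destroyed = (data & LCK_RW_TAG_VALID) &&
 *                                  (data == LCK_RW_TAG_DESTROYED);     // word is a tag, not bitfields
 *             (void)readers; (void)want_excl; (void)upgrading; (void)destroyed;
 *     }
 */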

#define LCK_RW_WRITER_EVENT(lck)        (event_t)((uintptr_t)(lck) + 1)
#define LCK_RW_READER_EVENT(lck)        (event_t)((uintptr_t)(lck) + 2)
#define WRITE_EVENT_TO_RWLOCK(event)    ((lck_rw_t *)((uintptr_t)(event) - 1))
#define READ_EVENT_TO_RWLOCK(event)     ((lck_rw_t *)((uintptr_t)(event) - 2))
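
/*
 * Illustrative note (not part of the original header): writer and reader waiters
 * sleep on two distinct addresses derived from the lock (+1 and +2 bytes), which
 * lets each class be woken independently; the corresponding _TO_RWLOCK macros
 * undo the offset.
 *
 *     lck_rw_t rw;
 *     assert(WRITE_EVENT_TO_RWLOCK(LCK_RW_WRITER_EVENT(&rw)) == &rw);
 *     assert(READ_EVENT_TO_RWLOCK(LCK_RW_READER_EVENT(&rw))  == &rw);
 */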

#if __ARM_ENABLE_WFE_

#define wait_for_event()        __builtin_arm_wfe()
#if __arm__
#define set_event()             do { __builtin_arm_dsb(DSB_ISHST); __builtin_arm_sev(); } while (0)
#define LOCK_SNOOP_SPINS        4
#else
#define set_event()             do { } while (0)        // arm64 SEV is implicit in stlxr
#define LOCK_SNOOP_SPINS        0x300
#endif

#else

#define wait_for_event()        __builtin_arm_clrex()
#define set_event()             do { } while (0)
#define LOCK_SNOOP_SPINS        0x300

#endif  // __ARM_ENABLE_WFE_
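
/*
 * Illustrative sketch (not part of the original header) of how wait_for_event()
 * and set_event() are intended to pair in a spin-wait loop: poll LOCK_SNOOP_SPINS
 * times, then park in WFE (or clrex on non-WFE configurations) until a releaser
 * signals. This is simplified; the real lock paths arm the exclusive monitor with
 * a load-exclusive before waiting. The helper name spin_until_set_sketch() is
 * hypothetical.
 *
 *     static void
 *     spin_until_set_sketch(volatile uint32_t *word)
 *     {
 *             for (;;) {
 *                     for (int i = 0; i < LOCK_SNOOP_SPINS; i++) {
 *                             if (*word != 0) {
 *                                     return;
 *                             }
 *                     }
 *                     wait_for_event();       // releaser calls set_event() after storing
 *             }
 *     }
 */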

#if LOCK_PRIVATE

#define LOCK_PANIC_TIMEOUT      0xc00000        // 12.5M ticks ~= 524ms with 24MHz OSC

#define PLATFORM_LCK_ILOCK      LCK_ILOCK

#if defined(__ARM_ARCH_8_2__)
#define __ARM_ATOMICS_8_1       1       // ARMv8.1 atomic instructions are available
#endif

/*
 * Lock state to thread pointer
 * Clear the bottom bits
 */
#define LCK_MTX_STATE_TO_THREAD(s)      (thread_t)(s & ~(LCK_ILOCK | ARM_LCK_WAITERS))
/*
 * Thread pointer to lock state
 * arm thread pointers are aligned such that the bottom two bits are clear
 */
#define LCK_MTX_THREAD_TO_STATE(t)      ((uintptr_t)t)
/*
 * Thread pointer mask
 */
#define LCK_MTX_THREAD_MASK             (~(uintptr_t)(LCK_ILOCK | ARM_LCK_WAITERS))
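
/*
 * Illustrative sketch (not part of the original header), assuming at least 4-byte
 * aligned thread pointers: bits 0-1 of lck_mtx_data carry LCK_ILOCK and
 * ARM_LCK_WAITERS alongside the owner, so decoding is a mask and encoding is the
 * pointer value with the flag bits OR'ed in as needed.
 *
 *     uintptr_t state       = lck->lck_mtx_data;                    // one word: owner + flags
 *     thread_t  owner       = LCK_MTX_STATE_TO_THREAD(state);       // clear the bottom bits
 *     bool      interlocked = (state & LCK_ILOCK) != 0;
 *     bool      has_waiters = (state & ARM_LCK_WAITERS) != 0;
 *
 *     uintptr_t new_state   = LCK_MTX_THREAD_TO_STATE(owner) | ARM_LCK_WAITERS;
 */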

#define disable_preemption_for_thread(t)        os_atomic_store(&(t->machine.preemption_count), t->machine.preemption_count + 1, compiler_acq_rel)
#define preemption_disabled_for_thread(t)       (t->machine.preemption_count > 0)


__unused static void
disable_interrupts_noread(void)
{
#if __arm__
        __asm__ volatile ("cpsid if" ::: "memory");                             // Mask IRQ FIQ
#else
        __builtin_arm_wsr64("DAIFSet", (DAIFSC_IRQF | DAIFSC_FIQF));            // Mask IRQ FIQ
#endif
}

__unused static inline long
get_interrupts(void)
{
        long state;

#if __arm__
        __asm__ volatile ("mrs %[state], cpsr" : [state] "=r" (state));         // Read cpsr
#else
        state = (long)__builtin_arm_rsr64("DAIF");                              // Read interrupt state
#endif
        return state;
}

__unused static inline long
disable_interrupts(void)
{
        long state;

        state = get_interrupts();               // Get previous state
        disable_interrupts_noread();            // Disable
        return state;
}

__unused static inline void
restore_interrupts(long state)
{
#if __arm__
        __asm__ volatile ("msr cpsr, %[state]" :: [state] "r" (state) : "cc", "memory");        // Restore CPSR
#elif __arm64__
        __builtin_arm_wsr64("DAIF", (uint64_t)state);                                           // Restore masks
#endif
}
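
/*
 * Illustrative usage (not part of the original header): the helpers above are
 * meant to bracket short critical sections, restoring whatever interrupt mask
 * was in effect on entry rather than unconditionally re-enabling.
 *
 *     long istate = disable_interrupts();    // returns the previous CPSR/DAIF state
 *     // ... touch interlock-protected or per-CPU state ...
 *     restore_interrupts(istate);            // put the saved mask back
 */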

#endif  // LOCK_PRIVATE

#else
#ifdef KERNEL_PRIVATE
typedef struct {
        uintptr_t       opaque[2];
} lck_rw_t;
#else
typedef struct __lck_rw_t__ lck_rw_t;
#endif
#endif

#endif  /* _ARM_LOCKS_H_ */