/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _I386_LOCKS_I386_INLINES_H_
#define _I386_LOCKS_I386_INLINES_H_

#include <kern/locks.h>
#include <kern/lock_stat.h>
#include <kern/turnstile.h>

// Enforce program order of loads and stores.
#define ordered_load(target) os_atomic_load(target, compiler_acq_rel)
#define ordered_store_release(target, value) ({ \
	os_atomic_store(target, value, release); \
	os_compiler_barrier(); \
})

/* Ordered accessors for the mutex state and owner fields. */
#define ordered_load_mtx_state(lock) ordered_load(&(lock)->lck_mtx_state)
#define ordered_store_mtx_state_release(lock, value) ordered_store_release(&(lock)->lck_mtx_state, (value))
#define ordered_store_mtx_owner(lock, value) os_atomic_store(&(lock)->lck_mtx_owner, (value), compiler_acq_rel)

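/*
 * Sanity check on mutex ownership; panics (__abortlike) when the owner
 * field is not in the expected state. DEVELOPMENT and DEBUG kernels only.
 */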
#if DEVELOPMENT | DEBUG
void lck_mtx_owner_check_panic(lck_mtx_t *mutex) __abortlike;
#endif

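/*
 * Releases the mutex interlock: clears LCK_MTX_ILOCKED_MSK with a release
 * store, then re-enables the preemption that the interlock holder had
 * disabled.
 */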
__attribute__((always_inline))
static inline void
lck_mtx_ilk_unlock_inline(
	lck_mtx_t       *mutex,
	uint32_t        state)
{
	state &= ~LCK_MTX_ILOCKED_MSK;
	ordered_store_mtx_state_release(mutex, state);

	enable_preemption();
}

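/*
 * Finishes a mutex acquisition: drops the interlock (re-enabling
 * preemption) and, under CONFIG_DTRACE, records the lock-acquire probe,
 * using the _EXT variant for indirect (extended) mutexes.
 */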
__attribute__((always_inline))
static inline void
lck_mtx_lock_finish_inline(
	lck_mtx_t       *mutex,
	uint32_t        state,
	boolean_t       indirect)
{
	assert(state & LCK_MTX_ILOCKED_MSK);

	/* release the interlock and re-enable preemption */
	lck_mtx_ilk_unlock_inline(mutex, state);

#if CONFIG_DTRACE
	if (indirect) {
		LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE, mutex, 0);
	} else {
		LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE, mutex, 0);
	}
#endif
}

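/*
 * Same as lck_mtx_lock_finish_inline(), but also runs turnstile_cleanup()
 * once the interlock has been dropped and preemption re-enabled, to
 * complete turnstile work deferred by the slow path.
 */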
__attribute__((always_inline))
static inline void
lck_mtx_lock_finish_inline_with_cleanup(
	lck_mtx_t       *mutex,
	uint32_t        state,
	boolean_t       indirect)
{
	assert(state & LCK_MTX_ILOCKED_MSK);

	/* release the interlock and re-enable preemption */
	lck_mtx_ilk_unlock_inline(mutex, state);

	turnstile_cleanup();

#if CONFIG_DTRACE
	if (indirect) {
		LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE, mutex, 0);
	} else {
		LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE, mutex, 0);
	}
#endif
}

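/*
 * Finishes a successful try-lock: drops the interlock (re-enabling
 * preemption) and, under CONFIG_DTRACE, records the try-lock acquire
 * probe.
 */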
__attribute__((always_inline))
static inline void
lck_mtx_try_lock_finish_inline(
	lck_mtx_t       *mutex,
	uint32_t        state)
{
	/* release the interlock and re-enable preemption */
	lck_mtx_ilk_unlock_inline(mutex, state);

#if CONFIG_DTRACE
	LOCKSTAT_RECORD(LS_LCK_MTX_TRY_LOCK_ACQUIRE, mutex, 0);
#endif
}

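/*
 * Converts a mutex held in spin mode to full mutex mode: clears the
 * interlock and spin bits, sets the mutex-locked bit with a release
 * store, and re-enables preemption.
 */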
__attribute__((always_inline))
static inline void
lck_mtx_convert_spin_finish_inline(
	lck_mtx_t       *mutex,
	uint32_t        state)
{
	/* release the interlock and hold the lock as a full mutex */
	state &= ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_SPIN_MSK);
	state |= LCK_MTX_MLOCKED_MSK;

	ordered_store_mtx_state_release(mutex, state);
	enable_preemption();
}

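/*
 * Finishes a mutex unlock. The caller has already updated the lock state,
 * so this only re-enables preemption and, under CONFIG_DTRACE, records
 * the unlock-release probe.
 */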
__attribute__((always_inline))
static inline void
lck_mtx_unlock_finish_inline(
	lck_mtx_t       *mutex,
	boolean_t       indirect)
{
	enable_preemption();

#if CONFIG_DTRACE
	if (indirect) {
		LOCKSTAT_RECORD(LS_LCK_MTX_EXT_UNLOCK_RELEASE, mutex, 0);
	} else {
		LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, mutex, 0);
	}
#endif // CONFIG_DTRACE
}

#endif /* _I386_LOCKS_I386_INLINES_H_ */