/*
 * Copyright (c) 201 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _I386_LOCKS_I386_INLINES_H_
#define _I386_LOCKS_I386_INLINES_H_

#include <kern/locks.h>
#include <kern/lock_stat.h>
#include <kern/turnstile.h>

// Enforce program order of loads and stores.
#define ordered_load(target) os_atomic_load(target, compiler_acq_rel)
#define ordered_store_release(target, value) ({ \
	os_atomic_store(target, value, release); \
	os_compiler_barrier(); \
})

/* Enforce program order of loads and stores. */
#define ordered_load_mtx_state(lock)                    ordered_load(&(lock)->lck_mtx_state)
#define ordered_store_mtx_state_release(lock, value)    ordered_store_release(&(lock)->lck_mtx_state, (value))
#define ordered_store_mtx_owner(lock, value)            os_atomic_store(&(lock)->lck_mtx_owner, (value), compiler_acq_rel)

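/*
 * Development/debug builds only: abort-like helper, presumably used to
 * panic when a mutex owner consistency check fails.
 */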
#if DEVELOPMENT | DEBUG
void lck_mtx_owner_check_panic(lck_mtx_t *mutex) __abortlike;
#endif

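/*
 * Drop the interlock: clear LCK_MTX_ILOCKED_MSK in the caller-supplied
 * state word, publish it with release semantics, and re-enable preemption.
 */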
__attribute__((always_inline))
static inline void
lck_mtx_ilk_unlock_inline(
	lck_mtx_t       *mutex,
	uint32_t        state)
{
	state &= ~LCK_MTX_ILOCKED_MSK;
	ordered_store_mtx_state_release(mutex, state);

	enable_preemption();
}

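/*
 * Tail of the mutex acquire path: the interlock must still be held on
 * entry. Releases it (re-enabling preemption) and records the DTrace
 * lockstat acquire probe; `indirect` selects the extended-mutex variant.
 */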
__attribute__((always_inline))
static inline void
lck_mtx_lock_finish_inline(
	lck_mtx_t       *mutex,
	uint32_t        state,
	boolean_t       indirect)
{
	assert(state & LCK_MTX_ILOCKED_MSK);

	/* release the interlock and re-enable preemption */
	lck_mtx_ilk_unlock_inline(mutex, state);

#if CONFIG_DTRACE
	if (indirect) {
		LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE, mutex, 0);
	} else {
		LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE, mutex, 0);
	}
#endif
}

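/*
 * Same as lck_mtx_lock_finish_inline, but additionally calls
 * turnstile_cleanup() once the interlock is dropped, for acquire paths
 * that may have set up a turnstile while waiting.
 */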
__attribute__((always_inline))
static inline void
lck_mtx_lock_finish_inline_with_cleanup(
	lck_mtx_t       *mutex,
	uint32_t        state,
	boolean_t       indirect)
{
	assert(state & LCK_MTX_ILOCKED_MSK);

	/* release the interlock and re-enable preemption */
	lck_mtx_ilk_unlock_inline(mutex, state);

	turnstile_cleanup();

#if CONFIG_DTRACE
	if (indirect) {
		LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE, mutex, 0);
	} else {
		LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE, mutex, 0);
	}
#endif
}

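/*
 * Tail of the try-lock acquire path: drops the interlock, re-enables
 * preemption, and records the try-lock acquire lockstat probe.
 */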
__attribute__((always_inline))
static inline void
lck_mtx_try_lock_finish_inline(
	lck_mtx_t       *mutex,
	uint32_t        state)
{
	/* release the interlock and re-enable preemption */
	lck_mtx_ilk_unlock_inline(mutex, state);

#if CONFIG_DTRACE
	LOCKSTAT_RECORD(LS_LCK_MTX_TRY_LOCK_ACQUIRE, mutex, 0);
#endif
}

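/*
 * Convert a spin-mode hold into a full mutex hold: clear the interlock
 * and spin bits, set the mutex-locked bit, publish the new state with
 * release semantics, and re-enable preemption.
 */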
__attribute__((always_inline))
static inline void
lck_mtx_convert_spin_finish_inline(
	lck_mtx_t       *mutex,
	uint32_t        state)
{
	/* release the interlock and acquire it as mutex */
	state &= ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_SPIN_MSK);
	state |= LCK_MTX_MLOCKED_MSK;

	ordered_store_mtx_state_release(mutex, state);
	enable_preemption();
}

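/*
 * Tail of the unlock paths, called once the lock word has already been
 * updated by the caller: re-enables preemption and records the unlock
 * release lockstat probe.
 */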
__attribute__((always_inline))
static inline void
lck_mtx_unlock_finish_inline(
	lck_mtx_t       *mutex,
	boolean_t       indirect)
{
	enable_preemption();

#if CONFIG_DTRACE
	if (indirect) {
		LOCKSTAT_RECORD(LS_LCK_MTX_EXT_UNLOCK_RELEASE, mutex, 0);
	} else {
		LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, mutex, 0);
	}
#endif // CONFIG_DTRACE
}

#endif /* _I386_LOCKS_I386_INLINES_H_ */