/*
 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define LOCK_PRIVATE 1

#include <mach_ldebug.h>

#include <kern/locks.h>
#include <kern/kalloc.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>
#include <string.h>

#include <i386/machine_routines.h> /* machine_timeout_suspended() */
#include <machine/atomic.h>
#include <machine/machine_cpu.h>
#include <i386/mp.h>
#include <sys/kdebug.h>
#include <i386/locks_i386_inlines.h>

/*
 * Fast path routines for the lck_mtx locking and unlocking functions.
 * Each fast path attempts a single compare-and-swap to acquire/release the
 * lock and interlock, and falls back to the slow path if that fails.
 *
 * These functions were previously implemented in x86 assembly, and some
 * optimizations are in place in this C code so that the compiled code is
 * as performant and compact as the assembly version.
 *
 * To keep these functions from being inlined and increasing the kernel text
 * size, they are all marked __attribute__((noinline)).
 *
 * The code is structured so that no called function returns into the context
 * of its caller, i.e. every called function is either a tail call or an
 * inline function. The tail-called functions take fewer than six arguments,
 * so the arguments can be passed in registers and do not need to be pushed
 * on the stack. This allows the compiler to avoid creating a stack frame for
 * these functions.
 *
 * The file is compiled with -momit-leaf-frame-pointer and -O2.
 */

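/*
 * The fast paths below operate on the lck_mtx_state word using these bits
 * (a descriptive summary of how they are used here; the masks themselves
 * are defined elsewhere):
 *   LCK_MTX_ILOCKED_MSK - the interlock is held
 *   LCK_MTX_MLOCKED_MSK - the mutex is owned
 *   LCK_MTX_SPIN_MSK    - the mutex is held as a spinlock
 *   LCK_MTX_WAITERS_MSK - one or more threads are waiting on the mutex
 */
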
#if DEVELOPMENT || DEBUG

/*
 * If one or more simplelocks are currently held by a thread,
 * an attempt to acquire a mutex will cause this check to fail
 * (since a mutex lock may context switch, holding a simplelock
 * is not a good thing).
 */
void __inline__
lck_mtx_check_preemption(void)
{
	if (get_preemption_level() == 0) {
		return;
	}
	if (LckDisablePreemptCheck) {
		return;
	}
	if (current_cpu_datap()->cpu_hibernate) {
		return;
	}

	panic("preemption_level(%d) != 0\n", get_preemption_level());
}

#else /* DEVELOPMENT || DEBUG */

void __inline__
lck_mtx_check_preemption(void)
{
	return;
}

#endif /* DEVELOPMENT || DEBUG */

/*
 * Routine: lck_mtx_lock
 *
 * Locks a mutex for the current thread.
 * It tries the fast path first and falls back
 * to the slow path in case of contention.
 *
 * Neither the interlock nor the mutex may already be
 * held by the current thread.
 * In case of contention it might sleep.
 */
__attribute__((noinline))
void
lck_mtx_lock(
	lck_mtx_t *lock)
{
	uint32_t prev, state;

	lck_mtx_check_preemption();
	state = ordered_load_mtx_state(lock);

	/*
	 * Fast path only if the mutex is not held,
	 * the interlock is not contended and there are no waiters.
	 * Indirect mutexes, as well as destroyed mutexes,
	 * fall back to the slow path.
	 */

	prev = state & ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK | LCK_MTX_WAITERS_MSK);
	state = prev | LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK;

	disable_preemption();
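	/* the memory order is acquire because a successful cmpxchg also acquires the interlock */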
	if (!os_atomic_cmpxchg(&lock->lck_mtx_state, prev, state, acquire)) {
		enable_preemption();
		return lck_mtx_lock_slow(lock);
	}

	/* mutex acquired, interlock acquired and preemption disabled */

	thread_t thread = current_thread();
	/* record owner of mutex */
	ordered_store_mtx_owner(lock, (uintptr_t)thread);

#if MACH_LDEBUG
	if (thread) {
		thread->mutex_count++;          /* lock statistic */
	}
#endif

	/* release interlock and re-enable preemption */
	lck_mtx_lock_finish_inline(lock, state, FALSE);
}

/*
 * Routine: lck_mtx_try_lock
 *
 * Tries to lock a mutex for the current thread.
 * It tries the fast path first and falls back
 * to the slow path in case of contention.
 *
 * Neither the interlock nor the mutex may already be
 * held by the current thread.
 *
 * If the mutex is already held (either as a spinlock or as a mutex)
 * the function fails; otherwise it acquires the mutex.
 */
__attribute__((noinline))
boolean_t
lck_mtx_try_lock(
	lck_mtx_t *lock)
{
	uint32_t prev, state;

	state = ordered_load_mtx_state(lock);

	/*
	 * Fast path only if the mutex is not held,
	 * the interlock is not contended and there are no waiters.
	 * Indirect mutexes, as well as destroyed mutexes,
	 * fall back to the slow path.
	 */

	prev = state & ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK | LCK_MTX_WAITERS_MSK);
	state = prev | LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK;

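	/*
	 * Attempt the acquisition with a single compare-and-swap: prev is the
	 * expected unowned value, state adds the interlock and mutex-owned bits.
	 */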
	disable_preemption();
	if (!os_atomic_cmpxchg(&lock->lck_mtx_state, prev, state, acquire)) {
		enable_preemption();
		return lck_mtx_try_lock_slow(lock);
	}

	/* mutex acquired, interlock acquired and preemption disabled */

	thread_t thread = current_thread();
	/* record owner of mutex */
	ordered_store_mtx_owner(lock, (uintptr_t)thread);

#if MACH_LDEBUG
	if (thread) {
		thread->mutex_count++;          /* lock statistic */
	}
#endif

	/* release interlock and re-enable preemption */
	lck_mtx_try_lock_finish_inline(lock, state);

	return TRUE;
}

/*
 * Routine: lck_mtx_lock_spin_always
 *
 * Tries to lock a mutex as a spinlock for the current thread.
 * It tries the fast path first and falls back
 * to the slow path in case of contention.
 *
 * Neither the interlock nor the mutex may already be
 * held by the current thread.
 *
 * If the mutex is held as a mutex by another thread,
 * this function switches behavior and tries to acquire the lock as a mutex.
 *
 * If the mutex is held as a spinlock, it spins, contending
 * for it.
 *
 * In case of contention it might sleep.
 */
__attribute__((noinline))
void
lck_mtx_lock_spin_always(
	lck_mtx_t *lock)
{
	uint32_t prev, state;

	state = ordered_load_mtx_state(lock);

	/*
	 * Fast path only if the mutex is held
	 * neither as a mutex nor as a spinlock and
	 * the interlock is not contended.
	 * Indirect mutexes, as well as destroyed mutexes,
	 * fall back to the slow path.
	 */

	/* Note LCK_MTX_SPIN_MSK is set only if LCK_MTX_ILOCKED_MSK is set */
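	/*
	 * Unlike lck_mtx_lock(), the expected value below does not clear
	 * LCK_MTX_WAITERS_MSK, so this fast path can succeed (and preserves
	 * the waiters bit) even when waiters are present.
	 */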
	prev = state & ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK);
	state = prev | LCK_MTX_ILOCKED_MSK | LCK_MTX_SPIN_MSK;

	disable_preemption();
	if (!os_atomic_cmpxchg(&lock->lck_mtx_state, prev, state, acquire)) {
		enable_preemption();
		return lck_mtx_lock_spin_slow(lock);
	}

	/* mutex acquired as spinlock, interlock acquired and preemption disabled */

	thread_t thread = current_thread();
	/* record owner of mutex */
	ordered_store_mtx_owner(lock, (uintptr_t)thread);

#if MACH_LDEBUG
	if (thread) {
		thread->mutex_count++;          /* lock statistic */
	}
#endif

#if CONFIG_DTRACE
	LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_SPIN_ACQUIRE, lock, 0);
#endif
	/* return with the interlock held and preemption disabled */
	return;
}

/*
 * Routine: lck_mtx_lock_spin
 *
 * Tries to lock a mutex as a spinlock for the current thread.
 * It tries the fast path first and falls back
 * to the slow path in case of contention.
 *
 * Neither the interlock nor the mutex may already be
 * held by the current thread.
 *
 * If the mutex is held as a mutex by another thread,
 * this function switches behavior and tries to acquire the lock as a mutex.
 *
 * If the mutex is held as a spinlock, it spins, contending
 * for it.
 *
 * In case of contention it might sleep.
 */
void
lck_mtx_lock_spin(
	lck_mtx_t *lock)
{
	lck_mtx_check_preemption();
	lck_mtx_lock_spin_always(lock);
}

/*
 * Routine: lck_mtx_try_lock_spin_always
 *
 * Tries to lock a mutex as a spinlock for the current thread.
 * It tries the fast path first and falls back
 * to the slow path in case of contention.
 *
 * Neither the interlock nor the mutex may already be
 * held by the current thread.
 *
 * If the mutex is already held (either as a spinlock or as a mutex)
 * the function fails; otherwise it acquires the mutex as a spinlock.
 */
__attribute__((noinline))
boolean_t
lck_mtx_try_lock_spin_always(
	lck_mtx_t *lock)
{
	uint32_t prev, state;

	state = ordered_load_mtx_state(lock);

	/*
	 * Fast path only if the mutex is held
	 * neither as a mutex nor as a spinlock and
	 * the interlock is not contended.
	 * Indirect mutexes, as well as destroyed mutexes,
	 * fall back to the slow path.
	 */

	/* Note LCK_MTX_SPIN_MSK is set only if LCK_MTX_ILOCKED_MSK is set */
	prev = state & ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK);
	state = prev | LCK_MTX_ILOCKED_MSK | LCK_MTX_SPIN_MSK;

	disable_preemption();
	if (!os_atomic_cmpxchg(&lock->lck_mtx_state, prev, state, acquire)) {
		enable_preemption();
		return lck_mtx_try_lock_spin_slow(lock);
	}

	/* mutex acquired as spinlock, interlock acquired and preemption disabled */

	thread_t thread = current_thread();
	/* record owner of mutex */
	ordered_store_mtx_owner(lock, (uintptr_t)thread);

#if MACH_LDEBUG
	if (thread) {
		thread->mutex_count++;          /* lock statistic */
	}
#endif

#if CONFIG_DTRACE
	LOCKSTAT_RECORD(LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE, lock, 0);
#endif

	/* return with the interlock held and preemption disabled */
	return TRUE;
}

/*
 * Routine: lck_mtx_try_lock_spin
 *
 * Tries to lock a mutex as a spinlock for the current thread.
 * It tries the fast path first and falls back
 * to the slow path in case of contention.
 *
 * Neither the interlock nor the mutex may already be
 * held by the current thread.
 *
 * If the mutex is already held (either as a spinlock or as a mutex)
 * the function fails; otherwise it acquires the mutex as a spinlock.
 */
boolean_t
lck_mtx_try_lock_spin(
	lck_mtx_t *lock)
{
	return lck_mtx_try_lock_spin_always(lock);
}

/*
 * Routine: lck_mtx_unlock
 *
 * Unlocks a mutex held by the current thread.
 * It tries the fast path first, and falls back
 * to the slow path in case waiters need to
 * be woken up.
 *
 * The interlock can be held, and the slow path will
 * unlock the mutex for this case.
 */
__attribute__((noinline))
void
lck_mtx_unlock(
	lck_mtx_t *lock)
{
	uint32_t prev, state;

	state = ordered_load_mtx_state(lock);

	if (state & LCK_MTX_SPIN_MSK) {
		return lck_mtx_unlock_slow(lock);
	}

	/*
	 * Only a full mutex goes through the fast path
	 * (if the lock was acquired as a spinlock it
	 * falls back to the slow path).
	 * If there are waiters it falls back
	 * to the slow path.
	 * If it is indirect it falls back to the slow path.
	 */

	/*
	 * Fast path state:
	 * interlock not held, no waiters and mutex held.
	 */
	prev = state & ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_WAITERS_MSK);
	prev |= LCK_MTX_MLOCKED_MSK;

	state = prev | LCK_MTX_ILOCKED_MSK;
	state &= ~LCK_MTX_MLOCKED_MSK;
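	/*
	 * state is the value installed by the cmpxchg below: interlock held and
	 * mutex bit cleared. The owner field is cleared and the interlock
	 * released further down.
	 */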

	disable_preemption();

	/* the memory order needs to be acquire because it is acquiring the interlock */
	if (!os_atomic_cmpxchg(&lock->lck_mtx_state, prev, state, acquire)) {
		enable_preemption();
		return lck_mtx_unlock_slow(lock);
	}

	/* mutex released, interlock acquired and preemption disabled */

#if DEVELOPMENT | DEBUG
	thread_t owner = (thread_t)lock->lck_mtx_owner;
	if (__improbable(owner != current_thread())) {
		lck_mtx_owner_check_panic(lock);
	}
#endif

	/* clear owner */
	ordered_store_mtx_owner(lock, 0);
	/* release interlock */
	state &= ~LCK_MTX_ILOCKED_MSK;
	ordered_store_mtx_state_release(lock, state);

#if MACH_LDEBUG
	thread_t thread = current_thread();
	if (thread) {
		thread->mutex_count--;
	}
#endif /* MACH_LDEBUG */

	/* re-enable preemption */
	lck_mtx_unlock_finish_inline(lock, FALSE);
}