/*
 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define LOCK_PRIVATE 1

#include <mach_ldebug.h>

#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>
#include <string.h>

#include <i386/machine_routines.h> /* machine_timeout_suspended() */
#include <machine/atomic.h>
#include <machine/machine_cpu.h>
#include <i386/mp.h>
#include <sys/kdebug.h>
#include <i386/locks_i386_inlines.h>

/*
 * Fast path routines for lck_mtx locking and unlocking functions.
 * The fast paths try a single compare-and-swap instruction to acquire/release
 * the lock and interlock, and fall back to the slow path if that fails.
 *
 * These functions were previously implemented in x86 assembly,
 * and some optimizations are in place in this C code so that the compiled
 * code is as performant and compact as the assembly version.
 *
 * To avoid inlining these functions and increasing the kernel text size,
 * all functions are marked __attribute__((noinline)).
 *
 * The code is structured so that there are no calls to functions that return
 * in the context of the caller, i.e. every function called is either a tail
 * call or an inline function. The tail-call functions take fewer than six
 * arguments, so the arguments are passed in registers and do not need to be
 * pushed on the stack. This allows the compiler to avoid creating a stack
 * frame for these functions.
 *
 * The file is compiled with -momit-leaf-frame-pointer and -O2.
 */

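/*
 * Illustrative sketch: how a client of the lck_mtx interface implemented
 * below is typically set up and used. The lck_grp_alloc_init() and
 * lck_mtx_alloc_init() calls come from <kern/locks.h>; the names
 * "example_grp", "example_mtx", "example_init" and "example_work" are
 * placeholders, not symbols defined in this file.
 *
 *    static lck_grp_t *example_grp;
 *    static lck_mtx_t *example_mtx;
 *
 *    static void
 *    example_init(void)
 *    {
 *        example_grp = lck_grp_alloc_init("example", LCK_GRP_ATTR_NULL);
 *        example_mtx = lck_mtx_alloc_init(example_grp, LCK_ATTR_NULL);
 *    }
 *
 *    static void
 *    example_work(void)
 *    {
 *        lck_mtx_lock(example_mtx);    // fast path: one CAS on lck_mtx_state
 *        // ... critical section, may block ...
 *        lck_mtx_unlock(example_mtx);  // fast path: one CAS releases the mutex
 *    }
 */
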
#if DEVELOPMENT || DEBUG
TUNABLE(bool, LckDisablePreemptCheck, "-disable_mtx_chk", false);

/*
 * If one or more simplelocks are currently held by a thread,
 * an attempt to acquire a mutex will cause this check to fail
 * (since a mutex lock may context switch, holding a simplelock
 * is not a good thing).
 */
void __inline__
lck_mtx_check_preemption(void)
{
    if (get_preemption_level() == 0) {
        return;
    }
    if (LckDisablePreemptCheck) {
        return;
    }
    if (current_cpu_datap()->cpu_hibernate) {
        return;
    }

    panic("preemption_level(%d) != 0\n", get_preemption_level());
}

#else /* DEVELOPMENT || DEBUG */

void __inline__
lck_mtx_check_preemption(void)
{
    return;
}

#endif /* DEVELOPMENT || DEBUG */

/*
 * Routine: lck_mtx_lock
 *
 * Locks a mutex for the current thread.
 * It tries the fast path first and
 * falls back to the slow path in case
 * of contention.
 *
 * Neither the interlock nor the mutex can already be held by the current thread.
 * In case of contention it might sleep.
 */
__attribute__((noinline))
void
lck_mtx_lock(
    lck_mtx_t *lock)
{
    uint32_t prev, state;

    lck_mtx_check_preemption();
    state = ordered_load_mtx_state(lock);

    /*
     * Fast path only if the mutex is not held,
     * the interlock is not contended and there are no waiters.
     * Indirect mutexes will fall through the slow path as
     * well as destroyed mutexes.
     */

    prev = state & ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK | LCK_MTX_WAITERS_MSK);
    state = prev | LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK;

    disable_preemption();
    if (!os_atomic_cmpxchg(&lock->lck_mtx_state, prev, state, acquire)) {
        enable_preemption();
        return lck_mtx_lock_slow(lock);
    }

    /* mutex acquired, interlock acquired and preemption disabled */

    thread_t thread = current_thread();
    /* record owner of mutex */
    ordered_store_mtx_owner(lock, (uintptr_t)thread);

#if MACH_LDEBUG
    if (thread) {
        thread->mutex_count++;          /* lock statistic */
    }
#endif

    /* release interlock and re-enable preemption */
    lck_mtx_lock_finish_inline(lock, state, FALSE);
}

/*
 * Routine: lck_mtx_try_lock
 *
 * Tries to lock a mutex for the current thread.
 * It tries the fast path first and
 * falls back to the slow path in case
 * of contention.
 *
 * Neither the interlock nor the mutex can already be held by the current thread.
 *
 * If the mutex is already held (either as spin lock or mutex)
 * the function will fail; otherwise it will acquire the mutex.
 */
__attribute__((noinline))
boolean_t
lck_mtx_try_lock(
    lck_mtx_t *lock)
{
    uint32_t prev, state;

    state = ordered_load_mtx_state(lock);

    /*
     * Fast path only if the mutex is not held,
     * the interlock is not contended and there are no waiters.
     * Indirect mutexes will fall through the slow path as
     * well as destroyed mutexes.
     */

    prev = state & ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK | LCK_MTX_WAITERS_MSK);
    state = prev | LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK;

    disable_preemption();
    if (!os_atomic_cmpxchg(&lock->lck_mtx_state, prev, state, acquire)) {
        enable_preemption();
        return lck_mtx_try_lock_slow(lock);
    }

    /* mutex acquired, interlock acquired and preemption disabled */

    thread_t thread = current_thread();
    /* record owner of mutex */
    ordered_store_mtx_owner(lock, (uintptr_t)thread);

#if MACH_LDEBUG
    if (thread) {
        thread->mutex_count++;          /* lock statistic */
    }
#endif

    /* release interlock and re-enable preemption */
    lck_mtx_try_lock_finish_inline(lock, state);

    return TRUE;
}

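/*
 * Illustrative sketch: the usual try-lock pattern built on lck_mtx_try_lock().
 * "example_mtx", "do_work()" and "defer_work()" are placeholders for a
 * caller's own lock and routines.
 *
 *    if (lck_mtx_try_lock(example_mtx)) {
 *        // lock acquired without blocking
 *        do_work();
 *        lck_mtx_unlock(example_mtx);
 *    } else {
 *        // lock is held as spin lock or mutex; do not touch protected state
 *        defer_work();
 *    }
 */
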
/*
 * Routine: lck_mtx_lock_spin_always
 *
 * Locks a mutex as a spin lock for the current thread.
 * It tries the fast path first and
 * falls back to the slow path in case
 * of contention.
 *
 * Neither the interlock nor the mutex can already be held by the current thread.
 *
 * If the mutex is held as a full mutex by another thread,
 * this function will switch behavior and try to acquire the lock as a mutex.
 *
 * If the mutex is held as a spin lock, it will spin contending
 * for it.
 *
 * In case of contention it might sleep.
 */
__attribute__((noinline))
void
lck_mtx_lock_spin_always(
    lck_mtx_t *lock)
{
    uint32_t prev, state;

    state = ordered_load_mtx_state(lock);

    /*
     * Fast path only if the mutex is held neither
     * as mutex nor as spin lock and the
     * interlock is not contended.
     * Indirect mutexes will fall through the slow path as
     * well as destroyed mutexes.
     */

    if (state & (LCK_MTX_ILOCKED_MSK | LCK_MTX_SPIN_MSK)) {
        return lck_mtx_lock_spin_slow(lock);
    }

    /* Note LCK_MTX_SPIN_MSK is set only if LCK_MTX_ILOCKED_MSK is set */
    prev = state & ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK);
    state = prev | LCK_MTX_ILOCKED_MSK | LCK_MTX_SPIN_MSK;

    disable_preemption();
    if (!os_atomic_cmpxchg(&lock->lck_mtx_state, prev, state, acquire)) {
        enable_preemption();
        return lck_mtx_lock_spin_slow(lock);
    }

    /* mutex acquired as spinlock, interlock acquired and preemption disabled */

    thread_t thread = current_thread();
    /* record owner of mutex */
    ordered_store_mtx_owner(lock, (uintptr_t)thread);

#if MACH_LDEBUG
    if (thread) {
        thread->mutex_count++;          /* lock statistic */
    }
#endif

#if CONFIG_DTRACE
    LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_SPIN_ACQUIRE, lock, 0);
#endif
    /* return with the interlock held and preemption disabled */
    return;
}

/*
 * Routine: lck_mtx_lock_spin
 *
 * Locks a mutex as a spin lock for the current thread.
 * It tries the fast path first and
 * falls back to the slow path in case
 * of contention.
 *
 * Neither the interlock nor the mutex can already be held by the current thread.
 *
 * If the mutex is held as a full mutex by another thread,
 * this function will switch behavior and try to acquire the lock as a mutex.
 *
 * If the mutex is held as a spin lock, it will spin contending
 * for it.
 *
 * In case of contention it might sleep.
 */
void
lck_mtx_lock_spin(
    lck_mtx_t *lock)
{
    lck_mtx_check_preemption();
    lck_mtx_lock_spin_always(lock);
}

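/*
 * Illustrative sketch: the spin-then-convert pattern that lck_mtx_lock_spin()
 * supports. lck_mtx_convert_spin() (declared in <kern/locks.h>) upgrades a
 * mutex held as a spin lock to a full mutex; "example_mtx" and
 * "need_to_block" are placeholders.
 *
 *    lck_mtx_lock_spin(example_mtx);        // returns with interlock held, preemption disabled
 *    // ... short, non-blocking critical section ...
 *    if (need_to_block) {
 *        lck_mtx_convert_spin(example_mtx); // upgrade to a full mutex before blocking
 *        // ... work that may block ...
 *    }
 *    lck_mtx_unlock(example_mtx);           // handles both the spin and full-mutex cases
 */
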
/*
 * Routine: lck_mtx_try_lock_spin_always
 *
 * Tries to lock a mutex as a spin lock for the current thread.
 * It tries the fast path first and
 * falls back to the slow path in case
 * of contention.
 *
 * Neither the interlock nor the mutex can already be held by the current thread.
 *
 * If the mutex is already held (either as spin lock or mutex)
 * the function will fail; otherwise it will acquire the mutex
 * as a spin lock.
 */
__attribute__((noinline))
boolean_t
lck_mtx_try_lock_spin_always(
    lck_mtx_t *lock)
{
    uint32_t prev, state;

    state = ordered_load_mtx_state(lock);

    /*
     * Fast path only if the mutex is held neither
     * as mutex nor as spin lock and the
     * interlock is not contended.
     * Indirect mutexes will fall through the slow path as
     * well as destroyed mutexes.
     */

    /* Note LCK_MTX_SPIN_MSK is set only if LCK_MTX_ILOCKED_MSK is set */
    prev = state & ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK);
    state = prev | LCK_MTX_ILOCKED_MSK | LCK_MTX_SPIN_MSK;

    disable_preemption();
    if (!os_atomic_cmpxchg(&lock->lck_mtx_state, prev, state, acquire)) {
        enable_preemption();
        return lck_mtx_try_lock_spin_slow(lock);
    }

    /* mutex acquired as spinlock, interlock acquired and preemption disabled */

    thread_t thread = current_thread();
    /* record owner of mutex */
    ordered_store_mtx_owner(lock, (uintptr_t)thread);

#if MACH_LDEBUG
    if (thread) {
        thread->mutex_count++;          /* lock statistic */
    }
#endif

#if CONFIG_DTRACE
    LOCKSTAT_RECORD(LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE, lock, 0);
#endif

    /* return with the interlock held and preemption disabled */
    return TRUE;
}

/*
 * Routine: lck_mtx_try_lock_spin
 *
 * Tries to lock a mutex as a spin lock for the current thread.
 * It tries the fast path first and
 * falls back to the slow path in case
 * of contention.
 *
 * Neither the interlock nor the mutex can already be held by the current thread.
 *
 * If the mutex is already held (either as spin lock or mutex)
 * the function will fail; otherwise it will acquire the mutex
 * as a spin lock.
 */
boolean_t
lck_mtx_try_lock_spin(
    lck_mtx_t *lock)
{
    return lck_mtx_try_lock_spin_always(lock);
}

/*
 * Routine: lck_mtx_unlock
 *
 * Unlocks a mutex held by the current thread.
 * It tries the fast path first, and falls
 * back to the slow path in case waiters need to
 * be woken up.
 *
 * The interlock can be held, in which case the slow
 * path will unlock the mutex.
 */
__attribute__((noinline))
void
lck_mtx_unlock(
    lck_mtx_t *lock)
{
    uint32_t prev, state;

    state = ordered_load_mtx_state(lock);

    if (state & LCK_MTX_SPIN_MSK) {
        return lck_mtx_unlock_slow(lock);
    }

    /*
     * Only a full mutex will go through the fast path
     * (if the lock was acquired as a spinlock it will
     * fall through the slow path).
     * If there are waiters it will fall
     * through the slow path.
     * If it is indirect it will fall through the slow path.
     */

    /*
     * Fast path state:
     * interlock not held, no waiters, no promotion and mutex held.
     */
    prev = state & ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_WAITERS_MSK);
    prev |= LCK_MTX_MLOCKED_MSK;

    state = prev | LCK_MTX_ILOCKED_MSK;
    state &= ~LCK_MTX_MLOCKED_MSK;

    disable_preemption();

    /* the memory order needs to be acquire because it is acquiring the interlock */
    if (!os_atomic_cmpxchg(&lock->lck_mtx_state, prev, state, acquire)) {
        enable_preemption();
        return lck_mtx_unlock_slow(lock);
    }

    /* mutex released, interlock acquired and preemption disabled */

#if DEVELOPMENT || DEBUG
    thread_t owner = (thread_t)lock->lck_mtx_owner;
    if (__improbable(owner != current_thread())) {
        lck_mtx_owner_check_panic(lock);
    }
#endif

    /* clear owner */
    ordered_store_mtx_owner(lock, 0);
    /* release interlock */
    state &= ~LCK_MTX_ILOCKED_MSK;
    ordered_store_mtx_state_release(lock, state);

#if MACH_LDEBUG
    thread_t thread = current_thread();
    if (thread) {
        thread->mutex_count--;
    }
#endif  /* MACH_LDEBUG */

    /* re-enable preemption */
    lck_mtx_unlock_finish_inline(lock, FALSE);
}