/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#include "lock_internal.h"
#include "libkern/OSAtomic.h"
#include "os/lock_private.h"
#include "os/once_private.h"

#include <mach/mach_init.h>
#include <mach/mach_traps.h>
#include <mach/thread_switch.h>
#include <mach/mach_time.h>

#pragma mark _os_lock_base_t

#if !OS_VARIANT_ONLY

OS_LOCK_STRUCT_DECL_INTERNAL(base);
OS_USED static OS_LOCK_TYPE_STRUCT_DECL(base);

void
os_lock_lock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_lock(l);
}

bool
os_lock_trylock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_trylock(l);
}

void
os_lock_unlock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_unlock(l);
}

#endif //!OS_VARIANT_ONLY

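/*
 * Illustrative usage sketch (not from this file): os_lock_t is a transparent
 * union over the concrete lock structs declared further down, and the three
 * entry points above simply indirect through the per-type method table
 * (osl_type) installed by OS_LOCK_TYPE_INSTANCE(). Assuming a client built
 * against <os/lock_private.h>, and assuming the OS_LOCK_SPIN_INIT initializer
 * name, usage looks roughly like this:
 *
 *	static os_lock_spin_s _registry_lock = OS_LOCK_SPIN_INIT;
 *
 *	void registry_add(void *entry) {
 *		os_lock_lock(&_registry_lock);    // dispatches to _os_lock_spin_lock()
 *		_registry_insert(entry);          // hypothetical helper
 *		os_lock_unlock(&_registry_lock);  // dispatches to _os_lock_spin_unlock()
 *	}
 */
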
OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_lock_corruption_abort(void *lock_ptr OS_UNUSED, uintptr_t lock_value)
{
	__LIBPLATFORM_CLIENT_CRASH__(lock_value, "os_lock is corrupt");
}

#pragma mark OSSpinLock

#ifdef OS_LOCK_VARIANT_SELECTOR
void _OSSpinLockLockSlow(volatile OSSpinLock *l);
#else
OS_NOINLINE OS_USED static void _OSSpinLockLockSlow(volatile OSSpinLock *l);
#endif // OS_LOCK_VARIANT_SELECTOR

OS_ATOMIC_EXPORT void OSSpinLockLock(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT bool OSSpinLockTry(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT int spin_lock_try(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT void OSSpinLockUnlock(volatile OSSpinLock *l);

// WFE-based spinning for embedded (ARM) targets, PAUSE-based spinning
// elsewhere; the exact preprocessor condition is an assumption.
#if TARGET_OS_EMBEDDED
#define OS_LOCK_SPIN_SPIN_TRIES 100
#define OS_LOCK_SPIN_PAUSE() os_hardware_wfe()
#else
#define OS_LOCK_SPIN_SPIN_TRIES 1000
#define OS_LOCK_SPIN_PAUSE() os_hardware_pause()
#endif

static const OSSpinLock _OSSpinLockLocked = TARGET_OS_EMBEDDED ? 1 : -1;

static uint64_t
_os_lock_yield_deadline(mach_msg_timeout_t timeout)
{
	uint64_t abstime = timeout * NSEC_PER_MSEC;
#if !(defined(__i386__) || defined(__x86_64__))
	mach_timebase_info_data_t tbi;
	kern_return_t kr = mach_timebase_info(&tbi);
	if (kr) return UINT64_MAX;
	abstime *= tbi.denom;
	abstime /= tbi.numer;
#endif
	return mach_absolute_time() + abstime;
}

static bool
_os_lock_yield_until(uint64_t deadline)
{
	return mach_absolute_time() < deadline;
}

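/*
 * Worked example (illustrative): with the default 1 ms timeout used below,
 * abstime starts at 1,000,000 ns. On Intel the mach timebase is 1:1 and the
 * value is used as-is; on a hypothetical ARM timebase of numer = 125,
 * denom = 3 (one tick = 125/3 ns), the conversion yields
 * 1,000,000 * 3 / 125 = 24,000 ticks, which is added to mach_absolute_time()
 * to form the deadline that _os_lock_yield_until() checks.
 */
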
OS_NOINLINE
static void
_OSSpinLockLockYield(volatile OSSpinLock *l)
{
	int option = SWITCH_OPTION_DEPRESS;
	mach_msg_timeout_t timeout = 1;
	uint64_t deadline = _os_lock_yield_deadline(timeout);
	OSSpinLock lock;
	while (unlikely(lock = *l)) {
_yield:
		if (unlikely(lock != _OSSpinLockLocked)) {
			_os_lock_corruption_abort((void *)l, (uintptr_t)lock);
		}
		thread_switch(MACH_PORT_NULL, option, timeout);
		if (option == SWITCH_OPTION_WAIT) {
			timeout++;
		} else if (!_os_lock_yield_until(deadline)) {
			option = SWITCH_OPTION_WAIT;
		}
	}
	bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
	if (likely(r)) return;
	goto _yield;
}

#if OS_ATOMIC_UP // uniprocessor build; guard macro assumed
void
_OSSpinLockLockSlow(volatile OSSpinLock *l)
{
	return _OSSpinLockLockYield(l); // Don't spin on UP
}
#else
void
_OSSpinLockLockSlow(volatile OSSpinLock *l)
{
	uint32_t tries = OS_LOCK_SPIN_SPIN_TRIES;
	OSSpinLock lock;
	while (unlikely(lock = *l)) {
_spin:
		if (unlikely(lock != _OSSpinLockLocked)) {
			return _os_lock_corruption_abort((void *)l, (uintptr_t)lock);
		}
		if (unlikely(!tries--)) return _OSSpinLockLockYield(l);
		OS_LOCK_SPIN_PAUSE();
	}
	bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
	if (likely(r)) return;
	goto _spin;
}
#endif

#ifdef OS_LOCK_VARIANT_SELECTOR
#undef _OSSpinLockLockSlow
extern void _OSSpinLockLockSlow(volatile OSSpinLock *l);
#endif // OS_LOCK_VARIANT_SELECTOR

#if !OS_LOCK_VARIANT_ONLY

#if OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK && !TARGET_OS_SIMULATOR

typedef struct _os_nospin_lock_s *_os_nospin_lock_t;
void _os_nospin_lock_lock(_os_nospin_lock_t lock);
bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
void _os_nospin_lock_unlock(_os_nospin_lock_t lock);

void
OSSpinLockLock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_lock, OSSpinLockLock);
	OS_ATOMIC_ALIAS(_spin_lock, OSSpinLockLock);
	return _os_nospin_lock_lock((_os_nospin_lock_t)l);
}

bool
OSSpinLockTry(volatile OSSpinLock *l)
{
	return _os_nospin_lock_trylock((_os_nospin_lock_t)l);
}

int
spin_lock_try(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(_spin_lock_try, spin_lock_try);
	return _os_nospin_lock_trylock((_os_nospin_lock_t)l);
}

void
OSSpinLockUnlock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_unlock, OSSpinLockUnlock);
	OS_ATOMIC_ALIAS(_spin_unlock, OSSpinLockUnlock);
	return _os_nospin_lock_unlock((_os_nospin_lock_t)l);
}

#undef OS_ATOMIC_ALIAS
#define OS_ATOMIC_ALIAS(n, o)
static void _OSSpinLockLock(volatile OSSpinLock *l);
#undef OSSpinLockLock
#define OSSpinLockLock _OSSpinLockLock
static bool _OSSpinLockTry(volatile OSSpinLock *l);
#undef OSSpinLockTry
#define OSSpinLockTry _OSSpinLockTry
static __unused int __spin_lock_try(volatile OSSpinLock *l);
#undef spin_lock_try
#define spin_lock_try __spin_lock_try
static void _OSSpinLockUnlock(volatile OSSpinLock *l);
#undef OSSpinLockUnlock
#define OSSpinLockUnlock _OSSpinLockUnlock

#endif // OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK

void
OSSpinLockLock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_lock, OSSpinLockLock);
	OS_ATOMIC_ALIAS(_spin_lock, OSSpinLockLock);
	bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
	if (likely(r)) return;
	return _OSSpinLockLockSlow(l);
}

bool
OSSpinLockTry(volatile OSSpinLock *l)
{
	bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
	return r;
}

int
spin_lock_try(volatile OSSpinLock *l) // <rdar://problem/13316060>
{
	OS_ATOMIC_ALIAS(_spin_lock_try, spin_lock_try);
	return OSSpinLockTry(l);
}

void
OSSpinLockUnlock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_unlock, OSSpinLockUnlock);
	OS_ATOMIC_ALIAS(_spin_unlock, OSSpinLockUnlock);
	os_atomic_store(l, 0, release);
}

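/*
 * Illustrative usage sketch (not from this file): the legacy OSSpinLock API
 * from <libkern/OSAtomic.h>. A zero word is unlocked; OSSpinLockLock() spins
 * (then yields) until it can cmpxchg the word from 0 to _OSSpinLockLocked.
 *
 *	static OSSpinLock _stats_lock = OS_SPINLOCK_INIT;	// == 0
 *
 *	void stats_bump(long *counter) {
 *		OSSpinLockLock(&_stats_lock);
 *		(*counter)++;
 *		OSSpinLockUnlock(&_stats_lock);
 *	}
 *
 * Unlike os_unfair_lock below, no owner is recorded, so waiters cannot boost
 * the lock holder and priority inversion is possible.
 */
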
#pragma mark os_lock_spin_t

OS_LOCK_STRUCT_DECL_INTERNAL(spin,
	OSSpinLock volatile osl_spinlock;
);
#if !OS_VARIANT_ONLY
OS_LOCK_METHODS_DECL(spin);
OS_LOCK_TYPE_INSTANCE(spin);
#endif // !OS_VARIANT_ONLY

#ifdef OS_VARIANT_SELECTOR
#define _os_lock_spin_lock \
		OS_VARIANT(_os_lock_spin_lock, OS_VARIANT_SELECTOR)
#define _os_lock_spin_trylock \
		OS_VARIANT(_os_lock_spin_trylock, OS_VARIANT_SELECTOR)
#define _os_lock_spin_unlock \
		OS_VARIANT(_os_lock_spin_unlock, OS_VARIANT_SELECTOR)
OS_LOCK_METHODS_DECL(spin);
#endif // OS_VARIANT_SELECTOR

void
_os_lock_spin_lock(_os_lock_spin_t l)
{
	return OSSpinLockLock(&l->osl_spinlock);
}

bool
_os_lock_spin_trylock(_os_lock_spin_t l)
{
	return OSSpinLockTry(&l->osl_spinlock);
}

void
_os_lock_spin_unlock(_os_lock_spin_t l)
{
	return OSSpinLockUnlock(&l->osl_spinlock);
}

#pragma mark os_lock_owner_t

#ifndef __TSD_MACH_THREAD_SELF
#define __TSD_MACH_THREAD_SELF 3
#endif

typedef mach_port_name_t os_lock_owner_t;

static inline os_lock_owner_t
_os_lock_owner_get_self(void)
{
	os_lock_owner_t self;
	self = (os_lock_owner_t)_os_tsd_get_direct(__TSD_MACH_THREAD_SELF);
	return self;
}

#define OS_LOCK_NO_OWNER MACH_PORT_NULL

#if !OS_LOCK_VARIANT_ONLY

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_lock_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_lock");
}

#endif //!OS_LOCK_VARIANT_ONLY

#pragma mark os_lock_handoff_t

OS_LOCK_STRUCT_DECL_INTERNAL(handoff,
	os_lock_owner_t volatile osl_owner;
);
#if !OS_VARIANT_ONLY
OS_LOCK_METHODS_DECL(handoff);
OS_LOCK_TYPE_INSTANCE(handoff);
#endif // !OS_VARIANT_ONLY

#ifdef OS_VARIANT_SELECTOR
#define _os_lock_handoff_lock \
		OS_VARIANT(_os_lock_handoff_lock, OS_VARIANT_SELECTOR)
#define _os_lock_handoff_trylock \
		OS_VARIANT(_os_lock_handoff_trylock, OS_VARIANT_SELECTOR)
#define _os_lock_handoff_unlock \
		OS_VARIANT(_os_lock_handoff_unlock, OS_VARIANT_SELECTOR)
OS_LOCK_METHODS_DECL(handoff);
#endif // OS_VARIANT_SELECTOR

#define OS_LOCK_HANDOFF_YIELD_TRIES 100

OS_NOINLINE
static void
_os_lock_handoff_lock_slow(_os_lock_handoff_t l)
{
	int option = SWITCH_OPTION_OSLOCK_DEPRESS;
	mach_msg_timeout_t timeout = 1;
	uint32_t tries = OS_LOCK_HANDOFF_YIELD_TRIES;
	os_lock_owner_t self = _os_lock_owner_get_self(), owner;
	while (unlikely(owner = l->osl_owner)) {
_handoff:
		if (unlikely(owner == self)) return _os_lock_recursive_abort(self);
		// Yield until tries first hits zero, then permanently switch to wait
		if (unlikely(!tries--)) option = SWITCH_OPTION_OSLOCK_WAIT;
		thread_switch(owner, option, timeout);
		// Redrive the handoff every 1ms until switching to wait
		if (option == SWITCH_OPTION_OSLOCK_WAIT) timeout++;
	}
	bool r = os_atomic_cmpxchgv2o(l, osl_owner, MACH_PORT_NULL, self, &owner,
			acquire);
	if (likely(r)) return;
	goto _handoff;
}

void
_os_lock_handoff_lock(_os_lock_handoff_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, osl_owner, MACH_PORT_NULL, self, acquire);
	if (likely(r)) return;
	return _os_lock_handoff_lock_slow(l);
}

bool
_os_lock_handoff_trylock(_os_lock_handoff_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, osl_owner, MACH_PORT_NULL, self, acquire);
	return r;
}

void
_os_lock_handoff_unlock(_os_lock_handoff_t l)
{
	os_atomic_store2o(l, osl_owner, MACH_PORT_NULL, release);
}

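/*
 * Illustrative note (not from this file): the handoff lock records the
 * owner's thread port in osl_owner, so a contended waiter can call
 * thread_switch(owner, SWITCH_OPTION_OSLOCK_DEPRESS/WAIT, timeout) and hand
 * its remaining quantum directly to the owner instead of spinning. A minimal
 * sketch, assuming the OS_LOCK_HANDOFF_INIT initializer name:
 *
 *	static os_lock_handoff_s _commit_lock = OS_LOCK_HANDOFF_INIT;
 *
 *	void commit(struct txn *t) {          // struct txn is hypothetical
 *		os_lock_lock(&_commit_lock);      // cmpxchg MACH_PORT_NULL -> self
 *		_apply(t);                        // hypothetical helper
 *		os_lock_unlock(&_commit_lock);    // store MACH_PORT_NULL, release
 *	}
 */
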
#pragma mark os_ulock_value_t

#include <sys/errno.h>
#include <sys/ulock.h>

typedef os_lock_owner_t os_ulock_value_t;

// This assumes that all thread mach port values always have the low bit set!
// Clearing this bit is used to communicate the existence of waiters to the
// unlocking thread.
#define OS_ULOCK_NOWAITERS_BIT ((os_ulock_value_t)1u)
#define OS_ULOCK_OWNER(value) ((value) | OS_ULOCK_NOWAITERS_BIT)

#define OS_ULOCK_ANONYMOUS_OWNER MACH_PORT_DEAD
#define OS_ULOCK_IS_OWNER(value, self) ({ \
		os_lock_owner_t _owner = OS_ULOCK_OWNER(value); \
		(_owner == (self) && _owner != OS_ULOCK_ANONYMOUS_OWNER); })
#define OS_ULOCK_IS_NOT_OWNER(value, self) ({ \
		os_lock_owner_t _owner = OS_ULOCK_OWNER(value); \
		(_owner != (self) && _owner != OS_ULOCK_ANONYMOUS_OWNER); })

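/*
 * Worked example (illustrative): suppose a thread whose mach port name is
 * 0x1603 (low bit set, per the assumption above) owns the lock. Uncontended,
 * the lock word holds 0x1603. The first waiter clears OS_ULOCK_NOWAITERS_BIT
 * before sleeping, leaving 0x1602; OS_ULOCK_OWNER(0x1602) == 0x1603 still
 * recovers the owner. On unlock, the xchg returns 0x1602 != self, which sends
 * the owner down the slow path to issue __ulock_wake(). OS_ULOCK_ANONYMOUS_OWNER
 * (MACH_PORT_DEAD) never matches a real thread port, so the no-TSD variants
 * below bypass both ownership checks.
 */
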
#pragma mark os_unfair_lock

typedef struct _os_unfair_lock_s {
	os_ulock_value_t oul_value;
} *_os_unfair_lock_t;

_Static_assert(sizeof(struct os_unfair_lock_s) ==
		sizeof(struct _os_unfair_lock_s), "os_unfair_lock size mismatch");

OS_ATOMIC_EXPORT void os_unfair_lock_lock(os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options);
OS_ATOMIC_EXPORT bool os_unfair_lock_trylock(os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock(os_unfair_lock_t lock);

OS_ATOMIC_EXPORT void os_unfair_lock_lock_no_tsd_4libpthread(
		os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock_no_tsd_4libpthread(
		os_unfair_lock_t lock);

_Static_assert(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION ==
		ULF_WAIT_WORKQ_DATA_CONTENTION,
		"check value for OS_UNFAIR_LOCK_OPTIONS_MASK");
#define OS_UNFAIR_LOCK_OPTIONS_MASK \
		(os_unfair_lock_options_t)(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION)

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_unfair_lock_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_unfair_lock");
}

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_unfair_lock_unowned_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_unfair_lock not "
			"owned by current thread");
}

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_unfair_lock_corruption_abort(os_ulock_value_t current)
{
	__LIBPLATFORM_CLIENT_CRASH__(current, "os_unfair_lock is corrupt");
}

OS_NOINLINE
static void
_os_unfair_lock_lock_slow(_os_unfair_lock_t l, os_lock_owner_t self,
		os_unfair_lock_options_t options)
{
	os_ulock_value_t current, new, waiters_mask = 0;
	if (unlikely(options & ~OS_UNFAIR_LOCK_OPTIONS_MASK)) {
		__LIBPLATFORM_CLIENT_CRASH__(options, "Invalid options");
	}
	while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
			OS_LOCK_NO_OWNER)) {
_retry:
		if (unlikely(OS_ULOCK_IS_OWNER(current, self))) {
			return _os_unfair_lock_recursive_abort(self);
		}
		new = current & ~OS_ULOCK_NOWAITERS_BIT;
		if (current != new) {
			// Clear nowaiters bit in lock value before waiting
			if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
					relaxed)) {
				continue;
			}
			current = new;
		}
		int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO | options,
				l, current, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
			case EFAULT:
				continue;
			case EOWNERDEAD:
				_os_unfair_lock_corruption_abort(current);
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
		// If there are more waiters, unset nowaiters bit when acquiring lock
		waiters_mask = (ret > 0) ? OS_ULOCK_NOWAITERS_BIT : 0;
	}
	new = self & ~waiters_mask;
	bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
			&current, acquire);
	if (unlikely(!r)) goto _retry;
}

OS_NOINLINE
static void
_os_unfair_lock_unlock_slow(_os_unfair_lock_t l, os_ulock_value_t current,
		os_lock_owner_t self)
{
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self))) {
		return _os_unfair_lock_unowned_abort(OS_ULOCK_OWNER(current));
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO, l, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

void
os_unfair_lock_lock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self, OS_UNFAIR_LOCK_NONE);
}

void
os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self, options);
}

bool
os_unfair_lock_trylock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	return r;
}

void
os_unfair_lock_unlock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow(l, current, self);
}

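/*
 * Illustrative usage sketch (not from this file): the fast path above is a
 * single cmpxchg from OS_LOCK_NO_OWNER to the caller's thread port; only
 * contended acquires and unlocks reach __ulock_wait()/__ulock_wake(). A
 * typical client, assuming <os/lock.h>:
 *
 *	static os_unfair_lock _cache_lock = OS_UNFAIR_LOCK_INIT;
 *	static unsigned long _cache_hits;
 *
 *	void cache_record_hit(void) {
 *		os_unfair_lock_lock(&_cache_lock);
 *		_cache_hits++;
 *		os_unfair_lock_unlock(&_cache_lock);	// must be the locking thread
 *	}
 *
 * Unlocking from a thread that is not the recorded owner aborts via
 * _os_unfair_lock_unowned_abort().
 */
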
void
os_unfair_lock_lock_no_tsd_4libpthread(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = OS_ULOCK_ANONYMOUS_OWNER;
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self,
			OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION);
}

void
os_unfair_lock_unlock_no_tsd_4libpthread(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = OS_ULOCK_ANONYMOUS_OWNER;
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow(l, current, self);
}

#pragma mark _os_lock_unfair_t 4Libc // <rdar://problem/27138264>

OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options_4Libc(
		os_unfair_lock_t lock, os_unfair_lock_options_t options);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock_4Libc(os_unfair_lock_t lock);

OS_NOINLINE
static void
_os_unfair_lock_lock_slow_4Libc(_os_unfair_lock_t l, os_lock_owner_t self,
		os_unfair_lock_options_t options)
{
	os_ulock_value_t current, new, waiters_mask = 0;
	if (unlikely(options & ~OS_UNFAIR_LOCK_OPTIONS_MASK)) {
		__LIBPLATFORM_CLIENT_CRASH__(options, "Invalid options");
	}
	while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
			OS_LOCK_NO_OWNER)) {
_retry:
		if (unlikely(OS_ULOCK_IS_OWNER(current, self))) {
			return _os_unfair_lock_recursive_abort(self);
		}
		new = current & ~OS_ULOCK_NOWAITERS_BIT;
		if (current != new) {
			// Clear nowaiters bit in lock value before waiting
			if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
					relaxed)) {
				continue;
			}
			current = new;
		}
		int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO | options,
				l, current, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
			case EFAULT:
				continue;
			case EOWNERDEAD:
				// if we get an `EOWNERDEAD` it could be corruption of the lock
				// so for the Libc locks, if we can steal the lock, assume
				// it is corruption and pretend we got the lock with contention
				new = self & ~OS_ULOCK_NOWAITERS_BIT;
				if (os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
						acquire)) {
					return;
				}
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
		// If there are more waiters, unset nowaiters bit when acquiring lock
		waiters_mask = (ret > 0) ? OS_ULOCK_NOWAITERS_BIT : 0;
	}
	new = self & ~waiters_mask;
	bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
			&current, acquire);
	if (unlikely(!r)) goto _retry;
}

OS_NOINLINE
static void
_os_unfair_lock_unlock_slow_4Libc(_os_unfair_lock_t l)
{
	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO, l, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

void
os_unfair_lock_lock_with_options_4Libc(os_unfair_lock_t lock,
		os_unfair_lock_options_t options)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow_4Libc(l, self, options);
}

void
os_unfair_lock_unlock_4Libc(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow_4Libc(l);
}

void
os_unfair_lock_assert_owner(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed);
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self))) {
		__LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
				"Lock unexpectedly not owned by current thread");
	}
}

void
os_unfair_lock_assert_not_owner(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed);
	if (unlikely(OS_ULOCK_IS_OWNER(current, self))) {
		__LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
				"Lock unexpectedly owned by current thread");
	}
}

#pragma mark _os_lock_unfair_t

OS_LOCK_STRUCT_DECL_INTERNAL(unfair,
	os_unfair_lock osl_unfair_lock;
);
#if !OS_VARIANT_ONLY
OS_LOCK_METHODS_DECL(unfair);
OS_LOCK_TYPE_INSTANCE(unfair);
#endif // !OS_VARIANT_ONLY

#ifdef OS_VARIANT_SELECTOR
#define _os_lock_unfair_lock \
		OS_VARIANT(_os_lock_unfair_lock, OS_VARIANT_SELECTOR)
#define _os_lock_unfair_trylock \
		OS_VARIANT(_os_lock_unfair_trylock, OS_VARIANT_SELECTOR)
#define _os_lock_unfair_unlock \
		OS_VARIANT(_os_lock_unfair_unlock, OS_VARIANT_SELECTOR)
OS_LOCK_METHODS_DECL(unfair);
#endif // OS_VARIANT_SELECTOR

void
_os_lock_unfair_lock(_os_lock_unfair_t l)
{
	return os_unfair_lock_lock(&l->osl_unfair_lock);
}

bool
_os_lock_unfair_trylock(_os_lock_unfair_t l)
{
	return os_unfair_lock_trylock(&l->osl_unfair_lock);
}

void
_os_lock_unfair_unlock(_os_lock_unfair_t l)
{
	return os_unfair_lock_unlock(&l->osl_unfair_lock);
}

#pragma mark _os_nospin_lock

typedef struct _os_nospin_lock_s {
	os_ulock_value_t oul_value;
} _os_nospin_lock, *_os_nospin_lock_t;

_Static_assert(sizeof(OSSpinLock) ==
		sizeof(struct _os_nospin_lock_s), "os_nospin_lock size mismatch");

OS_ATOMIC_EXPORT void _os_nospin_lock_lock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT void _os_nospin_lock_unlock(_os_nospin_lock_t lock);

OS_NOINLINE
static void
_os_nospin_lock_lock_slow(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current, new, waiters_mask = 0;
	uint32_t timeout = 1;
	while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
			OS_LOCK_NO_OWNER)) {
_retry:
		new = current & ~OS_ULOCK_NOWAITERS_BIT;
		// For safer compatibility with OSSpinLock where _OSSpinLockLocked may
		// be 1, check that new didn't become 0 (unlocked) by clearing this bit
		if (current != new && new) {
			// Clear nowaiters bit in lock value before waiting
			if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
					relaxed)) {
				continue;
			}
			current = new;
		}
		int ret = __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, l, current,
				timeout * 1000);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case ETIMEDOUT:
				timeout++;
				continue;
			case EINTR:
			case EFAULT:
				continue;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
		// If there are more waiters, unset nowaiters bit when acquiring lock
		waiters_mask = (ret > 0) ? OS_ULOCK_NOWAITERS_BIT : 0;
	}
	new = self & ~waiters_mask;
	bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
			&current, acquire);
	if (unlikely(!r)) goto _retry;
}

OS_NOINLINE
static void
_os_nospin_lock_unlock_slow(_os_nospin_lock_t l, os_ulock_value_t current)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	if (unlikely(OS_ULOCK_OWNER(current) != self)) {
		return; // no unowned_abort for drop-in compatibility with OSSpinLock
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, l, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

void
_os_nospin_lock_lock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_nospin_lock_lock_slow(l);
}

bool
_os_nospin_lock_trylock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	return r;
}

void
_os_nospin_lock_unlock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_nospin_lock_unlock_slow(l, current);
}

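/*
 * Illustrative note (not from this file): _os_nospin_lock is the backend for
 * OSSpinLock on targets where OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK is set, so the
 * 32-bit word then holds the owner's thread port rather than _OSSpinLockLocked:
 *
 *	OSSpinLock sl = OS_SPINLOCK_INIT;	// 0, same size as _os_nospin_lock
 *	OSSpinLockLock(&sl);			// sl now holds this thread's port name
 *	OSSpinLockUnlock(&sl);			// xchg back to 0; wakes a waiter if needed
 *
 * Because legacy OSSpinLock clients sometimes unlock from a thread other than
 * the locker, _os_nospin_lock_unlock_slow() returns silently on an owner
 * mismatch instead of aborting.
 */
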
#pragma mark _os_lock_nospin_t

OS_LOCK_STRUCT_DECL_INTERNAL(nospin,
	_os_nospin_lock osl_nospin_lock;
);
#if !OS_VARIANT_ONLY
OS_LOCK_METHODS_DECL(nospin);
OS_LOCK_TYPE_INSTANCE(nospin);
#endif // !OS_VARIANT_ONLY

#ifdef OS_VARIANT_SELECTOR
#define _os_lock_nospin_lock \
		OS_VARIANT(_os_lock_nospin_lock, OS_VARIANT_SELECTOR)
#define _os_lock_nospin_trylock \
		OS_VARIANT(_os_lock_nospin_trylock, OS_VARIANT_SELECTOR)
#define _os_lock_nospin_unlock \
		OS_VARIANT(_os_lock_nospin_unlock, OS_VARIANT_SELECTOR)
OS_LOCK_METHODS_DECL(nospin);
#endif // OS_VARIANT_SELECTOR

void
_os_lock_nospin_lock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_lock(&l->osl_nospin_lock);
}

bool
_os_lock_nospin_trylock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_trylock(&l->osl_nospin_lock);
}

void
_os_lock_nospin_unlock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_unlock(&l->osl_nospin_lock);
}

#pragma mark os_once_t

typedef struct os_once_gate_s {
	union {
		os_ulock_value_t ogo_lock;
		os_once_t ogo_once;
	};
} os_once_gate_s, *os_once_gate_t;

#define OS_ONCE_INIT ((os_once_t)0l)
#define OS_ONCE_DONE (~(os_once_t)0l)

OS_ATOMIC_EXPORT void _os_once(os_once_t *val, void *ctxt, os_function_t func);
OS_ATOMIC_EXPORT void __os_once_reset(os_once_t *val);

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_once_gate_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_once_t");
}

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_once_gate_unowned_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_once_t not "
			"owned by current thread");
}

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_once_gate_corruption_abort(os_ulock_value_t current)
{
	__LIBPLATFORM_CLIENT_CRASH__(current, "os_once_t is corrupt");
}

OS_NOINLINE
static void
_os_once_gate_wait_slow(os_ulock_value_t *gate, os_lock_owner_t self)
{
	os_ulock_value_t tid_old, tid_new;
	for (;;) {
		os_atomic_rmw_loop(gate, tid_old, tid_new, relaxed, {
			switch (tid_old) {
			case (os_ulock_value_t)OS_ONCE_INIT: // raced with __os_once_reset()
			case (os_ulock_value_t)OS_ONCE_DONE: // raced with _os_once()
				os_atomic_rmw_loop_give_up(return);
			}
			tid_new = tid_old & ~OS_ULOCK_NOWAITERS_BIT;
			if (tid_new == tid_old) os_atomic_rmw_loop_give_up(break);
		});
		if (unlikely(OS_ULOCK_IS_OWNER(tid_old, self))) {
			return _os_once_gate_recursive_abort(self);
		}
		int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO,
				gate, tid_new, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
			case EFAULT:
				continue;
			case EOWNERDEAD:
				_os_once_gate_corruption_abort(tid_old);
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
	}
}

OS_NOINLINE
static void
_os_once_gate_broadcast_slow(os_ulock_value_t *gate, os_ulock_value_t current,
		os_lock_owner_t self)
{
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self))) {
		return _os_once_gate_unowned_abort(OS_ULOCK_OWNER(current));
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO | ULF_WAKE_ALL,
				gate, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

static void
_os_once_gate_set_value_and_broadcast(os_once_gate_t og, os_lock_owner_t self,
		os_once_t value)
{
	// The next barrier must be long and strong.
	//
	// The scenario: SMP systems with weakly ordered memory models
	// and aggressive out-of-order instruction execution.
	//
	// The problem:
	//
	// The os_once*() wrapper macro causes the callee's
	// instruction stream to look like this (pseudo-RISC):
	//
	//      load r5, pred-addr
	//      cmpi r5, -1
	//      beq  1f
	//      call os_once*()
	//      1f:
	//      load r6, data-addr
	//
	// May be re-ordered like so:
	//
	//      load r6, data-addr
	//      load r5, pred-addr
	//      cmpi r5, -1
	//      beq  1f
	//      call os_once*()
	//      1f:
	//
	// Normally, a barrier on the read side is used to workaround
	// the weakly ordered memory model. But barriers are expensive
	// and we only need to synchronize once! After func(ctxt)
	// completes, the predicate will be marked as "done" and the
	// branch predictor will correctly skip the call to
	// os_once*().
	//
	// A far faster alternative solution: Defeat the speculative
	// read-ahead of peer CPUs.
	//
	// Modern architectures will throw away speculative results
	// once a branch mis-prediction occurs. Therefore, if we can
	// ensure that the predicate is not marked as being complete
	// until long after the last store by func(ctxt), then we have
	// defeated the read-ahead of peer CPUs.
	//
	// In other words, the last "store" by func(ctxt) must complete
	// and then N cycles must elapse before ~0l is stored to *val.
	// The value of N is whatever is sufficient to defeat the
	// read-ahead mechanism of peer CPUs.
	//
	// On some CPUs, the most fully synchronizing instruction might
	// need to be issued.
	os_atomic_maximally_synchronizing_barrier();
	// above assumed to contain release barrier
	os_ulock_value_t current =
			(os_ulock_value_t)os_atomic_xchg(&og->ogo_once, value, relaxed);
	if (likely(current == self)) return;
	_os_once_gate_broadcast_slow(&og->ogo_lock, current, self);
}

// Atomically resets the once value to zero and then signals all
// pending waiters to return from their _os_once_gate_wait_slow()
void
__os_once_reset(os_once_t *val)
{
	os_once_gate_t og = (os_once_gate_t)val;
	os_lock_owner_t self = _os_lock_owner_get_self();
	_os_once_gate_set_value_and_broadcast(og, self, OS_ONCE_INIT);
}

void
_os_once(os_once_t *val, void *ctxt, os_function_t func)
{
	os_once_gate_t og = (os_once_gate_t)val;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_once_t v = (os_once_t)self;

	if (likely(os_atomic_cmpxchg(&og->ogo_once, OS_ONCE_INIT, v, relaxed))) {
		func(ctxt);
		_os_once_gate_set_value_and_broadcast(og, self, OS_ONCE_DONE);
	} else {
		_os_once_gate_wait_slow(&og->ogo_lock, self);
	}
}

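/*
 * Illustrative usage sketch (not from this file): _os_once() is normally
 * reached through the os_once*() wrapper macro mentioned in the barrier
 * comment above (declared in "os/once_private.h"). The winning thread parks
 * its own thread port in the gate while func(ctxt) runs; late arrivals wait
 * in _os_once_gate_wait_slow() until the gate becomes OS_ONCE_DONE.
 *
 *	static os_once_t _init_pred;
 *
 *	static void _do_init(void *ctxt) {
 *		// one-time initialization; ctxt is the pointer passed below
 *	}
 *
 *	void ensure_initialized(void) {
 *		os_once(&_init_pred, NULL, _do_init);	// wrapper name assumed
 *	}
 */
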
#if !OS_VARIANT_ONLY

#pragma mark os_lock_eliding_t

#if !TARGET_OS_IPHONE

#define _os_lock_eliding_t _os_lock_spin_t
#define _os_lock_eliding_lock _os_lock_spin_lock
#define _os_lock_eliding_trylock _os_lock_spin_trylock
#define _os_lock_eliding_unlock _os_lock_spin_unlock
OS_LOCK_METHODS_DECL(eliding);
OS_LOCK_TYPE_INSTANCE(eliding);

#pragma mark os_lock_transactional_t

OS_LOCK_STRUCT_DECL_INTERNAL(transactional,
	uintptr_t volatile osl_lock;
);

#define _os_lock_transactional_t _os_lock_eliding_t
#define _os_lock_transactional_lock _os_lock_eliding_lock
#define _os_lock_transactional_trylock _os_lock_eliding_trylock
#define _os_lock_transactional_unlock _os_lock_eliding_unlock
OS_LOCK_METHODS_DECL(transactional);
OS_LOCK_TYPE_INSTANCE(transactional);

#endif // !TARGET_OS_IPHONE
#endif // !OS_VARIANT_ONLY
#endif // !OS_LOCK_VARIANT_ONLY