/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#include "lock_internal.h"
#include "libkern/OSAtomic.h"
#include "os/lock_private.h"
#include "os/once_private.h"

#include <mach/mach_init.h>
#include <mach/mach_traps.h>
#include <mach/thread_switch.h>

#pragma mark _os_lock_base_t

#if !OS_VARIANT_ONLY

OS_LOCK_STRUCT_DECL_INTERNAL(base);
OS_USED static OS_LOCK_TYPE_STRUCT_DECL(base);

void
os_lock_lock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_lock(l);
}

bool
os_lock_trylock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_trylock(l);
}

void
os_lock_unlock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_unlock(l);
}

#endif //!OS_VARIANT_ONLY

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_lock_corruption_abort(void *lock_ptr OS_UNUSED, uintptr_t lock_value)
{
	__LIBPLATFORM_CLIENT_CRASH__(lock_value, "os_lock is corrupt");
}

#pragma mark OSSpinLock

#ifdef OS_LOCK_VARIANT_SELECTOR
void _OSSpinLockLockSlow(volatile OSSpinLock *l);
#else
OS_NOINLINE OS_USED static void _OSSpinLockLockSlow(volatile OSSpinLock *l);
#endif // OS_LOCK_VARIANT_SELECTOR

OS_ATOMIC_EXPORT void OSSpinLockLock(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT bool OSSpinLockTry(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT int spin_lock_try(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT void OSSpinLockUnlock(volatile OSSpinLock *l);

#if TARGET_OS_EMBEDDED
#define OS_LOCK_SPIN_SPIN_TRIES 100
#define OS_LOCK_SPIN_PAUSE() os_hardware_wfe()
#else
#define OS_LOCK_SPIN_SPIN_TRIES 1000
#define OS_LOCK_SPIN_PAUSE() os_hardware_pause()
#endif
#define OS_LOCK_SPIN_YIELD_TRIES 100

static const OSSpinLock _OSSpinLockLocked = TARGET_OS_EMBEDDED ? 1 : -1;

OS_NOINLINE
static void
_OSSpinLockLockYield(volatile OSSpinLock *l)
{
	int option = SWITCH_OPTION_DEPRESS;
	mach_msg_timeout_t timeout = 1;
	uint32_t tries = OS_LOCK_SPIN_YIELD_TRIES;
	OSSpinLock lock;
	while (unlikely(lock = *l)) {
_retry:
		if (unlikely(lock != _OSSpinLockLocked)) {
			_os_lock_corruption_abort((void *)l, (uintptr_t)lock);
		}
		// Yield until tries first hits zero, then permanently switch to wait
		if (unlikely(!tries--)) option = SWITCH_OPTION_WAIT;
		thread_switch(MACH_PORT_NULL, option, timeout);
	}
	bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
	if (likely(r)) return;
	goto _retry;
}

#if OS_ATOMIC_UP
static void
_OSSpinLockLockSlow(volatile OSSpinLock *l)
{
	return _OSSpinLockLockYield(l); // Don't spin on UP
}
#else
static void
_OSSpinLockLockSlow(volatile OSSpinLock *l)
{
	uint32_t tries = OS_LOCK_SPIN_SPIN_TRIES;
	OSSpinLock lock;
	while (unlikely(lock = *l)) {
_retry:
		if (unlikely(lock != _OSSpinLockLocked)) {
			return _os_lock_corruption_abort((void *)l, (uintptr_t)lock);
		}
		if (unlikely(!tries--)) return _OSSpinLockLockYield(l);
		OS_LOCK_SPIN_PAUSE();
	}
	bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
	if (likely(r)) return;
	goto _retry;
}
#endif // OS_ATOMIC_UP

#ifdef OS_LOCK_VARIANT_SELECTOR
#undef _OSSpinLockLockSlow
extern void _OSSpinLockLockSlow(volatile OSSpinLock *l);
#endif // OS_LOCK_VARIANT_SELECTOR

#if !OS_LOCK_VARIANT_ONLY

#if OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK && !TARGET_OS_SIMULATOR

typedef struct _os_nospin_lock_s *_os_nospin_lock_t;
void _os_nospin_lock_lock(_os_nospin_lock_t lock);
bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
void _os_nospin_lock_unlock(_os_nospin_lock_t lock);

void
OSSpinLockLock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_lock, OSSpinLockLock);
	OS_ATOMIC_ALIAS(_spin_lock, OSSpinLockLock);
	return _os_nospin_lock_lock((_os_nospin_lock_t)l);
}

bool
OSSpinLockTry(volatile OSSpinLock *l)
{
	return _os_nospin_lock_trylock((_os_nospin_lock_t)l);
}

int
spin_lock_try(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(_spin_lock_try, spin_lock_try);
	return _os_nospin_lock_trylock((_os_nospin_lock_t)l);
}

void
OSSpinLockUnlock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_unlock, OSSpinLockUnlock);
	OS_ATOMIC_ALIAS(_spin_unlock, OSSpinLockUnlock);
	return _os_nospin_lock_unlock((_os_nospin_lock_t)l);
}

#undef OS_ATOMIC_ALIAS
#define OS_ATOMIC_ALIAS(n, o)
static void _OSSpinLockLock(volatile OSSpinLock *l);
#undef OSSpinLockLock
#define OSSpinLockLock _OSSpinLockLock
static bool _OSSpinLockTry(volatile OSSpinLock *l);
#undef OSSpinLockTry
#define OSSpinLockTry _OSSpinLockTry
static __unused int __spin_lock_try(volatile OSSpinLock *l);
#undef spin_lock_try
#define spin_lock_try __spin_lock_try
static void _OSSpinLockUnlock(volatile OSSpinLock *l);
#undef OSSpinLockUnlock
#define OSSpinLockUnlock _OSSpinLockUnlock

#endif // OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK

void
OSSpinLockLock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_lock, OSSpinLockLock);
	OS_ATOMIC_ALIAS(_spin_lock, OSSpinLockLock);
	bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
	if (likely(r)) return;
	return _OSSpinLockLockSlow(l);
}

bool
OSSpinLockTry(volatile OSSpinLock *l)
{
	bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
	return r;
}

int
spin_lock_try(volatile OSSpinLock *l) // <rdar://problem/13316060>
{
	OS_ATOMIC_ALIAS(_spin_lock_try, spin_lock_try);
	return OSSpinLockTry(l);
}

void
OSSpinLockUnlock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_unlock, OSSpinLockUnlock);
	OS_ATOMIC_ALIAS(_spin_unlock, OSSpinLockUnlock);
	os_atomic_store(l, 0, release);
}
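
// Typical client usage of the OSSpinLock entry points above, as a minimal
// sketch assuming a statically zero-initialized lock word:
//
//	static OSSpinLock _lock = OS_SPINLOCK_INIT;	// 0 == unlocked
//	OSSpinLockLock(&_lock);		// fast path: cmpxchg 0 -> _OSSpinLockLocked
//	/* ... critical section ... */
//	OSSpinLockUnlock(&_lock);	// release store of 0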

#pragma mark os_lock_spin_t

OS_LOCK_STRUCT_DECL_INTERNAL(spin,
	OSSpinLock volatile osl_spinlock;
);
#if !OS_VARIANT_ONLY
OS_LOCK_METHODS_DECL(spin);
OS_LOCK_TYPE_INSTANCE(spin);
#endif // !OS_VARIANT_ONLY

#ifdef OS_VARIANT_SELECTOR
#define _os_lock_spin_lock \
		OS_VARIANT(_os_lock_spin_lock, OS_VARIANT_SELECTOR)
#define _os_lock_spin_trylock \
		OS_VARIANT(_os_lock_spin_trylock, OS_VARIANT_SELECTOR)
#define _os_lock_spin_unlock \
		OS_VARIANT(_os_lock_spin_unlock, OS_VARIANT_SELECTOR)
OS_LOCK_METHODS_DECL(spin);
#endif // OS_VARIANT_SELECTOR

void
_os_lock_spin_lock(_os_lock_spin_t l)
{
	return OSSpinLockLock(&l->osl_spinlock);
}

bool
_os_lock_spin_trylock(_os_lock_spin_t l)
{
	return OSSpinLockTry(&l->osl_spinlock);
}

void
_os_lock_spin_unlock(_os_lock_spin_t l)
{
	return OSSpinLockUnlock(&l->osl_spinlock);
}

#pragma mark os_lock_owner_t

#ifndef __TSD_MACH_THREAD_SELF
#define __TSD_MACH_THREAD_SELF 3
#endif

typedef mach_port_name_t os_lock_owner_t;

static inline os_lock_owner_t
_os_lock_owner_get_self(void)
{
	os_lock_owner_t self;
	self = (os_lock_owner_t)_os_tsd_get_direct(__TSD_MACH_THREAD_SELF);
	return self;
}

#define OS_LOCK_NO_OWNER MACH_PORT_NULL

#if !OS_LOCK_VARIANT_ONLY

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_lock_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_lock");
}

#endif //!OS_LOCK_VARIANT_ONLY

#pragma mark os_lock_handoff_t

OS_LOCK_STRUCT_DECL_INTERNAL(handoff,
	os_lock_owner_t volatile osl_owner;
);
#if !OS_VARIANT_ONLY
OS_LOCK_METHODS_DECL(handoff);
OS_LOCK_TYPE_INSTANCE(handoff);
#endif // !OS_VARIANT_ONLY

#ifdef OS_VARIANT_SELECTOR
#define _os_lock_handoff_lock \
		OS_VARIANT(_os_lock_handoff_lock, OS_VARIANT_SELECTOR)
#define _os_lock_handoff_trylock \
		OS_VARIANT(_os_lock_handoff_trylock, OS_VARIANT_SELECTOR)
#define _os_lock_handoff_unlock \
		OS_VARIANT(_os_lock_handoff_unlock, OS_VARIANT_SELECTOR)
OS_LOCK_METHODS_DECL(handoff);
#endif // OS_VARIANT_SELECTOR

#define OS_LOCK_HANDOFF_YIELD_TRIES 100
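
// The contended path below yields directly to the current owner: thread_switch()
// is passed the owner's thread port so the kernel can run the owner in the
// waiter's place, first with priority depression (SWITCH_OPTION_OSLOCK_DEPRESS)
// and then, once the yield budget is exhausted, by blocking with
// SWITCH_OPTION_OSLOCK_WAIT.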

OS_NOINLINE
static void
_os_lock_handoff_lock_slow(_os_lock_handoff_t l)
{
	int option = SWITCH_OPTION_OSLOCK_DEPRESS;
	mach_msg_timeout_t timeout = 1;
	uint32_t tries = OS_LOCK_HANDOFF_YIELD_TRIES;
	os_lock_owner_t self = _os_lock_owner_get_self(), owner;
	while (unlikely(owner = l->osl_owner)) {
_retry:
		if (unlikely(owner == self)) return _os_lock_recursive_abort(self);
		// Yield until tries first hits zero, then permanently switch to wait
		if (unlikely(!tries--)) option = SWITCH_OPTION_OSLOCK_WAIT;
		thread_switch(owner, option, timeout);
		// Redrive the handoff every 1ms until switching to wait
		if (option == SWITCH_OPTION_OSLOCK_WAIT) timeout++;
	}
	bool r = os_atomic_cmpxchgv2o(l, osl_owner, MACH_PORT_NULL, self, &owner,
			acquire);
	if (likely(r)) return;
	goto _retry;
}

void
_os_lock_handoff_lock(_os_lock_handoff_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, osl_owner, MACH_PORT_NULL, self, acquire);
	if (likely(r)) return;
	return _os_lock_handoff_lock_slow(l);
}

bool
_os_lock_handoff_trylock(_os_lock_handoff_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, osl_owner, MACH_PORT_NULL, self, acquire);
	return r;
}

void
_os_lock_handoff_unlock(_os_lock_handoff_t l)
{
	os_atomic_store2o(l, osl_owner, MACH_PORT_NULL, release);
}

#pragma mark os_ulock_value_t

#include <sys/errno.h>
#include <sys/ulock.h>

typedef os_lock_owner_t os_ulock_value_t;

// This assumes that all thread mach port values always have the low bit set!
// Clearing this bit is used to communicate the existence of waiters to unlock.
#define OS_ULOCK_NOWAITERS_BIT ((os_ulock_value_t)1u)
#define OS_ULOCK_OWNER(value) ((value) | OS_ULOCK_NOWAITERS_BIT)

#define OS_ULOCK_ANONYMOUS_OWNER MACH_PORT_DEAD
#define OS_ULOCK_IS_OWNER(value, self) ({ \
		os_lock_owner_t _owner = OS_ULOCK_OWNER(value); \
		(_owner == (self) && _owner != OS_ULOCK_ANONYMOUS_OWNER); })
#define OS_ULOCK_IS_NOT_OWNER(value, self) ({ \
		os_lock_owner_t _owner = OS_ULOCK_OWNER(value); \
		(_owner != (self) && _owner != OS_ULOCK_ANONYMOUS_OWNER); })
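
// Worked example of the encoding above, with a purely illustrative port name:
// if the owner's thread port is 0x1003 (low bit set), an uncontended lock
// stores 0x1003. The first waiter clears the low bit before sleeping, so the
// word reads 0x1002; OS_ULOCK_OWNER(0x1002) recovers 0x1003, and the cleared
// bit tells the unlocking thread that a __ulock_wake is required.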

#pragma mark os_unfair_lock

typedef struct _os_unfair_lock_s {
	os_ulock_value_t oul_value;
} *_os_unfair_lock_t;

_Static_assert(sizeof(struct os_unfair_lock_s) ==
		sizeof(struct _os_unfair_lock_s), "os_unfair_lock size mismatch");

OS_ATOMIC_EXPORT void os_unfair_lock_lock(os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options);
OS_ATOMIC_EXPORT bool os_unfair_lock_trylock(os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock(os_unfair_lock_t lock);

OS_ATOMIC_EXPORT void os_unfair_lock_lock_no_tsd_4libpthread(
		os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock_no_tsd_4libpthread(
		os_unfair_lock_t lock);

_Static_assert(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION ==
		ULF_WAIT_WORKQ_DATA_CONTENTION,
		"check value for OS_UNFAIR_LOCK_OPTIONS_MASK");
#define OS_UNFAIR_LOCK_OPTIONS_MASK \
		(os_unfair_lock_options_t)(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION)

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_unfair_lock_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_unfair_lock");
}

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_unfair_lock_unowned_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_unfair_lock not "
			"owned by current thread");
}

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_unfair_lock_corruption_abort(os_ulock_value_t current)
{
	__LIBPLATFORM_CLIENT_CRASH__(current, "os_unfair_lock is corrupt");
}

OS_NOINLINE
static void
_os_unfair_lock_lock_slow(_os_unfair_lock_t l, os_lock_owner_t self,
		os_unfair_lock_options_t options)
{
	os_ulock_value_t current, new, waiters_mask = 0;
	if (unlikely(options & ~OS_UNFAIR_LOCK_OPTIONS_MASK)) {
		__LIBPLATFORM_CLIENT_CRASH__(options, "Invalid options");
	}
	while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
			OS_LOCK_NO_OWNER)) {
_retry:
		if (unlikely(OS_ULOCK_IS_OWNER(current, self))) {
			return _os_unfair_lock_recursive_abort(self);
		}
		new = current & ~OS_ULOCK_NOWAITERS_BIT;
		if (current != new) {
			// Clear nowaiters bit in lock value before waiting
			if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
					relaxed)) {
				continue;
			}
			current = new;
		}
		int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO | options,
				l, current, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
			case EFAULT:
				continue;
			case EOWNERDEAD:
				_os_unfair_lock_corruption_abort(current);
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
		// If there are more waiters, unset nowaiters bit when acquiring lock
		waiters_mask = (ret > 0) ? OS_ULOCK_NOWAITERS_BIT : 0;
	}
	new = self & ~waiters_mask;
	bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
			&current, acquire);
	if (unlikely(!r)) goto _retry;
}

OS_NOINLINE
static void
_os_unfair_lock_unlock_slow(_os_unfair_lock_t l, os_ulock_value_t current,
		os_lock_owner_t self)
{
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self))) {
		return _os_unfair_lock_unowned_abort(OS_ULOCK_OWNER(current));
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO, l, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

void
os_unfair_lock_lock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self, OS_UNFAIR_LOCK_NONE);
}

void
os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self, options);
}

bool
os_unfair_lock_trylock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	return r;
}

void
os_unfair_lock_unlock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow(l, current, self);
}
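
// Minimal client-side sketch of the API implemented above, using the public
// <os/lock.h> interface and a statically initialized lock:
//
//	static os_unfair_lock _lock = OS_UNFAIR_LOCK_INIT;
//	os_unfair_lock_lock(&_lock);	// fast path: cmpxchg NO_OWNER -> own tid
//	/* ... critical section ... */
//	os_unfair_lock_unlock(&_lock);	// xchg back to NO_OWNER, wake waiters if any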

void
os_unfair_lock_lock_no_tsd_4libpthread(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = OS_ULOCK_ANONYMOUS_OWNER;
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self,
			OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION);
}

void
os_unfair_lock_unlock_no_tsd_4libpthread(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = OS_ULOCK_ANONYMOUS_OWNER;
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow(l, current, self);
}

#pragma mark _os_lock_unfair_t 4Libc // <rdar://problem/27138264>

OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options_4Libc(
		os_unfair_lock_t lock, os_unfair_lock_options_t options);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock_4Libc(os_unfair_lock_t lock);

OS_NOINLINE
static void
_os_unfair_lock_lock_slow_4Libc(_os_unfair_lock_t l, os_lock_owner_t self,
		os_unfair_lock_options_t options)
{
	os_ulock_value_t current, new, waiters_mask = 0;
	if (unlikely(options & ~OS_UNFAIR_LOCK_OPTIONS_MASK)) {
		__LIBPLATFORM_CLIENT_CRASH__(options, "Invalid options");
	}
	while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
			OS_LOCK_NO_OWNER)) {
_retry:
		if (unlikely(OS_ULOCK_IS_OWNER(current, self))) {
			return _os_unfair_lock_recursive_abort(self);
		}
		new = current & ~OS_ULOCK_NOWAITERS_BIT;
		if (current != new) {
			// Clear nowaiters bit in lock value before waiting
			if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
					relaxed)) {
				continue;
			}
			current = new;
		}
		int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO | options,
				l, current, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
			case EFAULT:
				continue;
			case EOWNERDEAD:
				// if we get an `EOWNERDEAD` it could be corruption of the lock
				// so for the Libc locks, if we can steal the lock, assume
				// it is corruption and pretend we got the lock with contention
				new = self & ~OS_ULOCK_NOWAITERS_BIT;
				if (os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
						acquire)) {
					return;
				}
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
		// If there are more waiters, unset nowaiters bit when acquiring lock
		waiters_mask = (ret > 0) ? OS_ULOCK_NOWAITERS_BIT : 0;
	}
	new = self & ~waiters_mask;
	bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
			&current, acquire);
	if (unlikely(!r)) goto _retry;
}

OS_NOINLINE
static void
_os_unfair_lock_unlock_slow_4Libc(_os_unfair_lock_t l)
{
	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO, l, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

void
os_unfair_lock_lock_with_options_4Libc(os_unfair_lock_t lock,
		os_unfair_lock_options_t options)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow_4Libc(l, self, options);
}

void
os_unfair_lock_unlock_4Libc(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow_4Libc(l);
}

void
os_unfair_lock_assert_owner(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed);
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self))) {
		__LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
				"Lock unexpectedly not owned by current thread");
	}
}

void
os_unfair_lock_assert_not_owner(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed);
	if (unlikely(OS_ULOCK_IS_OWNER(current, self))) {
		__LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
				"Lock unexpectedly owned by current thread");
	}
}

#pragma mark _os_lock_unfair_t

OS_LOCK_STRUCT_DECL_INTERNAL(unfair,
	os_unfair_lock osl_unfair_lock;
);
#if !OS_VARIANT_ONLY
OS_LOCK_METHODS_DECL(unfair);
OS_LOCK_TYPE_INSTANCE(unfair);
#endif // !OS_VARIANT_ONLY

#ifdef OS_VARIANT_SELECTOR
#define _os_lock_unfair_lock \
		OS_VARIANT(_os_lock_unfair_lock, OS_VARIANT_SELECTOR)
#define _os_lock_unfair_trylock \
		OS_VARIANT(_os_lock_unfair_trylock, OS_VARIANT_SELECTOR)
#define _os_lock_unfair_unlock \
		OS_VARIANT(_os_lock_unfair_unlock, OS_VARIANT_SELECTOR)
OS_LOCK_METHODS_DECL(unfair);
#endif // OS_VARIANT_SELECTOR

void
_os_lock_unfair_lock(_os_lock_unfair_t l)
{
	return os_unfair_lock_lock(&l->osl_unfair_lock);
}

bool
_os_lock_unfair_trylock(_os_lock_unfair_t l)
{
	return os_unfair_lock_trylock(&l->osl_unfair_lock);
}

void
_os_lock_unfair_unlock(_os_lock_unfair_t l)
{
	return os_unfair_lock_unlock(&l->osl_unfair_lock);
}

#pragma mark _os_nospin_lock

typedef struct _os_nospin_lock_s {
	os_ulock_value_t oul_value;
} _os_nospin_lock, *_os_nospin_lock_t;

_Static_assert(sizeof(OSSpinLock) ==
		sizeof(struct _os_nospin_lock_s), "os_nospin_lock size mismatch");

OS_ATOMIC_EXPORT void _os_nospin_lock_lock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT void _os_nospin_lock_unlock(_os_nospin_lock_t lock);

OS_NOINLINE
static void
_os_nospin_lock_lock_slow(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current, new, waiters_mask = 0;
	uint32_t timeout = 1;
	while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
			OS_LOCK_NO_OWNER)) {
_retry:
		new = current & ~OS_ULOCK_NOWAITERS_BIT;
		// For safer compatibility with OSSpinLock where _OSSpinLockLocked may
		// be 1, check that new didn't become 0 (unlocked) by clearing this bit
		if (current != new && new) {
			// Clear nowaiters bit in lock value before waiting
			if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
					relaxed)) {
				continue;
			}
			current = new;
		}
		int ret = __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, l, current,
				timeout * 1000);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case ETIMEDOUT:
				timeout++;
				continue;
			case EINTR:
			case EFAULT:
				continue;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
		// If there are more waiters, unset nowaiters bit when acquiring lock
		waiters_mask = (ret > 0) ? OS_ULOCK_NOWAITERS_BIT : 0;
	}
	new = self & ~waiters_mask;
	bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
			&current, acquire);
	if (unlikely(!r)) goto _retry;
}

OS_NOINLINE
static void
_os_nospin_lock_unlock_slow(_os_nospin_lock_t l, os_ulock_value_t current)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	if (unlikely(OS_ULOCK_OWNER(current) != self)) {
		return; // no unowned_abort for drop-in compatibility with OSSpinLock
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, l, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

void
_os_nospin_lock_lock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_nospin_lock_lock_slow(l);
}

bool
_os_nospin_lock_trylock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	return r;
}

void
_os_nospin_lock_unlock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_nospin_lock_unlock_slow(l, current);
}
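
// _os_nospin_lock is the same size as OSSpinLock (see the _Static_assert
// above) so that OSSpinLock{Lock,Try,Unlock} can be redirected onto it where
// OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK is set: the spin loop is replaced by a
// ulock wait while keeping the zero-initialized "unlocked" representation
// that OSSpinLock clients rely on.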

#pragma mark _os_lock_nospin_t

OS_LOCK_STRUCT_DECL_INTERNAL(nospin,
	_os_nospin_lock osl_nospin_lock;
);
#if !OS_VARIANT_ONLY
OS_LOCK_METHODS_DECL(nospin);
OS_LOCK_TYPE_INSTANCE(nospin);
#endif // !OS_VARIANT_ONLY

#ifdef OS_VARIANT_SELECTOR
#define _os_lock_nospin_lock \
		OS_VARIANT(_os_lock_nospin_lock, OS_VARIANT_SELECTOR)
#define _os_lock_nospin_trylock \
		OS_VARIANT(_os_lock_nospin_trylock, OS_VARIANT_SELECTOR)
#define _os_lock_nospin_unlock \
		OS_VARIANT(_os_lock_nospin_unlock, OS_VARIANT_SELECTOR)
OS_LOCK_METHODS_DECL(nospin);
#endif // OS_VARIANT_SELECTOR

void
_os_lock_nospin_lock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_lock(&l->osl_nospin_lock);
}

bool
_os_lock_nospin_trylock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_trylock(&l->osl_nospin_lock);
}

void
_os_lock_nospin_unlock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_unlock(&l->osl_nospin_lock);
}

#pragma mark os_once_t

typedef struct os_once_gate_s {
	union {
		os_ulock_value_t ogo_lock;
		os_once_t ogo_once;
	};
} os_once_gate_s, *os_once_gate_t;

#define OS_ONCE_INIT ((os_once_t)0l)
#define OS_ONCE_DONE (~(os_once_t)0l)

OS_ATOMIC_EXPORT void _os_once(os_once_t *val, void *ctxt, os_function_t func);
OS_ATOMIC_EXPORT void __os_once_reset(os_once_t *val);

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_once_gate_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_once_t");
}

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_once_gate_unowned_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_once_t not "
			"owned by current thread");
}

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_once_gate_corruption_abort(os_ulock_value_t current)
{
	__LIBPLATFORM_CLIENT_CRASH__(current, "os_once_t is corrupt");
}

OS_NOINLINE
static void
_os_once_gate_wait_slow(os_ulock_value_t *gate, os_lock_owner_t self)
{
	os_ulock_value_t tid_old, tid_new;

	for (;;) {
		os_atomic_rmw_loop(gate, tid_old, tid_new, relaxed, {
			switch (tid_old) {
			case (os_ulock_value_t)OS_ONCE_INIT: // raced with __os_once_reset()
			case (os_ulock_value_t)OS_ONCE_DONE: // raced with _os_once()
				os_atomic_rmw_loop_give_up(return);
			}
			tid_new = tid_old & ~OS_ULOCK_NOWAITERS_BIT;
			if (tid_new == tid_old) os_atomic_rmw_loop_give_up(break);
		});
		if (unlikely(OS_ULOCK_IS_OWNER(tid_old, self))) {
			return _os_once_gate_recursive_abort(self);
		}
		int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO,
				gate, tid_new, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
			case EFAULT:
				continue;
			case EOWNERDEAD:
				_os_once_gate_corruption_abort(tid_old);
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
	}
}

OS_NOINLINE
static void
_os_once_gate_broadcast_slow(os_ulock_value_t *gate, os_ulock_value_t current,
		os_lock_owner_t self)
{
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self))) {
		return _os_once_gate_unowned_abort(OS_ULOCK_OWNER(current));
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO | ULF_WAKE_ALL,
				gate, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

static void
_os_once_gate_set_value_and_broadcast(os_once_gate_t og, os_lock_owner_t self,
		os_once_t value)
{
	// The next barrier must be long and strong.
	//
	// The scenario: SMP systems with weakly ordered memory models
	// and aggressive out-of-order instruction execution.
	//
	// The problem:
	//
	// The os_once*() wrapper macro causes the callee's
	// instruction stream to look like this (pseudo-RISC):
	//
	//      load r5, pred-addr
	//      cmpi r5, -1
	//      beq  1f
	//      call os_once*()
	// 1f:
	//      load r6, data-addr
	//
	// May be re-ordered like so:
	//
	//      load r6, data-addr
	//      load r5, pred-addr
	//      cmpi r5, -1
	//      beq  1f
	//      call os_once*()
	// 1f:
	//
	// Normally, a barrier on the read side is used to workaround
	// the weakly ordered memory model. But barriers are expensive
	// and we only need to synchronize once! After func(ctxt)
	// completes, the predicate will be marked as "done" and the
	// branch predictor will correctly skip the call to
	// os_once*().
	//
	// A far faster alternative solution: Defeat the speculative
	// read-ahead of peer CPUs.
	//
	// Modern architectures will throw away speculative results
	// once a branch mis-prediction occurs. Therefore, if we can
	// ensure that the predicate is not marked as being complete
	// until long after the last store by func(ctxt), then we have
	// defeated the read-ahead of peer CPUs.
	//
	// In other words, the last "store" by func(ctxt) must complete
	// and then N cycles must elapse before ~0l is stored to *val.
	// The value of N is whatever is sufficient to defeat the
	// read-ahead mechanism of peer CPUs.
	//
	// On some CPUs, the most fully synchronizing instruction might
	// need to be issued.
	os_atomic_maximally_synchronizing_barrier();
	// above assumed to contain release barrier
	os_ulock_value_t current =
			(os_ulock_value_t)os_atomic_xchg(&og->ogo_once, value, relaxed);
	if (likely(current == self)) return;
	_os_once_gate_broadcast_slow(&og->ogo_lock, current, self);
}

// Atomically resets the once value to zero and then signals all
// pending waiters to return from their _os_once_gate_wait_slow()
void
__os_once_reset(os_once_t *val)
{
	os_once_gate_t og = (os_once_gate_t)val;
	os_lock_owner_t self = _os_lock_owner_get_self();
	_os_once_gate_set_value_and_broadcast(og, self, OS_ONCE_INIT);
}

void
_os_once(os_once_t *val, void *ctxt, os_function_t func)
{
	os_once_gate_t og = (os_once_gate_t)val;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_once_t v = (os_once_t)self;

	if (likely(os_atomic_cmpxchg(&og->ogo_once, OS_ONCE_INIT, v, relaxed))) {
		func(ctxt);
		_os_once_gate_set_value_and_broadcast(og, self, OS_ONCE_DONE);
	} else {
		_os_once_gate_wait_slow(&og->ogo_lock, self);
	}
}
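
// Minimal caller-side sketch of the once gate. The usual entry point is the
// os_once() wrapper referenced in the barrier comment above, which tests the
// predicate inline and only reaches _os_once() on the slow path; the names
// below are illustrative only:
//
//	static os_once_t _pred;
//	static void _init(void *ctxt) { /* runs exactly once */ }
//	...
//	os_once(&_pred, NULL, _init);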

#if !OS_VARIANT_ONLY

#pragma mark os_lock_eliding_t

#if !TARGET_OS_IPHONE

#define _os_lock_eliding_t _os_lock_spin_t
#define _os_lock_eliding_lock _os_lock_spin_lock
#define _os_lock_eliding_trylock _os_lock_spin_trylock
#define _os_lock_eliding_unlock _os_lock_spin_unlock
OS_LOCK_METHODS_DECL(eliding);
OS_LOCK_TYPE_INSTANCE(eliding);

#pragma mark os_lock_transactional_t

OS_LOCK_STRUCT_DECL_INTERNAL(transactional,
	uintptr_t volatile osl_lock;
);
#define _os_lock_transactional_t _os_lock_eliding_t
#define _os_lock_transactional_lock _os_lock_eliding_lock
#define _os_lock_transactional_trylock _os_lock_eliding_trylock
#define _os_lock_transactional_unlock _os_lock_eliding_unlock
OS_LOCK_METHODS_DECL(transactional);
OS_LOCK_TYPE_INSTANCE(transactional);

#endif // !TARGET_OS_IPHONE
#endif // !OS_VARIANT_ONLY
#endif // !OS_LOCK_VARIANT_ONLY