/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */
#define OS_UNFAIR_LOCK_INLINE 1
#include "lock_internal.h"
#include "os/internal.h"

#include "libkern/OSAtomic.h"

#include "os/lock_private.h"
#include "os/once_private.h"

#include <mach/mach_init.h>
#include <mach/mach_traps.h>
#include <mach/thread_switch.h>
#include <mach/mach_time.h>
#pragma mark _os_lock_base_t

OS_NOINLINE OS_NORETURN OS_COLD
void _os_lock_corruption_abort(void *lock_ptr OS_UNUSED, uintptr_t lock_value);

OS_LOCK_STRUCT_DECL_INTERNAL(base);
OS_USED static OS_LOCK_TYPE_STRUCT_DECL(base);
void
os_lock_lock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_lock(l);
}

bool
os_lock_trylock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_trylock(l);
}

void
os_lock_unlock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_unlock(l);
}
OS_NOINLINE OS_NORETURN OS_COLD
void
_os_lock_corruption_abort(void *lock_ptr OS_UNUSED, uintptr_t lock_value)
{
	__LIBPLATFORM_CLIENT_CRASH__(lock_value, "os_lock is corrupt");
}
#pragma mark OSSpinLock

OS_ATOMIC_EXPORT OS_NOINLINE void _OSSpinLockLockSlow(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT void OSSpinLockLock(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT bool OSSpinLockTry(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT int spin_lock_try(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT void OSSpinLockUnlock(volatile OSSpinLock *l);
#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
static const OSSpinLock _OSSpinLockLocked = 1;
#else
static const OSSpinLock _OSSpinLockLocked = -1;
#endif
#if OS_ATOMIC_UP
// Don't spin on UP
#elif defined(__arm__) || defined(__arm64__)
#define OS_LOCK_SPIN_SPIN_TRIES 100
#define OS_LOCK_SPIN_PAUSE() os_hardware_wfe()
#else
#define OS_LOCK_SPIN_SPIN_TRIES 1000
#define OS_LOCK_SPIN_PAUSE() os_hardware_pause()
#endif
static uint64_t
_os_lock_yield_deadline(mach_msg_timeout_t timeout)
{
	uint64_t abstime = timeout;
#if defined(__arm__)
	// some armv7 targets do not have div, like the armv7 arch
	// so hardcode the most typical clock resolution it has
	// as we don't really need accuracy here anyway
	abstime *= NSEC_PER_MSEC * 128 / 3;
#elif defined(__i386__) || defined(__x86_64__)
	// abstime is in nanoseconds
#else
	mach_timebase_info_data_t tbi;
	kern_return_t kr = mach_timebase_info(&tbi);
	if (kr) return UINT64_MAX;
	abstime *= (NSEC_PER_MSEC * tbi.denom / tbi.numer);
#endif
	return mach_absolute_time() + abstime;
}
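
/*
 * Note (added commentary): the timeout passed in is in milliseconds and the
 * branches above convert it into mach_absolute_time() units: x86 timebases
 * already count nanoseconds, armv7 uses a hardcoded approximation of its
 * typical timebase to avoid a division, and every other target converts
 * exactly via mach_timebase_info().
 */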
static bool
_os_lock_yield_until(uint64_t deadline)
{
	return mach_absolute_time() < deadline;
}
static void
_OSSpinLockLockYield(volatile OSSpinLock *l)
{
	int option = SWITCH_OPTION_DEPRESS;
	mach_msg_timeout_t timeout = 1;
	uint64_t deadline = _os_lock_yield_deadline(timeout);
	OSSpinLock lock;
	while (unlikely(lock = *l)) {
_yield:
		if (unlikely(lock != _OSSpinLockLocked)) {
			_os_lock_corruption_abort((void *)l, (uintptr_t)lock);
		}
		thread_switch(MACH_PORT_NULL, option, timeout);
		if (option == SWITCH_OPTION_WAIT) {
			timeout++;
		} else if (!_os_lock_yield_until(deadline)) {
			option = SWITCH_OPTION_WAIT;
		}
	}
	bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
	if (likely(r)) return;
	goto _yield;
}
#if OS_ATOMIC_UP
void
_OSSpinLockLockSlow(volatile OSSpinLock *l)
{
	return _OSSpinLockLockYield(l); // Don't spin on UP
}
#elif defined(__arm64__)
// Exclusive monitor must be held during WFE <rdar://problem/22300054>
#if defined(__ARM_ARCH_8_2__)
void
_OSSpinLockLockSlow(volatile OSSpinLock *l)
{
	uint32_t tries = OS_LOCK_SPIN_SPIN_TRIES;
	OSSpinLock lock;
_spin:
	while (unlikely(lock = os_atomic_load_exclusive(l, relaxed))) {
		if (unlikely(lock != _OSSpinLockLocked)) {
			os_atomic_clear_exclusive();
			return _os_lock_corruption_abort((void *)l, (uintptr_t)lock);
		}
		if (unlikely(!tries--)) {
			os_atomic_clear_exclusive();
			return _OSSpinLockLockYield(l);
		}
		OS_LOCK_SPIN_PAUSE();
	}
	os_atomic_clear_exclusive();
	bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
	if (likely(r)) return;
	goto _spin;
}
#else // !__ARM_ARCH_8_2__
void
_OSSpinLockLockSlow(volatile OSSpinLock *l)
{
	uint32_t tries = OS_LOCK_SPIN_SPIN_TRIES;
	OSSpinLock lock;
	os_atomic_rmw_loop(l, lock, _OSSpinLockLocked, acquire, if (unlikely(lock)){
		if (unlikely(lock != _OSSpinLockLocked)) {
			os_atomic_rmw_loop_give_up(return
					_os_lock_corruption_abort((void *)l, (uintptr_t)lock));
		}
		if (unlikely(!tries--)) {
			os_atomic_rmw_loop_give_up(return _OSSpinLockLockYield(l));
		}
		OS_LOCK_SPIN_PAUSE();
		continue;
	});
}
#endif // !__ARM_ARCH_8_2__
#else // !OS_ATOMIC_UP
void
_OSSpinLockLockSlow(volatile OSSpinLock *l)
{
	uint32_t tries = OS_LOCK_SPIN_SPIN_TRIES;
	OSSpinLock lock;
_spin:
	while (unlikely(lock = *l)) {
		if (unlikely(lock != _OSSpinLockLocked)) {
			return _os_lock_corruption_abort((void *)l, (uintptr_t)lock);
		}
		if (unlikely(!tries--)) return _OSSpinLockLockYield(l);
		OS_LOCK_SPIN_PAUSE();
	}
	bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
	if (likely(r)) return;
	goto _spin;
}
#endif // !OS_ATOMIC_UP
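
/*
 * Summary (added commentary): _OSSpinLockLockSlow has three variants above.
 * Uniprocessor builds go straight to the yield loop, arm64 spins while
 * holding the exclusive monitor so WFE wakes on a store to the lock word,
 * and all other targets use a plain load/pause spin. Every variant falls
 * back to _OSSpinLockLockYield() once OS_LOCK_SPIN_SPIN_TRIES attempts are
 * exhausted.
 */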
#if OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK && !TARGET_OS_SIMULATOR

typedef struct _os_nospin_lock_s *_os_nospin_lock_t;

OS_ATOMIC_EXPORT void _os_nospin_lock_lock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT void _os_nospin_lock_unlock(_os_nospin_lock_t lock);
void
OSSpinLockLock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_lock, OSSpinLockLock);
	OS_ATOMIC_ALIAS(_spin_lock, OSSpinLockLock);
	return _os_nospin_lock_lock((_os_nospin_lock_t)l);
}

bool
OSSpinLockTry(volatile OSSpinLock *l)
{
	return _os_nospin_lock_trylock((_os_nospin_lock_t)l);
}

int
spin_lock_try(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(_spin_lock_try, spin_lock_try);
	return _os_nospin_lock_trylock((_os_nospin_lock_t)l);
}

void
OSSpinLockUnlock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_unlock, OSSpinLockUnlock);
	OS_ATOMIC_ALIAS(_spin_unlock, OSSpinLockUnlock);
	return _os_nospin_lock_unlock((_os_nospin_lock_t)l);
}
#undef OS_ATOMIC_ALIAS
#define OS_ATOMIC_ALIAS(n, o)
static void _OSSpinLockLock(volatile OSSpinLock *l);
#undef OSSpinLockLock
#define OSSpinLockLock _OSSpinLockLock
static bool _OSSpinLockTry(volatile OSSpinLock *l);
#undef OSSpinLockTry
#define OSSpinLockTry _OSSpinLockTry
static __unused
int __spin_lock_try(volatile OSSpinLock *l);
#define spin_lock_try __spin_lock_try
static void _OSSpinLockUnlock(volatile OSSpinLock *l);
#undef OSSpinLockUnlock
#define OSSpinLockUnlock _OSSpinLockUnlock

#endif // OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK
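
/*
 * Note (added commentary): when OSSpinLock is backed by the nospin lock, the
 * exported OSSpinLock* symbols above forward to the _os_nospin_lock_*
 * implementations, the alias macro is neutralized, and the public names are
 * redefined so that the spinning implementations that follow become static
 * helpers reachable only through the os_lock_spin_t vtable.
 */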
void
OSSpinLockLock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_lock, OSSpinLockLock);
	OS_ATOMIC_ALIAS(_spin_lock, OSSpinLockLock);
	bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
	if (likely(r)) return;
	return _OSSpinLockLockSlow(l);
}

bool
OSSpinLockTry(volatile OSSpinLock *l)
{
	bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
	return r;
}

int
spin_lock_try(volatile OSSpinLock *l) // <rdar://problem/13316060>
{
	OS_ATOMIC_ALIAS(_spin_lock_try, spin_lock_try);
	return OSSpinLockTry(l);
}

void
OSSpinLockUnlock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_unlock, OSSpinLockUnlock);
	OS_ATOMIC_ALIAS(_spin_unlock, OSSpinLockUnlock);
	os_atomic_store(l, 0, release);
}
#pragma mark os_lock_spin_t

OS_LOCK_STRUCT_DECL_INTERNAL(spin,
	OSSpinLock volatile osl_spinlock;
);
OS_LOCK_METHODS_DECL(spin);
OS_LOCK_TYPE_INSTANCE(spin);

void
_os_lock_spin_lock(_os_lock_spin_t l)
{
	return OSSpinLockLock(&l->osl_spinlock);
}

bool
_os_lock_spin_trylock(_os_lock_spin_t l)
{
	return OSSpinLockTry(&l->osl_spinlock);
}

void
_os_lock_spin_unlock(_os_lock_spin_t l)
{
	return OSSpinLockUnlock(&l->osl_spinlock);
}
#pragma mark os_lock_owner_t

#ifndef __TSD_MACH_THREAD_SELF
#define __TSD_MACH_THREAD_SELF 3
#endif

typedef mach_port_name_t os_lock_owner_t;
#define OS_LOCK_NO_OWNER MACH_PORT_NULL

OS_ALWAYS_INLINE OS_CONST
static inline os_lock_owner_t
_os_lock_owner_get_self(void)
{
	os_lock_owner_t self;
	self = (os_lock_owner_t)_os_tsd_get_direct(__TSD_MACH_THREAD_SELF);
	return self;
}

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_lock_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_lock");
}
#pragma mark os_lock_handoff_t

OS_LOCK_STRUCT_DECL_INTERNAL(handoff,
	os_lock_owner_t volatile osl_owner;
);
OS_LOCK_METHODS_DECL(handoff);
OS_LOCK_TYPE_INSTANCE(handoff);
#define OS_LOCK_HANDOFF_YIELD_TRIES 100

static void
_os_lock_handoff_lock_slow(_os_lock_handoff_t l)
{
	int option = SWITCH_OPTION_OSLOCK_DEPRESS;
	mach_msg_timeout_t timeout = 1;
	uint32_t tries = OS_LOCK_HANDOFF_YIELD_TRIES;
	os_lock_owner_t self = _os_lock_owner_get_self(), owner;
	while (unlikely(owner = l->osl_owner)) {
_retry:
		if (unlikely(owner == self)) return _os_lock_recursive_abort(self);
		// Yield until tries first hits zero, then permanently switch to wait
		if (unlikely(!tries--)) option = SWITCH_OPTION_OSLOCK_WAIT;
		thread_switch(owner, option, timeout);
		// Redrive the handoff every 1ms until switching to wait
		if (option == SWITCH_OPTION_OSLOCK_WAIT) timeout++;
	}
	bool r = os_atomic_cmpxchgv(&l->osl_owner, MACH_PORT_NULL, self, &owner,
			acquire);
	if (likely(r)) return;
	goto _retry;
}
void
_os_lock_handoff_lock(_os_lock_handoff_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg(&l->osl_owner, MACH_PORT_NULL, self, acquire);
	if (likely(r)) return;
	return _os_lock_handoff_lock_slow(l);
}

bool
_os_lock_handoff_trylock(_os_lock_handoff_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg(&l->osl_owner, MACH_PORT_NULL, self, acquire);
	return r;
}

void
_os_lock_handoff_unlock(_os_lock_handoff_t l)
{
	os_atomic_store(&l->osl_owner, MACH_PORT_NULL, release);
}
#pragma mark os_ulock_value_t

#include <sys/errno.h>
#include <sys/ulock.h>

typedef os_lock_owner_t os_ulock_value_t;

// This assumes that all thread mach port values always have the low bit set!
// Clearing this bit is used to communicate the existence of waiters to unlock.
#define OS_ULOCK_NOWAITERS_BIT ((os_ulock_value_t)1u)
#define OS_ULOCK_OWNER(value) ((value) | OS_ULOCK_NOWAITERS_BIT)

#define OS_ULOCK_ANONYMOUS_OWNER MACH_PORT_DEAD
#define OS_ULOCK_IS_OWNER(value, self, allow_anonymous_owner) ({ \
		os_lock_owner_t _owner = OS_ULOCK_OWNER(value); (_owner == (self)) && \
		(!(allow_anonymous_owner) || _owner != OS_ULOCK_ANONYMOUS_OWNER); })
#define OS_ULOCK_IS_NOT_OWNER(value, self, allow_anonymous_owner) ({ \
		os_lock_owner_t _owner = OS_ULOCK_OWNER(value); (_owner != (self)) && \
		(!(allow_anonymous_owner) || _owner != OS_ULOCK_ANONYMOUS_OWNER); })
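
/*
 * Worked example (added commentary): a thread whose mach port name is 0x0a07
 * stores 0x0a07 in the lock word while it is the uncontended owner. A waiter
 * clears OS_ULOCK_NOWAITERS_BIT before parking, leaving 0x0a06; the owner's
 * unlock then sees a value different from its own name and takes the slow
 * path to wake waiters. OS_ULOCK_OWNER() simply restores the low bit to
 * recover the owner's port name.
 */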
#pragma mark os_unfair_lock

typedef struct _os_unfair_lock_s {
	os_ulock_value_t oul_value;
} *_os_unfair_lock_t;

_Static_assert(sizeof(struct os_unfair_lock_s) ==
		sizeof(struct _os_unfair_lock_s), "os_unfair_lock size mismatch");

OS_ATOMIC_EXPORT void os_unfair_lock_lock(os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options);
OS_ATOMIC_EXPORT bool os_unfair_lock_trylock(os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock(os_unfair_lock_t lock);

OS_ATOMIC_EXPORT void os_unfair_lock_lock_no_tsd(os_unfair_lock_t lock,
		os_unfair_lock_options_t options, mach_port_t mts);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock_no_tsd(os_unfair_lock_t lock,
		mach_port_t mts);

OS_NOINLINE OS_NORETURN OS_COLD
void _os_unfair_lock_recursive_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_unfair_lock_unowned_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_unfair_lock_corruption_abort(os_ulock_value_t current);

_Static_assert(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION ==
		ULF_WAIT_WORKQ_DATA_CONTENTION,
		"check value for OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION");
_Static_assert(OS_UNFAIR_LOCK_ADAPTIVE_SPIN ==
		ULF_WAIT_ADAPTIVE_SPIN,
		"check value for OS_UNFAIR_LOCK_ADAPTIVE_SPIN");
#define OS_UNFAIR_LOCK_OPTIONS_MASK \
		(os_unfair_lock_options_t)(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION | \
		OS_UNFAIR_LOCK_ADAPTIVE_SPIN)
#define OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER 0x01000000u
OS_NOINLINE OS_NORETURN OS_COLD
void
_os_unfair_lock_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_unfair_lock");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_unfair_lock_unowned_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_unfair_lock not "
			"owned by current thread");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_unfair_lock_corruption_abort(os_ulock_value_t current)
{
	__LIBPLATFORM_CLIENT_CRASH__(current, "os_unfair_lock is corrupt");
}
static void
_os_unfair_lock_lock_slow(_os_unfair_lock_t l,
		os_unfair_lock_options_t options, os_lock_owner_t self)
{
	os_unfair_lock_options_t allow_anonymous_owner =
			options & OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	options &= ~OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	if (unlikely(options & ~OS_UNFAIR_LOCK_OPTIONS_MASK)) {
		__LIBPLATFORM_CLIENT_CRASH__(options, "Invalid options");
	}
	os_ulock_value_t current, new, waiters_mask = 0;
	while (unlikely((current = os_atomic_load(&l->oul_value, relaxed)) !=
			OS_LOCK_NO_OWNER)) {
_retry:
		if (unlikely(OS_ULOCK_IS_OWNER(current, self, allow_anonymous_owner))) {
			return _os_unfair_lock_recursive_abort(self);
		}
		new = current & ~OS_ULOCK_NOWAITERS_BIT;
		if (current != new) {
			// Clear nowaiters bit in lock value before waiting
			if (!os_atomic_cmpxchgv(&l->oul_value, current, new, &current,
					relaxed)) {
				continue;
			}
			current = new;
		}
		int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO | options,
				l, current, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
			case EFAULT:
				continue;
			case EOWNERDEAD:
				_os_unfair_lock_corruption_abort(current);
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
		if (ret > 0) {
			// If there are more waiters, unset nowaiters bit when acquiring lock
			waiters_mask = OS_ULOCK_NOWAITERS_BIT;
		}
	}
	new = self & ~waiters_mask;
	bool r = os_atomic_cmpxchgv(&l->oul_value, OS_LOCK_NO_OWNER, new,
			&current, acquire);
	if (unlikely(!r)) goto _retry;
}
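
/*
 * Note (added commentary): the slow path above advertises the waiter by
 * clearing OS_ULOCK_NOWAITERS_BIT in the lock word before parking in
 * __ulock_wait(). A positive __ulock_wait() return means more waiters
 * remain, so the lock is then acquired with the bit still cleared, which
 * keeps the owner's unlock on the slow (waking) path.
 */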
static void
_os_unfair_lock_unlock_slow(_os_unfair_lock_t l, os_lock_owner_t self,
		os_ulock_value_t current, os_unfair_lock_options_t options)
{
	os_unfair_lock_options_t allow_anonymous_owner =
			options & OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	options &= ~OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, allow_anonymous_owner))) {
		return _os_unfair_lock_unowned_abort(OS_ULOCK_OWNER(current));
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO, l, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}
void
os_unfair_lock_lock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg(&l->oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, OS_UNFAIR_LOCK_NONE, self);
}
void
os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg(&l->oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, options, self);
}
bool
os_unfair_lock_trylock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg(&l->oul_value, OS_LOCK_NO_OWNER, self, acquire);
	return r;
}
void
os_unfair_lock_unlock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current;
	current = os_atomic_xchg(&l->oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow(l, self, current, 0);
}
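
/*
 * Usage sketch (added commentary, not part of the implementation): typical
 * clients go through the public <os/lock.h> interface implemented above.
 *
 *	static os_unfair_lock gate = OS_UNFAIR_LOCK_INIT;
 *
 *	void
 *	update_shared_state(void)
 *	{
 *		os_unfair_lock_lock(&gate);
 *		// critical section; the owner's mach thread port is recorded
 *		// in the lock word while it is held
 *		os_unfair_lock_unlock(&gate);
 *	}
 */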
void
os_unfair_lock_lock_no_tsd(os_unfair_lock_t lock,
		os_unfair_lock_options_t options, mach_port_t self)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	bool r = os_atomic_cmpxchg(&l->oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, options, self);
}
void
os_unfair_lock_unlock_no_tsd(os_unfair_lock_t lock, mach_port_t self)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_ulock_value_t current;
	current = os_atomic_xchg(&l->oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow(l, self, current, 0);
}
void
os_unfair_lock_assert_owner(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current = os_atomic_load(&l->oul_value, relaxed);
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, 0))) {
		__LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
				"Lock unexpectedly not owned by current thread");
	}
}
void
os_unfair_lock_assert_not_owner(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current = os_atomic_load(&l->oul_value, relaxed);
	if (unlikely(OS_ULOCK_IS_OWNER(current, self, 0))) {
		__LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
				"Lock unexpectedly owned by current thread");
	}
}
#pragma mark os_unfair_recursive_lock

OS_ATOMIC_EXPORT
void os_unfair_recursive_lock_lock_with_options(os_unfair_recursive_lock_t lock,
		os_unfair_lock_options_t options);

OS_ATOMIC_EXPORT
bool os_unfair_recursive_lock_trylock(os_unfair_recursive_lock_t lock);

OS_ATOMIC_EXPORT
void os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock);

OS_ATOMIC_EXPORT
bool os_unfair_recursive_lock_tryunlock4objc(os_unfair_recursive_lock_t lock);
static inline os_lock_owner_t
_os_unfair_lock_owner(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	return OS_ULOCK_OWNER(os_atomic_load(&l->oul_value, relaxed));
}

bool
os_unfair_recursive_lock_owned(os_unfair_recursive_lock_t lock)
{
	return _os_unfair_lock_owner(&lock->ourl_lock) ==
			_os_lock_owner_get_self();
}
void
os_unfair_recursive_lock_lock_with_options(os_unfair_recursive_lock_t lock,
		os_unfair_lock_options_t options)
{
	os_lock_owner_t cur, self = _os_lock_owner_get_self();
	_os_unfair_lock_t l = (_os_unfair_lock_t)&lock->ourl_lock;

	if (likely(os_atomic_cmpxchgv(&l->oul_value,
			OS_LOCK_NO_OWNER, self, &cur, acquire))) {
		return;
	}

	if (OS_ULOCK_OWNER(cur) == self) {
		lock->ourl_count++;
		return;
	}

	return _os_unfair_lock_lock_slow(l, options, self);
}
bool
os_unfair_recursive_lock_trylock(os_unfair_recursive_lock_t lock)
{
	os_lock_owner_t cur, self = _os_lock_owner_get_self();
	_os_unfair_lock_t l = (_os_unfair_lock_t)&lock->ourl_lock;

	if (likely(os_atomic_cmpxchgv(&l->oul_value,
			OS_LOCK_NO_OWNER, self, &cur, acquire))) {
		return true;
	}

	if (likely(OS_ULOCK_OWNER(cur) == self)) {
		lock->ourl_count++;
		return true;
	}

	return false;
}
static void
_os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock,
		os_lock_owner_t self)
{
	if (unlikely(lock->ourl_count)) {
		os_lock_owner_t cur = _os_unfair_lock_owner(&lock->ourl_lock);
		if (unlikely(cur != self)) {
			_os_unfair_lock_unowned_abort(cur);
		}
		lock->ourl_count--;
		return;
	}

	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_ulock_value_t current;
	current = os_atomic_xchg(&l->oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow(l, self, current, 0);
}
void
os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	_os_unfair_recursive_lock_unlock(lock, self);
}
bool
os_unfair_recursive_lock_tryunlock4objc(os_unfair_recursive_lock_t lock)
{
	os_lock_owner_t cur = _os_unfair_lock_owner(&lock->ourl_lock);
	os_lock_owner_t self = _os_lock_owner_get_self();
	if (likely(cur == self)) {
		_os_unfair_recursive_lock_unlock(lock, self);
		return true;
	}
	return false;
}
void
os_unfair_recursive_lock_unlock_forked_child(os_unfair_recursive_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)&lock->ourl_lock;

	if (os_atomic_load(&l->oul_value, relaxed) == OS_LOCK_NO_OWNER) {
		__LIBPLATFORM_CLIENT_CRASH__(0, "Lock was not held");
	}
	if (lock->ourl_count) {
		os_lock_owner_t self = _os_lock_owner_get_self();
		lock->ourl_count--;
		os_atomic_store(&l->oul_value, self, relaxed);
	} else {
		os_atomic_store(&l->oul_value, OS_LOCK_NO_OWNER, relaxed);
	}
}
#pragma mark _os_lock_unfair_t

OS_LOCK_STRUCT_DECL_INTERNAL(unfair,
	os_unfair_lock osl_unfair_lock;
);
OS_LOCK_METHODS_DECL(unfair);
OS_LOCK_TYPE_INSTANCE(unfair);

void
_os_lock_unfair_lock(_os_lock_unfair_t l)
{
	return os_unfair_lock_lock(&l->osl_unfair_lock);
}

bool
_os_lock_unfair_trylock(_os_lock_unfair_t l)
{
	return os_unfair_lock_trylock(&l->osl_unfair_lock);
}

void
_os_lock_unfair_unlock(_os_lock_unfair_t l)
{
	return os_unfair_lock_unlock(&l->osl_unfair_lock);
}
#pragma mark _os_nospin_lock

typedef struct _os_nospin_lock_s {
	os_ulock_value_t oul_value;
} _os_nospin_lock, *_os_nospin_lock_t;

_Static_assert(sizeof(OSSpinLock) ==
		sizeof(struct _os_nospin_lock_s), "os_nospin_lock size mismatch");

OS_ATOMIC_EXPORT void _os_nospin_lock_lock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT void _os_nospin_lock_unlock(_os_nospin_lock_t lock);
static void
_os_nospin_lock_lock_slow(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current, new, waiters_mask = 0;
	uint32_t timeout = 1;
	while (unlikely((current = os_atomic_load(&l->oul_value, relaxed)) !=
			OS_LOCK_NO_OWNER)) {
_retry:
		new = current & ~OS_ULOCK_NOWAITERS_BIT;
		// For safer compatibility with OSSpinLock where _OSSpinLockLocked may
		// be 1, check that new didn't become 0 (unlocked) by clearing this bit
		if (current != new && new) {
			// Clear nowaiters bit in lock value before waiting
			if (!os_atomic_cmpxchgv(&l->oul_value, current, new, &current,
					relaxed)) {
				continue;
			}
			current = new;
		}
		int ret = __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, l, current,
				timeout * 1000);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case ETIMEDOUT:
				timeout++;
				continue;
			case EINTR:
			case EFAULT:
				continue;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
		if (ret > 0) {
			// If there are more waiters, unset nowaiters bit when acquiring lock
			waiters_mask = OS_ULOCK_NOWAITERS_BIT;
		}
	}
	new = self & ~waiters_mask;
	bool r = os_atomic_cmpxchgv(&l->oul_value, OS_LOCK_NO_OWNER, new,
			&current, acquire);
	if (unlikely(!r)) goto _retry;
}
static void
_os_nospin_lock_unlock_slow(_os_nospin_lock_t l, os_ulock_value_t current)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	if (unlikely(OS_ULOCK_OWNER(current) != self)) {
		return; // no unowned_abort for drop-in compatibility with OSSpinLock
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, l, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}
void
_os_nospin_lock_lock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg(&l->oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_nospin_lock_lock_slow(l);
}
bool
_os_nospin_lock_trylock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg(&l->oul_value, OS_LOCK_NO_OWNER, self, acquire);
	return r;
}
void
_os_nospin_lock_unlock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current;
	current = os_atomic_xchg(&l->oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_nospin_lock_unlock_slow(l, current);
}
#pragma mark _os_lock_nospin_t

OS_LOCK_STRUCT_DECL_INTERNAL(nospin,
	_os_nospin_lock osl_nospin_lock;
);
OS_LOCK_METHODS_DECL(nospin);
OS_LOCK_TYPE_INSTANCE(nospin);

void
_os_lock_nospin_lock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_lock(&l->osl_nospin_lock);
}

bool
_os_lock_nospin_trylock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_trylock(&l->osl_nospin_lock);
}

void
_os_lock_nospin_unlock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_unlock(&l->osl_nospin_lock);
}
#pragma mark os_once_t

typedef struct os_once_gate_s {
	union {
		os_ulock_value_t ogo_lock;
		uintptr_t ogo_once;
	};
} os_once_gate_s, *os_once_gate_t;

#define OS_ONCE_INIT ((uintptr_t)0l)
#define OS_ONCE_DONE (~(uintptr_t)0l)

#if defined(__i386__) || defined(__x86_64__)
#define OS_ONCE_USE_QUIESCENT_COUNTER 0
#else
#define OS_ONCE_USE_QUIESCENT_COUNTER 1
#endif
OS_ATOMIC_EXPORT void _os_once(os_once_t *val, void *ctxt, os_function_t func);
OS_ATOMIC_EXPORT void __os_once_reset(os_once_t *val);

OS_NOINLINE OS_NORETURN OS_COLD
void _os_once_gate_recursive_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_once_gate_unowned_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_once_gate_corruption_abort(os_ulock_value_t current);
OS_NOINLINE OS_NORETURN OS_COLD
void
_os_once_gate_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_once_t");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_once_gate_unowned_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_once_t not "
			"owned by current thread");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_once_gate_corruption_abort(os_ulock_value_t current)
{
	__LIBPLATFORM_CLIENT_CRASH__(current, "os_once_t is corrupt");
}
#if OS_ONCE_USE_QUIESCENT_COUNTER
#define OS_ONCE_MAKE_GEN(gen)  (((gen) << 2) + OS_ULOCK_NOWAITERS_BIT)
#define OS_ONCE_IS_GEN(gen)    (((gen) & 3) == OS_ULOCK_NOWAITERS_BIT)

// the _COMM_PAGE_CPU_QUIESCENT_COUNTER value is incremented every time
// all CPUs have performed a context switch.
//
// To make sure all CPUs context switched at least once since `gen`,
// we need to observe 4 increments, see libdispatch/src/shims/lock.h
#define OS_ONCE_GEN_SAFE_DELTA  (4 << 2)
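
/*
 * Worked example (added commentary): a counter value g is published as
 * (g << 2) + 1, so a generation always has low bits 0b01. That keeps it
 * distinct from OS_ONCE_INIT (0b00), OS_ONCE_DONE (0b11) and, presumably,
 * from an initializer thread's port name (low bit set per the assumption
 * documented for OS_ULOCK_NOWAITERS_BIT), which is what OS_ONCE_IS_GEN()
 * relies on. The gate is only promoted to OS_ONCE_DONE once the counter
 * has advanced by OS_ONCE_GEN_SAFE_DELTA, i.e. four full increments.
 */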
static inline uintptr_t
_os_once_generation(void)
{
	uintptr_t value = *(volatile uintptr_t *)_COMM_PAGE_CPU_QUIESCENT_COUNTER;
	return OS_ONCE_MAKE_GEN(value);
}

static inline uintptr_t
_os_once_mark_quiescing(os_once_gate_t og)
{
	return os_atomic_xchg(&og->ogo_once, _os_once_generation(), release);
}

static void
_os_once_mark_done_if_quiesced(os_once_gate_t og, uintptr_t gen)
{
	if (_os_once_generation() - gen >= OS_ONCE_GEN_SAFE_DELTA) {
		os_atomic_store(&og->ogo_once, OS_ONCE_DONE, relaxed);
	}
}
#else
static inline uintptr_t
_os_once_mark_done(os_once_gate_t og)
{
	return os_atomic_xchg(&og->ogo_once, OS_ONCE_DONE, release);
}
#endif
static void
_os_once_gate_broadcast(os_once_gate_t og, os_ulock_value_t current,
		os_lock_owner_t self)
{
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, 0))) {
		return _os_once_gate_unowned_abort(OS_ULOCK_OWNER(current));
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO | ULF_WAKE_ALL,
				&og->ogo_lock, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}
static void
_os_once_callout(os_once_gate_t og, void *ctxt, os_function_t func,
		os_lock_owner_t self)
{
	uintptr_t v;

	func(ctxt);

#if OS_ONCE_USE_QUIESCENT_COUNTER
	v = _os_once_mark_quiescing(og);
#else
	v = _os_once_mark_done(og);
#endif
	if (likely((os_ulock_value_t)v == self)) return;
	_os_once_gate_broadcast(og, (os_ulock_value_t)v, self);
}
static void
_os_once_gate_wait(os_once_gate_t og, void *ctxt, os_function_t func,
		os_lock_owner_t self)
{
	uintptr_t old, new;

	for (;;) {
		os_atomic_rmw_loop(&og->ogo_once, old, new, relaxed, {
			if (old == OS_ONCE_DONE) {
				os_atomic_rmw_loop_give_up(return);
#if OS_ONCE_USE_QUIESCENT_COUNTER
			} else if (OS_ONCE_IS_GEN(old)) {
				os_atomic_rmw_loop_give_up({
					os_atomic_thread_fence(acquire);
					return _os_once_mark_done_if_quiesced(og, old);
				});
#endif
			} else if (old == OS_ONCE_INIT) {
				// __os_once_reset was used, try to become the new initializer
				new = (uintptr_t)self;
			} else {
				new = old & ~(uintptr_t)OS_ULOCK_NOWAITERS_BIT;
				if (new == old) os_atomic_rmw_loop_give_up(break);
			}
		});
		if (old == OS_ONCE_INIT) {
			// see comment in _os_once, pairs with the release barrier
			// in __os_once_reset()
			os_atomic_thread_fence(acquire);
			return _os_once_callout(og, ctxt, func, self);
		}
		if (unlikely(OS_ULOCK_IS_OWNER((os_lock_owner_t)old, self, 0))) {
			return _os_once_gate_recursive_abort(self);
		}
		int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO,
				&og->ogo_lock, (os_ulock_value_t)new, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
			case EFAULT:
				continue;
			case EOWNERDEAD:
				_os_once_gate_corruption_abort((os_lock_owner_t)old);
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
	}
}
// Atomically resets the once value to zero and then signals all
// pending waiters to return from their __ulock_wait()
void
__os_once_reset(os_once_t *val)
{
	os_once_gate_t og = (os_once_gate_t)val;
	os_lock_owner_t self = _os_lock_owner_get_self();
	uintptr_t v;

	v = os_atomic_xchg(&og->ogo_once, OS_ONCE_INIT, release);
	if (likely((os_ulock_value_t)v == self)) return;
	return _os_once_gate_broadcast(og, (os_ulock_value_t)v, self);
}
void
_os_once(os_once_t *val, void *ctxt, os_function_t func)
{
	os_once_gate_t og = (os_once_gate_t)val;
	os_lock_owner_t self;
	uintptr_t v;

#if OS_ONCE_USE_QUIESCENT_COUNTER
	v = os_atomic_load(&og->ogo_once, acquire);
	if (likely(OS_ONCE_IS_GEN(v))) {
		return _os_once_mark_done_if_quiesced(og, v);
	}
#endif

	self = _os_lock_owner_get_self();
	v = (uintptr_t)self;

	// The acquire barrier pairs with the release in __os_once_reset()
	// for cases when a previous initializer failed.
	if (likely(os_atomic_cmpxchg(&og->ogo_once, OS_ONCE_INIT, v, acquire))) {
		return _os_once_callout(og, ctxt, func, self);
	}
	return _os_once_gate_wait(og, ctxt, func, self);
}
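
/*
 * Usage sketch (added commentary, illustrative only): one-time initialization
 * driven through the entry point defined above. Callers normally reach it via
 * the os_once()/dispatch_once() wrappers rather than calling it directly.
 *
 *	static os_once_t init_pred;
 *
 *	static void
 *	init_tables(void *ctxt OS_UNUSED)
 *	{
 *		// runs exactly once, even under concurrent callers
 *	}
 *
 *	void
 *	ensure_tables(void)
 *	{
 *		_os_once(&init_pred, NULL, init_tables);
 *	}
 */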