/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */
#include "lock_internal.h"
#include "os/internal.h"
#include "libkern/OSAtomic.h"
#include "os/lock_private.h"
#include "os/once_private.h"

#include <mach/mach_init.h>
#include <mach/mach_traps.h>
#include <mach/thread_switch.h>
#include <mach/mach_time.h>
#pragma mark _os_lock_base_t

OS_NOINLINE OS_NORETURN OS_COLD
void _os_lock_corruption_abort(void *lock_ptr OS_UNUSED, uintptr_t lock_value);

OS_LOCK_STRUCT_DECL_INTERNAL(base);
OS_USED
static OS_LOCK_TYPE_STRUCT_DECL(base);
void
os_lock_lock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_lock(l);
}

bool
os_lock_trylock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_trylock(l);
}

void
os_lock_unlock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_unlock(l);
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_lock_corruption_abort(void *lock_ptr OS_UNUSED, uintptr_t lock_value)
{
	__LIBPLATFORM_CLIENT_CRASH__(lock_value, "os_lock is corrupt");
}
#pragma mark OSSpinLock

OS_NOEXPORT OS_NOINLINE void _OSSpinLockLockSlow(volatile OSSpinLock *l);

OS_ATOMIC_EXPORT void OSSpinLockLock(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT bool OSSpinLockTry(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT int spin_lock_try(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT void OSSpinLockUnlock(volatile OSSpinLock *l);

static const OSSpinLock _OSSpinLockLocked = TARGET_OS_EMBEDDED ? 1 : -1;

#define OS_LOCK_SPIN_SPIN_TRIES 1000
#define OS_LOCK_SPIN_PAUSE() os_hardware_pause()
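
// Contended OSSpinLock acquires spin up to OS_LOCK_SPIN_SPIN_TRIES times,
// executing the architecture's pause hint between polls, before falling back
// to yielding the CPU via thread_switch() in _OSSpinLockLockYield() below.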
OS_ALWAYS_INLINE
static uint64_t
_os_lock_yield_deadline(mach_msg_timeout_t timeout)
{
	uint64_t abstime = timeout * NSEC_PER_MSEC;
#if !(defined(__i386__) || defined(__x86_64__))
	mach_timebase_info_data_t tbi;
	kern_return_t kr = mach_timebase_info(&tbi);
	if (kr) return UINT64_MAX;
	abstime *= tbi.denom;
	abstime /= tbi.numer;
#endif
	return mach_absolute_time() + abstime;
}

OS_ALWAYS_INLINE
static bool
_os_lock_yield_until(uint64_t deadline)
{
	return mach_absolute_time() < deadline;
}
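
// Example: on Intel, mach_absolute_time() ticks in nanoseconds, so a 1ms
// timeout maps directly to 1,000,000 ticks. Elsewhere the timebase ratio
// converts nanoseconds to ticks (ticks = ns * denom / numer), and a
// mach_timebase_info() failure degrades to an effectively infinite deadline.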
OS_NOINLINE
static void
_OSSpinLockLockYield(volatile OSSpinLock *l)
{
	int option = SWITCH_OPTION_DEPRESS;
	mach_msg_timeout_t timeout = 1;
	uint64_t deadline = _os_lock_yield_deadline(timeout);
	OSSpinLock lock;
	while (unlikely(lock = *l)) {
_yield:
		if (unlikely(lock != _OSSpinLockLocked)) {
			_os_lock_corruption_abort((void *)l, (uintptr_t)lock);
		}
		thread_switch(MACH_PORT_NULL, option, timeout);
		if (option == SWITCH_OPTION_WAIT) {
			timeout++;
		} else if (!_os_lock_yield_until(deadline)) {
			option = SWITCH_OPTION_WAIT;
		}
	}
	bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
	if (likely(r)) return;
	goto _yield;
}
#if OS_ATOMIC_UP
void
_OSSpinLockLockSlow(volatile OSSpinLock *l)
{
	return _OSSpinLockLockYield(l); // Don't spin on UP
}
#else // !OS_ATOMIC_UP
void
_OSSpinLockLockSlow(volatile OSSpinLock *l)
{
	uint32_t tries = OS_LOCK_SPIN_SPIN_TRIES;
	OSSpinLock lock;
	while (unlikely(lock = *l)) {
_spin:
		if (unlikely(lock != _OSSpinLockLocked)) {
			return _os_lock_corruption_abort((void *)l, (uintptr_t)lock);
		}
		if (unlikely(!tries--)) return _OSSpinLockLockYield(l);
		OS_LOCK_SPIN_PAUSE();
	}
	bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
	if (likely(r)) return;
	goto _spin;
}
#endif // !OS_ATOMIC_UP
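
// On uniprocessor configurations spinning can never help: the lock holder
// cannot make progress until the spinner yields, so the slow path goes
// straight to the yield loop. On SMP it spins briefly first, since the
// holder is likely running on another CPU and may release imminently.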
#if OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK && !TARGET_OS_SIMULATOR

typedef struct _os_nospin_lock_s *_os_nospin_lock_t;

OS_ATOMIC_EXPORT void _os_nospin_lock_lock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT void _os_nospin_lock_unlock(_os_nospin_lock_t lock);

void
OSSpinLockLock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_lock, OSSpinLockLock);
	OS_ATOMIC_ALIAS(_spin_lock, OSSpinLockLock);
	return _os_nospin_lock_lock((_os_nospin_lock_t)l);
}

bool
OSSpinLockTry(volatile OSSpinLock *l)
{
	return _os_nospin_lock_trylock((_os_nospin_lock_t)l);
}

int
spin_lock_try(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(_spin_lock_try, spin_lock_try);
	return _os_nospin_lock_trylock((_os_nospin_lock_t)l);
}

void
OSSpinLockUnlock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_unlock, OSSpinLockUnlock);
	OS_ATOMIC_ALIAS(_spin_unlock, OSSpinLockUnlock);
	return _os_nospin_lock_unlock((_os_nospin_lock_t)l);
}

#undef OS_ATOMIC_ALIAS
#define OS_ATOMIC_ALIAS(n, o)
static void _OSSpinLockLock(volatile OSSpinLock *l);
#undef OSSpinLockLock
#define OSSpinLockLock _OSSpinLockLock
static bool _OSSpinLockTry(volatile OSSpinLock *l);
#undef OSSpinLockTry
#define OSSpinLockTry _OSSpinLockTry
static __unused int __spin_lock_try(volatile OSSpinLock *l);
#undef spin_lock_try
#define spin_lock_try __spin_lock_try
static void _OSSpinLockUnlock(volatile OSSpinLock *l);
#undef OSSpinLockUnlock
#define OSSpinLockUnlock _OSSpinLockUnlock

#endif // OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK
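
// When OSSpinLock is remapped to the nospin implementation above, the
// spinning definitions that follow are demoted to static _OSSpinLock*
// functions by the #define renames, so os_lock_spin_t further below still
// binds to the true spinning behavior.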
void
OSSpinLockLock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_lock, OSSpinLockLock);
	OS_ATOMIC_ALIAS(_spin_lock, OSSpinLockLock);
	bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
	if (likely(r)) return;
	return _OSSpinLockLockSlow(l);
}

bool
OSSpinLockTry(volatile OSSpinLock *l)
{
	bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
	return r;
}

int
spin_lock_try(volatile OSSpinLock *l) // <rdar://problem/13316060>
{
	OS_ATOMIC_ALIAS(_spin_lock_try, spin_lock_try);
	return OSSpinLockTry(l);
}

void
OSSpinLockUnlock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_unlock, OSSpinLockUnlock);
	OS_ATOMIC_ALIAS(_spin_unlock, OSSpinLockUnlock);
	os_atomic_store(l, 0, release);
}
#pragma mark os_lock_spin_t

OS_LOCK_STRUCT_DECL_INTERNAL(spin,
	OSSpinLock volatile osl_spinlock;
);
OS_LOCK_METHODS_DECL(spin);
OS_LOCK_TYPE_INSTANCE(spin);

void
_os_lock_spin_lock(_os_lock_spin_t l)
{
	return OSSpinLockLock(&l->osl_spinlock);
}

bool
_os_lock_spin_trylock(_os_lock_spin_t l)
{
	return OSSpinLockTry(&l->osl_spinlock);
}

void
_os_lock_spin_unlock(_os_lock_spin_t l)
{
	return OSSpinLockUnlock(&l->osl_spinlock);
}
#pragma mark os_lock_owner_t

#ifndef __TSD_MACH_THREAD_SELF
#define __TSD_MACH_THREAD_SELF 3
#endif

typedef mach_port_name_t os_lock_owner_t;
#define OS_LOCK_NO_OWNER MACH_PORT_NULL

OS_ALWAYS_INLINE
static inline os_lock_owner_t
_os_lock_owner_get_self(void)
{
	os_lock_owner_t self;
	self = (os_lock_owner_t)_os_tsd_get_direct(__TSD_MACH_THREAD_SELF);
	return self;
}

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_lock_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_lock");
}
#pragma mark os_lock_handoff_t

OS_LOCK_STRUCT_DECL_INTERNAL(handoff,
	os_lock_owner_t volatile osl_owner;
);
OS_LOCK_METHODS_DECL(handoff);
OS_LOCK_TYPE_INSTANCE(handoff);

#define OS_LOCK_HANDOFF_YIELD_TRIES 100

OS_NOINLINE
static void
_os_lock_handoff_lock_slow(_os_lock_handoff_t l)
{
	int option = SWITCH_OPTION_OSLOCK_DEPRESS;
	mach_msg_timeout_t timeout = 1;
	uint32_t tries = OS_LOCK_HANDOFF_YIELD_TRIES;
	os_lock_owner_t self = _os_lock_owner_get_self(), owner;
	while (unlikely(owner = l->osl_owner)) {
_handoff:
		if (unlikely(owner == self)) return _os_lock_recursive_abort(self);
		// Yield until tries first hits zero, then permanently switch to wait
		if (unlikely(!tries--)) option = SWITCH_OPTION_OSLOCK_WAIT;
		thread_switch(owner, option, timeout);
		// Redrive the handoff every 1ms until switching to wait
		if (option == SWITCH_OPTION_OSLOCK_WAIT) timeout++;
	}
	bool r = os_atomic_cmpxchgv2o(l, osl_owner, MACH_PORT_NULL, self, &owner,
			acquire);
	if (likely(r)) return;
	goto _handoff;
}

void
_os_lock_handoff_lock(_os_lock_handoff_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, osl_owner, MACH_PORT_NULL, self, acquire);
	if (likely(r)) return;
	return _os_lock_handoff_lock_slow(l);
}

bool
_os_lock_handoff_trylock(_os_lock_handoff_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, osl_owner, MACH_PORT_NULL, self, acquire);
	return r;
}

void
_os_lock_handoff_unlock(_os_lock_handoff_t l)
{
	os_atomic_store2o(l, osl_owner, MACH_PORT_NULL, release);
}
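
// Handoff locks donate the waiter's CPU directly to the current owner:
// thread_switch() with the owner's port name lets the owner run while the
// waiter's priority is depressed, which is what makes the eventual handoff
// back to the waiter fast under contention.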
#pragma mark os_ulock_value_t

#include <sys/errno.h>
#include <sys/ulock.h>

typedef os_lock_owner_t os_ulock_value_t;

// This assumes that all thread mach port values always have the low bit set!
// Clearing this bit is used to communicate the existence of waiters to unlock.
#define OS_ULOCK_NOWAITERS_BIT ((os_ulock_value_t)1u)
#define OS_ULOCK_OWNER(value) ((value) | OS_ULOCK_NOWAITERS_BIT)

#define OS_ULOCK_ANONYMOUS_OWNER MACH_PORT_DEAD
#define OS_ULOCK_IS_OWNER(value, self, allow_anonymous_owner) ({ \
		os_lock_owner_t _owner = OS_ULOCK_OWNER(value); (_owner == (self)) && \
		(!(allow_anonymous_owner) || _owner != OS_ULOCK_ANONYMOUS_OWNER); })
#define OS_ULOCK_IS_NOT_OWNER(value, self, allow_anonymous_owner) ({ \
		os_lock_owner_t _owner = OS_ULOCK_OWNER(value); (_owner != (self)) && \
		(!(allow_anonymous_owner) || _owner != OS_ULOCK_ANONYMOUS_OWNER); })
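
// Worked example of the waiters protocol: a thread whose port name is, say,
// 0x1003 acquires the lock by storing 0x1003. A contender clears the low bit
// (0x1003 -> 0x1002) before blocking in the kernel; when the owner unlocks,
// the exchanged value no longer equals its own name, which routes it to the
// slow path to issue a wakeup. OS_ULOCK_OWNER() restores the bit to recover
// the owner's port name from either encoding.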
#pragma mark os_unfair_lock

typedef struct _os_unfair_lock_s {
	os_ulock_value_t oul_value;
} *_os_unfair_lock_t;

_Static_assert(sizeof(struct os_unfair_lock_s) ==
		sizeof(struct _os_unfair_lock_s), "os_unfair_lock size mismatch");

OS_ATOMIC_EXPORT void os_unfair_lock_lock(os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options);
OS_ATOMIC_EXPORT bool os_unfair_lock_trylock(os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock(os_unfair_lock_t lock);

OS_ATOMIC_EXPORT void os_unfair_lock_lock_no_tsd_4libpthread(
		os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock_no_tsd_4libpthread(
		os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options_4Libc(
		os_unfair_lock_t lock, os_unfair_lock_options_t options);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock_4Libc(os_unfair_lock_t lock);

OS_NOINLINE OS_NORETURN OS_COLD
void _os_unfair_lock_recursive_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_unfair_lock_unowned_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_unfair_lock_corruption_abort(os_ulock_value_t current);

_Static_assert(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION ==
		ULF_WAIT_WORKQ_DATA_CONTENTION,
		"check value for OS_UNFAIR_LOCK_OPTIONS_MASK");
#define OS_UNFAIR_LOCK_OPTIONS_MASK \
		(os_unfair_lock_options_t)(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION)
#define OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER 0x01000000u

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_unfair_lock_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_unfair_lock");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_unfair_lock_unowned_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_unfair_lock not "
			"owned by current thread");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_unfair_lock_corruption_abort(os_ulock_value_t current)
{
	__LIBPLATFORM_CLIENT_CRASH__(current, "os_unfair_lock is corrupt");
}
OS_NOINLINE
static void
_os_unfair_lock_lock_slow(_os_unfair_lock_t l, os_lock_owner_t self,
		os_unfair_lock_options_t options)
{
	os_unfair_lock_options_t allow_anonymous_owner =
			options & OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	options &= ~OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	if (unlikely(options & ~OS_UNFAIR_LOCK_OPTIONS_MASK)) {
		__LIBPLATFORM_CLIENT_CRASH__(options, "Invalid options");
	}
	os_ulock_value_t current, new, waiters_mask = 0;
	while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
			OS_LOCK_NO_OWNER)) {
_retry:
		if (unlikely(OS_ULOCK_IS_OWNER(current, self, allow_anonymous_owner))) {
			return _os_unfair_lock_recursive_abort(self);
		}
		new = current & ~OS_ULOCK_NOWAITERS_BIT;
		if (current != new) {
			// Clear nowaiters bit in lock value before waiting
			if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
					relaxed)) {
				continue;
			}
			current = new;
		}
		int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO | options,
				l, current, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
			case EFAULT:
				continue;
			case EOWNERDEAD:
				_os_unfair_lock_corruption_abort(current);
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
		// If there are more waiters, unset nowaiters bit when acquiring lock
		waiters_mask = (ret > 0) ? OS_ULOCK_NOWAITERS_BIT : 0;
	}
	new = self & ~waiters_mask;
	bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
			&current, acquire);
	if (unlikely(!r)) goto _retry;
}
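
// Acquisition protocol recap: a waiter publishes itself by clearing the
// nowaiters bit, parks in __ulock_wait(), and on wakeup re-CASes the lock
// from OS_LOCK_NO_OWNER to its own name, keeping the bit cleared whenever
// __ulock_wait() reported (ret > 0) that other waiters are still parked.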
OS_NOINLINE
static void
_os_unfair_lock_unlock_slow(_os_unfair_lock_t l, os_ulock_value_t current,
		os_lock_owner_t self, os_unfair_lock_options_t options)
{
	os_unfair_lock_options_t allow_anonymous_owner =
			options & OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	options &= ~OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, allow_anonymous_owner))) {
		return _os_unfair_lock_unowned_abort(OS_ULOCK_OWNER(current));
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO, l, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}
void
os_unfair_lock_lock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self, OS_UNFAIR_LOCK_NONE);
}

void
os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self, options);
}

bool
os_unfair_lock_trylock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	return r;
}

void
os_unfair_lock_unlock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow(l, current, self, 0);
}
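
// Typical client usage, via the public <os/lock.h> interface:
//
//	static os_unfair_lock lock = OS_UNFAIR_LOCK_INIT;
//
//	os_unfair_lock_lock(&lock);
//	/* critical section */
//	os_unfair_lock_unlock(&lock);
//
// The fast path above is a single compare-and-swap per transition; the
// kernel is only involved when the lock is contended.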
void
os_unfair_lock_lock_no_tsd_4libpthread(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = OS_ULOCK_ANONYMOUS_OWNER;
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self,
			OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION |
			OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER);
}

void
os_unfair_lock_unlock_no_tsd_4libpthread(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = OS_ULOCK_ANONYMOUS_OWNER;
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow(l, current, self,
			OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER);
}
void
os_unfair_lock_assert_owner(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed);
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, 0))) {
		__LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
				"Lock unexpectedly not owned by current thread");
	}
}

void
os_unfair_lock_assert_not_owner(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed);
	if (unlikely(OS_ULOCK_IS_OWNER(current, self, 0))) {
		__LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
				"Lock unexpectedly owned by current thread");
	}
}
#pragma mark _os_lock_unfair_t

OS_LOCK_STRUCT_DECL_INTERNAL(unfair,
	os_unfair_lock osl_unfair_lock;
);
OS_LOCK_METHODS_DECL(unfair);
OS_LOCK_TYPE_INSTANCE(unfair);

void
_os_lock_unfair_lock(_os_lock_unfair_t l)
{
	return os_unfair_lock_lock(&l->osl_unfair_lock);
}

bool
_os_lock_unfair_trylock(_os_lock_unfair_t l)
{
	return os_unfair_lock_trylock(&l->osl_unfair_lock);
}

void
_os_lock_unfair_unlock(_os_lock_unfair_t l)
{
	return os_unfair_lock_unlock(&l->osl_unfair_lock);
}
#pragma mark _os_nospin_lock

typedef struct _os_nospin_lock_s {
	os_ulock_value_t oul_value;
} _os_nospin_lock, *_os_nospin_lock_t;

_Static_assert(sizeof(OSSpinLock) ==
		sizeof(struct _os_nospin_lock_s), "os_nospin_lock size mismatch");

OS_ATOMIC_EXPORT void _os_nospin_lock_lock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT void _os_nospin_lock_unlock(_os_nospin_lock_t lock);
OS_NOINLINE
static void
_os_nospin_lock_lock_slow(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current, new, waiters_mask = 0;
	uint32_t timeout = 1;
	while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
			OS_LOCK_NO_OWNER)) {
_retry:
		new = current & ~OS_ULOCK_NOWAITERS_BIT;
		// For safer compatibility with OSSpinLock where _OSSpinLockLocked may
		// be 1, check that new didn't become 0 (unlocked) by clearing this bit
		if (current != new && new) {
			// Clear nowaiters bit in lock value before waiting
			if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
					relaxed)) {
				continue;
			}
			current = new;
		}
		int ret = __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, l, current,
				timeout * 1000);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case ETIMEDOUT:
				timeout++;
				continue;
			case EINTR:
			case EFAULT:
				continue;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
		// If there are more waiters, unset nowaiters bit when acquiring lock
		waiters_mask = (ret > 0) ? OS_ULOCK_NOWAITERS_BIT : 0;
	}
	new = self & ~waiters_mask;
	bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
			&current, acquire);
	if (unlikely(!r)) goto _retry;
}
OS_NOINLINE
static void
_os_nospin_lock_unlock_slow(_os_nospin_lock_t l, os_ulock_value_t current)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	if (unlikely(OS_ULOCK_OWNER(current) != self)) {
		return; // no unowned_abort for drop-in compatibility with OSSpinLock
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, l, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}
void
_os_nospin_lock_lock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_nospin_lock_lock_slow(l);
}

bool
_os_nospin_lock_trylock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	return r;
}

void
_os_nospin_lock_unlock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_nospin_lock_unlock_slow(l, current);
}
#pragma mark _os_lock_nospin_t

OS_LOCK_STRUCT_DECL_INTERNAL(nospin,
	_os_nospin_lock osl_nospin_lock;
);
OS_LOCK_METHODS_DECL(nospin);
OS_LOCK_TYPE_INSTANCE(nospin);

void
_os_lock_nospin_lock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_lock(&l->osl_nospin_lock);
}

bool
_os_lock_nospin_trylock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_trylock(&l->osl_nospin_lock);
}

void
_os_lock_nospin_unlock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_unlock(&l->osl_nospin_lock);
}
#pragma mark os_once_t

typedef struct os_once_gate_s {
	union {
		os_ulock_value_t ogo_lock;
		os_once_t ogo_once;
	};
} os_once_gate_s, *os_once_gate_t;

#define OS_ONCE_INIT ((os_once_t)0l)
#define OS_ONCE_DONE (~(os_once_t)0l)
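
// Gate encoding: ogo_once is OS_ONCE_INIT (0) before any initializer runs,
// the running thread's port name (with the ulock waiters protocol applied
// to the aliased ogo_lock) while the initializer is in flight, and
// OS_ONCE_DONE once it has completed.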
OS_ATOMIC_EXPORT void _os_once(os_once_t *val, void *ctxt, os_function_t func);
OS_ATOMIC_EXPORT void __os_once_reset(os_once_t *val);

OS_NOINLINE OS_NORETURN OS_COLD
void _os_once_gate_recursive_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_once_gate_unowned_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_once_gate_corruption_abort(os_ulock_value_t current);

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_once_gate_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_once_t");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_once_gate_unowned_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_once_t not "
			"owned by current thread");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_once_gate_corruption_abort(os_ulock_value_t current)
{
	__LIBPLATFORM_CLIENT_CRASH__(current, "os_once_t is corrupt");
}
OS_NOINLINE
static void
_os_once_gate_wait_slow(os_ulock_value_t *gate, os_lock_owner_t self)
{
	os_ulock_value_t tid_old, tid_new;

	for (;;) {
		os_atomic_rmw_loop(gate, tid_old, tid_new, relaxed, {
			switch (tid_old) {
			case (os_ulock_value_t)OS_ONCE_INIT: // raced with __os_once_reset()
			case (os_ulock_value_t)OS_ONCE_DONE: // raced with _os_once()
				os_atomic_rmw_loop_give_up(return);
			}
			tid_new = tid_old & ~OS_ULOCK_NOWAITERS_BIT;
			if (tid_new == tid_old) os_atomic_rmw_loop_give_up(break);
		});
		if (unlikely(OS_ULOCK_IS_OWNER(tid_old, self, 0))) {
			return _os_once_gate_recursive_abort(self);
		}
		int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO,
				gate, tid_new, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
			case EFAULT:
				continue;
			case EOWNERDEAD:
				_os_once_gate_corruption_abort(tid_old);
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
	}
}
OS_NOINLINE
static void
_os_once_gate_broadcast_slow(os_ulock_value_t *gate, os_ulock_value_t current,
		os_lock_owner_t self)
{
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, 0))) {
		return _os_once_gate_unowned_abort(OS_ULOCK_OWNER(current));
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO | ULF_WAKE_ALL,
				gate, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}
OS_ALWAYS_INLINE
static void
_os_once_gate_set_value_and_broadcast(os_once_gate_t og, os_lock_owner_t self,
		os_once_t value)
{
	os_ulock_value_t current;
#if defined(__i386__) || defined(__x86_64__)
	// On Intel, any load is a load-acquire, so we don't need to be fancy
	current = (os_ulock_value_t)os_atomic_xchg(&og->ogo_once, value, release);
#else
#  error os_once algorithm not available for this architecture
#endif
	if (likely(current == self)) return;
	_os_once_gate_broadcast_slow(&og->ogo_lock, current, self);
}
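
// The release exchange publishes the once value; because x86 loads carry
// acquire semantics, readers that observe OS_ONCE_DONE are guaranteed to
// also observe the initializer's side effects without extra fences.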
// Atomically resets the once value to zero and then signals all
// pending waiters to return from their _os_once_gate_wait_slow()
void
__os_once_reset(os_once_t *val)
{
	os_once_gate_t og = (os_once_gate_t)val;
	os_lock_owner_t self = _os_lock_owner_get_self();
	_os_once_gate_set_value_and_broadcast(og, self, OS_ONCE_INIT);
}

void
_os_once(os_once_t *val, void *ctxt, os_function_t func)
{
	os_once_gate_t og = (os_once_gate_t)val;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_once_t v = (os_once_t)self;

	if (likely(os_atomic_cmpxchg(&og->ogo_once, OS_ONCE_INIT, v, relaxed))) {
		func(ctxt);
		_os_once_gate_set_value_and_broadcast(og, self, OS_ONCE_DONE);
	} else {
		_os_once_gate_wait_slow(&og->ogo_lock, self);
	}
}
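
// Typical client usage, via the os_once() wrapper in <os/once_private.h>:
//
//	static os_once_t token;
//	os_once(&token, ctxt, initializer); // initializer(ctxt) runs exactly once
//
// Losers of the initial compare-and-swap block in _os_once_gate_wait_slow()
// until the winner broadcasts OS_ONCE_DONE.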
#pragma mark os_lock_eliding_t

#if !TARGET_OS_IPHONE

#define _os_lock_eliding_t _os_lock_spin_t
#define _os_lock_eliding_lock _os_lock_spin_lock
#define _os_lock_eliding_trylock _os_lock_spin_trylock
#define _os_lock_eliding_unlock _os_lock_spin_unlock
OS_LOCK_METHODS_DECL(eliding);
OS_LOCK_TYPE_INSTANCE(eliding);
#pragma mark os_lock_transactional_t

OS_LOCK_STRUCT_DECL_INTERNAL(transactional,
	uintptr_t volatile osl_lock;
);

#define _os_lock_transactional_t _os_lock_eliding_t
#define _os_lock_transactional_lock _os_lock_eliding_lock
#define _os_lock_transactional_trylock _os_lock_eliding_trylock
#define _os_lock_transactional_unlock _os_lock_eliding_unlock
OS_LOCK_METHODS_DECL(transactional);
OS_LOCK_TYPE_INSTANCE(transactional);

#endif // !TARGET_OS_IPHONE