/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#include "lock_internal.h"
#include "os/internal.h"
#include "resolver.h"
#include "libkern/OSAtomic.h"
#include "os/lock.h"
#include "os/lock_private.h"
#include "os/once_private.h"

#include <mach/mach_init.h>
#include <mach/mach_traps.h>
#include <mach/thread_switch.h>
#include <mach/mach_time.h>
#include <os/tsd.h>

#pragma mark -
#pragma mark _os_lock_base_t

OS_NOINLINE OS_NORETURN OS_COLD
void _os_lock_corruption_abort(void *lock_ptr OS_UNUSED, uintptr_t lock_value);


OS_LOCK_STRUCT_DECL_INTERNAL(base);
OS_USED static OS_LOCK_TYPE_STRUCT_DECL(base);

void
os_lock_lock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_lock(l);
}

bool
os_lock_trylock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_trylock(l);
}

void
os_lock_unlock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_unlock(l);
}

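/*
 * Added note (not in the original source): the generic os_lock API above
 * dispatches through a per-type method table -- os_lock_t is a transparent
 * union of lock pointers whose common header points at the
 * OS_LOCK_TYPE_STRUCT carrying the osl_lock/osl_trylock/osl_unlock methods.
 * A minimal client-side sketch, assuming the OS_LOCK_HANDOFF_INIT
 * initializer from <os/lock_private.h>:
 *
 *	static os_lock_handoff_s g_lock = OS_LOCK_HANDOFF_INIT;
 *
 *	void
 *	with_lock(void (*work)(void *), void *ctxt)
 *	{
 *		os_lock_lock(&g_lock);   // dispatches to _os_lock_handoff_lock
 *		work(ctxt);
 *		os_lock_unlock(&g_lock); // dispatches to _os_lock_handoff_unlock
 *	}
 */
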
OS_NOINLINE OS_NORETURN OS_COLD
void
_os_lock_corruption_abort(void *lock_ptr OS_UNUSED, uintptr_t lock_value)
{
	__LIBPLATFORM_CLIENT_CRASH__(lock_value, "os_lock is corrupt");
}


#pragma mark -
#pragma mark OSSpinLock

OS_NOEXPORT OS_NOINLINE void _OSSpinLockLockSlow(volatile OSSpinLock *l);

OS_ATOMIC_EXPORT void OSSpinLockLock(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT bool OSSpinLockTry(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT int spin_lock_try(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT void OSSpinLockUnlock(volatile OSSpinLock *l);

static const OSSpinLock _OSSpinLockLocked = TARGET_OS_EMBEDDED ? 1 : -1;


#if OS_ATOMIC_UP
// Don't spin on UP
#else
#define OS_LOCK_SPIN_SPIN_TRIES 1000
#define OS_LOCK_SPIN_PAUSE() os_hardware_pause()
#endif

OS_ALWAYS_INLINE
static uint64_t
_os_lock_yield_deadline(mach_msg_timeout_t timeout)
{
	uint64_t abstime = timeout * NSEC_PER_MSEC;
#if !(defined(__i386__) || defined(__x86_64__))
	mach_timebase_info_data_t tbi;
	kern_return_t kr = mach_timebase_info(&tbi);
	if (kr) return UINT64_MAX;
	abstime *= tbi.denom;
	abstime /= tbi.numer;
#endif
	return mach_absolute_time() + abstime;
}

OS_ALWAYS_INLINE
static bool
_os_lock_yield_until(uint64_t deadline)
{
	return mach_absolute_time() < deadline;
}

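/*
 * Added note (not in the original source): _os_lock_yield_deadline()
 * converts a millisecond timeout into mach_absolute_time() units. On Intel
 * the timebase is 1:1 with nanoseconds, so no scaling is needed; elsewhere
 * the nanosecond count is scaled by denom/numer. For example, with a
 * hypothetical 125/3 timebase (numer = 125, denom = 3), a 1ms timeout
 * becomes 1000000 * 3 / 125 = 24000 absolute-time ticks past "now".
 */
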
OS_NOINLINE
static void
_OSSpinLockLockYield(volatile OSSpinLock *l)
{
	int option = SWITCH_OPTION_DEPRESS;
	mach_msg_timeout_t timeout = 1;
	uint64_t deadline = _os_lock_yield_deadline(timeout);
	OSSpinLock lock;
	while (unlikely(lock = *l)) {
_yield:
		if (unlikely(lock != _OSSpinLockLocked)) {
			_os_lock_corruption_abort((void *)l, (uintptr_t)lock);
		}
		thread_switch(MACH_PORT_NULL, option, timeout);
		if (option == SWITCH_OPTION_WAIT) {
			timeout++;
		} else if (!_os_lock_yield_until(deadline)) {
			option = SWITCH_OPTION_WAIT;
		}
	}
	bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
	if (likely(r)) return;
	goto _yield;
}

#if OS_ATOMIC_UP
void
_OSSpinLockLockSlow(volatile OSSpinLock *l)
{
	return _OSSpinLockLockYield(l); // Don't spin on UP
}
#else // !OS_ATOMIC_UP
void
_OSSpinLockLockSlow(volatile OSSpinLock *l)
{
	uint32_t tries = OS_LOCK_SPIN_SPIN_TRIES;
	OSSpinLock lock;
	while (unlikely(lock = *l)) {
_spin:
		if (unlikely(lock != _OSSpinLockLocked)) {
			return _os_lock_corruption_abort((void *)l, (uintptr_t)lock);
		}
		if (unlikely(!tries--)) return _OSSpinLockLockYield(l);
		OS_LOCK_SPIN_PAUSE();
	}
	bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
	if (likely(r)) return;
	goto _spin;
}
#endif // !OS_ATOMIC_UP


#if OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK && !TARGET_OS_SIMULATOR

typedef struct _os_nospin_lock_s *_os_nospin_lock_t;

OS_ATOMIC_EXPORT void _os_nospin_lock_lock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT void _os_nospin_lock_unlock(_os_nospin_lock_t lock);

void
OSSpinLockLock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_lock, OSSpinLockLock);
	OS_ATOMIC_ALIAS(_spin_lock, OSSpinLockLock);
	return _os_nospin_lock_lock((_os_nospin_lock_t)l);
}

bool
OSSpinLockTry(volatile OSSpinLock *l)
{
	return _os_nospin_lock_trylock((_os_nospin_lock_t)l);
}

int
spin_lock_try(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(_spin_lock_try, spin_lock_try);
	return _os_nospin_lock_trylock((_os_nospin_lock_t)l);
}

void
OSSpinLockUnlock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_unlock, OSSpinLockUnlock);
	OS_ATOMIC_ALIAS(_spin_unlock, OSSpinLockUnlock);
	return _os_nospin_lock_unlock((_os_nospin_lock_t)l);
}

#undef OS_ATOMIC_ALIAS
#define OS_ATOMIC_ALIAS(n, o)
static void _OSSpinLockLock(volatile OSSpinLock *l);
#undef OSSpinLockLock
#define OSSpinLockLock _OSSpinLockLock
static bool _OSSpinLockTry(volatile OSSpinLock *l);
#undef OSSpinLockTry
#define OSSpinLockTry _OSSpinLockTry
static __unused int __spin_lock_try(volatile OSSpinLock *l);
#undef spin_lock_try
#define spin_lock_try __spin_lock_try
static void _OSSpinLockUnlock(volatile OSSpinLock *l);
#undef OSSpinLockUnlock
#define OSSpinLockUnlock _OSSpinLockUnlock

#endif // OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK

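/*
 * Added note (not in the original source): when OSSpinLock is remapped to
 * the nospin implementation above, the exported OSSpinLock* symbols (and
 * their historical spin_lock/_spin_lock aliases) forward to
 * _os_nospin_lock_*. The #undef/#define block then renames the definitions
 * that follow to static _OSSpinLock* variants, so the original spinning
 * implementation is still compiled in for the _os_lock_spin_t methods
 * further below, while OS_ATOMIC_ALIAS is stubbed out to avoid emitting
 * duplicate exported symbols.
 */
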
void
OSSpinLockLock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_lock, OSSpinLockLock);
	OS_ATOMIC_ALIAS(_spin_lock, OSSpinLockLock);
	bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
	if (likely(r)) return;
	return _OSSpinLockLockSlow(l);
}

bool
OSSpinLockTry(volatile OSSpinLock *l)
{
	bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
	return r;
}

int
spin_lock_try(volatile OSSpinLock *l) // <rdar://problem/13316060>
{
	OS_ATOMIC_ALIAS(_spin_lock_try, spin_lock_try);
	return OSSpinLockTry(l);
}

void
OSSpinLockUnlock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_unlock, OSSpinLockUnlock);
	OS_ATOMIC_ALIAS(_spin_unlock, OSSpinLockUnlock);
	os_atomic_store(l, 0, release);
}


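/*
 * Illustrative client-side sketch (not part of this file): an OSSpinLock is
 * an integer initialized to OS_SPINLOCK_INIT (0) from <libkern/OSAtomic.h>;
 * the locked value is 1 on embedded targets and -1 elsewhere, per
 * _OSSpinLockLocked above.
 *
 *	static OSSpinLock g_spinlock = OS_SPINLOCK_INIT;
 *
 *	void
 *	increment_counter(long *counter)
 *	{
 *		OSSpinLockLock(&g_spinlock);   // spins, then yields via thread_switch
 *		(*counter)++;
 *		OSSpinLockUnlock(&g_spinlock); // plain release store of 0
 *	}
 */
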
#pragma mark -
#pragma mark os_lock_spin_t

OS_LOCK_STRUCT_DECL_INTERNAL(spin,
	OSSpinLock volatile osl_spinlock;
);
OS_LOCK_METHODS_DECL(spin);
OS_LOCK_TYPE_INSTANCE(spin);

void
_os_lock_spin_lock(_os_lock_spin_t l)
{
	return OSSpinLockLock(&l->osl_spinlock);
}

bool
_os_lock_spin_trylock(_os_lock_spin_t l)
{
	return OSSpinLockTry(&l->osl_spinlock);
}

void
_os_lock_spin_unlock(_os_lock_spin_t l)
{
	return OSSpinLockUnlock(&l->osl_spinlock);
}


ada7c492
A
281#pragma mark -
282#pragma mark os_lock_owner_t
283
284#ifndef __TSD_MACH_THREAD_SELF
285#define __TSD_MACH_THREAD_SELF 3
286#endif
287
288typedef mach_port_name_t os_lock_owner_t;
e45b4692
A
289#define OS_LOCK_NO_OWNER MACH_PORT_NULL
290
ada7c492
A
291
292OS_ALWAYS_INLINE
293static inline os_lock_owner_t
294_os_lock_owner_get_self(void)
295{
296 os_lock_owner_t self;
297 self = (os_lock_owner_t)_os_tsd_get_direct(__TSD_MACH_THREAD_SELF);
298 return self;
299}
300
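/*
 * Added note (not in the original source): TSD slot 3 caches the current
 * thread's mach port name, so reading it avoids a syscall and avoids
 * acquiring an extra port right the way mach_thread_self() would.
 * Illustratively, the value matches the name pthread exposes:
 *
 *	assert(_os_lock_owner_get_self() ==
 *			pthread_mach_thread_np(pthread_self()));
 */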

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_lock_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_lock");
}


#pragma mark -
#pragma mark os_lock_handoff_t

OS_LOCK_STRUCT_DECL_INTERNAL(handoff,
	os_lock_owner_t volatile osl_owner;
);
OS_LOCK_METHODS_DECL(handoff);
OS_LOCK_TYPE_INSTANCE(handoff);

#define OS_LOCK_HANDOFF_YIELD_TRIES 100

OS_NOINLINE
static void
_os_lock_handoff_lock_slow(_os_lock_handoff_t l)
{
	int option = SWITCH_OPTION_OSLOCK_DEPRESS;
	mach_msg_timeout_t timeout = 1;
	uint32_t tries = OS_LOCK_HANDOFF_YIELD_TRIES;
	os_lock_owner_t self = _os_lock_owner_get_self(), owner;
	while (unlikely(owner = l->osl_owner)) {
_handoff:
		if (unlikely(owner == self)) return _os_lock_recursive_abort(self);
		// Yield until tries first hits zero, then permanently switch to wait
		if (unlikely(!tries--)) option = SWITCH_OPTION_OSLOCK_WAIT;
		thread_switch(owner, option, timeout);
		// Redrive the handoff every 1ms until switching to wait
		if (option == SWITCH_OPTION_OSLOCK_WAIT) timeout++;
	}
	bool r = os_atomic_cmpxchgv2o(l, osl_owner, MACH_PORT_NULL, self, &owner,
			acquire);
	if (likely(r)) return;
	goto _handoff;
}

void
_os_lock_handoff_lock(_os_lock_handoff_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, osl_owner, MACH_PORT_NULL, self, acquire);
	if (likely(r)) return;
	return _os_lock_handoff_lock_slow(l);
}

bool
_os_lock_handoff_trylock(_os_lock_handoff_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, osl_owner, MACH_PORT_NULL, self, acquire);
	return r;
}

void
_os_lock_handoff_unlock(_os_lock_handoff_t l)
{
	os_atomic_store2o(l, osl_owner, MACH_PORT_NULL, release);
}

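/*
 * Illustrative sketch (not part of this file): a handoff lock records the
 * owner's port name, so a contended waiter can donate its CPU time directly
 * to the owner via thread_switch(), which helps when the owner runs at a
 * lower priority. Assuming the OS_LOCK_HANDOFF_INIT initializer from
 * <os/lock_private.h>:
 *
 *	static os_lock_handoff_s g_handoff = OS_LOCK_HANDOFF_INIT;
 *
 *	void
 *	critical_section(void)
 *	{
 *		os_lock_lock(&g_handoff);
 *		// ... the owner's port name is now stored in osl_owner ...
 *		os_lock_unlock(&g_handoff);
 *	}
 */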

#pragma mark -
#pragma mark os_ulock_value_t

#include <sys/errno.h>
#include <sys/ulock.h>

typedef os_lock_owner_t os_ulock_value_t;

// This assumes that all thread mach port values always have the low bit set!
// Clearing this bit is used to communicate the existence of waiters to unlock.
#define OS_ULOCK_NOWAITERS_BIT ((os_ulock_value_t)1u)
#define OS_ULOCK_OWNER(value) ((value) | OS_ULOCK_NOWAITERS_BIT)

#define OS_ULOCK_ANONYMOUS_OWNER MACH_PORT_DEAD
#define OS_ULOCK_IS_OWNER(value, self, allow_anonymous_owner) ({ \
		os_lock_owner_t _owner = OS_ULOCK_OWNER(value); (_owner == (self)) && \
		(!(allow_anonymous_owner) || _owner != OS_ULOCK_ANONYMOUS_OWNER); })
#define OS_ULOCK_IS_NOT_OWNER(value, self, allow_anonymous_owner) ({ \
		os_lock_owner_t _owner = OS_ULOCK_OWNER(value); (_owner != (self)) && \
		(!(allow_anonymous_owner) || _owner != OS_ULOCK_ANONYMOUS_OWNER); })
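/*
 * Added worked example (not in the original source), using a hypothetical
 * thread port name 0x1e03 (low bit set, as the comment above requires): an
 * uncontended lock stores 0x1e03; the first waiter clears the bit, leaving
 * 0x1e02, before blocking in __ulock_wait(). The unlocker can then tell that
 * waiters exist because the low bit is clear, and OS_ULOCK_OWNER(0x1e02) ==
 * 0x1e03 still recovers the owner's port name.
 */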

#pragma mark -
#pragma mark os_unfair_lock

typedef struct _os_unfair_lock_s {
	os_ulock_value_t oul_value;
} *_os_unfair_lock_t;

_Static_assert(sizeof(struct os_unfair_lock_s) ==
		sizeof(struct _os_unfair_lock_s), "os_unfair_lock size mismatch");

OS_ATOMIC_EXPORT void os_unfair_lock_lock(os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options);
OS_ATOMIC_EXPORT bool os_unfair_lock_trylock(os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock(os_unfair_lock_t lock);

OS_ATOMIC_EXPORT void os_unfair_lock_lock_no_tsd_4libpthread(
		os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock_no_tsd_4libpthread(
		os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options_4Libc(
		os_unfair_lock_t lock, os_unfair_lock_options_t options);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock_4Libc(os_unfair_lock_t lock);

OS_NOINLINE OS_NORETURN OS_COLD
void _os_unfair_lock_recursive_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_unfair_lock_unowned_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_unfair_lock_corruption_abort(os_ulock_value_t current);

_Static_assert(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION ==
		ULF_WAIT_WORKQ_DATA_CONTENTION,
		"check value for OS_UNFAIR_LOCK_OPTIONS_MASK");
#define OS_UNFAIR_LOCK_OPTIONS_MASK \
		(os_unfair_lock_options_t)(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION)
#define OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER 0x01000000u

ada7c492
A
428
429OS_NOINLINE OS_NORETURN OS_COLD
e45b4692 430void
ada7c492
A
431_os_unfair_lock_recursive_abort(os_lock_owner_t owner)
432{
433 __LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
434 "os_unfair_lock");
435}
436
437OS_NOINLINE OS_NORETURN OS_COLD
e45b4692 438void
ada7c492
A
439_os_unfair_lock_unowned_abort(os_lock_owner_t owner)
440{
441 __LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_unfair_lock not "
442 "owned by current thread");
443}
444
445OS_NOINLINE OS_NORETURN OS_COLD
e45b4692 446void
ada7c492
A
447_os_unfair_lock_corruption_abort(os_ulock_value_t current)
448{
449 __LIBPLATFORM_CLIENT_CRASH__(current, "os_unfair_lock is corrupt");
450}
451
e45b4692 452
ada7c492
A
OS_NOINLINE
static void
_os_unfair_lock_lock_slow(_os_unfair_lock_t l, os_lock_owner_t self,
		os_unfair_lock_options_t options)
{
	os_unfair_lock_options_t allow_anonymous_owner =
			options & OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	options &= ~OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	if (unlikely(options & ~OS_UNFAIR_LOCK_OPTIONS_MASK)) {
		__LIBPLATFORM_CLIENT_CRASH__(options, "Invalid options");
	}
	os_ulock_value_t current, new, waiters_mask = 0;
	while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
			OS_LOCK_NO_OWNER)) {
_retry:
		if (unlikely(OS_ULOCK_IS_OWNER(current, self, allow_anonymous_owner))) {
			return _os_unfair_lock_recursive_abort(self);
		}
		new = current & ~OS_ULOCK_NOWAITERS_BIT;
		if (current != new) {
			// Clear nowaiters bit in lock value before waiting
			if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
					relaxed)){
				continue;
			}
			current = new;
		}
		int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO | options,
				l, current, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
			case EFAULT:
				continue;
			case EOWNERDEAD:
				_os_unfair_lock_corruption_abort(current);
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
		// If there are more waiters, unset nowaiters bit when acquiring lock
		waiters_mask = (ret > 0) ? OS_ULOCK_NOWAITERS_BIT : 0;
	}
	new = self & ~waiters_mask;
	bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
			&current, acquire);
	if (unlikely(!r)) goto _retry;
}

OS_NOINLINE
static void
_os_unfair_lock_unlock_slow(_os_unfair_lock_t l, os_ulock_value_t current,
		os_lock_owner_t self, os_unfair_lock_options_t options)
{
	os_unfair_lock_options_t allow_anonymous_owner =
			options & OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	options &= ~OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, allow_anonymous_owner))) {
		return _os_unfair_lock_unowned_abort(OS_ULOCK_OWNER(current));
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO, l, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

void
os_unfair_lock_lock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self, OS_UNFAIR_LOCK_NONE);
}

void
os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self, options);
}

bool
os_unfair_lock_trylock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	return r;
}

void
os_unfair_lock_unlock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow(l, current, self, 0);
}

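/*
 * Illustrative client-side sketch (not part of this file): os_unfair_lock
 * stores the owner's mach port name in the lock word, which lets a contended
 * waiter identify the owner to the kernel via __ulock_wait(UL_UNFAIR_LOCK).
 *
 *	#include <os/lock.h>
 *
 *	static os_unfair_lock g_unfair = OS_UNFAIR_LOCK_INIT;
 *
 *	void
 *	update_shared_state(void (*update)(void))
 *	{
 *		os_unfair_lock_lock(&g_unfair);
 *		os_unfair_lock_assert_owner(&g_unfair); // defined below
 *		update();
 *		os_unfair_lock_unlock(&g_unfair);
 *	}
 */
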
void
os_unfair_lock_lock_no_tsd_4libpthread(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = OS_ULOCK_ANONYMOUS_OWNER;
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self,
			OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION|
			OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER);
}

void
os_unfair_lock_unlock_no_tsd_4libpthread(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = OS_ULOCK_ANONYMOUS_OWNER;
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow(l, current, self,
			OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER);
}


void
os_unfair_lock_assert_owner(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed);
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, 0))) {
		__LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
				"Lock unexpectedly not owned by current thread");
	}
}

void
os_unfair_lock_assert_not_owner(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed);
	if (unlikely(OS_ULOCK_IS_OWNER(current, self, 0))) {
		__LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
				"Lock unexpectedly owned by current thread");
	}
}


#pragma mark -
#pragma mark _os_lock_unfair_t

OS_LOCK_STRUCT_DECL_INTERNAL(unfair,
	os_unfair_lock osl_unfair_lock;
);
OS_LOCK_METHODS_DECL(unfair);
OS_LOCK_TYPE_INSTANCE(unfair);

void
_os_lock_unfair_lock(_os_lock_unfair_t l)
{
	return os_unfair_lock_lock(&l->osl_unfair_lock);
}

bool
_os_lock_unfair_trylock(_os_lock_unfair_t l)
{
	return os_unfair_lock_trylock(&l->osl_unfair_lock);
}

void
_os_lock_unfair_unlock(_os_lock_unfair_t l)
{
	return os_unfair_lock_unlock(&l->osl_unfair_lock);
}


#pragma mark -
#pragma mark _os_nospin_lock

typedef struct _os_nospin_lock_s {
	os_ulock_value_t oul_value;
} _os_nospin_lock, *_os_nospin_lock_t;

_Static_assert(sizeof(OSSpinLock) ==
		sizeof(struct _os_nospin_lock_s), "os_nospin_lock size mismatch");

OS_ATOMIC_EXPORT void _os_nospin_lock_lock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT void _os_nospin_lock_unlock(_os_nospin_lock_t lock);


OS_NOINLINE
static void
_os_nospin_lock_lock_slow(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current, new, waiters_mask = 0;
	uint32_t timeout = 1;
	while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
			OS_LOCK_NO_OWNER)) {
_retry:
		new = current & ~OS_ULOCK_NOWAITERS_BIT;
		// For safer compatibility with OSSpinLock where _OSSpinLockLocked may
		// be 1, check that new didn't become 0 (unlocked) by clearing this bit
		if (current != new && new) {
			// Clear nowaiters bit in lock value before waiting
			if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
					relaxed)){
				continue;
			}
			current = new;
		}
		int ret = __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, l, current,
				timeout * 1000);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case ETIMEDOUT:
				timeout++;
				continue;
			case EINTR:
			case EFAULT:
				continue;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
		// If there are more waiters, unset nowaiters bit when acquiring lock
		waiters_mask = (ret > 0) ? OS_ULOCK_NOWAITERS_BIT : 0;
	}
	new = self & ~waiters_mask;
	bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
			&current, acquire);
	if (unlikely(!r)) goto _retry;
}

OS_NOINLINE
static void
_os_nospin_lock_unlock_slow(_os_nospin_lock_t l, os_ulock_value_t current)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	if (unlikely(OS_ULOCK_OWNER(current) != self)) {
		return; // no unowned_abort for drop-in compatibility with OSSpinLock
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, l, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

void
_os_nospin_lock_lock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_nospin_lock_lock_slow(l);
}

bool
_os_nospin_lock_trylock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	return r;
}

void
_os_nospin_lock_unlock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_nospin_lock_unlock_slow(l, current);
}


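/*
 * Added note (not in the original source): _os_nospin_lock is the drop-in
 * replacement backing OSSpinLock when OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK is
 * set (see the _Static_assert on its size above). Instead of spinning, a
 * contended waiter parks in the kernel via
 * __ulock_wait(UL_COMPARE_AND_WAIT, ...) with a timeout that grows by one
 * millisecond per expiry, so a wakeup missed because of a legacy-style
 * unlock (a raw store of 0) is eventually recovered by the timed re-check.
 */
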
#pragma mark -
#pragma mark _os_lock_nospin_t

OS_LOCK_STRUCT_DECL_INTERNAL(nospin,
	_os_nospin_lock osl_nospin_lock;
);
OS_LOCK_METHODS_DECL(nospin);
OS_LOCK_TYPE_INSTANCE(nospin);

void
_os_lock_nospin_lock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_lock(&l->osl_nospin_lock);
}

bool
_os_lock_nospin_trylock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_trylock(&l->osl_nospin_lock);
}

void
_os_lock_nospin_unlock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_unlock(&l->osl_nospin_lock);
}


#pragma mark -
#pragma mark os_once_t

typedef struct os_once_gate_s {
	union {
		os_ulock_value_t ogo_lock;
		os_once_t ogo_once;
	};
} os_once_gate_s, *os_once_gate_t;

#define OS_ONCE_INIT ((os_once_t)0l)
#define OS_ONCE_DONE (~(os_once_t)0l)

OS_ATOMIC_EXPORT void _os_once(os_once_t *val, void *ctxt, os_function_t func);
OS_ATOMIC_EXPORT void __os_once_reset(os_once_t *val);

OS_NOINLINE OS_NORETURN OS_COLD
void _os_once_gate_recursive_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_once_gate_unowned_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_once_gate_corruption_abort(os_ulock_value_t current);


OS_NOINLINE OS_NORETURN OS_COLD
void
_os_once_gate_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_once_t");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_once_gate_unowned_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_once_t not "
			"owned by current thread");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_once_gate_corruption_abort(os_ulock_value_t current)
{
	__LIBPLATFORM_CLIENT_CRASH__(current, "os_once_t is corrupt");
}


OS_NOINLINE
static void
_os_once_gate_wait_slow(os_ulock_value_t *gate, os_lock_owner_t self)
{
	os_ulock_value_t tid_old, tid_new;

	for (;;) {
		os_atomic_rmw_loop(gate, tid_old, tid_new, relaxed, {
			switch (tid_old) {
			case (os_ulock_value_t)OS_ONCE_INIT: // raced with __os_once_reset()
			case (os_ulock_value_t)OS_ONCE_DONE: // raced with _os_once()
				os_atomic_rmw_loop_give_up(return);
			}
			tid_new = tid_old & ~OS_ULOCK_NOWAITERS_BIT;
			if (tid_new == tid_old) os_atomic_rmw_loop_give_up(break);
		});
		if (unlikely(OS_ULOCK_IS_OWNER(tid_old, self, 0))) {
			return _os_once_gate_recursive_abort(self);
		}
		int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO,
				gate, tid_new, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
			case EFAULT:
				continue;
			case EOWNERDEAD:
				_os_once_gate_corruption_abort(tid_old);
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
	}
}

OS_NOINLINE
static void
_os_once_gate_broadcast_slow(os_ulock_value_t *gate, os_ulock_value_t current,
		os_lock_owner_t self)
{
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, 0))) {
		return _os_once_gate_unowned_abort(OS_ULOCK_OWNER(current));
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO | ULF_WAKE_ALL,
				gate, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

OS_ALWAYS_INLINE
static void
_os_once_gate_set_value_and_broadcast(os_once_gate_t og, os_lock_owner_t self,
		os_once_t value)
{
	os_ulock_value_t current;
#if defined(__i386__) || defined(__x86_64__)
	// On Intel, any load is a load-acquire, so we don't need to be fancy
	current = (os_ulock_value_t)os_atomic_xchg(&og->ogo_once, value, release);
#else
# error os_once algorithm not available for this architecture
#endif
	if (likely(current == self)) return;
	_os_once_gate_broadcast_slow(&og->ogo_lock, current, self);
}

// Atomically resets the once value to zero and then signals all
// pending waiters to return from their _os_once_gate_wait_slow()
void
__os_once_reset(os_once_t *val)
{
	os_once_gate_t og = (os_once_gate_t)val;
	os_lock_owner_t self = _os_lock_owner_get_self();
	_os_once_gate_set_value_and_broadcast(og, self, OS_ONCE_INIT);
}

void
_os_once(os_once_t *val, void *ctxt, os_function_t func)
{
	os_once_gate_t og = (os_once_gate_t)val;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_once_t v = (os_once_t)self;

	if (likely(os_atomic_cmpxchg(&og->ogo_once, OS_ONCE_INIT, v, relaxed))) {
		func(ctxt);
		_os_once_gate_set_value_and_broadcast(og, self, OS_ONCE_DONE);
	} else {
		_os_once_gate_wait_slow(&og->ogo_lock, self);
	}
}

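/*
 * Illustrative sketch (not part of this file): while the initializer runs,
 * the gate holds the initializing thread's port name so that contended
 * waiters can park in the kernel (and recursive use aborts); afterwards it
 * holds OS_ONCE_DONE forever, unless __os_once_reset() is used. Names below
 * are hypothetical:
 *
 *	static os_once_t g_init_once;
 *
 *	static void
 *	init_tables(void *ctxt OS_UNUSED)
 *	{
 *		// runs exactly once across all threads
 *	}
 *
 *	void
 *	ensure_initialized(void)
 *	{
 *		_os_once(&g_init_once, NULL, init_tables);
 *	}
 */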

#pragma mark -
#pragma mark os_lock_eliding_t

#if !TARGET_OS_IPHONE

#define _os_lock_eliding_t _os_lock_spin_t
#define _os_lock_eliding_lock _os_lock_spin_lock
#define _os_lock_eliding_trylock _os_lock_spin_trylock
#define _os_lock_eliding_unlock _os_lock_spin_unlock
OS_LOCK_METHODS_DECL(eliding);
OS_LOCK_TYPE_INSTANCE(eliding);

#pragma mark -
#pragma mark os_lock_transactional_t

OS_LOCK_STRUCT_DECL_INTERNAL(transactional,
	uintptr_t volatile osl_lock;
);

#define _os_lock_transactional_t _os_lock_eliding_t
#define _os_lock_transactional_lock _os_lock_eliding_lock
#define _os_lock_transactional_trylock _os_lock_eliding_trylock
#define _os_lock_transactional_unlock _os_lock_eliding_unlock
OS_LOCK_METHODS_DECL(transactional);
OS_LOCK_TYPE_INSTANCE(transactional);

#endif // !TARGET_OS_IPHONE