/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#include "lock_internal.h"
#include "libkern/OSAtomic.h"
#include "os/lock.h"
#include "os/lock_private.h"
#include "os/once_private.h"
#include "resolver.h"

#include <mach/mach_init.h>
#include <mach/mach_traps.h>
#include <mach/thread_switch.h>
#include <os/tsd.h>

#pragma mark -
#pragma mark _os_lock_base_t

#if !OS_VARIANT_ONLY

OS_LOCK_STRUCT_DECL_INTERNAL(base);
OS_USED static OS_LOCK_TYPE_STRUCT_DECL(base);

void
os_lock_lock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_lock(l);
}

bool
os_lock_trylock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_trylock(l);
}

void
os_lock_unlock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_unlock(l);
}

#endif //!OS_VARIANT_ONLY
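
// Dispatch note: each concrete lock type below embeds an _os_lock_base_s
// header whose osl_type field points at a static type struct (a small
// vtable) registered with OS_LOCK_TYPE_INSTANCE(); the three generic entry
// points above simply forward through that vtable's osl_lock/osl_trylock/
// osl_unlock methods.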

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_lock_corruption_abort(void *lock_ptr OS_UNUSED, uintptr_t lock_value)
{
	__LIBPLATFORM_CLIENT_CRASH__(lock_value, "os_lock is corrupt");
}

#pragma mark -
#pragma mark OSSpinLock

#ifdef OS_LOCK_VARIANT_SELECTOR
void _OSSpinLockLockSlow(volatile OSSpinLock *l);
#else
OS_NOINLINE OS_USED static void _OSSpinLockLockSlow(volatile OSSpinLock *l);
#endif // OS_LOCK_VARIANT_SELECTOR

OS_ATOMIC_EXPORT void OSSpinLockLock(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT bool OSSpinLockTry(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT int spin_lock_try(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT void OSSpinLockUnlock(volatile OSSpinLock *l);

#if OS_ATOMIC_UP
// Don't spin on UP
#elif OS_ATOMIC_WFE
#define OS_LOCK_SPIN_SPIN_TRIES 100
#define OS_LOCK_SPIN_PAUSE() os_hardware_wfe()
#else
#define OS_LOCK_SPIN_SPIN_TRIES 1000
#define OS_LOCK_SPIN_PAUSE() os_hardware_pause()
#endif
#define OS_LOCK_SPIN_YIELD_TRIES 100

static const OSSpinLock _OSSpinLockLocked = TARGET_OS_EMBEDDED ? 1 : -1;

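// Note on the locked value: OSSpinLock historically stores 1 when locked on
// embedded targets and -1 elsewhere, which is why _OSSpinLockLocked is
// selected per target above. Contended acquires escalate in stages: spin
// with a hardware pause/WFE for OS_LOCK_SPIN_SPIN_TRIES iterations (skipped
// entirely on uniprocessor builds), then yield via thread_switch() with
// depressed priority for OS_LOCK_SPIN_YIELD_TRIES iterations, and finally
// switch to a waiting yield.
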
OS_NOINLINE
static void
_OSSpinLockLockYield(volatile OSSpinLock *l)
{
	int option = SWITCH_OPTION_DEPRESS;
	mach_msg_timeout_t timeout = 1;
	uint32_t tries = OS_LOCK_SPIN_YIELD_TRIES;
	OSSpinLock lock;
	while (unlikely(lock = *l)) {
_yield:
		if (unlikely(lock != _OSSpinLockLocked)) {
			_os_lock_corruption_abort((void *)l, (uintptr_t)lock);
		}
		// Yield until tries first hits zero, then permanently switch to wait
		if (unlikely(!tries--)) option = SWITCH_OPTION_WAIT;
		thread_switch(MACH_PORT_NULL, option, timeout);
	}
	bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
	if (likely(r)) return;
	goto _yield;
}

#if OS_ATOMIC_UP
void
_OSSpinLockLockSlow(volatile OSSpinLock *l)
{
	return _OSSpinLockLockYield(l); // Don't spin on UP
}
#else
void
_OSSpinLockLockSlow(volatile OSSpinLock *l)
{
	uint32_t tries = OS_LOCK_SPIN_SPIN_TRIES;
	OSSpinLock lock;
	while (unlikely(lock = *l)) {
_spin:
		if (unlikely(lock != _OSSpinLockLocked)) {
			return _os_lock_corruption_abort((void *)l, (uintptr_t)lock);
		}
		if (unlikely(!tries--)) return _OSSpinLockLockYield(l);
		OS_LOCK_SPIN_PAUSE();
	}
	bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
	if (likely(r)) return;
	goto _spin;
}
#endif

#ifdef OS_LOCK_VARIANT_SELECTOR
#undef _OSSpinLockLockSlow
extern void _OSSpinLockLockSlow(volatile OSSpinLock *l);
#endif

#if !OS_LOCK_VARIANT_ONLY

#if OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK && !TARGET_OS_SIMULATOR

typedef struct _os_nospin_lock_s *_os_nospin_lock_t;
void _os_nospin_lock_lock(_os_nospin_lock_t lock);
bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
void _os_nospin_lock_unlock(_os_nospin_lock_t lock);

void
OSSpinLockLock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_lock, OSSpinLockLock);
	OS_ATOMIC_ALIAS(_spin_lock, OSSpinLockLock);
	return _os_nospin_lock_lock((_os_nospin_lock_t)l);
}

bool
OSSpinLockTry(volatile OSSpinLock *l)
{
	return _os_nospin_lock_trylock((_os_nospin_lock_t)l);
}

int
spin_lock_try(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(_spin_lock_try, spin_lock_try);
	return _os_nospin_lock_trylock((_os_nospin_lock_t)l);
}

void
OSSpinLockUnlock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_unlock, OSSpinLockUnlock);
	OS_ATOMIC_ALIAS(_spin_unlock, OSSpinLockUnlock);
	return _os_nospin_lock_unlock((_os_nospin_lock_t)l);
}

#undef OS_ATOMIC_ALIAS
#define OS_ATOMIC_ALIAS(n, o)
static void _OSSpinLockLock(volatile OSSpinLock *l);
#undef OSSpinLockLock
#define OSSpinLockLock _OSSpinLockLock
static bool _OSSpinLockTry(volatile OSSpinLock *l);
#undef OSSpinLockTry
#define OSSpinLockTry _OSSpinLockTry
static __unused int __spin_lock_try(volatile OSSpinLock *l);
#undef spin_lock_try
#define spin_lock_try __spin_lock_try
static void _OSSpinLockUnlock(volatile OSSpinLock *l);
#undef OSSpinLockUnlock
#define OSSpinLockUnlock _OSSpinLockUnlock

#endif // OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK

void
OSSpinLockLock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_lock, OSSpinLockLock);
	OS_ATOMIC_ALIAS(_spin_lock, OSSpinLockLock);
	bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
	if (likely(r)) return;
	return _OSSpinLockLockSlow(l);
}

bool
OSSpinLockTry(volatile OSSpinLock *l)
{
	bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
	return r;
}

int
spin_lock_try(volatile OSSpinLock *l) // <rdar://problem/13316060>
{
	OS_ATOMIC_ALIAS(_spin_lock_try, spin_lock_try);
	return OSSpinLockTry(l);
}

void
OSSpinLockUnlock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_unlock, OSSpinLockUnlock);
	OS_ATOMIC_ALIAS(_spin_unlock, OSSpinLockUnlock);
	os_atomic_store(l, 0, release);
}
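
/*
 * Illustrative only, not part of upstream libplatform: a minimal sketch of
 * the classic OSSpinLock client pattern served by the entry points above.
 * OSSpinLock was eventually deprecated in favor of os_unfair_lock because
 * userspace spinning interacts badly with priority inversion.
 */
#if 0
#include <libkern/OSAtomic.h>

static OSSpinLock example_lock = OS_SPINLOCK_INIT; // zero means unlocked
static int example_counter;

static void
example_increment(void)
{
	OSSpinLockLock(&example_lock);   // fast path: cmpxchg 0 -> locked value
	example_counter++;
	OSSpinLockUnlock(&example_lock); // store 0 with release ordering
}
#endif // illustrative example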

#pragma mark -
#pragma mark os_lock_spin_t

OS_LOCK_STRUCT_DECL_INTERNAL(spin,
	OSSpinLock volatile osl_spinlock;
);
#if !OS_VARIANT_ONLY
OS_LOCK_METHODS_DECL(spin);
OS_LOCK_TYPE_INSTANCE(spin);
#endif // !OS_VARIANT_ONLY

#ifdef OS_VARIANT_SELECTOR
#define _os_lock_spin_lock \
		OS_VARIANT(_os_lock_spin_lock, OS_VARIANT_SELECTOR)
#define _os_lock_spin_trylock \
		OS_VARIANT(_os_lock_spin_trylock, OS_VARIANT_SELECTOR)
#define _os_lock_spin_unlock \
		OS_VARIANT(_os_lock_spin_unlock, OS_VARIANT_SELECTOR)
OS_LOCK_METHODS_DECL(spin);
#endif // OS_VARIANT_SELECTOR

void
_os_lock_spin_lock(_os_lock_spin_t l)
{
	return OSSpinLockLock(&l->osl_spinlock);
}

bool
_os_lock_spin_trylock(_os_lock_spin_t l)
{
	return OSSpinLockTry(&l->osl_spinlock);
}

void
_os_lock_spin_unlock(_os_lock_spin_t l)
{
	return OSSpinLockUnlock(&l->osl_spinlock);
}

#pragma mark -
#pragma mark os_lock_owner_t

#ifndef __TSD_MACH_THREAD_SELF
#define __TSD_MACH_THREAD_SELF 3
#endif

typedef mach_port_name_t os_lock_owner_t;

OS_ALWAYS_INLINE
static inline os_lock_owner_t
_os_lock_owner_get_self(void)
{
	os_lock_owner_t self;
	self = (os_lock_owner_t)_os_tsd_get_direct(__TSD_MACH_THREAD_SELF);
	return self;
}
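
// The TSD slot read above is expected to hold the current thread's mach port
// name, cached by the threading library at thread setup, so identifying the
// lock owner requires neither a mach_thread_self() trap nor port-right
// reference counting.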

#define OS_LOCK_NO_OWNER MACH_PORT_NULL

#if !OS_LOCK_VARIANT_ONLY

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_lock_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_lock");
}

#endif //!OS_LOCK_VARIANT_ONLY

#pragma mark -
#pragma mark os_lock_handoff_t

OS_LOCK_STRUCT_DECL_INTERNAL(handoff,
	os_lock_owner_t volatile osl_owner;
);
#if !OS_VARIANT_ONLY
OS_LOCK_METHODS_DECL(handoff);
OS_LOCK_TYPE_INSTANCE(handoff);
#endif // !OS_VARIANT_ONLY

#ifdef OS_VARIANT_SELECTOR
#define _os_lock_handoff_lock \
		OS_VARIANT(_os_lock_handoff_lock, OS_VARIANT_SELECTOR)
#define _os_lock_handoff_trylock \
		OS_VARIANT(_os_lock_handoff_trylock, OS_VARIANT_SELECTOR)
#define _os_lock_handoff_unlock \
		OS_VARIANT(_os_lock_handoff_unlock, OS_VARIANT_SELECTOR)
OS_LOCK_METHODS_DECL(handoff);
#endif // OS_VARIANT_SELECTOR

#define OS_LOCK_HANDOFF_YIELD_TRIES 100

OS_NOINLINE
static void
_os_lock_handoff_lock_slow(_os_lock_handoff_t l)
{
	int option = SWITCH_OPTION_OSLOCK_DEPRESS;
	mach_msg_timeout_t timeout = 1;
	uint32_t tries = OS_LOCK_HANDOFF_YIELD_TRIES;
	os_lock_owner_t self = _os_lock_owner_get_self(), owner;
	while (unlikely(owner = l->osl_owner)) {
_handoff:
		if (unlikely(owner == self)) return _os_lock_recursive_abort(self);
		// Yield until tries first hits zero, then permanently switch to wait
		if (unlikely(!tries--)) option = SWITCH_OPTION_OSLOCK_WAIT;
		thread_switch(owner, option, timeout);
		// Redrive the handoff every 1ms until switching to wait
		if (option == SWITCH_OPTION_OSLOCK_WAIT) timeout++;
	}
	bool r = os_atomic_cmpxchgv2o(l, osl_owner, MACH_PORT_NULL, self, &owner,
			acquire);
	if (likely(r)) return;
	goto _handoff;
}

void
_os_lock_handoff_lock(_os_lock_handoff_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, osl_owner, MACH_PORT_NULL, self, acquire);
	if (likely(r)) return;
	return _os_lock_handoff_lock_slow(l);
}

bool
_os_lock_handoff_trylock(_os_lock_handoff_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, osl_owner, MACH_PORT_NULL, self, acquire);
	return r;
}

void
_os_lock_handoff_unlock(_os_lock_handoff_t l)
{
	os_atomic_store2o(l, osl_owner, MACH_PORT_NULL, release);
}
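
// Handoff note: unlike the anonymous yield in _OSSpinLockLockYield(), the
// slow path above names the current owner in thread_switch(), asking the
// scheduler to run that thread directly (first with the waiter's priority
// depressed, later waiting), i.e. the CPU is handed to the one thread able
// to release the lock.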

#pragma mark -
#pragma mark os_ulock_value_t

#include <sys/errno.h>
#include <sys/ulock.h>

typedef os_lock_owner_t os_ulock_value_t;

// This assumes that all thread mach port values always have the low bit set!
// Clearing this bit is used to communicate the existence of waiters to unlock.
#define OS_ULOCK_NOWAITERS_BIT ((os_ulock_value_t)1u)
#define OS_ULOCK_OWNER(value) ((value) | OS_ULOCK_NOWAITERS_BIT)

#define OS_ULOCK_ANONYMOUS_OWNER MACH_PORT_DEAD
#define OS_ULOCK_IS_OWNER(value, self) ({ \
		os_lock_owner_t _owner = OS_ULOCK_OWNER(value); \
		(_owner == (self) && _owner != OS_ULOCK_ANONYMOUS_OWNER); })
#define OS_ULOCK_IS_NOT_OWNER(value, self) ({ \
		os_lock_owner_t _owner = OS_ULOCK_OWNER(value); \
		(_owner != (self) && _owner != OS_ULOCK_ANONYMOUS_OWNER); })
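
// Worked example of the encoding (with hypothetical port names): a thread
// whose port name is 0x50b owns the lock uncontended as the value 0x50b,
// low bit set. The first waiter clears that bit, storing 0x50a, before
// parking in __ulock_wait(); OS_ULOCK_OWNER(0x50a) restores 0x50b, so
// ownership checks work regardless of contention, and an unlock that
// observes the low bit cleared knows it must issue a __ulock_wake().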


#pragma mark -
#pragma mark os_unfair_lock

typedef struct _os_unfair_lock_s {
	os_ulock_value_t oul_value;
} *_os_unfair_lock_t;

_Static_assert(sizeof(struct os_unfair_lock_s) ==
		sizeof(struct _os_unfair_lock_s), "os_unfair_lock size mismatch");

OS_ATOMIC_EXPORT void os_unfair_lock_lock(os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options);
OS_ATOMIC_EXPORT bool os_unfair_lock_trylock(os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock(os_unfair_lock_t lock);

OS_ATOMIC_EXPORT void os_unfair_lock_lock_no_tsd_4libpthread(
		os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock_no_tsd_4libpthread(
		os_unfair_lock_t lock);

_Static_assert(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION ==
		ULF_WAIT_WORKQ_DATA_CONTENTION,
		"check value for OS_UNFAIR_LOCK_OPTIONS_MASK");
#define OS_UNFAIR_LOCK_OPTIONS_MASK \
		(os_unfair_lock_options_t)(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION)

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_unfair_lock_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_unfair_lock");
}

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_unfair_lock_unowned_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_unfair_lock not "
			"owned by current thread");
}

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_unfair_lock_corruption_abort(os_ulock_value_t current)
{
	__LIBPLATFORM_CLIENT_CRASH__(current, "os_unfair_lock is corrupt");
}

OS_NOINLINE
static void
_os_unfair_lock_lock_slow(_os_unfair_lock_t l, os_lock_owner_t self,
		os_unfair_lock_options_t options)
{
	os_ulock_value_t current, new, waiters_mask = 0;
	if (unlikely(options & ~OS_UNFAIR_LOCK_OPTIONS_MASK)) {
		__LIBPLATFORM_CLIENT_CRASH__(options, "Invalid options");
	}
	while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
			OS_LOCK_NO_OWNER)) {
_retry:
		if (unlikely(OS_ULOCK_IS_OWNER(current, self))) {
			return _os_unfair_lock_recursive_abort(self);
		}
		new = current & ~OS_ULOCK_NOWAITERS_BIT;
		if (current != new) {
			// Clear nowaiters bit in lock value before waiting
			if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
					relaxed)){
				continue;
			}
			current = new;
		}
		int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO | options,
				l, current, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
			case EFAULT:
				continue;
			case EOWNERDEAD:
				_os_unfair_lock_corruption_abort(current);
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
		// If there are more waiters, unset nowaiters bit when acquiring lock
		waiters_mask = (ret > 0) ? OS_ULOCK_NOWAITERS_BIT : 0;
	}
	new = self & ~waiters_mask;
	bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
			&current, acquire);
	if (unlikely(!r)) goto _retry;
}

OS_NOINLINE
static void
_os_unfair_lock_unlock_slow(_os_unfair_lock_t l, os_ulock_value_t current,
		os_lock_owner_t self)
{
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self))) {
		return _os_unfair_lock_unowned_abort(OS_ULOCK_OWNER(current));
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO, l, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

void
os_unfair_lock_lock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self, OS_UNFAIR_LOCK_NONE);
}

void
os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self, options);
}

bool
os_unfair_lock_trylock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	return r;
}

void
os_unfair_lock_unlock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow(l, current, self);
}
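
/*
 * Illustrative only, not part of upstream libplatform: a minimal sketch of
 * the public os_unfair_lock API implemented above, using the documented
 * initializer from <os/lock.h>.
 */
#if 0
#include <os/lock.h>
#include <stdint.h>

static os_unfair_lock example_lock = OS_UNFAIR_LOCK_INIT;
static uint64_t example_bytes_in_flight;

static void
example_account(uint64_t len)
{
	os_unfair_lock_lock(&example_lock);   // parks in the kernel if contended
	example_bytes_in_flight += len;
	os_unfair_lock_unlock(&example_lock); // must be called by the owner
}
#endif // illustrative example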

void
os_unfair_lock_lock_no_tsd_4libpthread(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = OS_ULOCK_ANONYMOUS_OWNER;
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self,
			OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION);
}

void
os_unfair_lock_unlock_no_tsd_4libpthread(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = OS_ULOCK_ANONYMOUS_OWNER;
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow(l, current, self);
}

#pragma mark -
#pragma mark _os_lock_unfair_t 4Libc // <rdar://problem/27138264>

OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options_4Libc(
		os_unfair_lock_t lock, os_unfair_lock_options_t options);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock_4Libc(os_unfair_lock_t lock);

OS_NOINLINE
static void
_os_unfair_lock_lock_slow_4Libc(_os_unfair_lock_t l, os_lock_owner_t self,
		os_unfair_lock_options_t options)
{
	os_ulock_value_t current, new, waiters_mask = 0;
	if (unlikely(options & ~OS_UNFAIR_LOCK_OPTIONS_MASK)) {
		__LIBPLATFORM_CLIENT_CRASH__(options, "Invalid options");
	}
	while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
			OS_LOCK_NO_OWNER)) {
_retry:
		if (unlikely(OS_ULOCK_IS_OWNER(current, self))) {
			return _os_unfair_lock_recursive_abort(self);
		}
		new = current & ~OS_ULOCK_NOWAITERS_BIT;
		if (current != new) {
			// Clear nowaiters bit in lock value before waiting
			if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
					relaxed)){
				continue;
			}
			current = new;
		}
		int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO | options,
				l, current, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
			case EFAULT:
				continue;
			case EOWNERDEAD:
				// EOWNERDEAD may indicate that the lock value has been
				// corrupted; for the Libc locks, if the lock can be stolen,
				// assume corruption and behave as if the lock had been
				// acquired with contention (so unlock wakes any waiters)
				new = self & ~OS_ULOCK_NOWAITERS_BIT;
				if (os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
						acquire)) {
					return;
				}
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
		// If there are more waiters, unset nowaiters bit when acquiring lock
		waiters_mask = (ret > 0) ? OS_ULOCK_NOWAITERS_BIT : 0;
	}
	new = self & ~waiters_mask;
	bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
			&current, acquire);
	if (unlikely(!r)) goto _retry;
}

OS_NOINLINE
static void
_os_unfair_lock_unlock_slow_4Libc(_os_unfair_lock_t l)
{
	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO, l, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

void
os_unfair_lock_lock_with_options_4Libc(os_unfair_lock_t lock,
		os_unfair_lock_options_t options)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow_4Libc(l, self, options);
}

void
os_unfair_lock_unlock_4Libc(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow_4Libc(l);
}

#if !OS_VARIANT_ONLY
void
os_unfair_lock_assert_owner(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed);
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self))) {
		__LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
				"Lock unexpectedly not owned by current thread");
	}
}

void
os_unfair_lock_assert_not_owner(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed);
	if (unlikely(OS_ULOCK_IS_OWNER(current, self))) {
		__LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
				"Lock unexpectedly owned by current thread");
	}
}
#endif

#pragma mark -
#pragma mark _os_lock_unfair_t

OS_LOCK_STRUCT_DECL_INTERNAL(unfair,
	os_unfair_lock osl_unfair_lock;
);
#if !OS_VARIANT_ONLY
OS_LOCK_METHODS_DECL(unfair);
OS_LOCK_TYPE_INSTANCE(unfair);
#endif // !OS_VARIANT_ONLY

#ifdef OS_VARIANT_SELECTOR
#define _os_lock_unfair_lock \
		OS_VARIANT(_os_lock_unfair_lock, OS_VARIANT_SELECTOR)
#define _os_lock_unfair_trylock \
		OS_VARIANT(_os_lock_unfair_trylock, OS_VARIANT_SELECTOR)
#define _os_lock_unfair_unlock \
		OS_VARIANT(_os_lock_unfair_unlock, OS_VARIANT_SELECTOR)
OS_LOCK_METHODS_DECL(unfair);
#endif // OS_VARIANT_SELECTOR

void
_os_lock_unfair_lock(_os_lock_unfair_t l)
{
	return os_unfair_lock_lock(&l->osl_unfair_lock);
}

bool
_os_lock_unfair_trylock(_os_lock_unfair_t l)
{
	return os_unfair_lock_trylock(&l->osl_unfair_lock);
}

void
_os_lock_unfair_unlock(_os_lock_unfair_t l)
{
	return os_unfair_lock_unlock(&l->osl_unfair_lock);
}

#pragma mark -
#pragma mark _os_nospin_lock

typedef struct _os_nospin_lock_s {
	os_ulock_value_t oul_value;
} _os_nospin_lock, *_os_nospin_lock_t;

_Static_assert(sizeof(OSSpinLock) ==
		sizeof(struct _os_nospin_lock_s), "os_nospin_lock size mismatch");
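
// The nospin lock is deliberately the same 32-bit word as OSSpinLock (hence
// the static assert above), so that where OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK
// is set the OSSpinLock entry points can forward here; contended waiters
// then park in the kernel via __ulock_wait() instead of spinning.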

OS_ATOMIC_EXPORT void _os_nospin_lock_lock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT void _os_nospin_lock_unlock(_os_nospin_lock_t lock);

OS_NOINLINE
static void
_os_nospin_lock_lock_slow(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current, new, waiters_mask = 0;
	uint32_t timeout = 1;
	while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
			OS_LOCK_NO_OWNER)) {
_retry:
		new = current & ~OS_ULOCK_NOWAITERS_BIT;
		// For safer compatibility with OSSpinLock where _OSSpinLockLocked may
		// be 1, check that new didn't become 0 (unlocked) by clearing this bit
		if (current != new && new) {
			// Clear nowaiters bit in lock value before waiting
			if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
					relaxed)){
				continue;
			}
			current = new;
		}
		int ret = __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, l, current,
				timeout * 1000);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case ETIMEDOUT:
				timeout++;
				continue;
			case EINTR:
			case EFAULT:
				continue;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
		// If there are more waiters, unset nowaiters bit when acquiring lock
		waiters_mask = (ret > 0) ? OS_ULOCK_NOWAITERS_BIT : 0;
	}
	new = self & ~waiters_mask;
	bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
			&current, acquire);
	if (unlikely(!r)) goto _retry;
}

OS_NOINLINE
static void
_os_nospin_lock_unlock_slow(_os_nospin_lock_t l, os_ulock_value_t current)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	if (unlikely(OS_ULOCK_OWNER(current) != self)) {
		return; // no unowned_abort for drop-in compatibility with OSSpinLock
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, l, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

void
_os_nospin_lock_lock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_nospin_lock_lock_slow(l);
}

bool
_os_nospin_lock_trylock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	return r;
}

void
_os_nospin_lock_unlock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_nospin_lock_unlock_slow(l, current);
}

#pragma mark -
#pragma mark _os_lock_nospin_t

OS_LOCK_STRUCT_DECL_INTERNAL(nospin,
	_os_nospin_lock osl_nospin_lock;
);
#if !OS_VARIANT_ONLY
OS_LOCK_METHODS_DECL(nospin);
OS_LOCK_TYPE_INSTANCE(nospin);
#endif // !OS_VARIANT_ONLY

#ifdef OS_VARIANT_SELECTOR
#define _os_lock_nospin_lock \
		OS_VARIANT(_os_lock_nospin_lock, OS_VARIANT_SELECTOR)
#define _os_lock_nospin_trylock \
		OS_VARIANT(_os_lock_nospin_trylock, OS_VARIANT_SELECTOR)
#define _os_lock_nospin_unlock \
		OS_VARIANT(_os_lock_nospin_unlock, OS_VARIANT_SELECTOR)
OS_LOCK_METHODS_DECL(nospin);
#endif // OS_VARIANT_SELECTOR

void
_os_lock_nospin_lock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_lock(&l->osl_nospin_lock);
}

bool
_os_lock_nospin_trylock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_trylock(&l->osl_nospin_lock);
}

void
_os_lock_nospin_unlock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_unlock(&l->osl_nospin_lock);
}

#pragma mark -
#pragma mark os_once_t

typedef struct os_once_gate_s {
	union {
		os_ulock_value_t ogo_lock;
		os_once_t ogo_once;
	};
} os_once_gate_s, *os_once_gate_t;

#define OS_ONCE_INIT ((os_once_t)0l)
#define OS_ONCE_DONE (~(os_once_t)0l)
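
// Gate state machine: ogo_once is OS_ONCE_INIT (0) before first use, holds
// the initiating thread's owner value (whose low bit doubles as the ulock
// nowaiters bit) while func(ctxt) runs, and becomes OS_ONCE_DONE (~0)
// afterwards; __os_once_reset() moves it back to OS_ONCE_INIT so the
// initializer can be retried.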

OS_ATOMIC_EXPORT void _os_once(os_once_t *val, void *ctxt, os_function_t func);
OS_ATOMIC_EXPORT void __os_once_reset(os_once_t *val);

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_once_gate_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_once_t");
}

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_once_gate_unowned_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_once_t not "
			"owned by current thread");
}

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_once_gate_corruption_abort(os_ulock_value_t current)
{
	__LIBPLATFORM_CLIENT_CRASH__(current, "os_once_t is corrupt");
}

OS_NOINLINE
static void
_os_once_gate_wait_slow(os_ulock_value_t *gate, os_lock_owner_t self)
{
	os_ulock_value_t tid_old, tid_new;

	for (;;) {
		os_atomic_rmw_loop(gate, tid_old, tid_new, relaxed, {
			switch (tid_old) {
			case (os_ulock_value_t)OS_ONCE_INIT: // raced with __os_once_reset()
			case (os_ulock_value_t)OS_ONCE_DONE: // raced with _os_once()
				os_atomic_rmw_loop_give_up(return);
			}
			tid_new = tid_old & ~OS_ULOCK_NOWAITERS_BIT;
			if (tid_new == tid_old) os_atomic_rmw_loop_give_up(break);
		});
		if (unlikely(OS_ULOCK_IS_OWNER(tid_old, self))) {
			return _os_once_gate_recursive_abort(self);
		}
		int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO,
				gate, tid_new, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
			case EFAULT:
				continue;
			case EOWNERDEAD:
				_os_once_gate_corruption_abort(tid_old);
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
	}
}

OS_NOINLINE
static void
_os_once_gate_broadcast_slow(os_ulock_value_t *gate, os_ulock_value_t current,
		os_lock_owner_t self)
{
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self))) {
		return _os_once_gate_unowned_abort(OS_ULOCK_OWNER(current));
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO | ULF_WAKE_ALL,
				gate, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

OS_ALWAYS_INLINE
static void
_os_once_gate_set_value_and_broadcast(os_once_gate_t og, os_lock_owner_t self,
		os_once_t value)
{
	// The next barrier must be long and strong.
	//
	// The scenario: SMP systems with weakly ordered memory models
	// and aggressive out-of-order instruction execution.
	//
	// The problem:
	//
	// The os_once*() wrapper macro causes the callee's
	// instruction stream to look like this (pseudo-RISC):
	//
	//      load r5, pred-addr
	//      cmpi r5, -1
	//      beq  1f
	//      call os_once*()
	//      1f:
	//      load r6, data-addr
	//
	// May be re-ordered like so:
	//
	//      load r6, data-addr
	//      load r5, pred-addr
	//      cmpi r5, -1
	//      beq  1f
	//      call os_once*()
	//      1f:
	//
	// Normally, a barrier on the read side is used to work around
	// the weakly ordered memory model. But barriers are expensive
	// and we only need to synchronize once! After func(ctxt)
	// completes, the predicate will be marked as "done" and the
	// branch predictor will correctly skip the call to
	// os_once*().
	//
	// A far faster alternative solution: Defeat the speculative
	// read-ahead of peer CPUs.
	//
	// Modern architectures will throw away speculative results
	// once a branch mis-prediction occurs. Therefore, if we can
	// ensure that the predicate is not marked as being complete
	// until long after the last store by func(ctxt), then we have
	// defeated the read-ahead of peer CPUs.
	//
	// In other words, the last "store" by func(ctxt) must complete
	// and then N cycles must elapse before ~0l is stored to *val.
	// The value of N is whatever is sufficient to defeat the
	// read-ahead mechanism of peer CPUs.
	//
	// On some CPUs, the most fully synchronizing instruction might
	// need to be issued.
	os_atomic_maximally_synchronizing_barrier();
	// above assumed to contain release barrier
	os_ulock_value_t current =
			(os_ulock_value_t)os_atomic_xchg(&og->ogo_once, value, relaxed);
	if (likely(current == self)) return;
	_os_once_gate_broadcast_slow(&og->ogo_lock, current, self);
}

// Atomically resets the once value to zero and then signals all
// pending waiters to return from their _os_once_gate_wait_slow()
void
__os_once_reset(os_once_t *val)
{
	os_once_gate_t og = (os_once_gate_t)val;
	os_lock_owner_t self = _os_lock_owner_get_self();
	_os_once_gate_set_value_and_broadcast(og, self, OS_ONCE_INIT);
}

void
_os_once(os_once_t *val, void *ctxt, os_function_t func)
{
	os_once_gate_t og = (os_once_gate_t)val;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_once_t v = (os_once_t)self;

	if (likely(os_atomic_cmpxchg(&og->ogo_once, OS_ONCE_INIT, v, relaxed))) {
		func(ctxt);
		_os_once_gate_set_value_and_broadcast(og, self, OS_ONCE_DONE);
	} else {
		_os_once_gate_wait_slow(&og->ogo_lock, self);
	}
}
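
/*
 * Illustrative only, not part of upstream libplatform: a minimal sketch of
 * the dispatch_once-style pattern _os_once() implements, assuming the
 * os_once() wrapper declared in "os/once_private.h" resolves to _os_once();
 * the example_* names are hypothetical.
 */
#if 0
#include "os/once_private.h"

static os_once_t example_pred;
static int example_value;

static void
example_init(void *ctxt)
{
	// Runs exactly once; concurrent callers block in
	// _os_once_gate_wait_slow() until the DONE broadcast.
	*(int *)ctxt = 42;
}

static int
example_get(void)
{
	os_once(&example_pred, &example_value, example_init);
	return example_value;
}
#endif // illustrative example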

#if !OS_VARIANT_ONLY

#pragma mark -
#pragma mark os_lock_eliding_t

#if !TARGET_OS_IPHONE

#define _os_lock_eliding_t _os_lock_spin_t
#define _os_lock_eliding_lock _os_lock_spin_lock
#define _os_lock_eliding_trylock _os_lock_spin_trylock
#define _os_lock_eliding_unlock _os_lock_spin_unlock
OS_LOCK_METHODS_DECL(eliding);
OS_LOCK_TYPE_INSTANCE(eliding);
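
// On these targets the eliding lock is the spin lock under another name:
// there is no hardware lock-elision path in this file, only the aliases
// above (and below for the transactional variant).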

#pragma mark -
#pragma mark os_lock_transactional_t

OS_LOCK_STRUCT_DECL_INTERNAL(transactional,
	uintptr_t volatile osl_lock;
);

#define _os_lock_transactional_t _os_lock_eliding_t
#define _os_lock_transactional_lock _os_lock_eliding_lock
#define _os_lock_transactional_trylock _os_lock_eliding_trylock
#define _os_lock_transactional_unlock _os_lock_eliding_unlock
OS_LOCK_METHODS_DECL(transactional);
OS_LOCK_TYPE_INSTANCE(transactional);

#endif // !TARGET_OS_IPHONE
#endif // !OS_VARIANT_ONLY
#endif // !OS_LOCK_VARIANT_ONLY