/*
 * Source: Apple libplatform-177.250.1, src/os/lock.c
 * (recovered from a git-blame extract; blame annotations removed)
 */
1/*
2 * Copyright (c) 2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_APACHE_LICENSE_HEADER_START@
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * @APPLE_APACHE_LICENSE_HEADER_END@
19 */
20
438624e0 21#define OS_UNFAIR_LOCK_INLINE 1
ada7c492 22#include "lock_internal.h"
e45b4692
A
23#include "os/internal.h"
24#include "resolver.h"
ada7c492
A
25#include "libkern/OSAtomic.h"
26#include "os/lock.h"
27#include "os/lock_private.h"
28#include "os/once_private.h"
ada7c492
A
29
30#include <mach/mach_init.h>
31#include <mach/mach_traps.h>
32#include <mach/thread_switch.h>
ea84da91 33#include <mach/mach_time.h>
ada7c492
A
34#include <os/tsd.h>
35
36#pragma mark -
37#pragma mark _os_lock_base_t
38
e45b4692
A
39OS_NOINLINE OS_NORETURN OS_COLD
40void _os_lock_corruption_abort(void *lock_ptr OS_UNUSED, uintptr_t lock_value);
41
ada7c492
A
42
43OS_LOCK_STRUCT_DECL_INTERNAL(base);
44OS_USED static OS_LOCK_TYPE_STRUCT_DECL(base);
45
46void
47os_lock_lock(os_lock_t l)
48{
49 return l._osl_base->osl_type->osl_lock(l);
50}
51
52bool
53os_lock_trylock(os_lock_t l)
54{
55 return l._osl_base->osl_type->osl_trylock(l);
56}
57
58void
59os_lock_unlock(os_lock_t l)
60{
61 return l._osl_base->osl_type->osl_unlock(l);
62}
63
ada7c492 64OS_NOINLINE OS_NORETURN OS_COLD
e45b4692 65void
ada7c492
A
66_os_lock_corruption_abort(void *lock_ptr OS_UNUSED, uintptr_t lock_value)
67{
68 __LIBPLATFORM_CLIENT_CRASH__(lock_value, "os_lock is corrupt");
69}
70
e45b4692 71
ada7c492
A
72#pragma mark -
73#pragma mark OSSpinLock
74
e45b4692 75OS_NOEXPORT OS_NOINLINE void _OSSpinLockLockSlow(volatile OSSpinLock *l);
ada7c492
A
76
77OS_ATOMIC_EXPORT void OSSpinLockLock(volatile OSSpinLock *l);
78OS_ATOMIC_EXPORT bool OSSpinLockTry(volatile OSSpinLock *l);
79OS_ATOMIC_EXPORT int spin_lock_try(volatile OSSpinLock *l);
80OS_ATOMIC_EXPORT void OSSpinLockUnlock(volatile OSSpinLock *l);
81
438624e0
A
82#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
83static const OSSpinLock _OSSpinLockLocked = 1;
84#else
85static const OSSpinLock _OSSpinLockLocked = -1;
86#endif
87
e45b4692
A
88
89
ada7c492
A
90#if OS_ATOMIC_UP
91// Don't spin on UP
ada7c492
A
92#else
93#define OS_LOCK_SPIN_SPIN_TRIES 1000
94#define OS_LOCK_SPIN_PAUSE() os_hardware_pause()
95#endif
ada7c492 96
ea84da91
A
97OS_ALWAYS_INLINE
98static uint64_t
99_os_lock_yield_deadline(mach_msg_timeout_t timeout)
100{
101 uint64_t abstime = timeout * NSEC_PER_MSEC;
102#if !(defined(__i386__) || defined(__x86_64__))
103 mach_timebase_info_data_t tbi;
104 kern_return_t kr = mach_timebase_info(&tbi);
105 if (kr) return UINT64_MAX;
106 abstime *= tbi.denom;
107 abstime /= tbi.numer;
108#endif
109 return mach_absolute_time() + abstime;
110}
111
112OS_ALWAYS_INLINE
113static bool
114_os_lock_yield_until(uint64_t deadline)
115{
116 return mach_absolute_time() < deadline;
117}
118
ada7c492
A
119OS_NOINLINE
120static void
121_OSSpinLockLockYield(volatile OSSpinLock *l)
122{
123 int option = SWITCH_OPTION_DEPRESS;
124 mach_msg_timeout_t timeout = 1;
ea84da91 125 uint64_t deadline = _os_lock_yield_deadline(timeout);
ada7c492
A
126 OSSpinLock lock;
127 while (unlikely(lock = *l)) {
128_yield:
129 if (unlikely(lock != _OSSpinLockLocked)) {
130 _os_lock_corruption_abort((void *)l, (uintptr_t)lock);
131 }
ada7c492 132 thread_switch(MACH_PORT_NULL, option, timeout);
ea84da91
A
133 if (option == SWITCH_OPTION_WAIT) {
134 timeout++;
135 } else if (!_os_lock_yield_until(deadline)) {
136 option = SWITCH_OPTION_WAIT;
137 }
ada7c492
A
138 }
139 bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
140 if (likely(r)) return;
141 goto _yield;
142}
143
144#if OS_ATOMIC_UP
145void
146_OSSpinLockLockSlow(volatile OSSpinLock *l)
147{
148 return _OSSpinLockLockYield(l); // Don't spin on UP
149}
e45b4692 150#else // !OS_ATOMIC_UP
ada7c492
A
151void
152_OSSpinLockLockSlow(volatile OSSpinLock *l)
153{
154 uint32_t tries = OS_LOCK_SPIN_SPIN_TRIES;
155 OSSpinLock lock;
156 while (unlikely(lock = *l)) {
157_spin:
158 if (unlikely(lock != _OSSpinLockLocked)) {
159 return _os_lock_corruption_abort((void *)l, (uintptr_t)lock);
160 }
161 if (unlikely(!tries--)) return _OSSpinLockLockYield(l);
162 OS_LOCK_SPIN_PAUSE();
163 }
164 bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
165 if (likely(r)) return;
166 goto _spin;
167}
e45b4692 168#endif // !OS_ATOMIC_UP
ada7c492 169
ada7c492 170
ada7c492
A
171
#if OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK && !TARGET_OS_SIMULATOR

// On these targets the public OSSpinLock entry points are redirected to
// the ulock-based _os_nospin_lock implementation (defined later in this
// file), which never busy-waits.

typedef struct _os_nospin_lock_s *_os_nospin_lock_t;

OS_ATOMIC_EXPORT void _os_nospin_lock_lock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT void _os_nospin_lock_unlock(_os_nospin_lock_t lock);

void
OSSpinLockLock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_lock, OSSpinLockLock);
	OS_ATOMIC_ALIAS(_spin_lock, OSSpinLockLock);
	return _os_nospin_lock_lock((_os_nospin_lock_t)l);
}

bool
OSSpinLockTry(volatile OSSpinLock *l)
{
	return _os_nospin_lock_trylock((_os_nospin_lock_t)l);
}

int
spin_lock_try(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(_spin_lock_try, spin_lock_try);
	return _os_nospin_lock_trylock((_os_nospin_lock_t)l);
}

void
OSSpinLockUnlock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_unlock, OSSpinLockUnlock);
	OS_ATOMIC_ALIAS(_spin_unlock, OSSpinLockUnlock);
	return _os_nospin_lock_unlock((_os_nospin_lock_t)l);
}

// Below this point the classic spinning implementations are still
// compiled (for os_lock_spin_t), but demoted to static, renamed symbols
// so they do not clash with the exported aliases defined above.
#undef OS_ATOMIC_ALIAS
#define OS_ATOMIC_ALIAS(n, o)
static void _OSSpinLockLock(volatile OSSpinLock *l);
#undef OSSpinLockLock
#define OSSpinLockLock _OSSpinLockLock
static bool _OSSpinLockTry(volatile OSSpinLock *l);
#undef OSSpinLockTry
#define OSSpinLockTry _OSSpinLockTry
static __unused int __spin_lock_try(volatile OSSpinLock *l);
#undef spin_lock_try
#define spin_lock_try __spin_lock_try
static void _OSSpinLockUnlock(volatile OSSpinLock *l);
#undef OSSpinLockUnlock
#define OSSpinLockUnlock _OSSpinLockUnlock

#endif // OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK
225
226void
227OSSpinLockLock(volatile OSSpinLock *l)
228{
229 OS_ATOMIC_ALIAS(spin_lock, OSSpinLockLock);
230 OS_ATOMIC_ALIAS(_spin_lock, OSSpinLockLock);
231 bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
232 if (likely(r)) return;
233 return _OSSpinLockLockSlow(l);
234}
235
236bool
237OSSpinLockTry(volatile OSSpinLock *l)
238{
239 bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
240 return r;
241}
242
243int
244spin_lock_try(volatile OSSpinLock *l) // <rdar://problem/13316060>
245{
246 OS_ATOMIC_ALIAS(_spin_lock_try, spin_lock_try);
247 return OSSpinLockTry(l);
248}
249
250void
251OSSpinLockUnlock(volatile OSSpinLock *l)
252{
253 OS_ATOMIC_ALIAS(spin_unlock, OSSpinLockUnlock);
254 OS_ATOMIC_ALIAS(_spin_unlock, OSSpinLockUnlock);
255 os_atomic_store(l, 0, release);
256}
257
e45b4692 258
ada7c492
A
259#pragma mark -
260#pragma mark os_lock_spin_t
261
262OS_LOCK_STRUCT_DECL_INTERNAL(spin,
263 OSSpinLock volatile osl_spinlock;
264);
ada7c492
A
265OS_LOCK_METHODS_DECL(spin);
266OS_LOCK_TYPE_INSTANCE(spin);
ada7c492
A
267
268void
269_os_lock_spin_lock(_os_lock_spin_t l)
270{
271 return OSSpinLockLock(&l->osl_spinlock);
272}
273
274bool
275_os_lock_spin_trylock(_os_lock_spin_t l)
276{
277 return OSSpinLockTry(&l->osl_spinlock);
278}
279
280void
281_os_lock_spin_unlock(_os_lock_spin_t l)
282{
283 return OSSpinLockUnlock(&l->osl_spinlock);
284}
285
e45b4692 286
ada7c492
A
287#pragma mark -
288#pragma mark os_lock_owner_t
289
290#ifndef __TSD_MACH_THREAD_SELF
291#define __TSD_MACH_THREAD_SELF 3
292#endif
293
294typedef mach_port_name_t os_lock_owner_t;
e45b4692
A
295#define OS_LOCK_NO_OWNER MACH_PORT_NULL
296
ada7c492 297
438624e0 298OS_ALWAYS_INLINE OS_CONST
ada7c492
A
299static inline os_lock_owner_t
300_os_lock_owner_get_self(void)
301{
302 os_lock_owner_t self;
303 self = (os_lock_owner_t)_os_tsd_get_direct(__TSD_MACH_THREAD_SELF);
304 return self;
305}
306
ada7c492
A
307
308OS_NOINLINE OS_NORETURN OS_COLD
309static void
310_os_lock_recursive_abort(os_lock_owner_t owner)
311{
312 __LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
313 "os_lock");
314}
315
ada7c492
A
316
317#pragma mark -
318#pragma mark os_lock_handoff_t
319
320OS_LOCK_STRUCT_DECL_INTERNAL(handoff,
321 os_lock_owner_t volatile osl_owner;
322);
ada7c492
A
323OS_LOCK_METHODS_DECL(handoff);
324OS_LOCK_TYPE_INSTANCE(handoff);
ada7c492
A
325
326#define OS_LOCK_HANDOFF_YIELD_TRIES 100
327
328OS_NOINLINE
329static void
330_os_lock_handoff_lock_slow(_os_lock_handoff_t l)
331{
332 int option = SWITCH_OPTION_OSLOCK_DEPRESS;
333 mach_msg_timeout_t timeout = 1;
334 uint32_t tries = OS_LOCK_HANDOFF_YIELD_TRIES;
335 os_lock_owner_t self = _os_lock_owner_get_self(), owner;
336 while (unlikely(owner = l->osl_owner)) {
337_handoff:
338 if (unlikely(owner == self)) return _os_lock_recursive_abort(self);
339 // Yield until tries first hits zero, then permanently switch to wait
340 if (unlikely(!tries--)) option = SWITCH_OPTION_OSLOCK_WAIT;
341 thread_switch(owner, option, timeout);
342 // Redrive the handoff every 1ms until switching to wait
343 if (option == SWITCH_OPTION_OSLOCK_WAIT) timeout++;
344 }
345 bool r = os_atomic_cmpxchgv2o(l, osl_owner, MACH_PORT_NULL, self, &owner,
346 acquire);
347 if (likely(r)) return;
348 goto _handoff;
349}
350
351void
352_os_lock_handoff_lock(_os_lock_handoff_t l)
353{
354 os_lock_owner_t self = _os_lock_owner_get_self();
355 bool r = os_atomic_cmpxchg2o(l, osl_owner, MACH_PORT_NULL, self, acquire);
356 if (likely(r)) return;
357 return _os_lock_handoff_lock_slow(l);
358}
359
360bool
361_os_lock_handoff_trylock(_os_lock_handoff_t l)
362{
363 os_lock_owner_t self = _os_lock_owner_get_self();
364 bool r = os_atomic_cmpxchg2o(l, osl_owner, MACH_PORT_NULL, self, acquire);
365 return r;
366}
367
368void
369_os_lock_handoff_unlock(_os_lock_handoff_t l)
370{
371 os_atomic_store2o(l, osl_owner, MACH_PORT_NULL, release);
372}
373
e45b4692 374
ada7c492
A
375#pragma mark -
376#pragma mark os_ulock_value_t
377
378#include <sys/errno.h>
379#include <sys/ulock.h>
380
381typedef os_lock_owner_t os_ulock_value_t;
382
383// This assumes that all thread mach port values always have the low bit set!
384// Clearing this bit is used to communicate the existence of waiters to unlock.
385#define OS_ULOCK_NOWAITERS_BIT ((os_ulock_value_t)1u)
386#define OS_ULOCK_OWNER(value) ((value) | OS_ULOCK_NOWAITERS_BIT)
387
388#define OS_ULOCK_ANONYMOUS_OWNER MACH_PORT_DEAD
e45b4692
A
389#define OS_ULOCK_IS_OWNER(value, self, allow_anonymous_owner) ({ \
390 os_lock_owner_t _owner = OS_ULOCK_OWNER(value); (_owner == (self)) && \
391 (!(allow_anonymous_owner) || _owner != OS_ULOCK_ANONYMOUS_OWNER); })
392#define OS_ULOCK_IS_NOT_OWNER(value, self, allow_anonymous_owner) ({ \
393 os_lock_owner_t _owner = OS_ULOCK_OWNER(value); (_owner != (self)) && \
394 (!(allow_anonymous_owner) || _owner != OS_ULOCK_ANONYMOUS_OWNER); })
ada7c492
A
395
396#pragma mark -
397#pragma mark os_unfair_lock
398
399typedef struct _os_unfair_lock_s {
400 os_ulock_value_t oul_value;
401} *_os_unfair_lock_t;
402
403_Static_assert(sizeof(struct os_unfair_lock_s) ==
404 sizeof(struct _os_unfair_lock_s), "os_unfair_lock size mismatch");
405
406OS_ATOMIC_EXPORT void os_unfair_lock_lock(os_unfair_lock_t lock);
407OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
408 os_unfair_lock_options_t options);
409OS_ATOMIC_EXPORT bool os_unfair_lock_trylock(os_unfair_lock_t lock);
410OS_ATOMIC_EXPORT void os_unfair_lock_unlock(os_unfair_lock_t lock);
411
412OS_ATOMIC_EXPORT void os_unfair_lock_lock_no_tsd_4libpthread(
413 os_unfair_lock_t lock);
414OS_ATOMIC_EXPORT void os_unfair_lock_unlock_no_tsd_4libpthread(
415 os_unfair_lock_t lock);
e45b4692
A
416OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options_4Libc(
417 os_unfair_lock_t lock, os_unfair_lock_options_t options);
418OS_ATOMIC_EXPORT void os_unfair_lock_unlock_4Libc(os_unfair_lock_t lock);
419
420OS_NOINLINE OS_NORETURN OS_COLD
421void _os_unfair_lock_recursive_abort(os_lock_owner_t owner);
422OS_NOINLINE OS_NORETURN OS_COLD
423void _os_unfair_lock_unowned_abort(os_lock_owner_t owner);
424OS_NOINLINE OS_NORETURN OS_COLD
425void _os_unfair_lock_corruption_abort(os_ulock_value_t current);
ada7c492
A
426
427_Static_assert(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION ==
428 ULF_WAIT_WORKQ_DATA_CONTENTION,
429 "check value for OS_UNFAIR_LOCK_OPTIONS_MASK");
430#define OS_UNFAIR_LOCK_OPTIONS_MASK \
431 (os_unfair_lock_options_t)(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION)
e45b4692
A
432#define OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER 0x01000000u
433
ada7c492
A
434
435OS_NOINLINE OS_NORETURN OS_COLD
e45b4692 436void
ada7c492
A
437_os_unfair_lock_recursive_abort(os_lock_owner_t owner)
438{
439 __LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
440 "os_unfair_lock");
441}
442
443OS_NOINLINE OS_NORETURN OS_COLD
e45b4692 444void
ada7c492
A
445_os_unfair_lock_unowned_abort(os_lock_owner_t owner)
446{
447 __LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_unfair_lock not "
448 "owned by current thread");
449}
450
451OS_NOINLINE OS_NORETURN OS_COLD
e45b4692 452void
ada7c492
A
453_os_unfair_lock_corruption_abort(os_ulock_value_t current)
454{
455 __LIBPLATFORM_CLIENT_CRASH__(current, "os_unfair_lock is corrupt");
456}
457
e45b4692 458
ada7c492
A
459OS_NOINLINE
460static void
461_os_unfair_lock_lock_slow(_os_unfair_lock_t l, os_lock_owner_t self,
462 os_unfair_lock_options_t options)
463{
e45b4692
A
464 os_unfair_lock_options_t allow_anonymous_owner =
465 options & OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
466 options &= ~OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
ada7c492
A
467 if (unlikely(options & ~OS_UNFAIR_LOCK_OPTIONS_MASK)) {
468 __LIBPLATFORM_CLIENT_CRASH__(options, "Invalid options");
469 }
e45b4692 470 os_ulock_value_t current, new, waiters_mask = 0;
ada7c492
A
471 while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
472 OS_LOCK_NO_OWNER)) {
473_retry:
e45b4692 474 if (unlikely(OS_ULOCK_IS_OWNER(current, self, allow_anonymous_owner))) {
ada7c492
A
475 return _os_unfair_lock_recursive_abort(self);
476 }
477 new = current & ~OS_ULOCK_NOWAITERS_BIT;
478 if (current != new) {
479 // Clear nowaiters bit in lock value before waiting
480 if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
481 relaxed)){
482 continue;
483 }
484 current = new;
485 }
486 int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO | options,
487 l, current, 0);
488 if (unlikely(ret < 0)) {
489 switch (-ret) {
490 case EINTR:
491 case EFAULT:
492 continue;
493 case EOWNERDEAD:
494 _os_unfair_lock_corruption_abort(current);
495 break;
496 default:
497 __LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
498 }
499 }
500 // If there are more waiters, unset nowaiters bit when acquiring lock
501 waiters_mask = (ret > 0) ? OS_ULOCK_NOWAITERS_BIT : 0;
502 }
503 new = self & ~waiters_mask;
504 bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
505 &current, acquire);
506 if (unlikely(!r)) goto _retry;
507}
508
509OS_NOINLINE
510static void
511_os_unfair_lock_unlock_slow(_os_unfair_lock_t l, os_ulock_value_t current,
e45b4692 512 os_lock_owner_t self, os_unfair_lock_options_t options)
ada7c492 513{
e45b4692
A
514 os_unfair_lock_options_t allow_anonymous_owner =
515 options & OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
516 options &= ~OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
517 if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, allow_anonymous_owner))) {
ada7c492
A
518 return _os_unfair_lock_unowned_abort(OS_ULOCK_OWNER(current));
519 }
520 if (current & OS_ULOCK_NOWAITERS_BIT) {
521 __LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
522 }
523 for (;;) {
524 int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO, l, 0);
525 if (unlikely(ret < 0)) {
526 switch (-ret) {
527 case EINTR:
528 continue;
529 case ENOENT:
530 break;
531 default:
532 __LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
533 }
534 }
535 break;
536 }
537}
538
539void
540os_unfair_lock_lock(os_unfair_lock_t lock)
541{
542 _os_unfair_lock_t l = (_os_unfair_lock_t)lock;
543 os_lock_owner_t self = _os_lock_owner_get_self();
544 bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
545 if (likely(r)) return;
546 return _os_unfair_lock_lock_slow(l, self, OS_UNFAIR_LOCK_NONE);
547}
548
549void
550os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
551 os_unfair_lock_options_t options)
552{
553 _os_unfair_lock_t l = (_os_unfair_lock_t)lock;
554 os_lock_owner_t self = _os_lock_owner_get_self();
555 bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
556 if (likely(r)) return;
557 return _os_unfair_lock_lock_slow(l, self, options);
558}
559
560bool
561os_unfair_lock_trylock(os_unfair_lock_t lock)
562{
563 _os_unfair_lock_t l = (_os_unfair_lock_t)lock;
564 os_lock_owner_t self = _os_lock_owner_get_self();
565 bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
566 return r;
567}
568
569void
570os_unfair_lock_unlock(os_unfair_lock_t lock)
571{
572 _os_unfair_lock_t l = (_os_unfair_lock_t)lock;
573 os_lock_owner_t self = _os_lock_owner_get_self();
574 os_ulock_value_t current;
575 current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
576 if (likely(current == self)) return;
e45b4692 577 return _os_unfair_lock_unlock_slow(l, current, self, 0);
ada7c492
A
578}
579
580void
581os_unfair_lock_lock_no_tsd_4libpthread(os_unfair_lock_t lock)
582{
583 _os_unfair_lock_t l = (_os_unfair_lock_t)lock;
584 os_lock_owner_t self = OS_ULOCK_ANONYMOUS_OWNER;
585 bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
586 if (likely(r)) return;
587 return _os_unfair_lock_lock_slow(l, self,
e45b4692
A
588 OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION|
589 OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER);
ada7c492
A
590}
591
592void
593os_unfair_lock_unlock_no_tsd_4libpthread(os_unfair_lock_t lock)
594{
595 _os_unfair_lock_t l = (_os_unfair_lock_t)lock;
596 os_lock_owner_t self = OS_ULOCK_ANONYMOUS_OWNER;
597 os_ulock_value_t current;
598 current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
599 if (likely(current == self)) return;
e45b4692
A
600 return _os_unfair_lock_unlock_slow(l, current, self,
601 OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER);
ada7c492
A
602}
603
ada7c492 604
ada7c492
A
605void
606os_unfair_lock_assert_owner(os_unfair_lock_t lock)
607{
608 _os_unfair_lock_t l = (_os_unfair_lock_t)lock;
609 os_lock_owner_t self = _os_lock_owner_get_self();
610 os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed);
e45b4692 611 if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, 0))) {
ada7c492
A
612 __LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
613 "Lock unexpectedly not owned by current thread");
614 }
615}
616
617void
618os_unfair_lock_assert_not_owner(os_unfair_lock_t lock)
619{
620 _os_unfair_lock_t l = (_os_unfair_lock_t)lock;
621 os_lock_owner_t self = _os_lock_owner_get_self();
622 os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed);
e45b4692 623 if (unlikely(OS_ULOCK_IS_OWNER(current, self, 0))) {
ada7c492
A
624 __LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
625 "Lock unexpectedly owned by current thread");
626 }
627}
e45b4692 628
ada7c492 629
438624e0
A
630#pragma mark -
631#pragma mark os_unfair_recursive_lock
632
633OS_ATOMIC_EXPORT
634void os_unfair_recursive_lock_lock_with_options(os_unfair_recursive_lock_t lock,
635 os_unfair_lock_options_t options);
636
637OS_ATOMIC_EXPORT
638bool os_unfair_recursive_lock_trylock(os_unfair_recursive_lock_t lock);
639
640OS_ATOMIC_EXPORT
641void os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock);
642
643OS_ATOMIC_EXPORT
644bool os_unfair_recursive_lock_tryunlock4objc(os_unfair_recursive_lock_t lock);
645
646
647static inline os_lock_owner_t
648_os_unfair_lock_owner(os_unfair_lock_t lock)
649{
650 _os_unfair_lock_t l = (_os_unfair_lock_t)lock;
651 return OS_ULOCK_OWNER(os_atomic_load(&l->oul_value, relaxed));
652}
653
654void
655os_unfair_recursive_lock_lock_with_options(os_unfair_recursive_lock_t lock,
656 os_unfair_lock_options_t options)
657{
658 os_lock_owner_t cur, self = _os_lock_owner_get_self();
659 _os_unfair_lock_t l = (_os_unfair_lock_t)&lock->ourl_lock;
660
661 if (likely(os_atomic_cmpxchgv2o(l, oul_value,
662 OS_LOCK_NO_OWNER, self, &cur, acquire))) {
663 return;
664 }
665
666 if (OS_ULOCK_OWNER(cur) == self) {
667 lock->ourl_count++;
668 return;
669 }
670
671 return _os_unfair_lock_lock_slow(l, self, options);
672}
673
674bool
675os_unfair_recursive_lock_trylock(os_unfair_recursive_lock_t lock)
676{
677 os_lock_owner_t cur, self = _os_lock_owner_get_self();
678 _os_unfair_lock_t l = (_os_unfair_lock_t)&lock->ourl_lock;
679
680 if (likely(os_atomic_cmpxchgv2o(l, oul_value,
681 OS_LOCK_NO_OWNER, self, &cur, acquire))) {
682 return true;
683 }
684
685 if (likely(OS_ULOCK_OWNER(cur) == self)) {
686 lock->ourl_count++;
687 return true;
688 }
689
690 return false;
691}
692
693
694OS_ALWAYS_INLINE
695static inline void
696_os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock,
697 os_lock_owner_t self)
698{
699 if (unlikely(lock->ourl_count)) {
700 os_lock_owner_t cur = _os_unfair_lock_owner(&lock->ourl_lock);
701 if (unlikely(cur != self)) {
702 _os_unfair_lock_unowned_abort(cur);
703 }
704 lock->ourl_count--;
705 return;
706 }
707
708 _os_unfair_lock_t l = (_os_unfair_lock_t)lock;
709 os_ulock_value_t current;
710 current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
711 if (likely(current == self)) return;
712 return _os_unfair_lock_unlock_slow(l, current, self, 0);
713}
714
715void
716os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock)
717{
718 os_lock_owner_t self = _os_lock_owner_get_self();
719 _os_unfair_recursive_lock_unlock(lock, self);
720}
721
722bool
723os_unfair_recursive_lock_tryunlock4objc(os_unfair_recursive_lock_t lock)
724{
725 os_lock_owner_t cur = _os_unfair_lock_owner(&lock->ourl_lock);
726 os_lock_owner_t self = _os_lock_owner_get_self();
727 if (likely(cur == self)) {
728 _os_unfair_recursive_lock_unlock(lock, self);
729 return true;
730 }
731 return false;
732}
733
734
ada7c492
A
735#pragma mark -
736#pragma mark _os_lock_unfair_t
737
738OS_LOCK_STRUCT_DECL_INTERNAL(unfair,
739 os_unfair_lock osl_unfair_lock;
740);
ada7c492
A
741OS_LOCK_METHODS_DECL(unfair);
742OS_LOCK_TYPE_INSTANCE(unfair);
ada7c492
A
743
744void
745_os_lock_unfair_lock(_os_lock_unfair_t l)
746{
747 return os_unfair_lock_lock(&l->osl_unfair_lock);
748}
749
750bool
751_os_lock_unfair_trylock(_os_lock_unfair_t l)
752{
753 return os_unfair_lock_trylock(&l->osl_unfair_lock);
754}
755
756void
757_os_lock_unfair_unlock(_os_lock_unfair_t l)
758{
759 return os_unfair_lock_unlock(&l->osl_unfair_lock);
760}
761
e45b4692 762
ada7c492
A
763#pragma mark -
764#pragma mark _os_nospin_lock
765
766typedef struct _os_nospin_lock_s {
767 os_ulock_value_t oul_value;
768} _os_nospin_lock, *_os_nospin_lock_t;
769
770_Static_assert(sizeof(OSSpinLock) ==
771 sizeof(struct _os_nospin_lock_s), "os_nospin_lock size mismatch");
772
773OS_ATOMIC_EXPORT void _os_nospin_lock_lock(_os_nospin_lock_t lock);
774OS_ATOMIC_EXPORT bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
775OS_ATOMIC_EXPORT void _os_nospin_lock_unlock(_os_nospin_lock_t lock);
776
e45b4692 777
ada7c492
A
778OS_NOINLINE
779static void
780_os_nospin_lock_lock_slow(_os_nospin_lock_t l)
781{
782 os_lock_owner_t self = _os_lock_owner_get_self();
783 os_ulock_value_t current, new, waiters_mask = 0;
784 uint32_t timeout = 1;
785 while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
786 OS_LOCK_NO_OWNER)) {
787_retry:
788 new = current & ~OS_ULOCK_NOWAITERS_BIT;
789 // For safer compatibility with OSSpinLock where _OSSpinLockLocked may
790 // be 1, check that new didn't become 0 (unlocked) by clearing this bit
791 if (current != new && new) {
792 // Clear nowaiters bit in lock value before waiting
793 if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
794 relaxed)){
795 continue;
796 }
797 current = new;
798 }
799 int ret = __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, l, current,
800 timeout * 1000);
801 if (unlikely(ret < 0)) {
802 switch (-ret) {
803 case ETIMEDOUT:
804 timeout++;
805 continue;
806 case EINTR:
807 case EFAULT:
808 continue;
809 default:
810 __LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
811 }
812 }
813 // If there are more waiters, unset nowaiters bit when acquiring lock
814 waiters_mask = (ret > 0) ? OS_ULOCK_NOWAITERS_BIT : 0;
815 }
816 new = self & ~waiters_mask;
817 bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
818 &current, acquire);
819 if (unlikely(!r)) goto _retry;
820}
821
822OS_NOINLINE
823static void
824_os_nospin_lock_unlock_slow(_os_nospin_lock_t l, os_ulock_value_t current)
825{
826 os_lock_owner_t self = _os_lock_owner_get_self();
827 if (unlikely(OS_ULOCK_OWNER(current) != self)) {
828 return; // no unowned_abort for drop-in compatibility with OSSpinLock
829 }
830 if (current & OS_ULOCK_NOWAITERS_BIT) {
831 __LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
832 }
833 for (;;) {
834 int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, l, 0);
835 if (unlikely(ret < 0)) {
836 switch (-ret) {
837 case EINTR:
838 continue;
839 case ENOENT:
840 break;
841 default:
842 __LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
843 }
844 }
845 break;
846 }
847}
848
849void
850_os_nospin_lock_lock(_os_nospin_lock_t l)
851{
852 os_lock_owner_t self = _os_lock_owner_get_self();
853 bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
854 if (likely(r)) return;
855 return _os_nospin_lock_lock_slow(l);
856}
857
858bool
859_os_nospin_lock_trylock(_os_nospin_lock_t l)
860{
861 os_lock_owner_t self = _os_lock_owner_get_self();
862 bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
863 return r;
864}
865
866void
867_os_nospin_lock_unlock(_os_nospin_lock_t l)
868{
869 os_lock_owner_t self = _os_lock_owner_get_self();
870 os_ulock_value_t current;
871 current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
872 if (likely(current == self)) return;
873 return _os_nospin_lock_unlock_slow(l, current);
874}
875
e45b4692 876
ada7c492
A
877#pragma mark -
878#pragma mark _os_lock_nospin_t
879
880OS_LOCK_STRUCT_DECL_INTERNAL(nospin,
881 _os_nospin_lock osl_nospin_lock;
882);
ada7c492
A
883OS_LOCK_METHODS_DECL(nospin);
884OS_LOCK_TYPE_INSTANCE(nospin);
ada7c492
A
885
886void
887_os_lock_nospin_lock(_os_lock_nospin_t l)
888{
889 return _os_nospin_lock_lock(&l->osl_nospin_lock);
890}
891
892bool
893_os_lock_nospin_trylock(_os_lock_nospin_t l)
894{
895 return _os_nospin_lock_trylock(&l->osl_nospin_lock);
896}
897
898void
899_os_lock_nospin_unlock(_os_lock_nospin_t l)
900{
901 return _os_nospin_lock_unlock(&l->osl_nospin_lock);
902}
903
e45b4692 904
ada7c492
A
905#pragma mark -
906#pragma mark os_once_t
907
908typedef struct os_once_gate_s {
909 union {
910 os_ulock_value_t ogo_lock;
438624e0 911 uintptr_t ogo_once;
ada7c492
A
912 };
913} os_once_gate_s, *os_once_gate_t;
914
438624e0
A
915#define OS_ONCE_INIT ((uintptr_t)0l)
916#define OS_ONCE_DONE (~(uintptr_t)0l)
917
918#if defined(__i386__) || defined(__x86_64__)
919#define OS_ONCE_USE_QUIESCENT_COUNTER 0
920#else
921#define OS_ONCE_USE_QUIESCENT_COUNTER 1
922#endif
ada7c492
A
923
924OS_ATOMIC_EXPORT void _os_once(os_once_t *val, void *ctxt, os_function_t func);
925OS_ATOMIC_EXPORT void __os_once_reset(os_once_t *val);
926
927OS_NOINLINE OS_NORETURN OS_COLD
e45b4692
A
928void _os_once_gate_recursive_abort(os_lock_owner_t owner);
929OS_NOINLINE OS_NORETURN OS_COLD
930void _os_once_gate_unowned_abort(os_lock_owner_t owner);
931OS_NOINLINE OS_NORETURN OS_COLD
932void _os_once_gate_corruption_abort(os_ulock_value_t current);
933
934
935OS_NOINLINE OS_NORETURN OS_COLD
936void
ada7c492
A
937_os_once_gate_recursive_abort(os_lock_owner_t owner)
938{
939 __LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
940 "os_once_t");
941}
942
943OS_NOINLINE OS_NORETURN OS_COLD
e45b4692 944void
ada7c492
A
945_os_once_gate_unowned_abort(os_lock_owner_t owner)
946{
947 __LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_once_t not "
948 "owned by current thread");
949}
950
951OS_NOINLINE OS_NORETURN OS_COLD
e45b4692 952void
ada7c492
A
953_os_once_gate_corruption_abort(os_ulock_value_t current)
954{
955 __LIBPLATFORM_CLIENT_CRASH__(current, "os_once_t is corrupt");
956}
957
e45b4692 958
438624e0
A
959#if OS_ONCE_USE_QUIESCENT_COUNTER
960#define OS_ONCE_MAKE_GEN(gen) (((gen) << 2) + OS_ULOCK_NOWAITERS_BIT)
961#define OS_ONCE_IS_GEN(gen) (((gen) & 3) == OS_ULOCK_NOWAITERS_BIT)
962
963// the _COMM_PAGE_CPU_QUIESCENT_COUNTER value is incremented every time
964// all CPUs have performed a context switch.
965//
966// To make sure all CPUs context switched at least once since `gen`,
967// we need to observe 4 increments, see libdispatch/src/shims/lock.h
968#define OS_ONCE_GEN_SAFE_DELTA (4 << 2)
969
970OS_ALWAYS_INLINE
971static inline uintptr_t
972_os_once_generation(void)
973{
974 uintptr_t value = *(volatile uintptr_t *)_COMM_PAGE_CPU_QUIESCENT_COUNTER;
975 return OS_ONCE_MAKE_GEN(value);
976}
977
978OS_ALWAYS_INLINE
979static inline uintptr_t
980_os_once_mark_quiescing(os_once_gate_t og)
ada7c492 981{
438624e0
A
982 return os_atomic_xchg(&og->ogo_once, _os_once_generation(), release);
983}
ada7c492 984
438624e0
A
985OS_ALWAYS_INLINE
986static void
987_os_once_mark_done_if_quiesced(os_once_gate_t og, uintptr_t gen)
988{
989 if (_os_once_generation() - gen >= OS_ONCE_GEN_SAFE_DELTA) {
990 os_atomic_store(&og->ogo_once, OS_ONCE_DONE, relaxed);
ada7c492
A
991 }
992}
438624e0
A
993#else
994OS_ALWAYS_INLINE
995static inline uintptr_t
996_os_once_mark_done(os_once_gate_t og)
997{
998 return os_atomic_xchg(&og->ogo_once, OS_ONCE_DONE, release);
999}
1000#endif
ada7c492
A
1001
1002OS_NOINLINE
1003static void
438624e0 1004_os_once_gate_broadcast(os_once_gate_t og, os_ulock_value_t current,
ada7c492
A
1005 os_lock_owner_t self)
1006{
e45b4692 1007 if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, 0))) {
ada7c492
A
1008 return _os_once_gate_unowned_abort(OS_ULOCK_OWNER(current));
1009 }
1010 if (current & OS_ULOCK_NOWAITERS_BIT) {
1011 __LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
1012 }
1013 for (;;) {
1014 int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO | ULF_WAKE_ALL,
438624e0 1015 &og->ogo_lock, 0);
ada7c492
A
1016 if (unlikely(ret < 0)) {
1017 switch (-ret) {
1018 case EINTR:
1019 continue;
1020 case ENOENT:
1021 break;
1022 default:
1023 __LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
1024 }
1025 }
1026 break;
1027 }
1028}
1029
438624e0 1030OS_NOINLINE
ada7c492 1031static void
438624e0
A
1032_os_once_callout(os_once_gate_t og, void *ctxt, os_function_t func,
1033 os_lock_owner_t self)
ada7c492 1034{
438624e0
A
1035 uintptr_t v;
1036
1037 func(ctxt);
1038
1039#if OS_ONCE_USE_QUIESCENT_COUNTER
1040 v = _os_once_mark_quiescing(og);
e45b4692 1041#else
438624e0 1042 v = _os_once_mark_done(og);
e45b4692 1043#endif
438624e0
A
1044 if (likely((os_ulock_value_t)v == self)) return;
1045 _os_once_gate_broadcast(og, (os_ulock_value_t)v, self);
1046}
1047
1048OS_NOINLINE
1049static void
1050_os_once_gate_wait(os_once_gate_t og, void *ctxt, os_function_t func,
1051 os_lock_owner_t self)
1052{
1053 uintptr_t old, new;
1054
1055 for (;;) {
1056 os_atomic_rmw_loop(&og->ogo_once, old, new, relaxed, {
1057 if (old == OS_ONCE_DONE) {
1058 os_atomic_rmw_loop_give_up(return);
1059#if OS_ONCE_USE_QUIESCENT_COUNTER
1060 } else if (OS_ONCE_IS_GEN(old)) {
1061 os_atomic_rmw_loop_give_up({
1062 os_atomic_thread_fence(acquire);
1063 return _os_once_mark_done_if_quiesced(og, old);
1064 });
1065#endif
1066 } else if (old == OS_ONCE_INIT) {
1067 // __os_once_reset was used, try to become the new initializer
1068 new = (uintptr_t)self;
1069 } else {
1070 new = old & ~(uintptr_t)OS_ULOCK_NOWAITERS_BIT;
1071 if (new == old) os_atomic_rmw_loop_give_up(break);
1072 }
1073 });
1074 if (old == OS_ONCE_INIT) {
1075 // see comment in _os_once, pairs with the release barrier
1076 // in __os_once_reset()
1077 os_atomic_thread_fence(acquire);
1078 return _os_once_callout(og, ctxt, func, self);
1079 }
1080 if (unlikely(OS_ULOCK_IS_OWNER((os_lock_owner_t)old, self, 0))) {
1081 return _os_once_gate_recursive_abort(self);
1082 }
1083 int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO,
1084 &og->ogo_lock, (os_ulock_value_t)new, 0);
1085 if (unlikely(ret < 0)) {
1086 switch (-ret) {
1087 case EINTR:
1088 case EFAULT:
1089 continue;
1090 case EOWNERDEAD:
1091 _os_once_gate_corruption_abort((os_lock_owner_t)old);
1092 break;
1093 default:
1094 __LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
1095 }
1096 }
1097 }
ada7c492
A
1098}
1099
1100// Atomically resets the once value to zero and then signals all
438624e0 1101// pending waiters to return from their __ulock_wait()
ada7c492
A
1102void
1103__os_once_reset(os_once_t *val)
1104{
1105 os_once_gate_t og = (os_once_gate_t)val;
1106 os_lock_owner_t self = _os_lock_owner_get_self();
438624e0
A
1107 uintptr_t v;
1108
1109 v = os_atomic_xchg(&og->ogo_once, OS_ONCE_INIT, release);
1110 if (likely((os_ulock_value_t)v == self)) return;
1111 return _os_once_gate_broadcast(og, (os_ulock_value_t)v, self);
ada7c492
A
1112}
1113
1114void
1115_os_once(os_once_t *val, void *ctxt, os_function_t func)
1116{
1117 os_once_gate_t og = (os_once_gate_t)val;
438624e0
A
1118 os_lock_owner_t self;
1119 uintptr_t v;
1120
1121#if OS_ONCE_USE_QUIESCENT_COUNTER
1122 v = os_atomic_load(&og->ogo_once, acquire);
1123 if (likely(OS_ONCE_IS_GEN(v))) {
1124 return _os_once_mark_done_if_quiesced(og, v);
1125 }
1126#endif
1127
1128 self = _os_lock_owner_get_self();
1129 v = (uintptr_t)self;
ada7c492 1130
438624e0
A
1131 // The acquire barrier pairs with the release in __os_once_reset()
1132 // for cases when a previous initializer failed.
1133 if (likely(os_atomic_cmpxchg(&og->ogo_once, OS_ONCE_INIT, v, acquire))) {
1134 return _os_once_callout(og, ctxt, func, self);
ada7c492 1135 }
438624e0 1136 return _os_once_gate_wait(og, ctxt, func, self);
ada7c492
A
1137}
1138
ada7c492
A
1139
1140#pragma mark -
1141#pragma mark os_lock_eliding_t
1142
1143#if !TARGET_OS_IPHONE
1144
1145#define _os_lock_eliding_t _os_lock_spin_t
1146#define _os_lock_eliding_lock _os_lock_spin_lock
1147#define _os_lock_eliding_trylock _os_lock_spin_trylock
1148#define _os_lock_eliding_unlock _os_lock_spin_unlock
1149OS_LOCK_METHODS_DECL(eliding);
1150OS_LOCK_TYPE_INSTANCE(eliding);
1151
1152#pragma mark -
1153#pragma mark os_lock_transactional_t
1154
1155OS_LOCK_STRUCT_DECL_INTERNAL(transactional,
1156 uintptr_t volatile osl_lock;
1157);
1158
1159#define _os_lock_transactional_t _os_lock_eliding_t
1160#define _os_lock_transactional_lock _os_lock_eliding_lock
1161#define _os_lock_transactional_trylock _os_lock_eliding_trylock
1162#define _os_lock_transactional_unlock _os_lock_eliding_unlock
1163OS_LOCK_METHODS_DECL(transactional);
1164OS_LOCK_TYPE_INSTANCE(transactional);
1165
1166#endif // !TARGET_OS_IPHONE