/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#include "lock_internal.h"
#include "libkern/OSAtomic.h"
#include "os/lock.h"
#include "os/lock_private.h"
#include "os/once_private.h"
#include "resolver.h"

#include <mach/mach_init.h>
#include <mach/mach_traps.h>
#include <mach/thread_switch.h>
#include <mach/mach_time.h>
#include <os/tsd.h>

#pragma mark -
#pragma mark _os_lock_base_t

#if !OS_VARIANT_ONLY

OS_LOCK_STRUCT_DECL_INTERNAL(base);
OS_USED static OS_LOCK_TYPE_STRUCT_DECL(base);

void
os_lock_lock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_lock(l);
}

bool
os_lock_trylock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_trylock(l);
}

void
os_lock_unlock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_unlock(l);
}

#endif //!OS_VARIANT_ONLY
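
// Illustrative sketch of client usage (not part of libplatform): os_lock_t
// dispatches through the lock's type vtable, so any concrete variant works
// with the same three entry points. OS_LOCK_HANDOFF_INIT is assumed here to
// be the static initializer exposed by <os/lock_private.h>.
#if 0
static os_lock_handoff_s shared_lock = OS_LOCK_HANDOFF_INIT;

static void
with_shared_state(void (*critical_section)(void))
{
	os_lock_lock(&shared_lock);
	critical_section();
	os_lock_unlock(&shared_lock);
}
#endif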

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_lock_corruption_abort(void *lock_ptr OS_UNUSED, uintptr_t lock_value)
{
	__LIBPLATFORM_CLIENT_CRASH__(lock_value, "os_lock is corrupt");
}

#pragma mark -
#pragma mark OSSpinLock

#ifdef OS_LOCK_VARIANT_SELECTOR
void _OSSpinLockLockSlow(volatile OSSpinLock *l);
#else
OS_NOINLINE OS_USED static void _OSSpinLockLockSlow(volatile OSSpinLock *l);
#endif // OS_LOCK_VARIANT_SELECTOR

OS_ATOMIC_EXPORT void OSSpinLockLock(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT bool OSSpinLockTry(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT int spin_lock_try(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT void OSSpinLockUnlock(volatile OSSpinLock *l);

#if OS_ATOMIC_UP
// Don't spin on UP
#elif OS_ATOMIC_WFE
#define OS_LOCK_SPIN_SPIN_TRIES 100
#define OS_LOCK_SPIN_PAUSE() os_hardware_wfe()
#else
#define OS_LOCK_SPIN_SPIN_TRIES 1000
#define OS_LOCK_SPIN_PAUSE() os_hardware_pause()
#endif

static const OSSpinLock _OSSpinLockLocked = TARGET_OS_EMBEDDED ? 1 : -1;

OS_ALWAYS_INLINE
static uint64_t
_os_lock_yield_deadline(mach_msg_timeout_t timeout)
{
	uint64_t abstime = timeout * NSEC_PER_MSEC;
#if !(defined(__i386__) || defined(__x86_64__))
	mach_timebase_info_data_t tbi;
	kern_return_t kr = mach_timebase_info(&tbi);
	if (kr) return UINT64_MAX;
	abstime *= tbi.denom;
	abstime /= tbi.numer;
#endif
	return mach_absolute_time() + abstime;
}
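
// Worked example (illustrative): on hardware where mach_timebase_info()
// reports { numer = 125, denom = 3 } (i.e. a 24 MHz counter), a 1 ms
// timeout converts to 1,000,000 ns * 3 / 125 = 24,000 absolute time units.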

OS_ALWAYS_INLINE
static bool
_os_lock_yield_until(uint64_t deadline)
{
	return mach_absolute_time() < deadline;
}

OS_NOINLINE
static void
_OSSpinLockLockYield(volatile OSSpinLock *l)
{
	int option = SWITCH_OPTION_DEPRESS;
	mach_msg_timeout_t timeout = 1;
	uint64_t deadline = _os_lock_yield_deadline(timeout);
	OSSpinLock lock;
	while (unlikely(lock = *l)) {
_yield:
		if (unlikely(lock != _OSSpinLockLocked)) {
			_os_lock_corruption_abort((void *)l, (uintptr_t)lock);
		}
		thread_switch(MACH_PORT_NULL, option, timeout);
		if (option == SWITCH_OPTION_WAIT) {
			timeout++;
		} else if (!_os_lock_yield_until(deadline)) {
			option = SWITCH_OPTION_WAIT;
		}
	}
	bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
	if (likely(r)) return;
	goto _yield;
}

#if OS_ATOMIC_UP
void
_OSSpinLockLockSlow(volatile OSSpinLock *l)
{
	return _OSSpinLockLockYield(l); // Don't spin on UP
}
#else
void
_OSSpinLockLockSlow(volatile OSSpinLock *l)
{
	uint32_t tries = OS_LOCK_SPIN_SPIN_TRIES;
	OSSpinLock lock;
	while (unlikely(lock = *l)) {
_spin:
		if (unlikely(lock != _OSSpinLockLocked)) {
			return _os_lock_corruption_abort((void *)l, (uintptr_t)lock);
		}
		if (unlikely(!tries--)) return _OSSpinLockLockYield(l);
		OS_LOCK_SPIN_PAUSE();
	}
	bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
	if (likely(r)) return;
	goto _spin;
}
#endif

#ifdef OS_LOCK_VARIANT_SELECTOR
#undef _OSSpinLockLockSlow
extern void _OSSpinLockLockSlow(volatile OSSpinLock *l);
#endif

#if !OS_LOCK_VARIANT_ONLY

#if OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK && !TARGET_OS_SIMULATOR

typedef struct _os_nospin_lock_s *_os_nospin_lock_t;
void _os_nospin_lock_lock(_os_nospin_lock_t lock);
bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
void _os_nospin_lock_unlock(_os_nospin_lock_t lock);

void
OSSpinLockLock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_lock, OSSpinLockLock);
	OS_ATOMIC_ALIAS(_spin_lock, OSSpinLockLock);
	return _os_nospin_lock_lock((_os_nospin_lock_t)l);
}

bool
OSSpinLockTry(volatile OSSpinLock *l)
{
	return _os_nospin_lock_trylock((_os_nospin_lock_t)l);
}

int
spin_lock_try(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(_spin_lock_try, spin_lock_try);
	return _os_nospin_lock_trylock((_os_nospin_lock_t)l);
}

void
OSSpinLockUnlock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_unlock, OSSpinLockUnlock);
	OS_ATOMIC_ALIAS(_spin_unlock, OSSpinLockUnlock);
	return _os_nospin_lock_unlock((_os_nospin_lock_t)l);
}

#undef OS_ATOMIC_ALIAS
#define OS_ATOMIC_ALIAS(n, o)
static void _OSSpinLockLock(volatile OSSpinLock *l);
#undef OSSpinLockLock
#define OSSpinLockLock _OSSpinLockLock
static bool _OSSpinLockTry(volatile OSSpinLock *l);
#undef OSSpinLockTry
#define OSSpinLockTry _OSSpinLockTry
static __unused int __spin_lock_try(volatile OSSpinLock *l);
#undef spin_lock_try
#define spin_lock_try __spin_lock_try
static void _OSSpinLockUnlock(volatile OSSpinLock *l);
#undef OSSpinLockUnlock
#define OSSpinLockUnlock _OSSpinLockUnlock

#endif // OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK

void
OSSpinLockLock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_lock, OSSpinLockLock);
	OS_ATOMIC_ALIAS(_spin_lock, OSSpinLockLock);
	bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
	if (likely(r)) return;
	return _OSSpinLockLockSlow(l);
}

bool
OSSpinLockTry(volatile OSSpinLock *l)
{
	bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
	return r;
}

int
spin_lock_try(volatile OSSpinLock *l) // <rdar://problem/13316060>
{
	OS_ATOMIC_ALIAS(_spin_lock_try, spin_lock_try);
	return OSSpinLockTry(l);
}

void
OSSpinLockUnlock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_unlock, OSSpinLockUnlock);
	OS_ATOMIC_ALIAS(_spin_unlock, OSSpinLockUnlock);
	os_atomic_store(l, 0, release);
}

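// Illustrative sketch of legacy client usage (not part of libplatform):
// an OSSpinLock is just a zero-initialized word (OS_SPINLOCK_INIT);
// list_node_t and the list mutation are hypothetical.
#if 0
static OSSpinLock list_lock = OS_SPINLOCK_INIT;

static void
list_push(list_node_t *node)
{
	OSSpinLockLock(&list_lock);
	// ... link node into the shared list ...
	OSSpinLockUnlock(&list_lock);
}
#endif
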
#pragma mark -
#pragma mark os_lock_spin_t

OS_LOCK_STRUCT_DECL_INTERNAL(spin,
	OSSpinLock volatile osl_spinlock;
);
#if !OS_VARIANT_ONLY
OS_LOCK_METHODS_DECL(spin);
OS_LOCK_TYPE_INSTANCE(spin);
#endif // !OS_VARIANT_ONLY

#ifdef OS_VARIANT_SELECTOR
#define _os_lock_spin_lock \
		OS_VARIANT(_os_lock_spin_lock, OS_VARIANT_SELECTOR)
#define _os_lock_spin_trylock \
		OS_VARIANT(_os_lock_spin_trylock, OS_VARIANT_SELECTOR)
#define _os_lock_spin_unlock \
		OS_VARIANT(_os_lock_spin_unlock, OS_VARIANT_SELECTOR)
OS_LOCK_METHODS_DECL(spin);
#endif // OS_VARIANT_SELECTOR

void
_os_lock_spin_lock(_os_lock_spin_t l)
{
	return OSSpinLockLock(&l->osl_spinlock);
}

bool
_os_lock_spin_trylock(_os_lock_spin_t l)
{
	return OSSpinLockTry(&l->osl_spinlock);
}

void
_os_lock_spin_unlock(_os_lock_spin_t l)
{
	return OSSpinLockUnlock(&l->osl_spinlock);
}

#pragma mark -
#pragma mark os_lock_owner_t

#ifndef __TSD_MACH_THREAD_SELF
#define __TSD_MACH_THREAD_SELF 3
#endif

typedef mach_port_name_t os_lock_owner_t;

OS_ALWAYS_INLINE
static inline os_lock_owner_t
_os_lock_owner_get_self(void)
{
	os_lock_owner_t self;
	self = (os_lock_owner_t)_os_tsd_get_direct(__TSD_MACH_THREAD_SELF);
	return self;
}

#define OS_LOCK_NO_OWNER MACH_PORT_NULL

#if !OS_LOCK_VARIANT_ONLY

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_lock_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_lock");
}

#endif //!OS_LOCK_VARIANT_ONLY

#pragma mark -
#pragma mark os_lock_handoff_t

OS_LOCK_STRUCT_DECL_INTERNAL(handoff,
	os_lock_owner_t volatile osl_owner;
);
#if !OS_VARIANT_ONLY
OS_LOCK_METHODS_DECL(handoff);
OS_LOCK_TYPE_INSTANCE(handoff);
#endif // !OS_VARIANT_ONLY

#ifdef OS_VARIANT_SELECTOR
#define _os_lock_handoff_lock \
		OS_VARIANT(_os_lock_handoff_lock, OS_VARIANT_SELECTOR)
#define _os_lock_handoff_trylock \
		OS_VARIANT(_os_lock_handoff_trylock, OS_VARIANT_SELECTOR)
#define _os_lock_handoff_unlock \
		OS_VARIANT(_os_lock_handoff_unlock, OS_VARIANT_SELECTOR)
OS_LOCK_METHODS_DECL(handoff);
#endif // OS_VARIANT_SELECTOR

#define OS_LOCK_HANDOFF_YIELD_TRIES 100

OS_NOINLINE
static void
_os_lock_handoff_lock_slow(_os_lock_handoff_t l)
{
	int option = SWITCH_OPTION_OSLOCK_DEPRESS;
	mach_msg_timeout_t timeout = 1;
	uint32_t tries = OS_LOCK_HANDOFF_YIELD_TRIES;
	os_lock_owner_t self = _os_lock_owner_get_self(), owner;
	while (unlikely(owner = l->osl_owner)) {
_handoff:
		if (unlikely(owner == self)) return _os_lock_recursive_abort(self);
		// Yield until tries first hits zero, then permanently switch to wait
		if (unlikely(!tries--)) option = SWITCH_OPTION_OSLOCK_WAIT;
		thread_switch(owner, option, timeout);
		// Redrive the handoff every 1ms until switching to wait
		if (option == SWITCH_OPTION_OSLOCK_WAIT) timeout++;
	}
	bool r = os_atomic_cmpxchgv2o(l, osl_owner, MACH_PORT_NULL, self, &owner,
			acquire);
	if (likely(r)) return;
	goto _handoff;
}

void
_os_lock_handoff_lock(_os_lock_handoff_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, osl_owner, MACH_PORT_NULL, self, acquire);
	if (likely(r)) return;
	return _os_lock_handoff_lock_slow(l);
}

bool
_os_lock_handoff_trylock(_os_lock_handoff_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, osl_owner, MACH_PORT_NULL, self, acquire);
	return r;
}

void
_os_lock_handoff_unlock(_os_lock_handoff_t l)
{
	os_atomic_store2o(l, osl_owner, MACH_PORT_NULL, release);
}

#pragma mark -
#pragma mark os_ulock_value_t

#include <sys/errno.h>
#include <sys/ulock.h>

typedef os_lock_owner_t os_ulock_value_t;

// This assumes that all thread mach port values always have the low bit set!
// Clearing this bit is used to communicate the existence of waiters to unlock.
#define OS_ULOCK_NOWAITERS_BIT ((os_ulock_value_t)1u)
#define OS_ULOCK_OWNER(value) ((value) | OS_ULOCK_NOWAITERS_BIT)

#define OS_ULOCK_ANONYMOUS_OWNER MACH_PORT_DEAD
#define OS_ULOCK_IS_OWNER(value, self) ({ \
		os_lock_owner_t _owner = OS_ULOCK_OWNER(value); \
		(_owner == (self) && _owner != OS_ULOCK_ANONYMOUS_OWNER); })
#define OS_ULOCK_IS_NOT_OWNER(value, self) ({ \
		os_lock_owner_t _owner = OS_ULOCK_OWNER(value); \
		(_owner != (self) && _owner != OS_ULOCK_ANONYMOUS_OWNER); })

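// Worked example (illustrative): a thread whose mach port name is 0x1003
// (low bit set, per the invariant above) owns the lock, so the ulock value
// is 0x1003. A contending thread clears OS_ULOCK_NOWAITERS_BIT before
// waiting, leaving 0x1002; the unlocker sees the cleared bit and knows
// waiters must be woken, while OS_ULOCK_OWNER(0x1002) still recovers the
// owner name 0x1003.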

#pragma mark -
#pragma mark os_unfair_lock

typedef struct _os_unfair_lock_s {
	os_ulock_value_t oul_value;
} *_os_unfair_lock_t;

_Static_assert(sizeof(struct os_unfair_lock_s) ==
		sizeof(struct _os_unfair_lock_s), "os_unfair_lock size mismatch");

OS_ATOMIC_EXPORT void os_unfair_lock_lock(os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options);
OS_ATOMIC_EXPORT bool os_unfair_lock_trylock(os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock(os_unfair_lock_t lock);

OS_ATOMIC_EXPORT void os_unfair_lock_lock_no_tsd_4libpthread(
		os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock_no_tsd_4libpthread(
		os_unfair_lock_t lock);

_Static_assert(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION ==
		ULF_WAIT_WORKQ_DATA_CONTENTION,
		"check value for OS_UNFAIR_LOCK_OPTIONS_MASK");
#define OS_UNFAIR_LOCK_OPTIONS_MASK \
		(os_unfair_lock_options_t)(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION)

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_unfair_lock_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_unfair_lock");
}

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_unfair_lock_unowned_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_unfair_lock not "
			"owned by current thread");
}

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_unfair_lock_corruption_abort(os_ulock_value_t current)
{
	__LIBPLATFORM_CLIENT_CRASH__(current, "os_unfair_lock is corrupt");
}

OS_NOINLINE
static void
_os_unfair_lock_lock_slow(_os_unfair_lock_t l, os_lock_owner_t self,
		os_unfair_lock_options_t options)
{
	os_ulock_value_t current, new, waiters_mask = 0;
	if (unlikely(options & ~OS_UNFAIR_LOCK_OPTIONS_MASK)) {
		__LIBPLATFORM_CLIENT_CRASH__(options, "Invalid options");
	}
	while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
			OS_LOCK_NO_OWNER)) {
_retry:
		if (unlikely(OS_ULOCK_IS_OWNER(current, self))) {
			return _os_unfair_lock_recursive_abort(self);
		}
		new = current & ~OS_ULOCK_NOWAITERS_BIT;
		if (current != new) {
			// Clear nowaiters bit in lock value before waiting
			if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
					relaxed)) {
				continue;
			}
			current = new;
		}
		int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO | options,
				l, current, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
			case EFAULT:
				continue;
			case EOWNERDEAD:
				_os_unfair_lock_corruption_abort(current);
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
		// If there are more waiters, unset nowaiters bit when acquiring lock
		waiters_mask = (ret > 0) ? OS_ULOCK_NOWAITERS_BIT : 0;
	}
	new = self & ~waiters_mask;
	bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
			&current, acquire);
	if (unlikely(!r)) goto _retry;
}

OS_NOINLINE
static void
_os_unfair_lock_unlock_slow(_os_unfair_lock_t l, os_ulock_value_t current,
		os_lock_owner_t self)
{
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self))) {
		return _os_unfair_lock_unowned_abort(OS_ULOCK_OWNER(current));
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO, l, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

void
os_unfair_lock_lock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self, OS_UNFAIR_LOCK_NONE);
}

void
os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self, options);
}

bool
os_unfair_lock_trylock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	return r;
}

void
os_unfair_lock_unlock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow(l, current, self);
}

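// Illustrative sketch of client usage via the public <os/lock.h> API
// (not part of this file); the guarded counter is hypothetical. The fast
// path above is a single compare-and-swap, so uncontended lock/unlock
// never enters the kernel.
#if 0
static os_unfair_lock counter_lock = OS_UNFAIR_LOCK_INIT;
static int counter;

static void
counter_increment(void)
{
	os_unfair_lock_lock(&counter_lock);
	counter++;
	os_unfair_lock_unlock(&counter_lock);
}
#endif
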
void
os_unfair_lock_lock_no_tsd_4libpthread(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = OS_ULOCK_ANONYMOUS_OWNER;
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self,
			OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION);
}

void
os_unfair_lock_unlock_no_tsd_4libpthread(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = OS_ULOCK_ANONYMOUS_OWNER;
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow(l, current, self);
}

#pragma mark -
#pragma mark _os_lock_unfair_t 4Libc // <rdar://problem/27138264>

OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options_4Libc(
		os_unfair_lock_t lock, os_unfair_lock_options_t options);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock_4Libc(os_unfair_lock_t lock);

OS_NOINLINE
static void
_os_unfair_lock_lock_slow_4Libc(_os_unfair_lock_t l, os_lock_owner_t self,
		os_unfair_lock_options_t options)
{
	os_ulock_value_t current, new, waiters_mask = 0;
	if (unlikely(options & ~OS_UNFAIR_LOCK_OPTIONS_MASK)) {
		__LIBPLATFORM_CLIENT_CRASH__(options, "Invalid options");
	}
	while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
			OS_LOCK_NO_OWNER)) {
_retry:
		if (unlikely(OS_ULOCK_IS_OWNER(current, self))) {
			return _os_unfair_lock_recursive_abort(self);
		}
		new = current & ~OS_ULOCK_NOWAITERS_BIT;
		if (current != new) {
			// Clear nowaiters bit in lock value before waiting
			if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
					relaxed)) {
				continue;
			}
			current = new;
		}
		int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO | options,
				l, current, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
			case EFAULT:
				continue;
			case EOWNERDEAD:
				// EOWNERDEAD may indicate corruption of the lock, so for
				// the Libc locks, if we can steal the lock, assume it is
				// corruption and pretend we got the lock with contention
				new = self & ~OS_ULOCK_NOWAITERS_BIT;
				if (os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
						acquire)) {
					return;
				}
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
		// If there are more waiters, unset nowaiters bit when acquiring lock
		waiters_mask = (ret > 0) ? OS_ULOCK_NOWAITERS_BIT : 0;
	}
	new = self & ~waiters_mask;
	bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
			&current, acquire);
	if (unlikely(!r)) goto _retry;
}

OS_NOINLINE
static void
_os_unfair_lock_unlock_slow_4Libc(_os_unfair_lock_t l)
{
	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO, l, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

void
os_unfair_lock_lock_with_options_4Libc(os_unfair_lock_t lock,
		os_unfair_lock_options_t options)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow_4Libc(l, self, options);
}

void
os_unfair_lock_unlock_4Libc(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow_4Libc(l);
}

#if !OS_VARIANT_ONLY
void
os_unfair_lock_assert_owner(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed);
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self))) {
		__LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
				"Lock unexpectedly not owned by current thread");
	}
}

void
os_unfair_lock_assert_not_owner(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed);
	if (unlikely(OS_ULOCK_IS_OWNER(current, self))) {
		__LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
				"Lock unexpectedly owned by current thread");
	}
}
#endif

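// Illustrative sketch (not part of this file): a helper that requires its
// caller to already hold the lock can document and enforce that
// precondition with the assert API; counter_lock and counter are the
// hypothetical globals from the sketch above.
#if 0
static void
counter_increment_locked(void)
{
	os_unfair_lock_assert_owner(&counter_lock);
	counter++;
}
#endif
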
#pragma mark -
#pragma mark _os_lock_unfair_t

OS_LOCK_STRUCT_DECL_INTERNAL(unfair,
	os_unfair_lock osl_unfair_lock;
);
#if !OS_VARIANT_ONLY
OS_LOCK_METHODS_DECL(unfair);
OS_LOCK_TYPE_INSTANCE(unfair);
#endif // !OS_VARIANT_ONLY

#ifdef OS_VARIANT_SELECTOR
#define _os_lock_unfair_lock \
		OS_VARIANT(_os_lock_unfair_lock, OS_VARIANT_SELECTOR)
#define _os_lock_unfair_trylock \
		OS_VARIANT(_os_lock_unfair_trylock, OS_VARIANT_SELECTOR)
#define _os_lock_unfair_unlock \
		OS_VARIANT(_os_lock_unfair_unlock, OS_VARIANT_SELECTOR)
OS_LOCK_METHODS_DECL(unfair);
#endif // OS_VARIANT_SELECTOR

void
_os_lock_unfair_lock(_os_lock_unfair_t l)
{
	return os_unfair_lock_lock(&l->osl_unfair_lock);
}

bool
_os_lock_unfair_trylock(_os_lock_unfair_t l)
{
	return os_unfair_lock_trylock(&l->osl_unfair_lock);
}

void
_os_lock_unfair_unlock(_os_lock_unfair_t l)
{
	return os_unfair_lock_unlock(&l->osl_unfair_lock);
}

#pragma mark -
#pragma mark _os_nospin_lock

typedef struct _os_nospin_lock_s {
	os_ulock_value_t oul_value;
} _os_nospin_lock, *_os_nospin_lock_t;

_Static_assert(sizeof(OSSpinLock) ==
		sizeof(struct _os_nospin_lock_s), "os_nospin_lock size mismatch");

OS_ATOMIC_EXPORT void _os_nospin_lock_lock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT void _os_nospin_lock_unlock(_os_nospin_lock_t lock);

OS_NOINLINE
static void
_os_nospin_lock_lock_slow(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current, new, waiters_mask = 0;
	uint32_t timeout = 1;
	while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
			OS_LOCK_NO_OWNER)) {
_retry:
		new = current & ~OS_ULOCK_NOWAITERS_BIT;
		// For safer compatibility with OSSpinLock where _OSSpinLockLocked may
		// be 1, check that new didn't become 0 (unlocked) by clearing this bit
		if (current != new && new) {
			// Clear nowaiters bit in lock value before waiting
			if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
					relaxed)) {
				continue;
			}
			current = new;
		}
		// __ulock_wait() timeout is in microseconds; back off in 1 ms steps
		int ret = __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, l, current,
				timeout * 1000);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case ETIMEDOUT:
				timeout++;
				continue;
			case EINTR:
			case EFAULT:
				continue;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
		// If there are more waiters, unset nowaiters bit when acquiring lock
		waiters_mask = (ret > 0) ? OS_ULOCK_NOWAITERS_BIT : 0;
	}
	new = self & ~waiters_mask;
	bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
			&current, acquire);
	if (unlikely(!r)) goto _retry;
}

OS_NOINLINE
static void
_os_nospin_lock_unlock_slow(_os_nospin_lock_t l, os_ulock_value_t current)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	if (unlikely(OS_ULOCK_OWNER(current) != self)) {
		return; // no unowned_abort for drop-in compatibility with OSSpinLock
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, l, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

void
_os_nospin_lock_lock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_nospin_lock_lock_slow(l);
}

bool
_os_nospin_lock_trylock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	return r;
}

void
_os_nospin_lock_unlock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_nospin_lock_unlock_slow(l, current);
}

#pragma mark -
#pragma mark _os_lock_nospin_t

OS_LOCK_STRUCT_DECL_INTERNAL(nospin,
	_os_nospin_lock osl_nospin_lock;
);
#if !OS_VARIANT_ONLY
OS_LOCK_METHODS_DECL(nospin);
OS_LOCK_TYPE_INSTANCE(nospin);
#endif // !OS_VARIANT_ONLY

#ifdef OS_VARIANT_SELECTOR
#define _os_lock_nospin_lock \
		OS_VARIANT(_os_lock_nospin_lock, OS_VARIANT_SELECTOR)
#define _os_lock_nospin_trylock \
		OS_VARIANT(_os_lock_nospin_trylock, OS_VARIANT_SELECTOR)
#define _os_lock_nospin_unlock \
		OS_VARIANT(_os_lock_nospin_unlock, OS_VARIANT_SELECTOR)
OS_LOCK_METHODS_DECL(nospin);
#endif // OS_VARIANT_SELECTOR

void
_os_lock_nospin_lock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_lock(&l->osl_nospin_lock);
}

bool
_os_lock_nospin_trylock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_trylock(&l->osl_nospin_lock);
}

void
_os_lock_nospin_unlock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_unlock(&l->osl_nospin_lock);
}

#pragma mark -
#pragma mark os_once_t

typedef struct os_once_gate_s {
	union {
		os_ulock_value_t ogo_lock;
		os_once_t ogo_once;
	};
} os_once_gate_s, *os_once_gate_t;

#define OS_ONCE_INIT ((os_once_t)0l)
#define OS_ONCE_DONE (~(os_once_t)0l)

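// State encoding (summary of the code below): while the initializer runs,
// ogo_once holds the initiating thread's port name, with ulock waiter
// accounting in the low bit via the ogo_lock alias; OS_ONCE_DONE marks
// completion, and __os_once_reset() returns the gate to OS_ONCE_INIT.
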
OS_ATOMIC_EXPORT void _os_once(os_once_t *val, void *ctxt, os_function_t func);
OS_ATOMIC_EXPORT void __os_once_reset(os_once_t *val);

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_once_gate_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_once_t");
}

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_once_gate_unowned_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_once_t not "
			"owned by current thread");
}

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_once_gate_corruption_abort(os_ulock_value_t current)
{
	__LIBPLATFORM_CLIENT_CRASH__(current, "os_once_t is corrupt");
}

OS_NOINLINE
static void
_os_once_gate_wait_slow(os_ulock_value_t *gate, os_lock_owner_t self)
{
	os_ulock_value_t tid_old, tid_new;

	for (;;) {
		os_atomic_rmw_loop(gate, tid_old, tid_new, relaxed, {
			switch (tid_old) {
			case (os_ulock_value_t)OS_ONCE_INIT: // raced with __os_once_reset()
			case (os_ulock_value_t)OS_ONCE_DONE: // raced with _os_once()
				os_atomic_rmw_loop_give_up(return);
			}
			tid_new = tid_old & ~OS_ULOCK_NOWAITERS_BIT;
			if (tid_new == tid_old) os_atomic_rmw_loop_give_up(break);
		});
		if (unlikely(OS_ULOCK_IS_OWNER(tid_old, self))) {
			return _os_once_gate_recursive_abort(self);
		}
		int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO,
				gate, tid_new, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
			case EFAULT:
				continue;
			case EOWNERDEAD:
				_os_once_gate_corruption_abort(tid_old);
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
	}
}

OS_NOINLINE
static void
_os_once_gate_broadcast_slow(os_ulock_value_t *gate, os_ulock_value_t current,
		os_lock_owner_t self)
{
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self))) {
		return _os_once_gate_unowned_abort(OS_ULOCK_OWNER(current));
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO | ULF_WAKE_ALL,
				gate, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

OS_ALWAYS_INLINE
static void
_os_once_gate_set_value_and_broadcast(os_once_gate_t og, os_lock_owner_t self,
		os_once_t value)
{
	// The next barrier must be long and strong.
	//
	// The scenario: SMP systems with weakly ordered memory models
	// and aggressive out-of-order instruction execution.
	//
	// The problem:
	//
	// The os_once*() wrapper macro causes the callee's
	// instruction stream to look like this (pseudo-RISC):
	//
	//      load r5, pred-addr
	//      cmpi r5, -1
	//      beq  1f
	//      call os_once*()
	//      1f:
	//      load r6, data-addr
	//
	// May be re-ordered like so:
	//
	//      load r6, data-addr
	//      load r5, pred-addr
	//      cmpi r5, -1
	//      beq  1f
	//      call os_once*()
	//      1f:
	//
	// Normally, a barrier on the read side is used to work around
	// the weakly ordered memory model. But barriers are expensive
	// and we only need to synchronize once! After func(ctxt)
	// completes, the predicate will be marked as "done" and the
	// branch predictor will correctly skip the call to
	// os_once*().
	//
	// A far faster alternative solution: Defeat the speculative
	// read-ahead of peer CPUs.
	//
	// Modern architectures will throw away speculative results
	// once a branch mis-prediction occurs. Therefore, if we can
	// ensure that the predicate is not marked as being complete
	// until long after the last store by func(ctxt), then we have
	// defeated the read-ahead of peer CPUs.
	//
	// In other words, the last "store" by func(ctxt) must complete
	// and then N cycles must elapse before ~0l is stored to *val.
	// The value of N is whatever is sufficient to defeat the
	// read-ahead mechanism of peer CPUs.
	//
	// On some CPUs, the most fully synchronizing instruction might
	// need to be issued.
	os_atomic_maximally_synchronizing_barrier();

	// above assumed to contain release barrier
	os_ulock_value_t current =
			(os_ulock_value_t)os_atomic_xchg(&og->ogo_once, value, relaxed);
	if (likely(current == self)) return;
	_os_once_gate_broadcast_slow(&og->ogo_lock, current, self);
}

// Atomically resets the once value to zero and then signals all
// pending waiters to return from their _os_once_gate_wait_slow()
void
__os_once_reset(os_once_t *val)
{
	os_once_gate_t og = (os_once_gate_t)val;
	os_lock_owner_t self = _os_lock_owner_get_self();
	_os_once_gate_set_value_and_broadcast(og, self, OS_ONCE_INIT);
}

void
_os_once(os_once_t *val, void *ctxt, os_function_t func)
{
	os_once_gate_t og = (os_once_gate_t)val;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_once_t v = (os_once_t)self;

	if (likely(os_atomic_cmpxchg(&og->ogo_once, OS_ONCE_INIT, v, relaxed))) {
		func(ctxt);
		_os_once_gate_set_value_and_broadcast(og, self, OS_ONCE_DONE);
	} else {
		_os_once_gate_wait_slow(&og->ogo_lock, self);
	}
}

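// Illustrative sketch of client usage (not part of this file), assuming the
// once wrapper exposed by <os/once_private.h> performs the fast-path check
// and calls through to _os_once on the slow path; names are hypothetical.
#if 0
static os_once_t init_pred;
static void *shared_resource;

static void
init_shared_resource(void *ctxt OS_UNUSED)
{
	shared_resource = malloc(16); // hypothetical one-time setup
}

static void *
get_shared_resource(void)
{
	os_once(&init_pred, NULL, init_shared_resource);
	return shared_resource;
}
#endif
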
#if !OS_VARIANT_ONLY

#pragma mark -
#pragma mark os_lock_eliding_t

#if !TARGET_OS_IPHONE

#define _os_lock_eliding_t _os_lock_spin_t
#define _os_lock_eliding_lock _os_lock_spin_lock
#define _os_lock_eliding_trylock _os_lock_spin_trylock
#define _os_lock_eliding_unlock _os_lock_spin_unlock
OS_LOCK_METHODS_DECL(eliding);
OS_LOCK_TYPE_INSTANCE(eliding);

#pragma mark -
#pragma mark os_lock_transactional_t

OS_LOCK_STRUCT_DECL_INTERNAL(transactional,
	uintptr_t volatile osl_lock;
);

#define _os_lock_transactional_t _os_lock_eliding_t
#define _os_lock_transactional_lock _os_lock_eliding_lock
#define _os_lock_transactional_trylock _os_lock_eliding_trylock
#define _os_lock_transactional_unlock _os_lock_eliding_unlock
OS_LOCK_METHODS_DECL(transactional);
OS_LOCK_TYPE_INSTANCE(transactional);

#endif // !TARGET_OS_IPHONE
#endif // !OS_VARIANT_ONLY
#endif // !OS_LOCK_VARIANT_ONLY