/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#include "lock_internal.h"
#include "os/internal.h"
#include "resolver.h"
#include "libkern/OSAtomic.h"
#include "os/lock.h"
#include "os/lock_private.h"
#include "os/once_private.h"

#include <mach/mach_init.h>
#include <mach/mach_traps.h>
#include <mach/thread_switch.h>
#include <mach/mach_time.h>
#include <os/tsd.h>

#pragma mark -
#pragma mark _os_lock_base_t

OS_NOINLINE OS_NORETURN OS_COLD
void _os_lock_corruption_abort(void *lock_ptr OS_UNUSED, uintptr_t lock_value);


OS_LOCK_STRUCT_DECL_INTERNAL(base);
OS_USED static OS_LOCK_TYPE_STRUCT_DECL(base);

void
os_lock_lock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_lock(l);
}

bool
os_lock_trylock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_trylock(l);
}

void
os_lock_unlock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_unlock(l);
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_lock_corruption_abort(void *lock_ptr OS_UNUSED, uintptr_t lock_value)
{
	__LIBPLATFORM_CLIENT_CRASH__(lock_value, "os_lock is corrupt");
}


#pragma mark -
#pragma mark OSSpinLock

OS_NOEXPORT OS_NOINLINE void _OSSpinLockLockSlow(volatile OSSpinLock *l);

OS_ATOMIC_EXPORT void OSSpinLockLock(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT bool OSSpinLockTry(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT int spin_lock_try(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT void OSSpinLockUnlock(volatile OSSpinLock *l);

static const OSSpinLock _OSSpinLockLocked = TARGET_OS_EMBEDDED ? 1 : -1;


#if OS_ATOMIC_UP
// Don't spin on UP
#else
#define OS_LOCK_SPIN_SPIN_TRIES 1000
#define OS_LOCK_SPIN_PAUSE() os_hardware_pause()
#endif

OS_ALWAYS_INLINE
static uint64_t
_os_lock_yield_deadline(mach_msg_timeout_t timeout)
{
	uint64_t abstime = timeout * NSEC_PER_MSEC;
#if !(defined(__i386__) || defined(__x86_64__))
	mach_timebase_info_data_t tbi;
	kern_return_t kr = mach_timebase_info(&tbi);
	if (kr) return UINT64_MAX;
	abstime *= tbi.denom;
	abstime /= tbi.numer;
#endif
	return mach_absolute_time() + abstime;
}

OS_ALWAYS_INLINE
static bool
_os_lock_yield_until(uint64_t deadline)
{
	return mach_absolute_time() < deadline;
}
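
// Worked example (editor's note, not part of the original source): on Intel
// the mach timebase is 1:1, so a 1ms timeout is simply 1,000,000
// absolute-time units and the conversion above is skipped. With a
// hypothetical timebase of numer=125, denom=3 (ns = ticks * numer / denom),
// the inverse conversion in _os_lock_yield_deadline() gives:
//	abstime = 1,000,000ns * denom / numer = 1,000,000 * 3 / 125 = 24,000 ticks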

OS_NOINLINE
static void
_OSSpinLockLockYield(volatile OSSpinLock *l)
{
	int option = SWITCH_OPTION_DEPRESS;
	mach_msg_timeout_t timeout = 1;
	uint64_t deadline = _os_lock_yield_deadline(timeout);
	OSSpinLock lock;
	while (unlikely(lock = *l)) {
_yield:
		if (unlikely(lock != _OSSpinLockLocked)) {
			_os_lock_corruption_abort((void *)l, (uintptr_t)lock);
		}
		thread_switch(MACH_PORT_NULL, option, timeout);
		if (option == SWITCH_OPTION_WAIT) {
			timeout++;
		} else if (!_os_lock_yield_until(deadline)) {
			option = SWITCH_OPTION_WAIT;
		}
	}
	bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
	if (likely(r)) return;
	goto _yield;
}

#if OS_ATOMIC_UP
void
_OSSpinLockLockSlow(volatile OSSpinLock *l)
{
	return _OSSpinLockLockYield(l); // Don't spin on UP
}
#else // !OS_ATOMIC_UP
void
_OSSpinLockLockSlow(volatile OSSpinLock *l)
{
	uint32_t tries = OS_LOCK_SPIN_SPIN_TRIES;
	OSSpinLock lock;
	while (unlikely(lock = *l)) {
_spin:
		if (unlikely(lock != _OSSpinLockLocked)) {
			return _os_lock_corruption_abort((void *)l, (uintptr_t)lock);
		}
		if (unlikely(!tries--)) return _OSSpinLockLockYield(l);
		OS_LOCK_SPIN_PAUSE();
	}
	bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
	if (likely(r)) return;
	goto _spin;
}
#endif // !OS_ATOMIC_UP


#if OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK && !TARGET_OS_SIMULATOR

typedef struct _os_nospin_lock_s *_os_nospin_lock_t;

OS_ATOMIC_EXPORT void _os_nospin_lock_lock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT void _os_nospin_lock_unlock(_os_nospin_lock_t lock);

void
OSSpinLockLock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_lock, OSSpinLockLock);
	OS_ATOMIC_ALIAS(_spin_lock, OSSpinLockLock);
	return _os_nospin_lock_lock((_os_nospin_lock_t)l);
}

bool
OSSpinLockTry(volatile OSSpinLock *l)
{
	return _os_nospin_lock_trylock((_os_nospin_lock_t)l);
}

int
spin_lock_try(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(_spin_lock_try, spin_lock_try);
	return _os_nospin_lock_trylock((_os_nospin_lock_t)l);
}

void
OSSpinLockUnlock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_unlock, OSSpinLockUnlock);
	OS_ATOMIC_ALIAS(_spin_unlock, OSSpinLockUnlock);
	return _os_nospin_lock_unlock((_os_nospin_lock_t)l);
}

#undef OS_ATOMIC_ALIAS
#define OS_ATOMIC_ALIAS(n, o)
static void _OSSpinLockLock(volatile OSSpinLock *l);
#undef OSSpinLockLock
#define OSSpinLockLock _OSSpinLockLock
static bool _OSSpinLockTry(volatile OSSpinLock *l);
#undef OSSpinLockTry
#define OSSpinLockTry _OSSpinLockTry
static __unused int __spin_lock_try(volatile OSSpinLock *l);
#undef spin_lock_try
#define spin_lock_try __spin_lock_try
static void _OSSpinLockUnlock(volatile OSSpinLock *l);
#undef OSSpinLockUnlock
#define OSSpinLockUnlock _OSSpinLockUnlock

#endif // OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK
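
// Editor's note on the preceding block: when OSSpinLock is really a nospin
// lock, the exported OSSpinLock* symbols (plus their legacy spin_lock
// aliases) forward to _os_nospin_lock_*. OS_ATOMIC_ALIAS is then redefined
// to a no-op and the OSSpinLock* names are #define-renamed onto static
// helpers, so the genuinely spinning definitions below remain available
// inside this file (e.g. for os_lock_spin_t) without re-exporting the
// public symbol names.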

void
OSSpinLockLock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_lock, OSSpinLockLock);
	OS_ATOMIC_ALIAS(_spin_lock, OSSpinLockLock);
	bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
	if (likely(r)) return;
	return _OSSpinLockLockSlow(l);
}

bool
OSSpinLockTry(volatile OSSpinLock *l)
{
	bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
	return r;
}

int
spin_lock_try(volatile OSSpinLock *l) // <rdar://problem/13316060>
{
	OS_ATOMIC_ALIAS(_spin_lock_try, spin_lock_try);
	return OSSpinLockTry(l);
}

void
OSSpinLockUnlock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_unlock, OSSpinLockUnlock);
	OS_ATOMIC_ALIAS(_spin_unlock, OSSpinLockUnlock);
	os_atomic_store(l, 0, release);
}
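
// Usage sketch (editor's illustration): the classic OSSpinLock API these
// functions implement. OSSpinLock is initialized to 0 (OS_SPINLOCK_INIT);
// the locked value is 1 on embedded targets and -1 elsewhere, so any other
// value trips _os_lock_corruption_abort() in the slow path.
//
//	static OSSpinLock lock = OS_SPINLOCK_INIT;	// i.e. 0, unlocked
//	OSSpinLockLock(&lock);		// fast path: cmpxchg 0 -> locked
//	/* ... critical section ... */
//	OSSpinLockUnlock(&lock);	// store-release of 0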


#pragma mark -
#pragma mark os_lock_spin_t

OS_LOCK_STRUCT_DECL_INTERNAL(spin,
	OSSpinLock volatile osl_spinlock;
);
OS_LOCK_METHODS_DECL(spin);
OS_LOCK_TYPE_INSTANCE(spin);

void
_os_lock_spin_lock(_os_lock_spin_t l)
{
	return OSSpinLockLock(&l->osl_spinlock);
}

bool
_os_lock_spin_trylock(_os_lock_spin_t l)
{
	return OSSpinLockTry(&l->osl_spinlock);
}

void
_os_lock_spin_unlock(_os_lock_spin_t l)
{
	return OSSpinLockUnlock(&l->osl_spinlock);
}


#pragma mark -
#pragma mark os_lock_owner_t

#ifndef __TSD_MACH_THREAD_SELF
#define __TSD_MACH_THREAD_SELF 3
#endif

typedef mach_port_name_t os_lock_owner_t;
#define OS_LOCK_NO_OWNER MACH_PORT_NULL


OS_ALWAYS_INLINE
static inline os_lock_owner_t
_os_lock_owner_get_self(void)
{
	os_lock_owner_t self;
	self = (os_lock_owner_t)_os_tsd_get_direct(__TSD_MACH_THREAD_SELF);
	return self;
}
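
// Editor's note (assumes libpthread behavior): __TSD_MACH_THREAD_SELF is a
// thread-specific-data slot that libpthread fills with the thread's mach
// port name, so _os_tsd_get_direct() resolves the owner with a single load
// rather than a mach_thread_self() trap (which would also take a port
// reference the caller must release).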


OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_lock_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_lock");
}


#pragma mark -
#pragma mark os_lock_handoff_t

OS_LOCK_STRUCT_DECL_INTERNAL(handoff,
	os_lock_owner_t volatile osl_owner;
);
OS_LOCK_METHODS_DECL(handoff);
OS_LOCK_TYPE_INSTANCE(handoff);

#define OS_LOCK_HANDOFF_YIELD_TRIES 100

OS_NOINLINE
static void
_os_lock_handoff_lock_slow(_os_lock_handoff_t l)
{
	int option = SWITCH_OPTION_OSLOCK_DEPRESS;
	mach_msg_timeout_t timeout = 1;
	uint32_t tries = OS_LOCK_HANDOFF_YIELD_TRIES;
	os_lock_owner_t self = _os_lock_owner_get_self(), owner;
	while (unlikely(owner = l->osl_owner)) {
_handoff:
		if (unlikely(owner == self)) return _os_lock_recursive_abort(self);
		// Yield until tries first hits zero, then permanently switch to wait
		if (unlikely(!tries--)) option = SWITCH_OPTION_OSLOCK_WAIT;
		thread_switch(owner, option, timeout);
		// Redrive the handoff every 1ms until switching to wait
		if (option == SWITCH_OPTION_OSLOCK_WAIT) timeout++;
	}
	bool r = os_atomic_cmpxchgv2o(l, osl_owner, MACH_PORT_NULL, self, &owner,
			acquire);
	if (likely(r)) return;
	goto _handoff;
}

void
_os_lock_handoff_lock(_os_lock_handoff_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, osl_owner, MACH_PORT_NULL, self, acquire);
	if (likely(r)) return;
	return _os_lock_handoff_lock_slow(l);
}

bool
_os_lock_handoff_trylock(_os_lock_handoff_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, osl_owner, MACH_PORT_NULL, self, acquire);
	return r;
}

void
_os_lock_handoff_unlock(_os_lock_handoff_t l)
{
	os_atomic_store2o(l, osl_owner, MACH_PORT_NULL, release);
}
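
// Editor's sketch of the escalation in _os_lock_handoff_lock_slow() above:
// because the lock word stores the owner's mach port name, a contended
// locker can hand its CPU time directly to the owner via thread_switch():
//
//	tries 100..1:	thread_switch(owner, SWITCH_OPTION_OSLOCK_DEPRESS, 1)
//			// brief priority-depressed handoff to the owner
//	tries == 0:	option becomes SWITCH_OPTION_OSLOCK_WAIT, and each
//			// further redrive waits 1ms longer than the last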


#pragma mark -
#pragma mark os_ulock_value_t

#include <sys/errno.h>
#include <sys/ulock.h>

typedef os_lock_owner_t os_ulock_value_t;

// This assumes that all thread mach port values always have the low bit set!
// Clearing this bit is used to communicate the existence of waiters to unlock.
#define OS_ULOCK_NOWAITERS_BIT ((os_ulock_value_t)1u)
#define OS_ULOCK_OWNER(value) ((value) | OS_ULOCK_NOWAITERS_BIT)

#define OS_ULOCK_ANONYMOUS_OWNER MACH_PORT_DEAD
#define OS_ULOCK_IS_OWNER(value, self, allow_anonymous_owner) ({ \
		os_lock_owner_t _owner = OS_ULOCK_OWNER(value); (_owner == (self)) && \
		(!(allow_anonymous_owner) || _owner != OS_ULOCK_ANONYMOUS_OWNER); })
#define OS_ULOCK_IS_NOT_OWNER(value, self, allow_anonymous_owner) ({ \
		os_lock_owner_t _owner = OS_ULOCK_OWNER(value); (_owner != (self)) && \
		(!(allow_anonymous_owner) || _owner != OS_ULOCK_ANONYMOUS_OWNER); })
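
// Worked example (editor's note): thread port names have the low bit set,
// e.g. self == 0x1003. An uncontended lock held by 0x1003 stores 0x1003;
// the first waiter clears the low bit (0x1003 -> 0x1002) before calling
// __ulock_wait(), so unlock sees a value != self and takes the slow path
// to issue a wake. OS_ULOCK_OWNER(0x1002) == 0x1003 recovers the owner.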

#pragma mark -
#pragma mark os_unfair_lock

typedef struct _os_unfair_lock_s {
	os_ulock_value_t oul_value;
} *_os_unfair_lock_t;

_Static_assert(sizeof(struct os_unfair_lock_s) ==
		sizeof(struct _os_unfair_lock_s), "os_unfair_lock size mismatch");

OS_ATOMIC_EXPORT void os_unfair_lock_lock(os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options);
OS_ATOMIC_EXPORT bool os_unfair_lock_trylock(os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock(os_unfair_lock_t lock);

OS_ATOMIC_EXPORT void os_unfair_lock_lock_no_tsd_4libpthread(
		os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock_no_tsd_4libpthread(
		os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options_4Libc(
		os_unfair_lock_t lock, os_unfair_lock_options_t options);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock_4Libc(os_unfair_lock_t lock);

OS_NOINLINE OS_NORETURN OS_COLD
void _os_unfair_lock_recursive_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_unfair_lock_unowned_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_unfair_lock_corruption_abort(os_ulock_value_t current);

_Static_assert(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION ==
		ULF_WAIT_WORKQ_DATA_CONTENTION,
		"check value for OS_UNFAIR_LOCK_OPTIONS_MASK");
#define OS_UNFAIR_LOCK_OPTIONS_MASK \
		(os_unfair_lock_options_t)(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION)
#define OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER 0x01000000u


OS_NOINLINE OS_NORETURN OS_COLD
void
_os_unfair_lock_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_unfair_lock");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_unfair_lock_unowned_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_unfair_lock not "
			"owned by current thread");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_unfair_lock_corruption_abort(os_ulock_value_t current)
{
	__LIBPLATFORM_CLIENT_CRASH__(current, "os_unfair_lock is corrupt");
}


OS_NOINLINE
static void
_os_unfair_lock_lock_slow(_os_unfair_lock_t l, os_lock_owner_t self,
		os_unfair_lock_options_t options)
{
	os_unfair_lock_options_t allow_anonymous_owner =
			options & OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	options &= ~OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	if (unlikely(options & ~OS_UNFAIR_LOCK_OPTIONS_MASK)) {
		__LIBPLATFORM_CLIENT_CRASH__(options, "Invalid options");
	}
	os_ulock_value_t current, new, waiters_mask = 0;
	while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
			OS_LOCK_NO_OWNER)) {
_retry:
		if (unlikely(OS_ULOCK_IS_OWNER(current, self, allow_anonymous_owner))) {
			return _os_unfair_lock_recursive_abort(self);
		}
		new = current & ~OS_ULOCK_NOWAITERS_BIT;
		if (current != new) {
			// Clear nowaiters bit in lock value before waiting
			if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
					relaxed)) {
				continue;
			}
			current = new;
		}
		int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO | options,
				l, current, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
			case EFAULT:
				continue;
			case EOWNERDEAD:
				_os_unfair_lock_corruption_abort(current);
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
		// If there are more waiters, unset nowaiters bit when acquiring lock
		waiters_mask = (ret > 0) ? OS_ULOCK_NOWAITERS_BIT : 0;
	}
	new = self & ~waiters_mask;
	bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
			&current, acquire);
	if (unlikely(!r)) goto _retry;
}

OS_NOINLINE
static void
_os_unfair_lock_unlock_slow(_os_unfair_lock_t l, os_ulock_value_t current,
		os_lock_owner_t self, os_unfair_lock_options_t options)
{
	os_unfair_lock_options_t allow_anonymous_owner =
			options & OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	options &= ~OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, allow_anonymous_owner))) {
		return _os_unfair_lock_unowned_abort(OS_ULOCK_OWNER(current));
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO, l, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

void
os_unfair_lock_lock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self, OS_UNFAIR_LOCK_NONE);
}

void
os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self, options);
}

bool
os_unfair_lock_trylock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	return r;
}

void
os_unfair_lock_unlock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow(l, current, self, 0);
}

void
os_unfair_lock_lock_no_tsd_4libpthread(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = OS_ULOCK_ANONYMOUS_OWNER;
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self,
			OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION |
			OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER);
}

void
os_unfair_lock_unlock_no_tsd_4libpthread(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = OS_ULOCK_ANONYMOUS_OWNER;
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow(l, current, self,
			OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER);
}


void
os_unfair_lock_assert_owner(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed);
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, 0))) {
		__LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
				"Lock unexpectedly not owned by current thread");
	}
}

void
os_unfair_lock_assert_not_owner(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed);
	if (unlikely(OS_ULOCK_IS_OWNER(current, self, 0))) {
		__LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
				"Lock unexpectedly owned by current thread");
	}
}
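
// Usage sketch (editor's illustration) of the public <os/lock.h> API
// implemented above:
//
//	static os_unfair_lock lock = OS_UNFAIR_LOCK_INIT;	// zero: no owner
//	os_unfair_lock_lock(&lock);
//	os_unfair_lock_assert_owner(&lock);	// aborts on any other thread
//	/* ... critical section ... */
//	os_unfair_lock_unlock(&lock);	// aborts if the caller is not the owner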


#pragma mark -
#pragma mark _os_lock_unfair_t

OS_LOCK_STRUCT_DECL_INTERNAL(unfair,
	os_unfair_lock osl_unfair_lock;
);
OS_LOCK_METHODS_DECL(unfair);
OS_LOCK_TYPE_INSTANCE(unfair);

void
_os_lock_unfair_lock(_os_lock_unfair_t l)
{
	return os_unfair_lock_lock(&l->osl_unfair_lock);
}

bool
_os_lock_unfair_trylock(_os_lock_unfair_t l)
{
	return os_unfair_lock_trylock(&l->osl_unfair_lock);
}

void
_os_lock_unfair_unlock(_os_lock_unfair_t l)
{
	return os_unfair_lock_unlock(&l->osl_unfair_lock);
}


#pragma mark -
#pragma mark _os_nospin_lock

typedef struct _os_nospin_lock_s {
	os_ulock_value_t oul_value;
} _os_nospin_lock, *_os_nospin_lock_t;

_Static_assert(sizeof(OSSpinLock) ==
		sizeof(struct _os_nospin_lock_s), "os_nospin_lock size mismatch");

OS_ATOMIC_EXPORT void _os_nospin_lock_lock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT void _os_nospin_lock_unlock(_os_nospin_lock_t lock);


OS_NOINLINE
static void
_os_nospin_lock_lock_slow(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current, new, waiters_mask = 0;
	uint32_t timeout = 1;
	while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
			OS_LOCK_NO_OWNER)) {
_retry:
		new = current & ~OS_ULOCK_NOWAITERS_BIT;
		// For safer compatibility with OSSpinLock where _OSSpinLockLocked may
		// be 1, check that new didn't become 0 (unlocked) by clearing this bit
		if (current != new && new) {
			// Clear nowaiters bit in lock value before waiting
			if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
					relaxed)) {
				continue;
			}
			current = new;
		}
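		// Editor's note: __ulock_wait() takes its timeout in microseconds,
		// while "timeout" here counts milliseconds; on ETIMEDOUT below the
		// counter grows by one, so each redrive waits 1ms longer.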
		int ret = __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, l, current,
				timeout * 1000);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case ETIMEDOUT:
				timeout++;
				continue;
			case EINTR:
			case EFAULT:
				continue;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
		// If there are more waiters, unset nowaiters bit when acquiring lock
		waiters_mask = (ret > 0) ? OS_ULOCK_NOWAITERS_BIT : 0;
	}
	new = self & ~waiters_mask;
	bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
			&current, acquire);
	if (unlikely(!r)) goto _retry;
}

OS_NOINLINE
static void
_os_nospin_lock_unlock_slow(_os_nospin_lock_t l, os_ulock_value_t current)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	if (unlikely(OS_ULOCK_OWNER(current) != self)) {
		return; // no unowned_abort for drop-in compatibility with OSSpinLock
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, l, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

void
_os_nospin_lock_lock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_nospin_lock_lock_slow(l);
}

bool
_os_nospin_lock_trylock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	return r;
}

void
_os_nospin_lock_unlock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_nospin_lock_unlock_slow(l, current);
}
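
// Editor's note: on targets where OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK is set,
// the OSSpinLock entry points near the top of this file forward to the
// functions above, so a legacy OSSpinLock blocks in the kernel via ulock
// instead of spinning. The mismatched-owner check in
// _os_nospin_lock_unlock_slow() returns instead of aborting precisely to
// keep that drop-in compatibility.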


#pragma mark -
#pragma mark _os_lock_nospin_t

OS_LOCK_STRUCT_DECL_INTERNAL(nospin,
	_os_nospin_lock osl_nospin_lock;
);
OS_LOCK_METHODS_DECL(nospin);
OS_LOCK_TYPE_INSTANCE(nospin);

void
_os_lock_nospin_lock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_lock(&l->osl_nospin_lock);
}

bool
_os_lock_nospin_trylock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_trylock(&l->osl_nospin_lock);
}

void
_os_lock_nospin_unlock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_unlock(&l->osl_nospin_lock);
}


#pragma mark -
#pragma mark os_once_t

typedef struct os_once_gate_s {
	union {
		os_ulock_value_t ogo_lock;
		os_once_t ogo_once;
	};
} os_once_gate_s, *os_once_gate_t;

#define OS_ONCE_INIT ((os_once_t)0l)
#define OS_ONCE_DONE (~(os_once_t)0l)

OS_ATOMIC_EXPORT void _os_once(os_once_t *val, void *ctxt, os_function_t func);
OS_ATOMIC_EXPORT void __os_once_reset(os_once_t *val);

OS_NOINLINE OS_NORETURN OS_COLD
void _os_once_gate_recursive_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_once_gate_unowned_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_once_gate_corruption_abort(os_ulock_value_t current);


OS_NOINLINE OS_NORETURN OS_COLD
void
_os_once_gate_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_once_t");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_once_gate_unowned_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_once_t not "
			"owned by current thread");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_once_gate_corruption_abort(os_ulock_value_t current)
{
	__LIBPLATFORM_CLIENT_CRASH__(current, "os_once_t is corrupt");
}


OS_NOINLINE
static void
_os_once_gate_wait_slow(os_ulock_value_t *gate, os_lock_owner_t self)
{
	os_ulock_value_t tid_old, tid_new;

	for (;;) {
		os_atomic_rmw_loop(gate, tid_old, tid_new, relaxed, {
			switch (tid_old) {
			case (os_ulock_value_t)OS_ONCE_INIT: // raced with __os_once_reset()
			case (os_ulock_value_t)OS_ONCE_DONE: // raced with _os_once()
				os_atomic_rmw_loop_give_up(return);
			}
			tid_new = tid_old & ~OS_ULOCK_NOWAITERS_BIT;
			if (tid_new == tid_old) os_atomic_rmw_loop_give_up(break);
		});
		if (unlikely(OS_ULOCK_IS_OWNER(tid_old, self, 0))) {
			return _os_once_gate_recursive_abort(self);
		}
		int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO,
				gate, tid_new, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
			case EFAULT:
				continue;
			case EOWNERDEAD:
				_os_once_gate_corruption_abort(tid_old);
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
	}
}

OS_NOINLINE
static void
_os_once_gate_broadcast_slow(os_ulock_value_t *gate, os_ulock_value_t current,
		os_lock_owner_t self)
{
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, 0))) {
		return _os_once_gate_unowned_abort(OS_ULOCK_OWNER(current));
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO | ULF_WAKE_ALL,
				gate, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

OS_ALWAYS_INLINE
static void
_os_once_gate_set_value_and_broadcast(os_once_gate_t og, os_lock_owner_t self,
		os_once_t value)
{
	os_ulock_value_t current;
#if defined(__i386__) || defined(__x86_64__)
	// On Intel, any load is a load-acquire, so we don't need to be fancy
	current = (os_ulock_value_t)os_atomic_xchg(&og->ogo_once, value, release);
#else
#  error os_once algorithm not available for this architecture
#endif
	if (likely(current == self)) return;
	_os_once_gate_broadcast_slow(&og->ogo_lock, current, self);
}

// Atomically resets the once value to zero and then signals all
// pending waiters to return from their _os_once_gate_wait_slow()
void
__os_once_reset(os_once_t *val)
{
	os_once_gate_t og = (os_once_gate_t)val;
	os_lock_owner_t self = _os_lock_owner_get_self();
	_os_once_gate_set_value_and_broadcast(og, self, OS_ONCE_INIT);
}

void
_os_once(os_once_t *val, void *ctxt, os_function_t func)
{
	os_once_gate_t og = (os_once_gate_t)val;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_once_t v = (os_once_t)self;

	if (likely(os_atomic_cmpxchg(&og->ogo_once, OS_ONCE_INIT, v, relaxed))) {
		func(ctxt);
		_os_once_gate_set_value_and_broadcast(og, self, OS_ONCE_DONE);
	} else {
		_os_once_gate_wait_slow(&og->ogo_lock, self);
	}
}
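
// Usage sketch (editor's illustration): _os_once() is the gate primitive
// behind os_once() (see os/once_private.h). The gate holds OS_ONCE_INIT (0)
// before first use, the initializer's port name while func runs, and
// OS_ONCE_DONE (~0) afterwards, so later callers return without blocking:
//
//	static os_once_t pred;	// zero-initialized, i.e. OS_ONCE_INIT
//	static void init_tables(void *ctxt) { /* runs exactly once */ }
//	...
//	_os_once(&pred, NULL, init_tables);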


#pragma mark -
#pragma mark os_lock_eliding_t

#if !TARGET_OS_IPHONE

#define _os_lock_eliding_t _os_lock_spin_t
#define _os_lock_eliding_lock _os_lock_spin_lock
#define _os_lock_eliding_trylock _os_lock_spin_trylock
#define _os_lock_eliding_unlock _os_lock_spin_unlock
OS_LOCK_METHODS_DECL(eliding);
OS_LOCK_TYPE_INSTANCE(eliding);

#pragma mark -
#pragma mark os_lock_transactional_t

OS_LOCK_STRUCT_DECL_INTERNAL(transactional,
	uintptr_t volatile osl_lock;
);

#define _os_lock_transactional_t _os_lock_eliding_t
#define _os_lock_transactional_lock _os_lock_eliding_lock
#define _os_lock_transactional_trylock _os_lock_eliding_trylock
#define _os_lock_transactional_unlock _os_lock_eliding_unlock
OS_LOCK_METHODS_DECL(transactional);
OS_LOCK_TYPE_INSTANCE(transactional);

#endif // !TARGET_OS_IPHONE