/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#define OS_UNFAIR_LOCK_INLINE 1
#include "lock_internal.h"
#include "os/internal.h"
#include "resolver.h"
#include "libkern/OSAtomic.h"
#include "os/lock.h"
#include "os/lock_private.h"
#include "os/once_private.h"

#include <mach/mach_init.h>
#include <mach/mach_traps.h>
#include <mach/thread_switch.h>
#include <mach/mach_time.h>
#include <os/tsd.h>

#pragma mark -
#pragma mark _os_lock_base_t

OS_NOINLINE OS_NORETURN OS_COLD
void _os_lock_corruption_abort(void *lock_ptr OS_UNUSED, uintptr_t lock_value);


OS_LOCK_STRUCT_DECL_INTERNAL(base);
OS_USED static OS_LOCK_TYPE_STRUCT_DECL(base);

void
os_lock_lock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_lock(l);
}

bool
os_lock_trylock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_trylock(l);
}

void
os_lock_unlock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_unlock(l);
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_lock_corruption_abort(void *lock_ptr OS_UNUSED, uintptr_t lock_value)
{
	__LIBPLATFORM_CLIENT_CRASH__(lock_value, "os_lock is corrupt");
}


#pragma mark -
#pragma mark OSSpinLock

OS_NOEXPORT OS_NOINLINE void _OSSpinLockLockSlow(volatile OSSpinLock *l);

OS_ATOMIC_EXPORT void OSSpinLockLock(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT bool OSSpinLockTry(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT int spin_lock_try(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT void OSSpinLockUnlock(volatile OSSpinLock *l);

#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
static const OSSpinLock _OSSpinLockLocked = 1;
#else
static const OSSpinLock _OSSpinLockLocked = -1;
#endif



#if OS_ATOMIC_UP
// Don't spin on UP
#else
#define OS_LOCK_SPIN_SPIN_TRIES 1000
#define OS_LOCK_SPIN_PAUSE() os_hardware_pause()
#endif

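// Explanatory note: on non-x86 targets mach_absolute_time() ticks in timebase
// units rather than nanoseconds, so the millisecond timeout below is first
// scaled to nanoseconds and then converted with mach_timebase_info()
// (abstime * denom / numer); on i386/x86_64 the timebase is 1:1 and no
// conversion is needed.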
OS_ALWAYS_INLINE
static uint64_t
_os_lock_yield_deadline(mach_msg_timeout_t timeout)
{
	uint64_t abstime = timeout * NSEC_PER_MSEC;
#if !(defined(__i386__) || defined(__x86_64__))
	mach_timebase_info_data_t tbi;
	kern_return_t kr = mach_timebase_info(&tbi);
	if (kr) return UINT64_MAX;
	abstime *= tbi.denom;
	abstime /= tbi.numer;
#endif
	return mach_absolute_time() + abstime;
}

OS_ALWAYS_INLINE
static bool
_os_lock_yield_until(uint64_t deadline)
{
	return mach_absolute_time() < deadline;
}

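// Contended OSSpinLock strategy (summary of the code below): _OSSpinLockLockSlow
// spins up to OS_LOCK_SPIN_SPIN_TRIES times with os_hardware_pause() (except on
// UP, where it goes straight to yielding), then falls back to
// _OSSpinLockLockYield, which yields via thread_switch(SWITCH_OPTION_DEPRESS, 1ms)
// and, once the ~1ms yield deadline has passed, switches to SWITCH_OPTION_WAIT
// with a timeout that grows by 1ms per wakeup.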
OS_NOINLINE
static void
_OSSpinLockLockYield(volatile OSSpinLock *l)
{
	int option = SWITCH_OPTION_DEPRESS;
	mach_msg_timeout_t timeout = 1;
	uint64_t deadline = _os_lock_yield_deadline(timeout);
	OSSpinLock lock;
	while (unlikely(lock = *l)) {
_yield:
		if (unlikely(lock != _OSSpinLockLocked)) {
			_os_lock_corruption_abort((void *)l, (uintptr_t)lock);
		}
		thread_switch(MACH_PORT_NULL, option, timeout);
		if (option == SWITCH_OPTION_WAIT) {
			timeout++;
		} else if (!_os_lock_yield_until(deadline)) {
			option = SWITCH_OPTION_WAIT;
		}
	}
	bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
	if (likely(r)) return;
	goto _yield;
}

#if OS_ATOMIC_UP
void
_OSSpinLockLockSlow(volatile OSSpinLock *l)
{
	return _OSSpinLockLockYield(l); // Don't spin on UP
}
#else // !OS_ATOMIC_UP
void
_OSSpinLockLockSlow(volatile OSSpinLock *l)
{
	uint32_t tries = OS_LOCK_SPIN_SPIN_TRIES;
	OSSpinLock lock;
	while (unlikely(lock = *l)) {
_spin:
		if (unlikely(lock != _OSSpinLockLocked)) {
			return _os_lock_corruption_abort((void *)l, (uintptr_t)lock);
		}
		if (unlikely(!tries--)) return _OSSpinLockLockYield(l);
		OS_LOCK_SPIN_PAUSE();
	}
	bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
	if (likely(r)) return;
	goto _spin;
}
#endif // !OS_ATOMIC_UP



#if OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK && !TARGET_OS_SIMULATOR

typedef struct _os_nospin_lock_s *_os_nospin_lock_t;

OS_ATOMIC_EXPORT void _os_nospin_lock_lock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT void _os_nospin_lock_unlock(_os_nospin_lock_t lock);

void
OSSpinLockLock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_lock, OSSpinLockLock);
	OS_ATOMIC_ALIAS(_spin_lock, OSSpinLockLock);
	return _os_nospin_lock_lock((_os_nospin_lock_t)l);
}

bool
OSSpinLockTry(volatile OSSpinLock *l)
{
	return _os_nospin_lock_trylock((_os_nospin_lock_t)l);
}

int
spin_lock_try(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(_spin_lock_try, spin_lock_try);
	return _os_nospin_lock_trylock((_os_nospin_lock_t)l);
}

void
OSSpinLockUnlock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_unlock, OSSpinLockUnlock);
	OS_ATOMIC_ALIAS(_spin_unlock, OSSpinLockUnlock);
	return _os_nospin_lock_unlock((_os_nospin_lock_t)l);
}

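// On targets where OSSpinLock is backed by the nospin implementation, the
// exported OSSpinLock* entry points above forward to _os_nospin_lock_*.
// The renames below appear to serve two purposes: OS_ATOMIC_ALIAS is redefined
// to nothing so the definitions that follow do not emit a second set of
// exported aliases, and the OSSpinLock* names are remapped to static
// _OSSpinLock* helpers so the classic spinning implementation below remains
// available internally (it is still reached through the os_lock_spin_t methods).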
#undef OS_ATOMIC_ALIAS
#define OS_ATOMIC_ALIAS(n, o)
static void _OSSpinLockLock(volatile OSSpinLock *l);
#undef OSSpinLockLock
#define OSSpinLockLock _OSSpinLockLock
static bool _OSSpinLockTry(volatile OSSpinLock *l);
#undef OSSpinLockTry
#define OSSpinLockTry _OSSpinLockTry
static __unused int __spin_lock_try(volatile OSSpinLock *l);
#undef spin_lock_try
#define spin_lock_try __spin_lock_try
static void _OSSpinLockUnlock(volatile OSSpinLock *l);
#undef OSSpinLockUnlock
#define OSSpinLockUnlock _OSSpinLockUnlock

#endif // OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK

void
OSSpinLockLock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_lock, OSSpinLockLock);
	OS_ATOMIC_ALIAS(_spin_lock, OSSpinLockLock);
	bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
	if (likely(r)) return;
	return _OSSpinLockLockSlow(l);
}

bool
OSSpinLockTry(volatile OSSpinLock *l)
{
	bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
	return r;
}

int
spin_lock_try(volatile OSSpinLock *l) // <rdar://problem/13316060>
{
	OS_ATOMIC_ALIAS(_spin_lock_try, spin_lock_try);
	return OSSpinLockTry(l);
}

void
OSSpinLockUnlock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_unlock, OSSpinLockUnlock);
	OS_ATOMIC_ALIAS(_spin_unlock, OSSpinLockUnlock);
	os_atomic_store(l, 0, release);
}


#pragma mark -
#pragma mark os_lock_spin_t

OS_LOCK_STRUCT_DECL_INTERNAL(spin,
	OSSpinLock volatile osl_spinlock;
);
OS_LOCK_METHODS_DECL(spin);
OS_LOCK_TYPE_INSTANCE(spin);

void
_os_lock_spin_lock(_os_lock_spin_t l)
{
	return OSSpinLockLock(&l->osl_spinlock);
}

bool
_os_lock_spin_trylock(_os_lock_spin_t l)
{
	return OSSpinLockTry(&l->osl_spinlock);
}

void
_os_lock_spin_unlock(_os_lock_spin_t l)
{
	return OSSpinLockUnlock(&l->osl_spinlock);
}


#pragma mark -
#pragma mark os_lock_owner_t

#ifndef __TSD_MACH_THREAD_SELF
#define __TSD_MACH_THREAD_SELF 3
#endif

typedef mach_port_name_t os_lock_owner_t;
#define OS_LOCK_NO_OWNER MACH_PORT_NULL


OS_ALWAYS_INLINE OS_CONST
static inline os_lock_owner_t
_os_lock_owner_get_self(void)
{
	os_lock_owner_t self;
	self = (os_lock_owner_t)_os_tsd_get_direct(__TSD_MACH_THREAD_SELF);
	return self;
}


OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_lock_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_lock");
}


#pragma mark -
#pragma mark os_lock_handoff_t

OS_LOCK_STRUCT_DECL_INTERNAL(handoff,
	os_lock_owner_t volatile osl_owner;
);
OS_LOCK_METHODS_DECL(handoff);
OS_LOCK_TYPE_INSTANCE(handoff);

#define OS_LOCK_HANDOFF_YIELD_TRIES 100

OS_NOINLINE
static void
_os_lock_handoff_lock_slow(_os_lock_handoff_t l)
{
	int option = SWITCH_OPTION_OSLOCK_DEPRESS;
	mach_msg_timeout_t timeout = 1;
	uint32_t tries = OS_LOCK_HANDOFF_YIELD_TRIES;
	os_lock_owner_t self = _os_lock_owner_get_self(), owner;
	while (unlikely(owner = l->osl_owner)) {
_handoff:
		if (unlikely(owner == self)) return _os_lock_recursive_abort(self);
		// Yield until tries first hits zero, then permanently switch to wait
		if (unlikely(!tries--)) option = SWITCH_OPTION_OSLOCK_WAIT;
		thread_switch(owner, option, timeout);
		// Redrive the handoff every 1ms until switching to wait
		if (option == SWITCH_OPTION_OSLOCK_WAIT) timeout++;
	}
	bool r = os_atomic_cmpxchgv2o(l, osl_owner, MACH_PORT_NULL, self, &owner,
			acquire);
	if (likely(r)) return;
	goto _handoff;
}

void
_os_lock_handoff_lock(_os_lock_handoff_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, osl_owner, MACH_PORT_NULL, self, acquire);
	if (likely(r)) return;
	return _os_lock_handoff_lock_slow(l);
}

bool
_os_lock_handoff_trylock(_os_lock_handoff_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, osl_owner, MACH_PORT_NULL, self, acquire);
	return r;
}

void
_os_lock_handoff_unlock(_os_lock_handoff_t l)
{
	os_atomic_store2o(l, osl_owner, MACH_PORT_NULL, release);
}


#pragma mark -
#pragma mark os_ulock_value_t

#include <sys/errno.h>
#include <sys/ulock.h>

typedef os_lock_owner_t os_ulock_value_t;

// This assumes that all thread mach port values always have the low bit set!
// Clearing this bit is used to communicate the existence of waiters to unlock.
#define OS_ULOCK_NOWAITERS_BIT ((os_ulock_value_t)1u)
#define OS_ULOCK_OWNER(value) ((value) | OS_ULOCK_NOWAITERS_BIT)

#define OS_ULOCK_ANONYMOUS_OWNER MACH_PORT_DEAD
#define OS_ULOCK_IS_OWNER(value, self, allow_anonymous_owner) ({ \
		os_lock_owner_t _owner = OS_ULOCK_OWNER(value); (_owner == (self)) && \
		(!(allow_anonymous_owner) || _owner != OS_ULOCK_ANONYMOUS_OWNER); })
#define OS_ULOCK_IS_NOT_OWNER(value, self, allow_anonymous_owner) ({ \
		os_lock_owner_t _owner = OS_ULOCK_OWNER(value); (_owner != (self)) && \
		(!(allow_anonymous_owner) || _owner != OS_ULOCK_ANONYMOUS_OWNER); })
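
// Illustrative example of the encoding: a thread whose mach port name is 0x103
// (low bit set) stores 0x103 as the lock value while it owns the lock with no
// known waiters. A contending thread clears the low bit before waiting, so the
// value becomes 0x102; OS_ULOCK_OWNER(0x102) still yields 0x103, and the
// cleared OS_ULOCK_NOWAITERS_BIT tells the unlock path that it must call
// __ulock_wake().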

#pragma mark -
#pragma mark os_unfair_lock

typedef struct _os_unfair_lock_s {
	os_ulock_value_t oul_value;
} *_os_unfair_lock_t;

_Static_assert(sizeof(struct os_unfair_lock_s) ==
		sizeof(struct _os_unfair_lock_s), "os_unfair_lock size mismatch");

OS_ATOMIC_EXPORT void os_unfair_lock_lock(os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options);
OS_ATOMIC_EXPORT bool os_unfair_lock_trylock(os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock(os_unfair_lock_t lock);

OS_ATOMIC_EXPORT void os_unfair_lock_lock_no_tsd_4libpthread(
		os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock_no_tsd_4libpthread(
		os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options_4Libc(
		os_unfair_lock_t lock, os_unfair_lock_options_t options);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock_4Libc(os_unfair_lock_t lock);

OS_NOINLINE OS_NORETURN OS_COLD
void _os_unfair_lock_recursive_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_unfair_lock_unowned_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_unfair_lock_corruption_abort(os_ulock_value_t current);

_Static_assert(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION ==
		ULF_WAIT_WORKQ_DATA_CONTENTION,
		"check value for OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION");
_Static_assert(OS_UNFAIR_LOCK_ADAPTIVE_SPIN ==
		ULF_WAIT_ADAPTIVE_SPIN,
		"check value for OS_UNFAIR_LOCK_ADAPTIVE_SPIN");
#define OS_UNFAIR_LOCK_OPTIONS_MASK \
		(os_unfair_lock_options_t)(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION | \
		OS_UNFAIR_LOCK_ADAPTIVE_SPIN)
#define OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER 0x01000000u


OS_NOINLINE OS_NORETURN OS_COLD
void
_os_unfair_lock_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_unfair_lock");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_unfair_lock_unowned_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_unfair_lock not "
			"owned by current thread");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_unfair_lock_corruption_abort(os_ulock_value_t current)
{
	__LIBPLATFORM_CLIENT_CRASH__(current, "os_unfair_lock is corrupt");
}


OS_NOINLINE
static void
_os_unfair_lock_lock_slow(_os_unfair_lock_t l, os_lock_owner_t self,
		os_unfair_lock_options_t options)
{
	os_unfair_lock_options_t allow_anonymous_owner =
			options & OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	options &= ~OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	if (unlikely(options & ~OS_UNFAIR_LOCK_OPTIONS_MASK)) {
		__LIBPLATFORM_CLIENT_CRASH__(options, "Invalid options");
	}
	os_ulock_value_t current, new, waiters_mask = 0;
	while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
			OS_LOCK_NO_OWNER)) {
_retry:
		if (unlikely(OS_ULOCK_IS_OWNER(current, self, allow_anonymous_owner))) {
			return _os_unfair_lock_recursive_abort(self);
		}
		new = current & ~OS_ULOCK_NOWAITERS_BIT;
		if (current != new) {
			// Clear nowaiters bit in lock value before waiting
			if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
					relaxed)) {
				continue;
			}
			current = new;
		}
		int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO | options,
				l, current, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
			case EFAULT:
				continue;
			case EOWNERDEAD:
				_os_unfair_lock_corruption_abort(current);
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
		if (ret > 0) {
			// If there are more waiters, unset nowaiters bit when acquiring lock
			waiters_mask = OS_ULOCK_NOWAITERS_BIT;
		}
	}
	new = self & ~waiters_mask;
	bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
			&current, acquire);
	if (unlikely(!r)) goto _retry;
}

OS_NOINLINE
static void
_os_unfair_lock_unlock_slow(_os_unfair_lock_t l, os_ulock_value_t current,
		os_lock_owner_t self, os_unfair_lock_options_t options)
{
	os_unfair_lock_options_t allow_anonymous_owner =
			options & OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	options &= ~OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, allow_anonymous_owner))) {
		return _os_unfair_lock_unowned_abort(OS_ULOCK_OWNER(current));
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO, l, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

void
os_unfair_lock_lock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self, OS_UNFAIR_LOCK_NONE);
}

void
os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self, options);
}

bool
os_unfair_lock_trylock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	return r;
}

void
os_unfair_lock_unlock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow(l, current, self, 0);
}

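// Usage sketch (illustrative): callers declare a lock with the public
// OS_UNFAIR_LOCK_INIT initializer and bracket the critical section with
// os_unfair_lock_lock()/os_unfair_lock_unlock(), e.g.
//
//	static os_unfair_lock gate = OS_UNFAIR_LOCK_INIT;
//	os_unfair_lock_lock(&gate);
//	/* critical section */
//	os_unfair_lock_unlock(&gate);
//
// The uncontended paths above are a single compare-and-swap (lock) or exchange
// (unlock) of the owning thread's mach port name; the slow paths only run when
// the lock is contended.
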
void
os_unfair_lock_lock_no_tsd_4libpthread(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = OS_ULOCK_ANONYMOUS_OWNER;
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self,
			OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION|
			OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER);
}

void
os_unfair_lock_unlock_no_tsd_4libpthread(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = OS_ULOCK_ANONYMOUS_OWNER;
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow(l, current, self,
			OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER);
}


void
os_unfair_lock_assert_owner(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed);
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, 0))) {
		__LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
				"Lock unexpectedly not owned by current thread");
	}
}

void
os_unfair_lock_assert_not_owner(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed);
	if (unlikely(OS_ULOCK_IS_OWNER(current, self, 0))) {
		__LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
				"Lock unexpectedly owned by current thread");
	}
}


#pragma mark -
#pragma mark os_unfair_recursive_lock

OS_ATOMIC_EXPORT
void os_unfair_recursive_lock_lock_with_options(os_unfair_recursive_lock_t lock,
		os_unfair_lock_options_t options);

OS_ATOMIC_EXPORT
bool os_unfair_recursive_lock_trylock(os_unfair_recursive_lock_t lock);

OS_ATOMIC_EXPORT
void os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock);

OS_ATOMIC_EXPORT
bool os_unfair_recursive_lock_tryunlock4objc(os_unfair_recursive_lock_t lock);

OS_ATOMIC_EXPORT
void os_unfair_recursive_lock_unlock_forked_child(os_unfair_recursive_lock_t lock);


static inline os_lock_owner_t
_os_unfair_lock_owner(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	return OS_ULOCK_OWNER(os_atomic_load(&l->oul_value, relaxed));
}


bool
os_unfair_recursive_lock_owned(os_unfair_recursive_lock_t lock)
{
	return _os_unfair_lock_owner(&lock->ourl_lock) ==
			_os_lock_owner_get_self();
}


void
os_unfair_recursive_lock_lock_with_options(os_unfair_recursive_lock_t lock,
		os_unfair_lock_options_t options)
{
	os_lock_owner_t cur, self = _os_lock_owner_get_self();
	_os_unfair_lock_t l = (_os_unfair_lock_t)&lock->ourl_lock;

	if (likely(os_atomic_cmpxchgv2o(l, oul_value,
			OS_LOCK_NO_OWNER, self, &cur, acquire))) {
		return;
	}

	if (OS_ULOCK_OWNER(cur) == self) {
		lock->ourl_count++;
		return;
	}

	return _os_unfair_lock_lock_slow(l, self, options);
}

bool
os_unfair_recursive_lock_trylock(os_unfair_recursive_lock_t lock)
{
	os_lock_owner_t cur, self = _os_lock_owner_get_self();
	_os_unfair_lock_t l = (_os_unfair_lock_t)&lock->ourl_lock;

	if (likely(os_atomic_cmpxchgv2o(l, oul_value,
			OS_LOCK_NO_OWNER, self, &cur, acquire))) {
		return true;
	}

	if (likely(OS_ULOCK_OWNER(cur) == self)) {
		lock->ourl_count++;
		return true;
	}

	return false;
}

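// ourl_count tracks re-acquisitions beyond the first: it stays 0 while the lock
// is held once and is incremented for each nested lock by the same owner, so
// the unlock below only releases the underlying unfair lock once the count has
// drained back to 0.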
OS_ALWAYS_INLINE
static inline void
_os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock,
		os_lock_owner_t self)
{
	if (unlikely(lock->ourl_count)) {
		os_lock_owner_t cur = _os_unfair_lock_owner(&lock->ourl_lock);
		if (unlikely(cur != self)) {
			_os_unfair_lock_unowned_abort(cur);
		}
		lock->ourl_count--;
		return;
	}

	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow(l, current, self, 0);
}

void
os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	_os_unfair_recursive_lock_unlock(lock, self);
}

bool
os_unfair_recursive_lock_tryunlock4objc(os_unfair_recursive_lock_t lock)
{
	os_lock_owner_t cur = _os_unfair_lock_owner(&lock->ourl_lock);
	os_lock_owner_t self = _os_lock_owner_get_self();
	if (likely(cur == self)) {
		_os_unfair_recursive_lock_unlock(lock, self);
		return true;
	}
	return false;
}

void
os_unfair_recursive_lock_unlock_forked_child(os_unfair_recursive_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)&lock->ourl_lock;

	if (os_atomic_load(&l->oul_value, relaxed) == OS_LOCK_NO_OWNER) {
		__LIBPLATFORM_CLIENT_CRASH__(0, "Lock was not held");
	}
	if (lock->ourl_count) {
		os_lock_owner_t self = _os_lock_owner_get_self();
		lock->ourl_count--;
		os_atomic_store(&l->oul_value, self, relaxed);
	} else {
		os_atomic_store(&l->oul_value, OS_LOCK_NO_OWNER, relaxed);
	}
}


#pragma mark -
#pragma mark _os_lock_unfair_t

OS_LOCK_STRUCT_DECL_INTERNAL(unfair,
	os_unfair_lock osl_unfair_lock;
);
OS_LOCK_METHODS_DECL(unfair);
OS_LOCK_TYPE_INSTANCE(unfair);

void
_os_lock_unfair_lock(_os_lock_unfair_t l)
{
	return os_unfair_lock_lock(&l->osl_unfair_lock);
}

bool
_os_lock_unfair_trylock(_os_lock_unfair_t l)
{
	return os_unfair_lock_trylock(&l->osl_unfair_lock);
}

void
_os_lock_unfair_unlock(_os_lock_unfair_t l)
{
	return os_unfair_lock_unlock(&l->osl_unfair_lock);
}


#pragma mark -
#pragma mark _os_nospin_lock

typedef struct _os_nospin_lock_s {
	os_ulock_value_t oul_value;
} _os_nospin_lock, *_os_nospin_lock_t;

_Static_assert(sizeof(OSSpinLock) ==
		sizeof(struct _os_nospin_lock_s), "os_nospin_lock size mismatch");

OS_ATOMIC_EXPORT void _os_nospin_lock_lock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT void _os_nospin_lock_unlock(_os_nospin_lock_t lock);


OS_NOINLINE
static void
_os_nospin_lock_lock_slow(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current, new, waiters_mask = 0;
	uint32_t timeout = 1;
	while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
			OS_LOCK_NO_OWNER)) {
_retry:
		new = current & ~OS_ULOCK_NOWAITERS_BIT;
		// For safer compatibility with OSSpinLock where _OSSpinLockLocked may
		// be 1, check that new didn't become 0 (unlocked) by clearing this bit
		if (current != new && new) {
			// Clear nowaiters bit in lock value before waiting
			if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
					relaxed)) {
				continue;
			}
			current = new;
		}
		int ret = __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, l, current,
				timeout * 1000);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case ETIMEDOUT:
				timeout++;
				continue;
			case EINTR:
			case EFAULT:
				continue;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
		if (ret > 0) {
			// If there are more waiters, unset nowaiters bit when acquiring lock
			waiters_mask = OS_ULOCK_NOWAITERS_BIT;
		}
	}
	new = self & ~waiters_mask;
	bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
			&current, acquire);
	if (unlikely(!r)) goto _retry;
}

OS_NOINLINE
static void
_os_nospin_lock_unlock_slow(_os_nospin_lock_t l, os_ulock_value_t current)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	if (unlikely(OS_ULOCK_OWNER(current) != self)) {
		return; // no unowned_abort for drop-in compatibility with OSSpinLock
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, l, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

void
_os_nospin_lock_lock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_nospin_lock_lock_slow(l);
}

bool
_os_nospin_lock_trylock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	return r;
}

void
_os_nospin_lock_unlock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_nospin_lock_unlock_slow(l, current);
}


#pragma mark -
#pragma mark _os_lock_nospin_t

OS_LOCK_STRUCT_DECL_INTERNAL(nospin,
	_os_nospin_lock osl_nospin_lock;
);
OS_LOCK_METHODS_DECL(nospin);
OS_LOCK_TYPE_INSTANCE(nospin);

void
_os_lock_nospin_lock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_lock(&l->osl_nospin_lock);
}

bool
_os_lock_nospin_trylock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_trylock(&l->osl_nospin_lock);
}

void
_os_lock_nospin_unlock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_unlock(&l->osl_nospin_lock);
}


#pragma mark -
#pragma mark os_once_t

typedef struct os_once_gate_s {
	union {
		os_ulock_value_t ogo_lock;
		uintptr_t ogo_once;
	};
} os_once_gate_s, *os_once_gate_t;

#define OS_ONCE_INIT ((uintptr_t)0l)
#define OS_ONCE_DONE (~(uintptr_t)0l)

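// A once gate value is one of: OS_ONCE_INIT (never run, or reset via
// __os_once_reset()), OS_ONCE_DONE (initialization complete and visible), the
// mach port name of the thread currently running the initializer (with the low
// bit cleared once waiters exist), or, when OS_ONCE_USE_QUIESCENT_COUNTER is
// set (defined just below), a generation value produced by OS_ONCE_MAKE_GEN
// while a completed initialization quiesces.
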
#if defined(__i386__) || defined(__x86_64__)
#define OS_ONCE_USE_QUIESCENT_COUNTER 0
#else
#define OS_ONCE_USE_QUIESCENT_COUNTER 1
#endif

OS_ATOMIC_EXPORT void _os_once(os_once_t *val, void *ctxt, os_function_t func);
OS_ATOMIC_EXPORT void __os_once_reset(os_once_t *val);

OS_NOINLINE OS_NORETURN OS_COLD
void _os_once_gate_recursive_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_once_gate_unowned_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_once_gate_corruption_abort(os_ulock_value_t current);


OS_NOINLINE OS_NORETURN OS_COLD
void
_os_once_gate_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_once_t");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_once_gate_unowned_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_once_t not "
			"owned by current thread");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_once_gate_corruption_abort(os_ulock_value_t current)
{
	__LIBPLATFORM_CLIENT_CRASH__(current, "os_once_t is corrupt");
}


#if OS_ONCE_USE_QUIESCENT_COUNTER
#define OS_ONCE_MAKE_GEN(gen) (((gen) << 2) + OS_ULOCK_NOWAITERS_BIT)
#define OS_ONCE_IS_GEN(gen) (((gen) & 3) == OS_ULOCK_NOWAITERS_BIT)

// the _COMM_PAGE_CPU_QUIESCENT_COUNTER value is incremented every time
// all CPUs have performed a context switch.
//
// To make sure all CPUs context switched at least once since `gen`,
// we need to observe 4 increments, see libdispatch/src/shims/lock.h
#define OS_ONCE_GEN_SAFE_DELTA (4 << 2)

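// Worked example (illustrative): if the quiescent counter reads 7 when the
// initializer finishes, _os_once_mark_quiescing() stores OS_ONCE_MAKE_GEN(7).
// A later caller may only promote the gate to OS_ONCE_DONE once
// _os_once_generation() - gen >= OS_ONCE_GEN_SAFE_DELTA, i.e. after the counter
// has advanced by at least 4, which per the comment above guarantees that all
// CPUs have context switched since the generation was recorded.
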
OS_ALWAYS_INLINE
static inline uintptr_t
_os_once_generation(void)
{
	uintptr_t value = *(volatile uintptr_t *)_COMM_PAGE_CPU_QUIESCENT_COUNTER;
	return OS_ONCE_MAKE_GEN(value);
}

OS_ALWAYS_INLINE
static inline uintptr_t
_os_once_mark_quiescing(os_once_gate_t og)
{
	return os_atomic_xchg(&og->ogo_once, _os_once_generation(), release);
}

OS_ALWAYS_INLINE
static void
_os_once_mark_done_if_quiesced(os_once_gate_t og, uintptr_t gen)
{
	if (_os_once_generation() - gen >= OS_ONCE_GEN_SAFE_DELTA) {
		os_atomic_store(&og->ogo_once, OS_ONCE_DONE, relaxed);
	}
}
#else
OS_ALWAYS_INLINE
static inline uintptr_t
_os_once_mark_done(os_once_gate_t og)
{
	return os_atomic_xchg(&og->ogo_once, OS_ONCE_DONE, release);
}
#endif

OS_NOINLINE
static void
_os_once_gate_broadcast(os_once_gate_t og, os_ulock_value_t current,
		os_lock_owner_t self)
{
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, 0))) {
		return _os_once_gate_unowned_abort(OS_ULOCK_OWNER(current));
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO | ULF_WAKE_ALL,
				&og->ogo_lock, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

OS_NOINLINE
static void
_os_once_callout(os_once_gate_t og, void *ctxt, os_function_t func,
		os_lock_owner_t self)
{
	uintptr_t v;

	func(ctxt);

#if OS_ONCE_USE_QUIESCENT_COUNTER
	v = _os_once_mark_quiescing(og);
#else
	v = _os_once_mark_done(og);
#endif
	if (likely((os_ulock_value_t)v == self)) return;
	_os_once_gate_broadcast(og, (os_ulock_value_t)v, self);
}

OS_NOINLINE
static void
_os_once_gate_wait(os_once_gate_t og, void *ctxt, os_function_t func,
		os_lock_owner_t self)
{
	uintptr_t old, new;

	for (;;) {
		os_atomic_rmw_loop(&og->ogo_once, old, new, relaxed, {
			if (old == OS_ONCE_DONE) {
				os_atomic_rmw_loop_give_up(return);
#if OS_ONCE_USE_QUIESCENT_COUNTER
			} else if (OS_ONCE_IS_GEN(old)) {
				os_atomic_rmw_loop_give_up({
					os_atomic_thread_fence(acquire);
					return _os_once_mark_done_if_quiesced(og, old);
				});
#endif
			} else if (old == OS_ONCE_INIT) {
				// __os_once_reset was used, try to become the new initializer
				new = (uintptr_t)self;
			} else {
				new = old & ~(uintptr_t)OS_ULOCK_NOWAITERS_BIT;
				if (new == old) os_atomic_rmw_loop_give_up(break);
			}
		});
		if (old == OS_ONCE_INIT) {
			// see comment in _os_once, pairs with the release barrier
			// in __os_once_reset()
			os_atomic_thread_fence(acquire);
			return _os_once_callout(og, ctxt, func, self);
		}
		if (unlikely(OS_ULOCK_IS_OWNER((os_lock_owner_t)old, self, 0))) {
			return _os_once_gate_recursive_abort(self);
		}
		int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO,
				&og->ogo_lock, (os_ulock_value_t)new, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
			case EFAULT:
				continue;
			case EOWNERDEAD:
				_os_once_gate_corruption_abort((os_lock_owner_t)old);
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
	}
}

// Atomically resets the once value to zero and then signals all
// pending waiters to return from their __ulock_wait()
void
__os_once_reset(os_once_t *val)
{
	os_once_gate_t og = (os_once_gate_t)val;
	os_lock_owner_t self = _os_lock_owner_get_self();
	uintptr_t v;

	v = os_atomic_xchg(&og->ogo_once, OS_ONCE_INIT, release);
	if (likely((os_ulock_value_t)v == self)) return;
	return _os_once_gate_broadcast(og, (os_ulock_value_t)v, self);
}

void
_os_once(os_once_t *val, void *ctxt, os_function_t func)
{
	os_once_gate_t og = (os_once_gate_t)val;
	os_lock_owner_t self;
	uintptr_t v;

#if OS_ONCE_USE_QUIESCENT_COUNTER
	v = os_atomic_load(&og->ogo_once, acquire);
	if (likely(OS_ONCE_IS_GEN(v))) {
		return _os_once_mark_done_if_quiesced(og, v);
	}
#endif

	self = _os_lock_owner_get_self();
	v = (uintptr_t)self;

	// The acquire barrier pairs with the release in __os_once_reset()
	// for cases when a previous initializer failed.
	if (likely(os_atomic_cmpxchg(&og->ogo_once, OS_ONCE_INIT, v, acquire))) {
		return _os_once_callout(og, ctxt, func, self);
	}
	return _os_once_gate_wait(og, ctxt, func, self);
}