/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#define OS_UNFAIR_LOCK_INLINE 1
#include "lock_internal.h"
#include "os/internal.h"
#include "resolver.h"
#include "libkern/OSAtomic.h"
#include "os/lock.h"
#include "os/lock_private.h"
#include "os/once_private.h"

#include <mach/mach_init.h>
#include <mach/mach_traps.h>
#include <mach/thread_switch.h>
#include <mach/mach_time.h>
#include <os/tsd.h>

#pragma mark -
#pragma mark _os_lock_base_t

OS_NOINLINE OS_NORETURN OS_COLD
void _os_lock_corruption_abort(void *lock_ptr OS_UNUSED, uintptr_t lock_value);


OS_LOCK_STRUCT_DECL_INTERNAL(base);
OS_USED static OS_LOCK_TYPE_STRUCT_DECL(base);

void
os_lock_lock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_lock(l);
}

bool
os_lock_trylock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_trylock(l);
}

void
os_lock_unlock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_unlock(l);
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_lock_corruption_abort(void *lock_ptr OS_UNUSED, uintptr_t lock_value)
{
	__LIBPLATFORM_CLIENT_CRASH__(lock_value, "os_lock is corrupt");
}


#pragma mark -
#pragma mark OSSpinLock

OS_NOEXPORT OS_NOINLINE void _OSSpinLockLockSlow(volatile OSSpinLock *l);

OS_ATOMIC_EXPORT void OSSpinLockLock(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT bool OSSpinLockTry(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT int spin_lock_try(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT void OSSpinLockUnlock(volatile OSSpinLock *l);

#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
static const OSSpinLock _OSSpinLockLocked = 1;
#else
static const OSSpinLock _OSSpinLockLocked = -1;
#endif


#if OS_ATOMIC_UP
// Don't spin on UP
#else
#define OS_LOCK_SPIN_SPIN_TRIES 1000
#define OS_LOCK_SPIN_PAUSE() os_hardware_pause()
#endif

OS_ALWAYS_INLINE
static uint64_t
_os_lock_yield_deadline(mach_msg_timeout_t timeout)
{
	uint64_t abstime = timeout * NSEC_PER_MSEC;
#if !(defined(__i386__) || defined(__x86_64__))
	mach_timebase_info_data_t tbi;
	kern_return_t kr = mach_timebase_info(&tbi);
	if (kr) return UINT64_MAX;
	abstime *= tbi.denom;
	abstime /= tbi.numer;
#endif
	return mach_absolute_time() + abstime;
}
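
// Illustration (not part of the original file): worked example of the
// conversion above. mach_timebase_info() defines ns = abstime * numer /
// denom, so the inverse used here is abstime = ns * denom / numer. With
// the 24 MHz Apple Silicon timebase (numer = 125, denom = 3), a 1 ms
// timeout becomes 1000000 * 3 / 125 = 24000 absolute time units. On
// i386/x86_64 the timebase is 1:1 and the conversion is skipped.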

OS_ALWAYS_INLINE
static bool
_os_lock_yield_until(uint64_t deadline)
{
	return mach_absolute_time() < deadline;
}

OS_NOINLINE
static void
_OSSpinLockLockYield(volatile OSSpinLock *l)
{
	int option = SWITCH_OPTION_DEPRESS;
	mach_msg_timeout_t timeout = 1;
	uint64_t deadline = _os_lock_yield_deadline(timeout);
	OSSpinLock lock;
	while (unlikely(lock = *l)) {
_yield:
		if (unlikely(lock != _OSSpinLockLocked)) {
			_os_lock_corruption_abort((void *)l, (uintptr_t)lock);
		}
		thread_switch(MACH_PORT_NULL, option, timeout);
		if (option == SWITCH_OPTION_WAIT) {
			timeout++;
		} else if (!_os_lock_yield_until(deadline)) {
			option = SWITCH_OPTION_WAIT;
		}
	}
	bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
	if (likely(r)) return;
	goto _yield;
}

#if OS_ATOMIC_UP
void
_OSSpinLockLockSlow(volatile OSSpinLock *l)
{
	return _OSSpinLockLockYield(l); // Don't spin on UP
}
#else // !OS_ATOMIC_UP
void
_OSSpinLockLockSlow(volatile OSSpinLock *l)
{
	uint32_t tries = OS_LOCK_SPIN_SPIN_TRIES;
	OSSpinLock lock;
	while (unlikely(lock = *l)) {
_spin:
		if (unlikely(lock != _OSSpinLockLocked)) {
			return _os_lock_corruption_abort((void *)l, (uintptr_t)lock);
		}
		if (unlikely(!tries--)) return _OSSpinLockLockYield(l);
		OS_LOCK_SPIN_PAUSE();
	}
	bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
	if (likely(r)) return;
	goto _spin;
}
#endif // !OS_ATOMIC_UP


#if OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK && !TARGET_OS_SIMULATOR

typedef struct _os_nospin_lock_s *_os_nospin_lock_t;

OS_ATOMIC_EXPORT void _os_nospin_lock_lock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT void _os_nospin_lock_unlock(_os_nospin_lock_t lock);

void
OSSpinLockLock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_lock, OSSpinLockLock);
	OS_ATOMIC_ALIAS(_spin_lock, OSSpinLockLock);
	return _os_nospin_lock_lock((_os_nospin_lock_t)l);
}

bool
OSSpinLockTry(volatile OSSpinLock *l)
{
	return _os_nospin_lock_trylock((_os_nospin_lock_t)l);
}

int
spin_lock_try(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(_spin_lock_try, spin_lock_try);
	return _os_nospin_lock_trylock((_os_nospin_lock_t)l);
}

void
OSSpinLockUnlock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_unlock, OSSpinLockUnlock);
	OS_ATOMIC_ALIAS(_spin_unlock, OSSpinLockUnlock);
	return _os_nospin_lock_unlock((_os_nospin_lock_t)l);
}

#undef OS_ATOMIC_ALIAS
#define OS_ATOMIC_ALIAS(n, o)
static void _OSSpinLockLock(volatile OSSpinLock *l);
#undef OSSpinLockLock
#define OSSpinLockLock _OSSpinLockLock
static bool _OSSpinLockTry(volatile OSSpinLock *l);
#undef OSSpinLockTry
#define OSSpinLockTry _OSSpinLockTry
static __unused int __spin_lock_try(volatile OSSpinLock *l);
#undef spin_lock_try
#define spin_lock_try __spin_lock_try
static void _OSSpinLockUnlock(volatile OSSpinLock *l);
#undef OSSpinLockUnlock
#define OSSpinLockUnlock _OSSpinLockUnlock

#endif // OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK

void
OSSpinLockLock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_lock, OSSpinLockLock);
	OS_ATOMIC_ALIAS(_spin_lock, OSSpinLockLock);
	bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
	if (likely(r)) return;
	return _OSSpinLockLockSlow(l);
}

bool
OSSpinLockTry(volatile OSSpinLock *l)
{
	bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
	return r;
}

int
spin_lock_try(volatile OSSpinLock *l) // <rdar://problem/13316060>
{
	OS_ATOMIC_ALIAS(_spin_lock_try, spin_lock_try);
	return OSSpinLockTry(l);
}

void
OSSpinLockUnlock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_unlock, OSSpinLockUnlock);
	OS_ATOMIC_ALIAS(_spin_unlock, OSSpinLockUnlock);
	os_atomic_store(l, 0, release);
}
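
// Illustration (not part of the original file): the classic client-side
// pattern these entry points serve. OSSpinLock is deprecated in favor of
// os_unfair_lock; where OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK is set, the
// calls above transparently redirect to the ulock-based _os_nospin_lock.
#if 0
#include <libkern/OSAtomic.h>

static OSSpinLock example_lock = OS_SPINLOCK_INIT; // 0, i.e. unlocked

static void
example(void)
{
	OSSpinLockLock(&example_lock);   // fast path: cmpxchg 0 -> locked
	/* critical section */
	OSSpinLockUnlock(&example_lock); // store 0 with release ordering
}
#endif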


#pragma mark -
#pragma mark os_lock_spin_t

OS_LOCK_STRUCT_DECL_INTERNAL(spin,
	OSSpinLock volatile osl_spinlock;
);
OS_LOCK_METHODS_DECL(spin);
OS_LOCK_TYPE_INSTANCE(spin);

void
_os_lock_spin_lock(_os_lock_spin_t l)
{
	return OSSpinLockLock(&l->osl_spinlock);
}

bool
_os_lock_spin_trylock(_os_lock_spin_t l)
{
	return OSSpinLockTry(&l->osl_spinlock);
}

void
_os_lock_spin_unlock(_os_lock_spin_t l)
{
	return OSSpinLockUnlock(&l->osl_spinlock);
}


#pragma mark -
#pragma mark os_lock_owner_t

#ifndef __TSD_MACH_THREAD_SELF
#define __TSD_MACH_THREAD_SELF 3
#endif

typedef mach_port_name_t os_lock_owner_t;
#define OS_LOCK_NO_OWNER MACH_PORT_NULL


OS_ALWAYS_INLINE OS_CONST
static inline os_lock_owner_t
_os_lock_owner_get_self(void)
{
	os_lock_owner_t self;
	self = (os_lock_owner_t)_os_tsd_get_direct(__TSD_MACH_THREAD_SELF);
	return self;
}


OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_lock_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_lock");
}


#pragma mark -
#pragma mark os_lock_handoff_t

OS_LOCK_STRUCT_DECL_INTERNAL(handoff,
	os_lock_owner_t volatile osl_owner;
);
OS_LOCK_METHODS_DECL(handoff);
OS_LOCK_TYPE_INSTANCE(handoff);

#define OS_LOCK_HANDOFF_YIELD_TRIES 100

OS_NOINLINE
static void
_os_lock_handoff_lock_slow(_os_lock_handoff_t l)
{
	int option = SWITCH_OPTION_OSLOCK_DEPRESS;
	mach_msg_timeout_t timeout = 1;
	uint32_t tries = OS_LOCK_HANDOFF_YIELD_TRIES;
	os_lock_owner_t self = _os_lock_owner_get_self(), owner;
	while (unlikely(owner = l->osl_owner)) {
_handoff:
		if (unlikely(owner == self)) return _os_lock_recursive_abort(self);
		// Yield until tries first hits zero, then permanently switch to wait
		if (unlikely(!tries--)) option = SWITCH_OPTION_OSLOCK_WAIT;
		thread_switch(owner, option, timeout);
		// Redrive the handoff every 1ms until switching to wait
		if (option == SWITCH_OPTION_OSLOCK_WAIT) timeout++;
	}
	bool r = os_atomic_cmpxchgv2o(l, osl_owner, MACH_PORT_NULL, self, &owner,
			acquire);
	if (likely(r)) return;
	goto _handoff;
}
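
// Note (not part of the original file): because the lock word holds the
// owner's mach thread port, the slow path above can donate its remaining
// quantum directly to the owner via thread_switch(owner, ...) instead of
// spinning against an anonymous holder; that directed handoff is what
// gives this lock type its name.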

void
_os_lock_handoff_lock(_os_lock_handoff_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, osl_owner, MACH_PORT_NULL, self, acquire);
	if (likely(r)) return;
	return _os_lock_handoff_lock_slow(l);
}

bool
_os_lock_handoff_trylock(_os_lock_handoff_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, osl_owner, MACH_PORT_NULL, self, acquire);
	return r;
}

void
_os_lock_handoff_unlock(_os_lock_handoff_t l)
{
	os_atomic_store2o(l, osl_owner, MACH_PORT_NULL, release);
}


#pragma mark -
#pragma mark os_ulock_value_t

#include <sys/errno.h>
#include <sys/ulock.h>

typedef os_lock_owner_t os_ulock_value_t;

// This assumes that all thread mach port values always have the low bit set!
// Clearing this bit is used to communicate the existence of waiters to unlock.
#define OS_ULOCK_NOWAITERS_BIT ((os_ulock_value_t)1u)
#define OS_ULOCK_OWNER(value) ((value) | OS_ULOCK_NOWAITERS_BIT)

#define OS_ULOCK_ANONYMOUS_OWNER MACH_PORT_DEAD
#define OS_ULOCK_IS_OWNER(value, self, allow_anonymous_owner) ({ \
		os_lock_owner_t _owner = OS_ULOCK_OWNER(value); (_owner == (self)) && \
		(!(allow_anonymous_owner) || _owner != OS_ULOCK_ANONYMOUS_OWNER); })
#define OS_ULOCK_IS_NOT_OWNER(value, self, allow_anonymous_owner) ({ \
		os_lock_owner_t _owner = OS_ULOCK_OWNER(value); (_owner != (self)) && \
		(!(allow_anonymous_owner) || _owner != OS_ULOCK_ANONYMOUS_OWNER); })
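
// Illustration (not part of the original file): a worked example of the
// waiters protocol, assuming a hypothetical thread port name of 0x1003
// (low bit set). An uncontended acquire stores 0x1003. The first
// contending waiter clears the low bit, storing 0x1002, before parking in
// __ulock_wait(). On unlock, a cleared low bit tells the owner that a
// __ulock_wake() is required, and OS_ULOCK_OWNER(0x1002) == 0x1003
// recovers the owner port in either state.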

#pragma mark -
#pragma mark os_unfair_lock

typedef struct _os_unfair_lock_s {
	os_ulock_value_t oul_value;
} *_os_unfair_lock_t;

_Static_assert(sizeof(struct os_unfair_lock_s) ==
		sizeof(struct _os_unfair_lock_s), "os_unfair_lock size mismatch");

OS_ATOMIC_EXPORT void os_unfair_lock_lock(os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options);
OS_ATOMIC_EXPORT bool os_unfair_lock_trylock(os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock(os_unfair_lock_t lock);

OS_ATOMIC_EXPORT void os_unfair_lock_lock_no_tsd_4libpthread(
		os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock_no_tsd_4libpthread(
		os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options_4Libc(
		os_unfair_lock_t lock, os_unfair_lock_options_t options);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock_4Libc(os_unfair_lock_t lock);

OS_NOINLINE OS_NORETURN OS_COLD
void _os_unfair_lock_recursive_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_unfair_lock_unowned_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_unfair_lock_corruption_abort(os_ulock_value_t current);

_Static_assert(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION ==
		ULF_WAIT_WORKQ_DATA_CONTENTION,
		"check value for OS_UNFAIR_LOCK_OPTIONS_MASK");
#define OS_UNFAIR_LOCK_OPTIONS_MASK \
		(os_unfair_lock_options_t)(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION)
#define OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER 0x01000000u


OS_NOINLINE OS_NORETURN OS_COLD
void
_os_unfair_lock_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_unfair_lock");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_unfair_lock_unowned_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_unfair_lock not "
			"owned by current thread");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_unfair_lock_corruption_abort(os_ulock_value_t current)
{
	__LIBPLATFORM_CLIENT_CRASH__(current, "os_unfair_lock is corrupt");
}


OS_NOINLINE
static void
_os_unfair_lock_lock_slow(_os_unfair_lock_t l, os_lock_owner_t self,
		os_unfair_lock_options_t options)
{
	os_unfair_lock_options_t allow_anonymous_owner =
			options & OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	options &= ~OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	if (unlikely(options & ~OS_UNFAIR_LOCK_OPTIONS_MASK)) {
		__LIBPLATFORM_CLIENT_CRASH__(options, "Invalid options");
	}
	os_ulock_value_t current, new, waiters_mask = 0;
	while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
			OS_LOCK_NO_OWNER)) {
_retry:
		if (unlikely(OS_ULOCK_IS_OWNER(current, self, allow_anonymous_owner))) {
			return _os_unfair_lock_recursive_abort(self);
		}
		new = current & ~OS_ULOCK_NOWAITERS_BIT;
		if (current != new) {
			// Clear nowaiters bit in lock value before waiting
			if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
					relaxed)) {
				continue;
			}
			current = new;
		}
		int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO | options,
				l, current, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
			case EFAULT:
				continue;
			case EOWNERDEAD:
				_os_unfair_lock_corruption_abort(current);
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
		if (ret > 0) {
			// If there are more waiters, unset nowaiters bit when acquiring lock
			waiters_mask = OS_ULOCK_NOWAITERS_BIT;
		}
	}
	new = self & ~waiters_mask;
	bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
			&current, acquire);
	if (unlikely(!r)) goto _retry;
}

OS_NOINLINE
static void
_os_unfair_lock_unlock_slow(_os_unfair_lock_t l, os_ulock_value_t current,
		os_lock_owner_t self, os_unfair_lock_options_t options)
{
	os_unfair_lock_options_t allow_anonymous_owner =
			options & OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	options &= ~OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, allow_anonymous_owner))) {
		return _os_unfair_lock_unowned_abort(OS_ULOCK_OWNER(current));
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO, l, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

void
os_unfair_lock_lock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self, OS_UNFAIR_LOCK_NONE);
}
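
// Illustration (not part of the original file): typical client usage of
// the public API implemented here, via <os/lock.h>.
#if 0
#include <os/lock.h>

static os_unfair_lock example_lock = OS_UNFAIR_LOCK_INIT;

static void
example(void)
{
	os_unfair_lock_lock(&example_lock);         // stores the owner port
	os_unfair_lock_assert_owner(&example_lock); // cheap ownership check
	/* critical section */
	os_unfair_lock_unlock(&example_lock);       // wakes a waiter if needed
}
#endif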

void
os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self, options);
}

bool
os_unfair_lock_trylock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	return r;
}

void
os_unfair_lock_unlock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow(l, current, self, 0);
}

void
os_unfair_lock_lock_no_tsd_4libpthread(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = OS_ULOCK_ANONYMOUS_OWNER;
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self,
			OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION |
			OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER);
}

void
os_unfair_lock_unlock_no_tsd_4libpthread(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = OS_ULOCK_ANONYMOUS_OWNER;
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow(l, current, self,
			OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER);
}


void
os_unfair_lock_assert_owner(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed);
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, 0))) {
		__LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
				"Lock unexpectedly not owned by current thread");
	}
}

void
os_unfair_lock_assert_not_owner(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed);
	if (unlikely(OS_ULOCK_IS_OWNER(current, self, 0))) {
		__LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
				"Lock unexpectedly owned by current thread");
	}
}


#pragma mark -
#pragma mark os_unfair_recursive_lock

OS_ATOMIC_EXPORT
void os_unfair_recursive_lock_lock_with_options(os_unfair_recursive_lock_t lock,
		os_unfair_lock_options_t options);

OS_ATOMIC_EXPORT
bool os_unfair_recursive_lock_trylock(os_unfair_recursive_lock_t lock);

OS_ATOMIC_EXPORT
void os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock);

OS_ATOMIC_EXPORT
bool os_unfair_recursive_lock_tryunlock4objc(os_unfair_recursive_lock_t lock);


static inline os_lock_owner_t
_os_unfair_lock_owner(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	return OS_ULOCK_OWNER(os_atomic_load(&l->oul_value, relaxed));
}

void
os_unfair_recursive_lock_lock_with_options(os_unfair_recursive_lock_t lock,
		os_unfair_lock_options_t options)
{
	os_lock_owner_t cur, self = _os_lock_owner_get_self();
	_os_unfair_lock_t l = (_os_unfair_lock_t)&lock->ourl_lock;

	if (likely(os_atomic_cmpxchgv2o(l, oul_value,
			OS_LOCK_NO_OWNER, self, &cur, acquire))) {
		return;
	}

	if (OS_ULOCK_OWNER(cur) == self) {
		lock->ourl_count++;
		return;
	}

	return _os_unfair_lock_lock_slow(l, self, options);
}

bool
os_unfair_recursive_lock_trylock(os_unfair_recursive_lock_t lock)
{
	os_lock_owner_t cur, self = _os_lock_owner_get_self();
	_os_unfair_lock_t l = (_os_unfair_lock_t)&lock->ourl_lock;

	if (likely(os_atomic_cmpxchgv2o(l, oul_value,
			OS_LOCK_NO_OWNER, self, &cur, acquire))) {
		return true;
	}

	if (likely(OS_ULOCK_OWNER(cur) == self)) {
		lock->ourl_count++;
		return true;
	}

	return false;
}


OS_ALWAYS_INLINE
static inline void
_os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock,
		os_lock_owner_t self)
{
	if (unlikely(lock->ourl_count)) {
		os_lock_owner_t cur = _os_unfair_lock_owner(&lock->ourl_lock);
		if (unlikely(cur != self)) {
			_os_unfair_lock_unowned_abort(cur);
		}
		lock->ourl_count--;
		return;
	}

	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow(l, current, self, 0);
}

void
os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	_os_unfair_recursive_lock_unlock(lock, self);
}

bool
os_unfair_recursive_lock_tryunlock4objc(os_unfair_recursive_lock_t lock)
{
	os_lock_owner_t cur = _os_unfair_lock_owner(&lock->ourl_lock);
	os_lock_owner_t self = _os_lock_owner_get_self();
	if (likely(cur == self)) {
		_os_unfair_recursive_lock_unlock(lock, self);
		return true;
	}
	return false;
}
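
// Note (not part of the original file): ourl_count tracks *extra*
// acquisitions only. The first lock stores the owner port in ourl_lock and
// leaves ourl_count at 0; each nested lock increments the count, each
// unlock decrements it, and only the final unlock releases the lock word
// (and wakes any waiters). After lock(); lock(); the state is
// { ourl_lock = self, ourl_count = 1 }.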


#pragma mark -
#pragma mark _os_lock_unfair_t

OS_LOCK_STRUCT_DECL_INTERNAL(unfair,
	os_unfair_lock osl_unfair_lock;
);
OS_LOCK_METHODS_DECL(unfair);
OS_LOCK_TYPE_INSTANCE(unfair);

void
_os_lock_unfair_lock(_os_lock_unfair_t l)
{
	return os_unfair_lock_lock(&l->osl_unfair_lock);
}

bool
_os_lock_unfair_trylock(_os_lock_unfair_t l)
{
	return os_unfair_lock_trylock(&l->osl_unfair_lock);
}

void
_os_lock_unfair_unlock(_os_lock_unfair_t l)
{
	return os_unfair_lock_unlock(&l->osl_unfair_lock);
}


#pragma mark -
#pragma mark _os_nospin_lock

typedef struct _os_nospin_lock_s {
	os_ulock_value_t oul_value;
} _os_nospin_lock, *_os_nospin_lock_t;

_Static_assert(sizeof(OSSpinLock) ==
		sizeof(struct _os_nospin_lock_s), "os_nospin_lock size mismatch");

OS_ATOMIC_EXPORT void _os_nospin_lock_lock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT void _os_nospin_lock_unlock(_os_nospin_lock_t lock);


OS_NOINLINE
static void
_os_nospin_lock_lock_slow(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current, new, waiters_mask = 0;
	uint32_t timeout = 1;
	while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
			OS_LOCK_NO_OWNER)) {
_retry:
		new = current & ~OS_ULOCK_NOWAITERS_BIT;
		// For safer compatibility with OSSpinLock where _OSSpinLockLocked may
		// be 1, check that new didn't become 0 (unlocked) by clearing this bit
		if (current != new && new) {
			// Clear nowaiters bit in lock value before waiting
			if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
					relaxed)) {
				continue;
			}
			current = new;
		}
		int ret = __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, l, current,
				timeout * 1000);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case ETIMEDOUT:
				timeout++;
				continue;
			case EINTR:
			case EFAULT:
				continue;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
		if (ret > 0) {
			// If there are more waiters, unset nowaiters bit when acquiring lock
			waiters_mask = OS_ULOCK_NOWAITERS_BIT;
		}
	}
	new = self & ~waiters_mask;
	bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
			&current, acquire);
	if (unlikely(!r)) goto _retry;
}

OS_NOINLINE
static void
_os_nospin_lock_unlock_slow(_os_nospin_lock_t l, os_ulock_value_t current)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	if (unlikely(OS_ULOCK_OWNER(current) != self)) {
		return; // no unowned_abort for drop-in compatibility with OSSpinLock
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, l, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

void
_os_nospin_lock_lock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_nospin_lock_lock_slow(l);
}

bool
_os_nospin_lock_trylock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	return r;
}

void
_os_nospin_lock_unlock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_nospin_lock_unlock_slow(l, current);
}


#pragma mark -
#pragma mark _os_lock_nospin_t

OS_LOCK_STRUCT_DECL_INTERNAL(nospin,
	_os_nospin_lock osl_nospin_lock;
);
OS_LOCK_METHODS_DECL(nospin);
OS_LOCK_TYPE_INSTANCE(nospin);

void
_os_lock_nospin_lock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_lock(&l->osl_nospin_lock);
}

bool
_os_lock_nospin_trylock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_trylock(&l->osl_nospin_lock);
}

void
_os_lock_nospin_unlock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_unlock(&l->osl_nospin_lock);
}


#pragma mark -
#pragma mark os_once_t

typedef struct os_once_gate_s {
	union {
		os_ulock_value_t ogo_lock;
		uintptr_t ogo_once;
	};
} os_once_gate_s, *os_once_gate_t;

#define OS_ONCE_INIT ((uintptr_t)0l)
#define OS_ONCE_DONE (~(uintptr_t)0l)

#if defined(__i386__) || defined(__x86_64__)
#define OS_ONCE_USE_QUIESCENT_COUNTER 0
#else
#define OS_ONCE_USE_QUIESCENT_COUNTER 1
#endif

OS_ATOMIC_EXPORT void _os_once(os_once_t *val, void *ctxt, os_function_t func);
OS_ATOMIC_EXPORT void __os_once_reset(os_once_t *val);

OS_NOINLINE OS_NORETURN OS_COLD
void _os_once_gate_recursive_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_once_gate_unowned_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_once_gate_corruption_abort(os_ulock_value_t current);


OS_NOINLINE OS_NORETURN OS_COLD
void
_os_once_gate_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_once_t");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_once_gate_unowned_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_once_t not "
			"owned by current thread");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_once_gate_corruption_abort(os_ulock_value_t current)
{
	__LIBPLATFORM_CLIENT_CRASH__(current, "os_once_t is corrupt");
}


#if OS_ONCE_USE_QUIESCENT_COUNTER
#define OS_ONCE_MAKE_GEN(gen) (((gen) << 2) + OS_ULOCK_NOWAITERS_BIT)
#define OS_ONCE_IS_GEN(gen) (((gen) & 3) == OS_ULOCK_NOWAITERS_BIT)

// the _COMM_PAGE_CPU_QUIESCENT_COUNTER value is incremented every time
// all CPUs have performed a context switch.
//
// To make sure all CPUs context switched at least once since `gen`,
// we need to observe 4 increments, see libdispatch/src/shims/lock.h
#define OS_ONCE_GEN_SAFE_DELTA (4 << 2)
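
// Illustration (not part of the original file): with a quiescent counter
// value of c, OS_ONCE_MAKE_GEN(c) encodes it as (c << 2) | 1, and
// OS_ONCE_IS_GEN() recognizes any value whose low two bits are 01 -- e.g.
// c = 7 encodes as 0b11101 (29). This keeps generations distinct from
// OS_ONCE_INIT (0) and OS_ONCE_DONE (all ones). Since the counter only
// advances, "4 increments" appears as an encoded delta of 4 << 2 == 16,
// which is what _os_once_mark_done_if_quiesced() below compares against.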

OS_ALWAYS_INLINE
static inline uintptr_t
_os_once_generation(void)
{
	uintptr_t value = *(volatile uintptr_t *)_COMM_PAGE_CPU_QUIESCENT_COUNTER;
	return OS_ONCE_MAKE_GEN(value);
}

OS_ALWAYS_INLINE
static inline uintptr_t
_os_once_mark_quiescing(os_once_gate_t og)
{
	return os_atomic_xchg(&og->ogo_once, _os_once_generation(), release);
}

OS_ALWAYS_INLINE
static void
_os_once_mark_done_if_quiesced(os_once_gate_t og, uintptr_t gen)
{
	if (_os_once_generation() - gen >= OS_ONCE_GEN_SAFE_DELTA) {
		os_atomic_store(&og->ogo_once, OS_ONCE_DONE, relaxed);
	}
}
#else
OS_ALWAYS_INLINE
static inline uintptr_t
_os_once_mark_done(os_once_gate_t og)
{
	return os_atomic_xchg(&og->ogo_once, OS_ONCE_DONE, release);
}
#endif

OS_NOINLINE
static void
_os_once_gate_broadcast(os_once_gate_t og, os_ulock_value_t current,
		os_lock_owner_t self)
{
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, 0))) {
		return _os_once_gate_unowned_abort(OS_ULOCK_OWNER(current));
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO | ULF_WAKE_ALL,
				&og->ogo_lock, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

OS_NOINLINE
static void
_os_once_callout(os_once_gate_t og, void *ctxt, os_function_t func,
		os_lock_owner_t self)
{
	uintptr_t v;

	func(ctxt);

#if OS_ONCE_USE_QUIESCENT_COUNTER
	v = _os_once_mark_quiescing(og);
#else
	v = _os_once_mark_done(og);
#endif
	if (likely((os_ulock_value_t)v == self)) return;
	_os_once_gate_broadcast(og, (os_ulock_value_t)v, self);
}

OS_NOINLINE
static void
_os_once_gate_wait(os_once_gate_t og, void *ctxt, os_function_t func,
		os_lock_owner_t self)
{
	uintptr_t old, new;

	for (;;) {
		os_atomic_rmw_loop(&og->ogo_once, old, new, relaxed, {
			if (old == OS_ONCE_DONE) {
				os_atomic_rmw_loop_give_up(return);
#if OS_ONCE_USE_QUIESCENT_COUNTER
			} else if (OS_ONCE_IS_GEN(old)) {
				os_atomic_rmw_loop_give_up({
					os_atomic_thread_fence(acquire);
					return _os_once_mark_done_if_quiesced(og, old);
				});
#endif
			} else if (old == OS_ONCE_INIT) {
				// __os_once_reset was used, try to become the new initializer
				new = (uintptr_t)self;
			} else {
				new = old & ~(uintptr_t)OS_ULOCK_NOWAITERS_BIT;
				if (new == old) os_atomic_rmw_loop_give_up(break);
			}
		});
		if (old == OS_ONCE_INIT) {
			// see comment in _os_once, pairs with the release barrier
			// in __os_once_reset()
			os_atomic_thread_fence(acquire);
			return _os_once_callout(og, ctxt, func, self);
		}
		if (unlikely(OS_ULOCK_IS_OWNER((os_lock_owner_t)old, self, 0))) {
			return _os_once_gate_recursive_abort(self);
		}
		int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO,
				&og->ogo_lock, (os_ulock_value_t)new, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
			case EFAULT:
				continue;
			case EOWNERDEAD:
				_os_once_gate_corruption_abort((os_lock_owner_t)old);
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
	}
}

// Atomically resets the once value to zero and then signals all
// pending waiters to return from their __ulock_wait()
void
__os_once_reset(os_once_t *val)
{
	os_once_gate_t og = (os_once_gate_t)val;
	os_lock_owner_t self = _os_lock_owner_get_self();
	uintptr_t v;

	v = os_atomic_xchg(&og->ogo_once, OS_ONCE_INIT, release);
	if (likely((os_ulock_value_t)v == self)) return;
	return _os_once_gate_broadcast(og, (os_ulock_value_t)v, self);
}

void
_os_once(os_once_t *val, void *ctxt, os_function_t func)
{
	os_once_gate_t og = (os_once_gate_t)val;
	os_lock_owner_t self;
	uintptr_t v;

#if OS_ONCE_USE_QUIESCENT_COUNTER
	v = os_atomic_load(&og->ogo_once, acquire);
	if (likely(OS_ONCE_IS_GEN(v))) {
		return _os_once_mark_done_if_quiesced(og, v);
	}
#endif

	self = _os_lock_owner_get_self();
	v = (uintptr_t)self;

	// The acquire barrier pairs with the release in __os_once_reset()
	// for cases when a previous initializer failed.
	if (likely(os_atomic_cmpxchg(&og->ogo_once, OS_ONCE_INIT, v, acquire))) {
		return _os_once_callout(og, ctxt, func, self);
	}
	return _os_once_gate_wait(og, ctxt, func, self);
}
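
// Illustration (not part of the original file): a minimal usage sketch,
// assuming the os_once() wrapper from "os/once_private.h" that forwards
// to _os_once() above.
#if 0
#include <os/once_private.h>

static os_once_t example_pred; // zero-initialized, i.e. OS_ONCE_INIT
static int example_value;

static void
example_init(void *ctxt)
{
	// Runs exactly once; concurrent callers park in __ulock_wait() on
	// the gate until the initializer returns and broadcasts a wake.
	*(int *)ctxt = 42;
}

static void
example(void)
{
	os_once(&example_pred, &example_value, example_init);
	// example_value is now initialized and visible to every caller.
}
#endif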


#pragma mark -
#pragma mark os_lock_eliding_t

#if !TARGET_OS_IPHONE

#define _os_lock_eliding_t _os_lock_spin_t
#define _os_lock_eliding_lock _os_lock_spin_lock
#define _os_lock_eliding_trylock _os_lock_spin_trylock
#define _os_lock_eliding_unlock _os_lock_spin_unlock
OS_LOCK_METHODS_DECL(eliding);
OS_LOCK_TYPE_INSTANCE(eliding);

#pragma mark -
#pragma mark os_lock_transactional_t

OS_LOCK_STRUCT_DECL_INTERNAL(transactional,
	uintptr_t volatile osl_lock;
);

#define _os_lock_transactional_t _os_lock_eliding_t
#define _os_lock_transactional_lock _os_lock_eliding_lock
#define _os_lock_transactional_trylock _os_lock_eliding_trylock
#define _os_lock_transactional_unlock _os_lock_eliding_unlock
OS_LOCK_METHODS_DECL(transactional);
OS_LOCK_TYPE_INSTANCE(transactional);

#endif // !TARGET_OS_IPHONE