/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#define OS_UNFAIR_LOCK_INLINE 1
#include "lock_internal.h"
#include "os/internal.h"
#include "resolver.h"
#include "libkern/OSAtomic.h"
#include "os/lock.h"
#include "os/lock_private.h"
#include "os/once_private.h"

#include <mach/mach_init.h>
#include <mach/mach_traps.h>
#include <mach/thread_switch.h>
#include <mach/mach_time.h>
#include <os/tsd.h>

#pragma mark -
#pragma mark _os_lock_base_t

OS_NOINLINE OS_NORETURN OS_COLD
void _os_lock_corruption_abort(void *lock_ptr OS_UNUSED, uintptr_t lock_value);


OS_LOCK_STRUCT_DECL_INTERNAL(base);
OS_USED static OS_LOCK_TYPE_STRUCT_DECL(base);

void
os_lock_lock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_lock(l);
}

bool
os_lock_trylock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_trylock(l);
}

void
os_lock_unlock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_unlock(l);
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_lock_corruption_abort(void *lock_ptr OS_UNUSED, uintptr_t lock_value)
{
	__LIBPLATFORM_CLIENT_CRASH__(lock_value, "os_lock is corrupt");
}


#pragma mark -
#pragma mark OSSpinLock

OS_ATOMIC_EXPORT OS_NOINLINE void _OSSpinLockLockSlow(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT void OSSpinLockLock(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT bool OSSpinLockTry(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT int spin_lock_try(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT void OSSpinLockUnlock(volatile OSSpinLock *l);

#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
static const OSSpinLock _OSSpinLockLocked = 1;
#else
static const OSSpinLock _OSSpinLockLocked = -1;
#endif

#if OS_ATOMIC_UP
// Don't spin on UP
#elif defined(__arm__) || defined(__arm64__)
#define OS_LOCK_SPIN_SPIN_TRIES 100
#define OS_LOCK_SPIN_PAUSE() os_hardware_wfe()
#else
#define OS_LOCK_SPIN_SPIN_TRIES 1000
#define OS_LOCK_SPIN_PAUSE() os_hardware_pause()
#endif

OS_ALWAYS_INLINE
static uint64_t
_os_lock_yield_deadline(mach_msg_timeout_t timeout)
{
	uint64_t abstime = timeout;
#if defined(__arm__)
	// some armv7 targets do not have div, like the armv7k arch
	// so hardcode the most typical clock resolution it has
	// as we don't really need accuracy here anyway
	abstime *= NSEC_PER_MSEC * 128 / 3;
#elif defined(__i386__) || defined(__x86_64__)
	// abstime is in nanoseconds
#else
	mach_timebase_info_data_t tbi;
	kern_return_t kr = mach_timebase_info(&tbi);
	if (kr) return UINT64_MAX;
	abstime *= (NSEC_PER_MSEC * tbi.denom / tbi.numer);
#endif
	return mach_absolute_time() + abstime;
}

OS_ALWAYS_INLINE
static bool
_os_lock_yield_until(uint64_t deadline)
{
	return mach_absolute_time() < deadline;
}

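// Illustrative sketch (not part of the original file): how the two helpers
// above are meant to be paired in a spin-then-yield loop -- compute a
// deadline once, keep donating the quantum with thread_switch() while the
// deadline has not passed, then fall back to a blocking wait. The real usage
// is _OSSpinLockLockYield() below; `example_poll_flag` and `flag` are
// hypothetical names used only for this sketch.
#if 0
static void
example_poll_flag(volatile int *flag)
{
	uint64_t deadline = _os_lock_yield_deadline(1); // ~1ms worth of yielding
	while (!*flag) {
		thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DEPRESS, 1);
		if (!_os_lock_yield_until(deadline)) {
			break; // deadline passed, a caller would switch strategies here
		}
	}
}
#endif
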
OS_NOINLINE
static void
_OSSpinLockLockYield(volatile OSSpinLock *l)
{
	int option = SWITCH_OPTION_DEPRESS;
	mach_msg_timeout_t timeout = 1;
	uint64_t deadline = _os_lock_yield_deadline(timeout);
	OSSpinLock lock;
	while (unlikely(lock = *l)) {
_yield:
		if (unlikely(lock != _OSSpinLockLocked)) {
			_os_lock_corruption_abort((void *)l, (uintptr_t)lock);
		}
		thread_switch(MACH_PORT_NULL, option, timeout);
		if (option == SWITCH_OPTION_WAIT) {
			timeout++;
		} else if (!_os_lock_yield_until(deadline)) {
			option = SWITCH_OPTION_WAIT;
		}
	}
	bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
	if (likely(r)) return;
	goto _yield;
}

#if OS_ATOMIC_UP
void
_OSSpinLockLockSlow(volatile OSSpinLock *l)
{
	return _OSSpinLockLockYield(l); // Don't spin on UP
}
#elif defined(__arm64__)
// Exclusive monitor must be held during WFE <rdar://problem/22300054>
#if defined(__ARM_ARCH_8_2__)
void
_OSSpinLockLockSlow(volatile OSSpinLock *l)
{
	uint32_t tries = OS_LOCK_SPIN_SPIN_TRIES;
	OSSpinLock lock;
_spin:
	while (unlikely(lock = os_atomic_load_exclusive(l, relaxed))) {
		if (unlikely(lock != _OSSpinLockLocked)) {
			os_atomic_clear_exclusive();
			return _os_lock_corruption_abort((void *)l, (uintptr_t)lock);
		}
		if (unlikely(!tries--)) {
			os_atomic_clear_exclusive();
			return _OSSpinLockLockYield(l);
		}
		OS_LOCK_SPIN_PAUSE();
	}
	os_atomic_clear_exclusive();
	bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
	if (likely(r)) return;
	goto _spin;
}
#else // !__ARM_ARCH_8_2__
void
_OSSpinLockLockSlow(volatile OSSpinLock *l)
{
	uint32_t tries = OS_LOCK_SPIN_SPIN_TRIES;
	OSSpinLock lock;
	os_atomic_rmw_loop(l, lock, _OSSpinLockLocked, acquire, if (unlikely(lock)){
		if (unlikely(lock != _OSSpinLockLocked)) {
			os_atomic_rmw_loop_give_up(return
					_os_lock_corruption_abort((void *)l, (uintptr_t)lock));
		}
		if (unlikely(!tries--)) {
			os_atomic_rmw_loop_give_up(return _OSSpinLockLockYield(l));
		}
		OS_LOCK_SPIN_PAUSE();
		continue;
	});
}
#endif // !__ARM_ARCH_8_2__
#else // !OS_ATOMIC_UP
void
_OSSpinLockLockSlow(volatile OSSpinLock *l)
{
	uint32_t tries = OS_LOCK_SPIN_SPIN_TRIES;
	OSSpinLock lock;
	while (unlikely(lock = *l)) {
_spin:
		if (unlikely(lock != _OSSpinLockLocked)) {
			return _os_lock_corruption_abort((void *)l, (uintptr_t)lock);
		}
		if (unlikely(!tries--)) return _OSSpinLockLockYield(l);
		OS_LOCK_SPIN_PAUSE();
	}
	bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
	if (likely(r)) return;
	goto _spin;
}
#endif // !OS_ATOMIC_UP


#if OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK && !TARGET_OS_SIMULATOR

typedef struct _os_nospin_lock_s *_os_nospin_lock_t;

OS_ATOMIC_EXPORT void _os_nospin_lock_lock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT void _os_nospin_lock_unlock(_os_nospin_lock_t lock);

void
OSSpinLockLock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_lock, OSSpinLockLock);
	OS_ATOMIC_ALIAS(_spin_lock, OSSpinLockLock);
	return _os_nospin_lock_lock((_os_nospin_lock_t)l);
}

bool
OSSpinLockTry(volatile OSSpinLock *l)
{
	return _os_nospin_lock_trylock((_os_nospin_lock_t)l);
}

int
spin_lock_try(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(_spin_lock_try, spin_lock_try);
	return _os_nospin_lock_trylock((_os_nospin_lock_t)l);
}

void
OSSpinLockUnlock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_unlock, OSSpinLockUnlock);
	OS_ATOMIC_ALIAS(_spin_unlock, OSSpinLockUnlock);
	return _os_nospin_lock_unlock((_os_nospin_lock_t)l);
}

#undef OS_ATOMIC_ALIAS
#define OS_ATOMIC_ALIAS(n, o)
static void _OSSpinLockLock(volatile OSSpinLock *l);
#undef OSSpinLockLock
#define OSSpinLockLock _OSSpinLockLock
static bool _OSSpinLockTry(volatile OSSpinLock *l);
#undef OSSpinLockTry
#define OSSpinLockTry _OSSpinLockTry
static __unused int __spin_lock_try(volatile OSSpinLock *l);
#undef spin_lock_try
#define spin_lock_try __spin_lock_try
static void _OSSpinLockUnlock(volatile OSSpinLock *l);
#undef OSSpinLockUnlock
#define OSSpinLockUnlock _OSSpinLockUnlock

#endif // OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK

void
OSSpinLockLock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_lock, OSSpinLockLock);
	OS_ATOMIC_ALIAS(_spin_lock, OSSpinLockLock);
	bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
	if (likely(r)) return;
	return _OSSpinLockLockSlow(l);
}

bool
OSSpinLockTry(volatile OSSpinLock *l)
{
	bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
	return r;
}

int
spin_lock_try(volatile OSSpinLock *l) // <rdar://problem/13316060>
{
	OS_ATOMIC_ALIAS(_spin_lock_try, spin_lock_try);
	return OSSpinLockTry(l);
}

void
OSSpinLockUnlock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_unlock, OSSpinLockUnlock);
	OS_ATOMIC_ALIAS(_spin_unlock, OSSpinLockUnlock);
	os_atomic_store(l, 0, release);
}

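// Usage sketch (illustrative only, not part of the original file): the
// functions above implement the legacy <libkern/OSAtomic.h> OSSpinLock API.
// A lock is a plain integer initialized to OS_SPINLOCK_INIT (0);
// OSSpinLockLock() spins briefly and then yields until it can store
// _OSSpinLockLocked, and OSSpinLockUnlock() publishes 0 with release
// semantics. The names below are hypothetical example code.
#if 0
static OSSpinLock example_lock = OS_SPINLOCK_INIT;
static int example_counter;

static void
example_increment(void)
{
	OSSpinLockLock(&example_lock);
	example_counter++;
	OSSpinLockUnlock(&example_lock);
}
#endif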

#pragma mark -
#pragma mark os_lock_spin_t

OS_LOCK_STRUCT_DECL_INTERNAL(spin,
	OSSpinLock volatile osl_spinlock;
);
OS_LOCK_METHODS_DECL(spin);
OS_LOCK_TYPE_INSTANCE(spin);

void
_os_lock_spin_lock(_os_lock_spin_t l)
{
	return OSSpinLockLock(&l->osl_spinlock);
}

bool
_os_lock_spin_trylock(_os_lock_spin_t l)
{
	return OSSpinLockTry(&l->osl_spinlock);
}

void
_os_lock_spin_unlock(_os_lock_spin_t l)
{
	return OSSpinLockUnlock(&l->osl_spinlock);
}


#pragma mark -
#pragma mark os_lock_owner_t

#ifndef __TSD_MACH_THREAD_SELF
#define __TSD_MACH_THREAD_SELF 3
#endif

typedef mach_port_name_t os_lock_owner_t;
#define OS_LOCK_NO_OWNER MACH_PORT_NULL


OS_ALWAYS_INLINE OS_CONST
static inline os_lock_owner_t
_os_lock_owner_get_self(void)
{
	os_lock_owner_t self;
	self = (os_lock_owner_t)_os_tsd_get_direct(__TSD_MACH_THREAD_SELF);
	return self;
}


OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_lock_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_lock");
}


#pragma mark -
#pragma mark os_lock_handoff_t

OS_LOCK_STRUCT_DECL_INTERNAL(handoff,
	os_lock_owner_t volatile osl_owner;
);
OS_LOCK_METHODS_DECL(handoff);
OS_LOCK_TYPE_INSTANCE(handoff);

#define OS_LOCK_HANDOFF_YIELD_TRIES 100

OS_NOINLINE
static void
_os_lock_handoff_lock_slow(_os_lock_handoff_t l)
{
	int option = SWITCH_OPTION_OSLOCK_DEPRESS;
	mach_msg_timeout_t timeout = 1;
	uint32_t tries = OS_LOCK_HANDOFF_YIELD_TRIES;
	os_lock_owner_t self = _os_lock_owner_get_self(), owner;
	while (unlikely(owner = l->osl_owner)) {
_handoff:
		if (unlikely(owner == self)) return _os_lock_recursive_abort(self);
		// Yield until tries first hits zero, then permanently switch to wait
		if (unlikely(!tries--)) option = SWITCH_OPTION_OSLOCK_WAIT;
		thread_switch(owner, option, timeout);
		// Redrive the handoff every 1ms until switching to wait
		if (option == SWITCH_OPTION_OSLOCK_WAIT) timeout++;
	}
	bool r = os_atomic_cmpxchgv(&l->osl_owner, MACH_PORT_NULL, self, &owner,
			acquire);
	if (likely(r)) return;
	goto _handoff;
}

void
_os_lock_handoff_lock(_os_lock_handoff_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg(&l->osl_owner, MACH_PORT_NULL, self, acquire);
	if (likely(r)) return;
	return _os_lock_handoff_lock_slow(l);
}

bool
_os_lock_handoff_trylock(_os_lock_handoff_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg(&l->osl_owner, MACH_PORT_NULL, self, acquire);
	return r;
}

void
_os_lock_handoff_unlock(_os_lock_handoff_t l)
{
	os_atomic_store(&l->osl_owner, MACH_PORT_NULL, release);
}

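// Note (added commentary, not in the original file): the "handoff" lock above
// differs from the spin variants in that a contended locker passes its CPU
// time directly to the current owner -- thread_switch() is called with the
// owner's thread port and SWITCH_OPTION_OSLOCK_DEPRESS / _WAIT, so the owner
// is helped along rather than the waiter simply burning or yielding its
// quantum. Ownership is recorded as the owner's mach thread port in
// osl_owner, which is also what makes the recursive-locking abort possible.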

#pragma mark -
#pragma mark os_ulock_value_t

#include <sys/errno.h>
#include <sys/ulock.h>

typedef os_lock_owner_t os_ulock_value_t;

// This assumes that all thread mach port values always have the low bit set!
// Clearing this bit is used to communicate the existence of waiters to unlock.
#define OS_ULOCK_NOWAITERS_BIT ((os_ulock_value_t)1u)
#define OS_ULOCK_OWNER(value) ((value) | OS_ULOCK_NOWAITERS_BIT)

#define OS_ULOCK_ANONYMOUS_OWNER MACH_PORT_DEAD
#define OS_ULOCK_IS_OWNER(value, self, allow_anonymous_owner) ({ \
		os_lock_owner_t _owner = OS_ULOCK_OWNER(value); (_owner == (self)) && \
		(!(allow_anonymous_owner) || _owner != OS_ULOCK_ANONYMOUS_OWNER); })
#define OS_ULOCK_IS_NOT_OWNER(value, self, allow_anonymous_owner) ({ \
		os_lock_owner_t _owner = OS_ULOCK_OWNER(value); (_owner != (self)) && \
		(!(allow_anonymous_owner) || _owner != OS_ULOCK_ANONYMOUS_OWNER); })

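// Worked example of the encoding above (added commentary, not in the original
// file): assume a locker whose mach thread port name is 0x50b (an arbitrary
// made-up value; its low bit is set as required). An uncontended lock stores
// 0x50b. The first waiter clears OS_ULOCK_NOWAITERS_BIT, leaving 0x50a, which
// tells the eventual unlocker that someone must be woken. In both states
// OS_ULOCK_OWNER() ORs the bit back in and yields 0x50b, so ownership checks
// keep working whether or not waiters exist.
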
#pragma mark -
#pragma mark os_unfair_lock

typedef struct _os_unfair_lock_s {
	os_ulock_value_t oul_value;
} *_os_unfair_lock_t;

_Static_assert(sizeof(struct os_unfair_lock_s) ==
		sizeof(struct _os_unfair_lock_s), "os_unfair_lock size mismatch");

OS_ATOMIC_EXPORT void os_unfair_lock_lock(os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options);
OS_ATOMIC_EXPORT bool os_unfair_lock_trylock(os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock(os_unfair_lock_t lock);

OS_ATOMIC_EXPORT void os_unfair_lock_lock_no_tsd(os_unfair_lock_t lock,
		os_unfair_lock_options_t options, mach_port_t mts);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock_no_tsd(os_unfair_lock_t lock,
		mach_port_t mts);

OS_NOINLINE OS_NORETURN OS_COLD
void _os_unfair_lock_recursive_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_unfair_lock_unowned_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_unfair_lock_corruption_abort(os_ulock_value_t current);

_Static_assert(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION ==
		ULF_WAIT_WORKQ_DATA_CONTENTION,
		"check value for OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION");
_Static_assert(OS_UNFAIR_LOCK_ADAPTIVE_SPIN ==
		ULF_WAIT_ADAPTIVE_SPIN,
		"check value for OS_UNFAIR_LOCK_ADAPTIVE_SPIN");
#define OS_UNFAIR_LOCK_OPTIONS_MASK \
		(os_unfair_lock_options_t)(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION | \
		OS_UNFAIR_LOCK_ADAPTIVE_SPIN)
#define OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER 0x01000000u


OS_NOINLINE OS_NORETURN OS_COLD
void
_os_unfair_lock_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_unfair_lock");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_unfair_lock_unowned_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_unfair_lock not "
			"owned by current thread");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_unfair_lock_corruption_abort(os_ulock_value_t current)
{
	__LIBPLATFORM_CLIENT_CRASH__(current, "os_unfair_lock is corrupt");
}


OS_NOINLINE
static void
_os_unfair_lock_lock_slow(_os_unfair_lock_t l,
		os_unfair_lock_options_t options, os_lock_owner_t self)
{
	os_unfair_lock_options_t allow_anonymous_owner =
			options & OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	options &= ~OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	if (unlikely(options & ~OS_UNFAIR_LOCK_OPTIONS_MASK)) {
		__LIBPLATFORM_CLIENT_CRASH__(options, "Invalid options");
	}
	os_ulock_value_t current, new, waiters_mask = 0;
	while (unlikely((current = os_atomic_load(&l->oul_value, relaxed)) !=
			OS_LOCK_NO_OWNER)) {
_retry:
		if (unlikely(OS_ULOCK_IS_OWNER(current, self, allow_anonymous_owner))) {
			return _os_unfair_lock_recursive_abort(self);
		}
		new = current & ~OS_ULOCK_NOWAITERS_BIT;
		if (current != new) {
			// Clear nowaiters bit in lock value before waiting
			if (!os_atomic_cmpxchgv(&l->oul_value, current, new, &current,
					relaxed)){
				continue;
			}
			current = new;
		}
		int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO | options,
				l, current, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
			case EFAULT:
				continue;
			case EOWNERDEAD:
				_os_unfair_lock_corruption_abort(current);
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
		if (ret > 0) {
			// If there are more waiters, unset nowaiters bit when acquiring lock
			waiters_mask = OS_ULOCK_NOWAITERS_BIT;
		}
	}
	new = self & ~waiters_mask;
	bool r = os_atomic_cmpxchgv(&l->oul_value, OS_LOCK_NO_OWNER, new,
			&current, acquire);
	if (unlikely(!r)) goto _retry;
}

OS_NOINLINE
static void
_os_unfair_lock_unlock_slow(_os_unfair_lock_t l, os_lock_owner_t self,
		os_ulock_value_t current, os_unfair_lock_options_t options)
{
	os_unfair_lock_options_t allow_anonymous_owner =
			options & OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	options &= ~OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, allow_anonymous_owner))) {
		return _os_unfair_lock_unowned_abort(OS_ULOCK_OWNER(current));
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO, l, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

void
os_unfair_lock_lock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg(&l->oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, OS_UNFAIR_LOCK_NONE, self);
}

void
os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg(&l->oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, options, self);
}

bool
os_unfair_lock_trylock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg(&l->oul_value, OS_LOCK_NO_OWNER, self, acquire);
	return r;
}

void
os_unfair_lock_unlock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current;
	current = os_atomic_xchg(&l->oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow(l, self, current, 0);
}

void
os_unfair_lock_lock_no_tsd(os_unfair_lock_t lock,
		os_unfair_lock_options_t options, mach_port_t self)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	bool r = os_atomic_cmpxchg(&l->oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, options, self);
}

void
os_unfair_lock_unlock_no_tsd(os_unfair_lock_t lock, mach_port_t self)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_ulock_value_t current;
	current = os_atomic_xchg(&l->oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow(l, self, current, 0);
}


void
os_unfair_lock_assert_owner(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current = os_atomic_load(&l->oul_value, relaxed);
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, 0))) {
		__LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
				"Lock unexpectedly not owned by current thread");
	}
}

void
os_unfair_lock_assert_not_owner(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current = os_atomic_load(&l->oul_value, relaxed);
	if (unlikely(OS_ULOCK_IS_OWNER(current, self, 0))) {
		__LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
				"Lock unexpectedly owned by current thread");
	}
}

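// Usage sketch (illustrative only, not part of the original file): the
// functions above back the public <os/lock.h> os_unfair_lock API. The struct
// and function below are hypothetical example code; the lock member must be
// initialized with OS_UNFAIR_LOCK_INIT.
#if 0
typedef struct {
	os_unfair_lock ec_lock; // initialize with OS_UNFAIR_LOCK_INIT
	long ec_value;
} example_counter_t;

static void
example_counter_bump(example_counter_t *c)
{
	os_unfair_lock_lock(&c->ec_lock);         // stores this thread's port name
	c->ec_value++;
	os_unfair_lock_assert_owner(&c->ec_lock); // sanity check while held
	os_unfair_lock_unlock(&c->ec_lock);       // releases; wakes a waiter if any
}
#endif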

#pragma mark -
#pragma mark os_unfair_recursive_lock

OS_ATOMIC_EXPORT
void os_unfair_recursive_lock_lock_with_options(os_unfair_recursive_lock_t lock,
		os_unfair_lock_options_t options);

OS_ATOMIC_EXPORT
bool os_unfair_recursive_lock_trylock(os_unfair_recursive_lock_t lock);

OS_ATOMIC_EXPORT
void os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock);

OS_ATOMIC_EXPORT
bool os_unfair_recursive_lock_tryunlock4objc(os_unfair_recursive_lock_t lock);


static inline os_lock_owner_t
_os_unfair_lock_owner(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	return OS_ULOCK_OWNER(os_atomic_load(&l->oul_value, relaxed));
}


bool
os_unfair_recursive_lock_owned(os_unfair_recursive_lock_t lock)
{
	return _os_unfair_lock_owner(&lock->ourl_lock) ==
			_os_lock_owner_get_self();
}


void
os_unfair_recursive_lock_lock_with_options(os_unfair_recursive_lock_t lock,
		os_unfair_lock_options_t options)
{
	os_lock_owner_t cur, self = _os_lock_owner_get_self();
	_os_unfair_lock_t l = (_os_unfair_lock_t)&lock->ourl_lock;

	if (likely(os_atomic_cmpxchgv(&l->oul_value,
			OS_LOCK_NO_OWNER, self, &cur, acquire))) {
		return;
	}

	if (OS_ULOCK_OWNER(cur) == self) {
		lock->ourl_count++;
		return;
	}

	return _os_unfair_lock_lock_slow(l, options, self);
}

bool
os_unfair_recursive_lock_trylock(os_unfair_recursive_lock_t lock)
{
	os_lock_owner_t cur, self = _os_lock_owner_get_self();
	_os_unfair_lock_t l = (_os_unfair_lock_t)&lock->ourl_lock;

	if (likely(os_atomic_cmpxchgv(&l->oul_value,
			OS_LOCK_NO_OWNER, self, &cur, acquire))) {
		return true;
	}

	if (likely(OS_ULOCK_OWNER(cur) == self)) {
		lock->ourl_count++;
		return true;
	}

	return false;
}


OS_ALWAYS_INLINE
static inline void
_os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock,
		os_lock_owner_t self)
{
	if (unlikely(lock->ourl_count)) {
		os_lock_owner_t cur = _os_unfair_lock_owner(&lock->ourl_lock);
		if (unlikely(cur != self)) {
			_os_unfair_lock_unowned_abort(cur);
		}
		lock->ourl_count--;
		return;
	}

	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_ulock_value_t current;
	current = os_atomic_xchg(&l->oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow(l, self, current, 0);
}

void
os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	_os_unfair_recursive_lock_unlock(lock, self);
}

bool
os_unfair_recursive_lock_tryunlock4objc(os_unfair_recursive_lock_t lock)
{
	os_lock_owner_t cur = _os_unfair_lock_owner(&lock->ourl_lock);
	os_lock_owner_t self = _os_lock_owner_get_self();
	if (likely(cur == self)) {
		_os_unfair_recursive_lock_unlock(lock, self);
		return true;
	}
	return false;
}


void
os_unfair_recursive_lock_unlock_forked_child(os_unfair_recursive_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)&lock->ourl_lock;

	if (os_atomic_load(&l->oul_value, relaxed) == OS_LOCK_NO_OWNER) {
		__LIBPLATFORM_CLIENT_CRASH__(0, "Lock was not held");
	}
	if (lock->ourl_count) {
		os_lock_owner_t self = _os_lock_owner_get_self();
		lock->ourl_count--;
		os_atomic_store(&l->oul_value, self, relaxed);
	} else {
		os_atomic_store(&l->oul_value, OS_LOCK_NO_OWNER, relaxed);
	}
}

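// Usage sketch (illustrative only, not part of the original file; assumes the
// <os/lock_private.h> SPI with its OS_UNFAIR_RECURSIVE_LOCK_INIT initializer):
// an os_unfair_recursive_lock pairs an os_unfair_lock with a count of extra
// acquisitions by the owning thread, so only the outermost unlock releases
// the underlying unfair lock. Names below are hypothetical example code.
#if 0
static os_unfair_recursive_lock example_rlock = OS_UNFAIR_RECURSIVE_LOCK_INIT;

static void
example_reenter(void)
{
	os_unfair_recursive_lock_lock_with_options(&example_rlock,
			OS_UNFAIR_LOCK_NONE);
	os_unfair_recursive_lock_lock_with_options(&example_rlock,
			OS_UNFAIR_LOCK_NONE); // same owner: only bumps ourl_count
	os_unfair_recursive_lock_unlock(&example_rlock); // drops ourl_count back
	os_unfair_recursive_lock_unlock(&example_rlock); // actually releases
}
#endif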

#pragma mark -
#pragma mark _os_lock_unfair_t

OS_LOCK_STRUCT_DECL_INTERNAL(unfair,
	os_unfair_lock osl_unfair_lock;
);
OS_LOCK_METHODS_DECL(unfair);
OS_LOCK_TYPE_INSTANCE(unfair);

void
_os_lock_unfair_lock(_os_lock_unfair_t l)
{
	return os_unfair_lock_lock(&l->osl_unfair_lock);
}

bool
_os_lock_unfair_trylock(_os_lock_unfair_t l)
{
	return os_unfair_lock_trylock(&l->osl_unfair_lock);
}

void
_os_lock_unfair_unlock(_os_lock_unfair_t l)
{
	return os_unfair_lock_unlock(&l->osl_unfair_lock);
}


#pragma mark -
#pragma mark _os_nospin_lock

typedef struct _os_nospin_lock_s {
	os_ulock_value_t oul_value;
} _os_nospin_lock, *_os_nospin_lock_t;

_Static_assert(sizeof(OSSpinLock) ==
		sizeof(struct _os_nospin_lock_s), "os_nospin_lock size mismatch");

OS_ATOMIC_EXPORT void _os_nospin_lock_lock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT void _os_nospin_lock_unlock(_os_nospin_lock_t lock);


OS_NOINLINE
static void
_os_nospin_lock_lock_slow(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current, new, waiters_mask = 0;
	uint32_t timeout = 1;
	while (unlikely((current = os_atomic_load(&l->oul_value, relaxed)) !=
			OS_LOCK_NO_OWNER)) {
_retry:
		new = current & ~OS_ULOCK_NOWAITERS_BIT;
		// For safer compatibility with OSSpinLock where _OSSpinLockLocked may
		// be 1, check that new didn't become 0 (unlocked) by clearing this bit
		if (current != new && new) {
			// Clear nowaiters bit in lock value before waiting
			if (!os_atomic_cmpxchgv(&l->oul_value, current, new, &current,
					relaxed)){
				continue;
			}
			current = new;
		}
		int ret = __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, l, current,
				timeout * 1000);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case ETIMEDOUT:
				timeout++;
				continue;
			case EINTR:
			case EFAULT:
				continue;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
		if (ret > 0) {
			// If there are more waiters, unset nowaiters bit when acquiring lock
			waiters_mask = OS_ULOCK_NOWAITERS_BIT;
		}
	}
	new = self & ~waiters_mask;
	bool r = os_atomic_cmpxchgv(&l->oul_value, OS_LOCK_NO_OWNER, new,
			&current, acquire);
	if (unlikely(!r)) goto _retry;
}

OS_NOINLINE
static void
_os_nospin_lock_unlock_slow(_os_nospin_lock_t l, os_ulock_value_t current)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	if (unlikely(OS_ULOCK_OWNER(current) != self)) {
		return; // no unowned_abort for drop-in compatibility with OSSpinLock
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, l, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

void
_os_nospin_lock_lock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg(&l->oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_nospin_lock_lock_slow(l);
}

bool
_os_nospin_lock_trylock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg(&l->oul_value, OS_LOCK_NO_OWNER, self, acquire);
	return r;
}

void
_os_nospin_lock_unlock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current;
	current = os_atomic_xchg(&l->oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_nospin_lock_unlock_slow(l, current);
}


#pragma mark -
#pragma mark _os_lock_nospin_t

OS_LOCK_STRUCT_DECL_INTERNAL(nospin,
	_os_nospin_lock osl_nospin_lock;
);
OS_LOCK_METHODS_DECL(nospin);
OS_LOCK_TYPE_INSTANCE(nospin);

void
_os_lock_nospin_lock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_lock(&l->osl_nospin_lock);
}

bool
_os_lock_nospin_trylock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_trylock(&l->osl_nospin_lock);
}

void
_os_lock_nospin_unlock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_unlock(&l->osl_nospin_lock);
}


#pragma mark -
#pragma mark os_once_t

typedef struct os_once_gate_s {
	union {
		os_ulock_value_t ogo_lock;
		uintptr_t ogo_once;
	};
} os_once_gate_s, *os_once_gate_t;

#define OS_ONCE_INIT ((uintptr_t)0l)
#define OS_ONCE_DONE (~(uintptr_t)0l)

#if defined(__i386__) || defined(__x86_64__)
#define OS_ONCE_USE_QUIESCENT_COUNTER 0
#else
#define OS_ONCE_USE_QUIESCENT_COUNTER 1
#endif

OS_ATOMIC_EXPORT void _os_once(os_once_t *val, void *ctxt, os_function_t func);
OS_ATOMIC_EXPORT void __os_once_reset(os_once_t *val);

OS_NOINLINE OS_NORETURN OS_COLD
void _os_once_gate_recursive_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_once_gate_unowned_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_once_gate_corruption_abort(os_ulock_value_t current);


OS_NOINLINE OS_NORETURN OS_COLD
void
_os_once_gate_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_once_t");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_once_gate_unowned_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_once_t not "
			"owned by current thread");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_once_gate_corruption_abort(os_ulock_value_t current)
{
	__LIBPLATFORM_CLIENT_CRASH__(current, "os_once_t is corrupt");
}


#if OS_ONCE_USE_QUIESCENT_COUNTER
#define OS_ONCE_MAKE_GEN(gen) (((gen) << 2) + OS_ULOCK_NOWAITERS_BIT)
#define OS_ONCE_IS_GEN(gen) (((gen) & 3) == OS_ULOCK_NOWAITERS_BIT)

// the _COMM_PAGE_CPU_QUIESCENT_COUNTER value is incremented every time
// all CPUs have performed a context switch.
//
// To make sure all CPUs context switched at least once since `gen`,
// we need to observe 4 increments, see libdispatch/src/shims/lock.h
#define OS_ONCE_GEN_SAFE_DELTA (4 << 2)

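// Worked example of the generation encoding (added commentary, not in the
// original file): with a quiescent-counter value of 7,
// OS_ONCE_MAKE_GEN(7) == (7 << 2) + 1 == 29 == 0b11101, and OS_ONCE_IS_GEN(29)
// is true because the low two bits are 01. OS_ONCE_INIT (low bits 00) and
// OS_ONCE_DONE (all bits set) never match that pattern, which is how
// _os_once_gate_wait() below distinguishes a gate that is merely waiting out
// OS_ONCE_GEN_SAFE_DELTA (4 counter increments) from an untouched or fully
// completed one.
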
OS_ALWAYS_INLINE
static inline uintptr_t
_os_once_generation(void)
{
	uintptr_t value = *(volatile uintptr_t *)_COMM_PAGE_CPU_QUIESCENT_COUNTER;
	return OS_ONCE_MAKE_GEN(value);
}

OS_ALWAYS_INLINE
static inline uintptr_t
_os_once_mark_quiescing(os_once_gate_t og)
{
	return os_atomic_xchg(&og->ogo_once, _os_once_generation(), release);
}

OS_ALWAYS_INLINE
static void
_os_once_mark_done_if_quiesced(os_once_gate_t og, uintptr_t gen)
{
	if (_os_once_generation() - gen >= OS_ONCE_GEN_SAFE_DELTA) {
		os_atomic_store(&og->ogo_once, OS_ONCE_DONE, relaxed);
	}
}
#else
OS_ALWAYS_INLINE
static inline uintptr_t
_os_once_mark_done(os_once_gate_t og)
{
	return os_atomic_xchg(&og->ogo_once, OS_ONCE_DONE, release);
}
#endif

OS_NOINLINE
static void
_os_once_gate_broadcast(os_once_gate_t og, os_ulock_value_t current,
		os_lock_owner_t self)
{
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, 0))) {
		return _os_once_gate_unowned_abort(OS_ULOCK_OWNER(current));
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO | ULF_WAKE_ALL,
				&og->ogo_lock, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}

OS_NOINLINE
static void
_os_once_callout(os_once_gate_t og, void *ctxt, os_function_t func,
		os_lock_owner_t self)
{
	uintptr_t v;

	func(ctxt);

#if OS_ONCE_USE_QUIESCENT_COUNTER
	v = _os_once_mark_quiescing(og);
#else
	v = _os_once_mark_done(og);
#endif
	if (likely((os_ulock_value_t)v == self)) return;
	_os_once_gate_broadcast(og, (os_ulock_value_t)v, self);
}

OS_NOINLINE
static void
_os_once_gate_wait(os_once_gate_t og, void *ctxt, os_function_t func,
		os_lock_owner_t self)
{
	uintptr_t old, new;

	for (;;) {
		os_atomic_rmw_loop(&og->ogo_once, old, new, relaxed, {
			if (old == OS_ONCE_DONE) {
				os_atomic_rmw_loop_give_up(return);
#if OS_ONCE_USE_QUIESCENT_COUNTER
			} else if (OS_ONCE_IS_GEN(old)) {
				os_atomic_rmw_loop_give_up({
					os_atomic_thread_fence(acquire);
					return _os_once_mark_done_if_quiesced(og, old);
				});
#endif
			} else if (old == OS_ONCE_INIT) {
				// __os_once_reset was used, try to become the new initializer
				new = (uintptr_t)self;
			} else {
				new = old & ~(uintptr_t)OS_ULOCK_NOWAITERS_BIT;
				if (new == old) os_atomic_rmw_loop_give_up(break);
			}
		});
		if (old == OS_ONCE_INIT) {
			// see comment in _os_once, pairs with the release barrier
			// in __os_once_reset()
			os_atomic_thread_fence(acquire);
			return _os_once_callout(og, ctxt, func, self);
		}
		if (unlikely(OS_ULOCK_IS_OWNER((os_lock_owner_t)old, self, 0))) {
			return _os_once_gate_recursive_abort(self);
		}
		int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO,
				&og->ogo_lock, (os_ulock_value_t)new, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
			case EFAULT:
				continue;
			case EOWNERDEAD:
				_os_once_gate_corruption_abort((os_lock_owner_t)old);
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
	}
}

// Atomically resets the once value to zero and then signals all
// pending waiters to return from their __ulock_wait()
void
__os_once_reset(os_once_t *val)
{
	os_once_gate_t og = (os_once_gate_t)val;
	os_lock_owner_t self = _os_lock_owner_get_self();
	uintptr_t v;

	v = os_atomic_xchg(&og->ogo_once, OS_ONCE_INIT, release);
	if (likely((os_ulock_value_t)v == self)) return;
	return _os_once_gate_broadcast(og, (os_ulock_value_t)v, self);
}

void
_os_once(os_once_t *val, void *ctxt, os_function_t func)
{
	os_once_gate_t og = (os_once_gate_t)val;
	os_lock_owner_t self;
	uintptr_t v;

#if OS_ONCE_USE_QUIESCENT_COUNTER
	v = os_atomic_load(&og->ogo_once, acquire);
	if (likely(OS_ONCE_IS_GEN(v))) {
		return _os_once_mark_done_if_quiesced(og, v);
	}
#endif

	self = _os_lock_owner_get_self();
	v = (uintptr_t)self;

	// The acquire barrier pairs with the release in __os_once_reset()
	// for cases when a previous initializer failed.
	if (likely(os_atomic_cmpxchg(&og->ogo_once, OS_ONCE_INIT, v, acquire))) {
		return _os_once_callout(og, ctxt, func, self);
	}
	return _os_once_gate_wait(og, ctxt, func, self);
}
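// Usage sketch (illustrative only, not part of the original file; assumes the
// <os/once_private.h> SPI consumed by Libc and libdispatch): callers keep an
// os_once_t predicate and funnel one-time initialization through _os_once();
// __os_once_reset() exists for initializers that may fail and need to be
// retried by a later caller. Names below are hypothetical example code.
#if 0
static os_once_t example_once;
static int example_initialized;

static void
example_init(void *ctxt)
{
	*(int *)ctxt = 1;
}

static void
example_ensure_initialized(void)
{
	_os_once(&example_once, &example_initialized, example_init);
}
#endif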