[apple/libpthread.git] / src / pthread_mutex.c
1 /*
2 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
25 * All Rights Reserved
26 *
27 * Permission to use, copy, modify, and distribute this software and
28 * its documentation for any purpose and without fee is hereby granted,
29 * provided that the above copyright notice appears in all copies and
30 * that both the copyright notice and this permission notice appear in
31 * supporting documentation.
32 *
33 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
34 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE.
36 *
37 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
38 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
39 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
40 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
41 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
42 *
43 */
44 /*
45 * MkLinux
46 */
47
48 /*
49 * POSIX Pthread Library
50 * -- Mutex variable support
51 */
52
53 #include "resolver.h"
54 #include "internal.h"
55 #include "kern/kern_trace.h"
56
57 #ifndef BUILDING_VARIANT /* [ */
58
59 #ifdef PLOCKSTAT
60 #include "plockstat.h"
61 /* This function is never called and exists to provide never-fired DTrace
62 * probes so that user D scripts don't get errors.
63 */
64 PTHREAD_NOEXPORT PTHREAD_USED
65 void
66 _plockstat_never_fired(void)
67 {
68 PLOCKSTAT_MUTEX_SPIN(NULL);
69 PLOCKSTAT_MUTEX_SPUN(NULL, 0, 0);
70 }
71 #else /* !PLOCKSTAT */
72 #define PLOCKSTAT_MUTEX_SPIN(x)
73 #define PLOCKSTAT_MUTEX_SPUN(x, y, z)
74 #define PLOCKSTAT_MUTEX_ERROR(x, y)
75 #define PLOCKSTAT_MUTEX_BLOCK(x)
76 #define PLOCKSTAT_MUTEX_BLOCKED(x, y)
77 #define PLOCKSTAT_MUTEX_ACQUIRE(x, y, z)
78 #define PLOCKSTAT_MUTEX_RELEASE(x, y)
79 #endif /* PLOCKSTAT */
80
81 #define BLOCK_FAIL_PLOCKSTAT 0
82 #define BLOCK_SUCCESS_PLOCKSTAT 1
83
84 #define PTHREAD_MUTEX_INIT_UNUSED 1
85
86 PTHREAD_NOEXPORT PTHREAD_WEAK
87 int _pthread_mutex_lock_init_slow(_pthread_mutex *mutex, bool trylock);
88
89 PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
90 int _pthread_mutex_fairshare_lock_slow(_pthread_mutex *mutex, bool trylock);
91
92 PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
93 int _pthread_mutex_firstfit_lock_slow(_pthread_mutex *mutex, bool trylock);
94
95 PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
96 int _pthread_mutex_fairshare_unlock_slow(_pthread_mutex *mutex);
97
98 PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
99 int _pthread_mutex_firstfit_unlock_slow(_pthread_mutex *mutex);
100
101 PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
102 int _pthread_mutex_corruption_abort(_pthread_mutex *mutex);
103
104 extern int __pthread_mutex_default_opt_policy PTHREAD_NOEXPORT;
105
106
107 int __pthread_mutex_default_opt_policy PTHREAD_NOEXPORT =
108 _PTHREAD_MTX_OPT_POLICY_DEFAULT;
109
110 static inline bool
111 _pthread_mutex_policy_validate(int policy)
112 {
113 return (policy >= 0 && policy < _PTHREAD_MUTEX_POLICY_LAST);
114 }
115
116 static inline int
117 _pthread_mutex_policy_to_opt(int policy)
118 {
119 switch (policy) {
120 case PTHREAD_MUTEX_POLICY_FAIRSHARE_NP:
121 return _PTHREAD_MTX_OPT_POLICY_FAIRSHARE;
122 case PTHREAD_MUTEX_POLICY_FIRSTFIT_NP:
123 return _PTHREAD_MTX_OPT_POLICY_FIRSTFIT;
124 default:
125 __builtin_unreachable();
126 }
127 }
128
129 PTHREAD_NOEXPORT
130 void
131 _pthread_mutex_global_init(const char *envp[],
132 struct _pthread_registration_data *registration_data)
133 {
134 int opt = _PTHREAD_MTX_OPT_POLICY_DEFAULT;
135 if (registration_data->mutex_default_policy) {
136 int policy = registration_data->mutex_default_policy;
137 if (_pthread_mutex_policy_validate(policy)) {
138 opt = _pthread_mutex_policy_to_opt(policy);
139 }
140 }
141
142 const char *envvar = _simple_getenv(envp, "PTHREAD_MUTEX_DEFAULT_POLICY");
143 if (envvar) {
144 int policy = envvar[0] - '0';
145 if (_pthread_mutex_policy_validate(policy)) {
146 opt = _pthread_mutex_policy_to_opt(policy);
147 }
148 }
149
150 if (opt != __pthread_mutex_default_opt_policy) {
151 __pthread_mutex_default_opt_policy = opt;
152 }
153 }
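/*
 * Illustrative example (values assumed to match the PTHREAD_MUTEX_POLICY_*_NP
 * constants): launching a process with PTHREAD_MUTEX_DEFAULT_POLICY=1 in its
 * environment makes envvar[0] - '0' evaluate to 1; if that passes
 * _pthread_mutex_policy_validate(), it overrides any policy supplied via
 * registration_data above. Only the first character of the value is parsed,
 * and values that fail validation are silently ignored.
 */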
154
155
156
157 PTHREAD_ALWAYS_INLINE
158 static inline int _pthread_mutex_init(_pthread_mutex *mutex,
159 const pthread_mutexattr_t *attr, uint32_t static_type);
160
161 typedef union mutex_seq {
162 uint32_t seq[2];
163 struct { uint32_t lgenval; uint32_t ugenval; };
164 struct { uint32_t mgen; uint32_t ugen; };
165 uint64_t seq_LU;
166 uint64_t _Atomic atomic_seq_LU;
167 } mutex_seq;
168
169 _Static_assert(sizeof(mutex_seq) == 2 * sizeof(uint32_t),
170 "Incorrect mutex_seq size");
171
172 #if !__LITTLE_ENDIAN__
173 #error MUTEX_GETSEQ_ADDR assumes little endian layout of 2 32-bit sequence words
174 #endif
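/*
 * Layout sketch (little-endian, as enforced above; illustrative only):
 * seq[0]/lgenval/mgen occupy the low 32 bits of seq_LU and seq[1]/ugenval/ugen
 * the high 32 bits, so a single 64-bit CAS on atomic_seq_LU updates both
 * generation words at once. For example, lgenval == 0x5 and ugenval == 0x4
 * correspond to seq_LU == 0x0000000400000005.
 */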
175
176 PTHREAD_ALWAYS_INLINE
177 static inline void
178 MUTEX_GETSEQ_ADDR(_pthread_mutex *mutex, mutex_seq **seqaddr)
179 {
180 // 64-bit aligned address inside m_seq array (&m_seq[0] for aligned mutex)
181 // We don't require more than byte alignment on OS X. rdar://22278325
182 *seqaddr = (void *)(((uintptr_t)mutex->m_seq + 0x7ul) & ~0x7ul);
183 }
184
185 PTHREAD_ALWAYS_INLINE
186 static inline void
187 MUTEX_GETTID_ADDR(_pthread_mutex *mutex, uint64_t **tidaddr)
188 {
189 // 64-bit aligned address inside m_tid array (&m_tid[0] for aligned mutex)
190 // We don't require more than byte alignment on OS X. rdar://22278325
191 *tidaddr = (void*)(((uintptr_t)mutex->m_tid + 0x7ul) & ~0x7ul);
192 }
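/*
 * Alignment arithmetic, by way of an illustrative example: if the m_tid
 * storage of a misaligned mutex starts at address 0x1004, then
 * ((0x1004 + 0x7) & ~0x7) == 0x1008, the first 64-bit aligned slot inside
 * that storage; for an 8-byte aligned mutex the same expression simply
 * yields &m_tid[0] (and likewise for m_seq above).
 */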
193
194 PTHREAD_ALWAYS_INLINE
195 static inline void
196 mutex_seq_load(mutex_seq *seqaddr, mutex_seq *oldseqval)
197 {
198 oldseqval->seq_LU = seqaddr->seq_LU;
199 }
200
201 #define mutex_seq_atomic_load(seqaddr, oldseqval, m) \
202 mutex_seq_atomic_load_##m(seqaddr, oldseqval)
203
204 PTHREAD_ALWAYS_INLINE PTHREAD_USED
205 static inline bool
206 mutex_seq_atomic_cmpxchgv_relaxed(mutex_seq *seqaddr, mutex_seq *oldseqval,
207 mutex_seq *newseqval)
208 {
209 return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
210 newseqval->seq_LU, &oldseqval->seq_LU, relaxed);
211 }
212
213 PTHREAD_ALWAYS_INLINE PTHREAD_USED
214 static inline bool
215 mutex_seq_atomic_cmpxchgv_acquire(mutex_seq *seqaddr, mutex_seq *oldseqval,
216 mutex_seq *newseqval)
217 {
218 return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
219 newseqval->seq_LU, &oldseqval->seq_LU, acquire);
220 }
221
222 PTHREAD_ALWAYS_INLINE PTHREAD_USED
223 static inline bool
224 mutex_seq_atomic_cmpxchgv_release(mutex_seq *seqaddr, mutex_seq *oldseqval,
225 mutex_seq *newseqval)
226 {
227 return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
228 newseqval->seq_LU, &oldseqval->seq_LU, release);
229 }
230
231 #define mutex_seq_atomic_cmpxchgv(seqaddr, oldseqval, newseqval, m)\
232 mutex_seq_atomic_cmpxchgv_##m(seqaddr, oldseqval, newseqval)
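/*
 * For example, mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, release)
 * expands to mutex_seq_atomic_cmpxchgv_release(seqaddr, &oldseq, &newseq): a
 * single 64-bit compare-and-swap with release ordering that, win or lose,
 * writes the value it observed back into oldseq so callers can simply retry
 * their loop on failure.
 */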
233
234 /*
235 * Initialize a mutex variable, possibly with additional attributes.
236 * Public interface - so don't trust the lock - initialize it first.
237 */
238 PTHREAD_NOEXPORT_VARIANT
239 int
240 pthread_mutex_init(pthread_mutex_t *omutex, const pthread_mutexattr_t *attr)
241 {
242 #if 0
243 /* conformance tests depend on not having this behavior */
244 /* The test for this behavior is optional */
245 if (_pthread_mutex_check_signature((_pthread_mutex *)omutex))
246 return EBUSY;
247 #endif
248 _pthread_mutex *mutex = (_pthread_mutex *)omutex;
249 _PTHREAD_LOCK_INIT(mutex->lock);
250 return (_pthread_mutex_init(mutex, attr, 0x7));
251 }
252
253 PTHREAD_NOEXPORT_VARIANT
254 int
255 pthread_mutex_getprioceiling(const pthread_mutex_t *omutex, int *prioceiling)
256 {
257 int res = EINVAL;
258 _pthread_mutex *mutex = (_pthread_mutex *)omutex;
259 if (_pthread_mutex_check_signature(mutex)) {
260 _PTHREAD_LOCK(mutex->lock);
261 *prioceiling = mutex->prioceiling;
262 res = 0;
263 _PTHREAD_UNLOCK(mutex->lock);
264 }
265 return res;
266 }
267
268 PTHREAD_NOEXPORT_VARIANT
269 int
270 pthread_mutex_setprioceiling(pthread_mutex_t *omutex, int prioceiling,
271 int *old_prioceiling)
272 {
273 int res = EINVAL;
274 _pthread_mutex *mutex = (_pthread_mutex *)omutex;
275 if (_pthread_mutex_check_signature(mutex)) {
276 _PTHREAD_LOCK(mutex->lock);
277 if (prioceiling >= -999 && prioceiling <= 999) {
278 *old_prioceiling = mutex->prioceiling;
279 mutex->prioceiling = (int16_t)prioceiling;
280 res = 0;
281 }
282 _PTHREAD_UNLOCK(mutex->lock);
283 }
284 return res;
285 }
286
287
288 int
289 pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *attr,
290 int *prioceiling)
291 {
292 int res = EINVAL;
293 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
294 *prioceiling = attr->prioceiling;
295 res = 0;
296 }
297 return res;
298 }
299
300 int
301 pthread_mutexattr_getprotocol(const pthread_mutexattr_t *attr, int *protocol)
302 {
303 int res = EINVAL;
304 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
305 *protocol = attr->protocol;
306 res = 0;
307 }
308 return res;
309 }
310
311 int
312 pthread_mutexattr_getpolicy_np(const pthread_mutexattr_t *attr, int *policy)
313 {
314 int res = EINVAL;
315 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
316 switch (attr->opt) {
317 case _PTHREAD_MTX_OPT_POLICY_FAIRSHARE:
318 *policy = PTHREAD_MUTEX_POLICY_FAIRSHARE_NP;
319 res = 0;
320 break;
321 case _PTHREAD_MTX_OPT_POLICY_FIRSTFIT:
322 *policy = PTHREAD_MUTEX_POLICY_FIRSTFIT_NP;
323 res = 0;
324 break;
325 }
326 }
327 return res;
328 }
329
330 int
331 pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
332 {
333 int res = EINVAL;
334 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
335 *type = attr->type;
336 res = 0;
337 }
338 return res;
339 }
340
341 int
342 pthread_mutexattr_getpshared(const pthread_mutexattr_t *attr, int *pshared)
343 {
344 int res = EINVAL;
345 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
346 *pshared = (int)attr->pshared;
347 res = 0;
348 }
349 return res;
350 }
351
352 int
353 pthread_mutexattr_init(pthread_mutexattr_t *attr)
354 {
355 attr->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
356 attr->protocol = _PTHREAD_DEFAULT_PROTOCOL;
357 attr->opt = __pthread_mutex_default_opt_policy;
358 attr->type = PTHREAD_MUTEX_DEFAULT;
359 attr->sig = _PTHREAD_MUTEX_ATTR_SIG;
360 attr->pshared = _PTHREAD_DEFAULT_PSHARED;
361 return 0;
362 }
363
364 int
365 pthread_mutexattr_setprioceiling(pthread_mutexattr_t *attr, int prioceiling)
366 {
367 int res = EINVAL;
368 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
369 if (prioceiling >= -999 && prioceiling <= 999) {
370 attr->prioceiling = prioceiling;
371 res = 0;
372 }
373 }
374 return res;
375 }
376
377 int
378 pthread_mutexattr_setprotocol(pthread_mutexattr_t *attr, int protocol)
379 {
380 int res = EINVAL;
381 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
382 switch (protocol) {
383 case PTHREAD_PRIO_NONE:
384 case PTHREAD_PRIO_INHERIT:
385 case PTHREAD_PRIO_PROTECT:
386 attr->protocol = protocol;
387 res = 0;
388 break;
389 }
390 }
391 return res;
392 }
393
394 int
395 pthread_mutexattr_setpolicy_np(pthread_mutexattr_t *attr, int policy)
396 {
397 int res = EINVAL;
398 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
399 // <rdar://problem/35844519> the first-fit implementation was broken
400 // pre-Liberty so this mapping exists to ensure that the old first-fit
401 // define (2) is no longer valid when used on older systems.
402 switch (policy) {
403 case PTHREAD_MUTEX_POLICY_FAIRSHARE_NP:
404 attr->opt = _PTHREAD_MTX_OPT_POLICY_FAIRSHARE;
405 res = 0;
406 break;
407 case PTHREAD_MUTEX_POLICY_FIRSTFIT_NP:
408 attr->opt = _PTHREAD_MTX_OPT_POLICY_FIRSTFIT;
409 res = 0;
410 break;
411 }
412 }
413 return res;
414 }
415
416 int
417 pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
418 {
419 int res = EINVAL;
420 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
421 switch (type) {
422 case PTHREAD_MUTEX_NORMAL:
423 case PTHREAD_MUTEX_ERRORCHECK:
424 case PTHREAD_MUTEX_RECURSIVE:
425 //case PTHREAD_MUTEX_DEFAULT:
426 attr->type = type;
427 res = 0;
428 break;
429 }
430 }
431 return res;
432 }
433
434 int
435 pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
436 {
437 int res = EINVAL;
438 #if __DARWIN_UNIX03
439 if (__unix_conforming == 0) {
440 __unix_conforming = 1;
441 }
442 #endif /* __DARWIN_UNIX03 */
443
444 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
445 #if __DARWIN_UNIX03
446 if (( pshared == PTHREAD_PROCESS_PRIVATE) ||
447 (pshared == PTHREAD_PROCESS_SHARED))
448 #else /* __DARWIN_UNIX03 */
449 if ( pshared == PTHREAD_PROCESS_PRIVATE)
450 #endif /* __DARWIN_UNIX03 */
451 {
452 attr->pshared = pshared;
453 res = 0;
454 }
455 }
456 return res;
457 }
458
459 PTHREAD_NOEXPORT PTHREAD_NOINLINE PTHREAD_NORETURN
460 int
461 _pthread_mutex_corruption_abort(_pthread_mutex *mutex)
462 {
463 PTHREAD_CLIENT_CRASH(0, "pthread_mutex corruption: mutex owner changed "
464 "in the middle of lock/unlock");
465 }
466
467
468 PTHREAD_NOINLINE
469 static int
470 _pthread_mutex_check_init_slow(_pthread_mutex *mutex)
471 {
472 int res = EINVAL;
473
474 if (_pthread_mutex_check_signature_init(mutex)) {
475 _PTHREAD_LOCK(mutex->lock);
476 if (_pthread_mutex_check_signature_init(mutex)) {
477 // initialize a statically initialized mutex to provide
478 // compatibility for misbehaving applications.
479 // (unlock should not be the first operation on a mutex)
480 res = _pthread_mutex_init(mutex, NULL, (mutex->sig & 0xf));
481 } else if (_pthread_mutex_check_signature(mutex)) {
482 res = 0;
483 }
484 _PTHREAD_UNLOCK(mutex->lock);
485 } else if (_pthread_mutex_check_signature(mutex)) {
486 res = 0;
487 }
488 if (res != 0) {
489 PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, res);
490 }
491 return res;
492 }
493
494 PTHREAD_ALWAYS_INLINE
495 static inline int
496 _pthread_mutex_check_init(_pthread_mutex *mutex)
497 {
498 int res = 0;
499 if (!_pthread_mutex_check_signature(mutex)) {
500 return _pthread_mutex_check_init_slow(mutex);
501 }
502 return res;
503 }
504
505 PTHREAD_ALWAYS_INLINE
506 static inline bool
507 _pthread_mutex_is_fairshare(_pthread_mutex *mutex)
508 {
509 return (mutex->mtxopts.options.policy == _PTHREAD_MTX_OPT_POLICY_FAIRSHARE);
510 }
511
512 PTHREAD_ALWAYS_INLINE
513 static inline bool
514 _pthread_mutex_is_firstfit(_pthread_mutex *mutex)
515 {
516 return (mutex->mtxopts.options.policy == _PTHREAD_MTX_OPT_POLICY_FIRSTFIT);
517 }
518
519 PTHREAD_ALWAYS_INLINE
520 static inline bool
521 _pthread_mutex_is_recursive(_pthread_mutex *mutex)
522 {
523 return (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE);
524 }
525
526 PTHREAD_ALWAYS_INLINE
527 static int
528 _pthread_mutex_lock_handle_options(_pthread_mutex *mutex, bool trylock,
529 uint64_t *tidaddr)
530 {
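	/*
	 * Return convention (per the callers below): a positive value reports a
	 * successful recursive re-lock and is the new lock_count, 0 tells the
	 * caller to go on and acquire the lock, and a negative value is a
	 * negated errno (-EBUSY, -EDEADLK or -EAGAIN) for the caller to flip
	 * and return.
	 */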
531 if (mutex->mtxopts.options.type == PTHREAD_MUTEX_NORMAL) {
532 // NORMAL does not do EDEADLK checking
533 return 0;
534 }
535
536 uint64_t selfid = _pthread_selfid_direct();
537 if (os_atomic_load(tidaddr, relaxed) == selfid) {
538 if (_pthread_mutex_is_recursive(mutex)) {
539 if (mutex->mtxopts.options.lock_count < USHRT_MAX) {
540 mutex->mtxopts.options.lock_count += 1;
541 return mutex->mtxopts.options.lock_count;
542 } else {
543 return -EAGAIN;
544 }
545 } else if (trylock) { /* PTHREAD_MUTEX_ERRORCHECK */
546 // <rdar://problem/16261552> as per OpenGroup, trylock cannot
547 // return EDEADLK on a deadlock, it should return EBUSY.
548 return -EBUSY;
549 } else { /* PTHREAD_MUTEX_ERRORCHECK */
550 return -EDEADLK;
551 }
552 }
553
554 // Not recursive, or recursive but first lock.
555 return 0;
556 }
557
558 PTHREAD_ALWAYS_INLINE
559 static int
560 _pthread_mutex_unlock_handle_options(_pthread_mutex *mutex, uint64_t *tidaddr)
561 {
562 if (mutex->mtxopts.options.type == PTHREAD_MUTEX_NORMAL) {
563 // NORMAL does not do EDEADLK checking
564 return 0;
565 }
566
567 uint64_t selfid = _pthread_selfid_direct();
568 if (os_atomic_load(tidaddr, relaxed) != selfid) {
569 return -EPERM;
570 } else if (_pthread_mutex_is_recursive(mutex) &&
571 --mutex->mtxopts.options.lock_count) {
572 return 1;
573 }
574 return 0;
575 }
576
577 /*
578 * Sequence numbers and TID:
579 *
580 * In steady (and uncontended) state, an unlocked mutex will
581 * look like A=[L4 U4 TID0]. When it is being locked, it transitions
582 * to B=[L5+KE U4 TID0] and then C=[L5+KE U4 TID940]. For an uncontended mutex,
583 * the unlock path will then transition to D=[L5 U4 TID0] and then finally
584 * E=[L5 U5 TID0].
585 *
586 * If a contender comes in after B, the mutex will instead transition to
587 * [L6+KE U4 TID0] and then F=[L6+KE U4 TID940]. If a contender comes in after
588 * C, it will transition to F=[L6+KE U4 TID940] directly. In both cases, the
589 * contender will enter the kernel with either mutexwait(U4, TID0) or
590 * mutexwait(U4, TID940). The first owner will unlock the mutex by first
591 * updating the owner to G=[L6+KE U4 TID-1] and then doing the actual unlock to
592 * H=[L6+KE U5 TID-1] before entering the kernel with mutexdrop(U5, -1) to
593 * signal the next waiter (potentially as a prepost). When the waiter comes out
594 * of the kernel, it will update the owner to I=[L6+KE U5 TID941]. An unlock at
595 * this point is simply J=[L6 U5 TID0] and then K=[L6 U6 TID0].
596 *
597 * At various points along these timelines, since the sequence words and TID are
598 * written independently, a thread may get preempted and another thread might
599 * see inconsistent data. In the worst case, another thread may see the TID in
600 * the SWITCHING (-1) state or unlocked (0) state for longer because the owning
601 * thread was preempted.
602 */
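/*
 * Notation legend for the timeline above: "Ln"/"Un" are the lgenval/ugenval
 * generation counts, "+KE" means PTH_RWL_KBIT and PTH_RWL_EBIT are set in
 * lgenval, and "TIDn" is the value stored at tidaddr, with 940/941 standing
 * in for real thread ids, -1 for PTHREAD_MTX_TID_SWITCHING and 0 for an
 * unowned mutex.
 */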
603
604 /*
605 * Drop the mutex unlock references from cond_wait or mutex_unlock.
606 */
607 PTHREAD_ALWAYS_INLINE
608 static inline int
609 _pthread_mutex_fairshare_unlock_updatebits(_pthread_mutex *mutex,
610 uint32_t *flagsp, uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
611 {
612 uint32_t flags = mutex->mtxopts.value;
613 flags &= ~_PTHREAD_MTX_OPT_NOTIFY; // no notification by default
614
615 mutex_seq *seqaddr;
616 MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
617
618 mutex_seq oldseq, newseq;
619 mutex_seq_load(seqaddr, &oldseq);
620
621 uint64_t *tidaddr;
622 MUTEX_GETTID_ADDR(mutex, &tidaddr);
623 uint64_t oldtid, newtid;
624
625 int res = _pthread_mutex_unlock_handle_options(mutex, tidaddr);
626 if (res > 0) {
627 // Valid recursive unlock
628 if (flagsp) {
629 *flagsp = flags;
630 }
631 PLOCKSTAT_MUTEX_RELEASE((pthread_mutex_t *)mutex, 1);
632 return 0;
633 } else if (res < 0) {
634 PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, -res);
635 return -res;
636 }
637
638 bool clearnotify, spurious;
639 do {
640 newseq = oldseq;
641 oldtid = os_atomic_load(tidaddr, relaxed);
642
643 clearnotify = false;
644 spurious = false;
645
646 // pending waiters
647 int numwaiters = diff_genseq(oldseq.lgenval, oldseq.ugenval);
648 if (numwaiters == 0) {
649 // spurious unlock (unlock of unlocked lock)
650 spurious = true;
651 } else {
652 newseq.ugenval += PTHRW_INC;
653
654 if ((oldseq.lgenval & PTHRW_COUNT_MASK) ==
655 (newseq.ugenval & PTHRW_COUNT_MASK)) {
656 // our unlock sequence matches the lock sequence, so if the
657 // CAS is successful, the mutex is unlocked
658
659 /* do not reset Ibit, just K&E */
660 newseq.lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
661 clearnotify = true;
662 newtid = 0; // clear owner
663 } else {
664 newtid = PTHREAD_MTX_TID_SWITCHING;
665 // need to signal others waiting for mutex
666 flags |= _PTHREAD_MTX_OPT_NOTIFY;
667 }
668
669 if (newtid != oldtid) {
670 // We're giving up the mutex one way or the other, so go ahead
671 // and update the owner to 0 so that once the CAS below
672 // succeeds, there is no stale ownership information. If the
673 // CAS of the seqaddr fails, we may loop, but it's still valid
674 // for the owner to be SWITCHING/0
675 if (!os_atomic_cmpxchg(tidaddr, oldtid, newtid, relaxed)) {
676 // we own this mutex, nobody should be updating it except us
677 return _pthread_mutex_corruption_abort(mutex);
678 }
679 }
680 }
681
682 if (clearnotify || spurious) {
683 flags &= ~_PTHREAD_MTX_OPT_NOTIFY;
684 }
685 } while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, release));
686
687 PTHREAD_TRACE(psynch_mutex_unlock_updatebits, mutex, oldseq.lgenval,
688 newseq.lgenval, oldtid);
689
690 if (mgenp != NULL) {
691 *mgenp = newseq.lgenval;
692 }
693 if (ugenp != NULL) {
694 *ugenp = newseq.ugenval;
695 }
696 if (pmtxp != NULL) {
697 *pmtxp = (uint32_t *)mutex;
698 }
699 if (flagsp != NULL) {
700 *flagsp = flags;
701 }
702
703 return 0;
704 }
705
706 PTHREAD_ALWAYS_INLINE
707 static inline int
708 _pthread_mutex_fairshare_lock_updatebits(_pthread_mutex *mutex, uint64_t selfid)
709 {
710 bool firstfit = _pthread_mutex_is_firstfit(mutex);
711 bool gotlock = true;
712
713 mutex_seq *seqaddr;
714 MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
715
716 mutex_seq oldseq, newseq;
717 mutex_seq_load(seqaddr, &oldseq);
718
719 uint64_t *tidaddr;
720 MUTEX_GETTID_ADDR(mutex, &tidaddr);
721
722 do {
723 newseq = oldseq;
724
725 if (firstfit) {
726 // firstfit locks can have the lock stolen out from under a locker
727 // between the unlock from the kernel and this lock path. When this
728 // happens, we still want to set the K bit before leaving the loop
729 // (or notice if the lock unlocks while we try to update).
730 gotlock = !is_rwl_ebit_set(oldseq.lgenval);
731 } else if ((oldseq.lgenval & (PTH_RWL_KBIT | PTH_RWL_EBIT)) ==
732 (PTH_RWL_KBIT | PTH_RWL_EBIT)) {
733 // bits are already set, just update the owner tidaddr
734 break;
735 }
736
737 newseq.lgenval |= PTH_RWL_KBIT | PTH_RWL_EBIT;
738 } while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
739 acquire));
740
741 if (gotlock) {
742 os_atomic_store(tidaddr, selfid, relaxed);
743 }
744
745 PTHREAD_TRACE(psynch_mutex_lock_updatebits, mutex, oldseq.lgenval,
746 newseq.lgenval, 0);
747
748 // failing to take the lock in firstfit returns 1 to force the caller
749 // to wait in the kernel
750 return gotlock ? 0 : 1;
751 }
752
753 PTHREAD_NOINLINE
754 static int
755 _pthread_mutex_fairshare_lock_wait(_pthread_mutex *mutex, mutex_seq newseq,
756 uint64_t oldtid)
757 {
758 uint64_t *tidaddr;
759 MUTEX_GETTID_ADDR(mutex, &tidaddr);
760 uint64_t selfid = _pthread_selfid_direct();
761
762 PLOCKSTAT_MUTEX_BLOCK((pthread_mutex_t *)mutex);
763 do {
764 uint32_t updateval;
765 do {
766 updateval = __psynch_mutexwait(mutex, newseq.lgenval,
767 newseq.ugenval, oldtid, mutex->mtxopts.value);
768 oldtid = os_atomic_load(tidaddr, relaxed);
769 } while (updateval == (uint32_t)-1);
770
771 // returns 0 on successful update; in firstfit it may fail with 1
772 } while (_pthread_mutex_fairshare_lock_updatebits(mutex, selfid) == 1);
773 PLOCKSTAT_MUTEX_BLOCKED((pthread_mutex_t *)mutex, BLOCK_SUCCESS_PLOCKSTAT);
774
775 return 0;
776 }
777
778 PTHREAD_NOEXPORT PTHREAD_NOINLINE
779 int
780 _pthread_mutex_fairshare_lock_slow(_pthread_mutex *omutex, bool trylock)
781 {
782 int res, recursive = 0;
783 _pthread_mutex *mutex = (_pthread_mutex *)omutex;
784
785 mutex_seq *seqaddr;
786 MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
787
788 mutex_seq oldseq, newseq;
789 mutex_seq_load(seqaddr, &oldseq);
790
791 uint64_t *tidaddr;
792 MUTEX_GETTID_ADDR(mutex, &tidaddr);
793 uint64_t oldtid, selfid = _pthread_selfid_direct();
794
795 res = _pthread_mutex_lock_handle_options(mutex, trylock, tidaddr);
796 if (res > 0) {
797 recursive = 1;
798 res = 0;
799 goto out;
800 } else if (res < 0) {
801 res = -res;
802 goto out;
803 }
804
805 bool gotlock;
806 do {
807 newseq = oldseq;
808 oldtid = os_atomic_load(tidaddr, relaxed);
809
810 gotlock = ((oldseq.lgenval & PTH_RWL_EBIT) == 0);
811
812 if (trylock && !gotlock) {
813 // A trylock on a held lock will fail immediately. But since
814 // we did not load the sequence words atomically, perform a
815 // no-op CAS64 to ensure that nobody has unlocked concurrently.
816 } else {
817 // Increment the lock sequence number and force the lock into E+K
818 // mode, whether "gotlock" is true or not.
819 newseq.lgenval += PTHRW_INC;
820 newseq.lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
821 }
822 } while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, acquire));
823
824 PTHREAD_TRACE(psynch_mutex_lock_updatebits, omutex, oldseq.lgenval,
825 newseq.lgenval, 0);
826
827 if (gotlock) {
828 os_atomic_store(tidaddr, selfid, relaxed);
829 res = 0;
830 PTHREAD_TRACE(psynch_mutex_ulock, omutex, newseq.lgenval,
831 newseq.ugenval, selfid);
832 } else if (trylock) {
833 res = EBUSY;
834 PTHREAD_TRACE(psynch_mutex_utrylock_failed, omutex, newseq.lgenval,
835 newseq.ugenval, oldtid);
836 } else {
837 PTHREAD_TRACE(psynch_mutex_ulock | DBG_FUNC_START, omutex,
838 newseq.lgenval, newseq.ugenval, oldtid);
839 res = _pthread_mutex_fairshare_lock_wait(mutex, newseq, oldtid);
840 PTHREAD_TRACE(psynch_mutex_ulock | DBG_FUNC_END, omutex,
841 newseq.lgenval, newseq.ugenval, oldtid);
842 }
843
844 if (res == 0 && _pthread_mutex_is_recursive(mutex)) {
845 mutex->mtxopts.options.lock_count = 1;
846 }
847
848 out:
849 #if PLOCKSTAT
850 if (res == 0) {
851 PLOCKSTAT_MUTEX_ACQUIRE((pthread_mutex_t *)mutex, recursive, 0);
852 } else {
853 PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, res);
854 }
855 #endif
856
857 return res;
858 }
859
860 PTHREAD_NOINLINE
861 static inline int
862 _pthread_mutex_fairshare_lock(_pthread_mutex *mutex, bool trylock)
863 {
864 #if ENABLE_USERSPACE_TRACE
865 return _pthread_mutex_fairshare_lock_slow(mutex, trylock);
866 #elif PLOCKSTAT
867 if (PLOCKSTAT_MUTEX_ACQUIRE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED()) {
868 return _pthread_mutex_fairshare_lock_slow(mutex, trylock);
869 }
870 #endif
871
872 uint64_t *tidaddr;
873 MUTEX_GETTID_ADDR(mutex, &tidaddr);
874 uint64_t selfid = _pthread_selfid_direct();
875
876 mutex_seq *seqaddr;
877 MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
878
879 mutex_seq oldseq, newseq;
880 mutex_seq_load(seqaddr, &oldseq);
881
882 if (os_unlikely(oldseq.lgenval & PTH_RWL_EBIT)) {
883 return _pthread_mutex_fairshare_lock_slow(mutex, trylock);
884 }
885
886 bool gotlock;
887 do {
888 newseq = oldseq;
889
890 gotlock = ((oldseq.lgenval & PTH_RWL_EBIT) == 0);
891
892 if (trylock && !gotlock) {
893 // A trylock on a held lock will fail immediately. But since
894 // we did not load the sequence words atomically, perform a
895 // no-op CAS64 to ensure that nobody has unlocked concurrently.
896 } else if (os_likely(gotlock)) {
897 // Increment the lock sequence number and force the lock into
898 // E+K mode; in this branch "gotlock" is known to be true.
899 newseq.lgenval += PTHRW_INC;
900 newseq.lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
901 } else {
902 return _pthread_mutex_fairshare_lock_slow(mutex, trylock);
903 }
904 } while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
905 acquire)));
906
907 if (os_likely(gotlock)) {
908 os_atomic_store(tidaddr, selfid, relaxed);
909 return 0;
910 } else if (trylock) {
911 return EBUSY;
912 } else {
913 __builtin_trap();
914 }
915 }
916
917 PTHREAD_NOINLINE
918 static int
919 _pthread_mutex_fairshare_unlock_drop(_pthread_mutex *mutex, mutex_seq newseq,
920 uint32_t flags)
921 {
922 int res;
923 uint32_t updateval;
924
925 uint64_t *tidaddr;
926 MUTEX_GETTID_ADDR(mutex, &tidaddr);
927
928 PTHREAD_TRACE(psynch_mutex_uunlock | DBG_FUNC_START, mutex, newseq.lgenval,
929 newseq.ugenval, os_atomic_load(tidaddr, relaxed));
930
931 updateval = __psynch_mutexdrop(mutex, newseq.lgenval, newseq.ugenval,
932 os_atomic_load(tidaddr, relaxed), flags);
933
934 PTHREAD_TRACE(psynch_mutex_uunlock | DBG_FUNC_END, mutex, updateval, 0, 0);
935
936 if (updateval == (uint32_t)-1) {
937 res = errno;
938
939 if (res == EINTR) {
940 res = 0;
941 }
942 if (res != 0) {
943 PTHREAD_INTERNAL_CRASH(res, "__psynch_mutexdrop failed");
944 }
945 return res;
946 }
947
948 return 0;
949 }
950
951 PTHREAD_NOEXPORT PTHREAD_NOINLINE
952 int
953 _pthread_mutex_fairshare_unlock_slow(_pthread_mutex *mutex)
954 {
955 int res;
956 mutex_seq newseq;
957 uint32_t flags;
958
959 res = _pthread_mutex_fairshare_unlock_updatebits(mutex, &flags, NULL,
960 &newseq.lgenval, &newseq.ugenval);
961 if (res != 0) return res;
962
963 if ((flags & _PTHREAD_MTX_OPT_NOTIFY) != 0) {
964 return _pthread_mutex_fairshare_unlock_drop(mutex, newseq, flags);
965 } else {
966 uint64_t *tidaddr;
967 MUTEX_GETTID_ADDR(mutex, &tidaddr);
968 PTHREAD_TRACE(psynch_mutex_uunlock, mutex, newseq.lgenval,
969 newseq.ugenval, os_atomic_load(tidaddr, relaxed));
970 }
971
972 return 0;
973 }
974
975 PTHREAD_NOINLINE
976 static int
977 _pthread_mutex_fairshare_unlock(_pthread_mutex *mutex)
978 {
979 #if ENABLE_USERSPACE_TRACE
980 return _pthread_mutex_fairshare_unlock_slow(mutex);
981 #elif PLOCKSTAT
982 if (PLOCKSTAT_MUTEX_RELEASE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED()) {
983 return _pthread_mutex_fairshare_unlock_slow(mutex);
984 }
985 #endif
986
987 uint64_t *tidaddr;
988 MUTEX_GETTID_ADDR(mutex, &tidaddr);
989
990 mutex_seq *seqaddr;
991 MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
992
993 mutex_seq oldseq, newseq;
994 mutex_seq_load(seqaddr, &oldseq);
995
996 int numwaiters = diff_genseq(oldseq.lgenval, oldseq.ugenval);
997 if (os_unlikely(numwaiters == 0)) {
998 // spurious unlock (unlock of unlocked lock)
999 return 0;
1000 }
1001
1002 // We're giving up the mutex one way or the other, so go ahead and
1003 // update the owner to 0 so that once the CAS below succeeds, there
1004 // is no stale ownership information. If the CAS of the seqaddr
1005 // fails, we may loop, but it's still valid for the owner to be
1006 // SWITCHING/0
1007 os_atomic_store(tidaddr, 0, relaxed);
1008
1009 do {
1010 newseq = oldseq;
1011 newseq.ugenval += PTHRW_INC;
1012
1013 if (os_likely((oldseq.lgenval & PTHRW_COUNT_MASK) ==
1014 (newseq.ugenval & PTHRW_COUNT_MASK))) {
1015 // if we succeed in performing the CAS we can be sure of a fast
1016 // path (only needing the CAS) unlock, if:
1017 // a. our lock and unlock sequence are equal
1018 // b. we don't need to clear an unlock prepost from the kernel
1019
1020 // do not reset Ibit, just K&E
1021 newseq.lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
1022 } else {
1023 return _pthread_mutex_fairshare_unlock_slow(mutex);
1024 }
1025 } while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
1026 release)));
1027
1028 return 0;
1029 }
1030
1031 #pragma mark firstfit
1032
1033 PTHREAD_ALWAYS_INLINE
1034 static inline int
1035 _pthread_mutex_firstfit_unlock_updatebits(_pthread_mutex *mutex,
1036 uint32_t *flagsp, uint32_t **mutexp, uint32_t *lvalp, uint32_t *uvalp)
1037 {
1038 uint32_t flags = mutex->mtxopts.value & ~_PTHREAD_MTX_OPT_NOTIFY;
1039 bool kernel_wake;
1040
1041 mutex_seq *seqaddr;
1042 MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
1043
1044 mutex_seq oldseq, newseq;
1045 mutex_seq_load(seqaddr, &oldseq);
1046
1047 uint64_t *tidaddr;
1048 MUTEX_GETTID_ADDR(mutex, &tidaddr);
1049 uint64_t oldtid;
1050
1051 int res = _pthread_mutex_unlock_handle_options(mutex, tidaddr);
1052 if (res > 0) {
1053 // Valid recursive unlock
1054 if (flagsp) {
1055 *flagsp = flags;
1056 }
1057 PLOCKSTAT_MUTEX_RELEASE((pthread_mutex_t *)mutex, 1);
1058 return 0;
1059 } else if (res < 0) {
1060 PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, -res);
1061 return -res;
1062 }
1063
1064 do {
1065 newseq = oldseq;
1066 oldtid = os_atomic_load(tidaddr, relaxed);
1067 // At least one kernel waiter means we need to do a wake.
1068 kernel_wake = diff_genseq(oldseq.lgenval, oldseq.ugenval) > 0;
1069 newseq.lgenval &= ~PTH_RWL_EBIT;
1070
1071 if (kernel_wake) {
1072 // Going to the kernel to post the unlock consumes one pending
1073 // waiter from the mutex counts.
1074 newseq.ugenval += PTHRW_INC;
1075 }
1076
1077 if (oldtid != 0) {
1078 if (!os_atomic_cmpxchg(tidaddr, oldtid, 0, relaxed)) {
1079 return _pthread_mutex_corruption_abort(mutex);
1080 }
1081 }
1082 } while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, release));
1083
1084 PTHREAD_TRACE(psynch_ffmutex_unlock_updatebits, mutex, oldseq.lgenval,
1085 newseq.lgenval, newseq.ugenval);
1086
1087 if (kernel_wake) {
1088 // We choose to return this out via flags because the condition
1089 // variable also uses this to determine whether to do a kernel wake
1090 // when beginning a cvwait.
1091 flags |= _PTHREAD_MTX_OPT_NOTIFY;
1092 }
1093 if (lvalp) {
1094 *lvalp = newseq.lgenval;
1095 }
1096 if (uvalp) {
1097 *uvalp = newseq.ugenval;
1098 }
1099 if (mutexp) {
1100 *mutexp = (uint32_t *)mutex;
1101 }
1102 if (flagsp) {
1103 *flagsp = flags;
1104 }
1105 return 0;
1106 }
1107
1108 PTHREAD_NOEXPORT PTHREAD_NOINLINE
1109 static int
1110 _pthread_mutex_firstfit_wake(_pthread_mutex *mutex, mutex_seq newseq,
1111 uint32_t flags)
1112 {
1113 PTHREAD_TRACE(psynch_ffmutex_wake, mutex, newseq.lgenval, newseq.ugenval,
1114 0);
1115 int res = __psynch_mutexdrop(mutex, newseq.lgenval, newseq.ugenval, 0,
1116 flags);
1117
1118 if (res == -1) {
1119 res = errno;
1120 if (res == EINTR) {
1121 res = 0;
1122 }
1123 if (res != 0) {
1124 PTHREAD_INTERNAL_CRASH(res, "__psynch_mutexdrop failed");
1125 }
1126 return res;
1127 }
1128 return 0;
1129 }
1130
1131 PTHREAD_NOEXPORT PTHREAD_NOINLINE
1132 int
1133 _pthread_mutex_firstfit_unlock_slow(_pthread_mutex *mutex)
1134 {
1135 mutex_seq newseq;
1136 uint32_t flags;
1137 int res;
1138
1139 res = _pthread_mutex_firstfit_unlock_updatebits(mutex, &flags, NULL,
1140 &newseq.lgenval, &newseq.ugenval);
1141 if (res != 0) return res;
1142
1143 if (flags & _PTHREAD_MTX_OPT_NOTIFY) {
1144 return _pthread_mutex_firstfit_wake(mutex, newseq, flags);
1145 }
1146 return 0;
1147 }
1148
1149 PTHREAD_ALWAYS_INLINE
1150 static bool
1151 _pthread_mutex_firstfit_lock_updatebits(_pthread_mutex *mutex, uint64_t selfid,
1152 mutex_seq *newseqp)
1153 {
1154 bool gotlock;
1155
1156 mutex_seq *seqaddr;
1157 MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
1158
1159 mutex_seq oldseq, newseq;
1160 mutex_seq_load(seqaddr, &oldseq);
1161
1162 uint64_t *tidaddr;
1163 MUTEX_GETTID_ADDR(mutex, &tidaddr);
1164
1165 PTHREAD_TRACE(psynch_ffmutex_lock_updatebits | DBG_FUNC_START, mutex,
1166 oldseq.lgenval, oldseq.ugenval, 0);
1167
1168 do {
1169 newseq = oldseq;
1170 gotlock = is_rwl_ebit_clear(oldseq.lgenval);
1171
1172 if (gotlock) {
1173 // If we see the E-bit cleared, we should just attempt to take it.
1174 newseq.lgenval |= PTH_RWL_EBIT;
1175 } else {
1176 // If we failed to get the lock then we need to put ourselves back
1177 // in the queue of waiters. The previous unlocker that woke us out
1178 // of the kernel consumed the S-count for our previous wake. So
1179 // take another ticket on L and go back in the kernel to sleep.
1180 newseq.lgenval += PTHRW_INC;
1181 }
1182 } while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, acquire));
1183
1184 if (gotlock) {
1185 os_atomic_store(tidaddr, selfid, relaxed);
1186 }
1187
1188 PTHREAD_TRACE(psynch_ffmutex_lock_updatebits | DBG_FUNC_END, mutex,
1189 newseq.lgenval, newseq.ugenval, 0);
1190
1191 if (newseqp) {
1192 *newseqp = newseq;
1193 }
1194 return gotlock;
1195 }
1196
1197 PTHREAD_NOINLINE
1198 static int
1199 _pthread_mutex_firstfit_lock_wait(_pthread_mutex *mutex, mutex_seq newseq,
1200 uint64_t oldtid)
1201 {
1202 uint64_t *tidaddr;
1203 MUTEX_GETTID_ADDR(mutex, &tidaddr);
1204 uint64_t selfid = _pthread_selfid_direct();
1205
1206 PLOCKSTAT_MUTEX_BLOCK((pthread_mutex_t *)mutex);
1207 do {
1208 uint32_t uval;
1209 do {
1210 PTHREAD_TRACE(psynch_ffmutex_wait | DBG_FUNC_START, mutex,
1211 newseq.lgenval, newseq.ugenval, mutex->mtxopts.value);
1212 uval = __psynch_mutexwait(mutex, newseq.lgenval, newseq.ugenval,
1213 oldtid, mutex->mtxopts.value);
1214 PTHREAD_TRACE(psynch_ffmutex_wait | DBG_FUNC_END, mutex,
1215 uval, 0, 0);
1216 oldtid = os_atomic_load(tidaddr, relaxed);
1217 } while (uval == (uint32_t)-1);
1218 } while (!_pthread_mutex_firstfit_lock_updatebits(mutex, selfid, &newseq));
1219 PLOCKSTAT_MUTEX_BLOCKED((pthread_mutex_t *)mutex, BLOCK_SUCCESS_PLOCKSTAT);
1220
1221 return 0;
1222 }
1223
1224 PTHREAD_NOEXPORT PTHREAD_NOINLINE
1225 int
1226 _pthread_mutex_firstfit_lock_slow(_pthread_mutex *mutex, bool trylock)
1227 {
1228 int res, recursive = 0;
1229
1230 mutex_seq *seqaddr;
1231 MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
1232
1233 mutex_seq oldseq, newseq;
1234 mutex_seq_load(seqaddr, &oldseq);
1235
1236 uint64_t *tidaddr;
1237 MUTEX_GETTID_ADDR(mutex, &tidaddr);
1238 uint64_t oldtid, selfid = _pthread_selfid_direct();
1239
1240 res = _pthread_mutex_lock_handle_options(mutex, trylock, tidaddr);
1241 if (res > 0) {
1242 recursive = 1;
1243 res = 0;
1244 goto out;
1245 } else if (res < 0) {
1246 res = -res;
1247 goto out;
1248 }
1249
1250 PTHREAD_TRACE(psynch_ffmutex_lock_updatebits | DBG_FUNC_START, mutex,
1251 oldseq.lgenval, oldseq.ugenval, 0);
1252
1253 bool gotlock;
1254 do {
1255 newseq = oldseq;
1256 oldtid = os_atomic_load(tidaddr, relaxed);
1257
1258 gotlock = is_rwl_ebit_clear(oldseq.lgenval);
1259 if (trylock && !gotlock) {
1260 // We still want to perform the CAS here, even though it won't
1261 // do anything, so that it fails if someone unlocked while we
1262 // were in the loop.
1263 } else if (gotlock) {
1264 // In first-fit, getting the lock simply adds the E-bit
1265 newseq.lgenval |= PTH_RWL_EBIT;
1266 } else {
1267 // Failed to get the lock, increment the L-val and go to
1268 // the kernel to sleep
1269 newseq.lgenval += PTHRW_INC;
1270 }
1271 } while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, acquire));
1272
1273 PTHREAD_TRACE(psynch_ffmutex_lock_updatebits | DBG_FUNC_END, mutex,
1274 newseq.lgenval, newseq.ugenval, 0);
1275
1276 if (gotlock) {
1277 os_atomic_store(tidaddr, selfid, relaxed);
1278 res = 0;
1279 PTHREAD_TRACE(psynch_mutex_ulock, mutex, newseq.lgenval,
1280 newseq.ugenval, selfid);
1281 } else if (trylock) {
1282 res = EBUSY;
1283 PTHREAD_TRACE(psynch_mutex_utrylock_failed, mutex, newseq.lgenval,
1284 newseq.ugenval, oldtid);
1285 } else {
1286 PTHREAD_TRACE(psynch_mutex_ulock | DBG_FUNC_START, mutex,
1287 newseq.lgenval, newseq.ugenval, oldtid);
1288 res = _pthread_mutex_firstfit_lock_wait(mutex, newseq, oldtid);
1289 PTHREAD_TRACE(psynch_mutex_ulock | DBG_FUNC_END, mutex,
1290 newseq.lgenval, newseq.ugenval, oldtid);
1291 }
1292
1293 if (res == 0 && _pthread_mutex_is_recursive(mutex)) {
1294 mutex->mtxopts.options.lock_count = 1;
1295 }
1296
1297 out:
1298 #if PLOCKSTAT
1299 if (res == 0) {
1300 PLOCKSTAT_MUTEX_ACQUIRE((pthread_mutex_t *)mutex, recursive, 0);
1301 } else {
1302 PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, res);
1303 }
1304 #endif
1305 return res;
1306 }
1307
1308 #pragma mark fast path
1309
1310 PTHREAD_NOEXPORT PTHREAD_NOINLINE
1311 int
1312 _pthread_mutex_droplock(_pthread_mutex *mutex, uint32_t *flagsp,
1313 uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
1314 {
1315 if (_pthread_mutex_is_fairshare(mutex)) {
1316 return _pthread_mutex_fairshare_unlock_updatebits(mutex, flagsp,
1317 pmtxp, mgenp, ugenp);
1318 }
1319 return _pthread_mutex_firstfit_unlock_updatebits(mutex, flagsp, pmtxp,
1320 mgenp, ugenp);
1321 }
1322
1323 PTHREAD_NOEXPORT PTHREAD_NOINLINE
1324 int
1325 _pthread_mutex_lock_init_slow(_pthread_mutex *mutex, bool trylock)
1326 {
1327 int res;
1328
1329 res = _pthread_mutex_check_init(mutex);
1330 if (res != 0) return res;
1331
1332 if (os_unlikely(_pthread_mutex_is_fairshare(mutex))) {
1333 return _pthread_mutex_fairshare_lock_slow(mutex, trylock);
1334 }
1335 return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
1336 }
1337
1338 PTHREAD_NOEXPORT PTHREAD_NOINLINE
1339 static int
1340 _pthread_mutex_unlock_init_slow(_pthread_mutex *mutex)
1341 {
1342 int res;
1343
1344 // Initialize static mutexes for compatibility with misbehaving
1345 // applications (unlock should not be the first operation on a mutex).
1346 res = _pthread_mutex_check_init(mutex);
1347 if (res != 0) return res;
1348
1349 if (os_unlikely(_pthread_mutex_is_fairshare(mutex))) {
1350 return _pthread_mutex_fairshare_unlock_slow(mutex);
1351 }
1352 return _pthread_mutex_firstfit_unlock_slow(mutex);
1353 }
1354
1355 PTHREAD_NOEXPORT_VARIANT
1356 int
1357 pthread_mutex_unlock(pthread_mutex_t *omutex)
1358 {
1359 _pthread_mutex *mutex = (_pthread_mutex *)omutex;
1360 if (os_unlikely(!_pthread_mutex_check_signature_fast(mutex))) {
1361 return _pthread_mutex_unlock_init_slow(mutex);
1362 }
1363
1364 if (os_unlikely(_pthread_mutex_is_fairshare(mutex))) {
1365 return _pthread_mutex_fairshare_unlock(mutex);
1366 }
1367
1368 #if ENABLE_USERSPACE_TRACE
1369 return _pthread_mutex_firstfit_unlock_slow(mutex);
1370 #elif PLOCKSTAT
1371 if (PLOCKSTAT_MUTEX_RELEASE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED()) {
1372 return _pthread_mutex_firstfit_unlock_slow(mutex);
1373 }
1374 #endif
1375
1376 /*
1377 * This is the first-fit fast path. The fairshare fast-ish path is in
1378 * _pthread_mutex_fairshare_unlock()
1379 */
1380 uint64_t *tidaddr;
1381 MUTEX_GETTID_ADDR(mutex, &tidaddr);
1382
1383 mutex_seq *seqaddr;
1384 MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
1385
1386 mutex_seq oldseq, newseq;
1387 mutex_seq_load(seqaddr, &oldseq);
1388
1389 // We're giving up the mutex one way or the other, so go ahead and
1390 // update the owner to 0 so that once the CAS below succeeds, there
1391 // is no stale ownership information. If the CAS of the seqaddr
1392 // fails, we may loop, but it's still valid for the owner to be
1393 // SWITCHING/0
1394 os_atomic_store(tidaddr, 0, relaxed);
1395
1396 do {
1397 newseq = oldseq;
1398
1399 if (diff_genseq(oldseq.lgenval, oldseq.ugenval) == 0) {
1400 // No outstanding waiters in kernel, we can simply drop the E-bit
1401 // and return.
1402 newseq.lgenval &= ~PTH_RWL_EBIT;
1403 } else {
1404 return _pthread_mutex_firstfit_unlock_slow(mutex);
1405 }
1406 } while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
1407 release)));
1408
1409 return 0;
1410 }
1411
1412 PTHREAD_ALWAYS_INLINE
1413 static inline int
1414 _pthread_mutex_firstfit_lock(pthread_mutex_t *omutex, bool trylock)
1415 {
1416 _pthread_mutex *mutex = (_pthread_mutex *)omutex;
1417 if (os_unlikely(!_pthread_mutex_check_signature_fast(mutex))) {
1418 return _pthread_mutex_lock_init_slow(mutex, trylock);
1419 }
1420
1421 if (os_unlikely(_pthread_mutex_is_fairshare(mutex))) {
1422 return _pthread_mutex_fairshare_lock(mutex, trylock);
1423 }
1424
1425 #if ENABLE_USERSPACE_TRACE
1426 return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
1427 #elif PLOCKSTAT
1428 if (PLOCKSTAT_MUTEX_ACQUIRE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED()) {
1429 return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
1430 }
1431 #endif
1432
1433 /*
1434 * This is the first-fit fast path. The fairshare fast-ish path is in
1435 * _pthread_mutex_fairshare_lock()
1436 */
1437 uint64_t *tidaddr;
1438 MUTEX_GETTID_ADDR(mutex, &tidaddr);
1439 uint64_t selfid = _pthread_selfid_direct();
1440
1441 mutex_seq *seqaddr;
1442 MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
1443
1444 mutex_seq oldseq, newseq;
1445 mutex_seq_load(seqaddr, &oldseq);
1446
1447 if (os_unlikely(!trylock && (oldseq.lgenval & PTH_RWL_EBIT))) {
1448 return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
1449 }
1450
1451 bool gotlock;
1452 do {
1453 newseq = oldseq;
1454 gotlock = is_rwl_ebit_clear(oldseq.lgenval);
1455
1456 if (trylock && !gotlock) {
1457 #if __LP64__
1458 // The sequence load is atomic, so we can bail here without writing
1459 // it and avoid some unnecessary coherence traffic - rdar://57259033
1460 os_atomic_thread_fence(acquire);
1461 return EBUSY;
1462 #else
1463 // A trylock on a held lock will fail immediately. But since
1464 // we did not load the sequence words atomically, perform a
1465 // no-op CAS64 to ensure that nobody has unlocked concurrently.
1466 #endif
1467 } else if (os_likely(gotlock)) {
1468 // In first-fit, getting the lock simply adds the E-bit
1469 newseq.lgenval |= PTH_RWL_EBIT;
1470 } else {
1471 return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
1472 }
1473 } while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
1474 acquire)));
1475
1476 if (os_likely(gotlock)) {
1477 os_atomic_store(tidaddr, selfid, relaxed);
1478 return 0;
1479 } else if (trylock) {
1480 return EBUSY;
1481 } else {
1482 __builtin_trap();
1483 }
1484 }
1485
1486 PTHREAD_NOEXPORT_VARIANT
1487 int
1488 pthread_mutex_lock(pthread_mutex_t *mutex)
1489 {
1490 return _pthread_mutex_firstfit_lock(mutex, false);
1491 }
1492
1493 PTHREAD_NOEXPORT_VARIANT
1494 int
1495 pthread_mutex_trylock(pthread_mutex_t *mutex)
1496 {
1497 return _pthread_mutex_firstfit_lock(mutex, true);
1498 }
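/*
 * Client-side usage sketch (illustrative only):
 *
 *	pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
 *	if (pthread_mutex_trylock(&m) != 0) {	// EBUSY if already held
 *		pthread_mutex_lock(&m);		// may block via __psynch_mutexwait
 *	}
 *	// ... critical section ...
 *	pthread_mutex_unlock(&m);
 *
 * Both entry points above funnel into _pthread_mutex_firstfit_lock(), which
 * only diverts to the fairshare or slow paths when the mutex requires it.
 */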
1499
1500
1501 PTHREAD_ALWAYS_INLINE
1502 static inline int
1503 _pthread_mutex_init(_pthread_mutex *mutex, const pthread_mutexattr_t *attr,
1504 uint32_t static_type)
1505 {
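	/*
	 * static_type selects the flavour requested by the static initializers;
	 * judging from the switch below and the callers, 1 = errorcheck,
	 * 2 = recursive, 3 = firstfit and 7 = default (pthread_mutex_init()
	 * passes 0x7, _pthread_mutex_check_init_slow() passes mutex->sig & 0xf).
	 */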
1506 mutex->mtxopts.value = 0;
1507 mutex->mtxopts.options.mutex = 1;
1508 if (attr) {
1509 if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
1510 return EINVAL;
1511 }
1512 mutex->prioceiling = (int16_t)attr->prioceiling;
1513 mutex->mtxopts.options.protocol = attr->protocol;
1514 mutex->mtxopts.options.policy = attr->opt;
1515 mutex->mtxopts.options.type = attr->type;
1516 mutex->mtxopts.options.pshared = attr->pshared;
1517 } else {
1518 switch (static_type) {
1519 case 1:
1520 mutex->mtxopts.options.type = PTHREAD_MUTEX_ERRORCHECK;
1521 break;
1522 case 2:
1523 mutex->mtxopts.options.type = PTHREAD_MUTEX_RECURSIVE;
1524 break;
1525 case 3:
1526 /* firstfit fall thru */
1527 case 7:
1528 mutex->mtxopts.options.type = PTHREAD_MUTEX_DEFAULT;
1529 break;
1530 default:
1531 return EINVAL;
1532 }
1533
1534 mutex->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
1535 mutex->mtxopts.options.protocol = _PTHREAD_DEFAULT_PROTOCOL;
1536 if (static_type != 3) {
1537 mutex->mtxopts.options.policy = __pthread_mutex_default_opt_policy;
1538 } else {
1539 mutex->mtxopts.options.policy = _PTHREAD_MTX_OPT_POLICY_FIRSTFIT;
1540 }
1541 mutex->mtxopts.options.pshared = _PTHREAD_DEFAULT_PSHARED;
1542 }
1543 mutex->priority = 0;
1544
1545 mutex_seq *seqaddr;
1546 MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
1547
1548 uint64_t *tidaddr;
1549 MUTEX_GETTID_ADDR(mutex, &tidaddr);
1550
1551 #if PTHREAD_MUTEX_INIT_UNUSED
1552 if ((uint32_t*)tidaddr != mutex->m_tid) {
1553 mutex->mtxopts.options.misalign = 1;
1554 __builtin_memset(mutex->m_tid, 0xff, sizeof(mutex->m_tid));
1555 }
1556 __builtin_memset(mutex->m_mis, 0xff, sizeof(mutex->m_mis));
1557 #endif // PTHREAD_MUTEX_INIT_UNUSED
1558 *tidaddr = 0;
1559 *seqaddr = (mutex_seq){ };
1560
1561 long sig = _PTHREAD_MUTEX_SIG;
1562 if (mutex->mtxopts.options.type == PTHREAD_MUTEX_NORMAL &&
1563 (_pthread_mutex_is_fairshare(mutex) ||
1564 _pthread_mutex_is_firstfit(mutex))) {
1565 // rdar://18148854 _pthread_mutex_lock & pthread_mutex_unlock fastpath
1566 sig = _PTHREAD_MUTEX_SIG_fast;
1567 }
1568
1569 #if PTHREAD_MUTEX_INIT_UNUSED
1570 // For detecting copied mutexes and smashes during debugging
1571 uint32_t sig32 = (uint32_t)sig;
1572 #if defined(__LP64__)
1573 uintptr_t guard = ~(uintptr_t)mutex; // use ~ to hide from leaks
1574 __builtin_memcpy(mutex->_reserved, &guard, sizeof(guard));
1575 mutex->_reserved[2] = sig32;
1576 mutex->_reserved[3] = sig32;
1577 mutex->_pad = sig32;
1578 #else
1579 mutex->_reserved[0] = sig32;
1580 #endif
1581 #endif // PTHREAD_MUTEX_INIT_UNUSED
1582
1583 // Ensure all contents are properly set before setting signature.
1584 #if defined(__LP64__)
1585 // For binary compatibility reasons we cannot require natural alignment of
1586 // the 64bit 'sig' long value in the struct. rdar://problem/21610439
1587 uint32_t *sig32_ptr = (uint32_t*)&mutex->sig;
1588 uint32_t *sig32_val = (uint32_t*)&sig;
1589 *(sig32_ptr + 1) = *(sig32_val + 1);
1590 os_atomic_store(sig32_ptr, *sig32_val, release);
1591 #else
1592 os_atomic_store2o(mutex, sig, sig, release);
1593 #endif
1594
1595 return 0;
1596 }
1597
1598 PTHREAD_NOEXPORT_VARIANT
1599 int
1600 pthread_mutex_destroy(pthread_mutex_t *omutex)
1601 {
1602 _pthread_mutex *mutex = (_pthread_mutex *)omutex;
1603
1604 int res = EINVAL;
1605
1606 _PTHREAD_LOCK(mutex->lock);
1607 if (_pthread_mutex_check_signature(mutex)) {
1608 mutex_seq *seqaddr;
1609 MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
1610
1611 mutex_seq seq;
1612 mutex_seq_load(seqaddr, &seq);
1613
1614 uint64_t *tidaddr;
1615 MUTEX_GETTID_ADDR(mutex, &tidaddr);
1616
1617 if ((os_atomic_load(tidaddr, relaxed) == 0) &&
1618 (seq.lgenval & PTHRW_COUNT_MASK) ==
1619 (seq.ugenval & PTHRW_COUNT_MASK)) {
1620 mutex->sig = _PTHREAD_NO_SIG;
1621 res = 0;
1622 } else {
1623 res = EBUSY;
1624 }
1625 } else if (_pthread_mutex_check_signature_init(mutex)) {
1626 mutex->sig = _PTHREAD_NO_SIG;
1627 res = 0;
1628 }
1629 _PTHREAD_UNLOCK(mutex->lock);
1630
1631 return res;
1632 }
1633
1634 #endif /* !BUILDING_VARIANT ] */
1635
1636 /*
1637 * Destroy a mutex attribute structure.
1638 */
1639 int
1640 pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
1641 {
1642 #if __DARWIN_UNIX03
1643 if (__unix_conforming == 0) {
1644 __unix_conforming = 1;
1645 }
1646 if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
1647 return EINVAL;
1648 }
1649 #endif /* __DARWIN_UNIX03 */
1650
1651 attr->sig = _PTHREAD_NO_SIG;
1652 return 0;
1653 }
1654