/*
 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 * All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */
/*
 * MkLinux
 */

/*
 * POSIX Pthread Library
 * -- Mutex variable support
 */
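
/*
 * Typical caller-side usage of the interfaces implemented in this file
 * (an illustrative sketch only, not part of the library itself):
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
 *	pthread_mutex_init(&m, &attr);
 *	pthread_mutexattr_destroy(&attr);
 *
 *	pthread_mutex_lock(&m);
 *	// ... critical section ...
 *	pthread_mutex_unlock(&m);
 *	pthread_mutex_destroy(&m);
 */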

#include "resolver.h"
#include "internal.h"
#include "kern/kern_trace.h"

#ifndef BUILDING_VARIANT /* [ */

#ifdef PLOCKSTAT
#include "plockstat.h"
/* This function is never called and exists to provide never-fired dtrace
 * probes so that user d scripts don't get errors.
 */
PTHREAD_NOEXPORT PTHREAD_USED
void
_plockstat_never_fired(void)
{
	PLOCKSTAT_MUTEX_SPIN(NULL);
	PLOCKSTAT_MUTEX_SPUN(NULL, 0, 0);
}
#else /* !PLOCKSTAT */
#define PLOCKSTAT_MUTEX_SPIN(x)
#define PLOCKSTAT_MUTEX_SPUN(x, y, z)
#define PLOCKSTAT_MUTEX_ERROR(x, y)
#define PLOCKSTAT_MUTEX_BLOCK(x)
#define PLOCKSTAT_MUTEX_BLOCKED(x, y)
#define PLOCKSTAT_MUTEX_ACQUIRE(x, y, z)
#define PLOCKSTAT_MUTEX_RELEASE(x, y)
#endif /* PLOCKSTAT */

#define BLOCK_FAIL_PLOCKSTAT    0
#define BLOCK_SUCCESS_PLOCKSTAT 1

#define PTHREAD_MUTEX_INIT_UNUSED 1

PTHREAD_NOEXPORT PTHREAD_WEAK
int _pthread_mutex_lock_init_slow(_pthread_mutex *mutex, bool trylock);

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int _pthread_mutex_fairshare_lock_slow(_pthread_mutex *mutex, bool trylock);

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int _pthread_mutex_firstfit_lock_slow(_pthread_mutex *mutex, bool trylock);

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int _pthread_mutex_fairshare_unlock_slow(_pthread_mutex *mutex);

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int _pthread_mutex_firstfit_unlock_slow(_pthread_mutex *mutex);

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int _pthread_mutex_corruption_abort(_pthread_mutex *mutex);

extern int __pthread_mutex_default_opt_policy PTHREAD_NOEXPORT;


int __pthread_mutex_default_opt_policy PTHREAD_NOEXPORT =
		_PTHREAD_MTX_OPT_POLICY_DEFAULT;

static inline bool
_pthread_mutex_policy_validate(int policy)
{
	return (policy >= 0 && policy < _PTHREAD_MUTEX_POLICY_LAST);
}

static inline int
_pthread_mutex_policy_to_opt(int policy)
{
	switch (policy) {
	case PTHREAD_MUTEX_POLICY_FAIRSHARE_NP:
		return _PTHREAD_MTX_OPT_POLICY_FAIRSHARE;
	case PTHREAD_MUTEX_POLICY_FIRSTFIT_NP:
		return _PTHREAD_MTX_OPT_POLICY_FIRSTFIT;
	default:
		__builtin_unreachable();
	}
}

PTHREAD_NOEXPORT
void
_pthread_mutex_global_init(const char *envp[],
		struct _pthread_registration_data *registration_data)
{

	int opt = _PTHREAD_MTX_OPT_POLICY_DEFAULT;
	if (registration_data->mutex_default_policy) {
		int policy = registration_data->mutex_default_policy;
		if (_pthread_mutex_policy_validate(policy)) {
			opt = _pthread_mutex_policy_to_opt(policy);
		}
	}

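	// The PTHREAD_MUTEX_DEFAULT_POLICY environment variable, when set to a
	// single digit naming a valid policy (one of the PTHREAD_MUTEX_POLICY_*_NP
	// values from the public headers), overrides the registration default.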
	const char *envvar = _simple_getenv(envp, "PTHREAD_MUTEX_DEFAULT_POLICY");
	if (envvar) {
		int policy = envvar[0] - '0';
		if (_pthread_mutex_policy_validate(policy)) {
			opt = _pthread_mutex_policy_to_opt(policy);
		}
	}

	if (opt != __pthread_mutex_default_opt_policy) {
		__pthread_mutex_default_opt_policy = opt;
	}
}



PTHREAD_ALWAYS_INLINE
static inline int _pthread_mutex_init(_pthread_mutex *mutex,
		const pthread_mutexattr_t *attr, uint32_t static_type);

typedef union mutex_seq {
	uint32_t seq[2];
	struct { uint32_t lgenval; uint32_t ugenval; };
	struct { uint32_t mgen; uint32_t ugen; };
	uint64_t seq_LU;
	uint64_t _Atomic atomic_seq_LU;
} mutex_seq;

_Static_assert(sizeof(mutex_seq) == 2 * sizeof(uint32_t),
		"Incorrect mutex_seq size");

#if !__LITTLE_ENDIAN__
#error MUTEX_GETSEQ_ADDR assumes little endian layout of 2 32-bit sequence words
#endif

PTHREAD_ALWAYS_INLINE
static inline void
MUTEX_GETSEQ_ADDR(_pthread_mutex *mutex, mutex_seq **seqaddr)
{
	// 64-bit aligned address inside m_seq array (&m_seq[0] for aligned mutex)
	// We don't require more than byte alignment on OS X. rdar://22278325
	*seqaddr = (void *)(((uintptr_t)mutex->m_seq + 0x7ul) & ~0x7ul);
}

PTHREAD_ALWAYS_INLINE
static inline void
MUTEX_GETTID_ADDR(_pthread_mutex *mutex, uint64_t **tidaddr)
{
	// 64-bit aligned address inside m_tid array (&m_tid[0] for aligned mutex)
	// We don't require more than byte alignment on OS X. rdar://22278325
	*tidaddr = (void*)(((uintptr_t)mutex->m_tid + 0x7ul) & ~0x7ul);
}

PTHREAD_ALWAYS_INLINE
static inline void
mutex_seq_load(mutex_seq *seqaddr, mutex_seq *oldseqval)
{
	oldseqval->seq_LU = seqaddr->seq_LU;
}

#define mutex_seq_atomic_load(seqaddr, oldseqval, m) \
		mutex_seq_atomic_load_##m(seqaddr, oldseqval)

PTHREAD_ALWAYS_INLINE PTHREAD_USED
static inline bool
mutex_seq_atomic_cmpxchgv_relaxed(mutex_seq *seqaddr, mutex_seq *oldseqval,
		mutex_seq *newseqval)
{
	return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
			newseqval->seq_LU, &oldseqval->seq_LU, relaxed);
}

PTHREAD_ALWAYS_INLINE PTHREAD_USED
static inline bool
mutex_seq_atomic_cmpxchgv_acquire(mutex_seq *seqaddr, mutex_seq *oldseqval,
		mutex_seq *newseqval)
{
	return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
			newseqval->seq_LU, &oldseqval->seq_LU, acquire);
}

PTHREAD_ALWAYS_INLINE PTHREAD_USED
static inline bool
mutex_seq_atomic_cmpxchgv_release(mutex_seq *seqaddr, mutex_seq *oldseqval,
		mutex_seq *newseqval)
{
	return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
			newseqval->seq_LU, &oldseqval->seq_LU, release);
}

#define mutex_seq_atomic_cmpxchgv(seqaddr, oldseqval, newseqval, m)\
		mutex_seq_atomic_cmpxchgv_##m(seqaddr, oldseqval, newseqval)

/*
 * Initialize a mutex variable, possibly with additional attributes.
 * Public interface - so don't trust the lock - initialize it first.
 */
PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_init(pthread_mutex_t *omutex, const pthread_mutexattr_t *attr)
{
#if 0
	/* conformance tests depend on not having this behavior */
	/* The test for this behavior is optional */
	if (_pthread_mutex_check_signature(mutex))
		return EBUSY;
#endif
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	_PTHREAD_LOCK_INIT(mutex->lock);
	return (_pthread_mutex_init(mutex, attr, 0x7));
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_getprioceiling(const pthread_mutex_t *omutex, int *prioceiling)
{
	int res = EINVAL;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (_pthread_mutex_check_signature(mutex)) {
		_PTHREAD_LOCK(mutex->lock);
		*prioceiling = mutex->prioceiling;
		res = 0;
		_PTHREAD_UNLOCK(mutex->lock);
	}
	return res;
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_setprioceiling(pthread_mutex_t *omutex, int prioceiling,
		int *old_prioceiling)
{
	int res = EINVAL;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (_pthread_mutex_check_signature(mutex)) {
		_PTHREAD_LOCK(mutex->lock);
		if (prioceiling >= -999 && prioceiling <= 999) {
			*old_prioceiling = mutex->prioceiling;
			mutex->prioceiling = (int16_t)prioceiling;
			res = 0;
		}
		_PTHREAD_UNLOCK(mutex->lock);
	}
	return res;
}


int
pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *attr,
		int *prioceiling)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*prioceiling = attr->prioceiling;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_getprotocol(const pthread_mutexattr_t *attr, int *protocol)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*protocol = attr->protocol;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_getpolicy_np(const pthread_mutexattr_t *attr, int *policy)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (attr->opt) {
		case _PTHREAD_MTX_OPT_POLICY_FAIRSHARE:
			*policy = PTHREAD_MUTEX_POLICY_FAIRSHARE_NP;
			res = 0;
			break;
		case _PTHREAD_MTX_OPT_POLICY_FIRSTFIT:
			*policy = PTHREAD_MUTEX_POLICY_FIRSTFIT_NP;
			res = 0;
			break;
		}
	}
	return res;
}

int
pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*type = attr->type;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_getpshared(const pthread_mutexattr_t *attr, int *pshared)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*pshared = (int)attr->pshared;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
	attr->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
	attr->protocol = _PTHREAD_DEFAULT_PROTOCOL;
	attr->opt = __pthread_mutex_default_opt_policy;
	attr->type = PTHREAD_MUTEX_DEFAULT;
	attr->sig = _PTHREAD_MUTEX_ATTR_SIG;
	attr->pshared = _PTHREAD_DEFAULT_PSHARED;
	return 0;
}

int
pthread_mutexattr_setprioceiling(pthread_mutexattr_t *attr, int prioceiling)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		if (prioceiling >= -999 && prioceiling <= 999) {
			attr->prioceiling = prioceiling;
			res = 0;
		}
	}
	return res;
}

int
pthread_mutexattr_setprotocol(pthread_mutexattr_t *attr, int protocol)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (protocol) {
		case PTHREAD_PRIO_NONE:
		case PTHREAD_PRIO_INHERIT:
		case PTHREAD_PRIO_PROTECT:
			attr->protocol = protocol;
			res = 0;
			break;
		}
	}
	return res;
}

int
pthread_mutexattr_setpolicy_np(pthread_mutexattr_t *attr, int policy)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		// <rdar://problem/35844519> the first-fit implementation was broken
		// pre-Liberty so this mapping exists to ensure that the old first-fit
		// define (2) is no longer valid when used on older systems.
		switch (policy) {
		case PTHREAD_MUTEX_POLICY_FAIRSHARE_NP:
			attr->opt = _PTHREAD_MTX_OPT_POLICY_FAIRSHARE;
			res = 0;
			break;
		case PTHREAD_MUTEX_POLICY_FIRSTFIT_NP:
			attr->opt = _PTHREAD_MTX_OPT_POLICY_FIRSTFIT;
			res = 0;
			break;
		}
	}
	return res;
}

int
pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (type) {
		case PTHREAD_MUTEX_NORMAL:
		case PTHREAD_MUTEX_ERRORCHECK:
		case PTHREAD_MUTEX_RECURSIVE:
		//case PTHREAD_MUTEX_DEFAULT:
			attr->type = type;
			res = 0;
			break;
		}
	}
	return res;
}

int
pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
{
	int res = EINVAL;
#if __DARWIN_UNIX03
	if (__unix_conforming == 0) {
		__unix_conforming = 1;
	}
#endif /* __DARWIN_UNIX03 */

	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
#if __DARWIN_UNIX03
		if (( pshared == PTHREAD_PROCESS_PRIVATE) ||
				(pshared == PTHREAD_PROCESS_SHARED))
#else /* __DARWIN_UNIX03 */
		if ( pshared == PTHREAD_PROCESS_PRIVATE)
#endif /* __DARWIN_UNIX03 */
		{
			attr->pshared = pshared;
			res = 0;
		}
	}
	return res;
}

PTHREAD_NOEXPORT PTHREAD_NOINLINE PTHREAD_NORETURN
int
_pthread_mutex_corruption_abort(_pthread_mutex *mutex)
{
	PTHREAD_ABORT("pthread_mutex corruption: mutex owner changed in the "
			"middle of lock/unlock");
}


PTHREAD_NOINLINE
static int
_pthread_mutex_check_init_slow(_pthread_mutex *mutex)
{
	int res = EINVAL;

	if (_pthread_mutex_check_signature_init(mutex)) {
		_PTHREAD_LOCK(mutex->lock);
		if (_pthread_mutex_check_signature_init(mutex)) {
			// initialize a statically initialized mutex to provide
			// compatibility for misbehaving applications.
			// (unlock should not be the first operation on a mutex)
			res = _pthread_mutex_init(mutex, NULL, (mutex->sig & 0xf));
		} else if (_pthread_mutex_check_signature(mutex)) {
			res = 0;
		}
		_PTHREAD_UNLOCK(mutex->lock);
	} else if (_pthread_mutex_check_signature(mutex)) {
		res = 0;
	}
	if (res != 0) {
		PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, res);
	}
	return res;
}

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_check_init(_pthread_mutex *mutex)
{
	int res = 0;
	if (!_pthread_mutex_check_signature(mutex)) {
		return _pthread_mutex_check_init_slow(mutex);
	}
	return res;
}

PTHREAD_ALWAYS_INLINE
static inline bool
_pthread_mutex_is_fairshare(_pthread_mutex *mutex)
{
	return (mutex->mtxopts.options.policy == _PTHREAD_MTX_OPT_POLICY_FAIRSHARE);
}

PTHREAD_ALWAYS_INLINE
static inline bool
_pthread_mutex_is_firstfit(_pthread_mutex *mutex)
{
	return (mutex->mtxopts.options.policy == _PTHREAD_MTX_OPT_POLICY_FIRSTFIT);
}

PTHREAD_ALWAYS_INLINE
static inline bool
_pthread_mutex_is_recursive(_pthread_mutex *mutex)
{
	return (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE);
}

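/*
 * Handle the type-specific (error-check/recursive) checks on the lock path.
 * Returns 0 when the caller should go on to acquire the lock, a positive
 * lock count for a successful recursive re-lock by the owning thread, or a
 * negated errno (-EBUSY, -EDEADLK, -EAGAIN) that the caller reports.
 */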
PTHREAD_ALWAYS_INLINE
static int
_pthread_mutex_lock_handle_options(_pthread_mutex *mutex, bool trylock,
		uint64_t *tidaddr)
{
	if (mutex->mtxopts.options.type == PTHREAD_MUTEX_NORMAL) {
		// NORMAL does not do EDEADLK checking
		return 0;
	}

	uint64_t selfid = _pthread_selfid_direct();
	if (os_atomic_load(tidaddr, relaxed) == selfid) {
		if (_pthread_mutex_is_recursive(mutex)) {
			if (mutex->mtxopts.options.lock_count < USHRT_MAX) {
				mutex->mtxopts.options.lock_count += 1;
				return mutex->mtxopts.options.lock_count;
			} else {
				return -EAGAIN;
			}
		} else if (trylock) { /* PTHREAD_MUTEX_ERRORCHECK */
			// <rdar://problem/16261552> as per OpenGroup, trylock cannot
			// return EDEADLK on a deadlock, it should return EBUSY.
			return -EBUSY;
		} else { /* PTHREAD_MUTEX_ERRORCHECK */
			return -EDEADLK;
		}
	}

	// Not recursive, or recursive but first lock.
	return 0;
}

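/*
 * Handle the type-specific checks on the unlock path. Returns 0 when the
 * mutex should actually be released, 1 for a valid recursive unlock that
 * leaves the mutex held, or a negated errno (-EPERM) when the calling
 * thread is not the owner.
 */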
PTHREAD_ALWAYS_INLINE
static int
_pthread_mutex_unlock_handle_options(_pthread_mutex *mutex, uint64_t *tidaddr)
{
	if (mutex->mtxopts.options.type == PTHREAD_MUTEX_NORMAL) {
		// NORMAL does not do EDEADLK checking
		return 0;
	}

	uint64_t selfid = _pthread_selfid_direct();
	if (os_atomic_load(tidaddr, relaxed) != selfid) {
		return -EPERM;
	} else if (_pthread_mutex_is_recursive(mutex) &&
			--mutex->mtxopts.options.lock_count) {
		return 1;
	}
	return 0;
}

/*
 * Sequence numbers and TID:
 *
 * In steady (and uncontended) state, an unlocked mutex will
 * look like A=[L4 U4 TID0]. When it is being locked, it transitions
 * to B=[L5+KE U4 TID0] and then C=[L5+KE U4 TID940]. For an uncontended mutex,
 * the unlock path will then transition to D=[L5 U4 TID0] and then finally
 * E=[L5 U5 TID0].
 *
 * If a contender comes in after B, the mutex will instead transition to
 * E=[L6+KE U4 TID0] and then F=[L6+KE U4 TID940]. If a contender comes in after
 * C, it will transition to F=[L6+KE U4 TID940] directly. In both cases, the
 * contender will enter the kernel with either mutexwait(U4, TID0) or
 * mutexwait(U4, TID940). The first owner will unlock the mutex by first
 * updating the owner to G=[L6+KE U4 TID-1] and then doing the actual unlock to
 * H=[L6+KE U5 TID=-1] before entering the kernel with mutexdrop(U5, -1) to
 * signal the next waiter (potentially as a prepost). When the waiter comes out
 * of the kernel, it will update the owner to I=[L6+KE U5 TID941]. An unlock at
 * this point is simply J=[L6 U5 TID0] and then K=[L6 U6 TID0].
 *
 * At various points along these timelines, since the sequence words and TID are
 * written independently, a thread may get preempted and another thread might
 * see inconsistent data. In the worst case, another thread may see the TID in
 * the SWITCHING (-1) state or unlocked (0) state for longer because the owning
 * thread was preempted.
 */

/*
 * Drop the mutex unlock references from cond_wait or mutex_unlock.
 */
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_fairshare_unlock_updatebits(_pthread_mutex *mutex,
		uint32_t *flagsp, uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
{
	uint32_t flags = mutex->mtxopts.value;
	flags &= ~_PTHREAD_MTX_OPT_NOTIFY; // no notification by default

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t oldtid, newtid;

	int res = _pthread_mutex_unlock_handle_options(mutex, tidaddr);
	if (res > 0) {
		// Valid recursive unlock
		if (flagsp) {
			*flagsp = flags;
		}
		PLOCKSTAT_MUTEX_RELEASE((pthread_mutex_t *)mutex, 1);
		return 0;
	} else if (res < 0) {
		PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, -res);
		return -res;
	}

	bool clearnotify, spurious;
	do {
		newseq = oldseq;
		oldtid = os_atomic_load(tidaddr, relaxed);

		clearnotify = false;
		spurious = false;

		// pending waiters
		int numwaiters = diff_genseq(oldseq.lgenval, oldseq.ugenval);
		if (numwaiters == 0) {
			// spurious unlock (unlock of unlocked lock)
			spurious = true;
		} else {
			newseq.ugenval += PTHRW_INC;

			if ((oldseq.lgenval & PTHRW_COUNT_MASK) ==
					(newseq.ugenval & PTHRW_COUNT_MASK)) {
				// our unlock sequence matches the lock sequence, so if the
				// CAS is successful, the mutex is unlocked

				/* do not reset Ibit, just K&E */
				newseq.lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
				clearnotify = true;
				newtid = 0; // clear owner
			} else {
				newtid = PTHREAD_MTX_TID_SWITCHING;
				// need to signal others waiting for mutex
				flags |= _PTHREAD_MTX_OPT_NOTIFY;
			}

			if (newtid != oldtid) {
				// We're giving up the mutex one way or the other, so go ahead
				// and update the owner to 0 so that once the CAS below
				// succeeds, there is no stale ownership information. If the
				// CAS of the seqaddr fails, we may loop, but it's still valid
				// for the owner to be SWITCHING/0
				if (!os_atomic_cmpxchg(tidaddr, oldtid, newtid, relaxed)) {
					// we own this mutex, nobody should be updating it except us
					return _pthread_mutex_corruption_abort(mutex);
				}
			}
		}

		if (clearnotify || spurious) {
			flags &= ~_PTHREAD_MTX_OPT_NOTIFY;
		}
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, release));

	PTHREAD_TRACE(psynch_mutex_unlock_updatebits, mutex, oldseq.lgenval,
			newseq.lgenval, oldtid);

	if (mgenp != NULL) {
		*mgenp = newseq.lgenval;
	}
	if (ugenp != NULL) {
		*ugenp = newseq.ugenval;
	}
	if (pmtxp != NULL) {
		*pmtxp = (uint32_t *)mutex;
	}
	if (flagsp != NULL) {
		*flagsp = flags;
	}

	return 0;
}

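/*
 * Set the K and E bits and record the owner after a lock attempt or a
 * return from the kernel. Returns 0 when the lock was taken; for a
 * first-fit mutex it returns 1 when the lock was stolen and the caller
 * must go back to the kernel and wait again.
 */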
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_fairshare_lock_updatebits(_pthread_mutex *mutex, uint64_t selfid)
{
	bool firstfit = _pthread_mutex_is_firstfit(mutex);
	bool gotlock = true;

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	do {
		newseq = oldseq;

		if (firstfit) {
			// firstfit locks can have the lock stolen out from under a locker
			// between the unlock from the kernel and this lock path. When this
			// happens, we still want to set the K bit before leaving the loop
			// (or notice if the lock unlocks while we try to update).
			gotlock = !is_rwl_ebit_set(oldseq.lgenval);
		} else if ((oldseq.lgenval & (PTH_RWL_KBIT | PTH_RWL_EBIT)) ==
				(PTH_RWL_KBIT | PTH_RWL_EBIT)) {
			// bits are already set, just update the owner tidaddr
			break;
		}

		newseq.lgenval |= PTH_RWL_KBIT | PTH_RWL_EBIT;
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			acquire));

	if (gotlock) {
		os_atomic_store(tidaddr, selfid, relaxed);
	}

	PTHREAD_TRACE(psynch_mutex_lock_updatebits, mutex, oldseq.lgenval,
			newseq.lgenval, 0);

	// failing to take the lock in firstfit returns 1 to force the caller
	// to wait in the kernel
	return gotlock ? 0 : 1;
}

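/*
 * Block in the kernel via __psynch_mutexwait until this thread can take
 * ownership, retrying both interrupted waits and (for first-fit) the case
 * where the lock was stolen before we could claim it.
 */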
PTHREAD_NOINLINE
static int
_pthread_mutex_fairshare_lock_wait(_pthread_mutex *mutex, mutex_seq newseq,
		uint64_t oldtid)
{
	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_selfid_direct();

	PLOCKSTAT_MUTEX_BLOCK((pthread_mutex_t *)mutex);
	do {
		uint32_t updateval;
		do {
			updateval = __psynch_mutexwait(mutex, newseq.lgenval,
					newseq.ugenval, oldtid, mutex->mtxopts.value);
			oldtid = os_atomic_load(tidaddr, relaxed);
		} while (updateval == (uint32_t)-1);

		// returns 0 on successful update; in firstfit it may fail with 1
	} while (_pthread_mutex_fairshare_lock_updatebits(mutex, selfid) == 1);
	PLOCKSTAT_MUTEX_BLOCKED((pthread_mutex_t *)mutex, BLOCK_SUCCESS_PLOCKSTAT);

	return 0;
}

PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_mutex_fairshare_lock_slow(_pthread_mutex *omutex, bool trylock)
{
	int res, recursive = 0;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t oldtid, selfid = _pthread_selfid_direct();

	res = _pthread_mutex_lock_handle_options(mutex, trylock, tidaddr);
	if (res > 0) {
		recursive = 1;
		res = 0;
		goto out;
	} else if (res < 0) {
		res = -res;
		goto out;
	}

	bool gotlock;
	do {
		newseq = oldseq;
		oldtid = os_atomic_load(tidaddr, relaxed);

		gotlock = ((oldseq.lgenval & PTH_RWL_EBIT) == 0);

		if (trylock && !gotlock) {
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS64 to ensure that nobody has unlocked concurrently.
		} else {
			// Increment the lock sequence number and force the lock into E+K
			// mode, whether "gotlock" is true or not.
			newseq.lgenval += PTHRW_INC;
			newseq.lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
		}
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, acquire));

	PTHREAD_TRACE(psynch_mutex_lock_updatebits, omutex, oldseq.lgenval,
			newseq.lgenval, 0);

	if (gotlock) {
		os_atomic_store(tidaddr, selfid, relaxed);
		res = 0;
		PTHREAD_TRACE(psynch_mutex_ulock, omutex, newseq.lgenval,
				newseq.ugenval, selfid);
	} else if (trylock) {
		res = EBUSY;
		PTHREAD_TRACE(psynch_mutex_utrylock_failed, omutex, newseq.lgenval,
				newseq.ugenval, oldtid);
	} else {
		PTHREAD_TRACE(psynch_mutex_ulock | DBG_FUNC_START, omutex,
				newseq.lgenval, newseq.ugenval, oldtid);
		res = _pthread_mutex_fairshare_lock_wait(mutex, newseq, oldtid);
		PTHREAD_TRACE(psynch_mutex_ulock | DBG_FUNC_END, omutex,
				newseq.lgenval, newseq.ugenval, oldtid);
	}

	if (res == 0 && _pthread_mutex_is_recursive(mutex)) {
		mutex->mtxopts.options.lock_count = 1;
	}

out:
#if PLOCKSTAT
	if (res == 0) {
		PLOCKSTAT_MUTEX_ACQUIRE((pthread_mutex_t *)mutex, recursive, 0);
	} else {
		PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, res);
	}
#endif

	return res;
}

PTHREAD_NOINLINE
static inline int
_pthread_mutex_fairshare_lock(_pthread_mutex *mutex, bool trylock)
{
#if ENABLE_USERSPACE_TRACE
	return _pthread_mutex_fairshare_lock_slow(mutex, trylock);
#elif PLOCKSTAT
	if (PLOCKSTAT_MUTEX_ACQUIRE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED()) {
		return _pthread_mutex_fairshare_lock_slow(mutex, trylock);
	}
#endif

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_selfid_direct();

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	if (os_unlikely(oldseq.lgenval & PTH_RWL_EBIT)) {
		return _pthread_mutex_fairshare_lock_slow(mutex, trylock);
	}

	bool gotlock;
	do {
		newseq = oldseq;

		gotlock = ((oldseq.lgenval & PTH_RWL_EBIT) == 0);

		if (trylock && !gotlock) {
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS64 to ensure that nobody has unlocked concurrently.
		} else if (os_likely(gotlock)) {
			// Increment the lock sequence number and force the lock into E+K
			// mode, whether "gotlock" is true or not.
			newseq.lgenval += PTHRW_INC;
			newseq.lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
		} else {
			return _pthread_mutex_fairshare_lock_slow(mutex, trylock);
		}
	} while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			acquire)));

	if (os_likely(gotlock)) {
		os_atomic_store(tidaddr, selfid, relaxed);
		return 0;
	} else if (trylock) {
		return EBUSY;
	} else {
		__builtin_trap();
	}
}

PTHREAD_NOINLINE
static int
_pthread_mutex_fairshare_unlock_drop(_pthread_mutex *mutex, mutex_seq newseq,
		uint32_t flags)
{
	int res;
	uint32_t updateval;

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	PTHREAD_TRACE(psynch_mutex_uunlock | DBG_FUNC_START, mutex, newseq.lgenval,
			newseq.ugenval, os_atomic_load(tidaddr, relaxed));

	updateval = __psynch_mutexdrop(mutex, newseq.lgenval, newseq.ugenval,
			os_atomic_load(tidaddr, relaxed), flags);

	PTHREAD_TRACE(psynch_mutex_uunlock | DBG_FUNC_END, mutex, updateval, 0, 0);

	if (updateval == (uint32_t)-1) {
		res = errno;

		if (res == EINTR) {
			res = 0;
		}
		if (res != 0) {
			PTHREAD_ABORT("__psynch_mutexdrop failed with error %d", res);
		}
		return res;
	}

	return 0;
}

PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_mutex_fairshare_unlock_slow(_pthread_mutex *mutex)
{
	int res;
	mutex_seq newseq;
	uint32_t flags;

	res = _pthread_mutex_fairshare_unlock_updatebits(mutex, &flags, NULL,
			&newseq.lgenval, &newseq.ugenval);
	if (res != 0) return res;

	if ((flags & _PTHREAD_MTX_OPT_NOTIFY) != 0) {
		return _pthread_mutex_fairshare_unlock_drop(mutex, newseq, flags);
	} else {
		uint64_t *tidaddr;
		MUTEX_GETTID_ADDR(mutex, &tidaddr);
		PTHREAD_TRACE(psynch_mutex_uunlock, mutex, newseq.lgenval,
				newseq.ugenval, os_atomic_load(tidaddr, relaxed));
	}

	return 0;
}

PTHREAD_NOINLINE
static int
_pthread_mutex_fairshare_unlock(_pthread_mutex *mutex)
{
#if ENABLE_USERSPACE_TRACE
	return _pthread_mutex_fairshare_unlock_slow(mutex);
#elif PLOCKSTAT
	if (PLOCKSTAT_MUTEX_RELEASE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED()) {
		return _pthread_mutex_fairshare_unlock_slow(mutex);
	}
#endif

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	int numwaiters = diff_genseq(oldseq.lgenval, oldseq.ugenval);
	if (os_unlikely(numwaiters == 0)) {
		// spurious unlock (unlock of unlocked lock)
		return 0;
	}

	// We're giving up the mutex one way or the other, so go ahead and
	// update the owner to 0 so that once the CAS below succeeds, there
	// is no stale ownership information. If the CAS of the seqaddr
	// fails, we may loop, but it's still valid for the owner to be
	// SWITCHING/0
	os_atomic_store(tidaddr, 0, relaxed);

	do {
		newseq = oldseq;
		newseq.ugenval += PTHRW_INC;

		if (os_likely((oldseq.lgenval & PTHRW_COUNT_MASK) ==
				(newseq.ugenval & PTHRW_COUNT_MASK))) {
			// if we succeed in performing the CAS we can be sure of a fast
			// path (only needing the CAS) unlock, if:
			// a. our lock and unlock sequence are equal
			// b. we don't need to clear an unlock prepost from the kernel

			// do not reset Ibit, just K&E
			newseq.lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
		} else {
			return _pthread_mutex_fairshare_unlock_slow(mutex);
		}
	} while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			release)));

	return 0;
}

#pragma mark firstfit

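/*
 * Clear the E-bit (and consume one waiter ticket) on unlock of a first-fit
 * mutex. When there are kernel waiters, _PTHREAD_MTX_OPT_NOTIFY is set in
 * *flagsp so that the caller knows a kernel wake is required.
 */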
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_firstfit_unlock_updatebits(_pthread_mutex *mutex,
		uint32_t *flagsp, uint32_t **mutexp, uint32_t *lvalp, uint32_t *uvalp)
{
	uint32_t flags = mutex->mtxopts.value & ~_PTHREAD_MTX_OPT_NOTIFY;
	bool kernel_wake;

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t oldtid;

	int res = _pthread_mutex_unlock_handle_options(mutex, tidaddr);
	if (res > 0) {
		// Valid recursive unlock
		if (flagsp) {
			*flagsp = flags;
		}
		PLOCKSTAT_MUTEX_RELEASE((pthread_mutex_t *)mutex, 1);
		return 0;
	} else if (res < 0) {
		PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, -res);
		return -res;
	}

	do {
		newseq = oldseq;
		oldtid = os_atomic_load(tidaddr, relaxed);
		// One or more kernel waiters means we need to do a wake.
		kernel_wake = diff_genseq(oldseq.lgenval, oldseq.ugenval) > 0;
		newseq.lgenval &= ~PTH_RWL_EBIT;

		if (kernel_wake) {
			// Going to the kernel post-unlock removes a single waiter unlock
			// from the mutex counts.
			newseq.ugenval += PTHRW_INC;
		}

		if (oldtid != 0) {
			if (!os_atomic_cmpxchg(tidaddr, oldtid, 0, relaxed)) {
				return _pthread_mutex_corruption_abort(mutex);
			}
		}
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, release));

	PTHREAD_TRACE(psynch_ffmutex_unlock_updatebits, mutex, oldseq.lgenval,
			newseq.lgenval, newseq.ugenval);

	if (kernel_wake) {
		// We choose to return this out via flags because the condition
		// variable also uses this to determine whether to do a kernel wake
		// when beginning a cvwait.
		flags |= _PTHREAD_MTX_OPT_NOTIFY;
	}
	if (lvalp) {
		*lvalp = newseq.lgenval;
	}
	if (uvalp) {
		*uvalp = newseq.ugenval;
	}
	if (mutexp) {
		*mutexp = (uint32_t *)mutex;
	}
	if (flagsp) {
		*flagsp = flags;
	}
	return 0;
}

PTHREAD_NOEXPORT PTHREAD_NOINLINE
static int
_pthread_mutex_firstfit_wake(_pthread_mutex *mutex, mutex_seq newseq,
		uint32_t flags)
{
	PTHREAD_TRACE(psynch_ffmutex_wake, mutex, newseq.lgenval, newseq.ugenval,
			0);
	int res = __psynch_mutexdrop(mutex, newseq.lgenval, newseq.ugenval, 0,
			flags);

	if (res == -1) {
		res = errno;
		if (res == EINTR) {
			res = 0;
		}
		if (res != 0) {
			PTHREAD_ABORT("__psynch_mutexdrop failed with error %d", res);
		}
		return res;
	}
	return 0;
}

PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_mutex_firstfit_unlock_slow(_pthread_mutex *mutex)
{
	mutex_seq newseq;
	uint32_t flags;
	int res;

	res = _pthread_mutex_firstfit_unlock_updatebits(mutex, &flags, NULL,
			&newseq.lgenval, &newseq.ugenval);
	if (res != 0) return res;

	if (flags & _PTHREAD_MTX_OPT_NOTIFY) {
		return _pthread_mutex_firstfit_wake(mutex, newseq, flags);
	}
	return 0;
}

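/*
 * Try to take a first-fit mutex by setting the E-bit; if the lock is held,
 * take another ticket on L so we can wait in the kernel. Returns true when
 * the lock was acquired and the owner TID recorded, false otherwise.
 */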
PTHREAD_ALWAYS_INLINE
static bool
_pthread_mutex_firstfit_lock_updatebits(_pthread_mutex *mutex, uint64_t selfid,
		mutex_seq *newseqp)
{
	bool gotlock;

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	PTHREAD_TRACE(psynch_ffmutex_lock_updatebits | DBG_FUNC_START, mutex,
			oldseq.lgenval, oldseq.ugenval, 0);

	do {
		newseq = oldseq;
		gotlock = is_rwl_ebit_clear(oldseq.lgenval);

		if (gotlock) {
			// If we see the E-bit cleared, we should just attempt to take it.
			newseq.lgenval |= PTH_RWL_EBIT;
		} else {
			// If we failed to get the lock then we need to put ourselves back
			// in the queue of waiters. The previous unlocker that woke us out
			// of the kernel consumed the S-count for our previous wake. So
			// take another ticket on L and go back in the kernel to sleep.
			newseq.lgenval += PTHRW_INC;
		}
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, acquire));

	if (gotlock) {
		os_atomic_store(tidaddr, selfid, relaxed);
	}

	PTHREAD_TRACE(psynch_ffmutex_lock_updatebits | DBG_FUNC_END, mutex,
			newseq.lgenval, newseq.ugenval, 0);

	if (newseqp) {
		*newseqp = newseq;
	}
	return gotlock;
}

PTHREAD_NOINLINE
static int
_pthread_mutex_firstfit_lock_wait(_pthread_mutex *mutex, mutex_seq newseq,
		uint64_t oldtid)
{
	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_selfid_direct();

	PLOCKSTAT_MUTEX_BLOCK((pthread_mutex_t *)mutex);
	do {
		uint32_t uval;
		do {
			PTHREAD_TRACE(psynch_ffmutex_wait | DBG_FUNC_START, mutex,
					newseq.lgenval, newseq.ugenval, mutex->mtxopts.value);
			uval = __psynch_mutexwait(mutex, newseq.lgenval, newseq.ugenval,
					oldtid, mutex->mtxopts.value);
			PTHREAD_TRACE(psynch_ffmutex_wait | DBG_FUNC_END, mutex,
					uval, 0, 0);
			oldtid = os_atomic_load(tidaddr, relaxed);
		} while (uval == (uint32_t)-1);
	} while (!_pthread_mutex_firstfit_lock_updatebits(mutex, selfid, &newseq));
	PLOCKSTAT_MUTEX_BLOCKED((pthread_mutex_t *)mutex, BLOCK_SUCCESS_PLOCKSTAT);

	return 0;
}

PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_mutex_firstfit_lock_slow(_pthread_mutex *mutex, bool trylock)
{
	int res, recursive = 0;

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t oldtid, selfid = _pthread_selfid_direct();

	res = _pthread_mutex_lock_handle_options(mutex, trylock, tidaddr);
	if (res > 0) {
		recursive = 1;
		res = 0;
		goto out;
	} else if (res < 0) {
		res = -res;
		goto out;
	}

	PTHREAD_TRACE(psynch_ffmutex_lock_updatebits | DBG_FUNC_START, mutex,
			oldseq.lgenval, oldseq.ugenval, 0);

	bool gotlock;
	do {
		newseq = oldseq;
		oldtid = os_atomic_load(tidaddr, relaxed);

		gotlock = is_rwl_ebit_clear(oldseq.lgenval);
		if (trylock && !gotlock) {
			// We still want to perform the CAS here, even though it won't
			// do anything so that it fails if someone unlocked while we were
			// in the loop
		} else if (gotlock) {
			// In first-fit, getting the lock simply adds the E-bit
			newseq.lgenval |= PTH_RWL_EBIT;
		} else {
			// Failed to get the lock, increment the L-val and go to
			// the kernel to sleep
			newseq.lgenval += PTHRW_INC;
		}
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, acquire));

	PTHREAD_TRACE(psynch_ffmutex_lock_updatebits | DBG_FUNC_END, mutex,
			newseq.lgenval, newseq.ugenval, 0);

	if (gotlock) {
		os_atomic_store(tidaddr, selfid, relaxed);
		res = 0;
		PTHREAD_TRACE(psynch_mutex_ulock, mutex, newseq.lgenval,
				newseq.ugenval, selfid);
	} else if (trylock) {
		res = EBUSY;
		PTHREAD_TRACE(psynch_mutex_utrylock_failed, mutex, newseq.lgenval,
				newseq.ugenval, oldtid);
	} else {
		PTHREAD_TRACE(psynch_mutex_ulock | DBG_FUNC_START, mutex,
				newseq.lgenval, newseq.ugenval, oldtid);
		res = _pthread_mutex_firstfit_lock_wait(mutex, newseq, oldtid);
		PTHREAD_TRACE(psynch_mutex_ulock | DBG_FUNC_END, mutex,
				newseq.lgenval, newseq.ugenval, oldtid);
	}

	if (res == 0 && _pthread_mutex_is_recursive(mutex)) {
		mutex->mtxopts.options.lock_count = 1;
	}

out:
#if PLOCKSTAT
	if (res == 0) {
		PLOCKSTAT_MUTEX_ACQUIRE((pthread_mutex_t *)mutex, recursive, 0);
	} else {
		PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, res);
	}
#endif
	return res;
}

#pragma mark fast path

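/*
 * Drop the mutex on behalf of the condition-variable code (cond_wait):
 * dispatches to the fairshare or first-fit unlock-updatebits path based on
 * the mutex policy and returns the updated generation values to the caller.
 */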
PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_mutex_droplock(_pthread_mutex *mutex, uint32_t *flagsp,
		uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
{
	if (_pthread_mutex_is_fairshare(mutex)) {
		return _pthread_mutex_fairshare_unlock_updatebits(mutex, flagsp,
				pmtxp, mgenp, ugenp);
	}
	return _pthread_mutex_firstfit_unlock_updatebits(mutex, flagsp, pmtxp,
			mgenp, ugenp);
}

PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_mutex_lock_init_slow(_pthread_mutex *mutex, bool trylock)
{
	int res;

	res = _pthread_mutex_check_init(mutex);
	if (res != 0) return res;

	if (os_unlikely(_pthread_mutex_is_fairshare(mutex))) {
		return _pthread_mutex_fairshare_lock_slow(mutex, trylock);
	}
	return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
}

PTHREAD_NOEXPORT PTHREAD_NOINLINE
static int
_pthread_mutex_unlock_init_slow(_pthread_mutex *mutex)
{
	int res;

	// Initialize static mutexes for compatibility with misbehaving
	// applications (unlock should not be the first operation on a mutex).
	res = _pthread_mutex_check_init(mutex);
	if (res != 0) return res;

	if (os_unlikely(_pthread_mutex_is_fairshare(mutex))) {
		return _pthread_mutex_fairshare_unlock_slow(mutex);
	}
	return _pthread_mutex_firstfit_unlock_slow(mutex);
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_unlock(pthread_mutex_t *omutex)
{
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (os_unlikely(!_pthread_mutex_check_signature_fast(mutex))) {
		return _pthread_mutex_unlock_init_slow(mutex);
	}

	if (os_unlikely(_pthread_mutex_is_fairshare(mutex))) {
		return _pthread_mutex_fairshare_unlock(mutex);
	}

#if ENABLE_USERSPACE_TRACE
	return _pthread_mutex_firstfit_unlock_slow(mutex);
#elif PLOCKSTAT
	if (PLOCKSTAT_MUTEX_RELEASE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED()) {
		return _pthread_mutex_firstfit_unlock_slow(mutex);
	}
#endif

	/*
	 * This is the first-fit fast path. The fairshare fast-ish path is in
	 * _pthread_mutex_fairshare_unlock()
	 */
	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	// We're giving up the mutex one way or the other, so go ahead and
	// update the owner to 0 so that once the CAS below succeeds, there
	// is no stale ownership information. If the CAS of the seqaddr
	// fails, we may loop, but it's still valid for the owner to be
	// SWITCHING/0
	os_atomic_store(tidaddr, 0, relaxed);

	do {
		newseq = oldseq;

		if (diff_genseq(oldseq.lgenval, oldseq.ugenval) == 0) {
			// No outstanding waiters in kernel, we can simply drop the E-bit
			// and return.
			newseq.lgenval &= ~PTH_RWL_EBIT;
		} else {
			return _pthread_mutex_firstfit_unlock_slow(mutex);
		}
	} while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			release)));

	return 0;
}

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_firstfit_lock(pthread_mutex_t *omutex, bool trylock)
{
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (os_unlikely(!_pthread_mutex_check_signature_fast(mutex))) {
		return _pthread_mutex_lock_init_slow(mutex, trylock);
	}

	if (os_unlikely(_pthread_mutex_is_fairshare(mutex))) {
		return _pthread_mutex_fairshare_lock(mutex, trylock);
	}

#if ENABLE_USERSPACE_TRACE
	return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
#elif PLOCKSTAT
	if (PLOCKSTAT_MUTEX_ACQUIRE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED()) {
		return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
	}
#endif

	/*
	 * This is the first-fit fast path. The fairshare fast-ish path is in
	 * _pthread_mutex_fairshare_lock()
	 */
	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_selfid_direct();

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	if (os_unlikely(oldseq.lgenval & PTH_RWL_EBIT)) {
		return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
	}

	bool gotlock;
	do {
		newseq = oldseq;
		gotlock = is_rwl_ebit_clear(oldseq.lgenval);

		if (trylock && !gotlock) {
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS64 to ensure that nobody has unlocked concurrently.
		} else if (os_likely(gotlock)) {
			// In first-fit, getting the lock simply adds the E-bit
			newseq.lgenval |= PTH_RWL_EBIT;
		} else {
			return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
		}
	} while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			acquire)));

	if (os_likely(gotlock)) {
		os_atomic_store(tidaddr, selfid, relaxed);
		return 0;
	} else if (trylock) {
		return EBUSY;
	} else {
		__builtin_trap();
	}
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_lock(pthread_mutex_t *mutex)
{
	return _pthread_mutex_firstfit_lock(mutex, false);
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	return _pthread_mutex_firstfit_lock(mutex, true);
}


PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_init(_pthread_mutex *mutex, const pthread_mutexattr_t *attr,
		uint32_t static_type)
{
	mutex->mtxopts.value = 0;
	mutex->mtxopts.options.mutex = 1;
	if (attr) {
		if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
			return EINVAL;
		}
		mutex->prioceiling = (int16_t)attr->prioceiling;
		mutex->mtxopts.options.protocol = attr->protocol;
		mutex->mtxopts.options.policy = attr->opt;
		mutex->mtxopts.options.type = attr->type;
		mutex->mtxopts.options.pshared = attr->pshared;
	} else {
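		// static_type carries the low nibble of the static initializer
		// signature (pthread_mutex_init passes 0x7, and the check-init path
		// passes sig & 0xf), selecting the matching default mutex type.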
		switch (static_type) {
		case 1:
			mutex->mtxopts.options.type = PTHREAD_MUTEX_ERRORCHECK;
			break;
		case 2:
			mutex->mtxopts.options.type = PTHREAD_MUTEX_RECURSIVE;
			break;
		case 3:
			/* firstfit fall thru */
		case 7:
			mutex->mtxopts.options.type = PTHREAD_MUTEX_DEFAULT;
			break;
		default:
			return EINVAL;
		}

		mutex->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
		mutex->mtxopts.options.protocol = _PTHREAD_DEFAULT_PROTOCOL;
		if (static_type != 3) {
			mutex->mtxopts.options.policy = __pthread_mutex_default_opt_policy;
		} else {
			mutex->mtxopts.options.policy = _PTHREAD_MTX_OPT_POLICY_FIRSTFIT;
		}
		mutex->mtxopts.options.pshared = _PTHREAD_DEFAULT_PSHARED;
	}
	mutex->priority = 0;

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

#if PTHREAD_MUTEX_INIT_UNUSED
	if ((uint32_t*)tidaddr != mutex->m_tid) {
		mutex->mtxopts.options.misalign = 1;
		__builtin_memset(mutex->m_tid, 0xff, sizeof(mutex->m_tid));
	}
	__builtin_memset(mutex->m_mis, 0xff, sizeof(mutex->m_mis));
#endif // PTHREAD_MUTEX_INIT_UNUSED
	*tidaddr = 0;
	*seqaddr = (mutex_seq){ };

	long sig = _PTHREAD_MUTEX_SIG;
	if (mutex->mtxopts.options.type == PTHREAD_MUTEX_NORMAL &&
			(_pthread_mutex_is_fairshare(mutex) ||
			_pthread_mutex_is_firstfit(mutex))) {
		// rdar://18148854 _pthread_mutex_lock & pthread_mutex_unlock fastpath
		sig = _PTHREAD_MUTEX_SIG_fast;
	}

#if PTHREAD_MUTEX_INIT_UNUSED
	// For detecting copied mutexes and smashes during debugging
	uint32_t sig32 = (uint32_t)sig;
#if defined(__LP64__)
	uintptr_t guard = ~(uintptr_t)mutex; // use ~ to hide from leaks
	__builtin_memcpy(mutex->_reserved, &guard, sizeof(guard));
	mutex->_reserved[2] = sig32;
	mutex->_reserved[3] = sig32;
	mutex->_pad = sig32;
#else
	mutex->_reserved[0] = sig32;
#endif
#endif // PTHREAD_MUTEX_INIT_UNUSED

	// Ensure all contents are properly set before setting signature.
#if defined(__LP64__)
	// For binary compatibility reasons we cannot require natural alignment of
	// the 64bit 'sig' long value in the struct. rdar://problem/21610439
	uint32_t *sig32_ptr = (uint32_t*)&mutex->sig;
	uint32_t *sig32_val = (uint32_t*)&sig;
	*(sig32_ptr + 1) = *(sig32_val + 1);
	os_atomic_store(sig32_ptr, *sig32_val, release);
#else
	os_atomic_store2o(mutex, sig, sig, release);
#endif

	return 0;
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_destroy(pthread_mutex_t *omutex)
{
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	int res = EINVAL;

	_PTHREAD_LOCK(mutex->lock);
	if (_pthread_mutex_check_signature(mutex)) {
		mutex_seq *seqaddr;
		MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

		mutex_seq seq;
		mutex_seq_load(seqaddr, &seq);

		uint64_t *tidaddr;
		MUTEX_GETTID_ADDR(mutex, &tidaddr);

		if ((os_atomic_load(tidaddr, relaxed) == 0) &&
				(seq.lgenval & PTHRW_COUNT_MASK) ==
				(seq.ugenval & PTHRW_COUNT_MASK)) {
			mutex->sig = _PTHREAD_NO_SIG;
			res = 0;
		} else {
			res = EBUSY;
		}
	} else if (_pthread_mutex_check_signature_init(mutex)) {
		mutex->sig = _PTHREAD_NO_SIG;
		res = 0;
	}
	_PTHREAD_UNLOCK(mutex->lock);

	return res;
}

#endif /* !BUILDING_VARIANT ] */

/*
 * Destroy a mutex attribute structure.
 */
int
pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
#if __DARWIN_UNIX03
	if (__unix_conforming == 0) {
		__unix_conforming = 1;
	}
	if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
		return EINVAL;
	}
#endif /* __DARWIN_UNIX03 */

	attr->sig = _PTHREAD_NO_SIG;
	return 0;
}