/*
 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 * All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */
/*
 * MkLinux
 */

/*
 * POSIX Pthread Library
 * -- Mutex variable support
 */

#include "resolver.h"
#include "internal.h"
#include "kern/kern_trace.h"

#ifndef BUILDING_VARIANT /* [ */

#ifdef PLOCKSTAT
#include "plockstat.h"
/* This function is never called and exists to provide never-fired dtrace
 * probes so that user D scripts don't get errors.
 */
OS_USED static void
_plockstat_never_fired(void);
static void
_plockstat_never_fired(void)
{
	PLOCKSTAT_MUTEX_SPIN(NULL);
	PLOCKSTAT_MUTEX_SPUN(NULL, 0, 0);
}
#else /* !PLOCKSTAT */
#define PLOCKSTAT_MUTEX_SPIN(x)
#define PLOCKSTAT_MUTEX_SPUN(x, y, z)
#define PLOCKSTAT_MUTEX_ERROR(x, y)
#define PLOCKSTAT_MUTEX_BLOCK(x)
#define PLOCKSTAT_MUTEX_BLOCKED(x, y)
#define PLOCKSTAT_MUTEX_ACQUIRE(x, y, z)
#define PLOCKSTAT_MUTEX_RELEASE(x, y)
#endif /* PLOCKSTAT */

#define BLOCK_FAIL_PLOCKSTAT    0
#define BLOCK_SUCCESS_PLOCKSTAT 1

#define PTHREAD_MUTEX_INIT_UNUSED 1

#if !VARIANT_DYLD

int __pthread_mutex_default_opt_policy = _PTHREAD_MTX_OPT_POLICY_DEFAULT;
bool __pthread_mutex_use_ulock = _PTHREAD_MTX_OPT_ULOCK_DEFAULT;
bool __pthread_mutex_ulock_adaptive_spin = _PTHREAD_MTX_OPT_ADAPTIVE_DEFAULT;

static inline bool
_pthread_mutex_policy_validate(int policy)
{
	return (policy >= 0 && policy < _PTHREAD_MUTEX_POLICY_LAST);
}

static inline int
_pthread_mutex_policy_to_opt(int policy)
{
	switch (policy) {
	case PTHREAD_MUTEX_POLICY_FAIRSHARE_NP:
		return _PTHREAD_MTX_OPT_POLICY_FAIRSHARE;
	case PTHREAD_MUTEX_POLICY_FIRSTFIT_NP:
		return _PTHREAD_MTX_OPT_POLICY_FIRSTFIT;
	default:
		__builtin_unreachable();
	}
}

void
_pthread_mutex_global_init(const char *envp[],
		struct _pthread_registration_data *registration_data)
{
	int opt = _PTHREAD_MTX_OPT_POLICY_DEFAULT;
	if (registration_data->mutex_default_policy) {
		int policy = registration_data->mutex_default_policy &
				_PTHREAD_REG_DEFAULT_POLICY_MASK;
		if (_pthread_mutex_policy_validate(policy)) {
			opt = _pthread_mutex_policy_to_opt(policy);
		}
	}

	const char *envvar = _simple_getenv(envp, "PTHREAD_MUTEX_DEFAULT_POLICY");
	if (envvar) {
		int policy = envvar[0] - '0';
		if (_pthread_mutex_policy_validate(policy)) {
			opt = _pthread_mutex_policy_to_opt(policy);
		}
	}

	if (opt != __pthread_mutex_default_opt_policy) {
		__pthread_mutex_default_opt_policy = opt;
	}

	bool use_ulock = _PTHREAD_MTX_OPT_ULOCK_DEFAULT;
	if (_os_xbs_chrooted) {
		use_ulock = false;
	} else {
		envvar = _simple_getenv(envp, "PTHREAD_MUTEX_USE_ULOCK");
		if (envvar) {
			use_ulock = (envvar[0] == '1');
		} else if (registration_data->mutex_default_policy) {
			use_ulock = registration_data->mutex_default_policy &
					_PTHREAD_REG_DEFAULT_USE_ULOCK;
		}
	}

	if (use_ulock != __pthread_mutex_use_ulock) {
		__pthread_mutex_use_ulock = use_ulock;
	}

	bool adaptive_spin = _PTHREAD_MTX_OPT_ADAPTIVE_DEFAULT;
	envvar = _simple_getenv(envp, "PTHREAD_MUTEX_ADAPTIVE_SPIN");
	if (envvar) {
		adaptive_spin = (envvar[0] == '1');
	} else if (registration_data->mutex_default_policy) {
		adaptive_spin = registration_data->mutex_default_policy &
				_PTHREAD_REG_DEFAULT_USE_ADAPTIVE_SPIN;
	}

	if (adaptive_spin != __pthread_mutex_ulock_adaptive_spin) {
		__pthread_mutex_ulock_adaptive_spin = adaptive_spin;
	}
}

#endif // !VARIANT_DYLD
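
/*
 * Illustrative note (added for clarity, not in the original source): these
 * knobs are read once at process startup. Assuming the usual header values
 * PTHREAD_MUTEX_POLICY_FAIRSHARE_NP == 1 and PTHREAD_MUTEX_POLICY_FIRSTFIT_NP
 * == 3, a hypothetical launch such as
 *
 *	$ PTHREAD_MUTEX_DEFAULT_POLICY=3 PTHREAD_MUTEX_USE_ULOCK=1 ./app
 *
 * would select first-fit as the process default and opt in to the ulock
 * implementation. Only the first character of each value is examined
 * (envvar[0] - '0' and envvar[0] == '1' above).
 */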

OS_ALWAYS_INLINE
static inline int _pthread_mutex_init(pthread_mutex_t *mutex,
		const pthread_mutexattr_t *attr, uint32_t static_type);

typedef union mutex_seq {
	uint32_t seq[2];
	struct { uint32_t lgenval; uint32_t ugenval; };
	struct { uint32_t mgen; uint32_t ugen; };
	uint64_t seq_LU;
	uint64_t _Atomic atomic_seq_LU;
} mutex_seq;

_Static_assert(sizeof(mutex_seq) == 2 * sizeof(uint32_t),
		"Incorrect mutex_seq size");

#if !__LITTLE_ENDIAN__
#error MUTEX_GETSEQ_ADDR assumes little endian layout of 2 32-bit sequence words
#endif

OS_ALWAYS_INLINE
static inline void
MUTEX_GETSEQ_ADDR(pthread_mutex_t *mutex, mutex_seq **seqaddr)
{
	// 64-bit aligned address inside m_seq array (&m_seq[0] for aligned mutex)
	// We don't require more than byte alignment on OS X. rdar://22278325
	*seqaddr = (void *)(((uintptr_t)mutex->psynch.m_seq + 0x7ul) & ~0x7ul);
}

OS_ALWAYS_INLINE
static inline void
MUTEX_GETTID_ADDR(pthread_mutex_t *mutex, uint64_t **tidaddr)
{
	// 64-bit aligned address inside m_tid array (&m_tid[0] for aligned mutex)
	// We don't require more than byte alignment on OS X. rdar://22278325
	*tidaddr = (void *)(((uintptr_t)mutex->psynch.m_tid + 0x7ul) & ~0x7ul);
}
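
/*
 * Worked example for the round-up above (added for clarity): if the mutex is
 * only 4-byte aligned and m_tid starts at 0x1004, then
 * ((0x1004 + 0x7) & ~0x7) == 0x1008, the first 64-bit boundary inside the
 * padded array; for an already 8-byte-aligned address such as 0x1008 the
 * expression is the identity and yields &m_tid[0]. The misalign handling in
 * _pthread_mutex_init below accounts for the skipped leading word.
 */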

OS_ALWAYS_INLINE
static inline void
mutex_seq_load(mutex_seq *seqaddr, mutex_seq *oldseqval)
{
	oldseqval->seq_LU = seqaddr->seq_LU;
}

#define mutex_seq_atomic_load(seqaddr, oldseqval, m) \
	mutex_seq_atomic_load_##m(seqaddr, oldseqval)

OS_ALWAYS_INLINE OS_USED
static inline bool
mutex_seq_atomic_cmpxchgv_relaxed(mutex_seq *seqaddr, mutex_seq *oldseqval,
		mutex_seq *newseqval)
{
	return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
			newseqval->seq_LU, &oldseqval->seq_LU, relaxed);
}

OS_ALWAYS_INLINE OS_USED
static inline bool
mutex_seq_atomic_cmpxchgv_acquire(mutex_seq *seqaddr, mutex_seq *oldseqval,
		mutex_seq *newseqval)
{
	return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
			newseqval->seq_LU, &oldseqval->seq_LU, acquire);
}

OS_ALWAYS_INLINE OS_USED
static inline bool
mutex_seq_atomic_cmpxchgv_release(mutex_seq *seqaddr, mutex_seq *oldseqval,
		mutex_seq *newseqval)
{
	return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
			newseqval->seq_LU, &oldseqval->seq_LU, release);
}

#define mutex_seq_atomic_cmpxchgv(seqaddr, oldseqval, newseqval, m) \
	mutex_seq_atomic_cmpxchgv_##m(seqaddr, oldseqval, newseqval)

/*
 * Initialize a mutex variable, possibly with additional attributes.
 * Public interface - so don't trust the lock - initialize it first.
 */
PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
{
#if 0
	/* conformance tests depend on not having this behavior */
	/* The test for this behavior is optional */
	if (_pthread_mutex_check_signature(mutex))
		return EBUSY;
#endif
	_pthread_lock_init(&mutex->lock);
	return (_pthread_mutex_init(mutex, attr, 0x7));
}
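
/*
 * Usage sketch (illustrative only, not part of this library): client code
 * pairs this with an optional attribute object.
 *
 *	#include <pthread.h>
 *
 *	pthread_mutex_t m;
 *	pthread_mutexattr_t attr;
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
 *	int err = pthread_mutex_init(&m, &attr); // attr may also be NULL
 *	pthread_mutexattr_destroy(&attr);
 *
 * The 0x7 static_type above makes a NULL-attr init take the same path as the
 * PTHREAD_MUTEX_INITIALIZER case in the static_type switch of
 * _pthread_mutex_init below.
 */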

int
pthread_mutex_getprioceiling(const pthread_mutex_t *omutex, int *prioceiling)
{
	int res = EINVAL;
	pthread_mutex_t *mutex = (pthread_mutex_t *)omutex;
	if (_pthread_mutex_check_signature(mutex)) {
		_pthread_lock_lock(&mutex->lock);
		*prioceiling = mutex->prioceiling;
		res = 0;
		_pthread_lock_unlock(&mutex->lock);
	}
	return res;
}

int
pthread_mutex_setprioceiling(pthread_mutex_t *mutex, int prioceiling,
		int *old_prioceiling)
{
	int res = EINVAL;
	if (_pthread_mutex_check_signature(mutex)) {
		_pthread_lock_lock(&mutex->lock);
		if (prioceiling >= -999 && prioceiling <= 999) {
			*old_prioceiling = mutex->prioceiling;
			mutex->prioceiling = (int16_t)prioceiling;
			res = 0;
		}
		_pthread_lock_unlock(&mutex->lock);
	}
	return res;
}

int
pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *attr,
		int *prioceiling)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*prioceiling = attr->prioceiling;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_getprotocol(const pthread_mutexattr_t *attr, int *protocol)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*protocol = attr->protocol;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_getpolicy_np(const pthread_mutexattr_t *attr, int *policy)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (attr->opt) {
		case _PTHREAD_MTX_OPT_POLICY_FAIRSHARE:
			*policy = PTHREAD_MUTEX_POLICY_FAIRSHARE_NP;
			res = 0;
			break;
		case _PTHREAD_MTX_OPT_POLICY_FIRSTFIT:
			*policy = PTHREAD_MUTEX_POLICY_FIRSTFIT_NP;
			res = 0;
			break;
		}
	}
	return res;
}

int
pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*type = attr->type;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_getpshared(const pthread_mutexattr_t *attr, int *pshared)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*pshared = (int)attr->pshared;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
	attr->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
	attr->protocol = _PTHREAD_DEFAULT_PROTOCOL;
	attr->opt = __pthread_mutex_default_opt_policy;
	attr->type = PTHREAD_MUTEX_DEFAULT;
	attr->sig = _PTHREAD_MUTEX_ATTR_SIG;
	attr->pshared = _PTHREAD_DEFAULT_PSHARED;
	return 0;
}

int
pthread_mutexattr_setprioceiling(pthread_mutexattr_t *attr, int prioceiling)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		if (prioceiling >= -999 && prioceiling <= 999) {
			attr->prioceiling = prioceiling;
			res = 0;
		}
	}
	return res;
}

int
pthread_mutexattr_setprotocol(pthread_mutexattr_t *attr, int protocol)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (protocol) {
		case PTHREAD_PRIO_NONE:
		case PTHREAD_PRIO_INHERIT:
		case PTHREAD_PRIO_PROTECT:
			attr->protocol = protocol;
			res = 0;
			break;
		}
	}
	return res;
}

int
pthread_mutexattr_setpolicy_np(pthread_mutexattr_t *attr, int policy)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		// <rdar://problem/35844519> the first-fit implementation was broken
		// pre-Liberty so this mapping exists to ensure that the old first-fit
		// define (2) is no longer valid when used on older systems.
		switch (policy) {
		case PTHREAD_MUTEX_POLICY_FAIRSHARE_NP:
			attr->opt = _PTHREAD_MTX_OPT_POLICY_FAIRSHARE;
			res = 0;
			break;
		case PTHREAD_MUTEX_POLICY_FIRSTFIT_NP:
			attr->opt = _PTHREAD_MTX_OPT_POLICY_FIRSTFIT;
			res = 0;
			break;
		}
	}
	return res;
}
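
/*
 * Usage sketch (illustrative only): opting a single mutex into the fairshare
 * policy instead of the process-wide default.
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_setpolicy_np(&attr, PTHREAD_MUTEX_POLICY_FAIRSHARE_NP);
 *	pthread_mutex_init(&mutex, &attr);
 *
 * Passing the retired first-fit value (2) falls through the switch above and
 * returns EINVAL.
 */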

int
pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (type) {
		case PTHREAD_MUTEX_NORMAL:
		case PTHREAD_MUTEX_ERRORCHECK:
		case PTHREAD_MUTEX_RECURSIVE:
		//case PTHREAD_MUTEX_DEFAULT:
			attr->type = type;
			res = 0;
			break;
		}
	}
	return res;
}

int
pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		if ((pshared == PTHREAD_PROCESS_PRIVATE) ||
				(pshared == PTHREAD_PROCESS_SHARED))
		{
			attr->pshared = pshared;
			res = 0;
		}
	}
	return res;
}

OS_NOINLINE
int
_pthread_mutex_corruption_abort(pthread_mutex_t *mutex)
{
	PTHREAD_CLIENT_CRASH(0, "pthread_mutex corruption: mutex owner changed "
			"in the middle of lock/unlock");
}


OS_NOINLINE
static int
_pthread_mutex_check_init_slow(pthread_mutex_t *mutex)
{
	int res = EINVAL;

	if (_pthread_mutex_check_signature_init(mutex)) {
		_pthread_lock_lock(&mutex->lock);
		if (_pthread_mutex_check_signature_init(mutex)) {
			// initialize a statically initialized mutex to provide
			// compatibility for misbehaving applications.
			// (unlock should not be the first operation on a mutex)
			res = _pthread_mutex_init(mutex, NULL, (mutex->sig & 0xf));
		} else if (_pthread_mutex_check_signature(mutex)) {
			res = 0;
		}
		_pthread_lock_unlock(&mutex->lock);
	} else if (_pthread_mutex_check_signature(mutex)) {
		res = 0;
	}
	if (res != 0) {
		PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, res);
	}
	return res;
}

OS_ALWAYS_INLINE
static inline int
_pthread_mutex_check_init(pthread_mutex_t *mutex)
{
	int res = 0;
	if (!_pthread_mutex_check_signature(mutex)) {
		return _pthread_mutex_check_init_slow(mutex);
	}
	return res;
}

OS_ALWAYS_INLINE
static inline bool
_pthread_mutex_is_fairshare(pthread_mutex_t *mutex)
{
	return (mutex->mtxopts.options.policy == _PTHREAD_MTX_OPT_POLICY_FAIRSHARE);
}

OS_ALWAYS_INLINE
static inline bool
_pthread_mutex_is_firstfit(pthread_mutex_t *mutex)
{
	return (mutex->mtxopts.options.policy == _PTHREAD_MTX_OPT_POLICY_FIRSTFIT);
}

OS_ALWAYS_INLINE
static inline bool
_pthread_mutex_is_recursive(pthread_mutex_t *mutex)
{
	return (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE);
}

OS_ALWAYS_INLINE
static int
_pthread_mutex_lock_handle_options(pthread_mutex_t *mutex, bool trylock,
		uint64_t *tidaddr)
{
	if (mutex->mtxopts.options.type == PTHREAD_MUTEX_NORMAL) {
		// NORMAL does not do EDEADLK checking
		return 0;
	}

	uint64_t selfid = _pthread_threadid_self_np_direct();
	if (os_atomic_load_wide(tidaddr, relaxed) == selfid) {
		if (_pthread_mutex_is_recursive(mutex)) {
			if (mutex->mtxopts.options.lock_count < USHRT_MAX) {
				mutex->mtxopts.options.lock_count += 1;
				return mutex->mtxopts.options.lock_count;
			} else {
				return -EAGAIN;
			}
		} else if (trylock) { /* PTHREAD_MUTEX_ERRORCHECK */
			// <rdar://problem/16261552> as per OpenGroup, trylock cannot
			// return EDEADLK on a deadlock, it should return EBUSY.
			return -EBUSY;
		} else { /* PTHREAD_MUTEX_ERRORCHECK */
			return -EDEADLK;
		}
	}

	// Not recursive, or recursive but first lock.
	return 0;
}

OS_ALWAYS_INLINE
static int
_pthread_mutex_unlock_handle_options(pthread_mutex_t *mutex, uint64_t *tidaddr)
{
	if (mutex->mtxopts.options.type == PTHREAD_MUTEX_NORMAL) {
		// NORMAL does not do EDEADLK checking
		return 0;
	}

	uint64_t selfid = _pthread_threadid_self_np_direct();
	if (os_atomic_load_wide(tidaddr, relaxed) != selfid) {
		return -EPERM;
	} else if (_pthread_mutex_is_recursive(mutex) &&
			--mutex->mtxopts.options.lock_count) {
		return 1;
	}
	return 0;
}
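
/*
 * Return-value convention shared by the two helpers above (summary added for
 * clarity): 0 means proceed with the normal lock/unlock path, a positive
 * value reports a recursive lock/unlock that needs no further work, and a
 * negative value is a negated errno (-EDEADLK, -EBUSY, -EPERM, -EAGAIN) that
 * the caller flips and returns.
 */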
/*
 * Sequence numbers and TID:
 *
 * In steady (and uncontended) state, an unlocked mutex will
 * look like A=[L4 U4 TID0]. When it is being locked, it transitions
 * to B=[L5+KE U4 TID0] and then C=[L5+KE U4 TID940]. For an uncontended mutex,
 * the unlock path will then transition to D=[L5 U4 TID0] and then finally
 * E=[L5 U5 TID0].
 *
 * If a contender comes in after B, the mutex will instead transition to
 * F=[L6+KE U4 TID0] and then G=[L6+KE U4 TID940]. If a contender comes in
 * after C, it will transition to G=[L6+KE U4 TID940] directly. In both cases,
 * the contender will enter the kernel with either mutexwait(U4, TID0) or
 * mutexwait(U4, TID940). The first owner will unlock the mutex by first
 * updating the owner to H=[L6+KE U4 TID-1] and then doing the actual unlock to
 * I=[L6+KE U5 TID-1] before entering the kernel with mutexdrop(U5, -1) to
 * signal the next waiter (potentially as a prepost). When the waiter comes out
 * of the kernel, it will update the owner to J=[L6+KE U5 TID941]. An unlock at
 * this point is simply K=[L6 U5 TID0] and then finally L=[L6 U6 TID0].
 *
 * At various points along these timelines, since the sequence words and TID
 * are written independently, a thread may get preempted and another thread
 * might see inconsistent data. In the worst case, another thread may see the
 * TID in the SWITCHING (-1) state or unlocked (0) state for longer because
 * the owning thread was preempted.
 */

/*
 * Drop the mutex unlock references from cond_wait or mutex_unlock.
 */
OS_ALWAYS_INLINE
static inline int
_pthread_mutex_fairshare_unlock_updatebits(pthread_mutex_t *mutex,
		uint32_t *flagsp, uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
{
	uint32_t flags = mutex->mtxopts.value;
	flags &= ~_PTHREAD_MTX_OPT_NOTIFY; // no notification by default

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t oldtid, newtid;

	int res = _pthread_mutex_unlock_handle_options(mutex, tidaddr);
	if (res > 0) {
		// Valid recursive unlock
		if (flagsp) {
			*flagsp = flags;
		}
		PLOCKSTAT_MUTEX_RELEASE((pthread_mutex_t *)mutex, 1);
		return 0;
	} else if (res < 0) {
		PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, -res);
		return -res;
	}

	bool clearnotify, spurious;
	do {
		newseq = oldseq;
		oldtid = os_atomic_load_wide(tidaddr, relaxed);

		clearnotify = false;
		spurious = false;

		// pending waiters
		int numwaiters = diff_genseq(oldseq.lgenval, oldseq.ugenval);
		if (numwaiters == 0) {
			// spurious unlock (unlock of unlocked lock)
			spurious = true;
		} else {
			newseq.ugenval += PTHRW_INC;

			if ((oldseq.lgenval & PTHRW_COUNT_MASK) ==
					(newseq.ugenval & PTHRW_COUNT_MASK)) {
				// our unlock sequence matches the lock sequence, so if the
				// CAS is successful, the mutex is unlocked

				/* do not reset Ibit, just K&E */
				newseq.lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
				clearnotify = true;
				newtid = 0; // clear owner
			} else {
				newtid = PTHREAD_MTX_TID_SWITCHING;
				// need to signal others waiting for mutex
				flags |= _PTHREAD_MTX_OPT_NOTIFY;
			}

			if (newtid != oldtid) {
				// We're giving up the mutex one way or the other, so go ahead
				// and update the owner to 0 so that once the CAS below
				// succeeds, there is no stale ownership information. If the
				// CAS of the seqaddr fails, we may loop, but it's still valid
				// for the owner to be SWITCHING/0
				if (!os_atomic_cmpxchg(tidaddr, oldtid, newtid, relaxed)) {
					// we own this mutex, nobody should be updating it except us
					return _pthread_mutex_corruption_abort(mutex);
				}
			}
		}

		if (clearnotify || spurious) {
			flags &= ~_PTHREAD_MTX_OPT_NOTIFY;
		}
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, release));

	PTHREAD_TRACE(psynch_mutex_unlock_updatebits, mutex, oldseq.lgenval,
			newseq.lgenval, oldtid);

	if (mgenp != NULL) {
		*mgenp = newseq.lgenval;
	}
	if (ugenp != NULL) {
		*ugenp = newseq.ugenval;
	}
	if (pmtxp != NULL) {
		*pmtxp = (uint32_t *)mutex;
	}
	if (flagsp != NULL) {
		*flagsp = flags;
	}

	return 0;
}

OS_ALWAYS_INLINE
static inline int
_pthread_mutex_fairshare_lock_updatebits(pthread_mutex_t *mutex, uint64_t selfid)
{
	bool firstfit = _pthread_mutex_is_firstfit(mutex);
	bool gotlock = true;

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	do {
		newseq = oldseq;

		if (firstfit) {
			// firstfit locks can have the lock stolen out from under a locker
			// between the unlock from the kernel and this lock path. When this
			// happens, we still want to set the K bit before leaving the loop
			// (or notice if the lock unlocks while we try to update).
			gotlock = !is_rwl_ebit_set(oldseq.lgenval);
		} else if ((oldseq.lgenval & (PTH_RWL_KBIT | PTH_RWL_EBIT)) ==
				(PTH_RWL_KBIT | PTH_RWL_EBIT)) {
			// bits are already set, just update the owner tidaddr
			break;
		}

		newseq.lgenval |= PTH_RWL_KBIT | PTH_RWL_EBIT;
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			acquire));

	if (gotlock) {
		os_atomic_store_wide(tidaddr, selfid, relaxed);
	}

	PTHREAD_TRACE(psynch_mutex_lock_updatebits, mutex, oldseq.lgenval,
			newseq.lgenval, 0);

	// failing to take the lock in firstfit returns 1 to force the caller
	// to wait in the kernel
	return gotlock ? 0 : 1;
}

OS_NOINLINE
static int
_pthread_mutex_fairshare_lock_wait(pthread_mutex_t *mutex, mutex_seq newseq,
		uint64_t oldtid)
{
	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_threadid_self_np_direct();

	PLOCKSTAT_MUTEX_BLOCK((pthread_mutex_t *)mutex);
	do {
		uint32_t updateval;
		do {
			updateval = __psynch_mutexwait(mutex, newseq.lgenval,
					newseq.ugenval, oldtid, mutex->mtxopts.value);
			oldtid = os_atomic_load_wide(tidaddr, relaxed);
		} while (updateval == (uint32_t)-1);

		// returns 0 on successful update; in firstfit it may fail with 1
	} while (_pthread_mutex_fairshare_lock_updatebits(mutex, selfid) == 1);
	PLOCKSTAT_MUTEX_BLOCKED((pthread_mutex_t *)mutex, BLOCK_SUCCESS_PLOCKSTAT);

	return 0;
}

OS_NOINLINE
int
_pthread_mutex_fairshare_lock_slow(pthread_mutex_t *mutex, bool trylock)
{
	int res, recursive = 0;

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t oldtid, selfid = _pthread_threadid_self_np_direct();

	res = _pthread_mutex_lock_handle_options(mutex, trylock, tidaddr);
	if (res > 0) {
		recursive = 1;
		res = 0;
		goto out;
	} else if (res < 0) {
		res = -res;
		goto out;
	}

	bool gotlock;
	do {
		newseq = oldseq;
		oldtid = os_atomic_load_wide(tidaddr, relaxed);

		gotlock = ((oldseq.lgenval & PTH_RWL_EBIT) == 0);

		if (trylock && !gotlock) {
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS64 to ensure that nobody has unlocked concurrently.
		} else {
			// Increment the lock sequence number and force the lock into E+K
			// mode, whether "gotlock" is true or not.
			newseq.lgenval += PTHRW_INC;
			newseq.lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
		}
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, acquire));

	PTHREAD_TRACE(psynch_mutex_lock_updatebits, mutex, oldseq.lgenval,
			newseq.lgenval, 0);

	if (gotlock) {
		os_atomic_store_wide(tidaddr, selfid, relaxed);
		res = 0;
		PTHREAD_TRACE(psynch_mutex_ulock, mutex, newseq.lgenval,
				newseq.ugenval, selfid);
	} else if (trylock) {
		res = EBUSY;
		PTHREAD_TRACE(psynch_mutex_utrylock_failed, mutex, newseq.lgenval,
				newseq.ugenval, oldtid);
	} else {
		PTHREAD_TRACE(psynch_mutex_ulock | DBG_FUNC_START, mutex,
				newseq.lgenval, newseq.ugenval, oldtid);
		res = _pthread_mutex_fairshare_lock_wait(mutex, newseq, oldtid);
		PTHREAD_TRACE(psynch_mutex_ulock | DBG_FUNC_END, mutex,
				newseq.lgenval, newseq.ugenval, oldtid);
	}

	if (res == 0 && _pthread_mutex_is_recursive(mutex)) {
		mutex->mtxopts.options.lock_count = 1;
	}

out:
#if PLOCKSTAT
	if (res == 0) {
		PLOCKSTAT_MUTEX_ACQUIRE((pthread_mutex_t *)mutex, recursive, 0);
	} else {
		PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, res);
	}
#endif

	return res;
}

OS_NOINLINE
static inline int
_pthread_mutex_fairshare_lock(pthread_mutex_t *mutex, bool trylock)
{
#if ENABLE_USERSPACE_TRACE
	return _pthread_mutex_fairshare_lock_slow(mutex, trylock);
#elif PLOCKSTAT
	if (PLOCKSTAT_MUTEX_ACQUIRE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED()) {
		return _pthread_mutex_fairshare_lock_slow(mutex, trylock);
	}
#endif

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_threadid_self_np_direct();

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	if (os_unlikely(oldseq.lgenval & PTH_RWL_EBIT)) {
		return _pthread_mutex_fairshare_lock_slow(mutex, trylock);
	}

	bool gotlock;
	do {
		newseq = oldseq;

		gotlock = ((oldseq.lgenval & PTH_RWL_EBIT) == 0);

		if (trylock && !gotlock) {
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS64 to ensure that nobody has unlocked concurrently.
		} else if (os_likely(gotlock)) {
			// Increment the lock sequence number and force the lock into E+K
			// mode, whether "gotlock" is true or not.
			newseq.lgenval += PTHRW_INC;
			newseq.lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
		} else {
			return _pthread_mutex_fairshare_lock_slow(mutex, trylock);
		}
	} while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			acquire)));

	if (os_likely(gotlock)) {
		os_atomic_store_wide(tidaddr, selfid, relaxed);
		return 0;
	} else if (trylock) {
		return EBUSY;
	} else {
		__builtin_trap();
	}
}

OS_NOINLINE
static int
_pthread_mutex_fairshare_unlock_drop(pthread_mutex_t *mutex, mutex_seq newseq,
		uint32_t flags)
{
	int res;
	uint32_t updateval;

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	PTHREAD_TRACE(psynch_mutex_uunlock | DBG_FUNC_START, mutex, newseq.lgenval,
			newseq.ugenval, os_atomic_load_wide(tidaddr, relaxed));

	updateval = __psynch_mutexdrop(mutex, newseq.lgenval, newseq.ugenval,
			os_atomic_load_wide(tidaddr, relaxed), flags);

	PTHREAD_TRACE(psynch_mutex_uunlock | DBG_FUNC_END, mutex, updateval, 0, 0);

	if (updateval == (uint32_t)-1) {
		res = errno;

		if (res == EINTR) {
			res = 0;
		}
		if (res != 0) {
			PTHREAD_INTERNAL_CRASH(res, "__psynch_mutexdrop failed");
		}
		return res;
	}

	return 0;
}

OS_NOINLINE
int
_pthread_mutex_fairshare_unlock_slow(pthread_mutex_t *mutex)
{
	int res;
	mutex_seq newseq;
	uint32_t flags;

	res = _pthread_mutex_fairshare_unlock_updatebits(mutex, &flags, NULL,
			&newseq.lgenval, &newseq.ugenval);
	if (res != 0) return res;

	if ((flags & _PTHREAD_MTX_OPT_NOTIFY) != 0) {
		return _pthread_mutex_fairshare_unlock_drop(mutex, newseq, flags);
	} else {
		uint64_t *tidaddr;
		MUTEX_GETTID_ADDR(mutex, &tidaddr);
		PTHREAD_TRACE(psynch_mutex_uunlock, mutex, newseq.lgenval,
				newseq.ugenval, os_atomic_load_wide(tidaddr, relaxed));
	}

	return 0;
}

OS_NOINLINE
static int
_pthread_mutex_fairshare_unlock(pthread_mutex_t *mutex)
{
#if ENABLE_USERSPACE_TRACE
	return _pthread_mutex_fairshare_unlock_slow(mutex);
#elif PLOCKSTAT
	if (PLOCKSTAT_MUTEX_RELEASE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED()) {
		return _pthread_mutex_fairshare_unlock_slow(mutex);
	}
#endif

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	int numwaiters = diff_genseq(oldseq.lgenval, oldseq.ugenval);
	if (os_unlikely(numwaiters == 0)) {
		// spurious unlock (unlock of unlocked lock)
		return 0;
	}

	// We're giving up the mutex one way or the other, so go ahead and
	// update the owner to 0 so that once the CAS below succeeds, there
	// is no stale ownership information. If the CAS of the seqaddr
	// fails, we may loop, but it's still valid for the owner to be
	// SWITCHING/0
	os_atomic_store_wide(tidaddr, 0, relaxed);

	do {
		newseq = oldseq;
		newseq.ugenval += PTHRW_INC;

		if (os_likely((oldseq.lgenval & PTHRW_COUNT_MASK) ==
				(newseq.ugenval & PTHRW_COUNT_MASK))) {
			// If the CAS succeeds, we can be sure of a fast-path unlock
			// (needing only the CAS) because:
			// a. our lock and unlock sequence are equal
			// b. we don't need to clear an unlock prepost from the kernel

			// do not reset Ibit, just K&E
			newseq.lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
		} else {
			return _pthread_mutex_fairshare_unlock_slow(mutex);
		}
	} while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			release)));

	return 0;
}

#pragma mark ulock

OS_ALWAYS_INLINE
static inline uint32_t
_pthread_mutex_ulock_self_owner_value(void)
{
	mach_port_t self_port = _pthread_mach_thread_self_direct();
	return self_port & _PTHREAD_MUTEX_ULOCK_OWNER_MASK;
}
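
/*
 * Sketch of the ulock state word (derived from the code below; the constant
 * values live in the internal headers): ulock.uval packs the owner's mach
 * thread port (masked by _PTHREAD_MUTEX_ULOCK_OWNER_MASK) together with
 * _PTHREAD_MUTEX_ULOCK_WAITERS_BIT, so an uncontended lock or unlock is a
 * single CAS/xchg against _PTHREAD_MUTEX_ULOCK_UNLOCKED_VALUE, e.g.
 *
 *	uint32_t locked = _pthread_mutex_ulock_self_owner_value();
 *	os_atomic_cmpxchgv(&mutex->ulock.uval,
 *			_PTHREAD_MUTEX_ULOCK_UNLOCKED_VALUE, locked, &state, acquire);
 */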

OS_NOINLINE
static int
_pthread_mutex_ulock_lock_slow(pthread_mutex_t *mutex, uint32_t self_ownerval,
		uint32_t state)
{
	bool success = false, kernel_waiters = false;

	uint32_t wait_op = UL_UNFAIR_LOCK | ULF_NO_ERRNO;
	if (__pthread_mutex_ulock_adaptive_spin) {
		wait_op |= ULF_WAIT_ADAPTIVE_SPIN;
	}

	PLOCKSTAT_MUTEX_BLOCK((pthread_mutex_t *)mutex);
	do {
		bool owner_dead = false;

		do {
			uint32_t current_ownerval = state & _PTHREAD_MUTEX_ULOCK_OWNER_MASK;
			if (os_unlikely(owner_dead)) {
				// TODO: PTHREAD_STRICT candidate
				//
				// For a non-recursive mutex, this indicates that it's really
				// being used as a semaphore: even though we're the current
				// owner, in reality we're expecting another thread to 'unlock'
				// this mutex on our behalf later.
				//
				// __ulock_wait(2) doesn't permit you to wait for yourself, so
				// we need to first swap our ownership for the anonymous owner
				current_ownerval =
						MACH_PORT_DEAD & _PTHREAD_MUTEX_ULOCK_OWNER_MASK;
				owner_dead = false;
			}
			uint32_t new_state =
					current_ownerval | _PTHREAD_MUTEX_ULOCK_WAITERS_BIT;
			success = os_atomic_cmpxchgv(&mutex->ulock.uval, state, new_state,
					&state, relaxed);
			if (!success) {
				continue;
			}

			int rc = __ulock_wait(wait_op, &mutex->ulock, new_state, 0);

			PTHREAD_TRACE(ulmutex_lock_wait, mutex, new_state, rc, 0);

			if (os_unlikely(rc < 0)) {
				switch (-rc) {
				case EINTR:
				case EFAULT:
					break;
				case EOWNERDEAD:
					owner_dead = true;
					continue;
				default:
					PTHREAD_INTERNAL_CRASH(rc, "ulock_wait failure");
				}
			} else if (rc > 0) {
				kernel_waiters = true;
			}

			state = os_atomic_load(&mutex->ulock.uval, relaxed);
		} while (state != _PTHREAD_MUTEX_ULOCK_UNLOCKED_VALUE);

		uint32_t locked_state = self_ownerval;
		if (kernel_waiters) {
			locked_state |= _PTHREAD_MUTEX_ULOCK_WAITERS_BIT;
		}

		success = os_atomic_cmpxchgv(&mutex->ulock.uval, state, locked_state,
				&state, acquire);
	} while (!success);
	PLOCKSTAT_MUTEX_BLOCKED((pthread_mutex_t *)mutex, BLOCK_SUCCESS_PLOCKSTAT);

	return 0;
}

PTHREAD_NOEXPORT_VARIANT
int
_pthread_mutex_ulock_lock(pthread_mutex_t *mutex, bool trylock)
{
	uint32_t unlocked = _PTHREAD_MUTEX_ULOCK_UNLOCKED_VALUE;
	uint32_t locked = _pthread_mutex_ulock_self_owner_value();
	uint32_t state;

	bool success = os_atomic_cmpxchgv(&mutex->ulock.uval, unlocked, locked,
			&state, acquire);

	if (trylock) {
		PTHREAD_TRACE(ulmutex_trylock, mutex, locked, state, success);
	} else {
		PTHREAD_TRACE(ulmutex_lock, mutex, locked, state, success);
	}

	int rc = 0;
	if (!success) {
		if (trylock) {
			rc = EBUSY;
		} else {
			rc = _pthread_mutex_ulock_lock_slow(mutex, locked, state);
		}
	}

	if (rc) {
		PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, rc);
	} else {
		PLOCKSTAT_MUTEX_ACQUIRE((pthread_mutex_t *)mutex, /* recursive */ 0, 0);
	}

	return rc;
}

OS_NOINLINE
static int
_pthread_mutex_ulock_unlock_slow(pthread_mutex_t *mutex, uint32_t self_ownerval,
		uint32_t orig_state)
{
	if (os_unlikely(orig_state == _PTHREAD_MUTEX_ULOCK_UNLOCKED_VALUE)) {
		// XXX This is illegal, but psynch permitted it...
		// TODO: PTHREAD_STRICT candidate
		return 0;
	}

	uint32_t wake_flags = 0;

	uint32_t orig_ownerval = orig_state & _PTHREAD_MUTEX_ULOCK_OWNER_MASK;
	bool orig_waiters = orig_state & _PTHREAD_MUTEX_ULOCK_WAITERS_BIT;
	if (os_unlikely(orig_ownerval != self_ownerval)) {
		// XXX This is illegal, but psynch permitted it...
		// TODO: PTHREAD_STRICT candidate
		if (!orig_waiters) {
			return 0;
		}

		wake_flags |= ULF_WAKE_ALLOW_NON_OWNER;
	} else if (os_unlikely(!orig_waiters)) {
		PTHREAD_INTERNAL_CRASH(0, "unlock_slow without orig_waiters");
	}

	for (;;) {
		int rc = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO | wake_flags,
				&mutex->ulock, 0);

		PTHREAD_TRACE(ulmutex_unlock_wake, mutex, rc, 0, 0);

		if (os_unlikely(rc < 0)) {
			switch (-rc) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				PTHREAD_INTERNAL_CRASH(-rc, "ulock_wake failure");
			}
		}
		break;
	}

	return 0;
}

PTHREAD_NOEXPORT_VARIANT
int
_pthread_mutex_ulock_unlock(pthread_mutex_t *mutex)
{
	uint32_t locked_uncontended = _pthread_mutex_ulock_self_owner_value();
	uint32_t unlocked = _PTHREAD_MUTEX_ULOCK_UNLOCKED_VALUE;
	uint32_t state = os_atomic_xchg(&mutex->ulock.uval, unlocked, release);

	PTHREAD_TRACE(ulmutex_unlock, mutex, locked_uncontended, state, 0);

	int rc = 0;
	if (state != locked_uncontended) {
		rc = _pthread_mutex_ulock_unlock_slow(mutex, locked_uncontended,
				state);
	}

	if (rc) {
		PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, rc);
	} else {
		PLOCKSTAT_MUTEX_RELEASE((pthread_mutex_t *)mutex, /* recursive */ 0);
	}

	return rc;
}

#pragma mark firstfit

OS_ALWAYS_INLINE
static inline int
_pthread_mutex_firstfit_unlock_updatebits(pthread_mutex_t *mutex,
		uint32_t *flagsp, uint32_t **mutexp, uint32_t *lvalp, uint32_t *uvalp)
{
	uint32_t flags = mutex->mtxopts.value & ~_PTHREAD_MTX_OPT_NOTIFY;
	bool kernel_wake;

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t oldtid;

	int res = _pthread_mutex_unlock_handle_options(mutex, tidaddr);
	if (res > 0) {
		// Valid recursive unlock
		if (flagsp) {
			*flagsp = flags;
		}
		PLOCKSTAT_MUTEX_RELEASE((pthread_mutex_t *)mutex, 1);
		return 0;
	} else if (res < 0) {
		PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, -res);
		return -res;
	}

	do {
		newseq = oldseq;
		oldtid = os_atomic_load_wide(tidaddr, relaxed);
		// One or more kernel waiters means we need to do a wake.
		kernel_wake = diff_genseq(oldseq.lgenval, oldseq.ugenval) > 0;
		newseq.lgenval &= ~PTH_RWL_EBIT;

		if (kernel_wake) {
			// Going to the kernel post-unlock removes a single waiter unlock
			// from the mutex counts.
			newseq.ugenval += PTHRW_INC;
		}

		if (oldtid != 0) {
			if (!os_atomic_cmpxchg(tidaddr, oldtid, 0, relaxed)) {
				return _pthread_mutex_corruption_abort(mutex);
			}
		}
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, release));

	PTHREAD_TRACE(psynch_ffmutex_unlock_updatebits, mutex, oldseq.lgenval,
			newseq.lgenval, newseq.ugenval);

	if (kernel_wake) {
		// We choose to return this out via flags because the condition
		// variable also uses this to determine whether to do a kernel wake
		// when beginning a cvwait.
		flags |= _PTHREAD_MTX_OPT_NOTIFY;
	}
	if (lvalp) {
		*lvalp = newseq.lgenval;
	}
	if (uvalp) {
		*uvalp = newseq.ugenval;
	}
	if (mutexp) {
		*mutexp = (uint32_t *)mutex;
	}
	if (flagsp) {
		*flagsp = flags;
	}
	return 0;
}

OS_NOINLINE
static int
_pthread_mutex_firstfit_wake(pthread_mutex_t *mutex, mutex_seq newseq,
		uint32_t flags)
{
	PTHREAD_TRACE(psynch_ffmutex_wake, mutex, newseq.lgenval, newseq.ugenval,
			0);
	int res = __psynch_mutexdrop(mutex, newseq.lgenval, newseq.ugenval, 0,
			flags);

	if (res == -1) {
		res = errno;
		if (res == EINTR) {
			res = 0;
		}
		if (res != 0) {
			PTHREAD_INTERNAL_CRASH(res, "__psynch_mutexdrop failed");
		}
		return res;
	}
	return 0;
}

OS_NOINLINE
int
_pthread_mutex_firstfit_unlock_slow(pthread_mutex_t *mutex)
{
	mutex_seq newseq;
	uint32_t flags;
	int res;

	res = _pthread_mutex_firstfit_unlock_updatebits(mutex, &flags, NULL,
			&newseq.lgenval, &newseq.ugenval);
	if (res != 0) return res;

	if (flags & _PTHREAD_MTX_OPT_NOTIFY) {
		return _pthread_mutex_firstfit_wake(mutex, newseq, flags);
	}
	return 0;
}

OS_ALWAYS_INLINE
static bool
_pthread_mutex_firstfit_lock_updatebits(pthread_mutex_t *mutex, uint64_t selfid,
		mutex_seq *newseqp)
{
	bool gotlock;

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	PTHREAD_TRACE(psynch_ffmutex_lock_updatebits | DBG_FUNC_START, mutex,
			oldseq.lgenval, oldseq.ugenval, 0);

	do {
		newseq = oldseq;
		gotlock = is_rwl_ebit_clear(oldseq.lgenval);

		if (gotlock) {
			// If we see the E-bit cleared, we should just attempt to take it.
			newseq.lgenval |= PTH_RWL_EBIT;
		} else {
			// If we failed to get the lock then we need to put ourselves back
			// in the queue of waiters. The previous unlocker that woke us out
			// of the kernel consumed the S-count for our previous wake. So
			// take another ticket on L and go back in the kernel to sleep.
			newseq.lgenval += PTHRW_INC;
		}
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, acquire));

	if (gotlock) {
		os_atomic_store_wide(tidaddr, selfid, relaxed);
	}

	PTHREAD_TRACE(psynch_ffmutex_lock_updatebits | DBG_FUNC_END, mutex,
			newseq.lgenval, newseq.ugenval, 0);

	if (newseqp) {
		*newseqp = newseq;
	}
	return gotlock;
}

OS_NOINLINE
static int
_pthread_mutex_firstfit_lock_wait(pthread_mutex_t *mutex, mutex_seq newseq,
		uint64_t oldtid)
{
	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_threadid_self_np_direct();

	PLOCKSTAT_MUTEX_BLOCK((pthread_mutex_t *)mutex);
	do {
		uint32_t uval;
		do {
			PTHREAD_TRACE(psynch_ffmutex_wait | DBG_FUNC_START, mutex,
					newseq.lgenval, newseq.ugenval, mutex->mtxopts.value);
			uval = __psynch_mutexwait(mutex, newseq.lgenval, newseq.ugenval,
					oldtid, mutex->mtxopts.value);
			PTHREAD_TRACE(psynch_ffmutex_wait | DBG_FUNC_END, mutex,
					uval, 0, 0);
			oldtid = os_atomic_load_wide(tidaddr, relaxed);
		} while (uval == (uint32_t)-1);
	} while (!_pthread_mutex_firstfit_lock_updatebits(mutex, selfid, &newseq));
	PLOCKSTAT_MUTEX_BLOCKED((pthread_mutex_t *)mutex, BLOCK_SUCCESS_PLOCKSTAT);

	return 0;
}

OS_NOINLINE
int
_pthread_mutex_firstfit_lock_slow(pthread_mutex_t *mutex, bool trylock)
{
	int res, recursive = 0;

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t oldtid, selfid = _pthread_threadid_self_np_direct();

	res = _pthread_mutex_lock_handle_options(mutex, trylock, tidaddr);
	if (res > 0) {
		recursive = 1;
		res = 0;
		goto out;
	} else if (res < 0) {
		res = -res;
		goto out;
	}

	PTHREAD_TRACE(psynch_ffmutex_lock_updatebits | DBG_FUNC_START, mutex,
			oldseq.lgenval, oldseq.ugenval, 0);

	bool gotlock;
	do {
		newseq = oldseq;
		oldtid = os_atomic_load_wide(tidaddr, relaxed);

		gotlock = is_rwl_ebit_clear(oldseq.lgenval);
		if (trylock && !gotlock) {
			// We still want to perform the CAS here, even though it won't
			// do anything so that it fails if someone unlocked while we were
			// in the loop
		} else if (gotlock) {
			// In first-fit, getting the lock simply adds the E-bit
			newseq.lgenval |= PTH_RWL_EBIT;
		} else {
			// Failed to get the lock, increment the L-val and go to
			// the kernel to sleep
			newseq.lgenval += PTHRW_INC;
		}
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, acquire));

	PTHREAD_TRACE(psynch_ffmutex_lock_updatebits | DBG_FUNC_END, mutex,
			newseq.lgenval, newseq.ugenval, 0);

	if (gotlock) {
		os_atomic_store_wide(tidaddr, selfid, relaxed);
		res = 0;
		PTHREAD_TRACE(psynch_mutex_ulock, mutex, newseq.lgenval,
				newseq.ugenval, selfid);
	} else if (trylock) {
		res = EBUSY;
		PTHREAD_TRACE(psynch_mutex_utrylock_failed, mutex, newseq.lgenval,
				newseq.ugenval, oldtid);
	} else {
		PTHREAD_TRACE(psynch_mutex_ulock | DBG_FUNC_START, mutex,
				newseq.lgenval, newseq.ugenval, oldtid);
		res = _pthread_mutex_firstfit_lock_wait(mutex, newseq, oldtid);
		PTHREAD_TRACE(psynch_mutex_ulock | DBG_FUNC_END, mutex,
				newseq.lgenval, newseq.ugenval, oldtid);
	}

	if (res == 0 && _pthread_mutex_is_recursive(mutex)) {
		mutex->mtxopts.options.lock_count = 1;
	}

out:
#if PLOCKSTAT
	if (res == 0) {
		PLOCKSTAT_MUTEX_ACQUIRE((pthread_mutex_t *)mutex, recursive, 0);
	} else {
		PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, res);
	}
#endif
	return res;
}

#pragma mark fast path

OS_NOINLINE
int
_pthread_mutex_droplock(pthread_mutex_t *mutex, uint32_t *flagsp,
		uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
{
	if (_pthread_mutex_is_fairshare(mutex)) {
		return _pthread_mutex_fairshare_unlock_updatebits(mutex, flagsp,
				pmtxp, mgenp, ugenp);
	}
	return _pthread_mutex_firstfit_unlock_updatebits(mutex, flagsp, pmtxp,
			mgenp, ugenp);
}

OS_NOINLINE
int
_pthread_mutex_lock_init_slow(pthread_mutex_t *mutex, bool trylock)
{
	int res;

	res = _pthread_mutex_check_init(mutex);
	if (res != 0) return res;

	if (os_unlikely(_pthread_mutex_is_fairshare(mutex))) {
		return _pthread_mutex_fairshare_lock_slow(mutex, trylock);
	} else if (os_unlikely(_pthread_mutex_uses_ulock(mutex))) {
		return _pthread_mutex_ulock_lock(mutex, trylock);
	}
	return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
}

OS_NOINLINE
static int
_pthread_mutex_unlock_init_slow(pthread_mutex_t *mutex)
{
	int res;

	// Initialize static mutexes for compatibility with misbehaving
	// applications (unlock should not be the first operation on a mutex).
	res = _pthread_mutex_check_init(mutex);
	if (res != 0) return res;

	if (os_unlikely(_pthread_mutex_is_fairshare(mutex))) {
		return _pthread_mutex_fairshare_unlock_slow(mutex);
	} else if (os_unlikely(_pthread_mutex_uses_ulock(mutex))) {
		return _pthread_mutex_ulock_unlock(mutex);
	}
	return _pthread_mutex_firstfit_unlock_slow(mutex);
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	if (os_unlikely(!_pthread_mutex_check_signature_fast(mutex))) {
		return _pthread_mutex_unlock_init_slow(mutex);
	}

	if (os_unlikely(_pthread_mutex_is_fairshare(mutex))) {
		return _pthread_mutex_fairshare_unlock(mutex);
	}

	if (os_unlikely(_pthread_mutex_uses_ulock(mutex))) {
		return _pthread_mutex_ulock_unlock(mutex);
	}

#if ENABLE_USERSPACE_TRACE
	return _pthread_mutex_firstfit_unlock_slow(mutex);
#elif PLOCKSTAT
	if (PLOCKSTAT_MUTEX_RELEASE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED()) {
		return _pthread_mutex_firstfit_unlock_slow(mutex);
	}
#endif

	/*
	 * This is the first-fit fast path. The fairshare fast-ish path is in
	 * _pthread_mutex_fairshare_unlock()
	 */
	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	// We're giving up the mutex one way or the other, so go ahead and
	// update the owner to 0 so that once the CAS below succeeds, there
	// is no stale ownership information. If the CAS of the seqaddr
	// fails, we may loop, but it's still valid for the owner to be
	// SWITCHING/0
	os_atomic_store_wide(tidaddr, 0, relaxed);

	do {
		newseq = oldseq;

		if (diff_genseq(oldseq.lgenval, oldseq.ugenval) == 0) {
			// No outstanding waiters in kernel, we can simply drop the E-bit
			// and return.
			newseq.lgenval &= ~PTH_RWL_EBIT;
		} else {
			return _pthread_mutex_firstfit_unlock_slow(mutex);
		}
	} while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			release)));

	return 0;
}

OS_ALWAYS_INLINE
static inline int
_pthread_mutex_firstfit_lock(pthread_mutex_t *mutex, bool trylock)
{
	/*
	 * This is the first-fit fast path. The fairshare fast-ish path is in
	 * _pthread_mutex_fairshare_lock()
	 */
	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_threadid_self_np_direct();

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	if (os_unlikely(!trylock && (oldseq.lgenval & PTH_RWL_EBIT))) {
		return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
	}

	bool gotlock;
	do {
		newseq = oldseq;
		gotlock = is_rwl_ebit_clear(oldseq.lgenval);

		if (trylock && !gotlock) {
#if __LP64__
			// The sequence load is atomic, so we can bail here without writing
			// it and avoid some unnecessary coherence traffic - rdar://57259033
			os_atomic_thread_fence(acquire);
			return EBUSY;
#else
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS64 to ensure that nobody has unlocked concurrently.
#endif
		} else if (os_likely(gotlock)) {
			// In first-fit, getting the lock simply adds the E-bit
			newseq.lgenval |= PTH_RWL_EBIT;
		} else {
			return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
		}
	} while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			acquire)));

	if (os_likely(gotlock)) {
		os_atomic_store_wide(tidaddr, selfid, relaxed);
		return 0;
	} else if (trylock) {
		return EBUSY;
	} else {
		__builtin_trap();
	}
}

OS_ALWAYS_INLINE
static inline int
_pthread_mutex_lock(pthread_mutex_t *mutex, bool trylock)
{
	if (os_unlikely(!_pthread_mutex_check_signature_fast(mutex))) {
		return _pthread_mutex_lock_init_slow(mutex, trylock);
	}

	if (os_unlikely(_pthread_mutex_is_fairshare(mutex))) {
		return _pthread_mutex_fairshare_lock(mutex, trylock);
	}

	if (os_unlikely(_pthread_mutex_uses_ulock(mutex))) {
		return _pthread_mutex_ulock_lock(mutex, trylock);
	}

#if ENABLE_USERSPACE_TRACE
	return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
#elif PLOCKSTAT
	if (PLOCKSTAT_MUTEX_ACQUIRE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED()) {
		return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
	}
#endif

	return _pthread_mutex_firstfit_lock(mutex, trylock);
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_lock(pthread_mutex_t *mutex)
{
	return _pthread_mutex_lock(mutex, false);
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	return _pthread_mutex_lock(mutex, true);
}
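
/*
 * Usage sketch (illustrative only): the classic trylock pattern these entry
 * points serve.
 *
 *	if (pthread_mutex_trylock(&m) == 0) {
 *		// fast path: mutex acquired without blocking
 *		pthread_mutex_unlock(&m);
 *	} else {
 *		// EBUSY: contended, fall back to a blocking acquire
 *		pthread_mutex_lock(&m);
 *		pthread_mutex_unlock(&m);
 *	}
 *
 * Note that trylock on an ERRORCHECK mutex the caller already owns returns
 * EBUSY rather than EDEADLK (see _pthread_mutex_lock_handle_options above).
 */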

OS_ALWAYS_INLINE
static inline int
_pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr,
		uint32_t static_type)
{
	mutex->mtxopts.value = 0;
	mutex->mtxopts.options.mutex = 1;
	if (attr) {
		if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
			return EINVAL;
		}
		mutex->prioceiling = (int16_t)attr->prioceiling;
		mutex->mtxopts.options.protocol = attr->protocol;
		mutex->mtxopts.options.policy = attr->opt;
		mutex->mtxopts.options.type = attr->type;
		mutex->mtxopts.options.pshared = attr->pshared;
	} else {
		switch (static_type) {
		case 1:
			mutex->mtxopts.options.type = PTHREAD_MUTEX_ERRORCHECK;
			break;
		case 2:
			mutex->mtxopts.options.type = PTHREAD_MUTEX_RECURSIVE;
			break;
		case 3:
			/* firstfit fall thru */
		case 7:
			mutex->mtxopts.options.type = PTHREAD_MUTEX_DEFAULT;
			break;
		default:
			return EINVAL;
		}

		mutex->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
		mutex->mtxopts.options.protocol = _PTHREAD_DEFAULT_PROTOCOL;
		if (static_type != 3) {
			mutex->mtxopts.options.policy = __pthread_mutex_default_opt_policy;
		} else {
			mutex->mtxopts.options.policy = _PTHREAD_MTX_OPT_POLICY_FIRSTFIT;
		}
		mutex->mtxopts.options.pshared = _PTHREAD_DEFAULT_PSHARED;
	}

	mutex->priority = 0;

	long sig = _PTHREAD_MUTEX_SIG;
	if (mutex->mtxopts.options.type == PTHREAD_MUTEX_NORMAL &&
			(_pthread_mutex_is_fairshare(mutex) ||
			_pthread_mutex_is_firstfit(mutex))) {
		// rdar://18148854 _pthread_mutex_lock & pthread_mutex_unlock fastpath
		sig = _PTHREAD_MUTEX_SIG_fast;
	}

	// Criteria for ulock eligibility:
	// - not ERRORCHECK or RECURSIVE
	// - not FAIRSHARE
	// - not PROCESS_SHARED
	// - checkfix for rdar://21813573 not active
	//
	// All of these should be addressed eventually.
	if (mutex->mtxopts.options.type == PTHREAD_MUTEX_NORMAL &&
			mutex->mtxopts.options.policy == _PTHREAD_MTX_OPT_POLICY_FIRSTFIT &&
			mutex->mtxopts.options.pshared == PTHREAD_PROCESS_PRIVATE &&
			sig == _PTHREAD_MUTEX_SIG_fast) {
		mutex->mtxopts.options.ulock = __pthread_mutex_use_ulock;
	} else {
		mutex->mtxopts.options.ulock = false;
	}

	if (mutex->mtxopts.options.ulock) {
#if PTHREAD_MUTEX_INIT_UNUSED
		__builtin_memset(&mutex->psynch, 0xff, sizeof(mutex->psynch));
#endif // PTHREAD_MUTEX_INIT_UNUSED

		mutex->ulock = _PTHREAD_MUTEX_ULOCK_UNLOCKED;
	} else {
		mutex_seq *seqaddr;
		MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

		uint64_t *tidaddr;
		MUTEX_GETTID_ADDR(mutex, &tidaddr);

#if PTHREAD_MUTEX_INIT_UNUSED
		if ((uint32_t *)tidaddr != mutex->psynch.m_tid) {
			// TODO: PTHREAD_STRICT candidate
			mutex->mtxopts.options.misalign = 1;
			__builtin_memset(mutex->psynch.m_tid, 0xff,
					sizeof(mutex->psynch.m_tid));
		}
		__builtin_memset(mutex->psynch.m_mis, 0xff, sizeof(mutex->psynch.m_mis));
#endif // PTHREAD_MUTEX_INIT_UNUSED
		*tidaddr = 0;
		*seqaddr = (mutex_seq){ };
	}

#if PTHREAD_MUTEX_INIT_UNUSED
	// For detecting copied mutexes and smashes during debugging
	uint32_t sig32 = (uint32_t)sig;
#if defined(__LP64__)
	uintptr_t guard = ~(uintptr_t)mutex; // use ~ to hide from leaks
	__builtin_memcpy(mutex->_reserved, &guard, sizeof(guard));
	mutex->_reserved[2] = sig32;
	mutex->_reserved[3] = sig32;
	mutex->_pad = sig32;
#else
	mutex->_reserved[0] = sig32;
#endif
#endif // PTHREAD_MUTEX_INIT_UNUSED

	// Ensure all contents are properly set before setting signature.
#if defined(__LP64__)
	// For binary compatibility reasons we cannot require natural alignment of
	// the 64bit 'sig' long value in the struct. rdar://problem/21610439
	uint32_t *sig32_ptr = (uint32_t *)&mutex->sig;
	uint32_t *sig32_val = (uint32_t *)&sig;
	*(sig32_ptr + 1) = *(sig32_val + 1);
	os_atomic_store(sig32_ptr, *sig32_val, release);
#else
	os_atomic_store(&mutex->sig, sig, release);
#endif

	return 0;
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	int res = EINVAL;

	_pthread_lock_lock(&mutex->lock);
	if (_pthread_mutex_check_signature(mutex)) {
		// TODO: PTHREAD_STRICT candidate
		res = EBUSY;

		if (_pthread_mutex_uses_ulock(mutex) &&
				mutex->ulock.uval == _PTHREAD_MUTEX_ULOCK_UNLOCKED_VALUE) {
			res = 0;
		} else {
			mutex_seq *seqaddr;
			MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

			mutex_seq seq;
			mutex_seq_load(seqaddr, &seq);

			uint64_t *tidaddr;
			MUTEX_GETTID_ADDR(mutex, &tidaddr);

			if ((os_atomic_load_wide(tidaddr, relaxed) == 0) &&
					(seq.lgenval & PTHRW_COUNT_MASK) ==
					(seq.ugenval & PTHRW_COUNT_MASK)) {
				res = 0;
			}
		}
	} else if (_pthread_mutex_check_signature_init(mutex)) {
		res = 0;
	}

	if (res == 0) {
		mutex->sig = _PTHREAD_NO_SIG;
	}

	_pthread_lock_unlock(&mutex->lock);

	return res;
}

#endif /* !BUILDING_VARIANT ] */

/*
 * Destroy a mutex attribute structure.
 */
int
pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
	if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
		return EINVAL;
	}

	attr->sig = _PTHREAD_NO_SIG;
	return 0;
}