]> git.saurik.com Git - apple/libpthread.git/blame - src/pthread_mutex.c
libpthread-138.10.4.tar.gz
[apple/libpthread.git] / src / pthread_mutex.c
CommitLineData
f1a1da6c
A
1/*
2 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23/*
24 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
25 * All Rights Reserved
26 *
27 * Permission to use, copy, modify, and distribute this software and
28 * its documentation for any purpose and without fee is hereby granted,
29 * provided that the above copyright notice appears in all copies and
30 * that both the copyright notice and this permission notice appear in
31 * supporting documentation.
32 *
33 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
34 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE.
36 *
37 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
38 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
39 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
40 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
41 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
42 *
43 */
44/*
45 * MkLinux
46 */
47
48/*
49 * POSIX Pthread Library
50 * -- Mutex variable support
51 */
52
964d3577 53#include "resolver.h"
f1a1da6c
A
54#include "internal.h"
55#include "kern/kern_trace.h"
56#include <sys/syscall.h>
57
964d3577
A
58#include "os/atomic.h"
59
f1a1da6c
A
60#ifdef PLOCKSTAT
61#include "plockstat.h"
62#else /* !PLOCKSTAT */
63#define PLOCKSTAT_MUTEX_SPIN(x)
64#define PLOCKSTAT_MUTEX_SPUN(x, y, z)
65#define PLOCKSTAT_MUTEX_ERROR(x, y)
66#define PLOCKSTAT_MUTEX_BLOCK(x)
67#define PLOCKSTAT_MUTEX_BLOCKED(x, y)
68#define PLOCKSTAT_MUTEX_ACQUIRE(x, y, z)
69#define PLOCKSTAT_MUTEX_RELEASE(x, y)
70#endif /* PLOCKSTAT */
71
3a6437e6
A
72#define PTHREAD_MUTEX_INIT_UNUSED 1
73
f1a1da6c
A
74extern int __unix_conforming;
75
76#ifndef BUILDING_VARIANT
964d3577
A
77
78PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
79int
80_pthread_mutex_unlock_slow(pthread_mutex_t *omutex);
81
82PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
83int
84_pthread_mutex_lock_slow(pthread_mutex_t *omutex, bool trylock);
85
86PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into _pthread_mutex_lock
87int
88_pthread_mutex_lock_wait(pthread_mutex_t *omutex, uint64_t newval64, uint64_t oldtid);
89
f1a1da6c
A
90#endif /* BUILDING_VARIANT */
91
92#define DEBUG_TRACE_POINTS 0
93
94#if DEBUG_TRACE_POINTS
95extern int __syscall(int number, ...);
96#define DEBUG_TRACE(x, a, b, c, d) __syscall(SYS_kdebug_trace, TRACE_##x, a, b, c, d)
97#else
98#define DEBUG_TRACE(x, a, b, c, d) do { } while(0)
99#endif
100
101#include <machine/cpu_capabilities.h>
102
964d3577 103static inline int _pthread_mutex_init(_pthread_mutex *mutex, const pthread_mutexattr_t *attr, uint32_t static_type);
f1a1da6c
A
104
105#if !__LITTLE_ENDIAN__
106#error MUTEX_GETSEQ_ADDR assumes little endian layout of 2 32-bit sequence words
107#endif
108
964d3577
A
109PTHREAD_ALWAYS_INLINE
110static inline void
f1a1da6c
A
111MUTEX_GETSEQ_ADDR(_pthread_mutex *mutex,
112 volatile uint64_t **seqaddr)
113{
3a6437e6
A
114 // 64-bit aligned address inside m_seq array (&m_seq[0] for aligned mutex)
115 // We don't require more than byte alignment on OS X. rdar://22278325
116 *seqaddr = (volatile uint64_t*)(((uintptr_t)mutex->m_seq + 0x7ul) & ~0x7ul);
f1a1da6c
A
117}
118
964d3577
A
119PTHREAD_ALWAYS_INLINE
120static inline void
f1a1da6c
A
121MUTEX_GETTID_ADDR(_pthread_mutex *mutex,
122 volatile uint64_t **tidaddr)
123{
3a6437e6
A
124 // 64-bit aligned address inside m_tid array (&m_tid[0] for aligned mutex)
125 // We don't require more than byte alignment on OS X. rdar://22278325
126 *tidaddr = (volatile uint64_t*)(((uintptr_t)mutex->m_tid + 0x7ul) & ~0x7ul);
f1a1da6c
A
127}
128
129#ifndef BUILDING_VARIANT /* [ */
964d3577 130#ifndef OS_UP_VARIANT_ONLY
f1a1da6c
A
131
132#define BLOCK_FAIL_PLOCKSTAT 0
133#define BLOCK_SUCCESS_PLOCKSTAT 1
134
#ifdef PLOCKSTAT
/* This function is never called and exists to provide never-fired dtrace
 * probes so that user d scripts don't get errors.
 *
 * Keeping the probe macros referenced from at least one (PTHREAD_USED)
 * function ensures the dtrace probe points survive in the binary.
 */
PTHREAD_NOEXPORT PTHREAD_USED
void
_plockstat_never_fired(void)
{
	PLOCKSTAT_MUTEX_SPIN(NULL);
	PLOCKSTAT_MUTEX_SPUN(NULL, 0, 0);
}
#endif // PLOCKSTAT
f1a1da6c
A
147
148/*
149 * Initialize a mutex variable, possibly with additional attributes.
150 * Public interface - so don't trust the lock - initialize it first.
151 */
152int
153pthread_mutex_init(pthread_mutex_t *omutex, const pthread_mutexattr_t *attr)
154{
155#if 0
156 /* conformance tests depend on not having this behavior */
157 /* The test for this behavior is optional */
964d3577 158 if (_pthread_mutex_check_signature(mutex))
f1a1da6c
A
159 return EBUSY;
160#endif
161 _pthread_mutex *mutex = (_pthread_mutex *)omutex;
162 LOCK_INIT(mutex->lock);
163 return (_pthread_mutex_init(mutex, attr, 0x7));
164}
165
166int
167pthread_mutex_getprioceiling(const pthread_mutex_t *omutex, int *prioceiling)
168{
169 int res = EINVAL;
170 _pthread_mutex *mutex = (_pthread_mutex *)omutex;
964d3577 171 if (_pthread_mutex_check_signature(mutex)) {
f1a1da6c
A
172 LOCK(mutex->lock);
173 *prioceiling = mutex->prioceiling;
174 res = 0;
175 UNLOCK(mutex->lock);
176 }
177 return res;
178}
179
180int
181pthread_mutex_setprioceiling(pthread_mutex_t *omutex, int prioceiling, int *old_prioceiling)
182{
183 int res = EINVAL;
184 _pthread_mutex *mutex = (_pthread_mutex *)omutex;
964d3577 185 if (_pthread_mutex_check_signature(mutex)) {
f1a1da6c
A
186 LOCK(mutex->lock);
187 if (prioceiling >= -999 || prioceiling <= 999) {
188 *old_prioceiling = mutex->prioceiling;
189 mutex->prioceiling = prioceiling;
190 res = 0;
191 }
192 UNLOCK(mutex->lock);
193 }
194 return res;
195}
196
197int
198pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *attr, int *prioceiling)
199{
200 int res = EINVAL;
201 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
202 *prioceiling = attr->prioceiling;
203 res = 0;
204 }
205 return res;
206}
207
208int
209pthread_mutexattr_getprotocol(const pthread_mutexattr_t *attr, int *protocol)
210{
211 int res = EINVAL;
212 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
213 *protocol = attr->protocol;
214 res = 0;
215 }
216 return res;
217}
218
219int
220pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
221{
222 int res = EINVAL;
223 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
224 *type = attr->type;
225 res = 0;
226 }
227 return res;
228}
229
230int
231pthread_mutexattr_getpshared(const pthread_mutexattr_t *attr, int *pshared)
232{
233 int res = EINVAL;
234 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
235 *pshared = (int)attr->pshared;
236 res = 0;
237 }
238 return res;
239}
240
241int
242pthread_mutexattr_init(pthread_mutexattr_t *attr)
243{
244 attr->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
245 attr->protocol = _PTHREAD_DEFAULT_PROTOCOL;
246 attr->policy = _PTHREAD_MUTEX_POLICY_FAIRSHARE;
247 attr->type = PTHREAD_MUTEX_DEFAULT;
248 attr->sig = _PTHREAD_MUTEX_ATTR_SIG;
249 attr->pshared = _PTHREAD_DEFAULT_PSHARED;
250 return 0;
251}
252
253int
254pthread_mutexattr_setprioceiling(pthread_mutexattr_t *attr, int prioceiling)
255{
256 int res = EINVAL;
257 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
258 if (prioceiling >= -999 || prioceiling <= 999) {
259 attr->prioceiling = prioceiling;
260 res = 0;
261 }
262 }
263 return res;
264}
265
266int
267pthread_mutexattr_setprotocol(pthread_mutexattr_t *attr, int protocol)
268{
269 int res = EINVAL;
270 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
271 switch (protocol) {
272 case PTHREAD_PRIO_NONE:
273 case PTHREAD_PRIO_INHERIT:
274 case PTHREAD_PRIO_PROTECT:
275 attr->protocol = protocol;
276 res = 0;
277 break;
278 }
279 }
280 return res;
281}
282
283int
284pthread_mutexattr_setpolicy_np(pthread_mutexattr_t *attr, int policy)
285{
286 int res = EINVAL;
287 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
288 switch (policy) {
289 case _PTHREAD_MUTEX_POLICY_FAIRSHARE:
290 case _PTHREAD_MUTEX_POLICY_FIRSTFIT:
291 attr->policy = policy;
292 res = 0;
293 break;
294 }
295 }
296 return res;
297}
298
299int
300pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
301{
302 int res = EINVAL;
303 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
304 switch (type) {
305 case PTHREAD_MUTEX_NORMAL:
306 case PTHREAD_MUTEX_ERRORCHECK:
307 case PTHREAD_MUTEX_RECURSIVE:
308 //case PTHREAD_MUTEX_DEFAULT:
309 attr->type = type;
310 res = 0;
311 break;
312 }
313 }
314 return res;
315}
316
// XXX remove: legacy cthreads-era entry point kept for compatibility.
void
cthread_yield(void)
{
	// Simply hand the CPU back to the scheduler.
	(void)sched_yield();
}
323
// Non-portable yield: defer to the scheduler, same effect as sched_yield().
void
pthread_yield_np(void)
{
	(void)sched_yield();
}
329
330
331/*
332 * Temp: till pshared is fixed correctly
333 */
334int
335pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
336{
337 int res = EINVAL;
338#if __DARWIN_UNIX03
339 if (__unix_conforming == 0) {
340 __unix_conforming = 1;
341 }
342#endif /* __DARWIN_UNIX03 */
343
344 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
345#if __DARWIN_UNIX03
346 if (( pshared == PTHREAD_PROCESS_PRIVATE) || (pshared == PTHREAD_PROCESS_SHARED))
347#else /* __DARWIN_UNIX03 */
348 if ( pshared == PTHREAD_PROCESS_PRIVATE)
349#endif /* __DARWIN_UNIX03 */
350 {
351 attr->pshared = pshared;
352 res = 0;
353 }
354 }
355 return res;
356}
357
964d3577
A
358PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
359int
360_pthread_mutex_corruption_abort(_pthread_mutex *mutex);
361
362PTHREAD_NOINLINE
363int
364_pthread_mutex_corruption_abort(_pthread_mutex *mutex)
365{
366 PTHREAD_ABORT("pthread_mutex corruption: mutex %p owner changed in the middle of lock/unlock");
367 return EINVAL; // NOTREACHED
368}
369
f1a1da6c
A
370/*
371 * Sequence numbers and TID:
372 *
373 * In steady (and uncontended) state, an unlocked mutex will
374 * look like A=[L4 U4 TID0]. When it is being locked, it transitions
375 * to B=[L5+KE U4 TID0] and then C=[L5+KE U4 TID940]. For an uncontended mutex,
376 * the unlock path will then transition to D=[L5 U4 TID0] and then finally
377 * E=[L5 U5 TID0].
378 *
379 * If a contender comes in after B, the mutex will instead transition to E=[L6+KE U4 TID0]
380 * and then F=[L6+KE U4 TID940]. If a contender comes in after C, it will transition to
381 * F=[L6+KE U4 TID940] directly. In both cases, the contender will enter the kernel with either
382 * mutexwait(U4, TID0) or mutexwait(U4, TID940). The first owner will unlock the mutex
383 * by first updating the owner to G=[L6+KE U4 TID-1] and then doing the actual unlock to
384 * H=[L6+KE U5 TID=-1] before entering the kernel with mutexdrop(U5, -1) to signal the next waiter
385 * (potentially as a prepost). When the waiter comes out of the kernel, it will update the owner to
386 * I=[L6+KE U5 TID941]. An unlock at this point is simply J=[L6 U5 TID0] and then K=[L6 U6 TID0].
387 *
388 * At various points along these timelines, since the sequence words and TID are written independently,
389 * a thread may get preempted and another thread might see inconsistent data. In the worst case, another
390 * thread may see the TID in the SWITCHING (-1) state or unlocked (0) state for longer because the
391 * owning thread was preempted.
964d3577 392 */
f1a1da6c
A
393
394/*
395 * Drop the mutex unlock references from cond_wait. or mutex_unlock.
396 */
964d3577
A
/*
 * Release one "hold" of the mutex by updating the lock/unlock sequence words
 * and the owner TID, without entering the kernel.
 *
 * On success returns 0 and (optionally) reports the resulting state through
 * the out-parameters: *flagsp gets the option flags (with
 * _PTHREAD_MTX_OPT_NOTIFY set iff the caller must wake a waiter via
 * __psynch_mutexdrop), *mgenp/*ugenp get the new lock/unlock generation
 * words, and *pmtxp the mutex address. Any out-parameter may be NULL.
 * Returns EPERM for an error-check/recursive mutex not owned by the caller.
 */
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_unlock_updatebits(_pthread_mutex *mutex, uint32_t *flagsp, uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
{
	bool firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
	uint32_t lgenval, ugenval, flags;
	uint64_t oldtid, newtid;
	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	flags = mutex->mtxopts.value;
	flags &= ~_PTHREAD_MTX_OPT_NOTIFY; // no notification by default

	// Ownership / recursion bookkeeping only applies to non-NORMAL types.
	if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
		uint64_t selfid = _pthread_selfid_direct();

		if (*tidaddr != selfid) {
			//PTHREAD_ABORT("dropping recur or error mutex not owned by the thread");
			PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, EPERM);
			return EPERM;
		} else if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE &&
				--mutex->mtxopts.options.lock_count) {
			// Recursive mutex still held (count not yet zero): nothing to
			// release in the sequence words.
			PLOCKSTAT_MUTEX_RELEASE((pthread_mutex_t *)mutex, 1);
			if (flagsp != NULL) {
				*flagsp = flags;
			}
			return 0;
		}
	}

	uint64_t oldval64, newval64;
	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	// CAS loop: recompute the new sequence words from a fresh snapshot
	// each iteration until the 64-bit compare-exchange succeeds.
	bool clearprepost, clearnotify, spurious;
	do {
		oldval64 = *seqaddr;
		oldtid = *tidaddr;
		lgenval = (uint32_t)oldval64;
		ugenval = (uint32_t)(oldval64 >> 32);

		clearprepost = false;
		clearnotify = false;
		spurious = false;

		int numwaiters = diff_genseq(lgenval, ugenval); // pending waiters

		if (numwaiters == 0) {
			// spurious unlock; do not touch tid
			spurious = true;
		} else {
			ugenval += PTHRW_INC;

			if ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK)) {
				// our unlock sequence matches to lock sequence, so if the CAS is successful, the mutex is unlocked

				/* do not reset Ibit, just K&E */
				lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
				clearnotify = true;
				newtid = 0; // clear owner
			} else {
				if (firstfit) {
					lgenval &= ~PTH_RWL_EBIT; // reset E bit so another can acquire meanwhile
					newtid = 0;
				} else {
					newtid = PTHREAD_MTX_TID_SWITCHING;
				}
				// need to signal others waiting for mutex
				flags |= _PTHREAD_MTX_OPT_NOTIFY;
			}

			if (newtid != oldtid) {
				// We're giving up the mutex one way or the other, so go ahead and update the owner to SWITCHING
				// or 0 so that once the CAS below succeeds, there is no stale ownership information.
				// If the CAS of the seqaddr fails, we may loop, but it's still valid for the owner
				// to be SWITCHING/0
				if (!os_atomic_cmpxchg(tidaddr, oldtid, newtid, relaxed)) {
					// we own this mutex, nobody should be updating it except us
					return _pthread_mutex_corruption_abort(mutex);
				}
			}
		}

		if (clearnotify || spurious) {
			flags &= ~_PTHREAD_MTX_OPT_NOTIFY;
			if (firstfit && ((lgenval & PTH_RWL_PBIT) != 0)) {
				// Prepost bit set on a fully-released firstfit mutex:
				// tell the kernel to drop the stored prepost below.
				clearprepost = true;
				lgenval &= ~PTH_RWL_PBIT;
			}
		}

		newval64 = (((uint64_t)ugenval) << 32);
		newval64 |= lgenval;

	} while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, release));

	if (clearprepost) {
		__psynch_cvclrprepost(mutex, lgenval, ugenval, 0, 0, lgenval, (flags | _PTHREAD_MTX_OPT_MUTEX));
	}

	// Report the post-update state to the caller where requested.
	if (mgenp != NULL) {
		*mgenp = lgenval;
	}
	if (ugenp != NULL) {
		*ugenp = ugenval;
	}
	if (pmtxp != NULL) {
		*pmtxp = (uint32_t *)mutex;
	}
	if (flagsp != NULL) {
		*flagsp = flags;
	}

	return 0;
}
512
964d3577
A
/*
 * Exported (non-inline) wrapper around _pthread_mutex_unlock_updatebits for
 * use from other translation units; see that function for the contract.
 */
PTHREAD_NOEXPORT
int
__mtx_droplock(_pthread_mutex *mutex, uint32_t *flagsp, uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
{
	return _pthread_mutex_unlock_updatebits(mutex, flagsp, pmtxp, mgenp, ugenp);
}
519
/*
 * Claim the mutex for the calling thread (TID 'selfid') by setting the K and
 * E bits in the lock sequence word and installing selfid as owner.
 *
 * Returns 0 when the lock was taken, or 1 for a firstfit mutex on which the
 * E bit was (re)acquired by another thread, meaning the caller must wait
 * again. Aborts via _pthread_mutex_corruption_abort if the owner word
 * changed underneath us.
 */
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_lock_updatebits(_pthread_mutex *mutex, uint64_t selfid)
{
	int res = 0;
	int firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
	int isebit = 0;

	uint32_t lgenval, ugenval;
	uint64_t oldval64, newval64;
	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
	uint64_t oldtid;
	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	do {
		do {
			oldval64 = *seqaddr;
			oldtid = *tidaddr;
			lgenval = (uint32_t)oldval64;
			ugenval = (uint32_t)(oldval64 >> 32);

			// E bit was set on first pass through the loop but is no longer
			// set. Apparently we spin until it arrives.
			// XXX: verify this is desired behavior.
		} while (isebit && (lgenval & PTH_RWL_EBIT) == 0);

		if (isebit) {
			// first fit mutex now has the E bit set. Return 1.
			res = 1;
			break;
		}

		if (firstfit) {
			isebit = (lgenval & PTH_RWL_EBIT) != 0;
		} else if ((lgenval & (PTH_RWL_KBIT|PTH_RWL_EBIT)) == (PTH_RWL_KBIT|PTH_RWL_EBIT)) {
			// fairshare mutex and the bits are already set, just update tid
			break;
		}

		// either first fit or no E bit set
		// update the bits
		lgenval |= PTH_RWL_KBIT | PTH_RWL_EBIT;

		newval64 = (((uint64_t)ugenval) << 32);
		newval64 |= lgenval;

		// set s and b bit
		// Retry if CAS fails, or if it succeeds with firstfit and E bit already set
	} while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, acquire) || (firstfit && isebit));

	if (res == 0) {
		// Install ourselves as owner; failure means someone else modified
		// the TID word of a mutex we hold — that is corruption.
		if (!os_atomic_cmpxchg(tidaddr, oldtid, selfid, relaxed)) {
			// we own this mutex, nobody should be updating it except us
			return _pthread_mutex_corruption_abort(mutex);
		}
	}

	return res;
}
581
964d3577
A
/*
 * For a firstfit mutex whose kernel drop reported a prepost (PTH_RWL_PBIT in
 * 'updateval'), record or clear the prepost bit in the lock sequence word.
 * If all lock/unlock sequences already match (nobody left to consume the
 * prepost), the stored prepost is cleared in the kernel instead.
 * Always returns 0. No-op unless firstfit and the P bit are both set.
 */
PTHREAD_NOINLINE
static int
__mtx_markprepost(_pthread_mutex *mutex, uint32_t updateval, int firstfit)
{
	uint32_t flags;
	uint32_t lgenval, ugenval;
	uint64_t oldval64, newval64;

	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	if (firstfit != 0 && (updateval & PTH_RWL_PBIT) != 0) {
		int clearprepost;
		do {
			clearprepost = 0;

			flags = mutex->mtxopts.value;

			oldval64 = *seqaddr;
			lgenval = (uint32_t)oldval64;
			ugenval = (uint32_t)(oldval64 >> 32);

			/* update the bits */
			if ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK)) {
				// No pending waiters: the prepost is stale, clear it.
				clearprepost = 1;
				lgenval &= ~PTH_RWL_PBIT;
			} else {
				lgenval |= PTH_RWL_PBIT;
			}
			newval64 = (((uint64_t)ugenval) << 32);
			newval64 |= lgenval;
		} while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, release));

		if (clearprepost != 0) {
			__psynch_cvclrprepost(mutex, lgenval, ugenval, 0, 0, lgenval, (flags | _PTHREAD_MTX_OPT_MUTEX));
		}
	}
	return 0;
}
621
964d3577
A
/*
 * Slow path of _pthread_mutex_check_init: lazily initialize a statically
 * initialized mutex (PTHREAD_MUTEX_INITIALIZER and friends) on first use.
 * The signature is re-checked under the lock so only one thread performs
 * the initialization. Returns 0 if the mutex is (now) valid, else EINVAL.
 */
PTHREAD_NOINLINE
static int
_pthread_mutex_check_init_slow(pthread_mutex_t *omutex)
{
	int res = EINVAL;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	if (_pthread_mutex_check_signature_init(mutex)) {
		LOCK(mutex->lock);
		// Double-checked under the lock: another thread may have already
		// completed the initialization while we waited.
		if (_pthread_mutex_check_signature_init(mutex)) {
			// initialize a statically initialized mutex to provide
			// compatibility for misbehaving applications.
			// (unlock should not be the first operation on a mutex)
			res = _pthread_mutex_init(mutex, NULL, (mutex->sig & 0xf));
		} else if (_pthread_mutex_check_signature(mutex)) {
			res = 0;
		}
		UNLOCK(mutex->lock);
	} else if (_pthread_mutex_check_signature(mutex)) {
		res = 0;
	}
	if (res != 0) {
		PLOCKSTAT_MUTEX_ERROR(omutex, res);
	}
	return res;
}
648
964d3577
A
649PTHREAD_ALWAYS_INLINE
650static inline int
f1a1da6c
A
651_pthread_mutex_check_init(pthread_mutex_t *omutex)
652{
653 int res = 0;
654 _pthread_mutex *mutex = (_pthread_mutex *)omutex;
655
964d3577
A
656 if (!_pthread_mutex_check_signature(mutex)) {
657 return _pthread_mutex_check_init_slow(omutex);
f1a1da6c
A
658 }
659 return res;
660}
661
964d3577
A
/*
 * Block in the kernel until the mutex can be acquired.
 *
 * 'newval64' holds the lock/unlock sequence words that were published before
 * the caller decided to wait; 'oldtid' is the owner observed at that time.
 * Retries __psynch_mutexwait on EINTR-style failures ((uint32_t)-1), and
 * re-runs the userspace bit update when a firstfit wakeup loses the race
 * (updatebits returns 1). Returns 0 once the lock is held.
 */
PTHREAD_NOINLINE
int
_pthread_mutex_lock_wait(pthread_mutex_t *omutex, uint64_t newval64, uint64_t oldtid)
{
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	uint32_t lgenval = (uint32_t)newval64;
	uint32_t ugenval = (uint32_t)(newval64 >> 32);

	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_selfid_direct();

	PLOCKSTAT_MUTEX_BLOCK(omutex);
	do {
		uint32_t updateval;
		do {
			updateval = __psynch_mutexwait(omutex, lgenval, ugenval, oldtid, mutex->mtxopts.value);
			// Refresh the observed owner for the retry.
			oldtid = *tidaddr;
		} while (updateval == (uint32_t)-1);

		// returns 0 on succesful update; in firstfit it may fail with 1
	} while (_pthread_mutex_lock_updatebits(mutex, selfid) == 1);
	PLOCKSTAT_MUTEX_BLOCKED(omutex, BLOCK_SUCCESS_PLOCKSTAT);

	return 0;
}
688
/*
 * Full (slow-path) mutex lock: handles static initialization, error-check
 * and recursive semantics, dtrace probes, and blocking in the kernel.
 * 'trylock' selects pthread_mutex_trylock behavior (EBUSY instead of
 * blocking). Returns 0 on acquisition or a POSIX error code.
 */
int
_pthread_mutex_lock_slow(pthread_mutex_t *omutex, bool trylock)
{
	int res;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	// Lazily initialize statically-initialized mutexes; rejects garbage.
	res = _pthread_mutex_check_init(omutex);
	if (res != 0) {
		return res;
	}

	uint64_t oldtid;
	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_selfid_direct();

	// Error-check / recursive handling when we already own the mutex.
	if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
		if (*tidaddr == selfid) {
			if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE) {
				if (mutex->mtxopts.options.lock_count < USHRT_MAX) {
					mutex->mtxopts.options.lock_count++;
					PLOCKSTAT_MUTEX_ACQUIRE(omutex, 1, 0);
					res = 0;
				} else {
					// Recursion counter saturated.
					res = EAGAIN;
					PLOCKSTAT_MUTEX_ERROR(omutex, res);
				}
			} else if (trylock) { /* PTHREAD_MUTEX_ERRORCHECK */
				// <rdar://problem/16261552> as per OpenGroup, trylock cannot
				// return EDEADLK on a deadlock, it should return EBUSY.
				res = EBUSY;
				PLOCKSTAT_MUTEX_ERROR(omutex, res);
			} else { /* PTHREAD_MUTEX_ERRORCHECK */
				res = EDEADLK;
				PLOCKSTAT_MUTEX_ERROR(omutex, res);
			}
			return res;
		}
	}

	uint64_t oldval64, newval64;
	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	uint32_t lgenval, ugenval;
	bool gotlock = false;

	// Publish our intent to lock in the sequence words via CAS loop.
	do {
		oldval64 = *seqaddr;
		oldtid = *tidaddr;
		lgenval = (uint32_t)oldval64;
		ugenval = (uint32_t)(oldval64 >> 32);

		gotlock = ((lgenval & PTH_RWL_EBIT) == 0);

		if (trylock && !gotlock) {
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS64 to ensure that nobody has unlocked concurrently.
		} else {
			// Increment the lock sequence number and force the lock into E+K
			// mode, whether "gotlock" is true or not.
			lgenval += PTHRW_INC;
			lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
		}

		newval64 = (((uint64_t)ugenval) << 32);
		newval64 |= lgenval;

		// Set S and B bit
	} while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, acquire));

	if (gotlock) {
		os_atomic_store(tidaddr, selfid, relaxed);
		res = 0;
		DEBUG_TRACE(psynch_mutex_ulock, omutex, lgenval, ugenval, selfid);
		PLOCKSTAT_MUTEX_ACQUIRE(omutex, 0, 0);
	} else if (trylock) {
		res = EBUSY;
		DEBUG_TRACE(psynch_mutex_utrylock_failed, omutex, lgenval, ugenval, oldtid);
		PLOCKSTAT_MUTEX_ERROR(omutex, res);
	} else {
		// Contended: block in the kernel until we own the mutex.
		res = _pthread_mutex_lock_wait(omutex, newval64, oldtid);
	}

	if (res == 0 && mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE) {
		mutex->mtxopts.options.lock_count = 1;
	}

	PLOCKSTAT_MUTEX_ACQUIRE(omutex, 0, 0);

	return res;
}
782
964d3577
A
783#endif // OS_UP_VARIANT_ONLY
784
/*
 * Fast-path lock shared by pthread_mutex_lock and pthread_mutex_trylock.
 * Falls back to _pthread_mutex_lock_slow when dtrace probes are armed or
 * the mutex does not carry the fast signature (NORMAL + fairshare — see
 * _pthread_mutex_init). The CAS loop below mirrors the slow path exactly,
 * minus type/recursion handling, which NORMAL mutexes never need.
 */
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_lock(pthread_mutex_t *omutex, bool trylock)
{
#if PLOCKSTAT || DEBUG_TRACE_POINTS
	if (PLOCKSTAT_MUTEX_ACQUIRE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED() ||
			DEBUG_TRACE_POINTS) {
		return _pthread_mutex_lock_slow(omutex, trylock);
	}
#endif
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (!_pthread_mutex_check_signature_fast(mutex)) {
		return _pthread_mutex_lock_slow(omutex, trylock);
	}

	uint64_t oldtid;
	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_selfid_direct();

	uint64_t oldval64, newval64;
	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	uint32_t lgenval, ugenval;
	bool gotlock = false;

	do {
		oldval64 = *seqaddr;
		oldtid = *tidaddr;
		lgenval = (uint32_t)oldval64;
		ugenval = (uint32_t)(oldval64 >> 32);

		gotlock = ((lgenval & PTH_RWL_EBIT) == 0);

		if (trylock && !gotlock) {
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS64 to ensure that nobody has unlocked concurrently.
		} else {
			// Increment the lock sequence number and force the lock into E+K
			// mode, whether "gotlock" is true or not.
			lgenval += PTHRW_INC;
			lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
		}

		newval64 = (((uint64_t)ugenval) << 32);
		newval64 |= lgenval;

		// Set S and B bit
	} while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, acquire));

	if (os_fastpath(gotlock)) {
		// Uncontended: just record ourselves as owner.
		os_atomic_store(tidaddr, selfid, relaxed);
		return 0;
	} else if (trylock) {
		return EBUSY;
	} else {
		return _pthread_mutex_lock_wait(omutex, newval64, oldtid);
	}
}
846
847PTHREAD_NOEXPORT_VARIANT
f1a1da6c
A
848int
849pthread_mutex_lock(pthread_mutex_t *mutex)
850{
851 return _pthread_mutex_lock(mutex, false);
852}
853
964d3577 854PTHREAD_NOEXPORT_VARIANT
f1a1da6c
A
855int
856pthread_mutex_trylock(pthread_mutex_t *mutex)
857{
858 return _pthread_mutex_lock(mutex, true);
859}
860
964d3577 861#ifndef OS_UP_VARIANT_ONLY
f1a1da6c
A
862/*
863 * Unlock a mutex.
864 * TODO: Priority inheritance stuff
865 */
964d3577
A
866
/*
 * Kernel half of an unlock that must wake a waiter: calls __psynch_mutexdrop
 * with the post-unlock sequence words packed in 'newval64'. For firstfit
 * mutexes, a returned prepost indication is recorded via __mtx_markprepost.
 * Aborts the process if the drop fails with anything other than EINTR.
 */
PTHREAD_NOINLINE
static int
_pthread_mutex_unlock_drop(pthread_mutex_t *omutex, uint64_t newval64, uint32_t flags)
{
	int res;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	uint32_t lgenval = (uint32_t)newval64;
	uint32_t ugenval = (uint32_t)(newval64 >> 32);

	uint32_t updateval;
	int firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	updateval = __psynch_mutexdrop(omutex, lgenval, ugenval, *tidaddr, flags);

	if (updateval == (uint32_t)-1) {
		res = errno;

		// EINTR is benign: the wakeup was still delivered.
		if (res == EINTR) {
			res = 0;
		}
		if (res != 0) {
			PTHREAD_ABORT("__p_mutexdrop failed with error %d", res);
		}
		return res;
	} else if (firstfit == 1) {
		if ((updateval & PTH_RWL_PBIT) != 0) {
			__mtx_markprepost(mutex, updateval, firstfit);
		}
	}

	return 0;
}
901
/*
 * Full (slow-path) unlock: validates/initializes the mutex, releases it in
 * userspace via _pthread_mutex_unlock_updatebits, and enters the kernel
 * only when a waiter must be notified. Returns 0 or a POSIX error code.
 */
int
_pthread_mutex_unlock_slow(pthread_mutex_t *omutex)
{
	int res;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	uint32_t mtxgen, mtxugen, flags;

	// Initialize static mutexes for compatibility with misbehaving
	// applications (unlock should not be the first operation on a mutex).
	res = _pthread_mutex_check_init(omutex);
	if (res != 0) {
		return res;
	}

	res = _pthread_mutex_unlock_updatebits(mutex, &flags, NULL, &mtxgen, &mtxugen);
	if (res != 0) {
		return res;
	}

	if ((flags & _PTHREAD_MTX_OPT_NOTIFY) != 0) {
		// A waiter is pending: hand the new sequence words to the kernel.
		uint64_t newval64;
		newval64 = (((uint64_t)mtxugen) << 32);
		newval64 |= mtxgen;
		return _pthread_mutex_unlock_drop(omutex, newval64, flags);
	} else {
		volatile uint64_t *tidaddr;
		MUTEX_GETTID_ADDR(mutex, &tidaddr);
		DEBUG_TRACE(psynch_mutex_uunlock, omutex, mtxgen, mtxugen, *tidaddr);
	}

	return 0;
}
f1a1da6c 934
964d3577 935#endif // OS_UP_VARIANT_ONLY
f1a1da6c 936
964d3577
A
/*
 * Fast-path unlock. Diverts to _pthread_mutex_unlock_slow when dtrace
 * probes are armed, the mutex lacks the fast signature, or a waiter must be
 * woken in the kernel (sequence counts do not line up after our release).
 */
PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_unlock(pthread_mutex_t *omutex)
{
#if PLOCKSTAT || DEBUG_TRACE_POINTS
	if (PLOCKSTAT_MUTEX_RELEASE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED() ||
			DEBUG_TRACE_POINTS) {
		return _pthread_mutex_unlock_slow(omutex);
	}
#endif
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (!_pthread_mutex_check_signature_fast(mutex)) {
		return _pthread_mutex_unlock_slow(omutex);
	}

	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	uint64_t oldval64, newval64;
	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	uint32_t lgenval, ugenval;

	do {
		oldval64 = *seqaddr;
		lgenval = (uint32_t)oldval64;
		ugenval = (uint32_t)(oldval64 >> 32);

		int numwaiters = diff_genseq(lgenval, ugenval); // pending waiters

		if (numwaiters == 0) {
			// spurious unlock; do not touch tid
		} else {
			ugenval += PTHRW_INC;

			if ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK)) {
				// our unlock sequence matches to lock sequence, so if the CAS is successful, the mutex is unlocked

				/* do not reset Ibit, just K&E */
				lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
			} else {
				// Waiters remain: the kernel must be told, so take the
				// slow path instead.
				return _pthread_mutex_unlock_slow(omutex);
			}

			// We're giving up the mutex one way or the other, so go ahead and update the owner
			// to 0 so that once the CAS below succeeds, there is no stale ownership information.
			// If the CAS of the seqaddr fails, we may loop, but it's still valid for the owner
			// to be SWITCHING/0
			os_atomic_store(tidaddr, 0, relaxed);
		}

		newval64 = (((uint64_t)ugenval) << 32);
		newval64 |= lgenval;

	} while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, release));

	return 0;
}
996
964d3577
A
997#ifndef OS_UP_VARIANT_ONLY
998
999
/*
 * Common mutex initialization for pthread_mutex_init and the lazy
 * static-initializer path. 'static_type' selects defaults when attr is NULL:
 * 1 = errorcheck, 2 = recursive, 3 = firstfit-default, 7 = default; any
 * other value is EINVAL. The signature word is written last, with release
 * ordering, so a concurrent reader never sees a valid signature on an
 * incompletely initialized mutex.
 */
static inline int
_pthread_mutex_init(_pthread_mutex *mutex, const pthread_mutexattr_t *attr,
		uint32_t static_type)
{
	mutex->mtxopts.value = 0;
	mutex->mtxopts.options.mutex = 1;
	if (attr) {
		if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
			return EINVAL;
		}
		mutex->prioceiling = attr->prioceiling;
		mutex->mtxopts.options.protocol = attr->protocol;
		mutex->mtxopts.options.policy = attr->policy;
		mutex->mtxopts.options.type = attr->type;
		mutex->mtxopts.options.pshared = attr->pshared;
	} else {
		switch (static_type) {
		case 1:
			mutex->mtxopts.options.type = PTHREAD_MUTEX_ERRORCHECK;
			break;
		case 2:
			mutex->mtxopts.options.type = PTHREAD_MUTEX_RECURSIVE;
			break;
		case 3:
			/* firstfit fall thru */
		case 7:
			mutex->mtxopts.options.type = PTHREAD_MUTEX_DEFAULT;
			break;
		default:
			return EINVAL;
		}

		mutex->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
		mutex->mtxopts.options.protocol = _PTHREAD_DEFAULT_PROTOCOL;
		if (static_type != 3) {
			mutex->mtxopts.options.policy = _PTHREAD_MUTEX_POLICY_FAIRSHARE;
		} else {
			mutex->mtxopts.options.policy = _PTHREAD_MUTEX_POLICY_FIRSTFIT;
		}
		mutex->mtxopts.options.pshared = _PTHREAD_DEFAULT_PSHARED;
	}
	mutex->priority = 0;

	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
#if PTHREAD_MUTEX_INIT_UNUSED
	// Record whether the tid word is misaligned, and poison the bytes the
	// aligned accessors will skip so smashes are detectable.
	if ((uint32_t*)tidaddr != mutex->m_tid) {
		mutex->mtxopts.options.misalign = 1;
		__builtin_memset(mutex->m_tid, 0xff, sizeof(mutex->m_tid));
	}
	__builtin_memset(mutex->m_mis, 0xff, sizeof(mutex->m_mis));
#endif // PTHREAD_MUTEX_INIT_UNUSED
	*tidaddr = 0;
	*seqaddr = 0;

	long sig = _PTHREAD_MUTEX_SIG;
	if (mutex->mtxopts.options.type == PTHREAD_MUTEX_NORMAL &&
			mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FAIRSHARE) {
		// rdar://18148854 _pthread_mutex_lock & pthread_mutex_unlock fastpath
		sig = _PTHREAD_MUTEX_SIG_fast;
	}

#if PTHREAD_MUTEX_INIT_UNUSED
	// For detecting copied mutexes and smashes during debugging
	uint32_t sig32 = (uint32_t)sig;
#if defined(__LP64__)
	uintptr_t guard = ~(uintptr_t)mutex; // use ~ to hide from leaks
	__builtin_memcpy(mutex->_reserved, &guard, sizeof(guard));
	mutex->_reserved[2] = sig32;
	mutex->_reserved[3] = sig32;
	mutex->_pad = sig32;
#else
	mutex->_reserved[0] = sig32;
#endif
#endif // PTHREAD_MUTEX_INIT_UNUSED

	// Ensure all contents are properly set before setting signature.
#if defined(__LP64__)
	// For binary compatibility reasons we cannot require natural alignment of
	// the 64bit 'sig' long value in the struct. rdar://problem/21610439
	uint32_t *sig32_ptr = (uint32_t*)&mutex->sig;
	uint32_t *sig32_val = (uint32_t*)&sig;
	// Write the high half first; the release store of the low half
	// publishes the whole signature.
	*(sig32_ptr+1) = *(sig32_val+1);
	os_atomic_store(sig32_ptr, *sig32_val, release);
#else
	os_atomic_store2o(mutex, sig, sig, release);
#endif

	return 0;
}
1092
/*
 * Destroy a mutex: invalidate its signature so further use is detected.
 * Returns EBUSY if the mutex is currently owned or has unconsumed lock
 * sequences, EINVAL if it was never initialized, 0 on success. A mutex
 * still carrying the static-initializer signature can always be destroyed.
 */
int
pthread_mutex_destroy(pthread_mutex_t *omutex)
{
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	int res = EINVAL;

	LOCK(mutex->lock);
	if (_pthread_mutex_check_signature(mutex)) {
		uint32_t lgenval, ugenval;
		uint64_t oldval64;
		volatile uint64_t *seqaddr;
		MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
		volatile uint64_t *tidaddr;
		MUTEX_GETTID_ADDR(mutex, &tidaddr);

		oldval64 = *seqaddr;
		lgenval = (uint32_t)oldval64;
		ugenval = (uint32_t)(oldval64 >> 32);
		// Destroyable only when unowned and lock/unlock sequences balance.
		if ((*tidaddr == (uint64_t)0) &&
				((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK))) {
			mutex->sig = _PTHREAD_NO_SIG;
			res = 0;
		} else {
			res = EBUSY;
		}
	} else if (_pthread_mutex_check_signature_init(mutex)) {
		mutex->sig = _PTHREAD_NO_SIG;
		res = 0;
	}
	UNLOCK(mutex->lock);

	return res;
}
1127
964d3577
A
1128#endif // OS_UP_VARIANT_ONLY
1129
f1a1da6c
A
1130#endif /* !BUILDING_VARIANT ] */
1131
964d3577 1132#ifndef OS_UP_VARIANT_ONLY
f1a1da6c
A
1133/*
1134 * Destroy a mutex attribute structure.
1135 */
1136int
1137pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
1138{
1139#if __DARWIN_UNIX03
1140 if (__unix_conforming == 0) {
1141 __unix_conforming = 1;
1142 }
1143 if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
1144 return EINVAL;
1145 }
1146#endif /* __DARWIN_UNIX03 */
1147
1148 attr->sig = _PTHREAD_NO_SIG;
1149 return 0;
1150}
1151
964d3577 1152#endif // OS_UP_VARIANT_ONLY