/*
 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 * All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */
/*
 * MkLinux
 */

/*
 * POSIX Pthread Library
 * -- Mutex variable support
 */

#include "resolver.h"
#include "internal.h"
#include "kern/kern_trace.h"
#include <sys/syscall.h>

#ifdef PLOCKSTAT
#include "plockstat.h"
#else /* !PLOCKSTAT */
#define PLOCKSTAT_MUTEX_SPIN(x)
#define PLOCKSTAT_MUTEX_SPUN(x, y, z)
#define PLOCKSTAT_MUTEX_ERROR(x, y)
#define PLOCKSTAT_MUTEX_BLOCK(x)
#define PLOCKSTAT_MUTEX_BLOCKED(x, y)
#define PLOCKSTAT_MUTEX_ACQUIRE(x, y, z)
#define PLOCKSTAT_MUTEX_RELEASE(x, y)
#endif /* PLOCKSTAT */

#define PTHREAD_MUTEX_INIT_UNUSED 1

extern int __unix_conforming;

#ifndef BUILDING_VARIANT

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int
_pthread_mutex_unlock_slow(pthread_mutex_t *omutex);

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int
_pthread_mutex_lock_slow(pthread_mutex_t *omutex, bool trylock);

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into _pthread_mutex_lock
int
_pthread_mutex_lock_wait(pthread_mutex_t *omutex, uint64_t newval64, uint64_t oldtid);

#endif /* BUILDING_VARIANT */

#define DEBUG_TRACE_POINTS 0

#if DEBUG_TRACE_POINTS
extern int __syscall(int number, ...);
#define DEBUG_TRACE(x, a, b, c, d) __syscall(SYS_kdebug_trace, TRACE_##x, a, b, c, d)
#else
#define DEBUG_TRACE(x, a, b, c, d) do { } while(0)
#endif

#include <machine/cpu_capabilities.h>

static inline int _pthread_mutex_init(_pthread_mutex *mutex, const pthread_mutexattr_t *attr, uint32_t static_type);

#if !__LITTLE_ENDIAN__
#error MUTEX_GETSEQ_ADDR assumes little endian layout of 2 32-bit sequence words
#endif

PTHREAD_ALWAYS_INLINE
static inline void
MUTEX_GETSEQ_ADDR(_pthread_mutex *mutex,
		volatile uint64_t **seqaddr)
{
	// 64-bit aligned address inside m_seq array (&m_seq[0] for aligned mutex)
	// We don't require more than byte alignment on OS X. rdar://22278325
	*seqaddr = (volatile uint64_t*)(((uintptr_t)mutex->m_seq + 0x7ul) & ~0x7ul);
}

PTHREAD_ALWAYS_INLINE
static inline void
MUTEX_GETTID_ADDR(_pthread_mutex *mutex,
		volatile uint64_t **tidaddr)
{
	// 64-bit aligned address inside m_tid array (&m_tid[0] for aligned mutex)
	// We don't require more than byte alignment on OS X. rdar://22278325
	*tidaddr = (volatile uint64_t*)(((uintptr_t)mutex->m_tid + 0x7ul) & ~0x7ul);
}
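
/*
 * Worked example of the round-up-to-8 trick above, using hypothetical
 * addresses: if m_seq begins at 0x1004, then (0x1004 + 7) & ~7 = 0x1008,
 * the first 64-bit aligned address inside the array; if m_seq is already
 * 8-byte aligned, the expression yields &m_seq[0] unchanged. The arrays
 * carry a spare word so the aligned 64-bit slot always fits (see the
 * struct layout in internal.h).
 */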

#ifndef BUILDING_VARIANT /* [ */
#ifndef OS_UP_VARIANT_ONLY

#define BLOCK_FAIL_PLOCKSTAT    0
#define BLOCK_SUCCESS_PLOCKSTAT 1

#ifdef PLOCKSTAT
/* This function is never called and exists to provide never-fired dtrace
 * probes so that user d scripts don't get errors.
 */
PTHREAD_NOEXPORT PTHREAD_USED
void
_plockstat_never_fired(void)
{
	PLOCKSTAT_MUTEX_SPIN(NULL);
	PLOCKSTAT_MUTEX_SPUN(NULL, 0, 0);
}
#endif // PLOCKSTAT

/*
 * Initialize a mutex variable, possibly with additional attributes.
 * Public interface - so don't trust the lock - initialize it first.
 */
int
pthread_mutex_init(pthread_mutex_t *omutex, const pthread_mutexattr_t *attr)
{
#if 0
	/* conformance tests depend on not having this behavior */
	/* The test for this behavior is optional */
	if (_pthread_mutex_check_signature(mutex))
		return EBUSY;
#endif
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	_PTHREAD_LOCK_INIT(mutex->lock);
	return (_pthread_mutex_init(mutex, attr, 0x7));
}
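
/*
 * Caller-side sketch (illustrative only, not part of this library) of a
 * typical initialization through the attribute path above:
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *	pthread_mutex_init(&m, &attr);
 *	pthread_mutexattr_destroy(&attr);
 */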

int
pthread_mutex_getprioceiling(const pthread_mutex_t *omutex, int *prioceiling)
{
	int res = EINVAL;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (_pthread_mutex_check_signature(mutex)) {
		_PTHREAD_LOCK(mutex->lock);
		*prioceiling = mutex->prioceiling;
		res = 0;
		_PTHREAD_UNLOCK(mutex->lock);
	}
	return res;
}

int
pthread_mutex_setprioceiling(pthread_mutex_t *omutex, int prioceiling, int *old_prioceiling)
{
	int res = EINVAL;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (_pthread_mutex_check_signature(mutex)) {
		_PTHREAD_LOCK(mutex->lock);
		if (prioceiling >= -999 && prioceiling <= 999) {
			*old_prioceiling = mutex->prioceiling;
			mutex->prioceiling = prioceiling;
			res = 0;
		}
		_PTHREAD_UNLOCK(mutex->lock);
	}
	return res;
}

int
pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *attr, int *prioceiling)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*prioceiling = attr->prioceiling;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_getprotocol(const pthread_mutexattr_t *attr, int *protocol)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*protocol = attr->protocol;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*type = attr->type;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_getpshared(const pthread_mutexattr_t *attr, int *pshared)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*pshared = (int)attr->pshared;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
	attr->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
	attr->protocol = _PTHREAD_DEFAULT_PROTOCOL;
	attr->policy = _PTHREAD_MUTEX_POLICY_FAIRSHARE;
	attr->type = PTHREAD_MUTEX_DEFAULT;
	attr->sig = _PTHREAD_MUTEX_ATTR_SIG;
	attr->pshared = _PTHREAD_DEFAULT_PSHARED;
	return 0;
}

int
pthread_mutexattr_setprioceiling(pthread_mutexattr_t *attr, int prioceiling)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		if (prioceiling >= -999 && prioceiling <= 999) {
			attr->prioceiling = prioceiling;
			res = 0;
		}
	}
	return res;
}

int
pthread_mutexattr_setprotocol(pthread_mutexattr_t *attr, int protocol)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (protocol) {
		case PTHREAD_PRIO_NONE:
		case PTHREAD_PRIO_INHERIT:
		case PTHREAD_PRIO_PROTECT:
			attr->protocol = protocol;
			res = 0;
			break;
		}
	}
	return res;
}

int
pthread_mutexattr_setpolicy_np(pthread_mutexattr_t *attr, int policy)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (policy) {
		case _PTHREAD_MUTEX_POLICY_FAIRSHARE:
		case _PTHREAD_MUTEX_POLICY_FIRSTFIT:
			attr->policy = policy;
			res = 0;
			break;
		}
	}
	return res;
}

int
pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (type) {
		case PTHREAD_MUTEX_NORMAL:
		case PTHREAD_MUTEX_ERRORCHECK:
		case PTHREAD_MUTEX_RECURSIVE:
		//case PTHREAD_MUTEX_DEFAULT:
			attr->type = type;
			res = 0;
			break;
		}
	}
	return res;
}

// XXX remove
void
cthread_yield(void)
{
	sched_yield();
}

void
pthread_yield_np(void)
{
	sched_yield();
}


/*
 * Temp: till pshared is fixed correctly
 */
int
pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
{
	int res = EINVAL;
#if __DARWIN_UNIX03
	if (__unix_conforming == 0) {
		__unix_conforming = 1;
	}
#endif /* __DARWIN_UNIX03 */

	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
#if __DARWIN_UNIX03
		if ((pshared == PTHREAD_PROCESS_PRIVATE) || (pshared == PTHREAD_PROCESS_SHARED))
#else /* __DARWIN_UNIX03 */
		if (pshared == PTHREAD_PROCESS_PRIVATE)
#endif /* __DARWIN_UNIX03 */
		{
			attr->pshared = pshared;
			res = 0;
		}
	}
	return res;
}

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int
_pthread_mutex_corruption_abort(_pthread_mutex *mutex);

PTHREAD_NOINLINE
int
_pthread_mutex_corruption_abort(_pthread_mutex *mutex)
{
	PTHREAD_ABORT("pthread_mutex corruption: mutex %p owner changed in the middle of lock/unlock", mutex);
	return EINVAL; // NOTREACHED
}

/*
 * Sequence numbers and TID:
 *
 * In steady (and uncontended) state, an unlocked mutex will
 * look like A=[L4 U4 TID0]. When it is being locked, it transitions
 * to B=[L5+KE U4 TID0] and then C=[L5+KE U4 TID940]. For an uncontended mutex,
 * the unlock path will then transition to D=[L5 U4 TID0] and then finally
 * E=[L5 U5 TID0].
 *
 * If a contender comes in after B, the mutex will instead transition to E'=[L6+KE U4 TID0]
 * and then F=[L6+KE U4 TID940]. If a contender comes in after C, it will transition to
 * F=[L6+KE U4 TID940] directly. In both cases, the contender will enter the kernel with either
 * mutexwait(U4, TID0) or mutexwait(U4, TID940). The first owner will unlock the mutex
 * by first updating the owner to G=[L6+KE U4 TID-1] and then doing the actual unlock to
 * H=[L6+KE U5 TID-1] before entering the kernel with mutexdrop(U5, -1) to signal the next waiter
 * (potentially as a prepost). When the waiter comes out of the kernel, it will update the owner to
 * I=[L6+KE U5 TID941]. An unlock at this point is simply J=[L6 U5 TID0] and then K=[L6 U6 TID0].
 *
 * At various points along these timelines, since the sequence words and TID are written independently,
 * a thread may get preempted and another thread might see inconsistent data. In the worst case, another
 * thread may see the TID in the SWITCHING (-1) state or unlocked (0) state for longer because the
 * owning thread was preempted.
 */
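
/*
 * A minimal sketch (illustrative, not part of the implementation) of how
 * the two 32-bit generation words pack into the single 64-bit sequence
 * word manipulated below, given the little-endian layout asserted above:
 *
 *	uint64_t seq = *seqaddr;
 *	uint32_t lgenval = (uint32_t)seq;		// low word: lock generation
 *	uint32_t ugenval = (uint32_t)(seq >> 32);	// high word: unlock generation
 *	uint64_t repacked = (((uint64_t)ugenval) << 32) | lgenval;
 *
 * diff_genseq(lgenval, ugenval) then derives the number of pending waiters
 * from the distance between the two generations.
 */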

/*
 * Drop the mutex unlock references from cond_wait or mutex_unlock.
 */
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_unlock_updatebits(_pthread_mutex *mutex, uint32_t *flagsp, uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
{
	bool firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
	uint32_t lgenval, ugenval, flags;
	uint64_t oldtid, newtid;
	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	flags = mutex->mtxopts.value;
	flags &= ~_PTHREAD_MTX_OPT_NOTIFY; // no notification by default

	if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
		uint64_t selfid = _pthread_selfid_direct();

		if (*tidaddr != selfid) {
			//PTHREAD_ABORT("dropping recur or error mutex not owned by the thread");
			PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, EPERM);
			return EPERM;
		} else if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE &&
				--mutex->mtxopts.options.lock_count) {
			PLOCKSTAT_MUTEX_RELEASE((pthread_mutex_t *)mutex, 1);
			if (flagsp != NULL) {
				*flagsp = flags;
			}
			return 0;
		}
	}

	uint64_t oldval64, newval64;
	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	bool clearprepost, clearnotify, spurious;
	do {
		oldval64 = *seqaddr;
		oldtid = *tidaddr;
		lgenval = (uint32_t)oldval64;
		ugenval = (uint32_t)(oldval64 >> 32);

		clearprepost = false;
		clearnotify = false;
		spurious = false;

		int numwaiters = diff_genseq(lgenval, ugenval); // pending waiters

		if (numwaiters == 0) {
			// spurious unlock; do not touch tid
			spurious = true;
		} else {
			ugenval += PTHRW_INC;

			if ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK)) {
				// our unlock sequence matches the lock sequence, so if the CAS succeeds, the mutex is unlocked

				/* do not reset Ibit, just K&E */
				lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
				clearnotify = true;
				newtid = 0; // clear owner
			} else {
				if (firstfit) {
					lgenval &= ~PTH_RWL_EBIT; // reset E bit so another can acquire meanwhile
					newtid = 0;
				} else {
					newtid = PTHREAD_MTX_TID_SWITCHING;
				}
				// need to signal others waiting for mutex
				flags |= _PTHREAD_MTX_OPT_NOTIFY;
			}

			if (newtid != oldtid) {
				// We're giving up the mutex one way or the other, so go ahead and update the owner to SWITCHING
				// or 0 so that once the CAS below succeeds, there is no stale ownership information.
				// If the CAS of the seqaddr fails, we may loop, but it's still valid for the owner
				// to be SWITCHING/0
				if (!os_atomic_cmpxchg(tidaddr, oldtid, newtid, relaxed)) {
					// we own this mutex, nobody should be updating it except us
					return _pthread_mutex_corruption_abort(mutex);
				}
			}
		}

		if (clearnotify || spurious) {
			flags &= ~_PTHREAD_MTX_OPT_NOTIFY;
			if (firstfit && ((lgenval & PTH_RWL_PBIT) != 0)) {
				clearprepost = true;
				lgenval &= ~PTH_RWL_PBIT;
			}
		}

		newval64 = (((uint64_t)ugenval) << 32);
		newval64 |= lgenval;

	} while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, release));

	if (clearprepost) {
		__psynch_cvclrprepost(mutex, lgenval, ugenval, 0, 0, lgenval, (flags | _PTHREAD_MTX_OPT_MUTEX));
	}

	if (mgenp != NULL) {
		*mgenp = lgenval;
	}
	if (ugenp != NULL) {
		*ugenp = ugenval;
	}
	if (pmtxp != NULL) {
		*pmtxp = (uint32_t *)mutex;
	}
	if (flagsp != NULL) {
		*flagsp = flags;
	}

	return 0;
}

PTHREAD_NOEXPORT
int
__mtx_droplock(_pthread_mutex *mutex, uint32_t *flagsp, uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
{
	return _pthread_mutex_unlock_updatebits(mutex, flagsp, pmtxp, mgenp, ugenp);
}

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_lock_updatebits(_pthread_mutex *mutex, uint64_t selfid)
{
	int res = 0;
	int firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
	int isebit = 0;

	uint32_t lgenval, ugenval;
	uint64_t oldval64, newval64;
	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
	uint64_t oldtid;
	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	do {
		do {
			oldval64 = *seqaddr;
			oldtid = *tidaddr;
			lgenval = (uint32_t)oldval64;
			ugenval = (uint32_t)(oldval64 >> 32);

			// E bit was set on first pass through the loop but is no longer
			// set. Apparently we spin until it arrives.
			// XXX: verify this is desired behavior.
		} while (isebit && (lgenval & PTH_RWL_EBIT) == 0);

		if (isebit) {
			// first fit mutex now has the E bit set. Return 1.
			res = 1;
			break;
		}

		if (firstfit) {
			isebit = (lgenval & PTH_RWL_EBIT) != 0;
		} else if ((lgenval & (PTH_RWL_KBIT|PTH_RWL_EBIT)) == (PTH_RWL_KBIT|PTH_RWL_EBIT)) {
			// fairshare mutex and the bits are already set, just update tid
			break;
		}

		// either first fit or no E bit set
		// update the bits
		lgenval |= PTH_RWL_KBIT | PTH_RWL_EBIT;

		newval64 = (((uint64_t)ugenval) << 32);
		newval64 |= lgenval;

		// Commit the new sequence word with K and E set.
		// Retry if the CAS fails, or if it succeeds with firstfit and the E bit already set.
	} while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, acquire) || (firstfit && isebit));

	if (res == 0) {
		if (!os_atomic_cmpxchg(tidaddr, oldtid, selfid, relaxed)) {
			// we own this mutex, nobody should be updating it except us
			return _pthread_mutex_corruption_abort(mutex);
		}
	}

	return res;
}

PTHREAD_NOINLINE
static int
__mtx_markprepost(_pthread_mutex *mutex, uint32_t updateval, int firstfit)
{
	uint32_t flags;
	uint32_t lgenval, ugenval;
	uint64_t oldval64, newval64;

	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	if (firstfit != 0 && (updateval & PTH_RWL_PBIT) != 0) {
		int clearprepost;
		do {
			clearprepost = 0;

			flags = mutex->mtxopts.value;

			oldval64 = *seqaddr;
			lgenval = (uint32_t)oldval64;
			ugenval = (uint32_t)(oldval64 >> 32);

			/* update the bits */
			if ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK)) {
				clearprepost = 1;
				lgenval &= ~PTH_RWL_PBIT;
			} else {
				lgenval |= PTH_RWL_PBIT;
			}
			newval64 = (((uint64_t)ugenval) << 32);
			newval64 |= lgenval;
		} while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, release));

		if (clearprepost != 0) {
			__psynch_cvclrprepost(mutex, lgenval, ugenval, 0, 0, lgenval, (flags | _PTHREAD_MTX_OPT_MUTEX));
		}
	}
	return 0;
}

PTHREAD_NOINLINE
static int
_pthread_mutex_check_init_slow(pthread_mutex_t *omutex)
{
	int res = EINVAL;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	if (_pthread_mutex_check_signature_init(mutex)) {
		_PTHREAD_LOCK(mutex->lock);
		if (_pthread_mutex_check_signature_init(mutex)) {
			// initialize a statically initialized mutex to provide
			// compatibility for misbehaving applications.
			// (unlock should not be the first operation on a mutex)
			res = _pthread_mutex_init(mutex, NULL, (mutex->sig & 0xf));
		} else if (_pthread_mutex_check_signature(mutex)) {
			res = 0;
		}
		_PTHREAD_UNLOCK(mutex->lock);
	} else if (_pthread_mutex_check_signature(mutex)) {
		res = 0;
	}
	if (res != 0) {
		PLOCKSTAT_MUTEX_ERROR(omutex, res);
	}
	return res;
}

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_check_init(pthread_mutex_t *omutex)
{
	int res = 0;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	if (!_pthread_mutex_check_signature(mutex)) {
		return _pthread_mutex_check_init_slow(omutex);
	}
	return res;
}

PTHREAD_NOINLINE
int
_pthread_mutex_lock_wait(pthread_mutex_t *omutex, uint64_t newval64, uint64_t oldtid)
{
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	uint32_t lgenval = (uint32_t)newval64;
	uint32_t ugenval = (uint32_t)(newval64 >> 32);

	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_selfid_direct();

	PLOCKSTAT_MUTEX_BLOCK(omutex);
	do {
		uint32_t updateval;
		do {
			updateval = __psynch_mutexwait(omutex, lgenval, ugenval, oldtid, mutex->mtxopts.value);
			oldtid = *tidaddr;
		} while (updateval == (uint32_t)-1);

		// returns 0 on successful update; under the firstfit policy it may fail with 1
	} while (_pthread_mutex_lock_updatebits(mutex, selfid) == 1);
	PLOCKSTAT_MUTEX_BLOCKED(omutex, BLOCK_SUCCESS_PLOCKSTAT);

	return 0;
}

int
_pthread_mutex_lock_slow(pthread_mutex_t *omutex, bool trylock)
{
	int res;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	res = _pthread_mutex_check_init(omutex);
	if (res != 0) {
		return res;
	}

	uint64_t oldtid;
	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_selfid_direct();

	if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
		if (*tidaddr == selfid) {
			if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE) {
				if (mutex->mtxopts.options.lock_count < USHRT_MAX) {
					mutex->mtxopts.options.lock_count++;
					PLOCKSTAT_MUTEX_ACQUIRE(omutex, 1, 0);
					res = 0;
				} else {
					res = EAGAIN;
					PLOCKSTAT_MUTEX_ERROR(omutex, res);
				}
			} else if (trylock) { /* PTHREAD_MUTEX_ERRORCHECK */
				// <rdar://problem/16261552> as per OpenGroup, trylock cannot
				// return EDEADLK on a deadlock, it should return EBUSY.
				res = EBUSY;
				PLOCKSTAT_MUTEX_ERROR(omutex, res);
			} else { /* PTHREAD_MUTEX_ERRORCHECK */
				res = EDEADLK;
				PLOCKSTAT_MUTEX_ERROR(omutex, res);
			}
			return res;
		}
	}
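
	/*
	 * Illustrative caller-visible behavior of the relock handling above,
	 * assuming an errorcheck mutex already owned by the calling thread:
	 *
	 *	pthread_mutex_t m = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER;
	 *	pthread_mutex_lock(&m);      // 0
	 *	pthread_mutex_trylock(&m);   // EBUSY, per rdar://16261552
	 *	pthread_mutex_lock(&m);      // EDEADLK
	 */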

	uint64_t oldval64, newval64;
	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	uint32_t lgenval, ugenval;
	bool gotlock = false;

	do {
		oldval64 = *seqaddr;
		oldtid = *tidaddr;
		lgenval = (uint32_t)oldval64;
		ugenval = (uint32_t)(oldval64 >> 32);

		gotlock = ((lgenval & PTH_RWL_EBIT) == 0);

		if (trylock && !gotlock) {
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS64 to ensure that nobody has unlocked concurrently.
		} else {
			// Increment the lock sequence number and force the lock into E+K
			// mode, whether "gotlock" is true or not.
			lgenval += PTHRW_INC;
			lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
		}

		newval64 = (((uint64_t)ugenval) << 32);
		newval64 |= lgenval;

		// Commit the new sequence word; retry if the CAS fails.
	} while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, acquire));

	if (gotlock) {
		os_atomic_store(tidaddr, selfid, relaxed);
		res = 0;
		DEBUG_TRACE(psynch_mutex_ulock, omutex, lgenval, ugenval, selfid);
		PLOCKSTAT_MUTEX_ACQUIRE(omutex, 0, 0);
	} else if (trylock) {
		res = EBUSY;
		DEBUG_TRACE(psynch_mutex_utrylock_failed, omutex, lgenval, ugenval, oldtid);
		PLOCKSTAT_MUTEX_ERROR(omutex, res);
	} else {
		res = _pthread_mutex_lock_wait(omutex, newval64, oldtid);
	}

	if (res == 0 && mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE) {
		mutex->mtxopts.options.lock_count = 1;
	}

	PLOCKSTAT_MUTEX_ACQUIRE(omutex, 0, 0);

	return res;
}

#endif // OS_UP_VARIANT_ONLY

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_lock(pthread_mutex_t *omutex, bool trylock)
{
#if PLOCKSTAT || DEBUG_TRACE_POINTS
	if (PLOCKSTAT_MUTEX_ACQUIRE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED() ||
			DEBUG_TRACE_POINTS) {
		return _pthread_mutex_lock_slow(omutex, trylock);
	}
#endif
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (!_pthread_mutex_check_signature_fast(mutex)) {
		return _pthread_mutex_lock_slow(omutex, trylock);
	}

	uint64_t oldtid;
	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_selfid_direct();

	uint64_t oldval64, newval64;
	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	uint32_t lgenval, ugenval;
	bool gotlock = false;

	do {
		oldval64 = *seqaddr;
		oldtid = *tidaddr;
		lgenval = (uint32_t)oldval64;
		ugenval = (uint32_t)(oldval64 >> 32);

		gotlock = ((lgenval & PTH_RWL_EBIT) == 0);

		if (trylock && !gotlock) {
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS64 to ensure that nobody has unlocked concurrently.
		} else {
			// Increment the lock sequence number and force the lock into E+K
			// mode, whether "gotlock" is true or not.
			lgenval += PTHRW_INC;
			lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
		}

		newval64 = (((uint64_t)ugenval) << 32);
		newval64 |= lgenval;

		// Commit the new sequence word; retry if the CAS fails.
	} while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, acquire));

	if (os_fastpath(gotlock)) {
		os_atomic_store(tidaddr, selfid, relaxed);
		return 0;
	} else if (trylock) {
		return EBUSY;
	} else {
		return _pthread_mutex_lock_wait(omutex, newval64, oldtid);
	}
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_lock(pthread_mutex_t *mutex)
{
	return _pthread_mutex_lock(mutex, false);
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	return _pthread_mutex_lock(mutex, true);
}

#ifndef OS_UP_VARIANT_ONLY
/*
 * Unlock a mutex.
 * TODO: Priority inheritance stuff
 */

PTHREAD_NOINLINE
static int
_pthread_mutex_unlock_drop(pthread_mutex_t *omutex, uint64_t newval64, uint32_t flags)
{
	int res;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	uint32_t lgenval = (uint32_t)newval64;
	uint32_t ugenval = (uint32_t)(newval64 >> 32);

	uint32_t updateval;
	int firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	updateval = __psynch_mutexdrop(omutex, lgenval, ugenval, *tidaddr, flags);

	if (updateval == (uint32_t)-1) {
		res = errno;

		if (res == EINTR) {
			res = 0;
		}
		if (res != 0) {
			PTHREAD_ABORT("__psynch_mutexdrop failed with error %d", res);
		}
		return res;
	} else if (firstfit == 1) {
		if ((updateval & PTH_RWL_PBIT) != 0) {
			__mtx_markprepost(mutex, updateval, firstfit);
		}
	}

	return 0;
}

int
_pthread_mutex_unlock_slow(pthread_mutex_t *omutex)
{
	int res;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	uint32_t mtxgen, mtxugen, flags;

	// Initialize static mutexes for compatibility with misbehaving
	// applications (unlock should not be the first operation on a mutex).
	res = _pthread_mutex_check_init(omutex);
	if (res != 0) {
		return res;
	}

	res = _pthread_mutex_unlock_updatebits(mutex, &flags, NULL, &mtxgen, &mtxugen);
	if (res != 0) {
		return res;
	}

	if ((flags & _PTHREAD_MTX_OPT_NOTIFY) != 0) {
		uint64_t newval64;
		newval64 = (((uint64_t)mtxugen) << 32);
		newval64 |= mtxgen;
		return _pthread_mutex_unlock_drop(omutex, newval64, flags);
	} else {
		volatile uint64_t *tidaddr;
		MUTEX_GETTID_ADDR(mutex, &tidaddr);
		DEBUG_TRACE(psynch_mutex_uunlock, omutex, mtxgen, mtxugen, *tidaddr);
	}

	return 0;
}

#endif // OS_UP_VARIANT_ONLY

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_unlock(pthread_mutex_t *omutex)
{
#if PLOCKSTAT || DEBUG_TRACE_POINTS
	if (PLOCKSTAT_MUTEX_RELEASE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED() ||
			DEBUG_TRACE_POINTS) {
		return _pthread_mutex_unlock_slow(omutex);
	}
#endif
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (!_pthread_mutex_check_signature_fast(mutex)) {
		return _pthread_mutex_unlock_slow(omutex);
	}

	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	uint64_t oldval64, newval64;
	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	uint32_t lgenval, ugenval;

	do {
		oldval64 = *seqaddr;
		lgenval = (uint32_t)oldval64;
		ugenval = (uint32_t)(oldval64 >> 32);

		int numwaiters = diff_genseq(lgenval, ugenval); // pending waiters

		if (numwaiters == 0) {
			// spurious unlock; do not touch tid
		} else {
			ugenval += PTHRW_INC;

			if ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK)) {
				// our unlock sequence matches the lock sequence, so if the CAS succeeds, the mutex is unlocked

				/* do not reset Ibit, just K&E */
				lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
			} else {
				return _pthread_mutex_unlock_slow(omutex);
			}

			// We're giving up the mutex one way or the other, so go ahead and update the owner
			// to 0 so that once the CAS below succeeds, there is no stale ownership information.
			// If the CAS of the seqaddr fails, we may loop, but it's still valid for the owner
			// to be SWITCHING/0
			os_atomic_store(tidaddr, 0, relaxed);
		}

		newval64 = (((uint64_t)ugenval) << 32);
		newval64 |= lgenval;

	} while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, release));

	return 0;
}

#ifndef OS_UP_VARIANT_ONLY


static inline int
_pthread_mutex_init(_pthread_mutex *mutex, const pthread_mutexattr_t *attr,
		uint32_t static_type)
{
	mutex->mtxopts.value = 0;
	mutex->mtxopts.options.mutex = 1;
	if (attr) {
		if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
			return EINVAL;
		}
		mutex->prioceiling = attr->prioceiling;
		mutex->mtxopts.options.protocol = attr->protocol;
		mutex->mtxopts.options.policy = attr->policy;
		mutex->mtxopts.options.type = attr->type;
		mutex->mtxopts.options.pshared = attr->pshared;
	} else {
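		// Assumed mapping (see the `mutex->sig & 0xf` call site in
		// _pthread_mutex_check_init_slow): static_type is the low nibble
		// of a static-initializer signature, so 1 selects errorcheck,
		// 2 recursive, 3 firstfit, and 7 the plain default initializer.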
		switch (static_type) {
		case 1:
			mutex->mtxopts.options.type = PTHREAD_MUTEX_ERRORCHECK;
			break;
		case 2:
			mutex->mtxopts.options.type = PTHREAD_MUTEX_RECURSIVE;
			break;
		case 3:
			/* firstfit fall thru */
		case 7:
			mutex->mtxopts.options.type = PTHREAD_MUTEX_DEFAULT;
			break;
		default:
			return EINVAL;
		}

		mutex->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
		mutex->mtxopts.options.protocol = _PTHREAD_DEFAULT_PROTOCOL;
		if (static_type != 3) {
			mutex->mtxopts.options.policy = _PTHREAD_MUTEX_POLICY_FAIRSHARE;
		} else {
			mutex->mtxopts.options.policy = _PTHREAD_MUTEX_POLICY_FIRSTFIT;
		}
		mutex->mtxopts.options.pshared = _PTHREAD_DEFAULT_PSHARED;
	}
	mutex->priority = 0;

	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
#if PTHREAD_MUTEX_INIT_UNUSED
	if ((uint32_t*)tidaddr != mutex->m_tid) {
		mutex->mtxopts.options.misalign = 1;
		__builtin_memset(mutex->m_tid, 0xff, sizeof(mutex->m_tid));
	}
	__builtin_memset(mutex->m_mis, 0xff, sizeof(mutex->m_mis));
#endif // PTHREAD_MUTEX_INIT_UNUSED
	*tidaddr = 0;
	*seqaddr = 0;

	long sig = _PTHREAD_MUTEX_SIG;
	if (mutex->mtxopts.options.type == PTHREAD_MUTEX_NORMAL &&
			mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FAIRSHARE) {
		// rdar://18148854 _pthread_mutex_lock & pthread_mutex_unlock fastpath
		sig = _PTHREAD_MUTEX_SIG_fast;
	}

#if PTHREAD_MUTEX_INIT_UNUSED
	// For detecting copied mutexes and smashes during debugging
	uint32_t sig32 = (uint32_t)sig;
#if defined(__LP64__)
	uintptr_t guard = ~(uintptr_t)mutex; // use ~ to hide from leaks
	__builtin_memcpy(mutex->_reserved, &guard, sizeof(guard));
	mutex->_reserved[2] = sig32;
	mutex->_reserved[3] = sig32;
	mutex->_pad = sig32;
#else
	mutex->_reserved[0] = sig32;
#endif
#endif // PTHREAD_MUTEX_INIT_UNUSED

	// Ensure all contents are properly set before setting signature.
#if defined(__LP64__)
	// For binary compatibility reasons we cannot require natural alignment of
	// the 64bit 'sig' long value in the struct. rdar://problem/21610439
	uint32_t *sig32_ptr = (uint32_t*)&mutex->sig;
	uint32_t *sig32_val = (uint32_t*)&sig;
	*(sig32_ptr + 1) = *(sig32_val + 1);
	os_atomic_store(sig32_ptr, *sig32_val, release);
#else
	os_atomic_store2o(mutex, sig, sig, release);
#endif

	return 0;
}

int
pthread_mutex_destroy(pthread_mutex_t *omutex)
{
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	int res = EINVAL;

	_PTHREAD_LOCK(mutex->lock);
	if (_pthread_mutex_check_signature(mutex)) {
		uint32_t lgenval, ugenval;
		uint64_t oldval64;
		volatile uint64_t *seqaddr;
		MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
		volatile uint64_t *tidaddr;
		MUTEX_GETTID_ADDR(mutex, &tidaddr);

		oldval64 = *seqaddr;
		lgenval = (uint32_t)oldval64;
		ugenval = (uint32_t)(oldval64 >> 32);
		if ((*tidaddr == (uint64_t)0) &&
				((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK))) {
			mutex->sig = _PTHREAD_NO_SIG;
			res = 0;
		} else {
			res = EBUSY;
		}
	} else if (_pthread_mutex_check_signature_init(mutex)) {
		mutex->sig = _PTHREAD_NO_SIG;
		res = 0;
	}
	_PTHREAD_UNLOCK(mutex->lock);

	return res;
}

#endif // OS_UP_VARIANT_ONLY

#endif /* !BUILDING_VARIANT ] */

#ifndef OS_UP_VARIANT_ONLY
/*
 * Destroy a mutex attribute structure.
 */
int
pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
#if __DARWIN_UNIX03
	if (__unix_conforming == 0) {
		__unix_conforming = 1;
	}
	if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
		return EINVAL;
	}
#endif /* __DARWIN_UNIX03 */

	attr->sig = _PTHREAD_NO_SIG;
	return 0;
}

#endif // OS_UP_VARIANT_ONLY