/*
 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 * All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */
/*
 * MkLinux
 */

/*
 * POSIX Pthread Library
 * -- Mutex variable support
 */

#include "resolver.h"
#include "internal.h"
#include "kern/kern_trace.h"

extern int __unix_conforming;

#ifndef BUILDING_VARIANT /* [ */

#ifdef PLOCKSTAT
#include "plockstat.h"
/* This function is never called and exists to provide never-fired dtrace
 * probes so that user d scripts don't get errors.
 */
PTHREAD_NOEXPORT PTHREAD_USED
void
_plockstat_never_fired(void)
{
	PLOCKSTAT_MUTEX_SPIN(NULL);
	PLOCKSTAT_MUTEX_SPUN(NULL, 0, 0);
}
#else /* !PLOCKSTAT */
#define PLOCKSTAT_MUTEX_SPIN(x)
#define PLOCKSTAT_MUTEX_SPUN(x, y, z)
#define PLOCKSTAT_MUTEX_ERROR(x, y)
#define PLOCKSTAT_MUTEX_BLOCK(x)
#define PLOCKSTAT_MUTEX_BLOCKED(x, y)
#define PLOCKSTAT_MUTEX_ACQUIRE(x, y, z)
#define PLOCKSTAT_MUTEX_RELEASE(x, y)
#endif /* PLOCKSTAT */

#define BLOCK_FAIL_PLOCKSTAT 0
#define BLOCK_SUCCESS_PLOCKSTAT 1

#define PTHREAD_MUTEX_INIT_UNUSED 1

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int _pthread_mutex_lock_slow(pthread_mutex_t *omutex, bool trylock);

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int _pthread_mutex_unlock_slow(pthread_mutex_t *omutex);

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int _pthread_mutex_corruption_abort(_pthread_mutex *mutex);


PTHREAD_ALWAYS_INLINE
static inline int _pthread_mutex_init(_pthread_mutex *mutex,
		const pthread_mutexattr_t *attr, uint32_t static_type);

#define DEBUG_TRACE_POINTS 0

#if DEBUG_TRACE_POINTS
#include <sys/kdebug.h>
#define DEBUG_TRACE(x, a, b, c, d) kdebug_trace(TRACE_##x, a, b, c, d)
#else
#define DEBUG_TRACE(x, a, b, c, d) do { } while(0)
#endif

typedef union mutex_seq {
	uint32_t seq[2];
	struct { uint32_t lgenval; uint32_t ugenval; };
	struct { uint32_t mgen; uint32_t ugen; };
	uint64_t seq_LU;
	uint64_t _Atomic atomic_seq_LU;
} mutex_seq;

_Static_assert(sizeof(mutex_seq) == 2 * sizeof(uint32_t),
		"Incorrect mutex_seq size");

#if !__LITTLE_ENDIAN__
#error MUTEX_GETSEQ_ADDR assumes little endian layout of 2 32-bit sequence words
#endif

PTHREAD_ALWAYS_INLINE
static inline void
MUTEX_GETSEQ_ADDR(_pthread_mutex *mutex, mutex_seq **seqaddr)
{
	// 64-bit aligned address inside m_seq array (&m_seq[0] for aligned mutex)
	// We don't require more than byte alignment on OS X. rdar://22278325
	*seqaddr = (void *)(((uintptr_t)mutex->m_seq + 0x7ul) & ~0x7ul);
}

PTHREAD_ALWAYS_INLINE
static inline void
MUTEX_GETTID_ADDR(_pthread_mutex *mutex, uint64_t **tidaddr)
{
	// 64-bit aligned address inside m_tid array (&m_tid[0] for aligned mutex)
	// We don't require more than byte alignment on OS X. rdar://22278325
	*tidaddr = (void*)(((uintptr_t)mutex->m_tid + 0x7ul) & ~0x7ul);
}
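
/*
 * Illustrative arithmetic for the two accessors above (addresses are made
 * up): ((addr + 0x7) & ~0x7) rounds a byte-aligned addr of 0x1003 up to
 * 0x1008 and leaves an already-aligned 0x1008 unchanged, so the interior
 * of m_seq/m_tid always yields a naturally aligned 64-bit word.
 */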

PTHREAD_ALWAYS_INLINE
static inline void
mutex_seq_load(mutex_seq *seqaddr, mutex_seq *oldseqval)
{
	oldseqval->seq_LU = seqaddr->seq_LU;
}

PTHREAD_ALWAYS_INLINE
static inline void
mutex_seq_atomic_load_relaxed(mutex_seq *seqaddr, mutex_seq *oldseqval)
{
	oldseqval->seq_LU = os_atomic_load(&seqaddr->atomic_seq_LU, relaxed);
}

#define mutex_seq_atomic_load(seqaddr, oldseqval, m) \
		mutex_seq_atomic_load_##m(seqaddr, oldseqval)

PTHREAD_ALWAYS_INLINE
static inline bool
mutex_seq_atomic_cmpxchgv_relaxed(mutex_seq *seqaddr, mutex_seq *oldseqval,
		mutex_seq *newseqval)
{
	return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
			newseqval->seq_LU, &oldseqval->seq_LU, relaxed);
}

PTHREAD_ALWAYS_INLINE
static inline bool
mutex_seq_atomic_cmpxchgv_acquire(mutex_seq *seqaddr, mutex_seq *oldseqval,
		mutex_seq *newseqval)
{
	return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
			newseqval->seq_LU, &oldseqval->seq_LU, acquire);
}

PTHREAD_ALWAYS_INLINE
static inline bool
mutex_seq_atomic_cmpxchgv_release(mutex_seq *seqaddr, mutex_seq *oldseqval,
		mutex_seq *newseqval)
{
	return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
			newseqval->seq_LU, &oldseqval->seq_LU, release);
}

#define mutex_seq_atomic_cmpxchgv(seqaddr, oldseqval, newseqval, m) \
		mutex_seq_atomic_cmpxchgv_##m(seqaddr, oldseqval, newseqval)

/*
 * Initialize a mutex variable, possibly with additional attributes.
 * Public interface - so don't trust the lock - initialize it first.
 */
PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_init(pthread_mutex_t *omutex, const pthread_mutexattr_t *attr)
{
#if 0
	/* conformance tests depend on not having this behavior */
	/* The test for this behavior is optional */
	if (_pthread_mutex_check_signature(mutex))
		return EBUSY;
#endif
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	_PTHREAD_LOCK_INIT(mutex->lock);
	return (_pthread_mutex_init(mutex, attr, 0x7));
}
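
/*
 * Illustrative caller-side sketch (standard POSIX usage, not part of this
 * library): initializing a mutex with explicit attributes.
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
 *	pthread_mutex_t m;
 *	pthread_mutex_init(&m, &attr);    // attr is copied into the mutex
 *	pthread_mutexattr_destroy(&attr); // safe once init has returned
 */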

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_getprioceiling(const pthread_mutex_t *omutex, int *prioceiling)
{
	int res = EINVAL;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (_pthread_mutex_check_signature(mutex)) {
		_PTHREAD_LOCK(mutex->lock);
		*prioceiling = mutex->prioceiling;
		res = 0;
		_PTHREAD_UNLOCK(mutex->lock);
	}
	return res;
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_setprioceiling(pthread_mutex_t *omutex, int prioceiling,
		int *old_prioceiling)
{
	int res = EINVAL;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (_pthread_mutex_check_signature(mutex)) {
		_PTHREAD_LOCK(mutex->lock);
		if (prioceiling >= -999 && prioceiling <= 999) {
			*old_prioceiling = mutex->prioceiling;
			mutex->prioceiling = (int16_t)prioceiling;
			res = 0;
		}
		_PTHREAD_UNLOCK(mutex->lock);
	}
	return res;
}


int
pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *attr,
		int *prioceiling)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*prioceiling = attr->prioceiling;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_getprotocol(const pthread_mutexattr_t *attr, int *protocol)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*protocol = attr->protocol;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*type = attr->type;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_getpshared(const pthread_mutexattr_t *attr, int *pshared)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*pshared = (int)attr->pshared;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
	attr->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
	attr->protocol = _PTHREAD_DEFAULT_PROTOCOL;
	attr->policy = _PTHREAD_MUTEX_POLICY_FAIRSHARE;
	attr->type = PTHREAD_MUTEX_DEFAULT;
	attr->sig = _PTHREAD_MUTEX_ATTR_SIG;
	attr->pshared = _PTHREAD_DEFAULT_PSHARED;
	return 0;
}

int
pthread_mutexattr_setprioceiling(pthread_mutexattr_t *attr, int prioceiling)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		if (prioceiling >= -999 && prioceiling <= 999) {
			attr->prioceiling = prioceiling;
			res = 0;
		}
	}
	return res;
}

int
pthread_mutexattr_setprotocol(pthread_mutexattr_t *attr, int protocol)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (protocol) {
		case PTHREAD_PRIO_NONE:
		case PTHREAD_PRIO_INHERIT:
		case PTHREAD_PRIO_PROTECT:
			attr->protocol = protocol;
			res = 0;
			break;
		}
	}
	return res;
}

int
pthread_mutexattr_setpolicy_np(pthread_mutexattr_t *attr, int policy)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (policy) {
		case _PTHREAD_MUTEX_POLICY_FAIRSHARE:
		case _PTHREAD_MUTEX_POLICY_FIRSTFIT:
			attr->policy = policy;
			res = 0;
			break;
		}
	}
	return res;
}

int
pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (type) {
		case PTHREAD_MUTEX_NORMAL:
		case PTHREAD_MUTEX_ERRORCHECK:
		case PTHREAD_MUTEX_RECURSIVE:
		//case PTHREAD_MUTEX_DEFAULT:
			attr->type = type;
			res = 0;
			break;
		}
	}
	return res;
}

int
pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
{
	int res = EINVAL;
#if __DARWIN_UNIX03
	if (__unix_conforming == 0) {
		__unix_conforming = 1;
	}
#endif /* __DARWIN_UNIX03 */

	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
#if __DARWIN_UNIX03
		if ((pshared == PTHREAD_PROCESS_PRIVATE) ||
				(pshared == PTHREAD_PROCESS_SHARED))
#else /* __DARWIN_UNIX03 */
		if (pshared == PTHREAD_PROCESS_PRIVATE)
#endif /* __DARWIN_UNIX03 */
		{
			attr->pshared = pshared;
			res = 0;
		}
	}
	return res;
}

PTHREAD_NOEXPORT PTHREAD_NOINLINE PTHREAD_NORETURN
int
_pthread_mutex_corruption_abort(_pthread_mutex *mutex)
{
	PTHREAD_ABORT("pthread_mutex corruption: mutex owner changed in the "
			"middle of lock/unlock");
}

/*
 * Sequence numbers and TID:
 *
 * Notation: a state [L4 U4 TID0] has a lock sequence word (lgenval) with
 * count 4, an unlock sequence word (ugenval) with count 4, and owner tid 0;
 * "+KE" means PTH_RWL_KBIT and PTH_RWL_EBIT are set in lgenval, and TID940
 * stands for an arbitrary owning thread's id.
 *
 * In steady (and uncontended) state, an unlocked mutex will
 * look like A=[L4 U4 TID0]. When it is being locked, it transitions
 * to B=[L5+KE U4 TID0] and then C=[L5+KE U4 TID940]. For an uncontended mutex,
 * the unlock path will then transition to D=[L5 U4 TID0] and then finally
 * E=[L5 U5 TID0].
 *
 * If a contender comes in after B, the mutex will instead transition to
 * E'=[L6+KE U4 TID0] and then F=[L6+KE U4 TID940]. If a contender comes in
 * after C, it will transition to F=[L6+KE U4 TID940] directly. In both cases,
 * the contender will enter the kernel with either mutexwait(U4, TID0) or
 * mutexwait(U4, TID940). The first owner will unlock the mutex by first
 * updating the owner to G=[L6+KE U4 TID-1] and then doing the actual unlock to
 * H=[L6+KE U5 TID-1] before entering the kernel with mutexdrop(U5, -1) to
 * signal the next waiter (potentially as a prepost). When the waiter comes out
 * of the kernel, it will update the owner to I=[L6+KE U5 TID941]. An unlock at
 * this point is simply J=[L6 U5 TID0] and then K=[L6 U6 TID0].
 *
 * At various points along these timelines, since the sequence words and TID
 * are written independently, a thread may get preempted and another thread
 * might see inconsistent data. In the worst case, another thread may see the
 * TID in the SWITCHING (-1) state or unlocked (0) state for longer because the
 * owning thread was preempted.
 */
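
/*
 * A concrete reading of the above, derived from the code below: each lock
 * request advances lgenval by PTHRW_INC and each retired unlock advances
 * ugenval by PTHRW_INC, so diff_genseq(lgenval, ugenval) -- the "numwaiters"
 * count in the unlock paths -- is nonzero exactly when lock sequences have
 * been issued that no unlock has matched yet.
 */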

/*
 * Drop the mutex unlock references from cond_wait or mutex_unlock.
 */
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_unlock_updatebits(_pthread_mutex *mutex, uint32_t *flagsp,
		uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
{
	bool firstfit = (mutex->mtxopts.options.policy ==
			_PTHREAD_MUTEX_POLICY_FIRSTFIT);
	uint32_t flags = mutex->mtxopts.value;
	flags &= ~_PTHREAD_MTX_OPT_NOTIFY; // no notification by default

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t oldtid, newtid;

	if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
		uint64_t selfid = _pthread_selfid_direct();
		if (os_atomic_load(tidaddr, relaxed) != selfid) {
			PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, EPERM);
			return EPERM;
		} else if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE &&
				--mutex->mtxopts.options.lock_count) {
			PLOCKSTAT_MUTEX_RELEASE((pthread_mutex_t *)mutex, 1);
			if (flagsp != NULL) {
				*flagsp = flags;
			}
			return 0;
		}
	}

	bool clearprepost, clearnotify, spurious;
	do {
		newseq = oldseq;
		oldtid = os_atomic_load(tidaddr, relaxed);

		clearprepost = false;
		clearnotify = false;
		spurious = false;

		// pending waiters
		int numwaiters = diff_genseq(oldseq.lgenval, oldseq.ugenval);
		if (numwaiters == 0) {
			// spurious unlock (unlock of unlocked lock)
			spurious = true;
		} else {
			newseq.ugenval += PTHRW_INC;

			if ((oldseq.lgenval & PTHRW_COUNT_MASK) ==
					(newseq.ugenval & PTHRW_COUNT_MASK)) {
				// our unlock sequence matches the lock sequence, so if the
				// CAS is successful, the mutex is unlocked

				/* do not reset Ibit, just K&E */
				newseq.lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
				clearnotify = true;
				newtid = 0; // clear owner
			} else {
				if (firstfit) {
					// reset E bit so another can acquire meanwhile
					newseq.lgenval &= ~PTH_RWL_EBIT;
					newtid = 0;
				} else {
					newtid = PTHREAD_MTX_TID_SWITCHING;
				}
				// need to signal others waiting for mutex
				flags |= _PTHREAD_MTX_OPT_NOTIFY;
			}

			if (newtid != oldtid) {
				// We're giving up the mutex one way or the other, so go ahead
				// and update the owner to 0 so that once the CAS below
				// succeeds, there is no stale ownership information. If the
				// CAS of the seqaddr fails, we may loop, but it's still valid
				// for the owner to be SWITCHING/0
				if (!os_atomic_cmpxchg(tidaddr, oldtid, newtid, relaxed)) {
					// we own this mutex, nobody should be updating it except us
					return _pthread_mutex_corruption_abort(mutex);
				}
			}
		}

		if (clearnotify || spurious) {
			flags &= ~_PTHREAD_MTX_OPT_NOTIFY;
			if (firstfit && (newseq.lgenval & PTH_RWL_PBIT)) {
				clearprepost = true;
				newseq.lgenval &= ~PTH_RWL_PBIT;
			}
		}
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, release));

	if (clearprepost) {
		__psynch_cvclrprepost(mutex, newseq.lgenval, newseq.ugenval, 0, 0,
				newseq.lgenval, flags | _PTHREAD_MTX_OPT_MUTEX);
	}

	if (mgenp != NULL) {
		*mgenp = newseq.lgenval;
	}
	if (ugenp != NULL) {
		*ugenp = newseq.ugenval;
	}
	if (pmtxp != NULL) {
		*pmtxp = (uint32_t *)mutex;
	}
	if (flagsp != NULL) {
		*flagsp = flags;
	}

	return 0;
}

PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_mutex_droplock(_pthread_mutex *mutex, uint32_t *flagsp,
		uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
{
	return _pthread_mutex_unlock_updatebits(mutex, flagsp, pmtxp, mgenp, ugenp);
}

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_lock_updatebits(_pthread_mutex *mutex, uint64_t selfid)
{
	int res = 0;
	bool firstfit = (mutex->mtxopts.options.policy ==
			_PTHREAD_MUTEX_POLICY_FIRSTFIT);
	bool isebit = false, updated = false;

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t oldtid;

	do {
		if (firstfit && isebit && updated) {
			mutex_seq_atomic_load(seqaddr, &oldseq, relaxed);
		}
		newseq = oldseq;
		oldtid = os_atomic_load(tidaddr, relaxed);

		if (isebit && !(oldseq.lgenval & PTH_RWL_EBIT)) {
			// E bit was set on first pass through the loop but is no longer
			// set. Apparently we spin until it arrives.
			// XXX: verify this is desired behavior.
			continue;
		}

		if (isebit) {
			// first fit mutex now has the E bit set. Return 1.
			res = 1;
			break;
		}

		if (firstfit) {
			isebit = (oldseq.lgenval & PTH_RWL_EBIT);
		} else if ((oldseq.lgenval & (PTH_RWL_KBIT|PTH_RWL_EBIT)) ==
				(PTH_RWL_KBIT|PTH_RWL_EBIT)) {
			// fairshare mutex and the bits are already set, just update tid
			break;
		}

		// either first fit or no E bit set
		// update the bits
		newseq.lgenval |= PTH_RWL_KBIT | PTH_RWL_EBIT;

		// Retry if CAS fails, or if it succeeds with firstfit and E bit
		// already set
	} while (!(updated = mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			relaxed)) || (firstfit && isebit));

	if (res == 0) {
		if (!os_atomic_cmpxchg(tidaddr, oldtid, selfid, relaxed)) {
			// we own this mutex, nobody should be updating it except us
			return _pthread_mutex_corruption_abort(mutex);
		}
	}

	return res;
}

PTHREAD_NOINLINE
static int
_pthread_mutex_markprepost(_pthread_mutex *mutex, uint32_t updateval)
{
	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	bool clearprepost;
	do {
		clearprepost = false;
		newseq = oldseq;

		/* update the bits */
		if ((oldseq.lgenval & PTHRW_COUNT_MASK) ==
				(oldseq.ugenval & PTHRW_COUNT_MASK)) {
			clearprepost = true;
			newseq.lgenval &= ~PTH_RWL_PBIT;
		} else {
			newseq.lgenval |= PTH_RWL_PBIT;
		}
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, relaxed));

	if (clearprepost) {
		__psynch_cvclrprepost(mutex, newseq.lgenval, newseq.ugenval, 0, 0,
				newseq.lgenval, mutex->mtxopts.value | _PTHREAD_MTX_OPT_MUTEX);
	}

	return 0;
}

PTHREAD_NOINLINE
static int
_pthread_mutex_check_init_slow(pthread_mutex_t *omutex)
{
	int res = EINVAL;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	if (_pthread_mutex_check_signature_init(mutex)) {
		_PTHREAD_LOCK(mutex->lock);
		if (_pthread_mutex_check_signature_init(mutex)) {
			// initialize a statically initialized mutex to provide
			// compatibility for misbehaving applications.
			// (unlock should not be the first operation on a mutex)
			res = _pthread_mutex_init(mutex, NULL, (mutex->sig & 0xf));
		} else if (_pthread_mutex_check_signature(mutex)) {
			res = 0;
		}
		_PTHREAD_UNLOCK(mutex->lock);
	} else if (_pthread_mutex_check_signature(mutex)) {
		res = 0;
	}
	if (res != 0) {
		PLOCKSTAT_MUTEX_ERROR(omutex, res);
	}
	return res;
}

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_check_init(pthread_mutex_t *omutex)
{
	int res = 0;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	if (!_pthread_mutex_check_signature(mutex)) {
		return _pthread_mutex_check_init_slow(omutex);
	}
	return res;
}

PTHREAD_NOINLINE
static int
_pthread_mutex_lock_wait(pthread_mutex_t *omutex, mutex_seq newseq,
		uint64_t oldtid)
{
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_selfid_direct();

	PLOCKSTAT_MUTEX_BLOCK(omutex);
	do {
		uint32_t updateval;
		do {
			updateval = __psynch_mutexwait(omutex, newseq.lgenval,
					newseq.ugenval, oldtid, mutex->mtxopts.value);
			oldtid = os_atomic_load(tidaddr, relaxed);
		} while (updateval == (uint32_t)-1);

		// returns 0 on successful update; in firstfit it may fail with 1
	} while (_pthread_mutex_lock_updatebits(mutex, selfid) == 1);
	PLOCKSTAT_MUTEX_BLOCKED(omutex, BLOCK_SUCCESS_PLOCKSTAT);

	return 0;
}

PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_mutex_lock_slow(pthread_mutex_t *omutex, bool trylock)
{
	int res, recursive = 0;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	res = _pthread_mutex_check_init(omutex);
	if (res != 0) return res;

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t oldtid, selfid = _pthread_selfid_direct();

	if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
		if (os_atomic_load(tidaddr, relaxed) == selfid) {
			if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE) {
				if (mutex->mtxopts.options.lock_count < USHRT_MAX) {
					mutex->mtxopts.options.lock_count++;
					recursive = 1;
					res = 0;
				} else {
					res = EAGAIN;
				}
			} else if (trylock) { /* PTHREAD_MUTEX_ERRORCHECK */
				// <rdar://problem/16261552> as per OpenGroup, trylock cannot
				// return EDEADLK on a deadlock, it should return EBUSY.
				res = EBUSY;
			} else { /* PTHREAD_MUTEX_ERRORCHECK */
				res = EDEADLK;
			}
			goto out;
		}
	}

	bool gotlock;
	do {
		newseq = oldseq;
		oldtid = os_atomic_load(tidaddr, relaxed);

		gotlock = ((oldseq.lgenval & PTH_RWL_EBIT) == 0);

		if (trylock && !gotlock) {
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS64 to ensure that nobody has unlocked concurrently.
		} else {
			// Increment the lock sequence number and force the lock into E+K
			// mode, whether "gotlock" is true or not.
			newseq.lgenval += PTHRW_INC;
			newseq.lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
		}
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, acquire));

	if (gotlock) {
		os_atomic_store(tidaddr, selfid, relaxed);
		res = 0;
		DEBUG_TRACE(psynch_mutex_ulock, omutex, lgenval, ugenval, selfid);
	} else if (trylock) {
		res = EBUSY;
		DEBUG_TRACE(psynch_mutex_utrylock_failed, omutex, lgenval, ugenval,
				oldtid);
	} else {
		res = _pthread_mutex_lock_wait(omutex, newseq, oldtid);
	}

	if (res == 0 && mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE) {
		mutex->mtxopts.options.lock_count = 1;
	}

out:
#if PLOCKSTAT
	if (res == 0) {
		PLOCKSTAT_MUTEX_ACQUIRE(omutex, recursive, 0);
	} else {
		PLOCKSTAT_MUTEX_ERROR(omutex, res);
	}
#endif

	return res;
}

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_lock(pthread_mutex_t *omutex, bool trylock)
{
#if PLOCKSTAT || DEBUG_TRACE_POINTS
	if (PLOCKSTAT_MUTEX_ACQUIRE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED() ||
			DEBUG_TRACE_POINTS) {
		return _pthread_mutex_lock_slow(omutex, trylock);
	}
#endif
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (os_unlikely(!_pthread_mutex_check_signature_fast(mutex))) {
		return _pthread_mutex_lock_slow(omutex, trylock);
	}

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_selfid_direct();

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	if (os_unlikely(oldseq.lgenval & PTH_RWL_EBIT)) {
		return _pthread_mutex_lock_slow(omutex, trylock);
	}

	bool gotlock;
	do {
		newseq = oldseq;

		gotlock = ((oldseq.lgenval & PTH_RWL_EBIT) == 0);

		if (trylock && !gotlock) {
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS64 to ensure that nobody has unlocked concurrently.
		} else if (os_likely(gotlock)) {
			// Increment the lock sequence number and force the lock into
			// E+K mode.
			newseq.lgenval += PTHRW_INC;
			newseq.lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
		} else {
			return _pthread_mutex_lock_slow(omutex, trylock);
		}
	} while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			acquire)));

	if (os_likely(gotlock)) {
		os_atomic_store(tidaddr, selfid, relaxed);
		return 0;
	} else if (trylock) {
		return EBUSY;
	} else {
		__builtin_trap();
	}
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_lock(pthread_mutex_t *mutex)
{
	return _pthread_mutex_lock(mutex, false);
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	return _pthread_mutex_lock(mutex, true);
}
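
/*
 * Illustrative caller-side sketch (standard POSIX usage, not part of this
 * library): try the lock first and fall back to blocking, relying on the
 * EBUSY return documented for the trylock path above.
 *
 *	if (pthread_mutex_trylock(&m) == EBUSY) {
 *		// do other useful work, then block
 *		pthread_mutex_lock(&m);
 *	}
 *	// ... critical section ...
 *	pthread_mutex_unlock(&m);
 */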

/*
 * Unlock a mutex.
 * TODO: Priority inheritance stuff
 */

PTHREAD_NOINLINE
static int
_pthread_mutex_unlock_drop(pthread_mutex_t *omutex, mutex_seq newseq,
		uint32_t flags)
{
	int res;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	uint32_t updateval;

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	updateval = __psynch_mutexdrop(omutex, newseq.lgenval, newseq.ugenval,
			os_atomic_load(tidaddr, relaxed), flags);

	if (updateval == (uint32_t)-1) {
		res = errno;

		if (res == EINTR) {
			res = 0;
		}
		if (res != 0) {
			PTHREAD_ABORT("__psynch_mutexdrop failed with error %d", res);
		}
		return res;
	} else if ((mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT)
			&& (updateval & PTH_RWL_PBIT)) {
		return _pthread_mutex_markprepost(mutex, updateval);
	}

	return 0;
}

PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_mutex_unlock_slow(pthread_mutex_t *omutex)
{
	int res;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	mutex_seq newseq;
	uint32_t flags;

	// Initialize static mutexes for compatibility with misbehaving
	// applications (unlock should not be the first operation on a mutex).
	res = _pthread_mutex_check_init(omutex);
	if (res != 0) return res;

	res = _pthread_mutex_unlock_updatebits(mutex, &flags, NULL, &newseq.lgenval,
			&newseq.ugenval);
	if (res != 0) return res;

	if ((flags & _PTHREAD_MTX_OPT_NOTIFY) != 0) {
		return _pthread_mutex_unlock_drop(omutex, newseq, flags);
	} else {
		uint64_t *tidaddr;
		MUTEX_GETTID_ADDR(mutex, &tidaddr);
		DEBUG_TRACE(psynch_mutex_uunlock, omutex, mtxgen, mtxugen,
				os_atomic_load(tidaddr, relaxed));
	}

	return 0;
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_unlock(pthread_mutex_t *omutex)
{
#if PLOCKSTAT || DEBUG_TRACE_POINTS
	if (PLOCKSTAT_MUTEX_RELEASE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED() ||
			DEBUG_TRACE_POINTS) {
		return _pthread_mutex_unlock_slow(omutex);
	}
#endif
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (os_unlikely(!_pthread_mutex_check_signature_fast(mutex))) {
		return _pthread_mutex_unlock_slow(omutex);
	}

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	int numwaiters = diff_genseq(oldseq.lgenval, oldseq.ugenval);
	if (os_unlikely(numwaiters == 0)) {
		// spurious unlock (unlock of unlocked lock)
		return 0;
	}

	// We're giving up the mutex one way or the other, so go ahead and
	// update the owner to 0 so that once the CAS below succeeds, there
	// is no stale ownership information. If the CAS of the seqaddr
	// fails, we may loop, but it's still valid for the owner to be
	// SWITCHING/0
	os_atomic_store(tidaddr, 0, relaxed);

	do {
		newseq = oldseq;
		newseq.ugenval += PTHRW_INC;

		if (os_likely((oldseq.lgenval & PTHRW_COUNT_MASK) ==
				(newseq.ugenval & PTHRW_COUNT_MASK))) {
			// our unlock sequence matches the lock sequence, so if the
			// CAS is successful, the mutex is unlocked

			// do not reset Ibit, just K&E
			newseq.lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
		} else {
			return _pthread_mutex_unlock_slow(omutex);
		}
	} while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			release)));

	return 0;
}


PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_init(_pthread_mutex *mutex, const pthread_mutexattr_t *attr,
		uint32_t static_type)
{
	mutex->mtxopts.value = 0;
	mutex->mtxopts.options.mutex = 1;
	if (attr) {
		if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
			return EINVAL;
		}
		mutex->prioceiling = (int16_t)attr->prioceiling;
		mutex->mtxopts.options.protocol = attr->protocol;
		mutex->mtxopts.options.policy = attr->policy;
		mutex->mtxopts.options.type = attr->type;
		mutex->mtxopts.options.pshared = attr->pshared;
	} else {
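		// static_type is the low signature nibble of the static
		// initializers (assumed mapping, per the _PTHREAD_*_MUTEX_SIG_init
		// header values): 1 = errorcheck, 2 = recursive, 3 = firstfit
		// variant, 7 = PTHREAD_MUTEX_INITIALIZER default.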
		switch (static_type) {
		case 1:
			mutex->mtxopts.options.type = PTHREAD_MUTEX_ERRORCHECK;
			break;
		case 2:
			mutex->mtxopts.options.type = PTHREAD_MUTEX_RECURSIVE;
			break;
		case 3:
			/* firstfit fall thru */
		case 7:
			mutex->mtxopts.options.type = PTHREAD_MUTEX_DEFAULT;
			break;
		default:
			return EINVAL;
		}

		mutex->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
		mutex->mtxopts.options.protocol = _PTHREAD_DEFAULT_PROTOCOL;
		if (static_type != 3) {
			mutex->mtxopts.options.policy = _PTHREAD_MUTEX_POLICY_FAIRSHARE;
		} else {
			mutex->mtxopts.options.policy = _PTHREAD_MUTEX_POLICY_FIRSTFIT;
		}
		mutex->mtxopts.options.pshared = _PTHREAD_DEFAULT_PSHARED;
	}
	mutex->priority = 0;

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

#if PTHREAD_MUTEX_INIT_UNUSED
	if ((uint32_t*)tidaddr != mutex->m_tid) {
		mutex->mtxopts.options.misalign = 1;
		__builtin_memset(mutex->m_tid, 0xff, sizeof(mutex->m_tid));
	}
	__builtin_memset(mutex->m_mis, 0xff, sizeof(mutex->m_mis));
#endif // PTHREAD_MUTEX_INIT_UNUSED
	*tidaddr = 0;
	*seqaddr = (mutex_seq){ };

	long sig = _PTHREAD_MUTEX_SIG;
	if (mutex->mtxopts.options.type == PTHREAD_MUTEX_NORMAL &&
			mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FAIRSHARE) {
		// rdar://18148854 _pthread_mutex_lock & pthread_mutex_unlock fastpath
		sig = _PTHREAD_MUTEX_SIG_fast;
	}

#if PTHREAD_MUTEX_INIT_UNUSED
	// For detecting copied mutexes and smashes during debugging
	uint32_t sig32 = (uint32_t)sig;
#if defined(__LP64__)
	uintptr_t guard = ~(uintptr_t)mutex; // use ~ to hide from leaks
	__builtin_memcpy(mutex->_reserved, &guard, sizeof(guard));
	mutex->_reserved[2] = sig32;
	mutex->_reserved[3] = sig32;
	mutex->_pad = sig32;
#else
	mutex->_reserved[0] = sig32;
#endif
#endif // PTHREAD_MUTEX_INIT_UNUSED

	// Ensure all contents are properly set before setting signature.
#if defined(__LP64__)
	// For binary compatibility reasons we cannot require natural alignment of
	// the 64bit 'sig' long value in the struct. rdar://problem/21610439
	uint32_t *sig32_ptr = (uint32_t*)&mutex->sig;
	uint32_t *sig32_val = (uint32_t*)&sig;
	*(sig32_ptr + 1) = *(sig32_val + 1);
	os_atomic_store(sig32_ptr, *sig32_val, release);
#else
	os_atomic_store2o(mutex, sig, sig, release);
#endif

	return 0;
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_destroy(pthread_mutex_t *omutex)
{
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	int res = EINVAL;

	_PTHREAD_LOCK(mutex->lock);
	if (_pthread_mutex_check_signature(mutex)) {
		mutex_seq *seqaddr;
		MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

		mutex_seq seq;
		mutex_seq_load(seqaddr, &seq);

		uint64_t *tidaddr;
		MUTEX_GETTID_ADDR(mutex, &tidaddr);

		if ((os_atomic_load(tidaddr, relaxed) == 0) &&
				(seq.lgenval & PTHRW_COUNT_MASK) ==
				(seq.ugenval & PTHRW_COUNT_MASK)) {
			mutex->sig = _PTHREAD_NO_SIG;
			res = 0;
		} else {
			res = EBUSY;
		}
	} else if (_pthread_mutex_check_signature_init(mutex)) {
		mutex->sig = _PTHREAD_NO_SIG;
		res = 0;
	}
	_PTHREAD_UNLOCK(mutex->lock);

	return res;
}

#endif /* !BUILDING_VARIANT ] */

/*
 * Destroy a mutex attribute structure.
 */
int
pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
#if __DARWIN_UNIX03
	if (__unix_conforming == 0) {
		__unix_conforming = 1;
	}
	if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
		return EINVAL;
	}
#endif /* __DARWIN_UNIX03 */

	attr->sig = _PTHREAD_NO_SIG;
	return 0;
}