/*
 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *              All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */
/*
 * MkLinux
 */

/*
 * POSIX Pthread Library
 * -- Mutex variable support
 */

#include "resolver.h"
#include "internal.h"
#include "kern/kern_trace.h"
#include <sys/syscall.h>

#include "os/atomic.h"

#ifdef PLOCKSTAT
#include "plockstat.h"
#else /* !PLOCKSTAT */
#define PLOCKSTAT_MUTEX_SPIN(x)
#define PLOCKSTAT_MUTEX_SPUN(x, y, z)
#define PLOCKSTAT_MUTEX_ERROR(x, y)
#define PLOCKSTAT_MUTEX_BLOCK(x)
#define PLOCKSTAT_MUTEX_BLOCKED(x, y)
#define PLOCKSTAT_MUTEX_ACQUIRE(x, y, z)
#define PLOCKSTAT_MUTEX_RELEASE(x, y)
#endif /* PLOCKSTAT */

extern int __unix_conforming;

#ifndef BUILDING_VARIANT

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int
_pthread_mutex_unlock_slow(pthread_mutex_t *omutex);

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int
_pthread_mutex_lock_slow(pthread_mutex_t *omutex, bool trylock);

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into _pthread_mutex_lock
int
_pthread_mutex_lock_wait(pthread_mutex_t *omutex, uint64_t newval64, uint64_t oldtid);

#endif /* BUILDING_VARIANT */

#define DEBUG_TRACE_POINTS 0

#if DEBUG_TRACE_POINTS
extern int __syscall(int number, ...);
#define DEBUG_TRACE(x, a, b, c, d) __syscall(SYS_kdebug_trace, TRACE_##x, a, b, c, d)
#else
#define DEBUG_TRACE(x, a, b, c, d) do { } while(0)
#endif

#include <machine/cpu_capabilities.h>

static inline int _pthread_mutex_init(_pthread_mutex *mutex, const pthread_mutexattr_t *attr, uint32_t static_type);

#if !__LITTLE_ENDIAN__
#error MUTEX_GETSEQ_ADDR assumes little endian layout of 2 32-bit sequence words
#endif

PTHREAD_ALWAYS_INLINE
static inline void
MUTEX_GETSEQ_ADDR(_pthread_mutex *mutex,
		volatile uint64_t **seqaddr)
{
	// addr of m_seq[1] for misaligned, m_seq[0] for aligned mutex struct
	*seqaddr = (volatile uint64_t *)(((uintptr_t)&mutex->m_seq[1]) & ~0x7ul);
}

PTHREAD_ALWAYS_INLINE
static inline void
MUTEX_GETTID_ADDR(_pthread_mutex *mutex,
		volatile uint64_t **tidaddr)
{
	// addr of m_tid[1] for misaligned, m_tid[0] for aligned mutex struct
	*tidaddr = (volatile uint64_t *)(((uintptr_t)&mutex->m_tid[1]) & ~0x7ul);
}

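/*
 * Worked example of the alignment trick above (editor's sketch, derived from
 * the two helpers): the kernel operates on a naturally aligned 64-bit word,
 * so each helper rounds the address of the *second* 32-bit element down to an
 * 8-byte boundary. If m_seq[0] lands at an address ending in 0x4 (misaligned
 * struct), then &m_seq[1] ends in 0x8 and masking with ~0x7ul leaves it
 * unchanged, selecting m_seq[1]/m_seq[2] as the 64-bit word. If m_seq[0]
 * lands on an 8-byte boundary, &m_seq[1] ends in 0x4 and rounds back down,
 * selecting m_seq[0]/m_seq[1].
 */
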
#ifndef BUILDING_VARIANT /* [ */
#ifndef OS_UP_VARIANT_ONLY

#define BLOCK_FAIL_PLOCKSTAT    0
#define BLOCK_SUCCESS_PLOCKSTAT 1

#ifdef PLOCKSTAT
/* This function is never called; it exists solely to provide never-fired
 * dtrace probes so that user D scripts don't get errors.
 */
PTHREAD_NOEXPORT PTHREAD_USED
void
_plockstat_never_fired(void)
{
	PLOCKSTAT_MUTEX_SPIN(NULL);
	PLOCKSTAT_MUTEX_SPUN(NULL, 0, 0);
}
#endif // PLOCKSTAT

/*
 * Initialize a mutex variable, possibly with additional attributes.
 * Public interface - so don't trust the lock - initialize it first.
 */
int
pthread_mutex_init(pthread_mutex_t *omutex, const pthread_mutexattr_t *attr)
{
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
#if 0
	/* conformance tests depend on not having this behavior */
	/* The test for this behavior is optional */
	if (_pthread_mutex_check_signature(mutex))
		return EBUSY;
#endif
	LOCK_INIT(mutex->lock);
	return (_pthread_mutex_init(mutex, attr, 0x7));
}

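/*
 * Minimal usage sketch (editor's addition; standard POSIX API, not specific
 * to this implementation): a mutex is either statically initialized with
 * PTHREAD_MUTEX_INITIALIZER or dynamically with pthread_mutex_init(), and a
 * dynamically initialized mutex should be destroyed when no longer needed.
 *
 *	pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;	// static init
 *
 *	pthread_mutex_t m2;
 *	pthread_mutex_init(&m2, NULL);			// dynamic init, default attrs
 *	pthread_mutex_lock(&m2);
 *	// ... critical section ...
 *	pthread_mutex_unlock(&m2);
 *	pthread_mutex_destroy(&m2);
 */
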
int
pthread_mutex_getprioceiling(const pthread_mutex_t *omutex, int *prioceiling)
{
	int res = EINVAL;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (_pthread_mutex_check_signature(mutex)) {
		LOCK(mutex->lock);
		*prioceiling = mutex->prioceiling;
		res = 0;
		UNLOCK(mutex->lock);
	}
	return res;
}

int
pthread_mutex_setprioceiling(pthread_mutex_t *omutex, int prioceiling, int *old_prioceiling)
{
	int res = EINVAL;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (_pthread_mutex_check_signature(mutex)) {
		LOCK(mutex->lock);
		// range check: both bounds must hold (|| made this always true)
		if (prioceiling >= -999 && prioceiling <= 999) {
			*old_prioceiling = mutex->prioceiling;
			mutex->prioceiling = prioceiling;
			res = 0;
		}
		UNLOCK(mutex->lock);
	}
	return res;
}

int
pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *attr, int *prioceiling)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*prioceiling = attr->prioceiling;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_getprotocol(const pthread_mutexattr_t *attr, int *protocol)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*protocol = attr->protocol;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*type = attr->type;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_getpshared(const pthread_mutexattr_t *attr, int *pshared)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*pshared = (int)attr->pshared;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
	attr->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
	attr->protocol = _PTHREAD_DEFAULT_PROTOCOL;
	attr->policy = _PTHREAD_MUTEX_POLICY_FAIRSHARE;
	attr->type = PTHREAD_MUTEX_DEFAULT;
	attr->sig = _PTHREAD_MUTEX_ATTR_SIG;
	attr->pshared = _PTHREAD_DEFAULT_PSHARED;
	return 0;
}

int
pthread_mutexattr_setprioceiling(pthread_mutexattr_t *attr, int prioceiling)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		// range check: both bounds must hold (|| made this always true)
		if (prioceiling >= -999 && prioceiling <= 999) {
			attr->prioceiling = prioceiling;
			res = 0;
		}
	}
	return res;
}

int
pthread_mutexattr_setprotocol(pthread_mutexattr_t *attr, int protocol)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (protocol) {
		case PTHREAD_PRIO_NONE:
		case PTHREAD_PRIO_INHERIT:
		case PTHREAD_PRIO_PROTECT:
			attr->protocol = protocol;
			res = 0;
			break;
		}
	}
	return res;
}

int
pthread_mutexattr_setpolicy_np(pthread_mutexattr_t *attr, int policy)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (policy) {
		case _PTHREAD_MUTEX_POLICY_FAIRSHARE:
		case _PTHREAD_MUTEX_POLICY_FIRSTFIT:
			attr->policy = policy;
			res = 0;
			break;
		}
	}
	return res;
}

int
pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (type) {
		case PTHREAD_MUTEX_NORMAL:
		case PTHREAD_MUTEX_ERRORCHECK:
		case PTHREAD_MUTEX_RECURSIVE:
		//case PTHREAD_MUTEX_DEFAULT:
			attr->type = type;
			res = 0;
			break;
		}
	}
	return res;
}

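/*
 * Attribute usage sketch (editor's addition; standard POSIX calls plus the
 * Apple-specific pthread_mutexattr_setpolicy_np above, using the policy
 * constant this file itself uses): configure a recursive, first-fit mutex.
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *	pthread_mutexattr_setpolicy_np(&attr, _PTHREAD_MUTEX_POLICY_FIRSTFIT);
 *
 *	pthread_mutex_t m;
 *	pthread_mutex_init(&m, &attr);
 *	pthread_mutexattr_destroy(&attr);	// the mutex keeps its own copy
 */
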
// XXX remove
void
cthread_yield(void)
{
	sched_yield();
}

void
pthread_yield_np(void)
{
	sched_yield();
}


/*
 * Temporary: until pshared is fixed correctly.
 */
int
pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
{
	int res = EINVAL;
#if __DARWIN_UNIX03
	if (__unix_conforming == 0) {
		__unix_conforming = 1;
	}
#endif /* __DARWIN_UNIX03 */

	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
#if __DARWIN_UNIX03
		if ((pshared == PTHREAD_PROCESS_PRIVATE) || (pshared == PTHREAD_PROCESS_SHARED))
#else /* __DARWIN_UNIX03 */
		if (pshared == PTHREAD_PROCESS_PRIVATE)
#endif /* __DARWIN_UNIX03 */
		{
			attr->pshared = pshared;
			res = 0;
		}
	}
	return res;
}

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int
_pthread_mutex_corruption_abort(_pthread_mutex *mutex);

PTHREAD_NOINLINE
int
_pthread_mutex_corruption_abort(_pthread_mutex *mutex)
{
	// pass the mutex pointer; the format string expects a %p argument
	PTHREAD_ABORT("pthread_mutex corruption: mutex %p owner changed in the middle of lock/unlock", mutex);
	return EINVAL; // NOTREACHED
}

/*
 * Sequence numbers and TID:
 *
 * In steady (and uncontended) state, an unlocked mutex will
 * look like A=[L4 U4 TID0]. When it is being locked, it transitions
 * to B=[L5+KE U4 TID0] and then C=[L5+KE U4 TID940]. For an uncontended mutex,
 * the unlock path will then transition to D=[L5 U4 TID0] and then finally
 * E=[L5 U5 TID0].
 *
 * If a contender comes in after B, the mutex will instead transition to F=[L6+KE U4 TID0]
 * and then G=[L6+KE U4 TID940]. If a contender comes in after C, it will transition to
 * G=[L6+KE U4 TID940] directly. In both cases, the contender will enter the kernel with either
 * mutexwait(U4, TID0) or mutexwait(U4, TID940). The first owner will unlock the mutex
 * by first updating the owner to H=[L6+KE U4 TID-1] and then doing the actual unlock to
 * I=[L6+KE U5 TID-1] before entering the kernel with mutexdrop(U5, -1) to signal the next waiter
 * (potentially as a prepost). When the waiter comes out of the kernel, it will update the owner to
 * J=[L6+KE U5 TID941]. An unlock at this point is simply K=[L6 U5 TID0] and then L=[L6 U6 TID0].
 *
 * At various points along these timelines, since the sequence words and TID are written
 * independently, a thread may get preempted and another thread might see inconsistent data.
 * In the worst case, another thread may see the TID in the SWITCHING (-1) state or unlocked (0)
 * state for longer because the owning thread was preempted.
 */

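/*
 * Concretely (editor's sketch, using the conventions above): the two 32-bit
 * sequence words are read and CASed as a single 64-bit value, with the lock
 * generation ("L", lgenval, plus the K/E/P flag bits) in the low half and
 * the unlock generation ("U", ugenval) in the high half:
 *
 *	uint64_t val = *seqaddr;
 *	uint32_t lgenval = (uint32_t)val;		// L plus flag bits
 *	uint32_t ugenval = (uint32_t)(val >> 32);	// U
 *	int numwaiters = diff_genseq(lgenval, ugenval);	// pending lockers
 *
 * In state G=[L6+KE U4 ...] above, the generations differ by two increments:
 * the owner plus one contender are outstanding, so one unlock still leaves
 * a waiter to be signalled.
 */
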
/*
 * Drop the mutex unlock references from cond_wait or mutex_unlock.
 */
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_unlock_updatebits(_pthread_mutex *mutex, uint32_t *flagsp, uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
{
	bool firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
	uint32_t lgenval, ugenval, flags;
	uint64_t oldtid, newtid;
	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	flags = mutex->mtxopts.value;
	flags &= ~_PTHREAD_MTX_OPT_NOTIFY; // no notification by default

	if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
		uint64_t selfid = _pthread_selfid_direct();

		if (*tidaddr != selfid) {
			//PTHREAD_ABORT("dropping recur or error mutex not owned by the thread");
			PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, EPERM);
			return EPERM;
		} else if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE &&
				--mutex->mtxopts.options.lock_count) {
			PLOCKSTAT_MUTEX_RELEASE((pthread_mutex_t *)mutex, 1);
			if (flagsp != NULL) {
				*flagsp = flags;
			}
			return 0;
		}
	}

	uint64_t oldval64, newval64;
	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	bool clearprepost, clearnotify, spurious;
	do {
		oldval64 = *seqaddr;
		oldtid = *tidaddr;
		lgenval = (uint32_t)oldval64;
		ugenval = (uint32_t)(oldval64 >> 32);

		clearprepost = false;
		clearnotify = false;
		spurious = false;

		int numwaiters = diff_genseq(lgenval, ugenval); // pending waiters

		if (numwaiters == 0) {
			// spurious unlock; do not touch tid
			spurious = true;
		} else {
			ugenval += PTHRW_INC;

			if ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK)) {
				// our unlock sequence matches the lock sequence, so if the
				// CAS is successful, the mutex is unlocked

				/* do not reset Ibit, just K&E */
				lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
				clearnotify = true;
				newtid = 0; // clear owner
			} else {
				if (firstfit) {
					lgenval &= ~PTH_RWL_EBIT; // reset E bit so another can acquire meanwhile
					newtid = 0;
				} else {
					newtid = PTHREAD_MTX_TID_SWITCHING;
				}
				// need to signal others waiting for mutex
				flags |= _PTHREAD_MTX_OPT_NOTIFY;
			}

			if (newtid != oldtid) {
				// We're giving up the mutex one way or the other, so go ahead and update the
				// owner to SWITCHING or 0 so that once the CAS below succeeds, there is no
				// stale ownership information. If the CAS of the seqaddr fails, we may loop,
				// but it's still valid for the owner to be SWITCHING/0
				if (!os_atomic_cmpxchg(tidaddr, oldtid, newtid, relaxed)) {
					// we own this mutex, nobody should be updating it except us
					return _pthread_mutex_corruption_abort(mutex);
				}
			}
		}

		if (clearnotify || spurious) {
			flags &= ~_PTHREAD_MTX_OPT_NOTIFY;
			if (firstfit && ((lgenval & PTH_RWL_PBIT) != 0)) {
				clearprepost = true;
				lgenval &= ~PTH_RWL_PBIT;
			}
		}

		newval64 = (((uint64_t)ugenval) << 32);
		newval64 |= lgenval;

	} while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, release));

	if (clearprepost) {
		__psynch_cvclrprepost(mutex, lgenval, ugenval, 0, 0, lgenval, (flags | _PTHREAD_MTX_OPT_MUTEX));
	}

	if (mgenp != NULL) {
		*mgenp = lgenval;
	}
	if (ugenp != NULL) {
		*ugenp = ugenval;
	}
	if (pmtxp != NULL) {
		*pmtxp = (uint32_t *)mutex;
	}
	if (flagsp != NULL) {
		*flagsp = flags;
	}

	return 0;
}

PTHREAD_NOEXPORT
int
__mtx_droplock(_pthread_mutex *mutex, uint32_t *flagsp, uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
{
	return _pthread_mutex_unlock_updatebits(mutex, flagsp, pmtxp, mgenp, ugenp);
}

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_lock_updatebits(_pthread_mutex *mutex, uint64_t selfid)
{
	int res = 0;
	int firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
	int isebit = 0;

	uint32_t lgenval, ugenval;
	uint64_t oldval64, newval64;
	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
	uint64_t oldtid;
	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	do {
		do {
			oldval64 = *seqaddr;
			oldtid = *tidaddr;
			lgenval = (uint32_t)oldval64;
			ugenval = (uint32_t)(oldval64 >> 32);

			// E bit was set on first pass through the loop but is no longer
			// set. Apparently we spin until it arrives.
			// XXX: verify this is desired behavior.
		} while (isebit && (lgenval & PTH_RWL_EBIT) == 0);

		if (isebit) {
			// first fit mutex now has the E bit set. Return 1.
			res = 1;
			break;
		}

		if (firstfit) {
			isebit = (lgenval & PTH_RWL_EBIT) != 0;
		} else if ((lgenval & (PTH_RWL_KBIT|PTH_RWL_EBIT)) == (PTH_RWL_KBIT|PTH_RWL_EBIT)) {
			// fairshare mutex and the bits are already set, just update tid
			break;
		}

		// either first fit or no E bit set
		// update the bits
		lgenval |= PTH_RWL_KBIT | PTH_RWL_EBIT;

		newval64 = (((uint64_t)ugenval) << 32);
		newval64 |= lgenval;

		// commit the new sequence word with the K and E bits set.
		// Retry if the CAS fails, or if it succeeds with firstfit and the E bit already set.
	} while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, acquire) || (firstfit && isebit));

	if (res == 0) {
		if (!os_atomic_cmpxchg(tidaddr, oldtid, selfid, relaxed)) {
			// we own this mutex, nobody should be updating it except us
			return _pthread_mutex_corruption_abort(mutex);
		}
	}

	return res;
}

PTHREAD_NOINLINE
static int
__mtx_markprepost(_pthread_mutex *mutex, uint32_t updateval, int firstfit)
{
	uint32_t flags;
	uint32_t lgenval, ugenval;
	uint64_t oldval64, newval64;

	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	if (firstfit != 0 && (updateval & PTH_RWL_PBIT) != 0) {
		int clearprepost;
		do {
			clearprepost = 0;

			flags = mutex->mtxopts.value;

			oldval64 = *seqaddr;
			lgenval = (uint32_t)oldval64;
			ugenval = (uint32_t)(oldval64 >> 32);

			/* update the bits */
			if ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK)) {
				clearprepost = 1;
				lgenval &= ~PTH_RWL_PBIT;
			} else {
				lgenval |= PTH_RWL_PBIT;
			}
			newval64 = (((uint64_t)ugenval) << 32);
			newval64 |= lgenval;
		} while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, release));

		if (clearprepost != 0) {
			__psynch_cvclrprepost(mutex, lgenval, ugenval, 0, 0, lgenval, (flags | _PTHREAD_MTX_OPT_MUTEX));
		}
	}
	return 0;
}

PTHREAD_NOINLINE
static int
_pthread_mutex_check_init_slow(pthread_mutex_t *omutex)
{
	int res = EINVAL;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	if (_pthread_mutex_check_signature_init(mutex)) {
		LOCK(mutex->lock);
		if (_pthread_mutex_check_signature_init(mutex)) {
			// initialize a statically initialized mutex to provide
			// compatibility for misbehaving applications.
			// (unlock should not be the first operation on a mutex)
			res = _pthread_mutex_init(mutex, NULL, (mutex->sig & 0xf));
		} else if (_pthread_mutex_check_signature(mutex)) {
			res = 0;
		}
		UNLOCK(mutex->lock);
	} else if (_pthread_mutex_check_signature(mutex)) {
		res = 0;
	}
	if (res != 0) {
		PLOCKSTAT_MUTEX_ERROR(omutex, res);
	}
	return res;
}

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_check_init(pthread_mutex_t *omutex)
{
	int res = 0;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	if (!_pthread_mutex_check_signature(mutex)) {
		return _pthread_mutex_check_init_slow(omutex);
	}
	return res;
}

PTHREAD_NOINLINE
int
_pthread_mutex_lock_wait(pthread_mutex_t *omutex, uint64_t newval64, uint64_t oldtid)
{
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	uint32_t lgenval = (uint32_t)newval64;
	uint32_t ugenval = (uint32_t)(newval64 >> 32);

	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_selfid_direct();

	PLOCKSTAT_MUTEX_BLOCK(omutex);
	do {
		uint32_t updateval;
		do {
			updateval = __psynch_mutexwait(omutex, lgenval, ugenval, oldtid, mutex->mtxopts.value);
			oldtid = *tidaddr;
		} while (updateval == (uint32_t)-1);

		// returns 0 on successful update; in firstfit it may fail with 1
	} while (_pthread_mutex_lock_updatebits(mutex, selfid) == 1);
	PLOCKSTAT_MUTEX_BLOCKED(omutex, BLOCK_SUCCESS_PLOCKSTAT);

	return 0;
}

int
_pthread_mutex_lock_slow(pthread_mutex_t *omutex, bool trylock)
{
	int res;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	res = _pthread_mutex_check_init(omutex);
	if (res != 0) {
		return res;
	}

	uint64_t oldtid;
	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_selfid_direct();

	if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
		if (*tidaddr == selfid) {
			if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE) {
				if (mutex->mtxopts.options.lock_count < USHRT_MAX) {
					mutex->mtxopts.options.lock_count++;
					PLOCKSTAT_MUTEX_ACQUIRE(omutex, 1, 0);
					res = 0;
				} else {
					res = EAGAIN;
					PLOCKSTAT_MUTEX_ERROR(omutex, res);
				}
			} else if (trylock) { /* PTHREAD_MUTEX_ERRORCHECK */
				// <rdar://problem/16261552> as per OpenGroup, trylock cannot
				// return EDEADLK on a deadlock, it should return EBUSY.
				res = EBUSY;
				PLOCKSTAT_MUTEX_ERROR(omutex, res);
			} else { /* PTHREAD_MUTEX_ERRORCHECK */
				res = EDEADLK;
				PLOCKSTAT_MUTEX_ERROR(omutex, res);
			}
			return res;
		}
	}

	uint64_t oldval64, newval64;
	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	uint32_t lgenval, ugenval;
	bool gotlock = false;

	do {
		oldval64 = *seqaddr;
		oldtid = *tidaddr;
		lgenval = (uint32_t)oldval64;
		ugenval = (uint32_t)(oldval64 >> 32);

		gotlock = ((lgenval & PTH_RWL_EBIT) == 0);

		if (trylock && !gotlock) {
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS64 to ensure that nobody has unlocked concurrently.
		} else {
			// Increment the lock sequence number and force the lock into E+K
			// mode, whether "gotlock" is true or not.
			lgenval += PTHRW_INC;
			lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
		}

		newval64 = (((uint64_t)ugenval) << 32);
		newval64 |= lgenval;

		// commit the new state (no-op CAS in the failed-trylock case)
	} while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, acquire));

	if (gotlock) {
		os_atomic_store(tidaddr, selfid, relaxed);
		res = 0;
		DEBUG_TRACE(psynch_mutex_ulock, omutex, lgenval, ugenval, selfid);
		PLOCKSTAT_MUTEX_ACQUIRE(omutex, 0, 0);
	} else if (trylock) {
		res = EBUSY;
		DEBUG_TRACE(psynch_mutex_utrylock_failed, omutex, lgenval, ugenval, oldtid);
		PLOCKSTAT_MUTEX_ERROR(omutex, res);
	} else {
		res = _pthread_mutex_lock_wait(omutex, newval64, oldtid);
	}

	if (res == 0 && mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE) {
		mutex->mtxopts.options.lock_count = 1;
	}

	PLOCKSTAT_MUTEX_ACQUIRE(omutex, 0, 0);

	return res;
}

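/*
 * Behavior sketch for the errorcheck relock case handled above (editor's
 * addition, grounded in the branches in _pthread_mutex_lock_slow): with
 * PTHREAD_MUTEX_ERRORCHECK, relocking from the owning thread reports an
 * error instead of deadlocking, and per OpenGroup a trylock reports EBUSY
 * rather than EDEADLK.
 *
 *	pthread_mutexattr_t a;
 *	pthread_mutexattr_init(&a);
 *	pthread_mutexattr_settype(&a, PTHREAD_MUTEX_ERRORCHECK);
 *	pthread_mutex_t m;
 *	pthread_mutex_init(&m, &a);
 *	pthread_mutex_lock(&m);		// returns 0
 *	pthread_mutex_lock(&m);		// returns EDEADLK
 *	pthread_mutex_trylock(&m);	// returns EBUSY, not EDEADLK
 *	pthread_mutex_unlock(&m);	// returns 0
 */
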
#endif // OS_UP_VARIANT_ONLY

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_lock(pthread_mutex_t *omutex, bool trylock)
{
#if PLOCKSTAT || DEBUG_TRACE_POINTS
	if (PLOCKSTAT_MUTEX_ACQUIRE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED() ||
			DEBUG_TRACE_POINTS) {
		return _pthread_mutex_lock_slow(omutex, trylock);
	}
#endif
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (!_pthread_mutex_check_signature_fast(mutex)) {
		return _pthread_mutex_lock_slow(omutex, trylock);
	}

	uint64_t oldtid;
	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_selfid_direct();

	uint64_t oldval64, newval64;
	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	uint32_t lgenval, ugenval;
	bool gotlock = false;

	do {
		oldval64 = *seqaddr;
		oldtid = *tidaddr;
		lgenval = (uint32_t)oldval64;
		ugenval = (uint32_t)(oldval64 >> 32);

		gotlock = ((lgenval & PTH_RWL_EBIT) == 0);

		if (trylock && !gotlock) {
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS64 to ensure that nobody has unlocked concurrently.
		} else {
			// Increment the lock sequence number and force the lock into E+K
			// mode, whether "gotlock" is true or not.
			lgenval += PTHRW_INC;
			lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
		}

		newval64 = (((uint64_t)ugenval) << 32);
		newval64 |= lgenval;

		// commit the new state (no-op CAS in the failed-trylock case)
	} while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, acquire));

	if (os_fastpath(gotlock)) {
		os_atomic_store(tidaddr, selfid, relaxed);
		return 0;
	} else if (trylock) {
		return EBUSY;
	} else {
		return _pthread_mutex_lock_wait(omutex, newval64, oldtid);
	}
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_lock(pthread_mutex_t *mutex)
{
	return _pthread_mutex_lock(mutex, false);
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	return _pthread_mutex_lock(mutex, true);
}

#ifndef OS_UP_VARIANT_ONLY
/*
 * Unlock a mutex.
 * TODO: Priority inheritance stuff
 */

PTHREAD_NOINLINE
static int
_pthread_mutex_unlock_drop(pthread_mutex_t *omutex, uint64_t newval64, uint32_t flags)
{
	int res;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	uint32_t lgenval = (uint32_t)newval64;
	uint32_t ugenval = (uint32_t)(newval64 >> 32);

	uint32_t updateval;
	int firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	updateval = __psynch_mutexdrop(omutex, lgenval, ugenval, *tidaddr, flags);

	if (updateval == (uint32_t)-1) {
		res = errno;

		if (res == EINTR) {
			res = 0;
		}
		if (res != 0) {
			PTHREAD_ABORT("__psynch_mutexdrop failed with error %d", res);
		}
		return res;
	} else if (firstfit == 1) {
		if ((updateval & PTH_RWL_PBIT) != 0) {
			__mtx_markprepost(mutex, updateval, firstfit);
		}
	}

	return 0;
}

int
_pthread_mutex_unlock_slow(pthread_mutex_t *omutex)
{
	int res;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	uint32_t mtxgen, mtxugen, flags;

	// Initialize static mutexes for compatibility with misbehaving
	// applications (unlock should not be the first operation on a mutex).
	res = _pthread_mutex_check_init(omutex);
	if (res != 0) {
		return res;
	}

	res = _pthread_mutex_unlock_updatebits(mutex, &flags, NULL, &mtxgen, &mtxugen);
	if (res != 0) {
		return res;
	}

	if ((flags & _PTHREAD_MTX_OPT_NOTIFY) != 0) {
		uint64_t newval64;
		newval64 = (((uint64_t)mtxugen) << 32);
		newval64 |= mtxgen;
		return _pthread_mutex_unlock_drop(omutex, newval64, flags);
	} else {
		volatile uint64_t *tidaddr;
		MUTEX_GETTID_ADDR(mutex, &tidaddr);
		DEBUG_TRACE(psynch_mutex_uunlock, omutex, mtxgen, mtxugen, *tidaddr);
	}

	return 0;
}

#endif // OS_UP_VARIANT_ONLY

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_unlock(pthread_mutex_t *omutex)
{
#if PLOCKSTAT || DEBUG_TRACE_POINTS
	if (PLOCKSTAT_MUTEX_RELEASE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED() ||
			DEBUG_TRACE_POINTS) {
		return _pthread_mutex_unlock_slow(omutex);
	}
#endif
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (!_pthread_mutex_check_signature_fast(mutex)) {
		return _pthread_mutex_unlock_slow(omutex);
	}

	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	uint64_t oldval64, newval64;
	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	uint32_t lgenval, ugenval;

	do {
		oldval64 = *seqaddr;
		lgenval = (uint32_t)oldval64;
		ugenval = (uint32_t)(oldval64 >> 32);

		int numwaiters = diff_genseq(lgenval, ugenval); // pending waiters

		if (numwaiters == 0) {
			// spurious unlock; do not touch tid
		} else {
			ugenval += PTHRW_INC;

			if ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK)) {
				// our unlock sequence matches the lock sequence, so if the
				// CAS is successful, the mutex is unlocked

				/* do not reset Ibit, just K&E */
				lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
			} else {
				// waiters must be signalled; take the slow path
				return _pthread_mutex_unlock_slow(omutex);
			}

			// We're giving up the mutex one way or the other, so go ahead and update the owner
			// to 0 so that once the CAS below succeeds, there is no stale ownership information.
			// If the CAS of the seqaddr fails, we may loop, but it's still valid for the owner
			// to be 0.
			os_atomic_store(tidaddr, 0, relaxed);
		}

		newval64 = (((uint64_t)ugenval) << 32);
		newval64 |= lgenval;

	} while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, release));

	return 0;
}

#ifndef OS_UP_VARIANT_ONLY


static inline int
_pthread_mutex_init(_pthread_mutex *mutex, const pthread_mutexattr_t *attr, uint32_t static_type)
{
	if (attr) {
		if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
			return EINVAL;
		}
		mutex->prioceiling = attr->prioceiling;
		mutex->mtxopts.options.protocol = attr->protocol;
		mutex->mtxopts.options.policy = attr->policy;
		mutex->mtxopts.options.type = attr->type;
		mutex->mtxopts.options.pshared = attr->pshared;
	} else {
		switch (static_type) {
		case 1:
			mutex->mtxopts.options.type = PTHREAD_MUTEX_ERRORCHECK;
			break;
		case 2:
			mutex->mtxopts.options.type = PTHREAD_MUTEX_RECURSIVE;
			break;
		case 3:
			/* firstfit fall thru */
		case 7:
			mutex->mtxopts.options.type = PTHREAD_MUTEX_DEFAULT;
			break;
		default:
			return EINVAL;
		}

		mutex->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
		mutex->mtxopts.options.protocol = _PTHREAD_DEFAULT_PROTOCOL;
		if (static_type != 3) {
			mutex->mtxopts.options.policy = _PTHREAD_MUTEX_POLICY_FAIRSHARE;
		} else {
			mutex->mtxopts.options.policy = _PTHREAD_MUTEX_POLICY_FIRSTFIT;
		}
		mutex->mtxopts.options.pshared = _PTHREAD_DEFAULT_PSHARED;
	}

	mutex->mtxopts.options.notify = 0;
	mutex->mtxopts.options.unused = 0;
	mutex->mtxopts.options.hold = 0;
	mutex->mtxopts.options.mutex = 1;
	mutex->mtxopts.options.lock_count = 0;

	mutex->m_tid[0] = 0;
	mutex->m_tid[1] = 0;
	mutex->m_seq[0] = 0;
	mutex->m_seq[1] = 0;
	mutex->m_seq[2] = 0;
	mutex->prioceiling = 0;
	mutex->priority = 0;

	mutex->mtxopts.options.misalign = (((uintptr_t)&mutex->m_seq[0]) & 0x7ul) != 0;
	if (mutex->mtxopts.options.misalign) {
		mutex->m_tid[0] = ~0u;
	} else {
		mutex->m_seq[2] = ~0u;
	}

	long sig = _PTHREAD_MUTEX_SIG;
	if (mutex->mtxopts.options.type == PTHREAD_MUTEX_NORMAL &&
			mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FAIRSHARE) {
		// rdar://18148854 _pthread_mutex_lock & pthread_mutex_unlock fastpath
		sig = _PTHREAD_MUTEX_SIG_fast;
	}

	// unused, purely for detecting copied mutexes and smashes during debugging:
	mutex->reserved2[0] = ~(uintptr_t)mutex; // use ~ to hide from leaks
	mutex->reserved2[1] = (uintptr_t)sig;

	// Ensure all contents are properly set before setting signature.
#if defined(__LP64__)
	// For binary compatibility reasons we cannot require natural alignment of
	// the 64bit 'sig' long value in the struct. rdar://problem/21610439
	uint32_t *sig32_ptr = (uint32_t*)&mutex->sig;
	uint32_t *sig32_val = (uint32_t*)&sig;
	*(sig32_ptr + 1) = *(sig32_val + 1);
	os_atomic_store(sig32_ptr, *sig32_val, release);
#else
	os_atomic_store2o(mutex, sig, sig, release);
#endif

	return 0;
}

int
pthread_mutex_destroy(pthread_mutex_t *omutex)
{
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	int res = EINVAL;

	LOCK(mutex->lock);
	if (_pthread_mutex_check_signature(mutex)) {
		uint32_t lgenval, ugenval;
		uint64_t oldval64;
		volatile uint64_t *seqaddr;
		MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
		volatile uint64_t *tidaddr;
		MUTEX_GETTID_ADDR(mutex, &tidaddr);

		oldval64 = *seqaddr;
		lgenval = (uint32_t)oldval64;
		ugenval = (uint32_t)(oldval64 >> 32);
		if ((*tidaddr == (uint64_t)0) &&
				((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK))) {
			mutex->sig = _PTHREAD_NO_SIG;
			res = 0;
		} else {
			res = EBUSY;
		}
	} else if (_pthread_mutex_check_signature_init(mutex)) {
		mutex->sig = _PTHREAD_NO_SIG;
		res = 0;
	}
	UNLOCK(mutex->lock);

	return res;
}

#endif // OS_UP_VARIANT_ONLY

#endif /* !BUILDING_VARIANT ] */

#ifndef OS_UP_VARIANT_ONLY
/*
 * Destroy a mutex attribute structure.
 */
int
pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
#if __DARWIN_UNIX03
	if (__unix_conforming == 0) {
		__unix_conforming = 1;
	}
	if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
		return EINVAL;
	}
#endif /* __DARWIN_UNIX03 */

	attr->sig = _PTHREAD_NO_SIG;
	return 0;
}

#endif // OS_UP_VARIANT_ONLY