/*
 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 * All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */
/*
 * MkLinux
 */

/*
 * POSIX Pthread Library
 * -- Mutex variable support
 */

#include "resolver.h"
#include "internal.h"
#include "kern/kern_trace.h"

extern int __unix_conforming;

#ifndef BUILDING_VARIANT /* [ */

#ifdef PLOCKSTAT
#include "plockstat.h"
/* This function is never called and exists to provide never-fired dtrace
 * probes so that user d scripts don't get errors.
 */
PTHREAD_NOEXPORT PTHREAD_USED
void
_plockstat_never_fired(void)
{
	PLOCKSTAT_MUTEX_SPIN(NULL);
	PLOCKSTAT_MUTEX_SPUN(NULL, 0, 0);
}
#else /* !PLOCKSTAT */
#define PLOCKSTAT_MUTEX_SPIN(x)
#define PLOCKSTAT_MUTEX_SPUN(x, y, z)
#define PLOCKSTAT_MUTEX_ERROR(x, y)
#define PLOCKSTAT_MUTEX_BLOCK(x)
#define PLOCKSTAT_MUTEX_BLOCKED(x, y)
#define PLOCKSTAT_MUTEX_ACQUIRE(x, y, z)
#define PLOCKSTAT_MUTEX_RELEASE(x, y)
#endif /* PLOCKSTAT */

#define BLOCK_FAIL_PLOCKSTAT 0
#define BLOCK_SUCCESS_PLOCKSTAT 1

#define PTHREAD_MUTEX_INIT_UNUSED 1

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int _pthread_mutex_lock_slow(pthread_mutex_t *omutex, bool trylock);

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int _pthread_mutex_unlock_slow(pthread_mutex_t *omutex);

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int _pthread_mutex_corruption_abort(_pthread_mutex *mutex);

extern int __pthread_mutex_default_policy PTHREAD_NOEXPORT;


int __pthread_mutex_default_policy PTHREAD_NOEXPORT =
		_PTHREAD_MUTEX_POLICY_FAIRSHARE;

PTHREAD_NOEXPORT
void
_pthread_mutex_global_init(const char *envp[],
		struct _pthread_registration_data *registration_data)
{
	const char *envvar = _simple_getenv(envp, "PTHREAD_MUTEX_DEFAULT_POLICY");
	if ((envvar && (envvar[0] - '0') == _PTHREAD_MUTEX_POLICY_FIRSTFIT) ||
			(registration_data->mutex_default_policy ==
			_PTHREAD_MUTEX_POLICY_FIRSTFIT)) {
		__pthread_mutex_default_policy = _PTHREAD_MUTEX_POLICY_FIRSTFIT;
	}
}
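
/*
 * Usage note (illustrative, not part of the original source): the default
 * policy can be switched to first-fit process-wide by exporting
 * PTHREAD_MUTEX_DEFAULT_POLICY in the environment before exec. Only the
 * first character of the value is examined and compared against
 * _PTHREAD_MUTEX_POLICY_FIRSTFIT, so the variable is expected to hold that
 * constant's numeric value as a single decimal digit.
 */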

PTHREAD_ALWAYS_INLINE
static inline int _pthread_mutex_init(_pthread_mutex *mutex,
		const pthread_mutexattr_t *attr, uint32_t static_type);

typedef union mutex_seq {
	uint32_t seq[2];
	struct { uint32_t lgenval; uint32_t ugenval; };
	struct { uint32_t mgen; uint32_t ugen; };
	uint64_t seq_LU;
	uint64_t _Atomic atomic_seq_LU;
} mutex_seq;

_Static_assert(sizeof(mutex_seq) == 2 * sizeof(uint32_t),
		"Incorrect mutex_seq size");

#if !__LITTLE_ENDIAN__
#error MUTEX_GETSEQ_ADDR assumes little endian layout of 2 32-bit sequence words
#endif

PTHREAD_ALWAYS_INLINE
static inline void
MUTEX_GETSEQ_ADDR(_pthread_mutex *mutex, mutex_seq **seqaddr)
{
	// 64-bit aligned address inside m_seq array (&m_seq[0] for aligned mutex)
	// We don't require more than byte alignment on OS X. rdar://22278325
	*seqaddr = (void *)(((uintptr_t)mutex->m_seq + 0x7ul) & ~0x7ul);
}

PTHREAD_ALWAYS_INLINE
static inline void
MUTEX_GETTID_ADDR(_pthread_mutex *mutex, uint64_t **tidaddr)
{
	// 64-bit aligned address inside m_tid array (&m_tid[0] for aligned mutex)
	// We don't require more than byte alignment on OS X. rdar://22278325
	*tidaddr = (void*)(((uintptr_t)mutex->m_tid + 0x7ul) & ~0x7ul);
}
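
/*
 * Worked example of the align-up above (illustrative, not from the original
 * source): if m_tid happens to start at address 0x1004, then
 * (0x1004 + 0x7) & ~0x7 == 0x1008, the first 8-byte aligned address inside
 * the array; if it already starts at 0x1008, the same expression yields
 * 0x1008 unchanged. The misaligned case is why the structure reserves spill
 * words (see the m_mis handling in _pthread_mutex_init below).
 */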

PTHREAD_ALWAYS_INLINE
static inline void
mutex_seq_load(mutex_seq *seqaddr, mutex_seq *oldseqval)
{
	oldseqval->seq_LU = seqaddr->seq_LU;
}

#define mutex_seq_atomic_load(seqaddr, oldseqval, m) \
		mutex_seq_atomic_load_##m(seqaddr, oldseqval)

PTHREAD_ALWAYS_INLINE
static inline bool
mutex_seq_atomic_cmpxchgv_relaxed(mutex_seq *seqaddr, mutex_seq *oldseqval,
		mutex_seq *newseqval)
{
	return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
			newseqval->seq_LU, &oldseqval->seq_LU, relaxed);
}

PTHREAD_ALWAYS_INLINE
static inline bool
mutex_seq_atomic_cmpxchgv_acquire(mutex_seq *seqaddr, mutex_seq *oldseqval,
		mutex_seq *newseqval)
{
	return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
			newseqval->seq_LU, &oldseqval->seq_LU, acquire);
}

PTHREAD_ALWAYS_INLINE
static inline bool
mutex_seq_atomic_cmpxchgv_release(mutex_seq *seqaddr, mutex_seq *oldseqval,
		mutex_seq *newseqval)
{
	return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
			newseqval->seq_LU, &oldseqval->seq_LU, release);
}

#define mutex_seq_atomic_cmpxchgv(seqaddr, oldseqval, newseqval, m)\
		mutex_seq_atomic_cmpxchgv_##m(seqaddr, oldseqval, newseqval)
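
/*
 * Illustrative expansion (not from the original source): with m = release,
 *
 *	mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, release)
 *
 * token-pastes to mutex_seq_atomic_cmpxchgv_release(seqaddr, &oldseq,
 * &newseq), which CASes both 32-bit sequence words as a single 64-bit unit
 * and, on failure, refreshes *oldseqval with the value actually observed so
 * the do/while loops below can simply recompute and retry.
 */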

/*
 * Initialize a mutex variable, possibly with additional attributes.
 * Public interface - so don't trust the lock - initialize it first.
 */
PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_init(pthread_mutex_t *omutex, const pthread_mutexattr_t *attr)
{
#if 0
	/* conformance tests depend on not having this behavior */
	/* The test for this behavior is optional */
	if (_pthread_mutex_check_signature(mutex))
		return EBUSY;
#endif
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	_PTHREAD_LOCK_INIT(mutex->lock);
	return (_pthread_mutex_init(mutex, attr, 0x7));
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_getprioceiling(const pthread_mutex_t *omutex, int *prioceiling)
{
	int res = EINVAL;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (_pthread_mutex_check_signature(mutex)) {
		_PTHREAD_LOCK(mutex->lock);
		*prioceiling = mutex->prioceiling;
		res = 0;
		_PTHREAD_UNLOCK(mutex->lock);
	}
	return res;
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_setprioceiling(pthread_mutex_t *omutex, int prioceiling,
		int *old_prioceiling)
{
	int res = EINVAL;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (_pthread_mutex_check_signature(mutex)) {
		_PTHREAD_LOCK(mutex->lock);
		if (prioceiling >= -999 && prioceiling <= 999) {
			*old_prioceiling = mutex->prioceiling;
			mutex->prioceiling = (int16_t)prioceiling;
			res = 0;
		}
		_PTHREAD_UNLOCK(mutex->lock);
	}
	return res;
}


int
pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *attr,
		int *prioceiling)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*prioceiling = attr->prioceiling;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_getprotocol(const pthread_mutexattr_t *attr, int *protocol)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*protocol = attr->protocol;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_getpolicy_np(const pthread_mutexattr_t *attr, int *policy)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*policy = attr->policy;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*type = attr->type;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_getpshared(const pthread_mutexattr_t *attr, int *pshared)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*pshared = (int)attr->pshared;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
	attr->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
	attr->protocol = _PTHREAD_DEFAULT_PROTOCOL;
	attr->policy = __pthread_mutex_default_policy;
	attr->type = PTHREAD_MUTEX_DEFAULT;
	attr->sig = _PTHREAD_MUTEX_ATTR_SIG;
	attr->pshared = _PTHREAD_DEFAULT_PSHARED;
	return 0;
}
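
/*
 * Illustrative caller-side sketch (not part of this file; guarded out so it
 * never compiles into the library): typical use of the attribute routines
 * above to create a recursive mutex.
 */
#if 0
#include <pthread.h>

static pthread_mutex_t g_mutex;

static int
make_recursive_mutex(void)
{
	pthread_mutexattr_t attr;
	int res = pthread_mutexattr_init(&attr); // defaults set as above
	if (res != 0) return res;
	res = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	if (res == 0) res = pthread_mutex_init(&g_mutex, &attr);
	pthread_mutexattr_destroy(&attr); // mutex keeps its own copy of the options
	return res;
}
#endif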

int
pthread_mutexattr_setprioceiling(pthread_mutexattr_t *attr, int prioceiling)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		if (prioceiling >= -999 && prioceiling <= 999) {
			attr->prioceiling = prioceiling;
			res = 0;
		}
	}
	return res;
}

int
pthread_mutexattr_setprotocol(pthread_mutexattr_t *attr, int protocol)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (protocol) {
		case PTHREAD_PRIO_NONE:
		case PTHREAD_PRIO_INHERIT:
		case PTHREAD_PRIO_PROTECT:
			attr->protocol = protocol;
			res = 0;
			break;
		}
	}
	return res;
}

int
pthread_mutexattr_setpolicy_np(pthread_mutexattr_t *attr, int policy)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (policy) {
		case _PTHREAD_MUTEX_POLICY_FAIRSHARE:
		case _PTHREAD_MUTEX_POLICY_FIRSTFIT:
			attr->policy = policy;
			res = 0;
			break;
		}
	}
	return res;
}
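
/*
 * Illustrative sketch (not part of this file): opting a single mutex into
 * the first-fit policy, under which a newly unlocked mutex may be grabbed
 * by whichever thread reaches it first rather than being handed to the
 * oldest kernel waiter as in the default fairshare policy. The constant is
 * the same internal-header value accepted by the switch above.
 */
#if 0
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
pthread_mutexattr_setpolicy_np(&attr, _PTHREAD_MUTEX_POLICY_FIRSTFIT);
#endif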

int
pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (type) {
		case PTHREAD_MUTEX_NORMAL:
		case PTHREAD_MUTEX_ERRORCHECK:
		case PTHREAD_MUTEX_RECURSIVE:
		//case PTHREAD_MUTEX_DEFAULT:
			attr->type = type;
			res = 0;
			break;
		}
	}
	return res;
}

int
pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
{
	int res = EINVAL;
#if __DARWIN_UNIX03
	if (__unix_conforming == 0) {
		__unix_conforming = 1;
	}
#endif /* __DARWIN_UNIX03 */

	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
#if __DARWIN_UNIX03
		if ((pshared == PTHREAD_PROCESS_PRIVATE) ||
				(pshared == PTHREAD_PROCESS_SHARED))
#else /* __DARWIN_UNIX03 */
		if (pshared == PTHREAD_PROCESS_PRIVATE)
#endif /* __DARWIN_UNIX03 */
		{
			attr->pshared = pshared;
			res = 0;
		}
	}
	return res;
}

PTHREAD_NOEXPORT PTHREAD_NOINLINE PTHREAD_NORETURN
int
_pthread_mutex_corruption_abort(_pthread_mutex *mutex)
{
	PTHREAD_ABORT("pthread_mutex corruption: mutex owner changed in the "
			"middle of lock/unlock");
}

/*
 * Sequence numbers and TID:
 *
 * Notation: [Ln Un TIDt] stands for a lock sequence count of n, an unlock
 * sequence count of n, and an owner tid word of t; "+KE" means the
 * PTH_RWL_KBIT and PTH_RWL_EBIT bits are set in lgenval.
 *
 * In steady (and uncontended) state, an unlocked mutex will
 * look like A=[L4 U4 TID0]. When it is being locked, it transitions
 * to B=[L5+KE U4 TID0] and then C=[L5+KE U4 TID940]. For an uncontended mutex,
 * the unlock path will then transition to D=[L5 U4 TID0] and then finally
 * E=[L5 U5 TID0].
 *
 * If a contender comes in after B, the mutex will instead transition to
 * E'=[L6+KE U4 TID0] and then F=[L6+KE U4 TID940]. If a contender comes in
 * after C, it will transition to F=[L6+KE U4 TID940] directly. In both cases,
 * the contender will enter the kernel with either mutexwait(U4, TID0) or
 * mutexwait(U4, TID940). The first owner will unlock the mutex by first
 * updating the owner to G=[L6+KE U4 TID-1] and then doing the actual unlock to
 * H=[L6+KE U5 TID-1] before entering the kernel with mutexdrop(U5, -1) to
 * signal the next waiter (potentially as a prepost). When the waiter comes out
 * of the kernel, it will update the owner to I=[L6+KE U5 TID941]. An unlock at
 * this point is simply J=[L6 U5 TID0] and then K=[L6 U6 TID0].
 *
 * At various points along these timelines, since the sequence words and TID
 * are written independently, a thread may get preempted and another thread
 * might see inconsistent data. In the worst case, another thread may see the
 * TID in the SWITCHING (-1) state or unlocked (0) state for longer because the
 * owning thread was preempted.
 */

/*
 * Drop the mutex unlock references from cond_wait or mutex_unlock.
 */
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_unlock_updatebits(_pthread_mutex *mutex, uint32_t *flagsp,
		uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
{
	bool firstfit = (mutex->mtxopts.options.policy ==
			_PTHREAD_MUTEX_POLICY_FIRSTFIT);
	uint32_t flags = mutex->mtxopts.value;
	flags &= ~_PTHREAD_MTX_OPT_NOTIFY; // no notification by default

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t oldtid, newtid;

	if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
		uint64_t selfid = _pthread_selfid_direct();
		if (os_atomic_load(tidaddr, relaxed) != selfid) {
			PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, EPERM);
			return EPERM;
		} else if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE &&
				--mutex->mtxopts.options.lock_count) {
			PLOCKSTAT_MUTEX_RELEASE((pthread_mutex_t *)mutex, 1);
			if (flagsp != NULL) {
				*flagsp = flags;
			}
			return 0;
		}
	}

	bool clearprepost, clearnotify, spurious;
	do {
		newseq = oldseq;
		oldtid = os_atomic_load(tidaddr, relaxed);

		clearprepost = false;
		clearnotify = false;
		spurious = false;

		// pending waiters
		int numwaiters = diff_genseq(oldseq.lgenval, oldseq.ugenval);
		if (numwaiters == 0) {
			// spurious unlock (unlock of unlocked lock)
			spurious = true;
		} else {
			newseq.ugenval += PTHRW_INC;

			if ((oldseq.lgenval & PTHRW_COUNT_MASK) ==
					(newseq.ugenval & PTHRW_COUNT_MASK)) {
				// our unlock sequence matches the lock sequence, so if the
				// CAS is successful, the mutex is unlocked

				/* do not reset Ibit, just K&E */
				newseq.lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
				clearnotify = true;
				newtid = 0; // clear owner
			} else {
				if (firstfit) {
					// reset E bit so another can acquire meanwhile
					newseq.lgenval &= ~PTH_RWL_EBIT;
					newtid = 0;
				} else {
					newtid = PTHREAD_MTX_TID_SWITCHING;
				}
				// need to signal others waiting for mutex
				flags |= _PTHREAD_MTX_OPT_NOTIFY;
			}

			if (newtid != oldtid) {
				// We're giving up the mutex one way or the other, so go ahead
				// and update the owner to 0 so that once the CAS below
				// succeeds, there is no stale ownership information. If the
				// CAS of the seqaddr fails, we may loop, but it's still valid
				// for the owner to be SWITCHING/0
				if (!os_atomic_cmpxchg(tidaddr, oldtid, newtid, relaxed)) {
					// we own this mutex, nobody should be updating it except us
					return _pthread_mutex_corruption_abort(mutex);
				}
			}
		}

		if (clearnotify || spurious) {
			flags &= ~_PTHREAD_MTX_OPT_NOTIFY;
			if (firstfit && (newseq.lgenval & PTH_RWL_PBIT)) {
				clearprepost = true;
				newseq.lgenval &= ~PTH_RWL_PBIT;
			}
		}
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, release));

	PTHREAD_TRACE(psynch_mutex_unlock_updatebits, mutex, oldseq.lgenval,
			newseq.lgenval, oldtid);

	if (clearprepost) {
		__psynch_cvclrprepost(mutex, newseq.lgenval, newseq.ugenval, 0, 0,
				newseq.lgenval, flags | _PTHREAD_MTX_OPT_MUTEX);
	}

	if (mgenp != NULL) {
		*mgenp = newseq.lgenval;
	}
	if (ugenp != NULL) {
		*ugenp = newseq.ugenval;
	}
	if (pmtxp != NULL) {
		*pmtxp = (uint32_t *)mutex;
	}
	if (flagsp != NULL) {
		*flagsp = flags;
	}

	return 0;
}
PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_mutex_droplock(_pthread_mutex *mutex, uint32_t *flagsp,
		uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
{
	return _pthread_mutex_unlock_updatebits(mutex, flagsp, pmtxp, mgenp, ugenp);
}

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_lock_updatebits(_pthread_mutex *mutex, uint64_t selfid)
{
	bool firstfit = (mutex->mtxopts.options.policy ==
			_PTHREAD_MUTEX_POLICY_FIRSTFIT);
	bool gotlock = true;

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t oldtid;

	do {
		newseq = oldseq;
		oldtid = os_atomic_load(tidaddr, relaxed);

		if (firstfit) {
			// firstfit locks can have the lock stolen out from under a locker
			// between the unlock from the kernel and this lock path. When this
			// happens, we still want to set the K bit before leaving the loop
			// (or notice if the lock unlocks while we try to update).
			gotlock = !is_rwl_ebit_set(oldseq.lgenval);
		} else if ((oldseq.lgenval & (PTH_RWL_KBIT | PTH_RWL_EBIT)) ==
				(PTH_RWL_KBIT | PTH_RWL_EBIT)) {
			// bits are already set, just update the owner tidaddr
			break;
		}

		newseq.lgenval |= PTH_RWL_KBIT | PTH_RWL_EBIT;
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			relaxed));

	if (gotlock) {
		if (!os_atomic_cmpxchg(tidaddr, oldtid, selfid, relaxed)) {
			// we own this mutex, nobody should be updating it except us
			return _pthread_mutex_corruption_abort(mutex);
		}
	}

	PTHREAD_TRACE(psynch_mutex_lock_updatebits, mutex, oldseq.lgenval,
			newseq.lgenval, oldtid);

	// failing to take the lock in firstfit returns 1 to force the caller
	// to wait in the kernel
	return gotlock ? 0 : 1;
}

PTHREAD_NOINLINE
static int
_pthread_mutex_markprepost(_pthread_mutex *mutex, uint32_t updateval)
{
	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	bool clearprepost;
	do {
		clearprepost = false;
		newseq = oldseq;

		/* update the bits */
		if ((oldseq.lgenval & PTHRW_COUNT_MASK) ==
				(oldseq.ugenval & PTHRW_COUNT_MASK)) {
			clearprepost = true;
			newseq.lgenval &= ~PTH_RWL_PBIT;
		} else {
			newseq.lgenval |= PTH_RWL_PBIT;
		}
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, relaxed));

	if (clearprepost) {
		__psynch_cvclrprepost(mutex, newseq.lgenval, newseq.ugenval, 0, 0,
				newseq.lgenval, mutex->mtxopts.value | _PTHREAD_MTX_OPT_MUTEX);
	}

	return 0;
}

PTHREAD_NOINLINE
static int
_pthread_mutex_check_init_slow(pthread_mutex_t *omutex)
{
	int res = EINVAL;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	if (_pthread_mutex_check_signature_init(mutex)) {
		_PTHREAD_LOCK(mutex->lock);
		if (_pthread_mutex_check_signature_init(mutex)) {
			// initialize a statically initialized mutex to provide
			// compatibility for misbehaving applications.
			// (unlock should not be the first operation on a mutex)
			res = _pthread_mutex_init(mutex, NULL, (mutex->sig & 0xf));
		} else if (_pthread_mutex_check_signature(mutex)) {
			res = 0;
		}
		_PTHREAD_UNLOCK(mutex->lock);
	} else if (_pthread_mutex_check_signature(mutex)) {
		res = 0;
	}
	if (res != 0) {
		PLOCKSTAT_MUTEX_ERROR(omutex, res);
	}
	return res;
}

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_check_init(pthread_mutex_t *omutex)
{
	int res = 0;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	if (!_pthread_mutex_check_signature(mutex)) {
		return _pthread_mutex_check_init_slow(omutex);
	}
	return res;
}
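
/*
 * Illustrative sketch (not part of this file): the lazy path above is what
 * makes a statically initialized mutex usable without an explicit
 * pthread_mutex_init() call; the low nibble of the static signature is the
 * static_type that _pthread_mutex_init() consumes when it finally runs.
 */
#if 0
#include <pthread.h>

static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;

void
f(void)
{
	// First use completes initialization via _pthread_mutex_check_init().
	pthread_mutex_lock(&g_lock);
	/* ... critical section ... */
	pthread_mutex_unlock(&g_lock);
}
#endif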

PTHREAD_NOINLINE
static int
_pthread_mutex_lock_wait(pthread_mutex_t *omutex, mutex_seq newseq,
		uint64_t oldtid)
{
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_selfid_direct();

	PLOCKSTAT_MUTEX_BLOCK(omutex);
	do {
		uint32_t updateval;
		do {
			updateval = __psynch_mutexwait(omutex, newseq.lgenval,
					newseq.ugenval, oldtid, mutex->mtxopts.value);
			oldtid = os_atomic_load(tidaddr, relaxed);
		} while (updateval == (uint32_t)-1);

		// returns 0 on successful update; in firstfit it may fail with 1
	} while (_pthread_mutex_lock_updatebits(mutex, selfid) == 1);
	PLOCKSTAT_MUTEX_BLOCKED(omutex, BLOCK_SUCCESS_PLOCKSTAT);

	return 0;
}

PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_mutex_lock_slow(pthread_mutex_t *omutex, bool trylock)
{
	int res, recursive = 0;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	res = _pthread_mutex_check_init(omutex);
	if (res != 0) return res;

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t oldtid, selfid = _pthread_selfid_direct();

	if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
		if (os_atomic_load(tidaddr, relaxed) == selfid) {
			if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE) {
				if (mutex->mtxopts.options.lock_count < USHRT_MAX) {
					mutex->mtxopts.options.lock_count++;
					recursive = 1;
					res = 0;
				} else {
					res = EAGAIN;
				}
			} else if (trylock) { /* PTHREAD_MUTEX_ERRORCHECK */
				// <rdar://problem/16261552> as per OpenGroup, trylock cannot
				// return EDEADLK on a deadlock, it should return EBUSY.
				res = EBUSY;
			} else { /* PTHREAD_MUTEX_ERRORCHECK */
				res = EDEADLK;
			}
			goto out;
		}
	}

	bool gotlock;
	do {
		newseq = oldseq;
		oldtid = os_atomic_load(tidaddr, relaxed);

		gotlock = ((oldseq.lgenval & PTH_RWL_EBIT) == 0);

		if (trylock && !gotlock) {
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS64 to ensure that nobody has unlocked concurrently.
		} else {
			// Increment the lock sequence number and force the lock into E+K
			// mode, whether "gotlock" is true or not.
			newseq.lgenval += PTHRW_INC;
			newseq.lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
		}
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, acquire));

	PTHREAD_TRACE(psynch_mutex_lock_updatebits, omutex, oldseq.lgenval,
			newseq.lgenval, 0);

	if (gotlock) {
		os_atomic_store(tidaddr, selfid, relaxed);
		res = 0;
		PTHREAD_TRACE(psynch_mutex_ulock, omutex, newseq.lgenval,
				newseq.ugenval, selfid);
	} else if (trylock) {
		res = EBUSY;
		PTHREAD_TRACE(psynch_mutex_utrylock_failed, omutex, newseq.lgenval,
				newseq.ugenval, oldtid);
	} else {
		PTHREAD_TRACE(psynch_mutex_ulock | DBG_FUNC_START, omutex,
				newseq.lgenval, newseq.ugenval, oldtid);
		res = _pthread_mutex_lock_wait(omutex, newseq, oldtid);
		PTHREAD_TRACE(psynch_mutex_ulock | DBG_FUNC_END, omutex,
				newseq.lgenval, newseq.ugenval, oldtid);
	}

	if (res == 0 && mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE) {
		mutex->mtxopts.options.lock_count = 1;
	}

out:
#if PLOCKSTAT
	if (res == 0) {
		PLOCKSTAT_MUTEX_ACQUIRE(omutex, recursive, 0);
	} else {
		PLOCKSTAT_MUTEX_ERROR(omutex, res);
	}
#endif

	return res;
}

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_lock(pthread_mutex_t *omutex, bool trylock)
{
#if ENABLE_USERSPACE_TRACE
	return _pthread_mutex_lock_slow(omutex, trylock);
#elif PLOCKSTAT
	if (PLOCKSTAT_MUTEX_ACQUIRE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED()) {
		return _pthread_mutex_lock_slow(omutex, trylock);
	}
#endif

	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (os_unlikely(!_pthread_mutex_check_signature_fast(mutex))) {
		return _pthread_mutex_lock_slow(omutex, trylock);
	}

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_selfid_direct();

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	if (os_unlikely(oldseq.lgenval & PTH_RWL_EBIT)) {
		return _pthread_mutex_lock_slow(omutex, trylock);
	}

	bool gotlock;
	do {
		newseq = oldseq;

		gotlock = ((oldseq.lgenval & PTH_RWL_EBIT) == 0);

		if (trylock && !gotlock) {
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS64 to ensure that nobody has unlocked concurrently.
		} else if (os_likely(gotlock)) {
			// Increment the lock sequence number and force the lock into
			// E+K mode.
			newseq.lgenval += PTHRW_INC;
			newseq.lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
		} else {
			return _pthread_mutex_lock_slow(omutex, trylock);
		}
	} while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			acquire)));

	if (os_likely(gotlock)) {
		os_atomic_store(tidaddr, selfid, relaxed);
		return 0;
	} else if (trylock) {
		return EBUSY;
	} else {
		__builtin_trap();
	}
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_lock(pthread_mutex_t *mutex)
{
	return _pthread_mutex_lock(mutex, false);
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	return _pthread_mutex_lock(mutex, true);
}
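
/*
 * Illustrative caller-side sketch (not part of this file): per POSIX,
 * pthread_mutex_trylock() returns EBUSY instead of blocking when the mutex
 * is already held (and, for an error-checking mutex the caller already owns,
 * EBUSY rather than EDEADLK, as noted in _pthread_mutex_lock_slow above).
 */
#if 0
#include <errno.h>
#include <pthread.h>

void
try_enter(pthread_mutex_t *mu)
{
	int res = pthread_mutex_trylock(mu);
	if (res == 0) {
		/* ... critical section ... */
		pthread_mutex_unlock(mu);
	} else if (res == EBUSY) {
		/* held elsewhere: do useful work instead of blocking */
	}
}
#endif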

/*
 * Unlock a mutex.
 * TODO: Priority inheritance stuff
 */

PTHREAD_NOINLINE
static int
_pthread_mutex_unlock_drop(pthread_mutex_t *omutex, mutex_seq newseq,
		uint32_t flags)
{
	int res;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	uint32_t updateval;

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	PTHREAD_TRACE(psynch_mutex_uunlock | DBG_FUNC_START, omutex, newseq.lgenval,
			newseq.ugenval, os_atomic_load(tidaddr, relaxed));

	updateval = __psynch_mutexdrop(omutex, newseq.lgenval, newseq.ugenval,
			os_atomic_load(tidaddr, relaxed), flags);

	PTHREAD_TRACE(psynch_mutex_uunlock | DBG_FUNC_END, omutex, updateval, 0, 0);

	if (updateval == (uint32_t)-1) {
		res = errno;

		if (res == EINTR) {
			res = 0;
		}
		if (res != 0) {
			PTHREAD_ABORT("__psynch_mutexdrop failed with error %d", res);
		}
		return res;
	} else if ((mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT)
			&& (updateval & PTH_RWL_PBIT)) {
		return _pthread_mutex_markprepost(mutex, updateval);
	}

	return 0;
}

PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_mutex_unlock_slow(pthread_mutex_t *omutex)
{
	int res;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	mutex_seq newseq;
	uint32_t flags;

	// Initialize static mutexes for compatibility with misbehaving
	// applications (unlock should not be the first operation on a mutex).
	res = _pthread_mutex_check_init(omutex);
	if (res != 0) return res;

	res = _pthread_mutex_unlock_updatebits(mutex, &flags, NULL, &newseq.lgenval,
			&newseq.ugenval);
	if (res != 0) return res;

	if ((flags & _PTHREAD_MTX_OPT_NOTIFY) != 0) {
		return _pthread_mutex_unlock_drop(omutex, newseq, flags);
	} else {
		uint64_t *tidaddr;
		MUTEX_GETTID_ADDR(mutex, &tidaddr);
		PTHREAD_TRACE(psynch_mutex_uunlock, omutex, newseq.lgenval,
				newseq.ugenval, os_atomic_load(tidaddr, relaxed));
	}

	return 0;
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_unlock(pthread_mutex_t *omutex)
{
#if ENABLE_USERSPACE_TRACE
	return _pthread_mutex_unlock_slow(omutex);
#elif PLOCKSTAT
	if (PLOCKSTAT_MUTEX_RELEASE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED()) {
		return _pthread_mutex_unlock_slow(omutex);
	}
#endif
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (os_unlikely(!_pthread_mutex_check_signature_fast(mutex))) {
		return _pthread_mutex_unlock_slow(omutex);
	}

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	int numwaiters = diff_genseq(oldseq.lgenval, oldseq.ugenval);
	if (os_unlikely(numwaiters == 0)) {
		// spurious unlock (unlock of unlocked lock)
		return 0;
	}

	// We're giving up the mutex one way or the other, so go ahead and
	// update the owner to 0 so that once the CAS below succeeds, there
	// is no stale ownership information. If the CAS of the seqaddr
	// fails, we may loop, but it's still valid for the owner to be
	// SWITCHING/0
	os_atomic_store(tidaddr, 0, relaxed);

	do {
		newseq = oldseq;
		newseq.ugenval += PTHRW_INC;

		if (os_likely((oldseq.lgenval & PTHRW_COUNT_MASK) ==
				(newseq.ugenval & PTHRW_COUNT_MASK))) {
			// our unlock sequence matches the lock sequence, so if the
			// CAS is successful, the mutex is unlocked

			// do not reset Ibit, just K&E
			newseq.lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
		} else {
			return _pthread_mutex_unlock_slow(omutex);
		}
	} while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			release)));

	return 0;
}


PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_init(_pthread_mutex *mutex, const pthread_mutexattr_t *attr,
		uint32_t static_type)
{
	mutex->mtxopts.value = 0;
	mutex->mtxopts.options.mutex = 1;
	if (attr) {
		if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
			return EINVAL;
		}
		mutex->prioceiling = (int16_t)attr->prioceiling;
		mutex->mtxopts.options.protocol = attr->protocol;
		mutex->mtxopts.options.policy = attr->policy;
		mutex->mtxopts.options.type = attr->type;
		mutex->mtxopts.options.pshared = attr->pshared;
	} else {
		switch (static_type) {
		case 1:
			mutex->mtxopts.options.type = PTHREAD_MUTEX_ERRORCHECK;
			break;
		case 2:
			mutex->mtxopts.options.type = PTHREAD_MUTEX_RECURSIVE;
			break;
		case 3:
			/* firstfit fall thru */
		case 7:
			mutex->mtxopts.options.type = PTHREAD_MUTEX_DEFAULT;
			break;
		default:
			return EINVAL;
		}

		mutex->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
		mutex->mtxopts.options.protocol = _PTHREAD_DEFAULT_PROTOCOL;
		if (static_type != 3) {
			mutex->mtxopts.options.policy = __pthread_mutex_default_policy;
		} else {
			mutex->mtxopts.options.policy = _PTHREAD_MUTEX_POLICY_FIRSTFIT;
		}
		mutex->mtxopts.options.pshared = _PTHREAD_DEFAULT_PSHARED;
	}
	mutex->priority = 0;

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

#if PTHREAD_MUTEX_INIT_UNUSED
	if ((uint32_t*)tidaddr != mutex->m_tid) {
		mutex->mtxopts.options.misalign = 1;
		__builtin_memset(mutex->m_tid, 0xff, sizeof(mutex->m_tid));
	}
	__builtin_memset(mutex->m_mis, 0xff, sizeof(mutex->m_mis));
#endif // PTHREAD_MUTEX_INIT_UNUSED
	*tidaddr = 0;
	*seqaddr = (mutex_seq){ };

	long sig = _PTHREAD_MUTEX_SIG;
	if (mutex->mtxopts.options.type == PTHREAD_MUTEX_NORMAL &&
			mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FAIRSHARE) {
		// rdar://18148854 _pthread_mutex_lock & pthread_mutex_unlock fastpath
		sig = _PTHREAD_MUTEX_SIG_fast;
	}

#if PTHREAD_MUTEX_INIT_UNUSED
	// For detecting copied mutexes and smashes during debugging
	uint32_t sig32 = (uint32_t)sig;
#if defined(__LP64__)
	uintptr_t guard = ~(uintptr_t)mutex; // use ~ to hide from leaks
	__builtin_memcpy(mutex->_reserved, &guard, sizeof(guard));
	mutex->_reserved[2] = sig32;
	mutex->_reserved[3] = sig32;
	mutex->_pad = sig32;
#else
	mutex->_reserved[0] = sig32;
#endif
#endif // PTHREAD_MUTEX_INIT_UNUSED

	// Ensure all contents are properly set before setting signature.
#if defined(__LP64__)
	// For binary compatibility reasons we cannot require natural alignment of
	// the 64bit 'sig' long value in the struct. rdar://problem/21610439
	uint32_t *sig32_ptr = (uint32_t*)&mutex->sig;
	uint32_t *sig32_val = (uint32_t*)&sig;
	*(sig32_ptr + 1) = *(sig32_val + 1);
	os_atomic_store(sig32_ptr, *sig32_val, release);
#else
	os_atomic_store2o(mutex, sig, sig, release);
#endif

	return 0;
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_destroy(pthread_mutex_t *omutex)
{
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	int res = EINVAL;

	_PTHREAD_LOCK(mutex->lock);
	if (_pthread_mutex_check_signature(mutex)) {
		mutex_seq *seqaddr;
		MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

		mutex_seq seq;
		mutex_seq_load(seqaddr, &seq);

		uint64_t *tidaddr;
		MUTEX_GETTID_ADDR(mutex, &tidaddr);

		if ((os_atomic_load(tidaddr, relaxed) == 0) &&
				(seq.lgenval & PTHRW_COUNT_MASK) ==
				(seq.ugenval & PTHRW_COUNT_MASK)) {
			mutex->sig = _PTHREAD_NO_SIG;
			res = 0;
		} else {
			res = EBUSY;
		}
	} else if (_pthread_mutex_check_signature_init(mutex)) {
		mutex->sig = _PTHREAD_NO_SIG;
		res = 0;
	}
	_PTHREAD_UNLOCK(mutex->lock);

	return res;
}

#endif /* !BUILDING_VARIANT ] */

/*
 * Destroy a mutex attribute structure.
 */
int
pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
#if __DARWIN_UNIX03
	if (__unix_conforming == 0) {
		__unix_conforming = 1;
	}
	if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
		return EINVAL;
	}
#endif /* __DARWIN_UNIX03 */

	attr->sig = _PTHREAD_NO_SIG;
	return 0;
}