]> git.saurik.com Git - apple/libc.git/blame_incremental - pthreads/pthread_mutex.c
Libc-825.24.tar.gz
[apple/libc.git] / pthreads / pthread_mutex.c
... / ...
CommitLineData
1/*
2 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23/*
24 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
25 * All Rights Reserved
26 *
27 * Permission to use, copy, modify, and distribute this software and
28 * its documentation for any purpose and without fee is hereby granted,
29 * provided that the above copyright notice appears in all copies and
30 * that both the copyright notice and this permission notice appear in
31 * supporting documentation.
32 *
33 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
34 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE.
36 *
37 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
38 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
39 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
40 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
41 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
42 *
43 */
44/*
45 * MkLinux
46 */
47
48/*
49 * POSIX Pthread Library
50 * -- Mutex variable support
51 */
52
53#include "pthread_internals.h"
54
55#ifdef PLOCKSTAT
56#include "plockstat.h"
57#else /* !PLOCKSTAT */
58#define PLOCKSTAT_MUTEX_SPIN(x)
59#define PLOCKSTAT_MUTEX_SPUN(x, y, z)
60#define PLOCKSTAT_MUTEX_ERROR(x, y)
61#define PLOCKSTAT_MUTEX_BLOCK(x)
62#define PLOCKSTAT_MUTEX_BLOCKED(x, y)
63#define PLOCKSTAT_MUTEX_ACQUIRE(x, y, z)
64#define PLOCKSTAT_MUTEX_RELEASE(x, y)
65#endif /* PLOCKSTAT */
66
67extern int __unix_conforming;
68extern int __unix_conforming;
69
70#ifndef BUILDING_VARIANT
71__private_extern__ int usenew_mtximpl = 1;
72static void __pthread_mutex_set_signature(npthread_mutex_t * mutex);
73int __mtx_markprepost(npthread_mutex_t *mutex, uint32_t oupdateval, int firstfit);
74static int _pthread_mutex_destroy_locked(pthread_mutex_t *omutex);
75#else /* BUILDING_VARIANT */
76extern int usenew_mtximpl;
77#endif /* BUILDING_VARIANT */
78
79
80#ifdef NOTNEEDED
81#define USE_COMPAGE 1
82extern int _commpage_pthread_mutex_lock(uint32_t * lvalp, int flags, uint64_t mtid, uint32_t mask, uint64_t * tidp, int *sysret);
83#endif
84
85#include <machine/cpu_capabilities.h>
86
87int _pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr, uint32_t static_type);
88
89
90#if defined(__LP64__)
91#define MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr) \
92{ \
93 if (mutex->mtxopts.options.misalign != 0) { \
94 lseqaddr = &mutex->m_seq[0]; \
95 useqaddr = &mutex->m_seq[1]; \
96 } else { \
97 lseqaddr = &mutex->m_seq[1]; \
98 useqaddr = &mutex->m_seq[2]; \
99 } \
100}
101#else /* __LP64__ */
102#define MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr) \
103{ \
104 if (mutex->mtxopts.options.misalign != 0) { \
105 lseqaddr = &mutex->m_seq[1]; \
106 useqaddr = &mutex->m_seq[2]; \
107 }else { \
108 lseqaddr = &mutex->m_seq[0]; \
109 useqaddr = &mutex->m_seq[1]; \
110 } \
111}
112#endif /* __LP64__ */
113
114#define _KSYN_TRACE_ 0
115
116#if _KSYN_TRACE_
117/* The Function qualifiers */
118#define DBG_FUNC_START 1
119#define DBG_FUNC_END 2
120#define DBG_FUNC_NONE 0
121
122int __kdebug_trace(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t);
123
124#define _KSYN_TRACE_UM_LOCK 0x9000060
125#define _KSYN_TRACE_UM_UNLOCK 0x9000064
126#define _KSYN_TRACE_UM_MHOLD 0x9000068
127#define _KSYN_TRACE_UM_MDROP 0x900006c
128#define _KSYN_TRACE_UM_MUBITS 0x900007c
129#define _KSYN_TRACE_UM_MARKPP 0x90000a8
130
131#endif /* _KSYN_TRACE_ */
132
133#ifndef BUILDING_VARIANT /* [ */
134
135#define BLOCK_FAIL_PLOCKSTAT 0
136#define BLOCK_SUCCESS_PLOCKSTAT 1
137
138#ifdef PR_5243343
139/* 5243343 - temporary hack to detect if we are running the conformance test */
140extern int PR_5243343_flag;
141#endif /* PR_5243343 */
142
/* This function is never called and exists to provide never-fired dtrace
 * probes so that user d scripts don't get errors.
 *
 * The __attribute__((used)) keeps the dead-code stripper from removing
 * it, so the PLOCKSTAT probe sites remain present in the binary.
 */
__private_extern__ __attribute__((used)) void
_plockstat_never_fired(void)
{
	PLOCKSTAT_MUTEX_SPIN(NULL);
	PLOCKSTAT_MUTEX_SPUN(NULL, 0, 0);
}
152
153
154/*
155 * Initialize a mutex variable, possibly with additional attributes.
156 * Public interface - so don't trust the lock - initialize it first.
157 */
158int
159pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
160{
161#if 0
162 /* conformance tests depend on not having this behavior */
163 /* The test for this behavior is optional */
164 if (mutex->sig == _PTHREAD_MUTEX_SIG)
165 return EBUSY;
166#endif
167 LOCK_INIT(mutex->lock);
168 return (_pthread_mutex_init(mutex, attr, 0x7));
169}
170
171/*
172 * Fetch the priority ceiling value from a mutex variable.
173 * Note: written as a 'helper' function to hide implementation details.
174 */
175int
176pthread_mutex_getprioceiling(const pthread_mutex_t *mutex,
177 int *prioceiling)
178{
179 int res;
180
181 LOCK(mutex->lock);
182 if (mutex->sig == _PTHREAD_MUTEX_SIG)
183 {
184 *prioceiling = mutex->prioceiling;
185 res = 0;
186 } else
187 res = EINVAL; /* Not an initialized 'attribute' structure */
188 UNLOCK(mutex->lock);
189 return (res);
190}
191
192/*
193 * Set the priority ceiling for a mutex.
194 * Note: written as a 'helper' function to hide implementation details.
195 */
196int
197pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
198 int prioceiling,
199 int *old_prioceiling)
200{
201 int res;
202
203 LOCK(mutex->lock);
204 if (mutex->sig == _PTHREAD_MUTEX_SIG)
205 {
206 if ((prioceiling >= -999) ||
207 (prioceiling <= 999))
208 {
209 *old_prioceiling = mutex->prioceiling;
210 mutex->prioceiling = prioceiling;
211 res = 0;
212 } else
213 res = EINVAL; /* Invalid parameter */
214 } else
215 res = EINVAL; /* Not an initialized 'attribute' structure */
216 UNLOCK(mutex->lock);
217 return (res);
218}
219
220/*
221 * Get the priority ceiling value from a mutex attribute structure.
222 * Note: written as a 'helper' function to hide implementation details.
223 */
224int
225pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *attr,
226 int *prioceiling)
227{
228 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG)
229 {
230 *prioceiling = attr->prioceiling;
231 return (0);
232 } else
233 {
234 return (EINVAL); /* Not an initialized 'attribute' structure */
235 }
236}
237
238/*
239 * Get the mutex 'protocol' value from a mutex attribute structure.
240 * Note: written as a 'helper' function to hide implementation details.
241 */
242int
243pthread_mutexattr_getprotocol(const pthread_mutexattr_t *attr,
244 int *protocol)
245{
246 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG)
247 {
248 *protocol = attr->protocol;
249 return (0);
250 } else
251 {
252 return (EINVAL); /* Not an initialized 'attribute' structure */
253 }
254}
255/*
256 * Get the mutex 'type' value from a mutex attribute structure.
257 * Note: written as a 'helper' function to hide implementation details.
258 */
259int
260pthread_mutexattr_gettype(const pthread_mutexattr_t *attr,
261 int *type)
262{
263 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG)
264 {
265 *type = attr->type;
266 return (0);
267 } else
268 {
269 return (EINVAL); /* Not an initialized 'attribute' structure */
270 }
271}
272
273/*
274 *
275 */
276int
277pthread_mutexattr_getpshared(const pthread_mutexattr_t *attr, int *pshared)
278{
279 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG)
280 {
281 *pshared = (int)attr->pshared;
282 return (0);
283 } else
284 {
285 return (EINVAL); /* Not an initialized 'attribute' structure */
286 }
287}
288
289/*
290 * Initialize a mutex attribute structure to system defaults.
291 */
292int
293pthread_mutexattr_init(pthread_mutexattr_t *attr)
294{
295 attr->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
296 attr->protocol = _PTHREAD_DEFAULT_PROTOCOL;
297 attr->policy = _PTHREAD_MUTEX_POLICY_FAIRSHARE;
298 attr->type = PTHREAD_MUTEX_DEFAULT;
299 attr->sig = _PTHREAD_MUTEX_ATTR_SIG;
300 attr->pshared = _PTHREAD_DEFAULT_PSHARED;
301 return (0);
302}
303
304/*
305 * Set the priority ceiling value in a mutex attribute structure.
306 * Note: written as a 'helper' function to hide implementation details.
307 */
308int
309pthread_mutexattr_setprioceiling(pthread_mutexattr_t *attr,
310 int prioceiling)
311{
312 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG)
313 {
314 if ((prioceiling >= -999) ||
315 (prioceiling <= 999))
316 {
317 attr->prioceiling = prioceiling;
318 return (0);
319 } else
320 {
321 return (EINVAL); /* Invalid parameter */
322 }
323 } else
324 {
325 return (EINVAL); /* Not an initialized 'attribute' structure */
326 }
327}
328
329/*
330 * Set the mutex 'protocol' value in a mutex attribute structure.
331 * Note: written as a 'helper' function to hide implementation details.
332 */
333int
334pthread_mutexattr_setprotocol(pthread_mutexattr_t *attr,
335 int protocol)
336{
337 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG)
338 {
339 if ((protocol == PTHREAD_PRIO_NONE) ||
340 (protocol == PTHREAD_PRIO_INHERIT) ||
341 (protocol == PTHREAD_PRIO_PROTECT))
342 {
343 attr->protocol = protocol;
344 return (0);
345 } else
346 {
347 return (EINVAL); /* Invalid parameter */
348 }
349 } else
350 {
351 return (EINVAL); /* Not an initialized 'attribute' structure */
352 }
353}
354
355int
356pthread_mutexattr_setpolicy_np(pthread_mutexattr_t *attr,
357 int policy)
358{
359 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG)
360 {
361 if (
362 (policy == _PTHREAD_MUTEX_POLICY_FAIRSHARE) ||
363 (policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT)
364#if NOTYET
365 ||
366 (policy == _PTHREAD_MUTEX_POLICY_REALTIME) ||
367 (policy == _PTHREAD_MUTEX_POLICY_ADAPTIVE) ||
368 (policy == _PTHREAD_MUTEX_POLICY_PRIPROTECT) ||
369 (policy == _PTHREAD_MUTEX_POLICY_PRIINHERIT)
370#endif /* NOTYET */
371 )
372 {
373 attr->policy = policy;
374 return (0);
375 } else
376 {
377 return (EINVAL); /* Invalid parameter */
378 }
379 } else
380 {
381 return (EINVAL); /* Not an initialized 'attribute' structure */
382 }
383}
384
385/*
386 * Set the mutex 'type' value in a mutex attribute structure.
387 * Note: written as a 'helper' function to hide implementation details.
388 */
389int
390pthread_mutexattr_settype(pthread_mutexattr_t *attr,
391 int type)
392{
393 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG)
394 {
395 if ((type == PTHREAD_MUTEX_NORMAL) ||
396 (type == PTHREAD_MUTEX_ERRORCHECK) ||
397 (type == PTHREAD_MUTEX_RECURSIVE) ||
398 (type == PTHREAD_MUTEX_DEFAULT))
399 {
400 attr->type = type;
401 return (0);
402 } else
403 {
404 return (EINVAL); /* Invalid parameter */
405 }
406 } else
407 {
408 return (EINVAL); /* Not an initialized 'attribute' structure */
409 }
410}
411
412
413int mutex_try_lock(int *x) {
414 return _spin_lock_try((pthread_lock_t *)x);
415}
416
417void mutex_wait_lock(int *x) {
418 for (;;) {
419 if( _spin_lock_try((pthread_lock_t *)x)) {
420 return;
421 }
422 swtch_pri(0);
423 }
424}
425
/* Old cthreads name for yielding the processor. */
void
cthread_yield(void)
{
	(void)sched_yield();
}
431
/* Non-portable pthread alias for yielding the processor. */
void
pthread_yield_np(void)
{
	(void)sched_yield();
}
437
438
439/*
440 * Temp: till pshared is fixed correctly
441 */
442int
443pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
444{
445#if __DARWIN_UNIX03
446 if (__unix_conforming == 0)
447 __unix_conforming = 1;
448#endif /* __DARWIN_UNIX03 */
449
450 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG)
451 {
452#if __DARWIN_UNIX03
453 if (( pshared == PTHREAD_PROCESS_PRIVATE) || (pshared == PTHREAD_PROCESS_SHARED))
454#else /* __DARWIN_UNIX03 */
455 if ( pshared == PTHREAD_PROCESS_PRIVATE)
456#endif /* __DARWIN_UNIX03 */
457 {
458 attr->pshared = pshared;
459 return (0);
460 } else {
461 return (EINVAL); /* Invalid parameter */
462 }
463 } else
464 {
465 return (EINVAL); /* Not an initialized 'attribute' structure */
466 }
467}
468
/*
 * Drop unlock reference(s) on a new-style mutex, on behalf of
 * pthread_mutex_unlock() or the condition-variable wait path.
 *
 * diffgen      number of unlock references to drop (in PTHRW_INC units)
 * flagsp       out: mutex option flags; _PTHREAD_MTX_OPT_NOTIFY is set
 *              when the caller must wake waiters via the kernel
 * pmtxp        out: the address to hand to the kernel for this mutex
 * mgenp/ugenp  out: resulting lock / unlock sequence words
 *
 * Returns 0 on success, or EPERM when an errorcheck/recursive mutex is
 * not owned by the calling thread.  The L (lock) and U (unlock)
 * sequence words are updated together with a single 64-bit CAS; on CAS
 * failure the whole computation restarts from 'retry'.
 */
__private_extern__ int
__mtx_droplock(npthread_mutex_t * mutex, uint32_t diffgen, uint32_t * flagsp, uint32_t ** pmtxp, uint32_t * mgenp, uint32_t * ugenp)
{
	pthread_t self;
	uint64_t selfid, resettid;
	int firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
	uint32_t lgenval, ugenval, nlval, ulval, morewaiters=0, flags;
	volatile uint32_t * lseqaddr, *useqaddr;
	uint64_t oldval64, newval64;
	int numwaiters=0, clearprepost = 0;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_MDROP | DBG_FUNC_START, (uint32_t)mutex, diffgen, 0, 0, 0);
#endif
	MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);


	flags = mutex->mtxopts.value;
	flags &= ~_PTHREAD_MTX_OPT_NOTIFY; /* no notification by default */


	if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL)
	{
		/* Ownership is tracked only for errorcheck/recursive mutexes. */
		self = pthread_self();
		(void) pthread_threadid_np(self, &selfid);

		if (mutex->m_tid != selfid)
		{
			//LIBC_ABORT("dropping recur or error mutex not owned by the thread\n");
			PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, EPERM);
			return(EPERM);
		} else if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE &&
			--mutex->mtxopts.options.lock_count)
		{
			/* Recursion count still positive: no real unlock happens. */
			PLOCKSTAT_MUTEX_RELEASE((pthread_mutex_t *)mutex, 1);
			goto out;
		}
	}


retry:
	lgenval = *lseqaddr;
	ugenval = *useqaddr;

	clearprepost = 0;

	numwaiters = diff_genseq((lgenval & PTHRW_COUNT_MASK),(ugenval & PTHRW_COUNT_MASK)); /* pendig waiters */

	if (numwaiters == 0) {
		/* spurious unlocks, do not touch tid */
		oldval64 = (((uint64_t)ugenval) << 32);
		oldval64 |= lgenval;
		if ((firstfit != 0) && ((lgenval & PTH_RWL_PBIT) != 0)) {
			/* firstfit with a stale prepost: clear the P bit too. */
			clearprepost = 1;
			lgenval &= ~PTH_RWL_PBIT;
			newval64 = (((uint64_t)ugenval) << 32);
			newval64 |= lgenval;
		} else
			newval64 = oldval64;
		if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lseqaddr) != TRUE)
			goto retry;
		/* validated L & U to be same, this is spurious unlock */
		flags &= ~_PTHREAD_MTX_OPT_NOTIFY;
		if (clearprepost == 1)
			__psynch_cvclrprepost(mutex, lgenval, ugenval, 0, 0, lgenval, (flags | _PTHREAD_MTX_OPT_MUTEX));

		goto out;
	}

	if (numwaiters < diffgen) {
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_UM_MDROP | DBG_FUNC_NONE, (uint32_t)mutex, numwaiters, lgenval, ugenval, 0);
#endif
		/* cannot drop more than existing number of waiters */
		diffgen = numwaiters;
	}

	/* Advance U by diffgen; recompute the bits that go with new state. */
	oldval64 = (((uint64_t)ugenval) << 32);
	oldval64 |= lgenval;
	ulval = ugenval + diffgen;
	nlval = lgenval;

	if ((lgenval & PTHRW_COUNT_MASK) == (ulval & PTHRW_COUNT_MASK)) {
		/* Mutex fully released: do not reset Ibit, just K&E */
		nlval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
		flags &= ~_PTHREAD_MTX_OPT_NOTIFY;
		if ((firstfit != 0) && ((lgenval & PTH_RWL_PBIT) != 0)) {
			clearprepost = 1;
			nlval &= ~PTH_RWL_PBIT;
		}
	} else {
		/* need to signal others waiting for mutex */
		morewaiters = 1;
		flags |= _PTHREAD_MTX_OPT_NOTIFY;
	}

	if (((nlval & PTH_RWL_EBIT) != 0) && (firstfit != 0)) {
		nlval &= ~PTH_RWL_EBIT; /* reset Ebit so another can acquire meanwhile */
	}

	newval64 = (((uint64_t)ulval) << 32);
	newval64 |= nlval;

	/* Save owner so it can be restored if the CAS below loses a race. */
	resettid = mutex->m_tid;

	if ((lgenval & PTHRW_COUNT_MASK) == (ulval & PTHRW_COUNT_MASK))
		mutex->m_tid = 0;
	else if (firstfit == 0)
		mutex->m_tid = PTHREAD_MTX_TID_SWITCHING;

	if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lseqaddr) != TRUE) {
		mutex->m_tid = resettid;
		goto retry;
	}


#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_MDROP | DBG_FUNC_NONE, (uint32_t)mutex, 2, lgenval, ugenval, 0);
	(void)__kdebug_trace(_KSYN_TRACE_UM_MDROP | DBG_FUNC_NONE, (uint32_t)mutex, 2, nlval, ulval, 0);
#endif

	if (clearprepost != 0) {
		__psynch_cvclrprepost(mutex, nlval, ulval, 0, 0, nlval, (flags | _PTHREAD_MTX_OPT_MUTEX));
	}

	/* Report resulting state to the caller (all out-params optional). */
	if (mgenp != NULL)
		*mgenp = nlval;
	if (ugenp != NULL)
		*ugenp = ulval;
#if USE_COMPAGE
	if (pmtxp != NULL)
		*pmtxp = lseqaddr;
#else
	if (pmtxp != NULL)
		*pmtxp = (uint32_t *)mutex;
#endif

out:
	if (flagsp != NULL)
		*flagsp = flags;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_MDROP | DBG_FUNC_END, (uint32_t)mutex, flags, 0, 0, 0);
#endif
	return(0);
}
619
/*
 * Merge the kernel-returned update word (oupdateval) into the userland
 * lock sequence word and record this thread as owner of the mutex.
 *
 * firstfit  - nonzero for firstfit policy (E bit may need retrying)
 * fromcond  - nonzero when called from the condition-variable path;
 *             in that case this function re-waits in the kernel itself
 *             instead of returning 1 to let mutex_lock rewind.
 *
 * Returns 0 once ownership (K+E bits plus m_tid) is recorded; returns 1
 * when fromcond == 0 and the mutex could not yet be acquired.
 */
int
__mtx_updatebits(npthread_mutex_t *mutex, uint32_t oupdateval, int firstfit, int fromcond, uint64_t selfid)
{
	uint32_t updateval = oupdateval;
#if !USE_COMPAGE
	pthread_mutex_t * omutex = (pthread_mutex_t *)mutex;
#endif
	int isebit = 0;
	uint32_t lgenval, ugenval, nval, uval, bits;
	volatile uint32_t * lseqaddr, *useqaddr;
	uint64_t oldval64, newval64;

	MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_MUBITS | DBG_FUNC_START, (uint32_t)mutex, oupdateval, firstfit, fromcond, 0);
#endif

retry:
	lgenval = *lseqaddr;
	ugenval = *useqaddr;
	bits = updateval & PTHRW_BIT_MASK;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_MUBITS | DBG_FUNC_NONE, (uint32_t)mutex, 1, lgenval, ugenval, 0);
#endif


	if ((updateval & PTH_RWL_MTX_WAIT) != 0) {
		/* Kernel says this thread must keep waiting. */
		lgenval = (updateval & PTHRW_COUNT_MASK) | (lgenval & PTHRW_BIT_MASK);
		if (fromcond == 0) {
			/* if from mutex_lock(), it will handle the rewind */
			return(1);
		}
		/* go block in the kernel with same lgenval as returned */
		goto ml1;
	} else {
		/* firsfit might not have EBIT */
		if (firstfit != 0) {
			if ((lgenval & PTH_RWL_EBIT) != 0)
				isebit = 1;
			else
				isebit = 0;
		} else if ((lgenval & (PTH_RWL_KBIT|PTH_RWL_EBIT)) == (PTH_RWL_KBIT|PTH_RWL_EBIT)) {
			/* fairshare mutex and the bits are already set, just update tid */
			goto out;
		}
	}

	/* either firstfist or no E bit set */
	/* update the bits */
	oldval64 = (((uint64_t)ugenval) << 32);
	oldval64 |= lgenval;
	uval = ugenval;
	nval = lgenval | (PTH_RWL_KBIT|PTH_RWL_EBIT);
	newval64 = (((uint64_t)uval) << 32);
	newval64 |= nval;

	/* set s and b bit */
	if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lseqaddr) == TRUE) {
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_UM_MUBITS | DBG_FUNC_NONE, (uint32_t)mutex, 2, nval, uval, 0);
#endif
		if ((firstfit != 0) && (isebit != 0))
			goto handleffit;

		goto out;
	} else {
		if (firstfit == 0)
			goto retry;
		else
			goto handleffit;
	}

	/* NOTE(review): the trace below is unreachable — both branches of
	 * the if/else above end in a goto. */
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_MUBITS | DBG_FUNC_NONE, (uint32_t)mutex, 4, nval, uval, 0);
#endif

out:
	/* succesful bits updation */
	mutex->m_tid = selfid;
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_MUBITS | DBG_FUNC_END, (uint32_t)mutex, 0, 0, 0, 0);
#endif
	return(0);

handleffit:
	/* firstfit failure: re-read state and decide whether to re-wait. */
	lgenval = *lseqaddr;
	ugenval = *useqaddr;
	if ((lgenval & PTH_RWL_EBIT) == 0)
		goto retry;

	if (fromcond == 0)
		return(1);
	else {
		/* called from condition variable code block again */
ml1:
#if USE_COMPAGE /* [ */
		updateval = __psynch_mutexwait((pthread_mutex_t *)lseqaddr, lgenval | PTH_RWL_RETRYBIT, ugenval, mutex->m_tid,
			mutex->mtxopts.value);
#else /* USECOMPAGE ][ */
		updateval = __psynch_mutexwait(omutex, lgenval | PTH_RWL_RETRYBIT, ugenval, mutex->m_tid,
			mutex->mtxopts.value);
#endif /* USE_COMPAGE ] */
		/* (uint32_t)-1 indicates an interrupted wait; retry it. */
		if (updateval == (uint32_t)-1) {
			goto ml1;
		}

		/* now update the bits */
		goto retry;
	}
	/* cannot reach */
	goto retry;
}
735
736
/*
 * For a firstfit mutex whose kernel update word carries the P (prepost)
 * bit, reflect that prepost in the userland lock word: set the P bit
 * while waiters remain, or clear it (and tell the kernel to drop its
 * prepost record) once L and U agree.  No-op for fairshare mutexes or
 * when the P bit is absent.  Always returns 0.
 */
int
__mtx_markprepost(npthread_mutex_t *mutex, uint32_t oupdateval, int firstfit)
{
	uint32_t updateval = oupdateval;
	int clearprepost = 0;
	uint32_t lgenval, ugenval,flags;
	volatile uint32_t * lseqaddr, *useqaddr;
	uint64_t oldval64, newval64;

	MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_MARKPP | DBG_FUNC_START, (uint32_t)mutex, oupdateval, firstfit, 0, 0);
#endif

retry:

	clearprepost = 0;

	if ((firstfit != 0) && ((updateval & PTH_RWL_PBIT) != 0)) {
		flags = mutex->mtxopts.value;

		lgenval = *lseqaddr;
		ugenval = *useqaddr;

#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_UM_MARKPP | DBG_FUNC_NONE, (uint32_t)mutex, 1, lgenval, ugenval, 0);
#endif
		/* update the bits */
		oldval64 = (((uint64_t)ugenval) << 32);
		oldval64 |= lgenval;

		if ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK)) {
			/* No pending waiters: drop P and clear kernel prepost. */
			clearprepost = 1;
			lgenval &= ~PTH_RWL_PBIT;

		} else {
			lgenval |= PTH_RWL_PBIT;
		}
		newval64 = (((uint64_t)ugenval) << 32);
		newval64 |= lgenval;

		if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lseqaddr) == TRUE) {
#if _KSYN_TRACE_
			(void)__kdebug_trace(_KSYN_TRACE_UM_MARKPP | DBG_FUNC_NONE, (uint32_t)mutex, 2, lgenval, ugenval, 0);
#endif

			if (clearprepost != 0)
				__psynch_cvclrprepost(mutex, lgenval, ugenval, 0, 0, lgenval, (flags | _PTHREAD_MTX_OPT_MUTEX));

		} else {
			goto retry;
		}

#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_UM_MARKPP | DBG_FUNC_END, (uint32_t)mutex, 0, 0, 0, 0);
#endif
	}
	return(0);
}
797
/*
 * For the new style mutex, interlocks are not held all the time.
 * We needed the signature to be set in the end. And we need
 * to protect against the code getting reorganized by compiler.
 *
 * NOTE(review): this relies on the out-of-line call acting as a
 * compiler ordering point so the signature store happens last —
 * confirm the optimizer does not inline this at -O2.
 */
static void
__pthread_mutex_set_signature(npthread_mutex_t * mutex)
{
	mutex->sig = _PTHREAD_MUTEX_SIG;
}
808
809int
810pthread_mutex_lock(pthread_mutex_t *omutex)
811{
812 pthread_t self;
813 uint64_t selfid;
814 npthread_mutex_t * mutex = (npthread_mutex_t *)omutex;
815 int sig = mutex->sig;
816#if NEVERINCOMPAGE || !USE_COMPAGE
817 //uint32_t oldval, newval;
818#endif
819 int retval;
820 int gotlock = 0, firstfit = 0;
821 uint32_t updateval, lgenval, ugenval, nval, uval;
822 volatile uint32_t * lseqaddr, *useqaddr;
823 uint64_t oldval64, newval64;
824#if USE_COMPAGE
825 int sysret = 0;
826 uint32_t mask;
827#else
828 int retrybit = 0;
829#endif
830
831 /* To provide backwards compat for apps using mutex incorrectly */
832 if ((sig != _PTHREAD_MUTEX_SIG) && ((sig & _PTHREAD_MUTEX_SIG_init_MASK) != _PTHREAD_MUTEX_SIG_CMP)) {
833 PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);
834 return(EINVAL);
835 }
836 if (mutex->sig != _PTHREAD_MUTEX_SIG) {
837 LOCK(mutex->lock);
838 if ((mutex->sig & _PTHREAD_MUTEX_SIG_init_MASK) == _PTHREAD_MUTEX_SIG_CMP) {
839 /* static initializer, init the mutex */
840 if(retval = _pthread_mutex_init(omutex, NULL, (mutex->sig & 0xf)) != 0){
841 UNLOCK(mutex->lock);
842 PLOCKSTAT_MUTEX_ERROR(omutex, retval);
843 return(retval);
844 }
845 } else if (mutex->sig != _PTHREAD_MUTEX_SIG) {
846 UNLOCK(mutex->lock);
847 PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);
848 return(EINVAL);
849 }
850 UNLOCK(mutex->lock);
851 }
852
853#if _KSYN_TRACE_
854 (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_START, (uint32_t)mutex, 0, 0, 0, 0);
855#endif
856 MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);
857
858 self = pthread_self();
859 (void) pthread_threadid_np(self, &selfid);
860
861 if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
862 if (mutex->m_tid == selfid) {
863 if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE)
864 {
865 if (mutex->mtxopts.options.lock_count < USHRT_MAX)
866 {
867 mutex->mtxopts.options.lock_count++;
868 PLOCKSTAT_MUTEX_ACQUIRE(omutex, 1, 0);
869 retval = 0;
870 } else {
871 retval = EAGAIN;
872 PLOCKSTAT_MUTEX_ERROR(omutex, retval);
873 }
874 } else { /* PTHREAD_MUTEX_ERRORCHECK */
875 retval = EDEADLK;
876 PLOCKSTAT_MUTEX_ERROR(omutex, retval);
877 }
878 return (retval);
879 }
880 }
881
882#if _KSYN_TRACE_
883 (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_NONE, (uint32_t)mutex, 1, 0, 0, 0);
884#endif
885
886#if USE_COMPAGE /* [ */
887
888ml0:
889 mask = PTH_RWL_EBIT;
890 retval = _commpage_pthread_mutex_lock(lseqaddr, mutex->mtxopts.value, selfid, mask, &mutex->m_tid, &sysret);
891 if (retval == 0) {
892 gotlock = 1;
893 } else if (retval == 1) {
894 gotlock = 1;
895 updateval = sysret;
896 /* returns 0 on succesful update */
897 if (__mtx_updatebits( mutex, updateval, firstfit, 0, selfid) == 1) {
898 /* could not acquire, may be locked in ffit case */
899#if USE_COMPAGE
900 LIBC_ABORT("comapge implementatin looping in libc \n");
901#endif
902 goto ml0;
903 }
904 }
905#if NEVERINCOMPAGE
906 else if (retval == 3) {
907 cthread_set_errno_self(sysret);
908 oldval = *lseqaddr;
909 uval = *useqaddr;
910 newval = oldval + PTHRW_INC;
911 gotlock = 0;
912 /* to block in the kerenl again */
913 }
914#endif
915 else {
916 LIBC_ABORT("comapge implementation bombed \n");
917 }
918
919
920#else /* USECOMPAGE ][ */
921retry:
922 lgenval = *lseqaddr;
923 ugenval = *useqaddr;
924
925#if _KSYN_TRACE_
926 (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_NONE, (uint32_t)mutex, 2, lgenval, ugenval, 0);
927#endif /* _KSYN_TRACE_ */
928
929 if((lgenval & PTH_RWL_EBIT) == 0) {
930 gotlock = 1;
931 } else {
932 gotlock = 0;
933 }
934
935 oldval64 = (((uint64_t)ugenval) << 32);
936 oldval64 |= lgenval;
937 uval = ugenval;
938 nval = (lgenval + PTHRW_INC) | (PTH_RWL_EBIT|PTH_RWL_KBIT);
939 newval64 = (((uint64_t)uval) << 32);
940 newval64 |= nval;
941
942 if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lseqaddr) == TRUE) {
943#if _KSYN_TRACE_
944 (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_NONE, (uint32_t)mutex, 2, nval, uval, 0);
945#endif
946 if (gotlock != 0) {
947 mutex->m_tid = selfid;
948 goto out;
949 }
950 } else
951 goto retry;
952
953
954 retrybit = 0;
955 if (gotlock == 0) {
956#if _KSYN_TRACE_
957 (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_NONE, (uint32_t)mutex, 3, nval, uval, 0);
958#endif
959 firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
960ml1:
961 updateval = __psynch_mutexwait(omutex, nval | retrybit, uval, mutex->m_tid,
962 mutex->mtxopts.value);
963
964#if _KSYN_TRACE_
965 (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_NONE, (uint32_t)mutex, 4, updateval, 0, 0);
966#endif
967 if (updateval == (uint32_t)-1) {
968 goto ml1;
969 }
970
971 /* returns 0 on succesful update; in firstfit it may fail with 1 */
972 if (__mtx_updatebits( mutex, PTHRW_INC | (PTH_RWL_KBIT | PTH_RWL_EBIT), firstfit, 0, selfid) == 1) {
973 /* could not acquire, may be locked in ffit case */
974 retrybit = PTH_RWL_RETRYBIT;
975#if USE_COMPAGE
976 LIBC_ABORT("comapge implementatin looping in libc \n");
977
978#endif
979 goto ml1;
980 }
981 }
982#endif /* USE_COMPAGE ] */
983
984out:
985 if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE)
986 mutex->mtxopts.options.lock_count = 1;
987
988#if _KSYN_TRACE_
989 (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_END, (uint32_t)mutex, 0, 0, 0, 0);
990#endif
991 return (0);
992}
993
/*
 * Attempt to lock a mutex, but don't block if this isn't possible.
 * Returns 0 on acquisition, EBUSY when the mutex is held elsewhere,
 * EINVAL for an uninitialized mutex, and EDEADLK/EAGAIN under the
 * errorcheck/recursive ownership rules.
 */
int
pthread_mutex_trylock(pthread_mutex_t *omutex)
{
	npthread_mutex_t * mutex = (npthread_mutex_t *)omutex;
	int sig = mutex->sig;
	int error = 0;
	pthread_t self;
	uint64_t selfid;
	int gotlock = 0;
	uint32_t lgenval, ugenval, nval, uval;
	volatile uint32_t * lseqaddr, *useqaddr;
	uint64_t oldval64, newval64;

	/* To provide backwards compat for apps using mutex incorrectly */
	if ((sig != _PTHREAD_MUTEX_SIG) && ((sig & _PTHREAD_MUTEX_SIG_init_MASK) != _PTHREAD_MUTEX_SIG_CMP)) {
		PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);
		return(EINVAL);
	}

	if (mutex->sig != _PTHREAD_MUTEX_SIG) {
		LOCK(mutex->lock);
		if ((mutex->sig & _PTHREAD_MUTEX_SIG_init_MASK) == _PTHREAD_MUTEX_SIG_CMP) {
			/* static initializer, init the mutex */
			if((error = _pthread_mutex_init(omutex, NULL, (mutex->sig & 0xf))) != 0){
				UNLOCK(mutex->lock);
				PLOCKSTAT_MUTEX_ERROR(omutex, error);
				return(error);
			}
		} else if (mutex->sig != _PTHREAD_MUTEX_SIG) {
			UNLOCK(mutex->lock);
			PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);
			return(EINVAL);
		}
		UNLOCK(mutex->lock);
	}

	MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);

	self = pthread_self();
	(void) pthread_threadid_np(self, &selfid);

	if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
		/* Relock by the current owner: recurse or error out. */
		if (mutex->m_tid == selfid) {
			if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE)
			{
				if (mutex->mtxopts.options.lock_count < USHRT_MAX)
				{
					mutex->mtxopts.options.lock_count++;
					PLOCKSTAT_MUTEX_ACQUIRE(omutex, 1, 0);
					error = 0;
				} else {
					/* Recursion counter exhausted. */
					error = EAGAIN;
					PLOCKSTAT_MUTEX_ERROR(omutex, error);
				}
			} else { /* PTHREAD_MUTEX_ERRORCHECK */
				error = EDEADLK;
				PLOCKSTAT_MUTEX_ERROR(omutex, error);
			}
			return (error);
		}
	}
retry:
	lgenval = *lseqaddr;
	ugenval = *useqaddr;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_NONE, (uint32_t)mutex, 2, lgenval, ugenval, 0);
#endif /* _KSYN_TRACE_ */


	oldval64 = (((uint64_t)ugenval) << 32);
	oldval64 |= lgenval;
	uval = ugenval;

	/* if we can acquire go ahead otherwise ensure it is still busy */
	if((lgenval & PTH_RWL_EBIT) == 0) {
		gotlock = 1;
		nval = (lgenval + PTHRW_INC) | (PTH_RWL_EBIT|PTH_RWL_KBIT);
	} else {
		nval = (lgenval | PTH_RWL_TRYLKBIT);
		gotlock = 0;
	}

	newval64 = (((uint64_t)uval) << 32);
	newval64 |= nval;

	/* set s and b bit */
	if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lseqaddr) == TRUE) {
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_NONE, (uint32_t)mutex, 2, nval, uval, 0);
#endif
		if (gotlock != 0) {
			/* Acquired: record ownership before returning. */
			mutex->m_tid = selfid;
			if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE)
				mutex->mtxopts.options.lock_count = 1;
			PLOCKSTAT_MUTEX_ACQUIRE(omutex, 1, 0);
		} else {
			error = EBUSY;
			PLOCKSTAT_MUTEX_ERROR(omutex, error);
		}
	} else
		goto retry;


#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_END, (uint32_t)mutex, 0xfafafafa, 0, error, 0);
#endif
	return (error);
}
1106
1107/*
1108 * Unlock a mutex.
1109 * TODO: Priority inheritance stuff
1110 */
1111int
1112pthread_mutex_unlock(pthread_mutex_t *omutex)
1113{
1114 npthread_mutex_t * mutex = (npthread_mutex_t *)omutex;
1115 int retval;
1116 uint32_t mtxgen, mtxugen, flags, notify, updateval;
1117 int sig = mutex->sig;
1118 pthread_t self;
1119 uint64_t selfid;
1120 volatile uint32_t * lseqaddr, *useqaddr;
1121 int firstfit = 0;
1122
1123 /* To provide backwards compat for apps using mutex incorrectly */
1124
1125#if _KSYN_TRACE_
1126 (void)__kdebug_trace(_KSYN_TRACE_UM_UNLOCK | DBG_FUNC_START, (uint32_t)mutex, 0, 0, 0, 0);
1127#endif
1128 if ((sig != _PTHREAD_MUTEX_SIG) && ((sig & _PTHREAD_MUTEX_SIG_init_MASK) != _PTHREAD_MUTEX_SIG_CMP)) {
1129 PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);
1130 return(EINVAL);
1131 }
1132
1133 if (mutex->sig != _PTHREAD_MUTEX_SIG) {
1134 LOCK(mutex->lock);
1135 if ((mutex->sig & _PTHREAD_MUTEX_SIG_init_MASK) == _PTHREAD_MUTEX_SIG_CMP) {
1136 /* static initializer, init the mutex */
1137 if((retval = _pthread_mutex_init(omutex, NULL, (mutex->sig & 0xf))) != 0){
1138 UNLOCK(mutex->lock);
1139 PLOCKSTAT_MUTEX_ERROR(omutex, retval);
1140 return(retval);
1141 }
1142 } else if (mutex->sig != _PTHREAD_MUTEX_SIG) {
1143 UNLOCK(mutex->lock);
1144 PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);
1145 return(EINVAL);
1146 }
1147 UNLOCK(mutex->lock);
1148 }
1149
1150 MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);
1151
1152 notify = 0;
1153 retval = __mtx_droplock(mutex, PTHRW_INC, &flags, NULL, &mtxgen, &mtxugen);
1154 if (retval != 0)
1155 return(retval);
1156
1157 if ((flags & _PTHREAD_MTX_OPT_NOTIFY) != 0) {
1158 firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
1159
1160 self = pthread_self();
1161 (void) pthread_threadid_np(self, &selfid);
1162
1163#if _KSYN_TRACE_
1164 (void)__kdebug_trace(_KSYN_TRACE_UM_UNLOCK | DBG_FUNC_NONE, (uint32_t)mutex, 1, mtxgen, mtxugen, 0);
1165#endif
1166#if USE_COMPAGE /* [ */
1167 if ((updateval = __psynch_mutexdrop((pthread_mutex_t *)lseqaddr, mtxgen, mtxugen, mutex->m_tid, flags)) == (uint32_t)-1)
1168#else /* USECOMPAGE ][ */
1169 if ((updateval = __psynch_mutexdrop(omutex, mtxgen, mtxugen, mutex->m_tid, flags))== (uint32_t)-1)
1170#endif /* USE_COMPAGE ] */
1171 {
1172 retval = errno;
1173#if _KSYN_TRACE_
1174 (void)__kdebug_trace(_KSYN_TRACE_UM_UNLOCK | DBG_FUNC_END, (uint32_t)mutex, retval, 0, 0, 0);
1175#endif
1176 if (retval == 0)
1177 return(0);
1178 else if (errno == EINTR)
1179 return(0);
1180 else {
1181 LIBC_ABORT("__p_mutexdrop failed with error %d\n", retval);
1182 return(retval);
1183 }
1184 } else if (firstfit == 1) {
1185 if ((updateval & PTH_RWL_PBIT) != 0) {
1186 __mtx_markprepost(mutex, updateval, firstfit);
1187 }
1188 }
1189 }
1190#if _KSYN_TRACE_
1191 (void)__kdebug_trace(_KSYN_TRACE_UM_UNLOCK | DBG_FUNC_END, (uint32_t)mutex, 0, 0, 0, 0);
1192#endif
1193 return(0);
1194}
1195
1196
1197/*
1198 * Initialize a mutex variable, possibly with additional attributes.
1199 */
1200int
1201_pthread_mutex_init(pthread_mutex_t *omutex, const pthread_mutexattr_t *attr, uint32_t static_type)
1202{
1203 npthread_mutex_t * mutex = (npthread_mutex_t *)omutex;
1204
1205 if (attr)
1206 {
1207 if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG)
1208 return (EINVAL);
1209 mutex->prioceiling = attr->prioceiling;
1210 mutex->mtxopts.options.protocol = attr->protocol;
1211 mutex->mtxopts.options.policy = attr->policy;
1212 mutex->mtxopts.options.type = attr->type;
1213 mutex->mtxopts.options.pshared = attr->pshared;
1214 } else {
1215 switch(static_type) {
1216 case 1:
1217 mutex->mtxopts.options.type = PTHREAD_MUTEX_ERRORCHECK;
1218 break;
1219 case 2:
1220 mutex->mtxopts.options.type = PTHREAD_MUTEX_RECURSIVE;
1221 break;
1222 case 3:
1223 /* firstfit fall thru */
1224 case 7:
1225 mutex->mtxopts.options.type = PTHREAD_MUTEX_DEFAULT;
1226 break;
1227 default:
1228 return(EINVAL);
1229 }
1230
1231 mutex->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
1232 mutex->mtxopts.options.protocol = _PTHREAD_DEFAULT_PROTOCOL;
1233 if (static_type != 3)
1234 mutex->mtxopts.options.policy = _PTHREAD_MUTEX_POLICY_FAIRSHARE;
1235 else
1236 mutex->mtxopts.options.policy = _PTHREAD_MUTEX_POLICY_FIRSTFIT;
1237 mutex->mtxopts.options.pshared = _PTHREAD_DEFAULT_PSHARED;
1238 }
1239
1240 mutex->mtxopts.options.notify = 0;
1241 mutex->mtxopts.options.rfu = 0;
1242 mutex->mtxopts.options.hold = 0;
1243 mutex->mtxopts.options.mutex = 1;
1244 mutex->mtxopts.options.lock_count = 0;
1245 /* address 8byte aligned? */
1246 if (((uintptr_t)mutex & 0x07) != 0) {
1247 /* 4byte alinged */
1248 mutex->mtxopts.options.misalign = 1;
1249#if defined(__LP64__)
1250 mutex->m_lseqaddr = &mutex->m_seq[0];
1251 mutex->m_useqaddr = &mutex->m_seq[1];
1252#else /* __LP64__ */
1253 mutex->m_lseqaddr = &mutex->m_seq[1];
1254 mutex->m_useqaddr = &mutex->m_seq[2];
1255#endif /* __LP64__ */
1256 } else {
1257 /* 8byte alinged */
1258 mutex->mtxopts.options.misalign = 0;
1259#if defined(__LP64__)
1260 mutex->m_lseqaddr = &mutex->m_seq[1];
1261 mutex->m_useqaddr = &mutex->m_seq[2];
1262#else /* __LP64__ */
1263 mutex->m_lseqaddr = &mutex->m_seq[0];
1264 mutex->m_useqaddr = &mutex->m_seq[1];
1265#endif /* __LP64__ */
1266 }
1267 mutex->m_tid = 0;
1268 mutex->m_seq[0] = 0;
1269 mutex->m_seq[1] = 0;
1270 mutex->m_seq[2] = 0;
1271 mutex->prioceiling = 0;
1272 mutex->priority = 0;
1273 /*
1274 * For the new style mutex, interlocks are not held all the time.
1275 * We needed the signature to be set in the end. And we need
1276 * to protect against the code getting reorganized by compiler.
1277 * mutex->sig = _PTHREAD_MUTEX_SIG;
1278 */
1279 __pthread_mutex_set_signature(mutex);
1280 return (0);
1281}
1282
1283
1284/*
1285 * Destroy a mutex variable.
1286 */
1287int
1288pthread_mutex_destroy(pthread_mutex_t *omutex)
1289{
1290 int res;
1291 npthread_mutex_t * mutex = (npthread_mutex_t *)omutex;
1292
1293 LOCK(mutex->lock);
1294 res = _pthread_mutex_destroy_locked(omutex);
1295 UNLOCK(mutex->lock);
1296
1297 return(res);
1298}
1299
1300
1301static int
1302_pthread_mutex_destroy_locked(pthread_mutex_t *omutex)
1303{
1304 int res;
1305 npthread_mutex_t * mutex = (npthread_mutex_t *)omutex;
1306 uint32_t lgenval, ugenval;
1307 volatile uint32_t * lseqaddr, *useqaddr;
1308
1309
1310 if (mutex->sig == _PTHREAD_MUTEX_SIG)
1311 {
1312 MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);
1313
1314 lgenval = *(lseqaddr);
1315 ugenval = *(useqaddr);
1316 if ((mutex->m_tid == (uint64_t)0) &&
1317 ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK)))
1318 {
1319 mutex->sig = _PTHREAD_NO_SIG;
1320 res = 0;
1321 }
1322 else
1323 res = EBUSY;
1324 } else if((mutex->sig & _PTHREAD_MUTEX_SIG_init_MASK )== _PTHREAD_MUTEX_SIG_CMP) {
1325 mutex->sig = _PTHREAD_NO_SIG;
1326 res = 0;
1327 } else
1328 res = EINVAL;
1329
1330 return (res);
1331}
1332
1333
1334#endif /* !BUILDING_VARIANT ] */
1335
1336/*
1337 * Destroy a mutex attribute structure.
1338 */
1339int
1340pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
1341{
1342#if __DARWIN_UNIX03
1343 if (__unix_conforming == 0)
1344 __unix_conforming = 1;
1345 if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG)
1346 return (EINVAL);
1347#endif /* __DARWIN_UNIX03 */
1348
1349 attr->sig = _PTHREAD_NO_SIG; /* Uninitialized */
1350 return (0);
1351}
1352
1353