1/*
2 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23/*
24 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
25 * All Rights Reserved
26 *
27 * Permission to use, copy, modify, and distribute this software and
28 * its documentation for any purpose and without fee is hereby granted,
29 * provided that the above copyright notice appears in all copies and
30 * that both the copyright notice and this permission notice appear in
31 * supporting documentation.
32 *
33 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
34 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE.
36 *
37 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
38 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
39 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
40 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
41 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
42 *
43 */
44/*
45 * MkLinux
46 */
47
48/*
49 * POSIX Pthread Library
50 * -- Mutex variable support
51 */
52
53#include "pthread_internals.h"
54
55#ifdef PLOCKSTAT
56#include "plockstat.h"
57#else /* !PLOCKSTAT */
58#define PLOCKSTAT_MUTEX_SPIN(x)
59#define PLOCKSTAT_MUTEX_SPUN(x, y, z)
60#define PLOCKSTAT_MUTEX_ERROR(x, y)
61#define PLOCKSTAT_MUTEX_BLOCK(x)
62#define PLOCKSTAT_MUTEX_BLOCKED(x, y)
63#define PLOCKSTAT_MUTEX_ACQUIRE(x, y, z)
64#define PLOCKSTAT_MUTEX_RELEASE(x, y)
65#endif /* PLOCKSTAT */
66
67extern int __unix_conforming;
69int _pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr);
70
71#if defined(__i386__) || defined(__x86_64__)
72#define USE_COMPAGE 1
73
74#include <machine/cpu_capabilities.h>
75
76extern int _commpage_pthread_mutex_lock(uint32_t * lvalp, int flags, uint64_t mtid, uint32_t mask, uint64_t * tidp, int *sysret);
77
78int _new_pthread_mutex_destroy(pthread_mutex_t *mutex);
79int _new_pthread_mutex_destroy_locked(pthread_mutex_t *mutex);
80int _new_pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr);
81int _new_pthread_mutex_lock(pthread_mutex_t *omutex);
82int _new_pthread_mutex_trylock(pthread_mutex_t *omutex);
83int _new_pthread_mutex_unlock(pthread_mutex_t *omutex);
84
85#if defined(__LP64__)
86#define MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr) \
87{ \
88 if (mutex->mtxopts.options.misalign != 0) { \
89 lseqaddr = &mutex->m_seq[0]; \
90 useqaddr = &mutex->m_seq[1]; \
91 } else { \
92 lseqaddr = &mutex->m_seq[1]; \
93 useqaddr = &mutex->m_seq[2]; \
94 } \
95}
96#else /* __LP64__ */
97#define MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr) \
98{ \
99 if (mutex->mtxopts.options.misalign != 0) { \
100 lseqaddr = &mutex->m_seq[1]; \
101 useqaddr = &mutex->m_seq[2]; \
102 } else { \
103 lseqaddr = &mutex->m_seq[0]; \
104 useqaddr = &mutex->m_seq[1]; \
105 } \
106}
107#endif /* __LP64__ */
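
/*
 * Illustrative sketch (not compiled into the library): how the macro above is
 * used to pick the lock/unlock sequence words out of m_seq[].  The misalign
 * test apparently keeps the word pair positioned so that the 64-bit
 * compare-and-swap performed on lseqaddr (see __mtx_droplock below) operates
 * on an 8-byte-aligned pair.
 */
#if 0
static void example_getseq_addr(npthread_mutex_t *mutex)
{
	uint32_t *lseqaddr;	/* lock sequence word */
	uint32_t *useqaddr;	/* unlock sequence word */

	/* Expands to the alignment-dependent selection shown above. */
	MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);

	/* lseqaddr and useqaddr now address two adjacent words of m_seq[]. */
	(void)lseqaddr;
	(void)useqaddr;
}
#endif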
108
109#define _KSYN_TRACE_ 0
110
111#if _KSYN_TRACE_
112/* The Function qualifiers */
113#define DBG_FUNC_START 1
114#define DBG_FUNC_END 2
115#define DBG_FUNC_NONE 0
116
117int __kdebug_trace(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t);
118
119#define _KSYN_TRACE_UM_LOCK 0x9000060
120#define _KSYN_TRACE_UM_UNLOCK 0x9000064
121#define _KSYN_TRACE_UM_MHOLD 0x9000068
122#define _KSYN_TRACE_UM_MDROP 0x900006c
123#define _KSYN_TRACE_UM_MUBITS 0x900007c
124
125#endif /* _KSYN_TRACE_ */
126
127#endif /* __i386__ || __x86_64__ */
128
129#ifndef BUILDING_VARIANT /* [ */
130
131#define BLOCK_FAIL_PLOCKSTAT 0
132#define BLOCK_SUCCESS_PLOCKSTAT 1
133
134#ifdef PR_5243343
135/* 5243343 - temporary hack to detect if we are running the conformance test */
136extern int PR_5243343_flag;
137#endif /* PR_5243343 */
138
139/* This function is never called and exists to provide never-fired dtrace
140 * probes so that user d scripts don't get errors.
141 */
142__private_extern__ void _plockstat_never_fired(void)
143{
144 PLOCKSTAT_MUTEX_SPIN(NULL);
145 PLOCKSTAT_MUTEX_SPUN(NULL, 0, 0);
146}
147
148/*
149 * Destroy a mutex variable.
150 */
151int
152pthread_mutex_destroy(pthread_mutex_t *mutex)
153{
154 int res;
155
156 LOCK(mutex->lock);
157 if (mutex->sig == _PTHREAD_MUTEX_SIG)
158 {
159
160#if defined(__i386__) || defined(__x86_64__)
161 if(mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED){
162
163 res = _new_pthread_mutex_destroy_locked(mutex);
164 UNLOCK(mutex->lock);
165 return(res);
166 }
167#endif /* __i386__ || __x86_64__ */
168
169 if (mutex->owner == (pthread_t)NULL &&
170 mutex->busy == (pthread_cond_t *)NULL)
171 {
172 mutex->sig = _PTHREAD_NO_SIG;
173 res = 0;
174 }
175 else
176 res = EBUSY;
177 } else
178 res = EINVAL;
179 UNLOCK(mutex->lock);
180 return (res);
181}
182
183/*
184 * Initialize a mutex variable, possibly with additional attributes.
185 */
186int
187_pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
188{
189 if (attr)
190 {
191 if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG)
192 return (EINVAL);
193#if defined(__i386__) || defined(__x86_64__)
194 if (attr->pshared == PTHREAD_PROCESS_SHARED) {
195 return(_new_pthread_mutex_init(mutex, attr));
196 } else
197#endif /* __i386__ || __x86_64__ */
198 {
199 mutex->prioceiling = attr->prioceiling;
200 mutex->mtxopts.options.protocol = attr->protocol;
201 mutex->mtxopts.options.policy = attr->policy;
202 mutex->mtxopts.options.type = attr->type;
203 mutex->mtxopts.options.pshared = attr->pshared;
204 }
205 } else {
206 mutex->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
207 mutex->mtxopts.options.protocol = _PTHREAD_DEFAULT_PROTOCOL;
208 mutex->mtxopts.options.policy = _PTHREAD_MUTEX_POLICY_FAIRSHARE;
209 mutex->mtxopts.options.type = PTHREAD_MUTEX_DEFAULT;
210 mutex->mtxopts.options.pshared = _PTHREAD_DEFAULT_PSHARED;
211 }
212 mutex->mtxopts.options.lock_count = 0;
213 mutex->owner = (pthread_t)NULL;
214 mutex->next = (pthread_mutex_t *)NULL;
215 mutex->prev = (pthread_mutex_t *)NULL;
216 mutex->busy = (pthread_cond_t *)NULL;
217 mutex->waiters = 0;
218 mutex->sem = SEMAPHORE_NULL;
219 mutex->order = SEMAPHORE_NULL;
220 mutex->prioceiling = 0;
221 mutex->sig = _PTHREAD_MUTEX_SIG;
222 return (0);
223}
224
225/*
226 * Initialize a mutex variable, possibly with additional attributes.
227 * Public interface - so don't trust the lock - initialize it first.
228 */
229int
230pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
231{
232#if 0
233 /* conformance tests depend on not having this behavior */
234 /* The test for this behavior is optional */
235 if (mutex->sig == _PTHREAD_MUTEX_SIG)
236 return EBUSY;
237#endif
238 LOCK_INIT(mutex->lock);
239 return (_pthread_mutex_init(mutex, attr));
240}
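
/*
 * Minimal usage sketch (illustrative only, not part of this file's build):
 * initializing a recursive mutex through the public interfaces above.
 */
#if 0
#include <pthread.h>

static pthread_mutex_t example_lock;

static int
example_create_recursive_lock(void)
{
	pthread_mutexattr_t attr;
	int err;

	if ((err = pthread_mutexattr_init(&attr)) != 0)
		return (err);
	if ((err = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) == 0)
		err = pthread_mutex_init(&example_lock, &attr);
	(void)pthread_mutexattr_destroy(&attr);
	return (err);
}
#endif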
241
242/*
243 * Lock a mutex.
244 * TODO: Priority inheritance stuff
245 */
246int
247pthread_mutex_lock(pthread_mutex_t *mutex)
248{
249 kern_return_t kern_res;
250 pthread_t self;
251 int sig = mutex->sig;
252
253 /* To provide backwards compat for apps using mutex incorrectly */
254 if ((sig != _PTHREAD_MUTEX_SIG) && (sig != _PTHREAD_MUTEX_SIG_init)) {
255 PLOCKSTAT_MUTEX_ERROR(mutex, EINVAL);
256 return(EINVAL);
257 }
258
259 LOCK(mutex->lock);
260 if (mutex->sig != _PTHREAD_MUTEX_SIG)
261 {
262 if (mutex->sig != _PTHREAD_MUTEX_SIG_init)
263 {
264 UNLOCK(mutex->lock);
265 PLOCKSTAT_MUTEX_ERROR(mutex, EINVAL);
266 return (EINVAL);
267 }
268 _pthread_mutex_init(mutex, NULL);
269 self = _PTHREAD_MUTEX_OWNER_SELF;
270 }
271#if defined(__i386__) || defined(__x86_64__)
272 else if(mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED){
273 UNLOCK(mutex->lock);
274 return(_new_pthread_mutex_lock(mutex));
275 }
276#endif /* __i386__ || __x86_64__ */
277 else if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL)
278 {
279 self = pthread_self();
280 if (mutex->owner == self)
281 {
282 int res;
283
284 if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE)
285 {
286 if (mutex->mtxopts.options.lock_count < USHRT_MAX)
287 {
288 mutex->mtxopts.options.lock_count++;
289 PLOCKSTAT_MUTEX_ACQUIRE(mutex, 1, 0);
290 res = 0;
291 } else {
292 res = EAGAIN;
293 PLOCKSTAT_MUTEX_ERROR(mutex, res);
294 }
295 } else { /* PTHREAD_MUTEX_ERRORCHECK */
296 res = EDEADLK;
297 PLOCKSTAT_MUTEX_ERROR(mutex, res);
298 }
299 UNLOCK(mutex->lock);
300 return (res);
301 }
302 } else
303 self = _PTHREAD_MUTEX_OWNER_SELF;
304
305 if (mutex->owner != (pthread_t)NULL) {
306 if (mutex->waiters || mutex->owner != _PTHREAD_MUTEX_OWNER_SWITCHING)
307 {
308 semaphore_t sem, order;
309
310 if (++mutex->waiters == 1)
311 {
312 mutex->sem = sem = new_sem_from_pool();
313 mutex->order = order = new_sem_from_pool();
314 }
315 else
316 {
317 sem = mutex->sem;
318 order = mutex->order;
319 do {
320 PTHREAD_MACH_CALL(semaphore_wait(order), kern_res);
321 } while (kern_res == KERN_ABORTED);
322 }
323 UNLOCK(mutex->lock);
324
325 PLOCKSTAT_MUTEX_BLOCK(mutex);
326 PTHREAD_MACH_CALL(semaphore_wait_signal(sem, order), kern_res);
327 while (kern_res == KERN_ABORTED)
328 {
329 PTHREAD_MACH_CALL(semaphore_wait(sem), kern_res);
330 }
331
332 PLOCKSTAT_MUTEX_BLOCKED(mutex, BLOCK_SUCCESS_PLOCKSTAT);
333
334 LOCK(mutex->lock);
335 if (--mutex->waiters == 0)
336 {
337 PTHREAD_MACH_CALL(semaphore_wait(order), kern_res);
338 mutex->sem = mutex->order = SEMAPHORE_NULL;
339 restore_sem_to_pool(order);
340 restore_sem_to_pool(sem);
341 }
342 }
343 else if (mutex->owner == _PTHREAD_MUTEX_OWNER_SWITCHING)
344 {
345 semaphore_t sem = mutex->sem;
346 do {
347 PTHREAD_MACH_CALL(semaphore_wait(sem), kern_res);
348 } while (kern_res == KERN_ABORTED);
349 mutex->sem = SEMAPHORE_NULL;
350 restore_sem_to_pool(sem);
351 }
352 }
353
354 mutex->mtxopts.options.lock_count = 1;
355 mutex->owner = self;
356 UNLOCK(mutex->lock);
357 PLOCKSTAT_MUTEX_ACQUIRE(mutex, 0, 0);
358 return (0);
359}
360
361/*
362 * Attempt to lock a mutex, but don't block if this isn't possible.
363 */
364int
365pthread_mutex_trylock(pthread_mutex_t *mutex)
366{
367 kern_return_t kern_res;
368 pthread_t self;
369
370 LOCK(mutex->lock);
371 if (mutex->sig != _PTHREAD_MUTEX_SIG)
372 {
373 if (mutex->sig != _PTHREAD_MUTEX_SIG_init)
374 {
375 PLOCKSTAT_MUTEX_ERROR(mutex, EINVAL);
376 UNLOCK(mutex->lock);
377 return (EINVAL);
378 }
379 _pthread_mutex_init(mutex, NULL);
380 self = _PTHREAD_MUTEX_OWNER_SELF;
381 }
382#if defined(__i386__) || defined(__x86_64__)
383 else if(mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED){
384 UNLOCK(mutex->lock);
385 return(_new_pthread_mutex_trylock(mutex));
386 }
387#endif /* __i386__ || __x86_64__ */
388 else if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL)
389 {
390 self = pthread_self();
391 if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE)
392 {
393 if (mutex->owner == self)
394 {
395 int res;
396
397 if (mutex->mtxopts.options.lock_count < USHRT_MAX)
398 {
399 mutex->mtxopts.options.lock_count++;
400 PLOCKSTAT_MUTEX_ACQUIRE(mutex, 1, 0);
401 res = 0;
402 } else {
403 res = EAGAIN;
404 PLOCKSTAT_MUTEX_ERROR(mutex, res);
405 }
406 UNLOCK(mutex->lock);
407 return (res);
408 }
409 }
410 } else
411 self = _PTHREAD_MUTEX_OWNER_SELF;
412
413 if (mutex->owner != (pthread_t)NULL)
414 {
415 if (mutex->waiters || mutex->owner != _PTHREAD_MUTEX_OWNER_SWITCHING)
416 {
417 PLOCKSTAT_MUTEX_ERROR(mutex, EBUSY);
418 UNLOCK(mutex->lock);
419 return (EBUSY);
420 }
421 else if (mutex->owner == _PTHREAD_MUTEX_OWNER_SWITCHING)
422 {
423 semaphore_t sem = mutex->sem;
424
425 do {
426 PTHREAD_MACH_CALL(semaphore_wait(sem), kern_res);
427 } while (kern_res == KERN_ABORTED);
428 restore_sem_to_pool(sem);
429 mutex->sem = SEMAPHORE_NULL;
430 }
431 }
432
433 mutex->mtxopts.options.lock_count = 1;
434 mutex->owner = self;
435 UNLOCK(mutex->lock);
436 PLOCKSTAT_MUTEX_ACQUIRE(mutex, 0, 0);
437 return (0);
438}
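
/*
 * Usage sketch (illustrative only): as implemented above,
 * pthread_mutex_trylock() returns EBUSY instead of blocking when the mutex
 * is already held by another thread.
 */
#if 0
#include <errno.h>
#include <pthread.h>

static int
example_try_and_work(pthread_mutex_t *m)
{
	int err = pthread_mutex_trylock(m);

	if (err == EBUSY)
		return (0);	/* somebody else holds it; skip the work */
	if (err != 0)
		return (err);	/* EINVAL, EAGAIN, ... */

	/* ... critical section ... */
	return (pthread_mutex_unlock(m));
}
#endif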
439
440/*
441 * Unlock a mutex.
442 * TODO: Priority inheritance stuff
443 */
444int
445pthread_mutex_unlock(pthread_mutex_t *mutex)
446{
447 kern_return_t kern_res;
448 int waiters;
449 int sig = mutex->sig;
450
451
452 /* To provide backwards compat for apps using mutex incorrectly */
453
454 if ((sig != _PTHREAD_MUTEX_SIG) && (sig != _PTHREAD_MUTEX_SIG_init)) {
455 PLOCKSTAT_MUTEX_ERROR(mutex, EINVAL);
456 return(EINVAL);
457 }
458 LOCK(mutex->lock);
459 if (mutex->sig != _PTHREAD_MUTEX_SIG)
460 {
461 if (mutex->sig != _PTHREAD_MUTEX_SIG_init)
462 {
463 PLOCKSTAT_MUTEX_ERROR(mutex, EINVAL);
464 UNLOCK(mutex->lock);
465 return (EINVAL);
466 }
467 _pthread_mutex_init(mutex, NULL);
468 }
469#if defined(__i386__) || defined(__x86_64__)
470 else if(mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED){
471 UNLOCK(mutex->lock);
472 return(_new_pthread_mutex_unlock(mutex));
473 }
474#endif /* __i386__ || __x86_64__ */
475 else if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL)
476 {
477 pthread_t self = pthread_self();
478 if (mutex->owner != self)
479 {
480 PLOCKSTAT_MUTEX_ERROR(mutex, EPERM);
481 UNLOCK(mutex->lock);
482 return EPERM;
483 } else if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE &&
484 --mutex->mtxopts.options.lock_count)
485 {
486 PLOCKSTAT_MUTEX_RELEASE(mutex, 1);
487 UNLOCK(mutex->lock);
488 return(0);
489 }
490 }
491
492 mutex->mtxopts.options.lock_count = 0;
493
494 waiters = mutex->waiters;
495 if (waiters)
496 {
497 mutex->owner = _PTHREAD_MUTEX_OWNER_SWITCHING;
498 PLOCKSTAT_MUTEX_RELEASE(mutex, 0);
499 UNLOCK(mutex->lock);
500 PTHREAD_MACH_CALL(semaphore_signal(mutex->sem), kern_res);
501 }
502 else
503 {
504 mutex->owner = (pthread_t)NULL;
505 PLOCKSTAT_MUTEX_RELEASE(mutex, 0);
506 UNLOCK(mutex->lock);
507 }
508 return (0);
509}
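
/*
 * Usage sketch (illustrative only) of the error-checking behaviour coded in
 * the lock/unlock paths above: with PTHREAD_MUTEX_ERRORCHECK a relock by the
 * owner fails with EDEADLK, and an unlock by a non-owner fails with EPERM.
 */
#if 0
#include <pthread.h>

static void
example_errorcheck(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t m;
	int err;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&m, &attr);
	pthread_mutexattr_destroy(&attr);

	err = pthread_mutex_lock(&m);	/* 0: first lock succeeds */
	err = pthread_mutex_lock(&m);	/* EDEADLK: owner relock refused */
	err = pthread_mutex_unlock(&m);	/* 0: owner may unlock */
	err = pthread_mutex_unlock(&m);	/* EPERM: no longer the owner */
	(void)err;
	pthread_mutex_destroy(&m);
}
#endif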
510
511/*
512 * Fetch the priority ceiling value from a mutex variable.
513 * Note: written as a 'helper' function to hide implementation details.
514 */
515int
516pthread_mutex_getprioceiling(const pthread_mutex_t *mutex,
517 int *prioceiling)
518{
519 int res;
520
521 LOCK(mutex->lock);
522 if (mutex->sig == _PTHREAD_MUTEX_SIG)
523 {
524 *prioceiling = mutex->prioceiling;
525 res = 0;
526 } else
527 res = EINVAL; /* Not an initialized 'attribute' structure */
528 UNLOCK(mutex->lock);
529 return (res);
530}
531
532/*
533 * Set the priority ceiling for a mutex.
534 * Note: written as a 'helper' function to hide implementation details.
535 */
536int
537pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
538 int prioceiling,
539 int *old_prioceiling)
540{
541 int res;
542
543 LOCK(mutex->lock);
544 if (mutex->sig == _PTHREAD_MUTEX_SIG)
545 {
546 if ((prioceiling >= -999) &&
547 (prioceiling <= 999))
548 {
549 *old_prioceiling = mutex->prioceiling;
550 mutex->prioceiling = prioceiling;
551 res = 0;
552 } else
553 res = EINVAL; /* Invalid parameter */
554 } else
555 res = EINVAL; /* Not an initialized 'attribute' structure */
556 UNLOCK(mutex->lock);
557 return (res);
558}
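
/*
 * Usage sketch (illustrative only): reading back and changing the priority
 * ceiling stored in an initialized mutex via the helpers above.
 */
#if 0
#include <pthread.h>

static int
example_bump_ceiling(pthread_mutex_t *m, int new_ceiling)
{
	int old_ceiling;
	int err;

	err = pthread_mutex_getprioceiling(m, &old_ceiling);
	if (err == 0)
		err = pthread_mutex_setprioceiling(m, new_ceiling, &old_ceiling);
	return (err);
}
#endif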
559
560/*
561 * Get the priority ceiling value from a mutex attribute structure.
562 * Note: written as a 'helper' function to hide implementation details.
563 */
564int
565pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *attr,
566 int *prioceiling)
567{
568 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG)
569 {
570 *prioceiling = attr->prioceiling;
571 return (0);
572 } else
573 {
574 return (EINVAL); /* Not an initialized 'attribute' structure */
575 }
576}
577
578/*
579 * Get the mutex 'protocol' value from a mutex attribute structure.
580 * Note: written as a 'helper' function to hide implementation details.
581 */
582int
583pthread_mutexattr_getprotocol(const pthread_mutexattr_t *attr,
584 int *protocol)
585{
586 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG)
587 {
588 *protocol = attr->protocol;
589 return (0);
590 } else
591 {
592 return (EINVAL); /* Not an initialized 'attribute' structure */
593 }
594}
595/*
596 * Get the mutex 'type' value from a mutex attribute structure.
597 * Note: written as a 'helper' function to hide implementation details.
598 */
599int
600pthread_mutexattr_gettype(const pthread_mutexattr_t *attr,
601 int *type)
602{
603 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG)
604 {
605 *type = attr->type;
606 return (0);
607 } else
608 {
609 return (EINVAL); /* Not an initialized 'attribute' structure */
610 }
611}
612
613/*
614 * Get the mutex 'pshared' value from a mutex attribute structure.
615 */
616int
617pthread_mutexattr_getpshared(const pthread_mutexattr_t *attr, int *pshared)
618{
619 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG)
620 {
621 *pshared = (int)attr->pshared;
622 return (0);
623 } else
624 {
625 return (EINVAL); /* Not an initialized 'attribute' structure */
626 }
627}
628
629/*
630 * Initialize a mutex attribute structure to system defaults.
631 */
632int
633pthread_mutexattr_init(pthread_mutexattr_t *attr)
634{
635 attr->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
636 attr->protocol = _PTHREAD_DEFAULT_PROTOCOL;
637 attr->policy = _PTHREAD_MUTEX_POLICY_FAIRSHARE;
638 attr->type = PTHREAD_MUTEX_DEFAULT;
639 attr->sig = _PTHREAD_MUTEX_ATTR_SIG;
640 attr->pshared = _PTHREAD_DEFAULT_PSHARED;
641 return (0);
642}
643
644/*
645 * Set the priority ceiling value in a mutex attribute structure.
646 * Note: written as a 'helper' function to hide implementation details.
647 */
648int
649pthread_mutexattr_setprioceiling(pthread_mutexattr_t *attr,
650 int prioceiling)
651{
652 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG)
653 {
654 if ((prioceiling >= -999) &&
655 (prioceiling <= 999))
656 {
657 attr->prioceiling = prioceiling;
658 return (0);
659 } else
660 {
661 return (EINVAL); /* Invalid parameter */
662 }
663 } else
664 {
665 return (EINVAL); /* Not an initialized 'attribute' structure */
666 }
667}
668
669/*
670 * Set the mutex 'protocol' value in a mutex attribute structure.
671 * Note: written as a 'helper' function to hide implementation details.
672 */
673int
674pthread_mutexattr_setprotocol(pthread_mutexattr_t *attr,
675 int protocol)
676{
677 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG)
678 {
679 if ((protocol == PTHREAD_PRIO_NONE) ||
680 (protocol == PTHREAD_PRIO_INHERIT) ||
681 (protocol == PTHREAD_PRIO_PROTECT))
682 {
683 attr->protocol = protocol;
684 return (0);
685 } else
686 {
687 return (EINVAL); /* Invalid parameter */
688 }
689 } else
690 {
691 return (EINVAL); /* Not an initialized 'attribute' structure */
692 }
693}
694
695#ifdef NOTYET
696int
697pthread_mutexattr_setpolicy_np(pthread_mutexattr_t *attr,
698 int policy)
699{
700 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG)
701 {
702 if ((policy == _PTHREAD_MUTEX_POLICY_FAIRSHARE) ||
703 (policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT) ||
704 (policy == _PTHREAD_MUTEX_POLICY_REALTIME) ||
705 (policy == _PTHREAD_MUTEX_POLICY_ADAPTIVE) ||
706 (policy == _PTHREAD_MUTEX_POLICY_PRIPROTECT) ||
707 (policy == _PTHREAD_MUTEX_POLICY_PRIINHERIT))
708 {
709 attr->policy = policy;
710 return (0);
711 } else
712 {
713 return (EINVAL); /* Invalid parameter */
714 }
715 } else
716 {
717 return (EINVAL); /* Not an initialized 'attribute' structure */
718 }
719}
720#endif /* NOTYET */
721
722/*
723 * Set the mutex 'type' value in a mutex attribute structure.
724 * Note: written as a 'helper' function to hide implementation details.
725 */
726int
727pthread_mutexattr_settype(pthread_mutexattr_t *attr,
728 int type)
729{
730 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG)
731 {
732 if ((type == PTHREAD_MUTEX_NORMAL) ||
733 (type == PTHREAD_MUTEX_ERRORCHECK) ||
734 (type == PTHREAD_MUTEX_RECURSIVE) ||
735 (type == PTHREAD_MUTEX_DEFAULT))
736 {
737 attr->type = type;
738 return (0);
739 } else
740 {
741 return (EINVAL); /* Invalid parameter */
742 }
743 } else
744 {
745 return (EINVAL); /* Not an initialized 'attribute' structure */
746 }
747}
748
749
750int mutex_try_lock(int *x) {
751 return _spin_lock_try((pthread_lock_t *)x);
752}
753
754void mutex_wait_lock(int *x) {
755 for (;;) {
756 if( _spin_lock_try((pthread_lock_t *)x)) {
757 return;
758 }
759 swtch_pri(0);
760 }
761}
762
763void
764cthread_yield(void)
765{
766 sched_yield();
767}
768
769void
770pthread_yield_np (void)
771{
772 sched_yield();
773}
774
775
776/*
777 * Temp: till pshared is fixed correctly
778 */
779int
780pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
781{
782#if __DARWIN_UNIX03
783 if (__unix_conforming == 0)
784 __unix_conforming = 1;
785#endif /* __DARWIN_UNIX03 */
786
787 if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG)
788 {
789#if __DARWIN_UNIX03
790#ifdef PR_5243343
791 if (( pshared == PTHREAD_PROCESS_PRIVATE) || (pshared == PTHREAD_PROCESS_SHARED && PR_5243343_flag))
792#else /* !PR_5243343 */
793 if (( pshared == PTHREAD_PROCESS_PRIVATE) || (pshared == PTHREAD_PROCESS_SHARED))
794#endif /* PR_5243343 */
795#else /* __DARWIN_UNIX03 */
796 if ( pshared == PTHREAD_PROCESS_PRIVATE)
797#endif /* __DARWIN_UNIX03 */
798 {
799 attr->pshared = pshared;
800 return (0);
801 } else
802 {
803 return (EINVAL); /* Invalid parameter */
804 }
805 } else
806 {
807 return (EINVAL); /* Not an initialized 'attribute' structure */
808 }
809}
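
/*
 * Usage sketch (illustrative only, assuming PTHREAD_PROCESS_SHARED is
 * accepted as in the conforming path above): a mutex placed in memory that
 * is shared with a child process.
 */
#if 0
#include <pthread.h>
#include <sys/mman.h>

static pthread_mutex_t *
example_shared_mutex(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t *m;

	m = mmap(NULL, sizeof(*m), PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_SHARED, -1, 0);
	if (m == MAP_FAILED)
		return (NULL);

	pthread_mutexattr_init(&attr);
	if (pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED) != 0 ||
	    pthread_mutex_init(m, &attr) != 0) {
		pthread_mutexattr_destroy(&attr);
		munmap(m, sizeof(*m));
		return (NULL);
	}
	pthread_mutexattr_destroy(&attr);
	return (m);	/* usable across fork() by parent and child */
}
#endif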
810
811#if defined(__i386__) || defined(__x86_64__)
812
813/*
814 * Acquire lock seq for condition var signalling/broadcast
815 */
816__private_extern__ void
817__mtx_holdlock(npthread_mutex_t * mutex, uint32_t diff, uint32_t * flagp, uint32_t **pmtxp, uint32_t * mgenp, uint32_t * ugenp)
818{
819 uint32_t mgen, ugen, ngen;
820 int hold = 0;
821 int firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
822 uint32_t * lseqaddr;
823 uint32_t * useqaddr;
824
825
826#if _KSYN_TRACE_
827 (void)__kdebug_trace(_KSYN_TRACE_UM_MHOLD | DBG_FUNC_START, (uint32_t)mutex, diff, firstfit, 0, 0);
828#endif
829 if (mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED) {
830 /* no holds for shared mutexes */
831 hold = 2;
832 mgen = 0;
833 ugen = 0;
834 MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);
835 goto out;
836 } else {
837 lseqaddr = mutex->m_lseqaddr;
838 useqaddr = mutex->m_useqaddr;
839 }
840
841retry:
842 mgen = *lseqaddr;
843 ugen = *useqaddr;
844 /* no need to do extra wrap */
845 ngen = mgen + (PTHRW_INC * diff);
846 hold = 0;
847
848
849#if _KSYN_TRACE_
850 (void)__kdebug_trace(_KSYN_TRACE_UM_MHOLD | DBG_FUNC_NONE, (uint32_t)mutex, 0, mgen, ngen, 0);
851#endif
852 /* can we acquire the lock ? */
853 if ((mgen & PTHRW_EBIT) == 0) {
854 /* if it is firstfit, no need to hold till the cvar returns */
855 if (firstfit == 0) {
856 ngen |= PTHRW_EBIT;
857 hold = 1;
858 }
859#if _KSYN_TRACE_
860 (void)__kdebug_trace(_KSYN_TRACE_UM_MHOLD | DBG_FUNC_NONE, (uint32_t)mutex, 1, mgen, ngen, 0);
861#endif
862 }
863
864 /* update lockseq */
865 if (OSAtomicCompareAndSwap32(mgen, ngen, (volatile int32_t *)lseqaddr) != TRUE)
866 goto retry;
867 if (hold == 1) {
868 mutex->m_tid = PTHREAD_MTX_TID_SWITCHING;
869 }
870#if _KSYN_TRACE_
871 (void)__kdebug_trace(_KSYN_TRACE_UM_MHOLD | DBG_FUNC_NONE, (uint32_t)mutex, 2, hold, 0, 0);
872#endif
873
874out:
875 if (flagp != NULL) {
876 if (hold == 1) {
877 *flagp = (mutex->mtxopts.value | _PTHREAD_MTX_OPT_HOLD);
878 } else if (hold == 2) {
879 *flagp = (mutex->mtxopts.value | _PTHREAD_MTX_OPT_NOHOLD);
880 } else {
881 *flagp = mutex->mtxopts.value;
882 }
883 }
884 if (mgenp != NULL)
885 *mgenp = mgen;
886 if (ugenp != NULL)
887 *ugenp = ugen;
888 if (pmtxp != NULL)
889 *pmtxp = lseqaddr;
890#if _KSYN_TRACE_
891 (void)__kdebug_trace(_KSYN_TRACE_UM_MHOLD | DBG_FUNC_END, (uint32_t)mutex, hold, 0, 0, 0);
892#endif
893}
894
895
896/*
897 * Drop the mutex unlock references (from cond wait or mutex_unlock()).
898 * mgenp and ugenp valid only if notifyp is set
899 *
900 */
901__private_extern__ int
902__mtx_droplock(npthread_mutex_t * mutex, int count, uint32_t * flagp, uint32_t ** pmtxp, uint32_t * mgenp, uint32_t * ugenp, uint32_t *notifyp)
903{
904 int oldval, newval, lockval, unlockval;
905 uint64_t oldtid;
906 pthread_t self = pthread_self();
907 uint32_t notify = 0;
908 uint64_t oldval64, newval64;
909 uint32_t * lseqaddr;
910 uint32_t * useqaddr;
911 int firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
912
913#if _KSYN_TRACE_
914 (void)__kdebug_trace(_KSYN_TRACE_UM_MDROP | DBG_FUNC_START, (uint32_t)mutex, count, 0, 0, 0);
915#endif
916 if (mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED) {
917 MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);
918 } else {
919 lseqaddr = mutex->m_lseqaddr;
920 useqaddr = mutex->m_useqaddr;
921 }
922
923 if (flagp != NULL)
924 *flagp = mutex->mtxopts.value;
925
926 if (firstfit != 0)
927 notify |= 0x80000000;
928 if (mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED)
929 notify |= 0x40000000;
930
931 if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL)
932 {
933 if (mutex->m_tid != (uint64_t)((uintptr_t)self))
934 {
935 PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, EPERM);
936 return(EPERM);
937 } else if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE &&
938 --mutex->mtxopts.options.lock_count)
939 {
940 PLOCKSTAT_MUTEX_RELEASE((pthread_mutex_t *)mutex, 1);
941 goto out;
942 }
943 }
944
945
946 if (mutex->m_tid != (uint64_t)((uintptr_t)self))
947 return(EINVAL);
948
949
950ml0:
951 oldval = *useqaddr;
952 unlockval = oldval + (PTHRW_INC * count);
953 lockval = *lseqaddr;
954
955
956#if _KSYN_TRACE_
957 (void)__kdebug_trace(_KSYN_TRACE_UM_MDROP | DBG_FUNC_NONE, (uint32_t)mutex, 10, lockval, oldval, 0);
958#endif
959#if 1
960 if (lockval == oldval)
961 LIBC_ABORT("same unlock and lockseq \n");
962#endif
963
964 if ((lockval & PTHRW_COUNT_MASK) == unlockval) {
965 oldtid = mutex->m_tid;
966
967 mutex->m_tid = 0;
968
969 oldval64 = (((uint64_t)oldval) << 32);
970 oldval64 |= lockval;
971
972 newval64 = 0;
973
974 if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lseqaddr) == TRUE) {
975#if _KSYN_TRACE_
976 (void)__kdebug_trace(_KSYN_TRACE_UM_MDROP | DBG_FUNC_NONE, (uint32_t)mutex, 1, 0, 0, 0);
977#endif
978 goto out;
979 } else {
980 mutex->m_tid = oldtid;
981 /* fall thru for kernel call */
982 goto ml0;
983 }
984 }
985
986 if (firstfit != 0) {
987 /* reset ebit along with unlock */
988 newval = (lockval & ~PTHRW_EBIT);
989
990 lockval = newval;
991 oldval64 = (((uint64_t)oldval) << 32);
992 oldval64 |= lockval;
993
994 newval64 = (((uint64_t)unlockval) << 32);
995 newval64 |= newval;
996
997 if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lseqaddr) != TRUE) {
998 goto ml0;
999 }
1000 lockval = newval;
1001 } else {
1002 /* fairshare , just update and go to kernel */
1003 if (OSAtomicCompareAndSwap32(oldval, unlockval, (volatile int32_t *)useqaddr) != TRUE)
1004 goto ml0;
1005
1006#if _KSYN_TRACE_
1007 (void)__kdebug_trace(_KSYN_TRACE_UM_MDROP | DBG_FUNC_NONE, (uint32_t)mutex, 2, oldval, unlockval, 0);
1008#endif
1009 }
1010
1011 notify |= 1;
1012
1013 if (notifyp != 0) {
1014 if (mgenp != NULL)
1015 *mgenp = lockval;
1016 if (ugenp != NULL)
1017 *ugenp = unlockval;
1018 if (pmtxp != NULL)
1019 *pmtxp = lseqaddr;
1020 *notifyp = notify;
1021 }
1022out:
1023 if (notifyp != 0) {
1024 *notifyp = notify;
1025 }
1026#if _KSYN_TRACE_
1027 (void)__kdebug_trace(_KSYN_TRACE_UM_MDROP | DBG_FUNC_END, (uint32_t)mutex, 0, 0, 0, 0);
1028#endif
1029 return(0);
1030}
1031
1032int
1033__mtx_updatebits(npthread_mutex_t *mutex, uint32_t oupdateval, int firstfit, int fromcond)
1034{
1035 uint32_t lgenval, newval, bits;
1036 int isebit = 0;
1037 uint32_t updateval = oupdateval;
1038 pthread_mutex_t * omutex = (pthread_mutex_t *)mutex;
1039 uint32_t * lseqaddr;
1040 uint32_t * useqaddr;
1041
1042 if (mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED) {
1043 MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);
1044 } else {
1045 lseqaddr = mutex->m_lseqaddr;
1046 useqaddr = mutex->m_useqaddr;
1047 }
1048#if _KSYN_TRACE_
1049 (void)__kdebug_trace(_KSYN_TRACE_UM_MUBITS | DBG_FUNC_START, (uint32_t)mutex, oupdateval, firstfit, fromcond, 0);
1050#endif
1051
1052retry:
1053 lgenval = *lseqaddr;
1054 bits = updateval & PTHRW_BIT_MASK;
1055
1056 if (lgenval == updateval)
1057 goto out;
1058
1059#if _KSYN_TRACE_
1060 (void)__kdebug_trace(_KSYN_TRACE_UM_MUBITS | DBG_FUNC_NONE, (uint32_t)mutex, 1, lgenval, updateval, 0);
1061#endif
1062 if ((lgenval & PTHRW_BIT_MASK) == bits)
1063 goto out;
1064
1065#if _KSYN_TRACE_
1066 (void)__kdebug_trace(_KSYN_TRACE_UM_MUBITS | DBG_FUNC_NONE, (uint32_t)mutex, 2, lgenval, bits, 0);
1067#endif
1068 /* firstfit might not have EBIT */
1069 if (firstfit != 0) {
1070 if ((lgenval & PTHRW_EBIT) != 0) /* see whether EBIT is set */
1071 isebit = 1;
1072 lgenval &= ~PTHRW_EBIT;
1073 }
1074
1075 if ((lgenval & PTHRW_COUNT_MASK) == (updateval & PTHRW_COUNT_MASK)) {
1076#if _KSYN_TRACE_
1077 (void)__kdebug_trace(_KSYN_TRACE_UM_MUBITS | DBG_FUNC_NONE, (uint32_t)mutex, 3, lgenval, updateval, 0);
1078#endif
1079 updateval |= PTHRW_EBIT; /* just in case.. */
1080 if (OSAtomicCompareAndSwap32(lgenval, updateval, (volatile int32_t *)lseqaddr) != TRUE) {
1081 if (firstfit == 0)
1082 goto retry;
1083 goto handleffit;
1084 }
1085 /* updated successfully */
1086 goto out;
1087 }
1088
1089
1090 if (((lgenval & PTHRW_WBIT) != 0) && ((updateval & PTHRW_WBIT) == 0)) {
1091 newval = lgenval | (bits | PTHRW_WBIT | PTHRW_EBIT);
1092 } else {
1093 newval = lgenval | (bits | PTHRW_EBIT);
1094 }
1095
1096#if _KSYN_TRACE_
1097 (void)__kdebug_trace(_KSYN_TRACE_UM_MUBITS | DBG_FUNC_NONE, (uint32_t)mutex, 4, lgenval, newval, 0);
1098#endif
1099 if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) != TRUE) {
1100 if (firstfit == 0)
1101 goto retry;
1102 goto handleffit;
1103 }
1104out:
1105 /* successful bits update */
1106 mutex->m_tid = (uint64_t)((uintptr_t)pthread_self());
1107#if _KSYN_TRACE_
1108 (void)__kdebug_trace(_KSYN_TRACE_UM_MUBITS | DBG_FUNC_END, (uint32_t)mutex, 0, 0, 0, 0);
1109#endif
1110 return(0);
1111
1112handleffit:
1113 /* firstfit failure */
1114 newval = *lseqaddr;
1115 if ((newval & PTHRW_EBIT) == 0)
1116 goto retry;
1117 if (((lgenval & PTHRW_COUNT_MASK) == (newval & PTHRW_COUNT_MASK)) && (isebit == 1)) {
1118 if (fromcond == 0)
1119 return(1);
1120 else {
1121 /* called from condition variable code block again */
1122ml1:
1123#if USE_COMPAGE /* [ */
1124 updateval = __psynch_mutexwait((pthread_mutex_t *)lseqaddr, newval | PTHRW_RETRYBIT, *useqaddr, (uint64_t)0,
1125 mutex->mtxopts.value);
1126#else /* USECOMPAGE ][ */
1127 updateval = __psynch_mutexwait(omutex, newval | PTHRW_RETRYBIT, *useqaddr, (uint64_t)0, mutex->mtxopts.value);
1128#endif /* USE_COMPAGE ] */
1129 if (updateval == (uint32_t)-1) {
1130 goto ml1;
1131 }
1132
1133 goto retry;
1134 }
1135 }
1136 /* seqcount changed, retry */
1137 goto retry;
1138}
1139
1140int
1141_new_pthread_mutex_lock(pthread_mutex_t *omutex)
1142{
1143 pthread_t self;
1144 npthread_mutex_t * mutex = (npthread_mutex_t *)omutex;
1145 int sig = mutex->sig;
1146 int retval;
1147 uint32_t oldval, newval, uval, updateval;
1148 int gotlock = 0;
1149 int firstfit = 0;
1150 int retrybit = 0;
1151 uint32_t * lseqaddr;
1152 uint32_t * useqaddr;
1153 int updatebitsonly = 0;
1154#if USE_COMPAGE
1155 uint64_t mytid;
1156 int sysret = 0;
1157 uint32_t mask;
1158#else
1159
1160#endif
1161
1162 /* To provide backwards compat for apps using mutex incorrectly */
1163 if ((sig != _PTHREAD_MUTEX_SIG) && (sig != _PTHREAD_MUTEX_SIG_init)) {
1164 PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);
1165 return(EINVAL);
1166 }
1167 if (sig != _PTHREAD_MUTEX_SIG) {
1168 LOCK(mutex->lock);
1169 if ((sig != _PTHREAD_MUTEX_SIG) && (sig == _PTHREAD_MUTEX_SIG_init)) {
1170 /* static initializer, init the mutex */
1171 _new_pthread_mutex_init(omutex, NULL);
1172 self = _PTHREAD_MUTEX_OWNER_SELF;
1173 } else {
1174 UNLOCK(mutex->lock);
1175 PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);
1176 return(EINVAL);
1177 }
1178 UNLOCK(mutex->lock);
1179 }
1180
1181#if _KSYN_TRACE_
1182 (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_START, (uint32_t)mutex, 0, 0, 0, 0);
1183#endif
1184 if (mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED) {
1185 MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);
1186 } else {
1187 lseqaddr = mutex->m_lseqaddr;
1188 useqaddr = mutex->m_useqaddr;
1189 }
1190
1191 self = pthread_self();
1192 if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
1193 if (mutex->m_tid == (uint64_t)((uintptr_t)self)) {
1194 if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE)
1195 {
1196 if (mutex->mtxopts.options.lock_count < USHRT_MAX)
1197 {
1198 mutex->mtxopts.options.lock_count++;
1199 PLOCKSTAT_MUTEX_ACQUIRE(omutex, 1, 0);
1200 retval = 0;
1201 } else {
1202 retval = EAGAIN;
1203 PLOCKSTAT_MUTEX_ERROR(omutex, retval);
1204 }
1205 } else { /* PTHREAD_MUTEX_ERRORCHECK */
1206 retval = EDEADLK;
1207 PLOCKSTAT_MUTEX_ERROR(omutex, retval);
1208 }
1209 return (retval);
1210 }
1211 }
1212#if _KSYN_TRACE_
1213 (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_NONE, (uint32_t)mutex, 1, 0, 0, 0);
1214#endif
1215loop:
1216#if USE_COMPAGE /* [ */
1217
1218 mytid = (uint64_t)((uintptr_t)pthread_self());
1219
1220ml0:
1221 mask = PTHRW_EBIT;
1222 retval = _commpage_pthread_mutex_lock(lseqaddr, mutex->mtxopts.value, mytid, mask, &mutex->m_tid, &sysret);
1223 if (retval == 0) {
1224 gotlock = 1;
1225 } else if (retval == 1) {
1226 gotlock = 1;
1227 updateval = sysret;
1228 /* returns 0 on successful update */
1229 if (__mtx_updatebits( mutex, updateval, firstfit, 0) == 1) {
1230 /* could not acquire, may be locked in ffit case */
1231#if USE_COMPAGE
1232 LIBC_ABORT("comapge implementatin looping in libc \n");
1233#endif
1234 goto ml0;
1235 }
1236 }
1237#if NEVERINCOMPAGE
1238 else if (retval == 3) {
1239 cthread_set_errno_self(sysret);
1240 oldval = *lseqaddr;
1241 uval = *useqaddr;
1242 newval = oldval + PTHRW_INC;
1243 gotlock = 0;
1244 /* to block in the kernel again */
1245 }
1246#endif
1247 else {
1248 LIBC_ABORT("comapge implementatin bombed \n");
1249 }
1250
1251
1252#else /* USECOMPAGE ][ */
1253 oldval = *lseqaddr;
1254 uval = *useqaddr;
1255 newval = oldval + PTHRW_INC;
1256
1257 (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_NONE, (uint32_t)mutex, 2, oldval, uval, 0);
1258
1259 if((oldval & PTHRW_EBIT) == 0) {
1260 gotlock = 1;
1261 newval |= PTHRW_EBIT;
1262 } else {
1263 gotlock = 0;
1264 newval |= PTHRW_WBIT;
1265 }
1266
1267 if (OSAtomicCompareAndSwap32(oldval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
1268 if (gotlock != 0)
1269 mutex->m_tid = (uint64_t)((uintptr_t)self);
1270#if _KSYN_TRACE_
1271 (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_NONE, (uint32_t)mutex, 2, oldval, newval, 0);
1272#endif
1273 } else
1274 goto loop;
1275
1276
1277 retrybit = 0;
1278 if (gotlock == 0) {
1279#if _KSYN_TRACE_
1280 (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_NONE, (uint32_t)mutex, 3, 0, 0, 0);
1281#endif
1282 firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
1283ml1:
1284 updateval = __psynch_mutexwait(omutex, newval | retrybit, uval, (uint64_t)0,
1285 mutex->mtxopts.value);
1286
1287 if (updateval == (uint32_t)-1) {
1288 updatebitsonly = 0;
1289 goto ml1;
1290 }
1291
1292#if _KSYN_TRACE_
1293 (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_NONE, (uint32_t)mutex, 4, updateval, 0, 0);
1294#endif
1295 /* returns 0 on successful update */
1296 if (__mtx_updatebits( mutex, updateval, firstfit, 0) == 1) {
1297 /* could not acquire, may be locked in ffit case */
1298 retrybit = PTHRW_RETRYBIT;
1299#if USE_COMPAGE
1300 LIBC_ABORT("comapge implementatin looping in libc \n");
1301
1302#endif
1303 goto ml1;
1304 }
1305 }
1306#endif /* USE_COMPAGE ] */
1307
1308 if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE)
1309 mutex->mtxopts.options.lock_count++;
1310
1311#if _KSYN_TRACE_
1312 (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_END, (uint32_t)mutex, 0, 0, 0, 0);
1313#endif
1314 return (0);
1315}
1316
1317/*
1318 * Attempt to lock a mutex, but don't block if this isn't possible.
1319 */
1320int
1321_new_pthread_mutex_trylock(pthread_mutex_t *omutex)
1322{
1323 npthread_mutex_t * mutex = (npthread_mutex_t *)omutex;
1324 int sig = mutex->sig;
1325 uint32_t oldval, newval;
1326 int error = 0;
1327 pthread_t self;
1328 uint32_t * lseqaddr;
1329 uint32_t * useqaddr;
1330
1331 /* To provide backwards compat for apps using mutex incorrectly */
1332 if ((sig != _PTHREAD_MUTEX_SIG) && (sig != _PTHREAD_MUTEX_SIG_init)) {
1333 PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);
1334 return(EINVAL);
1335 }
1336
1337 if (sig != _PTHREAD_MUTEX_SIG) {
1338 LOCK(mutex->lock);
1339 if ((sig != _PTHREAD_MUTEX_SIG) && (sig == _PTHREAD_MUTEX_SIG_init)) {
1340 /* static initializer, init the mutex */
1341 _new_pthread_mutex_init(omutex, NULL);
1342 self = _PTHREAD_MUTEX_OWNER_SELF;
1343 } else {
1344 UNLOCK(mutex->lock);
1345 PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);
1346 return(EINVAL);
1347 }
1348 UNLOCK(mutex->lock);
1349 }
1350
1351 if (mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED) {
1352 MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);
1353 } else {
1354 lseqaddr = mutex->m_lseqaddr;
1355 useqaddr = mutex->m_useqaddr;
1356 }
1357
1358 self = pthread_self();
1359 if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
1360 if (mutex->m_tid == (uint64_t)((uintptr_t)self)) {
1361 if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE)
1362 {
1363 if (mutex->mtxopts.options.lock_count < USHRT_MAX)
1364 {
1365 mutex->mtxopts.options.lock_count++;
1366 PLOCKSTAT_MUTEX_ACQUIRE(omutex, 1, 0);
1367 error = 0;
1368 } else {
1369 error = EAGAIN;
1370 PLOCKSTAT_MUTEX_ERROR(omutex, error);
1371 }
1372 } else { /* PTHREAD_MUTEX_ERRORCHECK */
1373 error = EDEADLK;
1374 PLOCKSTAT_MUTEX_ERROR(omutex, error);
1375 }
1376 return (error);
1377 }
1378 }
1379retry:
1380 oldval = *lseqaddr;
1381
1382 if ((oldval & PTHRW_EBIT) != 0) {
1383 newval = oldval | PTHRW_TRYLKBIT;
1384 if (OSAtomicCompareAndSwap32(oldval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
1385 error = EBUSY;
1386 } else
1387 goto retry;
1388 } else {
1389 newval = (oldval + PTHRW_INC)| PTHRW_EBIT;
1390 if ((OSAtomicCompareAndSwap32(oldval, newval, (volatile int32_t *)lseqaddr) == TRUE)) {
1391 mutex->m_tid = (uint64_t)((uintptr_t)self);
1392 if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE)
1393 mutex->mtxopts.options.lock_count++;
1394 } else
1395 goto retry;
1396 }
1397
1398 return(error);
1399}
1400
1401/*
1402 * Unlock a mutex.
1403 * TODO: Priority inheritance stuff
1404 */
1405int
1406_new_pthread_mutex_unlock(pthread_mutex_t *omutex)
1407{
1408 npthread_mutex_t * mutex = (npthread_mutex_t *)omutex;
1409 int retval;
1410 uint32_t mtxgen, mtxugen, flags, notify;
1411 int sig = mutex->sig;
1412 pthread_t self = pthread_self();
1413 uint32_t * lseqaddr;
1414 uint32_t * useqaddr;
1415
1416 /* To provide backwards compat for apps using mutex incorrectly */
1417
1418#if _KSYN_TRACE_
1419 (void)__kdebug_trace(_KSYN_TRACE_UM_UNLOCK | DBG_FUNC_START, (uint32_t)mutex, 0, 0, 0, 0);
1420#endif
1421 if ((sig != _PTHREAD_MUTEX_SIG) && (sig != _PTHREAD_MUTEX_SIG_init)) {
1422 PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);
1423 return(EINVAL);
1424 }
1425 if (sig != _PTHREAD_MUTEX_SIG) {
1426 LOCK(mutex->lock);
1427 if ((sig != _PTHREAD_MUTEX_SIG) && (sig == _PTHREAD_MUTEX_SIG_init)) {
1428 /* static initializer, init the mutex */
1429 _new_pthread_mutex_init(omutex, NULL);
1430 self = _PTHREAD_MUTEX_OWNER_SELF;
1431 } else {
1432 UNLOCK(mutex->lock);
1433 PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);
1434 return(EINVAL);
1435 }
1436 UNLOCK(mutex->lock);
1437 }
1438
1439 if (mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED) {
1440 MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);
1441 } else {
1442 lseqaddr = mutex->m_lseqaddr;
1443 useqaddr = mutex->m_useqaddr;
1444 }
1445 notify = 0;
1446 retval = __mtx_droplock(mutex, 1, &flags, NULL, &mtxgen, &mtxugen, &notify);
1447 if (retval != 0)
1448 return(retval);
1449
1450 if ((notify & 1) != 0) {
1451#if _KSYN_TRACE_
1452 (void)__kdebug_trace(_KSYN_TRACE_UM_UNLOCK | DBG_FUNC_NONE, (uint32_t)mutex, 1, 0, 0, 0);
1453#endif
1454#if USE_COMPAGE /* [ */
1455 if ( __psynch_mutexdrop((pthread_mutex_t *)lseqaddr, mtxgen, mtxugen, (uint64_t)0, flags)== (uint32_t)-1)
1456#else /* USECOMPAGE ][ */
1457 if ( __psynch_mutexdrop(omutex, mtxgen, mtxugen, (uint64_t)0, flags)== (uint32_t)-1)
1458#endif /* USE_COMPAGE ] */
1459 {
1460 if (errno == EINTR)
1461 return(0);
1462 else
1463 return(errno);
1464 }
1465 }
1466#if _KSYN_TRACE_
1467 (void)__kdebug_trace(_KSYN_TRACE_UM_UNLOCK | DBG_FUNC_END, (uint32_t)mutex, 0, 0, 0, 0);
1468#endif
1469 return(0);
1470}
1471
1472
1473/*
1474 * Initialize a mutex variable, possibly with additional attributes.
1475 */
1476int
1477_new_pthread_mutex_init(pthread_mutex_t *omutex, const pthread_mutexattr_t *attr)
1478{
1479 npthread_mutex_t * mutex = (npthread_mutex_t *)omutex;
1480
1481 if (attr)
1482 {
1483 if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG)
1484 return (EINVAL);
1485 mutex->prioceiling = attr->prioceiling;
1486 mutex->mtxopts.options.protocol = attr->protocol;
1487 mutex->mtxopts.options.policy = attr->policy;
1488 mutex->mtxopts.options.type = attr->type;
1489 mutex->mtxopts.options.pshared = attr->pshared;
1490 } else {
1491 mutex->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
1492 mutex->mtxopts.options.protocol = _PTHREAD_DEFAULT_PROTOCOL;
1493 mutex->mtxopts.options.policy = _PTHREAD_MUTEX_POLICY_FAIRSHARE;
1494 mutex->mtxopts.options.type = PTHREAD_MUTEX_DEFAULT;
1495 mutex->mtxopts.options.pshared = _PTHREAD_DEFAULT_PSHARED;
1496 }
1497
1498 mutex->mtxopts.options.lock_count = 0;
1499 /* is the address 8-byte aligned? */
1500 if (((uintptr_t)mutex & 0x07) != 0) {
1501 /* only 4-byte aligned */
1502 mutex->mtxopts.options.misalign = 1;
1503#if defined(__LP64__)
1504 mutex->m_lseqaddr = &mutex->m_seq[0];
1505 mutex->m_useqaddr = &mutex->m_seq[1];
1506#else /* __LP64__ */
1507 mutex->m_lseqaddr = &mutex->m_seq[1];
1508 mutex->m_useqaddr = &mutex->m_seq[2];
1509#endif /* __LP64__ */
1510 } else {
1511 /* 8-byte aligned */
1512 mutex->mtxopts.options.misalign = 0;
1513#if defined(__LP64__)
1514 mutex->m_lseqaddr = &mutex->m_seq[1];
1515 mutex->m_useqaddr = &mutex->m_seq[2];
1516#else /* __LP64__ */
1517 mutex->m_lseqaddr = &mutex->m_seq[0];
1518 mutex->m_useqaddr = &mutex->m_seq[1];
1519#endif /* __LP64__ */
1520 }
1521 mutex->m_tid = 0;
1522 mutex->m_seq[0] = 0;
1523 mutex->m_seq[1] = 0;
1524 mutex->m_seq[2] = 0;
1525 mutex->prioceiling = 0;
1526 mutex->priority = 0;
1527 mutex->sig = _PTHREAD_MUTEX_SIG;
1528 return (0);
1529}
1530
1531
1532
1533/*
1534 * Destroy a mutex variable.
1535 */
1536int
1537_new_pthread_mutex_destroy(pthread_mutex_t *omutex)
1538{
1539 int res;
1540 npthread_mutex_t * mutex = (npthread_mutex_t *)omutex;
1541
1542 LOCK(mutex->lock);
1543 res = _new_pthread_mutex_destroy_locked(omutex);
1544 UNLOCK(mutex->lock);
1545
1546 return(res);
1547}
1548
1549
1550int
1551_new_pthread_mutex_destroy_locked(pthread_mutex_t *omutex)
1552{
1553 int res;
1554 npthread_mutex_t * mutex = (npthread_mutex_t *)omutex;
1555 uint32_t lgenval;
1556 uint32_t * lseqaddr;
1557 uint32_t * useqaddr;
1558
1559
1560 if (mutex->sig == _PTHREAD_MUTEX_SIG)
1561 {
1562 if (mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED) {
1563 MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);
1564 } else {
1565 lseqaddr = mutex->m_lseqaddr;
1566 useqaddr = mutex->m_useqaddr;
1567 }
1568
1569 lgenval = *(lseqaddr);
1570 if ((mutex->m_tid == (uint64_t)0) &&
1571 ((lgenval & PTHRW_COUNT_MASK) == 0))
1572 {
1573 mutex->sig = _PTHREAD_NO_SIG;
1574 res = 0;
1575 }
1576 else
1577 res = EBUSY;
1578 } else
1579 res = EINVAL;
1580
1581 return (res);
1582}
1583
1584#endif /* __i386__ || __x86_64__ */
1585
1586#endif /* !BUILDING_VARIANT ] */
1587
1588/*
1589 * Destroy a mutex attribute structure.
1590 */
1591int
1592pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
1593{
1594#if __DARWIN_UNIX03
1595 if (__unix_conforming == 0)
1596 __unix_conforming = 1;
1597 if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG)
1598 return (EINVAL);
1599#endif /* __DARWIN_UNIX03 */
1600
1601 attr->sig = _PTHREAD_NO_SIG; /* Uninitialized */
1602 return (0);
1603}
1604
1605