]> git.saurik.com Git - apple/libc.git/blob - pthreads/pthread_rwlock.c
Libc-583.tar.gz
[apple/libc.git] / pthreads / pthread_rwlock.c
1 /*
2 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*-
24 * Copyright (c) 1998 Alex Nash
25 * All rights reserved.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 * 1. Redistributions of source code must retain the above copyright
31 * notice, this list of conditions and the following disclaimer.
32 * 2. Redistributions in binary form must reproduce the above copyright
33 * notice, this list of conditions and the following disclaimer in the
34 * documentation and/or other materials provided with the distribution.
35 *
36 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
37 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
38 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
39 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
40 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
41 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
42 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
43 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
44 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
45 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
46 * SUCH DAMAGE.
47 *
48 * $FreeBSD: src/lib/libc_r/uthread/uthread_rwlock.c,v 1.6 2001/04/10 04:19:20 deischen Exp $
49 */
50
51 /*
52 * POSIX Pthread Library
53 * -- Read Write Lock support
54 * 4/24/02: A. Ramesh
55 * Ported from FreeBSD
56 */
57
58 #include "pthread_internals.h"
59 #include <stdio.h> /* For printf(). */
60
61 extern int __unix_conforming;
62
63 #ifdef PLOCKSTAT
64 #include "plockstat.h"
65 #else /* !PLOCKSTAT */
66 #define PLOCKSTAT_RW_ERROR(x, y, z)
67 #define PLOCKSTAT_RW_BLOCK(x, y)
68 #define PLOCKSTAT_RW_BLOCKED(x, y, z)
69 #define PLOCKSTAT_RW_ACQUIRE(x, y)
70 #define PLOCKSTAT_RW_RELEASE(x, y)
71 #endif /* PLOCKSTAT */
72
73 #define READ_LOCK_PLOCKSTAT 0
74 #define WRITE_LOCK_PLOCKSTAT 1
75
76 #define BLOCK_FAIL_PLOCKSTAT 0
77 #define BLOCK_SUCCESS_PLOCKSTAT 1
78
79 /* maximum number of times a read lock may be obtained */
80 #define MAX_READ_LOCKS (INT_MAX - 1)
81
82 #if defined(__i386__) || defined(__x86_64__)
83
84 #ifndef BUILDING_VARIANT /* [ */
85 int usenew_impl = 0;
86 #else /* BUILDING_VARIANT */
87 extern int usenew_impl;
88 #endif /* BUILDING_VARIANT */
89
90
#if defined(__LP64__)
/*
 * Locate the three synchronization words of a process-shared rwlock
 * inside its rw_seq[] array:
 *   lseqaddr - lock sequence word
 *   wcaddr   - waiter count word
 *   useqaddr - unlock sequence word
 * The words must sit at a stable offset regardless of how the lock
 * object happens to be aligned, so when the rwlock address is not
 * 8-byte aligned (rwlock->misalign, set in _new_pthread_rwlock_init)
 * the slots are shifted by one.  The LP64 and ILP32 variants choose
 * opposite slots for the same misalign value -- presumably because the
 * fields preceding rw_seq differ in size between the two ABIs
 * (NOTE(review): confirm against pthread_internals.h).
 */
#define RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr) \
{ \
	if (rwlock->misalign != 0) { \
		lseqaddr = &rwlock->rw_seq[1]; \
		wcaddr = &rwlock->rw_seq[2]; \
		useqaddr = &rwlock->rw_seq[3]; \
	} else { \
		lseqaddr = &rwlock->rw_seq[0]; \
		wcaddr = &rwlock->rw_seq[1]; \
		useqaddr = &rwlock->rw_seq[2]; \
	} \
}
#else /* __LP64__ */
/* ILP32 variant: slot assignment is the mirror image of the LP64 case. */
#define RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr) \
{ \
	if (rwlock->misalign != 0) { \
		lseqaddr = &rwlock->rw_seq[0]; \
		wcaddr = &rwlock->rw_seq[1]; \
		useqaddr = &rwlock->rw_seq[2]; \
	}else { \
		lseqaddr = &rwlock->rw_seq[1]; \
		wcaddr = &rwlock->rw_seq[2]; \
		useqaddr = &rwlock->rw_seq[3]; \
	} \
}
#endif /* __LP64__ */
118
119 int _new_pthread_rwlock_destroy(pthread_rwlock_t *rwlock);
120 int _new_pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr);
121 int _new_pthread_rwlock_rdlock(pthread_rwlock_t *rwlock);
122 int _new_pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock);
123 int _new_pthread_rwlock_longrdlock_np(pthread_rwlock_t *rwlock);
124 int _new_pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock);
125 int _new_pthread_rwlock_wrlock(pthread_rwlock_t *rwlock);
126 int _new_pthread_rwlock_yieldwrlock_np(pthread_rwlock_t *rwlock);
127 int _new_pthread_rwlock_unlock(pthread_rwlock_t *rwlock);
128 int _new_pthread_rwlock_downgrade_np(pthread_rwlock_t *rwlock);
129 int _new_pthread_rwlock_upgrade_np(pthread_rwlock_t *rwlock);
130
131 #define _KSYN_TRACE_ 0
132
133 #if _KSYN_TRACE_
134 /* The Function qualifiers */
135 #define DBG_FUNC_START 1
136 #define DBG_FUNC_END 2
137 #define DBG_FUNC_NONE 0
138
139 int __kdebug_trace(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t);
140
141 #define _KSYN_TRACE_RW_RDLOCK 0x9000080
142 #define _KSYN_TRACE_RW_WRLOCK 0x9000084
143 #define _KSYN_TRACE_RW_UNLOCK 0x9000088
144 #define _KSYN_TRACE_RW_UNACT1 0x900808c
145 #define _KSYN_TRACE_RW_UNACT2 0x9008090
146 #define _KSYN_TRACE_RW_UNACTK 0x9008094
147 #define _KSYN_TRACE_RW_UNACTE 0x9008098
148 #endif /* _KSYN_TRACE_ */
149 #endif /* __i386__ || __x86_64__ */
150
151 #ifndef BUILDING_VARIANT /* [ */
152
153 #if defined(__i386__) || defined(__x86_64__)
154 static int rwlock_unlock_action_onread(pthread_rwlock_t * rwlock, uint32_t updateval);
155 static int rwlock_unlock_action1(pthread_rwlock_t * rwlock, uint32_t lgenval, uint32_t updateval);
156 static int rwlock_unlock_action2(pthread_rwlock_t * rwlock, uint32_t lgenval, uint32_t updateval);
157 static uint32_t modbits(uint32_t lgenval, uint32_t updateval);
158 static int rwlock_unlock_action_k(pthread_rwlock_t * rwlock, uint32_t lgenval, uint32_t updateval);
159 static int rwlock_exclusive_lockreturn(pthread_rwlock_t * rwlock, uint32_t updateval);
160 static int rw_diffgenseq(uint32_t x, uint32_t y);
161 #endif /* __i386__ || __x86_64__ */
162
163
164 int
165 pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
166 {
167 attr->sig = _PTHREAD_RWLOCK_ATTR_SIG;
168 attr->pshared = _PTHREAD_DEFAULT_PSHARED;
169 return (0);
170 }
171
172 int
173 pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
174 {
175 attr->sig = _PTHREAD_NO_SIG; /* Uninitialized */
176 attr->pshared = 0;
177 return (0);
178 }
179
180 int
181 pthread_rwlockattr_getpshared(const pthread_rwlockattr_t *attr,
182 int *pshared)
183 {
184 if (attr->sig == _PTHREAD_RWLOCK_ATTR_SIG)
185 {
186 *pshared = (int)attr->pshared;
187 return (0);
188 } else
189 {
190 return (EINVAL); /* Not an initialized 'attribute' structure */
191 }
192 }
193
194
195 int
196 pthread_rwlockattr_setpshared(pthread_rwlockattr_t * attr, int pshared)
197 {
198 if (attr->sig == _PTHREAD_RWLOCK_ATTR_SIG)
199 {
200 #if __DARWIN_UNIX03
201 if (( pshared == PTHREAD_PROCESS_PRIVATE) || (pshared == PTHREAD_PROCESS_SHARED))
202 #else /* __DARWIN_UNIX03 */
203 if ( pshared == PTHREAD_PROCESS_PRIVATE)
204 #endif /* __DARWIN_UNIX03 */
205 {
206 attr->pshared = pshared ;
207 return (0);
208 } else
209 {
210 return (EINVAL); /* Invalid parameter */
211 }
212 } else
213 {
214 return (EINVAL); /* Not an initialized 'attribute' structure */
215 }
216
217 }
218
219 #if defined(__i386__) || defined(__x86_64__) /* [ */
/*
 * Destroy a rwlock (new i386/x86_64 implementation).
 * Returns EINVAL for a bad signature.  Under UNIX03 conformance it
 * returns EBUSY when the lock-sequence count has advanced past the
 * unlock-sequence count, i.e. the lock still has holders or waiters.
 */
int
_new_pthread_rwlock_destroy(pthread_rwlock_t *orwlock)
{
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
#if __DARWIN_UNIX03
	uint32_t rw_lseqcnt, rw_useqcnt;
	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
#endif /* __DARWIN_UNIX03 */

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		return(EINVAL);
	} else {
#if __DARWIN_UNIX03
		/* Process-shared locks compute the word addresses from the
		 * lock's own storage; private locks use the addresses cached
		 * at init time. */
		if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
			RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
		} else {
			lseqaddr = rwlock->rw_lseqaddr;
			useqaddr = rwlock->rw_useqaddr;
			wcaddr = rwlock->rw_wcaddr;
		}

		rw_lseqcnt = *lseqaddr;
		rw_useqcnt = *useqaddr;

		/* in use if lock and unlock sequence counts disagree */
		if((rw_lseqcnt & PTHRW_COUNT_MASK) != rw_useqcnt)
			return(EBUSY);

#endif /* __DARWIN_UNIX03 */
		//bzero(rwlock, sizeof(npthread_rwlock_t));
		rwlock->sig = _PTHREAD_NO_SIG;
		return(0);
	}
}
253
254
/*
 * Initialize a rwlock (new i386/x86_64 implementation).
 *
 * Validates the attribute signature (UNIX03), refuses with EBUSY to
 * re-initialize a lock whose sequence counts show it is in use
 * (UNIX03), then zeroes the object and lays out the three
 * synchronization words (lock seq / waiter count / unlock seq) inside
 * rw_seq[].  The slot choice depends on whether the lock address is
 * 8-byte aligned, recorded in rwlock->misalign, so that
 * RWLOCK_GETSEQ_ADDR can recompute the same addresses later.
 */
int
_new_pthread_rwlock_init(pthread_rwlock_t * orwlock, const pthread_rwlockattr_t *attr)
{
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
#if __DARWIN_UNIX03
	uint32_t rw_lseqcnt, rw_useqcnt;
	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
#endif /* __DARWIN_UNIX03 */

#if __DARWIN_UNIX03
	if (attr && (attr->sig != _PTHREAD_RWLOCK_ATTR_SIG)) {
		return(EINVAL);
	}

	/* if already inited check whether it is in use, then return EBUSY */
	if (rwlock->sig == _PTHREAD_RWLOCK_SIG) {
		if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
			RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
		} else {
			lseqaddr = rwlock->rw_lseqaddr;
			useqaddr = rwlock->rw_useqaddr;
			wcaddr = rwlock->rw_wcaddr;
		}
		rw_lseqcnt = *lseqaddr;
		rw_useqcnt = *useqaddr;

		/* counts disagree => lock held or waiters pending */
		if ((rw_lseqcnt & PTHRW_COUNT_MASK) != rw_useqcnt)
			return(EBUSY);

	}
#endif /* __DARWIN_UNIX03 */

	/* initialize the lock */
	/* NOTE(review): zeroes sizeof(pthread_rwlock_t), the caller-visible
	 * type, rather than sizeof(npthread_rwlock_t) -- presumably the
	 * npthread layout fits within it; confirm in pthread_internals.h. */
	bzero(rwlock, sizeof(pthread_rwlock_t));

	if ((attr != NULL) && (attr->pshared == PTHREAD_PROCESS_SHARED)) {
		rwlock->pshared = PTHREAD_PROCESS_SHARED;
		rwlock->rw_flags = PTHRW_KERN_PROCESS_SHARED;

	} else {
		rwlock->pshared = _PTHREAD_DEFAULT_PSHARED;
		rwlock->rw_flags = PTHRW_KERN_PROCESS_PRIVATE;
	}

	/* Pick rw_seq[] slots based on 8-byte alignment of the lock; must
	 * mirror the slot selection in RWLOCK_GETSEQ_ADDR. */
	if (((uintptr_t)rwlock & 0x07) != 0) {
		rwlock->misalign = 1;
#if defined(__LP64__)
		rwlock->rw_lseqaddr = &rwlock->rw_seq[1];
		rwlock->rw_wcaddr = &rwlock->rw_seq[2];
		rwlock->rw_useqaddr = &rwlock->rw_seq[3];
		rwlock->rw_seq[1]= PTHRW_RW_INIT;
#else /* __LP64__ */
		rwlock->rw_lseqaddr = &rwlock->rw_seq[0];
		rwlock->rw_wcaddr = &rwlock->rw_seq[1];
		rwlock->rw_useqaddr = &rwlock->rw_seq[2];
		rwlock->rw_seq[0]= PTHRW_RW_INIT;
#endif /* __LP64__ */

	} else {
		rwlock->misalign = 0;
#if defined(__LP64__)
		rwlock->rw_lseqaddr = &rwlock->rw_seq[0];
		rwlock->rw_wcaddr = &rwlock->rw_seq[1];
		rwlock->rw_useqaddr = &rwlock->rw_seq[2];
		rwlock->rw_seq[0]= PTHRW_RW_INIT;
#else /* __LP64__ */
		rwlock->rw_lseqaddr = &rwlock->rw_seq[1];
		rwlock->rw_wcaddr = &rwlock->rw_seq[2];
		rwlock->rw_useqaddr = &rwlock->rw_seq[3];
		rwlock->rw_seq[1]= PTHRW_RW_INIT;
#endif /* __LP64__ */

	}
	rwlock->sig = _PTHREAD_RWLOCK_SIG;

	return(0);
}
332
/*
 * Acquire a read lock (new i386/x86_64 implementation).
 *
 * Fast path: if the lock-sequence bits show the lock is grantable to a
 * reader, bump the lock sequence with a 32-bit CAS and return.
 * Slow path: atomically bump both the waiter count and the lock
 * sequence with one 64-bit CAS over the adjacent words, then block in
 * the kernel via __psynch_rw_rdlock, retrying on EINTR.
 *
 * Returns 0 on success, EDEADLK if this thread already owns the lock
 * exclusively (UNIX03), EAGAIN when the reader count would exceed
 * PTHRW_MAX_READERS, EINVAL for a bad signature, or the kernel error.
 */
int
_new_pthread_rwlock_rdlock(pthread_rwlock_t * orwlock)
{
#if __DARWIN_UNIX03
	pthread_t self;
#endif /* __DARWIN_UNIX03 */
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	uint32_t lgenval, ugenval, rw_wc, newval, updateval;
	int error = 0, ret;
	uint64_t oldval64, newval64;
	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		/* handle statically initialized locks lazily */
		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
			if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
				PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
				return(error);
			}
		} else {
			PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, EINVAL);
			return(EINVAL);
		}
	}

	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
	} else {
		lseqaddr = rwlock->rw_lseqaddr;
		useqaddr = rwlock->rw_useqaddr;
		wcaddr = rwlock->rw_wcaddr;
	}
loop:
	/* snapshot the three words; any CAS failure below restarts here */
	lgenval = *lseqaddr;
	ugenval = *useqaddr;
	rw_wc = *wcaddr;
#if _KSYN_TRACE_
	/* NOTE(review): trace-only (disabled) call reads 'newval' before it
	 * is assigned on this path */
	(void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgenval, newval, rw_wc, 0);
#endif

	/* read lock grantable: long-read bit set, or no E/W/U bits set */
	if (is_rw_lbit_set(lgenval))
		goto gotlock;
	if(is_rw_ewubit_clear(lgenval))
		goto gotlock;

#if __DARWIN_UNIX03
	/* exclusive holder is us => deadlock */
	if (is_rw_ebit_set(lgenval)) {
		self = pthread_self();
		if(rwlock->rw_owner == self) {
			error = EDEADLK;
			goto out;
		}
	}
#endif /* __DARWIN_UNIX03 */

	/* mean Lbit is set and R bit not set; block in kernel */
	newval = (lgenval + PTHRW_INC);

	/* single 64-bit CAS updates waiter count (high word) and lock
	 * sequence (low word) together */
	oldval64 = (((uint64_t)rw_wc) << 32);
	oldval64 |= lgenval;

	newval64 = (((uint64_t)(rw_wc + 1)) << 32);
	newval64 |= newval;

	if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lseqaddr) != TRUE)
		goto loop;

	/* give writers priority over readers */
	PLOCKSTAT_RW_BLOCK(orwlock, READ_LOCK_PLOCKSTAT);

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, lgenval, newval, rw_wc+1, 0);
#endif

retry:
	/* block in the kernel; it returns the updated lock word */
	updateval = __psynch_rw_rdlock(orwlock, (newval & ~PTHRW_RW_INIT), ugenval, rw_wc, rwlock->rw_flags);

	if (updateval == (uint32_t)-1) {
		error = errno;
	} else
		error = 0;

	if (error == EINTR)
		goto retry;

	/* we are no longer waiting in the kernel */
	OSAtomicDecrement32((volatile int32_t *)wcaddr);



	if (error == 0) {
		/* kernel piggy-backed an unlock hand-off; apply it here */
		if ((updateval & PTHRW_RW_HUNLOCK) != 0) {
			ret = rwlock_unlock_action_onread(orwlock, (updateval & ~PTHRW_RW_HUNLOCK));
			if (ret != 0) {
				LIBC_ABORT("rdlock_unlock handling failed");
			}
		}
		PLOCKSTAT_RW_BLOCKED(orwlock, READ_LOCK_PLOCKSTAT, BLOCK_SUCCESS_PLOCKSTAT);
		PLOCKSTAT_RW_ACQUIRE(orwlock, READ_LOCK_PLOCKSTAT);
		return(0);
	} else {
		PLOCKSTAT_RW_BLOCKED(orwlock, READ_LOCK_PLOCKSTAT, BLOCK_FAIL_PLOCKSTAT);
		goto out;
	}
	/* Not reached */

gotlock:
	/* check for max readers */
	ugenval = *useqaddr;
	if (rw_diffgenseq(lgenval, ugenval) >= PTHRW_MAX_READERS) {
		error = EAGAIN;
		goto out;
	}

	newval = (lgenval + PTHRW_INC);

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, lgenval, newval, 0);
#endif

	/* uncontended grant: advance lock sequence in one CAS */
	if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
		PLOCKSTAT_RW_ACQUIRE(orwlock, READ_LOCK_PLOCKSTAT);
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, 0, 0, 0);
#endif
		return(0);
	} else
		goto loop;
out:
	PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, error, 0, 0);
#endif
	return(error);
}
466
467
/*
 * Try to acquire a read lock without blocking.
 * Succeeds only when the lock-sequence bits show the lock is grantable
 * to a reader; otherwise returns EBUSY immediately.  Returns EAGAIN
 * when the reader count would exceed PTHRW_MAX_READERS, EINVAL for a
 * bad signature.
 */
int
_new_pthread_rwlock_tryrdlock(pthread_rwlock_t * orwlock)
{
	uint32_t lgenval, newval, ugenval;
	int error = 0;
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		/* check for static initialization */
		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
			if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
				PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
				return(error);
			}
		} else {
			PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, EINVAL);
			return(EINVAL);
		}
	}

	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
	} else {
		lseqaddr = rwlock->rw_lseqaddr;
		useqaddr = rwlock->rw_useqaddr;
		wcaddr = rwlock->rw_wcaddr;
	}

loop:
	/* grantable to a reader: long-read bit set, or no E/W/U bits */
	lgenval = *lseqaddr;
	if (is_rw_lbit_set(lgenval))
		goto gotlock;
	if (is_rw_ewubit_clear(lgenval))
		goto gotlock;


	/* would have to block; trylock fails instead */
	error = EBUSY;
	goto out;

gotlock:
	ugenval = *useqaddr;
	if (rw_diffgenseq(lgenval, ugenval) >= PTHRW_MAX_READERS) {
		error = EAGAIN;
		goto out;
	}

	/* advance the lock sequence; on a race, re-evaluate from the top */
	newval = (lgenval + PTHRW_INC);
	if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
		PLOCKSTAT_RW_ACQUIRE(orwlock, READ_LOCK_PLOCKSTAT);
		return(0);
	} else
		goto loop;
out:
	PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
	return(error);
}
525
526 #ifdef NOTYET
527 /*****************************************************************************/
528 /* TBD need to add towards MAX_READERS */
/*
 * Acquire a "long" read lock (compiled out under NOTYET): like rdlock
 * but additionally sets PTHRW_LBIT so subsequent readers are admitted
 * without consulting the writer bits.  After a kernel wakeup the L bit
 * is (re)asserted with a CAS loop.
 * NOTE(review): unlike rdlock/tryrdlock, the MAX_READERS check is
 * missing here -- flagged by the TBD comment above.
 */
int
_new_pthread_rwlock_longrdlock_np(pthread_rwlock_t * orwlock)
{
	pthread_t self;
	uint32_t lgenval, ugenval, rw_wc, newval, updateval;
	int error = 0, ret;
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	uint64_t oldval64, newval64;
	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
			if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
				PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
				return(error);
			}
		} else {
			PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, EINVAL);
			return(EINVAL);
		}
	}

	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
	} else {
		lseqaddr = rwlock->rw_lseqaddr;
		useqaddr = rwlock->rw_useqaddr;
		wcaddr = rwlock->rw_wcaddr;
	}

loop:

	/* snapshot of the three words; CAS failures restart here */
	lgenval = *lseqaddr;
	ugenval = *useqaddr;
	rw_wc = *wcaddr;

	if (is_rw_ewuybit_clear(lgenval))
		goto gotlock;

	/* if w bit is set ensure there is no deadlock */
	if (is_rw_ebit_set(lgenval)) {
		self = pthread_self();
		if(rwlock->rw_owner == self) {
			error = EDEADLK;
			goto out;
		}
	}

	newval = (lgenval + PTHRW_INC);
	/* update lock seq and block in kernel */

	/* one 64-bit CAS covers waiter count (high) + lock seq (low) */
	oldval64 = (((uint64_t)rw_wc) << 32);
	oldval64 |= lgenval;

	newval64 = (((uint64_t)(rw_wc + 1)) << 32);
	newval64 |= newval;

	if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lseqaddr) != TRUE)
		goto loop;
kblock:
	updateval = __psynch_rw_longrdlock(orwlock, newval, ugenval, (rw_wc+1), rwlock->rw_flags);
	if (updateval == (uint32_t)-1) {
		error = errno;
	} else
		error = 0;

	if (error == EINTR)
		goto kblock;

	/* no longer waiting in the kernel */
	OSAtomicDecrement32((volatile int32_t *)wcaddr);
	if (error == 0) {

		/* kernel piggy-backed an unlock hand-off; apply it here */
		if ((updateval & PTHRW_RW_HUNLOCK) != 0) {
			ret = rwlock_unlock_action_onread(orwlock, (updateval & ~PTHRW_RW_HUNLOCK));
			if (ret != 0) {
				LIBC_ABORT("rdlock_unlock handling failed");
			}
		}

		/* assert the long-read bit; loop until the CAS lands */
		error = FALSE;
		while (error == FALSE) {
			lgenval = *lseqaddr;
			newval = lgenval | PTHRW_LBIT;
			error = OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr);
		}

		goto successout;
	} else
		goto out;
	goto successout;

gotlock:
	/* uncontended: advance lock seq and set the long-read bit */
	newval = ((lgenval + PTHRW_INC)| PTHRW_LBIT);
	if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) != TRUE)
		goto loop;

successout:
	PLOCKSTAT_RW_ACQUIRE(orwlock, READ_LOCK_PLOCKSTAT);
	return(0);
out:
	PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
	return(error);
}
632 /**************************************************************/
633 #endif /* NOTYET */
634
/*
 * Try to acquire the write lock without blocking.
 * Succeeds only when the lock word is exactly in the pristine
 * PTHRW_RW_INIT state (no readers, writers, or waiters), claiming it
 * with a single CAS that sets the exclusive bit; otherwise EBUSY.
 * Records the owning thread under UNIX03 for deadlock detection.
 */
int
_new_pthread_rwlock_trywrlock(pthread_rwlock_t * orwlock)
{
	int error = 0;
	uint32_t lgenval, newval;
#if __DARWIN_UNIX03
	pthread_t self = pthread_self();
#endif /* __DARWIN_UNIX03 */
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		/* check for static initialization */
		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
			if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
				PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
				return(error);
			}
		} else {
			PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, EINVAL);
			return(EINVAL);
		}
	}

	/* lseqaddr is the only word used below; useqaddr/wcaddr are fetched
	 * for the shared-lock address computation */
	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
	} else {
		lseqaddr = rwlock->rw_lseqaddr;
		useqaddr = rwlock->rw_useqaddr;
		wcaddr = rwlock->rw_wcaddr;
	}

	/* single-shot CAS: pristine state -> exclusively held */
	lgenval = PTHRW_RW_INIT;
	newval = PTHRW_RW_INIT | PTHRW_INC | PTHRW_EBIT;
	if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
#if __DARWIN_UNIX03
		rwlock->rw_owner = self;
#endif /* __DARWIN_UNIX03 */
		PLOCKSTAT_RW_ACQUIRE(orwlock, WRITE_LOCK_PLOCKSTAT);
		return(0);
	}
	PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, EBUSY);
	return(EBUSY);
}
679
/*
 * Acquire the write lock (new i386/x86_64 implementation).
 *
 * Fast path: CAS the pristine PTHRW_RW_INIT word straight to the
 * exclusively-held state.  Slow path: advance the lock sequence with
 * the W (writer-pending) and shadow-W bits and bump the waiter count
 * in one 64-bit CAS, then block in the kernel via __psynch_rw_wrlock,
 * retrying on EINTR.  The kernel must grant the E bit on wakeup;
 * rwlock_exclusive_lockreturn() finishes the hand-off and may ask us
 * to block again (EAGAIN).
 *
 * Returns 0 on success, EDEADLK if this thread already holds the lock
 * exclusively (UNIX03), EINVAL for a bad signature, or the kernel
 * error.
 */
int
_new_pthread_rwlock_wrlock(pthread_rwlock_t * orwlock)
{
	uint32_t lgenval, newval, ugenval, updateval, rw_wc;
	int error = 0;
#if __DARWIN_UNIX03
	pthread_t self = pthread_self();
#endif /* __DARWIN_UNIX03 */
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	uint64_t oldval64, newval64;
	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		/* check for static initialization */
		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
			if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
				PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
				return(error);
			}
		} else {
			PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, EINVAL);
			return(EINVAL);
		}
	}


	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
	} else {
		lseqaddr = rwlock->rw_lseqaddr;
		useqaddr = rwlock->rw_useqaddr;
		wcaddr = rwlock->rw_wcaddr;
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_START, (uint32_t)rwlock, 0, 0, 0, 0);
#endif
loop:
	/* snapshot the three words; CAS failures restart here */
	lgenval = *lseqaddr;
	ugenval = *useqaddr;
	rw_wc = *wcaddr;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, lgenval, ugenval, rw_wc, 0);
#endif
#if __DARWIN_UNIX03
	/* exclusive holder is us => deadlock */
	if (is_rw_ebit_set(lgenval)) {
		if(rwlock->rw_owner == self) {
			error = EDEADLK;
			goto out;
		}
	}
#endif /* __DARWIN_UNIX03 */

	/* uncontended fast path: pristine -> exclusively held */
	if (lgenval == PTHRW_RW_INIT) {
		newval = ( PTHRW_RW_INIT | PTHRW_INC | PTHRW_EBIT);
		if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
			goto gotit;
		}
	}

	/* announce a pending writer and take a waiter slot */
	newval = (lgenval + PTHRW_INC) | PTHRW_WBIT | PTHRW_SHADOW_W;

	/* update lock seq and block in kernel */
	oldval64 = (((uint64_t)rw_wc) << 32);
	oldval64 |= lgenval;

	newval64 = (((uint64_t)(rw_wc + 1)) << 32);
	newval64 |= newval;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, lgenval, newval, 0);
#endif
	if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lseqaddr) != TRUE)
		goto loop;

retry:
	PLOCKSTAT_RW_BLOCK(orwlock, WRITE_LOCK_PLOCKSTAT);
retry1:
	updateval = __psynch_rw_wrlock(orwlock, newval, ugenval, (rw_wc+1), rwlock->rw_flags);
	if (updateval == (uint32_t)-1) {
		error = errno;
	} else
		error = 0;

	if (error == EINTR) {
		goto retry1;
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x33333333, newval, updateval, 0);
#endif
	PLOCKSTAT_RW_BLOCKED(orwlock, WRITE_LOCK_PLOCKSTAT, BLOCK_SUCCESS_PLOCKSTAT);
	if (error != 0) {
		/* give back the waiter slot taken above */
		OSAtomicDecrement32((volatile int32_t *)wcaddr);
		goto out;
	}

	if (is_rw_ebit_clear(updateval)) {
		/* kernel cannot wakeup without granting E bit */
		abort();
	}

	/* finish the exclusive hand-off; EAGAIN means block again */
	error = rwlock_exclusive_lockreturn(orwlock, updateval);
	if (error == EAGAIN)
		goto retry;

	OSAtomicDecrement32((volatile int32_t *)wcaddr);
	if (error == 0) {
gotit:
#if __DARWIN_UNIX03
		rwlock->rw_owner = self;
#endif /* __DARWIN_UNIX03 */
		PLOCKSTAT_RW_ACQUIRE(orwlock, WRITE_LOCK_PLOCKSTAT);
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, error, 0, 0);
#endif
		return(0);
	}
out:
	PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, error, 0, 0);
#endif
	return(error);
}
806
807
808 #ifdef NOTYET
809 /*****************************************************************************/
810 int
811 _new_pthread_rwlock_yieldwrlock_np(pthread_rwlock_t * orwlock)
812 {
813 uint32_t lgenval, newval, ugenval, updateval, rw_wc;
814 int error = 0;
815 #if __DARWIN_UNIX03
816 pthread_t self = pthread_self();
817 #endif /* __DARWIN_UNIX03 */
818 npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
819 uint64_t oldval64, newval64;
820 volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
821
822 if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
823 /* check for static initialization */
824 if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
825 if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
826 PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
827 return(error);
828 }
829 } else {
830 PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, EINVAL);
831 return(EINVAL);
832 }
833 }
834
835
836 if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
837 RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
838 } else {
839 lseqaddr = rwlock->rw_lseqaddr;
840 useqaddr = rwlock->rw_useqaddr;
841 wcaddr = rwlock->rw_wcaddr;
842 }
843
844 lgenval = *lseqaddr;
845 ugenval = *useqaddr;
846 rw_wc = *wcaddr;
847
848 #if __DARWIN_UNIX03
849 if (is_rw_ebit_set(lgenval)) {
850 if (rwlock->rw_owner == self) {
851 error = EDEADLK;
852 goto out;
853 }
854 }
855 #endif /* __DARWIN_UNIX03 */
856
857 if (lgenval == PTHRW_RW_INIT) {
858 newval = PTHRW_RW_INIT | PTHRW_INC | PTHRW_EBIT;
859 if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
860 goto gotit;
861 }
862 }
863
864 newval = (lgenval + PTHRW_INC);
865 if ((lgenval & PTHRW_WBIT) == 0)
866 newval |= PTHRW_YBIT;
867
868 oldval64 = (((uint64_t)rw_wc) << 32);
869 oldval64 |= lgenval;
870
871 newval64 = (((uint64_t)(rw_wc + 1)) << 32);
872 newval64 |= newval;
873
874 if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lseqaddr) != TRUE)
875 PLOCKSTAT_RW_BLOCK(orwlock, WRITE_LOCK_PLOCKSTAT);
876 retry:
877 updateval = __psynch_rw_yieldwrlock(orwlock, newval, ugenval, (rw_wc+1), rwlock->rw_flags);
878 if (updateval == (uint32_t)-1) {
879 error = errno;
880 } else
881 error = 0;
882
883 if (error == EINTR)
884 goto retry;
885
886
887 PLOCKSTAT_RW_BLOCKED(orwlock, WRITE_LOCK_PLOCKSTAT, BLOCK_SUCCESS_PLOCKSTAT);
888 if (error != 0) {
889 OSAtomicDecrement32((volatile int32_t *)wcaddr);
890 goto out;
891 }
892
893 if (is_rw_ebit_clear(updateval)) {
894 /* kernel cannot wakeup without granting E bit */
895 abort();
896 }
897
898 error = rwlock_exclusive_lockreturn(orwlock, updateval);
899 if (error == EAGAIN)
900 goto retry;
901
902 OSAtomicDecrement32((volatile int32_t *)wcaddr);
903 if (error == 0) {
904 gotit:
905 #if __DARWIN_UNIX03
906 rwlock->rw_owner = self;
907 #endif /* __DARWIN_UNIX03 */
908 PLOCKSTAT_RW_ACQUIRE(orwlock, WRITE_LOCK_PLOCKSTAT);
909 return(0);
910 } else {
911 PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
912 }
913 return(error);
914 out:
915 PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
916 return(error);
917 }
918 /**************************************************************/
919 #endif /* NOTYET */
920
921 int
922 _new_pthread_rwlock_unlock(pthread_rwlock_t * orwlock)
923 {
924 uint32_t lgenval, ugenval, rw_wc, newval, nlval, ulval;
925 int error = 0;
926 int wrlock = 0, kern_trans;
927 uint32_t updateval, bits, newbits;
928 uint32_t isupgrade = 0;
929 npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
930 int retry_count = 0, retry_count1 = 0;
931 volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
932 pthread_t self = NULL;
933 uint64_t threadid = 0;
934 int ubitchanged = 0, initbitset = 0, num;
935
936 if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
937 /* check for static initialization */
938 if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
939 if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
940 PLOCKSTAT_RW_ERROR(orwlock, wrlock, error);
941 return(error);
942 }
943 } else {
944 PLOCKSTAT_RW_ERROR(orwlock, wrlock, EINVAL);
945 return(EINVAL);
946 }
947 }
948
949 if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
950 RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
951 } else {
952 lseqaddr = rwlock->rw_lseqaddr;
953 useqaddr = rwlock->rw_useqaddr;
954 wcaddr = rwlock->rw_wcaddr;
955 }
956
957 #if _KSYN_TRACE_
958 (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_START, (uint32_t)rwlock, 0, 0, 0, 0);
959 #endif
960 loop:
961 lgenval = *lseqaddr;
962 ugenval = *useqaddr;
963 rw_wc = *wcaddr;
964
965
966 loop1:
967 if ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK)) {
968 retry_count++;
969 sched_yield();
970 if (retry_count < 1024)
971 goto loop;
972 error = EINVAL;
973 goto out;
974 }
975 retry_count = 0;
976
977 #if _KSYN_TRACE_
978 (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, lgenval, ugenval, 0);
979 (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, rw_wc, 0, 0);
980 #endif
981 if (is_rw_ebit_set(lgenval)) {
982 wrlock = 1;
983 #if __DARWIN_UNIX03
984 rwlock->rw_owner = (pthread_t)0;
985 #endif /* __DARWIN_UNIX03 */
986 }
987
988 /* last unlock ? */
989 if((lgenval & PTHRW_COUNT_MASK) == (ugenval + PTHRW_INC)) {
990 if (OSAtomicCompareAndSwap32(ugenval, 0, (volatile int32_t *)useqaddr) != TRUE) {
991 goto loop;
992 }
993 if (OSAtomicCompareAndSwap32(lgenval, PTHRW_RW_INIT, (volatile int32_t *)lseqaddr) != TRUE) {
994 if (OSAtomicCompareAndSwap32(0, ugenval, (volatile int32_t *)useqaddr) != TRUE) {
995 lp1:
996 ulval = *useqaddr;
997 nlval = ugenval+ulval;
998 if (OSAtomicCompareAndSwap32(ulval, nlval, (volatile int32_t *)useqaddr) != TRUE)
999 goto lp1;
1000 }
1001
1002 goto loop;
1003 }
1004
1005 goto succout;
1006 }
1007
1008 /* do we need kernel trans? */
1009
1010 lp11:
1011 nlval = lgenval & PTHRW_COUNT_MASK;
1012 if (ubitchanged == 0)
1013 ulval = (ugenval + PTHRW_INC) & PTHRW_COUNT_MASK;
1014 else
1015 ulval = ugenval & PTHRW_COUNT_MASK;
1016
1017 num = rw_diffgenseq(nlval, ulval);
1018 kern_trans = ( num == (rw_wc << PTHRW_COUNT_SHIFT));
1019 /* if three more waiters than needed for kernel tras*/
1020 if ((ubitchanged ==0) && (kern_trans == 0) && (num < (rw_wc << PTHRW_COUNT_SHIFT))) {
1021 retry_count1++;
1022 sched_yield();
1023 if (retry_count1 < 1024)
1024 goto loop;
1025 }
1026 retry_count1 = 0;
1027
1028 if (ubitchanged == 0) {
1029 if (OSAtomicCompareAndSwap32(ugenval, ugenval+PTHRW_INC, (volatile int32_t *)useqaddr) != TRUE)
1030 goto loop;
1031 ubitchanged = 1;
1032 }
1033
1034
1035 if (kern_trans == 0) {
1036 goto succout;
1037 }
1038
1039 #if _KSYN_TRACE_
1040 (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 1, ugenval+PTHRW_INC, 0);
1041 #endif
1042 initbitset = 0;
1043 bits = lgenval & PTHRW_BIT_MASK;
1044 newbits = bits;
1045 /* if this is first unlock to kernel, notify kernel of init status */
1046 if ((bits & PTHRW_RW_INIT) != 0) {
1047 /* reset the initbit if present */
1048 newbits &= ~PTHRW_RW_INIT;
1049 initbitset = PTHRW_RW_INIT;
1050 }
1051 if (((bits & PTHRW_EBIT) != 0) && ((bits & PTHRW_WBIT) == 0)) {
1052 /* reset E bit is no U bit is set */
1053 newbits &= ~PTHRW_EBIT;
1054 }
1055 /* clear shadow bit, as W is going to be sent to kernel */
1056 if ((bits & PTHRW_WBIT) != 0) {
1057 newbits &= ~PTHRW_SHADOW_W;
1058 }
1059
1060 /* reset L bit */
1061 if (bits & PTHRW_LBIT)
1062 newbits &= ~PTHRW_LBIT;
1063 if (bits & PTHRW_UBIT) {
1064 /* reset U and set E bit */
1065 newbits &= ~PTHRW_LBIT;
1066 newbits |= PTHRW_EBIT;
1067 isupgrade = PTHRW_UBIT;
1068 }
1069
1070 /* updates bits on the L */
1071 newval = (lgenval & PTHRW_COUNT_MASK) | newbits;
1072 if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) != TRUE) {
1073 /* reread the value */
1074 lgenval = *lseqaddr;
1075 ugenval = *useqaddr;
1076 rw_wc = *wcaddr;
1077 /* since lgen changed check for trans again */
1078 goto lp11;
1079 }
1080
1081 #if _KSYN_TRACE_
1082 (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 2, newval, 0);
1083 #endif
1084
1085 /* send upgrade bit to kernel */
1086 newval |= (isupgrade | initbitset);
1087 updateval = __psynch_rw_unlock(orwlock, newval, ugenval+PTHRW_INC, rw_wc, rwlock->rw_flags);
1088 if (updateval == (uint32_t)-1) {
1089 error = errno;
1090 } else
1091 error = 0;
1092
1093 if(error != 0) {
1094 /* not sure what is the scenario */
1095 if(error != EINTR)
1096 goto out;
1097 }
1098
1099 /*
1100 * If the unlock is spurious return. Also if the
1101 * exclusive lock is being granted, let that thread
1102 * manage the status bits, otherwise stale bits exclusive
1103 * bit can be set, if that thread had already unlocked.
1104 */
1105 if ((updateval & (PTHRW_RW_SPURIOUS | PTHRW_EBIT)) != 0) {
1106 goto succout;
1107 }
1108
1109 lp2:
1110 lgenval = *lseqaddr;
1111
1112
1113 #if _KSYN_TRACE_
1114 (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 3, lgenval, 0);
1115 #endif
1116 /* if the kernel antcipated seq and one on the lock are same, set the one from kernel */
1117 if ((lgenval & PTHRW_COUNT_MASK) == (updateval & PTHRW_COUNT_MASK)) {
1118 if (OSAtomicCompareAndSwap32(lgenval, updateval, (volatile int32_t *)lseqaddr) != TRUE)
1119 goto lp2;
1120 goto succout;
1121 }
1122
1123 /* state bits are same? */
1124 if ((lgenval & PTHRW_BIT_MASK) == (updateval & PTHRW_BIT_MASK)) {
1125 /* nothing to do */
1126 goto succout;
1127 }
1128
1129 newval = ((lgenval & PTHRW_UN_BIT_MASK) << PTHRW_COUNT_SHIFT) | (updateval & PTHRW_BIT_MASK);
1130
1131 #if _KSYN_TRACE_
1132 (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 4, newval, 0);
1133 #endif
1134 /* high bits are state on the lock; lowbits are one kernel need to set */
1135 switch (newval) {
1136 /* W States */
1137 case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT)) : {
1138 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
1139 }
1140 break;
1141 case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT)) : {
1142 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
1143 }
1144 break;
1145 case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
1146 error = rwlock_unlock_action2(orwlock, lgenval, updateval);
1147 }
1148 break;
1149 case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT)) : {
1150 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
1151 }
1152 break;
1153 case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
1154 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
1155 }
1156 break;
1157 case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_EBIT)) : {
1158 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
1159 }
1160 break;
1161 case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_LBIT)) : {
1162 error = rwlock_unlock_action2(orwlock, lgenval, updateval);
1163 }
1164 break;
1165 case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
1166 error = rwlock_unlock_action2(orwlock, lgenval, updateval);
1167 }
1168 break;
1169 case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
1170 error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
1171 //goto ktrans;
1172 }
1173 break;
1174
1175
1176 /* L states */
1177 case ((PTHRW_LBIT << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
1178 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
1179 }
1180 break;
1181
1182 /* Y states */
1183 case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT)) : {
1184 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
1185 }
1186 break;
1187 case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
1188 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
1189 }
1190 break;
1191 case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT)) : {
1192 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
1193 }
1194 break;
1195 case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
1196 error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
1197 //goto ktrans;
1198 }
1199 break;
1200 case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
1201 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
1202 }
1203 break;
1204 case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
1205 error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
1206 //goto ktrans;
1207 }
1208 break;
1209
1210 /* YU states */
1211 case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT)) : {
1212 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
1213 }
1214 break;
1215 case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
1216 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
1217 }
1218 break;
1219 case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT)) : {
1220 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
1221 }
1222 break;
1223 case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
1224 error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
1225 //goto ktrans;
1226 }
1227 break;
1228 case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
1229 error = rwlock_unlock_action2(orwlock, lgenval, updateval);
1230 }
1231 break;
1232 case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
1233 error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
1234 //goto ktrans;
1235 }
1236 break;
1237
1238 /* E states */
1239 case ((PTHRW_EBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
1240 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
1241 }
1242 break;
1243
1244 /* WE states */
1245 case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT)) : {
1246 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
1247 }
1248 break;
1249 case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
1250 error = rwlock_unlock_action2(orwlock, lgenval, updateval);
1251 }
1252 break;
1253 case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
1254 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
1255 }
1256 break;
1257 case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_EBIT)) : {
1258 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
1259 }
1260 break;
1261 case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_LBIT)) : {
1262 error = rwlock_unlock_action2(orwlock, lgenval, updateval);
1263 }
1264 break;
1265 case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
1266 error = rwlock_unlock_action2(orwlock, lgenval, updateval);
1267 }
1268 break;
1269 case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
1270 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
1271 }
1272 break;
1273
1274 /* WL states */
1275 case (((PTHRW_WBIT | PTHRW_LBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
1276 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
1277 }
1278 break;
1279 case (((PTHRW_WBIT | PTHRW_LBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_LBIT)) : {
1280 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
1281 }
1282 break;
1283 case (((PTHRW_WBIT | PTHRW_LBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
1284 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
1285 }
1286 break;
1287
1288 default:
1289 /* illegal states */
1290 self = pthread_self();
1291 threadid = self->thread_id;
1292 #if _KSYN_TRACE_
1293 (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 6, lgenval, 0);
1294 (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 7, updateval, 0);
1295 #endif
1296 LIBC_ABORT("incorect state on return 0x%x: lgenval 0x%x, updateval 0x%x; threadid (0x%x)\n", newval, lgenval, updateval, (uint32_t)threadid);
1297
1298 };
1299
1300 if (error != 0)
1301 goto lp2;
1302 succout:
1303 PLOCKSTAT_RW_RELEASE(orwlock, wrlock);
1304 #if _KSYN_TRACE_
1305 (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, error, 0, 0);
1306 #endif
1307 return(0);
1308 out:
1309 PLOCKSTAT_RW_ERROR(orwlock, wrlock, error);
1310 #if _KSYN_TRACE_
1311 (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, error, 0, 0);
1312 #endif
1313 return(error);
1314 }
1315
1316 #ifdef NOTYET
1317 /*****************************************************************************/
/*
 * Downgrade a held exclusive (write) lock to a shared (read) lock without
 * releasing it.  Only the owning thread may downgrade.
 * Returns 0 on success, EINVAL if the lock is invalid, not held, or held
 * exclusively by a different thread.
 */
int
_new_pthread_rwlock_downgrade_np(pthread_rwlock_t * orwlock)
{
	uint32_t lgenval, newval, ugenval, rw_wc;
	int error = 0;
	pthread_t self = pthread_self();
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;


	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		/* check for static initialization */
		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
			if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
				return(error);
			}
		} else {
			return(EINVAL);
		}
	}
	/* locate the lock-gen, unlock-gen and wait-count words */
	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
	} else {
		lseqaddr = rwlock->rw_lseqaddr;
		useqaddr = rwlock->rw_useqaddr;
		wcaddr = rwlock->rw_wcaddr;
	}

loop:
	/* snapshot the three words; CAS below detects concurrent changes */
	lgenval = *lseqaddr;
	ugenval = *useqaddr;
	rw_wc = *wcaddr;

	/* exclusively held by someone else: caller may not downgrade */
	if ((is_rw_ebit_set(lgenval )) && (rwlock->rw_owner != self)) {
		return(EINVAL);
	}

	/* NOTE(review): ugenval is compared unmasked here, unlike most other
	 * paths which mask both sides with PTHRW_COUNT_MASK — confirm intended */
	if ((lgenval & PTHRW_COUNT_MASK) != ugenval) {

		/* drop the exclusive bit; readers may then proceed */
		newval = lgenval & ~PTHRW_EBIT;

		if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
#if __DARWIN_UNIX03
			rwlock->rw_owner = 0;
#endif /* __DARWIN_UNIX03 */
			/* waiters present: let the kernel wake blocked readers */
			if (rw_wc != 0) {
				error = __psynch_rw_downgrade(orwlock, newval, ugenval, rw_wc, rwlock->rw_flags);

			}
			return(0);
		} else {
			/* lost the race; re-snapshot and retry */
			goto loop;
		}
	}
	/* lock not held at all */
	return(EINVAL);
}
1374
1375
1376 int
1377 _new_pthread_rwlock_upgrade_np(pthread_rwlock_t * orwlock)
1378 {
1379 uint32_t lgenval, newval, ugenval, ulval, updateval, rw_wc;
1380 int error = 0, kern_trans;
1381 pthread_t self = pthread_self();
1382 npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
1383 uint64_t oldval64, newval64;
1384 volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
1385
1386 if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
1387 /* check for static initialization */
1388 if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
1389 if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
1390 return(error);
1391 }
1392 } else {
1393 return(EINVAL);
1394 }
1395 }
1396 if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
1397 RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
1398 } else {
1399 lseqaddr = rwlock->rw_lseqaddr;
1400 useqaddr = rwlock->rw_useqaddr;
1401 wcaddr = rwlock->rw_wcaddr;
1402 }
1403 loop:
1404 lgenval = *lseqaddr;
1405 ugenval = *useqaddr;
1406 rw_wc = *wcaddr;
1407
1408 if (is_rw_uebit_set(lgenval)) {
1409 return(EINVAL);
1410
1411 }
1412
1413 if ((lgenval & PTHRW_COUNT_MASK) == ugenval)
1414 return(EINVAL);
1415
1416 if (lgenval > ugenval)
1417 ulval = (lgenval & PTHRW_COUNT_MASK) - (ugenval & PTHRW_COUNT_MASK);
1418 else
1419 ulval = (ugenval & PTHRW_COUNT_MASK) - (lgenval & PTHRW_COUNT_MASK);
1420
1421
1422 newval = lgenval | PTHRW_UBIT;
1423
1424 kern_trans = 1;
1425 if (rw_wc != 0) {
1426 if (ulval == ((rw_wc - 1) << PTHRW_COUNT_SHIFT))
1427 kern_trans = 0;
1428 } else if (ulval == 1)
1429 kern_trans = 0;
1430
1431 if (kern_trans == 0) {
1432 newval = ((lgenval | PTHRW_EBIT) & ~PTHRW_LBIT);
1433 } else {
1434 newval = lgenval | PTHRW_UBIT;
1435 }
1436 if (kern_trans == 0) {
1437 if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) != TRUE)
1438 goto loop;
1439
1440 } else {
1441 newval = (lgenval + PTHRW_INC);
1442
1443 oldval64 = (((uint64_t)rw_wc) << 32);
1444 oldval64 |= lgenval;
1445
1446 newval64 = (((uint64_t)(rw_wc + 1)) << 32);
1447 newval64 |= newval;
1448
1449 if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lseqaddr) != TRUE)
1450 goto loop;
1451 /* kern_trans == 1 */
1452 retry:
1453 updateval = __psynch_rw_upgrade(orwlock, newval, ugenval, rw_wc+1, rwlock->rw_flags);
1454 if (updateval == (uint32_t)-1) {
1455 error = errno;
1456 } else
1457 error = 0;
1458
1459 if (error == EINTR)
1460 goto retry;
1461
1462 if (error != 0) {
1463 OSAtomicDecrement32((volatile int32_t *)wcaddr);
1464 goto out;
1465 }
1466
1467 if (is_rw_ebit_set(updateval)) {
1468 /* kernel cannot wakeup without granting E bit */
1469 abort();
1470 }
1471
1472 error = rwlock_exclusive_lockreturn(orwlock, updateval);
1473 if (error == EAGAIN)
1474 goto retry;
1475
1476 OSAtomicDecrement32((volatile int32_t *)wcaddr);
1477
1478 }
1479 if (error == 0) {
1480 rwlock->rw_owner = self;
1481 PLOCKSTAT_RW_ACQUIRE(orwlock, WRITE_LOCK_PLOCKSTAT);
1482 return(0);
1483 }
1484
1485 out:
1486 return(error);
1487 }
1488
1489 int
1490 pthread_rwlock_tryupgrade_np(pthread_rwlock_t *orwlock)
1491 {
1492 pthread_t self = pthread_self();
1493 uint32_t lgenval, newval, ugenval, ulval, rw_wc;
1494 int error = 0, kern_trans;
1495 npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
1496 volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
1497
1498 if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
1499 if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
1500 if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
1501 return(error);
1502 }
1503 } else {
1504 return(EINVAL);
1505 }
1506 }
1507 if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
1508 RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
1509 } else {
1510 lseqaddr = rwlock->rw_lseqaddr;
1511 useqaddr = rwlock->rw_useqaddr;
1512 wcaddr = rwlock->rw_wcaddr;
1513 }
1514
1515 loop:
1516 lgenval = *lseqaddr;
1517 ugenval = *useqaddr;
1518 rw_wc = *wcaddr;
1519
1520 if (is_rw_uebit_set(lgenval)) {
1521 return(EBUSY);
1522 }
1523
1524 if ((lgenval & PTHRW_COUNT_MASK) == ugenval)
1525 return(EINVAL);
1526
1527 if (lgenval > ugenval)
1528 ulval = (lgenval & PTHRW_COUNT_MASK) - (ugenval & PTHRW_COUNT_MASK);
1529 else
1530 ulval = (ugenval & PTHRW_COUNT_MASK) - (lgenval & PTHRW_COUNT_MASK);
1531
1532
1533 newval = lgenval | PTHRW_UBIT;
1534
1535 kern_trans = 1;
1536 if (rw_wc != 0) {
1537 /* there is only one reader thread */
1538 if (ulval == (rw_wc - 1))
1539 kern_trans = 0;
1540 } else if (ulval == 1)
1541 kern_trans = 0;
1542
1543 if (kern_trans == 0) {
1544 newval = (lgenval | PTHRW_EBIT) & ~PTHRW_LBIT;
1545 if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) != TRUE)
1546 goto loop;
1547
1548 rwlock->rw_owner = self;
1549 PLOCKSTAT_RW_ACQUIRE(orwlock, WRITE_LOCK_PLOCKSTAT);
1550 return(0);
1551 }
1552 return(EBUSY);
1553 }
1554
1555 /* Returns true if the rwlock is held for reading by any thread or held for writing by the current thread */
1556 int
1557 pthread_rwlock_held_np(pthread_rwlock_t * orwlock)
1558 {
1559 npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
1560 uint32_t lgenval, ugenval;
1561 int error = 0;
1562 volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
1563
1564 if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
1565 if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
1566 if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
1567 return(0);
1568 }
1569 } else {
1570 return(-1);
1571 }
1572 }
1573
1574 if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
1575 RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
1576 } else {
1577 lseqaddr = rwlock->rw_lseqaddr;
1578 useqaddr = rwlock->rw_useqaddr;
1579 wcaddr = rwlock->rw_wcaddr;
1580 }
1581
1582 lgenval = *lseqaddr;
1583 ugenval = *useqaddr;
1584
1585 if ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK))
1586 return(0);
1587
1588 return(1);
1589 }
1590
1591 /* Returns true if the rwlock is held for reading by any thread */
1592 int
1593 pthread_rwlock_rdheld_np(pthread_rwlock_t * orwlock)
1594 {
1595 npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
1596 uint32_t lgenval;
1597 int error = 0;
1598 volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
1599
1600 if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
1601 if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
1602 if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
1603 return(0);
1604 }
1605 } else {
1606 return(-1);
1607 }
1608 }
1609
1610 if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
1611 RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
1612 } else {
1613 lseqaddr = rwlock->rw_lseqaddr;
1614 useqaddr = rwlock->rw_useqaddr;
1615 wcaddr = rwlock->rw_wcaddr;
1616 }
1617
1618 lgenval = *lseqaddr;
1619
1620 if (is_rw_ebit_set(lgenval)) {
1621 return(0);
1622 }
1623 return(0);
1624 }
1625
1626 /* Returns true if the rwlock is held for writing by the current thread */
1627 int
1628 pthread_rwlock_wrheld_np(pthread_rwlock_t * orwlock)
1629 {
1630 npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
1631 pthread_t self;
1632 uint32_t lgenval;
1633 int error = 0;
1634 volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
1635
1636 if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
1637 if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
1638 if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
1639 return(0);
1640 }
1641 } else {
1642 return(-1);
1643 }
1644 }
1645
1646 if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
1647 RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
1648 } else {
1649 lseqaddr = rwlock->rw_lseqaddr;
1650 useqaddr = rwlock->rw_useqaddr;
1651 wcaddr = rwlock->rw_wcaddr;
1652 }
1653
1654 self = pthread_self();
1655
1656 lgenval = *lseqaddr;
1657 if ((is_rw_ebit_set(lgenval)) && (rwlock->rw_owner == self)) {
1658 return(1);
1659 }
1660 return(0);
1661 }
1662 /**************************************************************/
1663 #endif /* NOTYET */
1664
/*
 * Reconcile the userland lock-generation word with the value the kernel
 * returned (updateval) after a read-side unlock transition.  The lock word
 * is either replaced wholesale (when the generation counts match), left
 * alone (when the state bits already agree), or merged via one of the
 * rwlock_unlock_action* helpers chosen by the exact (lock-bits, kernel-bits)
 * pair.  Always returns 0; an unrecognized state pair aborts the process.
 */
static int
rwlock_unlock_action_onread(pthread_rwlock_t * orwlock, uint32_t updateval)
{
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	int error = 0;
	uint32_t lgenval, newval;
	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
	pthread_t self;
	uint64_t threadid;

	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
	} else {
		lseqaddr = rwlock->rw_lseqaddr;
		useqaddr = rwlock->rw_useqaddr;
		wcaddr = rwlock->rw_wcaddr;
	}

	lgenval = *lseqaddr;

lp2:
	/* re-snapshot on every retry; a failed CAS below loops back here */
	lgenval = *lseqaddr;


#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 3, lgenval, 0);
#endif
	/* if the kernel-anticipated seq and the one on the lock are the same, install the kernel's value */
	if ((lgenval & PTHRW_COUNT_MASK) == (updateval & PTHRW_COUNT_MASK)) {
		if (OSAtomicCompareAndSwap32(lgenval, updateval, (volatile int32_t *)lseqaddr) != TRUE)
			goto lp2;
		goto succout;
	}

	/* state bits are same? */
	if ((lgenval & PTHRW_BIT_MASK) == (updateval & PTHRW_BIT_MASK)) {
		/* nothing to do */
		goto succout;
	}

	/* pack both bit sets into one word so the switch can match exact pairs */
	newval = ((lgenval & PTHRW_UN_BIT_MASK) << PTHRW_COUNT_SHIFT) | (updateval & PTHRW_BIT_MASK);

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 4, newval, 0);
#endif
	/* high bits are state on the lock; low bits are the ones the kernel needs set */
	switch (newval) {
		/* W States */
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
		}
		break;
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
		}
		break;
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
			error = rwlock_unlock_action2(orwlock, lgenval, updateval);
		}
		break;
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
		}
		break;
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
		}
		break;
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_EBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
		}
		break;
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_LBIT)) : {
			error = rwlock_unlock_action2(orwlock, lgenval, updateval);
		}
		break;
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
			error = rwlock_unlock_action2(orwlock, lgenval, updateval);
		}
		break;
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
			error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
			//goto ktrans;
		}
		break;


		/* L states */
		case ((PTHRW_LBIT << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
		}
		break;

		/* Y states */
		case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
		}
		break;
		case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
		}
		break;
		case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
		}
		break;
		case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
			error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
			//goto ktrans;
		}
		break;
		case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
		}
		break;
		case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
			error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
			//goto ktrans;
		}
		break;

		/* YU states */
		case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
		}
		break;
		case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
		}
		break;
		case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
		}
		break;
		case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
			error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
			//goto ktrans;
		}
		break;
		case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
			error = rwlock_unlock_action2(orwlock, lgenval, updateval);
		}
		break;
		case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
			error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
			//goto ktrans;
		}
		break;

		/* E states */
		case ((PTHRW_EBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
		}
		break;

		/* WE states */
		case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
		}
		break;
		case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
			error = rwlock_unlock_action2(orwlock, lgenval, updateval);
		}
		break;
		case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
		}
		break;
		case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_EBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
		}
		break;
		case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_LBIT)) : {
			error = rwlock_unlock_action2(orwlock, lgenval, updateval);
		}
		break;
		case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
			error = rwlock_unlock_action2(orwlock, lgenval, updateval);
		}
		break;
		case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
		}
		break;

		/* WL states */
		case (((PTHRW_WBIT | PTHRW_LBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
		}
		break;
		case (((PTHRW_WBIT | PTHRW_LBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_LBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
		}
		break;
		case (((PTHRW_WBIT | PTHRW_LBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
		}
		break;

		default:
			/* illegal states */
			self = pthread_self();
			threadid = self->thread_id;
#if _KSYN_TRACE_
			(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 6, lgenval, 0);
			(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 7, updateval, 0);
#endif
			LIBC_ABORT("incorect state on return 0x%x: lgenval 0x%x, updateval 0x%x; threadid (0x%x)\n", newval, lgenval, updateval, (uint32_t)threadid);
	};

	/* a helper lost its CAS race: re-read the lock word and retry */
	if (error != 0)
		goto lp2;

succout:
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_UNACT1 | DBG_FUNC_NONE, lgenval, newval, 0, 0, 0);
#endif
	return(0);
}
1884
1885
1886 static uint32_t
1887 modbits(uint32_t lgenval, uint32_t updateval)
1888 {
1889 uint32_t lval = lgenval & PTHRW_BIT_MASK;
1890 uint32_t uval = updateval & PTHRW_BIT_MASK;
1891 uint32_t rval, nlval;
1892
1893 nlval = (lval | uval);
1894 if ((uval & PTHRW_EBIT) == 0)
1895 nlval &= ~PTHRW_EBIT;
1896 if ((nlval & (PTHRW_WBIT | PTHRW_YBIT)) == (PTHRW_WBIT | PTHRW_YBIT))
1897 nlval &= ~PTHRW_YBIT;
1898 /* no new writers and kernel resets w bit, reset W bit on the lock */
1899 if (((nlval & (PTHRW_WBIT | PTHRW_SHADOW_W)) == PTHRW_WBIT) && ((updateval & PTHRW_WBIT) == 0))
1900 nlval &= ~PTHRW_WBIT;
1901
1902 rval = (lgenval & PTHRW_COUNT_MASK) | nlval;
1903 return(rval);
1904 }
1905
1906 static int
1907 rwlock_unlock_action1(pthread_rwlock_t * orwlock, uint32_t lgenval, uint32_t updateval)
1908 {
1909 npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
1910 int error = 0;
1911 uint32_t newval;
1912 volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
1913
1914 if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
1915 RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
1916 } else {
1917 lseqaddr = rwlock->rw_lseqaddr;
1918 useqaddr = rwlock->rw_useqaddr;
1919 wcaddr = rwlock->rw_wcaddr;
1920 }
1921
1922 newval = modbits(lgenval, updateval);
1923 if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) != TRUE)
1924 error = EINVAL;
1925 #if _KSYN_TRACE_
1926 (void)__kdebug_trace(_KSYN_TRACE_RW_UNACT1 | DBG_FUNC_NONE, lgenval, newval, 0, 0, 0);
1927 #endif
1928 return(error);
1929 }
1930
1931 static int
1932 rwlock_unlock_action2(pthread_rwlock_t * orwlock, uint32_t lgenval, uint32_t updateval)
1933 {
1934 npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
1935 uint32_t newval;
1936 volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
1937
1938 if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
1939 RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
1940 } else {
1941 lseqaddr = rwlock->rw_lseqaddr;
1942 useqaddr = rwlock->rw_useqaddr;
1943 wcaddr = rwlock->rw_wcaddr;
1944 }
1945
1946 newval = modbits(lgenval, updateval);
1947 if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
1948 /* roundtrip kernel */
1949
1950 #if _KSYN_TRACE_
1951 (void)__kdebug_trace(_KSYN_TRACE_RW_UNACT2 | DBG_FUNC_NONE, lgenval, newval, 0, 0, 0);
1952 #endif
1953 (void) __psynch_rw_unlock2(orwlock, lgenval, *useqaddr, *wcaddr, rwlock->rw_flags);
1954 return(0);
1955 }
1956 #if _KSYN_TRACE_
1957 (void)__kdebug_trace(_KSYN_TRACE_RW_UNACT2 | DBG_FUNC_NONE, 0xffffffff, 0, 0, 0, 0);
1958 #endif
1959
1960 return(EINVAL);
1961 }
1962
1963 /* This is used when an exclusive write lock of any kind is being granted. For unlock thread, it needs to try to set the bit, if not move on */
1964 static int
1965 rwlock_unlock_action_k(pthread_rwlock_t * orwlock, uint32_t lgenval, uint32_t updateval)
1966 {
1967 npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
1968 uint32_t newval;
1969 volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
1970
1971 if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
1972 RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
1973 } else {
1974 lseqaddr = rwlock->rw_lseqaddr;
1975 useqaddr = rwlock->rw_useqaddr;
1976 wcaddr = rwlock->rw_wcaddr;
1977 }
1978
1979 newval = modbits(lgenval, updateval);
1980 #if _KSYN_TRACE_
1981 (void)__kdebug_trace(_KSYN_TRACE_RW_UNACTK | DBG_FUNC_NONE, lgenval, updateval, newval, 0, 0);
1982 #endif
1983 /* try to set, if not not a prolem as the thread taking exclusive will take care of the discrepency */
1984
1985 if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
1986 #if _KSYN_TRACE_
1987 (void)__kdebug_trace(_KSYN_TRACE_RW_UNACTK | DBG_FUNC_NONE, 0x55555555, lgenval, newval, 0, 0);
1988 #endif
1989
1990 } else {
1991 #if _KSYN_TRACE_
1992 (void)__kdebug_trace(_KSYN_TRACE_RW_UNACTK | DBG_FUNC_NONE, 0xAAAAAAAA, lgenval, newval, 0, 0);
1993 #endif
1994
1995 }
1996
1997 return(0);
1998 }
1999
2000 static int
2001 rwlock_exclusive_lockreturn(pthread_rwlock_t * orwlock, uint32_t updateval)
2002 {
2003 npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
2004 uint32_t lgenval, newval;
2005 volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
2006 pthread_t self;
2007 uint64_t threadid;
2008
2009 int error = 0;
2010
2011 if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
2012 RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
2013 } else {
2014 lseqaddr = rwlock->rw_lseqaddr;
2015 useqaddr = rwlock->rw_useqaddr;
2016 wcaddr = rwlock->rw_wcaddr;
2017 }
2018
2019 lp2:
2020 lgenval = *lseqaddr;
2021
2022 /* if the kernel antcipated seq and one on the lock are same, set the one from kernel */
2023 if ((lgenval & PTHRW_COUNT_MASK) == (updateval & PTHRW_COUNT_MASK)) {
2024 if (OSAtomicCompareAndSwap32(lgenval, updateval, (volatile int32_t *)lseqaddr) != TRUE)
2025 goto lp2;
2026 goto out;
2027 }
2028
2029 #if _KSYN_TRACE_
2030 (void)__kdebug_trace(_KSYN_TRACE_RW_UNACTE | DBG_FUNC_NONE, lgenval, updateval, 1, 0, 0);
2031 #endif
2032 /* state bits are same? */
2033 if ((lgenval & PTHRW_BIT_MASK) == (updateval & PTHRW_BIT_MASK)) {
2034 /* nothing to do */
2035 goto out;
2036 }
2037
2038
2039 newval = ((lgenval & PTHRW_UN_BIT_MASK) << PTHRW_COUNT_SHIFT) | (updateval & PTHRW_BIT_MASK);
2040
2041 #if _KSYN_TRACE_
2042 (void)__kdebug_trace(_KSYN_TRACE_RW_UNACTE | DBG_FUNC_NONE, newval, 0, 2, 0, 0);
2043 #endif
2044 /* high bits are state on the lock; lowbits are one kernel need to set */
2045 switch (newval) {
2046 /* W States */
2047 case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT)) : {
2048 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
2049 }
2050 break;
2051 case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
2052 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
2053 }
2054 break;
2055 case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_EBIT)) : {
2056 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
2057 }
2058 break;
2059 case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
2060 error = EAGAIN;
2061 }
2062 break;
2063
2064
2065 /* All L states illegal here */
2066
2067 /* Y states */
2068 case (PTHRW_YBIT << PTHRW_COUNT_SHIFT) : {
2069 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
2070 }
2071 case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
2072 error = EAGAIN;
2073 }
2074 break;
2075 case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
2076 error = EAGAIN;
2077 }
2078 break;
2079
2080 /* YU states */
2081 case ((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) : {
2082 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
2083 }
2084 break;
2085 case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
2086 error = EAGAIN;
2087 }
2088 break;
2089
2090 case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
2091 error = EAGAIN;
2092 }
2093 break;
2094
2095 /* E states */
2096 case ((PTHRW_EBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
2097 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
2098 }
2099 break;
2100
2101 /* WE states */
2102 case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
2103 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
2104 }
2105 break;
2106 case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_EBIT)) : {
2107 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
2108 }
2109 break;
2110 case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
2111 error = rwlock_unlock_action1(orwlock, lgenval, updateval);
2112 }
2113 break;
2114
2115 /* All WL states are illegal*/
2116
2117 default:
2118 /* illegal states */
2119 self = pthread_self();
2120 threadid = self->thread_id;
2121 #if _KSYN_TRACE_
2122 (void)__kdebug_trace(_KSYN_TRACE_RW_UNACTE | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 6, lgenval, 0);
2123 (void)__kdebug_trace(_KSYN_TRACE_RW_UNACTE | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 7, updateval, 0);
2124 #endif
2125 LIBC_ABORT("rwlock_exclusive_lockreturn: incorect state on return 0x%x: lgenval 0x%x, updateval 0x%x; threadid (0x%x)\n", newval, lgenval, updateval, (uint32_t)threadid);
2126 };
2127
2128 if (error == EINVAL)
2129 goto lp2;
2130 out:
2131 #if _KSYN_TRACE_
2132 (void)__kdebug_trace(_KSYN_TRACE_RW_UNACTE | DBG_FUNC_NONE, error, 0, 0xffffffff, 0, 0);
2133 #endif
2134 return(error);
2135 }
2136
2137 /* returns are not bit shifted */
2138 static int
2139 rw_diffgenseq(uint32_t x, uint32_t y)
2140 {
2141 uint32_t lx = (x & PTHRW_COUNT_MASK);
2142 uint32_t ly = (y &PTHRW_COUNT_MASK);
2143
2144 if (lx > ly) {
2145 return(lx-ly);
2146 } else {
2147 return((PTHRW_MAX_READERS - y) + lx + PTHRW_INC);
2148 }
2149
2150 }
2151
2152 #endif /* i386 || x86_64 ] */
2153
2154
2155 #endif /* !BUILDING_VARIANT ] */
2156
/*
 * pthread_rwlock_destroy - destroy a read-write lock object.
 *
 * Returns EINVAL if the lock was never initialized, EBUSY (UNIX03
 * only) if rwlock->state != 0 (the lock is currently held), 0 on
 * success.  On i386/x86_64 the call is forwarded to the new
 * kernel-assisted implementation when that is enabled, or when the
 * lock is process-shared.
 */
int
pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__DARWIN_UNIX03)
	int ret;
#endif /* __i386__ || __x86_64__ */


#if defined(__i386__) || defined(__x86_64__)
	/* new implementation handles everything when enabled */
	if ((usenew_impl != 0)) {
		return(_new_pthread_rwlock_destroy(rwlock));
	}
#endif /* __i386__ || __x86_64__ */

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		return(EINVAL);
	}
#if defined(__i386__) || defined(__x86_64__)
	/* process-shared locks live in the new implementation */
	else if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		ret = _new_pthread_rwlock_destroy(rwlock);
		return(ret);
	}
#endif /* __i386__ || __x86_64__ */
	else {
#if __DARWIN_UNIX03
		/* grab the monitor lock */
		if ((ret = pthread_mutex_lock(&rwlock->lock)) != 0)
			return(ret);

		/* state != 0 means readers or a writer still hold the lock */
		if (rwlock->state != 0) {
			pthread_mutex_unlock(&rwlock->lock);
			return(EBUSY);
		}
		pthread_mutex_unlock(&rwlock->lock);
#endif /* __DARWIN_UNIX03 */

		/* tear down the monitor and both wait queues, then
		 * invalidate the signature so later misuse is caught */
		pthread_mutex_destroy(&rwlock->lock);
		pthread_cond_destroy(&rwlock->read_signal);
		pthread_cond_destroy(&rwlock->write_signal);
		rwlock->sig = _PTHREAD_NO_SIG;
		return(0);
	}
}
2200
2201 int
2202 pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
2203 {
2204 int ret;
2205
2206 #if defined(__i386__) || defined(__x86_64__)
2207 if ((usenew_impl != 0)) {
2208 return(_new_pthread_rwlock_init(rwlock, attr));
2209 }
2210 #endif /* __i386__ || __x86_64__ */
2211
2212 #if __DARWIN_UNIX03
2213 if (attr && (attr->sig != _PTHREAD_RWLOCK_ATTR_SIG)) {
2214 return(EINVAL);
2215 }
2216 #endif /* __DARWIN_UNIX03 */
2217
2218 #if defined(__i386__) || defined(__x86_64__)
2219 if ((attr != NULL) && (attr->pshared == PTHREAD_PROCESS_SHARED)) {
2220 ret = _new_pthread_rwlock_init(rwlock, attr);
2221 return(ret);
2222 }
2223 #endif /* __i386__ || __x86_64__ */
2224
2225 #if __DARWIN_UNIX03
2226 /* if already inited check whether it is in use, then return EBUSY */
2227 if ((rwlock->sig == _PTHREAD_RWLOCK_SIG) && (rwlock->state !=0 )) {
2228 return(EBUSY);
2229 }
2230 #endif /* __DARWIN_UNIX03 */
2231
2232 /* initialize the lock */
2233 if ((ret = pthread_mutex_init(&rwlock->lock, NULL)) != 0)
2234 return(ret);
2235 else {
2236 /* initialize the read condition signal */
2237 ret = pthread_cond_init(&rwlock->read_signal, NULL);
2238
2239 if (ret != 0) {
2240 pthread_mutex_destroy(&rwlock->lock);
2241 return(ret);
2242 } else {
2243 /* initialize the write condition signal */
2244 ret = pthread_cond_init(&rwlock->write_signal, NULL);
2245
2246 if (ret != 0) {
2247 pthread_cond_destroy(&rwlock->read_signal);
2248 pthread_mutex_destroy(&rwlock->lock);
2249 return(ret);
2250 } else {
2251 /* success */
2252 rwlock->state = 0;
2253 rwlock->owner = (pthread_t)0;
2254 rwlock->blocked_writers = 0;
2255 if (attr)
2256 rwlock->pshared = attr->pshared;
2257 else
2258 rwlock->pshared = _PTHREAD_DEFAULT_PSHARED;
2259
2260 rwlock->sig = _PTHREAD_RWLOCK_SIG;
2261 return(0);
2262 }
2263 }
2264 }
2265 }
2266
/*
 * pthread_rwlock_rdlock - acquire a read-write lock for reading.
 *
 * Blocks while a writer holds the lock or writers are queued (writers
 * have priority over readers).  Returns 0 on success, EINVAL for an
 * uninitialized lock, EDEADLK (UNIX03) if the calling thread already
 * holds the lock for writing, EAGAIN when MAX_READ_LOCKS readers
 * already hold it.
 */
int
pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
	int ret;
#if __DARWIN_UNIX03
	pthread_t self = pthread_self();
#endif

#if defined(__i386__) || defined(__x86_64__)
	/* new implementation handles everything when enabled */
	if ((usenew_impl != 0)) {
		return(_new_pthread_rwlock_rdlock(rwlock));
	}
#endif /* __i386__ || __x86_64__ */

	/* lazily set up a statically-initialized lock on first use */
	if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
		if ((ret = pthread_rwlock_init(rwlock, NULL)) != 0) {
			PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, ret);
			return(ret);
		}
	}

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, EINVAL);
		return(EINVAL);
	}
#if defined(__i386__) || defined(__x86_64__)
	/* process-shared locks live in the new implementation */
	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		ret = _new_pthread_rwlock_rdlock(rwlock);
		return(ret);
	}
#endif /* __i386__ || __x86_64__ */
	/* grab the monitor lock */
	if ((ret = pthread_mutex_lock(&rwlock->lock)) != 0) {
		PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, ret);
		return(ret);
	}

#if __DARWIN_UNIX03
	/* negative state == held for writing; by us means deadlock */
	if ((rwlock->state < 0) && (rwlock->owner == self)) {
		pthread_mutex_unlock(&rwlock->lock);
		PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, EDEADLK);
		return(EDEADLK);
	}
#endif /* __DARWIN_UNIX03 */

	/* wait while writers are queued or another thread holds it for writing */
#if __DARWIN_UNIX03
	while (rwlock->blocked_writers || ((rwlock->state < 0) && (rwlock->owner != self)))
#else /* __DARWIN_UNIX03 */
	while (rwlock->blocked_writers || rwlock->state < 0)

#endif /* __DARWIN_UNIX03 */
	{
		/* give writers priority over readers */
		PLOCKSTAT_RW_BLOCK(rwlock, READ_LOCK_PLOCKSTAT);
		ret = pthread_cond_wait(&rwlock->read_signal, &rwlock->lock);

		if (ret != 0) {
			/* can't do a whole lot if this fails */
			pthread_mutex_unlock(&rwlock->lock);
			PLOCKSTAT_RW_BLOCKED(rwlock, READ_LOCK_PLOCKSTAT, BLOCK_FAIL_PLOCKSTAT);
			PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, ret);
			return(ret);
		}

		PLOCKSTAT_RW_BLOCKED(rwlock, READ_LOCK_PLOCKSTAT, BLOCK_SUCCESS_PLOCKSTAT);
	}

	/* check lock count */
	if (rwlock->state == MAX_READ_LOCKS) {
		ret = EAGAIN;
		PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, ret);
	}
	else {
		++rwlock->state;	/* indicate we are locked for reading */
		PLOCKSTAT_RW_ACQUIRE(rwlock, READ_LOCK_PLOCKSTAT);
	}

	/*
	 * Something is really wrong if this call fails.  Returning
	 * error won't do because we've already obtained the read
	 * lock.  Decrementing 'state' is no good because we probably
	 * don't have the monitor lock.
	 */
	pthread_mutex_unlock(&rwlock->lock);

	return(ret);
}
2354
2355 int
2356 pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
2357 {
2358 int ret;
2359
2360 #if defined(__i386__) || defined(__x86_64__)
2361 if ((usenew_impl != 0)) {
2362 return(_new_pthread_rwlock_tryrdlock(rwlock));
2363 }
2364 #endif /* __i386__ || __x86_64__ */
2365
2366 /* check for static initialization */
2367 if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
2368 if ((ret = pthread_rwlock_init(rwlock, NULL)) != 0) {
2369 PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, ret);
2370 return(ret);
2371 }
2372 }
2373
2374 if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
2375 PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, EINVAL);
2376 return(EINVAL);
2377 }
2378 #if defined(__i386__) || defined(__x86_64__)
2379 if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
2380 ret = _new_pthread_rwlock_tryrdlock(rwlock);
2381 return(ret);
2382 }
2383 #endif /* __i386__ || __x86_64__ */
2384
2385 /* grab the monitor lock */
2386 if ((ret = pthread_mutex_lock(&rwlock->lock)) != 0) {
2387 PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, ret);
2388 return(ret);
2389 }
2390
2391 /* give writers priority over readers */
2392 if (rwlock->blocked_writers || rwlock->state < 0) {
2393 ret = EBUSY;
2394 PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, ret);
2395 }
2396 else if (rwlock->state == MAX_READ_LOCKS) {
2397 ret = EAGAIN; /* too many read locks acquired */
2398 PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, ret);
2399 }
2400 else {
2401 ++rwlock->state; /* indicate we are locked for reading */
2402 PLOCKSTAT_RW_ACQUIRE(rwlock, READ_LOCK_PLOCKSTAT);
2403 }
2404
2405 /* see the comment on this in pthread_rwlock_rdlock */
2406 pthread_mutex_unlock(&rwlock->lock);
2407
2408 return(ret);
2409 }
2410
2411 int
2412 pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
2413 {
2414 int ret;
2415 #if __DARWIN_UNIX03
2416 pthread_t self = pthread_self();
2417 #endif /* __DARWIN_UNIX03 */
2418
2419 #if defined(__i386__) || defined(__x86_64__)
2420 if ((usenew_impl != 0)) {
2421 return(_new_pthread_rwlock_trywrlock(rwlock));
2422 }
2423 #endif /* __i386__ || __x86_64__ */
2424
2425 /* check for static initialization */
2426 if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
2427 if ((ret = pthread_rwlock_init(rwlock, NULL)) != 0) {
2428 PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, ret);
2429 return(ret);
2430 }
2431 }
2432
2433 if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
2434 PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, EINVAL);
2435 return(EINVAL);
2436 }
2437
2438 #if defined(__i386__) || defined(__x86_64__)
2439 if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
2440 ret = _new_pthread_rwlock_trywrlock(rwlock);
2441 return(ret);
2442 }
2443 #endif /* __i386__ || __x86_64__ */
2444
2445 /* grab the monitor lock */
2446 if ((ret = pthread_mutex_lock(&rwlock->lock)) != 0) {
2447 PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, ret);
2448 return(ret);
2449 }
2450
2451
2452 if (rwlock->state != 0) {
2453 ret = EBUSY;
2454 PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, ret);
2455 }
2456 else {
2457 /* indicate we are locked for writing */
2458 rwlock->state = -1;
2459 #if __DARWIN_UNIX03
2460 rwlock->owner = self;
2461 #endif /* __DARWIN_UNIX03 */
2462 PLOCKSTAT_RW_ACQUIRE(rwlock, WRITE_LOCK_PLOCKSTAT);
2463 }
2464
2465 /* see the comment on this in pthread_rwlock_rdlock */
2466 pthread_mutex_unlock(&rwlock->lock);
2467
2468 return(ret);
2469 }
2470
2471 int
2472 pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
2473 {
2474 int ret;
2475 int writer = (rwlock < 0) ? 1:0;
2476
2477 #if defined(__i386__) || defined(__x86_64__)
2478 if ((usenew_impl != 0)) {
2479 return(_new_pthread_rwlock_unlock(rwlock));
2480 }
2481 #endif /* __i386__ || __x86_64__ */
2482
2483 if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
2484 PLOCKSTAT_RW_ERROR(rwlock, writer, EINVAL);
2485 return(EINVAL);
2486 }
2487
2488 #if defined(__i386__) || defined(__x86_64__)
2489 if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
2490 ret = _new_pthread_rwlock_unlock(rwlock);
2491 return(ret);
2492 }
2493 #endif /* __i386__ || __x86_64__ */
2494
2495
2496 /* grab the monitor lock */
2497 if ((ret = pthread_mutex_lock(&rwlock->lock)) != 0) {
2498 PLOCKSTAT_RW_ERROR(rwlock, writer, ret);
2499 return(ret);
2500 }
2501
2502 if (rwlock->state > 0) {
2503 if (--rwlock->state == 0 && rwlock->blocked_writers)
2504 ret = pthread_cond_signal(&rwlock->write_signal);
2505 } else if (rwlock->state < 0) {
2506 rwlock->state = 0;
2507 #if __DARWIN_UNIX03
2508 rwlock->owner = (pthread_t)0;
2509 #endif /* __DARWIN_UNIX03 */
2510
2511 if (rwlock->blocked_writers)
2512 ret = pthread_cond_signal(&rwlock->write_signal);
2513 else
2514 ret = pthread_cond_broadcast(&rwlock->read_signal);
2515 } else
2516 ret = EINVAL;
2517
2518 if (ret == 0) {
2519 PLOCKSTAT_RW_RELEASE(rwlock, writer);
2520 } else {
2521 PLOCKSTAT_RW_ERROR(rwlock, writer, ret);
2522 }
2523
2524 /* see the comment on this in pthread_rwlock_rdlock */
2525 pthread_mutex_unlock(&rwlock->lock);
2526
2527 return(ret);
2528 }
2529
/*
 * pthread_rwlock_wrlock - acquire a read-write lock for writing.
 *
 * Blocks until no readers and no other writer hold the lock.  Returns
 * 0 on success, EINVAL for an uninitialized lock, EDEADLK (UNIX03) if
 * the calling thread already holds the lock for writing.
 */
int
pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
	int ret;
#if __DARWIN_UNIX03
	pthread_t self = pthread_self();
#endif /* __DARWIN_UNIX03 */

#if defined(__i386__) || defined(__x86_64__)
	/* new implementation handles everything when enabled */
	if ((usenew_impl != 0)) {
		return(_new_pthread_rwlock_wrlock(rwlock));
	}
#endif /* __i386__ || __x86_64__ */

	/* check for static initialization */
	if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
		if ((ret = pthread_rwlock_init(rwlock, NULL)) != 0) {
			PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, ret);
			return(ret);
		}
	}

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, EINVAL);
		return(EINVAL);
	}

#if defined(__i386__) || defined(__x86_64__)
	/* process-shared locks live in the new implementation */
	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		ret = _new_pthread_rwlock_wrlock(rwlock);
		return(ret);
	}
#endif /* __i386__ || __x86_64__ */


	/* grab the monitor lock */
	if ((ret = pthread_mutex_lock(&rwlock->lock)) != 0) {
		PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, ret);
		return(ret);
	}

#if __DARWIN_UNIX03
	/* relocking our own write lock would deadlock */
	if ((rwlock->state < 0) && (rwlock->owner == self)) {
		pthread_mutex_unlock(&rwlock->lock);
		PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, EDEADLK);
		return(EDEADLK);
	}
#endif /* __DARWIN_UNIX03 */
	/* wait until the lock is completely free (no readers, no writer) */
	while (rwlock->state != 0) {
		/* a nonzero blocked_writers count also makes new readers queue up */
		++rwlock->blocked_writers;

		PLOCKSTAT_RW_BLOCK(rwlock, WRITE_LOCK_PLOCKSTAT);
		ret = pthread_cond_wait(&rwlock->write_signal, &rwlock->lock);

		if (ret != 0) {
			--rwlock->blocked_writers;
			pthread_mutex_unlock(&rwlock->lock);
			PLOCKSTAT_RW_BLOCKED(rwlock, WRITE_LOCK_PLOCKSTAT, BLOCK_FAIL_PLOCKSTAT);
			PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, ret);
			return(ret);
		}

		PLOCKSTAT_RW_BLOCKED(rwlock, WRITE_LOCK_PLOCKSTAT, BLOCK_SUCCESS_PLOCKSTAT);

		--rwlock->blocked_writers;
	}

	/* indicate we are locked for writing */
	rwlock->state = -1;
#if __DARWIN_UNIX03
	rwlock->owner = self;
#endif /* __DARWIN_UNIX03 */
	PLOCKSTAT_RW_ACQUIRE(rwlock, WRITE_LOCK_PLOCKSTAT);

	/* see the comment on this in pthread_rwlock_rdlock */
	pthread_mutex_unlock(&rwlock->lock);

	return(ret);
}
2609