/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995-2005 Apple Computer, Inc. All Rights Reserved */
/*
 * pthread_support.c
 */

#if PSYNCH

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/systm.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/time.h>
#include <sys/acct.h>
#include <sys/kernel.h>
#include <sys/wait.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/stat.h>
#include <sys/lock.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>
#include <sys/pthread_internal.h>
#include <sys/vm.h>
#include <sys/user.h>

#include <mach/mach_types.h>
#include <mach/vm_prot.h>
#include <mach/semaphore.h>
#include <mach/sync_policy.h>
#include <mach/task.h>
#include <kern/kern_types.h>
#include <kern/task.h>
#include <kern/clock.h>
#include <mach/kern_return.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/thread_call.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/processor.h>
#include <kern/affinity.h>
#include <kern/wait_queue.h>
#include <kern/mach_param.h>
#include <mach/mach_vm.h>
#include <mach/mach_param.h>
#include <mach/thread_policy.h>
#include <mach/message.h>
#include <mach/port.h>
#include <vm/vm_protos.h>
#include <vm/vm_map.h>
#include <mach/vm_region.h>

#include <libkern/OSAtomic.h>

#include <pexpert/pexpert.h>

#define __PSYNCH_DEBUG__ 0		/* debug panic actions */
#define _PSYNCH_TRACE_ 1		/* kdebug trace */

#define __TESTMODE__ 2			/* 0 - return error on user error conditions */
					/* 1 - log error on user error conditions */
					/* 2 - abort caller on user error conditions */
					/* 3 - panic on user error conditions */
static int __test_panics__;
static int __test_aborts__;
static int __test_prints__;

static inline void __FAILEDUSERTEST__(const char *str)
{
	proc_t p;

	if (__test_panics__ != 0)
		panic(str);

	if (__test_aborts__ != 0 || __test_prints__ != 0)
		p = current_proc();

	if (__test_prints__ != 0)
		printf("PSYNCH: pid[%d]: %s\n", p->p_pid, str);

	if (__test_aborts__ != 0)
		psignal(p, SIGABRT);
}
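
#if 0
/*
 * Illustrative sketch only: the __test_* flags above are configured at
 * initialization elsewhere, not in this file. One plausible mapping
 * from the __TESTMODE__ levels documented above (hypothetical helper)
 * would be:
 */
static void
__testmode_init(int mode)
{
	if (mode == 1)
		__test_prints__ = 1;	/* log user errors */
	else if (mode == 2)
		__test_aborts__ = 1;	/* SIGABRT the caller */
	else if (mode == 3)
		__test_panics__ = 1;	/* panic the kernel */
}
#endif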
116
117 #if _PSYNCH_TRACE_
118 #define _PSYNCH_TRACE_MLWAIT 0x9000000
119 #define _PSYNCH_TRACE_MLDROP 0x9000004
120 #define _PSYNCH_TRACE_CVWAIT 0x9000008
121 #define _PSYNCH_TRACE_CVSIGNAL 0x900000c
122 #define _PSYNCH_TRACE_CVBROAD 0x9000010
123 #define _PSYNCH_TRACE_KMDROP 0x9000014
124 #define _PSYNCH_TRACE_RWRDLOCK 0x9000018
125 #define _PSYNCH_TRACE_RWLRDLOCK 0x900001c
126 #define _PSYNCH_TRACE_RWWRLOCK 0x9000020
127 #define _PSYNCH_TRACE_RWYWRLOCK 0x9000024
128 #define _PSYNCH_TRACE_RWUPGRADE 0x9000028
129 #define _PSYNCH_TRACE_RWDOWNGRADE 0x900002c
130 #define _PSYNCH_TRACE_RWUNLOCK 0x9000030
131 #define _PSYNCH_TRACE_RWUNLOCK2 0x9000034
132 #define _PSYNCH_TRACE_RWHANDLEU 0x9000038
133 #define _PSYNCH_TRACE_FSEQTILL 0x9000040
134 #define _PSYNCH_TRACE_CLRPRE 0x9000044
135 #define _PSYNCH_TRACE_CVHBROAD 0x9000048
136 #define _PSYNCH_TRACE_CVSEQ 0x900004c
137 #define _PSYNCH_TRACE_THWAKEUP 0x9000050
138 /* user side */
139 #define _PSYNCH_TRACE_UM_LOCK 0x9000060
140 #define _PSYNCH_TRACE_UM_UNLOCK 0x9000064
141 #define _PSYNCH_TRACE_UM_MHOLD 0x9000068
142 #define _PSYNCH_TRACE_UM_MDROP 0x900006c
143 #define _PSYNCH_TRACE_UM_CVWAIT 0x9000070
144 #define _PSYNCH_TRACE_UM_CVSIG 0x9000074
145 #define _PSYNCH_TRACE_UM_CVBRD 0x9000078
146
147 proc_t pthread_debug_proc = PROC_NULL;
148 static inline void __PTHREAD_TRACE_DEBUG(uint32_t debugid, uintptr_t arg1,
149 uintptr_t arg2,
150 uintptr_t arg3,
151 uintptr_t arg4,
152 uintptr_t arg5)
153 {
154 proc_t p = current_proc();
155
156 if ((pthread_debug_proc != NULL) && (p == pthread_debug_proc))
157 KERNEL_DEBUG_CONSTANT(debugid, arg1, arg2, arg3, arg4, arg5);
158 }
159
160 #endif /* _PSYNCH_TRACE_ */
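
/*
 * Usage note (illustrative): tracing is gated per process. Pointing
 * pthread_debug_proc at a target proc_t makes every
 * __PTHREAD_TRACE_DEBUG() call issued from that process emit a
 * KERNEL_DEBUG_CONSTANT record; calls from all other processes are
 * ignored.
 */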

#define ECVCERORR	256
#define ECVPERORR	512

lck_mtx_t * pthread_list_mlock;

#define PTHHASH(addr)	(&pthashtbl[(addr) & pthhash])
extern LIST_HEAD(pthhashhead, ksyn_wait_queue) *pth_glob_hashtbl;
struct pthhashhead * pth_glob_hashtbl;
u_long pthhash;

LIST_HEAD(, ksyn_wait_queue) pth_free_list;
int num_total_kwq = 0;		/* number of kwq in use currently */
int num_infreekwq = 0;		/* number of kwq in free list */
int num_freekwq = 0;		/* number of kwq actually freed from the free list */
int num_reusekwq = 0;		/* number of kwq pulled back for reuse from free list */
int num_addedfreekwq = 0;	/* number of added free kwq from the last instance */
int num_lastfreekwqcount = 0;	/* the free count from the last time */

static int PTH_HASHSIZE = 100;

static zone_t kwq_zone;		/* zone for allocation of ksyn_queue */
static zone_t kwe_zone;		/* zone for allocation of ksyn_waitq_element */

#define SEQFIT 0
#define FIRSTFIT 1

struct ksyn_queue {
	TAILQ_HEAD(ksynq_kwelist_head, ksyn_waitq_element) ksynq_kwelist;
	uint32_t	ksynq_count;		/* number of entries in queue */
	uint32_t	ksynq_firstnum;		/* lowest seq in queue */
	uint32_t	ksynq_lastnum;		/* highest seq in queue */
};
typedef struct ksyn_queue * ksyn_queue_t;

#define KSYN_QUEUE_READ		0
#define KSYN_QUEUE_LREAD	1
#define KSYN_QUEUE_WRITER	2
#define KSYN_QUEUE_YWRITER	3
#define KSYN_QUEUE_UPGRADE	4
#define KSYN_QUEUE_MAX		5

struct ksyn_wait_queue {
	LIST_ENTRY(ksyn_wait_queue) kw_hash;
	LIST_ENTRY(ksyn_wait_queue) kw_list;
	user_addr_t kw_addr;
	uint64_t kw_owner;
	uint64_t kw_object;		/* object backing in shared mode */
	uint64_t kw_offset;		/* offset inside the object in shared mode */
	int	kw_flags;		/* mutex, cvar options/flags */
	int	kw_pflags;		/* flags under listlock protection */
	struct timeval kw_ts;		/* timeval needed for upkeep before free */
	int	kw_iocount;		/* inuse reference */
	int	kw_dropcount;		/* current users unlocking... */

	int	kw_type;		/* queue type like mutex, cvar, etc */
	uint32_t kw_inqueue;		/* num of waiters held */
	uint32_t kw_fakecount;		/* number of error/prepost fakes */
	uint32_t kw_highseq;		/* highest seq in the queue */
	uint32_t kw_lowseq;		/* lowest seq in the queue */
	uint32_t kw_lword;		/* L word value from userland */
	uint32_t kw_uword;		/* U word value from userland */
	uint32_t kw_sword;		/* S word value from userland */
	uint32_t kw_lastunlockseq;	/* the last seq that unlocked */
/* for CV, used as the seq the kernel has seen so far */
#define kw_cvkernelseq kw_lastunlockseq
	uint32_t kw_lastseqword;	/* the last seq that unlocked */
/* for mutex and cvar we need to track I bit values */
	uint32_t kw_nextseqword;	/* the last seq that unlocked; with num of waiters */
#define kw_initrecv kw_nextseqword	/* number of incoming waiters with Ibit seen so far */
	uint32_t kw_overlapwatch;	/* chance for overlaps */
#define kw_initcount kw_overlapwatch	/* number of incoming waiters with Ibit expected */
	uint32_t kw_initcountseq;	/* highest seq with Ibit on for mutex and cvar */
	uint32_t kw_pre_rwwc;		/* prepost count */
	uint32_t kw_pre_lockseq;	/* prepost target seq */
	uint32_t kw_pre_sseq;		/* prepost target sword, in cvar used for mutexowned */
	uint32_t kw_pre_intrcount;	/* prepost of missed wakeup due to intrs */
	uint32_t kw_pre_intrseq;	/* prepost of missed wakeup limit seq */
	uint32_t kw_pre_intrretbits;	/* return bits value for missed wakeup threads */
	uint32_t kw_pre_intrtype;	/* type of failed wakeups */

	int	kw_kflags;
	struct ksyn_queue kw_ksynqueues[KSYN_QUEUE_MAX];	/* queues to hold threads */
	lck_mtx_t kw_lock;		/* mutex lock protecting this structure */
};
typedef struct ksyn_wait_queue * ksyn_wait_queue_t;

#define PTHRW_INC		0x100
#define PTHRW_BIT_MASK		0x000000ff

#define PTHRW_COUNT_SHIFT	8
#define PTHRW_COUNT_MASK	0xffffff00
#define PTHRW_MAX_READERS	0xffffff00

/* New model bits on Lword */
#define PTH_RWL_KBIT	0x01	/* users cannot acquire in user mode */
#define PTH_RWL_EBIT	0x02	/* exclusive lock in progress */
#define PTH_RWL_WBIT	0x04	/* write waiters pending in kernel */
#define PTH_RWL_PBIT	0x04	/* prepost (cv) pending in kernel */
#define PTH_RWL_YBIT	0x08	/* yielding write waiters pending in kernel */
#define PTH_RWL_RETRYBIT 0x08	/* mutex retry wait */
#define PTH_RWL_LBIT	0x10	/* long read in progress */
#define PTH_RWL_MTXNONE	0x10	/* indicates the cvwait does not have mutex held */
#define PTH_RWL_UBIT	0x20	/* upgrade request pending */
#define PTH_RWL_MTX_WAIT 0x20	/* in cvar in mutex wait */
#define PTH_RWL_RBIT	0x40	/* reader pending in kernel (not used) */
#define PTH_RWL_MBIT	0x40	/* overlapping grants from kernel */
#define PTH_RWL_TRYLKBIT 0x40	/* trylock attempt (mutex only) */
#define PTH_RWL_IBIT	0x80	/* lock reset, held until first successful unlock */

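#if 0
/*
 * Illustrative decoding sketch (hypothetical helper, not part of this
 * file): an L word splits into a sequence count and status bits using
 * the masks above. For example, 0x00000305 decodes to count 0x300 with
 * PTH_RWL_KBIT and PTH_RWL_WBIT set.
 */
static void
decode_lword(uint32_t lword, uint32_t *countp, uint32_t *bitsp)
{
	*countp = lword & PTHRW_COUNT_MASK;	/* 0x305 -> 0x300 */
	*bitsp  = lword & PTHRW_BIT_MASK;	/* 0x305 -> 0x05 (KBIT|WBIT) */
}
#endif
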
/* UBIT values for mutex, cvar */
#define PTH_RWU_SBIT	0x01
#define PTH_RWU_BBIT	0x02

#define PTHRW_RWL_INIT	PTH_RWL_IBIT	/* reset state on the lock bits (U) */

/* New model bits on Sword */
#define PTH_RWS_SBIT	0x01	/* kernel transition seq not set yet */
#define PTH_RWS_IBIT	0x02	/* Sequence is not set on return from kernel */
#define PTH_RWS_CV_CBIT	PTH_RWS_SBIT	/* kernel has cleared all info w.r.t. CV */
#define PTH_RWS_CV_PBIT	PTH_RWS_IBIT	/* kernel has prepost/fake structs only, no waiters */
#define PTH_RWS_CV_MBIT	PTH_RWL_MBIT	/* to indicate prepost return */
#define PTH_RWS_WSVBIT	0x04	/* save W bit */
#define PTH_RWS_USVBIT	0x08	/* save U bit */
#define PTH_RWS_YSVBIT	0x10	/* save Y bit */
#define PTHRW_RWS_INIT	PTH_RWS_SBIT	/* reset on the lock bits (U) */
#define PTHRW_RWS_SAVEMASK	(PTH_RWS_WSVBIT|PTH_RWS_USVBIT|PTH_RWS_YSVBIT)	/* save bits mask */
#define PTHRW_SW_Reset_BIT_MASK	0x000000fe	/* remove S bit and get rest of the bits */


#define PTHRW_UN_BIT_MASK	0x000000bf	/* remove overlap bit */

#define PTHREAD_MTX_TID_SWITCHING (uint64_t)-1

/* new L word defns */
#define is_rwl_readinuser(x) ((((x) & (PTH_RWL_UBIT | PTH_RWL_KBIT)) == 0)||(((x) & PTH_RWL_LBIT) != 0))
#define is_rwl_ebit_set(x) (((x) & PTH_RWL_EBIT) != 0)
#define is_rwl_lbit_set(x) (((x) & PTH_RWL_LBIT) != 0)
#define is_rwl_readoverlap(x) (((x) & PTH_RWL_MBIT) != 0)
#define is_rw_ubit_set(x) (((x) & PTH_RWL_UBIT) != 0)

/* S word checks */
#define is_rws_setseq(x) (((x) & PTH_RWS_SBIT))
#define is_rws_setunlockinit(x) (((x) & PTH_RWS_IBIT))

/* first contended seq that kernel sees */
#define KW_MTXFIRST_KSEQ	0x200
#define KW_CVFIRST_KSEQ		1
#define KW_RWFIRST_KSEQ		0x200

int is_seqlower(uint32_t x, uint32_t y);
int is_seqlower_eq(uint32_t x, uint32_t y);
int is_seqhigher(uint32_t x, uint32_t y);
int is_seqhigher_eq(uint32_t x, uint32_t y);
int find_diff(uint32_t upto, uint32_t lowest);


static inline int diff_genseq(uint32_t x, uint32_t y) {
	if (x > y) {
		return(x-y);
	} else {
		return((PTHRW_MAX_READERS - y) + x + PTHRW_INC);
	}
}

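/*
 * Worked example (illustrative): sequence numbers advance in steps of
 * PTHRW_INC (0x100) within PTHRW_COUNT_MASK, so distances wrap at
 * PTHRW_MAX_READERS. Without wrap, diff_genseq(0x300, 0x100) == 0x200;
 * across a wrap, diff_genseq(0x100, 0xffffff00) ==
 * (0xffffff00 - 0xffffff00) + 0x100 + 0x100 == 0x200 as well.
 */
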
#define TID_ZERO (uint64_t)0

/* bits needed in handling the rwlock unlock */
#define PTH_RW_TYPE_READ	0x01
#define PTH_RW_TYPE_LREAD	0x02
#define PTH_RW_TYPE_WRITE	0x04
#define PTH_RW_TYPE_YWRITE	0x08
#define PTH_RW_TYPE_UPGRADE	0x10
#define PTH_RW_TYPE_MASK	0xff
#define PTH_RW_TYPE_SHIFT	8

#define PTH_RWSHFT_TYPE_READ	0x0100
#define PTH_RWSHFT_TYPE_LREAD	0x0200
#define PTH_RWSHFT_TYPE_WRITE	0x0400
#define PTH_RWSHFT_TYPE_YWRITE	0x0800
#define PTH_RWSHFT_TYPE_MASK	0xff00

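/*
 * Example (illustrative): the PTH_RWSHFT_TYPE_* values are simply the
 * PTH_RW_TYPE_* bits shifted up by PTH_RW_TYPE_SHIFT, e.g.
 * PTH_RW_TYPE_WRITE << PTH_RW_TYPE_SHIFT == 0x04 << 8 == 0x0400
 * == PTH_RWSHFT_TYPE_WRITE.
 */
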
/*
 * Mutex protocol attributes
 */
#define PTHREAD_PRIO_NONE	0
#define PTHREAD_PRIO_INHERIT	1
#define PTHREAD_PRIO_PROTECT	2
#define PTHREAD_PROTOCOL_FLAGS_MASK	0x3

/*
 * Mutex type attributes
 */
#define PTHREAD_MUTEX_NORMAL		0
#define PTHREAD_MUTEX_ERRORCHECK	4
#define PTHREAD_MUTEX_RECURSIVE		8
#define PTHREAD_MUTEX_DEFAULT		PTHREAD_MUTEX_NORMAL
#define PTHREAD_TYPE_FLAGS_MASK		0xc

/*
 * Mutex pshared attributes
 */
#define PTHREAD_PROCESS_SHARED	0x10
#define PTHREAD_PROCESS_PRIVATE	0x20
#define PTHREAD_PSHARED_FLAGS_MASK	0x30

/*
 * Mutex policy attributes
 */
#define _PTHREAD_MUTEX_POLICY_NONE	0
#define _PTHREAD_MUTEX_POLICY_FAIRSHARE	0x040	/* 1 */
#define _PTHREAD_MUTEX_POLICY_FIRSTFIT	0x080	/* 2 */
#define _PTHREAD_MUTEX_POLICY_REALTIME	0x0c0	/* 3 */
#define _PTHREAD_MUTEX_POLICY_ADAPTIVE	0x100	/* 4 */
#define _PTHREAD_MUTEX_POLICY_PRIPROTECT	0x140	/* 5 */
#define _PTHREAD_MUTEX_POLICY_PRIINHERIT	0x180	/* 6 */
#define PTHREAD_POLICY_FLAGS_MASK	0x1c0

#define _PTHREAD_MTX_OPT_HOLDLOCK	0x200
#define _PTHREAD_MTX_OPT_NOMTX		0x400

#define _PTHREAD_MTX_OPT_NOTIFY		0x1000
#define _PTHREAD_MTX_OPT_MUTEX		0x2000	/* this is a mutex type */

#define _PTHREAD_RWLOCK_UPGRADE_TRY	0x10000

/* pflags */
#define KSYN_WQ_INLIST	1
#define KSYN_WQ_INHASH	2
#define KSYN_WQ_SHARED	4
#define KSYN_WQ_WAITING	8	/* threads waiting for this wq to be available */
#define KSYN_WQ_FLIST	0x10	/* in free list to be freed after a short delay */

/* kflags */
#define KSYN_KWF_INITCLEARED	1	/* the init status found and preposts cleared */
#define KSYN_KWF_ZEROEDOUT	2	/* the lword, etc are inited to 0 */

#define KSYN_CLEANUP_DEADLINE 10
int psynch_cleanupset;
thread_call_t psynch_thcall;

#define KSYN_WQTYPE_INWAIT	0x1000
#define KSYN_WQTYPE_INDROP	0x2000
#define KSYN_WQTYPE_MTX		0x1
#define KSYN_WQTYPE_CVAR	0x2
#define KSYN_WQTYPE_RWLOCK	0x4
#define KSYN_WQTYPE_SEMA	0x8
#define KSYN_WQTYPE_BARR	0x10
#define KSYN_WQTYPE_MASK	0x00ff

#define KSYN_MTX_MAX 0x0fffffff
#define KSYN_WQTYPE_MUTEXDROP	(KSYN_WQTYPE_INDROP | KSYN_WQTYPE_MTX)

#define KW_UNLOCK_PREPOST		0x01
#define KW_UNLOCK_PREPOST_UPGRADE	0x02
#define KW_UNLOCK_PREPOST_DOWNGRADE	0x04
#define KW_UNLOCK_PREPOST_READLOCK	0x08
#define KW_UNLOCK_PREPOST_LREADLOCK	0x10
#define KW_UNLOCK_PREPOST_WRLOCK	0x20
#define KW_UNLOCK_PREPOST_YWRLOCK	0x40

#define CLEAR_PREPOST_BITS(kwq) {\
			kwq->kw_pre_lockseq = 0; \
			kwq->kw_pre_sseq = PTHRW_RWS_INIT; \
			kwq->kw_pre_rwwc = 0; \
		}

#define CLEAR_INITCOUNT_BITS(kwq) {\
			kwq->kw_initcount = 0; \
			kwq->kw_initrecv = 0; \
			kwq->kw_initcountseq = 0; \
		}

#define CLEAR_INTR_PREPOST_BITS(kwq) {\
			kwq->kw_pre_intrcount = 0; \
			kwq->kw_pre_intrseq = 0; \
			kwq->kw_pre_intrretbits = 0; \
			kwq->kw_pre_intrtype = 0; \
		}

#define CLEAR_REINIT_BITS(kwq) {\
			if ((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_CVAR) { \
				if ((kwq->kw_inqueue != 0) && (kwq->kw_inqueue != kwq->kw_fakecount)) \
					panic("CV: entries in queue during reinit %d:%d\n", kwq->kw_inqueue, kwq->kw_fakecount); \
			};\
			if ((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_RWLOCK) { \
				kwq->kw_nextseqword = PTHRW_RWS_INIT; \
				kwq->kw_overlapwatch = 0; \
			}; \
			kwq->kw_pre_lockseq = 0; \
			kwq->kw_pre_rwwc = 0; \
			kwq->kw_pre_sseq = PTHRW_RWS_INIT; \
			kwq->kw_lastunlockseq = PTHRW_RWL_INIT; \
			kwq->kw_lastseqword = PTHRW_RWS_INIT; \
			kwq->kw_pre_intrcount = 0; \
			kwq->kw_pre_intrseq = 0; \
			kwq->kw_pre_intrretbits = 0; \
			kwq->kw_pre_intrtype = 0; \
			kwq->kw_lword = 0; \
			kwq->kw_uword = 0; \
			kwq->kw_sword = PTHRW_RWS_INIT; \
		}

void pthread_list_lock(void);
void pthread_list_unlock(void);
void pthread_list_lock_spin(void);
void pthread_list_lock_convert_spin(void);
void ksyn_wqlock(ksyn_wait_queue_t kwq);
void ksyn_wqunlock(ksyn_wait_queue_t kwq);
ksyn_wait_queue_t ksyn_wq_hash_lookup(user_addr_t mutex, proc_t p, int flags, uint64_t object, uint64_t offset);
int ksyn_wqfind(user_addr_t mutex, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, uint64_t tid, int flags, int wqtype, ksyn_wait_queue_t * wq);
void ksyn_wqrelease(ksyn_wait_queue_t mkwq, ksyn_wait_queue_t ckwq, int qfreenow, int wqtype);
extern int ksyn_findobj(uint64_t mutex, uint64_t * object, uint64_t * offset);
static void UPDATE_CVKWQ(ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, uint64_t tid, int wqtype);
extern thread_t port_name_to_thread(mach_port_name_t port_name);

int ksyn_block_thread_locked(ksyn_wait_queue_t kwq, uint64_t abstime, ksyn_waitq_element_t kwe, int log);
kern_return_t ksyn_wakeup_thread(ksyn_wait_queue_t kwq, ksyn_waitq_element_t kwe);
void ksyn_freeallkwe(ksyn_queue_t kq);

uint32_t psynch_mutexdrop_internal(ksyn_wait_queue_t kwq, uint32_t lkseq, uint32_t ugen, int flags);
int kwq_handle_unlock(ksyn_wait_queue_t, uint32_t mgen, uint32_t rw_wc, uint32_t * updatep, int flags, int *blockp, uint32_t premgen);

void ksyn_queue_init(ksyn_queue_t kq);
int ksyn_queue_insert(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t mgen, struct uthread * uth, ksyn_waitq_element_t kwe, int firstfit);
ksyn_waitq_element_t ksyn_queue_removefirst(ksyn_queue_t kq, ksyn_wait_queue_t kwq);
void ksyn_queue_removeitem(ksyn_wait_queue_t kwq, ksyn_queue_t kq, ksyn_waitq_element_t kwe);
int ksyn_queue_move_tofree(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t upto, ksyn_queue_t freeq, int all, int release);
void update_low_high(ksyn_wait_queue_t kwq, uint32_t lockseq);
uint32_t find_nextlowseq(ksyn_wait_queue_t kwq);
uint32_t find_nexthighseq(ksyn_wait_queue_t kwq);

int find_seq_till(ksyn_wait_queue_t kwq, uint32_t upto, uint32_t nwaiters, uint32_t *countp);
uint32_t ksyn_queue_count_tolowest(ksyn_queue_t kq, uint32_t upto);

ksyn_waitq_element_t ksyn_queue_find_cvpreposeq(ksyn_queue_t kq, uint32_t cgen);
uint32_t ksyn_queue_cvcount_entries(ksyn_queue_t kq, uint32_t upto, uint32_t from, int * numwaitersp, int * numintrp, int * numprepop);
void ksyn_handle_cvbroad(ksyn_wait_queue_t ckwq, uint32_t upto, uint32_t *updatep);
void ksyn_cvupdate_fixup(ksyn_wait_queue_t ckwq, uint32_t *updatep, ksyn_queue_t kfreeq, int release);
ksyn_waitq_element_t ksyn_queue_find_signalseq(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t toseq, uint32_t lockseq);
ksyn_waitq_element_t ksyn_queue_find_threadseq(ksyn_wait_queue_t ckwq, ksyn_queue_t kq, thread_t th, uint32_t toseq);

int ksyn_wakeupreaders(ksyn_wait_queue_t kwq, uint32_t limitread, int longreadset, int allreaders, uint32_t updatebits, int * wokenp);
int kwq_find_rw_lowest(ksyn_wait_queue_t kwq, int flags, uint32_t premgen, int * type, uint32_t lowest[]);
ksyn_waitq_element_t ksyn_queue_find_seq(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t seq, int remove);
int kwq_handle_overlap(ksyn_wait_queue_t kwq, uint32_t lgenval, uint32_t ugenval, uint32_t rw_wc, uint32_t *updatebitsp, int flags, int * blockp);
int kwq_handle_downgrade(ksyn_wait_queue_t kwq, uint32_t mgen, int flags, uint32_t premgen, int * blockp);

static void
UPDATE_CVKWQ(ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, __unused uint64_t tid, __unused int wqtype)
{
	if ((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_CVAR) {
		if ((kwq->kw_kflags & KSYN_KWF_ZEROEDOUT) != 0) {
			/* the values of L, U and S are cleared out due to L==S in previous transition */
			kwq->kw_lword = mgen;
			kwq->kw_uword = ugen;
			kwq->kw_sword = rw_wc;
			kwq->kw_kflags &= ~KSYN_KWF_ZEROEDOUT;
		}
		if (is_seqhigher((mgen & PTHRW_COUNT_MASK), (kwq->kw_lword & PTHRW_COUNT_MASK)) != 0)
			kwq->kw_lword = mgen;
		if (is_seqhigher((ugen & PTHRW_COUNT_MASK), (kwq->kw_uword & PTHRW_COUNT_MASK)) != 0)
			kwq->kw_uword = ugen;
		if ((rw_wc & PTH_RWS_CV_CBIT) != 0) {
			if (is_seqlower(kwq->kw_cvkernelseq, (rw_wc & PTHRW_COUNT_MASK)) != 0) {
				kwq->kw_cvkernelseq = (rw_wc & PTHRW_COUNT_MASK);
			}
			if (is_seqhigher((rw_wc & PTHRW_COUNT_MASK), (kwq->kw_sword & PTHRW_COUNT_MASK)) != 0)
				kwq->kw_sword = rw_wc;
		}
	}
}

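/*
 * Example (illustrative): the updates above are monotonic in sequence
 * space. If kw_lword is currently 0x200 and a caller passes mgen 0x400,
 * is_seqhigher(0x400, 0x200) holds and kw_lword advances to 0x400; a
 * stale mgen of 0x100 would leave kw_lword untouched.
 */
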

/* to protect the hashes, iocounts, freelist */
void
pthread_list_lock(void)
{
	lck_mtx_lock(pthread_list_mlock);
}

void
pthread_list_lock_spin(void)
{
	lck_mtx_lock_spin(pthread_list_mlock);
}

void
pthread_list_lock_convert_spin(void)
{
	lck_mtx_convert_spin(pthread_list_mlock);
}


void
pthread_list_unlock(void)
{
	lck_mtx_unlock(pthread_list_mlock);
}

/* to protect the individual queue */
void
ksyn_wqlock(ksyn_wait_queue_t kwq)
{

	lck_mtx_lock(&kwq->kw_lock);
}

void
ksyn_wqunlock(ksyn_wait_queue_t kwq)
{
	lck_mtx_unlock(&kwq->kw_lock);
}


/* routine to drop the mutex unlock, used both for the mutexunlock system call and for the drop during cond wait */
uint32_t
psynch_mutexdrop_internal(ksyn_wait_queue_t kwq, uint32_t lkseq, uint32_t ugen, int flags)
{
	uint32_t nextgen, low_writer, updatebits, returnbits = 0;
	int firstfit = flags & _PTHREAD_MUTEX_POLICY_FIRSTFIT;
	ksyn_waitq_element_t kwe = NULL;
	kern_return_t kret = KERN_SUCCESS;

	nextgen = (ugen + PTHRW_INC);

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_START, (uint32_t)kwq->kw_addr, lkseq, ugen, flags, 0);
#endif /* _PSYNCH_TRACE_ */

	ksyn_wqlock(kwq);

redrive:

	if (kwq->kw_inqueue != 0) {
		updatebits = (kwq->kw_highseq & PTHRW_COUNT_MASK) | (PTH_RWL_EBIT | PTH_RWL_KBIT);
		kwq->kw_lastunlockseq = (ugen & PTHRW_COUNT_MASK);
		if (firstfit != 0)
		{
			/* first fit, pick any one */
			kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwq);
			kwe->kwe_psynchretval = updatebits;
			kwe->kwe_kwqqueue = NULL;

#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xcafecaf1, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
#endif /* _PSYNCH_TRACE_ */

			kret = ksyn_wakeup_thread(kwq, kwe);
#if __TESTPANICS__
			if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
				panic("psynch_mutexdrop_internal: panic unable to wakeup firstfit mutex thread\n");
#endif /* __TESTPANICS__ */
			if (kret == KERN_NOT_WAITING)
				goto redrive;
		} else {
			/* handle fairshare */
			low_writer = kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_firstnum;
			low_writer &= PTHRW_COUNT_MASK;

			if (low_writer == nextgen) {
				/* next seq to be granted found */
				kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwq);

				/* since the grant could be cv, make sure mutex wait is set in case the thread interrupted out */
				kwe->kwe_psynchretval = updatebits | PTH_RWL_MTX_WAIT;
				kwe->kwe_kwqqueue = NULL;

#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xcafecaf2, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
#endif /* _PSYNCH_TRACE_ */

				kret = ksyn_wakeup_thread(kwq, kwe);
#if __TESTPANICS__
				if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
					panic("psynch_mutexdrop_internal: panic unable to wakeup fairshare mutex thread\n");
#endif /* __TESTPANICS__ */
				if (kret == KERN_NOT_WAITING) {
					/* interrupt post */
					kwq->kw_pre_intrcount = 1;
					kwq->kw_pre_intrseq = nextgen;
					kwq->kw_pre_intrretbits = updatebits;
					kwq->kw_pre_intrtype = PTH_RW_TYPE_WRITE;
#if _PSYNCH_TRACE_
					__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfafafaf1, nextgen, kwq->kw_pre_intrretbits, 0);
#endif /* _PSYNCH_TRACE_ */
				}

			} else if (is_seqhigher(low_writer, nextgen) != 0) {
				kwq->kw_pre_rwwc++;

				if (kwq->kw_pre_rwwc > 1) {
					__FAILEDUSERTEST__("psynch_mutexdrop_internal: prepost more than one (1)\n");
					goto out;
				}

				kwq->kw_pre_lockseq = (nextgen & PTHRW_COUNT_MASK);
#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef1, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
			} else {

				//__FAILEDUSERTEST__("psynch_mutexdrop_internal: FS mutex unlock sequence higher than the lowest one in queue\n");

				kwe = ksyn_queue_find_seq(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], (nextgen & PTHRW_COUNT_MASK), 1);
				if (kwe != NULL) {
					/* next seq to be granted found */
					/* since the grant could be cv, make sure mutex wait is set in case the thread interrupted out */
					kwe->kwe_psynchretval = updatebits | PTH_RWL_MTX_WAIT;
					kwe->kwe_kwqqueue = NULL;
#if _PSYNCH_TRACE_
					__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xcafecaf3, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
#endif /* _PSYNCH_TRACE_ */
					kret = ksyn_wakeup_thread(kwq, kwe);
#if __TESTPANICS__
					if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
						panic("psynch_mutexdrop_internal: panic unable to wakeup fairshare mutex thread\n");
#endif /* __TESTPANICS__ */
					if (kret == KERN_NOT_WAITING)
						goto redrive;
				} else {
					/* next seq to be granted not found, prepost */
					kwq->kw_pre_rwwc++;

					if (kwq->kw_pre_rwwc > 1) {
						__FAILEDUSERTEST__("psynch_mutexdrop_internal: prepost more than one (2)\n");
						goto out;
					}

					kwq->kw_pre_lockseq = (nextgen & PTHRW_COUNT_MASK);
#if _PSYNCH_TRACE_
					__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
				}
			}
		}
	} else {

		/* if firstfit the last one could be spurious */
		if (firstfit == 0) {
			kwq->kw_lastunlockseq = (ugen & PTHRW_COUNT_MASK);
			kwq->kw_pre_rwwc++;

			if (kwq->kw_pre_rwwc > 1) {
				__FAILEDUSERTEST__("psynch_mutexdrop_internal: prepost more than one (3)\n");
				goto out;
			}

			kwq->kw_pre_lockseq = (nextgen & PTHRW_COUNT_MASK);
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef3, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
		} else {
			/* first fit case */
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef3, kwq->kw_lastunlockseq, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
			kwq->kw_lastunlockseq = (ugen & PTHRW_COUNT_MASK);
			/* not set or the new lkseq is higher */
			if ((kwq->kw_pre_rwwc == 0) || (is_seqlower(kwq->kw_pre_lockseq, lkseq) == 0))
				kwq->kw_pre_lockseq = (lkseq & PTHRW_COUNT_MASK);
			kwq->kw_pre_rwwc = 1;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef3, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */

			/* indicate prepost content in kernel */
			returnbits = lkseq | PTH_RWL_PBIT;
		}
	}

out:
	ksyn_wqunlock(kwq);

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_END, (uint32_t)kwq->kw_addr, 0xeeeeeeed, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	ksyn_wqrelease(kwq, NULL, 1, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_MTX));
	return(returnbits);
}

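/*
 * Worked example (illustrative) for the fairshare path above: with
 * ugen 0x200 the next grant target is nextgen 0x300. If the lowest
 * queued writer holds lockseq 0x300, it is woken directly; if the
 * lowest queued seq is higher (say 0x400), the unlock has arrived
 * ahead of its waiter, so it is recorded as a prepost
 * (kw_pre_rwwc/kw_pre_lockseq) for the 0x300 waiter to consume when it
 * enters the kernel.
 */
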
/*
 * psynch_mutexwait: This system call is used by waiters on contended psynch mutexes to block.
 */

int
psynch_mutexwait(__unused proc_t p, struct psynch_mutexwait_args * uap, uint32_t * retval)
{
	user_addr_t mutex = uap->mutex;
	uint32_t mgen = uap->mgen;
	uint32_t ugen = uap->ugen;
	uint64_t tid = uap->tid;
	int flags = uap->flags;
	ksyn_wait_queue_t kwq;
	int error = 0;
	int ins_flags, retry;
	uthread_t uth;
	int firstfit = flags & _PTHREAD_MUTEX_POLICY_FIRSTFIT;
	uint32_t lockseq, updatebits = 0;
	ksyn_waitq_element_t kwe;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_START, (uint32_t)mutex, mgen, ugen, flags, 0);
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_NONE, (uint32_t)mutex, mgen, ugen, (uint32_t)tid, 0);
#endif /* _PSYNCH_TRACE_ */

	uth = current_uthread();

	kwe = &uth->uu_kwe;
	kwe->kwe_lockseq = uap->mgen;
	kwe->kwe_uth = uth;
	kwe->kwe_psynchretval = 0;
	kwe->kwe_kwqqueue = NULL;
	lockseq = (uap->mgen & PTHRW_COUNT_MASK);

	if (firstfit == 0) {
		ins_flags = SEQFIT;
	} else {
		/* first fit */
		ins_flags = FIRSTFIT;
	}

	error = ksyn_wqfind(mutex, mgen, ugen, 0, tid, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_MTX), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_END, (uint32_t)mutex, 1, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	ksyn_wqlock(kwq);


	if ((mgen & PTH_RWL_RETRYBIT) != 0) {
		retry = 1;
		mgen &= ~PTH_RWL_RETRYBIT;
	}

	/* handle the missed wakeups first */
	if ((kwq->kw_pre_intrcount != 0) &&
		((kwq->kw_pre_intrtype == PTH_RW_TYPE_WRITE)) &&
		(is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {
		kwq->kw_pre_intrcount--;
		kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
		if (kwq->kw_pre_intrcount == 0)
			CLEAR_INTR_PREPOST_BITS(kwq);
		ksyn_wqunlock(kwq);
		*retval = kwe->kwe_psynchretval;
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_NONE, (uint32_t)mutex, 0xfafafaf1, kwe->kwe_psynchretval, kwq->kw_pre_intrcount, 0);
#endif /* _PSYNCH_TRACE_ */
		goto out;
	}

	if ((kwq->kw_pre_rwwc != 0) && ((ins_flags == FIRSTFIT) || ((lockseq & PTHRW_COUNT_MASK) == (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)))) {
		/* got preposted lock */
		kwq->kw_pre_rwwc--;
		if (kwq->kw_pre_rwwc == 0) {
			CLEAR_PREPOST_BITS(kwq);
			kwq->kw_lastunlockseq = PTHRW_RWL_INIT;
			if (kwq->kw_inqueue == 0) {
				updatebits = lockseq | (PTH_RWL_KBIT | PTH_RWL_EBIT);
			} else {
				updatebits = (kwq->kw_highseq & PTHRW_COUNT_MASK) | (PTH_RWL_KBIT | PTH_RWL_EBIT);
			}
			updatebits &= ~PTH_RWL_MTX_WAIT;

			kwe->kwe_psynchretval = updatebits;

			if (updatebits == 0) {
				__FAILEDUSERTEST__("psynch_mutexwait(prepost): returning 0 lseq in mutexwait with no EBIT\n");
			}
			ksyn_wqunlock(kwq);
			*retval = updatebits;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef1, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
			goto out;
		} else {
			__FAILEDUSERTEST__("psynch_mutexwait: more than one prepost\n");
			kwq->kw_pre_lockseq += PTHRW_INC; /* look for next one */
			ksyn_wqunlock(kwq);
			error = EINVAL;
			goto out;
		}
	}

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfeedfeed, mgen, ins_flags, 0);
#endif /* _PSYNCH_TRACE_ */

	error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], mgen, uth, kwe, ins_flags);
	if (error != 0) {
		ksyn_wqunlock(kwq);
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_END, (uint32_t)mutex, 2, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
		goto out;
	}

	error = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0);
	/* drops the wq lock */

	if (error != 0) {
		ksyn_wqlock(kwq);

#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_NONE, (uint32_t)mutex, 3, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
		if (kwe->kwe_kwqqueue != NULL)
			ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwe);
		ksyn_wqunlock(kwq);
	} else {
		updatebits = kwe->kwe_psynchretval;
		updatebits &= ~PTH_RWL_MTX_WAIT;
		*retval = updatebits;

		if (updatebits == 0)
			__FAILEDUSERTEST__("psynch_mutexwait: returning 0 lseq in mutexwait with no EBIT\n");
	}
out:
	ksyn_wqrelease(kwq, NULL, 1, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_MTX));
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_END, (uint32_t)mutex, 0xeeeeeeed, updatebits, error, 0);
#endif /* _PSYNCH_TRACE_ */

	return(error);
}

/*
 * psynch_mutexdrop: This system call is used for unlock postings on contended psynch mutexes.
 */
int
psynch_mutexdrop(__unused proc_t p, struct psynch_mutexdrop_args * uap, uint32_t * retval)
{
	user_addr_t mutex = uap->mutex;
	uint32_t mgen = uap->mgen;
	uint32_t ugen = uap->ugen;
	uint64_t tid = uap->tid;
	int flags = uap->flags;
	ksyn_wait_queue_t kwq;
	uint32_t updateval;
	int error = 0;

	error = ksyn_wqfind(mutex, mgen, ugen, 0, tid, flags, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_MTX), &kwq);
	if (error != 0) {
		return(error);
	}

	updateval = psynch_mutexdrop_internal(kwq, mgen, ugen, flags);
	/* drops the kwq reference */

	*retval = updateval;
	return(0);

}

/*
 * psynch_cvbroad: This system call is used for broadcast posting on blocked waiters of psynch cvars.
 */
int
psynch_cvbroad(__unused proc_t p, struct psynch_cvbroad_args * uap, uint32_t * retval)
{
	user_addr_t cond = uap->cv;
	uint64_t cvlsgen = uap->cvlsgen;
	uint64_t cvudgen = uap->cvudgen;
	uint32_t cgen, cugen, csgen, diffgen;
	uint32_t uptoseq, fromseq;
	int flags = uap->flags;
	ksyn_wait_queue_t ckwq;
	int error = 0;
	uint32_t updatebits = 0;
	uint32_t count;
	struct ksyn_queue kfreeq;

	csgen = (uint32_t)((cvlsgen >> 32) & 0xffffffff);
	cgen = ((uint32_t)(cvlsgen & 0xffffffff));
	cugen = (uint32_t)((cvudgen >> 32) & 0xffffffff);
	diffgen = ((uint32_t)(cvudgen & 0xffffffff));
	count = (diffgen >> PTHRW_COUNT_SHIFT);

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_START, (uint32_t)cond, cgen, cugen, csgen, 0);
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_NONE, (uint32_t)cond, 0xcbcbcbc1, diffgen, flags, 0);
#endif /* _PSYNCH_TRACE_ */

	uptoseq = cgen & PTHRW_COUNT_MASK;
	fromseq = (cugen & PTHRW_COUNT_MASK) + PTHRW_INC;

	if (is_seqhigher(fromseq, uptoseq) || is_seqhigher((csgen & PTHRW_COUNT_MASK), uptoseq)) {
		__FAILEDUSERTEST__("cvbroad: invalid L, U and S values\n");
		return EINVAL;
	}
	if (count > (uint32_t)task_threadmax) {
		__FAILEDUSERTEST__("cvbroad: difference greater than maximum possible thread count\n");
		return EBUSY;
	}

	ckwq = NULL;

	error = ksyn_wqfind(cond, cgen, cugen, csgen, 0, flags, (KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INDROP), &ckwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_END, (uint32_t)cond, 0, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	*retval = 0;

	ksyn_wqlock(ckwq);

	/* update L, U and S... */
	UPDATE_CVKWQ(ckwq, cgen, cugen, csgen, 0, KSYN_WQTYPE_CVAR);

	/* broadcast wakeups/prepost handling */
	ksyn_handle_cvbroad(ckwq, uptoseq, &updatebits);

	/* set C or P bits and free if needed */
	ckwq->kw_sword += (updatebits & PTHRW_COUNT_MASK);
	ksyn_cvupdate_fixup(ckwq, &updatebits, &kfreeq, 1);
	ksyn_wqunlock(ckwq);

	*retval = updatebits;

	ksyn_wqrelease(ckwq, NULL, 1, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_CVAR));
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_END, (uint32_t)cond, 0xeeeeeeed, (uint32_t)*retval, error, 0);
#endif /* _PSYNCH_TRACE_ */

	return(error);
}

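#if 0
/*
 * Illustrative sketch (hypothetical user-side helpers, not part of
 * this file) of the argument packing the unpacking in psynch_cvbroad
 * above expects: cvlsgen carries the S word in its high 32 bits and
 * the L word in its low 32 bits; cvudgen carries the U word high and
 * the diff/count word low.
 */
static uint64_t
pack_cvlsgen(uint32_t sword, uint32_t lword)
{
	return (((uint64_t)sword << 32) | lword);
}

static uint64_t
pack_cvudgen(uint32_t uword, uint32_t diffgen)
{
	return (((uint64_t)uword << 32) | diffgen);
}
#endif
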
ksyn_waitq_element_t
ksyn_queue_find_threadseq(ksyn_wait_queue_t ckwq, __unused ksyn_queue_t kq, thread_t th, uint32_t upto)
{
	uthread_t uth = get_bsdthread_info(th);
	ksyn_waitq_element_t kwe = &uth->uu_kwe;

	if (kwe->kwe_kwqqueue != ckwq ||
	    is_seqhigher((kwe->kwe_lockseq & PTHRW_COUNT_MASK), upto)) {
		/* the thread is not waiting in the cv (or wasn't when the wakeup happened) */
		return NULL;
	}
	return kwe;
}

/*
 * psynch_cvsignal: This system call is used for signalling the blocked waiters of psynch cvars.
 */
int
psynch_cvsignal(__unused proc_t p, struct psynch_cvsignal_args * uap, uint32_t * retval)
{
	user_addr_t cond = uap->cv;
	uint64_t cvlsgen = uap->cvlsgen;
	uint32_t cgen, csgen, signalseq, uptoseq;
	uint32_t cugen = uap->cvugen;
	int threadport = uap->thread_port;
	int flags = uap->flags;
	ksyn_wait_queue_t ckwq = NULL;
	ksyn_waitq_element_t kwe, nkwe = NULL;
	ksyn_queue_t kq;
	int error = 0;
	thread_t th = THREAD_NULL;
	uint32_t updatebits = 0;
	kern_return_t kret;
	struct ksyn_queue kfreeq;


	csgen = (uint32_t)((cvlsgen >> 32) & 0xffffffff);
	cgen = ((uint32_t)(cvlsgen & 0xffffffff));

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_START, (uint32_t)cond, cgen, cugen, threadport, 0);
#endif /* _PSYNCH_TRACE_ */

	uptoseq = cgen & PTHRW_COUNT_MASK;
	signalseq = (cugen & PTHRW_COUNT_MASK) + PTHRW_INC;

	/* validate sane L, U, and S values */
	if (((threadport == 0) && (is_seqhigher(signalseq, uptoseq))) || is_seqhigher((csgen & PTHRW_COUNT_MASK), uptoseq)) {
		__FAILEDUSERTEST__("psynch_cvsignal: invalid sequence numbers\n");
		error = EINVAL;
		goto out;
	}

	/* If we are looking for a specific thread, grab a reference for it */
	if (threadport != 0) {
		th = (thread_t)port_name_to_thread((mach_port_name_t)threadport);
		if (th == THREAD_NULL) {
			error = ESRCH;
			goto out;
		}
	}

	error = ksyn_wqfind(cond, cgen, cugen, csgen, 0, flags, (KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INDROP), &ckwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_END, (uint32_t)cond, 0, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
		goto out;
	}

	ksyn_wqlock(ckwq);

	/* update L, U and S... */
	UPDATE_CVKWQ(ckwq, cgen, cugen, csgen, 0, KSYN_WQTYPE_CVAR);

	kq = &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER];

retry:
	/* Only bother if we aren't already balanced */
	if ((ckwq->kw_lword & PTHRW_COUNT_MASK) != (ckwq->kw_sword & PTHRW_COUNT_MASK)) {

		kwe = (th != NULL) ? ksyn_queue_find_threadseq(ckwq, kq, th, uptoseq) :
			ksyn_queue_find_signalseq(ckwq, kq, uptoseq, signalseq);
		if (kwe != NULL) {
			switch (kwe->kwe_flags) {

			case KWE_THREAD_BROADCAST:
				/* broadcasts swallow our signal */
				break;

			case KWE_THREAD_PREPOST:
				/* merge in with existing prepost at our same uptoseq */
				kwe->kwe_count += 1;
				break;

			case KWE_THREAD_INWAIT:
				if (is_seqlower((kwe->kwe_lockseq & PTHRW_COUNT_MASK), signalseq)) {
					/*
					 * A valid thread in our range, but lower than our signal.
					 * Matching it may leave our match with nobody to wake it if/when
					 * it arrives (the signal originally meant for this thread might
					 * not successfully wake it).
					 *
					 * Convert to broadcast - may cause some spurious wakeups
					 * (allowed by spec), but avoids starvation (better choice).
					 */
#if _PSYNCH_TRACE_
					__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xc1c1c1c1, uptoseq, 0, 0);
#endif /* _PSYNCH_TRACE_ */
					ksyn_handle_cvbroad(ckwq, uptoseq, &updatebits);
				} else {
					ksyn_queue_removeitem(ckwq, kq, kwe);
					kwe->kwe_psynchretval = PTH_RWL_MTX_WAIT;
					kwe->kwe_kwqqueue = NULL;
#if _PSYNCH_TRACE_
					__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xcafecaf2, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
#endif /* _PSYNCH_TRACE_ */
					kret = ksyn_wakeup_thread(ckwq, kwe);
#if __TESTPANICS__
					if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
						panic("ksyn_wakeup_thread: panic waking up condition waiter\n");
#endif /* __TESTPANICS__ */
					updatebits += PTHRW_INC;
				}

				ckwq->kw_sword += (updatebits & PTHRW_COUNT_MASK);
				break;

			default:
				panic("unknown kweflags\n");
				break;
			}

		} else if (th != NULL) {
			/*
			 * Could not find the thread, post a broadcast,
			 * otherwise the waiter will be stuck. We used to
			 * send ESRCH here, but that led to rare hangs.
			 */
			ksyn_handle_cvbroad(ckwq, uptoseq, &updatebits);
			ckwq->kw_sword += (updatebits & PTHRW_COUNT_MASK);
		} else if (nkwe == NULL) {
			ksyn_wqunlock(ckwq);
			nkwe = (ksyn_waitq_element_t)zalloc(kwe_zone);
			ksyn_wqlock(ckwq);
			goto retry;

		} else {
			/* no eligible entries - add prepost */
			bzero(nkwe, sizeof(struct ksyn_waitq_element));
			nkwe->kwe_kwqqueue = ckwq;
			nkwe->kwe_flags = KWE_THREAD_PREPOST;
			nkwe->kwe_lockseq = uptoseq;
			nkwe->kwe_count = 1;
			nkwe->kwe_uth = NULL;
			nkwe->kwe_psynchretval = 0;

#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xfeedfefe, uptoseq, 0, 0);
#endif /* _PSYNCH_TRACE_ */

			(void)ksyn_queue_insert(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], uptoseq, NULL, nkwe, SEQFIT);
			ckwq->kw_fakecount++;
			nkwe = NULL;
		}

		/* set C or P bits and free if needed */
		ksyn_cvupdate_fixup(ckwq, &updatebits, &kfreeq, 1);
	}

	ksyn_wqunlock(ckwq);
	if (nkwe != NULL)
		zfree(kwe_zone, nkwe);

	ksyn_wqrelease(ckwq, NULL, 1, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_CVAR));

out:
	if (th != NULL)
		thread_deallocate(th);
	if (error == 0)
		*retval = updatebits;
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_END, (uint32_t)cond, 0xeeeeeeed, updatebits, error, 0);
#endif /* _PSYNCH_TRACE_ */

	return(error);
}

/*
 * psynch_cvwait: This system call is used for psynch cvar waiters to block in kernel.
 */
int
psynch_cvwait(__unused proc_t p, struct psynch_cvwait_args * uap, uint32_t * retval)
{
	user_addr_t cond = uap->cv;
	uint64_t cvlsgen = uap->cvlsgen;
	uint32_t cgen, csgen;
	uint32_t cugen = uap->cvugen;
	user_addr_t mutex = uap->mutex;
	uint64_t mugen = uap->mugen;
	uint32_t mgen, ugen;
	int flags = uap->flags;
	ksyn_wait_queue_t kwq, ckwq;
	int error = 0, local_error = 0;
	uint64_t abstime = 0;
	uint32_t lockseq, updatebits = 0;
	struct timespec ts;
	uthread_t uth;
	ksyn_waitq_element_t kwe, nkwe = NULL;
	struct ksyn_queue *kq, kfreeq;
#if __TESTPANICS__
	//int timeoutval = 3;	/* 3 secs */
	//u_int64_t ntime = 0;
#endif /* __TESTPANICS__ */

	/* for conformance reasons */
	__pthread_testcancel(0);

	csgen = (uint32_t)((cvlsgen >> 32) & 0xffffffff);
	cgen = ((uint32_t)(cvlsgen & 0xffffffff));
	ugen = (uint32_t)((mugen >> 32) & 0xffffffff);
	mgen = ((uint32_t)(mugen & 0xffffffff));

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_START, (uint32_t)cond, cgen, cugen, csgen, 0);
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_NONE, (uint32_t)mutex, mgen, ugen, flags, 0);
#endif /* _PSYNCH_TRACE_ */

	lockseq = (cgen & PTHRW_COUNT_MASK);
	/*
	 * In cvwait the U word can be out of range, as the cond could be
	 * used only for timeouts. However, the S word needs to be within
	 * bounds and is validated at user level as well.
	 */
	if (is_seqhigher_eq((csgen & PTHRW_COUNT_MASK), lockseq) != 0) {
		__FAILEDUSERTEST__("psynch_cvwait: invalid sequence numbers\n");
		return EINVAL;
	}

	ckwq = kwq = NULL;
	error = ksyn_wqfind(cond, cgen, cugen, csgen, 0, flags, KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INWAIT, &ckwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_END, (uint32_t)cond, 1, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

#if __TESTPANICS__
	//clock_interval_to_deadline(timeoutval, NSEC_PER_SEC, &ntime);
#endif /* __TESTPANICS__ */

	if (mutex != (user_addr_t)0) {
		error = ksyn_wqfind(mutex, mgen, ugen, 0, 0, flags, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_MTX), &kwq);
		if (error != 0) {
			local_error = error;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_END, (uint32_t)mutex, 2, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
			goto out;
		}

		(void)psynch_mutexdrop_internal(kwq, mgen, ugen, flags);
		/* drops kwq reference */
		kwq = NULL;
	}

	if (uap->sec != 0 || (uap->nsec & 0x3fffffff) != 0) {
		ts.tv_sec = uap->sec;
		ts.tv_nsec = (uap->nsec & 0x3fffffff);
		nanoseconds_to_absolutetime((uint64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec, &abstime);
		clock_absolutetime_interval_to_deadline(abstime, &abstime);
	}

	ksyn_wqlock(ckwq);

	/* update L, U and S... */
	UPDATE_CVKWQ(ckwq, cgen, cugen, csgen, 0, KSYN_WQTYPE_CVAR);

	/* Look for the sequence for prepost (or conflicting thread) */
	kq = &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER];
	kwe = ksyn_queue_find_cvpreposeq(kq, lockseq);

	if (kwe != NULL) {
		switch (kwe->kwe_flags) {

		case KWE_THREAD_INWAIT:
			ksyn_wqunlock(ckwq);
			__FAILEDUSERTEST__("cvwait: thread entry with same sequence already present\n");
			local_error = EBUSY;
			goto out;

		case KWE_THREAD_BROADCAST:
			break;

		case KWE_THREAD_PREPOST:
			if ((kwe->kwe_lockseq & PTHRW_COUNT_MASK) == lockseq) {
				/* we can safely consume a reference, so do so */
				if (--kwe->kwe_count == 0) {
					ksyn_queue_removeitem(ckwq, kq, kwe);
					ckwq->kw_fakecount--;
					nkwe = kwe;
				}
			} else {
				/*
				 * consuming a prepost higher than our lock sequence is valid, but
				 * can leave the higher thread without a match. Convert the entry
				 * to a broadcast to compensate for this.
				 */
#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xc2c2c2c2, kwe->kwe_lockseq, 0, 0);
#endif /* _PSYNCH_TRACE_ */

				ksyn_handle_cvbroad(ckwq, kwe->kwe_lockseq, &updatebits);
#if __TESTPANICS__
				if (updatebits != 0)
					panic("psynch_cvwait: convert pre-post to broadcast: woke up %d threads that shouldn't be there\n",
						updatebits);
#endif /* __TESTPANICS__ */
			}

			break;

		default:
			panic("psynch_cvwait: unexpected wait queue element type\n");
		}

#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xfefefefe, kwe->kwe_lockseq, 0, 0);
#endif /* _PSYNCH_TRACE_ */


		updatebits = PTHRW_INC;
		ckwq->kw_sword += PTHRW_INC;

		/* set C or P bits and free if needed */
		ksyn_cvupdate_fixup(ckwq, &updatebits, &kfreeq, 1);

		error = 0;
		local_error = 0;

		*retval = updatebits;

		ksyn_wqunlock(ckwq);

		if (nkwe != NULL)
			zfree(kwe_zone, nkwe);

		goto out;

	}

	uth = current_uthread();
	kwe = &uth->uu_kwe;
	kwe->kwe_kwqqueue = ckwq;
	kwe->kwe_flags = KWE_THREAD_INWAIT;
	kwe->kwe_lockseq = lockseq;
	kwe->kwe_count = 1;
	kwe->kwe_uth = uth;
	kwe->kwe_psynchretval = 0;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xfeedfeed, cgen, 0, 0);
#endif /* _PSYNCH_TRACE_ */

	error = ksyn_queue_insert(ckwq, kq, cgen, uth, kwe, SEQFIT);
	if (error != 0) {
		ksyn_wqunlock(ckwq);
		local_error = error;
		goto out;
	}

#if 0 /* __TESTPANICS__ */
	/* if no timeout is passed, set 5 secs timeout to catch hangs */
	error = ksyn_block_thread_locked(ckwq, (abstime == 0) ? ntime : abstime, kwe, 1);
#else
	error = ksyn_block_thread_locked(ckwq, abstime, kwe, 1);
#endif /* __TESTPANICS__ */
	/* lock dropped */


	local_error = error;
	if (error != 0) {
		ksyn_wqlock(ckwq);
		/* just in case it got woken up as we were granting */
		*retval = kwe->kwe_psynchretval;

#if __TESTPANICS__
		if ((kwe->kwe_kwqqueue != NULL) && (kwe->kwe_kwqqueue != ckwq))
			panic("cvwait waiting on some other kwq\n");

#endif /* __TESTPANICS__ */


		if (kwe->kwe_kwqqueue != NULL) {
			ksyn_queue_removeitem(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwe);
			kwe->kwe_kwqqueue = NULL;
		}
		if ((kwe->kwe_psynchretval & PTH_RWL_MTX_WAIT) != 0) {
			/*
			 * The condition variable was granted; reset the error
			 * so that the thread returns back.
			 */
			local_error = 0;
			/* no need to set any bits just return as cvsig/broad covers this */
			ksyn_wqunlock(ckwq);
			*retval = 0;
			goto out;
		}

		ckwq->kw_sword += PTHRW_INC;

		/* set C and P bits, in the local error as well as updatebits */
		if ((ckwq->kw_lword & PTHRW_COUNT_MASK) == (ckwq->kw_sword & PTHRW_COUNT_MASK)) {
			updatebits |= PTH_RWS_CV_CBIT;
			local_error |= ECVCERORR;
			if (ckwq->kw_inqueue != 0) {
				(void)ksyn_queue_move_tofree(ckwq, kq, (ckwq->kw_lword & PTHRW_COUNT_MASK), &kfreeq, 1, 1);
			}
			ckwq->kw_lword = ckwq->kw_uword = ckwq->kw_sword = 0;
			ckwq->kw_kflags |= KSYN_KWF_ZEROEDOUT;
		} else {
			/* everything in the queue is a fake entry? */
			if ((ckwq->kw_inqueue != 0) && (ckwq->kw_fakecount == ckwq->kw_inqueue)) {
				updatebits |= PTH_RWS_CV_PBIT;
				local_error |= ECVPERORR;
			}
		}
		ksyn_wqunlock(ckwq);

	} else {
		/* PTH_RWL_MTX_WAIT is removed */
		if ((kwe->kwe_psynchretval & PTH_RWS_CV_MBIT) != 0)
			*retval = PTHRW_INC | PTH_RWS_CV_CBIT;
		else
			*retval = 0;
		local_error = 0;
	}
out:
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_END, (uint32_t)cond, 0xeeeeeeed, (uint32_t)*retval, local_error, 0);
#endif /* _PSYNCH_TRACE_ */
	ksyn_wqrelease(ckwq, NULL, 1, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_CVAR));
	return(local_error);
}

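/*
 * Example (illustrative): on a timed-out cvwait where the kernel also
 * finds L == S, local_error comes back as (ETIMEDOUT | ECVCERORR).
 * Since ECVCERORR (256) and ECVPERORR (512) sit above the errno range,
 * userland can strip them off to recover the real error while learning
 * that the C (or P) bit was set on its behalf.
 */
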
/*
 * psynch_cvclrprepost: This system call clears pending prepost if present.
 */
int
psynch_cvclrprepost(__unused proc_t p, struct psynch_cvclrprepost_args * uap, __unused int * retval)
{
	user_addr_t cond = uap->cv;
	uint32_t cgen = uap->cvgen;
	uint32_t cugen = uap->cvugen;
	uint32_t csgen = uap->cvsgen;
	uint32_t pseq = uap->preposeq;
	uint32_t flags = uap->flags;
	int error;
	ksyn_wait_queue_t ckwq = NULL;
	struct ksyn_queue kfreeq;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CLRPRE | DBG_FUNC_START, (uint32_t)cond, cgen, cugen, csgen, 0);
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CLRPRE | DBG_FUNC_NONE, (uint32_t)cond, 0xcececece, pseq, flags, 0);
#endif /* _PSYNCH_TRACE_ */

	if ((flags & _PTHREAD_MTX_OPT_MUTEX) == 0) {
		error = ksyn_wqfind(cond, cgen, cugen, csgen, 0, flags, (KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INDROP), &ckwq);
		if (error != 0) {
			*retval = 0;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CLRPRE | DBG_FUNC_END, (uint32_t)cond, 0, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
			return(error);
		}

		ksyn_wqlock(ckwq);
		(void)ksyn_queue_move_tofree(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], (pseq & PTHRW_COUNT_MASK), &kfreeq, 0, 1);
		ksyn_wqunlock(ckwq);
		ksyn_wqrelease(ckwq, NULL, 1, (KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INDROP));
	} else {
		/* mutex type */
		error = ksyn_wqfind(cond, cgen, cugen, 0, 0, flags, (KSYN_WQTYPE_MTX | KSYN_WQTYPE_INDROP), &ckwq);
		if (error != 0) {
			*retval = 0;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CLRPRE | DBG_FUNC_END, (uint32_t)cond, 0, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
			return(error);
		}

		ksyn_wqlock(ckwq);
		if (((flags & _PTHREAD_MUTEX_POLICY_FIRSTFIT) != 0) && (ckwq->kw_pre_rwwc != 0)) {
			if (is_seqlower_eq(ckwq->kw_pre_lockseq, cgen) != 0) {
				/* clear prepost */
				ckwq->kw_pre_rwwc = 0;
				ckwq->kw_pre_lockseq = 0;
			}
		}
		ksyn_wqunlock(ckwq);
		ksyn_wqrelease(ckwq, NULL, 1, (KSYN_WQTYPE_MTX | KSYN_WQTYPE_INDROP));
	}

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CLRPRE | DBG_FUNC_END, (uint32_t)cond, 0xeeeeeeed, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	return(0);
}
1506
1507 /* ***************** pthread_rwlock ************************ */
1508 /*
1509 * psynch_rw_rdlock: This system call is used for psync rwlock readers to block.
1510 */
1511 int
1512 psynch_rw_rdlock(__unused proc_t p, struct psynch_rw_rdlock_args * uap, uint32_t * retval)
1513 {
1514 user_addr_t rwlock = uap->rwlock;
1515 uint32_t lgen = uap->lgenval;
1516 uint32_t ugen = uap->ugenval;
1517 uint32_t rw_wc = uap->rw_wc;
1518 //uint64_t tid = uap->tid;
1519 int flags = uap->flags;
1520 int error = 0, block;
1521 uint32_t lockseq = 0, updatebits = 0, preseq = 0, prerw_wc = 0;
1522 ksyn_wait_queue_t kwq;
1523 uthread_t uth;
1524 int isinit = lgen & PTHRW_RWL_INIT;
1525 uint32_t returnbits = 0;
1526 ksyn_waitq_element_t kwe;
1527
1528 #if _PSYNCH_TRACE_
1529 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
1530 #endif /* _PSYNCH_TRACE_ */
1531 uth = current_uthread();
1532
1533 /* preserve the seq number */
1534 kwe = &uth->uu_kwe;
1535 kwe->kwe_lockseq = lgen;
1536 kwe->kwe_uth = uth;
1537 kwe->kwe_psynchretval = 0;
1538 kwe->kwe_kwqqueue = NULL;
1539
1540 lockseq = lgen & PTHRW_COUNT_MASK;
1541
1542
1543 error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
1544 if (error != 0) {
1545 #if _PSYNCH_TRACE_
1546 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
1547 #endif /* _PSYNCH_TRACE_ */
1548 return(error);
1549 }
1550
1551 ksyn_wqlock(kwq);
1552
1553 if (isinit != 0) {
1554 lgen &= ~PTHRW_RWL_INIT;
1555 if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
1556 /* first to notice the reset of the lock, clear preposts */
1557 CLEAR_REINIT_BITS(kwq);
1558 kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
1559 #if _PSYNCH_TRACE_
1560 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
1561 #endif /* _PSYNCH_TRACE_ */
1562 }
1563 }
1564
1565 /* handle first the missed wakeups */
1566 if ((kwq->kw_pre_intrcount != 0) &&
1567 ((kwq->kw_pre_intrtype == PTH_RW_TYPE_READ) || (kwq->kw_pre_intrtype == PTH_RW_TYPE_LREAD)) &&
1568 (is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {
1569
1570 kwq->kw_pre_intrcount--;
1571 kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
1572 if (kwq->kw_pre_intrcount == 0)
1573 CLEAR_INTR_PREPOST_BITS(kwq);
1574 ksyn_wqunlock(kwq);
1575 goto out;
1576 }
1577
1578 /* handle overlaps first as they are not counted against pre_rwwc */
1579 
1580 /* check for overlap and that no W bit (indicating pending writers) is set */
1581 if ((kwq->kw_overlapwatch != 0) && ((rw_wc & PTHRW_RWS_SAVEMASK) == 0) && ((lgen & PTH_RWL_WBIT) == 0)) {
1582 #if _PSYNCH_TRACE_
1583 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 10, kwq->kw_nextseqword, kwq->kw_lastseqword, 0);
1584 #endif /* _PSYNCH_TRACE_ */
1585 error = kwq_handle_overlap(kwq, lgen, ugen, rw_wc, &updatebits, (KW_UNLOCK_PREPOST_READLOCK|KW_UNLOCK_PREPOST), &block);
1586 #if __TESTPANICS__
1587 if (error != 0)
1588 panic("rw_rdlock: kwq_handle_overlap failed %d\n",error);
1589 #endif /* __TESTPANICS__ */
1590 if (block == 0) {
1591 error = 0;
1592 kwe->kwe_psynchretval = updatebits;
1593 #if _PSYNCH_TRACE_
1594 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0xff, updatebits, 0xee, 0);
1595 #endif /* _PSYNCH_TRACE_ */
1596 ksyn_wqunlock(kwq);
1597 goto out;
1598 }
1599 }
1600
1601 if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
1602 #if _PSYNCH_TRACE_
1603 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
1604 #endif /* _PSYNCH_TRACE_ */
1605 kwq->kw_pre_rwwc--;
1606 if (kwq->kw_pre_rwwc == 0) {
1607 preseq = kwq->kw_pre_lockseq;
1608 prerw_wc = kwq->kw_pre_sseq;
1609 CLEAR_PREPOST_BITS(kwq);
1610 if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0){
1611 kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
1612 #if _PSYNCH_TRACE_
1613 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
1614 #endif /* _PSYNCH_TRACE_ */
1615 }
1616 error = kwq_handle_unlock(kwq, preseq, prerw_wc, &updatebits, (KW_UNLOCK_PREPOST_READLOCK|KW_UNLOCK_PREPOST), &block, lgen);
1617 #if __TESTPANICS__
1618 if (error != 0)
1619 panic("rw_rdlock: kwq_handle_unlock failed %d\n",error);
1620 #endif /* __TESTPANICS__ */
1621 if (block == 0) {
1622 ksyn_wqunlock(kwq);
1623 goto out;
1624 }
1625 /* insert into queue and proceed as usual */
1626 }
1627 }
1628
1629
1630 #if _PSYNCH_TRACE_
1631 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
1632 #endif /* _PSYNCH_TRACE_ */
1633 error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_READ], lgen, uth, kwe, SEQFIT);
1634 #if __TESTPANICS__
1635 if (error != 0)
1636 panic("psynch_rw_rdlock: failed to enqueue\n");
1637 #endif /* __TESTPANICS__ */
1638 error = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0);
1639 /* drops the kwq lock */
1640
1641 out:
1642 if (error != 0) {
1643 #if _PSYNCH_TRACE_
1644 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 4, error, 0, 0);
1645 #endif /* _PSYNCH_TRACE_ */
1646 ksyn_wqlock(kwq);
1647 if (kwe->kwe_kwqqueue != NULL)
1648 ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_READ], kwe);
1649 ksyn_wqunlock(kwq);
1650 } else {
1651 /* update bits */
1652 *retval = kwe->kwe_psynchretval;
1653 returnbits = kwe->kwe_psynchretval;
1654 }
1655 ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK));
1656 #if _PSYNCH_TRACE_
1657 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, returnbits, error, 0);
1658 #endif /* _PSYNCH_TRACE_ */
1659 return(error);
1660 }
1661
1662 /*
1663 * psynch_rw_longrdlock: This system call is used for psync rwlock long readers to block.
1664 */
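/*
 * Illustrative note: a long reader differs from a plain reader only in the
 * kernel queue it waits on (KSYN_QUEUE_LREAD rather than KSYN_QUEUE_READ)
 * and in the prepost/interrupt type it matches (PTH_RW_TYPE_LREAD). A
 * hypothetical user-space invocation mirrors the reader case:
 *
 *	updateval = __psynch_rw_longrdlock(orwlock, lgenval, ugenval,
 *			rw_wc, flags);
 */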
1665 int
1666 psynch_rw_longrdlock(__unused proc_t p, __unused struct psynch_rw_longrdlock_args * uap, __unused uint32_t * retval)
1667 {
1668 user_addr_t rwlock = uap->rwlock;
1669 uint32_t lgen = uap->lgenval;
1670 uint32_t ugen = uap->ugenval;
1671 uint32_t rw_wc = uap->rw_wc;
1672 //uint64_t tid = uap->tid;
1673 int flags = uap->flags;
1674 int isinit = lgen & PTHRW_RWL_INIT;
1675 uint32_t returnbits=0;
1676 ksyn_waitq_element_t kwe;
1677
1678 ksyn_wait_queue_t kwq;
1679 int error=0, block = 0;
1680 uthread_t uth;
1681 uint32_t lockseq = 0, updatebits = 0, preseq = 0, prerw_wc = 0;
1682
1683 #if _PSYNCH_TRACE_
1684 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
1685 #endif /* _PSYNCH_TRACE_ */
1686 uth = current_uthread();
1687 kwe = &uth->uu_kwe;
1688 kwe->kwe_lockseq = lgen;
1689 kwe->kwe_uth = uth;
1690 kwe->kwe_psynchretval = 0;
1691 kwe->kwe_kwqqueue = NULL;
1692 lockseq = (lgen & PTHRW_COUNT_MASK);
1693
1694 error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
1695 if (error != 0) {
1696 #if _PSYNCH_TRACE_
1697 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
1698 #endif /* _PSYNCH_TRACE_ */
1699 return(error);
1700 }
1701
1702 ksyn_wqlock(kwq);
1703
1704 if (isinit != 0) {
1705 lgen &= ~PTHRW_RWL_INIT;
1706 if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
1707 /* first to notice the reset of the lock, clear preposts */
1708 CLEAR_REINIT_BITS(kwq);
1709 kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
1710 #if _PSYNCH_TRACE_
1711 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
1712 #endif /* _PSYNCH_TRACE_ */
1713 }
1714 }
1715
1716 /* handle first the missed wakeups */
1717 if ((kwq->kw_pre_intrcount != 0) &&
1718 (kwq->kw_pre_intrtype == PTH_RW_TYPE_LREAD) &&
1719 (is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {
1720
1721 kwq->kw_pre_intrcount--;
1722 kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
1723 if (kwq->kw_pre_intrcount == 0)
1724 CLEAR_INTR_PREPOST_BITS(kwq);
1725 ksyn_wqunlock(kwq);
1726 goto out;
1727 }
1728
1729
1730 if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
1731 #if _PSYNCH_TRACE_
1732 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
1733 #endif /* _PSYNCH_TRACE_ */
1734 kwq->kw_pre_rwwc--;
1735 if (kwq->kw_pre_rwwc == 0) {
1736 preseq = kwq->kw_pre_lockseq;
1737 prerw_wc = kwq->kw_pre_sseq;
1738 CLEAR_PREPOST_BITS(kwq);
1739 if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0){
1740 kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
1741 #if _PSYNCH_TRACE_
1742 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
1743 #endif /* _PSYNCH_TRACE_ */
1744 }
1745 error = kwq_handle_unlock(kwq, preseq, prerw_wc, &updatebits, (KW_UNLOCK_PREPOST_LREADLOCK|KW_UNLOCK_PREPOST), &block, lgen);
1746 #if __TESTPANICS__
1747 if (error != 0)
1748 panic("kwq_handle_unlock failed %d\n",error);
1749 #endif /* __TESTPANICS__ */
1750 if (block == 0) {
1751 ksyn_wqunlock(kwq);
1752 goto out;
1753 }
1754 /* insert into queue and proceed as usual */
1755 }
1756 }
1757
1758 #if _PSYNCH_TRACE_
1759 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
1760 #endif /* _PSYNCH_TRACE_ */
1761 error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_LREAD], lgen, uth, kwe, SEQFIT);
1762 #if __TESTPANICS__
1763 if (error != 0)
1764 panic("psynch_rw_longrdlock: failed to enqueue\n");
1765 #endif /* __TESTPANICS__ */
1766
1767 error = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0);
1768 /* drops the kwq lock */
1769 out:
1770 if (error != 0) {
1771 #if _PSYNCH_TRACE_
1772 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
1773 #endif /* _PSYNCH_TRACE_ */
1774 ksyn_wqlock(kwq);
1775 if (kwe->kwe_kwqqueue != NULL)
1776 ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_LREAD], kwe);
1777 ksyn_wqunlock(kwq);
1778 } else {
1779 /* update bits */
1780 *retval = kwe->kwe_psynchretval;
1781 returnbits = kwe->kwe_psynchretval;
1782 }
1783
1784 ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK));
1785
1786 #if _PSYNCH_TRACE_
1787 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0, returnbits, error, 0);
1788 #endif /* _PSYNCH_TRACE_ */
1789 return(error);
1790 }
1791
1792 /*
1793 * psynch_rw_wrlock: This system call is used for psync rwlock writers to block.
1794 */
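/*
 * Illustrative sketch (assumed wrapper shape): user space traps here only
 * after failing to take the lock with atomics; on a granted write lock the
 * return value is expected to carry the exclusive bit for the new owner:
 *
 *	updateval = __psynch_rw_wrlock(orwlock, lgenval, ugenval, rw_wc, flags);
 *	// (updateval & PTH_RWL_EBIT) set => this thread now owns the lock
 */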
1795 int
1796 psynch_rw_wrlock(__unused proc_t p, struct psynch_rw_wrlock_args * uap, uint32_t * retval)
1797 {
1798 user_addr_t rwlock = uap->rwlock;
1799 uint32_t lgen = uap->lgenval;
1800 uint32_t ugen = uap->ugenval;
1801 uint32_t rw_wc = uap->rw_wc;
1802 //uint64_t tid = uap->tid;
1803 int flags = uap->flags;
1804 int block;
1805 ksyn_wait_queue_t kwq;
1806 int error=0;
1807 uthread_t uth;
1808 uint32_t lockseq = 0, updatebits = 0, preseq = 0, prerw_wc = 0;
1809 int isinit = lgen & PTHRW_RWL_INIT;
1810 uint32_t returnbits = 0;
1811 ksyn_waitq_element_t kwe;
1812
1813 #if _PSYNCH_TRACE_
1814 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
1815 #endif /* _PSYNCH_TRACE_ */
1816 uth = current_uthread();
1817 kwe = &uth->uu_kwe;
1818 kwe->kwe_lockseq = lgen;
1819 kwe->kwe_uth = uth;
1820 kwe->kwe_psynchretval = 0;
1821 kwe->kwe_kwqqueue = NULL;
1822 lockseq = (lgen & PTHRW_COUNT_MASK);
1823
1824 error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
1825 if (error != 0) {
1826 #if _PSYNCH_TRACE_
1827 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
1828 #endif /* _PSYNCH_TRACE_ */
1829 return(error);
1830 }
1831
1832 ksyn_wqlock(kwq);
1833
1834
1835 if (isinit != 0) {
1836 lgen &= ~PTHRW_RWL_INIT;
1837 if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
1838 /* first to notice the reset of the lock, clear preposts */
1839 CLEAR_REINIT_BITS(kwq);
1840 kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
1841 #if _PSYNCH_TRACE_
1842 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
1843 #endif /* _PSYNCH_TRACE_ */
1844 }
1845 }
1846
1847
1848 /* handle first the missed wakeups */
1849 if ((kwq->kw_pre_intrcount != 0) &&
1850 (kwq->kw_pre_intrtype == PTH_RW_TYPE_WRITE) &&
1851 (is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {
1852
1853 kwq->kw_pre_intrcount--;
1854 kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
1855 if (kwq->kw_pre_intrcount == 0)
1856 CLEAR_INTR_PREPOST_BITS(kwq);
1857 ksyn_wqunlock(kwq);
1858 goto out;
1859 }
1860
1861
1862 if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
1863 #if _PSYNCH_TRACE_
1864 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
1865 #endif /* _PSYNCH_TRACE_ */
1866 kwq->kw_pre_rwwc--;
1867 if (kwq->kw_pre_rwwc == 0) {
1868 preseq = kwq->kw_pre_lockseq;
1869 prerw_wc = kwq->kw_pre_sseq;
1870 CLEAR_PREPOST_BITS(kwq);
1871 if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0){
1872 kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
1873 #if _PSYNCH_TRACE_
1874 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
1875 #endif /* _PSYNCH_TRACE_ */
1876 }
1877 error = kwq_handle_unlock(kwq, preseq, prerw_wc, &updatebits, (KW_UNLOCK_PREPOST_WRLOCK|KW_UNLOCK_PREPOST), &block, lgen);
1878 #if __TESTPANICS__
1879 if (error != 0)
1880 panic("rw_wrlock: kwq_handle_unlock failed %d\n",error);
1881 #endif /* __TESTPANICS__ */
1882 if (block == 0) {
1883 ksyn_wqunlock(kwq);
1884 *retval = updatebits;
1885 goto out1;
1886 }
1887 /* insert into queue and proceed as usual */
1888 }
1889 }
1890
1891 /* no overlap watch needed; go ahead and block */
1892
1893 #if _PSYNCH_TRACE_
1894 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
1895 #endif /* _PSYNCH_TRACE_ */
1896 error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], lgen, uth, kwe, SEQFIT);
1897 #if __TESTPANICS__
1898 if (error != 0)
1899 panic("psynch_rw_wrlock: failed to enqueue\n");
1900 #endif /* __TESTPANICS__ */
1901
1902 error = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0);
1903 /* drops the wq lock */
1904
1905 out:
1906 if (error != 0) {
1907 #if _PSYNCH_TRACE_
1908 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 4, error, 0, 0);
1909 #endif /* _PSYNCH_TRACE_ */
1910 ksyn_wqlock(kwq);
1911 if (kwe->kwe_kwqqueue != NULL)
1912 ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwe);
1913 ksyn_wqunlock(kwq);
1914 } else {
1915 /* update bits */
1916 *retval = kwe->kwe_psynchretval;
1917 returnbits = kwe->kwe_psynchretval;
1918 }
1919 out1:
1920 ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK));
1921
1922 #if _PSYNCH_TRACE_
1923 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, returnbits, error, 0);
1924 #endif /* _PSYNCH_TRACE_ */
1925 return(error);
1926 }
1927
1928 /*
1929 * psynch_rw_yieldwrlock: This system call is used for psync rwlock yielding writers to block.
1930 */
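/*
 * Illustrative note: a yielding writer queues on KSYN_QUEUE_YWRITER and,
 * unlike a plain writer, lets lower-sequenced readers run first (see the
 * PTH_RWSHFT_TYPE_YWRITE cases in kwq_handle_unlock below). The trap shape
 * is otherwise identical to psynch_rw_wrlock.
 */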
1931 int
1932 psynch_rw_yieldwrlock(__unused proc_t p, __unused struct psynch_rw_yieldwrlock_args * uap, __unused uint32_t * retval)
1933 {
1934 user_addr_t rwlock = uap->rwlock;
1935 uint32_t lgen = uap->lgenval;
1936 uint32_t ugen = uap->ugenval;
1937 uint32_t rw_wc = uap->rw_wc;
1938 //uint64_t tid = uap->tid;
1939 int flags = uap->flags;
1940 int block;
1941 ksyn_wait_queue_t kwq;
1942 int error=0;
1943 int isinit = lgen & PTHRW_RWL_INIT;
1944 uthread_t uth;
1945 uint32_t returnbits=0;
1946 ksyn_waitq_element_t kwe;
1947
1948 #if _PSYNCH_TRACE_
1949 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
1950 #endif /* _PSYNCH_TRACE_ */
1951 uint32_t lockseq = 0, updatebits = 0, preseq = 0, prerw_wc = 0;
1952
1953 uth = current_uthread();
1954 kwe = &uth->uu_kwe;
1955 kwe->kwe_lockseq = lgen;
1956 kwe->kwe_uth = uth;
1957 kwe->kwe_psynchretval = 0;
1958 kwe->kwe_kwqqueue = NULL;
1959 lockseq = (lgen & PTHRW_COUNT_MASK);
1960
1961 error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
1962 if (error != 0) {
1963 #if _PSYNCH_TRACE_
1964 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
1965 #endif /* _PSYNCH_TRACE_ */
1966 return(error);
1967 }
1968
1969 ksyn_wqlock(kwq);
1970
1971 if (isinit != 0) {
1972 lgen &= ~PTHRW_RWL_INIT;
1973 if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
1974 /* first to notice the reset of the lock, clear preposts */
1975 CLEAR_REINIT_BITS(kwq);
1976 kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
1977 #if _PSYNCH_TRACE_
1978 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
1979 #endif /* _PSYNCH_TRACE_ */
1980 }
1981 }
1982
1983 /* handle first the missed wakeups */
1984 if ((kwq->kw_pre_intrcount != 0) &&
1985 (kwq->kw_pre_intrtype == PTH_RW_TYPE_YWRITE) &&
1986 (is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {
1987
1988 kwq->kw_pre_intrcount--;
1989 kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
1990 if (kwq->kw_pre_intrcount == 0)
1991 CLEAR_INTR_PREPOST_BITS(kwq);
1992 ksyn_wqunlock(kwq);
1993 goto out;
1994 }
1995
1996 if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
1997 #if _PSYNCH_TRACE_
1998 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
1999 #endif /* _PSYNCH_TRACE_ */
2000 kwq->kw_pre_rwwc--;
2001 if (kwq->kw_pre_rwwc == 0) {
2002 preseq = kwq->kw_pre_lockseq;
2003 prerw_wc = kwq->kw_pre_sseq;
2004 CLEAR_PREPOST_BITS(kwq);
2005 if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0){
2006 kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
2007 #if _PSYNCH_TRACE_
2008 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
2009 #endif /* _PSYNCH_TRACE_ */
2010 }
2011 error = kwq_handle_unlock(kwq, preseq, prerw_wc, &updatebits, (KW_UNLOCK_PREPOST_YWRLOCK|KW_UNLOCK_PREPOST), &block, lgen);
2012 #if __TESTPANICS__
2013 if (error != 0)
2014 panic("kwq_handle_unlock failed %d\n",error);
2015 #endif /* __TESTPANICS__ */
2016 if (block == 0) {
2017 ksyn_wqunlock(kwq);
2018 *retval = updatebits;
2019 goto out;
2020 }
2021 /* insert into queue and proceed as usual */
2022 }
2023 }
2024
2025 #if _PSYNCH_TRACE_
2026 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
2027 #endif /* _PSYNCH_TRACE_ */
2028 error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER], lgen, uth, kwe, SEQFIT);
2029 #if __TESTPANICS__
2030 if (error != 0)
2031 panic("psynch_rw_yieldwrlock: failed to enqueue\n");
2032 #endif /* __TESTPANICS__ */
2033
2034 error = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0);
2035
2036 out:
2037 if (error != 0) {
2038 #if _PSYNCH_TRACE_
2039 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 4, error, 0, 0);
2040 #endif /* _PSYNCH_TRACE_ */
2041 ksyn_wqlock(kwq);
2042 if (kwe->kwe_kwqqueue != NULL)
2043 ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER], kwe);
2044 ksyn_wqunlock(kwq);
2045 } else {
2046 /* update bits */
2047 *retval = kwe->kwe_psynchretval;
2048 returnbits = kwe->kwe_psynchretval;
2049 }
2050
2051 ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_RWLOCK));
2052
2053 #if _PSYNCH_TRACE_
2054 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, returnbits, error, 0);
2055 #endif /* _PSYNCH_TRACE_ */
2056 return(error);
2057 }
2058
2059 #if NOTYET
2060 /*
2061 * psynch_rw_downgrade: This system call is used to wake up blocked readers that are eligible to run due to a downgrade.
2062 */
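/*
 * Worked example (illustrative, hypothetical numbers): if find_diff(lgen,
 * ugen) reports 4 outstanding waiters, one of them being the downgrading
 * thread itself, diff becomes 3; if find_seq_till then locates only
 * count = 2 of them already queued, the grant cannot be completed yet and
 * is recorded as a prepost (kw_pre_rwwc / kw_pre_lockseq) for the late
 * arrival to consume on entry.
 */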
2063 int
2064 psynch_rw_downgrade(__unused proc_t p, struct psynch_rw_downgrade_args * uap, __unused int * retval)
2065 {
2066 user_addr_t rwlock = uap->rwlock;
2067 uint32_t lgen = uap->lgenval;
2068 uint32_t ugen = uap->ugenval;
2069 uint32_t rw_wc = uap->rw_wc;
2070 //uint64_t tid = uap->tid;
2071 int flags = uap->flags;
2072 uint32_t count = 0;
2073 int isinit = lgen & PTHRW_RWL_INIT;
2074 ksyn_wait_queue_t kwq;
2075 int error=0, diff = 0;
2076 uthread_t uth;
2077 uint32_t curgen = 0;
2078
2079 #if _PSYNCH_TRACE_
2080 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
2081 #endif /* _PSYNCH_TRACE_ */
2082 uth = current_uthread();
2083
2084 curgen = (lgen & PTHRW_COUNT_MASK);
2085
2086 error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_RWLOCK), &kwq);
2087 if (error != 0) {
2088 #if _PSYNCH_TRACE_
2089 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
2090 #endif /* _PSYNCH_TRACE_ */
2091 return(error);
2092 }
2093
2094 ksyn_wqlock(kwq);
2095
2096 if ((lgen & PTHRW_RWL_INIT) != 0) {
2097 lgen &= ~PTHRW_RWL_INIT;
2098 if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0){
2099 CLEAR_REINIT_BITS(kwq);
2100 kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
2101 #if _PSYNCH_TRACE_
2102 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
2103 #endif /* _PSYNCH_TRACE_ */
2104 }
2105 isinit = 1;
2106 }
2107
2108 /* if lastunlock seq is set, ensure the current one is not lower than that, as it would be spurious */
2109 if ((kwq->kw_lastunlockseq != PTHRW_RWL_INIT) && (is_seqlower(ugen, kwq->kw_lastunlockseq)!= 0)) {
2110 /* spurious updatebits?? */
2111 error = 0;
2112 goto out;
2113 }
2114
2115
2116
2117 /* if L - U != the number of waiters, then it needs to be preposted or is spurious */
2118 diff = find_diff(lgen, ugen);
2119 /* account for the downgrade thread itself */
2120 diff--;
2121
2122
2123 #if _PSYNCH_TRACE_
2124 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 1, kwq->kw_inqueue, curgen, 0);
2125 #endif /* _PSYNCH_TRACE_ */
2126 if (find_seq_till(kwq, curgen, diff, &count) == 0) {
2127 if (count < (uint32_t)diff)
2128 goto prepost;
2129 }
2130
2131 /* no prepost and all threads are in place, reset the bit */
2132 if ((isinit != 0) && ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0)){
2133 kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
2134 #if _PSYNCH_TRACE_
2135 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
2136 #endif /* _PSYNCH_TRACE_ */
2137 }
2138
2139 /* can handle unlock now */
2140
2141 CLEAR_PREPOST_BITS(kwq);
2142
2143 dounlock:
2144 #if _PSYNCH_TRACE_
2145 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
2146 #endif /* _PSYNCH_TRACE_ */
2147 error = kwq_handle_downgrade(kwq, lgen, 0, 0, NULL);
2148
2149 #if __TESTPANICS__
2150 if (error != 0)
2151 panic("psynch_rw_downgrade: failed to wakeup\n");
2152 #endif /* __TESTPANICS__ */
2153
2154 out:
2155 ksyn_wqunlock(kwq);
2156 #if _PSYNCH_TRACE_
2157 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_END, (uint32_t)rwlock, 0, 0, error, 0);
2158 #endif /* _PSYNCH_TRACE_ */
2159 ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_RWLOCK));
2160
2161 return(error);
2162
2163 prepost:
2164 kwq->kw_pre_rwwc = (rw_wc - count);
2165 kwq->kw_pre_lockseq = lgen;
2166 #if _PSYNCH_TRACE_
2167 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 1, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
2168 #endif /* _PSYNCH_TRACE_ */
2169 error = 0;
2170 goto out;
2171 }
2172
2173
2174 /*
2175 * psynch_rw_upgrade: This system call is used by a reader to block while waiting for an upgrade to be granted.
2176 */
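/*
 * Illustrative note: the upgrading reader waits on KSYN_QUEUE_UPGRADE,
 * which is deliberately excluded from kwq_find_rw_lowest; its wakeup is
 * granted from the unlock path when the U bit is seen (see the
 * is_rw_ubit_set handling in kwq_handle_unlock).
 */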
2177 int
2178 psynch_rw_upgrade(__unused proc_t p, struct psynch_rw_upgrade_args * uap, uint32_t * retval)
2179 {
2180 user_addr_t rwlock = uap->rwlock;
2181 uint32_t lgen = uap->lgenval;
2182 uint32_t ugen = uap->ugenval;
2183 uint32_t rw_wc = uap->rw_wc;
2184 //uint64_t tid = uap->tid;
2185 int flags = uap->flags;
2186 int block;
2187 ksyn_wait_queue_t kwq;
2188 int error=0;
2189 uthread_t uth;
2190 uint32_t lockseq = 0, updatebits = 0, preseq = 0, prerw_wc = 0;
2191 int isinit = lgen & PTHRW_RWL_INIT;
2192 ksyn_waitq_element_t kwe;
2193
2194 #if _PSYNCH_TRACE_
2195 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
2196 #endif /* _PSYNCH_TRACE_ */
2197 uth = current_uthread();
2198 kwe = &uth->uu_kwe;
2199 kwe->kwe_lockseq = lgen;
2200 kwe->kwe_uth = uth;
2201 kwe->kwe_psynchretval = 0;
2202 kwe->kwe_kwqqueue = NULL;
2203 lockseq = (lgen & PTHRW_COUNT_MASK);
2204
2205 error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_RWLOCK), &kwq);
2206 if (error != 0) {
2207 #if _PSYNCH_TRACE_
2208 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
2209 #endif /* _PSYNCH_TRACE_ */
2210 return(error);
2211 }
2212
2213 ksyn_wqlock(kwq);
2214
2215 if (isinit != 0) {
2216 lgen &= ~PTHRW_RWL_INIT;
2217 if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
2218 /* first to notice the reset of the lock, clear preposts */
2219 CLEAR_REINIT_BITS(kwq);
2220 kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
2221 #if _PSYNCH_TRACE_
2222 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
2223 #endif /* _PSYNCH_TRACE_ */
2224 }
2225 }
2226
2227 /* handle first the missed wakeups */
2228 if ((kwq->kw_pre_intrcount != 0) &&
2229 ((kwq->kw_pre_intrtype == PTH_RW_TYPE_READ) || (kwq->kw_pre_intrtype == PTH_RW_TYPE_LREAD)) &&
2230 (is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {
2231
2232 kwq->kw_pre_intrcount--;
2233 kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
2234 if (kwq->kw_pre_intrcount == 0)
2235 CLEAR_INTR_PREPOST_BITS(kwq);
2236 ksyn_wqunlock(kwq);
2237 goto out;
2238 }
2239
2240 if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
2241 #if _PSYNCH_TRACE_
2242 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
2243 #endif /* _PSYNCH_TRACE_ */
2244 kwq->kw_pre_rwwc--;
2245 if (kwq->kw_pre_rwwc == 0) {
2246 preseq = kwq->kw_pre_lockseq;
2247 prerw_wc = kwq->kw_pre_sseq;
2248 CLEAR_PREPOST_BITS(kwq);
2249 if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0){
2250 kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
2251 #if _PSYNCH_TRACE_
2252 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
2253 #endif /* _PSYNCH_TRACE_ */
2254 }
2255 error = kwq_handle_unlock(kwq, preseq, prerw_wc, &updatebits, (KW_UNLOCK_PREPOST_UPGRADE|KW_UNLOCK_PREPOST), &block, lgen);
2256 #if __TESTPANICS__
2257 if (error != 0)
2258 panic("rw_rdlock: kwq_handle_unlock failed %d\n",error);
2259 #endif /* __TESTPANICS__ */
2260 if (block == 0) {
2261 ksyn_wqunlock(kwq);
2262 goto out;
2263 }
2264 /* insert to q and proceed as ususal */
2265 }
2266 }
2267
2268
2269 #if _PSYNCH_TRACE_
2270 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
2271 #endif /* _PSYNCH_TRACE_ */
2272 error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_UPGRADE], lgen, uth, kwe, SEQFIT);
2273 #if __TESTPANICS__
2274 if (error != 0)
2275 panic("psynch_rw_upgrade: failed to enqueue\n");
2276 #endif /* __TESTPANICS__ */
2277
2278
2279 error = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0);
2280 /* drops the lock */
2281
2282 out:
2283 if (error != 0) {
2284 #if _PSYNCH_TRACE_
2285 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 4, error, 0, 0);
2286 #endif /* _PSYNCH_TRACE_ */
2287 ksyn_wqlock(kwq);
2288 if (kwe->kwe_kwqqueue != NULL)
2289 ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_UPGRADE], kwe);
2290 ksyn_wqunlock(kwq);
2291 } else {
2292 /* update bits */
2293 *retval = kwe->kwe_psynchretval;
2294 }
2295
2296 ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_RWLOCK));
2297 #if _PSYNCH_TRACE_
2298 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
2299 #endif /* _PSYNCH_TRACE_ */
2300
2301 return(error);
2302 }
2303
2304 #else /* NOTYET */
2305 int
2306 psynch_rw_upgrade(__unused proc_t p, __unused struct psynch_rw_upgrade_args * uap, __unused uint32_t * retval)
2307 {
2308 return(0);
2309 }
2310 int
2311 psynch_rw_downgrade(__unused proc_t p, __unused struct psynch_rw_downgrade_args * uap, __unused int * retval)
2312 {
2313 return(0);
2314 }
2315 #endif /* NOTYET */
2316 /*
2317 * psynch_rw_unlock: This system call is used for unlock state postings. It grants the appropriate
2318 * variety of reader/writer lock to the next waiters.
2319 */
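/*
 * Worked example (illustrative, hypothetical numbers): with diff =
 * find_diff(lgen, ugen) = 3 waiters owed a grant but only count = 1 of
 * them found at or below curgen by find_seq_till, the unlock is preposted:
 * kw_pre_rwwc is set to diff - count = 2 so the two late arrivals consume
 * the grant on entry instead of blocking.
 */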
2320
2321 int
2322 psynch_rw_unlock(__unused proc_t p, struct psynch_rw_unlock_args * uap, uint32_t * retval)
2323 {
2324 user_addr_t rwlock = uap->rwlock;
2325 uint32_t lgen = uap->lgenval;
2326 uint32_t ugen = uap->ugenval;
2327 uint32_t rw_wc = uap->rw_wc;
2328 uint32_t curgen;
2329 //uint64_t tid = uap->tid;
2330 int flags = uap->flags;
2331 uthread_t uth;
2332 ksyn_wait_queue_t kwq;
2333 uint32_t updatebits = 0;
2334 int error=0, diff;
2335 uint32_t count = 0;
2336 int isinit = 0;
2337
2338
2339 #if _PSYNCH_TRACE_
2340 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
2341 #endif /* _PSYNCH_TRACE_ */
2342 uth = current_uthread();
2343
2344 error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_RWLOCK), &kwq);
2345 if (error != 0) {
2346 #if _PSYNCH_TRACE_
2347 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
2348 #endif /* _PSYNCH_TRACE_ */
2349 return(error);
2350 }
2351
2352 curgen = lgen & PTHRW_COUNT_MASK;
2353
2354 ksyn_wqlock(kwq);
2355
2356 if ((lgen & PTHRW_RWL_INIT) != 0) {
2357 lgen &= ~PTHRW_RWL_INIT;
2358 if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0){
2359 CLEAR_REINIT_BITS(kwq);
2360 kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
2361 #if _PSYNCH_TRACE_
2362 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
2363 #endif /* _PSYNCH_TRACE_ */
2364 }
2365 isinit = 1;
2366 }
2367
2368 /* if lastunlock seq is set, ensure the current one is not lower than that, as it would be spurious */
2369 if ((kwq->kw_lastunlockseq != PTHRW_RWL_INIT) && (is_seqlower(ugen, kwq->kw_lastunlockseq)!= 0)) {
2370 #if _PSYNCH_TRACE_
2371 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, (uint32_t)0xeeeeeeee, rw_wc, kwq->kw_lastunlockseq, 0);
2372 #endif /* _PSYNCH_TRACE_ */
2373 error = 0;
2374 goto out;
2375 }
2376
2377 /* if L - U != the number of waiters, then it needs to be preposted or is spurious */
2378 diff = find_diff(lgen, ugen);
2379
2380 #if _PSYNCH_TRACE_
2381 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 1, kwq->kw_inqueue, curgen, 0);
2382 #endif /* _PSYNCH_TRACE_ */
2383 if (find_seq_till(kwq, curgen, diff, &count) == 0) {
2384 if ((count == 0) || (count < (uint32_t)diff))
2385 goto prepost;
2386 }
2387
2388 /* no prepost and all threads are in place, reset the bit */
2389 if ((isinit != 0) && ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0)){
2390 kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
2391 #if _PSYNCH_TRACE_
2392 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
2393 #endif /* _PSYNCH_TRACE_ */
2394 }
2395
2396 /* can handle unlock now */
2397
2398 CLEAR_PREPOST_BITS(kwq);
2399
2400 #if _PSYNCH_TRACE_
2401 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, 0, 0, 0);
2402 #endif /* _PSYNCH_TRACE_ */
2403 error = kwq_handle_unlock(kwq, lgen, rw_wc, &updatebits, 0, NULL, 0);
2404 #if __TESTPANICS__
2405 if (error != 0)
2406 panic("psynch_rw_unlock: kwq_handle_unlock failed %d\n",error);
2407 #endif /* __TESTPANICS__ */
2408 out:
2409 if (error == 0) {
2410 /* update bits?? */
2411 *retval = updatebits;
2412 }
2413
2414
2415 ksyn_wqunlock(kwq);
2416
2417 ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_RWLOCK));
2418 #if _PSYNCH_TRACE_
2419 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0, updatebits, error, 0);
2420 #endif /* _PSYNCH_TRACE_ */
2421
2422 return(error);
2423
2424 prepost:
2425 /* update if the new seq is higher than prev prepost, or first set */
2426 if ((is_rws_setseq(kwq->kw_pre_sseq) != 0) ||
2427 (is_seqhigher_eq((rw_wc & PTHRW_COUNT_MASK), (kwq->kw_pre_sseq & PTHRW_COUNT_MASK)) != 0)) {
2428 kwq->kw_pre_rwwc = (diff - count);
2429 kwq->kw_pre_lockseq = curgen;
2430 kwq->kw_pre_sseq = rw_wc;
2431 #if _PSYNCH_TRACE_
2432 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, rw_wc, count, 0);
2433 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 4, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
2434 #endif /* _PSYNCH_TRACE_ */
2435 updatebits = lgen; /* let this not do unlock handling */
2436 }
2437 error = 0;
2438 goto out;
2439 }
2440
2441
2442 /*
2443 * psynch_rw_unlock2: This system call is used to wake up pending readers when an unlock grant from the
2444 * kernel races with new reader arrivals.
2445 */
2446 int
2447 psynch_rw_unlock2(__unused proc_t p, __unused struct psynch_rw_unlock2_args * uap, __unused uint32_t * retval)
2448 {
2449 return(ENOTSUP);
2450 }
2451
2452
2453 /* ************************************************************************** */
2454 void
2455 pth_global_hashinit()
2456 {
2457 int arg;
2458
2459 pth_glob_hashtbl = hashinit(PTH_HASHSIZE * 4, M_PROC, &pthhash);
2460
2461 /*
2462 * pthtest={0,1,2,3} (override default aborting behavior on pthread sync failures)
2463 * 0 - just return errors
2464 * 1 - print and return errors
2465 * 2 - abort user, print and return errors
2466 * 3 - panic
2467 */
2468 if (!PE_parse_boot_argn("pthtest", &arg, sizeof(arg)))
2469 arg = __TESTMODE__;
2470
2471 if (arg == 3) {
2472 __test_panics__ = 1;
2473 printf("Pthread support PANICS when sync kernel primitives misused\n");
2474 } else if (arg == 2) {
2475 __test_aborts__ = 1;
2476 __test_prints__ = 1;
2477 printf("Pthread support ABORTS when sync kernel primitives misused\n");
2478 } else if (arg == 1) {
2479 __test_prints__ = 1;
2480 printf("Pthread support LOGS when sync kernel primitives misused\n");
2481 }
2482 }
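/*
 * Usage sketch: the test mode can be selected at boot without rebuilding
 * the kernel, e.g. (assuming a bootloader that honors the boot-args nvram
 * variable):
 *
 *	$ sudo nvram boot-args="pthtest=2"	# abort misusing callers
 *
 * With no boot-arg present, the compiled-in __TESTMODE__ default applies.
 */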
2483
2484 void
2485 pth_proc_hashinit(proc_t p)
2486 {
2487 p->p_pthhash = hashinit(PTH_HASHSIZE, M_PROC, &pthhash);
2488 if (p->p_pthhash == NULL)
2489 panic("pth_proc_hashinit: hash init returned 0\n");
2490 }
2491
2492
2493 ksyn_wait_queue_t
2494 ksyn_wq_hash_lookup(user_addr_t mutex, proc_t p, int flags, uint64_t object, uint64_t objoffset)
2495 {
2496 ksyn_wait_queue_t kwq;
2497 struct pthhashhead * hashptr;
2498
2499 if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED)
2500 {
2501 hashptr = pth_glob_hashtbl;
2502 kwq = (&hashptr[object & pthhash])->lh_first;
2503 if (kwq != 0) {
2504 for (; kwq != NULL; kwq = kwq->kw_hash.le_next) {
2505 if ((kwq->kw_object == object) && (kwq->kw_offset == objoffset)) {
2506 return (kwq);
2507 }
2508 }
2509 }
2510 } else {
2511 hashptr = p->p_pthhash;
2512 kwq = (&hashptr[mutex & pthhash])->lh_first;
2513 if (kwq != 0)
2514 for (; kwq != NULL; kwq = kwq->kw_hash.le_next) {
2515 if (kwq->kw_addr == mutex) {
2516 return (kwq);
2517 }
2518 }
2519 }
2520 return(NULL);
2521 }
2522
2523 void
2524 pth_proc_hashdelete(proc_t p)
2525 {
2526 struct pthhashhead * hashptr;
2527 ksyn_wait_queue_t kwq;
2528 int hashsize = pthhash + 1;
2529 int i;
2530
2531 #if _PSYNCH_TRACE_
2532 if ((pthread_debug_proc != NULL) && (p == pthread_debug_proc))
2533 pthread_debug_proc = PROC_NULL;
2534 #endif /* _PSYNCH_TRACE_ */
2535 hashptr = p->p_pthhash;
2536 if (hashptr == NULL)
2537 return;
2538
2539 for (i = 0; i < hashsize; i++) {
2540 while ((kwq = LIST_FIRST(&hashptr[i])) != NULL) {
2541 pthread_list_lock();
2542 if ((kwq->kw_pflags & KSYN_WQ_INHASH) != 0) {
2543 kwq->kw_pflags &= ~KSYN_WQ_INHASH;
2544 LIST_REMOVE(kwq, kw_hash);
2545 }
2546 if ((kwq->kw_pflags & KSYN_WQ_FLIST) != 0) {
2547 kwq->kw_pflags &= ~KSYN_WQ_FLIST;
2548 LIST_REMOVE(kwq, kw_list);
2549 num_infreekwq--;
2550 }
2551 num_freekwq++;
2552 pthread_list_unlock();
2553 /* release fake entries if present for cvars */
2554 if (((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_CVAR) && (kwq->kw_inqueue != 0))
2555 ksyn_freeallkwe(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER]);
2556 lck_mtx_destroy(&kwq->kw_lock, pthread_lck_grp);
2557 zfree(kwq_zone, kwq);
2558 }
2559 }
2560 FREE(p->p_pthhash, M_PROC);
2561 p->p_pthhash = NULL;
2562 }
2563
2564 /* no lock held for this as the waitqueue is getting freed */
2565 void
2566 ksyn_freeallkwe(ksyn_queue_t kq)
2567 {
2568 ksyn_waitq_element_t kwe;
2569
2570 /* free all the fake entries, dequeue the rest */
2571 kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
2572 while (kwe != NULL) {
2573 if (kwe->kwe_flags != KWE_THREAD_INWAIT) {
2574 TAILQ_REMOVE(&kq->ksynq_kwelist, kwe, kwe_list);
2575 zfree(kwe_zone, kwe);
2576 } else {
2577 TAILQ_REMOVE(&kq->ksynq_kwelist, kwe, kwe_list);
2578 }
2579 kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
2580 }
2581 }
2582
2583 /* find the kernel waitqueue; if not present, create one. Grants a reference */
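/*
 * Illustrative note on the pattern below: the hash is searched once under
 * the list lock; on a miss the lock is dropped, a fresh entry is zalloc'd,
 * and the hash is searched again before insertion, since another thread
 * may have installed the same waitqueue while the lock was dropped. The
 * loser of that race destroys its speculative allocation.
 */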
2584 int
2585 ksyn_wqfind(user_addr_t mutex, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, uint64_t tid, int flags, int wqtype, ksyn_wait_queue_t * kwqp)
2586 {
2587 ksyn_wait_queue_t kwq;
2588 ksyn_wait_queue_t nkwq;
2589 struct pthhashhead * hashptr;
2590 uint64_t object = 0, offset = 0;
2591 uint64_t hashhint;
2592 proc_t p = current_proc();
2593 int retry = mgen & PTH_RWL_RETRYBIT;
2594 struct ksyn_queue kfreeq;
2595 int i;
2596
2597 if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED)
2598 {
2599 (void)ksyn_findobj(mutex, &object, &offset);
2600 hashhint = object;
2601 hashptr = pth_glob_hashtbl;
2602 } else {
2603 hashptr = p->p_pthhash;
2604 }
2605
2606 ksyn_queue_init(&kfreeq);
2607
2608 if (((wqtype & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_MTX) && (retry != 0))
2609 mgen &= ~PTH_RWL_RETRYBIT;
2610
2611 loop:
2612 //pthread_list_lock_spin();
2613 pthread_list_lock();
2614
2615 kwq = ksyn_wq_hash_lookup(mutex, p, flags, object, offset);
2616
2617 if (kwq != NULL) {
2618 if ((kwq->kw_pflags & KSYN_WQ_FLIST) != 0) {
2619 LIST_REMOVE(kwq, kw_list);
2620 kwq->kw_pflags &= ~KSYN_WQ_FLIST;
2621 num_infreekwq--;
2622 num_reusekwq++;
2623 }
2624 if ((kwq->kw_type & KSYN_WQTYPE_MASK) != (wqtype & KSYN_WQTYPE_MASK)) {
2625 if ((kwq->kw_inqueue == 0) && (kwq->kw_pre_rwwc == 0) && (kwq->kw_pre_intrcount == 0)) {
2626 if (kwq->kw_iocount == 0) {
2627 kwq->kw_addr = mutex;
2628 kwq->kw_flags = flags;
2629 kwq->kw_object = object;
2630 kwq->kw_offset = offset;
2631 kwq->kw_type = (wqtype & KSYN_WQTYPE_MASK);
2632 CLEAR_REINIT_BITS(kwq);
2633 CLEAR_INTR_PREPOST_BITS(kwq);
2634 CLEAR_PREPOST_BITS(kwq);
2635 kwq->kw_lword = mgen;
2636 kwq->kw_uword = ugen;
2637 kwq->kw_sword = rw_wc;
2638 kwq->kw_owner = tid;
2639 } else if ((kwq->kw_iocount == 1) && (kwq->kw_dropcount == kwq->kw_iocount)) {
2640 /* if all users are unlockers then wait for it to finish */
2641 kwq->kw_pflags |= KSYN_WQ_WAITING;
2642 /* wait for the wq to be free */
2643 (void)msleep(&kwq->kw_pflags, pthread_list_mlock, PDROP, "ksyn_wqfind", 0);
2644 /* does not have list lock */
2645 goto loop;
2646 } else {
2647 __FAILEDUSERTEST__("address already known to kernel for another (busy) synchronizer type\n");
2648 pthread_list_unlock();
2649 return EBUSY;
2650 }
2651 } else {
2652 __FAILEDUSERTEST__("address already known to kernel for another (busy) synchronizer type(1)\n");
2653 pthread_list_unlock();
2654 return EBUSY;
2655 }
2656 }
2657 kwq->kw_iocount++;
2658 if (wqtype == KSYN_WQTYPE_MUTEXDROP)
2659 kwq->kw_dropcount++;
2660 if (kwqp != NULL)
2661 *kwqp = kwq;
2662 pthread_list_unlock();
2663 return (0);
2664 }
2665
2666 pthread_list_unlock();
2667
2668 nkwq = (ksyn_wait_queue_t)zalloc(kwq_zone);
2669 bzero(nkwq, sizeof(struct ksyn_wait_queue));
2670 nkwq->kw_addr = mutex;
2671 nkwq->kw_flags = flags;
2672 nkwq->kw_iocount = 1;
2673 if (wqtype == KSYN_WQTYPE_MUTEXDROP)
2674 nkwq->kw_dropcount++;
2675 nkwq->kw_object = object;
2676 nkwq->kw_offset = offset;
2677 nkwq->kw_type = (wqtype & KSYN_WQTYPE_MASK);
2678 nkwq->kw_lastseqword = PTHRW_RWS_INIT;
2679 if (nkwq->kw_type == KSYN_WQTYPE_RWLOCK)
2680 nkwq->kw_nextseqword = PTHRW_RWS_INIT;
2681
2682 nkwq->kw_pre_sseq = PTHRW_RWS_INIT;
2683
2684 CLEAR_PREPOST_BITS(nkwq);
2685 CLEAR_INTR_PREPOST_BITS(nkwq);
2686 CLEAR_REINIT_BITS(nkwq);
2687 nkwq->kw_lword = mgen;
2688 nkwq->kw_uword = ugen;
2689 nkwq->kw_sword = rw_wc;
2690 nkwq->kw_owner = tid;
2691
2692
2693 for (i=0; i< KSYN_QUEUE_MAX; i++)
2694 ksyn_queue_init(&nkwq->kw_ksynqueues[i]);
2695
2696 lck_mtx_init(&nkwq->kw_lock, pthread_lck_grp, pthread_lck_attr);
2697
2698 //pthread_list_lock_spin();
2699 pthread_list_lock();
2700 /* see whether it is already allocated */
2701 kwq = ksyn_wq_hash_lookup(mutex, p, flags, object, offset);
2702
2703 if (kwq != NULL) {
2704 if ((kwq->kw_pflags & KSYN_WQ_FLIST) != 0) {
2705 LIST_REMOVE(kwq, kw_list);
2706 kwq->kw_pflags &= ~KSYN_WQ_FLIST;
2707 num_infreekwq--;
2708 num_reusekwq++;
2709 }
2710 if ((kwq->kw_type & KSYN_WQTYPE_MASK) != (wqtype & KSYN_WQTYPE_MASK)) {
2711 if ((kwq->kw_inqueue == 0) && (kwq->kw_pre_rwwc == 0) && (kwq->kw_pre_intrcount == 0)) {
2712 if (kwq->kw_iocount == 0) {
2713 kwq->kw_addr = mutex;
2714 kwq->kw_flags = flags;
2715 kwq->kw_object = object;
2716 kwq->kw_offset = offset;
2717 kwq->kw_type = (wqtype & KSYN_WQTYPE_MASK);
2718 CLEAR_REINIT_BITS(kwq);
2719 CLEAR_INTR_PREPOST_BITS(kwq);
2720 CLEAR_PREPOST_BITS(kwq);
2721 kwq->kw_lword = mgen;
2722 kwq->kw_uword = ugen;
2723 kwq->kw_sword = rw_wc;
2724 kwq->kw_owner = tid;
2725 } else if ((kwq->kw_iocount == 1) && (kwq->kw_dropcount == kwq->kw_iocount)) {
2726 kwq->kw_pflags |= KSYN_WQ_WAITING;
2727 /* wait for the wq to be free */
2728 (void)msleep(&kwq->kw_pflags, pthread_list_mlock, PDROP, "ksyn_wqfind", 0);
2729
2730 lck_mtx_destroy(&nkwq->kw_lock, pthread_lck_grp);
2731 zfree(kwq_zone, nkwq);
2732 /* will acquire lock again */
2733
2734 goto loop;
2735 } else {
2736 __FAILEDUSERTEST__("address already known to kernel for another [busy] synchronizer type(2)\n");
2737 pthread_list_unlock();
2738 lck_mtx_destroy(&nkwq->kw_lock, pthread_lck_grp);
2739 zfree(kwq_zone, nkwq);
2740 return EBUSY;
2741 }
2742 } else {
2743 __FAILEDUSERTEST__("address already known to kernel for another [busy] synchronizer type(3)\n");
2744 pthread_list_unlock();
2745 lck_mtx_destroy(&nkwq->kw_lock, pthread_lck_grp);
2746 zfree(kwq_zone, nkwq);
2747 return EBUSY;
2748 }
2749 }
2750 kwq->kw_iocount++;
2751 if (wqtype == KSYN_WQTYPE_MUTEXDROP)
2752 kwq->kw_dropcount++;
2753 if (kwqp != NULL)
2754 *kwqp = kwq;
2755 pthread_list_unlock();
2756 lck_mtx_destroy(&nkwq->kw_lock, pthread_lck_grp);
2757 zfree(kwq_zone, nkwq);
2758 return (0);
2759 }
2760 kwq = nkwq;
2761
2762 #if _PSYNCH_TRACE_
2763 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, kwq->kw_lword, kwq->kw_uword, kwq->kw_sword, 0xffff, 0);
2764 #endif /* _PSYNCH_TRACE_ */
2765 if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED)
2766 {
2767 kwq->kw_pflags |= KSYN_WQ_SHARED;
2768 LIST_INSERT_HEAD(&hashptr[kwq->kw_object & pthhash], kwq, kw_hash);
2769 } else
2770 LIST_INSERT_HEAD(&hashptr[mutex & pthhash], kwq, kw_hash);
2771
2772 kwq->kw_pflags |= KSYN_WQ_INHASH;
2773 num_total_kwq++;
2774
2775 pthread_list_unlock();
2776
2777 if (kwqp != NULL)
2778 *kwqp = kwq;
2779 return (0);
2780 }
2781
2782 /* The reference from ksyn_wqfind is dropped here. Starts the free process if needed */
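/*
 * Illustrative note: an idle waitqueue is not freed immediately. Unless
 * qfreenow forces it out, it is timestamped and parked on pth_free_list,
 * and psynch_wq_cleanup later reaps entries older than
 * KSYN_CLEANUP_DEADLINE seconds; this amortizes alloc/free cost for hot
 * synchronizers that are dropped and re-looked-up in quick succession.
 */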
2783 void
2784 ksyn_wqrelease(ksyn_wait_queue_t kwq, ksyn_wait_queue_t ckwq, int qfreenow, int wqtype)
2785 {
2786 uint64_t deadline;
2787 struct timeval t;
2788 int sched = 0;
2789 ksyn_wait_queue_t free_elem = NULL;
2790 ksyn_wait_queue_t free_elem1 = NULL;
2791
2792 //pthread_list_lock_spin();
2793 pthread_list_lock();
2794 kwq->kw_iocount--;
2795 if (wqtype == KSYN_WQTYPE_MUTEXDROP) {
2796 kwq->kw_dropcount--;
2797 }
2798 if (kwq->kw_iocount == 0) {
2799 if ((kwq->kw_pflags & KSYN_WQ_WAITING) != 0) {
2800 /* someone is waiting for the waitqueue, wake them up */
2801 kwq->kw_pflags &= ~KSYN_WQ_WAITING;
2802 wakeup(&kwq->kw_pflags);
2803 }
2804
2805 if ((kwq->kw_pre_rwwc == 0) && (kwq->kw_inqueue == 0) && (kwq->kw_pre_intrcount == 0)) {
2806 if (qfreenow == 0) {
2807 microuptime(&kwq->kw_ts);
2808 LIST_INSERT_HEAD(&pth_free_list, kwq, kw_list);
2809 kwq->kw_pflags |= KSYN_WQ_FLIST;
2810 num_infreekwq++;
2811 free_elem = NULL;
2812 } else {
2813 /* remove from the only list it is in, i.e. the hash */
2814 kwq->kw_pflags &= ~(KSYN_WQ_FLIST | KSYN_WQ_INHASH);
2815 LIST_REMOVE(kwq, kw_hash);
2816 lck_mtx_destroy(&kwq->kw_lock, pthread_lck_grp);
2817 num_total_kwq--;
2818 num_freekwq++;
2819 free_elem = kwq;
2820 }
2821 } else
2822 free_elem = NULL;
2823 if (qfreenow == 0)
2824 sched = 1;
2825 }
2826
2827 if (ckwq != NULL) {
2828 ckwq->kw_iocount--;
2829 if (wqtype == KSYN_WQTYPE_MUTEXDROP) {
2830 ckwq->kw_dropcount--;
2831 }
2832 if (ckwq->kw_iocount == 0) {
2833 if ((ckwq->kw_pflags & KSYN_WQ_WAITING) != 0) {
2834 /* someone is waiting for the waitqueue, wake them up */
2835 ckwq->kw_pflags &= ~KSYN_WQ_WAITING;
2836 wakeup(&ckwq->kw_pflags);
2837 }
2838 if ((ckwq->kw_pre_rwwc == 0) && (ckwq->kw_inqueue == 0) && (ckwq->kw_pre_intrcount == 0)) {
2839 if (qfreenow == 0) {
2840 /* mark for free if we can */
2841 microuptime(&ckwq->kw_ts);
2842 LIST_INSERT_HEAD(&pth_free_list, ckwq, kw_list);
2843 ckwq->kw_pflags |= KSYN_WQ_FLIST;
2844 num_infreekwq++;
2845 free_elem1 = NULL;
2846 } else {
2847 /* remove from the only list it is in, i.e. the hash */
2848 ckwq->kw_pflags &= ~(KSYN_WQ_FLIST | KSYN_WQ_INHASH);
2849 LIST_REMOVE(ckwq, kw_hash);
2850 lck_mtx_destroy(&ckwq->kw_lock, pthread_lck_grp);
2851 num_total_kwq--;
2852 num_freekwq++;
2853 free_elem1 = ckwq;
2854 }
2855 } else
2856 free_elem1 = NULL;
2857 if (qfreenow == 0)
2858 sched = 1;
2859 }
2860 }
2861
2862 if (sched == 1 && psynch_cleanupset == 0) {
2863 psynch_cleanupset = 1;
2864 microuptime(&t);
2865 t.tv_sec += KSYN_CLEANUP_DEADLINE;
2866
2867 deadline = tvtoabstime(&t);
2868 thread_call_enter_delayed(psynch_thcall, deadline);
2869 }
2870 pthread_list_unlock();
2871 if (free_elem != NULL)
2872 zfree(kwq_zone, free_elem);
2873 if (free_elem1 != NULL)
2874 zfree(kwq_zone, free_elem1);
2875 }
2876
2877 /* responsible for freeing the waitqueues */
2878 void
2879 psynch_wq_cleanup(__unused void * param, __unused void * param1)
2880 {
2881 ksyn_wait_queue_t kwq;
2882 struct timeval t;
2883 LIST_HEAD(, ksyn_wait_queue) freelist = {NULL};
2884 int count = 0, delayed = 0, diff;
2885 uint64_t deadline = 0;
2886
2887 //pthread_list_lock_spin();
2888 pthread_list_lock();
2889
2890 num_addedfreekwq = num_infreekwq - num_lastfreekwqcount;
2891 num_lastfreekwqcount = num_infreekwq;
2892 microuptime(&t);
2893
2894 LIST_FOREACH(kwq, &pth_free_list, kw_list) {
2895 if ((kwq->kw_iocount != 0) || (kwq->kw_pre_rwwc != 0) || (kwq->kw_inqueue != 0) || (kwq->kw_pre_intrcount != 0)) {
2896 /* still in use */
2897 continue;
2898 }
2899 diff = t.tv_sec - kwq->kw_ts.tv_sec;
2900 if (diff < 0)
2901 diff *= -1;
2902 if (diff >= KSYN_CLEANUP_DEADLINE) {
2903 /* out of hash */
2904 kwq->kw_pflags &= ~(KSYN_WQ_FLIST | KSYN_WQ_INHASH);
2905 num_infreekwq--;
2906 num_freekwq++;
2907 LIST_REMOVE(kwq, kw_hash);
2908 LIST_REMOVE(kwq, kw_list);
2909 LIST_INSERT_HEAD(&freelist, kwq, kw_list);
2910 count++;
2911 num_total_kwq--;
2912 } else {
2913 delayed = 1;
2914 }
2915
2916 }
2917 if (delayed != 0) {
2918 t.tv_sec += KSYN_CLEANUP_DEADLINE;
2919
2920 deadline = tvtoabstime(&t);
2921 thread_call_enter_delayed(psynch_thcall, deadline);
2922 psynch_cleanupset = 1;
2923 } else
2924 psynch_cleanupset = 0;
2925
2926 pthread_list_unlock();
2927
2928
2929 while ((kwq = LIST_FIRST(&freelist)) != NULL) {
2930 LIST_REMOVE(kwq, kw_list);
2931 lck_mtx_destroy(&kwq->kw_lock, pthread_lck_grp);
2932 zfree(kwq_zone, kwq);
2933 }
2934 }
2935
2936
2937 int
2938 ksyn_block_thread_locked(ksyn_wait_queue_t kwq, uint64_t abstime, ksyn_waitq_element_t kwe, int mylog)
2939 {
2940 kern_return_t kret;
2941 int error = 0;
2942 #if _PSYNCH_TRACE_
2943 uthread_t uth = NULL;
2944 #endif /* _PSYNCH_TRACE_ */
2945
2946 kwe->kwe_kwqqueue = (void *)kwq;
2947 assert_wait_deadline(&kwe->kwe_psynchretval, THREAD_ABORTSAFE, abstime);
2948 ksyn_wqunlock(kwq);
2949
2950 kret = thread_block(NULL);
2951 switch (kret) {
2952 case THREAD_TIMED_OUT:
2953 error = ETIMEDOUT;
2954 break;
2955 case THREAD_INTERRUPTED:
2956 error = EINTR;
2957 break;
2958 }
2959 #if _PSYNCH_TRACE_
2960 uth = current_uthread();
2961 #if defined(__i386__)
2962 if (mylog != 0)
2963 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_THWAKEUP | DBG_FUNC_NONE, 0xf4f3f2f1, (uint32_t)uth, kret, 0, 0);
2964 #else
2965 if (mylog != 0)
2966 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_THWAKEUP | DBG_FUNC_NONE, 0xeeeeeeee, kret, error, 0xeeeeeeee, 0);
2967 #endif
2968 #endif /* _PSYNCH_TRACE_ */
2969
2970 return(error);
2971 }
2972
2973 kern_return_t
2974 ksyn_wakeup_thread(__unused ksyn_wait_queue_t kwq, ksyn_waitq_element_t kwe)
2975 {
2976 kern_return_t kret;
2977 #if _PSYNCH_TRACE_
2978 uthread_t uth = NULL;
2979 #endif /* _PSYNCH_TRACE_ */
2980
2981 kret = thread_wakeup_one((caddr_t)&kwe->kwe_psynchretval);
2982
2983 if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
2984 panic("ksyn_wakeup_thread: panic waking up thread %x\n", kret);
2985 #if _PSYNCH_TRACE_
2986 uth = kwe->kwe_uth;
2987 #if defined(__i386__)
2988 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_THWAKEUP | DBG_FUNC_NONE, 0xf1f2f3f4, (uint32_t)uth, kret, 0, 0);
2989 #endif
2990 #endif /* _PSYNCH_TRACE_ */
2991
2992 return(kret);
2993 }
2994
2995 /* find the true shared object/offset for shared mutexes */
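/*
 * Illustrative note: for PTHREAD_PROCESS_SHARED synchronizers the user
 * address differs per process, so the (vm object id, offset) pair returned
 * here keys the global hash; two processes mapping the same shared page
 * therefore resolve to the same ksyn_wait_queue.
 */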
2996 int
2997 ksyn_findobj(uint64_t mutex, uint64_t * objectp, uint64_t * offsetp)
2998 {
2999 vm_page_info_basic_data_t info;
3000 kern_return_t kret;
3001 mach_msg_type_number_t count = VM_PAGE_INFO_BASIC_COUNT;
3002
3003 kret = vm_map_page_info(current_map(), mutex, VM_PAGE_INFO_BASIC,
3004 (vm_page_info_t)&info, &count);
3005
3006 if (kret != KERN_SUCCESS)
3007 return(EINVAL);
3008
3009 if (objectp != NULL)
3010 *objectp = (uint64_t)info.object_id;
3011 if (offsetp != NULL)
3012 *offsetp = (uint64_t)info.offset;
3013
3014 return(0);
3015 }
3016
3017
3018 /* lowest of kw_fr, kw_flr, kw_fwr, kw_fywr */
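/*
 * Worked example (illustrative, hypothetical sequence numbers): with a
 * queued reader at 0x600, a queued writer at 0x500, and a preposted read
 * request arriving with premgen = 0x400 and KW_UNLOCK_PREPOST_READLOCK
 * set, lowest[KSYN_QUEUE_READ] becomes 0x400 (the prepost is lower than
 * any queued reader) and the returned type reports the read class as the
 * overall lowest with a write pending.
 */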
3019 int
3020 kwq_find_rw_lowest(ksyn_wait_queue_t kwq, int flags, uint32_t premgen, int * typep, uint32_t lowest[])
3021 {
3022
3023 uint32_t kw_fr, kw_flr, kw_fwr, kw_fywr, low;
3024 int type = 0, lowtype, typenum[4];
3025 uint32_t numbers[4];
3026 int count = 0, i;
3027
3028
3029 if ((kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0)) {
3030 type |= PTH_RWSHFT_TYPE_READ;
3031 /* read entries are present */
3032 if (kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count != 0) {
3033 kw_fr = kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_firstnum;
3034 if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) && (is_seqlower(premgen, kw_fr) != 0))
3035 kw_fr = premgen;
3036 } else
3037 kw_fr = premgen;
3038
3039 lowest[KSYN_QUEUE_READ] = kw_fr;
3040 numbers[count]= kw_fr;
3041 typenum[count] = PTH_RW_TYPE_READ;
3042 count++;
3043 } else
3044 lowest[KSYN_QUEUE_READ] = 0;
3045
3046 if ((kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_LREADLOCK) != 0)) {
3047 type |= PTH_RWSHFT_TYPE_LREAD;
3048 /* long read entries are present */
3049 if (kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count != 0) {
3050 kw_flr = kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_firstnum;
3051 if (((flags & KW_UNLOCK_PREPOST_LREADLOCK) != 0) && (is_seqlower(premgen, kw_flr) != 0))
3052 kw_flr = premgen;
3053 } else
3054 kw_flr = premgen;
3055
3056 lowest[KSYN_QUEUE_LREAD] = kw_flr;
3057 numbers[count]= kw_flr;
3058 typenum[count] = PTH_RW_TYPE_LREAD;
3059 count++;
3060 } else
3061 lowest[KSYN_QUEUE_LREAD] = 0;
3062
3063
3064 if ((kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0)) {
3065 type |= PTH_RWSHFT_TYPE_WRITE;
3066 /* write entries are present */
3067 if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0) {
3068 kw_fwr = kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_firstnum;
3069 if (((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0) && (is_seqlower(premgen, kw_fwr) != 0))
3070 kw_fwr = premgen;
3071 } else
3072 kw_fwr = premgen;
3073
3074 lowest[KSYN_QUEUE_WRITER] = kw_fwr;
3075 numbers[count]= kw_fwr;
3076 typenum[count] = PTH_RW_TYPE_WRITE;
3077 count++;
3078 } else
3079 lowest[KSYN_QUEUE_WRITER] = 0;
3080
3081 if ((kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_YWRLOCK) != 0)) {
3082 type |= PTH_RWSHFT_TYPE_YWRITE;
3083 /* yielding write entries are present */
3084 if (kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0) {
3085 kw_fywr = kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_firstnum;
3086 if (((flags & KW_UNLOCK_PREPOST_YWRLOCK) != 0) && (is_seqlower(premgen, kw_fywr) != 0))
3087 kw_fywr = premgen;
3088 } else
3089 kw_fywr = premgen;
3090
3091 lowest[KSYN_QUEUE_YWRITER] = kw_fywr;
3092 numbers[count]= kw_fywr;
3093 typenum[count] = PTH_RW_TYPE_YWRITE;
3094 count++;
3095 } else
3096 lowest[KSYN_QUEUE_YWRITER] = 0;
3097
3098
3099 #if __TESTPANICS__
3100 if (count == 0)
3101 panic("nothing in the queue???\n");
3102 #endif /* __TESTPANICS__ */
3103
3104 low = numbers[0];
3105 lowtype = typenum[0];
3106 if (count > 1) {
3107 for (i = 1; i < count; i++) {
3108 if (is_seqlower(numbers[i], low) != 0) {
3109 low = numbers[i];
3110 lowtype = typenum[i];
3111 }
3112 }
3113 }
3114 type |= lowtype;
3115
3116 if (typep != 0)
3117 *typep = type;
3118 return(0);
3119 }
3120
3121 /* wake up readers and long readers up to the writer limits */
3122 int
3123 ksyn_wakeupreaders(ksyn_wait_queue_t kwq, uint32_t limitread, int longreadset, int allreaders, uint32_t updatebits, int * wokenp)
3124 {
3125 ksyn_waitq_element_t kwe = NULL;
3126 ksyn_queue_t kq;
3127 int failedwakeup = 0;
3128 int numwoken = 0;
3129 kern_return_t kret = KERN_SUCCESS;
3130 uint32_t lbits = 0;
3131
3132 lbits = updatebits;
3133 if (longreadset != 0) {
3134 /* clear all read and longreads */
3135 while ((kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_READ], kwq)) != NULL) {
3136 kwe->kwe_psynchretval = lbits;
3137 kwe->kwe_kwqqueue = NULL;
3138
3139 numwoken++;
3140 kret = ksyn_wakeup_thread(kwq, kwe);
3141 #if __TESTPANICS__
3142 if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
3143 panic("ksyn_wakeupreaders: panic waking up readers\n");
3144 #endif /* __TESTPANICS__ */
3145 if (kret == KERN_NOT_WAITING) {
3146 failedwakeup++;
3147 }
3148 }
3149 while ((kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_LREAD], kwq)) != NULL) {
3150 kwe->kwe_psynchretval = lbits;
3151 kwe->kwe_kwqqueue = NULL;
3152 numwoken++;
3153 kret = ksyn_wakeup_thread(kwq, kwe);
3154 #if __TESTPANICS__
3155 if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
3156 panic("ksyn_wakeupreaders: panic waking up lreaders\n");
3157 #endif /* __TESTPANICS__ */
3158 if (kret == KERN_NOT_WAITING) {
3159 failedwakeup++;
3160 }
3161 }
3162 } else {
3163 kq = &kwq->kw_ksynqueues[KSYN_QUEUE_READ];
3164 while ((kq->ksynq_count != 0) && (allreaders || (is_seqlower(kq->ksynq_firstnum, limitread) != 0))) {
3165 kwe = ksyn_queue_removefirst(kq, kwq);
3166 kwe->kwe_psynchretval = lbits;
3167 kwe->kwe_kwqqueue = NULL;
3168 numwoken++;
3169 kret = ksyn_wakeup_thread(kwq, kwe);
3170 #if __TESTPANICS__
3171 if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
3172 panic("ksyn_wakeupreaders: panic waking up readers\n");
3173 #endif /* __TESTPANICS__ */
3174 if (kret == KERN_NOT_WAITING) {
3175 failedwakeup++;
3176 }
3177 }
3178 }
3179
3180 if (wokenp != NULL)
3181 *wokenp = numwoken;
3182 return(failedwakeup);
3183 }
3184
3185
3186 /* This handles the unlock grants for the next set on rw_unlock() or on arrival of all preposted waiters */
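/*
 * Illustrative note: this routine picks the winning class from the lowest
 * pending sequence of each queue; readers are woken in a batch up to the
 * first writer's sequence (limitrdnum). The reply word (updatebits)
 * carries the W/Y pending bits plus the number of grants shifted left by
 * PTHRW_COUNT_SHIFT.
 */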
3187 int
3188 kwq_handle_unlock(ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t rw_wc, uint32_t * updatep, int flags, int * blockp, uint32_t premgen)
3189 {
3190 uint32_t low_reader, low_writer, low_ywriter, low_lreader, limitrdnum;
3191 int rwtype, error=0;
3192 int longreadset = 0, allreaders, failed;
3193 uint32_t updatebits = 0, numneeded = 0;
3194 int prepost = flags & KW_UNLOCK_PREPOST;
3195 thread_t preth = THREAD_NULL;
3196 ksyn_waitq_element_t kwe;
3197 uthread_t uth;
3198 thread_t th;
3199 int woken = 0;
3200 int block = 1;
3201 uint32_t lowest[KSYN_QUEUE_MAX]; /* no need for upgrade as it is handled separately */
3202 kern_return_t kret = KERN_SUCCESS;
3203 ksyn_queue_t kq;
3204 int curthreturns = 0;
3205
3206 #if _PSYNCH_TRACE_
3207 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_START, (uint32_t)kwq->kw_addr, mgen, premgen, rw_wc, 0);
3208 #endif /* _PSYNCH_TRACE_ */
3209 if (prepost != 0) {
3210 preth = current_thread();
3211 }
3212
3213 kq = &kwq->kw_ksynqueues[KSYN_QUEUE_READ];
3214 kwq->kw_lastseqword = rw_wc;
3215 kwq->kw_lastunlockseq = (rw_wc & PTHRW_COUNT_MASK);
3216 kwq->kw_overlapwatch = 0;
3217
3218 /* upgrade pending */
3219 if (is_rw_ubit_set(mgen)) {
3220 #if __TESTPANICS__
3221 panic("NO UBIT SHOULD BE SET\n");
3222 updatebits = PTH_RWL_EBIT | PTH_RWL_KBIT;
3223 if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0)
3224 updatebits |= PTH_RWL_WBIT;
3225 if (kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0)
3226 updatebits |= PTH_RWL_YBIT;
3227 if (prepost != 0) {
3228 if((flags & KW_UNLOCK_PREPOST_UPGRADE) != 0) {
3229 /* upgrade thread calling the prepost */
3230 /* upgrade granted */
3231 block = 0;
3232 goto out;
3233 }
3234
3235 }
3236 if (kwq->kw_ksynqueues[KSYN_QUEUE_UPGRADE].ksynq_count > 0) {
3237 kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_UPGRADE], kwq);
3238
3239 kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;
3240 kwe->kwe_psynchretval = updatebits;
3241 kwe->kwe_kwqqueue = NULL;
3242 kret = ksyn_wakeup_thread(kwq, kwe);
3243 if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
3244 panic("kwq_handle_unlock: panic waking up the upgrade thread \n");
3245 if (kret == KERN_NOT_WAITING) {
3246 kwq->kw_pre_intrcount = 1; /* actually a count */
3247 kwq->kw_pre_intrseq = mgen;
3248 kwq->kw_pre_intrretbits = kwe->kwe_psynchretval;
3249 kwq->kw_pre_intrtype = PTH_RW_TYPE_UPGRADE;
3250 }
3251 error = 0;
3252 } else {
3253 panic("kwq_handle_unlock: unable to find the upgrade thread\n");
3254 }
3255 #endif /* __TESTPANICS__ */
3256 ksyn_wqunlock(kwq);
3257 goto out;
3258 }
3259
3260 error = kwq_find_rw_lowest(kwq, flags, premgen, &rwtype, lowest);
3261 #if __TESTPANICS__
3262 if (error != 0)
3263 panic("rwunlock: failed to slot the next round of threads");
3264 #endif /* __TESTPANICS__ */
3265
3266 #if _PSYNCH_TRACE_
3267 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 1, rwtype, 0, 0);
3268 #endif /* _PSYNCH_TRACE_ */
3269 low_reader = lowest[KSYN_QUEUE_READ];
3270 low_lreader = lowest[KSYN_QUEUE_LREAD];
3271 low_writer = lowest[KSYN_QUEUE_WRITER];
3272 low_ywriter = lowest[KSYN_QUEUE_YWRITER];
3273
3274
3275 longreadset = 0;
3276 allreaders = 0;
3277 updatebits = 0;
3278
3279
3280 switch (rwtype & PTH_RW_TYPE_MASK) {
3281 case PTH_RW_TYPE_LREAD:
3282 longreadset = 1;
3283 /* fall through to the READ case */
3284 case PTH_RW_TYPE_READ: {
3285 /* what about the preflight which is LREAD or READ ?? */
3286 if ((rwtype & PTH_RWSHFT_TYPE_MASK) != 0) {
3287 if (rwtype & PTH_RWSHFT_TYPE_WRITE)
3288 updatebits |= (PTH_RWL_WBIT | PTH_RWL_KBIT);
3289 if (rwtype & PTH_RWSHFT_TYPE_YWRITE)
3290 updatebits |= PTH_RWL_YBIT;
3291 }
3292 limitrdnum = 0;
3293 if (longreadset == 0) {
3294 switch (rwtype & (PTH_RWSHFT_TYPE_WRITE | PTH_RWSHFT_TYPE_YWRITE)) {
3295 case PTH_RWSHFT_TYPE_WRITE:
3296 limitrdnum = low_writer;
3297 if (((rwtype & PTH_RWSHFT_TYPE_LREAD) != 0) &&
3298 (is_seqlower(low_lreader, limitrdnum) != 0)) {
3299 longreadset = 1;
3300 }
3301 if (((flags & KW_UNLOCK_PREPOST_LREADLOCK) != 0) &&
3302 (is_seqlower(premgen, limitrdnum) != 0)) {
3303 longreadset = 1;
3304 }
3305 break;
3306 case PTH_RWSHFT_TYPE_YWRITE:
3307 /* all readers? */
3308 if (((rwtype & PTH_RWSHFT_TYPE_LREAD) != 0) &&
3309 (is_seqlower(low_lreader, low_ywriter) != 0)) {
3310 longreadset = 1;
3311 } else
3312 allreaders = 1;
3313 if (((flags & KW_UNLOCK_PREPOST_LREADLOCK) != 0) &&
3314 (is_seqlower(premgen, low_ywriter) != 0)) {
3315 longreadset = 1;
3316 allreaders = 0;
3317 }
3318
3319
3320 break;
3321 case (PTH_RWSHFT_TYPE_WRITE | PTH_RWSHFT_TYPE_YWRITE):
3322 if (is_seqlower(low_ywriter, low_writer) != 0) {
3323 limitrdnum = low_ywriter;
3324 } else
3325 limitrdnum = low_writer;
3326 if (((rwtype & PTH_RWSHFT_TYPE_LREAD) != 0) &&
3327 (is_seqlower(low_lreader, limitrdnum) != 0)) {
3328 longreadset = 1;
3329 }
3330 if (((flags & KW_UNLOCK_PREPOST_LREADLOCK) != 0) &&
3331 (is_seqlower(premgen, limitrdnum) != 0)) {
3332 longreadset = 1;
3333 }
3334 break;
3335 default: /* no writers at all */
3336 if ((rwtype & PTH_RWSHFT_TYPE_LREAD) != 0)
3337 longreadset = 1;
3338 else
3339 allreaders = 1;
3340 };
3341
3342 }
3343 numneeded = 0;
3344 if (longreadset != 0) {
3345 updatebits |= PTH_RWL_LBIT;
3346 updatebits &= ~PTH_RWL_KBIT;
3347 if ((flags & (KW_UNLOCK_PREPOST_READLOCK | KW_UNLOCK_PREPOST_LREADLOCK)) != 0)
3348 numneeded += 1;
3349 numneeded += kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count;
3350 numneeded += kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count;
3351 updatebits += (numneeded << PTHRW_COUNT_SHIFT);
3352 kwq->kw_overlapwatch = 1;
3353 } else {
3354 /* no longread, evaluate number of readers */
3355
3356 switch (rwtype & (PTH_RWSHFT_TYPE_WRITE | PTH_RWSHFT_TYPE_YWRITE)) {
3357 case PTH_RWSHFT_TYPE_WRITE:
3358 limitrdnum = low_writer;
3359 numneeded = ksyn_queue_count_tolowest(kq, limitrdnum);
3360 if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) && (is_seqlower(premgen, limitrdnum) != 0)) {
3361 curthreturns = 1;
3362 numneeded += 1;
3363 }
3364 break;
3365 case PTH_RWSHFT_TYPE_YWRITE:
3366 /* all readers? */
3367 numneeded += kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count;
3368 if ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) {
3369 curthreturns = 1;
3370 numneeded += 1;
3371 }
3372 break;
3373 case (PTH_RWSHFT_TYPE_WRITE | PTH_RWSHFT_TYPE_YWRITE):
3374 limitrdnum = low_writer;
3375 numneeded = ksyn_queue_count_tolowest(kq, limitrdnum);
3376 if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) && (is_seqlower(premgen, limitrdnum) != 0)) {
3377 curthreturns = 1;
3378 numneeded += 1;
3379 }
3380 break;
3381 default: /* no writers at all */
3382 /* no other waiters, only readers */
3383 kwq->kw_overlapwatch = 1;
3384 numneeded += kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count;
3385 if ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) {
3386 curthreturns = 1;
3387 numneeded += 1;
3388 }
3389 };
3390
3391 updatebits += (numneeded << PTHRW_COUNT_SHIFT);
3392 }
3393 kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;
3394
3395 if (curthreturns != 0) {
3396 block = 0;
3397 uth = current_uthread();
3398 kwe = &uth->uu_kwe;
3399 kwe->kwe_psynchretval = updatebits;
3400 }
3401
3402
3403 failed = ksyn_wakeupreaders(kwq, limitrdnum, longreadset, allreaders, updatebits, &woken);
3404 #if _PSYNCH_TRACE_
3405 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 2, woken, failed, 0);
3406 #endif /* _PSYNCH_TRACE_ */
3407
3408 if (failed != 0) {
3409 kwq->kw_pre_intrcount = failed; /* actually a count */
3410 kwq->kw_pre_intrseq = limitrdnum;
3411 kwq->kw_pre_intrretbits = updatebits;
3412 if (longreadset)
3413 kwq->kw_pre_intrtype = PTH_RW_TYPE_LREAD;
3414 else
3415 kwq->kw_pre_intrtype = PTH_RW_TYPE_READ;
3416 }
3417
3418 error = 0;
3419
3420 if ((kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0) && ((updatebits & PTH_RWL_WBIT) == 0))
3421 panic("kwq_handle_unlock: writer pending but no writebit set %x\n", updatebits);
3422 }
3423 break;
3424
3425 case PTH_RW_TYPE_WRITE: {
3426
3427 /* only one thread is going to be granted */
3428 updatebits |= (PTHRW_INC);
3429 updatebits |= PTH_RWL_KBIT | PTH_RWL_EBIT;
3430
3431 if (((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0) && (low_writer == premgen)) {
3432 block = 0;
3433 if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0)
3434 updatebits |= PTH_RWL_WBIT;
3435 if ((rwtype & PTH_RWSHFT_TYPE_YWRITE) != 0)
3436 updatebits |= PTH_RWL_YBIT;
3437 th = preth;
3438 uth = get_bsdthread_info(th);
3439 kwe = &uth->uu_kwe;
3440 kwe->kwe_psynchretval = updatebits;
3441 } else {
3442 /* we are not granting writelock to the preposting thread */
3443 kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwq);
3444
3445 /* set the W bit if other writers are present or a write thread is preposting */
3446 if ((kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0) )
3447 updatebits |= PTH_RWL_WBIT;
3448 if ((rwtype & PTH_RWSHFT_TYPE_YWRITE) != 0)
3449 updatebits |= PTH_RWL_YBIT;
3450 kwe->kwe_psynchretval = updatebits;
3451 kwe->kwe_kwqqueue = NULL;
3452 /* setup next in the queue */
3453 kret = ksyn_wakeup_thread(kwq, kwe);
3454 #if _PSYNCH_TRACE_
3455 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 3, kret, 0, 0);
3456 #endif /* _PSYNCH_TRACE_ */
3457 #if __TESTPANICS__
3458 if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
3459 panic("kwq_handle_unlock: panic waking up writer\n");
3460 #endif /* __TESTPANICS__ */
3461 if (kret == KERN_NOT_WAITING) {
3462 kwq->kw_pre_intrcount = 1; /* actually a count */
3463 kwq->kw_pre_intrseq = low_writer;
3464 kwq->kw_pre_intrretbits = updatebits;
3465 kwq->kw_pre_intrtype = PTH_RW_TYPE_WRITE;
3466 }
3467 error = 0;
3468 }
3469 kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;
3470 if ((updatebits & (PTH_RWL_KBIT | PTH_RWL_EBIT)) != (PTH_RWL_KBIT | PTH_RWL_EBIT))
3471 panic("kwq_handle_unlock: writer lock granted but K/E bits not set %x\n", updatebits);
3472
3473 }
3474 break;
3475
3476 case PTH_RW_TYPE_YWRITE: {
3477 /* can reader locks be granted ahead of this write? */
3478 if ((rwtype & PTH_RWSHFT_TYPE_READ) != 0) {
3479 if ((rwtype & PTH_RWSHFT_TYPE_MASK) != 0) {
3480 if (rwtype & PTH_RWSHFT_TYPE_WRITE)
3481 updatebits |= (PTH_RWL_WBIT | PTH_RWL_KBIT);
3482 if (rwtype & PTH_RWSHFT_TYPE_YWRITE)
3483 updatebits |= PTH_RWL_YBIT;
3484 }
3485
3486 if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0) {
3487 /* is the lowest reader less than the low writer? */
3488 if (is_seqlower(low_reader, low_writer) == 0)
3489 goto yielditis;
3490
3491 numneeded = ksyn_queue_count_tolowest(kq, low_writer);
3492 updatebits += (numneeded << PTHRW_COUNT_SHIFT);
3493 if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) && (is_seqlower(premgen, low_writer) != 0)) {
3494 uth = current_uthread();
3495 kwe = &uth->uu_kwe;
3496 /* add one more */
3497 updatebits += PTHRW_INC;
3498 kwe->kwe_psynchretval = updatebits;
3499 block = 0;
3500 }
3501
3502 kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;
3503
3504 /* there will be readers to wake up, no need to check for woken */
3505 failed = ksyn_wakeupreaders(kwq, low_writer, 0, 0, updatebits, NULL);
3506 #if _PSYNCH_TRACE_
3507 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 2, woken, failed, 0);
3508 #endif /* _PSYNCH_TRACE_ */
3509 if (failed != 0) {
3510 kwq->kw_pre_intrcount = failed; /* actually a count */
3511 kwq->kw_pre_intrseq = low_writer;
3512 kwq->kw_pre_intrretbits = updatebits;
3513 kwq->kw_pre_intrtype = PTH_RW_TYPE_READ;
3514 }
3515 error = 0;
3516 } else {
3517 /* wakeup all readers */
3518 numneeded = kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count;
3519 updatebits += (numneeded << PTHRW_COUNT_SHIFT);
3520 if ((prepost != 0) && ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0)) {
3521 uth = current_uthread();
3522 kwe = &uth->uu_kwe;
3523 updatebits += PTHRW_INC;
3524 kwe->kwe_psynchretval = updatebits;
3525 block = 0;
3526 }
3527 kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;
3528 failed = ksyn_wakeupreaders(kwq, low_writer, 0, 1, updatebits, &woken);
3529 #if _PSYNCH_TRACE_
3530 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 2, woken, failed, 0);
3531 #endif /* _PSYNCH_TRACE_ */
3532 if (failed != 0) {
3533 kwq->kw_pre_intrcount = failed; /* actually a count */
3534 kwq->kw_pre_intrseq = kwq->kw_highseq;
3535 kwq->kw_pre_intrretbits = updatebits;
3536 kwq->kw_pre_intrtype = PTH_RW_TYPE_READ;
3537 }
3538 error = 0;
3539 }
3540 } else {
3541 yielditis:
3542 /* no reads, so grant yielding writes */
3543 updatebits |= PTHRW_INC;
3544 updatebits |= PTH_RWL_KBIT | PTH_RWL_EBIT;
3545
3546 if (((flags & KW_UNLOCK_PREPOST_YWRLOCK) != 0) && (low_writer == premgen)) {
3547 /* preposting yielding write thread is being granted exclusive lock */
3548
3549 block = 0;
3550
3551 if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0)
3552 updatebits |= PTH_RWL_WBIT;
3553 else if (kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0)
3554 updatebits |= PTH_RWL_YBIT;
3555
3556 th = preth;
3557 uth = get_bsdthread_info(th);
3558 kwe = &uth->uu_kwe;
3559 kwe->kwe_psynchretval = updatebits;
3560 } else {
3561 /* we are granting yield writelock to some other thread */
3562 kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER], kwq);
3563
3564 if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0)
3565 updatebits |= PTH_RWL_WBIT;
3566 /* if there are ywriters present or a preposting ywrite thread then the Y bit is to be set */
3567 else if ((kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_YWRLOCK) != 0) )
3568 updatebits |= PTH_RWL_YBIT;
3569
3570 kwe->kwe_psynchretval = updatebits;
3571 kwe->kwe_kwqqueue = NULL;
3572
3573 kret = ksyn_wakeup_thread(kwq, kwe);
3574 #if _PSYNCH_TRACE_
3575 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 3, kret, 0, 0);
3576 #endif /* _PSYNCH_TRACE_ */
3577 #if __TESTPANICS__
3578 if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
3579 panic("kwq_handle_unlock: panic waking up ywriter\n");
3580 #endif /* __TESTPANICS__ */
3581 if (kret == KERN_NOT_WAITING) {
3582 kwq->kw_pre_intrcount = 1; /* actually a count */
3583 kwq->kw_pre_intrseq = low_ywriter;
3584 kwq->kw_pre_intrretbits = updatebits;
3585 kwq->kw_pre_intrtype = PTH_RW_TYPE_YWRITE;
3586 }
3587 error = 0;
3588 }
3589 kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;
3590 }
3591 }
3592 break;
3593
3594 default:
3595 panic("rwunlock: invalid type for lock grants");
3596
3597 };
3598
3599
3600 out:
3601 if (updatep != NULL)
3602 *updatep = updatebits;
3603 if (blockp != NULL)
3604 *blockp = block;
3605 #if _PSYNCH_TRACE_
3606 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_END, (uint32_t)kwq->kw_addr, 0, updatebits, block, 0);
3607 #endif /* _PSYNCH_TRACE_ */
3608 return(error);
3609 }
3610
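/*
 * Illustration only (not compiled): a minimal sketch of how the grant
 * word handed back in updatebits is composed in kwq_handle_unlock()
 * above. The count of granted waiters lives above PTHRW_COUNT_SHIFT
 * (it advances in units of PTHRW_INC), and the low bits carry the
 * lock-state flags: PTH_RWL_KBIT | PTH_RWL_EBIT for an exclusive
 * grant, PTH_RWL_WBIT when writers remain queued. The helper below is
 * a hypothetical userland model, not part of this file.
 */
#if 0 /* example sketch */
static uint32_t
make_write_grant(int writers_still_queued)
{
	uint32_t bits = 0;

	bits += PTHRW_INC;			/* one thread granted */
	bits |= PTH_RWL_KBIT | PTH_RWL_EBIT;	/* exclusive ownership */
	if (writers_still_queued)
		bits |= PTH_RWL_WBIT;		/* more writers are waiting */
	return (bits);
}
#endif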
3611 int
3612 kwq_handle_overlap(ksyn_wait_queue_t kwq, uint32_t lgenval, __unused uint32_t ugenval, uint32_t rw_wc, uint32_t *updatebitsp, __unused int flags , int * blockp)
3613 {
3614 uint32_t highword = kwq->kw_nextseqword & PTHRW_COUNT_MASK;
3615 uint32_t lowword = kwq->kw_lastseqword & PTHRW_COUNT_MASK;
3616 uint32_t val=0;
3617 int withinseq;
3618
3619
3620 /* overlap is set, so no need to check for valid state for overlap */
3621
3622 withinseq = ((is_seqlower_eq(rw_wc, highword) != 0) || (is_seqhigher_eq(lowword, rw_wc) != 0));
3623
3624 if (withinseq != 0) {
3625 if ((kwq->kw_nextseqword & PTH_RWL_LBIT) == 0) {
3626 /* if no writers ahead, overlap granted */
3627 if ((lgenval & PTH_RWL_WBIT) == 0) {
3628 goto grantoverlap;
3629 }
3630 } else {
3631 /* Lbit is set, and writers ahead do not count */
3632 goto grantoverlap;
3633 }
3634 }
3635
3636 *blockp = 1;
3637 return(0);
3638
3639 grantoverlap:
3640 /* increase the next expected seq by one */
3641 kwq->kw_nextseqword += PTHRW_INC;
3642 /* set the count to one, take the flag bits from nextseq, and add the M bit */
3643 val = PTHRW_INC;
3644 val |= ((kwq->kw_nextseqword & PTHRW_BIT_MASK) | PTH_RWL_MBIT);
3645 *updatebitsp = val;
3646 *blockp = 0;
3647 return(0);
3648 }
3649
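/*
 * Illustration only (not compiled): a minimal sketch of the window
 * test used in kwq_handle_overlap() above. A request sequence is
 * inside the overlap window when it is at or below the next expected
 * word, or when the last granted word is at or above it, in
 * wrap-aware sequence space.
 */
#if 0 /* example sketch */
static int
within_overlap_window(uint32_t seq, uint32_t lastword, uint32_t nextword)
{
	return ((is_seqlower_eq(seq, nextword) != 0) ||
	    (is_seqhigher_eq(lastword, seq) != 0));
}
#endif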
3650 #if NOTYET
3651 /* handle downgrade actions */
3652 int
3653 kwq_handle_downgrade(ksyn_wait_queue_t kwq, uint32_t mgen, __unused int flags, __unused uint32_t premgen, __unused int * blockp)
3654 {
3655 uint32_t updatebits, lowriter = 0;
3656 int longreadset, allreaders, count;
3657
3658 /* can handle downgrade now */
3659 updatebits = mgen;
3660
3661 longreadset = 0;
3662 allreaders = 0;
3663 if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count > 0) {
3664 lowriter = kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_firstnum;
3665 if (kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count > 0) {
3666 if (is_seqlower(kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_firstnum, lowriter) != 0)
3667 longreadset = 1;
3668 }
3669 } else {
3670 allreaders = 1;
3671 if (kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count > 0) {
3672 lowriter = kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_firstnum;
3673 if (kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count > 0) {
3674 if (is_seqlower(kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_firstnum, lowriter) != 0)
3675 longreadset = 1;
3676 }
3677 }
3678 }
3679
3680 count = ksyn_wakeupreaders(kwq, lowriter, longreadset, allreaders, updatebits, NULL);
3681 if (count != 0) {
3682 kwq->kw_pre_limrd = count;
3683 kwq->kw_pre_limrdseq = lowriter;
3684 kwq->kw_pre_limrdbits = updatebits;
3685 /* need to handle prepost */
3686 }
3687 return(0);
3688 }
3689
3690 #endif /* NOTYET */
3691
3692 /************* Indiv queue support routines ************************/
3693 void
3694 ksyn_queue_init(ksyn_queue_t kq)
3695 {
3696 TAILQ_INIT(&kq->ksynq_kwelist);
3697 kq->ksynq_count = 0;
3698 kq->ksynq_firstnum = 0;
3699 kq->ksynq_lastnum = 0;
3700 }
3701
3702 int
3703 ksyn_queue_insert(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t mgen, struct uthread * uth, ksyn_waitq_element_t kwe, int fit)
3704 {
3705 uint32_t lockseq = mgen & PTHRW_COUNT_MASK;
3706 ksyn_waitq_element_t q_kwe, r_kwe;
3707 int res = 0;
3708 uthread_t nuth = NULL;
3709
3710 if (kq->ksynq_count == 0) {
3711 TAILQ_INSERT_HEAD(&kq->ksynq_kwelist, kwe, kwe_list);
3712 kq->ksynq_firstnum = lockseq;
3713 kq->ksynq_lastnum = lockseq;
3714 goto out;
3715 }
3716
3717 if (fit == FIRSTFIT) {
3718 /* TBD: if retry bit is set for mutex, add it to the head */
3719 /* firstfit, arriving order */
3720 TAILQ_INSERT_TAIL(&kq->ksynq_kwelist, kwe, kwe_list);
3721 if (is_seqlower (lockseq, kq->ksynq_firstnum) != 0)
3722 kq->ksynq_firstnum = lockseq;
3723 if (is_seqhigher (lockseq, kq->ksynq_lastnum) != 0)
3724 kq->ksynq_lastnum = lockseq;
3725 goto out;
3726 }
3727
3728 if ((lockseq == kq->ksynq_firstnum) || (lockseq == kq->ksynq_lastnum)) {
3729 /* During prepost when a thread is getting cancelled, we could have two with the same seq */
3730 if (kwe->kwe_flags == KWE_THREAD_PREPOST) {
3731 q_kwe = ksyn_queue_find_seq(kwq, kq, lockseq, 0);
3732 if ((q_kwe != NULL) && ((nuth = (uthread_t)q_kwe->kwe_uth) != NULL) &&
3733 ((nuth->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL)) {
3734 TAILQ_INSERT_TAIL(&kq->ksynq_kwelist, kwe, kwe_list);
3735 goto out;
3736
3737 } else {
3738 __FAILEDUSERTEST__("ksyn_queue_insert: two threads with same lockseq ");
3739 res = EBUSY;
3740 goto out1;
3741 }
3742 } else {
3743 __FAILEDUSERTEST__("ksyn_queue_insert: two threads with same lockseq ");
3744 res = EBUSY;
3745 goto out1;
3746 }
3747 }
3748
3749 /* fast path: sequence higher than the current last, append at the tail */
3750 if (is_seqlower(kq->ksynq_lastnum, lockseq) != 0) {
3751 TAILQ_INSERT_TAIL(&kq->ksynq_kwelist, kwe, kwe_list);
3752 kq->ksynq_lastnum = lockseq;
3753 goto out;
3754 }
3755
3756 if (is_seqlower(lockseq, kq->ksynq_firstnum) != 0) {
3757 TAILQ_INSERT_HEAD(&kq->ksynq_kwelist, kwe, kwe_list);
3758 kq->ksynq_firstnum = lockseq;
3759 goto out;
3760 }
3761
3762 /* fall back to ordered (slow) insertion */
3763 TAILQ_FOREACH_SAFE(q_kwe, &kq->ksynq_kwelist, kwe_list, r_kwe) {
3764 if (is_seqhigher(q_kwe->kwe_lockseq, lockseq) != 0) {
3765 TAILQ_INSERT_BEFORE(q_kwe, kwe, kwe_list);
3766 goto out;
3767 }
3768 }
3769
3770 #if __TESTPANICS__
3771 panic("ksyn_queue_insert: failed to insert\n");
3772 #endif /* __TESTPANICS__ */
3773
3774 out:
3775 if (uth != NULL)
3776 kwe->kwe_uth = uth;
3777 kq->ksynq_count++;
3778 kwq->kw_inqueue++;
3779 update_low_high(kwq, lockseq);
3780 out1:
3781 return(res);
3782 }
3783
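/*
 * Illustration only (not compiled): the two insert policies handled
 * by ksyn_queue_insert() above. FIRSTFIT appends in arrival order
 * (the first-fit mutex policy), while SEQFIT keeps the queue sorted
 * by lock sequence so the lowest sequence is granted first. A
 * hypothetical caller:
 */
#if 0 /* example sketch */
/* mutex wait: arrival order is the grant order */
error = ksyn_queue_insert(kwq, kq, mgen, uth, kwe, FIRSTFIT);

/* rwlock/cvar wait: keep the queue ordered by sequence number */
error = ksyn_queue_insert(kwq, kq, mgen, uth, kwe, SEQFIT);
#endif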
3784 ksyn_waitq_element_t
3785 ksyn_queue_removefirst(ksyn_queue_t kq, ksyn_wait_queue_t kwq)
3786 {
3787 ksyn_waitq_element_t kwe = NULL;
3788 ksyn_waitq_element_t q_kwe;
3789 uint32_t curseq;
3790
3791 if (kq->ksynq_count != 0) {
3792 kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
3793 TAILQ_REMOVE(&kq->ksynq_kwelist, kwe, kwe_list);
3794 curseq = kwe->kwe_lockseq & PTHRW_COUNT_MASK;
3795 kq->ksynq_count--;
3796 kwq->kw_inqueue--;
3797
3798 if(kq->ksynq_count != 0) {
3799 q_kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
3800 kq->ksynq_firstnum = (q_kwe->kwe_lockseq & PTHRW_COUNT_MASK);
3801 } else {
3802 kq->ksynq_firstnum = 0;
3803 kq->ksynq_lastnum = 0;
3804
3805 }
3806 if (kwq->kw_inqueue == 0) {
3807 kwq->kw_lowseq = 0;
3808 kwq->kw_highseq = 0;
3809 } else {
3810 if (kwq->kw_lowseq == curseq)
3811 kwq->kw_lowseq = find_nextlowseq(kwq);
3812 if (kwq->kw_highseq == curseq)
3813 kwq->kw_highseq = find_nexthighseq(kwq);
3814 }
3815 }
3816 return(kwe);
3817 }
3818
3819 void
3820 ksyn_queue_removeitem(ksyn_wait_queue_t kwq, ksyn_queue_t kq, ksyn_waitq_element_t kwe)
3821 {
3822 ksyn_waitq_element_t q_kwe;
3823 uint32_t curseq;
3824
3825 if (kq->ksynq_count > 0) {
3826 TAILQ_REMOVE(&kq->ksynq_kwelist, kwe, kwe_list);
3827 kq->ksynq_count--;
3828 if(kq->ksynq_count != 0) {
3829 q_kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
3830 kq->ksynq_firstnum = (q_kwe->kwe_lockseq & PTHRW_COUNT_MASK);
3831 q_kwe = TAILQ_LAST(&kq->ksynq_kwelist, ksynq_kwelist_head);
3832 kq->ksynq_lastnum = (q_kwe->kwe_lockseq & PTHRW_COUNT_MASK);
3833 } else {
3834 kq->ksynq_firstnum = 0;
3835 kq->ksynq_lastnum = 0;
3836
3837 }
3838 kwq->kw_inqueue--;
3839 curseq = kwe->kwe_lockseq & PTHRW_COUNT_MASK;
3840 if (kwq->kw_inqueue == 0) {
3841 kwq->kw_lowseq = 0;
3842 kwq->kw_highseq = 0;
3843 } else {
3844 if (kwq->kw_lowseq == curseq)
3845 kwq->kw_lowseq = find_nextlowseq(kwq);
3846 if (kwq->kw_highseq == curseq)
3847 kwq->kw_highseq = find_nexthighseq(kwq);
3848 }
3849 }
3850 }
3851
3852 /* find the thread at seq and optionally remove it from the queue */
3853 ksyn_waitq_element_t
3854 ksyn_queue_find_seq(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t seq, int remove)
3855 {
3856 ksyn_waitq_element_t q_kwe, r_kwe;
3857
3858 /* TBD: bail out if higher seq is seen */
3859 /* handles the case where the sequence wraps in the tail of the queue */
3860 TAILQ_FOREACH_SAFE(q_kwe, &kq->ksynq_kwelist, kwe_list, r_kwe) {
3861 if ((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK) == seq) {
3862 if (remove != 0)
3863 ksyn_queue_removeitem(kwq, kq, q_kwe);
3864 return(q_kwe);
3865 }
3866 }
3867 return(NULL);
3868 }
3869
3870
3871 /* find the thread at the target sequence (or a broadcast/prepost at or above) */
3872 ksyn_waitq_element_t
3873 ksyn_queue_find_cvpreposeq(ksyn_queue_t kq, uint32_t cgen)
3874 {
3875 ksyn_waitq_element_t q_kwe, r_kwe;
3876 uint32_t lgen = (cgen & PTHRW_COUNT_MASK);
3877
3878 /* handles the case where the sequence wraps in the tail of the queue */
3879 TAILQ_FOREACH_SAFE(q_kwe, &kq->ksynq_kwelist, kwe_list, r_kwe) {
3880
3881 /* skip the lower entries */
3882 if (is_seqlower((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK), cgen) != 0)
3883 continue;
3884
3885 switch (q_kwe->kwe_flags) {
3886
3887 case KWE_THREAD_INWAIT:
3888 if ((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK) != lgen)
3889 break;
3890 /* fall thru */
3891
3892 case KWE_THREAD_BROADCAST:
3893 case KWE_THREAD_PREPOST:
3894 return (q_kwe);
3895 }
3896 }
3897 return(NULL);
3898 }
3899
3900 /* look for a thread to signal at or below uptoseq, preferring an exact match with signalseq */
3901 ksyn_waitq_element_t
3902 ksyn_queue_find_signalseq(__unused ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t uptoseq, uint32_t signalseq)
3903 {
3904 ksyn_waitq_element_t q_kwe, r_kwe, t_kwe = NULL;
3905
3906 /* handles the case where the sequence wraps in the tail of the queue */
3907 TAILQ_FOREACH_SAFE(q_kwe, &kq->ksynq_kwelist, kwe_list, r_kwe) {
3908
3909 switch (q_kwe->kwe_flags) {
3910
3911 case KWE_THREAD_PREPOST:
3912 if (is_seqhigher((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK), uptoseq))
3913 return t_kwe;
3914 /* fall thru */
3915
3916 case KWE_THREAD_BROADCAST:
3917 /* match any prepost at our same uptoseq or any broadcast above */
3918 if (is_seqlower((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK), uptoseq))
3919 continue;
3920 return q_kwe;
3921
3922 case KWE_THREAD_INWAIT:
3923 /*
3924 * Match any (non-cancelled) thread at or below our upto sequence -
3925 * but prefer an exact match to our signal sequence (if present) to
3926 * keep exact matches happening.
3927 */
3928 if (is_seqhigher((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK), uptoseq))
3929 return t_kwe;
3930
3931 if (q_kwe->kwe_kwqqueue == kwq) {
3932 uthread_t ut = q_kwe->kwe_uth;
3933 if ((ut->uu_flag & ( UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) != UT_CANCEL) {
3934 /* if equal or higher than our signal sequence, return this one */
3935 if (is_seqhigher_eq((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK), signalseq))
3936 return q_kwe;
3937
3938 /* otherwise, just remember this eligible thread and move on */
3939 if (t_kwe == NULL)
3940 t_kwe = q_kwe;
3941 }
3942 }
3943 break;
3944
3945 default:
3946 panic("ksyn_queue_find_signalseq(): unknown wait queue element type (%d)\n", q_kwe->kwe_flags);
3947 break;
3948 }
3949 }
3950 return t_kwe;
3951 }
3952
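/*
 * Worked example for ksyn_queue_find_signalseq() above (values
 * hypothetical): with INWAIT waiters queued at sequences 0x100, 0x200
 * and 0x300, uptoseq = 0x300 and signalseq = 0x200, the scan first
 * remembers 0x100 as a fallback (t_kwe), then returns the waiter at
 * 0x200 because it is at or above signalseq. If no waiter at or above
 * signalseq exists within uptoseq, the remembered fallback is
 * returned instead.
 */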
3953
3954 int
3955 ksyn_queue_move_tofree(ksyn_wait_queue_t ckwq, ksyn_queue_t kq, uint32_t upto, ksyn_queue_t kfreeq, int all, int release)
3956 {
3957 ksyn_waitq_element_t kwe;
3958 int count = 0;
3959 uint32_t tseq = upto & PTHRW_COUNT_MASK;
3960 #if _PSYNCH_TRACE_
3961 uthread_t ut;
3962 #endif /* _PSYNCH_TRACE_ */
3963
3964 ksyn_queue_init(kfreeq);
3965
3966 /* drain entries up to the target seq: fakes are collected for freeing, real waiters get spurious wakeups */
3967 kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
3968 while (kwe != NULL) {
3969 if ((all == 0) && (is_seqhigher((kwe->kwe_lockseq & PTHRW_COUNT_MASK), tseq) != 0))
3970 break;
3971 if (kwe->kwe_flags == KWE_THREAD_INWAIT) {
3972 /*
3973 * This scenario is typically noticed when the cvar is
3974 * reinited and the new waiters are waiting. We can
3975 * wake them with a spurious-wakeup indication so the
3976 * cvar state gets reset correctly.
3977 */
3978 #if _PSYNCH_TRACE_
3979 ut = (uthread_t)kwe->kwe_uth;
3980 #endif /* _PSYNCH_TRACE_ */
3981
3982 /* skip canceled ones */
3983 /* wake the rest */
3984 ksyn_queue_removeitem(ckwq, kq, kwe);
3985 /* set M bit to indicate to the waking CV to return the Inc val */
3986 kwe->kwe_psynchretval = PTHRW_INC | (PTH_RWS_CV_MBIT | PTH_RWL_MTX_WAIT);
3987 kwe->kwe_kwqqueue = NULL;
3988 #if _PSYNCH_TRACE_
3989 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVHBROAD | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xcafecaf3, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
3990 #endif /* _PSYNCH_TRACE_ */
3991 (void)ksyn_wakeup_thread(ckwq, kwe);
3992 } else {
3993 ksyn_queue_removeitem(ckwq, kq, kwe);
3994 TAILQ_INSERT_TAIL(&kfreeq->ksynq_kwelist, kwe, kwe_list);
3995 ckwq->kw_fakecount--;
3996 count++;
3997 }
3998 kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
3999 }
4000
4001 if ((release != 0) && (count != 0)) {
4002 kwe = TAILQ_FIRST(&kfreeq->ksynq_kwelist);
4003 while (kwe != NULL) {
4004 TAILQ_REMOVE(&kfreeq->ksynq_kwelist, kwe, kwe_list);
4005 zfree(kwe_zone, kwe);
4006 kwe = TAILQ_FIRST(&kfreeq->ksynq_kwelist);
4007 }
4008 }
4009
4010 return(count);
4011 }
4012
4013 /*************************************************************************/
4014
4015 void
4016 update_low_high(ksyn_wait_queue_t kwq, uint32_t lockseq)
4017 {
4018 if (kwq->kw_inqueue == 1) {
4019 kwq->kw_lowseq = lockseq;
4020 kwq->kw_highseq = lockseq;
4021 } else {
4022 if (is_seqlower(lockseq, kwq->kw_lowseq) != 0)
4023 kwq->kw_lowseq = lockseq;
4024 if (is_seqhigher(lockseq, kwq->kw_highseq) != 0)
4025 kwq->kw_highseq = lockseq;
4026 }
4027 }
4028
4029 uint32_t
4030 find_nextlowseq(ksyn_wait_queue_t kwq)
4031 {
4032 uint32_t numbers[KSYN_QUEUE_MAX];
4033 int count = 0, i;
4034 uint32_t lowest;
4035
4036 for (i = 0; i < KSYN_QUEUE_MAX; i++) {
4037 if (kwq->kw_ksynqueues[i].ksynq_count != 0) {
4038 numbers[count]= kwq->kw_ksynqueues[i].ksynq_firstnum;
4039 count++;
4040 }
4041 }
4042
4043 if (count == 0)
4044 return(0);
4045 lowest = numbers[0];
4046 if (count > 1) {
4047 for (i = 1; i < count; i++) {
4048 if (is_seqlower(numbers[i], lowest) != 0)
4049 lowest = numbers[i];
4050
4051 }
4052 }
4053 return(lowest);
4054 }
4055
4056 uint32_t
4057 find_nexthighseq(ksyn_wait_queue_t kwq)
4058 {
4059 uint32_t numbers[KSYN_QUEUE_MAX];
4060 int count = 0, i;
4061 uint32_t highest;
4062
4063 for (i = 0; i < KSYN_QUEUE_MAX; i++) {
4064 if (kwq->kw_ksynqueues[i].ksynq_count != 0) {
4065 numbers[count]= kwq->kw_ksynqueues[i].ksynq_lastnum;
4066 count++;
4067 }
4068 }
4069
4070
4071
4072 if (count == 0)
4073 return(0);
4074 highest = numbers[0];
4075 if (count > 1) {
4076 for (i = 1; i < count; i++) {
4077 if (is_seqhigher(numbers[i], highest) != 0)
4078 highest = numbers[i];
4079
4080 }
4081 }
4082 return(highest);
4083 }
4084
4085 int
4086 is_seqlower(uint32_t x, uint32_t y)
4087 {
4088 if (x < y) {
4089 if ((y-x) < (PTHRW_MAX_READERS/2))
4090 return(1);
4091 } else {
4092 if ((x-y) > (PTHRW_MAX_READERS/2))
4093 return(1);
4094 }
4095 return(0);
4096 }
4097
4098 int
4099 is_seqlower_eq(uint32_t x, uint32_t y)
4100 {
4101 if (x==y)
4102 return(1);
4103 else
4104 return(is_seqlower(x,y));
4105 }
4106
4107 int
4108 is_seqhigher(uint32_t x, uint32_t y)
4109 {
4110 if (x > y) {
4111 if ((x-y) < (PTHRW_MAX_READERS/2))
4112 return(1);
4113 } else {
4114 if ((y-x) > (PTHRW_MAX_READERS/2))
4115 return(1);
4116 }
4117 return(0);
4118 }
4119
4120 int
4121 is_seqhigher_eq(uint32_t x, uint32_t y)
4122 {
4123 if (x==y)
4124 return(1);
4125 else
4126 return(is_seqhigher(x,y));
4127 }
4128
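/*
 * Illustration only (not compiled): the comparisons above treat the
 * 32-bit sequence space as circular, with half of PTHRW_MAX_READERS
 * as the wrap threshold. Assuming PTHRW_MAX_READERS spans most of the
 * 32-bit range, both of these hold:
 */
#if 0 /* example sketch */
/* plain case: forward distance 0x100 is small, so 0x100 < 0x200 */
assert(is_seqlower(0x100, 0x200) == 1);

/* wrapped case: 0xfffffe00 has recently wrapped relative to 0x100,
 * so it still compares as lower even though it is numerically larger */
assert(is_seqlower(0xfffffe00, 0x100) == 1);
#endif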
4129
4130 int
4131 find_diff(uint32_t upto, uint32_t lowest)
4132 {
4133 uint32_t diff;
4134
4135 if (upto == lowest)
4136 return(0);
4137 #if 0
4138 diff = diff_genseq(upto, lowest);
4139 #else
4140 if (is_seqlower(upto, lowest) != 0)
4141 diff = diff_genseq(lowest, upto);
4142 else
4143 diff = diff_genseq(upto, lowest);
4144 #endif
4145 diff = (diff >> PTHRW_COUNT_SHIFT);
4146 return(diff);
4147 }
4148
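/*
 * Worked example for find_diff() above (values hypothetical):
 * sequence counts advance in units of PTHRW_INC, with
 * PTHRW_COUNT_SHIFT bits of flag space below the count. With
 * upto = 0x500 and lowest = 0x200, diff_genseq() yields 0x300, and
 * shifting right by PTHRW_COUNT_SHIFT gives 3: three waiters lie
 * between the two sequences, assuming PTHRW_COUNT_SHIFT is 8
 * (i.e. PTHRW_INC == 0x100).
 */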
4149
4150 int
4151 find_seq_till(ksyn_wait_queue_t kwq, uint32_t upto, uint32_t nwaiters, uint32_t *countp)
4152 {
4153 int i;
4154 uint32_t count = 0;
4155
4156
4157 #if _PSYNCH_TRACE_
4158 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_FSEQTILL | DBG_FUNC_START, 0, 0, upto, nwaiters, 0);
4159 #endif /* _PSYNCH_TRACE_ */
4160
4161 for (i = 0; i < KSYN_QUEUE_MAX; i++) {
4162 count += ksyn_queue_count_tolowest(&kwq->kw_ksynqueues[i], upto);
4163 #if _PSYNCH_TRACE_
4164 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_FSEQTILL | DBG_FUNC_NONE, 0, 1, i, count, 0);
4165 #endif /* _PSYNCH_TRACE_ */
4166 if (count >= nwaiters) {
4167 break;
4168 }
4169 }
4170
4171 if (countp != NULL) {
4172 *countp = count;
4173 }
4174 #if _PSYNCH_TRACE_
4175 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_FSEQTILL | DBG_FUNC_END, 0, 0, count, nwaiters, 0);
4176 #endif /* _PSYNCH_TRACE_ */
4177 if (count == 0)
4178 return(0);
4179 else if (count >= nwaiters)
4180 return(1);
4181 else
4182 return(0);
4183 }
4184
4185
4186 uint32_t
4187 ksyn_queue_count_tolowest(ksyn_queue_t kq, uint32_t upto)
4188 {
4189 uint32_t i = 0;
4190 ksyn_waitq_element_t kwe, newkwe;
4191 uint32_t curval;
4192
4193 /* if nothing or the first num is greater than upto, return none */
4194 if ((kq->ksynq_count == 0) || (is_seqhigher(kq->ksynq_firstnum, upto) != 0))
4195 return(0);
4196 if (upto == kq->ksynq_firstnum)
4197 return(1);
4198
4199 TAILQ_FOREACH_SAFE(kwe, &kq->ksynq_kwelist, kwe_list, newkwe) {
4200 curval = (kwe->kwe_lockseq & PTHRW_COUNT_MASK);
4201 if (upto == curval) {
4202 i++;
4203 break;
4204 } else if (is_seqhigher(curval, upto) != 0) {
4205 break;
4206 } else {
4207 /* seq is lower */
4208 i++;
4209 }
4210 }
4211 return(i);
4212 }
4213
4214
4215 /* handles the cond broadcast of the cvar; passes back the woken-thread count and syscall-return bits via updatep */
4216 void
4217 ksyn_handle_cvbroad(ksyn_wait_queue_t ckwq, uint32_t upto, uint32_t * updatep)
4218 {
4219 kern_return_t kret;
4220 ksyn_queue_t kq;
4221 ksyn_waitq_element_t kwe, newkwe;
4222 uint32_t updatebits = 0;
4223 struct ksyn_queue kfreeq;
4224 uthread_t ut;
4225
4226 #if _PSYNCH_TRACE_
4227 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVHBROAD | DBG_FUNC_START, 0xcbcbcbc2, upto, 0, 0, 0);
4228 #endif /* _PSYNCH_TRACE_ */
4229
4230 ksyn_queue_init(&kfreeq);
4231 kq = &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER];
4232
4233 retry:
4234 TAILQ_FOREACH_SAFE(kwe, &kq->ksynq_kwelist, kwe_list, newkwe) {
4235
4236 if (is_seqhigher((kwe->kwe_lockseq & PTHRW_COUNT_MASK), upto)) /* outside our range */
4237 break;
4238
4239 /* now handle the one we found (inside the range) */
4240 switch (kwe->kwe_flags) {
4241
4242 case KWE_THREAD_INWAIT:
4243 ut = (uthread_t)kwe->kwe_uth;
4244
4245 /* skip canceled ones */
4246 if (kwe->kwe_kwqqueue != ckwq ||
4247 (ut->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL)
4248 break;
4249
4250 /* wake the rest */
4251 ksyn_queue_removeitem(ckwq, kq, kwe);
4252 kwe->kwe_psynchretval = PTH_RWL_MTX_WAIT;
4253 kwe->kwe_kwqqueue = NULL;
4254 #if _PSYNCH_TRACE_
4255 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVHBROAD | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xcafecaf2, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
4256 #endif /* _PSYNCH_TRACE_ */
4257 kret = ksyn_wakeup_thread(ckwq, kwe);
4258 #if __TESTPANICS__
4259 if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
4260 panic("ksyn_handle_cvbroad: panic waking up waiter\n");
4261 #endif /* __TESTPANICS__ */
4262 updatebits += PTHRW_INC;
4263 break;
4264
4265 case KWE_THREAD_BROADCAST:
4266 case KWE_THREAD_PREPOST:
4267 ksyn_queue_removeitem(ckwq, kq, kwe);
4268 TAILQ_INSERT_TAIL(&kfreeq.ksynq_kwelist, kwe, kwe_list);
4269 ckwq->kw_fakecount--;
4270 break;
4271
4272 default:
4273 panic("unknown kweflags\n");
4274 break;
4275 }
4276 }
4277
4278 /* Need to enter a broadcast in the queue (if not already at L == S) */
4279
4280 if ((ckwq->kw_lword & PTHRW_COUNT_MASK) != (ckwq->kw_sword & PTHRW_COUNT_MASK)) {
4281
4282 newkwe = TAILQ_FIRST(&kfreeq.ksynq_kwelist);
4283 if (newkwe == NULL) {
4284 ksyn_wqunlock(ckwq);
4285 newkwe = (ksyn_waitq_element_t)zalloc(kwe_zone);
4286 TAILQ_INSERT_TAIL(&kfreeq.ksynq_kwelist, newkwe, kwe_list);
4287 ksyn_wqlock(ckwq);
4288 goto retry;
4289 }
4290
4291 TAILQ_REMOVE(&kfreeq.ksynq_kwelist, newkwe, kwe_list);
4292 bzero(newkwe, sizeof(struct ksyn_waitq_element));
4293 newkwe->kwe_kwqqueue = ckwq;
4294 newkwe->kwe_flags = KWE_THREAD_BROADCAST;
4295 newkwe->kwe_lockseq = upto;
4296 newkwe->kwe_count = 0;
4297 newkwe->kwe_uth = NULL;
4298 newkwe->kwe_psynchretval = 0;
4299
4300 #if _PSYNCH_TRACE_
4301 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVHBROAD | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xfeedfeed, upto, 0, 0);
4302 #endif /* _PSYNCH_TRACE_ */
4303
4304 (void)ksyn_queue_insert(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], upto, NULL, newkwe, SEQFIT);
4305 ckwq->kw_fakecount++;
4306 }
4307
4308 /* free up any remaining things stumbled across above */
4309 kwe = TAILQ_FIRST(&kfreeq.ksynq_kwelist);
4310 while (kwe != NULL) {
4311 TAILQ_REMOVE(&kfreeq.ksynq_kwelist, kwe, kwe_list);
4312 zfree(kwe_zone, kwe);
4313 kwe = TAILQ_FIRST(&kfreeq.ksynq_kwelist);
4314 }
4315
4316 if (updatep != NULL)
4317 *updatep = updatebits;
4318
4319 #if _PSYNCH_TRACE_
4320 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVHBROAD | DBG_FUNC_END, 0xeeeeeeed, updatebits, 0, 0, 0);
4321 #endif /* _PSYNCH_TRACE_ */
4322 }
4323
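/*
 * Illustration only (not compiled): ksyn_handle_cvbroad() above
 * avoids allocating while holding the kwq lock, so when the local
 * free list is empty it drops the lock, allocates, retakes the lock
 * and rescans from "retry", since the queue may have changed while
 * the lock was dropped. The shape of that pattern:
 */
#if 0 /* example sketch */
retry:
	/* ... scan the queue under the lock ... */
	if (newkwe == NULL) {
		ksyn_wqunlock(ckwq);		/* drop the lock */
		newkwe = (ksyn_waitq_element_t)zalloc(kwe_zone); /* may block */
		ksyn_wqlock(ckwq);		/* retake the lock */
		goto retry;			/* state may have changed */
	}
#endif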
4324 void
4325 ksyn_cvupdate_fixup(ksyn_wait_queue_t ckwq, uint32_t *updatep, ksyn_queue_t kfreeq, int release)
4326 {
4327 uint32_t updatebits = 0;
4328
4329 if (updatep != NULL)
4330 updatebits = *updatep;
4331 if ((ckwq->kw_lword & PTHRW_COUNT_MASK) == (ckwq->kw_sword & PTHRW_COUNT_MASK)) {
4332 updatebits |= PTH_RWS_CV_CBIT;
4333 if (ckwq->kw_inqueue != 0) {
4334 /* FREE THE QUEUE */
4335 ksyn_queue_move_tofree(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], ckwq->kw_lword, kfreeq, 0, release);
4336 #if __TESTPANICS__
4337 if (ckwq->kw_inqueue != 0)
4338 panic("ksyn_cvupdate_fixup: L == S, but entries in queue beyond S");
4339 #endif /* __TESTPANICS__ */
4340 }
4341 ckwq->kw_lword = ckwq->kw_uword = ckwq->kw_sword = 0;
4342 ckwq->kw_kflags |= KSYN_KWF_ZEROEDOUT;
4343 } else if ((ckwq->kw_inqueue != 0) && (ckwq->kw_fakecount == ckwq->kw_inqueue)) {
4344 /* only fake entries are present in the queue */
4345 updatebits |= PTH_RWS_CV_PBIT;
4346 }
4347 if (updatep != NULL)
4348 *updatep = updatebits;
4349 }
4350
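/*
 * Worked example for ksyn_cvupdate_fixup() above (values
 * hypothetical): if the lock word L and the signal word S carry the
 * same count (L == S), every signal has been consumed, so the C bit
 * is set, any entries still queued are freed, and the cvar words are
 * zeroed so the next waiter starts from a fresh state. If only fake
 * (prepost/broadcast) entries remain queued, the P bit is set
 * instead.
 */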
4351 void
4352 psynch_zoneinit(void)
4353 {
4354 kwq_zone = (zone_t)zinit(sizeof(struct ksyn_wait_queue), 8192 * sizeof(struct ksyn_wait_queue), 4096, "ksyn_waitqueue zone");
4355 kwe_zone = (zone_t)zinit(sizeof(struct ksyn_waitq_element), 8192 * sizeof(struct ksyn_waitq_element), 4096, "ksyn_waitq_element zone");
4356 }
4357 #endif /* PSYNCH */