/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995-2005 Apple Computer, Inc. All Rights Reserved */
/*
 * pthread_support.c
 */

#if PSYNCH

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/systm.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/time.h>
#include <sys/acct.h>
#include <sys/kernel.h>
#include <sys/wait.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/stat.h>
#include <sys/lock.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>
#include <sys/pthread_internal.h>
#include <sys/vm.h>
#include <sys/user.h>

#include <mach/mach_types.h>
#include <mach/vm_prot.h>
#include <mach/semaphore.h>
#include <mach/sync_policy.h>
#include <mach/task.h>
#include <kern/kern_types.h>
#include <kern/task.h>
#include <kern/clock.h>
#include <mach/kern_return.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/thread_call.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/processor.h>
#include <kern/affinity.h>
#include <kern/wait_queue.h>
#include <kern/mach_param.h>
#include <mach/mach_vm.h>
#include <mach/mach_param.h>
#include <mach/thread_policy.h>
#include <mach/message.h>
#include <mach/port.h>
#include <vm/vm_protos.h>
#include <vm/vm_map.h>
#include <mach/vm_region.h>

#include <libkern/OSAtomic.h>

#include <pexpert/pexpert.h>

#define __PSYNCH_DEBUG__ 0      /* debug panic actions */
#define _PSYNCH_TRACE_ 1        /* kdebug trace */

#define __TESTMODE__ 2          /* 0 - return error on user error conditions */
                                /* 1 - log error on user error conditions */
                                /* 2 - abort caller on user error conditions */
                                /* 3 - panic on user error conditions */
static int __test_panics__;
static int __test_aborts__;
static int __test_prints__;

static inline void __FAILEDUSERTEST__(const char *str)
{
    proc_t p;

    if (__test_panics__ != 0)
        panic(str);

    if (__test_aborts__ != 0 || __test_prints__ != 0)
        p = current_proc();

    if (__test_prints__ != 0)
        printf("PSYNCH: pid[%d]: %s\n", p->p_pid, str);

    if (__test_aborts__ != 0)
        psignal(p, SIGABRT);
}
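
/*
 * A minimal sketch (an assumption for illustration, not code from this
 * file) of how the __TESTMODE__ setting above could be mapped onto the
 * three flags consumed by __FAILEDUSERTEST__(); in the kernel the actual
 * wiring happens at subsystem initialization, which is outside this
 * excerpt. The helper name is hypothetical.
 */
#if 0
static void __test_mode_init(void)
{
    switch (__TESTMODE__) {
    case 3:                     /* panic on user error conditions */
        __test_panics__ = 1;
        break;
    case 2:                     /* abort caller on user error conditions */
        __test_aborts__ = 1;
        break;
    case 1:                     /* log error on user error conditions */
        __test_prints__ = 1;
        break;
    default:                    /* 0 - just return an error */
        break;
    }
}
#endif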

#if _PSYNCH_TRACE_
#define _PSYNCH_TRACE_MLWAIT 0x9000000
#define _PSYNCH_TRACE_MLDROP 0x9000004
#define _PSYNCH_TRACE_CVWAIT 0x9000008
#define _PSYNCH_TRACE_CVSIGNAL 0x900000c
#define _PSYNCH_TRACE_CVBROAD 0x9000010
#define _PSYNCH_TRACE_KMDROP 0x9000014
#define _PSYNCH_TRACE_RWRDLOCK 0x9000018
#define _PSYNCH_TRACE_RWLRDLOCK 0x900001c
#define _PSYNCH_TRACE_RWWRLOCK 0x9000020
#define _PSYNCH_TRACE_RWYWRLOCK 0x9000024
#define _PSYNCH_TRACE_RWUPGRADE 0x9000028
#define _PSYNCH_TRACE_RWDOWNGRADE 0x900002c
#define _PSYNCH_TRACE_RWUNLOCK 0x9000030
#define _PSYNCH_TRACE_RWUNLOCK2 0x9000034
#define _PSYNCH_TRACE_RWHANDLEU 0x9000038
#define _PSYNCH_TRACE_FSEQTILL 0x9000040
#define _PSYNCH_TRACE_CLRPRE 0x9000044
#define _PSYNCH_TRACE_CVHBROAD 0x9000048
#define _PSYNCH_TRACE_CVSEQ 0x900004c
#define _PSYNCH_TRACE_THWAKEUP 0x9000050
/* user side */
#define _PSYNCH_TRACE_UM_LOCK 0x9000060
#define _PSYNCH_TRACE_UM_UNLOCK 0x9000064
#define _PSYNCH_TRACE_UM_MHOLD 0x9000068
#define _PSYNCH_TRACE_UM_MDROP 0x900006c
#define _PSYNCH_TRACE_UM_CVWAIT 0x9000070
#define _PSYNCH_TRACE_UM_CVSIG 0x9000074
#define _PSYNCH_TRACE_UM_CVBRD 0x9000078

proc_t pthread_debug_proc = PROC_NULL;
static inline void __PTHREAD_TRACE_DEBUG(uint32_t debugid, uintptr_t arg1,
                                         uintptr_t arg2,
                                         uintptr_t arg3,
                                         uintptr_t arg4,
                                         uintptr_t arg5)
{
    proc_t p = current_proc();

    if ((pthread_debug_proc != NULL) && (p == pthread_debug_proc))
        KERNEL_DEBUG_CONSTANT(debugid, arg1, arg2, arg3, arg4, arg5);
}
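
/*
 * Note that tracing stays silent unless pthread_debug_proc is pointed at a
 * process: some external agent (a debugger or sysctl handler elsewhere in
 * the kernel, not shown in this excerpt) is expected to store the proc_t
 * of interest there, after which only that process emits kdebug records.
 */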

#endif /* _PSYNCH_TRACE_ */

#define ECVCERORR 256
#define ECVPERORR 512

lck_mtx_t * pthread_list_mlock;

#define PTHHASH(addr) (&pthashtbl[(addr) & pthhash])
extern LIST_HEAD(pthhashhead, ksyn_wait_queue) *pth_glob_hashtbl;
struct pthhashhead * pth_glob_hashtbl;
u_long pthhash;

LIST_HEAD(, ksyn_wait_queue) pth_free_list;
int num_total_kwq = 0;          /* number of kwq in use currently */
int num_infreekwq = 0;          /* number of kwq in free list */
int num_freekwq = 0;            /* number of kwq actually freed from the free list */
int num_reusekwq = 0;           /* number of kwq pulled back for reuse from free list */
int num_addedfreekwq = 0;       /* number of added free kwq from the last instance */
int num_lastfreekwqcount = 0;   /* the free count from the last time */

static int PTH_HASHSIZE = 100;

static zone_t kwq_zone;         /* zone for allocation of ksyn_queue */
static zone_t kwe_zone;         /* zone for allocation of ksyn_waitq_element */

#define SEQFIT 0
#define FIRSTFIT 1

struct ksyn_queue {
    TAILQ_HEAD(ksynq_kwelist_head, ksyn_waitq_element) ksynq_kwelist;
    uint32_t ksynq_count;       /* number of entries in queue */
    uint32_t ksynq_firstnum;    /* lowest seq in queue */
    uint32_t ksynq_lastnum;     /* highest seq in queue */
};
typedef struct ksyn_queue * ksyn_queue_t;

#define KSYN_QUEUE_READ 0
#define KSYN_QUEUE_LREAD 1
#define KSYN_QUEUE_WRITER 2
#define KSYN_QUEUE_YWRITER 3
#define KSYN_QUEUE_UPGRADE 4
#define KSYN_QUEUE_MAX 5

struct ksyn_wait_queue {
    LIST_ENTRY(ksyn_wait_queue) kw_hash;
    LIST_ENTRY(ksyn_wait_queue) kw_list;
    user_addr_t kw_addr;
    uint64_t kw_owner;
    uint64_t kw_object;         /* object backing in shared mode */
    uint64_t kw_offset;         /* offset inside the object in shared mode */
    int kw_flags;               /* mutex, cvar options/flags */
    int kw_pflags;              /* flags under listlock protection */
    struct timeval kw_ts;       /* timeval need for upkeep before free */
    int kw_iocount;             /* inuse reference */
    int kw_dropcount;           /* current users unlocking... */

    int kw_type;                /* queue type like mutex, cvar, etc */
    uint32_t kw_inqueue;        /* num of waiters held */
    uint32_t kw_fakecount;      /* number of error/prepost fakes */
    uint32_t kw_highseq;        /* highest seq in the queue */
    uint32_t kw_lowseq;         /* lowest seq in the queue */
    uint32_t kw_lword;          /* L value from userland */
    uint32_t kw_uword;          /* U word value from userland */
    uint32_t kw_sword;          /* S word value from userland */
    uint32_t kw_lastunlockseq;  /* the last seq that unlocked */
    /* for CV to be used as the seq kernel has seen so far */
#define kw_cvkernelseq kw_lastunlockseq
    uint32_t kw_lastseqword;    /* the last seq that unlocked */
    /* for mutex and cvar we need to track I bit values */
    uint32_t kw_nextseqword;    /* the last seq that unlocked; with num of waiters */
#define kw_initrecv kw_nextseqword  /* number of incoming waiters with Ibit seen so far */
    uint32_t kw_overlapwatch;   /* chance for overlaps */
#define kw_initcount kw_overlapwatch    /* number of incoming waiters with Ibit expected */
    uint32_t kw_initcountseq;   /* highest seq with Ibit on for mutex and cvar */
    uint32_t kw_pre_rwwc;       /* prepost count */
    uint32_t kw_pre_lockseq;    /* prepost target seq */
    uint32_t kw_pre_sseq;       /* prepost target sword, in cvar used for mutexowned */
    uint32_t kw_pre_intrcount;  /* prepost of missed wakeup due to intrs */
    uint32_t kw_pre_intrseq;    /* prepost of missed wakeup limit seq */
    uint32_t kw_pre_intrretbits;    /* return bits value for missed wakeup threads */
    uint32_t kw_pre_intrtype;   /* type of failed wakeups */

    int kw_kflags;
    struct ksyn_queue kw_ksynqueues[KSYN_QUEUE_MAX];    /* queues to hold threads */
    lck_mtx_t kw_lock;          /* mutex lock protecting this structure */
};
typedef struct ksyn_wait_queue * ksyn_wait_queue_t;

#define PTHRW_INC 0x100
#define PTHRW_BIT_MASK 0x000000ff

#define PTHRW_COUNT_SHIFT 8
#define PTHRW_COUNT_MASK 0xffffff00
#define PTHRW_MAX_READERS 0xffffff00

/* New model bits on Lword */
#define PTH_RWL_KBIT 0x01       /* users cannot acquire in user mode */
#define PTH_RWL_EBIT 0x02       /* exclusive lock in progress */
#define PTH_RWL_WBIT 0x04       /* write waiters pending in kernel */
#define PTH_RWL_PBIT 0x04       /* prepost (cv) pending in kernel */
#define PTH_RWL_YBIT 0x08       /* yielding write waiters pending in kernel */
#define PTH_RWL_RETRYBIT 0x08   /* mutex retry wait */
#define PTH_RWL_LBIT 0x10       /* long read in progress */
#define PTH_RWL_MTXNONE 0x10    /* indicates the cvwait does not have mutex held */
#define PTH_RWL_UBIT 0x20       /* upgrade request pending */
#define PTH_RWL_MTX_WAIT 0x20   /* in cvar in mutex wait */
#define PTH_RWL_RBIT 0x40       /* reader pending in kernel (not used) */
#define PTH_RWL_MBIT 0x40       /* overlapping grants from kernel */
#define PTH_RWL_TRYLKBIT 0x40   /* trylock attempt (mutex only) */
#define PTH_RWL_IBIT 0x80       /* lock reset, held until first successful unlock */
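
/*
 * Example of how these bits are composed in practice: when the kernel
 * grants a contended mutex below, the value handed back to the waiter is
 * (grant_seq & PTHRW_COUNT_MASK) | PTH_RWL_KBIT | PTH_RWL_EBIT, i.e. the
 * grant sequence in the top 24 bits with the kernel and exclusive bits set
 * in the low byte.
 */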


/* UBIT values for mutex, cvar */
#define PTH_RWU_SBIT 0x01
#define PTH_RWU_BBIT 0x02

#define PTHRW_RWL_INIT PTH_RWL_IBIT     /* reset state on the lock bits (U) */

/* New model bits on Sword */
#define PTH_RWS_SBIT 0x01       /* kernel transition seq not set yet */
#define PTH_RWS_IBIT 0x02       /* Sequence is not set on return from kernel */
#define PTH_RWS_CV_CBIT PTH_RWS_SBIT    /* kernel has cleared all info w.r.t. CV */
#define PTH_RWS_CV_PBIT PTH_RWS_IBIT    /* kernel has prepost/fake structs only, no waiters */
#define PTH_RWS_CV_MBIT PTH_RWL_MBIT    /* to indicate prepost return */
#define PTH_RWS_WSVBIT 0x04     /* save W bit */
#define PTH_RWS_USVBIT 0x08     /* save U bit */
#define PTH_RWS_YSVBIT 0x10     /* save Y bit */
#define PTHRW_RWS_INIT PTH_RWS_SBIT     /* reset on the lock bits (U) */
#define PTHRW_RWS_SAVEMASK (PTH_RWS_WSVBIT|PTH_RWS_USVBIT|PTH_RWS_YSVBIT)   /* save bits mask */
#define PTHRW_SW_Reset_BIT_MASK 0x000000fe      /* remove S bit and get rest of the bits */

#define PTHRW_UN_BIT_MASK 0x000000bf    /* remove overlap bit */


#define PTHREAD_MTX_TID_SWITCHING (uint64_t)-1

/* new L word defns */
#define is_rwl_readinuser(x) ((((x) & (PTH_RWL_UBIT | PTH_RWL_KBIT)) == 0) || (((x) & PTH_RWL_LBIT) != 0))
#define is_rwl_ebit_set(x) (((x) & PTH_RWL_EBIT) != 0)
#define is_rwl_lbit_set(x) (((x) & PTH_RWL_LBIT) != 0)
#define is_rwl_readoverlap(x) (((x) & PTH_RWL_MBIT) != 0)
#define is_rw_ubit_set(x) (((x) & PTH_RWL_UBIT) != 0)

/* S word checks */
#define is_rws_setseq(x) (((x) & PTH_RWS_SBIT))
#define is_rws_setunlockinit(x) (((x) & PTH_RWS_IBIT))

/* first contended seq that kernel sees */
#define KW_MTXFIRST_KSEQ 0x200
#define KW_CVFIRST_KSEQ 1
#define KW_RWFIRST_KSEQ 0x200

int is_seqlower(uint32_t x, uint32_t y);
int is_seqlower_eq(uint32_t x, uint32_t y);
int is_seqhigher(uint32_t x, uint32_t y);
int is_seqhigher_eq(uint32_t x, uint32_t y);
int find_diff(uint32_t upto, uint32_t lowest);
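
/*
 * The comparison helpers above are defined later in this file; the
 * following is a sketch of the intended semantics (an illustration under
 * that assumption, not the definitive implementation): two 24-bit counts
 * are compared modulo wraparound, so x is "lower" than y when the forward
 * distance from x to y is shorter than half the sequence space.
 */
#if 0
static int is_seqlower_sketch(uint32_t x, uint32_t y)
{
    if (x < y)
        return ((y - x) < (PTHRW_MAX_READERS / 2));     /* no wrap between them */
    else
        return ((x - y) > (PTHRW_MAX_READERS / 2));     /* y has wrapped past x */
}
#endif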


static inline int diff_genseq(uint32_t x, uint32_t y) {
    if (x > y) {
        return (x - y);
    } else {
        return ((PTHRW_MAX_READERS - y) + x + PTHRW_INC);
    }
}
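
/*
 * Worked example: sequence counts live in the top 24 bits and advance by
 * PTHRW_INC (0x100). With y = 0xffffff00 (the maximum count) and
 * x = 0x300, the counter has wrapped, so diff_genseq(x, y) returns
 * (0xffffff00 - 0xffffff00) + 0x300 + 0x100 = 0x400, i.e. four increments.
 */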

#define TID_ZERO (uint64_t)0

/* bits needed in handling the rwlock unlock */
#define PTH_RW_TYPE_READ 0x01
#define PTH_RW_TYPE_LREAD 0x02
#define PTH_RW_TYPE_WRITE 0x04
#define PTH_RW_TYPE_YWRITE 0x08
#define PTH_RW_TYPE_UPGRADE 0x10
#define PTH_RW_TYPE_MASK 0xff
#define PTH_RW_TYPE_SHIFT 8

#define PTH_RWSHFT_TYPE_READ 0x0100
#define PTH_RWSHFT_TYPE_LREAD 0x0200
#define PTH_RWSHFT_TYPE_WRITE 0x0400
#define PTH_RWSHFT_TYPE_YWRITE 0x0800
#define PTH_RWSHFT_TYPE_MASK 0xff00

/*
 * Mutex protocol attributes
 */
#define PTHREAD_PRIO_NONE 0
#define PTHREAD_PRIO_INHERIT 1
#define PTHREAD_PRIO_PROTECT 2
#define PTHREAD_PROTOCOL_FLAGS_MASK 0x3

/*
 * Mutex type attributes
 */
#define PTHREAD_MUTEX_NORMAL 0
#define PTHREAD_MUTEX_ERRORCHECK 4
#define PTHREAD_MUTEX_RECURSIVE 8
#define PTHREAD_MUTEX_DEFAULT PTHREAD_MUTEX_NORMAL
#define PTHREAD_TYPE_FLAGS_MASK 0xc

/*
 * Mutex pshared attributes
 */
#define PTHREAD_PROCESS_SHARED 0x10
#define PTHREAD_PROCESS_PRIVATE 0x20
#define PTHREAD_PSHARED_FLAGS_MASK 0x30

/*
 * Mutex policy attributes
 */
#define _PTHREAD_MUTEX_POLICY_NONE 0
#define _PTHREAD_MUTEX_POLICY_FAIRSHARE 0x040   /* 1 */
#define _PTHREAD_MUTEX_POLICY_FIRSTFIT 0x080    /* 2 */
#define _PTHREAD_MUTEX_POLICY_REALTIME 0x0c0    /* 3 */
#define _PTHREAD_MUTEX_POLICY_ADAPTIVE 0x100    /* 4 */
#define _PTHREAD_MUTEX_POLICY_PRIPROTECT 0x140  /* 5 */
#define _PTHREAD_MUTEX_POLICY_PRIINHERIT 0x180  /* 6 */
#define PTHREAD_POLICY_FLAGS_MASK 0x1c0

#define _PTHREAD_MTX_OPT_HOLDLOCK 0x200
#define _PTHREAD_MTX_OPT_NOMTX 0x400

#define _PTHREAD_MTX_OPT_NOTIFY 0x1000
#define _PTHREAD_MTX_OPT_MUTEX 0x2000   /* this is a mutex type */

#define _PTHREAD_RWLOCK_UPGRADE_TRY 0x10000

/* pflags */
#define KSYN_WQ_INLIST 1
#define KSYN_WQ_INHASH 2
#define KSYN_WQ_SHARED 4
#define KSYN_WQ_WAITING 8       /* threads waiting for this wq to be available */
#define KSYN_WQ_FLIST 0x10      /* in free list to be freed after a short delay */

/* kflags */
#define KSYN_KWF_INITCLEARED 1  /* the init status found and preposts cleared */
#define KSYN_KWF_ZEROEDOUT 2    /* the lword, etc are inited to 0 */

#define KSYN_CLEANUP_DEADLINE 10
int psynch_cleanupset;
thread_call_t psynch_thcall;

#define KSYN_WQTYPE_INWAIT 0x1000
#define KSYN_WQTYPE_INDROP 0x2000
#define KSYN_WQTYPE_MTX 0x1
#define KSYN_WQTYPE_CVAR 0x2
#define KSYN_WQTYPE_RWLOCK 0x4
#define KSYN_WQTYPE_SEMA 0x8
#define KSYN_WQTYPE_BARR 0x10
#define KSYN_WQTYPE_MASK 0x00ff

#define KSYN_MTX_MAX 0x0fffffff
#define KSYN_WQTYPE_MUTEXDROP (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_MTX)

#define KW_UNLOCK_PREPOST 0x01
#define KW_UNLOCK_PREPOST_UPGRADE 0x02
#define KW_UNLOCK_PREPOST_DOWNGRADE 0x04
#define KW_UNLOCK_PREPOST_READLOCK 0x08
#define KW_UNLOCK_PREPOST_LREADLOCK 0x10
#define KW_UNLOCK_PREPOST_WRLOCK 0x20
#define KW_UNLOCK_PREPOST_YWRLOCK 0x40

#define CLEAR_PREPOST_BITS(kwq) {\
    kwq->kw_pre_lockseq = 0; \
    kwq->kw_pre_sseq = PTHRW_RWS_INIT; \
    kwq->kw_pre_rwwc = 0; \
}

#define CLEAR_INITCOUNT_BITS(kwq) {\
    kwq->kw_initcount = 0; \
    kwq->kw_initrecv = 0; \
    kwq->kw_initcountseq = 0; \
}

#define CLEAR_INTR_PREPOST_BITS(kwq) {\
    kwq->kw_pre_intrcount = 0; \
    kwq->kw_pre_intrseq = 0; \
    kwq->kw_pre_intrretbits = 0; \
    kwq->kw_pre_intrtype = 0; \
}

#define CLEAR_REINIT_BITS(kwq) {\
    if ((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_CVAR) { \
        if ((kwq->kw_inqueue != 0) && (kwq->kw_inqueue != kwq->kw_fakecount)) \
            panic("CV: entries in queue during reinit %d:%d\n", kwq->kw_inqueue, kwq->kw_fakecount); \
    }; \
    if ((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_RWLOCK) { \
        kwq->kw_nextseqword = PTHRW_RWS_INIT; \
        kwq->kw_overlapwatch = 0; \
    }; \
    kwq->kw_pre_lockseq = 0; \
    kwq->kw_pre_rwwc = 0; \
    kwq->kw_pre_sseq = PTHRW_RWS_INIT; \
    kwq->kw_lastunlockseq = PTHRW_RWL_INIT; \
    kwq->kw_lastseqword = PTHRW_RWS_INIT; \
    kwq->kw_pre_intrcount = 0; \
    kwq->kw_pre_intrseq = 0; \
    kwq->kw_pre_intrretbits = 0; \
    kwq->kw_pre_intrtype = 0; \
    kwq->kw_lword = 0; \
    kwq->kw_uword = 0; \
    kwq->kw_sword = PTHRW_RWS_INIT; \
}

void pthread_list_lock(void);
void pthread_list_unlock(void);
void pthread_list_lock_spin(void);
void pthread_list_lock_convert_spin(void);
void ksyn_wqlock(ksyn_wait_queue_t kwq);
void ksyn_wqunlock(ksyn_wait_queue_t kwq);
ksyn_wait_queue_t ksyn_wq_hash_lookup(user_addr_t mutex, proc_t p, int flags, uint64_t object, uint64_t offset);
int ksyn_wqfind(user_addr_t mutex, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, uint64_t tid, int flags, int wqtype, ksyn_wait_queue_t * wq);
void ksyn_wqrelease(ksyn_wait_queue_t mkwq, ksyn_wait_queue_t ckwq, int qfreenow, int wqtype);
extern int ksyn_findobj(uint64_t mutex, uint64_t * object, uint64_t * offset);
static void UPDATE_CVKWQ(ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, uint64_t tid, int wqtype);
extern thread_t port_name_to_thread(mach_port_name_t port_name);

kern_return_t ksyn_block_thread_locked(ksyn_wait_queue_t kwq, uint64_t abstime, ksyn_waitq_element_t kwe, int log, thread_continue_t, void * parameter);
kern_return_t ksyn_wakeup_thread(ksyn_wait_queue_t kwq, ksyn_waitq_element_t kwe);
void ksyn_freeallkwe(ksyn_queue_t kq);

uint32_t psynch_mutexdrop_internal(ksyn_wait_queue_t kwq, uint32_t lkseq, uint32_t ugen, int flags);
int kwq_handle_unlock(ksyn_wait_queue_t, uint32_t mgen, uint32_t rw_wc, uint32_t * updatep, int flags, int *blockp, uint32_t premgen);

void ksyn_queue_init(ksyn_queue_t kq);
int ksyn_queue_insert(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t mgen, struct uthread * uth, ksyn_waitq_element_t kwe, int firstfit);
ksyn_waitq_element_t ksyn_queue_removefirst(ksyn_queue_t kq, ksyn_wait_queue_t kwq);
void ksyn_queue_removeitem(ksyn_wait_queue_t kwq, ksyn_queue_t kq, ksyn_waitq_element_t kwe);
int ksyn_queue_move_tofree(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t upto, ksyn_queue_t freeq, int all, int release);
void update_low_high(ksyn_wait_queue_t kwq, uint32_t lockseq);
uint32_t find_nextlowseq(ksyn_wait_queue_t kwq);
uint32_t find_nexthighseq(ksyn_wait_queue_t kwq);

int find_seq_till(ksyn_wait_queue_t kwq, uint32_t upto, uint32_t nwaiters, uint32_t *countp);
uint32_t ksyn_queue_count_tolowest(ksyn_queue_t kq, uint32_t upto);

ksyn_waitq_element_t ksyn_queue_find_cvpreposeq(ksyn_queue_t kq, uint32_t cgen);
uint32_t ksyn_queue_cvcount_entries(ksyn_queue_t kq, uint32_t upto, uint32_t from, int * numwaitersp, int * numintrp, int * numprepop);
void ksyn_handle_cvbroad(ksyn_wait_queue_t ckwq, uint32_t upto, uint32_t *updatep);
void ksyn_cvupdate_fixup(ksyn_wait_queue_t ckwq, uint32_t *updatep, ksyn_queue_t kfreeq, int release);
ksyn_waitq_element_t ksyn_queue_find_signalseq(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t toseq, uint32_t lockseq);
ksyn_waitq_element_t ksyn_queue_find_threadseq(ksyn_wait_queue_t ckwq, ksyn_queue_t kq, thread_t th, uint32_t toseq);
void psynch_cvcontinue(void *, wait_result_t);
void psynch_mtxcontinue(void *, wait_result_t);

int ksyn_wakeupreaders(ksyn_wait_queue_t kwq, uint32_t limitread, int longreadset, int allreaders, uint32_t updatebits, int * wokenp);
int kwq_find_rw_lowest(ksyn_wait_queue_t kwq, int flags, uint32_t premgen, int * type, uint32_t lowest[]);
ksyn_waitq_element_t ksyn_queue_find_seq(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t seq, int remove);
int kwq_handle_overlap(ksyn_wait_queue_t kwq, uint32_t lgenval, uint32_t ugenval, uint32_t rw_wc, uint32_t *updatebitsp, int flags, int * blockp);
int kwq_handle_downgrade(ksyn_wait_queue_t kwq, uint32_t mgen, int flags, uint32_t premgen, int * blockp);

static void
UPDATE_CVKWQ(ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, __unused uint64_t tid, __unused int wqtype)
{
    if ((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_CVAR) {
        if ((kwq->kw_kflags & KSYN_KWF_ZEROEDOUT) != 0) {
            /* the values of L, U and S are cleared out due to L==S in previous transition */
            kwq->kw_lword = mgen;
            kwq->kw_uword = ugen;
            kwq->kw_sword = rw_wc;
            kwq->kw_kflags &= ~KSYN_KWF_ZEROEDOUT;
        }
        if (is_seqhigher((mgen & PTHRW_COUNT_MASK), (kwq->kw_lword & PTHRW_COUNT_MASK)) != 0)
            kwq->kw_lword = mgen;
        if (is_seqhigher((ugen & PTHRW_COUNT_MASK), (kwq->kw_uword & PTHRW_COUNT_MASK)) != 0)
            kwq->kw_uword = ugen;
        if ((rw_wc & PTH_RWS_CV_CBIT) != 0) {
            if (is_seqlower(kwq->kw_cvkernelseq, (rw_wc & PTHRW_COUNT_MASK)) != 0) {
                kwq->kw_cvkernelseq = (rw_wc & PTHRW_COUNT_MASK);
            }
            if (is_seqhigher((rw_wc & PTHRW_COUNT_MASK), (kwq->kw_sword & PTHRW_COUNT_MASK)) != 0)
                kwq->kw_sword = rw_wc;
        }
    }
}


/* to protect the hashes, iocounts, freelist */
void
pthread_list_lock(void)
{
    lck_mtx_lock(pthread_list_mlock);
}

void
pthread_list_lock_spin(void)
{
    lck_mtx_lock_spin(pthread_list_mlock);
}

void
pthread_list_lock_convert_spin(void)
{
    lck_mtx_convert_spin(pthread_list_mlock);
}


void
pthread_list_unlock(void)
{
    lck_mtx_unlock(pthread_list_mlock);
}

/* to protect the indiv queue */
void
ksyn_wqlock(ksyn_wait_queue_t kwq)
{

    lck_mtx_lock(&kwq->kw_lock);
}

void
ksyn_wqunlock(ksyn_wait_queue_t kwq)
{
    lck_mtx_unlock(&kwq->kw_lock);
}


/* routine to drop the mutex unlock, used both for the mutexunlock system call and the drop during cond wait */
uint32_t
psynch_mutexdrop_internal(ksyn_wait_queue_t kwq, uint32_t lkseq, uint32_t ugen, int flags)
{
    uint32_t nextgen, low_writer, updatebits, returnbits = 0;
    int firstfit = flags & _PTHREAD_MUTEX_POLICY_FIRSTFIT;
    ksyn_waitq_element_t kwe = NULL;
    kern_return_t kret = KERN_SUCCESS;

    nextgen = (ugen + PTHRW_INC);

#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_START, (uint32_t)kwq->kw_addr, lkseq, ugen, flags, 0);
#endif /* _PSYNCH_TRACE_ */

    ksyn_wqlock(kwq);

redrive:

    if (kwq->kw_inqueue != 0) {
        updatebits = (kwq->kw_highseq & PTHRW_COUNT_MASK) | (PTH_RWL_EBIT | PTH_RWL_KBIT);
        kwq->kw_lastunlockseq = (ugen & PTHRW_COUNT_MASK);
        if (firstfit != 0)
        {
            /* first fit, pick any one */
            kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwq);
            kwe->kwe_psynchretval = updatebits;
            kwe->kwe_kwqqueue = NULL;

#if _PSYNCH_TRACE_
            __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xcafecaf1, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
#endif /* _PSYNCH_TRACE_ */

            kret = ksyn_wakeup_thread(kwq, kwe);
#if __TESTPANICS__
            if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
                panic("psynch_mutexdrop_internal: panic unable to wakeup firstfit mutex thread\n");
#endif /* __TESTPANICS__ */
            if (kret == KERN_NOT_WAITING)
                goto redrive;
        } else {
            /* handle fairshare */
            low_writer = kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_firstnum;
            low_writer &= PTHRW_COUNT_MASK;

            if (low_writer == nextgen) {
                /* next seq to be granted found */
                kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwq);

                /* since the grant could be cv, make sure mutex wait is set in case the thread interrupted out */
                kwe->kwe_psynchretval = updatebits | PTH_RWL_MTX_WAIT;
                kwe->kwe_kwqqueue = NULL;

#if _PSYNCH_TRACE_
                __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xcafecaf2, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
#endif /* _PSYNCH_TRACE_ */

                kret = ksyn_wakeup_thread(kwq, kwe);
#if __TESTPANICS__
                if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
                    panic("psynch_mutexdrop_internal: panic unable to wakeup fairshare mutex thread\n");
#endif /* __TESTPANICS__ */
                if (kret == KERN_NOT_WAITING) {
                    /* interrupt post */
                    kwq->kw_pre_intrcount = 1;
                    kwq->kw_pre_intrseq = nextgen;
                    kwq->kw_pre_intrretbits = updatebits;
                    kwq->kw_pre_intrtype = PTH_RW_TYPE_WRITE;
#if _PSYNCH_TRACE_
                    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfafafaf1, nextgen, kwq->kw_pre_intrretbits, 0);
#endif /* _PSYNCH_TRACE_ */
                }

            } else if (is_seqhigher(low_writer, nextgen) != 0) {
                kwq->kw_pre_rwwc++;

                if (kwq->kw_pre_rwwc > 1) {
                    __FAILEDUSERTEST__("psynch_mutexdrop_internal: prepost more than one (1)\n");
                    goto out;
                }

                kwq->kw_pre_lockseq = (nextgen & PTHRW_COUNT_MASK);
#if _PSYNCH_TRACE_
                __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef1, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
            } else {

                //__FAILEDUSERTEST__("psynch_mutexdrop_internal: FS mutex unlock sequence higher than the lowest one in queue\n");

                kwe = ksyn_queue_find_seq(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], (nextgen & PTHRW_COUNT_MASK), 1);
                if (kwe != NULL) {
                    /* next seq to be granted found */
                    /* since the grant could be cv, make sure mutex wait is set in case the thread interrupted out */
                    kwe->kwe_psynchretval = updatebits | PTH_RWL_MTX_WAIT;
                    kwe->kwe_kwqqueue = NULL;
#if _PSYNCH_TRACE_
                    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xcafecaf3, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
#endif /* _PSYNCH_TRACE_ */
                    kret = ksyn_wakeup_thread(kwq, kwe);
#if __TESTPANICS__
                    if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
                        panic("psynch_mutexdrop_internal: panic unable to wakeup fairshare mutex thread\n");
#endif /* __TESTPANICS__ */
                    if (kret == KERN_NOT_WAITING)
                        goto redrive;
                } else {
                    /* next seq to be granted not found, prepost */
                    kwq->kw_pre_rwwc++;

                    if (kwq->kw_pre_rwwc > 1) {
                        __FAILEDUSERTEST__("psynch_mutexdrop_internal: prepost more than one (2)\n");
                        goto out;
                    }

                    kwq->kw_pre_lockseq = (nextgen & PTHRW_COUNT_MASK);
#if _PSYNCH_TRACE_
                    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
                }
            }
        }
    } else {

        /* if firstfit the last one could be spurious */
        if (firstfit == 0) {
            kwq->kw_lastunlockseq = (ugen & PTHRW_COUNT_MASK);
            kwq->kw_pre_rwwc++;

            if (kwq->kw_pre_rwwc > 1) {
                __FAILEDUSERTEST__("psynch_mutexdrop_internal: prepost more than one (3)\n");
                goto out;
            }

            kwq->kw_pre_lockseq = (nextgen & PTHRW_COUNT_MASK);
#if _PSYNCH_TRACE_
            __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef3, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
        } else {
            /* first fit case */
#if _PSYNCH_TRACE_
            __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef3, kwq->kw_lastunlockseq, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
            kwq->kw_lastunlockseq = (ugen & PTHRW_COUNT_MASK);
            /* not set or the new lkseq is higher */
            if ((kwq->kw_pre_rwwc == 0) || (is_seqlower(kwq->kw_pre_lockseq, lkseq) == 0))
                kwq->kw_pre_lockseq = (lkseq & PTHRW_COUNT_MASK);
            kwq->kw_pre_rwwc = 1;
#if _PSYNCH_TRACE_
            __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef3, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */

            /* indicate prepost content in kernel */
            returnbits = lkseq | PTH_RWL_PBIT;
        }
    }

out:
    ksyn_wqunlock(kwq);

#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_END, (uint32_t)kwq->kw_addr, 0xeeeeeeed, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
    ksyn_wqrelease(kwq, NULL, 1, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_MTX));
    return(returnbits);
}
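
/*
 * Example of the sequence arithmetic above: if userland unlocks with
 * ugen = 0x500, the drop looks for the waiter at nextgen = 0x600
 * (ugen + PTHRW_INC). If the lowest queued writer is exactly 0x600 it is
 * woken directly; if the lowest queued writer is higher, the wakeup is
 * preposted in kw_pre_lockseq for the 0x600 waiter to consume when it
 * arrives.
 */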

/*
 * psynch_mutexwait: This system call is used for contended psynch mutexes to block.
 */

int
psynch_mutexwait(__unused proc_t p, struct psynch_mutexwait_args * uap, uint32_t * retval)
{
    user_addr_t mutex = uap->mutex;
    uint32_t mgen = uap->mgen;
    uint32_t ugen = uap->ugen;
    uint64_t tid = uap->tid;
    int flags = uap->flags;
    ksyn_wait_queue_t kwq;
    int error = 0;
    int ins_flags, retry;
    uthread_t uth;
    int firstfit = flags & _PTHREAD_MUTEX_POLICY_FIRSTFIT;
    uint32_t lockseq, updatebits = 0;
    ksyn_waitq_element_t kwe;
    kern_return_t kret;

#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_START, (uint32_t)mutex, mgen, ugen, flags, 0);
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_NONE, (uint32_t)mutex, mgen, ugen, (uint32_t)tid, 0);
#endif /* _PSYNCH_TRACE_ */

    uth = current_uthread();

    kwe = &uth->uu_kwe;
    kwe->kwe_lockseq = uap->mgen;
    kwe->kwe_uth = uth;
    kwe->kwe_psynchretval = 0;
    kwe->kwe_kwqqueue = NULL;
    lockseq = (uap->mgen & PTHRW_COUNT_MASK);

    if (firstfit == 0) {
        ins_flags = SEQFIT;
    } else {
        /* first fit */
        ins_flags = FIRSTFIT;
    }

    error = ksyn_wqfind(mutex, mgen, ugen, 0, tid, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_MTX), &kwq);
    if (error != 0) {
#if _PSYNCH_TRACE_
        __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_END, (uint32_t)mutex, 1, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
        return(error);
    }

    ksyn_wqlock(kwq);


    if ((mgen & PTH_RWL_RETRYBIT) != 0) {
        retry = 1;
        mgen &= ~PTH_RWL_RETRYBIT;
    }

    /* handle first the missed wakeups */
    if ((kwq->kw_pre_intrcount != 0) &&
        ((kwq->kw_pre_intrtype == PTH_RW_TYPE_WRITE)) &&
        (is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {
        kwq->kw_pre_intrcount--;
        kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
        if (kwq->kw_pre_intrcount == 0)
            CLEAR_INTR_PREPOST_BITS(kwq);
        ksyn_wqunlock(kwq);
        *retval = kwe->kwe_psynchretval;
#if _PSYNCH_TRACE_
        __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_NONE, (uint32_t)mutex, 0xfafafaf1, kwe->kwe_psynchretval, kwq->kw_pre_intrcount, 0);
#endif /* _PSYNCH_TRACE_ */
        goto out;
    }

    if ((kwq->kw_pre_rwwc != 0) && ((ins_flags == FIRSTFIT) || ((lockseq & PTHRW_COUNT_MASK) == (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)))) {
        /* got preposted lock */
        kwq->kw_pre_rwwc--;
        if (kwq->kw_pre_rwwc == 0) {
            CLEAR_PREPOST_BITS(kwq);
            kwq->kw_lastunlockseq = PTHRW_RWL_INIT;
            if (kwq->kw_inqueue == 0) {
                updatebits = lockseq | (PTH_RWL_KBIT | PTH_RWL_EBIT);
            } else {
                updatebits = (kwq->kw_highseq & PTHRW_COUNT_MASK) | (PTH_RWL_KBIT | PTH_RWL_EBIT);
            }
            updatebits &= ~PTH_RWL_MTX_WAIT;

            kwe->kwe_psynchretval = updatebits;

            if (updatebits == 0) {
                __FAILEDUSERTEST__("psynch_mutexwait(prepost): returning 0 lseq in mutexwait with no EBIT \n");
            }
            ksyn_wqunlock(kwq);
            *retval = updatebits;
#if _PSYNCH_TRACE_
            __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef1, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
            goto out;
        } else {
            __FAILEDUSERTEST__("psynch_mutexwait: more than one prepost\n");
            kwq->kw_pre_lockseq += PTHRW_INC;   /* look for next one */
            ksyn_wqunlock(kwq);
            error = EINVAL;
            goto out;
        }
    }

#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfeedfeed, mgen, ins_flags, 0);
#endif /* _PSYNCH_TRACE_ */

    error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], mgen, uth, kwe, ins_flags);
    if (error != 0) {
        ksyn_wqunlock(kwq);
#if _PSYNCH_TRACE_
        __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_END, (uint32_t)mutex, 2, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
        goto out;
    }

    kret = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0, psynch_mtxcontinue, (void *)kwq);

    psynch_mtxcontinue((void *)kwq, kret);

    /* not expected to return from unix_syscall_return */
    panic("psynch_mtxcontinue returned from unix_syscall_return");

out:
    ksyn_wqrelease(kwq, NULL, 1, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_MTX));
#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_END, (uint32_t)mutex, 0xeeeeeeed, updatebits, error, 0);
#endif /* _PSYNCH_TRACE_ */

    return(error);
}

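/*
 * psynch_mtxcontinue runs as a thread continuation: the stack frame that
 * called ksyn_block_thread_locked() above is gone by the time the blocked
 * thread resumes, so the wait result is handled here and the syscall is
 * finished explicitly via unix_syscall_return(), which does not return to
 * this function.
 */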
void
psynch_mtxcontinue(void * parameter, wait_result_t result)
{
    int error = 0;
    uint32_t updatebits = 0;
    uthread_t uth = current_uthread();
    ksyn_wait_queue_t kwq = (ksyn_wait_queue_t)parameter;
    ksyn_waitq_element_t kwe;

    kwe = &uth->uu_kwe;

    switch (result) {
    case THREAD_TIMED_OUT:
        error = ETIMEDOUT;
        break;
    case THREAD_INTERRUPTED:
        error = EINTR;
        break;
    default:
        error = 0;
        break;
    }

    if (error != 0) {
        ksyn_wqlock(kwq);

#if _PSYNCH_TRACE_
        __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 3, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
        if (kwe->kwe_kwqqueue != NULL)
            ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwe);
        ksyn_wqunlock(kwq);
    } else {
        updatebits = kwe->kwe_psynchretval;
        updatebits &= ~PTH_RWL_MTX_WAIT;
        uth->uu_rval[0] = updatebits;

        if (updatebits == 0)
            __FAILEDUSERTEST__("psynch_mutexwait: returning 0 lseq in mutexwait with no EBIT \n");
    }
    ksyn_wqrelease(kwq, NULL, 1, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_MTX));
#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_END, (uint32_t)kwq->kw_addr, 0xeeeeeeed, updatebits, error, 0);
#endif /* _PSYNCH_TRACE_ */

    unix_syscall_return(error);
}

/*
 * psynch_mutexdrop: This system call is used for unlock postings on contended psynch mutexes.
 */
int
psynch_mutexdrop(__unused proc_t p, struct psynch_mutexdrop_args * uap, uint32_t * retval)
{
    user_addr_t mutex = uap->mutex;
    uint32_t mgen = uap->mgen;
    uint32_t ugen = uap->ugen;
    uint64_t tid = uap->tid;
    int flags = uap->flags;
    ksyn_wait_queue_t kwq;
    uint32_t updateval;
    int error = 0;

    error = ksyn_wqfind(mutex, mgen, ugen, 0, tid, flags, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_MTX), &kwq);
    if (error != 0) {
        return(error);
    }

    updateval = psynch_mutexdrop_internal(kwq, mgen, ugen, flags);
    /* drops the kwq reference */

    *retval = updateval;
    return(0);

}

/*
 * psynch_cvbroad: This system call is used for broadcast posting on blocked waiters of psynch cvars.
 */
int
psynch_cvbroad(__unused proc_t p, struct psynch_cvbroad_args * uap, uint32_t * retval)
{
    user_addr_t cond = uap->cv;
    uint64_t cvlsgen = uap->cvlsgen;
    uint64_t cvudgen = uap->cvudgen;
    uint32_t cgen, cugen, csgen, diffgen;
    uint32_t uptoseq, fromseq;
    int flags = uap->flags;
    ksyn_wait_queue_t ckwq;
    int error = 0;
    uint32_t updatebits = 0;
    uint32_t count;
    struct ksyn_queue kfreeq;

    csgen = (uint32_t)((cvlsgen >> 32) & 0xffffffff);
    cgen = ((uint32_t)(cvlsgen & 0xffffffff));
    cugen = (uint32_t)((cvudgen >> 32) & 0xffffffff);
    diffgen = ((uint32_t)(cvudgen & 0xffffffff));
    count = (diffgen >> PTHRW_COUNT_SHIFT);

#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_START, (uint32_t)cond, cgen, cugen, csgen, 0);
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_NONE, (uint32_t)cond, 0xcbcbcbc1, diffgen, flags, 0);
#endif /* _PSYNCH_TRACE_ */

    uptoseq = cgen & PTHRW_COUNT_MASK;
    fromseq = (cugen & PTHRW_COUNT_MASK) + PTHRW_INC;

    if (is_seqhigher(fromseq, uptoseq) || is_seqhigher((csgen & PTHRW_COUNT_MASK), uptoseq)) {
        __FAILEDUSERTEST__("cvbroad: invalid L, U and S values\n");
        return EINVAL;
    }
    if (count > (uint32_t)task_threadmax) {
        __FAILEDUSERTEST__("cvbroad: difference greater than maximum possible thread count\n");
        return EBUSY;
    }

    ckwq = NULL;

    error = ksyn_wqfind(cond, cgen, cugen, csgen, 0, flags, (KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INDROP), &ckwq);
    if (error != 0) {
#if _PSYNCH_TRACE_
        __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_END, (uint32_t)cond, 0, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
        return(error);
    }

    *retval = 0;

    ksyn_wqlock(ckwq);

    /* update L, U and S... */
    UPDATE_CVKWQ(ckwq, cgen, cugen, csgen, 0, KSYN_WQTYPE_CVAR);

    /* broadcast wakeups/prepost handling */
    ksyn_handle_cvbroad(ckwq, uptoseq, &updatebits);

    /* set C or P bits and free if needed */
    ckwq->kw_sword += (updatebits & PTHRW_COUNT_MASK);
    ksyn_cvupdate_fixup(ckwq, &updatebits, &kfreeq, 1);
    ksyn_wqunlock(ckwq);

    *retval = updatebits;

    ksyn_wqrelease(ckwq, NULL, 1, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_CVAR));
#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_END, (uint32_t)cond, 0xeeeeeeed, (uint32_t)*retval, error, 0);
#endif /* _PSYNCH_TRACE_ */

    return(error);
}
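
/*
 * Note on argument packing (this mirrors the unpacking at the top of
 * psynch_cvbroad): the userland wrapper passes the condvar words in two
 * 64-bit arguments, with cvlsgen carrying S in the high 32 bits and L in
 * the low 32, and cvudgen carrying U in the high 32 bits and the waiter
 * diff in the low 32, where count = diff >> PTHRW_COUNT_SHIFT.
 */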

ksyn_waitq_element_t
ksyn_queue_find_threadseq(ksyn_wait_queue_t ckwq, __unused ksyn_queue_t kq, thread_t th, uint32_t upto)
{
    uthread_t uth = get_bsdthread_info(th);
    ksyn_waitq_element_t kwe = &uth->uu_kwe;

    if (kwe->kwe_kwqqueue != ckwq ||
        is_seqhigher((kwe->kwe_lockseq & PTHRW_COUNT_MASK), upto)) {
        /* the thread is not waiting in the cv (or wasn't when the wakeup happened) */
        return NULL;
    }
    return kwe;
}

/*
 * psynch_cvsignal: This system call is used for signalling the blocked waiters of psynch cvars.
 */
int
psynch_cvsignal(__unused proc_t p, struct psynch_cvsignal_args * uap, uint32_t * retval)
{
    user_addr_t cond = uap->cv;
    uint64_t cvlsgen = uap->cvlsgen;
    uint32_t cgen, csgen, signalseq, uptoseq;
    uint32_t cugen = uap->cvugen;
    int threadport = uap->thread_port;
    int flags = uap->flags;
    ksyn_wait_queue_t ckwq = NULL;
    ksyn_waitq_element_t kwe, nkwe = NULL;
    ksyn_queue_t kq;
    int error = 0;
    thread_t th = THREAD_NULL;
    uint32_t updatebits = 0;
    kern_return_t kret;
    struct ksyn_queue kfreeq;


    csgen = (uint32_t)((cvlsgen >> 32) & 0xffffffff);
    cgen = ((uint32_t)(cvlsgen & 0xffffffff));

#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_START, (uint32_t)cond, cgen, cugen, threadport, 0);
#endif /* _PSYNCH_TRACE_ */

    uptoseq = cgen & PTHRW_COUNT_MASK;
    signalseq = (cugen & PTHRW_COUNT_MASK) + PTHRW_INC;

    /* validate sane L, U, and S values */
    if (((threadport == 0) && (is_seqhigher(signalseq, uptoseq))) || is_seqhigher((csgen & PTHRW_COUNT_MASK), uptoseq)) {
        __FAILEDUSERTEST__("psynch_cvsignal: invalid sequence numbers\n");
        error = EINVAL;
        goto out;
    }

    /* If we are looking for a specific thread, grab a reference for it */
    if (threadport != 0) {
        th = (thread_t)port_name_to_thread((mach_port_name_t)threadport);
        if (th == THREAD_NULL) {
            error = ESRCH;
            goto out;
        }
    }

    error = ksyn_wqfind(cond, cgen, cugen, csgen, 0, flags, (KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INDROP), &ckwq);
    if (error != 0) {
#if _PSYNCH_TRACE_
        __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_END, (uint32_t)cond, 0, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
        goto out;
    }

    ksyn_wqlock(ckwq);

    /* update L, U and S... */
    UPDATE_CVKWQ(ckwq, cgen, cugen, csgen, 0, KSYN_WQTYPE_CVAR);

    kq = &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER];

retry:
    /* Only bother if we aren't already balanced */
    if ((ckwq->kw_lword & PTHRW_COUNT_MASK) != (ckwq->kw_sword & PTHRW_COUNT_MASK)) {

        kwe = (th != NULL) ? ksyn_queue_find_threadseq(ckwq, kq, th, uptoseq) :
              ksyn_queue_find_signalseq(ckwq, kq, uptoseq, signalseq);
        if (kwe != NULL) {
            switch (kwe->kwe_flags) {

            case KWE_THREAD_BROADCAST:
                /* broadcasts swallow our signal */
                break;

            case KWE_THREAD_PREPOST:
                /* merge in with existing prepost at our same uptoseq */
                kwe->kwe_count += 1;
                break;

            case KWE_THREAD_INWAIT:
                if (is_seqlower((kwe->kwe_lockseq & PTHRW_COUNT_MASK), signalseq)) {
                    /*
                     * A valid thread in our range, but lower than our signal.
                     * Matching it may leave our match with nobody to wake it if/when
                     * it arrives (the signal originally meant for this thread might
                     * not successfully wake it).
                     *
                     * Convert to broadcast - may cause some spurious wakeups
                     * (allowed by spec), but avoids starvation (better choice).
                     */
#if _PSYNCH_TRACE_
                    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xc1c1c1c1, uptoseq, 0, 0);
#endif /* _PSYNCH_TRACE_ */
                    ksyn_handle_cvbroad(ckwq, uptoseq, &updatebits);
                } else {
                    ksyn_queue_removeitem(ckwq, kq, kwe);
                    kwe->kwe_psynchretval = PTH_RWL_MTX_WAIT;
                    kwe->kwe_kwqqueue = NULL;
#if _PSYNCH_TRACE_
                    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xcafecaf2, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
#endif /* _PSYNCH_TRACE_ */
                    kret = ksyn_wakeup_thread(ckwq, kwe);
#if __TESTPANICS__
                    if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
                        panic("ksyn_wakeup_thread: panic waking up condition waiter\n");
#endif /* __TESTPANICS__ */
                    updatebits += PTHRW_INC;
                }

                ckwq->kw_sword += (updatebits & PTHRW_COUNT_MASK);
                break;

            default:
                panic("unknown kweflags\n");
                break;
            }

        } else if (th != NULL) {
            /*
             * Could not find the thread, post a broadcast, otherwise the
             * waiter will be stuck. We used to send ESRCH here, but that
             * led to rare hangs.
             */
            ksyn_handle_cvbroad(ckwq, uptoseq, &updatebits);
            ckwq->kw_sword += (updatebits & PTHRW_COUNT_MASK);
        } else if (nkwe == NULL) {
            ksyn_wqunlock(ckwq);
            nkwe = (ksyn_waitq_element_t)zalloc(kwe_zone);
            ksyn_wqlock(ckwq);
            goto retry;

        } else {
            /* no eligible entries - add prepost */
            bzero(nkwe, sizeof(struct ksyn_waitq_element));
            nkwe->kwe_kwqqueue = ckwq;
            nkwe->kwe_flags = KWE_THREAD_PREPOST;
            nkwe->kwe_lockseq = uptoseq;
            nkwe->kwe_count = 1;
            nkwe->kwe_uth = NULL;
            nkwe->kwe_psynchretval = 0;

#if _PSYNCH_TRACE_
            __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xfeedfefe, uptoseq, 0, 0);
#endif /* _PSYNCH_TRACE_ */

            (void)ksyn_queue_insert(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], uptoseq, NULL, nkwe, SEQFIT);
            ckwq->kw_fakecount++;
            nkwe = NULL;
        }

        /* set C or P bits and free if needed */
        ksyn_cvupdate_fixup(ckwq, &updatebits, &kfreeq, 1);
    }

    ksyn_wqunlock(ckwq);
    if (nkwe != NULL)
        zfree(kwe_zone, nkwe);

    ksyn_wqrelease(ckwq, NULL, 1, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_CVAR));

out:
    if (th != NULL)
        thread_deallocate(th);
    if (error == 0)
        *retval = updatebits;
#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_END, (uint32_t)cond, 0xeeeeeeed, updatebits, error, 0);
#endif /* _PSYNCH_TRACE_ */

    return(error);
}

/*
 * psynch_cvwait: This system call is used for psynch cvar waiters to block in kernel.
 */
int
psynch_cvwait(__unused proc_t p, struct psynch_cvwait_args * uap, uint32_t * retval)
{
    user_addr_t cond = uap->cv;
    uint64_t cvlsgen = uap->cvlsgen;
    uint32_t cgen, csgen;
    uint32_t cugen = uap->cvugen;
    user_addr_t mutex = uap->mutex;
    uint64_t mugen = uap->mugen;
    uint32_t mgen, ugen;
    int flags = uap->flags;
    ksyn_wait_queue_t kwq, ckwq;
    int error = 0, local_error = 0;
    uint64_t abstime = 0;
    uint32_t lockseq, updatebits = 0;
    struct timespec ts;
    uthread_t uth;
    ksyn_waitq_element_t kwe, nkwe = NULL;
    struct ksyn_queue *kq, kfreeq;
    kern_return_t kret;

    /* for conformance reasons */
    __pthread_testcancel(0);

    csgen = (uint32_t)((cvlsgen >> 32) & 0xffffffff);
    cgen = ((uint32_t)(cvlsgen & 0xffffffff));
    ugen = (uint32_t)((mugen >> 32) & 0xffffffff);
    mgen = ((uint32_t)(mugen & 0xffffffff));

#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_START, (uint32_t)cond, cgen, cugen, csgen, 0);
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_NONE, (uint32_t)mutex, mgen, ugen, flags, 0);
#endif /* _PSYNCH_TRACE_ */

    lockseq = (cgen & PTHRW_COUNT_MASK);
    /*
     * In cvwait the U word can be out of range, as the cond could be used
     * purely for timeouts. However the S word needs to be within bounds
     * and is validated at user level as well.
     */
    if (is_seqhigher_eq((csgen & PTHRW_COUNT_MASK), lockseq) != 0) {
        __FAILEDUSERTEST__("psynch_cvwait: invalid sequence numbers\n");
        return EINVAL;
    }

    ckwq = kwq = NULL;
    error = ksyn_wqfind(cond, cgen, cugen, csgen, 0, flags, KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INWAIT, &ckwq);
    if (error != 0) {
#if _PSYNCH_TRACE_
        __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_END, (uint32_t)cond, 1, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
        return(error);
    }


    if (mutex != (user_addr_t)0) {
        error = ksyn_wqfind(mutex, mgen, ugen, 0, 0, flags, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_MTX), &kwq);
        if (error != 0) {
            local_error = error;
#if _PSYNCH_TRACE_
            __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_END, (uint32_t)mutex, 2, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
            goto out;
        }

        (void)psynch_mutexdrop_internal(kwq, mgen, ugen, flags);
        /* drops kwq reference */
        kwq = NULL;
    }

    if (uap->sec != 0 || (uap->nsec & 0x3fffffff) != 0) {
        ts.tv_sec = uap->sec;
        ts.tv_nsec = (uap->nsec & 0x3fffffff);
        nanoseconds_to_absolutetime((uint64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec, &abstime);
        clock_absolutetime_interval_to_deadline(abstime, &abstime);
    }

    ksyn_wqlock(ckwq);

    /* update L, U and S... */
    UPDATE_CVKWQ(ckwq, cgen, cugen, csgen, 0, KSYN_WQTYPE_CVAR);

    /* look for the sequence for prepost (or conflicting thread) */
    kq = &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER];
    kwe = ksyn_queue_find_cvpreposeq(kq, lockseq);

    if (kwe != NULL) {
        switch (kwe->kwe_flags) {

        case KWE_THREAD_INWAIT:
            ksyn_wqunlock(ckwq);
            __FAILEDUSERTEST__("cvwait: thread entry with same sequence already present\n");
            local_error = EBUSY;
            goto out;

        case KWE_THREAD_BROADCAST:
            break;

        case KWE_THREAD_PREPOST:
            if ((kwe->kwe_lockseq & PTHRW_COUNT_MASK) == lockseq) {
                /* we can safely consume a reference, so do so */
                if (--kwe->kwe_count == 0) {
                    ksyn_queue_removeitem(ckwq, kq, kwe);
                    ckwq->kw_fakecount--;
                    nkwe = kwe;
                }
            } else {
                /*
                 * consuming a prepost higher than our lock sequence is valid, but
                 * can leave the higher thread without a match. Convert the entry
                 * to a broadcast to compensate for this.
                 */
#if _PSYNCH_TRACE_
                __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xc2c2c2c2, kwe->kwe_lockseq, 0, 0);
#endif /* _PSYNCH_TRACE_ */

                ksyn_handle_cvbroad(ckwq, kwe->kwe_lockseq, &updatebits);
#if __TESTPANICS__
                if (updatebits != 0)
                    panic("psynch_cvwait: convert pre-post to broadcast: woke up %d threads that shouldn't be there\n",
                          updatebits);
#endif /* __TESTPANICS__ */
            }

            break;

        default:
            panic("psynch_cvwait: unexpected wait queue element type\n");
        }

#if _PSYNCH_TRACE_
        __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xfefefefe, kwe->kwe_lockseq, 0, 0);
#endif /* _PSYNCH_TRACE_ */


        updatebits = PTHRW_INC;
        ckwq->kw_sword += PTHRW_INC;

        /* set C or P bits and free if needed */
        ksyn_cvupdate_fixup(ckwq, &updatebits, &kfreeq, 1);

        error = 0;
        local_error = 0;

        *retval = updatebits;

        ksyn_wqunlock(ckwq);

        if (nkwe != NULL)
            zfree(kwe_zone, nkwe);

        goto out;

    }

    uth = current_uthread();
    kwe = &uth->uu_kwe;
    kwe->kwe_kwqqueue = ckwq;
    kwe->kwe_flags = KWE_THREAD_INWAIT;
    kwe->kwe_lockseq = lockseq;
    kwe->kwe_count = 1;
    kwe->kwe_uth = uth;
    kwe->kwe_psynchretval = 0;

#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xfeedfeed, cgen, 0, 0);
#endif /* _PSYNCH_TRACE_ */

    error = ksyn_queue_insert(ckwq, kq, cgen, uth, kwe, SEQFIT);
    if (error != 0) {
        ksyn_wqunlock(ckwq);
        local_error = error;
        goto out;
    }

    kret = ksyn_block_thread_locked(ckwq, abstime, kwe, 1, psynch_cvcontinue, (void *)ckwq);
    /* lock dropped */

    psynch_cvcontinue(ckwq, kret);
    /* not expected to return from unix_syscall_return */
    panic("psynch_cvcontinue returned from unix_syscall_return");

out:
#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_END, (uint32_t)cond, 0xeeeeeeed, (uint32_t)*retval, local_error, 0);
#endif /* _PSYNCH_TRACE_ */
    ksyn_wqrelease(ckwq, NULL, 1, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_CVAR));
    return(local_error);
}


void
psynch_cvcontinue(void * parameter, wait_result_t result)
{
    int error = 0, local_error = 0;
    uthread_t uth = current_uthread();
    ksyn_wait_queue_t ckwq = (ksyn_wait_queue_t)parameter;
    ksyn_waitq_element_t kwe;
    struct ksyn_queue kfreeq;

    switch (result) {
    case THREAD_TIMED_OUT:
        error = ETIMEDOUT;
        break;
    case THREAD_INTERRUPTED:
        error = EINTR;
        break;
    default:
        error = 0;
        break;
    }
#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_THWAKEUP | DBG_FUNC_NONE, 0xf4f3f2f1, (uintptr_t)uth, result, 0, 0);
#endif /* _PSYNCH_TRACE_ */

    local_error = error;
    kwe = &uth->uu_kwe;

    if (error != 0) {
        ksyn_wqlock(ckwq);
        /* just in case it got woken up as we were granting */
        uth->uu_rval[0] = kwe->kwe_psynchretval;

#if __TESTPANICS__
        if ((kwe->kwe_kwqqueue != NULL) && (kwe->kwe_kwqqueue != ckwq))
            panic("cvwait waiting on some other kwq\n");

#endif /* __TESTPANICS__ */


        if (kwe->kwe_kwqqueue != NULL) {
            ksyn_queue_removeitem(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwe);
            kwe->kwe_kwqqueue = NULL;
        }
        if ((kwe->kwe_psynchretval & PTH_RWL_MTX_WAIT) != 0) {
            /*
             * The condition var was granted. Reset the error so that the
             * thread returns back.
             */
            local_error = 0;
            /* no need to set any bits just return as cvsig/broad covers this */
            ksyn_wqunlock(ckwq);
            goto out;
        }

        ckwq->kw_sword += PTHRW_INC;

        /* set C and P bits, in the local error */
        if ((ckwq->kw_lword & PTHRW_COUNT_MASK) == (ckwq->kw_sword & PTHRW_COUNT_MASK)) {
            local_error |= ECVCERORR;
            if (ckwq->kw_inqueue != 0) {
                (void)ksyn_queue_move_tofree(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], (ckwq->kw_lword & PTHRW_COUNT_MASK), &kfreeq, 1, 1);
            }
            ckwq->kw_lword = ckwq->kw_uword = ckwq->kw_sword = 0;
            ckwq->kw_kflags |= KSYN_KWF_ZEROEDOUT;
        } else {
            /* is everything in the queue a fake entry? */
            if ((ckwq->kw_inqueue != 0) && (ckwq->kw_fakecount == ckwq->kw_inqueue)) {
                local_error |= ECVPERORR;
            }
        }
        ksyn_wqunlock(ckwq);

    } else {
        /* PTH_RWL_MTX_WAIT is removed */
        if ((kwe->kwe_psynchretval & PTH_RWS_CV_MBIT) != 0)
            uth->uu_rval[0] = PTHRW_INC | PTH_RWS_CV_CBIT;
        else
            uth->uu_rval[0] = 0;
        local_error = 0;
    }
out:
#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_END, (uint32_t)ckwq->kw_addr, 0xeeeeeeed, uth->uu_rval[0], local_error, 0);
#endif /* _PSYNCH_TRACE_ */
    ksyn_wqrelease(ckwq, NULL, 1, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_CVAR));

    unix_syscall_return(local_error);

}

/*
 * psynch_cvclrprepost: This system call clears pending prepost if present.
 */
int
psynch_cvclrprepost(__unused proc_t p, struct psynch_cvclrprepost_args * uap, __unused int * retval)
{
    user_addr_t cond = uap->cv;
    uint32_t cgen = uap->cvgen;
    uint32_t cugen = uap->cvugen;
    uint32_t csgen = uap->cvsgen;
    uint32_t pseq = uap->preposeq;
    uint32_t flags = uap->flags;
    int error;
    ksyn_wait_queue_t ckwq = NULL;
    struct ksyn_queue kfreeq;

#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CLRPRE | DBG_FUNC_START, (uint32_t)cond, cgen, cugen, csgen, 0);
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CLRPRE | DBG_FUNC_NONE, (uint32_t)cond, 0xcececece, pseq, flags, 0);
#endif /* _PSYNCH_TRACE_ */

    if ((flags & _PTHREAD_MTX_OPT_MUTEX) == 0) {
        error = ksyn_wqfind(cond, cgen, cugen, csgen, 0, flags, (KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INDROP), &ckwq);
        if (error != 0) {
            *retval = 0;
#if _PSYNCH_TRACE_
            __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CLRPRE | DBG_FUNC_END, (uint32_t)cond, 0, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
            return(error);
        }

        ksyn_wqlock(ckwq);
        (void)ksyn_queue_move_tofree(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], (pseq & PTHRW_COUNT_MASK), &kfreeq, 0, 1);
        ksyn_wqunlock(ckwq);
        ksyn_wqrelease(ckwq, NULL, 1, (KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INDROP));
    } else {
        /* mutex type */
        error = ksyn_wqfind(cond, cgen, cugen, 0, 0, flags, (KSYN_WQTYPE_MTX | KSYN_WQTYPE_INDROP), &ckwq);
        if (error != 0) {
            *retval = 0;
#if _PSYNCH_TRACE_
            __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CLRPRE | DBG_FUNC_END, (uint32_t)cond, 0, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
            return(error);
        }

        ksyn_wqlock(ckwq);
        if (((flags & _PTHREAD_MUTEX_POLICY_FIRSTFIT) != 0) && (ckwq->kw_pre_rwwc != 0)) {
            if (is_seqlower_eq(ckwq->kw_pre_lockseq, cgen) != 0) {
                /* clear prepost */
                ckwq->kw_pre_rwwc = 0;
                ckwq->kw_pre_lockseq = 0;
            }
        }
        ksyn_wqunlock(ckwq);
        ksyn_wqrelease(ckwq, NULL, 1, (KSYN_WQTYPE_MTX | KSYN_WQTYPE_INDROP));
    }

#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CLRPRE | DBG_FUNC_END, (uint32_t)cond, 0xeeeeeeed, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
    return(0);
}
1570
1571 /* ***************** pthread_rwlock ************************ */
1572 /*
1573 * psynch_rw_rdlock: This system call is used by psynch rwlock readers to block.
1574 */
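/*
 * The reader path below proceeds in stages: (1) consume a missed wakeup
 * (kw_pre_intrcount) if one covers this sequence; (2) try the read-overlap
 * fast path when no writer (W bit) is pending; (3) consume a prepost
 * (kw_pre_rwwc), which may grant the lock without blocking; otherwise
 * (4) enqueue on KSYN_QUEUE_READ and block.
 */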
1575 int
1576 psynch_rw_rdlock(__unused proc_t p, struct psynch_rw_rdlock_args * uap, uint32_t * retval)
1577 {
1578 user_addr_t rwlock = uap->rwlock;
1579 uint32_t lgen = uap->lgenval;
1580 uint32_t ugen = uap->ugenval;
1581 uint32_t rw_wc = uap->rw_wc;
1582 //uint64_t tid = uap->tid;
1583 int flags = uap->flags;
1584 int error = 0, block;
1585 uint32_t lockseq = 0, updatebits = 0, preseq = 0, prerw_wc = 0;
1586 ksyn_wait_queue_t kwq;
1587 uthread_t uth;
1588 int isinit = lgen & PTHRW_RWL_INIT;
1589 uint32_t returnbits = 0;
1590 ksyn_waitq_element_t kwe;
1591 kern_return_t kret;
1592
1593 #if _PSYNCH_TRACE_
1594 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
1595 #endif /* _PSYNCH_TRACE_ */
1596 uth = current_uthread();
1597
1598 /* preserve the seq number */
1599 kwe = &uth->uu_kwe;
1600 kwe->kwe_lockseq = lgen;
1601 kwe->kwe_uth = uth;
1602 kwe->kwe_psynchretval = 0;
1603 kwe->kwe_kwqqueue = NULL;
1604
1605 lockseq = lgen & PTHRW_COUNT_MASK;
1606
1607
1608 error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
1609 if (error != 0) {
1610 #if _PSYNCH_TRACE_
1611 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
1612 #endif /* _PSYNCH_TRACE_ */
1613 return(error);
1614 }
1615
1616 ksyn_wqlock(kwq);
1617
1618 if (isinit != 0) {
1619 lgen &= ~PTHRW_RWL_INIT;
1620 if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
1621 /* first to notice the reset of the lock, clear preposts */
1622 CLEAR_REINIT_BITS(kwq);
1623 kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
1624 #if _PSYNCH_TRACE_
1625 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
1626 #endif /* _PSYNCH_TRACE_ */
1627 }
1628 }
1629
1630 /* handle the missed wakeups first */
1631 if ((kwq->kw_pre_intrcount != 0) &&
1632 ((kwq->kw_pre_intrtype == PTH_RW_TYPE_READ) || (kwq->kw_pre_intrtype == PTH_RW_TYPE_LREAD)) &&
1633 (is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {
1634
1635 kwq->kw_pre_intrcount--;
1636 kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
1637 if (kwq->kw_pre_intrcount==0)
1638 CLEAR_INTR_PREPOST_BITS(kwq);
1639 ksyn_wqunlock(kwq);
1640 goto out;
1641 }
1642
1643 /* handle overlap first, as overlaps are not counted against pre_rwwc */
1644
1645 /* check for overlap when there is no pending W bit (which indicates waiting writers) */
1646 if ((kwq->kw_overlapwatch != 0) && ((rw_wc & PTHRW_RWS_SAVEMASK) == 0) && ((lgen & PTH_RWL_WBIT) == 0)) {
1647 #if _PSYNCH_TRACE_
1648 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 10, kwq->kw_nextseqword, kwq->kw_lastseqword, 0);
1649 #endif /* _PSYNCH_TRACE_ */
1650 error = kwq_handle_overlap(kwq, lgen, ugen, rw_wc, &updatebits, (KW_UNLOCK_PREPOST_READLOCK|KW_UNLOCK_PREPOST), &block);
1651 #if __TESTPANICS__
1652 if (error != 0)
1653 panic("rw_rdlock: kwq_handle_overlap failed %d\n",error);
1654 #endif /* __TESTPANICS__ */
1655 if (block == 0) {
1656 error = 0;
1657 kwe->kwe_psynchretval = updatebits;
1658 #if _PSYNCH_TRACE_
1659 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0xff, updatebits, 0xee, 0);
1660 #endif /* _PSYNCH_TRACE_ */
1661 ksyn_wqunlock(kwq);
1662 goto out;
1663 }
1664 }
1665
1666 if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
1667 #if _PSYNCH_TRACE_
1668 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
1669 #endif /* _PSYNCH_TRACE_ */
1670 kwq->kw_pre_rwwc--;
1671 if (kwq->kw_pre_rwwc == 0) {
1672 preseq = kwq->kw_pre_lockseq;
1673 prerw_wc = kwq->kw_pre_sseq;
1674 CLEAR_PREPOST_BITS(kwq);
1675 if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0){
1676 kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
1677 #if _PSYNCH_TRACE_
1678 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
1679 #endif /* _PSYNCH_TRACE_ */
1680 }
1681 error = kwq_handle_unlock(kwq, preseq, prerw_wc, &updatebits, (KW_UNLOCK_PREPOST_READLOCK|KW_UNLOCK_PREPOST), &block, lgen);
1682 #if __TESTPANICS__
1683 if (error != 0)
1684 panic("rw_rdlock: kwq_handle_unlock failed %d\n",error);
1685 #endif /* __TESTPANICS__ */
1686 if (block == 0) {
1687 ksyn_wqunlock(kwq);
1688 goto out;
1689 }
1690 /* insert into the queue and proceed as usual */
1691 }
1692 }
1693
1694
1695 #if _PSYNCH_TRACE_
1696 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
1697 #endif /* _PSYNCH_TRACE_ */
1698 error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_READ], lgen, uth, kwe, SEQFIT);
1699 #if __TESTPANICS__
1700 if (error != 0)
1701 panic("psynch_rw_rdlock: failed to enqueue\n");
1702 #endif /* __TESTPANICS__ */
1703 kret = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0, THREAD_CONTINUE_NULL, NULL);
1704 /* drops the kwq lock */
1705 switch (kret) {
1706 case THREAD_TIMED_OUT:
1707 error = ETIMEDOUT;
1708 break;
1709 case THREAD_INTERRUPTED:
1710 error = EINTR;
1711 break;
1712 default:
1713 error = 0;
1714 break;
1715 }
1716
1717 out:
1718 if (error != 0) {
1719 #if _PSYNCH_TRACE_
1720 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 4, error, 0, 0);
1721 #endif /* _PSYNCH_TRACE_ */
1722 ksyn_wqlock(kwq);
1723 if (kwe->kwe_kwqqueue != NULL)
1724 ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_READ], kwe);
1725 ksyn_wqunlock(kwq);
1726 } else {
1727 /* update bits */
1728 *retval = kwe->kwe_psynchretval;
1729 returnbits = kwe->kwe_psynchretval;
1730 }
1731 ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK));
1732 #if _PSYNCH_TRACE_
1733 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, returnbits, error, 0);
1734 #endif /* _PSYNCH_TRACE_ */
1735 return(error);
1736 }
1737
1738 /*
1739 * psynch_rw_longrdlock: This system call is used by psynch rwlock long readers to block.
1740 */
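/*
 * Same shape as psynch_rw_rdlock minus the overlap fast path: consume a
 * missed wakeup or prepost that covers this sequence, else enqueue on
 * KSYN_QUEUE_LREAD and block.
 */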
1741 int
1742 psynch_rw_longrdlock(__unused proc_t p, __unused struct psynch_rw_longrdlock_args * uap, __unused uint32_t * retval)
1743 {
1744 user_addr_t rwlock = uap->rwlock;
1745 uint32_t lgen = uap->lgenval;
1746 uint32_t ugen = uap->ugenval;
1747 uint32_t rw_wc = uap->rw_wc;
1748 //uint64_t tid = uap->tid;
1749 int flags = uap->flags;
1750 int isinit = lgen & PTHRW_RWL_INIT;
1751 uint32_t returnbits=0;
1752 ksyn_waitq_element_t kwe;
1753 kern_return_t kret;
1754
1755 ksyn_wait_queue_t kwq;
1756 int error=0, block = 0 ;
1757 uthread_t uth;
1758 uint32_t lockseq = 0, updatebits = 0, preseq = 0, prerw_wc = 0;
1759
1760 #if _PSYNCH_TRACE_
1761 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
1762 #endif /* _PSYNCH_TRACE_ */
1763 uth = current_uthread();
1764 kwe = &uth->uu_kwe;
1765 kwe->kwe_lockseq = lgen;
1766 kwe->kwe_uth = uth;
1767 kwe->kwe_psynchretval = 0;
1768 kwe->kwe_kwqqueue = NULL;
1769 lockseq = (lgen & PTHRW_COUNT_MASK);
1770
1771 error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
1772 if (error != 0) {
1773 #if _PSYNCH_TRACE_
1774 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
1775 #endif /* _PSYNCH_TRACE_ */
1776 return(error);
1777 }
1778
1779 ksyn_wqlock(kwq);
1780
1781 if (isinit != 0) {
1782 lgen &= ~PTHRW_RWL_INIT;
1783 if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
1784 /* first to notice the reset of the lock, clear preposts */
1785 CLEAR_REINIT_BITS(kwq);
1786 kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
1787 #if _PSYNCH_TRACE_
1788 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
1789 #endif /* _PSYNCH_TRACE_ */
1790 }
1791 }
1792
1793 /* handle the missed wakeups first */
1794 if ((kwq->kw_pre_intrcount != 0) &&
1795 (kwq->kw_pre_intrtype == PTH_RW_TYPE_LREAD) &&
1796 (is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {
1797
1798 kwq->kw_pre_intrcount--;
1799 kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
1800 if (kwq->kw_pre_intrcount==0)
1801 CLEAR_INTR_PREPOST_BITS(kwq);
1802 ksyn_wqunlock(kwq);
1803 goto out;
1804 }
1805
1806
1807 if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
1808 #if _PSYNCH_TRACE_
1809 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
1810 #endif /* _PSYNCH_TRACE_ */
1811 kwq->kw_pre_rwwc--;
1812 if (kwq->kw_pre_rwwc == 0) {
1813 preseq = kwq->kw_pre_lockseq;
1814 prerw_wc = kwq->kw_pre_sseq;
1815 CLEAR_PREPOST_BITS(kwq);
1816 if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0){
1817 kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
1818 #if _PSYNCH_TRACE_
1819 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
1820 #endif /* _PSYNCH_TRACE_ */
1821 }
1822 error = kwq_handle_unlock(kwq, preseq, prerw_wc, &updatebits, (KW_UNLOCK_PREPOST_LREADLOCK|KW_UNLOCK_PREPOST), &block, lgen);
1823 #if __TESTPANICS__
1824 if (error != 0)
1825 panic("kwq_handle_unlock failed %d\n",error);
1826 #endif /* __TESTPANICS__ */
1827 if (block == 0) {
1828 ksyn_wqunlock(kwq);
1829 goto out;
1830 }
1831 /* insert into the queue and proceed as usual */
1832 }
1833 }
1834
1835 #if _PSYNCH_TRACE_
1836 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
1837 #endif /* _PSYNCH_TRACE_ */
1838 error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_LREAD], lgen, uth, kwe, SEQFIT);
1839 #if __TESTPANICS__
1840 if (error != 0)
1841 panic("psynch_rw_longrdlock: failed to enqueue\n");
1842 #endif /* __TESTPANICS__ */
1843
1844 kret = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0, THREAD_CONTINUE_NULL, NULL);
1845 /* drops the kwq lock */
1846 switch (kret) {
1847 case THREAD_TIMED_OUT:
1848 error = ETIMEDOUT;
1849 break;
1850 case THREAD_INTERRUPTED:
1851 error = EINTR;
1852 break;
1853 default:
1854 error = 0;
1855 break;
1856 }
1857 out:
1858 if (error != 0) {
1859 #if _PSYNCH_TRACE_
1860 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
1861 #endif /* _PSYNCH_TRACE_ */
1862 ksyn_wqlock(kwq);
1863 if (kwe->kwe_kwqqueue != NULL)
1864 ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_LREAD], kwe);
1865 ksyn_wqunlock(kwq);
1866 } else {
1867 /* update bits */
1868 *retval = kwe->kwe_psynchretval;
1869 returnbits = kwe->kwe_psynchretval;
1870 }
1871
1872 ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK));
1873
1874 #if _PSYNCH_TRACE_
1875 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0, returnbits, error, 0);
1876 #endif /* _PSYNCH_TRACE_ */
1877 return(error);
1878 }
1879
1880 /*
1881 * psynch_rw_wrlock: This system call is used by psynch rwlock writers to block.
1882 */
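/*
 * Writer path: consume a missed wakeup or a prepost covering this sequence
 * (a prepost may grant the lock immediately via kwq_handle_unlock()), else
 * enqueue on KSYN_QUEUE_WRITER and block until granted.
 */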
1883 int
1884 psynch_rw_wrlock(__unused proc_t p, struct psynch_rw_wrlock_args * uap, uint32_t * retval)
1885 {
1886 user_addr_t rwlock = uap->rwlock;
1887 uint32_t lgen = uap->lgenval;
1888 uint32_t ugen = uap->ugenval;
1889 uint32_t rw_wc = uap->rw_wc;
1890 //uint64_t tid = uap->tid;
1891 int flags = uap->flags;
1892 int block;
1893 ksyn_wait_queue_t kwq;
1894 int error=0;
1895 uthread_t uth;
1896 uint32_t lockseq = 0, updatebits = 0, preseq = 0, prerw_wc = 0;
1897 int isinit = lgen & PTHRW_RWL_INIT;
1898 uint32_t returnbits = 0;
1899 ksyn_waitq_element_t kwe;
1900 kern_return_t kret;
1901
1902 #if _PSYNCH_TRACE_
1903 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
1904 #endif /* _PSYNCH_TRACE_ */
1905 uth = current_uthread();
1906 kwe = &uth->uu_kwe;
1907 kwe->kwe_lockseq = lgen;
1908 kwe->kwe_uth = uth;
1909 kwe->kwe_psynchretval = 0;
1910 kwe->kwe_kwqqueue = NULL;
1911 lockseq = (lgen & PTHRW_COUNT_MASK);
1912
1913 error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
1914 if (error != 0) {
1915 #if _PSYNCH_TRACE_
1916 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
1917 #endif /* _PSYNCH_TRACE_ */
1918 return(error);
1919 }
1920
1921 ksyn_wqlock(kwq);
1922
1923
1924 if (isinit != 0) {
1925 lgen &= ~PTHRW_RWL_INIT;
1926 if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
1927 /* first to notice the reset of the lock, clear preposts */
1928 CLEAR_REINIT_BITS(kwq);
1929 kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
1930 #if _PSYNCH_TRACE_
1931 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
1932 #endif /* _PSYNCH_TRACE_ */
1933 }
1934 }
1935
1936
1937 /* handle the missed wakeups first */
1938 if ((kwq->kw_pre_intrcount != 0) &&
1939 (kwq->kw_pre_intrtype == PTH_RW_TYPE_WRITE) &&
1940 (is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {
1941
1942 kwq->kw_pre_intrcount--;
1943 kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
1944 if (kwq->kw_pre_intrcount==0)
1945 CLEAR_INTR_PREPOST_BITS(kwq);
1946 ksyn_wqunlock(kwq);
1947 goto out;
1948 }
1949
1950
1951 if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
1952 #if _PSYNCH_TRACE_
1953 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
1954 #endif /* _PSYNCH_TRACE_ */
1955 kwq->kw_pre_rwwc--;
1956 if (kwq->kw_pre_rwwc == 0) {
1957 preseq = kwq->kw_pre_lockseq;
1958 prerw_wc = kwq->kw_pre_sseq;
1959 CLEAR_PREPOST_BITS(kwq);
1960 if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0){
1961 kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
1962 #if _PSYNCH_TRACE_
1963 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
1964 #endif /* _PSYNCH_TRACE_ */
1965 }
1966 error = kwq_handle_unlock(kwq, preseq, prerw_wc, &updatebits, (KW_UNLOCK_PREPOST_WRLOCK|KW_UNLOCK_PREPOST), &block, lgen);
1967 #if __TESTPANICS__
1968 if (error != 0)
1969 panic("rw_wrlock: kwq_handle_unlock failed %d\n",error);
1970 #endif /* __TESTPANICS__ */
1971 if (block == 0) {
1972 ksyn_wqunlock(kwq);
1973 *retval = updatebits;
1974 goto out1;
1975 }
1976 /* insert into the queue and proceed as usual */
1977 }
1978 }
1979
1980 /* No overlap watch needed; go ahead and block */
1981
1982 #if _PSYNCH_TRACE_
1983 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
1984 #endif /* _PSYNCH_TRACE_ */
1985 error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], lgen, uth, kwe, SEQFIT);
1986 #if __TESTPANICS__
1987 if (error != 0)
1988 panic("psynch_rw_wrlock: failed to enqueue\n");
1989 #endif /* __TESTPANICS__ */
1990
1991 kret = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0, THREAD_CONTINUE_NULL, NULL);
1992 /* drops the wq lock */
1993 switch (kret) {
1994 case THREAD_TIMED_OUT:
1995 error = ETIMEDOUT;
1996 break;
1997 case THREAD_INTERRUPTED:
1998 error = EINTR;
1999 break;
2000 default:
2001 error = 0;
2002 break;
2003 }
2004
2005 out:
2006 if (error != 0) {
2007 #if _PSYNCH_TRACE_
2008 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 4, error, 0, 0);
2009 #endif /* _PSYNCH_TRACE_ */
2010 ksyn_wqlock(kwq);
2011 if (kwe->kwe_kwqqueue != NULL)
2012 ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwe);
2013 ksyn_wqunlock(kwq);
2014 } else {
2015 /* update bits */
2016 *retval = kwe->kwe_psynchretval;
2017 returnbits = kwe->kwe_psynchretval;
2018 }
2019 out1:
2020 ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK));
2021
2022 #if _PSYNCH_TRACE_
2023 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, returnbits, error, 0);
2024 #endif /* _PSYNCH_TRACE_ */
2025 return(error);
2026 }
2027
2028 /*
2029 * psynch_rw_yieldwrlock: This system call is used by psynch rwlock yielding writers to block.
2030 */
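/*
 * Structurally identical to psynch_rw_wrlock, but waits on
 * KSYN_QUEUE_YWRITER so yielding writers are tracked and granted as their
 * own class (PTH_RW_TYPE_YWRITE).
 */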
2031 int
2032 psynch_rw_yieldwrlock(__unused proc_t p, __unused struct psynch_rw_yieldwrlock_args * uap, __unused uint32_t * retval)
2033 {
2034 user_addr_t rwlock = uap->rwlock;
2035 uint32_t lgen = uap->lgenval;
2036 uint32_t ugen = uap->ugenval;
2037 uint32_t rw_wc = uap->rw_wc;
2038 //uint64_t tid = uap->tid;
2039 int flags = uap->flags;
2040 int block;
2041 ksyn_wait_queue_t kwq;
2042 int error=0;
2043 int isinit = lgen & PTHRW_RWL_INIT;
2044 uthread_t uth;
2045 uint32_t returnbits=0;
2046 ksyn_waitq_element_t kwe;
2047 kern_return_t kret;
2048
2049 #if _PSYNCH_TRACE_
2050 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
2051 #endif /* _PSYNCH_TRACE_ */
2052 uint32_t lockseq = 0, updatebits = 0, preseq = 0, prerw_wc = 0;
2053
2054 uth = current_uthread();
2055 kwe = &uth->uu_kwe;
2056 kwe->kwe_lockseq = lgen;
2057 kwe->kwe_uth = uth;
2058 kwe->kwe_psynchretval = 0;
2059 kwe->kwe_kwqqueue = NULL;
2060 lockseq = (lgen & PTHRW_COUNT_MASK);
2061
2062 error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
2063 if (error != 0) {
2064 #if _PSYNCH_TRACE_
2065 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
2066 #endif /* _PSYNCH_TRACE_ */
2067 return(error);
2068 }
2069
2070 ksyn_wqlock(kwq);
2071
2072 if (isinit != 0) {
2073 lgen &= ~PTHRW_RWL_INIT;
2074 if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
2075 /* first to notice the reset of the lock, clear preposts */
2076 CLEAR_REINIT_BITS(kwq);
2077 kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
2078 #if _PSYNCH_TRACE_
2079 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
2080 #endif /* _PSYNCH_TRACE_ */
2081 }
2082 }
2083
2084 /* handle the missed wakeups first */
2085 if ((kwq->kw_pre_intrcount != 0) &&
2086 (kwq->kw_pre_intrtype == PTH_RW_TYPE_YWRITE) &&
2087 (is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {
2088
2089 kwq->kw_pre_intrcount--;
2090 kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
2091 if (kwq->kw_pre_intrcount==0)
2092 CLEAR_INTR_PREPOST_BITS(kwq);
2093 ksyn_wqunlock(kwq);
2094 goto out;
2095 }
2096
2097 if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
2098 #if _PSYNCH_TRACE_
2099 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
2100 #endif /* _PSYNCH_TRACE_ */
2101 kwq->kw_pre_rwwc--;
2102 if (kwq->kw_pre_rwwc == 0) {
2103 preseq = kwq->kw_pre_lockseq;
2104 prerw_wc = kwq->kw_pre_sseq;
2105 CLEAR_PREPOST_BITS(kwq);
2106 if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0){
2107 kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
2108 #if _PSYNCH_TRACE_
2109 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
2110 #endif /* _PSYNCH_TRACE_ */
2111 }
2112 error = kwq_handle_unlock(kwq, preseq, prerw_wc, &updatebits, (KW_UNLOCK_PREPOST_YWRLOCK|KW_UNLOCK_PREPOST), &block, lgen);
2113 #if __TESTPANICS__
2114 if (error != 0)
2115 panic("kwq_handle_unlock failed %d\n",error);
2116 #endif /* __TESTPANICS__ */
2117 if (block == 0) {
2118 ksyn_wqunlock(kwq);
2119 *retval = updatebits;
2120 goto out;
2121 }
2122 /* insert into the queue and proceed as usual */
2123 }
2124 }
2125
2126 #if _PSYNCH_TRACE_
2127 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
2128 #endif /* _PSYNCH_TRACE_ */
2129 error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER], lgen, uth, kwe, SEQFIT);
2130 #if __TESTPANICS__
2131 if (error != 0)
2132 panic("psynch_rw_yieldwrlock: failed to enqueue\n");
2133 #endif /* __TESTPANICS__ */
2134
2135 kret = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0, THREAD_CONTINUE_NULL, NULL);
2136 switch (kret) {
2137 case THREAD_TIMED_OUT:
2138 error = ETIMEDOUT;
2139 break;
2140 case THREAD_INTERRUPTED:
2141 error = EINTR;
2142 break;
2143 default:
2144 error = 0;
2145 break;
2146 }
2147
2148 out:
2149 if (error != 0) {
2150 #if _PSYNCH_TRACE_
2151 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 4, error, 0, 0);
2152 #endif /* _PSYNCH_TRACE_ */
2153 ksyn_wqlock(kwq);
2154 if (kwe->kwe_kwqqueue != NULL)
2155 ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER], kwe);
2156 ksyn_wqunlock(kwq);
2157 } else {
2158 /* update bits */
2159 *retval = kwe->kwe_psynchretval;
2160 returnbits = kwe->kwe_psynchretval;
2161 }
2162
2163 ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_RWLOCK));
2164
2165 #if _PSYNCH_TRACE_
2166 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, returnbits, error, 0);
2167 #endif /* _PSYNCH_TRACE_ */
2168 return(error);
2169 }
2170
2171 #if NOTYET
2172 /*
2173 * psynch_rw_downgrade: This system call wakes up blocked readers that become eligible to run after a downgrade.
2174 */
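/*
 * Sketch of the flow (this path is compiled out under NOTYET): if not all
 * waiters implied by L-U have arrived yet, record a prepost and return;
 * otherwise clear prepost state and let kwq_handle_downgrade() wake the
 * eligible readers.
 */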
2175 int
2176 psynch_rw_downgrade(__unused proc_t p, struct psynch_rw_downgrade_args * uap, __unused int * retval)
2177 {
2178 user_addr_t rwlock = uap->rwlock;
2179 uint32_t lgen = uap->lgenval;
2180 uint32_t ugen = uap->ugenval;
2181 uint32_t rw_wc = uap->rw_wc;
2182 //uint64_t tid = uap->tid;
2183 int flags = uap->flags;
2184 uint32_t count = 0;
2185 int isinit = lgen & PTHRW_RWL_INIT;
2186 ksyn_wait_queue_t kwq;
2187 int error=0, diff;
2188 uthread_t uth;
2189 uint32_t curgen = 0;
2190
2191 #if _PSYNCH_TRACE_
2192 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
2193 #endif /* _PSYNCH_TRACE_ */
2194 uth = current_uthread();
2195
2196 curgen = (lgen & PTHRW_COUNT_MASK);
2197
2198 error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_RWLOCK), &kwq);
2199 if (error != 0) {
2200 #if _PSYNCH_TRACE_
2201 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
2202 #endif /* _PSYNCH_TRACE_ */
2203 return(error);
2204 }
2205
2206 ksyn_wqlock(kwq);
2207
2208 if ((lgen & PTHRW_RWL_INIT) != 0) {
2209 lgen &= ~PTHRW_RWL_INIT;
2210 if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0){
2211 CLEAR_REINIT_BITS(kwq);
2212 kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
2213 #if _PSYNCH_TRACE_
2214 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
2215 #endif /* _PSYNCH_TRACE_ */
2216 }
2217 isinit = 1;
2218 }
2219
2220 /* if lastunlock seq is set, ensure the current one is not lower than that, as it would be spurious */
2221 if ((kwq->kw_lastunlockseq != PTHRW_RWL_INIT) && (is_seqlower(ugen, kwq->kw_lastunlockseq)!= 0)) {
2222 /* spurious updatebits?? */
2223 error = 0;
2224 goto out;
2225 }
2226
2227
2228
2229 /* If L-U != num of waiters, then it needs to be preposted or spurious */
2230 diff = find_diff(lgen, ugen);
2231 /* take count of the downgrade thread itself */
2232 diff--;
2233
2234
2235 #if _PSYNCH_TRACE_
2236 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 1, kwq->kw_inqueue, curgen, 0);
2237 #endif /* _PSYNCH_TRACE_ */
2238 if (find_seq_till(kwq, curgen, diff, &count) == 0) {
2239 if (count < (uint32_t)diff)
2240 goto prepost;
2241 }
2242
2243 /* no prepost and all threads are in place, reset the bit */
2244 if ((isinit != 0) && ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0)){
2245 kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
2246 #if _PSYNCH_TRACE_
2247 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
2248 #endif /* _PSYNCH_TRACE_ */
2249 }
2250
2251 /* can handle unlock now */
2252
2253 CLEAR_PREPOST_BITS(kwq);
2254
2255 dounlock:
2256 #if _PSYNCH_TRACE_
2257 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
2258 #endif /* _PSYNCH_TRACE_ */
2259 error = kwq_handle_downgrade(kwq, lgen, 0, 0, NULL);
2260
2261 #if __TESTPANICS__
2262 if (error != 0)
2263 panic("psynch_rw_downgrade: failed to wakeup\n");
2264 #endif /* __TESTPANICS__ */
2265
2266 out:
2267 ksyn_wqunlock(kwq);
2268 #if _PSYNCH_TRACE_
2269 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_END, (uint32_t)rwlock, 0, 0, error, 0);
2270 #endif /* _PSYNCH_TRACE_ */
2271 ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_RWLOCK));
2272
2273 return(error);
2274
2275 prepost:
2276 kwq->kw_pre_rwwc = (rw_wc - count);
2277 kwq->kw_pre_lockseq = lgen;
2278 #if _PSYNCH_TRACE_
2279 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 1, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
2280 #endif /* _PSYNCH_TRACE_ */
2281 error = 0;
2282 goto out;
2283 }
2284
2285
2286 /*
2287 * psynch_rw_upgrade: This system call is used by a reader to block while waiting for an upgrade to be granted.
2288 */
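/*
 * Also compiled out under NOTYET: mirrors the reader block path, but waits
 * on KSYN_QUEUE_UPGRADE so a pending upgrade can be granted separately by
 * kwq_handle_unlock().
 */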
2289 int
2290 psynch_rw_upgrade(__unused proc_t p, struct psynch_rw_upgrade_args * uap, uint32_t * retval)
2291 {
2292 user_addr_t rwlock = uap->rwlock;
2293 uint32_t lgen = uap->lgenval;
2294 uint32_t ugen = uap->ugenval;
2295 uint32_t rw_wc = uap->rw_wc;
2296 //uint64_t tid = uap->tid;
2297 int flags = uap->flags;
2298 int block;
2299 ksyn_wait_queue_t kwq;
2300 int error=0;
2301 uthread_t uth;
2302 uint32_t lockseq = 0, updatebits = 0, preseq = 0, prerw_wc = 0;
2303 int isinit = lgen & PTHRW_RWL_INIT;
2304 ksyn_waitq_element_t kwe;
2305 kern_return_t kret;
2306
2307 #if _PSYNCH_TRACE_
2308 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
2309 #endif /* _PSYNCH_TRACE_ */
2310 uth = current_uthread();
2311 kwe = &uth->uu_kwe;
2312 kwe->kwe_lockseq = lgen;
2313 kwe->kwe_uth = uth;
2314 kwe->kwe_psynchretval = 0;
2315 kwe->kwe_kwqqueue = NULL;
2316 lockseq = (lgen & PTHRW_COUNT_MASK);
2317
2318 error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_RWLOCK), &kwq);
2319 if (error != 0) {
2320 #if _PSYNCH_TRACE_
2321 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
2322 #endif /* _PSYNCH_TRACE_ */
2323 return(error);
2324 }
2325
2326 ksyn_wqlock(kwq);
2327
2328 if (isinit != 0) {
2329 lgen &= ~PTHRW_RWL_INIT;
2330 if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
2331 /* first to notice the reset of the lock, clear preposts */
2332 CLEAR_REINIT_BITS(kwq);
2333 kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
2334 #if _PSYNCH_TRACE_
2335 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
2336 #endif /* _PSYNCH_TRACE_ */
2337 }
2338 }
2339
2340 /* handle the missed wakeups first */
2341 if ((kwq->kw_pre_intrcount != 0) &&
2342 ((kwq->kw_pre_intrtype == PTH_RW_TYPE_READ) || (kwq->kw_pre_intrtype == PTH_RW_TYPE_LREAD)) &&
2343 (is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {
2344
2345 kwq->kw_pre_intrcount--;
2346 kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
2347 if (kwq->kw_pre_intrcount==0)
2348 CLEAR_INTR_PREPOST_BITS(kwq);
2349 ksyn_wqunlock(kwq);
2350 goto out;
2351 }
2352
2353 if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
2354 #if _PSYNCH_TRACE_
2355 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
2356 #endif /* _PSYNCH_TRACE_ */
2357 kwq->kw_pre_rwwc--;
2358 if (kwq->kw_pre_rwwc == 0) {
2359 preseq = kwq->kw_pre_lockseq;
2360 prerw_wc = kwq->kw_pre_sseq;
2361 CLEAR_PREPOST_BITS(kwq);
2362 if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0){
2363 kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
2364 #if _PSYNCH_TRACE_
2365 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
2366 #endif /* _PSYNCH_TRACE_ */
2367 }
2368 error = kwq_handle_unlock(kwq, preseq, prerw_wc, &updatebits, (KW_UNLOCK_PREPOST_UPGRADE|KW_UNLOCK_PREPOST), &block, lgen);
2369 #if __TESTPANICS__
2370 if (error != 0)
2371 panic("rw_rdlock: kwq_handle_unlock failed %d\n",error);
2372 #endif /* __TESTPANICS__ */
2373 if (block == 0) {
2374 ksyn_wqunlock(kwq);
2375 goto out;
2376 }
2377 /* insert into the queue and proceed as usual */
2378 }
2379 }
2380
2381
2382 #if _PSYNCH_TRACE_
2383 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
2384 #endif /* _PSYNCH_TRACE_ */
2385 error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_UPGRADE], lgen, uth, kwe, SEQFIT);
2386 #if __TESTPANICS__
2387 if (error != 0)
2388 panic("psynch_rw_upgrade: failed to enqueue\n");
2389 #endif /* __TESTPANICS__ */
2390
2391
2392 kret = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0, THREAD_CONTINUE_NULL, NULL);
2393 /* drops the lock */
2394 switch (kret) {
2395 case THREAD_TIMED_OUT:
2396 error = ETIMEDOUT;
2397 break;
2398 case THREAD_INTERRUPTED:
2399 error = EINTR;
2400 break;
2401 default:
2402 error = 0;
2403 break;
2404 }
2405
2406 out:
2407 if (error != 0) {
2408 #if _PSYNCH_TRACE_
2409 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 4, error, 0, 0);
2410 #endif /* _PSYNCH_TRACE_ */
2411 ksyn_wqlock(kwq);
2412 if (kwe->kwe_kwqqueue != NULL)
2413 ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_UPGRADE], kwe);
2414 ksyn_wqunlock(kwq);
2415 } else {
2416 /* update bits */
2417 *retval = kwe->kwe_psynchretval;
2418 }
2419
2420 ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_RWLOCK));
2421 #if _PSYNCH_TRACE_
2422 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
2423 #endif /* _PSYNCH_TRACE_ */
2424
2425 return(error);
2426 }
2427
2428 #else /* NOTYET */
2429 int
2430 psynch_rw_upgrade(__unused proc_t p, __unused struct psynch_rw_upgrade_args * uap, __unused uint32_t * retval)
2431 {
2432 return(0);
2433 }
2434 int
2435 psynch_rw_downgrade(__unused proc_t p, __unused struct psynch_rw_downgrade_args * uap, __unused int * retval)
2436 {
2437 return(0);
2438 }
2439 #endif /* NOTYET */
2440 /*
2441 * psynch_rw_unlock: This system call is used for unlock state postings. This will grant the
2442 * appropriate reader/writer lock.
2443 */
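/*
 * The updatebits value returned to userspace encodes the next grant. When
 * fewer waiters than L-U have checked in (see find_seq_till() below), the
 * unlock is preposted instead and consumed later as the stragglers arrive.
 */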
2444
2445 int
2446 psynch_rw_unlock(__unused proc_t p, struct psynch_rw_unlock_args * uap, uint32_t * retval)
2447 {
2448 user_addr_t rwlock = uap->rwlock;
2449 uint32_t lgen = uap->lgenval;
2450 uint32_t ugen = uap->ugenval;
2451 uint32_t rw_wc = uap->rw_wc;
2452 uint32_t curgen;
2453 //uint64_t tid = uap->tid;
2454 int flags = uap->flags;
2455 uthread_t uth;
2456 ksyn_wait_queue_t kwq;
2457 uint32_t updatebits = 0;
2458 int error=0, diff;
2459 uint32_t count = 0;
2460 int isinit = 0;
2461
2462
2463 #if _PSYNCH_TRACE_
2464 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
2465 #endif /* _PSYNCH_TRACE_ */
2466 uth = current_uthread();
2467
2468 error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_RWLOCK), &kwq);
2469 if (error != 0) {
2470 #if _PSYNCH_TRACE_
2471 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
2472 #endif /* _PSYNCH_TRACE_ */
2473 return(error);
2474 }
2475
2476 curgen = lgen & PTHRW_COUNT_MASK;
2477
2478 ksyn_wqlock(kwq);
2479
2480 if ((lgen & PTHRW_RWL_INIT) != 0) {
2481 lgen &= ~PTHRW_RWL_INIT;
2482 if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0){
2483 CLEAR_REINIT_BITS(kwq);
2484 kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
2485 #if _PSYNCH_TRACE_
2486 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
2487 #endif /* _PSYNCH_TRACE_ */
2488 }
2489 isinit = 1;
2490 }
2491
2492 /* if lastunlock seq is set, ensure the current one is not lower than that, as it would be spurious */
2493 if ((kwq->kw_lastunlockseq != PTHRW_RWL_INIT) && (is_seqlower(ugen, kwq->kw_lastunlockseq)!= 0)) {
2494 #if _PSYNCH_TRACE_
2495 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, (uint32_t)0xeeeeeeee, rw_wc, kwq->kw_lastunlockseq, 0);
2496 #endif /* _PSYNCH_TRACE_ */
2497 error = 0;
2498 goto out;
2499 }
2500
2501 /* If L-U != num of waiters, then it needs to be preposted or spurious */
2502 diff = find_diff(lgen, ugen);
2503
2504 #if _PSYNCH_TRACE_
2505 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 1, kwq->kw_inqueue, curgen, 0);
2506 #endif /* _PSYNCH_TRACE_ */
2507 if (find_seq_till(kwq, curgen, diff, &count) == 0) {
2508 if ((count == 0) || (count < (uint32_t)diff))
2509 goto prepost;
2510 }
2511
2512 /* no prepost and all threads are in place, reset the bit */
2513 if ((isinit != 0) && ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0)){
2514 kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
2515 #if _PSYNCH_TRACE_
2516 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
2517 #endif /* _PSYNCH_TRACE_ */
2518 }
2519
2520 /* can handle unlock now */
2521
2522 CLEAR_PREPOST_BITS(kwq);
2523
2524 #if _PSYNCH_TRACE_
2525 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, 0, 0, 0);
2526 #endif /* _PSYNCH_TRACE_ */
2527 error = kwq_handle_unlock(kwq, lgen, rw_wc, &updatebits, 0, NULL, 0);
2528 #if __TESTPANICS__
2529 if (error != 0)
2530 panic("psynch_rw_unlock: kwq_handle_unlock failed %d\n",error);
2531 #endif /* __TESTPANICS__ */
2532 out:
2533 if (error == 0) {
2534 /* update bits?? */
2535 *retval = updatebits;
2536 }
2537
2538
2539 ksyn_wqunlock(kwq);
2540
2541 ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_RWLOCK));
2542 #if _PSYNCH_TRACE_
2543 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0, updatebits, error, 0);
2544 #endif /* _PSYNCH_TRACE_ */
2545
2546 return(error);
2547
2548 prepost:
2549 /* update if the new seq is higher than the previous prepost, or if this is the first one set */
2550 if ((is_rws_setseq(kwq->kw_pre_sseq) != 0) ||
2551 (is_seqhigher_eq((rw_wc & PTHRW_COUNT_MASK), (kwq->kw_pre_sseq & PTHRW_COUNT_MASK)) != 0)) {
2552 kwq->kw_pre_rwwc = (diff - count);
2553 kwq->kw_pre_lockseq = curgen;
2554 kwq->kw_pre_sseq = rw_wc;
2555 #if _PSYNCH_TRACE_
2556 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, rw_wc, count, 0);
2557 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 4, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
2558 #endif /* _PSYNCH_TRACE_ */
2559 updatebits = lgen; /* let this not do unlock handling */
2560 }
2561 error = 0;
2562 goto out;
2563 }
2564
2565
2566 /*
2567 * psynch_rw_unlock2: This system call is used to wake up pending readers when an unlock grant
2568 * from the kernel races with new reader arrivals.
2569 */
2570 int
2571 psynch_rw_unlock2(__unused proc_t p, __unused struct psynch_rw_unlock2_args * uap, __unused uint32_t * retval)
2572 {
2573 return(ENOTSUP);
2574 }
2575
2576
2577 /* ************************************************************************** */
2578 void
2579 pth_global_hashinit()
2580 {
2581 int arg;
2582
2583 pth_glob_hashtbl = hashinit(PTH_HASHSIZE * 4, M_PROC, &pthhash);
2584
2585 /*
2586 * pthtest={0,1,2,3} (override default aborting behavior on pthread sync failures)
2587 * 0 - just return errors
2588 * 1 - print and return errors
2589 * 2 - abort user, print and return errors
2590 * 3 - panic
2591 */
2592 if (!PE_parse_boot_argn("pthtest", &arg, sizeof(arg)))
2593 arg = __TESTMODE__;
2594
2595 if (arg == 3) {
2596 __test_panics__ = 1;
2597 printf("Pthread support PANICS when sync kernel primitives misused\n");
2598 } else if (arg == 2) {
2599 __test_aborts__ = 1;
2600 __test_prints__ = 1;
2601 printf("Pthread support ABORTS when sync kernel primitives misused\n");
2602 } else if (arg == 1) {
2603 __test_prints__ = 1;
2604 printf("Pthread support LOGS when sync kernel primitives misused\n");
2605 }
2606 }
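/*
 * For example (illustrative; the exact mechanism varies by platform), booting
 * with pthtest=2 in the boot-args makes misuse of the psynch primitives abort
 * the offending process:
 *
 *	sudo nvram boot-args="pthtest=2"
 */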
2607
2608 void
2609 pth_proc_hashinit(proc_t p)
2610 {
2611 p->p_pthhash = hashinit(PTH_HASHSIZE, M_PROC, &pthhash);
2612 if (p->p_pthhash == NULL)
2613 panic("pth_proc_hashinit: hash init returned 0\n");
2614 }
2615
2616
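/*
 * Hash lookup for a waitqueue: process-shared synchronizers are keyed by
 * backing VM object/offset in the global table, process-private ones by user
 * address in the per-process table. The callers below hold the pthread list
 * lock across the lookup.
 */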
2617 ksyn_wait_queue_t
2618 ksyn_wq_hash_lookup(user_addr_t mutex, proc_t p, int flags, uint64_t object, uint64_t objoffset)
2619 {
2620 ksyn_wait_queue_t kwq;
2621 struct pthhashhead * hashptr;
2622
2623 if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED)
2624 {
2625 hashptr = pth_glob_hashtbl;
2626 kwq = (&hashptr[object & pthhash])->lh_first;
2627 if (kwq != 0) {
2628 for (; kwq != NULL; kwq = kwq->kw_hash.le_next) {
2629 if ((kwq->kw_object == object) && (kwq->kw_offset == objoffset)) {
2630 return (kwq);
2631 }
2632 }
2633 }
2634 } else {
2635 hashptr = p->p_pthhash;
2636 kwq = (&hashptr[mutex & pthhash])->lh_first;
2637 if (kwq != 0)
2638 for (; kwq != NULL; kwq = kwq->kw_hash.le_next) {
2639 if (kwq->kw_addr == mutex) {
2640 return (kwq);
2641 }
2642 }
2643 }
2644 return(NULL);
2645 }
2646
2647 void
2648 pth_proc_hashdelete(proc_t p)
2649 {
2650 struct pthhashhead * hashptr;
2651 ksyn_wait_queue_t kwq;
2652 int hashsize = pthhash + 1;
2653 int i;
2654
2655 #if _PSYNCH_TRACE_
2656 if ((pthread_debug_proc != NULL) && (p == pthread_debug_proc))
2657 pthread_debug_proc = PROC_NULL;
2658 #endif /* _PSYNCH_TRACE_ */
2659 hashptr = p->p_pthhash;
2660 if (hashptr == NULL)
2661 return;
2662
2663 for(i= 0; i < hashsize; i++) {
2664 while ((kwq = LIST_FIRST(&hashptr[i])) != NULL) {
2665 pthread_list_lock();
2666 if ((kwq->kw_pflags & KSYN_WQ_INHASH) != 0) {
2667 kwq->kw_pflags &= ~KSYN_WQ_INHASH;
2668 LIST_REMOVE(kwq, kw_hash);
2669 }
2670 if ((kwq->kw_pflags & KSYN_WQ_FLIST) != 0) {
2671 kwq->kw_pflags &= ~KSYN_WQ_FLIST;
2672 LIST_REMOVE(kwq, kw_list);
2673 num_infreekwq--;
2674 }
2675 num_freekwq++;
2676 pthread_list_unlock();
2677 /* release fake entries if present for cvars */
2678 if (((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_CVAR) && (kwq->kw_inqueue != 0))
2679 ksyn_freeallkwe(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER]);
2680 lck_mtx_destroy(&kwq->kw_lock, pthread_lck_grp);
2681 zfree(kwq_zone, kwq);
2682 }
2683 }
2684 FREE(p->p_pthhash, M_PROC);
2685 p->p_pthhash = NULL;
2686 }
2687
2688 /* no lock held for this as the waitqueue is getting freed */
2689 void
2690 ksyn_freeallkwe(ksyn_queue_t kq)
2691 {
2692 ksyn_waitq_element_t kwe;
2693
2694 /* free all the fake entries, dequeue the rest */
2695 kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
2696 while (kwe != NULL) {
2697 if (kwe->kwe_flags != KWE_THREAD_INWAIT) {
2698 TAILQ_REMOVE(&kq->ksynq_kwelist, kwe, kwe_list);
2699 zfree(kwe_zone, kwe);
2700 } else {
2701 TAILQ_REMOVE(&kq->ksynq_kwelist, kwe, kwe_list);
2702 }
2703 kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
2704 }
2705 }
2706
2707 /* find the kernel waitqueue; if not present, create one. Grants a reference */
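/*
 * Allocation follows a classic optimistic pattern, roughly:
 *
 *	pthread_list_lock();
 *	kwq = ksyn_wq_hash_lookup(...);		// reuse if already present
 *	pthread_list_unlock();
 *	nkwq = zalloc(kwq_zone);		// allocate without the lock held
 *	pthread_list_lock();
 *	if (ksyn_wq_hash_lookup(...) != NULL)	// recheck: lost the race?
 *		zfree(kwq_zone, nkwq);		// yes - free ours, reuse winner's
 *	else
 *		LIST_INSERT_HEAD(...);		// no - publish the new queue
 */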
2708 int
2709 ksyn_wqfind(user_addr_t mutex, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, uint64_t tid, int flags, int wqtype, ksyn_wait_queue_t * kwqp)
2710 {
2711 ksyn_wait_queue_t kwq;
2712 ksyn_wait_queue_t nkwq;
2713 struct pthhashhead * hashptr;
2714 uint64_t object = 0, offset = 0;
2715 uint64_t hashhint;
2716 proc_t p = current_proc();
2717 int retry = mgen & PTH_RWL_RETRYBIT;
2718 struct ksyn_queue kfreeq;
2719 int i;
2720
2721 if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED)
2722 {
2723 (void)ksyn_findobj(mutex, &object, &offset);
2724 hashhint = object;
2725 hashptr = pth_glob_hashtbl;
2726 } else {
2727 hashptr = p->p_pthhash;
2728 }
2729
2730 ksyn_queue_init(&kfreeq);
2731
2732 if (((wqtype & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_MTX) && (retry != 0))
2733 mgen &= ~PTH_RWL_RETRYBIT;
2734
2735 loop:
2736 //pthread_list_lock_spin();
2737 pthread_list_lock();
2738
2739 kwq = ksyn_wq_hash_lookup(mutex, p, flags, object, offset);
2740
2741 if (kwq != NULL) {
2742 if ((kwq->kw_pflags & KSYN_WQ_FLIST) != 0) {
2743 LIST_REMOVE(kwq, kw_list);
2744 kwq->kw_pflags &= ~KSYN_WQ_FLIST;
2745 num_infreekwq--;
2746 num_reusekwq++;
2747 }
2748 if ((kwq->kw_type & KSYN_WQTYPE_MASK) != (wqtype & KSYN_WQTYPE_MASK)) {
2749 if ((kwq->kw_inqueue == 0) && (kwq->kw_pre_rwwc == 0) && (kwq->kw_pre_intrcount == 0)) {
2750 if (kwq->kw_iocount == 0) {
2751 kwq->kw_addr = mutex;
2752 kwq->kw_flags = flags;
2753 kwq->kw_object = object;
2754 kwq->kw_offset = offset;
2755 kwq->kw_type = (wqtype & KSYN_WQTYPE_MASK);
2756 CLEAR_REINIT_BITS(kwq);
2757 CLEAR_INTR_PREPOST_BITS(kwq);
2758 CLEAR_PREPOST_BITS(kwq);
2759 kwq->kw_lword = mgen;
2760 kwq->kw_uword = ugen;
2761 kwq->kw_sword = rw_wc;
2762 kwq->kw_owner = tid;
2763 } else if ((kwq->kw_iocount == 1) && (kwq->kw_dropcount == kwq->kw_iocount)) {
2764 /* if all users are unlockers then wait for it to finish */
2765 kwq->kw_pflags |= KSYN_WQ_WAITING;
2766 /* wait for the wq to be free */
2767 (void)msleep(&kwq->kw_pflags, pthread_list_mlock, PDROP, "ksyn_wqfind", 0);
2768 /* does not have list lock */
2769 goto loop;
2770 } else {
2771 __FAILEDUSERTEST__("address already known to kernel for another (busy) synchronizer type\n");
2772 pthread_list_unlock();
2773 return EBUSY;
2774 }
2775 } else {
2776 __FAILEDUSERTEST__("address already known to kernel for another (busy) synchronizer type(1)\n");
2777 pthread_list_unlock();
2778 return EBUSY;
2779 }
2780 }
2781 kwq->kw_iocount++;
2782 if (wqtype == KSYN_WQTYPE_MUTEXDROP)
2783 kwq->kw_dropcount++;
2784 if (kwqp != NULL)
2785 *kwqp = kwq;
2786 pthread_list_unlock();
2787 return (0);
2788 }
2789
2790 pthread_list_unlock();
2791
2792 nkwq = (ksyn_wait_queue_t)zalloc(kwq_zone);
2793 bzero(nkwq, sizeof(struct ksyn_wait_queue));
2794 nkwq->kw_addr = mutex;
2795 nkwq->kw_flags = flags;
2796 nkwq->kw_iocount = 1;
2797 if (wqtype == KSYN_WQTYPE_MUTEXDROP)
2798 nkwq->kw_dropcount++;
2799 nkwq->kw_object = object;
2800 nkwq->kw_offset = offset;
2801 nkwq->kw_type = (wqtype & KSYN_WQTYPE_MASK);
2802 nkwq->kw_lastseqword = PTHRW_RWS_INIT;
2803 if (nkwq->kw_type == KSYN_WQTYPE_RWLOCK)
2804 nkwq->kw_nextseqword = PTHRW_RWS_INIT;
2805
2806 nkwq->kw_pre_sseq = PTHRW_RWS_INIT;
2807
2808 CLEAR_PREPOST_BITS(nkwq);
2809 CLEAR_INTR_PREPOST_BITS(nkwq);
2810 CLEAR_REINIT_BITS(nkwq);
2811 nkwq->kw_lword = mgen;
2812 nkwq->kw_uword = ugen;
2813 nkwq->kw_sword = rw_wc;
2814 nkwq->kw_owner = tid;
2815
2816
2817 for (i=0; i< KSYN_QUEUE_MAX; i++)
2818 ksyn_queue_init(&nkwq->kw_ksynqueues[i]);
2819
2820 lck_mtx_init(&nkwq->kw_lock, pthread_lck_grp, pthread_lck_attr);
2821
2822 //pthread_list_lock_spin();
2823 pthread_list_lock();
2824 /* see whether it is already allocated */
2825 kwq = ksyn_wq_hash_lookup(mutex, p, flags, object, offset);
2826
2827 if (kwq != NULL) {
2828 if ((kwq->kw_pflags & KSYN_WQ_FLIST) != 0) {
2829 LIST_REMOVE(kwq, kw_list);
2830 kwq->kw_pflags &= ~KSYN_WQ_FLIST;
2831 num_infreekwq--;
2832 num_reusekwq++;
2833 }
2834 if ((kwq->kw_type & KSYN_WQTYPE_MASK) != (wqtype & KSYN_WQTYPE_MASK)) {
2835 if ((kwq->kw_inqueue == 0) && (kwq->kw_pre_rwwc == 0) && (kwq->kw_pre_intrcount == 0)) {
2836 if (kwq->kw_iocount == 0) {
2837 kwq->kw_addr = mutex;
2838 kwq->kw_flags = flags;
2839 kwq->kw_object = object;
2840 kwq->kw_offset = offset;
2841 kwq->kw_type = (wqtype & KSYN_WQTYPE_MASK);
2842 CLEAR_REINIT_BITS(kwq);
2843 CLEAR_INTR_PREPOST_BITS(kwq);
2844 CLEAR_PREPOST_BITS(kwq);
2845 kwq->kw_lword = mgen;
2846 kwq->kw_uword = ugen;
2847 kwq->kw_sword = rw_wc;
2848 kwq->kw_owner = tid;
2849 } else if ((kwq->kw_iocount == 1) && (kwq->kw_dropcount == kwq->kw_iocount)) {
2850 kwq->kw_pflags |= KSYN_WQ_WAITING;
2851 /* wait for the wq to be free */
2852 (void)msleep(&kwq->kw_pflags, pthread_list_mlock, PDROP, "ksyn_wqfind", 0);
2853
2854 lck_mtx_destroy(&nkwq->kw_lock, pthread_lck_grp);
2855 zfree(kwq_zone, nkwq);
2856 /* will acquire lock again */
2857
2858 goto loop;
2859 } else {
2860 __FAILEDUSERTEST__("address already known to kernel for another [busy] synchronizer type(2)\n");
2861 pthread_list_unlock();
2862 lck_mtx_destroy(&nkwq->kw_lock, pthread_lck_grp);
2863 zfree(kwq_zone, nkwq);
2864 return EBUSY;
2865 }
2866 } else {
2867 __FAILEDUSERTEST__("address already known to kernel for another [busy] synchronizer type(3)\n");
2868 pthread_list_unlock();
2869 lck_mtx_destroy(&nkwq->kw_lock, pthread_lck_grp);
2870 zfree(kwq_zone, nkwq);
2871 return EBUSY;
2872 }
2873 }
2874 kwq->kw_iocount++;
2875 if (wqtype == KSYN_WQTYPE_MUTEXDROP)
2876 kwq->kw_dropcount++;
2877 if (kwqp != NULL)
2878 *kwqp = kwq;
2879 pthread_list_unlock();
2880 lck_mtx_destroy(&nkwq->kw_lock, pthread_lck_grp);
2881 zfree(kwq_zone, nkwq);
2882 return (0);
2883 }
2884 kwq = nkwq;
2885
2886 #if _PSYNCH_TRACE_
2887 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, kwq->kw_lword, kwq->kw_uword, kwq->kw_sword, 0xffff, 0);
2888 #endif /* _PSYNCH_TRACE_ */
2889 if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED)
2890 {
2891 kwq->kw_pflags |= KSYN_WQ_SHARED;
2892 LIST_INSERT_HEAD(&hashptr[kwq->kw_object & pthhash], kwq, kw_hash);
2893 } else
2894 LIST_INSERT_HEAD(&hashptr[mutex & pthhash], kwq, kw_hash);
2895
2896 kwq->kw_pflags |= KSYN_WQ_INHASH;
2897 num_total_kwq++;
2898
2899 pthread_list_unlock();
2900
2901 if (kwqp != NULL)
2902 *kwqp = kwq;
2903 return (0);
2904 }
2905
2906 /* The reference from ksyn_wqfind is dropped here. Starts the free process if needed */
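/*
 * Idle queues are not freed immediately: they are parked on pth_free_list
 * with a timestamp, and psynch_wq_cleanup() reaps them once they have sat
 * idle for KSYN_CLEANUP_DEADLINE seconds (unless qfreenow forces the free
 * right here).
 */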
2907 void
2908 ksyn_wqrelease(ksyn_wait_queue_t kwq, ksyn_wait_queue_t ckwq, int qfreenow, int wqtype)
2909 {
2910 uint64_t deadline;
2911 struct timeval t;
2912 int sched = 0;
2913 ksyn_wait_queue_t free_elem = NULL;
2914 ksyn_wait_queue_t free_elem1 = NULL;
2915
2916 //pthread_list_lock_spin();
2917 pthread_list_lock();
2918 kwq->kw_iocount--;
2919 if (wqtype == KSYN_WQTYPE_MUTEXDROP) {
2920 kwq->kw_dropcount--;
2921 }
2922 if (kwq->kw_iocount == 0) {
2923 if ((kwq->kw_pflags & KSYN_WQ_WAITING) != 0) {
2924 /* someone is waiting for the waitqueue; wake them up */
2925 kwq->kw_pflags &= ~KSYN_WQ_WAITING;
2926 wakeup(&kwq->kw_pflags);
2927 }
2928
2929 if ((kwq->kw_pre_rwwc == 0) && (kwq->kw_inqueue == 0) && (kwq->kw_pre_intrcount == 0)) {
2930 if (qfreenow == 0) {
2931 microuptime(&kwq->kw_ts);
2932 LIST_INSERT_HEAD(&pth_free_list, kwq, kw_list);
2933 kwq->kw_pflags |= KSYN_WQ_FLIST;
2934 num_infreekwq++;
2935 free_elem = NULL;
2936 } else {
2937 /* remove from the only list it is in, i.e. the hash */
2938 kwq->kw_pflags &= ~(KSYN_WQ_FLIST | KSYN_WQ_INHASH);
2939 LIST_REMOVE(kwq, kw_hash);
2940 lck_mtx_destroy(&kwq->kw_lock, pthread_lck_grp);
2941 num_total_kwq--;
2942 num_freekwq++;
2943 free_elem = kwq;
2944 }
2945 } else
2946 free_elem = NULL;
2947 if (qfreenow == 0)
2948 sched = 1;
2949 }
2950
2951 if (ckwq != NULL) {
2952 ckwq->kw_iocount--;
2953 if (wqtype == KSYN_WQTYPE_MUTEXDROP) {
2954 ckwq->kw_dropcount--;
2955 }
2956 if (ckwq->kw_iocount == 0) {
2957 if ((ckwq->kw_pflags & KSYN_WQ_WAITING) != 0) {
2958 /* someone is waiting for the waitqueue; wake them up */
2959 ckwq->kw_pflags &= ~KSYN_WQ_WAITING;
2960 wakeup(&ckwq->kw_pflags);
2961 }
2962 if ((ckwq->kw_pre_rwwc == 0) && (ckwq->kw_inqueue == 0) && (ckwq->kw_pre_intrcount == 0)) {
2963 if (qfreenow == 0) {
2964 /* mark for free if we can */
2965 microuptime(&ckwq->kw_ts);
2966 LIST_INSERT_HEAD(&pth_free_list, ckwq, kw_list);
2967 ckwq->kw_pflags |= KSYN_WQ_FLIST;
2968 num_infreekwq++;
2969 free_elem1 = NULL;
2970 } else {
2971 /* remove from the only list it is in, i.e. the hash */
2972 ckwq->kw_pflags &= ~(KSYN_WQ_FLIST | KSYN_WQ_INHASH);
2973 LIST_REMOVE(ckwq, kw_hash);
2974 lck_mtx_destroy(&ckwq->kw_lock, pthread_lck_grp);
2975 num_total_kwq--;
2976 num_freekwq++;
2977 free_elem1 = ckwq;
2978 }
2979 } else
2980 free_elem1 = NULL;
2981 if (qfreenow == 0)
2982 sched = 1;
2983 }
2984 }
2985
2986 if (sched == 1 && psynch_cleanupset == 0) {
2987 psynch_cleanupset = 1;
2988 microuptime(&t);
2989 t.tv_sec += KSYN_CLEANUP_DEADLINE;
2990
2991 deadline = tvtoabstime(&t);
2992 thread_call_enter_delayed(psynch_thcall, deadline);
2993 }
2994 pthread_list_unlock();
2995 if (free_elem != NULL)
2996 zfree(kwq_zone, free_elem);
2997 if (free_elem1 != NULL)
2998 zfree(kwq_zone, free_elem1);
2999 }
3000
3001 /* responsible for freeing the waitqueues */
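/*
 * Runs from the thread_call armed in ksyn_wqrelease(): walks pth_free_list,
 * frees the queues that have aged past the deadline, and re-arms itself if
 * any are still pending.
 */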
3002 void
3003 psynch_wq_cleanup(__unused void * param, __unused void * param1)
3004 {
3005 ksyn_wait_queue_t kwq;
3006 struct timeval t;
3007 LIST_HEAD(, ksyn_wait_queue) freelist = {NULL};
3008 int count = 0, delayed = 0, diff;
3009 uint64_t deadline = 0;
3010
3011 //pthread_list_lock_spin();
3012 pthread_list_lock();
3013
3014 num_addedfreekwq = num_infreekwq - num_lastfreekwqcount;
3015 num_lastfreekwqcount = num_infreekwq;
3016 microuptime(&t);
3017
3018 LIST_FOREACH(kwq, &pth_free_list, kw_list) {
3019 if ((kwq->kw_iocount != 0) || (kwq->kw_pre_rwwc != 0) || (kwq->kw_inqueue != 0) || (kwq->kw_pre_intrcount != 0)) {
3020 /* still in use */
3021 continue;
3022 }
3023 diff = t.tv_sec - kwq->kw_ts.tv_sec;
3024 if (diff < 0)
3025 diff *= -1;
3026 if (diff >= KSYN_CLEANUP_DEADLINE) {
3027 /* out of hash */
3028 kwq->kw_pflags &= ~(KSYN_WQ_FLIST | KSYN_WQ_INHASH);
3029 num_infreekwq--;
3030 num_freekwq++;
3031 LIST_REMOVE(kwq, kw_hash);
3032 LIST_REMOVE(kwq, kw_list);
3033 LIST_INSERT_HEAD(&freelist, kwq, kw_list);
3034 count++;
3035 num_total_kwq--;
3036 } else {
3037 delayed = 1;
3038 }
3039
3040 }
3041 if (delayed != 0) {
3042 t.tv_sec += KSYN_CLEANUP_DEADLINE;
3043
3044 deadline = tvtoabstime(&t);
3045 thread_call_enter_delayed(psynch_thcall, deadline);
3046 psynch_cleanupset = 1;
3047 } else
3048 psynch_cleanupset = 0;
3049
3050 pthread_list_unlock();
3051
3052
3053 while ((kwq = LIST_FIRST(&freelist)) != NULL) {
3054 LIST_REMOVE(kwq, kw_list);
3055 lck_mtx_destroy(&kwq->kw_lock, pthread_lck_grp);
3056 zfree(kwq_zone, kwq);
3057 }
3058 }
3059
3060
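/*
 * Blocks the current thread on its kwe's kwe_psynchretval (THREAD_ABORTSAFE,
 * with an optional deadline); the kwq lock is dropped before blocking. The
 * typical caller pattern (see psynch_rw_rdlock above) is, roughly:
 *
 *	kret = ksyn_block_thread_locked(kwq, 0, kwe, 0, THREAD_CONTINUE_NULL, NULL);
 *	ksyn_wqlock(kwq);
 *	if (kwe->kwe_kwqqueue != NULL)		// still queued => timeout/interrupt
 *		ksyn_queue_removeitem(kwq, kq, kwe);
 *	ksyn_wqunlock(kwq);
 */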
3061 kern_return_t
3062 #if _PSYNCH_TRACE_
3063 ksyn_block_thread_locked(ksyn_wait_queue_t kwq, uint64_t abstime, ksyn_waitq_element_t kwe, int mylog, thread_continue_t continuation, void * parameter)
3064 #else
3065 ksyn_block_thread_locked(ksyn_wait_queue_t kwq, uint64_t abstime, ksyn_waitq_element_t kwe, __unused int mylog, thread_continue_t continuation, void * parameter)
3066 #endif
3067 {
3068 kern_return_t kret;
3069 int error = 0;
3070 #if _PSYNCH_TRACE_
3071 uthread_t uth = NULL;
3072 #endif /* _PSYNCH_TRACE_ */
3073
3074 kwe->kwe_kwqqueue = (void *)kwq;
3075 assert_wait_deadline(&kwe->kwe_psynchretval, THREAD_ABORTSAFE, abstime);
3076 ksyn_wqunlock(kwq);
3077
3078 if (continuation == THREAD_CONTINUE_NULL)
3079 kret = thread_block(NULL);
3080 else
3081 kret = thread_block_parameter(continuation, parameter);
3082
3083 #if _PSYNCH_TRACE_
3084 switch (kret) {
3085 case THREAD_TIMED_OUT:
3086 error = ETIMEDOUT;
3087 break;
3088 case THREAD_INTERRUPTED:
3089 error = EINTR;
3090 break;
3091 }
3092 uth = current_uthread();
3093 #if defined(__i386__)
3094 if (mylog != 0)
3095 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_THWAKEUP | DBG_FUNC_NONE, 0xf4f3f2f1, (uint32_t)uth, kret, 0, 0);
3096 #else
3097 if (mylog != 0)
3098 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_THWAKEUP | DBG_FUNC_NONE, 0xeeeeeeee, kret, error, 0xeeeeeeee, 0);
3099 #endif
3100 #endif /* _PSYNCH_TRACE_ */
3101
3102 return(kret);
3103 }
3104
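/*
 * Wakes the single thread parked on this kwe. KERN_NOT_WAITING means the
 * target has not blocked yet; callers record that case as a pre_intr prepost
 * so the wakeup is not lost.
 */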
3105 kern_return_t
3106 ksyn_wakeup_thread(__unused ksyn_wait_queue_t kwq, ksyn_waitq_element_t kwe)
3107 {
3108 kern_return_t kret;
3109 #if _PSYNCH_TRACE_
3110 uthread_t uth = NULL;
3111 #endif /* _PSYNCH_TRACE_ */
3112
3113 kret = thread_wakeup_one((caddr_t)&kwe->kwe_psynchretval);
3114
3115 if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
3116 panic("ksyn_wakeup_thread: panic waking up thread %x\n", kret);
3117 #if _PSYNCH_TRACE_
3118 uth = kwe->kwe_uth;
3119 #if defined(__i386__)
3120 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_THWAKEUP | DBG_FUNC_NONE, 0xf1f2f3f4, (uint32_t)uth, kret, 0, 0);
3121 #endif
3122 #endif /* _PSYNCH_TRACE_ */
3123
3124 return(kret);
3125 }
3126
3127 /* find the true shared object/offset for shared mutexes */
3128 int
3129 ksyn_findobj(uint64_t mutex, uint64_t * objectp, uint64_t * offsetp)
3130 {
3131 vm_page_info_basic_data_t info;
3132 kern_return_t kret;
3133 mach_msg_type_number_t count = VM_PAGE_INFO_BASIC_COUNT;
3134
3135 kret = vm_map_page_info(current_map(), mutex, VM_PAGE_INFO_BASIC,
3136 (vm_page_info_t)&info, &count);
3137
3138 if (kret != KERN_SUCCESS)
3139 return(EINVAL);
3140
3141 if (objectp != NULL)
3142 *objectp = (uint64_t)info.object_id;
3143 if (offsetp != NULL)
3144 *offsetp = (uint64_t)info.offset;
3145
3146 return(0);
3147 }
3148
3149
3150 /* lowest of kw_fr, kw_flr, kw_fwr, kw_fywr */
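/*
 * For each non-empty queue (or class preposted via 'flags'), collect the
 * first sequence number, substituting 'premgen' when it sorts lower, then
 * pick the overall lowest with is_seqlower(), which compares sequence
 * numbers with wraparound in mind.
 */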
3151 int
3152 kwq_find_rw_lowest(ksyn_wait_queue_t kwq, int flags, uint32_t premgen, int * typep, uint32_t lowest[])
3153 {
3154
3155 uint32_t kw_fr, kw_flr, kw_fwr, kw_fywr, low;
3156 int type = 0, lowtype, typenum[4];
3157 uint32_t numbers[4];
3158 int count = 0, i;
3159
3160
3161 if ((kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0)) {
3162 type |= PTH_RWSHFT_TYPE_READ;
3163 /* read entries are present */
3164 if (kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count != 0) {
3165 kw_fr = kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_firstnum;
3166 if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) && (is_seqlower(premgen, kw_fr) != 0))
3167 kw_fr = premgen;
3168 } else
3169 kw_fr = premgen;
3170
3171 lowest[KSYN_QUEUE_READ] = kw_fr;
3172 numbers[count]= kw_fr;
3173 typenum[count] = PTH_RW_TYPE_READ;
3174 count++;
3175 } else
3176 lowest[KSYN_QUEUE_READ] = 0;
3177
3178 if ((kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_LREADLOCK) != 0)) {
3179 type |= PTH_RWSHFT_TYPE_LREAD;
3180 /* long read entries are present */
3181 if (kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count != 0) {
3182 kw_flr = kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_firstnum;
3183 if (((flags & KW_UNLOCK_PREPOST_LREADLOCK) != 0) && (is_seqlower(premgen, kw_flr) != 0))
3184 kw_flr = premgen;
3185 } else
3186 kw_flr = premgen;
3187
3188 lowest[KSYN_QUEUE_LREAD] = kw_flr;
3189 numbers[count]= kw_flr;
3190 typenum[count] = PTH_RW_TYPE_LREAD;
3191 count++;
3192 } else
3193 lowest[KSYN_QUEUE_LREAD] = 0;
3194
3195
3196 if ((kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0)) {
3197 type |= PTH_RWSHFT_TYPE_WRITE;
3198 /* write entries are present */
3199 if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0) {
3200 kw_fwr = kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_firstnum;
3201 if (((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0) && (is_seqlower(premgen, kw_fwr) != 0))
3202 kw_fwr = premgen;
3203 } else
3204 kw_fwr = premgen;
3205
3206 lowest[KSYN_QUEUE_WRITER] = kw_fwr;
3207 numbers[count]= kw_fwr;
3208 typenum[count] = PTH_RW_TYPE_WRITE;
3209 count++;
3210 } else
3211 lowest[KSYN_QUEUE_WRITER] = 0;
3212
3213 if ((kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_YWRLOCK) != 0)) {
3214 type |= PTH_RWSHFT_TYPE_YWRITE;
3215 /* yield-write entries are present */
3216 if (kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0) {
3217 kw_fywr = kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_firstnum;
3218 if (((flags & KW_UNLOCK_PREPOST_YWRLOCK) != 0) && (is_seqlower(premgen, kw_fywr) != 0))
3219 kw_fywr = premgen;
3220 } else
3221 kw_fywr = premgen;
3222
3223 lowest[KSYN_QUEUE_YWRITER] = kw_fywr;
3224 numbers[count]= kw_fywr;
3225 typenum[count] = PTH_RW_TYPE_YWRITE;
3226 count++;
3227 } else
3228 lowest[KSYN_QUEUE_YWRITER] = 0;
3229
3230
3231 #if __TESTPANICS__
3232 if (count == 0)
3233 panic("nothing in the queue???\n");
3234 #endif /* __TESTPANICS__ */
3235
3236 low = numbers[0];
3237 lowtype = typenum[0];
3238 if (count > 1) {
3239 for (i = 1; i < count; i++) {
3240 if (is_seqlower(numbers[i], low) != 0) {
3241 low = numbers[i];
3242 lowtype = typenum[i];
3243 }
3244 }
3245 }
3246 type |= lowtype;
3247
3248 if (typep != 0)
3249 *typep = type;
3250 return(0);
3251 }
3252
3253 /* wake up readers and long readers up to the writer limits */
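/*
 * Returns the number of failed wakeups (threads that had not blocked yet);
 * the caller turns those into pre_intr preposts. 'limitread' bounds which
 * readers may go when 'allreaders' is not set; 'longreadset' drains both
 * the read and long-read queues.
 */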
3254 int
3255 ksyn_wakeupreaders(ksyn_wait_queue_t kwq, uint32_t limitread, int longreadset, int allreaders, uint32_t updatebits, int * wokenp)
3256 {
3257 ksyn_waitq_element_t kwe = NULL;
3258 ksyn_queue_t kq;
3259 int failedwakeup = 0;
3260 int numwoken = 0;
3261 kern_return_t kret = KERN_SUCCESS;
3262 uint32_t lbits = 0;
3263
3264 lbits = updatebits;
3265 if (longreadset != 0) {
3266 /* clear all read and longreads */
3267 while ((kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_READ], kwq)) != NULL) {
3268 kwe->kwe_psynchretval = lbits;
3269 kwe->kwe_kwqqueue = NULL;
3270
3271 numwoken++;
3272 kret = ksyn_wakeup_thread(kwq, kwe);
3273 #if __TESTPANICS__
3274 if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
3275 panic("ksyn_wakeupreaders: panic waking up readers\n");
3276 #endif /* __TESTPANICS__ */
3277 if (kret == KERN_NOT_WAITING) {
3278 failedwakeup++;
3279 }
3280 }
3281 while ((kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_LREAD], kwq)) != NULL) {
3282 kwe->kwe_psynchretval = lbits;
3283 kwe->kwe_kwqqueue = NULL;
3284 numwoken++;
3285 kret = ksyn_wakeup_thread(kwq, kwe);
3286 #if __TESTPANICS__
3287 if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
3288 panic("ksyn_wakeupreaders: panic waking up lreaders\n");
3289 #endif /* __TESTPANICS__ */
3290 if (kret == KERN_NOT_WAITING) {
3291 failedwakeup++;
3292 }
3293 }
3294 } else {
3295 kq = &kwq->kw_ksynqueues[KSYN_QUEUE_READ];
3296 while ((kq->ksynq_count != 0) && (allreaders || (is_seqlower(kq->ksynq_firstnum, limitread) != 0))) {
3297 kwe = ksyn_queue_removefirst(kq, kwq);
3298 kwe->kwe_psynchretval = lbits;
3299 kwe->kwe_kwqqueue = NULL;
3300 numwoken++;
3301 kret = ksyn_wakeup_thread(kwq, kwe);
3302 #if __TESTPANICS__
3303 if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
3304 panic("ksyn_wakeupreaders: panic waking up readers\n");
3305 #endif /* __TESTPANICS__ */
3306 if (kret == KERN_NOT_WAITING) {
3307 failedwakeup++;
3308 }
3309 }
3310 }
3311
3312 if (wokenp != NULL)
3313 *wokenp = numwoken;
3314 return(failedwakeup);
3315 }
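/*
 * Editor's note: the return value of ksyn_wakeupreaders() is the number of
 * dequeued waiters whose wakeup came back KERN_NOT_WAITING (the thread had
 * already left the wait queue); callers stash that in kw_pre_intrcount so
 * late-arriving threads can be granted directly. *wokenp, when supplied,
 * gets the total number of dequeued waiters.
 */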
3316
3317
3318 /* Handles the unlock grants for the next set of waiters on rw_unlock(), or on arrival of all preposted waiters */
3319 int
3320 kwq_handle_unlock(ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t rw_wc, uint32_t * updatep, int flags, int * blockp, uint32_t premgen)
3321 {
3322 uint32_t low_reader, low_writer, low_ywriter, low_lreader, limitrdnum;
3323 int rwtype, error = 0;
3324 int longreadset = 0, allreaders, failed;
3325 uint32_t updatebits = 0, numneeded = 0;
3326 int prepost = flags & KW_UNLOCK_PREPOST;
3327 thread_t preth = THREAD_NULL;
3328 ksyn_waitq_element_t kwe;
3329 uthread_t uth;
3330 thread_t th;
3331 int woken = 0;
3332 int block = 1;
3333 uint32_t lowest[KSYN_QUEUE_MAX]; /* no need for upgrade as it is handled separately */
3334 kern_return_t kret = KERN_SUCCESS;
3335 ksyn_queue_t kq;
3336 int curthreturns = 0;
3337
3338 #if _PSYNCH_TRACE_
3339 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_START, (uint32_t)kwq->kw_addr, mgen, premgen, rw_wc, 0);
3340 #endif /* _PSYNCH_TRACE_ */
3341 if (prepost != 0) {
3342 preth = current_thread();
3343 }
3344
3345 kq = &kwq->kw_ksynqueues[KSYN_QUEUE_READ];
3346 kwq->kw_lastseqword = rw_wc;
3347 kwq->kw_lastunlockseq = (rw_wc & PTHRW_COUNT_MASK);
3348 kwq->kw_overlapwatch = 0;
3349
3350 /* upgrade pending */
3351 if (is_rw_ubit_set(mgen)) {
3352 #if __TESTPANICS__
3353 panic("NO UBIT SHOULD BE SET\n");
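/*
 * Editor's note: with __TESTPANICS__ enabled, the panic above fires before
 * any of the grant code below it, so that code is effectively unreachable;
 * with __TESTPANICS__ disabled the whole block compiles out and an upgrade
 * request simply drops the kwq lock and returns.
 */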
3354 updatebits = PTH_RWL_EBIT | PTH_RWL_KBIT;
3355 if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0)
3356 updatebits |= PTH_RWL_WBIT;
3357 if (kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0)
3358 updatebits |= PTH_RWL_YBIT;
3359 if (prepost != 0) {
3360 if ((flags & KW_UNLOCK_PREPOST_UPGRADE) != 0) {
3361 /* upgrade thread calling the prepost */
3362 /* upgrade granted */
3363 block = 0;
3364 goto out;
3365 }
3366
3367 }
3368 if (kwq->kw_ksynqueues[KSYN_QUEUE_UPGRADE].ksynq_count > 0) {
3369 kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_UPGRADE], kwq);
3370
3371 kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;
3372 kwe->kwe_psynchretval = updatebits;
3373 kwe->kwe_kwqqueue = NULL;
3374 kret = ksyn_wakeup_thread(kwq, kwe);
3375 if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
3376 panic("kwq_handle_unlock: panic waking up the upgrade thread \n");
3377 if (kret == KERN_NOT_WAITING) {
3378 kwq->kw_pre_intrcount = 1; /* actually a count */
3379 kwq->kw_pre_intrseq = mgen;
3380 kwq->kw_pre_intrretbits = kwe->kwe_psynchretval;
3381 kwq->kw_pre_intrtype = PTH_RW_TYPE_UPGRADE;
3382 }
3383 error = 0;
3384 } else {
3385 panic("panic unable to find the upgrade thread\n");
3386 }
3387 #endif /* __TESTPANICS__ */
3388 ksyn_wqunlock(kwq);
3389 goto out;
3390 }
3391
3392 error = kwq_find_rw_lowest(kwq, flags, premgen, &rwtype, lowest);
3393 #if __TESTPANICS__
3394 if (error != 0)
3395 panic("rwunlock: cannot fails to slot next round of threads");
3396 #endif /* __TESTPANICS__ */
3397
3398 #if _PSYNCH_TRACE_
3399 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 1, rwtype, 0, 0);
3400 #endif /* _PSYNCH_TRACE_ */
3401 low_reader = lowest[KSYN_QUEUE_READ];
3402 low_lreader = lowest[KSYN_QUEUE_LREAD];
3403 low_writer = lowest[KSYN_QUEUE_WRITER];
3404 low_ywriter = lowest[KSYN_QUEUE_YWRITER];
3405
3406
3407 longreadset = 0;
3408 allreaders = 0;
3409 updatebits = 0;
3410
3411
3412 switch (rwtype & PTH_RW_TYPE_MASK) {
3413 case PTH_RW_TYPE_LREAD:
3414 longreadset = 1;
3415 /* FALLTHROUGH: long-read grants share the read-grant path below */
3416 case PTH_RW_TYPE_READ: {
3417 /* TBD: what about a preflight that is LREAD or READ? */
3418 if ((rwtype & PTH_RWSHFT_TYPE_MASK) != 0) {
3419 if (rwtype & PTH_RWSHFT_TYPE_WRITE)
3420 updatebits |= (PTH_RWL_WBIT | PTH_RWL_KBIT);
3421 if (rwtype & PTH_RWSHFT_TYPE_YWRITE)
3422 updatebits |= PTH_RWL_YBIT;
3423 }
3424 limitrdnum = 0;
3425 if (longreadset == 0) {
3426 switch (rwtype & (PTH_RWSHFT_TYPE_WRITE | PTH_RWSHFT_TYPE_YWRITE)) {
3427 case PTH_RWSHFT_TYPE_WRITE:
3428 limitrdnum = low_writer;
3429 if (((rwtype & PTH_RWSHFT_TYPE_LREAD) != 0) &&
3430 (is_seqlower(low_lreader, limitrdnum) != 0)) {
3431 longreadset = 1;
3432 }
3433 if (((flags & KW_UNLOCK_PREPOST_LREADLOCK) != 0) &&
3434 (is_seqlower(premgen, limitrdnum) != 0)) {
3435 longreadset = 1;
3436 }
3437 break;
3438 case PTH_RWSHFT_TYPE_YWRITE:
3439 /* all readers? */
3440 if (((rwtype & PTH_RWSHFT_TYPE_LREAD) != 0) &&
3441 (is_seqlower(low_lreader, low_ywriter) != 0)) {
3442 longreadset = 1;
3443 } else
3444 allreaders = 1;
3445 if (((flags & KW_UNLOCK_PREPOST_LREADLOCK) != 0) &&
3446 (is_seqlower(premgen, low_ywriter) != 0)) {
3447 longreadset = 1;
3448 allreaders = 0;
3449 }
3450
3451
3452 break;
3453 case (PTH_RWSHFT_TYPE_WRITE | PTH_RWSHFT_TYPE_YWRITE):
3454 if (is_seqlower(low_ywriter, low_writer) != 0) {
3455 limitrdnum = low_ywriter;
3456 } else
3457 limitrdnum = low_writer;
3458 if (((rwtype & PTH_RWSHFT_TYPE_LREAD) != 0) &&
3459 (is_seqlower(low_lreader, limitrdnum) != 0)) {
3460 longreadset = 1;
3461 }
3462 if (((flags & KW_UNLOCK_PREPOST_LREADLOCK) != 0) &&
3463 (is_seqlower(premgen, limitrdnum) != 0)) {
3464 longreadset = 1;
3465 }
3466 break;
3467 default: /* no writers at all */
3468 if ((rwtype & PTH_RWSHFT_TYPE_LREAD) != 0)
3469 longreadset = 1;
3470 else
3471 allreaders = 1;
3472 };
3473
3474 }
3475 numneeded = 0;
3476 if (longreadset != 0) {
3477 updatebits |= PTH_RWL_LBIT;
3478 updatebits &= ~PTH_RWL_KBIT;
3479 if ((flags & (KW_UNLOCK_PREPOST_READLOCK | KW_UNLOCK_PREPOST_LREADLOCK)) != 0)
3480 numneeded += 1;
3481 numneeded += kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count;
3482 numneeded += kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count;
3483 updatebits += (numneeded << PTHRW_COUNT_SHIFT);
3484 kwq->kw_overlapwatch = 1;
3485 } else {
3486 /* no longread, evaluate number of readers */
3487
3488 switch (rwtype & (PTH_RWSHFT_TYPE_WRITE | PTH_RWSHFT_TYPE_YWRITE)) {
3489 case PTH_RWSHFT_TYPE_WRITE:
3490 limitrdnum = low_writer;
3491 numneeded = ksyn_queue_count_tolowest(kq, limitrdnum);
3492 if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) && (is_seqlower(premgen, limitrdnum) != 0)) {
3493 curthreturns = 1;
3494 numneeded += 1;
3495 }
3496 break;
3497 case PTH_RWSHFT_TYPE_YWRITE:
3498 /* all readers? */
3499 numneeded += kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count;
3500 if ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) {
3501 curthreturns = 1;
3502 numneeded += 1;
3503 }
3504 break;
3505 case (PTH_RWSHFT_TYPE_WRITE | PTH_RWSHFT_TYPE_YWRITE):
3506 limitrdnum = low_writer;
3507 numneeded = ksyn_queue_count_tolowest(kq, limitrdnum);
3508 if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) && (is_seqlower(premgen, limitrdnum) != 0)) {
3509 curthreturns = 1;
3510 numneeded += 1;
3511 }
3512 break;
3513 default: /* no writers at all */
3514 /* no other waiters only readers */
3515 kwq->kw_overlapwatch = 1;
3516 numneeded += kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count;
3517 if ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) {
3518 curthreturns = 1;
3519 numneeded += 1;
3520 }
3521 };
3522
3523 updatebits += (numneeded << PTHRW_COUNT_SHIFT);
3524 }
3525 kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;
3526
3527 if (curthreturns != 0) {
3528 block = 0;
3529 uth = current_uthread();
3530 kwe = &uth->uu_kwe;
3531 kwe->kwe_psynchretval = updatebits;
3532 }
3533
3534
3535 failed = ksyn_wakeupreaders(kwq, limitrdnum, longreadset, allreaders, updatebits, &woken);
3536 #if _PSYNCH_TRACE_
3537 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 2, woken, failed, 0);
3538 #endif /* _PSYNCH_TRACE_ */
3539
3540 if (failed != 0) {
3541 kwq->kw_pre_intrcount = failed; /* actually a count */
3542 kwq->kw_pre_intrseq = limitrdnum;
3543 kwq->kw_pre_intrretbits = updatebits;
3544 if (longreadset)
3545 kwq->kw_pre_intrtype = PTH_RW_TYPE_LREAD;
3546 else
3547 kwq->kw_pre_intrtype = PTH_RW_TYPE_READ;
3548 }
3549
3550 error = 0;
3551
3552 if ((kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0) && ((updatebits & PTH_RWL_WBIT) == 0))
3553 panic("kwq_handle_unlock: writer pending but no writebit set %x\n", updatebits);
3554 }
3555 break;
3556
3557 case PTH_RW_TYPE_WRITE: {
3558
3559 /* only one thread is going to be granted */
3560 updatebits |= (PTHRW_INC);
3561 updatebits |= PTH_RWL_KBIT | PTH_RWL_EBIT;
3562
3563 if (((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0) && (low_writer == premgen)) {
3564 block = 0;
3565 if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0)
3566 updatebits |= PTH_RWL_WBIT;
3567 if ((rwtype & PTH_RWSHFT_TYPE_YWRITE) != 0)
3568 updatebits |= PTH_RWL_YBIT;
3569 th = preth;
3570 uth = get_bsdthread_info(th);
3571 kwe = &uth->uu_kwe;
3572 kwe->kwe_psynchretval = updatebits;
3573 } else {
3574 /* we are not granting writelock to the preposting thread */
3575 kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwq);
3576
3577 /* if there are writers present, or a preposting write thread, the W bit is to be set */
3578 if ((kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0) )
3579 updatebits |= PTH_RWL_WBIT;
3580 if ((rwtype & PTH_RWSHFT_TYPE_YWRITE) != 0)
3581 updatebits |= PTH_RWL_YBIT;
3582 kwe->kwe_psynchretval = updatebits;
3583 kwe->kwe_kwqqueue = NULL;
3584 /* setup next in the queue */
3585 kret = ksyn_wakeup_thread(kwq, kwe);
3586 #if _PSYNCH_TRACE_
3587 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 3, kret, 0, 0);
3588 #endif /* _PSYNCH_TRACE_ */
3589 #if __TESTPANICS__
3590 if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
3591 panic("kwq_handle_unlock: panic waking up writer\n");
3592 #endif /* __TESTPANICS__ */
3593 if (kret == KERN_NOT_WAITING) {
3594 kwq->kw_pre_intrcount = 1; /* actually a count */
3595 kwq->kw_pre_intrseq = low_writer;
3596 kwq->kw_pre_intrretbits = updatebits;
3597 kwq->kw_pre_intrtype = PTH_RW_TYPE_WRITE;
3598 }
3599 error = 0;
3600 }
3601 kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;
3602 if ((updatebits & (PTH_RWL_KBIT | PTH_RWL_EBIT)) != (PTH_RWL_KBIT | PTH_RWL_EBIT))
3603 panic("kwq_handle_unlock: writer lock granted but no ke set %x\n", updatebits);
3604
3605 }
3606 break;
3607
3608 case PTH_RW_TYPE_YWRITE: {
3609 /* can reader locks be granted ahead of this write? */
3610 if ((rwtype & PTH_RWSHFT_TYPE_READ) != 0) {
3611 if ((rwtype & PTH_RWSHFT_TYPE_MASK) != 0) {
3612 if (rwtype & PTH_RWSHFT_TYPE_WRITE)
3613 updatebits |= (PTH_RWL_WBIT | PTH_RWL_KBIT);
3614 if (rwtype & PTH_RWSHFT_TYPE_YWRITE)
3615 updatebits |= PTH_RWL_YBIT;
3616 }
3617
3618 if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0) {
3619 /* is lowest reader less than the low writer? */
3620 if (is_seqlower(low_reader, low_writer) == 0)
3621 goto yielditis;
3622
3623 numneeded = ksyn_queue_count_tolowest(kq, low_writer);
3624 updatebits += (numneeded << PTHRW_COUNT_SHIFT);
3625 if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) && (is_seqlower(premgen, low_writer) != 0)) {
3626 uth = current_uthread();
3627 kwe = &uth->uu_kwe;
3628 /* add one more */
3629 updatebits += PTHRW_INC;
3630 kwe->kwe_psynchretval = updatebits;
3631 block = 0;
3632 }
3633
3634 kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;
3635
3636 /* there will be readers to wake up; no need to check for woken */
3637 failed = ksyn_wakeupreaders(kwq, low_writer, 0, 0, updatebits, NULL);
3638 #if _PSYNCH_TRACE_
3639 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 2, woken, failed, 0);
3640 #endif /* _PSYNCH_TRACE_ */
3641 if (failed != 0) {
3642 kwq->kw_pre_intrcount = failed; /* actually a count */
3643 kwq->kw_pre_intrseq = low_writer;
3644 kwq->kw_pre_intrretbits = updatebits;
3645 kwq->kw_pre_intrtype = PTH_RW_TYPE_READ;
3646 }
3647 error = 0;
3648 } else {
3649 /* wakeup all readers */
3650 numneeded = kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count;
3651 updatebits += (numneeded << PTHRW_COUNT_SHIFT);
3652 if ((prepost != 0) && ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0)) {
3653 uth = current_uthread();
3654 kwe = &uth->uu_kwe;
3655 updatebits += PTHRW_INC;
3656 kwe->kwe_psynchretval = updatebits;
3657 block = 0;
3658 }
3659 kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;
3660 failed = ksyn_wakeupreaders(kwq, low_writer, 0, 1, updatebits, &woken);
3661 #if _PSYNCH_TRACE_
3662 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 2, woken, failed, 0);
3663 #endif /* _PSYNCH_TRACE_ */
3664 if (failed != 0) {
3665 kwq->kw_pre_intrcount = failed; /* actually a count */
3666 kwq->kw_pre_intrseq = kwq->kw_highseq;
3667 kwq->kw_pre_intrretbits = updatebits;
3668 kwq->kw_pre_intrtype = PTH_RW_TYPE_READ;
3669 }
3670 error = 0;
3671 }
3672 } else {
3673 yielditis:
3674 /* no reads, so granting yielding writes */
3675 updatebits |= PTHRW_INC;
3676 updatebits |= PTH_RWL_KBIT | PTH_RWL_EBIT;
3677
3678 if (((flags & KW_UNLOCK_PREPOST_YWRLOCK) != 0) && (low_writer == premgen)) {
3679 /* preposting yielding write thread is being granted exclusive lock */
3680
3681 block = 0;
3682
3683 if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0)
3684 updatebits |= PTH_RWL_WBIT;
3685 else if (kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0)
3686 updatebits |= PTH_RWL_YBIT;
3687
3688 th = preth;
3689 uth = get_bsdthread_info(th);
3690 kwe = &uth->uu_kwe;
3691 kwe->kwe_psynchretval = updatebits;
3692 } else {
3693 /* we are granting yield writelock to some other thread */
3694 kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER], kwq);
3695
3696 if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0)
3697 updatebits |= PTH_RWL_WBIT;
3698 /* if there are ywriters present, or a preposting ywrite thread, the Y bit is to be set */
3699 else if ((kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_YWRLOCK) != 0) )
3700 updatebits |= PTH_RWL_YBIT;
3701
3702 kwe->kwe_psynchretval = updatebits;
3703 kwe->kwe_kwqqueue = NULL;
3704
3705 kret = ksyn_wakeup_thread(kwq, kwe);
3706 #if _PSYNCH_TRACE_
3707 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 3, kret, 0, 0);
3708 #endif /* _PSYNCH_TRACE_ */
3709 #if __TESTPANICS__
3710 if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
3711 panic("kwq_handle_unlock : panic waking up readers\n");
3712 #endif /* __TESTPANICS__ */
3713 if (kret == KERN_NOT_WAITING) {
3714 kwq->kw_pre_intrcount = 1; /* actually a count */
3715 kwq->kw_pre_intrseq = low_ywriter;
3716 kwq->kw_pre_intrretbits = updatebits;
3717 kwq->kw_pre_intrtype = PTH_RW_TYPE_YWRITE;
3718 }
3719 error = 0;
3720 }
3721 kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;
3722 }
3723 }
3724 break;
3725
3726 default:
3727 panic("rwunlock: invalid type for lock grants");
3728
3729 };
3730
3731
3732 out:
3733 if (updatep != NULL)
3734 *updatep = updatebits;
3735 if (blockp != NULL)
3736 *blockp = block;
3737 #if _PSYNCH_TRACE_
3738 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_END, (uint32_t)kwq->kw_addr, 0, updatebits, block, 0);
3739 #endif /* _PSYNCH_TRACE_ */
3740 return(error);
3741 }
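/*
 * Editor's illustration (not in the original source): for a read grant
 * with, say, three readers eligible below the lowest writer, the word
 * handed back is roughly
 *
 *	updatebits = (3 << PTHRW_COUNT_SHIFT) | PTH_RWL_WBIT | PTH_RWL_KBIT;
 *
 * i.e. the number of grants in the count bits plus the pending-writer
 * flags, with kw_nextseqword advanced by the same amount.
 */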
3742
3743 int
3744 kwq_handle_overlap(ksyn_wait_queue_t kwq, uint32_t lgenval, __unused uint32_t ugenval, uint32_t rw_wc, uint32_t *updatebitsp, __unused int flags , int * blockp)
3745 {
3746 uint32_t highword = kwq->kw_nextseqword & PTHRW_COUNT_MASK;
3747 uint32_t lowword = kwq->kw_lastseqword & PTHRW_COUNT_MASK;
3748 uint32_t val = 0;
3749 int withinseq;
3750
3751
3752 /* overlap is set, so no need to check for valid state for overlap */
3753
3754 withinseq = ((is_seqlower_eq(rw_wc, highword) != 0) || (is_seqhigher_eq(lowword, rw_wc) != 0));
3755
3756 if (withinseq != 0) {
3757 if ((kwq->kw_nextseqword & PTH_RWL_LBIT) == 0) {
3758 /* if no writers ahead, overlap granted */
3759 if ((lgenval & PTH_RWL_WBIT) == 0) {
3760 goto grantoverlap;
3761 }
3762 } else {
3763 /* L bit is set, so writers ahead do not count */
3764 goto grantoverlap;
3765 }
3766 }
3767
3768 *blockp = 1;
3769 return(0);
3770
3771 grantoverlap:
3772 /* increase the next expected seq by one */
3773 kwq->kw_nextseqword += PTHRW_INC;
3774 /* set the count to one, take the bits from the next seq, and add the M bit */
3775 val = PTHRW_INC;
3776 val |= ((kwq->kw_nextseqword & PTHRW_BIT_MASK) | PTH_RWL_MBIT);
3777 *updatebitsp = val;
3778 *blockp = 0;
3779 return(0);
3780 }
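/*
 * Editor's note: an overlap grant returns a word carrying exactly one
 * increment (PTHRW_INC), the flag bits of the freshly advanced
 * kw_nextseqword, and PTH_RWL_MBIT to mark the grant as an overlapping
 * (shared) one.
 */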
3781
3782 #if NOTYET
3783 /* handle downgrade actions */
3784 int
3785 kwq_handle_downgrade(ksyn_wait_queue_t kwq, uint32_t mgen, __unused int flags, __unused uint32_t premgen, __unused int * blockp)
3786 {
3787 uint32_t updatebits, lowriter = 0;
3788 int longreadset, allreaders, count;
3789
3790 /* can handle downgrade now */
3791 updatebits = mgen;
3792
3793 longreadset = 0;
3794 allreaders = 0;
3795 if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count > 0) {
3796 lowriter = kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_firstnum;
3797 if (kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count > 0) {
3798 if (is_seqlower(kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_firstnum, lowriter) != 0)
3799 longreadset = 1;
3800 }
3801 } else {
3802 allreaders = 1;
3803 if (kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count > 0) {
3804 lowriter = kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_firstnum;
3805 if (kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count > 0) {
3806 if (is_seqlower(kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_firstnum, lowriter) != 0)
3807 longreadset = 1;
3808 }
3809 }
3810 }
3811
3812 count = ksyn_wakeupreaders(kwq, lowriter, longreadset, allreaders, updatebits, NULL);
3813 if (count != 0) {
3814 kwq->kw_pre_limrd = count;
3815 kwq->kw_pre_limrdseq = lowriter;
3816 kwq->kw_pre_limrdbits = lowriter;
3817 /* need to handle prepost */
3818 }
3819 return(0);
3820 }
3821
3822 #endif /* NOTYET */
3823
3824 /************* Indiv queue support routines ************************/
3825 void
3826 ksyn_queue_init(ksyn_queue_t kq)
3827 {
3828 TAILQ_INIT(&kq->ksynq_kwelist);
3829 kq->ksynq_count = 0;
3830 kq->ksynq_firstnum = 0;
3831 kq->ksynq_lastnum = 0;
3832 }
3833
3834 int
3835 ksyn_queue_insert(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t mgen, struct uthread * uth, ksyn_waitq_element_t kwe, int fit)
3836 {
3837 uint32_t lockseq = mgen & PTHRW_COUNT_MASK;
3838 ksyn_waitq_element_t q_kwe, r_kwe;
3839 int res = 0;
3840 uthread_t nuth = NULL;
3841
3842 if (kq->ksynq_count == 0) {
3843 TAILQ_INSERT_HEAD(&kq->ksynq_kwelist, kwe, kwe_list);
3844 kq->ksynq_firstnum = lockseq;
3845 kq->ksynq_lastnum = lockseq;
3846 goto out;
3847 }
3848
3849 if (fit == FIRSTFIT) {
3850 /* TBD: if retry bit is set for mutex, add it to the head */
3851 /* firstfit, arriving order */
3852 TAILQ_INSERT_TAIL(&kq->ksynq_kwelist, kwe, kwe_list);
3853 if (is_seqlower (lockseq, kq->ksynq_firstnum) != 0)
3854 kq->ksynq_firstnum = lockseq;
3855 if (is_seqhigher (lockseq, kq->ksynq_lastnum) != 0)
3856 kq->ksynq_lastnum = lockseq;
3857 goto out;
3858 }
3859
3860 if ((lockseq == kq->ksynq_firstnum) || (lockseq == kq->ksynq_lastnum)) {
3861 /* During prepost, when a thread is being cancelled, we could have two entries with the same seq */
3862 if (kwe->kwe_flags == KWE_THREAD_PREPOST) {
3863 q_kwe = ksyn_queue_find_seq(kwq, kq, lockseq, 0);
3864 if ((q_kwe != NULL) && ((nuth = (uthread_t)q_kwe->kwe_uth) != NULL) &&
3865 ((nuth->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL)) {
3866 TAILQ_INSERT_TAIL(&kq->ksynq_kwelist, kwe, kwe_list);
3867 goto out;
3868
3869 } else {
3870 __FAILEDUSERTEST__("ksyn_queue_insert: two threads with same lockseq ");
3871 res = EBUSY;
3872 goto out1;
3873 }
3874 } else {
3875 __FAILEDUSERTEST__("ksyn_queue_insert: two threads with same lockseq ");
3876 res = EBUSY;
3877 goto out1;
3878 }
3879 }
3880
3881 /* check for insertion at the tail (next higher seq) */
3882 if (is_seqlower(kq->ksynq_lastnum, lockseq) != 0) {
3883 TAILQ_INSERT_TAIL(&kq->ksynq_kwelist, kwe, kwe_list);
3884 kq->ksynq_lastnum = lockseq;
3885 goto out;
3886 }
3887
3888 if (is_seqlower(lockseq, kq->ksynq_firstnum) != 0) {
3889 TAILQ_INSERT_HEAD(&kq->ksynq_kwelist, kwe, kwe_list);
3890 kq->ksynq_firstnum = lockseq;
3891 goto out;
3892 }
3893
3894 /* fall back to ordered (slow) insert */
3895 TAILQ_FOREACH_SAFE(q_kwe, &kq->ksynq_kwelist, kwe_list, r_kwe) {
3896 if (is_seqhigher(q_kwe->kwe_lockseq, lockseq) != 0) {
3897 TAILQ_INSERT_BEFORE(q_kwe, kwe, kwe_list);
3898 goto out;
3899 }
3900 }
3901
3902 #if __TESTPANICS__
3903 panic("failed to insert \n");
3904 #endif /* __TESTPANICS__ */
3905
3906 out:
3907 if (uth != NULL)
3908 kwe->kwe_uth = uth;
3909 kq->ksynq_count++;
3910 kwq->kw_inqueue++;
3911 update_low_high(kwq, lockseq);
3912 out1:
3913 return(res);
3914 }
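/*
 * Editor's note: FIRSTFIT appends in arrival order and only maintains the
 * first/last sequence hints, whereas the default SEQFIT path keeps the
 * list sorted by lock sequence and rejects duplicate sequences, except
 * for the prepost-during-cancel case special-cased above.
 */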
3915
3916 ksyn_waitq_element_t
3917 ksyn_queue_removefirst(ksyn_queue_t kq, ksyn_wait_queue_t kwq)
3918 {
3919 ksyn_waitq_element_t kwe = NULL;
3920 ksyn_waitq_element_t q_kwe;
3921 uint32_t curseq;
3922
3923 if (kq->ksynq_count != 0) {
3924 kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
3925 TAILQ_REMOVE(&kq->ksynq_kwelist, kwe, kwe_list);
3926 curseq = kwe->kwe_lockseq & PTHRW_COUNT_MASK;
3927 kq->ksynq_count--;
3928 kwq->kw_inqueue--;
3929
3930 if (kq->ksynq_count != 0) {
3931 q_kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
3932 kq->ksynq_firstnum = (q_kwe->kwe_lockseq & PTHRW_COUNT_MASK);
3933 } else {
3934 kq->ksynq_firstnum = 0;
3935 kq->ksynq_lastnum = 0;
3936
3937 }
3938 if (kwq->kw_inqueue == 0) {
3939 kwq->kw_lowseq = 0;
3940 kwq->kw_highseq = 0;
3941 } else {
3942 if (kwq->kw_lowseq == curseq)
3943 kwq->kw_lowseq = find_nextlowseq(kwq);
3944 if (kwq->kw_highseq == curseq)
3945 kwq->kw_highseq = find_nexthighseq(kwq);
3946 }
3947 }
3948 return(kwe);
3949 }
3950
3951 void
3952 ksyn_queue_removeitem(ksyn_wait_queue_t kwq, ksyn_queue_t kq, ksyn_waitq_element_t kwe)
3953 {
3954 ksyn_waitq_element_t q_kwe;
3955 uint32_t curseq;
3956
3957 if (kq->ksynq_count > 0) {
3958 TAILQ_REMOVE(&kq->ksynq_kwelist, kwe, kwe_list);
3959 kq->ksynq_count--;
3960 if (kq->ksynq_count != 0) {
3961 q_kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
3962 kq->ksynq_firstnum = (q_kwe->kwe_lockseq & PTHRW_COUNT_MASK);
3963 q_kwe = TAILQ_LAST(&kq->ksynq_kwelist, ksynq_kwelist_head);
3964 kq->ksynq_lastnum = (q_kwe->kwe_lockseq & PTHRW_COUNT_MASK);
3965 } else {
3966 kq->ksynq_firstnum = 0;
3967 kq->ksynq_lastnum = 0;
3968
3969 }
3970 kwq->kw_inqueue--;
3971 curseq = kwe->kwe_lockseq & PTHRW_COUNT_MASK;
3972 if (kwq->kw_inqueue == 0) {
3973 kwq->kw_lowseq = 0;
3974 kwq->kw_highseq = 0;
3975 } else {
3976 if (kwq->kw_lowseq == curseq)
3977 kwq->kw_lowseq = find_nextlowseq(kwq);
3978 if (kwq->kw_highseq == curseq)
3979 kwq->kw_highseq = find_nexthighseq(kwq);
3980 }
3981 }
3982 }
3983
3984 /* find the thread and removes from the queue */
3985 ksyn_waitq_element_t
3986 ksyn_queue_find_seq(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t seq, int remove)
3987 {
3988 ksyn_waitq_element_t q_kwe, r_kwe;
3989
3990 /* TBD: bail out if higher seq is seen */
3991 /* case where wrap in the tail of the queue exists */
3992 TAILQ_FOREACH_SAFE(q_kwe, &kq->ksynq_kwelist, kwe_list, r_kwe) {
3993 if ((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK) == seq) {
3994 if (remove != 0)
3995 ksyn_queue_removeitem(kwq, kq, q_kwe);
3996 return(q_kwe);
3997 }
3998 }
3999 return(NULL);
4000 }
4001
4002
4003 /* find the thread at the target sequence (or a broadcast/prepost at or above) */
4004 ksyn_waitq_element_t
4005 ksyn_queue_find_cvpreposeq(ksyn_queue_t kq, uint32_t cgen)
4006 {
4007 ksyn_waitq_element_t q_kwe, r_kwe;
4008 uint32_t lgen = (cgen & PTHRW_COUNT_MASK);
4009
4010 /* case where wrap in the tail of the queue exists */
4011 TAILQ_FOREACH_SAFE(q_kwe, &kq->ksynq_kwelist, kwe_list, r_kwe) {
4012
4013 /* skip the lower entries */
4014 if (is_seqlower((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK), cgen) != 0)
4015 continue;
4016
4017 switch (q_kwe->kwe_flags) {
4018
4019 case KWE_THREAD_INWAIT:
4020 if ((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK) != lgen)
4021 break;
4022 /* fall thru */
4023
4024 case KWE_THREAD_BROADCAST:
4025 case KWE_THREAD_PREPOST:
4026 return (q_kwe);
4027 }
4028 }
4029 return(NULL);
4030 }
4031
4032 /* look for a thread at the signal seq; failing that, any eligible one up to uptoseq */
4033 ksyn_waitq_element_t
4034 ksyn_queue_find_signalseq(__unused ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t uptoseq, uint32_t signalseq)
4035 {
4036 ksyn_waitq_element_t q_kwe, r_kwe, t_kwe = NULL;
4037
4038 /* case where wrap in the tail of the queue exists */
4039 TAILQ_FOREACH_SAFE(q_kwe, &kq->ksynq_kwelist, kwe_list, r_kwe) {
4040
4041 switch (q_kwe->kwe_flags) {
4042
4043 case KWE_THREAD_PREPOST:
4044 if (is_seqhigher((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK), uptoseq))
4045 return t_kwe;
4046 /* fall thru */
4047
4048 case KWE_THREAD_BROADCAST:
4049 /* match any prepost at our same uptoseq or any broadcast above */
4050 if (is_seqlower((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK), uptoseq))
4051 continue;
4052 return q_kwe;
4053
4054 case KWE_THREAD_INWAIT:
4055 /*
4056 * Match any (non-cancelled) thread at or below our upto sequence -
4057 * but prefer an exact match to our signal sequence (if present) to
4058 * keep exact matches happening.
4059 */
4060 if (is_seqhigher((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK), uptoseq))
4061 return t_kwe;
4062
4063 if (q_kwe->kwe_kwqqueue == kwq) {
4064 uthread_t ut = q_kwe->kwe_uth;
4065 if ((ut->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) != UT_CANCEL) {
4066 /* if equal or higher than our signal sequence, return this one */
4067 if (is_seqhigher_eq((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK), signalseq))
4068 return q_kwe;
4069
4070 /* otherwise, just remember this eligible thread and move on */
4071 if (t_kwe == NULL)
4072 t_kwe = q_kwe;
4073 }
4074 }
4075 break;
4076
4077 default:
4078 panic("ksyn_queue_find_signalseq(): unknow wait queue element type (%d)\n", q_kwe->kwe_flags);
4079 break;
4080 }
4081 }
4082 return t_kwe;
4083 }
4084
4085
4086 int
4087 ksyn_queue_move_tofree(ksyn_wait_queue_t ckwq, ksyn_queue_t kq, uint32_t upto, ksyn_queue_t kfreeq, int all, int release)
4088 {
4089 ksyn_waitq_element_t kwe;
4090 int count = 0;
4091 uint32_t tseq = upto & PTHRW_COUNT_MASK;
4092 #if _PSYNCH_TRACE_
4093 uthread_t ut;
4094 #endif /* _PSYNCH_TRACE_ */
4095
4096 ksyn_queue_init(kfreeq);
4097
4098 /* walk the entries up to the target seq; these should normally be only fakes */
4099 kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
4100 while (kwe != NULL) {
4101 if ((all == 0) && (is_seqhigher((kwe->kwe_lockseq & PTHRW_COUNT_MASK), tseq) != 0))
4102 break;
4103 if (kwe->kwe_flags == KWE_THREAD_INWAIT) {
4104 /*
4105 * This scenario is typically noticed when the cvar is
4106 * reinited and the new waiters are waiting. We can
4107 * return them as spurious wait so the cvar state gets
4108 * reset correctly.
4109 */
4110 #if _PSYNCH_TRACE_
4111 ut = (uthread_t)kwe->kwe_uth;
4112 #endif /* _PSYNCH_TRACE_ */
4113
4114 /* skip canceled ones */
4115 /* wake the rest */
4116 ksyn_queue_removeitem(ckwq, kq, kwe);
4117 /* set M bit to indicate to the waking CV to return the Inc val */
4118 kwe->kwe_psynchretval = PTHRW_INC | (PTH_RWS_CV_MBIT | PTH_RWL_MTX_WAIT);
4119 kwe->kwe_kwqqueue = NULL;
4120 #if _PSYNCH_TRACE_
4121 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVHBROAD | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xcafecaf3, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
4122 #endif /* _PSYNCH_TRACE_ */
4123 (void)ksyn_wakeup_thread(ckwq, kwe);
4124 } else {
4125 ksyn_queue_removeitem(ckwq, kq, kwe);
4126 TAILQ_INSERT_TAIL(&kfreeq->ksynq_kwelist, kwe, kwe_list);
4127 ckwq->kw_fakecount--;
4128 count++;
4129 }
4130 kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
4131 }
4132
4133 if ((release != 0) && (count != 0)) {
4134 kwe = TAILQ_FIRST(&kfreeq->ksynq_kwelist);
4135 while (kwe != NULL) {
4136 TAILQ_REMOVE(&kfreeq->ksynq_kwelist, kwe, kwe_list);
4137 zfree(kwe_zone, kwe);
4138 kwe = TAILQ_FIRST(&kfreeq->ksynq_kwelist);
4139 }
4140 }
4141
4142 return(count);
4143 }
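/*
 * Editor's note: despite the name, INWAIT entries found below 'upto' are
 * not freed but woken as spurious (M bit + PTH_RWL_MTX_WAIT); only the
 * fake (broadcast/prepost) entries are moved to kfreeq, and 'release'
 * controls whether they are zfree'd here or left for the caller.
 */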
4144
4145 /*************************************************************************/
4146
4147 void
4148 update_low_high(ksyn_wait_queue_t kwq, uint32_t lockseq)
4149 {
4150 if (kwq->kw_inqueue == 1) {
4151 kwq->kw_lowseq = lockseq;
4152 kwq->kw_highseq = lockseq;
4153 } else {
4154 if (is_seqlower(lockseq, kwq->kw_lowseq) != 0)
4155 kwq->kw_lowseq = lockseq;
4156 if (is_seqhigher(lockseq, kwq->kw_highseq) != 0)
4157 kwq->kw_highseq = lockseq;
4158 }
4159 }
4160
4161 uint32_t
4162 find_nextlowseq(ksyn_wait_queue_t kwq)
4163 {
4164 uint32_t numbers[KSYN_QUEUE_MAX]; /* one slot per queue */
4165 int count = 0, i;
4166 uint32_t lowest;
4167
4168 for (i = 0; i < KSYN_QUEUE_MAX; i++) {
4169 if (kwq->kw_ksynqueues[i].ksynq_count != 0) {
4170 numbers[count] = kwq->kw_ksynqueues[i].ksynq_firstnum;
4171 count++;
4172 }
4173 }
4174
4175 if (count == 0)
4176 return(0);
4177 lowest = numbers[0];
4178 if (count > 1) {
4179 for (i = 1; i < count; i++) {
4180 if (is_seqlower(numbers[i], lowest) != 0)
4181 lowest = numbers[i];
4183 }
4184 }
4185 return(lowest);
4186 }
4187
4188 uint32_t
4189 find_nexthighseq(ksyn_wait_queue_t kwq)
4190 {
4191 uint32_t numbers[KSYN_QUEUE_MAX]; /* one slot per queue */
4192 int count = 0, i;
4193 uint32_t highest;
4194
4195 for (i = 0; i < KSYN_QUEUE_MAX; i++) {
4196 if (kwq->kw_ksynqueues[i].ksynq_count != 0) {
4197 numbers[count] = kwq->kw_ksynqueues[i].ksynq_lastnum;
4198 count++;
4199 }
4200 }
4201
4204 if (count == 0)
4205 return(0);
4206 highest = numbers[0];
4207 if (count > 1) {
4208 for (i = 1; i < count; i++) {
4209 if (is_seqhigher(numbers[i], highest) != 0)
4210 highest = numbers[i];
4212 }
4213 }
4214 return(highest);
4215 }
4216
4217 int
4218 is_seqlower(uint32_t x, uint32_t y)
4219 {
4220 if (x < y) {
4221 if ((y-x) < (PTHRW_MAX_READERS/2))
4222 return(1);
4223 } else {
4224 if ((x-y) > (PTHRW_MAX_READERS/2))
4225 return(1);
4226 }
4227 return(0);
4228 }
4229
4230 int
4231 is_seqlower_eq(uint32_t x, uint32_t y)
4232 {
4233 if (x==y)
4234 return(1);
4235 else
4236 return(is_seqlower(x,y));
4237 }
4238
4239 int
4240 is_seqhigher(uint32_t x, uint32_t y)
4241 {
4242 if (x > y) {
4243 if ((x-y) < (PTHRW_MAX_READERS/2))
4244 return(1);
4245 } else {
4246 if ((y-x) > (PTHRW_MAX_READERS/2))
4247 return(1);
4248 }
4249 return(0);
4250 }
4251
4252 int
4253 is_seqhigher_eq(uint32_t x, uint32_t y)
4254 {
4255 if (x==y)
4256 return(1);
4257 else
4258 return(is_seqhigher(x,y));
4259 }
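/*
 * Editor's illustration (compiled out, not in the original source): the
 * comparators above treat the 32-bit sequence space as a ring, so x is
 * "lower" than y when the forward distance from x to y is less than half
 * the window (PTHRW_MAX_READERS/2). Assumes an assert() macro purely for
 * the sake of the sketch.
 */
#if 0
static void
__seqcmp_examples(void)
{
	assert(is_seqlower(0x00000100, 0x00000200) == 1);	/* plain ordering */
	assert(is_seqlower(0xfffffe00, 0x00000100) == 1);	/* wraps past 2^32 */
	assert(is_seqhigher(0x00000100, 0xfffffe00) == 1);	/* mirror case */
	assert(is_seqlower_eq(0x00000300, 0x00000300) == 1);	/* equality */
}
#endif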
4260
4261
4262 int
4263 find_diff(uint32_t upto, uint32_t lowest)
4264 {
4265 uint32_t diff;
4266
4267 if (upto == lowest)
4268 return(0);
4269 #if 0
4270 diff = diff_genseq(upto, lowest);
4271 #else
4272 if (is_seqlower(upto, lowest) != 0)
4273 diff = diff_genseq(lowest, upto);
4274 else
4275 diff = diff_genseq(upto, lowest);
4276 #endif
4277 diff = (diff >> PTHRW_COUNT_SHIFT);
4278 return(diff);
4279 }
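/*
 * Editor's note: each waiter advances the generation by PTHRW_INC, so
 * shifting the raw sequence distance right by PTHRW_COUNT_SHIFT converts
 * it into a number of waiters; e.g. a raw distance of 5 * PTHRW_INC
 * yields a diff of 5.
 */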
4280
4281
4282 int
4283 find_seq_till(ksyn_wait_queue_t kwq, uint32_t upto, uint32_t nwaiters, uint32_t *countp)
4284 {
4285 int i;
4286 uint32_t count = 0;
4287
4288
4289 #if _PSYNCH_TRACE_
4290 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_FSEQTILL | DBG_FUNC_START, 0, 0, upto, nwaiters, 0);
4291 #endif /* _PSYNCH_TRACE_ */
4292
4293 for (i = 0; i < KSYN_QUEUE_MAX; i++) {
4294 count += ksyn_queue_count_tolowest(&kwq->kw_ksynqueues[i], upto);
4295 #if _PSYNCH_TRACE_
4296 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_FSEQTILL | DBG_FUNC_NONE, 0, 1, i, count, 0);
4297 #endif /* _PSYNCH_TRACE_ */
4298 if (count >= nwaiters) {
4299 break;
4300 }
4301 }
4302
4303 if (countp != NULL) {
4304 *countp = count;
4305 }
4306 #if _PSYNCH_TRACE_
4307 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_FSEQTILL | DBG_FUNC_END, 0, 0, count, nwaiters, 0);
4308 #endif /* _PSYNCH_TRACE_ */
4309 if (count == 0)
4310 return(0);
4311 else if (count >= nwaiters)
4312 return(1);
4313 else
4314 return(0);
4315 }
4316
4317
4318 uint32_t
4319 ksyn_queue_count_tolowest(ksyn_queue_t kq, uint32_t upto)
4320 {
4321 uint32_t i = 0;
4322 ksyn_waitq_element_t kwe, newkwe;
4323 uint32_t curval;
4324
4325 /* if nothing or the first num is greater than upto, return none */
4326 if ((kq->ksynq_count == 0) || (is_seqhigher(kq->ksynq_firstnum, upto) != 0))
4327 return(0);
4328 if (upto == kq->ksynq_firstnum)
4329 return(1);
4330
4331 TAILQ_FOREACH_SAFE(kwe, &kq->ksynq_kwelist, kwe_list, newkwe) {
4332 curval = (kwe->kwe_lockseq & PTHRW_COUNT_MASK);
4333 if (upto == curval) {
4334 i++;
4335 break;
4336 } else if (is_seqhigher(curval, upto) != 0) {
4337 break;
4338 } else {
4339 /* seq is lower */
4340 i++;
4341 }
4342 }
4343 return(i);
4344 }
4345
4346
4347 /* handles the cond broadcast of a cvar; returns the number of woken threads and the bits for the syscall return */
4348 void
4349 ksyn_handle_cvbroad(ksyn_wait_queue_t ckwq, uint32_t upto, uint32_t * updatep)
4350 {
4351 kern_return_t kret;
4352 ksyn_queue_t kq;
4353 ksyn_waitq_element_t kwe, newkwe;
4354 uint32_t updatebits = 0;
4355 struct ksyn_queue kfreeq;
4356 uthread_t ut;
4357
4358 #if _PSYNCH_TRACE_
4359 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVHBROAD | DBG_FUNC_START, 0xcbcbcbc2, upto, 0, 0, 0);
4360 #endif /* _PSYNCH_TRACE_ */
4361
4362 ksyn_queue_init(&kfreeq);
4363 kq = &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER];
4364
4365 retry:
4366 TAILQ_FOREACH_SAFE(kwe, &kq->ksynq_kwelist, kwe_list, newkwe) {
4367
4368 if (is_seqhigher((kwe->kwe_lockseq & PTHRW_COUNT_MASK), upto)) /* outside our range */
4369 break;
4370
4371 /* now handle the one we found (inside the range) */
4372 switch (kwe->kwe_flags) {
4373
4374 case KWE_THREAD_INWAIT:
4375 ut = (uthread_t)kwe->kwe_uth;
4376
4377 /* skip canceled ones */
4378 if (kwe->kwe_kwqqueue != ckwq ||
4379 (ut->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL)
4380 break;
4381
4382 /* wake the rest */
4383 ksyn_queue_removeitem(ckwq, kq, kwe);
4384 kwe->kwe_psynchretval = PTH_RWL_MTX_WAIT;
4385 kwe->kwe_kwqqueue = NULL;
4386 #if _PSYNCH_TRACE_
4387 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVHBROAD | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xcafecaf2, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
4388 #endif /* _PSYNCH_TRACE_ */
4389 kret = ksyn_wakeup_thread(ckwq, kwe);
4390 #if __TESTPANICS__
4391 if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
4392 panic("ksyn_wakeupreaders: panic waking up readers\n");
4393 #endif /* __TESTPANICS__ */
4394 updatebits += PTHRW_INC;
4395 break;
4396
4397 case KWE_THREAD_BROADCAST:
4398 case KWE_THREAD_PREPOST:
4399 ksyn_queue_removeitem(ckwq, kq, kwe);
4400 TAILQ_INSERT_TAIL(&kfreeq.ksynq_kwelist, kwe, kwe_list);
4401 ckwq->kw_fakecount--;
4402 break;
4403
4404 default:
4405 panic("unknown kweflags\n");
4406 break;
4407 }
4408 }
4409
4410 /* Need to enter a broadcast in the queue (if not already at L == S) */
4411
4412 if ((ckwq->kw_lword & PTHRW_COUNT_MASK) != (ckwq->kw_sword & PTHRW_COUNT_MASK)) {
4413
4414 newkwe = TAILQ_FIRST(&kfreeq.ksynq_kwelist);
4415 if (newkwe == NULL) {
4416 ksyn_wqunlock(ckwq);
4417 newkwe = (ksyn_waitq_element_t)zalloc(kwe_zone);
4418 TAILQ_INSERT_TAIL(&kfreeq.ksynq_kwelist, newkwe, kwe_list);
4419 ksyn_wqlock(ckwq);
4420 goto retry;
4421 }
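		/*
		 * Editor's note: the branch above drops the kwq lock because
		 * zalloc() may block; the scan restarts from 'retry' since the
		 * queue can change while the lock is not held.
		 */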
4422
4423 TAILQ_REMOVE(&kfreeq.ksynq_kwelist, newkwe, kwe_list);
4424 bzero(newkwe, sizeof(struct ksyn_waitq_element));
4425 newkwe->kwe_kwqqueue = ckwq;
4426 newkwe->kwe_flags = KWE_THREAD_BROADCAST;
4427 newkwe->kwe_lockseq = upto;
4428 newkwe->kwe_count = 0;
4429 newkwe->kwe_uth = NULL;
4430 newkwe->kwe_psynchretval = 0;
4431
4432 #if _PSYNCH_TRACE_
4433 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVHBROAD | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xfeedfeed, upto, 0, 0);
4434 #endif /* _PSYNCH_TRACE_ */
4435
4436 (void)ksyn_queue_insert(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], upto, NULL, newkwe, SEQFIT);
4437 ckwq->kw_fakecount++;
4438 }
4439
4440 /* free up any remaining things stumbled across above */
4441 kwe = TAILQ_FIRST(&kfreeq.ksynq_kwelist);
4442 while (kwe != NULL) {
4443 TAILQ_REMOVE(&kfreeq.ksynq_kwelist, kwe, kwe_list);
4444 zfree(kwe_zone, kwe);
4445 kwe = TAILQ_FIRST(&kfreeq.ksynq_kwelist);
4446 }
4447
4448 if (updatep != NULL)
4449 *updatep = updatebits;
4450
4451 #if _PSYNCH_TRACE_
4452 __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVHBROAD | DBG_FUNC_END, 0xeeeeeeed, updatebits, 0, 0, 0);
4453 #endif /* _PSYNCH_TRACE_ */
4454 }
4455
4456 void
4457 ksyn_cvupdate_fixup(ksyn_wait_queue_t ckwq, uint32_t *updatep, ksyn_queue_t kfreeq, int release)
4458 {
4459 uint32_t updatebits = 0;
4460
4461 if (updatep != NULL)
4462 updatebits = *updatep;
4463 if ((ckwq->kw_lword & PTHRW_COUNT_MASK) == (ckwq->kw_sword & PTHRW_COUNT_MASK)) {
4464 updatebits |= PTH_RWS_CV_CBIT;
4465 if (ckwq->kw_inqueue != 0) {
4466 /* FREE THE QUEUE */
4467 ksyn_queue_move_tofree(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], ckwq->kw_lword, kfreeq, 0, release);
4468 #if __TESTPANICS__
4469 if (ckwq->kw_inqueue != 0)
4470 panic("ksyn_cvupdate_fixup: L == S, but entries in queue beyond S");
4471 #endif /* __TESTPANICS__ */
4472 }
4473 ckwq->kw_lword = ckwq->kw_uword = ckwq->kw_sword = 0;
4474 ckwq->kw_kflags |= KSYN_KWF_ZEROEDOUT;
4475 } else if ((ckwq->kw_inqueue != 0) && (ckwq->kw_fakecount == ckwq->kw_inqueue)) {
4476 /* only fake entries are present in the queue */
4477 updatebits |= PTH_RWS_CV_PBIT;
4478 }
4479 if (updatep != NULL)
4480 *updatep = updatebits;
4481 }
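/*
 * Editor's note: the C bit reports that all waiters have been consumed
 * (L == S), at which point the sequence words are zeroed for reuse; the
 * P bit reports that only fake (prepost/broadcast) entries remain in the
 * queue.
 */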
4482
4483 void
4484 psynch_zoneinit(void)
4485 {
4486 kwq_zone = (zone_t)zinit(sizeof(struct ksyn_wait_queue), 8192 * sizeof(struct ksyn_wait_queue), 4096, "ksyn_waitqueue zone");
4487 kwe_zone = (zone_t)zinit(sizeof(struct ksyn_waitq_element), 8192 * sizeof(struct ksyn_waitq_element), 4096, "ksyn_waitq_element zone");
4488 }
4489 #endif /* PSYNCH */