/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995-2005 Apple Computer, Inc. All Rights Reserved */
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/systm.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>
#include <sys/pthread_internal.h>

#include <mach/mach_types.h>
#include <mach/vm_prot.h>
#include <mach/semaphore.h>
#include <mach/sync_policy.h>
#include <mach/task.h>
#include <kern/kern_types.h>
#include <kern/task.h>
#include <kern/clock.h>
#include <mach/kern_return.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/thread_call.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/processor.h>
#include <kern/affinity.h>
#include <kern/wait_queue.h>
#include <kern/mach_param.h>
#include <mach/mach_vm.h>
#include <mach/mach_param.h>
#include <mach/thread_policy.h>
#include <mach/message.h>
#include <mach/port.h>
#include <vm/vm_protos.h>
#include <vm/vm_map.h>
#include <mach/vm_region.h>

#include <libkern/OSAtomic.h>

#include <pexpert/pexpert.h>
#define __PSYNCH_DEBUG__ 0		/* debug panic actions */
#define _PSYNCH_TRACE_ 1		/* kdebug trace */

#define __TESTMODE__ 2			/* 0 - return error on user error conditions */
					/* 1 - log error on user error conditions */
					/* 2 - abort caller on user error conditions */
					/* 3 - panic on user error conditions */
static int __test_panics__;
static int __test_aborts__;
static int __test_prints__;
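
/*
 * Illustrative sketch (editorial, not from the original source): the three
 * __test_* flags are presumably derived from __TESTMODE__ during subsystem
 * init, roughly as below.  The routine name and exact wiring are assumptions.
 */
#if 0	/* example only */
static void __testmode_init(void)
{
	if (__TESTMODE__ >= 1)
		__test_prints__ = 1;	/* 1..3: log user error conditions */
	if (__TESTMODE__ == 2)
		__test_aborts__ = 1;	/* 2: abort the offending caller */
	if (__TESTMODE__ == 3)
		__test_panics__ = 1;	/* 3: panic the kernel */
}
#endif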
static inline void __FAILEDUSERTEST__(const char *str)
{
	proc_t p;

	if (__test_panics__ != 0)
		panic(str);

	if (__test_aborts__ != 0 || __test_prints__ != 0)
		p = current_proc();

	if (__test_prints__ != 0)
		printf("PSYNCH: pid[%d]: %s\n", p->p_pid, str);

	if (__test_aborts__ != 0)
		psignal(p, SIGABRT);
}
#if _PSYNCH_TRACE_
#define _PSYNCH_TRACE_MLWAIT	0x9000000
#define _PSYNCH_TRACE_MLDROP	0x9000004
#define _PSYNCH_TRACE_CVWAIT	0x9000008
#define _PSYNCH_TRACE_CVSIGNAL	0x900000c
#define _PSYNCH_TRACE_CVBROAD	0x9000010
#define _PSYNCH_TRACE_KMDROP	0x9000014
#define _PSYNCH_TRACE_RWRDLOCK	0x9000018
#define _PSYNCH_TRACE_RWLRDLOCK	0x900001c
#define _PSYNCH_TRACE_RWWRLOCK	0x9000020
#define _PSYNCH_TRACE_RWYWRLOCK	0x9000024
#define _PSYNCH_TRACE_RWUPGRADE	0x9000028
#define _PSYNCH_TRACE_RWDOWNGRADE	0x900002c
#define _PSYNCH_TRACE_RWUNLOCK	0x9000030
#define _PSYNCH_TRACE_RWUNLOCK2	0x9000034
#define _PSYNCH_TRACE_RWHANDLEU	0x9000038
#define _PSYNCH_TRACE_FSEQTILL	0x9000040
#define _PSYNCH_TRACE_CLRPRE	0x9000044
#define _PSYNCH_TRACE_CVHBROAD	0x9000048
#define _PSYNCH_TRACE_CVSEQ	0x900004c
#define _PSYNCH_TRACE_THWAKEUP	0x9000050
/* user side */
#define _PSYNCH_TRACE_UM_LOCK	0x9000060
#define _PSYNCH_TRACE_UM_UNLOCK	0x9000064
#define _PSYNCH_TRACE_UM_MHOLD	0x9000068
#define _PSYNCH_TRACE_UM_MDROP	0x900006c
#define _PSYNCH_TRACE_UM_CVWAIT	0x9000070
#define _PSYNCH_TRACE_UM_CVSIG	0x9000074
#define _PSYNCH_TRACE_UM_CVBRD	0x9000078
proc_t pthread_debug_proc = PROC_NULL;
static inline void __PTHREAD_TRACE_DEBUG(uint32_t debugid, uintptr_t arg1,
		uintptr_t arg2,
		uintptr_t arg3,
		uintptr_t arg4,
		uintptr_t arg5)
{
	proc_t p = current_proc();

	if ((pthread_debug_proc != NULL) && (p == pthread_debug_proc))
		KERNEL_DEBUG_CONSTANT(debugid, arg1, arg2, arg3, arg4, arg5);
}
#endif /* _PSYNCH_TRACE_ */
#define ECVCERORR	256
#define ECVPERORR	512

lck_mtx_t * pthread_list_mlock;
#define PTHHASH(addr)	(&pthashtbl[(addr) & pthhash])
extern LIST_HEAD(pthhashhead, ksyn_wait_queue) *pth_glob_hashtbl;
struct pthhashhead * pth_glob_hashtbl;
int pthhash;

LIST_HEAD(, ksyn_wait_queue) pth_free_list;
int num_total_kwq = 0;		/* number of kwq in use currently */
int num_infreekwq = 0;		/* number of kwq in free list */
int num_freekwq = 0;		/* number of kwq actually freed from the free list */
int num_reusekwq = 0;		/* number of kwq pulled back for reuse from free list */
int num_addedfreekwq = 0;	/* number of added free kwq from the last instance */
int num_lastfreekwqcount = 0;	/* the free count from the last time */
static int PTH_HASHSIZE = 100;

static zone_t kwq_zone;		/* zone for allocation of ksyn_queue */
static zone_t kwe_zone;		/* zone for allocation of ksyn_waitq_element */
struct ksyn_queue {
	TAILQ_HEAD(ksynq_kwelist_head, ksyn_waitq_element) ksynq_kwelist;
	uint32_t	ksynq_count;		/* number of entries in queue */
	uint32_t	ksynq_firstnum;		/* lowest seq in queue */
	uint32_t	ksynq_lastnum;		/* highest seq in queue */
};
typedef struct ksyn_queue * ksyn_queue_t;
#define KSYN_QUEUE_READ		0
#define KSYN_QUEUE_LREAD	1
#define KSYN_QUEUE_WRITER	2
#define KSYN_QUEUE_YWRITER	3
#define KSYN_QUEUE_UPGRADE	4
#define KSYN_QUEUE_MAX		5
struct ksyn_wait_queue {
	LIST_ENTRY(ksyn_wait_queue) kw_hash;
	LIST_ENTRY(ksyn_wait_queue) kw_list;
	user_addr_t kw_addr;
	uint64_t kw_owner;
	uint64_t kw_object;		/* object backing in shared mode */
	uint64_t kw_offset;		/* offset inside the object in shared mode */
	int	kw_flags;		/* mutex, cvar options/flags */
	int	kw_pflags;		/* flags under listlock protection */
	struct timeval kw_ts;		/* timeval needed for upkeep before free */
	int	kw_iocount;		/* inuse reference */
	int	kw_dropcount;		/* current users unlocking... */

	int	kw_type;		/* queue type like mutex, cvar, etc */
	uint32_t kw_inqueue;		/* num of waiters held */
	uint32_t kw_fakecount;		/* number of error/prepost fakes */
	uint32_t kw_highseq;		/* highest seq in the queue */
	uint32_t kw_lowseq;		/* lowest seq in the queue */
	uint32_t kw_lword;		/* L value from userland */
	uint32_t kw_uword;		/* U word value from userland */
	uint32_t kw_sword;		/* S word value from userland */
	uint32_t kw_lastunlockseq;	/* the last seq that unlocked */
/* for CV to be used as the seq kernel has seen so far */
#define kw_cvkernelseq kw_lastunlockseq
	uint32_t kw_lastseqword;	/* the last seq that unlocked */
/* for mutex and cvar we need to track I bit values */
	uint32_t kw_nextseqword;	/* the last seq that unlocked; with num of waiters */
#define kw_initrecv kw_nextseqword	/* number of incoming waiters with Ibit seen so far */
	uint32_t kw_overlapwatch;	/* chance for overlaps */
#define kw_initcount kw_overlapwatch	/* number of incoming waiters with Ibit expected */
	uint32_t kw_initcountseq;	/* highest seq with Ibit on for mutex and cvar */
	uint32_t kw_pre_rwwc;		/* prepost count */
	uint32_t kw_pre_lockseq;	/* prepost target seq */
	uint32_t kw_pre_sseq;		/* prepost target sword, in cvar used for mutexowned */
	uint32_t kw_pre_intrcount;	/* prepost of missed wakeup due to intrs */
	uint32_t kw_pre_intrseq;	/* prepost of missed wakeup limit seq */
	uint32_t kw_pre_intrretbits;	/* return bits value for missed wakeup threads */
	uint32_t kw_pre_intrtype;	/* type of failed wakeups */

	int	kw_kflags;
	struct ksyn_queue kw_ksynqueues[KSYN_QUEUE_MAX];	/* queues to hold threads */
	lck_mtx_t kw_lock;		/* mutex lock protecting this structure */
};
typedef struct ksyn_wait_queue * ksyn_wait_queue_t;
#define PTHRW_INC		0x100
#define PTHRW_BIT_MASK		0x000000ff

#define PTHRW_COUNT_SHIFT	8
#define PTHRW_COUNT_MASK	0xffffff00
#define PTHRW_MAX_READERS	0xffffff00

/* New model bits on Lword */
#define PTH_RWL_KBIT	0x01	/* users cannot acquire in user mode */
#define PTH_RWL_EBIT	0x02	/* exclusive lock in progress */
#define PTH_RWL_WBIT	0x04	/* write waiters pending in kernel */
#define PTH_RWL_PBIT	0x04	/* prepost (cv) pending in kernel */
#define PTH_RWL_YBIT	0x08	/* yielding write waiters pending in kernel */
#define PTH_RWL_RETRYBIT 0x08	/* mutex retry wait */
#define PTH_RWL_LBIT	0x10	/* long read in progress */
#define PTH_RWL_MTXNONE	0x10	/* indicates the cvwait does not have mutex held */
#define PTH_RWL_UBIT	0x20	/* upgrade request pending */
#define PTH_RWL_MTX_WAIT 0x20	/* in cvar in mutex wait */
#define PTH_RWL_RBIT	0x40	/* reader pending in kernel (not used) */
#define PTH_RWL_MBIT	0x40	/* overlapping grants from kernel */
#define PTH_RWL_TRYLKBIT 0x40	/* trylock attempt (mutex only) */
#define PTH_RWL_IBIT	0x80	/* lock reset, held until first successful unlock */

/* UBIT values for mutex, cvar */
#define PTH_RWU_SBIT	0x01
#define PTH_RWU_BBIT	0x02

#define PTHRW_RWL_INIT	PTH_RWL_IBIT	/* reset state on the lock bits (L) */

/* New model bits on Sword */
#define PTH_RWS_SBIT	0x01	/* kernel transition seq not set yet */
#define PTH_RWS_IBIT	0x02	/* sequence is not set on return from kernel */
#define PTH_RWS_CV_CBIT	PTH_RWS_SBIT	/* kernel has cleared all info w.r.t. CV */
#define PTH_RWS_CV_PBIT	PTH_RWS_IBIT	/* kernel has prepost/fake structs only, no waiters */
#define PTH_RWS_CV_MBIT	PTH_RWL_MBIT	/* to indicate prepost return */
#define PTH_RWS_WSVBIT	0x04	/* save W bit */
#define PTH_RWS_USVBIT	0x08	/* save U bit */
#define PTH_RWS_YSVBIT	0x10	/* save Y bit */
#define PTHRW_RWS_INIT	PTH_RWS_SBIT	/* reset state on the lock bits (S) */
#define PTHRW_RWS_SAVEMASK	(PTH_RWS_WSVBIT|PTH_RWS_USVBIT|PTH_RWS_YSVBIT)	/* save bits mask */
#define PTHRW_SW_Reset_BIT_MASK	0x000000fe	/* remove S bit and get rest of the bits */

#define PTHRW_UN_BIT_MASK	0x000000bf	/* remove overlap bit */
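
/*
 * Worked example (editorial): an Lword of 0x00000203 decodes as
 *	seq  = 0x00000203 & PTHRW_COUNT_MASK = 0x200	(two PTHRW_INC steps)
 *	bits = 0x00000203 & PTHRW_BIT_MASK   = 0x03	(PTH_RWL_KBIT | PTH_RWL_EBIT)
 * i.e. an exclusive holder with user-mode acquisition disabled.
 */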
#define PTHREAD_MTX_TID_SWITCHING	(uint64_t)-1

/* new L word definitions */
#define is_rwl_readinuser(x)	((((x) & (PTH_RWL_UBIT | PTH_RWL_KBIT)) == 0)||(((x) & PTH_RWL_LBIT) != 0))
#define is_rwl_ebit_set(x)	(((x) & PTH_RWL_EBIT) != 0)
#define is_rwl_lbit_set(x)	(((x) & PTH_RWL_LBIT) != 0)
#define is_rwl_readoverlap(x)	(((x) & PTH_RWL_MBIT) != 0)
#define is_rw_ubit_set(x)	(((x) & PTH_RWL_UBIT) != 0)

#define is_rws_setseq(x)	(((x) & PTH_RWS_SBIT))
#define is_rws_setunlockinit(x)	(((x) & PTH_RWS_IBIT))

/* first contended seq that kernel sees */
#define KW_MTXFIRST_KSEQ	0x200
#define KW_CVFIRST_KSEQ		1
#define KW_RWFIRST_KSEQ		0x200
int is_seqlower(uint32_t x, uint32_t y);
int is_seqlower_eq(uint32_t x, uint32_t y);
int is_seqhigher(uint32_t x, uint32_t y);
int is_seqhigher_eq(uint32_t x, uint32_t y);
int find_diff(uint32_t upto, uint32_t lowest);

static inline int diff_genseq(uint32_t x, uint32_t y) {
	if (x > y)  {
		return(x - y);
	} else {
		return((PTHRW_MAX_READERS - y) + x + PTHRW_INC);
	}
}
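
/*
 * Worked example (editorial): sequence numbers occupy the upper 24 bits and
 * advance by PTHRW_INC (0x100), so the distance computation must handle
 * wraparound:
 *	diff_genseq(0x300, 0x100)      == 0x200			(x > y, no wrap)
 *	diff_genseq(0x100, 0xffffff00) == 0 + 0x100 + 0x100 == 0x200	(wrapped)
 */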
#define TID_ZERO	(uint64_t)0

/* bits needed in handling the rwlock unlock */
#define PTH_RW_TYPE_READ	0x01
#define PTH_RW_TYPE_LREAD	0x02
#define PTH_RW_TYPE_WRITE	0x04
#define PTH_RW_TYPE_YWRITE	0x08
#define PTH_RW_TYPE_UPGRADE	0x10
#define PTH_RW_TYPE_MASK	0xff
#define PTH_RW_TYPE_SHIFT	8

#define PTH_RWSHFT_TYPE_READ	0x0100
#define PTH_RWSHFT_TYPE_LREAD	0x0200
#define PTH_RWSHFT_TYPE_WRITE	0x0400
#define PTH_RWSHFT_TYPE_YWRITE	0x0800
#define PTH_RWSHFT_TYPE_MASK	0xff00
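
/*
 * Note (editorial): each PTH_RWSHFT_TYPE_* value is the matching
 * PTH_RW_TYPE_* shifted left by PTH_RW_TYPE_SHIFT, e.g.
 *	PTH_RWSHFT_TYPE_READ == PTH_RW_TYPE_READ << PTH_RW_TYPE_SHIFT
 *		      0x0100 ==             0x01 << 8
 */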
/*
 * Mutex protocol attributes
 */
#define PTHREAD_PRIO_NONE		0
#define PTHREAD_PRIO_INHERIT		1
#define PTHREAD_PRIO_PROTECT		2
#define PTHREAD_PROTOCOL_FLAGS_MASK	0x3

/*
 * Mutex type attributes
 */
#define PTHREAD_MUTEX_NORMAL		0
#define PTHREAD_MUTEX_ERRORCHECK	4
#define PTHREAD_MUTEX_RECURSIVE		8
#define PTHREAD_MUTEX_DEFAULT		PTHREAD_MUTEX_NORMAL
#define PTHREAD_TYPE_FLAGS_MASK		0xc

/*
 * Mutex pshared attributes
 */
#define PTHREAD_PROCESS_SHARED		0x10
#define PTHREAD_PROCESS_PRIVATE		0x20
#define PTHREAD_PSHARED_FLAGS_MASK	0x30

/*
 * Mutex policy attributes
 */
#define _PTHREAD_MUTEX_POLICY_NONE		0
#define _PTHREAD_MUTEX_POLICY_FAIRSHARE		0x040	/* 1 */
#define _PTHREAD_MUTEX_POLICY_FIRSTFIT		0x080	/* 2 */
#define _PTHREAD_MUTEX_POLICY_REALTIME		0x0c0	/* 3 */
#define _PTHREAD_MUTEX_POLICY_ADAPTIVE		0x100	/* 4 */
#define _PTHREAD_MUTEX_POLICY_PRIPROTECT	0x140	/* 5 */
#define _PTHREAD_MUTEX_POLICY_PRIINHERIT	0x180	/* 6 */
#define PTHREAD_POLICY_FLAGS_MASK		0x1c0

#define _PTHREAD_MTX_OPT_HOLDLOCK	0x200
#define _PTHREAD_MTX_OPT_NOMTX		0x400

#define _PTHREAD_MTX_OPT_NOTIFY		0x1000
#define _PTHREAD_MTX_OPT_MUTEX		0x2000	/* this is a mutex type */

#define _PTHREAD_RWLOCK_UPGRADE_TRY	0x10000
#define KSYN_WQ_INLIST	1
#define KSYN_WQ_INHASH	2
#define KSYN_WQ_SHARED	4
#define KSYN_WQ_WAITING	8	/* threads waiting for this wq to be available */
#define KSYN_WQ_FLIST	0x10	/* in free list to be freed after a short delay */

#define KSYN_KWF_INITCLEARED	1	/* the init status found and preposts cleared */
#define KSYN_KWF_ZEROEDOUT	2	/* the lword, etc are inited to 0 */

#define KSYN_CLEANUP_DEADLINE 10
int psynch_cleanupset;
thread_call_t psynch_thcall;
#define KSYN_WQTYPE_INWAIT	0x1000
#define KSYN_WQTYPE_INDROP	0x2000
#define KSYN_WQTYPE_MTX		0x1
#define KSYN_WQTYPE_CVAR	0x2
#define KSYN_WQTYPE_RWLOCK	0x4
#define KSYN_WQTYPE_SEMA	0x8
#define KSYN_WQTYPE_BARR	0x10
#define KSYN_WQTYPE_MASK	0x00ff

#define KSYN_MTX_MAX 0x0fffffff
#define KSYN_WQTYPE_MUTEXDROP	(KSYN_WQTYPE_INDROP | KSYN_WQTYPE_MTX)

#define KW_UNLOCK_PREPOST		0x01
#define KW_UNLOCK_PREPOST_UPGRADE	0x02
#define KW_UNLOCK_PREPOST_DOWNGRADE	0x04
#define KW_UNLOCK_PREPOST_READLOCK	0x08
#define KW_UNLOCK_PREPOST_LREADLOCK	0x10
#define KW_UNLOCK_PREPOST_WRLOCK	0x20
#define KW_UNLOCK_PREPOST_YWRLOCK	0x40
#define CLEAR_PREPOST_BITS(kwq)  {\
			kwq->kw_pre_lockseq = 0; \
			kwq->kw_pre_sseq = PTHRW_RWS_INIT; \
			kwq->kw_pre_rwwc = 0; \
			}

#define CLEAR_INITCOUNT_BITS(kwq)  {\
			kwq->kw_initcount = 0; \
			kwq->kw_initrecv = 0; \
			kwq->kw_initcountseq = 0; \
			}

#define CLEAR_INTR_PREPOST_BITS(kwq)  {\
			kwq->kw_pre_intrcount = 0; \
			kwq->kw_pre_intrseq = 0; \
			kwq->kw_pre_intrretbits = 0; \
			kwq->kw_pre_intrtype = 0; \
			}

#define CLEAR_REINIT_BITS(kwq)  {\
			if ((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_CVAR) { \
				if ((kwq->kw_inqueue != 0) && (kwq->kw_inqueue != kwq->kw_fakecount)) \
					panic("CV: entries in queue during reinit %d:%d\n", kwq->kw_inqueue, kwq->kw_fakecount); \
			}; \
			if ((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_RWLOCK) { \
				kwq->kw_nextseqword = PTHRW_RWS_INIT; \
				kwq->kw_overlapwatch = 0; \
			}; \
			kwq->kw_pre_lockseq = 0; \
			kwq->kw_pre_rwwc = 0; \
			kwq->kw_pre_sseq = PTHRW_RWS_INIT; \
			kwq->kw_lastunlockseq = PTHRW_RWL_INIT; \
			kwq->kw_lastseqword = PTHRW_RWS_INIT; \
			kwq->kw_pre_intrcount = 0; \
			kwq->kw_pre_intrseq = 0; \
			kwq->kw_pre_intrretbits = 0; \
			kwq->kw_pre_intrtype = 0; \
			kwq->kw_lword = 0; \
			kwq->kw_uword = 0; \
			kwq->kw_sword = PTHRW_RWS_INIT; \
			}
void pthread_list_lock(void);
void pthread_list_unlock(void);
void pthread_list_lock_spin(void);
void pthread_list_lock_convert_spin(void);
void ksyn_wqlock(ksyn_wait_queue_t kwq);
void ksyn_wqunlock(ksyn_wait_queue_t kwq);
ksyn_wait_queue_t ksyn_wq_hash_lookup(user_addr_t mutex, proc_t p, int flags, uint64_t object, uint64_t offset);
int ksyn_wqfind(user_addr_t mutex, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, uint64_t tid, int flags, int wqtype, ksyn_wait_queue_t * wq);
void ksyn_wqrelease(ksyn_wait_queue_t mkwq, ksyn_wait_queue_t ckwq, int qfreenow, int wqtype);
extern int ksyn_findobj(uint64_t mutex, uint64_t * object, uint64_t * offset);
static void UPDATE_CVKWQ(ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, uint64_t tid, int wqtype);
extern thread_t port_name_to_thread(mach_port_name_t port_name);

int ksyn_block_thread_locked(ksyn_wait_queue_t kwq, uint64_t abstime, ksyn_waitq_element_t kwe, int log);
kern_return_t ksyn_wakeup_thread(ksyn_wait_queue_t kwq, ksyn_waitq_element_t kwe);
void ksyn_freeallkwe(ksyn_queue_t kq);

uint32_t psynch_mutexdrop_internal(ksyn_wait_queue_t kwq, uint32_t lkseq, uint32_t ugen, int flags);
int kwq_handle_unlock(ksyn_wait_queue_t, uint32_t mgen, uint32_t rw_wc, uint32_t * updatep, int flags, int *blockp, uint32_t premgen);

void ksyn_queue_init(ksyn_queue_t kq);
int ksyn_queue_insert(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t mgen, struct uthread * uth, ksyn_waitq_element_t kwe, int firstfit);
ksyn_waitq_element_t ksyn_queue_removefirst(ksyn_queue_t kq, ksyn_wait_queue_t kwq);
void ksyn_queue_removeitem(ksyn_wait_queue_t kwq, ksyn_queue_t kq, ksyn_waitq_element_t kwe);
int ksyn_queue_move_tofree(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t upto, ksyn_queue_t freeq, int all, int release);
void update_low_high(ksyn_wait_queue_t kwq, uint32_t lockseq);
uint32_t find_nextlowseq(ksyn_wait_queue_t kwq);
uint32_t find_nexthighseq(ksyn_wait_queue_t kwq);

int find_seq_till(ksyn_wait_queue_t kwq, uint32_t upto, uint32_t nwaiters, uint32_t *countp);
uint32_t ksyn_queue_count_tolowest(ksyn_queue_t kq, uint32_t upto);

ksyn_waitq_element_t ksyn_queue_find_cvpreposeq(ksyn_queue_t kq, uint32_t cgen);
uint32_t ksyn_queue_cvcount_entries(ksyn_queue_t kq, uint32_t upto, uint32_t from, int * numwaitersp, int * numintrp, int * numprepop);
void ksyn_handle_cvbroad(ksyn_wait_queue_t ckwq, uint32_t upto, uint32_t *updatep);
void ksyn_cvupdate_fixup(ksyn_wait_queue_t ckwq, uint32_t *updatep, ksyn_queue_t kfreeq, int release);
ksyn_waitq_element_t ksyn_queue_find_signalseq(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t toseq, uint32_t lockseq);
ksyn_waitq_element_t ksyn_queue_find_threadseq(ksyn_wait_queue_t ckwq, ksyn_queue_t kq, thread_t th, uint32_t toseq);

int ksyn_wakeupreaders(ksyn_wait_queue_t kwq, uint32_t limitread, int longreadset, int allreaders, uint32_t updatebits, int * wokenp);
int kwq_find_rw_lowest(ksyn_wait_queue_t kwq, int flags, uint32_t premgen, int * type, uint32_t lowest[]);
ksyn_waitq_element_t ksyn_queue_find_seq(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t seq, int remove);
int kwq_handle_overlap(ksyn_wait_queue_t kwq, uint32_t lgenval, uint32_t ugenval, uint32_t rw_wc, uint32_t *updatebitsp, int flags, int * blockp);
int kwq_handle_downgrade(ksyn_wait_queue_t kwq, uint32_t mgen, int flags, uint32_t premgen, int * blockp);
static void
UPDATE_CVKWQ(ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, __unused uint64_t tid, __unused int wqtype)
{
	if ((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_CVAR) {
		if ((kwq->kw_kflags & KSYN_KWF_ZEROEDOUT) != 0) {
			/* the values of L, U and S are cleared out due to L==S in previous transition */
			kwq->kw_lword = mgen;
			kwq->kw_uword = ugen;
			kwq->kw_sword = rw_wc;
			kwq->kw_kflags &= ~KSYN_KWF_ZEROEDOUT;
		}
		if (is_seqhigher((mgen & PTHRW_COUNT_MASK), (kwq->kw_lword & PTHRW_COUNT_MASK)) != 0)
			kwq->kw_lword = mgen;
		if (is_seqhigher((ugen & PTHRW_COUNT_MASK), (kwq->kw_uword & PTHRW_COUNT_MASK)) != 0)
			kwq->kw_uword = ugen;
		if ((rw_wc & PTH_RWS_CV_CBIT) != 0) {
			if (is_seqlower(kwq->kw_cvkernelseq, (rw_wc & PTHRW_COUNT_MASK)) != 0) {
				kwq->kw_cvkernelseq = (rw_wc & PTHRW_COUNT_MASK);
			}
			if (is_seqhigher((rw_wc & PTHRW_COUNT_MASK), (kwq->kw_sword & PTHRW_COUNT_MASK)) != 0)
				kwq->kw_sword = rw_wc;
		}
	}
}
/* to protect the hashes, iocounts, freelist */
void
pthread_list_lock(void)
{
	lck_mtx_lock(pthread_list_mlock);
}

void
pthread_list_lock_spin(void)
{
	lck_mtx_lock_spin(pthread_list_mlock);
}

void
pthread_list_lock_convert_spin(void)
{
	lck_mtx_convert_spin(pthread_list_mlock);
}

void
pthread_list_unlock(void)
{
	lck_mtx_unlock(pthread_list_mlock);
}

/* to protect the indiv queue */
void
ksyn_wqlock(ksyn_wait_queue_t kwq)
{
	lck_mtx_lock(&kwq->kw_lock);
}

void
ksyn_wqunlock(ksyn_wait_queue_t kwq)
{
	lck_mtx_unlock(&kwq->kw_lock);
}
/* routine to drop the mutex unlocks; used both for the mutexunlock system call and for drops during cond wait */
uint32_t
psynch_mutexdrop_internal(ksyn_wait_queue_t kwq, uint32_t lkseq, uint32_t ugen, int flags)
{
	uint32_t nextgen, low_writer, updatebits, returnbits = 0;
	int firstfit = flags & _PTHREAD_MUTEX_POLICY_FIRSTFIT;
	ksyn_waitq_element_t kwe = NULL;
	kern_return_t kret = KERN_SUCCESS;

	nextgen = (ugen + PTHRW_INC);

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_START, (uint32_t)kwq->kw_addr, lkseq, ugen, flags, 0);
#endif /* _PSYNCH_TRACE_ */

	ksyn_wqlock(kwq);

redrive:
	if (kwq->kw_inqueue != 0) {
		updatebits = (kwq->kw_highseq & PTHRW_COUNT_MASK) | (PTH_RWL_EBIT | PTH_RWL_KBIT);
		kwq->kw_lastunlockseq = (ugen & PTHRW_COUNT_MASK);
		if (firstfit != 0) {
			/* first fit, pick any one */
			kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwq);
			kwe->kwe_psynchretval = updatebits;
			kwe->kwe_kwqqueue = NULL;

#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xcafecaf1, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
#endif /* _PSYNCH_TRACE_ */

			kret = ksyn_wakeup_thread(kwq, kwe);
#if __TESTPANICS__
			if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
				panic("psynch_mutexdrop_internal: panic unable to wakeup firstfit mutex thread\n");
#endif /* __TESTPANICS__ */
			if (kret == KERN_NOT_WAITING)
				goto redrive;
		} else {
			/* handle fairshare */
			low_writer = kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_firstnum;
			low_writer &= PTHRW_COUNT_MASK;

			if (low_writer == nextgen) {
				/* next seq to be granted found */
				kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwq);

				/* since the grant could be cv, make sure mutex wait is set in case the thread interrupted out */
				kwe->kwe_psynchretval = updatebits | PTH_RWL_MTX_WAIT;
				kwe->kwe_kwqqueue = NULL;

#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xcafecaf2, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
#endif /* _PSYNCH_TRACE_ */

				kret = ksyn_wakeup_thread(kwq, kwe);
#if __TESTPANICS__
				if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
					panic("psynch_mutexdrop_internal: panic unable to wakeup fairshare mutex thread\n");
#endif /* __TESTPANICS__ */
				if (kret == KERN_NOT_WAITING) {
					/* interrupt post */
					kwq->kw_pre_intrcount = 1;
					kwq->kw_pre_intrseq = nextgen;
					kwq->kw_pre_intrretbits = updatebits;
					kwq->kw_pre_intrtype = PTH_RW_TYPE_WRITE;
#if _PSYNCH_TRACE_
					__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfafafaf1, nextgen, kwq->kw_pre_intrretbits, 0);
#endif /* _PSYNCH_TRACE_ */
				}

			} else if (is_seqhigher(low_writer, nextgen) != 0) {
				kwq->kw_pre_rwwc++;

				if (kwq->kw_pre_rwwc > 1) {
					__FAILEDUSERTEST__("psynch_mutexdrop_internal: prepost more than one (1)\n");
					goto out;
				}

				kwq->kw_pre_lockseq = (nextgen & PTHRW_COUNT_MASK);
#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef1, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
			} else {

				//__FAILEDUSERTEST__("psynch_mutexdrop_internal: FS mutex unlock sequence higher than the lowest one is queue\n");

				kwe = ksyn_queue_find_seq(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], (nextgen & PTHRW_COUNT_MASK), 1);
				if (kwe != NULL) {
					/* next seq to be granted found */
					/* since the grant could be cv, make sure mutex wait is set in case the thread interrupted out */
					kwe->kwe_psynchretval = updatebits | PTH_RWL_MTX_WAIT;
					kwe->kwe_kwqqueue = NULL;
#if _PSYNCH_TRACE_
					__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xcafecaf3, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
#endif /* _PSYNCH_TRACE_ */
					kret = ksyn_wakeup_thread(kwq, kwe);
#if __TESTPANICS__
					if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
						panic("psynch_mutexdrop_internal: panic unable to wakeup fairshare mutex thread\n");
#endif /* __TESTPANICS__ */
					if (kret == KERN_NOT_WAITING)
						goto redrive;
				} else {
					/* next seq to be granted not found, prepost */
					kwq->kw_pre_rwwc++;

					if (kwq->kw_pre_rwwc > 1) {
						__FAILEDUSERTEST__("psynch_mutexdrop_internal: prepost more than one (2)\n");
						goto out;
					}

					kwq->kw_pre_lockseq = (nextgen & PTHRW_COUNT_MASK);
#if _PSYNCH_TRACE_
					__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
				}
			}
		}
	} else {

		/* if firstfit the last one could be spurious */
		if (firstfit == 0) {
			kwq->kw_lastunlockseq = (ugen & PTHRW_COUNT_MASK);
			kwq->kw_pre_rwwc++;

			if (kwq->kw_pre_rwwc > 1) {
				__FAILEDUSERTEST__("psynch_mutexdrop_internal: prepost more than one (3)\n");
				goto out;
			}

			kwq->kw_pre_lockseq = (nextgen & PTHRW_COUNT_MASK);
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef3, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
		} else {
			/* first fit case */
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef3, kwq->kw_lastunlockseq, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
			kwq->kw_lastunlockseq = (ugen & PTHRW_COUNT_MASK);
			/* not set or the new lkseq is higher */
			if ((kwq->kw_pre_rwwc == 0) || (is_seqlower(kwq->kw_pre_lockseq, lkseq) == 0))
				kwq->kw_pre_lockseq = (lkseq & PTHRW_COUNT_MASK);
			kwq->kw_pre_rwwc = 1;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef3, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */

			/* indicate prepost content in kernel */
			returnbits = lkseq | PTH_RWL_PBIT;
		}
	}

out:
	ksyn_wqunlock(kwq);

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_END, (uint32_t)kwq->kw_addr, 0xeeeeeeed, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	ksyn_wqrelease(kwq, NULL, 1, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_MTX));
	return(returnbits);
}
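
/*
 * Note (editorial): a KERN_NOT_WAITING return from ksyn_wakeup_thread()
 * means the chosen waiter had already been interrupted out of its block.
 * The paths above either redrive the selection (firstfit) or record an
 * interrupt prepost (kw_pre_intr*) so the grant is not lost: a waiter whose
 * seq is at or below kw_pre_intrseq will consume kw_pre_intrretbits on its
 * next trip through psynch_mutexwait().
 */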
/*
 * psynch_mutexwait: This system call is used for contended psynch mutexes to block.
 */
int
psynch_mutexwait(__unused proc_t p, struct psynch_mutexwait_args * uap, uint32_t * retval)
{
	user_addr_t mutex  = uap->mutex;
	uint32_t mgen = uap->mgen;
	uint32_t ugen = uap->ugen;
	uint64_t tid = uap->tid;
	int flags = uap->flags;
	ksyn_wait_queue_t kwq;
	int error = 0;
	int ins_flags, retry;
	uthread_t uth;
	int firstfit = flags & _PTHREAD_MUTEX_POLICY_FIRSTFIT;
	uint32_t lockseq, updatebits = 0;
	ksyn_waitq_element_t kwe;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_START, (uint32_t)mutex, mgen, ugen, flags, 0);
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_NONE, (uint32_t)mutex, mgen, ugen, (uint32_t)tid, 0);
#endif /* _PSYNCH_TRACE_ */

	uth = current_uthread();

	kwe = &uth->uu_kwe;
	kwe->kwe_lockseq = uap->mgen;
	kwe->kwe_uth = uth;
	kwe->kwe_psynchretval = 0;
	kwe->kwe_kwqqueue = NULL;
	lockseq = (uap->mgen & PTHRW_COUNT_MASK);

	if (firstfit == 0) {
		ins_flags = SEQFIT;
	} else {
		/* first fit */
		ins_flags = FIRSTFIT;
	}

	error = ksyn_wqfind(mutex, mgen, ugen, 0, tid, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_MTX), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_END, (uint32_t)mutex, 1, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	ksyn_wqlock(kwq);

	if ((mgen & PTH_RWL_RETRYBIT) != 0) {
		retry = 1;
		mgen &= ~PTH_RWL_RETRYBIT;
	}

	/* handle first the missed wakeups */
	if ((kwq->kw_pre_intrcount != 0) &&
		((kwq->kw_pre_intrtype == PTH_RW_TYPE_WRITE)) &&
		(is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {
		kwq->kw_pre_intrcount--;
		kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
		if (kwq->kw_pre_intrcount == 0)
			CLEAR_INTR_PREPOST_BITS(kwq);
		ksyn_wqunlock(kwq);
		*retval = kwe->kwe_psynchretval;
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_NONE, (uint32_t)mutex, 0xfafafaf1, kwe->kwe_psynchretval, kwq->kw_pre_intrcount, 0);
#endif /* _PSYNCH_TRACE_ */
		goto out;
	}

	if ((kwq->kw_pre_rwwc != 0) && ((ins_flags == FIRSTFIT) || ((lockseq & PTHRW_COUNT_MASK) == (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)))) {
		/* got preposted lock */
		kwq->kw_pre_rwwc--;
		if (kwq->kw_pre_rwwc == 0) {
			CLEAR_PREPOST_BITS(kwq);
			kwq->kw_lastunlockseq = PTHRW_RWL_INIT;
			if (kwq->kw_inqueue == 0) {
				updatebits = lockseq | (PTH_RWL_KBIT | PTH_RWL_EBIT);
			} else {
				updatebits = (kwq->kw_highseq & PTHRW_COUNT_MASK) | (PTH_RWL_KBIT | PTH_RWL_EBIT);
			}
			updatebits &= ~PTH_RWL_MTX_WAIT;

			kwe->kwe_psynchretval = updatebits;

			if (updatebits == 0) {
				__FAILEDUSERTEST__("psynch_mutexwait(prepost): returning 0 lseq in mutexwait with no EBIT \n");
			}
			ksyn_wqunlock(kwq);
			*retval = updatebits;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef1, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
			goto out;
		} else {
			__FAILEDUSERTEST__("psynch_mutexwait: more than one prepost\n");
			kwq->kw_pre_lockseq += PTHRW_INC; /* look for next one */
			ksyn_wqunlock(kwq);
			error = EINVAL;
			goto out;
		}
	}

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfeedfeed, mgen, ins_flags, 0);
#endif /* _PSYNCH_TRACE_ */

	error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], mgen, uth, kwe, ins_flags);
	if (error != 0) {
		ksyn_wqunlock(kwq);
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_END, (uint32_t)mutex, 2, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
		goto out;
	}

	error = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0);
	/* drops the wq lock */

	if (error != 0) {
		ksyn_wqlock(kwq);
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_NONE, (uint32_t)mutex, 3, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
		if (kwe->kwe_kwqqueue != NULL)
			ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwe);
		ksyn_wqunlock(kwq);
	} else {
		updatebits = kwe->kwe_psynchretval;
		updatebits &= ~PTH_RWL_MTX_WAIT;
		*retval = updatebits;

		if (updatebits == 0)
			__FAILEDUSERTEST__("psynch_mutexwait: returning 0 lseq in mutexwait with no EBIT \n");
	}
out:
	ksyn_wqrelease(kwq, NULL, 1, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_MTX));
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_END, (uint32_t)mutex, 0xeeeeeeed, updatebits, error, 0);
#endif /* _PSYNCH_TRACE_ */

	return(error);
}
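
/*
 * Illustrative flow (editorial, simplified): a contended
 * pthread_mutex_lock() in user space traps into psynch_mutexwait() and
 * sleeps on the kwq; the owner's pthread_mutex_unlock() traps into
 * psynch_mutexdrop(), which selects the next waiter (first-fit or
 * fairshare) and hands the updated L-word bits back as the syscall
 * return value.
 */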
/*
 * psynch_mutexdrop: This system call is used for unlock postings on contended psynch mutexes.
 */
int
psynch_mutexdrop(__unused proc_t p, struct psynch_mutexdrop_args * uap, uint32_t * retval)
{
	user_addr_t mutex  = uap->mutex;
	uint32_t mgen = uap->mgen;
	uint32_t ugen = uap->ugen;
	uint64_t tid = uap->tid;
	int flags = uap->flags;
	ksyn_wait_queue_t kwq;
	uint32_t updateval;
	int error = 0;

	error = ksyn_wqfind(mutex, mgen, ugen, 0, tid, flags, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_MTX), &kwq);
	if (error != 0) {
		return(error);
	}

	updateval = psynch_mutexdrop_internal(kwq, mgen, ugen, flags);
	/* drops the kwq reference */

	*retval = updateval;
	return(error);
}
/*
 * psynch_cvbroad: This system call is used for broadcast posting on blocked waiters of psynch cvars.
 */
int
psynch_cvbroad(__unused proc_t p, struct psynch_cvbroad_args * uap, uint32_t * retval)
{
	user_addr_t cond  = uap->cv;
	uint64_t cvlsgen = uap->cvlsgen;
	uint64_t cvudgen = uap->cvudgen;
	uint32_t cgen, cugen, csgen, diffgen;
	uint32_t uptoseq, fromseq;
	int flags = uap->flags;
	ksyn_wait_queue_t ckwq;
	int error = 0;
	uint32_t updatebits = 0;
	uint32_t count;
	struct ksyn_queue kfreeq;

	csgen = (uint32_t)((cvlsgen >> 32) & 0xffffffff);
	cgen = ((uint32_t)(cvlsgen & 0xffffffff));
	cugen = (uint32_t)((cvudgen >> 32) & 0xffffffff);
	diffgen = ((uint32_t)(cvudgen & 0xffffffff));
	count = (diffgen >> PTHRW_COUNT_SHIFT);

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_START, (uint32_t)cond, cgen, cugen, csgen, 0);
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_NONE, (uint32_t)cond, 0xcbcbcbc1, diffgen, flags, 0);
#endif /* _PSYNCH_TRACE_ */

	uptoseq = cgen & PTHRW_COUNT_MASK;
	fromseq = (cugen & PTHRW_COUNT_MASK) + PTHRW_INC;

	if (is_seqhigher(fromseq, uptoseq) || is_seqhigher((csgen & PTHRW_COUNT_MASK), uptoseq)) {
		__FAILEDUSERTEST__("cvbroad: invalid L, U and S values\n");
		return EINVAL;
	}

	if (count > (uint32_t)task_threadmax) {
		__FAILEDUSERTEST__("cvbroad: difference greater than maximum possible thread count\n");
		return EBUSY;
	}

	error = ksyn_wqfind(cond, cgen, cugen, csgen, 0, flags, (KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INDROP), &ckwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_END, (uint32_t)cond, 0, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	ksyn_wqlock(ckwq);

	/* update L, U and S... */
	UPDATE_CVKWQ(ckwq, cgen, cugen, csgen, 0, KSYN_WQTYPE_CVAR);

	/* broadcast wakeups/prepost handling */
	ksyn_handle_cvbroad(ckwq, uptoseq, &updatebits);

	/* set C or P bits and free if needed */
	ckwq->kw_sword += (updatebits & PTHRW_COUNT_MASK);
	ksyn_cvupdate_fixup(ckwq, &updatebits, &kfreeq, 1);
	ksyn_wqunlock(ckwq);

	*retval = updatebits;

	ksyn_wqrelease(ckwq, NULL, 1, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_CVAR));
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_END, (uint32_t)cond, 0xeeeeeeed, (uint32_t)*retval, error, 0);
#endif /* _PSYNCH_TRACE_ */

	return(error);
}
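
/*
 * Worked example (editorial): cvlsgen packs S in the high 32 bits and L in
 * the low 32 bits; cvudgen packs U high and the diff word low.  For
 * instance cvlsgen == 0x0000010000000300 unpacks to csgen == 0x100 (S) and
 * cgen == 0x300 (L), and a diffgen of 0x200 yields
 * count == diffgen >> PTHRW_COUNT_SHIFT == 2 candidate waiters.
 */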
ksyn_waitq_element_t
ksyn_queue_find_threadseq(ksyn_wait_queue_t ckwq, __unused ksyn_queue_t kq, thread_t th, uint32_t upto)
{
	uthread_t uth = get_bsdthread_info(th);
	ksyn_waitq_element_t kwe = &uth->uu_kwe;

	if (kwe->kwe_kwqqueue != ckwq ||
	    is_seqhigher((kwe->kwe_lockseq & PTHRW_COUNT_MASK), upto)) {
		/* the thread is not waiting in the cv (or wasn't when the wakeup happened) */
		return NULL;
	}

	return kwe;
}
/*
 * psynch_cvsignal: This system call is used for signalling the blocked waiters of psynch cvars.
 */
int
psynch_cvsignal(__unused proc_t p, struct psynch_cvsignal_args * uap, uint32_t * retval)
{
	user_addr_t cond  = uap->cv;
	uint64_t cvlsgen = uap->cvlsgen;
	uint32_t cgen, csgen, signalseq, uptoseq;
	uint32_t cugen = uap->cvugen;
	int threadport = uap->thread_port;
	int flags = uap->flags;
	ksyn_wait_queue_t ckwq = NULL;
	ksyn_waitq_element_t kwe, nkwe = NULL;
	ksyn_queue_t kq;
	int error = 0;
	thread_t th = THREAD_NULL;
	uint32_t updatebits = 0;
	kern_return_t kret;
	struct ksyn_queue kfreeq;

	csgen = (uint32_t)((cvlsgen >> 32) & 0xffffffff);
	cgen = ((uint32_t)(cvlsgen & 0xffffffff));

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_START, (uint32_t)cond, cgen, cugen, threadport, 0);
#endif /* _PSYNCH_TRACE_ */

	uptoseq = cgen & PTHRW_COUNT_MASK;
	signalseq = (cugen & PTHRW_COUNT_MASK) + PTHRW_INC;

	/* validate sane L, U, and S values */
	if (((threadport == 0) && (is_seqhigher(signalseq, uptoseq))) || is_seqhigher((csgen & PTHRW_COUNT_MASK), uptoseq)) {
		__FAILEDUSERTEST__("psync_cvsignal; invalid sequence numbers\n");
		error = EINVAL;
		goto out;
	}

	/* If we are looking for a specific thread, grab a reference for it */
	if (threadport != 0) {
		th = (thread_t)port_name_to_thread((mach_port_name_t)threadport);
		if (th == THREAD_NULL) {
			error = ESRCH;
			goto out;
		}
	}

	error = ksyn_wqfind(cond, cgen, cugen, csgen, 0, flags, (KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INDROP), &ckwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_END, (uint32_t)cond, 0, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
		goto out;
	}

	ksyn_wqlock(ckwq);

	/* update L, U and S... */
	UPDATE_CVKWQ(ckwq, cgen, cugen, csgen, 0, KSYN_WQTYPE_CVAR);

	kq = &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER];

retry:
	/* Only bother if we aren't already balanced */
	if ((ckwq->kw_lword & PTHRW_COUNT_MASK) != (ckwq->kw_sword & PTHRW_COUNT_MASK)) {

		kwe = (th != NULL) ? ksyn_queue_find_threadseq(ckwq, kq, th, uptoseq) :
			ksyn_queue_find_signalseq(ckwq, kq, uptoseq, signalseq);
		if (kwe != NULL) {
			switch (kwe->kwe_flags) {

			case KWE_THREAD_BROADCAST:
				/* broadcasts swallow our signal */
				break;

			case KWE_THREAD_PREPOST:
				/* merge in with existing prepost at our same uptoseq */
				kwe->kwe_count += 1;
				break;

			case KWE_THREAD_INWAIT:
				if (is_seqlower((kwe->kwe_lockseq & PTHRW_COUNT_MASK), signalseq)) {
					/*
					 * A valid thread in our range, but lower than our signal.
					 * Matching it may leave our match with nobody to wake it if/when
					 * it arrives (the signal originally meant for this thread might
					 * not successfully wake it).
					 *
					 * Convert to broadcast - may cause some spurious wakeups
					 * (allowed by spec), but avoids starvation (better choice).
					 */
#if _PSYNCH_TRACE_
					__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xc1c1c1c1, uptoseq, 0, 0);
#endif /* _PSYNCH_TRACE_ */
					ksyn_handle_cvbroad(ckwq, uptoseq, &updatebits);
				} else {
					ksyn_queue_removeitem(ckwq, kq, kwe);
					kwe->kwe_psynchretval = PTH_RWL_MTX_WAIT;
					kwe->kwe_kwqqueue = NULL;
#if _PSYNCH_TRACE_
					__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xcafecaf2, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
#endif /* _PSYNCH_TRACE_ */
					kret = ksyn_wakeup_thread(ckwq, kwe);
#if __TESTPANICS__
					if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
						panic("ksyn_wakeup_thread: panic waking up condition waiter\n");
#endif /* __TESTPANICS__ */
					updatebits += PTHRW_INC;
				}

				ckwq->kw_sword += (updatebits & PTHRW_COUNT_MASK);
				break;

			default:
				panic("unknown kweflags\n");
				break;
			}

		} else if (th != NULL) {
			/*
			 * Could not find the thread, post a broadcast,
			 * otherwise the waiter will be stuck. Used to send
			 * ESRCH here, but that did lead to rare hangs.
			 */
			ksyn_handle_cvbroad(ckwq, uptoseq, &updatebits);
			ckwq->kw_sword += (updatebits & PTHRW_COUNT_MASK);
		} else if (nkwe == NULL) {
			ksyn_wqunlock(ckwq);
			nkwe = (ksyn_waitq_element_t)zalloc(kwe_zone);
			ksyn_wqlock(ckwq);
			goto retry;

		} else {
			/* no eligible entries - add prepost */
			bzero(nkwe, sizeof(struct ksyn_waitq_element));
			nkwe->kwe_kwqqueue = ckwq;
			nkwe->kwe_flags = KWE_THREAD_PREPOST;
			nkwe->kwe_lockseq = uptoseq;
			nkwe->kwe_count = 1;
			nkwe->kwe_uth = NULL;
			nkwe->kwe_psynchretval = 0;

#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xfeedfefe, uptoseq, 0, 0);
#endif /* _PSYNCH_TRACE_ */

			(void)ksyn_queue_insert(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], uptoseq, NULL, nkwe, SEQFIT);
			ckwq->kw_fakecount++;
			nkwe = NULL;
		}

		/* set C or P bits and free if needed */
		ksyn_cvupdate_fixup(ckwq, &updatebits, &kfreeq, 1);
	}

	ksyn_wqunlock(ckwq);
	if (nkwe != NULL)
		zfree(kwe_zone, nkwe);

	ksyn_wqrelease(ckwq, NULL, 1, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_CVAR));

out:
	if (th != NULL)
		thread_deallocate(th);
	if (error == 0)
		*retval = updatebits;
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_END, (uint32_t)cond, 0xeeeeeeed, updatebits, error, 0);
#endif /* _PSYNCH_TRACE_ */

	return(error);
}
/*
 * psynch_cvwait: This system call is used for psynch cvar waiters to block in kernel.
 */
int
psynch_cvwait(__unused proc_t p, struct psynch_cvwait_args * uap, uint32_t * retval)
{
	user_addr_t cond  = uap->cv;
	uint64_t cvlsgen = uap->cvlsgen;
	uint32_t cgen, csgen;
	uint32_t cugen = uap->cvugen;
	user_addr_t mutex = uap->mutex;
	uint64_t mugen = uap->mugen;
	uint32_t mgen, ugen;
	int flags = uap->flags;
	ksyn_wait_queue_t kwq, ckwq;
	int error = 0, local_error = 0;
	uint64_t abstime = 0;
	uint32_t lockseq, updatebits = 0;
	struct timespec ts;
	uthread_t uth;
	ksyn_waitq_element_t kwe, nkwe = NULL;
	struct ksyn_queue *kq, kfreeq;
#if __TESTPANICS__
	//int timeoutval = 3; /* 3 secs */
	//u_int64_t ntime = 0;
#endif /* __TESTPANICS__ */

	/* for conformance reasons */
	__pthread_testcancel(0);

	csgen = (uint32_t)((cvlsgen >> 32) & 0xffffffff);
	cgen = ((uint32_t)(cvlsgen & 0xffffffff));
	ugen = (uint32_t)((mugen >> 32) & 0xffffffff);
	mgen = ((uint32_t)(mugen & 0xffffffff));

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_START, (uint32_t)cond, cgen, cugen, csgen, 0);
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_NONE, (uint32_t)mutex, mgen, ugen, flags, 0);
#endif /* _PSYNCH_TRACE_ */

	lockseq = (cgen & PTHRW_COUNT_MASK);

	/*
	 * In cvwait U word can be out of range as cond could be used only for
	 * timeouts. However S word needs to be within bounds and validated at
	 * user level as well.
	 */
	if (is_seqhigher_eq((csgen & PTHRW_COUNT_MASK), lockseq) != 0) {
		__FAILEDUSERTEST__("psync_cvwait; invalid sequence numbers\n");
		return EINVAL;
	}

	error = ksyn_wqfind(cond, cgen, cugen, csgen, 0, flags, KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INWAIT, &ckwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_END, (uint32_t)cond, 1, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

#if 0 /* __TESTPANICS__ */
	//clock_interval_to_deadline(timeoutval, NSEC_PER_SEC, &ntime);
#endif /* __TESTPANICS__ */

	if (mutex != (user_addr_t)0) {
		error = ksyn_wqfind(mutex, mgen, ugen, 0, 0, flags, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_MTX), &kwq);
		if (error != 0) {
			local_error = error;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_END, (uint32_t)mutex, 2, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
			goto out;
		}

		(void)psynch_mutexdrop_internal(kwq, mgen, ugen, flags);
		/* drops kwq reference */
		kwq = NULL;
	}

	if (uap->sec != 0 || (uap->nsec & 0x3fffffff) != 0) {
		ts.tv_sec = uap->sec;
		ts.tv_nsec = (uap->nsec & 0x3fffffff);
		nanoseconds_to_absolutetime((uint64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec, &abstime);
		clock_absolutetime_interval_to_deadline(abstime, &abstime);
	}

	ksyn_wqlock(ckwq);

	/* update L, U and S... */
	UPDATE_CVKWQ(ckwq, cgen, cugen, csgen, 0, KSYN_WQTYPE_CVAR);

	/* Look for the sequence for prepost (or conflicting thread) */
	kq = &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER];
	kwe = ksyn_queue_find_cvpreposeq(kq, lockseq);

	if (kwe != NULL) {
		switch (kwe->kwe_flags) {

		case KWE_THREAD_INWAIT:
			ksyn_wqunlock(ckwq);
			__FAILEDUSERTEST__("cvwait: thread entry with same sequence already present\n");
			local_error = EBUSY;
			goto out;

		case KWE_THREAD_BROADCAST:
			break;

		case KWE_THREAD_PREPOST:
			if ((kwe->kwe_lockseq & PTHRW_COUNT_MASK) == lockseq) {
				/* we can safely consume a reference, so do so */
				if (--kwe->kwe_count == 0) {
					ksyn_queue_removeitem(ckwq, kq, kwe);
					ckwq->kw_fakecount--;
					nkwe = kwe;
				}
			} else {
				/*
				 * consuming a prepost higher than our lock sequence is valid, but
				 * can leave the higher thread without a match. Convert the entry
				 * to a broadcast to compensate for this.
				 */
#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xc2c2c2c2, kwe->kwe_lockseq, 0, 0);
#endif /* _PSYNCH_TRACE_ */

				ksyn_handle_cvbroad(ckwq, kwe->kwe_lockseq, &updatebits);
#if __TESTPANICS__
				if (updatebits != 0)
					panic("psync_cvwait: convert pre-post to broadcast: woke up %d threads that shouldn't be there\n",
						updatebits);
#endif /* __TESTPANICS__ */
			}
			break;

		default:
			panic("psync_cvwait: unexpected wait queue element type\n");
		}

#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xfefefefe, kwe->kwe_lockseq, 0, 0);
#endif /* _PSYNCH_TRACE_ */

		updatebits = PTHRW_INC;
		ckwq->kw_sword += PTHRW_INC;

		/* set C or P bits and free if needed */
		ksyn_cvupdate_fixup(ckwq, &updatebits, &kfreeq, 1);

		error = local_error = 0;
		*retval = updatebits;

		ksyn_wqunlock(ckwq);

		if (nkwe != NULL)
			zfree(kwe_zone, nkwe);

		goto out;
	}

	uth = current_uthread();
	kwe = &uth->uu_kwe;
	kwe->kwe_kwqqueue = ckwq;
	kwe->kwe_flags = KWE_THREAD_INWAIT;
	kwe->kwe_lockseq = lockseq;
	kwe->kwe_count = 1;
	kwe->kwe_uth = uth;
	kwe->kwe_psynchretval = 0;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xfeedfeed, cgen, 0, 0);
#endif /* _PSYNCH_TRACE_ */

	error = ksyn_queue_insert(ckwq, kq, cgen, uth, kwe, SEQFIT);
	if (error != 0) {
		ksyn_wqunlock(ckwq);
		local_error = error;
		goto out;
	}

#if 0 /* __TESTPANICS__ */
	/* if no timeout is passed, set 5 secs timeout to catch hangs */
	error = ksyn_block_thread_locked(ckwq, (abstime == 0) ? ntime : abstime, kwe, 1);
#else
	error = ksyn_block_thread_locked(ckwq, abstime, kwe, 1);
#endif /* __TESTPANICS__ */
	/* drops the ckwq lock */

	if (error != 0) {
		ksyn_wqlock(ckwq);
		local_error = error;

		/* just in case it got woken up as we were granting */
		*retval = kwe->kwe_psynchretval;

#if __TESTPANICS__
		if ((kwe->kwe_kwqqueue != NULL) && (kwe->kwe_kwqqueue != ckwq))
			panic("cvwait waiting on some other kwq\n");
#endif /* __TESTPANICS__ */

		if (kwe->kwe_kwqqueue != NULL) {
			ksyn_queue_removeitem(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwe);
			kwe->kwe_kwqqueue = NULL;
		}
		if ((kwe->kwe_psynchretval & PTH_RWL_MTX_WAIT) != 0) {
			/* the condition var granted.
			 * reset the error so that the thread returns back.
			 */
			local_error = 0;
			/* no need to set any bits just return as cvsig/broad covers this */
			ksyn_wqunlock(ckwq);
			*retval = 0;
			goto out;
		}

		ckwq->kw_sword += PTHRW_INC;

		/* set C and P bits, in the local error as well as updatebits */
		if ((ckwq->kw_lword & PTHRW_COUNT_MASK) == (ckwq->kw_sword & PTHRW_COUNT_MASK)) {
			updatebits |= PTH_RWS_CV_CBIT;
			local_error |= ECVCERORR;
			if (ckwq->kw_inqueue != 0) {
				(void)ksyn_queue_move_tofree(ckwq, kq, (ckwq->kw_lword & PTHRW_COUNT_MASK), &kfreeq, 1, 1);
			}
			ckwq->kw_lword = ckwq->kw_uword = ckwq->kw_sword = 0;
			ckwq->kw_kflags |= KSYN_KWF_ZEROEDOUT;
		} else {
			/* everything in the queue is a fake entry ? */
			if ((ckwq->kw_inqueue != 0) && (ckwq->kw_fakecount == ckwq->kw_inqueue)) {
				updatebits |= PTH_RWS_CV_PBIT;
				local_error |= ECVPERORR;
			}
		}
		ksyn_wqunlock(ckwq);

	} else {
		/* PTH_RWL_MTX_WAIT is removed */
		if ((kwe->kwe_psynchretval & PTH_RWS_CV_MBIT) != 0)
			*retval = PTHRW_INC | PTH_RWS_CV_CBIT;
		else
			*retval = 0;
		local_error = 0;
	}
out:
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_END, (uint32_t)cond, 0xeeeeeeed, (uint32_t)*retval, local_error, 0);
#endif /* _PSYNCH_TRACE_ */
	ksyn_wqrelease(ckwq, NULL, 1, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_CVAR));
	return(local_error);
}
/*
 * psynch_cvclrprepost: This system call clears pending prepost if present.
 */
int
psynch_cvclrprepost(__unused proc_t p, struct psynch_cvclrprepost_args * uap, __unused int * retval)
{
	user_addr_t cond  = uap->cv;
	uint32_t cgen = uap->cvgen;
	uint32_t cugen = uap->cvugen;
	uint32_t csgen = uap->cvsgen;
	uint32_t pseq = uap->preposeq;
	uint32_t flags = uap->flags;
	int error = 0;
	ksyn_wait_queue_t ckwq = NULL;
	struct ksyn_queue kfreeq;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CLRPRE | DBG_FUNC_START, (uint32_t)cond, cgen, cugen, csgen, 0);
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CLRPRE | DBG_FUNC_NONE, (uint32_t)cond, 0xcececece, pseq, flags, 0);
#endif /* _PSYNCH_TRACE_ */

	if ((flags & _PTHREAD_MTX_OPT_MUTEX) == 0) {
		error = ksyn_wqfind(cond, cgen, cugen, csgen, 0, flags, (KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INDROP), &ckwq);
		if (error != 0) {
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CLRPRE | DBG_FUNC_END, (uint32_t)cond, 0, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
			return(error);
		}

		ksyn_wqlock(ckwq);
		(void)ksyn_queue_move_tofree(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], (pseq & PTHRW_COUNT_MASK), &kfreeq, 0, 1);
		ksyn_wqunlock(ckwq);
		ksyn_wqrelease(ckwq, NULL, 1, (KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INDROP));
	} else {
		/* mutex type */
		error = ksyn_wqfind(cond, cgen, cugen, 0, 0, flags, (KSYN_WQTYPE_MTX | KSYN_WQTYPE_INDROP), &ckwq);
		if (error != 0) {
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CLRPRE | DBG_FUNC_END, (uint32_t)cond, 0, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
			return(error);
		}

		ksyn_wqlock(ckwq);
		if (((flags & _PTHREAD_MUTEX_POLICY_FIRSTFIT) != 0) && (ckwq->kw_pre_rwwc != 0)) {
			if (is_seqlower_eq(ckwq->kw_pre_lockseq, cgen) != 0) {
				/* clear prepost */
				ckwq->kw_pre_rwwc = 0;
				ckwq->kw_pre_lockseq = 0;
			}
		}
		ksyn_wqunlock(ckwq);
		ksyn_wqrelease(ckwq, NULL, 1, (KSYN_WQTYPE_MTX | KSYN_WQTYPE_INDROP));
	}

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CLRPRE | DBG_FUNC_END, (uint32_t)cond, 0xeeeeeeed, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	return(0);
}
/* ***************** pthread_rwlock ************************ */

/*
 * psynch_rw_rdlock: This system call is used for psync rwlock readers to block.
 */
int
psynch_rw_rdlock(__unused proc_t p, struct psynch_rw_rdlock_args * uap, uint32_t * retval)
{
	user_addr_t rwlock  = uap->rwlock;
	uint32_t lgen = uap->lgenval;
	uint32_t ugen = uap->ugenval;
	uint32_t rw_wc = uap->rw_wc;
	//uint64_t tid = uap->tid;
	int flags = uap->flags;
	int error = 0, block;
	uint32_t lockseq = 0, updatebits = 0, preseq = 0, prerw_wc = 0;
	ksyn_wait_queue_t kwq;
	uthread_t uth;
	int isinit = lgen & PTHRW_RWL_INIT;
	uint32_t returnbits = 0;
	ksyn_waitq_element_t kwe;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
	uth = current_uthread();

	/* preserve the seq number */
	kwe = &uth->uu_kwe;
	kwe->kwe_lockseq = lgen;
	kwe->kwe_uth = uth;
	kwe->kwe_psynchretval = 0;
	kwe->kwe_kwqqueue = NULL;

	lockseq = lgen & PTHRW_COUNT_MASK;

	error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	ksyn_wqlock(kwq);

	if (isinit != 0) {
		lgen &= ~PTHRW_RWL_INIT;
		if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
			/* first to notice the reset of the lock, clear preposts */
			CLEAR_REINIT_BITS(kwq);
			kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
#endif /* _PSYNCH_TRACE_ */
		}
	}

	/* handle first the missed wakeups */
	if ((kwq->kw_pre_intrcount != 0) &&
		((kwq->kw_pre_intrtype == PTH_RW_TYPE_READ) || (kwq->kw_pre_intrtype == PTH_RW_TYPE_LREAD)) &&
		(is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {

		kwq->kw_pre_intrcount--;
		kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
		if (kwq->kw_pre_intrcount == 0)
			CLEAR_INTR_PREPOST_BITS(kwq);
		ksyn_wqunlock(kwq);
		goto out;
	}

	/* handle overlap first as they are not counted against pre_rwwc */

	/* check for overlap and if no pending W bit (indicates writers) */
	if ((kwq->kw_overlapwatch != 0) && ((rw_wc & PTHRW_RWS_SAVEMASK) == 0) && ((lgen & PTH_RWL_WBIT) == 0)) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 10, kwq->kw_nextseqword, kwq->kw_lastseqword, 0);
#endif /* _PSYNCH_TRACE_ */
		error = kwq_handle_overlap(kwq, lgen, ugen, rw_wc, &updatebits, (KW_UNLOCK_PREPOST_READLOCK|KW_UNLOCK_PREPOST), &block);
#if __TESTPANICS__
		if (error != 0)
			panic("rw_rdlock: kwq_handle_overlap failed %d\n", error);
#endif /* __TESTPANICS__ */
		if (block == 0) {
			error = 0;
			kwe->kwe_psynchretval = updatebits;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0xff, updatebits, 0xee, 0);
#endif /* _PSYNCH_TRACE_ */
			ksyn_wqunlock(kwq);
			goto out;
		}
	}

	if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
		kwq->kw_pre_rwwc--;
		if (kwq->kw_pre_rwwc == 0) {
			preseq = kwq->kw_pre_lockseq;
			prerw_wc = kwq->kw_pre_sseq;
			CLEAR_PREPOST_BITS(kwq);
			if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0) {
				kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
#endif /* _PSYNCH_TRACE_ */
			}
			error = kwq_handle_unlock(kwq, preseq, prerw_wc, &updatebits, (KW_UNLOCK_PREPOST_READLOCK|KW_UNLOCK_PREPOST), &block, lgen);
#if __TESTPANICS__
			if (error != 0)
				panic("rw_rdlock: kwq_handle_unlock failed %d\n", error);
#endif /* __TESTPANICS__ */
			if (block == 0) {
				ksyn_wqunlock(kwq);
				goto out;
			}
			/* insert to q and proceed as usual */
		}
	}

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_READ], lgen, uth, kwe, SEQFIT);
#if __TESTPANICS__
	if (error != 0)
		panic("psynch_rw_rdlock: failed to enqueue\n");
#endif /* __TESTPANICS__ */
	error = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0);
	/* drops the kwq lock */

out:
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 4, error, 0, 0);
#endif /* _PSYNCH_TRACE_ */
		ksyn_wqlock(kwq);
		if (kwe->kwe_kwqqueue != NULL)
			ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_READ], kwe);
		ksyn_wqunlock(kwq);
	} else {
		/* update bits */
		*retval = kwe->kwe_psynchretval;
		returnbits = kwe->kwe_psynchretval;
	}
	ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK));
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, returnbits, error, 0);
#endif /* _PSYNCH_TRACE_ */
	return(error);
}
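
/*
 * Note (editorial): PTHRW_RWL_INIT in the incoming lgenval marks a lock
 * that user space has just (re)initialized.  The first waiter to notice it
 * runs CLEAR_REINIT_BITS() and sets KSYN_KWF_INITCLEARED so later arrivals
 * do not clear state twice; consuming a prepost afterwards drops the flag
 * again, as in the kw_pre_rwwc == 0 path above.
 */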
/*
 * psynch_rw_longrdlock: This system call is used for psynch rwlock long readers to block.
 */
int
psynch_rw_longrdlock(__unused proc_t p, __unused struct psynch_rw_longrdlock_args * uap, __unused uint32_t * retval)
{
	user_addr_t rwlock = uap->rwlock;
	uint32_t lgen = uap->lgenval;
	uint32_t ugen = uap->ugenval;
	uint32_t rw_wc = uap->rw_wc;
	//uint64_t tid = uap->tid;
	int flags = uap->flags;
	int isinit = lgen & PTHRW_RWL_INIT;
	uint32_t returnbits = 0;
	ksyn_waitq_element_t kwe;

	ksyn_wait_queue_t kwq;
	int error = 0, block = 0;
	uthread_t uth;
	uint32_t lockseq = 0, updatebits = 0, preseq = 0, prerw_wc = 0;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
	uth = current_uthread();
	kwe = &uth->uu_kwe;
	kwe->kwe_lockseq = lgen;
	kwe->kwe_uth = uth;
	kwe->kwe_psynchretval = 0;
	kwe->kwe_kwqqueue = NULL;
	lockseq = (lgen & PTHRW_COUNT_MASK);

	error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	ksyn_wqlock(kwq);

	if (isinit != 0) {
		lgen &= ~PTHRW_RWL_INIT;
		if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
			/* first to notice the reset of the lock, clear preposts */
			CLEAR_REINIT_BITS(kwq);
			kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
#endif /* _PSYNCH_TRACE_ */
		}
	}

	/* handle first the missed wakeups */
	if ((kwq->kw_pre_intrcount != 0) &&
		(kwq->kw_pre_intrtype == PTH_RW_TYPE_LREAD) &&
		(is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {

		kwq->kw_pre_intrcount--;
		kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
		if (kwq->kw_pre_intrcount == 0)
			CLEAR_INTR_PREPOST_BITS(kwq);
		ksyn_wqunlock(kwq);
		goto out;
	}

	if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
		kwq->kw_pre_rwwc--;
		if (kwq->kw_pre_rwwc == 0) {
			preseq = kwq->kw_pre_lockseq;
			prerw_wc = kwq->kw_pre_sseq;
			CLEAR_PREPOST_BITS(kwq);
			if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0) {
				kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
#endif /* _PSYNCH_TRACE_ */
			}
			error = kwq_handle_unlock(kwq, preseq, prerw_wc, &updatebits, (KW_UNLOCK_PREPOST_LREADLOCK|KW_UNLOCK_PREPOST), &block, lgen);
#if __TESTPANICS__
			if (error != 0)
				panic("kwq_handle_unlock failed %d\n", error);
#endif /* __TESTPANICS__ */
			if (block == 0) {
				ksyn_wqunlock(kwq);
				goto out;
			}
			/* insert to q and proceed as usual */
		}
	}

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_LREAD], lgen, uth, kwe, SEQFIT);
#if __TESTPANICS__
	if (error != 0)
		panic("psynch_rw_longrdlock: failed to enqueue\n");
#endif /* __TESTPANICS__ */
	error = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0);
	/* drops the kwq lock */

out:
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
	if (error != 0) {
		ksyn_wqlock(kwq);
		if (kwe->kwe_kwqqueue != NULL)
			ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_LREAD], kwe);
		ksyn_wqunlock(kwq);
	} else {
		/* update bits */
		*retval = kwe->kwe_psynchretval;
		returnbits = kwe->kwe_psynchretval;
	}

	ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK));
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0, returnbits, error, 0);
#endif /* _PSYNCH_TRACE_ */

	return(error);
}
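/*
 * All of the rwlock wait entry points above and below share one shape:
 * look up (or create) the kernel wait queue for the user address, consume
 * a matching "missed wakeup" (kw_pre_intr*) or prepost (kw_pre_rwwc) left
 * behind by an earlier unlock, and only then enqueue and block.  That
 * ordering is what keeps a wakeup that raced ahead of its waiter from
 * being lost.
 */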
/*
 * psynch_rw_wrlock: This system call is used for psynch rwlock writers to block.
 */
int
psynch_rw_wrlock(__unused proc_t p, struct psynch_rw_wrlock_args * uap, uint32_t * retval)
{
	user_addr_t rwlock = uap->rwlock;
	uint32_t lgen = uap->lgenval;
	uint32_t ugen = uap->ugenval;
	uint32_t rw_wc = uap->rw_wc;
	//uint64_t tid = uap->tid;
	int flags = uap->flags;
	int block = 0;
	ksyn_wait_queue_t kwq;
	int error = 0;
	uthread_t uth;
	uint32_t lockseq = 0, updatebits = 0, preseq = 0, prerw_wc = 0;
	int isinit = lgen & PTHRW_RWL_INIT;
	uint32_t returnbits = 0;
	ksyn_waitq_element_t kwe;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
	uth = current_uthread();
	kwe = &uth->uu_kwe;
	kwe->kwe_lockseq = lgen;
	kwe->kwe_uth = uth;
	kwe->kwe_psynchretval = 0;
	kwe->kwe_kwqqueue = NULL;
	lockseq = (lgen & PTHRW_COUNT_MASK);

	error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	ksyn_wqlock(kwq);

	if (isinit != 0) {
		lgen &= ~PTHRW_RWL_INIT;
		if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
			/* first to notice the reset of the lock, clear preposts */
			CLEAR_REINIT_BITS(kwq);
			kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
#endif /* _PSYNCH_TRACE_ */
		}
	}

	/* handle first the missed wakeups */
	if ((kwq->kw_pre_intrcount != 0) &&
		(kwq->kw_pre_intrtype == PTH_RW_TYPE_WRITE) &&
		(is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {

		kwq->kw_pre_intrcount--;
		kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
		if (kwq->kw_pre_intrcount == 0)
			CLEAR_INTR_PREPOST_BITS(kwq);
		ksyn_wqunlock(kwq);
		goto out;
	}

	if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
		kwq->kw_pre_rwwc--;
		if (kwq->kw_pre_rwwc == 0) {
			preseq = kwq->kw_pre_lockseq;
			prerw_wc = kwq->kw_pre_sseq;
			CLEAR_PREPOST_BITS(kwq);
			if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0) {
				kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
#endif /* _PSYNCH_TRACE_ */
			}
			error = kwq_handle_unlock(kwq, preseq, prerw_wc, &updatebits, (KW_UNLOCK_PREPOST_WRLOCK|KW_UNLOCK_PREPOST), &block, lgen);
#if __TESTPANICS__
			if (error != 0)
				panic("rw_wrlock: kwq_handle_unlock failed %d\n", error);
#endif /* __TESTPANICS__ */
			if (block == 0) {
				ksyn_wqunlock(kwq);
				*retval = updatebits;
				goto out1;
			}
			/* insert to q and proceed as usual */
		}
	}

	/* No overlap watch needed, go ahead and block */
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], lgen, uth, kwe, SEQFIT);
#if __TESTPANICS__
	if (error != 0)
		panic("psynch_rw_wrlock: failed to enqueue\n");
#endif /* __TESTPANICS__ */
	error = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0);
	/* drops the wq lock */

out:
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 4, error, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	if (error != 0) {
		ksyn_wqlock(kwq);
		if (kwe->kwe_kwqqueue != NULL)
			ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwe);
		ksyn_wqunlock(kwq);
	} else {
		/* update bits */
		*retval = kwe->kwe_psynchretval;
		returnbits = kwe->kwe_psynchretval;
	}
out1:
	ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK));
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, returnbits, error, 0);
#endif /* _PSYNCH_TRACE_ */

	return(error);
}
/*
 * psynch_rw_yieldwrlock: This system call is used for psynch rwlock yielding writers to block.
 */
int
psynch_rw_yieldwrlock(__unused proc_t p, __unused struct psynch_rw_yieldwrlock_args * uap, __unused uint32_t * retval)
{
	user_addr_t rwlock = uap->rwlock;
	uint32_t lgen = uap->lgenval;
	uint32_t ugen = uap->ugenval;
	uint32_t rw_wc = uap->rw_wc;
	//uint64_t tid = uap->tid;
	int flags = uap->flags;
	int block = 0;
	ksyn_wait_queue_t kwq;
	int error = 0;
	uthread_t uth;
	int isinit = lgen & PTHRW_RWL_INIT;
	uint32_t returnbits = 0;
	ksyn_waitq_element_t kwe;
	uint32_t lockseq = 0, updatebits = 0, preseq = 0, prerw_wc = 0;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
	uth = current_uthread();
	kwe = &uth->uu_kwe;
	kwe->kwe_lockseq = lgen;
	kwe->kwe_uth = uth;
	kwe->kwe_psynchretval = 0;
	kwe->kwe_kwqqueue = NULL;
	lockseq = (lgen & PTHRW_COUNT_MASK);

	error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	ksyn_wqlock(kwq);

	if (isinit != 0) {
		lgen &= ~PTHRW_RWL_INIT;
		if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
			/* first to notice the reset of the lock, clear preposts */
			CLEAR_REINIT_BITS(kwq);
			kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
#endif /* _PSYNCH_TRACE_ */
		}
	}

	/* handle first the missed wakeups */
	if ((kwq->kw_pre_intrcount != 0) &&
		(kwq->kw_pre_intrtype == PTH_RW_TYPE_YWRITE) &&
		(is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {

		kwq->kw_pre_intrcount--;
		kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
		if (kwq->kw_pre_intrcount == 0)
			CLEAR_INTR_PREPOST_BITS(kwq);
		ksyn_wqunlock(kwq);
		goto out;
	}

	if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
		kwq->kw_pre_rwwc--;
		if (kwq->kw_pre_rwwc == 0) {
			preseq = kwq->kw_pre_lockseq;
			prerw_wc = kwq->kw_pre_sseq;
			CLEAR_PREPOST_BITS(kwq);
			if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0) {
				kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
#endif /* _PSYNCH_TRACE_ */
			}
			error = kwq_handle_unlock(kwq, preseq, prerw_wc, &updatebits, (KW_UNLOCK_PREPOST_YWRLOCK|KW_UNLOCK_PREPOST), &block, lgen);
#if __TESTPANICS__
			if (error != 0)
				panic("kwq_handle_unlock failed %d\n", error);
#endif /* __TESTPANICS__ */
			if (block == 0) {
				ksyn_wqunlock(kwq);
				*retval = updatebits;
				goto out1;
			}
			/* insert to q and proceed as usual */
		}
	}

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER], lgen, uth, kwe, SEQFIT);
#if __TESTPANICS__
	if (error != 0)
		panic("psynch_rw_yieldwrlock: failed to enqueue\n");
#endif /* __TESTPANICS__ */
	error = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0);
	/* drops the kwq lock */

out:
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 4, error, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	if (error != 0) {
		ksyn_wqlock(kwq);
		if (kwe->kwe_kwqqueue != NULL)
			ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER], kwe);
		ksyn_wqunlock(kwq);
	} else {
		/* update bits */
		*retval = kwe->kwe_psynchretval;
		returnbits = kwe->kwe_psynchretval;
	}
out1:
	ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_RWLOCK));
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, returnbits, error, 0);
#endif /* _PSYNCH_TRACE_ */

	return(error);
}
#ifdef NOTYET
/*
 * psynch_rw_downgrade: This system call is used to wake up blocked readers who become eligible to run due to a downgrade.
 */
int
psynch_rw_downgrade(__unused proc_t p, struct psynch_rw_downgrade_args * uap, __unused int * retval)
{
	user_addr_t rwlock = uap->rwlock;
	uint32_t lgen = uap->lgenval;
	uint32_t ugen = uap->ugenval;
	uint32_t rw_wc = uap->rw_wc;
	//uint64_t tid = uap->tid;
	int flags = uap->flags;
	uint32_t count = 0;
	int isinit = lgen & PTHRW_RWL_INIT;
	ksyn_wait_queue_t kwq;
	int error = 0, diff;
	uthread_t uth;
	uint32_t curgen = 0;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
	uth = current_uthread();

	curgen = (lgen & PTHRW_COUNT_MASK);

	error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_RWLOCK), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	ksyn_wqlock(kwq);

	if ((lgen & PTHRW_RWL_INIT) != 0) {
		lgen &= ~PTHRW_RWL_INIT;
		if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
			CLEAR_REINIT_BITS(kwq);
			kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
#endif /* _PSYNCH_TRACE_ */
		}
		isinit = 1;
	}

	/* if lastunlock seq is set, ensure the current one is not lower than that, as it would be spurious */
	if ((kwq->kw_lastunlockseq != PTHRW_RWL_INIT) && (is_seqlower(ugen, kwq->kw_lastunlockseq) != 0)) {
		/* spurious updatebits?? */
		error = 0;
		goto out;
	}

	/* If L-U != num of waiters, then it needs to be preposted or spr */
	diff = find_diff(lgen, ugen);
	/* take count of the downgrade thread itself */
	diff++;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 1, kwq->kw_inqueue, curgen, 0);
#endif /* _PSYNCH_TRACE_ */
	if (find_seq_till(kwq, curgen, diff, &count) == 0) {
		if (count < (uint32_t)diff)
			goto prepost;
	}

	/* no prepost and all threads are in place, reset the bit */
	if ((isinit != 0) && ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0)) {
		kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	}

	/* can handle unlock now */
	CLEAR_PREPOST_BITS(kwq);

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	error = kwq_handle_downgrade(kwq, lgen, 0, 0, NULL);
#if __TESTPANICS__
	if (error != 0)
		panic("psynch_rw_downgrade: failed to wakeup\n");
#endif /* __TESTPANICS__ */

out:
	ksyn_wqunlock(kwq);
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_END, (uint32_t)rwlock, 0, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
	ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_RWLOCK));
	return(error);

prepost:
	kwq->kw_pre_rwwc = (rw_wc - count);
	kwq->kw_pre_lockseq = lgen;
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 1, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
	error = 0;
	goto out;
}
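/*
 * The L - U arithmetic above treats the lock word (L) and unlock word (U)
 * as wrapping counters whose difference is the number of outstanding
 * waiters.  A sketch of what find_diff() is assumed to reduce to, given
 * that the waiter count lives above PTHRW_COUNT_SHIFT (illustration only,
 * not this file's definition):
 */
#if 0
static uint32_t
example_find_diff(uint32_t lgen, uint32_t ugen)
{
	/* wrapped distance between the two generation words */
	return (((lgen - ugen) & PTHRW_COUNT_MASK) >> PTHRW_COUNT_SHIFT);
}
#endif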
/*
 * psynch_rw_upgrade: This system call is used by a reader to block waiting for an upgrade to be granted.
 */
int
psynch_rw_upgrade(__unused proc_t p, struct psynch_rw_upgrade_args * uap, uint32_t * retval)
{
	user_addr_t rwlock = uap->rwlock;
	uint32_t lgen = uap->lgenval;
	uint32_t ugen = uap->ugenval;
	uint32_t rw_wc = uap->rw_wc;
	//uint64_t tid = uap->tid;
	int flags = uap->flags;
	int block = 0;
	ksyn_wait_queue_t kwq;
	int error = 0;
	uthread_t uth;
	uint32_t lockseq = 0, updatebits = 0, preseq = 0, prerw_wc = 0;
	int isinit = lgen & PTHRW_RWL_INIT;
	ksyn_waitq_element_t kwe;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
	uth = current_uthread();
	kwe = &uth->uu_kwe;
	kwe->kwe_lockseq = lgen;
	kwe->kwe_uth = uth;
	kwe->kwe_psynchretval = 0;
	kwe->kwe_kwqqueue = NULL;
	lockseq = (lgen & PTHRW_COUNT_MASK);

	error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_RWLOCK), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	ksyn_wqlock(kwq);

	if (isinit != 0) {
		lgen &= ~PTHRW_RWL_INIT;
		if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
			/* first to notice the reset of the lock, clear preposts */
			CLEAR_REINIT_BITS(kwq);
			kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
#endif /* _PSYNCH_TRACE_ */
		}
	}

	/* handle first the missed wakeups */
	if ((kwq->kw_pre_intrcount != 0) &&
		((kwq->kw_pre_intrtype == PTH_RW_TYPE_READ) || (kwq->kw_pre_intrtype == PTH_RW_TYPE_LREAD)) &&
		(is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {

		kwq->kw_pre_intrcount--;
		kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
		if (kwq->kw_pre_intrcount == 0)
			CLEAR_INTR_PREPOST_BITS(kwq);
		ksyn_wqunlock(kwq);
		goto out;
	}

	if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
		kwq->kw_pre_rwwc--;
		if (kwq->kw_pre_rwwc == 0) {
			preseq = kwq->kw_pre_lockseq;
			prerw_wc = kwq->kw_pre_sseq;
			CLEAR_PREPOST_BITS(kwq);
			if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0) {
				kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
#endif /* _PSYNCH_TRACE_ */
			}
			error = kwq_handle_unlock(kwq, preseq, prerw_wc, &updatebits, (KW_UNLOCK_PREPOST_UPGRADE|KW_UNLOCK_PREPOST), &block, lgen);
#if __TESTPANICS__
			if (error != 0)
				panic("psynch_rw_upgrade: kwq_handle_unlock failed %d\n", error);
#endif /* __TESTPANICS__ */
			if (block == 0) {
				ksyn_wqunlock(kwq);
				goto out;
			}
			/* insert to q and proceed as usual */
		}
	}

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_UPGRADE], lgen, uth, kwe, SEQFIT);
#if __TESTPANICS__
	if (error != 0)
		panic("psynch_rw_upgrade: failed to enqueue\n");
#endif /* __TESTPANICS__ */

	error = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0);
	/* drops the lock */

out:
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 4, error, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	if (error != 0) {
		ksyn_wqlock(kwq);
		if (kwe->kwe_kwqqueue != NULL)
			ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_UPGRADE], kwe);
		ksyn_wqunlock(kwq);
	} else {
		/* update bits */
		*retval = kwe->kwe_psynchretval;
	}

	ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_RWLOCK));
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */

	return(error);
}
#else /* NOTYET */
int
psynch_rw_upgrade(__unused proc_t p, __unused struct psynch_rw_upgrade_args * uap, __unused uint32_t * retval)
{
	return(0);
}

int
psynch_rw_downgrade(__unused proc_t p, __unused struct psynch_rw_downgrade_args * uap, __unused int * retval)
{
	return(0);
}
#endif /* NOTYET */
/*
 * psynch_rw_unlock: This system call is used for unlock state postings. This will grant the appropriate
 *			reader/writer variety of lock.
 */
int
psynch_rw_unlock(__unused proc_t p, struct psynch_rw_unlock_args * uap, uint32_t * retval)
{
	user_addr_t rwlock = uap->rwlock;
	uint32_t lgen = uap->lgenval;
	uint32_t ugen = uap->ugenval;
	uint32_t rw_wc = uap->rw_wc;
	uint32_t curgen;
	//uint64_t tid = uap->tid;
	int flags = uap->flags;
	uthread_t uth;
	ksyn_wait_queue_t kwq;
	uint32_t updatebits = 0;
	int error = 0, diff;
	uint32_t count = 0;
	int isinit = 0;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
	uth = current_uthread();

	error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_RWLOCK), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	curgen = lgen & PTHRW_COUNT_MASK;

	ksyn_wqlock(kwq);

	if ((lgen & PTHRW_RWL_INIT) != 0) {
		lgen &= ~PTHRW_RWL_INIT;
		if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
			CLEAR_REINIT_BITS(kwq);
			kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
#endif /* _PSYNCH_TRACE_ */
		}
		isinit = 1;
	}

	/* if lastunlock seq is set, ensure the current one is not lower than that, as it would be spurious */
	if ((kwq->kw_lastunlockseq != PTHRW_RWL_INIT) && (is_seqlower(ugen, kwq->kw_lastunlockseq) != 0)) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, (uint32_t)0xeeeeeeee, rw_wc, kwq->kw_lastunlockseq, 0);
#endif /* _PSYNCH_TRACE_ */
		error = 0;
		goto out;
	}

	/* If L-U != num of waiters, then it needs to be preposted or spr */
	diff = find_diff(lgen, ugen);

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 1, kwq->kw_inqueue, curgen, 0);
#endif /* _PSYNCH_TRACE_ */
	if (find_seq_till(kwq, curgen, diff, &count) == 0) {
		if ((count == 0) || (count < (uint32_t)diff))
			goto prepost;
	}

	/* no prepost and all threads are in place, reset the bit */
	if ((isinit != 0) && ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0)) {
		kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	}

	/* can handle unlock now */
	CLEAR_PREPOST_BITS(kwq);

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	error = kwq_handle_unlock(kwq, lgen, rw_wc, &updatebits, 0, NULL, 0);
#if __TESTPANICS__
	if (error != 0)
		panic("psynch_rw_unlock: kwq_handle_unlock failed %d\n", error);
#endif /* __TESTPANICS__ */

out:
	if (error == 0) {
		/* update bits */
		*retval = updatebits;
	}

	ksyn_wqunlock(kwq);

	ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_RWLOCK));
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0, updatebits, error, 0);
#endif /* _PSYNCH_TRACE_ */

	return(error);

prepost:
	/* update if the new seq is higher than prev prepost, or first set */
	if ((is_rws_setseq(kwq->kw_pre_sseq) != 0) ||
			(is_seqhigher_eq((rw_wc & PTHRW_COUNT_MASK), (kwq->kw_pre_sseq & PTHRW_COUNT_MASK)) != 0)) {
		kwq->kw_pre_rwwc = (diff - count);
		kwq->kw_pre_lockseq = curgen;
		kwq->kw_pre_sseq = rw_wc;
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, rw_wc, count, 0);
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 4, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
		updatebits = lgen;	/* let this not do unlock handling */
	}
	error = 0;
	goto out;
}
/*
 * psynch_rw_unlock2: This system call is used to wake up pending readers when an unlock grant from
 *			the kernel races with the arrival of new readers.
 */
int
psynch_rw_unlock2(__unused proc_t p, __unused struct psynch_rw_unlock2_args * uap, __unused uint32_t * retval)
{
	return(ENOTSUP);
}
/* ************************************************************************** */
void
pth_global_hashinit(void)
{
	int arg;

	pth_glob_hashtbl = hashinit(PTH_HASHSIZE * 4, M_PROC, &pthhash);

	/*
	 * pthtest={0,1,2,3} (override default aborting behavior on pthread sync failures)
	 * 0 - just return errors
	 * 1 - print and return errors
	 * 2 - abort user, print and return errors
	 * 3 - panic
	 */
	if (!PE_parse_boot_argn("pthtest", &arg, sizeof(arg)))
		arg = __TESTMODE__;

	if (arg == 3) {
		__test_panics__ = 1;
		printf("Pthread support PANICS when sync kernel primitives misused\n");
	} else if (arg == 2) {
		__test_aborts__ = 1;
		__test_prints__ = 1;
		printf("Pthread support ABORTS when sync kernel primitives misused\n");
	} else if (arg == 1) {
		__test_prints__ = 1;
		printf("Pthread support LOGS when sync kernel primitives misused\n");
	}
}
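/*
 * hashinit() rounds the requested size up to a power of two and returns
 * the bucket mask through its last argument, which is why the lookups
 * below can index with a plain "key & pthhash".  Illustration of the
 * equivalent bucket selection (not compiled):
 */
#if 0
static struct pthhashhead *
example_bucket(struct pthhashhead *tbl, u_long mask, uint64_t key)
{
	return (&tbl[key & mask]);	/* mask == (bucket count - 1) */
}
#endif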
void
pth_proc_hashinit(proc_t p)
{
	p->p_pthhash = hashinit(PTH_HASHSIZE, M_PROC, &pthhash);
	if (p->p_pthhash == NULL)
		panic("pth_proc_hashinit: hash init returned 0\n");
}
ksyn_wait_queue_t
ksyn_wq_hash_lookup(user_addr_t mutex, proc_t p, int flags, uint64_t object, uint64_t objoffset)
{
	ksyn_wait_queue_t kwq;
	struct pthhashhead * hashptr;

	if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED)
	{
		hashptr = pth_glob_hashtbl;
		kwq = (&hashptr[object & pthhash])->lh_first;
		for (; kwq != NULL; kwq = kwq->kw_hash.le_next) {
			if ((kwq->kw_object == object) && (kwq->kw_offset == objoffset)) {
				return (kwq);
			}
		}
	} else {
		hashptr = p->p_pthhash;
		kwq = (&hashptr[mutex & pthhash])->lh_first;
		for (; kwq != NULL; kwq = kwq->kw_hash.le_next) {
			if (kwq->kw_addr == mutex) {
				return (kwq);
			}
		}
	}
	return (NULL);
}
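/*
 * Note: as used from ksyn_wqfind() below, this lookup runs with
 * pthread_list_lock held and returns the entry without taking an iocount
 * reference; the caller is responsible for both.
 */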
void
pth_proc_hashdelete(proc_t p)
{
	struct pthhashhead * hashptr;
	ksyn_wait_queue_t kwq;
	int hashsize = pthhash + 1;
	int i;

#if _PSYNCH_TRACE_
	if ((pthread_debug_proc != NULL) && (p == pthread_debug_proc))
		pthread_debug_proc = PROC_NULL;
#endif /* _PSYNCH_TRACE_ */
	hashptr = p->p_pthhash;
	if (hashptr == NULL)
		return;

	for (i = 0; i < hashsize; i++) {
		while ((kwq = LIST_FIRST(&hashptr[i])) != NULL) {
			pthread_list_lock();
			if ((kwq->kw_pflags & KSYN_WQ_INHASH) != 0) {
				kwq->kw_pflags &= ~KSYN_WQ_INHASH;
				LIST_REMOVE(kwq, kw_hash);
			}
			if ((kwq->kw_pflags & KSYN_WQ_FLIST) != 0) {
				kwq->kw_pflags &= ~KSYN_WQ_FLIST;
				LIST_REMOVE(kwq, kw_list);
			}
			pthread_list_unlock();
			/* release fake entries if present for cvars */
			if (((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_CVAR) && (kwq->kw_inqueue != 0))
				ksyn_freeallkwe(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER]);
			lck_mtx_destroy(&kwq->kw_lock, pthread_lck_grp);
			zfree(kwq_zone, kwq);
		}
	}
	FREE(p->p_pthhash, M_PROC);
	p->p_pthhash = NULL;
}
/* no lock held for this as the waitqueue is getting freed */
void
ksyn_freeallkwe(ksyn_queue_t kq)
{
	ksyn_waitq_element_t kwe;

	/* free all the fake entries, dequeue rest */
	kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
	while (kwe != NULL) {
		if (kwe->kwe_flags != KWE_THREAD_INWAIT) {
			TAILQ_REMOVE(&kq->ksynq_kwelist, kwe, kwe_list);
			zfree(kwe_zone, kwe);
		} else {
			TAILQ_REMOVE(&kq->ksynq_kwelist, kwe, kwe_list);
		}
		kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
	}
}
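/*
 * "Fake" entries here are the KWE_THREAD_BROADCAST/KWE_THREAD_PREPOST
 * elements allocated from kwe_zone on behalf of condition variables;
 * KWE_THREAD_INWAIT elements belong to a blocked thread's uthread and
 * are therefore only dequeued, never freed.
 */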
/* find kernel waitqueue, if not present create one. Grants a reference */
int
ksyn_wqfind(user_addr_t mutex, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, uint64_t tid, int flags, int wqtype, ksyn_wait_queue_t * kwqp)
{
	ksyn_wait_queue_t kwq;
	ksyn_wait_queue_t nkwq;
	struct pthhashhead * hashptr;
	uint64_t object = 0, offset = 0;
	proc_t p = current_proc();
	int retry = mgen & PTH_RWL_RETRYBIT;
	struct ksyn_queue kfreeq;
	int i;

	if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED)
	{
		(void)ksyn_findobj(mutex, &object, &offset);
		hashptr = pth_glob_hashtbl;
	} else {
		hashptr = p->p_pthhash;
	}

	ksyn_queue_init(&kfreeq);

	if (((wqtype & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_MTX) && (retry != 0))
		mgen &= ~PTH_RWL_RETRYBIT;

loop:
	//pthread_list_lock_spin();
	pthread_list_lock();

	kwq = ksyn_wq_hash_lookup(mutex, p, flags, object, offset);

	if (kwq != NULL) {
		if ((kwq->kw_pflags & KSYN_WQ_FLIST) != 0) {
			LIST_REMOVE(kwq, kw_list);
			kwq->kw_pflags &= ~KSYN_WQ_FLIST;
		}

		if ((kwq->kw_type & KSYN_WQTYPE_MASK) != (wqtype & KSYN_WQTYPE_MASK)) {
			if ((kwq->kw_inqueue == 0) && (kwq->kw_pre_rwwc == 0) && (kwq->kw_pre_intrcount == 0)) {
				if (kwq->kw_iocount == 0) {
					kwq->kw_addr = mutex;
					kwq->kw_flags = flags;
					kwq->kw_object = object;
					kwq->kw_offset = offset;
					kwq->kw_type = (wqtype & KSYN_WQTYPE_MASK);
					CLEAR_REINIT_BITS(kwq);
					CLEAR_INTR_PREPOST_BITS(kwq);
					CLEAR_PREPOST_BITS(kwq);
					kwq->kw_lword = mgen;
					kwq->kw_uword = ugen;
					kwq->kw_sword = rw_wc;
					kwq->kw_owner = tid;
				} else if ((kwq->kw_iocount == 1) && (kwq->kw_dropcount == kwq->kw_iocount)) {
					/* if all users are unlockers then wait for it to finish */
					kwq->kw_pflags |= KSYN_WQ_WAITING;
					/* wait for the wq to be free */
					(void)msleep(&kwq->kw_pflags, pthread_list_mlock, PDROP, "ksyn_wqfind", 0);
					/* does not have list lock */
					goto loop;
				} else {
					__FAILEDUSERTEST__("address already known to kernel for another (busy) synchronizer type\n");
					pthread_list_unlock();
					return EBUSY;
				}
			} else {
				__FAILEDUSERTEST__("address already known to kernel for another (busy) synchronizer type(1)\n");
				pthread_list_unlock();
				return EBUSY;
			}
		}
		kwq->kw_iocount++;
		if (wqtype == KSYN_WQTYPE_MUTEXDROP)
			kwq->kw_dropcount++;
		if (kwqp != NULL)
			*kwqp = kwq;
		pthread_list_unlock();
		return (0);
	}

	pthread_list_unlock();

	nkwq = (ksyn_wait_queue_t)zalloc(kwq_zone);
	bzero(nkwq, sizeof(struct ksyn_wait_queue));
	nkwq->kw_addr = mutex;
	nkwq->kw_flags = flags;
	nkwq->kw_iocount = 1;
	if (wqtype == KSYN_WQTYPE_MUTEXDROP)
		nkwq->kw_dropcount++;
	nkwq->kw_object = object;
	nkwq->kw_offset = offset;
	nkwq->kw_type = (wqtype & KSYN_WQTYPE_MASK);
	nkwq->kw_lastseqword = PTHRW_RWS_INIT;
	if (nkwq->kw_type == KSYN_WQTYPE_RWLOCK)
		nkwq->kw_nextseqword = PTHRW_RWS_INIT;

	nkwq->kw_pre_sseq = PTHRW_RWS_INIT;

	CLEAR_PREPOST_BITS(nkwq);
	CLEAR_INTR_PREPOST_BITS(nkwq);
	CLEAR_REINIT_BITS(nkwq);
	nkwq->kw_lword = mgen;
	nkwq->kw_uword = ugen;
	nkwq->kw_sword = rw_wc;
	nkwq->kw_owner = tid;

	for (i = 0; i < KSYN_QUEUE_MAX; i++)
		ksyn_queue_init(&nkwq->kw_ksynqueues[i]);

	lck_mtx_init(&nkwq->kw_lock, pthread_lck_grp, pthread_lck_attr);

	//pthread_list_lock_spin();
	pthread_list_lock();
	/* see whether it is already allocated */
	kwq = ksyn_wq_hash_lookup(mutex, p, flags, object, offset);

	if (kwq != NULL) {
		if ((kwq->kw_pflags & KSYN_WQ_FLIST) != 0) {
			LIST_REMOVE(kwq, kw_list);
			kwq->kw_pflags &= ~KSYN_WQ_FLIST;
		}

		if ((kwq->kw_type & KSYN_WQTYPE_MASK) != (wqtype & KSYN_WQTYPE_MASK)) {
			if ((kwq->kw_inqueue == 0) && (kwq->kw_pre_rwwc == 0) && (kwq->kw_pre_intrcount == 0)) {
				if (kwq->kw_iocount == 0) {
					kwq->kw_addr = mutex;
					kwq->kw_flags = flags;
					kwq->kw_object = object;
					kwq->kw_offset = offset;
					kwq->kw_type = (wqtype & KSYN_WQTYPE_MASK);
					CLEAR_REINIT_BITS(kwq);
					CLEAR_INTR_PREPOST_BITS(kwq);
					CLEAR_PREPOST_BITS(kwq);
					kwq->kw_lword = mgen;
					kwq->kw_uword = ugen;
					kwq->kw_sword = rw_wc;
					kwq->kw_owner = tid;
				} else if ((kwq->kw_iocount == 1) && (kwq->kw_dropcount == kwq->kw_iocount)) {
					kwq->kw_pflags |= KSYN_WQ_WAITING;
					/* wait for the wq to be free */
					(void)msleep(&kwq->kw_pflags, pthread_list_mlock, PDROP, "ksyn_wqfind", 0);

					lck_mtx_destroy(&nkwq->kw_lock, pthread_lck_grp);
					zfree(kwq_zone, nkwq);
					/* will acquire lock again */
					goto loop;
				} else {
					__FAILEDUSERTEST__("address already known to kernel for another [busy] synchronizer type(2)\n");
					pthread_list_unlock();
					lck_mtx_destroy(&nkwq->kw_lock, pthread_lck_grp);
					zfree(kwq_zone, nkwq);
					return EBUSY;
				}
			} else {
				__FAILEDUSERTEST__("address already known to kernel for another [busy] synchronizer type(3)\n");
				pthread_list_unlock();
				lck_mtx_destroy(&nkwq->kw_lock, pthread_lck_grp);
				zfree(kwq_zone, nkwq);
				return EBUSY;
			}
		}
		kwq->kw_iocount++;
		if (wqtype == KSYN_WQTYPE_MUTEXDROP)
			kwq->kw_dropcount++;
		if (kwqp != NULL)
			*kwqp = kwq;
		pthread_list_unlock();
		lck_mtx_destroy(&nkwq->kw_lock, pthread_lck_grp);
		zfree(kwq_zone, nkwq);
		return (0);
	}

	kwq = nkwq;
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, kwq->kw_lword, kwq->kw_uword, kwq->kw_sword, 0xffff, 0);
#endif /* _PSYNCH_TRACE_ */
	if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED)
	{
		kwq->kw_pflags |= KSYN_WQ_SHARED;
		LIST_INSERT_HEAD(&hashptr[kwq->kw_object & pthhash], kwq, kw_hash);
	} else
		LIST_INSERT_HEAD(&hashptr[mutex & pthhash], kwq, kw_hash);

	kwq->kw_pflags |= KSYN_WQ_INHASH;

	pthread_list_unlock();

	if (kwqp != NULL)
		*kwqp = kwq;
	return (0);
}
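/*
 * The allocate-then-recheck dance above is deliberate: the zone allocation
 * can block, so the list lock is dropped, a fresh waitqueue is fully
 * initialized, and the hash is searched a second time.  If another thread
 * won the race, the new element is destroyed and the existing one reused.
 */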
/* Reference from find is dropped here. Starts the free process if needed */
void
ksyn_wqrelease(ksyn_wait_queue_t kwq, ksyn_wait_queue_t ckwq, int qfreenow, int wqtype)
{
	uint64_t deadline;
	struct timeval t;
	int sched = 0;
	ksyn_wait_queue_t free_elem = NULL;
	ksyn_wait_queue_t free_elem1 = NULL;

	//pthread_list_lock_spin();
	pthread_list_lock();
	if (kwq != NULL) {
		kwq->kw_iocount--;
		if (wqtype == KSYN_WQTYPE_MUTEXDROP) {
			kwq->kw_dropcount--;
		}
		if (kwq->kw_iocount == 0) {
			if ((kwq->kw_pflags & KSYN_WQ_WAITING) != 0) {
				/* someone is waiting for the waitqueue, wake them up */
				kwq->kw_pflags &= ~KSYN_WQ_WAITING;
				wakeup(&kwq->kw_pflags);
			}

			if ((kwq->kw_pre_rwwc == 0) && (kwq->kw_inqueue == 0) && (kwq->kw_pre_intrcount == 0)) {
				if (qfreenow == 0) {
					microuptime(&kwq->kw_ts);
					LIST_INSERT_HEAD(&pth_free_list, kwq, kw_list);
					kwq->kw_pflags |= KSYN_WQ_FLIST;
					sched = 1;
				} else {
					/* remove from the only list it is in, i.e. the hash */
					kwq->kw_pflags &= ~(KSYN_WQ_FLIST | KSYN_WQ_INHASH);
					LIST_REMOVE(kwq, kw_hash);
					lck_mtx_destroy(&kwq->kw_lock, pthread_lck_grp);
					free_elem = kwq;
				}
			}
		}
	}
	if (ckwq != NULL) {
		ckwq->kw_iocount--;
		if (wqtype == KSYN_WQTYPE_MUTEXDROP) {
			ckwq->kw_dropcount--;
		}
		if (ckwq->kw_iocount == 0) {
			if ((ckwq->kw_pflags & KSYN_WQ_WAITING) != 0) {
				/* someone is waiting for the waitqueue, wake them up */
				ckwq->kw_pflags &= ~KSYN_WQ_WAITING;
				wakeup(&ckwq->kw_pflags);
			}
			if ((ckwq->kw_pre_rwwc == 0) && (ckwq->kw_inqueue == 0) && (ckwq->kw_pre_intrcount == 0)) {
				if (qfreenow == 0) {
					/* mark for free if we can */
					microuptime(&ckwq->kw_ts);
					LIST_INSERT_HEAD(&pth_free_list, ckwq, kw_list);
					ckwq->kw_pflags |= KSYN_WQ_FLIST;
					sched = 1;
				} else {
					/* remove from the only list it is in, i.e. the hash */
					ckwq->kw_pflags &= ~(KSYN_WQ_FLIST | KSYN_WQ_INHASH);
					LIST_REMOVE(ckwq, kw_hash);
					lck_mtx_destroy(&ckwq->kw_lock, pthread_lck_grp);
					free_elem1 = ckwq;
				}
			}
		}
	}

	if (sched == 1 && psynch_cleanupset == 0) {
		psynch_cleanupset = 1;
		microuptime(&t);
		t.tv_sec += KSYN_CLEANUP_DEADLINE;

		deadline = tvtoabstime(&t);
		thread_call_enter_delayed(psynch_thcall, deadline);
	}
	pthread_list_unlock();
	if (free_elem != NULL)
		zfree(kwq_zone, free_elem);
	if (free_elem1 != NULL)
		zfree(kwq_zone, free_elem1);
}
/* responsible for freeing the waitqueues */
void
psynch_wq_cleanup(__unused void * param, __unused void * param1)
{
	ksyn_wait_queue_t kwq;
	struct timeval t;
	LIST_HEAD(, ksyn_wait_queue) freelist = {NULL};
	int count = 0, delayed = 0, diff;
	uint64_t deadline = 0;

	//pthread_list_lock_spin();
	pthread_list_lock();

	num_addedfreekwq = num_infreekwq - num_lastfreekwqcount;
	num_lastfreekwqcount = num_infreekwq;
	microuptime(&t);

	LIST_FOREACH(kwq, &pth_free_list, kw_list) {
		if ((kwq->kw_iocount != 0) || (kwq->kw_pre_rwwc != 0) || (kwq->kw_inqueue != 0) || (kwq->kw_pre_intrcount != 0)) {
			/* still in use */
			continue;
		}
		diff = t.tv_sec - kwq->kw_ts.tv_sec;
		if (diff < 0)
			diff *= -1;
		if (diff >= KSYN_CLEANUP_DEADLINE) {
			/* out of hash */
			kwq->kw_pflags &= ~(KSYN_WQ_FLIST | KSYN_WQ_INHASH);
			LIST_REMOVE(kwq, kw_hash);
			LIST_REMOVE(kwq, kw_list);
			LIST_INSERT_HEAD(&freelist, kwq, kw_list);
			count++;
		} else {
			delayed = 1;
		}
	}
	if (delayed != 0) {
		t.tv_sec += KSYN_CLEANUP_DEADLINE;

		deadline = tvtoabstime(&t);
		thread_call_enter_delayed(psynch_thcall, deadline);
		psynch_cleanupset = 1;
	} else
		psynch_cleanupset = 0;

	pthread_list_unlock();

	while ((kwq = LIST_FIRST(&freelist)) != NULL) {
		LIST_REMOVE(kwq, kw_list);
		lck_mtx_destroy(&kwq->kw_lock, pthread_lck_grp);
		zfree(kwq_zone, kwq);
	}
}
int
ksyn_block_thread_locked(ksyn_wait_queue_t kwq, uint64_t abstime, ksyn_waitq_element_t kwe, int mylog)
{
	kern_return_t kret;
	int error = 0;
#if _PSYNCH_TRACE_
	uthread_t uth = NULL;
#endif /* _PSYNCH_TRACE_ */

	kwe->kwe_kwqqueue = (void *)kwq;
	assert_wait_deadline(&kwe->kwe_psynchretval, THREAD_ABORTSAFE, abstime);
	ksyn_wqunlock(kwq);

	kret = thread_block(NULL);
	switch (kret) {
	case THREAD_TIMED_OUT:
		error = ETIMEDOUT;
		break;
	case THREAD_INTERRUPTED:
		error = EINTR;
		break;
	}

#if _PSYNCH_TRACE_
	uth = current_uthread();
#if defined(__i386__)
	if (mylog != 0)
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_THWAKEUP | DBG_FUNC_NONE, 0xf4f3f2f1, (uint32_t)uth, kret, 0, 0);
#else
	if (mylog != 0)
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_THWAKEUP | DBG_FUNC_NONE, 0xeeeeeeee, kret, error, 0xeeeeeeee, 0);
#endif
#endif /* _PSYNCH_TRACE_ */

	return(error);
}
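/*
 * The wait channel is the address of kwe->kwe_psynchretval, armed with
 * assert_wait_deadline() before the kwq lock is dropped; ksyn_wakeup_thread()
 * below posts to the same address, so a wakeup that lands between the
 * unlock and the thread_block() is not lost.
 */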
kern_return_t
ksyn_wakeup_thread(__unused ksyn_wait_queue_t kwq, ksyn_waitq_element_t kwe)
{
	kern_return_t kret;
#if _PSYNCH_TRACE_
	uthread_t uth = NULL;
#endif /* _PSYNCH_TRACE_ */

	kret = thread_wakeup_one((caddr_t)&kwe->kwe_psynchretval);

	if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
		panic("ksyn_wakeup_thread: panic waking up thread %x\n", kret);

#if _PSYNCH_TRACE_
#if defined(__i386__)
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_THWAKEUP | DBG_FUNC_NONE, 0xf1f2f3f4, (uint32_t)uth, kret, 0, 0);
#endif
#endif /* _PSYNCH_TRACE_ */

	return(kret);
}
/* find the true shared object/offset for shared mutexes */
int
ksyn_findobj(uint64_t mutex, uint64_t * objectp, uint64_t * offsetp)
{
	vm_page_info_basic_data_t info;
	kern_return_t kret;
	mach_msg_type_number_t count = VM_PAGE_INFO_BASIC_COUNT;

	kret = vm_map_page_info(current_map(), mutex, VM_PAGE_INFO_BASIC,
			(vm_page_info_t)&info, &count);

	if (kret != KERN_SUCCESS)
		return(EINVAL);

	if (objectp != NULL)
		*objectp = (uint64_t)info.object_id;
	if (offsetp != NULL)
		*offsetp = (uint64_t)info.offset;

	return(0);
}
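/*
 * For PTHREAD_PROCESS_SHARED synchronizers the user address alone is not a
 * stable identity (each process may map the page at a different address),
 * so the backing VM object id and offset returned here are what the global
 * hash keys on.
 */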
/* lowest of kw_fr, kw_flr, kw_fwr, kw_fywr */
int
kwq_find_rw_lowest(ksyn_wait_queue_t kwq, int flags, uint32_t premgen, int * typep, uint32_t lowest[])
{
	uint32_t kw_fr, kw_flr, kw_fwr, kw_fywr, low;
	int type = 0, lowtype, typenum[4];
	uint32_t numbers[4];
	int count = 0, i;

	if ((kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0)) {
		type |= PTH_RWSHFT_TYPE_READ;
		/* read entries are present */
		if (kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count != 0) {
			kw_fr = kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_firstnum;
			if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) && (is_seqlower(premgen, kw_fr) != 0))
				kw_fr = premgen;
		} else
			kw_fr = premgen;

		lowest[KSYN_QUEUE_READ] = kw_fr;
		numbers[count] = kw_fr;
		typenum[count] = PTH_RW_TYPE_READ;
		count++;
	} else
		lowest[KSYN_QUEUE_READ] = 0;

	if ((kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_LREADLOCK) != 0)) {
		type |= PTH_RWSHFT_TYPE_LREAD;
		/* longread entries are present */
		if (kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count != 0) {
			kw_flr = kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_firstnum;
			if (((flags & KW_UNLOCK_PREPOST_LREADLOCK) != 0) && (is_seqlower(premgen, kw_flr) != 0))
				kw_flr = premgen;
		} else
			kw_flr = premgen;

		lowest[KSYN_QUEUE_LREAD] = kw_flr;
		numbers[count] = kw_flr;
		typenum[count] = PTH_RW_TYPE_LREAD;
		count++;
	} else
		lowest[KSYN_QUEUE_LREAD] = 0;

	if ((kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0)) {
		type |= PTH_RWSHFT_TYPE_WRITE;
		/* write entries are present */
		if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0) {
			kw_fwr = kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_firstnum;
			if (((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0) && (is_seqlower(premgen, kw_fwr) != 0))
				kw_fwr = premgen;
		} else
			kw_fwr = premgen;

		lowest[KSYN_QUEUE_WRITER] = kw_fwr;
		numbers[count] = kw_fwr;
		typenum[count] = PTH_RW_TYPE_WRITE;
		count++;
	} else
		lowest[KSYN_QUEUE_WRITER] = 0;

	if ((kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_YWRLOCK) != 0)) {
		type |= PTH_RWSHFT_TYPE_YWRITE;
		/* yielding write entries are present */
		if (kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0) {
			kw_fywr = kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_firstnum;
			if (((flags & KW_UNLOCK_PREPOST_YWRLOCK) != 0) && (is_seqlower(premgen, kw_fywr) != 0))
				kw_fywr = premgen;
		} else
			kw_fywr = premgen;

		lowest[KSYN_QUEUE_YWRITER] = kw_fywr;
		numbers[count] = kw_fywr;
		typenum[count] = PTH_RW_TYPE_YWRITE;
		count++;
	} else
		lowest[KSYN_QUEUE_YWRITER] = 0;

#if __TESTPANICS__
	if (count == 0)
		panic("nothing in the queue???\n");
#endif /* __TESTPANICS__ */

	low = numbers[0];
	lowtype = typenum[0];
	if (count > 1) {
		for (i = 1; i < count; i++) {
			if (is_seqlower(numbers[i], low) != 0) {
				low = numbers[i];
				lowtype = typenum[i];
			}
		}
	}
	type |= lowtype;

	if (typep != NULL)
		*typep = type;
	return(0);
}
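/*
 * On return, *typep carries two things: PTH_RWSHFT_TYPE_* bits recording
 * which queue classes had candidates at all, plus the PTH_RW_TYPE_* class
 * of the single lowest sequence number found; lowest[] holds the per-queue
 * minimums (0 when a queue had no candidate).
 */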
/* wake up readers and longreaders up to the writer limits */
int
ksyn_wakeupreaders(ksyn_wait_queue_t kwq, uint32_t limitread, int longreadset, int allreaders, uint32_t updatebits, int * wokenp)
{
	ksyn_waitq_element_t kwe = NULL;
	ksyn_queue_t kq;
	int failedwakeup = 0;
	int numwoken = 0;
	kern_return_t kret = KERN_SUCCESS;
	uint32_t lbits = 0;

	lbits = updatebits;
	if (longreadset != 0) {
		/* clear all read and longreads */
		while ((kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_READ], kwq)) != NULL) {
			kwe->kwe_psynchretval = lbits;
			kwe->kwe_kwqqueue = NULL;

			numwoken++;
			kret = ksyn_wakeup_thread(kwq, kwe);
#if __TESTPANICS__
			if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
				panic("ksyn_wakeupreaders: panic waking up readers\n");
#endif /* __TESTPANICS__ */
			if (kret == KERN_NOT_WAITING) {
				failedwakeup++;
			}
		}
		while ((kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_LREAD], kwq)) != NULL) {
			kwe->kwe_psynchretval = lbits;
			kwe->kwe_kwqqueue = NULL;

			numwoken++;
			kret = ksyn_wakeup_thread(kwq, kwe);
#if __TESTPANICS__
			if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
				panic("ksyn_wakeupreaders: panic waking up lreaders\n");
#endif /* __TESTPANICS__ */
			if (kret == KERN_NOT_WAITING) {
				failedwakeup++;
			}
		}
	} else {
		kq = &kwq->kw_ksynqueues[KSYN_QUEUE_READ];
		while ((kq->ksynq_count != 0) && (allreaders || (is_seqlower(kq->ksynq_firstnum, limitread) != 0))) {
			kwe = ksyn_queue_removefirst(kq, kwq);
			kwe->kwe_psynchretval = lbits;
			kwe->kwe_kwqqueue = NULL;

			numwoken++;
			kret = ksyn_wakeup_thread(kwq, kwe);
#if __TESTPANICS__
			if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
				panic("ksyn_wakeupreaders: panic waking up readers\n");
#endif /* __TESTPANICS__ */
			if (kret == KERN_NOT_WAITING) {
				failedwakeup++;
			}
		}
	}

	if (wokenp != NULL)
		*wokenp = numwoken;
	return(failedwakeup);
}
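/*
 * The return value counts wakeups that found no waiter (KERN_NOT_WAITING),
 * i.e. threads that were granted the lock but had already been interrupted
 * or timed out of their block; callers stash that count in kw_pre_intrcount
 * so the grant is replayed when the late waiter re-enters the kernel.
 */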
/* This handles the unlock grants for the next set, on rw_unlock() or on arrival of all preposted waiters */
int
kwq_handle_unlock(ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t rw_wc, uint32_t * updatep, int flags, int * blockp, uint32_t premgen)
{
	uint32_t low_reader, low_writer, low_ywriter, low_lreader, limitrdnum;
	int rwtype, error = 0;
	int longreadset = 0, allreaders, failed;
	uint32_t updatebits = 0, numneeded = 0;
	int prepost = flags & KW_UNLOCK_PREPOST;
	thread_t preth = THREAD_NULL;
	ksyn_waitq_element_t kwe;
	uthread_t uth;
	thread_t th;
	int woken = 0;
	int block = 1;
	uint32_t lowest[KSYN_QUEUE_MAX];	/* no need for upgrade as it is handled separately */
	kern_return_t kret = KERN_SUCCESS;
	ksyn_queue_t kq;
	int curthreturns = 0;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_START, (uint32_t)kwq->kw_addr, mgen, premgen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
	if (prepost != 0) {
		preth = current_thread();
	}

	kq = &kwq->kw_ksynqueues[KSYN_QUEUE_READ];
	kwq->kw_lastseqword = rw_wc;
	kwq->kw_lastunlockseq = (rw_wc & PTHRW_COUNT_MASK);
	kwq->kw_overlapwatch = 0;

	/* upgrade pending */
	if (is_rw_ubit_set(mgen)) {
#if __TESTPANICS__
		panic("NO UBIT SHOULD BE SET\n");
#endif /* __TESTPANICS__ */
		updatebits = PTH_RWL_EBIT | PTH_RWL_KBIT;
		if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0)
			updatebits |= PTH_RWL_WBIT;
		if (kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0)
			updatebits |= PTH_RWL_YBIT;

		if ((flags & KW_UNLOCK_PREPOST_UPGRADE) != 0) {
			/* upgrade thread calling the prepost */
			/* upgrade granted */
			block = 0;
			goto out;
		}

		if (kwq->kw_ksynqueues[KSYN_QUEUE_UPGRADE].ksynq_count > 0) {
			kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_UPGRADE], kwq);

			kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;
			kwe->kwe_psynchretval = updatebits;
			kwe->kwe_kwqqueue = NULL;
			kret = ksyn_wakeup_thread(kwq, kwe);
			if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
				panic("kwq_handle_unlock: panic waking up the upgrade thread \n");
			if (kret == KERN_NOT_WAITING) {
				kwq->kw_pre_intrcount = 1;	/* actually a count */
				kwq->kw_pre_intrseq = mgen;
				kwq->kw_pre_intrretbits = kwe->kwe_psynchretval;
				kwq->kw_pre_intrtype = PTH_RW_TYPE_UPGRADE;
			}
			error = 0;
		} else {
#if __TESTPANICS__
			panic("panic unable to find the upgrade thread\n");
#endif /* __TESTPANICS__ */
			error = 0;
		}
		goto out;
	}

	error = kwq_find_rw_lowest(kwq, flags, premgen, &rwtype, lowest);
#if __TESTPANICS__
	if (error != 0)
		panic("rwunlock: fails to slot next round of threads");
#endif /* __TESTPANICS__ */

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 1, rwtype, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	low_reader = lowest[KSYN_QUEUE_READ];
	low_lreader = lowest[KSYN_QUEUE_LREAD];
	low_writer = lowest[KSYN_QUEUE_WRITER];
	low_ywriter = lowest[KSYN_QUEUE_YWRITER];

	updatebits = 0;
	allreaders = 0;

	switch (rwtype & PTH_RW_TYPE_MASK) {

	case PTH_RW_TYPE_LREAD:
		longreadset = 1;
		/* fall through */
	case PTH_RW_TYPE_READ: {
		/* what about the preflight which is LREAD or READ ?? */
		if ((rwtype & PTH_RWSHFT_TYPE_MASK) != 0) {
			if (rwtype & PTH_RWSHFT_TYPE_WRITE)
				updatebits |= (PTH_RWL_WBIT | PTH_RWL_KBIT);
			if (rwtype & PTH_RWSHFT_TYPE_YWRITE)
				updatebits |= PTH_RWL_YBIT;
		}
		limitrdnum = 0;
		if (longreadset == 0) {
			switch (rwtype & (PTH_RWSHFT_TYPE_WRITE | PTH_RWSHFT_TYPE_YWRITE)) {

			case PTH_RWSHFT_TYPE_WRITE:
				limitrdnum = low_writer;
				if (((rwtype & PTH_RWSHFT_TYPE_LREAD) != 0) &&
					(is_seqlower(low_lreader, limitrdnum) != 0)) {
					longreadset = 1;
				}
				if (((flags & KW_UNLOCK_PREPOST_LREADLOCK) != 0) &&
					(is_seqlower(premgen, limitrdnum) != 0)) {
					longreadset = 1;
				}
				break;

			case PTH_RWSHFT_TYPE_YWRITE:
				/* all read ? */
				if (((rwtype & PTH_RWSHFT_TYPE_LREAD) != 0) &&
					(is_seqlower(low_lreader, low_ywriter) != 0)) {
					longreadset = 1;
				} else
					allreaders = 1;
				if (((flags & KW_UNLOCK_PREPOST_LREADLOCK) != 0) &&
					(is_seqlower(premgen, low_ywriter) != 0)) {
					longreadset = 1;
					allreaders = 0;
				}
				break;

			case (PTH_RWSHFT_TYPE_WRITE | PTH_RWSHFT_TYPE_YWRITE):
				if (is_seqlower(low_ywriter, low_writer) != 0) {
					limitrdnum = low_ywriter;
				} else
					limitrdnum = low_writer;
				if (((rwtype & PTH_RWSHFT_TYPE_LREAD) != 0) &&
					(is_seqlower(low_lreader, limitrdnum) != 0)) {
					longreadset = 1;
				}
				if (((flags & KW_UNLOCK_PREPOST_LREADLOCK) != 0) &&
					(is_seqlower(premgen, limitrdnum) != 0)) {
					longreadset = 1;
				}
				break;

			default:	/* no writers at all */
				if ((rwtype & PTH_RWSHFT_TYPE_LREAD) != 0)
					longreadset = 1;
				else
					allreaders = 1;
				break;
			};
		}

		numneeded = 0;
		if (longreadset != 0) {
			updatebits |= PTH_RWL_LBIT;
			updatebits &= ~PTH_RWL_KBIT;
			if ((flags & (KW_UNLOCK_PREPOST_READLOCK | KW_UNLOCK_PREPOST_LREADLOCK)) != 0)
				numneeded += 1;
			numneeded += kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count;
			numneeded += kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count;
			updatebits += (numneeded << PTHRW_COUNT_SHIFT);
			kwq->kw_overlapwatch = 1;
		} else {
			/* no longread, evaluate number of readers */

			switch (rwtype & (PTH_RWSHFT_TYPE_WRITE | PTH_RWSHFT_TYPE_YWRITE)) {

			case PTH_RWSHFT_TYPE_WRITE:
				limitrdnum = low_writer;
				numneeded = ksyn_queue_count_tolowest(kq, limitrdnum);
				if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) && (is_seqlower(premgen, limitrdnum) != 0)) {
					curthreturns = 1;
					numneeded += 1;
				}
				break;

			case PTH_RWSHFT_TYPE_YWRITE:
				/* all read ? */
				numneeded += kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count;
				if ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) {
					curthreturns = 1;
					numneeded += 1;
				}
				break;

			case (PTH_RWSHFT_TYPE_WRITE | PTH_RWSHFT_TYPE_YWRITE):
				limitrdnum = low_writer;
				numneeded = ksyn_queue_count_tolowest(kq, limitrdnum);
				if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) && (is_seqlower(premgen, limitrdnum) != 0)) {
					curthreturns = 1;
					numneeded += 1;
				}
				break;

			default:	/* no writers at all */
				/* no other waiters, only readers */
				kwq->kw_overlapwatch = 1;
				numneeded += kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count;
				if ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) {
					curthreturns = 1;
					numneeded += 1;
				}
				break;
			};

			updatebits += (numneeded << PTHRW_COUNT_SHIFT);
		}

		kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;

		if (curthreturns != 0) {
			block = 0;
			uth = current_uthread();
			kwe = &uth->uu_kwe;
			kwe->kwe_psynchretval = updatebits;
		}

		failed = ksyn_wakeupreaders(kwq, limitrdnum, longreadset, allreaders, updatebits, &woken);
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 2, woken, failed, 0);
#endif /* _PSYNCH_TRACE_ */

		if (failed != 0) {
			kwq->kw_pre_intrcount = failed;	/* actually a count */
			kwq->kw_pre_intrseq = limitrdnum;
			kwq->kw_pre_intrretbits = updatebits;
			if (longreadset)
				kwq->kw_pre_intrtype = PTH_RW_TYPE_LREAD;
			else
				kwq->kw_pre_intrtype = PTH_RW_TYPE_READ;
		}

		error = 0;

		if ((kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0) && ((updatebits & PTH_RWL_WBIT) == 0))
			panic("kwq_handle_unlock: writer pending but no writebit set %x\n", updatebits);
		}
		break;

	case PTH_RW_TYPE_WRITE: {

		/* only one thread is going to be granted */
		updatebits |= (PTHRW_INC);
		updatebits |= PTH_RWL_KBIT | PTH_RWL_EBIT;

		if (((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0) && (low_writer == premgen)) {
			block = 0;
			if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0)
				updatebits |= PTH_RWL_WBIT;
			if ((rwtype & PTH_RWSHFT_TYPE_YWRITE) != 0)
				updatebits |= PTH_RWL_YBIT;
			th = preth;
			uth = get_bsdthread_info(th);
			kwe = &uth->uu_kwe;
			kwe->kwe_psynchretval = updatebits;
		} else {
			/* we are not granting writelock to the preposting thread */
			kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwq);

			/* if there are writers present or the preposting write thread then W bit is to be set */
			if ((kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0))
				updatebits |= PTH_RWL_WBIT;
			if ((rwtype & PTH_RWSHFT_TYPE_YWRITE) != 0)
				updatebits |= PTH_RWL_YBIT;
			kwe->kwe_psynchretval = updatebits;
			kwe->kwe_kwqqueue = NULL;
			/* setup next in the queue */
			kret = ksyn_wakeup_thread(kwq, kwe);
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 3, kret, 0, 0);
#endif /* _PSYNCH_TRACE_ */
#if __TESTPANICS__
			if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
				panic("kwq_handle_unlock: panic waking up writer\n");
#endif /* __TESTPANICS__ */
			if (kret == KERN_NOT_WAITING) {
				kwq->kw_pre_intrcount = 1;	/* actually a count */
				kwq->kw_pre_intrseq = low_writer;
				kwq->kw_pre_intrretbits = updatebits;
				kwq->kw_pre_intrtype = PTH_RW_TYPE_WRITE;
			}
			error = 0;
		}
		kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;
		if ((updatebits & (PTH_RWL_KBIT | PTH_RWL_EBIT)) != (PTH_RWL_KBIT | PTH_RWL_EBIT))
			panic("kwq_handle_unlock: writer lock granted but no ke set %x\n", updatebits);
		}
		break;

	case PTH_RW_TYPE_YWRITE: {
		/* can reader locks be granted ahead of this write? */
		if ((rwtype & PTH_RWSHFT_TYPE_READ) != 0) {
			if ((rwtype & PTH_RWSHFT_TYPE_MASK) != 0) {
				if (rwtype & PTH_RWSHFT_TYPE_WRITE)
					updatebits |= (PTH_RWL_WBIT | PTH_RWL_KBIT);
				if (rwtype & PTH_RWSHFT_TYPE_YWRITE)
					updatebits |= PTH_RWL_YBIT;
			}

			if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0) {
				/* is lowest reader less than the low writer? */
				if (is_seqlower(low_reader, low_writer) == 0)
					goto yielditis;

				numneeded = ksyn_queue_count_tolowest(kq, low_writer);
				updatebits += (numneeded << PTHRW_COUNT_SHIFT);
				if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) && (is_seqlower(premgen, low_writer) != 0)) {
					uth = current_uthread();
					kwe = &uth->uu_kwe;
					/* add one more for the current thread */
					updatebits += PTHRW_INC;
					kwe->kwe_psynchretval = updatebits;
					block = 0;
				}

				kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;

				/* there will be readers to wake up, no need to check for woken */
				failed = ksyn_wakeupreaders(kwq, low_writer, 0, 0, updatebits, NULL);
#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 2, woken, failed, 0);
#endif /* _PSYNCH_TRACE_ */
				if (failed != 0) {
					kwq->kw_pre_intrcount = failed;	/* actually a count */
					kwq->kw_pre_intrseq = low_writer;
					kwq->kw_pre_intrretbits = updatebits;
					kwq->kw_pre_intrtype = PTH_RW_TYPE_READ;
				}
				error = 0;
			} else {
				/* wakeup all readers */
				numneeded = kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count;
				updatebits += (numneeded << PTHRW_COUNT_SHIFT);
				if ((prepost != 0) && ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0)) {
					uth = current_uthread();
					kwe = &uth->uu_kwe;
					updatebits += PTHRW_INC;
					kwe->kwe_psynchretval = updatebits;
					block = 0;
				}
				kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;
				failed = ksyn_wakeupreaders(kwq, low_writer, 0, 1, updatebits, &woken);
#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 2, woken, failed, 0);
#endif /* _PSYNCH_TRACE_ */
				if (failed != 0) {
					kwq->kw_pre_intrcount = failed;	/* actually a count */
					kwq->kw_pre_intrseq = kwq->kw_highseq;
					kwq->kw_pre_intrretbits = updatebits;
					kwq->kw_pre_intrtype = PTH_RW_TYPE_READ;
				}
				error = 0;
			}
		} else {
yielditis:
			/* no reads, so granting yielding writes */
			updatebits |= PTHRW_INC;
			updatebits |= PTH_RWL_KBIT | PTH_RWL_EBIT;

			if (((flags & KW_UNLOCK_PREPOST_YWRLOCK) != 0) && (low_ywriter == premgen)) {
				/* preposting yielding write thread is being granted exclusive lock */
				block = 0;

				if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0)
					updatebits |= PTH_RWL_WBIT;
				else if (kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0)
					updatebits |= PTH_RWL_YBIT;

				th = preth;
				uth = get_bsdthread_info(th);
				kwe = &uth->uu_kwe;
				kwe->kwe_psynchretval = updatebits;
			} else {
				/* we are granting yield writelock to some other thread */
				kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER], kwq);

				if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0)
					updatebits |= PTH_RWL_WBIT;
				/* if there are ywriters present or the preposting ywrite thread then Y bit is to be set */
				else if ((kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_YWRLOCK) != 0))
					updatebits |= PTH_RWL_YBIT;

				kwe->kwe_psynchretval = updatebits;
				kwe->kwe_kwqqueue = NULL;

				kret = ksyn_wakeup_thread(kwq, kwe);
#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 3, kret, 0, 0);
#endif /* _PSYNCH_TRACE_ */
#if __TESTPANICS__
				if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
					panic("kwq_handle_unlock: panic waking up yielding writer\n");
#endif /* __TESTPANICS__ */
				if (kret == KERN_NOT_WAITING) {
					kwq->kw_pre_intrcount = 1;	/* actually a count */
					kwq->kw_pre_intrseq = low_ywriter;
					kwq->kw_pre_intrretbits = updatebits;
					kwq->kw_pre_intrtype = PTH_RW_TYPE_YWRITE;
				}
				error = 0;
			}
			kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;
		}
		}
		break;

	default:
		panic("rwunlock: invalid type for lock grants");

	};

out:
	if (updatep != NULL)
		*updatep = updatebits;
	if (blockp != NULL)
		*blockp = block;
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_END, (uint32_t)kwq->kw_addr, 0, updatebits, block, 0);
#endif /* _PSYNCH_TRACE_ */
	return(error);
}
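/*
 * Every grant path above composes updatebits the same way: the number of
 * granted threads goes into the sequence field via PTHRW_COUNT_SHIFT (or a
 * single PTHRW_INC for an exclusive grant) and the low bits carry the
 * state flags.  Illustrative composition (not compiled):
 */
#if 0
uint32_t example_bits = (numneeded << PTHRW_COUNT_SHIFT)	/* grant count */
		| PTH_RWL_KBIT | PTH_RWL_EBIT;			/* exclusive grant flags */
#endif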
int
kwq_handle_overlap(ksyn_wait_queue_t kwq, uint32_t lgenval, __unused uint32_t ugenval, uint32_t rw_wc, uint32_t *updatebitsp, __unused int flags, int * blockp)
{
	uint32_t highword = kwq->kw_nextseqword & PTHRW_COUNT_MASK;
	uint32_t lowword = kwq->kw_lastseqword & PTHRW_COUNT_MASK;
	uint32_t val = 0;
	int withinseq;

	/* overlap is set, so no need to check for valid state for overlap */

	withinseq = ((is_seqlower_eq(rw_wc, highword) != 0) || (is_seqhigher_eq(lowword, rw_wc) != 0));

	if (withinseq != 0) {
		if ((kwq->kw_nextseqword & PTH_RWL_LBIT) == 0) {
			/* if no writers ahead, overlap granted */
			if ((lgenval & PTH_RWL_WBIT) == 0) {
				goto grantoverlap;
			}
		} else {
			/* Lbit is set, and writers ahead does not count */
			goto grantoverlap;
		}
	}

	*blockp = 1;
	return(0);

grantoverlap:
	/* increase the next expected seq by one */
	kwq->kw_nextseqword += PTHRW_INC;
	/* set count by one & bits from the nextseq and add M bit */
	val = PTHRW_INC;
	val |= ((kwq->kw_nextseqword & PTHRW_BIT_MASK) | PTH_RWL_MBIT);
	*updatebitsp = val;
	*blockp = 0;
	return(1);
}
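/*
 * "Within the window" above means the arriving sequence falls between the
 * last serviced unlock (kw_lastseqword) and the next expected one
 * (kw_nextseqword); only then may an overlapping read grant piggyback on
 * the outstanding grant without a full unlock handling pass.
 */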
/* handle downgrade actions */
int
kwq_handle_downgrade(ksyn_wait_queue_t kwq, uint32_t mgen, __unused int flags, __unused uint32_t premgen, __unused int * blockp)
{
	uint32_t updatebits, lowriter = 0;
	int longreadset, allreaders, count;

	/* can handle downgrade now */
	updatebits = mgen;

	longreadset = 0;
	allreaders = 0;
	if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count > 0) {
		lowriter = kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_firstnum;
		if (kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count > 0) {
			if (is_seqlower(kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_firstnum, lowriter) != 0)
				longreadset = 1;
		}
	} else {
		if (kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count > 0) {
			lowriter = kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_firstnum;
			if (kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count > 0) {
				if (is_seqlower(kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_firstnum, lowriter) != 0)
					longreadset = 1;
			}
		} else {
			allreaders = 1;
		}
	}

	count = ksyn_wakeupreaders(kwq, lowriter, longreadset, allreaders, updatebits, NULL);
	if (count != 0) {
		kwq->kw_pre_limrd = count;
		kwq->kw_pre_limrdseq = lowriter;
		kwq->kw_pre_limrdbits = lowriter;
		/* need to handle prepost */
	}
	return(0);
}
/************* Indiv queue support routines ************************/
void
ksyn_queue_init(ksyn_queue_t kq)
{
	TAILQ_INIT(&kq->ksynq_kwelist);
	kq->ksynq_count = 0;
	kq->ksynq_firstnum = 0;
	kq->ksynq_lastnum = 0;
}
int
ksyn_queue_insert(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t mgen, struct uthread * uth, ksyn_waitq_element_t kwe, int fit)
{
	uint32_t lockseq = mgen & PTHRW_COUNT_MASK;
	ksyn_waitq_element_t q_kwe, r_kwe;
	int res = 0;
	uthread_t nuth = NULL;

	if (kq->ksynq_count == 0) {
		TAILQ_INSERT_HEAD(&kq->ksynq_kwelist, kwe, kwe_list);
		kq->ksynq_firstnum = lockseq;
		kq->ksynq_lastnum = lockseq;
		goto out;
	}

	if (fit == FIRSTFIT) {
		/* TBD: if retry bit is set for mutex, add it to the head */
		/* firstfit, arriving order */
		TAILQ_INSERT_TAIL(&kq->ksynq_kwelist, kwe, kwe_list);
		if (is_seqlower(lockseq, kq->ksynq_firstnum) != 0)
			kq->ksynq_firstnum = lockseq;
		if (is_seqhigher(lockseq, kq->ksynq_lastnum) != 0)
			kq->ksynq_lastnum = lockseq;
		goto out;
	}

	if ((lockseq == kq->ksynq_firstnum) || (lockseq == kq->ksynq_lastnum)) {
		/* During prepost when a thread is getting cancelled, we could have two with same seq */
		if (kwe->kwe_flags == KWE_THREAD_PREPOST) {
			q_kwe = ksyn_queue_find_seq(kwq, kq, lockseq, 0);
			if ((q_kwe != NULL) && ((nuth = (uthread_t)q_kwe->kwe_uth) != NULL) &&
				((nuth->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL)) {
				TAILQ_INSERT_TAIL(&kq->ksynq_kwelist, kwe, kwe_list);
				goto out;
			} else {
				__FAILEDUSERTEST__("ksyn_queue_insert: two threads with same lockseq ");
				res = EBUSY;
				goto out1;
			}
		} else {
			__FAILEDUSERTEST__("ksyn_queue_insert: two threads with same lockseq ");
			res = EBUSY;
			goto out1;
		}
	}

	/* check for next seq one */
	if (is_seqlower(kq->ksynq_lastnum, lockseq) != 0) {
		TAILQ_INSERT_TAIL(&kq->ksynq_kwelist, kwe, kwe_list);
		kq->ksynq_lastnum = lockseq;
		goto out;
	}

	if (is_seqlower(lockseq, kq->ksynq_firstnum) != 0) {
		TAILQ_INSERT_HEAD(&kq->ksynq_kwelist, kwe, kwe_list);
		kq->ksynq_firstnum = lockseq;
		goto out;
	}

	/* go to slow insert mode */
	TAILQ_FOREACH_SAFE(q_kwe, &kq->ksynq_kwelist, kwe_list, r_kwe) {
		if (is_seqhigher(q_kwe->kwe_lockseq, lockseq) != 0) {
			TAILQ_INSERT_BEFORE(q_kwe, kwe, kwe_list);
			goto out;
		}
	}

#if __TESTPANICS__
	panic("failed to insert \n");
#endif /* __TESTPANICS__ */
out:
	if (uth != NULL)
		kwe->kwe_uth = uth;
	kq->ksynq_count++;
	kwq->kw_inqueue++;
	update_low_high(kwq, lockseq);
out1:
	return(res);
}
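/*
 * Two insert policies exist: FIRSTFIT appends in arrival order, while
 * SEQFIT keeps the queue sorted by lock sequence, with fast paths for the
 * head and tail before falling back to the linear scan above.
 */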
ksyn_waitq_element_t
ksyn_queue_removefirst(ksyn_queue_t kq, ksyn_wait_queue_t kwq)
{
	ksyn_waitq_element_t kwe = NULL;
	ksyn_waitq_element_t q_kwe;
	uint32_t curseq;

	if (kq->ksynq_count != 0) {
		kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
		TAILQ_REMOVE(&kq->ksynq_kwelist, kwe, kwe_list);
		curseq = kwe->kwe_lockseq & PTHRW_COUNT_MASK;
		kq->ksynq_count--;
		kwq->kw_inqueue--;

		if (kq->ksynq_count != 0) {
			q_kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
			kq->ksynq_firstnum = (q_kwe->kwe_lockseq & PTHRW_COUNT_MASK);
		} else {
			kq->ksynq_firstnum = 0;
			kq->ksynq_lastnum = 0;
		}
		if (kwq->kw_inqueue == 0) {
			kwq->kw_lowseq = 0;
			kwq->kw_highseq = 0;
		} else {
			if (kwq->kw_lowseq == curseq)
				kwq->kw_lowseq = find_nextlowseq(kwq);
			if (kwq->kw_highseq == curseq)
				kwq->kw_highseq = find_nexthighseq(kwq);
		}
	}
	return(kwe);
}
void
ksyn_queue_removeitem(ksyn_wait_queue_t kwq, ksyn_queue_t kq, ksyn_waitq_element_t kwe)
{
	ksyn_waitq_element_t q_kwe;
	uint32_t curseq;

	if (kq->ksynq_count > 0) {
		TAILQ_REMOVE(&kq->ksynq_kwelist, kwe, kwe_list);
		kq->ksynq_count--;
		if (kq->ksynq_count != 0) {
			q_kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
			kq->ksynq_firstnum = (q_kwe->kwe_lockseq & PTHRW_COUNT_MASK);
			q_kwe = TAILQ_LAST(&kq->ksynq_kwelist, ksynq_kwelist_head);
			kq->ksynq_lastnum = (q_kwe->kwe_lockseq & PTHRW_COUNT_MASK);
		} else {
			kq->ksynq_firstnum = 0;
			kq->ksynq_lastnum = 0;
		}
		kwq->kw_inqueue--;
		curseq = kwe->kwe_lockseq & PTHRW_COUNT_MASK;
		if (kwq->kw_inqueue == 0) {
			kwq->kw_lowseq = 0;
			kwq->kw_highseq = 0;
		} else {
			if (kwq->kw_lowseq == curseq)
				kwq->kw_lowseq = find_nextlowseq(kwq);
			if (kwq->kw_highseq == curseq)
				kwq->kw_highseq = find_nexthighseq(kwq);
		}
		kwe->kwe_kwqqueue = NULL;
	}
}
/* find the element at the given sequence and optionally remove it from the queue */
ksyn_waitq_element_t
ksyn_queue_find_seq(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t seq, int remove)
{
	ksyn_waitq_element_t q_kwe, r_kwe;

	/* TBD: bail out if higher seq is seen */
	/* case where wrap in the tail of the queue exists */
	TAILQ_FOREACH_SAFE(q_kwe, &kq->ksynq_kwelist, kwe_list, r_kwe) {
		if ((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK) == seq) {
			if (remove != 0)
				ksyn_queue_removeitem(kwq, kq, q_kwe);
			return(q_kwe);
		}
	}
	return(NULL);
}
/* find the thread at the target sequence (or a broadcast/prepost at or above) */
ksyn_waitq_element_t
ksyn_queue_find_cvpreposeq(ksyn_queue_t kq, uint32_t cgen)
{
	ksyn_waitq_element_t q_kwe, r_kwe;
	uint32_t lgen = (cgen & PTHRW_COUNT_MASK);

	/* case where wrap in the tail of the queue exists */
	TAILQ_FOREACH_SAFE(q_kwe, &kq->ksynq_kwelist, kwe_list, r_kwe) {

		/* skip the lower entries */
		if (is_seqlower((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK), cgen) != 0)
			continue;

		switch (q_kwe->kwe_flags) {

		case KWE_THREAD_INWAIT:
			/* an in-wait thread only matches the exact target sequence */
			if ((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK) != lgen)
				break;
			else
				return(q_kwe);

		case KWE_THREAD_BROADCAST:
		case KWE_THREAD_PREPOST:
			return(q_kwe);
		}
	}
	return(NULL);
}
/* look for a thread at or below uptoseq, preferring an exact match at signalseq */
ksyn_waitq_element_t
ksyn_queue_find_signalseq(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t uptoseq, uint32_t signalseq)
{
	ksyn_waitq_element_t q_kwe, r_kwe, t_kwe = NULL;

	/* case where wrap in the tail of the queue exists */
	TAILQ_FOREACH_SAFE(q_kwe, &kq->ksynq_kwelist, kwe_list, r_kwe) {

		switch (q_kwe->kwe_flags) {

		case KWE_THREAD_PREPOST:
			if (is_seqhigher((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK), uptoseq))
				return t_kwe;
			/* fall thru */

		case KWE_THREAD_BROADCAST:
			/* match any prepost at our same uptoseq or any broadcast above */
			if (is_seqlower((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK), uptoseq))
				continue;
			return q_kwe;

		case KWE_THREAD_INWAIT:
			/*
			 * Match any (non-cancelled) thread at or below our upto sequence -
			 * but prefer an exact match to our signal sequence (if present) to
			 * keep exact matches happening.
			 */
			if (is_seqhigher((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK), uptoseq))
				return t_kwe;

			if (q_kwe->kwe_kwqqueue == kwq) {
				uthread_t ut = q_kwe->kwe_uth;
				if ((ut->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) != UT_CANCEL) {
					/* if equal or higher than our signal sequence, return this one */
					if (is_seqhigher_eq((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK), signalseq))
						return q_kwe;

					/* otherwise, just remember this eligible thread and move on */
					if (t_kwe == NULL)
						t_kwe = q_kwe;
				}
			}
			break;

		default:
			panic("ksyn_queue_find_signalseq(): unknown wait queue element type (%d)\n", q_kwe->kwe_flags);
			break;
		}
	}
	return t_kwe;
}
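/*
 * Search summary: a prepost above uptoseq ends the scan (returning any
 * remembered candidate), a broadcast at or above uptoseq matches
 * immediately, and for in-wait threads an entry at or above signalseq
 * is returned at once while the first merely-eligible thread is kept
 * in t_kwe as a fallback.
 */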
int
ksyn_queue_move_tofree(ksyn_wait_queue_t ckwq, ksyn_queue_t kq, uint32_t upto, ksyn_queue_t kfreeq, int all, int release)
{
	ksyn_waitq_element_t kwe;
	int count = 0;
	uint32_t tseq = upto & PTHRW_COUNT_MASK;
#if _PSYNCH_TRACE_
	uthread_t ut;
#endif /* _PSYNCH_TRACE_ */

	ksyn_queue_init(kfreeq);

	/* free all the entries, must be only fakes.. */
	kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
	while (kwe != NULL) {
		if ((all == 0) && (is_seqhigher((kwe->kwe_lockseq & PTHRW_COUNT_MASK), tseq) != 0))
			break;
		if (kwe->kwe_flags == KWE_THREAD_INWAIT) {
			/*
			 * This scenario is typically noticed when the cvar is
			 * reinited and the new waiters are waiting. We can
			 * return them as spurious wait so the cvar state gets
			 * reset correctly.
			 */
#if _PSYNCH_TRACE_
			ut = (uthread_t)kwe->kwe_uth;
#endif /* _PSYNCH_TRACE_ */

			/* skip canceled ones */
			/* wake the rest */
			ksyn_queue_removeitem(ckwq, kq, kwe);
			/* set M bit to indicate to waking CV to return Inc val */
			kwe->kwe_psynchretval = PTHRW_INC | (PTH_RWS_CV_MBIT | PTH_RWL_MTX_WAIT);
			kwe->kwe_kwqqueue = NULL;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVHBROAD | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xcafecaf3, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
#endif /* _PSYNCH_TRACE_ */
			(void)ksyn_wakeup_thread(ckwq, kwe);
		} else {
			ksyn_queue_removeitem(ckwq, kq, kwe);
			TAILQ_INSERT_TAIL(&kfreeq->ksynq_kwelist, kwe, kwe_list);
			ckwq->kw_fakecount--;
			count++;
		}
		kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
	}

	if ((release != 0) && (count != 0)) {
		kwe = TAILQ_FIRST(&kfreeq->ksynq_kwelist);
		while (kwe != NULL) {
			TAILQ_REMOVE(&kfreeq->ksynq_kwelist, kwe, kwe_list);
			zfree(kwe_zone, kwe);
			kwe = TAILQ_FIRST(&kfreeq->ksynq_kwelist);
		}
	}
	return(count);
}
/*************************************************************************/

void
update_low_high(ksyn_wait_queue_t kwq, uint32_t lockseq)
{
	if (kwq->kw_inqueue == 1) {
		kwq->kw_lowseq = lockseq;
		kwq->kw_highseq = lockseq;
	} else {
		if (is_seqlower(lockseq, kwq->kw_lowseq) != 0)
			kwq->kw_lowseq = lockseq;
		if (is_seqhigher(lockseq, kwq->kw_highseq) != 0)
			kwq->kw_highseq = lockseq;
	}
}
uint32_t
find_nextlowseq(ksyn_wait_queue_t kwq)
{
	uint32_t numbers[KSYN_QUEUE_MAX];
	uint32_t lowest;
	int count = 0, i;

	for (i = 0; i < KSYN_QUEUE_MAX; i++) {
		if (kwq->kw_ksynqueues[i].ksynq_count != 0) {
			numbers[count] = kwq->kw_ksynqueues[i].ksynq_firstnum;
			count++;
		}
	}

	if (count == 0)
		return(0);

	lowest = numbers[0];
	for (i = 1; i < count; i++) {
		if (is_seqlower(numbers[i], lowest) != 0)
			lowest = numbers[i];
	}
	return(lowest);
}
uint32_t
find_nexthighseq(ksyn_wait_queue_t kwq)
{
	uint32_t numbers[KSYN_QUEUE_MAX];
	uint32_t highest;
	int count = 0, i;

	for (i = 0; i < KSYN_QUEUE_MAX; i++) {
		if (kwq->kw_ksynqueues[i].ksynq_count != 0) {
			numbers[count] = kwq->kw_ksynqueues[i].ksynq_lastnum;
			count++;
		}
	}

	if (count == 0)
		return(0);

	highest = numbers[0];
	for (i = 1; i < count; i++) {
		if (is_seqhigher(numbers[i], highest) != 0)
			highest = numbers[i];
	}
	return(highest);
}
/* sequence compares with wraparound: live values stay within half the range */
int
is_seqlower(uint32_t x, uint32_t y)
{
	if (x < y) {
		if ((y - x) < (PTHRW_MAX_READERS / 2))
			return(1);
	} else {
		if ((x - y) > (PTHRW_MAX_READERS / 2))
			return(1);
	}
	return(0);
}

int
is_seqlower_eq(uint32_t x, uint32_t y)
{
	if (x == y)
		return(1);
	else
		return(is_seqlower(x, y));
}

int
is_seqhigher(uint32_t x, uint32_t y)
{
	if (x > y) {
		if ((x - y) < (PTHRW_MAX_READERS / 2))
			return(1);
	} else {
		if ((y - x) > (PTHRW_MAX_READERS / 2))
			return(1);
	}
	return(0);
}

int
is_seqhigher_eq(uint32_t x, uint32_t y)
{
	if (x == y)
		return(1);
	else
		return(is_seqhigher(x, y));
}
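/*
 * Worked example of the wraparound-aware comparisons (hypothetical
 * values; only assumes the live window is smaller than half the
 * sequence space, per the PTHRW_MAX_READERS/2 tests above):
 */
#if 0
	/* y has wrapped past x: the large unsigned difference flags it */
	assert(is_seqlower(0xfffffe00, 0x00000200) == 1);
	assert(is_seqhigher(0x00000200, 0xfffffe00) == 1);
	/* ordinary, non-wrapping case */
	assert(is_seqlower(0x00000100, 0x00000200) == 1);
	assert(is_seqhigher_eq(0x00000200, 0x00000200) == 1);
#endif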
int
find_diff(uint32_t upto, uint32_t lowest)
{
	uint32_t diff;

	if (upto == lowest)
		return(0);

	if (is_seqlower(upto, lowest) != 0)
		diff = diff_genseq(lowest, upto);
	else
		diff = diff_genseq(upto, lowest);
	diff = (diff >> PTHRW_COUNT_SHIFT);
	return(diff);
}
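/*
 * Example (hypothetical values, assuming PTHRW_COUNT_SHIFT is 8 so that
 * sequences advance by PTHRW_INC == 0x100 per waiter): for upto = 0x500
 * and lowest = 0x200, diff_genseq() gives 0x300 and the shift converts
 * that sequence-space distance into a count of 3 waiters.
 */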
int
find_seq_till(ksyn_wait_queue_t kwq, uint32_t upto, uint32_t nwaiters, uint32_t *countp)
{
	int i;
	uint32_t count = 0;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_FSEQTILL | DBG_FUNC_START, 0, 0, upto, nwaiters, 0);
#endif /* _PSYNCH_TRACE_ */

	for (i = 0; i < KSYN_QUEUE_MAX; i++) {
		count += ksyn_queue_count_tolowest(&kwq->kw_ksynqueues[i], upto);
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_FSEQTILL | DBG_FUNC_NONE, 0, 1, i, count, 0);
#endif /* _PSYNCH_TRACE_ */
		if (count >= nwaiters) {
			break;
		}
	}

	if (countp != NULL) {
		*countp = count;
	}
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_FSEQTILL | DBG_FUNC_END, 0, 0, count, nwaiters, 0);
#endif /* _PSYNCH_TRACE_ */
	if (count == 0)
		return(0);
	else if (count >= nwaiters)
		return(1);
	else
		return(0);
}
/* count the elements in the queue at or below the upto sequence */
uint32_t
ksyn_queue_count_tolowest(ksyn_queue_t kq, uint32_t upto)
{
	ksyn_waitq_element_t kwe, newkwe;
	uint32_t count = 0;
	uint32_t curval;

	/* if nothing or the first num is greater than upto, return none */
	if ((kq->ksynq_count == 0) || (is_seqhigher(kq->ksynq_firstnum, upto) != 0))
		return(0);
	if (upto == kq->ksynq_firstnum)
		return(1);

	TAILQ_FOREACH_SAFE(kwe, &kq->ksynq_kwelist, kwe_list, newkwe) {
		curval = (kwe->kwe_lockseq & PTHRW_COUNT_MASK);
		if (upto == curval) {
			count++;
			break;
		} else if (is_seqhigher(curval, upto) != 0) {
			break;
		} else {
			/* still at or below upto */
			count++;
		}
	}
	return(count);
}
/* handles the cond broadcast of a cvar; reports woken-thread count and syscall return bits via updatep */
void
ksyn_handle_cvbroad(ksyn_wait_queue_t ckwq, uint32_t upto, uint32_t * updatep)
{
	kern_return_t kret;
	ksyn_queue_t kq;
	ksyn_waitq_element_t kwe, newkwe;
	uint32_t updatebits = 0;
	struct ksyn_queue kfreeq;
	uthread_t ut;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVHBROAD | DBG_FUNC_START, 0xcbcbcbc2, upto, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */

	ksyn_queue_init(&kfreeq);
	kq = &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER];

retry:
	TAILQ_FOREACH_SAFE(kwe, &kq->ksynq_kwelist, kwe_list, newkwe) {

		if (is_seqhigher((kwe->kwe_lockseq & PTHRW_COUNT_MASK), upto))	/* outside our range */
			break;

		/* now handle the one we found (inside the range) */
		switch (kwe->kwe_flags) {

		case KWE_THREAD_INWAIT:
			ut = (uthread_t)kwe->kwe_uth;

			/* skip canceled ones */
			if (kwe->kwe_kwqqueue != ckwq ||
			    (ut->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL)
				break;

			/* wake the rest */
			ksyn_queue_removeitem(ckwq, kq, kwe);
			kwe->kwe_psynchretval = PTH_RWL_MTX_WAIT;
			kwe->kwe_kwqqueue = NULL;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVHBROAD | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xcafecaf2, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
#endif /* _PSYNCH_TRACE_ */
			kret = ksyn_wakeup_thread(ckwq, kwe);
#if __TESTPANICS__
			if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
				panic("ksyn_handle_cvbroad: panic waking up thread\n");
#endif /* __TESTPANICS__ */
			updatebits += PTHRW_INC;
			break;

		case KWE_THREAD_BROADCAST:
		case KWE_THREAD_PREPOST:
			ksyn_queue_removeitem(ckwq, kq, kwe);
			TAILQ_INSERT_TAIL(&kfreeq.ksynq_kwelist, kwe, kwe_list);
			ckwq->kw_fakecount--;
			break;

		default:
			panic("unknown kweflags\n");
			break;
		}
	}

	/* Need to enter a broadcast in the queue (if not already at L == S) */

	if ((ckwq->kw_lword & PTHRW_COUNT_MASK) != (ckwq->kw_sword & PTHRW_COUNT_MASK)) {

		newkwe = TAILQ_FIRST(&kfreeq.ksynq_kwelist);
		if (newkwe == NULL) {
			ksyn_wqunlock(ckwq);
			newkwe = (ksyn_waitq_element_t)zalloc(kwe_zone);
			TAILQ_INSERT_TAIL(&kfreeq.ksynq_kwelist, newkwe, kwe_list);
			ksyn_wqlock(ckwq);
			goto retry;
		}

		TAILQ_REMOVE(&kfreeq.ksynq_kwelist, newkwe, kwe_list);
		bzero(newkwe, sizeof(struct ksyn_waitq_element));
		newkwe->kwe_kwqqueue = ckwq;
		newkwe->kwe_flags = KWE_THREAD_BROADCAST;
		newkwe->kwe_lockseq = upto;
		newkwe->kwe_count = 0;
		newkwe->kwe_uth = NULL;
		newkwe->kwe_psynchretval = 0;

#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVHBROAD | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xfeedfeed, upto, 0, 0);
#endif /* _PSYNCH_TRACE_ */

		(void)ksyn_queue_insert(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], upto, NULL, newkwe, SEQFIT);
		ckwq->kw_fakecount++;
	}

	/* free up any remaining things stumbled across above */
	kwe = TAILQ_FIRST(&kfreeq.ksynq_kwelist);
	while (kwe != NULL) {
		TAILQ_REMOVE(&kfreeq.ksynq_kwelist, kwe, kwe_list);
		zfree(kwe_zone, kwe);
		kwe = TAILQ_FIRST(&kfreeq.ksynq_kwelist);
	}

	if (updatep != NULL)
		*updatep = updatebits;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVHBROAD | DBG_FUNC_END, 0xeeeeeeed, updatebits, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
}
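/*
 * Design note: zalloc() may block, so when a broadcast element is
 * needed and the local free list is empty, the wait-queue lock is
 * dropped for the allocation and the scan restarts from 'retry' --
 * the queue can change while unlocked, so nothing from the earlier
 * pass is reused.
 */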
void
ksyn_cvupdate_fixup(ksyn_wait_queue_t ckwq, uint32_t *updatep, ksyn_queue_t kfreeq, int release)
{
	uint32_t updatebits = 0;

	if (updatep != NULL)
		updatebits = *updatep;
	if ((ckwq->kw_lword & PTHRW_COUNT_MASK) == (ckwq->kw_sword & PTHRW_COUNT_MASK)) {
		updatebits |= PTH_RWS_CV_CBIT;
		if (ckwq->kw_inqueue != 0) {
			/* FREE THE QUEUE */
			ksyn_queue_move_tofree(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], ckwq->kw_lword, kfreeq, 0, release);
#if __TESTPANICS__
			if (ckwq->kw_inqueue != 0)
				panic("ksyn_cvupdate_fixup: L == S, but entries in queue beyond S");
#endif /* __TESTPANICS__ */
		}
		ckwq->kw_lword = ckwq->kw_uword = ckwq->kw_sword = 0;
		ckwq->kw_kflags |= KSYN_KWF_ZEROEDOUT;
	} else if ((ckwq->kw_inqueue != 0) && (ckwq->kw_fakecount == ckwq->kw_inqueue)) {
		/* only fake entries are present in the queue */
		updatebits |= PTH_RWS_CV_PBIT;
	}
	if (updatep != NULL)
		*updatep = updatebits;
}
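/*
 * PTH_RWS_CV_CBIT reports a clean condvar (L == S with the kernel state
 * zeroed); PTH_RWS_CV_PBIT reports that only fake (broadcast/prepost)
 * entries remain queued. Userland is expected to fold these bits into
 * its copy of the condvar sequence words.
 */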
void
psynch_zoneinit(void)
{
	kwq_zone = (zone_t)zinit(sizeof(struct ksyn_wait_queue), 8192 * sizeof(struct ksyn_wait_queue), 4096, "ksyn_waitqueue zone");
	kwe_zone = (zone_t)zinit(sizeof(struct ksyn_waitq_element), 8192 * sizeof(struct ksyn_waitq_element), 4096, "ksyn_waitq_element zone");
}