/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995-2005 Apple Computer, Inc. All Rights Reserved */
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/systm.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>
#include <sys/pthread_internal.h>

#include <mach/mach_types.h>
#include <mach/vm_prot.h>
#include <mach/semaphore.h>
#include <mach/sync_policy.h>
#include <mach/task.h>
#include <kern/kern_types.h>
#include <kern/task.h>
#include <kern/clock.h>
#include <mach/kern_return.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/thread_call.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/processor.h>
#include <kern/affinity.h>
#include <kern/wait_queue.h>
#include <kern/mach_param.h>
#include <mach/mach_vm.h>
#include <mach/mach_param.h>
#include <mach/thread_policy.h>
#include <mach/message.h>
#include <mach/port.h>
#include <vm/vm_protos.h>
#include <vm/vm_map.h>
#include <mach/vm_region.h>

#include <libkern/OSAtomic.h>

#include <pexpert/pexpert.h>
#define __PSYNCH_DEBUG__ 0		/* debug panic actions */
#define _PSYNCH_TRACE_ 1		/* kdebug trace */

#define __TESTMODE__ 2			/* 0 - return error on user error conditions */
					/* 1 - log error on user error conditions */
					/* 2 - abort caller on user error conditions */
					/* 3 - panic on user error conditions */
static int __test_panics__;
static int __test_aborts__;
static int __test_prints__;

static inline void __FAILEDUSERTEST__(const char *str)
{
	proc_t p;

	if (__test_panics__ != 0)
		panic(str);

	if (__test_aborts__ != 0 || __test_prints__ != 0)
		p = current_proc();

	if (__test_prints__ != 0)
		printf("PSYNCH: pid[%d]: %s\n", p->p_pid, str);

	if (__test_aborts__ != 0)
		psignal(p, SIGABRT);
}
#if _PSYNCH_TRACE_
#define _PSYNCH_TRACE_MLWAIT	0x9000000
#define _PSYNCH_TRACE_MLDROP	0x9000004
#define _PSYNCH_TRACE_CVWAIT	0x9000008
#define _PSYNCH_TRACE_CVSIGNAL	0x900000c
#define _PSYNCH_TRACE_CVBROAD	0x9000010
#define _PSYNCH_TRACE_KMDROP	0x9000014
#define _PSYNCH_TRACE_RWRDLOCK	0x9000018
#define _PSYNCH_TRACE_RWLRDLOCK	0x900001c
#define _PSYNCH_TRACE_RWWRLOCK	0x9000020
#define _PSYNCH_TRACE_RWYWRLOCK	0x9000024
#define _PSYNCH_TRACE_RWUPGRADE	0x9000028
#define _PSYNCH_TRACE_RWDOWNGRADE	0x900002c
#define _PSYNCH_TRACE_RWUNLOCK	0x9000030
#define _PSYNCH_TRACE_RWUNLOCK2	0x9000034
#define _PSYNCH_TRACE_RWHANDLEU	0x9000038
#define _PSYNCH_TRACE_FSEQTILL	0x9000040
#define _PSYNCH_TRACE_CLRPRE	0x9000044
#define _PSYNCH_TRACE_CVHBROAD	0x9000048
#define _PSYNCH_TRACE_CVSEQ	0x900004c
#define _PSYNCH_TRACE_THWAKEUP	0x9000050

#define _PSYNCH_TRACE_UM_LOCK	0x9000060
#define _PSYNCH_TRACE_UM_UNLOCK	0x9000064
#define _PSYNCH_TRACE_UM_MHOLD	0x9000068
#define _PSYNCH_TRACE_UM_MDROP	0x900006c
#define _PSYNCH_TRACE_UM_CVWAIT	0x9000070
#define _PSYNCH_TRACE_UM_CVSIG	0x9000074
#define _PSYNCH_TRACE_UM_CVBRD	0x9000078

proc_t pthread_debug_proc = PROC_NULL;

static inline void __PTHREAD_TRACE_DEBUG(uint32_t debugid, uintptr_t arg1,
		uintptr_t arg2,
		uintptr_t arg3,
		uintptr_t arg4,
		uintptr_t arg5)
{
	proc_t p = current_proc();

	if ((pthread_debug_proc != NULL) && (p == pthread_debug_proc))
		KERNEL_DEBUG_CONSTANT(debugid, arg1, arg2, arg3, arg4, arg5);
}
#endif /* _PSYNCH_TRACE_ */
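
/*
 * The trace points above fire only for the process recorded in
 * pthread_debug_proc (set externally; not shown in this file), which lets
 * kdebug output be scoped to a single process under test.
 */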
#define ECVCERORR	256
#define ECVPERORR	512

lck_mtx_t * pthread_list_mlock;
#define PTHHASH(addr)	(&pthashtbl[(addr) & pthhash])
extern LIST_HEAD(pthhashhead, ksyn_wait_queue) *pth_glob_hashtbl;
struct pthhashhead * pth_glob_hashtbl;

LIST_HEAD(, ksyn_wait_queue) pth_free_list;
int num_total_kwq = 0;		/* number of kwq in use currently */
int num_infreekwq = 0;		/* number of kwq in free list */
int num_freekwq = 0;		/* number of kwq actually freed from the free list */
int num_reusekwq = 0;		/* number of kwq pulled back for reuse from free list */
int num_addedfreekwq = 0;	/* number of added free kwq from the last instance */
int num_lastfreekwqcount = 0;	/* the free count from the last time */

static int PTH_HASHSIZE = 100;

static zone_t kwq_zone;		/* zone for allocation of ksyn_queue */
static zone_t kwe_zone;		/* zone for allocation of ksyn_waitq_element */
struct ksyn_queue {
	TAILQ_HEAD(ksynq_kwelist_head, ksyn_waitq_element) ksynq_kwelist;
	uint32_t	ksynq_count;		/* number of entries in queue */
	uint32_t	ksynq_firstnum;		/* lowest seq in queue */
	uint32_t	ksynq_lastnum;		/* highest seq in queue */
};
typedef struct ksyn_queue * ksyn_queue_t;

#define KSYN_QUEUE_READ		0
#define KSYN_QUEUE_LREAD	1
#define KSYN_QUEUE_WRITER	2
#define KSYN_QUEUE_YWRITER	3
#define KSYN_QUEUE_UPGRADE	4
#define KSYN_QUEUE_MAX		5
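
/*
 * Each kernel wait queue keeps one ksyn_queue per waiter class - plain
 * readers, long readers, writers, yielding writers and upgraders - indexed
 * by the KSYN_QUEUE_* constants above (see kw_ksynqueues[KSYN_QUEUE_MAX]
 * in struct ksyn_wait_queue below).
 */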
struct ksyn_wait_queue {
	LIST_ENTRY(ksyn_wait_queue) kw_hash;
	LIST_ENTRY(ksyn_wait_queue) kw_list;
	user_addr_t kw_addr;		/* user address of the synchronizer (referenced by the trace points below) */
	uint64_t kw_object;		/* object backing in shared mode */
	uint64_t kw_offset;		/* offset inside the object in shared mode */
	int	kw_flags;		/* mutex, cvar options/flags */
	int	kw_pflags;		/* flags under listlock protection */
	struct timeval kw_ts;		/* timeval need for upkeep before free */
	int	kw_iocount;		/* inuse reference */
	int	kw_dropcount;		/* current users unlocking... */
	uint32_t kw_kflags;		/* kernel state flags (KSYN_KWF_*) */

	int	kw_type;		/* queue type like mutex, cvar, etc */
	uint32_t kw_inqueue;		/* num of waiters held */
	uint32_t kw_fakecount;		/* number of error/prepost fakes */
	uint32_t kw_highseq;		/* highest seq in the queue */
	uint32_t kw_lowseq;		/* lowest seq in the queue */
	uint32_t kw_lword;		/* L value from userland */
	uint32_t kw_uword;		/* U word value from userland */
	uint32_t kw_sword;		/* S word value from userland */
	uint32_t kw_lastunlockseq;	/* the last seq that unlocked */
/* for CV to be used as the seq kernel has seen so far */
#define kw_cvkernelseq kw_lastunlockseq
	uint32_t kw_lastseqword;	/* the last seq that unlocked */
/* for mutex and cvar we need to track I bit values */
	uint32_t kw_nextseqword;	/* the last seq that unlocked; with num of waiters */
#define kw_initrecv kw_nextseqword	/* number of incoming waiters with Ibit seen so far */
	uint32_t kw_overlapwatch;	/* chance for overlaps */
#define kw_initcount kw_overlapwatch	/* number of incoming waiters with Ibit expected */
	uint32_t kw_initcountseq;	/* highest seq with Ibit on for mutex and cvar */
	uint32_t kw_pre_rwwc;		/* prepost count */
	uint32_t kw_pre_lockseq;	/* prepost target seq */
	uint32_t kw_pre_sseq;		/* prepost target sword, in cvar used for mutexowned */
	uint32_t kw_pre_intrcount;	/* prepost of missed wakeup due to intrs */
	uint32_t kw_pre_intrseq;	/* prepost of missed wakeup limit seq */
	uint32_t kw_pre_intrretbits;	/* return bits value for missed wakeup threads */
	uint32_t kw_pre_intrtype;	/* type of failed wakeups */

	struct ksyn_queue kw_ksynqueues[KSYN_QUEUE_MAX];	/* queues to hold threads */
	lck_mtx_t kw_lock;		/* mutex lock protecting this structure */
};
typedef struct ksyn_wait_queue * ksyn_wait_queue_t;
#define PTHRW_INC		0x100
#define PTHRW_BIT_MASK		0x000000ff

#define PTHRW_COUNT_SHIFT	8
#define PTHRW_COUNT_MASK	0xffffff00
#define PTHRW_MAX_READERS	0xffffff00
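
/*
 * Layout of the 32-bit synchronizer words: the low 8 bits (PTHRW_BIT_MASK)
 * carry state flags and the upper 24 bits (PTHRW_COUNT_MASK) carry a
 * generation count that advances by PTHRW_INC per waiter. For example, a
 * word of 0x00000503 encodes flag bits 0x03 at generation count 0x500.
 */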
/* New model bits on Lword */
#define PTH_RWL_KBIT	0x01	/* users cannot acquire in user mode */
#define PTH_RWL_EBIT	0x02	/* exclusive lock in progress */
#define PTH_RWL_WBIT	0x04	/* write waiters pending in kernel */
#define PTH_RWL_PBIT	0x04	/* prepost (cv) pending in kernel */
#define PTH_RWL_YBIT	0x08	/* yielding write waiters pending in kernel */
#define PTH_RWL_RETRYBIT 0x08	/* mutex retry wait */
#define PTH_RWL_LBIT	0x10	/* long read in progress */
#define PTH_RWL_MTXNONE	0x10	/* indicates the cvwait does not have mutex held */
#define PTH_RWL_UBIT	0x20	/* upgrade request pending */
#define PTH_RWL_MTX_WAIT 0x20	/* in cvar in mutex wait */
#define PTH_RWL_RBIT	0x40	/* reader pending in kernel (not used) */
#define PTH_RWL_MBIT	0x40	/* overlapping grants from kernel */
#define PTH_RWL_TRYLKBIT 0x40	/* trylock attempt (mutex only) */
#define PTH_RWL_IBIT	0x80	/* lock reset, held until first successful unlock */


/* UBIT values for mutex, cvar */
#define PTH_RWU_SBIT	0x01
#define PTH_RWU_BBIT	0x02

#define PTHRW_RWL_INIT	PTH_RWL_IBIT	/* reset state on the lock bits (L) */

/* New model bits on Sword */
#define PTH_RWS_SBIT	0x01	/* kernel transition seq not set yet */
#define PTH_RWS_IBIT	0x02	/* Sequence is not set on return from kernel */
#define PTH_RWS_CV_CBIT	PTH_RWS_SBIT	/* kernel has cleared all info w.r.s.t CV */
#define PTH_RWS_CV_PBIT	PTH_RWS_IBIT	/* kernel has prepost/fake structs only, no waiters */
#define PTH_RWS_CV_MBIT	PTH_RWL_MBIT	/* to indicate prepost return */
#define PTH_RWS_WSVBIT	0x04	/* save W bit */
#define PTH_RWS_USVBIT	0x08	/* save U bit */
#define PTH_RWS_YSVBIT	0x10	/* save Y bit */
#define PTHRW_RWS_INIT	PTH_RWS_SBIT	/* reset state on the lock bits (S) */
#define PTHRW_RWS_SAVEMASK (PTH_RWS_WSVBIT|PTH_RWS_USVBIT|PTH_RWS_YSVBIT)	/* save bits mask */
#define PTHRW_SW_Reset_BIT_MASK	0x000000fe	/* remove S bit and get rest of the bits */


#define PTHRW_UN_BIT_MASK 0x000000bf	/* remove overlap bit */


#define PTHREAD_MTX_TID_SWITCHING (uint64_t)-1

/* new L word defns */
#define is_rwl_readinuser(x) ((((x) & (PTH_RWL_UBIT | PTH_RWL_KBIT)) == 0)||(((x) & PTH_RWL_LBIT) != 0))
#define is_rwl_ebit_set(x) (((x) & PTH_RWL_EBIT) != 0)
#define is_rwl_lbit_set(x) (((x) & PTH_RWL_LBIT) != 0)
#define is_rwl_readoverlap(x) (((x) & PTH_RWL_MBIT) != 0)
#define is_rw_ubit_set(x) (((x) & PTH_RWL_UBIT) != 0)

/* S word checks */
#define is_rws_setseq(x) (((x) & PTH_RWS_SBIT))
#define is_rws_setunlockinit(x) (((x) & PTH_RWS_IBIT))

/* first contended seq that kernel sees */
#define KW_MTXFIRST_KSEQ	0x200
#define KW_CVFIRST_KSEQ		1
#define KW_RWFIRST_KSEQ		0x200
int is_seqlower(uint32_t x, uint32_t y);
int is_seqlower_eq(uint32_t x, uint32_t y);
int is_seqhigher(uint32_t x, uint32_t y);
int is_seqhigher_eq(uint32_t x, uint32_t y);
int find_diff(uint32_t upto, uint32_t lowest);
static inline int diff_genseq(uint32_t x, uint32_t y) {
	if (x > y) {
		return(x - y);
	} else {
		return((PTHRW_MAX_READERS - y) + x + PTHRW_INC);
	}
}
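
/*
 * diff_genseq measures the distance between two generation counts across
 * the wraparound. E.g. diff_genseq(0x0, 0xffffff00) takes the else branch
 * and yields (0xffffff00 - 0xffffff00) + 0x0 + 0x100 == 0x100, i.e. one
 * generation apart across the wrap.
 */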
#define TID_ZERO (uint64_t)0

/* bits needed in handling the rwlock unlock */
#define PTH_RW_TYPE_READ	0x01
#define PTH_RW_TYPE_LREAD	0x02
#define PTH_RW_TYPE_WRITE	0x04
#define PTH_RW_TYPE_YWRITE	0x08
#define PTH_RW_TYPE_UPGRADE	0x10
#define PTH_RW_TYPE_MASK	0xff
#define PTH_RW_TYPE_SHIFT	8

#define PTH_RWSHFT_TYPE_READ	0x0100
#define PTH_RWSHFT_TYPE_LREAD	0x0200
#define PTH_RWSHFT_TYPE_WRITE	0x0400
#define PTH_RWSHFT_TYPE_YWRITE	0x0800
#define PTH_RWSHFT_TYPE_MASK	0xff00
/*
 * Mutex protocol attributes
 */
#define PTHREAD_PRIO_NONE		0
#define PTHREAD_PRIO_INHERIT		1
#define PTHREAD_PRIO_PROTECT		2
#define PTHREAD_PROTOCOL_FLAGS_MASK	0x3

/*
 * Mutex type attributes
 */
#define PTHREAD_MUTEX_NORMAL		0
#define PTHREAD_MUTEX_ERRORCHECK	4
#define PTHREAD_MUTEX_RECURSIVE		8
#define PTHREAD_MUTEX_DEFAULT		PTHREAD_MUTEX_NORMAL
#define PTHREAD_TYPE_FLAGS_MASK		0xc

/*
 * Mutex pshared attributes
 */
#define PTHREAD_PROCESS_SHARED		0x10
#define PTHREAD_PROCESS_PRIVATE		0x20
#define PTHREAD_PSHARED_FLAGS_MASK	0x30

/*
 * Mutex policy attributes
 */
#define _PTHREAD_MUTEX_POLICY_NONE		0
#define _PTHREAD_MUTEX_POLICY_FAIRSHARE		0x040	/* 1 */
#define _PTHREAD_MUTEX_POLICY_FIRSTFIT		0x080	/* 2 */
#define _PTHREAD_MUTEX_POLICY_REALTIME		0x0c0	/* 3 */
#define _PTHREAD_MUTEX_POLICY_ADAPTIVE		0x100	/* 4 */
#define _PTHREAD_MUTEX_POLICY_PRIPROTECT	0x140	/* 5 */
#define _PTHREAD_MUTEX_POLICY_PRIINHERIT	0x180	/* 6 */
#define PTHREAD_POLICY_FLAGS_MASK		0x1c0

#define _PTHREAD_MTX_OPT_HOLDLOCK	0x200
#define _PTHREAD_MTX_OPT_NOMTX		0x400

#define _PTHREAD_MTX_OPT_NOTIFY		0x1000
#define _PTHREAD_MTX_OPT_MUTEX		0x2000	/* this is a mutex type */

#define _PTHREAD_RWLOCK_UPGRADE_TRY	0x10000
/* pflags */
#define KSYN_WQ_INLIST	1
#define KSYN_WQ_INHASH	2
#define KSYN_WQ_SHARED	4
#define KSYN_WQ_WAITING	8	/* threads waiting for this wq to be available */
#define KSYN_WQ_FLIST	0x10	/* in free list to be freed after a short delay */

/* kflags */
#define KSYN_KWF_INITCLEARED	1	/* the init status found and preposts cleared */
#define KSYN_KWF_ZEROEDOUT	2	/* the lword, etc are inited to 0 */

#define KSYN_CLEANUP_DEADLINE 10
int psynch_cleanupset;
thread_call_t psynch_thcall;
#define KSYN_WQTYPE_INWAIT	0x1000
#define KSYN_WQTYPE_INDROP	0x2000
#define KSYN_WQTYPE_MTX		0x1
#define KSYN_WQTYPE_CVAR	0x2
#define KSYN_WQTYPE_RWLOCK	0x4
#define KSYN_WQTYPE_SEMA	0x8
#define KSYN_WQTYPE_BARR	0x10
#define KSYN_WQTYPE_MASK	0x00ff

#define KSYN_MTX_MAX 0x0fffffff
#define KSYN_WQTYPE_MUTEXDROP	(KSYN_WQTYPE_INDROP | KSYN_WQTYPE_MTX)

#define KW_UNLOCK_PREPOST		0x01
#define KW_UNLOCK_PREPOST_UPGRADE	0x02
#define KW_UNLOCK_PREPOST_DOWNGRADE	0x04
#define KW_UNLOCK_PREPOST_READLOCK	0x08
#define KW_UNLOCK_PREPOST_LREADLOCK	0x10
#define KW_UNLOCK_PREPOST_WRLOCK	0x20
#define KW_UNLOCK_PREPOST_YWRLOCK	0x40
#define CLEAR_PREPOST_BITS(kwq)  {\
			kwq->kw_pre_lockseq = 0; \
			kwq->kw_pre_sseq = PTHRW_RWS_INIT; \
			kwq->kw_pre_rwwc = 0; \
			}

#define CLEAR_INITCOUNT_BITS(kwq)  {\
			kwq->kw_initcount = 0; \
			kwq->kw_initrecv = 0; \
			kwq->kw_initcountseq = 0; \
			}

#define CLEAR_INTR_PREPOST_BITS(kwq)  {\
			kwq->kw_pre_intrcount = 0; \
			kwq->kw_pre_intrseq = 0; \
			kwq->kw_pre_intrretbits = 0; \
			kwq->kw_pre_intrtype = 0; \
			}

#define CLEAR_REINIT_BITS(kwq)  {\
			if ((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_CVAR) { \
				if ((kwq->kw_inqueue != 0) && (kwq->kw_inqueue != kwq->kw_fakecount)) \
					panic("CV:entries in queue during reinit %d:%d\n",kwq->kw_inqueue, kwq->kw_fakecount); \
			}; \
			if ((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_RWLOCK) { \
				kwq->kw_nextseqword = PTHRW_RWS_INIT; \
				kwq->kw_overlapwatch = 0; \
			}; \
			kwq->kw_pre_lockseq = 0; \
			kwq->kw_pre_rwwc = 0; \
			kwq->kw_pre_sseq = PTHRW_RWS_INIT; \
			kwq->kw_lastunlockseq = PTHRW_RWL_INIT; \
			kwq->kw_lastseqword = PTHRW_RWS_INIT; \
			kwq->kw_pre_intrcount = 0; \
			kwq->kw_pre_intrseq = 0; \
			kwq->kw_pre_intrretbits = 0; \
			kwq->kw_pre_intrtype = 0; \
			kwq->kw_lword = 0; \
			kwq->kw_uword = 0; \
			kwq->kw_sword = PTHRW_RWS_INIT; \
			}
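
/*
 * A "prepost" records a grant that reached the kernel before its matching
 * waiter: kw_pre_rwwc counts pending grants, kw_pre_lockseq names the
 * sequence being granted and kw_pre_sseq carries the S word to hand back.
 * The kw_pre_intr* fields play the same role for wakeups lost to an
 * interrupted waiter. The CLEAR_* macros above reset this state once the
 * prepost is consumed.
 */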
void pthread_list_lock(void);
void pthread_list_unlock(void);
void pthread_list_lock_spin(void);
void pthread_list_lock_convert_spin(void);
void ksyn_wqlock(ksyn_wait_queue_t kwq);
void ksyn_wqunlock(ksyn_wait_queue_t kwq);
ksyn_wait_queue_t ksyn_wq_hash_lookup(user_addr_t mutex, proc_t p, int flags, uint64_t object, uint64_t offset);
int ksyn_wqfind(user_addr_t mutex, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, uint64_t tid, int flags, int wqtype, ksyn_wait_queue_t * wq);
void ksyn_wqrelease(ksyn_wait_queue_t mkwq, ksyn_wait_queue_t ckwq, int qfreenow, int wqtype);
extern int ksyn_findobj(uint64_t mutex, uint64_t * object, uint64_t * offset);
static void UPDATE_CVKWQ(ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, uint64_t tid, int wqtype);
extern thread_t port_name_to_thread(mach_port_name_t port_name);

kern_return_t ksyn_block_thread_locked(ksyn_wait_queue_t kwq, uint64_t abstime, ksyn_waitq_element_t kwe, int log, thread_continue_t, void * parameter);
kern_return_t ksyn_wakeup_thread(ksyn_wait_queue_t kwq, ksyn_waitq_element_t kwe);
void ksyn_freeallkwe(ksyn_queue_t kq);

uint32_t psynch_mutexdrop_internal(ksyn_wait_queue_t kwq, uint32_t lkseq, uint32_t ugen, int flags);
int kwq_handle_unlock(ksyn_wait_queue_t, uint32_t mgen, uint32_t rw_wc, uint32_t * updatep, int flags, int *blockp, uint32_t premgen);

void ksyn_queue_init(ksyn_queue_t kq);
int ksyn_queue_insert(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t mgen, struct uthread * uth, ksyn_waitq_element_t kwe, int firstfit);
ksyn_waitq_element_t ksyn_queue_removefirst(ksyn_queue_t kq, ksyn_wait_queue_t kwq);
void ksyn_queue_removeitem(ksyn_wait_queue_t kwq, ksyn_queue_t kq, ksyn_waitq_element_t kwe);
int ksyn_queue_move_tofree(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t upto, ksyn_queue_t freeq, int all, int release);
void update_low_high(ksyn_wait_queue_t kwq, uint32_t lockseq);
uint32_t find_nextlowseq(ksyn_wait_queue_t kwq);
uint32_t find_nexthighseq(ksyn_wait_queue_t kwq);

int find_seq_till(ksyn_wait_queue_t kwq, uint32_t upto, uint32_t nwaiters, uint32_t *countp);
uint32_t ksyn_queue_count_tolowest(ksyn_queue_t kq, uint32_t upto);

ksyn_waitq_element_t ksyn_queue_find_cvpreposeq(ksyn_queue_t kq, uint32_t cgen);
uint32_t ksyn_queue_cvcount_entries(ksyn_queue_t kq, uint32_t upto, uint32_t from, int * numwaitersp, int * numintrp, int * numprepop);
void ksyn_handle_cvbroad(ksyn_wait_queue_t ckwq, uint32_t upto, uint32_t *updatep);
void ksyn_cvupdate_fixup(ksyn_wait_queue_t ckwq, uint32_t *updatep, ksyn_queue_t kfreeq, int release);
ksyn_waitq_element_t ksyn_queue_find_signalseq(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t toseq, uint32_t lockseq);
ksyn_waitq_element_t ksyn_queue_find_threadseq(ksyn_wait_queue_t ckwq, ksyn_queue_t kq, thread_t th, uint32_t toseq);
void psynch_cvcontinue(void *, wait_result_t);
void psynch_mtxcontinue(void *, wait_result_t);

int ksyn_wakeupreaders(ksyn_wait_queue_t kwq, uint32_t limitread, int longreadset, int allreaders, uint32_t updatebits, int * wokenp);
int kwq_find_rw_lowest(ksyn_wait_queue_t kwq, int flags, uint32_t premgen, int * type, uint32_t lowest[]);
ksyn_waitq_element_t ksyn_queue_find_seq(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t seq, int remove);
int kwq_handle_overlap(ksyn_wait_queue_t kwq, uint32_t lgenval, uint32_t ugenval, uint32_t rw_wc, uint32_t *updatebitsp, int flags, int * blockp);
int kwq_handle_downgrade(ksyn_wait_queue_t kwq, uint32_t mgen, int flags, uint32_t premgen, int * blockp);
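
/*
 * Fold the L, U and S words supplied by userland into the kernel's copy,
 * never letting the kernel's view of a sequence move backwards; a queue
 * whose words were zeroed out (KSYN_KWF_ZEROEDOUT) simply adopts the
 * incoming values.
 */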
static void
UPDATE_CVKWQ(ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, __unused uint64_t tid, __unused int wqtype)
{
	if ((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_CVAR) {
		if ((kwq->kw_kflags & KSYN_KWF_ZEROEDOUT) != 0) {
			/* the values of L,U and S are cleared out due to L==S in previous transition */
			kwq->kw_lword = mgen;
			kwq->kw_uword = ugen;
			kwq->kw_sword = rw_wc;
			kwq->kw_kflags &= ~KSYN_KWF_ZEROEDOUT;
		} else {
			if (is_seqhigher((mgen & PTHRW_COUNT_MASK), (kwq->kw_lword & PTHRW_COUNT_MASK)) != 0)
				kwq->kw_lword = mgen;
			if (is_seqhigher((ugen & PTHRW_COUNT_MASK), (kwq->kw_uword & PTHRW_COUNT_MASK)) != 0)
				kwq->kw_uword = ugen;
			if ((rw_wc & PTH_RWS_CV_CBIT) != 0) {
				if (is_seqlower(kwq->kw_cvkernelseq, (rw_wc & PTHRW_COUNT_MASK)) != 0) {
					kwq->kw_cvkernelseq = (rw_wc & PTHRW_COUNT_MASK);
				}
				if (is_seqhigher((rw_wc & PTHRW_COUNT_MASK), (kwq->kw_sword & PTHRW_COUNT_MASK)) != 0)
					kwq->kw_sword = rw_wc;
			}
		}
	}
}
/* to protect the hashes, iocounts, freelist */
void
pthread_list_lock(void)
{
	lck_mtx_lock(pthread_list_mlock);
}

void
pthread_list_lock_spin(void)
{
	lck_mtx_lock_spin(pthread_list_mlock);
}

void
pthread_list_lock_convert_spin(void)
{
	lck_mtx_convert_spin(pthread_list_mlock);
}


void
pthread_list_unlock(void)
{
	lck_mtx_unlock(pthread_list_mlock);
}

/* to protect the indiv queue */
void
ksyn_wqlock(ksyn_wait_queue_t kwq)
{
	lck_mtx_lock(&kwq->kw_lock);
}

void
ksyn_wqunlock(ksyn_wait_queue_t kwq)
{
	lck_mtx_unlock(&kwq->kw_lock);
}
/* routine to drop the mutex unlocks, used both for mutexunlock system call and drop during cond wait */
uint32_t
psynch_mutexdrop_internal(ksyn_wait_queue_t kwq, uint32_t lkseq, uint32_t ugen, int flags)
{
	uint32_t nextgen, low_writer, updatebits, returnbits = 0;
	int firstfit = flags & _PTHREAD_MUTEX_POLICY_FIRSTFIT;
	ksyn_waitq_element_t kwe = NULL;
	kern_return_t kret = KERN_SUCCESS;

	nextgen = (ugen + PTHRW_INC);

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_START, (uint32_t)kwq->kw_addr, lkseq, ugen, flags, 0);
#endif /* _PSYNCH_TRACE_ */

	ksyn_wqlock(kwq);

redrive:

	if (kwq->kw_inqueue != 0) {
		updatebits = (kwq->kw_highseq & PTHRW_COUNT_MASK) | (PTH_RWL_EBIT | PTH_RWL_KBIT);
		kwq->kw_lastunlockseq = (ugen & PTHRW_COUNT_MASK);
		if (firstfit != 0) {
			/* first fit, pick any one */
			kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwq);
			kwe->kwe_psynchretval = updatebits;
			kwe->kwe_kwqqueue = NULL;

#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xcafecaf1, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
#endif /* _PSYNCH_TRACE_ */

			kret = ksyn_wakeup_thread(kwq, kwe);
#if __TESTPANICS__
			if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
				panic("psynch_mutexdrop_internal: panic unable to wakeup firstfit mutex thread\n");
#endif /* __TESTPANICS__ */
			if (kret == KERN_NOT_WAITING)
				goto redrive;
		} else {
			/* handle fairshare */
			low_writer = kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_firstnum;
			low_writer &= PTHRW_COUNT_MASK;

			if (low_writer == nextgen) {
				/* next seq to be granted found */
				kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwq);

				/* since the grant could be cv, make sure mutex wait is set in case the thread interrupted out */
				kwe->kwe_psynchretval = updatebits | PTH_RWL_MTX_WAIT;
				kwe->kwe_kwqqueue = NULL;

#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xcafecaf2, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
#endif /* _PSYNCH_TRACE_ */

				kret = ksyn_wakeup_thread(kwq, kwe);
#if __TESTPANICS__
				if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
					panic("psynch_mutexdrop_internal: panic unable to wakeup fairshare mutex thread\n");
#endif /* __TESTPANICS__ */
				if (kret == KERN_NOT_WAITING) {
					/* interrupt post */
					kwq->kw_pre_intrcount = 1;
					kwq->kw_pre_intrseq = nextgen;
					kwq->kw_pre_intrretbits = updatebits;
					kwq->kw_pre_intrtype = PTH_RW_TYPE_WRITE;
#if _PSYNCH_TRACE_
					__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfafafaf1, nextgen, kwq->kw_pre_intrretbits, 0);
#endif /* _PSYNCH_TRACE_ */
				}

			} else if (is_seqhigher(low_writer, nextgen) != 0) {
				kwq->kw_pre_rwwc++;

				if (kwq->kw_pre_rwwc > 1) {
					__FAILEDUSERTEST__("psynch_mutexdrop_internal: prepost more than one (1)\n");
					goto out;
				}

				kwq->kw_pre_lockseq = (nextgen & PTHRW_COUNT_MASK);
#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef1, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
			} else {

				//__FAILEDUSERTEST__("psynch_mutexdrop_internal: FS mutex unlock sequence higher than the lowest one is queue\n");

				kwe = ksyn_queue_find_seq(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], (nextgen & PTHRW_COUNT_MASK), 1);
				if (kwe != NULL) {
					/* next seq to be granted found */
					/* since the grant could be cv, make sure mutex wait is set in case the thread interrupted out */
					kwe->kwe_psynchretval = updatebits | PTH_RWL_MTX_WAIT;
					kwe->kwe_kwqqueue = NULL;
#if _PSYNCH_TRACE_
					__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xcafecaf3, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
#endif /* _PSYNCH_TRACE_ */
					kret = ksyn_wakeup_thread(kwq, kwe);
#if __TESTPANICS__
					if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
						panic("psynch_mutexdrop_internal: panic unable to wakeup fairshare mutex thread\n");
#endif /* __TESTPANICS__ */
					if (kret == KERN_NOT_WAITING)
						goto redrive;
				} else {
					/* next seq to be granted not found, prepost */
					kwq->kw_pre_rwwc++;

					if (kwq->kw_pre_rwwc > 1) {
						__FAILEDUSERTEST__("psynch_mutexdrop_internal: prepost more than one (2)\n");
						goto out;
					}

					kwq->kw_pre_lockseq = (nextgen & PTHRW_COUNT_MASK);
#if _PSYNCH_TRACE_
					__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
				}
			}
		}
	} else {

		/* if firstfit the last one could be spurious */
		if (firstfit == 0) {
			kwq->kw_lastunlockseq = (ugen & PTHRW_COUNT_MASK);
			kwq->kw_pre_rwwc++;

			if (kwq->kw_pre_rwwc > 1) {
				__FAILEDUSERTEST__("psynch_mutexdrop_internal: prepost more than one (3)\n");
				goto out;
			}

			kwq->kw_pre_lockseq = (nextgen & PTHRW_COUNT_MASK);
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef3, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
		} else {
			/* first fit case */
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef3, kwq->kw_lastunlockseq, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
			kwq->kw_lastunlockseq = (ugen & PTHRW_COUNT_MASK);
			/* not set or the new lkseq is higher */
			if ((kwq->kw_pre_rwwc == 0) || (is_seqlower(kwq->kw_pre_lockseq, lkseq) == 0))
				kwq->kw_pre_lockseq = (lkseq & PTHRW_COUNT_MASK);
			kwq->kw_pre_rwwc = 1;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef3, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */

			/* indicate prepost content in kernel */
			returnbits = lkseq | PTH_RWL_PBIT;
		}
	}

out:
	ksyn_wqunlock(kwq);

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_END, (uint32_t)kwq->kw_addr, 0xeeeeeeed, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	ksyn_wqrelease(kwq, NULL, 1, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_MTX));
	return(returnbits);
}
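
/*
 * Summary of the drop policy above: a firstfit mutex wakes any queued
 * writer; a fairshare mutex grants strictly in sequence order, recording a
 * prepost in the kw_pre_* fields when the thread owed the next grant has
 * not reached the kernel yet.
 */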
/*
 * psynch_mutexwait: This system call is used for contended psynch mutexes to block.
 */
int
psynch_mutexwait(__unused proc_t p, struct psynch_mutexwait_args * uap, uint32_t * retval)
{
	user_addr_t mutex = uap->mutex;
	uint32_t mgen = uap->mgen;
	uint32_t ugen = uap->ugen;
	uint64_t tid = uap->tid;
	int flags = uap->flags;
	ksyn_wait_queue_t kwq;
	int error = 0;
	int ins_flags, retry;
	uthread_t uth;
	int firstfit = flags & _PTHREAD_MUTEX_POLICY_FIRSTFIT;
	uint32_t lockseq, updatebits = 0;
	ksyn_waitq_element_t kwe;
	kern_return_t kret;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_START, (uint32_t)mutex, mgen, ugen, flags, 0);
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_NONE, (uint32_t)mutex, mgen, ugen, (uint32_t)tid, 0);
#endif /* _PSYNCH_TRACE_ */

	uth = current_uthread();

	kwe = &uth->uu_kwe;
	kwe->kwe_lockseq = uap->mgen;
	kwe->kwe_uth = uth;
	kwe->kwe_psynchretval = 0;
	kwe->kwe_kwqqueue = NULL;
	lockseq = (uap->mgen & PTHRW_COUNT_MASK);

	if (firstfit == 0) {
		ins_flags = SEQFIT;
	} else {
		/* first fit */
		ins_flags = FIRSTFIT;
	}

	error = ksyn_wqfind(mutex, mgen, ugen, 0, tid, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_MTX), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_END, (uint32_t)mutex, 1, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	ksyn_wqlock(kwq);

	if ((mgen & PTH_RWL_RETRYBIT) != 0) {
		retry = 1;
		mgen &= ~PTH_RWL_RETRYBIT;
	}

	/* handle first the missed wakeups */
	if ((kwq->kw_pre_intrcount != 0) &&
		((kwq->kw_pre_intrtype == PTH_RW_TYPE_WRITE)) &&
		(is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {
		kwq->kw_pre_intrcount--;
		kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
		if (kwq->kw_pre_intrcount == 0)
			CLEAR_INTR_PREPOST_BITS(kwq);

		*retval = kwe->kwe_psynchretval;
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_NONE, (uint32_t)mutex, 0xfafafaf1, kwe->kwe_psynchretval, kwq->kw_pre_intrcount, 0);
#endif /* _PSYNCH_TRACE_ */
		ksyn_wqunlock(kwq);
		goto out;
	}

	if ((kwq->kw_pre_rwwc != 0) && ((ins_flags == FIRSTFIT) || ((lockseq & PTHRW_COUNT_MASK) == (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)))) {
		/* got preposted lock */
		kwq->kw_pre_rwwc--;
		if (kwq->kw_pre_rwwc == 0) {
			CLEAR_PREPOST_BITS(kwq);
			kwq->kw_lastunlockseq = PTHRW_RWL_INIT;
			if (kwq->kw_inqueue == 0) {
				updatebits = lockseq | (PTH_RWL_KBIT | PTH_RWL_EBIT);
			} else {
				updatebits = (kwq->kw_highseq & PTHRW_COUNT_MASK) | (PTH_RWL_KBIT | PTH_RWL_EBIT);
			}
			updatebits &= ~PTH_RWL_MTX_WAIT;

			kwe->kwe_psynchretval = updatebits;

			if (updatebits == 0) {
				__FAILEDUSERTEST__("psynch_mutexwait(prepost): returning 0 lseq in mutexwait with no EBIT \n");
			}
			*retval = updatebits;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef1, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
			ksyn_wqunlock(kwq);
			goto out;
		} else {
			__FAILEDUSERTEST__("psynch_mutexwait: more than one prepost\n");
			kwq->kw_pre_lockseq += PTHRW_INC; /* look for next one */
			ksyn_wqunlock(kwq);
			error = EINVAL;
			goto out;
		}
	}

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfeedfeed, mgen, ins_flags, 0);
#endif /* _PSYNCH_TRACE_ */

	error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], mgen, uth, kwe, ins_flags);
	if (error != 0) {
		ksyn_wqunlock(kwq);
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_END, (uint32_t)mutex, 2, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
		goto out;
	}

	kret = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0, psynch_mtxcontinue, (void *)kwq);

	psynch_mtxcontinue((void *)kwq, kret);

	/* not expected to return from unix_syscall_return */
	panic("psynch_mtxcontinue returned from unix_syscall_return");

out:
	ksyn_wqrelease(kwq, NULL, 1, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_MTX));
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_END, (uint32_t)mutex, 0xeeeeeeed, updatebits, error, 0);
#endif /* _PSYNCH_TRACE_ */

	return(error);
}
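
/*
 * ksyn_block_thread_locked parks the caller with psynch_mtxcontinue as its
 * continuation; when the thread is woken it re-enters below and finishes
 * the syscall through unix_syscall_return, which does not return to its
 * caller (hence the panic above if it ever does).
 */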
void
psynch_mtxcontinue(void * parameter, wait_result_t result)
{
	int error = 0;
	uint32_t updatebits = 0;
	uthread_t uth = current_uthread();
	ksyn_wait_queue_t kwq = (ksyn_wait_queue_t)parameter;
	ksyn_waitq_element_t kwe;

	kwe = &uth->uu_kwe;

	switch (result) {
		case THREAD_TIMED_OUT:
			error = ETIMEDOUT;
			break;
		case THREAD_INTERRUPTED:
			error = EINTR;
			break;
		default:
			error = 0;
			break;
	}

	if (error != 0) {
		ksyn_wqlock(kwq);

#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 3, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
		if (kwe->kwe_kwqqueue != NULL)
			ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwe);
		ksyn_wqunlock(kwq);
	} else {
		updatebits = kwe->kwe_psynchretval;
		updatebits &= ~PTH_RWL_MTX_WAIT;
		uth->uu_rval[0] = updatebits;

		if (updatebits == 0)
			__FAILEDUSERTEST__("psynch_mutexwait: returning 0 lseq in mutexwait with no EBIT \n");
	}
	ksyn_wqrelease(kwq, NULL, 1, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_MTX));
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_END, (uint32_t)kwq->kw_addr, 0xeeeeeeed, updatebits, error, 0);
#endif /* _PSYNCH_TRACE_ */

	unix_syscall_return(error);
}
/*
 * psynch_mutexdrop: This system call is used for unlock postings on contended psynch mutexes.
 */
int
psynch_mutexdrop(__unused proc_t p, struct psynch_mutexdrop_args * uap, uint32_t * retval)
{
	user_addr_t mutex = uap->mutex;
	uint32_t mgen = uap->mgen;
	uint32_t ugen = uap->ugen;
	uint64_t tid = uap->tid;
	int flags = uap->flags;
	ksyn_wait_queue_t kwq;
	uint32_t updateval;
	int error = 0;

	error = ksyn_wqfind(mutex, mgen, ugen, 0, tid, flags, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_MTX), &kwq);
	if (error != 0)
		return(error);

	updateval = psynch_mutexdrop_internal(kwq, mgen, ugen, flags);
	/* drops the kwq reference */

	*retval = updateval;
	return(0);
}
/*
 * psynch_cvbroad: This system call is used for broadcast posting on blocked waiters of psynch cvars.
 */
int
psynch_cvbroad(__unused proc_t p, struct psynch_cvbroad_args * uap, uint32_t * retval)
{
	user_addr_t cond = uap->cv;
	uint64_t cvlsgen = uap->cvlsgen;
	uint64_t cvudgen = uap->cvudgen;
	uint32_t cgen, cugen, csgen, diffgen;
	uint32_t uptoseq, fromseq;
	int flags = uap->flags;
	ksyn_wait_queue_t ckwq;
	int error = 0;
	uint32_t updatebits = 0;
	uint32_t count;
	struct ksyn_queue kfreeq;

	csgen = (uint32_t)((cvlsgen >> 32) & 0xffffffff);
	cgen = ((uint32_t)(cvlsgen & 0xffffffff));
	cugen = (uint32_t)((cvudgen >> 32) & 0xffffffff);
	diffgen = ((uint32_t)(cvudgen & 0xffffffff));
	count = (diffgen >> PTHRW_COUNT_SHIFT);

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_START, (uint32_t)cond, cgen, cugen, csgen, 0);
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_NONE, (uint32_t)cond, 0xcbcbcbc1, diffgen, flags, 0);
#endif /* _PSYNCH_TRACE_ */

	uptoseq = cgen & PTHRW_COUNT_MASK;
	fromseq = (cugen & PTHRW_COUNT_MASK) + PTHRW_INC;

	if (is_seqhigher(fromseq, uptoseq) || is_seqhigher((csgen & PTHRW_COUNT_MASK), uptoseq)) {
		__FAILEDUSERTEST__("cvbroad: invalid L, U and S values\n");
		return EINVAL;
	}

	if (count > (uint32_t)task_threadmax) {
		__FAILEDUSERTEST__("cvbroad: difference greater than maximum possible thread count\n");
		return EBUSY;
	}

	ckwq = NULL;

	error = ksyn_wqfind(cond, cgen, cugen, csgen, 0, flags, (KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INDROP), &ckwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_END, (uint32_t)cond, 0, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	*retval = 0;

	ksyn_wqlock(ckwq);

	/* update L, U and S... */
	UPDATE_CVKWQ(ckwq, cgen, cugen, csgen, 0, KSYN_WQTYPE_CVAR);

	/* broadcast wakeups/prepost handling */
	ksyn_handle_cvbroad(ckwq, uptoseq, &updatebits);

	/* set C or P bits and free if needed */
	ckwq->kw_sword += (updatebits & PTHRW_COUNT_MASK);
	ksyn_cvupdate_fixup(ckwq, &updatebits, &kfreeq, 1);
	ksyn_wqunlock(ckwq);

	*retval = updatebits;

	ksyn_wqrelease(ckwq, NULL, 1, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_CVAR));
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_END, (uint32_t)cond, 0xeeeeeeed, (uint32_t)*retval, error, 0);
#endif /* _PSYNCH_TRACE_ */

	return(error);
}
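
/*
 * The 64-bit cvlsgen argument to the cvar calls packs the S word in its
 * high half and the L word in its low half; cvudgen likewise packs U above
 * the waiter diff. For example, cvlsgen == 0x0000030000000500ULL unpacks
 * to csgen 0x300 and cgen 0x500.
 */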
ksyn_waitq_element_t
ksyn_queue_find_threadseq(ksyn_wait_queue_t ckwq, __unused ksyn_queue_t kq, thread_t th, uint32_t upto)
{
	uthread_t uth = get_bsdthread_info(th);
	ksyn_waitq_element_t kwe = &uth->uu_kwe;

	if (kwe->kwe_kwqqueue != ckwq ||
	    is_seqhigher((kwe->kwe_lockseq & PTHRW_COUNT_MASK), upto)) {
		/* the thread is not waiting in the cv (or wasn't when the wakeup happened) */
		return NULL;
	}

	return kwe;
}
/*
 * psynch_cvsignal: This system call is used for signalling the blocked waiters of psynch cvars.
 */
int
psynch_cvsignal(__unused proc_t p, struct psynch_cvsignal_args * uap, uint32_t * retval)
{
	user_addr_t cond = uap->cv;
	uint64_t cvlsgen = uap->cvlsgen;
	uint32_t cgen, csgen, signalseq, uptoseq;
	uint32_t cugen = uap->cvugen;
	int threadport = uap->thread_port;
	int flags = uap->flags;
	ksyn_wait_queue_t ckwq = NULL;
	ksyn_waitq_element_t kwe, nkwe = NULL;
	ksyn_queue_t kq;
	int error = 0;
	thread_t th = THREAD_NULL;
	uint32_t updatebits = 0;
	kern_return_t kret;
	struct ksyn_queue kfreeq;


	csgen = (uint32_t)((cvlsgen >> 32) & 0xffffffff);
	cgen = ((uint32_t)(cvlsgen & 0xffffffff));

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_START, (uint32_t)cond, cgen, cugen, threadport, 0);
#endif /* _PSYNCH_TRACE_ */

	uptoseq = cgen & PTHRW_COUNT_MASK;
	signalseq = (cugen & PTHRW_COUNT_MASK) + PTHRW_INC;

	/* validate sane L, U, and S values */
	if (((threadport == 0) && (is_seqhigher(signalseq, uptoseq))) || is_seqhigher((csgen & PTHRW_COUNT_MASK), uptoseq)) {
		__FAILEDUSERTEST__("psync_cvsignal; invalid sequence numbers\n");
		error = EINVAL;
		goto out;
	}

	/* If we are looking for a specific thread, grab a reference for it */
	if (threadport != 0) {
		th = (thread_t)port_name_to_thread((mach_port_name_t)threadport);
		if (th == THREAD_NULL) {
			error = ESRCH;
			goto out;
		}
	}

	error = ksyn_wqfind(cond, cgen, cugen, csgen, 0, flags, (KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INDROP), &ckwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_END, (uint32_t)cond, 0, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
		goto out;
	}

	ksyn_wqlock(ckwq);

	/* update L, U and S... */
	UPDATE_CVKWQ(ckwq, cgen, cugen, csgen, 0, KSYN_WQTYPE_CVAR);

	kq = &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER];

retry:
	/* Only bother if we aren't already balanced */
	if ((ckwq->kw_lword & PTHRW_COUNT_MASK) != (ckwq->kw_sword & PTHRW_COUNT_MASK)) {

		kwe = (th != NULL) ? ksyn_queue_find_threadseq(ckwq, kq, th, uptoseq) :
			ksyn_queue_find_signalseq(ckwq, kq, uptoseq, signalseq);
		if (kwe != NULL) {
			switch (kwe->kwe_flags) {

			case KWE_THREAD_BROADCAST:
				/* broadcasts swallow our signal */
				break;

			case KWE_THREAD_PREPOST:
				/* merge in with existing prepost at our same uptoseq */
				kwe->kwe_count += 1;
				break;

			case KWE_THREAD_INWAIT:
				if (is_seqlower((kwe->kwe_lockseq & PTHRW_COUNT_MASK), signalseq)) {
					/*
					 * A valid thread in our range, but lower than our signal.
					 * Matching it may leave our match with nobody to wake it if/when
					 * it arrives (the signal originally meant for this thread might
					 * not successfully wake it).
					 *
					 * Convert to broadcast - may cause some spurious wakeups
					 * (allowed by spec), but avoids starvation (better choice).
					 */
#if _PSYNCH_TRACE_
					__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xc1c1c1c1, uptoseq, 0, 0);
#endif /* _PSYNCH_TRACE_ */
					ksyn_handle_cvbroad(ckwq, uptoseq, &updatebits);
				} else {
					ksyn_queue_removeitem(ckwq, kq, kwe);
					kwe->kwe_psynchretval = PTH_RWL_MTX_WAIT;
					kwe->kwe_kwqqueue = NULL;
#if _PSYNCH_TRACE_
					__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xcafecaf2, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
#endif /* _PSYNCH_TRACE_ */
					kret = ksyn_wakeup_thread(ckwq, kwe);
#if __TESTPANICS__
					if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
						panic("ksyn_wakeup_thread: panic waking up condition waiter\n");
#endif /* __TESTPANICS__ */
					updatebits += PTHRW_INC;
				}

				ckwq->kw_sword += (updatebits & PTHRW_COUNT_MASK);
				break;

			default:
				panic("unknown kweflags\n");
				break;
			}

		} else if (th != NULL) {
			/*
			 * Could not find the thread, post a broadcast,
			 * otherwise the waiter will be stuck. Use to send
			 * ESRCH here, did lead to rare hangs.
			 */
			ksyn_handle_cvbroad(ckwq, uptoseq, &updatebits);
			ckwq->kw_sword += (updatebits & PTHRW_COUNT_MASK);
		} else if (nkwe == NULL) {
			ksyn_wqunlock(ckwq);
			nkwe = (ksyn_waitq_element_t)zalloc(kwe_zone);
			ksyn_wqlock(ckwq);
			goto retry;

		} else {
			/* no eligible entries - add prepost */
			bzero(nkwe, sizeof(struct ksyn_waitq_element));
			nkwe->kwe_kwqqueue = ckwq;
			nkwe->kwe_flags = KWE_THREAD_PREPOST;
			nkwe->kwe_lockseq = uptoseq;
			nkwe->kwe_count = 1;
			nkwe->kwe_uth = NULL;
			nkwe->kwe_psynchretval = 0;

#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xfeedfefe, uptoseq, 0, 0);
#endif /* _PSYNCH_TRACE_ */

			(void)ksyn_queue_insert(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], uptoseq, NULL, nkwe, SEQFIT);
			ckwq->kw_fakecount++;
			nkwe = NULL;
		}

		/* set C or P bits and free if needed */
		ksyn_cvupdate_fixup(ckwq, &updatebits, &kfreeq, 1);
	}

	ksyn_wqunlock(ckwq);
	if (nkwe != NULL)
		zfree(kwe_zone, nkwe);

	ksyn_wqrelease(ckwq, NULL, 1, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_CVAR));

out:
	if (th != NULL)
		thread_deallocate(th);

	*retval = updatebits;
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_END, (uint32_t)cond, 0xeeeeeeed, updatebits, error, 0);
#endif /* _PSYNCH_TRACE_ */

	return(error);
}
/*
 * psynch_cvwait: This system call is used for psynch cvar waiters to block in kernel.
 */
int
psynch_cvwait(__unused proc_t p, struct psynch_cvwait_args * uap, uint32_t * retval)
{
	user_addr_t cond = uap->cv;
	uint64_t cvlsgen = uap->cvlsgen;
	uint32_t cgen, csgen;
	uint32_t cugen = uap->cvugen;
	user_addr_t mutex = uap->mutex;
	uint64_t mugen = uap->mugen;
	uint32_t mgen, ugen;
	int flags = uap->flags;
	ksyn_wait_queue_t kwq, ckwq;
	int error = 0, local_error = 0;
	uint64_t abstime = 0;
	uint32_t lockseq, updatebits = 0;
	struct timespec ts;
	uthread_t uth;
	ksyn_waitq_element_t kwe, nkwe = NULL;
	struct ksyn_queue *kq, kfreeq;
	kern_return_t kret;

	/* for conformance reasons */
	__pthread_testcancel(0);

	csgen = (uint32_t)((cvlsgen >> 32) & 0xffffffff);
	cgen = ((uint32_t)(cvlsgen & 0xffffffff));
	ugen = (uint32_t)((mugen >> 32) & 0xffffffff);
	mgen = ((uint32_t)(mugen & 0xffffffff));

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_START, (uint32_t)cond, cgen, cugen, csgen, 0);
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_NONE, (uint32_t)mutex, mgen, ugen, flags, 0);
#endif /* _PSYNCH_TRACE_ */

	lockseq = (cgen & PTHRW_COUNT_MASK);

	/*
	 * In cvwait U word can be out of range as cond could be used only for
	 * timeouts. However S word needs to be within bounds and validated at
	 * user level as well.
	 */
	if (is_seqhigher_eq((csgen & PTHRW_COUNT_MASK), lockseq) != 0) {
		__FAILEDUSERTEST__("psync_cvwait; invalid sequence numbers\n");
		return EINVAL;
	}

	ckwq = kwq = NULL;

	error = ksyn_wqfind(cond, cgen, cugen, csgen, 0, flags, KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INWAIT, &ckwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_END, (uint32_t)cond, 1, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	if (mutex != (user_addr_t)0) {
		error = ksyn_wqfind(mutex, mgen, ugen, 0, 0, flags, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_MTX), &kwq);
		if (error != 0) {
			local_error = error;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_END, (uint32_t)mutex, 2, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
			goto out;
		}

		(void)psynch_mutexdrop_internal(kwq, mgen, ugen, flags);
		/* drops kwq reference */
		kwq = NULL;
	}

	if (uap->sec != 0 || (uap->nsec & 0x3fffffff) != 0) {
		ts.tv_sec = uap->sec;
		ts.tv_nsec = (uap->nsec & 0x3fffffff);
		nanoseconds_to_absolutetime((uint64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec, &abstime);
		clock_absolutetime_interval_to_deadline(abstime, &abstime);
	}

	ksyn_wqlock(ckwq);

	/* update L, U and S... */
	UPDATE_CVKWQ(ckwq, cgen, cugen, csgen, 0, KSYN_WQTYPE_CVAR);

	/* Look for the sequence for prepost (or conflicting thread) */
	kq = &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER];
	kwe = ksyn_queue_find_cvpreposeq(kq, lockseq);

	if (kwe != NULL) {
		switch (kwe->kwe_flags) {

		case KWE_THREAD_INWAIT:
			ksyn_wqunlock(ckwq);
			__FAILEDUSERTEST__("cvwait: thread entry with same sequence already present\n");
			local_error = EBUSY;
			goto out;

		case KWE_THREAD_BROADCAST:
			break;

		case KWE_THREAD_PREPOST:
			if ((kwe->kwe_lockseq & PTHRW_COUNT_MASK) == lockseq) {
				/* we can safely consume a reference, so do so */
				if (--kwe->kwe_count == 0) {
					ksyn_queue_removeitem(ckwq, kq, kwe);
					ckwq->kw_fakecount--;
					nkwe = kwe;
				}
			} else {
				/*
				 * consuming a prepost higher than our lock sequence is valid, but
				 * can leave the higher thread without a match. Convert the entry
				 * to a broadcast to compensate for this.
				 */
#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xc2c2c2c2, kwe->kwe_lockseq, 0, 0);
#endif /* _PSYNCH_TRACE_ */

				ksyn_handle_cvbroad(ckwq, kwe->kwe_lockseq, &updatebits);
#if __TESTPANICS__
				if (updatebits != 0)
					panic("psync_cvwait: convert pre-post to broadcast: woke up %d threads that shouldn't be there\n",
					      updatebits);
#endif /* __TESTPANICS__ */
			}

			break;

		default:
			panic("psync_cvwait: unexpected wait queue element type\n");
		}

#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xfefefefe, kwe->kwe_lockseq, 0, 0);
#endif /* _PSYNCH_TRACE_ */

		updatebits = PTHRW_INC;
		ckwq->kw_sword += PTHRW_INC;

		/* set C or P bits and free if needed */
		ksyn_cvupdate_fixup(ckwq, &updatebits, &kfreeq, 1);

		local_error = 0;

		*retval = updatebits;

		ksyn_wqunlock(ckwq);

		if (nkwe != NULL)
			zfree(kwe_zone, nkwe);

		goto out;
	}

	uth = current_uthread();
	kwe = &uth->uu_kwe;
	kwe->kwe_kwqqueue = ckwq;
	kwe->kwe_flags = KWE_THREAD_INWAIT;
	kwe->kwe_lockseq = lockseq;
	kwe->kwe_count = 1;
	kwe->kwe_uth = uth;
	kwe->kwe_psynchretval = 0;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xfeedfeed, cgen, 0, 0);
#endif /* _PSYNCH_TRACE_ */

	error = ksyn_queue_insert(ckwq, kq, cgen, uth, kwe, SEQFIT);
	if (error != 0) {
		ksyn_wqunlock(ckwq);
		local_error = error;
		goto out;
	}

	kret = ksyn_block_thread_locked(ckwq, abstime, kwe, 1, psynch_cvcontinue, (void *)ckwq);
	/* drops the lock */

	psynch_cvcontinue(ckwq, kret);
	/* not expected to return from unix_syscall_return */
	panic("psynch_cvcontinue returned from unix_syscall_return");

out:
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_END, (uint32_t)cond, 0xeeeeeeed, (uint32_t)*retval, local_error, 0);
#endif /* _PSYNCH_TRACE_ */
	ksyn_wqrelease(ckwq, NULL, 1, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_CVAR));
	return(local_error);
}
void
psynch_cvcontinue(void * parameter, wait_result_t result)
{
	int error = 0, local_error = 0;
	uthread_t uth = current_uthread();
	ksyn_wait_queue_t ckwq = (ksyn_wait_queue_t)parameter;
	ksyn_waitq_element_t kwe;
	struct ksyn_queue kfreeq;

	switch (result) {
		case THREAD_TIMED_OUT:
			error = ETIMEDOUT;
			break;
		case THREAD_INTERRUPTED:
			error = EINTR;
			break;
		default:
			error = 0;
			break;
	}

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_THWAKEUP | DBG_FUNC_NONE, 0xf4f3f2f1, (uintptr_t)uth, result, 0, 0);
#endif /* _PSYNCH_TRACE_ */

	local_error = error;
	kwe = &uth->uu_kwe;

	if (error != 0) {
		ksyn_wqlock(ckwq);
		/* just in case it got woken up as we were granting */
		uth->uu_rval[0] = kwe->kwe_psynchretval;

#if __TESTPANICS__
		if ((kwe->kwe_kwqqueue != NULL) && (kwe->kwe_kwqqueue != ckwq))
			panic("cvwait waiting on some other kwq\n");

#endif /* __TESTPANICS__ */


		if (kwe->kwe_kwqqueue != NULL) {
			ksyn_queue_removeitem(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwe);
			kwe->kwe_kwqqueue = NULL;
		}
		if ((kwe->kwe_psynchretval & PTH_RWL_MTX_WAIT) != 0) {
			/* the condition var granted.
			 * reset the error so that the thread returns back.
			 */
			local_error = 0;
			/* no need to set any bits just return as cvsig/broad covers this */
			ksyn_wqunlock(ckwq);
			goto out;
		}

		ckwq->kw_sword += PTHRW_INC;

		/* set C and P bits, in the local error */
		if ((ckwq->kw_lword & PTHRW_COUNT_MASK) == (ckwq->kw_sword & PTHRW_COUNT_MASK)) {
			local_error |= ECVCERORR;
			if (ckwq->kw_inqueue != 0) {
				(void)ksyn_queue_move_tofree(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], (ckwq->kw_lword & PTHRW_COUNT_MASK), &kfreeq, 1, 1);
			}
			ckwq->kw_lword = ckwq->kw_uword = ckwq->kw_sword = 0;
			ckwq->kw_kflags |= KSYN_KWF_ZEROEDOUT;
		} else {
			/* everything in the queue is a fake entry ? */
			if ((ckwq->kw_inqueue != 0) && (ckwq->kw_fakecount == ckwq->kw_inqueue)) {
				local_error |= ECVPERORR;
			}
		}
		ksyn_wqunlock(ckwq);

	} else {
		/* PTH_RWL_MTX_WAIT is removed */
		if ((kwe->kwe_psynchretval & PTH_RWS_CV_MBIT) != 0)
			uth->uu_rval[0] = PTHRW_INC | PTH_RWS_CV_CBIT;
		else
			uth->uu_rval[0] = 0;
		local_error = 0;
	}
out:
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_END, (uint32_t)ckwq->kw_addr, 0xeeeeeeed, uth->uu_rval[0], local_error, 0);
#endif /* _PSYNCH_TRACE_ */
	ksyn_wqrelease(ckwq, NULL, 1, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_CVAR));

	unix_syscall_return(local_error);
}
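
/*
 * ECVCERORR and ECVPERORR (defined near the top of this file) are OR'ed
 * into the error that cvwait hands back: ECVCERORR reports that L == S and
 * the cvar state was zeroed out, ECVPERORR that only prepost/fake entries
 * remain queued.
 */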
/*
 * psynch_cvclrprepost: This system call clears pending prepost if present.
 */
int
psynch_cvclrprepost(__unused proc_t p, struct psynch_cvclrprepost_args * uap, __unused int * retval)
{
	user_addr_t cond = uap->cv;
	uint32_t cgen = uap->cvgen;
	uint32_t cugen = uap->cvugen;
	uint32_t csgen = uap->cvsgen;
	uint32_t pseq = uap->preposeq;
	uint32_t flags = uap->flags;
	int error = 0;
	ksyn_wait_queue_t ckwq = NULL;
	struct ksyn_queue kfreeq;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CLRPRE | DBG_FUNC_START, (uint32_t)cond, cgen, cugen, csgen, 0);
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CLRPRE | DBG_FUNC_NONE, (uint32_t)cond, 0xcececece, pseq, flags, 0);
#endif /* _PSYNCH_TRACE_ */

	if ((flags & _PTHREAD_MTX_OPT_MUTEX) == 0) {
		error = ksyn_wqfind(cond, cgen, cugen, csgen, 0, flags, (KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INDROP), &ckwq);
		if (error != 0) {
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CLRPRE | DBG_FUNC_END, (uint32_t)cond, 0, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
			return(error);
		}

		ksyn_wqlock(ckwq);
		(void)ksyn_queue_move_tofree(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], (pseq & PTHRW_COUNT_MASK), &kfreeq, 0, 1);
		ksyn_wqunlock(ckwq);
		ksyn_wqrelease(ckwq, NULL, 1, (KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INDROP));
	} else {
		/* mutex type */
		error = ksyn_wqfind(cond, cgen, cugen, 0, 0, flags, (KSYN_WQTYPE_MTX | KSYN_WQTYPE_INDROP), &ckwq);
		if (error != 0) {
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CLRPRE | DBG_FUNC_END, (uint32_t)cond, 0, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
			return(error);
		}

		ksyn_wqlock(ckwq);
		if (((flags & _PTHREAD_MUTEX_POLICY_FIRSTFIT) != 0) && (ckwq->kw_pre_rwwc != 0)) {
			if (is_seqlower_eq(ckwq->kw_pre_lockseq, cgen) != 0) {
				/* clear prepost */
				ckwq->kw_pre_rwwc = 0;
				ckwq->kw_pre_lockseq = 0;
			}
		}
		ksyn_wqunlock(ckwq);
		ksyn_wqrelease(ckwq, NULL, 1, (KSYN_WQTYPE_MTX | KSYN_WQTYPE_INDROP));
	}

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CLRPRE | DBG_FUNC_END, (uint32_t)cond, 0xeeeeeeed, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	return(0);
}
/* ***************** pthread_rwlock ************************ */
/*
 * psynch_rw_rdlock: This system call is used for psync rwlock readers to block.
 */
int
psynch_rw_rdlock(__unused proc_t p, struct psynch_rw_rdlock_args * uap, uint32_t * retval)
{
	user_addr_t rwlock = uap->rwlock;
	uint32_t lgen = uap->lgenval;
	uint32_t ugen = uap->ugenval;
	uint32_t rw_wc = uap->rw_wc;
	//uint64_t tid = uap->tid;
	int flags = uap->flags;
	int error = 0, block;
	uint32_t lockseq = 0, updatebits = 0, preseq = 0, prerw_wc = 0;
	ksyn_wait_queue_t kwq;
	uthread_t uth;
	int isinit = lgen & PTHRW_RWL_INIT;
	uint32_t returnbits = 0;
	ksyn_waitq_element_t kwe;
	kern_return_t kret;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
	uth = current_uthread();

	/* preserve the seq number */
	kwe = &uth->uu_kwe;
	kwe->kwe_lockseq = lgen;
	kwe->kwe_uth = uth;
	kwe->kwe_psynchretval = 0;
	kwe->kwe_kwqqueue = NULL;

	lockseq = lgen & PTHRW_COUNT_MASK;


	error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	ksyn_wqlock(kwq);

	if (isinit != 0) {
		lgen &= ~PTHRW_RWL_INIT;
		if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
			/* first to notice the reset of the lock, clear preposts */
			CLEAR_REINIT_BITS(kwq);
			kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
#endif /* _PSYNCH_TRACE_ */
		}
	}

	/* handle first the missed wakeups */
	if ((kwq->kw_pre_intrcount != 0) &&
		((kwq->kw_pre_intrtype == PTH_RW_TYPE_READ) || (kwq->kw_pre_intrtype == PTH_RW_TYPE_LREAD)) &&
		(is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {

		kwq->kw_pre_intrcount--;
		kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
		if (kwq->kw_pre_intrcount == 0)
			CLEAR_INTR_PREPOST_BITS(kwq);
		ksyn_wqunlock(kwq);
		goto out;
	}

	/* handle overlap first as they are not counted against pre_rwwc */

	/* check for overlap and if no pending W bit (indicates writers) */
	if ((kwq->kw_overlapwatch != 0) && ((rw_wc & PTHRW_RWS_SAVEMASK) == 0) && ((lgen & PTH_RWL_WBIT) == 0)) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 10, kwq->kw_nextseqword, kwq->kw_lastseqword, 0);
#endif /* _PSYNCH_TRACE_ */
		error = kwq_handle_overlap(kwq, lgen, ugen, rw_wc, &updatebits, (KW_UNLOCK_PREPOST_READLOCK|KW_UNLOCK_PREPOST), &block);
#if __TESTPANICS__
		if (error != 0)
			panic("rw_rdlock: kwq_handle_overlap failed %d\n",error);
#endif /* __TESTPANICS__ */
		if (block == 0) {
			error = 0;
			kwe->kwe_psynchretval = updatebits;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0xff, updatebits, 0xee, 0);
#endif /* _PSYNCH_TRACE_ */
			ksyn_wqunlock(kwq);
			goto out;
		}
	}

	if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
		kwq->kw_pre_rwwc--;
		if (kwq->kw_pre_rwwc == 0) {
			preseq = kwq->kw_pre_lockseq;
			prerw_wc = kwq->kw_pre_sseq;
			CLEAR_PREPOST_BITS(kwq);
			if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0){
				kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
#endif /* _PSYNCH_TRACE_ */
			}
			error = kwq_handle_unlock(kwq, preseq, prerw_wc, &updatebits, (KW_UNLOCK_PREPOST_READLOCK|KW_UNLOCK_PREPOST), &block, lgen);
#if __TESTPANICS__
			if (error != 0)
				panic("rw_rdlock: kwq_handle_unlock failed %d\n",error);
#endif /* __TESTPANICS__ */
			if (block == 0) {
				ksyn_wqunlock(kwq);
				goto out;
			}
			/* insert to q and proceed as usual */
		}
	}


#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_READ], lgen, uth, kwe, SEQFIT);
#if __TESTPANICS__
	if (error != 0)
		panic("psynch_rw_rdlock: failed to enqueue\n");
#endif /* __TESTPANICS__ */
	kret = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0, THREAD_CONTINUE_NULL, NULL);
	/* drops the kwq lock */
	switch (kret) {
		case THREAD_TIMED_OUT:
			error = ETIMEDOUT;
			break;
		case THREAD_INTERRUPTED:
			error = EINTR;
			break;
		default:
			error = 0;
			break;
	}

out:
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 4, error, 0, 0);
#endif /* _PSYNCH_TRACE_ */
		ksyn_wqlock(kwq);
		if (kwe->kwe_kwqqueue != NULL)
			ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_READ], kwe);
		ksyn_wqunlock(kwq);
	} else {
		/* update bits */
		*retval = kwe->kwe_psynchretval;
		returnbits = kwe->kwe_psynchretval;
	}
	ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK));
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, returnbits, error, 0);
#endif /* _PSYNCH_TRACE_ */
	return(error);
}
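
/*
 * Readers may be granted in overlap with readers already holding the lock:
 * kwq_handle_overlap above admits the caller without queueing when
 * kw_overlapwatch is set and no writer (W bit) is pending.
 */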
/*
 * psynch_rw_longrdlock: This system call is used for psync rwlock long readers to block.
 */
int
psynch_rw_longrdlock(__unused proc_t p, __unused struct psynch_rw_longrdlock_args * uap, __unused uint32_t * retval)
{
	user_addr_t rwlock = uap->rwlock;
	uint32_t lgen = uap->lgenval;
	uint32_t ugen = uap->ugenval;
	uint32_t rw_wc = uap->rw_wc;
	//uint64_t tid = uap->tid;
	int flags = uap->flags;
	int isinit = lgen & PTHRW_RWL_INIT;
	uint32_t returnbits = 0;
	ksyn_waitq_element_t kwe;
	kern_return_t kret;

	ksyn_wait_queue_t kwq;
	int error = 0, block = 0;
	uthread_t uth;
	uint32_t lockseq = 0, updatebits = 0, preseq = 0, prerw_wc = 0;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
	uth = current_uthread();

	kwe = &uth->uu_kwe;
	kwe->kwe_lockseq = lgen;
	kwe->kwe_uth = uth;
	kwe->kwe_psynchretval = 0;
	kwe->kwe_kwqqueue = NULL;
	lockseq = (lgen & PTHRW_COUNT_MASK);

	error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	ksyn_wqlock(kwq);

	if (isinit != 0) {
		lgen &= ~PTHRW_RWL_INIT;
		if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
			/* first to notice the reset of the lock, clear preposts */
			CLEAR_REINIT_BITS(kwq);
			kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
#endif /* _PSYNCH_TRACE_ */
		}
	}

	/* handle first the missed wakeups */
	if ((kwq->kw_pre_intrcount != 0) &&
		(kwq->kw_pre_intrtype == PTH_RW_TYPE_LREAD) &&
		(is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {

		kwq->kw_pre_intrcount--;
		kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
		if (kwq->kw_pre_intrcount == 0)
			CLEAR_INTR_PREPOST_BITS(kwq);
		ksyn_wqunlock(kwq);
		goto out;
	}

	if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
		kwq->kw_pre_rwwc--;
		if (kwq->kw_pre_rwwc == 0) {
			preseq = kwq->kw_pre_lockseq;
			prerw_wc = kwq->kw_pre_sseq;
			CLEAR_PREPOST_BITS(kwq);
			if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0){
				kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
#endif /* _PSYNCH_TRACE_ */
			}
			error = kwq_handle_unlock(kwq, preseq, prerw_wc, &updatebits, (KW_UNLOCK_PREPOST_LREADLOCK|KW_UNLOCK_PREPOST), &block, lgen);
#if __TESTPANICS__
			if (error != 0)
				panic("kwq_handle_unlock failed %d\n",error);
#endif /* __TESTPANICS__ */
			if (block == 0) {
				ksyn_wqunlock(kwq);
				goto out;
			}
			/* insert to q and proceed as usual */
		}
	}

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_LREAD], lgen, uth, kwe, SEQFIT);
#if __TESTPANICS__
	if (error != 0)
		panic("psynch_rw_longrdlock: failed to enqueue\n");
#endif /* __TESTPANICS__ */

	kret = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0, THREAD_CONTINUE_NULL, NULL);
	/* drops the kwq lock */
	switch (kret) {
		case THREAD_TIMED_OUT:
			error = ETIMEDOUT;
			break;
		case THREAD_INTERRUPTED:
			error = EINTR;
			break;
		default:
			error = 0;
			break;
	}
out:
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		ksyn_wqlock(kwq);
		if (kwe->kwe_kwqqueue != NULL)
			ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_LREAD], kwe);
		ksyn_wqunlock(kwq);
	} else {
		/* update bits */
		*retval = kwe->kwe_psynchretval;
		returnbits = kwe->kwe_psynchretval;
	}

	ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK));

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0, returnbits, error, 0);
#endif /* _PSYNCH_TRACE_ */
	return(error);
}
/*
 * psynch_rw_wrlock: This system call is used for psync rwlock writers to block.
 */
int
psynch_rw_wrlock(__unused proc_t p, struct psynch_rw_wrlock_args * uap, uint32_t * retval)
{
	user_addr_t rwlock = uap->rwlock;
	uint32_t lgen = uap->lgenval;
	uint32_t ugen = uap->ugenval;
	uint32_t rw_wc = uap->rw_wc;
	//uint64_t tid = uap->tid;
	int flags = uap->flags;

	ksyn_wait_queue_t kwq;
	int error = 0, block = 0;
	uthread_t uth;
	kern_return_t kret;
	uint32_t lockseq = 0, updatebits = 0, preseq = 0, prerw_wc = 0;
	int isinit = lgen & PTHRW_RWL_INIT;
	uint32_t returnbits = 0;
	ksyn_waitq_element_t kwe;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
	uth = current_uthread();
	kwe = &uth->uu_kwe;
	kwe->kwe_lockseq = lgen;
	kwe->kwe_uth = uth;
	kwe->kwe_psynchretval = 0;
	kwe->kwe_kwqqueue = NULL;
	lockseq = (lgen & PTHRW_COUNT_MASK);

	error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	ksyn_wqlock(kwq);

	if (isinit != 0) {
		lgen &= ~PTHRW_RWL_INIT;
		if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
			/* first to notice the reset of the lock, clear preposts */
			CLEAR_REINIT_BITS(kwq);
			kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
#endif /* _PSYNCH_TRACE_ */
		}
	}

	/* handle first the missed wakeups */
	if ((kwq->kw_pre_intrcount != 0) &&
		(kwq->kw_pre_intrtype == PTH_RW_TYPE_WRITE) &&
		(is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {

		kwq->kw_pre_intrcount--;
		kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
		if (kwq->kw_pre_intrcount == 0)
			CLEAR_INTR_PREPOST_BITS(kwq);
		ksyn_wqunlock(kwq);
		goto out;
	}

	if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
		kwq->kw_pre_rwwc--;
		if (kwq->kw_pre_rwwc == 0) {
			preseq = kwq->kw_pre_lockseq;
			prerw_wc = kwq->kw_pre_sseq;
			CLEAR_PREPOST_BITS(kwq);
			if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0) {
				kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
#endif /* _PSYNCH_TRACE_ */
			}
			error = kwq_handle_unlock(kwq, preseq, prerw_wc, &updatebits, (KW_UNLOCK_PREPOST_WRLOCK|KW_UNLOCK_PREPOST), &block, lgen);
#if __TESTPANICS__
			if (error != 0)
				panic("rw_wrlock: kwq_handle_unlock failed %d\n", error);
#endif /* __TESTPANICS__ */
			if (block == 0) {
				ksyn_wqunlock(kwq);
				*retval = updatebits;
				goto out1;
			}
			/* insert to q and proceed as usual */
		}
	}

	/* No overlap watch needed, go ahead and block */

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], lgen, uth, kwe, SEQFIT);
#if __TESTPANICS__
	if (error != 0)
		panic("psynch_rw_wrlock: failed to enqueue\n");
#endif /* __TESTPANICS__ */

	kret = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0, THREAD_CONTINUE_NULL, NULL);
	/* drops the wq lock */
	switch (kret) {
	case THREAD_TIMED_OUT:
		error = ETIMEDOUT;
		break;
	case THREAD_INTERRUPTED:
		error = EINTR;
		break;
	default:
		error = 0;
		break;
	}

out:
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 4, error, 0, 0);
#endif /* _PSYNCH_TRACE_ */
		ksyn_wqlock(kwq);
		if (kwe->kwe_kwqqueue != NULL)
			ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwe);
		ksyn_wqunlock(kwq);
	} else {
		/* update bits */
		*retval = kwe->kwe_psynchretval;
		returnbits = kwe->kwe_psynchretval;
	}
out1:
	ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK));
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, returnbits, error, 0);
#endif /* _PSYNCH_TRACE_ */
	return(error);
}
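
/*
 * A yielding writer waits on its own queue (KSYN_QUEUE_YWRITER) so that
 * kwq_handle_unlock can admit pending readers ahead of it; a regular writer
 * on KSYN_QUEUE_WRITER holds later readers back instead.
 */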
/*
 * psynch_rw_yieldwrlock: This system call is used for psync rwlock yielding writers to block.
 */
int
psynch_rw_yieldwrlock(__unused proc_t p, __unused struct psynch_rw_yieldwrlock_args * uap, __unused uint32_t * retval)
{
	user_addr_t rwlock = uap->rwlock;
	uint32_t lgen = uap->lgenval;
	uint32_t ugen = uap->ugenval;
	uint32_t rw_wc = uap->rw_wc;
	//uint64_t tid = uap->tid;
	int flags = uap->flags;

	ksyn_wait_queue_t kwq;
	int error = 0, block = 0;
	int isinit = lgen & PTHRW_RWL_INIT;
	uthread_t uth;
	kern_return_t kret;
	uint32_t returnbits = 0;
	ksyn_waitq_element_t kwe;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
	uint32_t lockseq = 0, updatebits = 0, preseq = 0, prerw_wc = 0;

	uth = current_uthread();
	kwe = &uth->uu_kwe;
	kwe->kwe_lockseq = lgen;
	kwe->kwe_uth = uth;
	kwe->kwe_psynchretval = 0;
	kwe->kwe_kwqqueue = NULL;
	lockseq = (lgen & PTHRW_COUNT_MASK);

	error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	ksyn_wqlock(kwq);

	if (isinit != 0) {
		lgen &= ~PTHRW_RWL_INIT;
		if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
			/* first to notice the reset of the lock, clear preposts */
			CLEAR_REINIT_BITS(kwq);
			kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
#endif /* _PSYNCH_TRACE_ */
		}
	}

	/* handle first the missed wakeups */
	if ((kwq->kw_pre_intrcount != 0) &&
		(kwq->kw_pre_intrtype == PTH_RW_TYPE_YWRITE) &&
		(is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {

		kwq->kw_pre_intrcount--;
		kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
		if (kwq->kw_pre_intrcount == 0)
			CLEAR_INTR_PREPOST_BITS(kwq);
		ksyn_wqunlock(kwq);
		goto out;
	}

	if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
		kwq->kw_pre_rwwc--;
		if (kwq->kw_pre_rwwc == 0) {
			preseq = kwq->kw_pre_lockseq;
			prerw_wc = kwq->kw_pre_sseq;
			CLEAR_PREPOST_BITS(kwq);
			if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0) {
				kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
#endif /* _PSYNCH_TRACE_ */
			}
			error = kwq_handle_unlock(kwq, preseq, prerw_wc, &updatebits, (KW_UNLOCK_PREPOST_YWRLOCK|KW_UNLOCK_PREPOST), &block, lgen);
#if __TESTPANICS__
			if (error != 0)
				panic("kwq_handle_unlock failed %d\n", error);
#endif /* __TESTPANICS__ */
			if (block == 0) {
				ksyn_wqunlock(kwq);
				*retval = updatebits;
				goto out1;
			}
			/* insert to q and proceed as usual */
		}
	}

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER], lgen, uth, kwe, SEQFIT);
#if __TESTPANICS__
	if (error != 0)
		panic("psynch_rw_yieldwrlock: failed to enqueue\n");
#endif /* __TESTPANICS__ */

	kret = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0, THREAD_CONTINUE_NULL, NULL);
	/* drops the kwq lock */
	switch (kret) {
	case THREAD_TIMED_OUT:
		error = ETIMEDOUT;
		break;
	case THREAD_INTERRUPTED:
		error = EINTR;
		break;
	default:
		error = 0;
		break;
	}

out:
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 4, error, 0, 0);
#endif /* _PSYNCH_TRACE_ */
		ksyn_wqlock(kwq);
		if (kwe->kwe_kwqqueue != NULL)
			ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER], kwe);
		ksyn_wqunlock(kwq);
	} else {
		/* update bits */
		*retval = kwe->kwe_psynchretval;
		returnbits = kwe->kwe_psynchretval;
	}
out1:
	ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_RWLOCK));
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, returnbits, error, 0);
#endif /* _PSYNCH_TRACE_ */
	return(error);
}
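
/*
 * Downgrade and unlock both compare the lock/unlock generation distance
 * (find_diff(lgen, ugen)) with the number of waiters actually queued up to
 * that sequence (find_seq_till); if fewer threads have arrived than the
 * userland counts imply, the wakeup is recorded as a prepost rather than
 * performed immediately.
 */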
/*
 * psynch_rw_downgrade: This system call is used to wake up blocked readers that become eligible to run due to a downgrade.
 */
int
psynch_rw_downgrade(__unused proc_t p, struct psynch_rw_downgrade_args * uap, __unused int * retval)
{
	user_addr_t rwlock = uap->rwlock;
	uint32_t lgen = uap->lgenval;
	uint32_t ugen = uap->ugenval;
	uint32_t rw_wc = uap->rw_wc;
	//uint64_t tid = uap->tid;
	int flags = uap->flags;
	uint32_t count = 0;
	int isinit = lgen & PTHRW_RWL_INIT;
	ksyn_wait_queue_t kwq;
	int error = 0, diff;
	uthread_t uth;
	uint32_t curgen = 0;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
	uth = current_uthread();

	curgen = (lgen & PTHRW_COUNT_MASK);

	error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_RWLOCK), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	ksyn_wqlock(kwq);

	if ((lgen & PTHRW_RWL_INIT) != 0) {
		lgen &= ~PTHRW_RWL_INIT;
		if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
			/* first to notice the reset of the lock, clear preposts */
			CLEAR_REINIT_BITS(kwq);
			kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
#endif /* _PSYNCH_TRACE_ */
		}
		isinit = 1;
	}

	/* if lastunlock seq is set, ensure the current one is not lower than that, as it would be spurious */
	if ((kwq->kw_lastunlockseq != PTHRW_RWL_INIT) && (is_seqlower(ugen, kwq->kw_lastunlockseq) != 0)) {
		/* spurious updatebits?? */
		error = 0;
		goto out;
	}

	/* If L-U != num of waiters, then it needs to be preposted or is spurious */
	diff = find_diff(lgen, ugen);
	/* take count of the downgrade thread itself */
	diff--;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 1, kwq->kw_inqueue, curgen, 0);
#endif /* _PSYNCH_TRACE_ */
	if (find_seq_till(kwq, curgen, diff, &count) == 0) {
		if (count < (uint32_t)diff)
			goto prepost;
	}

	/* no prepost and all threads are in place, reset the bit */
	if ((isinit != 0) && ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0)) {
		kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	}

	/* can handle unlock now */
	CLEAR_PREPOST_BITS(kwq);

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	error = kwq_handle_downgrade(kwq, lgen, 0, 0, NULL);

#if __TESTPANICS__
	if (error != 0)
		panic("psynch_rw_downgrade: failed to wakeup\n");
#endif /* __TESTPANICS__ */

out:
	ksyn_wqunlock(kwq);
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_END, (uint32_t)rwlock, 0, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
	ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_RWLOCK));

	return(error);

prepost:
	kwq->kw_pre_rwwc = (rw_wc - count);
	kwq->kw_pre_lockseq = lgen;
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 1, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
	error = 0;
	goto out;
}
/*
 * psynch_rw_upgrade: This system call is used by a reader to block waiting for an upgrade to be granted.
 */
int
psynch_rw_upgrade(__unused proc_t p, struct psynch_rw_upgrade_args * uap, uint32_t * retval)
{
	user_addr_t rwlock = uap->rwlock;
	uint32_t lgen = uap->lgenval;
	uint32_t ugen = uap->ugenval;
	uint32_t rw_wc = uap->rw_wc;
	//uint64_t tid = uap->tid;
	int flags = uap->flags;

	ksyn_wait_queue_t kwq;
	int error = 0, block = 0;
	uthread_t uth;
	kern_return_t kret;
	uint32_t lockseq = 0, updatebits = 0, preseq = 0, prerw_wc = 0;
	int isinit = lgen & PTHRW_RWL_INIT;
	ksyn_waitq_element_t kwe;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
	uth = current_uthread();
	kwe = &uth->uu_kwe;
	kwe->kwe_lockseq = lgen;
	kwe->kwe_uth = uth;
	kwe->kwe_psynchretval = 0;
	kwe->kwe_kwqqueue = NULL;
	lockseq = (lgen & PTHRW_COUNT_MASK);

	error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_RWLOCK), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	ksyn_wqlock(kwq);

	if (isinit != 0) {
		lgen &= ~PTHRW_RWL_INIT;
		if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
			/* first to notice the reset of the lock, clear preposts */
			CLEAR_REINIT_BITS(kwq);
			kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
#endif /* _PSYNCH_TRACE_ */
		}
	}

	/* handle first the missed wakeups */
	if ((kwq->kw_pre_intrcount != 0) &&
		((kwq->kw_pre_intrtype == PTH_RW_TYPE_READ) || (kwq->kw_pre_intrtype == PTH_RW_TYPE_LREAD)) &&
		(is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {

		kwq->kw_pre_intrcount--;
		kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
		if (kwq->kw_pre_intrcount == 0)
			CLEAR_INTR_PREPOST_BITS(kwq);
		ksyn_wqunlock(kwq);
		goto out;
	}

	if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
		kwq->kw_pre_rwwc--;
		if (kwq->kw_pre_rwwc == 0) {
			preseq = kwq->kw_pre_lockseq;
			prerw_wc = kwq->kw_pre_sseq;
			CLEAR_PREPOST_BITS(kwq);
			if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0) {
				kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
#endif /* _PSYNCH_TRACE_ */
			}
			error = kwq_handle_unlock(kwq, preseq, prerw_wc, &updatebits, (KW_UNLOCK_PREPOST_UPGRADE|KW_UNLOCK_PREPOST), &block, lgen);
#if __TESTPANICS__
			if (error != 0)
				panic("rw_upgrade: kwq_handle_unlock failed %d\n", error);
#endif /* __TESTPANICS__ */
			if (block == 0) {
				ksyn_wqunlock(kwq);
				goto out;
			}
			/* insert to q and proceed as usual */
		}
	}

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_UPGRADE], lgen, uth, kwe, SEQFIT);
#if __TESTPANICS__
	if (error != 0)
		panic("psynch_rw_upgrade: failed to enqueue\n");
#endif /* __TESTPANICS__ */

	kret = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0, THREAD_CONTINUE_NULL, NULL);
	/* drops the lock */
	switch (kret) {
	case THREAD_TIMED_OUT:
		error = ETIMEDOUT;
		break;
	case THREAD_INTERRUPTED:
		error = EINTR;
		break;
	default:
		error = 0;
		break;
	}

out:
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 4, error, 0, 0);
#endif /* _PSYNCH_TRACE_ */
		ksyn_wqlock(kwq);
		if (kwe->kwe_kwqqueue != NULL)
			ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_UPGRADE], kwe);
		ksyn_wqunlock(kwq);
	} else {
		/* update bits */
		*retval = kwe->kwe_psynchretval;
	}

	ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_RWLOCK));
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
	return(error);
}
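
/*
 * With upgrade/downgrade support compiled out, the corresponding syscalls
 * reduce to the stubs below.
 */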
int
psynch_rw_upgrade(__unused proc_t p, __unused struct psynch_rw_upgrade_args * uap, __unused uint32_t * retval)
{
	return(0);
}

int
psynch_rw_downgrade(__unused proc_t p, __unused struct psynch_rw_downgrade_args * uap, __unused int * retval)
{
	return(0);
}
/*
 * psynch_rw_unlock: This system call is used for unlock state postings. It grants the appropriate
 * reader/writer variety of lock.
 */
int
psynch_rw_unlock(__unused proc_t p, struct psynch_rw_unlock_args * uap, uint32_t * retval)
{
	user_addr_t rwlock = uap->rwlock;
	uint32_t lgen = uap->lgenval;
	uint32_t ugen = uap->ugenval;
	uint32_t rw_wc = uap->rw_wc;
	uint32_t curgen;
	//uint64_t tid = uap->tid;
	int flags = uap->flags;
	uthread_t uth;
	ksyn_wait_queue_t kwq;
	uint32_t updatebits = 0;
	int error = 0, diff;
	uint32_t count = 0;
	int isinit = 0;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
	uth = current_uthread();

	error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_RWLOCK), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	curgen = lgen & PTHRW_COUNT_MASK;

	ksyn_wqlock(kwq);

	if ((lgen & PTHRW_RWL_INIT) != 0) {
		lgen &= ~PTHRW_RWL_INIT;
		if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
			/* first to notice the reset of the lock, clear preposts */
			CLEAR_REINIT_BITS(kwq);
			kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
#endif /* _PSYNCH_TRACE_ */
		}
		isinit = 1;
	}

	/* if lastunlock seq is set, ensure the current one is not lower than that, as it would be spurious */
	if ((kwq->kw_lastunlockseq != PTHRW_RWL_INIT) && (is_seqlower(ugen, kwq->kw_lastunlockseq) != 0)) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, (uint32_t)0xeeeeeeee, rw_wc, kwq->kw_lastunlockseq, 0);
#endif /* _PSYNCH_TRACE_ */
		error = 0;
		goto out;
	}

	/* If L-U != num of waiters, then it needs to be preposted or is spurious */
	diff = find_diff(lgen, ugen);

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 1, kwq->kw_inqueue, curgen, 0);
#endif /* _PSYNCH_TRACE_ */
	if (find_seq_till(kwq, curgen, diff, &count) == 0) {
		if ((count == 0) || (count < (uint32_t)diff))
			goto prepost;
	}

	/* no prepost and all threads are in place, reset the bit */
	if ((isinit != 0) && ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0)) {
		kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	}

	/* can handle unlock now */
	CLEAR_PREPOST_BITS(kwq);

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	error = kwq_handle_unlock(kwq, lgen, rw_wc, &updatebits, 0, NULL, 0);
#if __TESTPANICS__
	if (error != 0)
		panic("psynch_rw_unlock: kwq_handle_unlock failed %d\n", error);
#endif /* __TESTPANICS__ */

out:
	if (error == 0) {
		/* update bits */
		*retval = updatebits;
	}

	ksyn_wqunlock(kwq);

	ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_RWLOCK));
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0, updatebits, error, 0);
#endif /* _PSYNCH_TRACE_ */

	return(error);

prepost:
	/* update if the new seq is higher than prev prepost, or first set */
	if ((is_rws_setseq(kwq->kw_pre_sseq) != 0) ||
		(is_seqhigher_eq((rw_wc & PTHRW_COUNT_MASK), (kwq->kw_pre_sseq & PTHRW_COUNT_MASK)) != 0)) {
		kwq->kw_pre_rwwc = (diff - count);
		kwq->kw_pre_lockseq = curgen;
		kwq->kw_pre_sseq = rw_wc;
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, rw_wc, count, 0);
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 4, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
		updatebits = lgen;	/* let this not do unlock handling */
	}
	error = 0;
	goto out;
}
/*
 * psynch_rw_unlock2: This system call is used to wake up pending readers when an unlock grant from the kernel
 * races with new reader arrivals.
 */
int
psynch_rw_unlock2(__unused proc_t p, __unused struct psynch_rw_unlock2_args * uap, __unused uint32_t * retval)
{
	return(ENOTSUP);
}
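
/*
 * Hash table layout: process-private synchronizers are keyed by user address
 * in the per-process table (p->p_pthhash); PTHREAD_PROCESS_SHARED ones are
 * keyed by VM object/offset (see ksyn_findobj) in the global table, so every
 * process mapping the same page resolves to the same kernel wait queue.
 */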
/* ************************************************************************** */
void
pth_global_hashinit()
{
	int arg;

	pth_glob_hashtbl = hashinit(PTH_HASHSIZE * 4, M_PROC, &pthhash);

	/*
	 * pthtest={0,1,2,3} (override default aborting behavior on pthread sync failures)
	 * 0 - just return errors
	 * 1 - print and return errors
	 * 2 - abort user, print and return errors
	 * 3 - panic
	 */
	if (!PE_parse_boot_argn("pthtest", &arg, sizeof(arg)))
		arg = __TESTMODE__;

	if (arg == 3) {
		__test_panics__ = 1;
		printf("Pthread support PANICS when sync kernel primitives misused\n");
	} else if (arg == 2) {
		__test_aborts__ = 1;
		__test_prints__ = 1;
		printf("Pthread support ABORTS when sync kernel primitives misused\n");
	} else if (arg == 1) {
		__test_prints__ = 1;
		printf("Pthread support LOGS when sync kernel primitives misused\n");
	}
}
void
pth_proc_hashinit(proc_t p)
{
	p->p_pthhash = hashinit(PTH_HASHSIZE, M_PROC, &pthhash);
	if (p->p_pthhash == NULL)
		panic("pth_proc_hashinit: hash init returned 0\n");
}
ksyn_wait_queue_t
ksyn_wq_hash_lookup(user_addr_t mutex, proc_t p, int flags, uint64_t object, uint64_t objoffset)
{
	ksyn_wait_queue_t kwq;
	struct pthhashhead * hashptr;

	if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED)
	{
		hashptr = pth_glob_hashtbl;
		kwq = (&hashptr[object & pthhash])->lh_first;
		for (; kwq != NULL; kwq = kwq->kw_hash.le_next) {
			if ((kwq->kw_object == object) && (kwq->kw_offset == objoffset)) {
				return (kwq);
			}
		}
	} else {
		hashptr = p->p_pthhash;
		kwq = (&hashptr[mutex & pthhash])->lh_first;
		for (; kwq != NULL; kwq = kwq->kw_hash.le_next) {
			if (kwq->kw_addr == mutex) {
				return (kwq);
			}
		}
	}
	return(NULL);
}
void
pth_proc_hashdelete(proc_t p)
{
	struct pthhashhead * hashptr;
	ksyn_wait_queue_t kwq;
	int hashsize = pthhash + 1;
	int i;

#if _PSYNCH_TRACE_
	if ((pthread_debug_proc != NULL) && (p == pthread_debug_proc))
		pthread_debug_proc = PROC_NULL;
#endif /* _PSYNCH_TRACE_ */
	hashptr = p->p_pthhash;
	if (hashptr == NULL)
		return;

	for (i = 0; i < hashsize; i++) {
		while ((kwq = LIST_FIRST(&hashptr[i])) != NULL) {
			pthread_list_lock();
			if ((kwq->kw_pflags & KSYN_WQ_INHASH) != 0) {
				kwq->kw_pflags &= ~KSYN_WQ_INHASH;
				LIST_REMOVE(kwq, kw_hash);
			}
			if ((kwq->kw_pflags & KSYN_WQ_FLIST) != 0) {
				kwq->kw_pflags &= ~KSYN_WQ_FLIST;
				LIST_REMOVE(kwq, kw_list);
			}
			pthread_list_unlock();
			/* release fake entries if present for cvars */
			if (((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_CVAR) && (kwq->kw_inqueue != 0))
				ksyn_freeallkwe(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER]);
			lck_mtx_destroy(&kwq->kw_lock, pthread_lck_grp);
			zfree(kwq_zone, kwq);
		}
	}
	FREE(p->p_pthhash, M_PROC);
	p->p_pthhash = NULL;
}
/* no lock held for this as the waitqueue is getting freed */
void
ksyn_freeallkwe(ksyn_queue_t kq)
{
	ksyn_waitq_element_t kwe;

	/* free all the fake entries, dequeue rest */
	kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
	while (kwe != NULL) {
		if (kwe->kwe_flags != KWE_THREAD_INWAIT) {
			TAILQ_REMOVE(&kq->ksynq_kwelist, kwe, kwe_list);
			zfree(kwe_zone, kwe);
		} else {
			TAILQ_REMOVE(&kq->ksynq_kwelist, kwe, kwe_list);
		}
		kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
	}
}
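
/*
 * The find-or-create below is optimistic: if the first lookup misses, the
 * list lock is dropped, a fresh wait queue is allocated and initialized,
 * and the hash is probed again under the lock; a queue inserted by a racing
 * thread wins and the new allocation is destroyed.
 */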
/* find kernel waitqueue, if not present create one. Grants a reference */
int
ksyn_wqfind(user_addr_t mutex, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, uint64_t tid, int flags, int wqtype, ksyn_wait_queue_t * kwqp)
{
	ksyn_wait_queue_t kwq;
	ksyn_wait_queue_t nkwq;
	struct pthhashhead * hashptr;
	uint64_t object = 0, offset = 0;
	proc_t p = current_proc();
	int retry = mgen & PTH_RWL_RETRYBIT;
	struct ksyn_queue kfreeq;
	int i;

	if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED)
	{
		(void)ksyn_findobj(mutex, &object, &offset);
		hashptr = pth_glob_hashtbl;
	} else
		hashptr = p->p_pthhash;

	ksyn_queue_init(&kfreeq);

	if (((wqtype & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_MTX) && (retry != 0))
		mgen &= ~PTH_RWL_RETRYBIT;

loop:
	//pthread_list_lock_spin();
	pthread_list_lock();

	kwq = ksyn_wq_hash_lookup(mutex, p, flags, object, offset);

	if (kwq != NULL) {
		if ((kwq->kw_pflags & KSYN_WQ_FLIST) != 0) {
			LIST_REMOVE(kwq, kw_list);
			kwq->kw_pflags &= ~KSYN_WQ_FLIST;
		}

		if ((kwq->kw_type & KSYN_WQTYPE_MASK) != (wqtype & KSYN_WQTYPE_MASK)) {
			if ((kwq->kw_inqueue == 0) && (kwq->kw_pre_rwwc == 0) && (kwq->kw_pre_intrcount == 0)) {
				if (kwq->kw_iocount == 0) {
					kwq->kw_addr = mutex;
					kwq->kw_flags = flags;
					kwq->kw_object = object;
					kwq->kw_offset = offset;
					kwq->kw_type = (wqtype & KSYN_WQTYPE_MASK);
					CLEAR_REINIT_BITS(kwq);
					CLEAR_INTR_PREPOST_BITS(kwq);
					CLEAR_PREPOST_BITS(kwq);
					kwq->kw_lword = mgen;
					kwq->kw_uword = ugen;
					kwq->kw_sword = rw_wc;
					kwq->kw_owner = tid;
				} else if ((kwq->kw_iocount == 1) && (kwq->kw_dropcount == kwq->kw_iocount)) {
					/* if all users are unlockers then wait for it to finish */
					kwq->kw_pflags |= KSYN_WQ_WAITING;
					/* wait for the wq to be free */
					(void)msleep(&kwq->kw_pflags, pthread_list_mlock, PDROP, "ksyn_wqfind", 0);
					/* does not have list lock */
					goto loop;
				} else {
					__FAILEDUSERTEST__("address already known to kernel for another (busy) synchronizer type\n");
					pthread_list_unlock();
					return EBUSY;
				}
			} else {
				__FAILEDUSERTEST__("address already known to kernel for another (busy) synchronizer type(1)\n");
				pthread_list_unlock();
				return EBUSY;
			}
		}
		kwq->kw_iocount++;
		if (wqtype == KSYN_WQTYPE_MUTEXDROP)
			kwq->kw_dropcount++;
		if (kwqp != NULL)
			*kwqp = kwq;
		pthread_list_unlock();
		return (0);
	}

	pthread_list_unlock();

	nkwq = (ksyn_wait_queue_t)zalloc(kwq_zone);
	bzero(nkwq, sizeof(struct ksyn_wait_queue));
	nkwq->kw_addr = mutex;
	nkwq->kw_flags = flags;
	nkwq->kw_iocount = 1;
	if (wqtype == KSYN_WQTYPE_MUTEXDROP)
		nkwq->kw_dropcount++;
	nkwq->kw_object = object;
	nkwq->kw_offset = offset;
	nkwq->kw_type = (wqtype & KSYN_WQTYPE_MASK);
	nkwq->kw_lastseqword = PTHRW_RWS_INIT;
	if (nkwq->kw_type == KSYN_WQTYPE_RWLOCK)
		nkwq->kw_nextseqword = PTHRW_RWS_INIT;

	nkwq->kw_pre_sseq = PTHRW_RWS_INIT;

	CLEAR_PREPOST_BITS(nkwq);
	CLEAR_INTR_PREPOST_BITS(nkwq);
	CLEAR_REINIT_BITS(nkwq);
	nkwq->kw_lword = mgen;
	nkwq->kw_uword = ugen;
	nkwq->kw_sword = rw_wc;
	nkwq->kw_owner = tid;

	for (i = 0; i < KSYN_QUEUE_MAX; i++)
		ksyn_queue_init(&nkwq->kw_ksynqueues[i]);

	lck_mtx_init(&nkwq->kw_lock, pthread_lck_grp, pthread_lck_attr);

	//pthread_list_lock_spin();
	pthread_list_lock();
	/* see whether it is already allocated */
	kwq = ksyn_wq_hash_lookup(mutex, p, flags, object, offset);

	if (kwq != NULL) {
		if ((kwq->kw_pflags & KSYN_WQ_FLIST) != 0) {
			LIST_REMOVE(kwq, kw_list);
			kwq->kw_pflags &= ~KSYN_WQ_FLIST;
		}

		if ((kwq->kw_type & KSYN_WQTYPE_MASK) != (wqtype & KSYN_WQTYPE_MASK)) {
			if ((kwq->kw_inqueue == 0) && (kwq->kw_pre_rwwc == 0) && (kwq->kw_pre_intrcount == 0)) {
				if (kwq->kw_iocount == 0) {
					kwq->kw_addr = mutex;
					kwq->kw_flags = flags;
					kwq->kw_object = object;
					kwq->kw_offset = offset;
					kwq->kw_type = (wqtype & KSYN_WQTYPE_MASK);
					CLEAR_REINIT_BITS(kwq);
					CLEAR_INTR_PREPOST_BITS(kwq);
					CLEAR_PREPOST_BITS(kwq);
					kwq->kw_lword = mgen;
					kwq->kw_uword = ugen;
					kwq->kw_sword = rw_wc;
					kwq->kw_owner = tid;
				} else if ((kwq->kw_iocount == 1) && (kwq->kw_dropcount == kwq->kw_iocount)) {
					kwq->kw_pflags |= KSYN_WQ_WAITING;
					/* wait for the wq to be free */
					(void)msleep(&kwq->kw_pflags, pthread_list_mlock, PDROP, "ksyn_wqfind", 0);

					lck_mtx_destroy(&nkwq->kw_lock, pthread_lck_grp);
					zfree(kwq_zone, nkwq);
					/* will acquire lock again */
					goto loop;
				} else {
					__FAILEDUSERTEST__("address already known to kernel for another [busy] synchronizer type(2)\n");
					pthread_list_unlock();
					lck_mtx_destroy(&nkwq->kw_lock, pthread_lck_grp);
					zfree(kwq_zone, nkwq);
					return EBUSY;
				}
			} else {
				__FAILEDUSERTEST__("address already known to kernel for another [busy] synchronizer type(3)\n");
				pthread_list_unlock();
				lck_mtx_destroy(&nkwq->kw_lock, pthread_lck_grp);
				zfree(kwq_zone, nkwq);
				return EBUSY;
			}
		}
		kwq->kw_iocount++;
		if (wqtype == KSYN_WQTYPE_MUTEXDROP)
			kwq->kw_dropcount++;
		if (kwqp != NULL)
			*kwqp = kwq;
		pthread_list_unlock();
		lck_mtx_destroy(&nkwq->kw_lock, pthread_lck_grp);
		zfree(kwq_zone, nkwq);
		return (0);
	}
	kwq = nkwq;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, kwq->kw_lword, kwq->kw_uword, kwq->kw_sword, 0xffff, 0);
#endif /* _PSYNCH_TRACE_ */
	if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED)
	{
		kwq->kw_pflags |= KSYN_WQ_SHARED;
		LIST_INSERT_HEAD(&hashptr[kwq->kw_object & pthhash], kwq, kw_hash);
	} else
		LIST_INSERT_HEAD(&hashptr[mutex & pthhash], kwq, kw_hash);

	kwq->kw_pflags |= KSYN_WQ_INHASH;

	pthread_list_unlock();

	if (kwqp != NULL)
		*kwqp = kwq;
	return (0);
}
/* Reference from find is dropped here. Starts the free process if needed */
void
ksyn_wqrelease(ksyn_wait_queue_t kwq, ksyn_wait_queue_t ckwq, int qfreenow, int wqtype)
{
	uint64_t deadline;
	struct timeval t;
	int sched = 0;
	ksyn_wait_queue_t free_elem = NULL;
	ksyn_wait_queue_t free_elem1 = NULL;

	//pthread_list_lock_spin();
	pthread_list_lock();
	kwq->kw_iocount--;
	if (wqtype == KSYN_WQTYPE_MUTEXDROP) {
		kwq->kw_dropcount--;
	}
	if (kwq->kw_iocount == 0) {
		if ((kwq->kw_pflags & KSYN_WQ_WAITING) != 0) {
			/* someone is waiting for the waitqueue, wake them up */
			kwq->kw_pflags &= ~KSYN_WQ_WAITING;
			wakeup(&kwq->kw_pflags);
		}

		if ((kwq->kw_pre_rwwc == 0) && (kwq->kw_inqueue == 0) && (kwq->kw_pre_intrcount == 0)) {
			if (qfreenow == 0) {
				/* mark for deferred free */
				microuptime(&kwq->kw_ts);
				LIST_INSERT_HEAD(&pth_free_list, kwq, kw_list);
				kwq->kw_pflags |= KSYN_WQ_FLIST;
				sched = 1;
			} else {
				/* remove from the only list it is in, i.e. the hash */
				kwq->kw_pflags &= ~(KSYN_WQ_FLIST | KSYN_WQ_INHASH);
				LIST_REMOVE(kwq, kw_hash);
				lck_mtx_destroy(&kwq->kw_lock, pthread_lck_grp);
				free_elem = kwq;
			}
		}
	}

	if (ckwq != NULL) {
		ckwq->kw_iocount--;
		if (wqtype == KSYN_WQTYPE_MUTEXDROP) {
			ckwq->kw_dropcount--;
		}
		if (ckwq->kw_iocount == 0) {
			if ((ckwq->kw_pflags & KSYN_WQ_WAITING) != 0) {
				/* someone is waiting for the waitqueue, wake them up */
				ckwq->kw_pflags &= ~KSYN_WQ_WAITING;
				wakeup(&ckwq->kw_pflags);
			}

			if ((ckwq->kw_pre_rwwc == 0) && (ckwq->kw_inqueue == 0) && (ckwq->kw_pre_intrcount == 0)) {
				if (qfreenow == 0) {
					/* mark for free if we can */
					microuptime(&ckwq->kw_ts);
					LIST_INSERT_HEAD(&pth_free_list, ckwq, kw_list);
					ckwq->kw_pflags |= KSYN_WQ_FLIST;
					sched = 1;
				} else {
					/* remove from the only list it is in, i.e. the hash */
					ckwq->kw_pflags &= ~(KSYN_WQ_FLIST | KSYN_WQ_INHASH);
					LIST_REMOVE(ckwq, kw_hash);
					lck_mtx_destroy(&ckwq->kw_lock, pthread_lck_grp);
					free_elem1 = ckwq;
				}
			}
		}
	}

	if (sched == 1 && psynch_cleanupset == 0) {
		psynch_cleanupset = 1;
		microuptime(&t);
		t.tv_sec += KSYN_CLEANUP_DEADLINE;
		deadline = tvtoabstime(&t);
		thread_call_enter_delayed(psynch_thcall, deadline);
	}
	pthread_list_unlock();
	if (free_elem != NULL)
		zfree(kwq_zone, free_elem);
	if (free_elem1 != NULL)
		zfree(kwq_zone, free_elem1);
}
/* responsible to free the waitqueues */
void
psynch_wq_cleanup(__unused void * param, __unused void * param1)
{
	ksyn_wait_queue_t kwq;
	struct timeval t;
	LIST_HEAD(, ksyn_wait_queue) freelist = {NULL};
	int count = 0, delayed = 0, diff;
	uint64_t deadline = 0;

	//pthread_list_lock_spin();
	pthread_list_lock();

	num_addedfreekwq = num_infreekwq - num_lastfreekwqcount;
	num_lastfreekwqcount = num_infreekwq;
	microuptime(&t);

	LIST_FOREACH(kwq, &pth_free_list, kw_list) {
		if ((kwq->kw_iocount != 0) || (kwq->kw_pre_rwwc != 0) || (kwq->kw_inqueue != 0) || (kwq->kw_pre_intrcount != 0)) {
			/* still in use */
			continue;
		}
		diff = t.tv_sec - kwq->kw_ts.tv_sec;
		if (diff < 0)
			diff *= -1;
		if (diff >= KSYN_CLEANUP_DEADLINE) {
			/* out of the hash and the free list */
			kwq->kw_pflags &= ~(KSYN_WQ_FLIST | KSYN_WQ_INHASH);
			num_infreekwq--;
			LIST_REMOVE(kwq, kw_hash);
			LIST_REMOVE(kwq, kw_list);
			LIST_INSERT_HEAD(&freelist, kwq, kw_list);
			count++;
		} else
			delayed = 1;
	}
	if (delayed != 0) {
		t.tv_sec += KSYN_CLEANUP_DEADLINE;
		deadline = tvtoabstime(&t);
		thread_call_enter_delayed(psynch_thcall, deadline);
		psynch_cleanupset = 1;
	} else
		psynch_cleanupset = 0;

	pthread_list_unlock();

	while ((kwq = LIST_FIRST(&freelist)) != NULL) {
		LIST_REMOVE(kwq, kw_list);
		lck_mtx_destroy(&kwq->kw_lock, pthread_lck_grp);
		zfree(kwq_zone, kwq);
	}
}
#if _PSYNCH_TRACE_
int
ksyn_block_thread_locked(ksyn_wait_queue_t kwq, uint64_t abstime, ksyn_waitq_element_t kwe, int mylog, thread_continue_t continuation, void * parameter)
#else /* _PSYNCH_TRACE_ */
int
ksyn_block_thread_locked(ksyn_wait_queue_t kwq, uint64_t abstime, ksyn_waitq_element_t kwe, __unused int mylog, thread_continue_t continuation, void * parameter)
#endif /* _PSYNCH_TRACE_ */
{
	kern_return_t kret;
	int error = 0;
#if _PSYNCH_TRACE_
	uthread_t uth = NULL;
#endif /* _PSYNCH_TRACE_ */

	kwe->kwe_kwqqueue = (void *)kwq;
	assert_wait_deadline(&kwe->kwe_psynchretval, THREAD_ABORTSAFE, abstime);
	ksyn_wqunlock(kwq);

	if (continuation == THREAD_CONTINUE_NULL)
		kret = thread_block(NULL);
	else
		kret = thread_block_parameter(continuation, parameter);

	switch (kret) {
	case THREAD_TIMED_OUT:
		error = ETIMEDOUT;
		break;
	case THREAD_INTERRUPTED:
		error = EINTR;
		break;
	}
#if _PSYNCH_TRACE_
	uth = current_uthread();
#if defined(__i386__)
	if (mylog != 0)
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_THWAKEUP | DBG_FUNC_NONE, 0xf4f3f2f1, (uint32_t)uth, kret, 0, 0);
#else
	if (mylog != 0)
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_THWAKEUP | DBG_FUNC_NONE, 0xeeeeeeee, kret, error, 0xeeeeeeee, 0);
#endif
#endif /* _PSYNCH_TRACE_ */

	return(error);
}
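
/*
 * The wait channel is the address of kwe_psynchretval itself: the blocked
 * side does assert_wait_deadline on it, and the waker deposits the return
 * bits in kwe_psynchretval before posting thread_wakeup_one on the same
 * address.
 */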
kern_return_t
ksyn_wakeup_thread(__unused ksyn_wait_queue_t kwq, ksyn_waitq_element_t kwe)
{
	kern_return_t kret;
#if _PSYNCH_TRACE_
	uthread_t uth = NULL;
#endif /* _PSYNCH_TRACE_ */

	kret = thread_wakeup_one((caddr_t)&kwe->kwe_psynchretval);

#if __TESTPANICS__
	if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
		panic("ksyn_wakeup_thread: panic waking up thread %x\n", kret);
#endif /* __TESTPANICS__ */
#if _PSYNCH_TRACE_
#if defined(__i386__)
	uth = kwe->kwe_uth;
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_THWAKEUP | DBG_FUNC_NONE, 0xf1f2f3f4, (uint32_t)uth, kret, 0, 0);
#endif
#endif /* _PSYNCH_TRACE_ */

	return(kret);
}
/* find the true shared object/offset for shared mutexes */
int
ksyn_findobj(uint64_t mutex, uint64_t * objectp, uint64_t * offsetp)
{
	vm_page_info_basic_data_t info;
	kern_return_t kret;
	mach_msg_type_number_t count = VM_PAGE_INFO_BASIC_COUNT;

	kret = vm_map_page_info(current_map(), mutex, VM_PAGE_INFO_BASIC,
			(vm_page_info_t)&info, &count);

	if (kret != KERN_SUCCESS)
		return(EINVAL);

	if (objectp != NULL)
		*objectp = (uint64_t)info.object_id;
	if (offsetp != NULL)
		*offsetp = (uint64_t)info.offset;

	return(0);
}
/* lowest of kw_fr, kw_flr, kw_fwr, kw_fywr */
int
kwq_find_rw_lowest(ksyn_wait_queue_t kwq, int flags, uint32_t premgen, int * typep, uint32_t lowest[])
{
	uint32_t kw_fr, kw_flr, kw_fwr, kw_fywr, low;
	int type = 0, lowtype, typenum[4];
	uint32_t numbers[4];
	int count = 0, i;

	if ((kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0)) {
		type |= PTH_RWSHFT_TYPE_READ;
		/* read entries are present */
		if (kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count != 0) {
			kw_fr = kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_firstnum;
			if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) && (is_seqlower(premgen, kw_fr) != 0))
				kw_fr = premgen;
		} else
			kw_fr = premgen;

		lowest[KSYN_QUEUE_READ] = kw_fr;
		numbers[count] = kw_fr;
		typenum[count] = PTH_RW_TYPE_READ;
		count++;
	} else
		lowest[KSYN_QUEUE_READ] = 0;

	if ((kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_LREADLOCK) != 0)) {
		type |= PTH_RWSHFT_TYPE_LREAD;
		/* long read entries are present */
		if (kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count != 0) {
			kw_flr = kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_firstnum;
			if (((flags & KW_UNLOCK_PREPOST_LREADLOCK) != 0) && (is_seqlower(premgen, kw_flr) != 0))
				kw_flr = premgen;
		} else
			kw_flr = premgen;

		lowest[KSYN_QUEUE_LREAD] = kw_flr;
		numbers[count] = kw_flr;
		typenum[count] = PTH_RW_TYPE_LREAD;
		count++;
	} else
		lowest[KSYN_QUEUE_LREAD] = 0;

	if ((kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0)) {
		type |= PTH_RWSHFT_TYPE_WRITE;
		/* write entries are present */
		if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0) {
			kw_fwr = kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_firstnum;
			if (((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0) && (is_seqlower(premgen, kw_fwr) != 0))
				kw_fwr = premgen;
		} else
			kw_fwr = premgen;

		lowest[KSYN_QUEUE_WRITER] = kw_fwr;
		numbers[count] = kw_fwr;
		typenum[count] = PTH_RW_TYPE_WRITE;
		count++;
	} else
		lowest[KSYN_QUEUE_WRITER] = 0;

	if ((kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_YWRLOCK) != 0)) {
		type |= PTH_RWSHFT_TYPE_YWRITE;
		/* yielding write entries are present */
		if (kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0) {
			kw_fywr = kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_firstnum;
			if (((flags & KW_UNLOCK_PREPOST_YWRLOCK) != 0) && (is_seqlower(premgen, kw_fywr) != 0))
				kw_fywr = premgen;
		} else
			kw_fywr = premgen;

		lowest[KSYN_QUEUE_YWRITER] = kw_fywr;
		numbers[count] = kw_fywr;
		typenum[count] = PTH_RW_TYPE_YWRITE;
		count++;
	} else
		lowest[KSYN_QUEUE_YWRITER] = 0;

#if __TESTPANICS__
	if (count == 0)
		panic("nothing in the queue???\n");
#endif /* __TESTPANICS__ */

	low = numbers[0];
	lowtype = typenum[0];
	for (i = 1; i < count; i++) {
		if (is_seqlower(numbers[i], low) != 0) {
			low = numbers[i];
			lowtype = typenum[i];
		}
	}
	type |= lowtype;

	if (typep != 0)
		*typep = type;
	return(0);
}
/* wakeup readers and longreaders up to the writer limits */
int
ksyn_wakeupreaders(ksyn_wait_queue_t kwq, uint32_t limitread, int longreadset, int allreaders, uint32_t updatebits, int * wokenp)
{
	ksyn_waitq_element_t kwe = NULL;
	ksyn_queue_t kq;
	int failedwakeup = 0;
	int numwoken = 0;
	kern_return_t kret = KERN_SUCCESS;
	uint32_t lbits = 0;

	lbits = updatebits;
	if (longreadset != 0) {
		/* clear all read and longreads */
		while ((kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_READ], kwq)) != NULL) {
			kwe->kwe_psynchretval = lbits;
			kwe->kwe_kwqqueue = NULL;
			numwoken++;
			kret = ksyn_wakeup_thread(kwq, kwe);
#if __TESTPANICS__
			if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
				panic("ksyn_wakeupreaders: panic waking up readers\n");
#endif /* __TESTPANICS__ */
			if (kret == KERN_NOT_WAITING) {
				failedwakeup++;
			}
		}
		while ((kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_LREAD], kwq)) != NULL) {
			kwe->kwe_psynchretval = lbits;
			kwe->kwe_kwqqueue = NULL;
			numwoken++;
			kret = ksyn_wakeup_thread(kwq, kwe);
#if __TESTPANICS__
			if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
				panic("ksyn_wakeupreaders: panic waking up lreaders\n");
#endif /* __TESTPANICS__ */
			if (kret == KERN_NOT_WAITING) {
				failedwakeup++;
			}
		}
	} else {
		kq = &kwq->kw_ksynqueues[KSYN_QUEUE_READ];
		while ((kq->ksynq_count != 0) && (allreaders || (is_seqlower(kq->ksynq_firstnum, limitread) != 0))) {
			kwe = ksyn_queue_removefirst(kq, kwq);
			kwe->kwe_psynchretval = lbits;
			kwe->kwe_kwqqueue = NULL;
			numwoken++;
			kret = ksyn_wakeup_thread(kwq, kwe);
#if __TESTPANICS__
			if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
				panic("ksyn_wakeupreaders: panic waking up readers\n");
#endif /* __TESTPANICS__ */
			if (kret == KERN_NOT_WAITING) {
				failedwakeup++;
			}
		}
	}

	if (wokenp != NULL)
		*wokenp = numwoken;
	return(failedwakeup);
}
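
/*
 * The updatebits word handed back to userland packs the whole grant: the
 * number of newly admitted threads goes in the upper bits (PTHRW_INC per
 * thread, i.e. shifted by PTHRW_COUNT_SHIFT) and the low bits carry state
 * flags (E/K for an exclusive grant; W, Y and L for writers, yielding
 * writers and long readers still pending).
 */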
/* This handles the unlock grants for next set on rw_unlock() or on arrival of all preposted waiters */
int
kwq_handle_unlock(ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t rw_wc, uint32_t * updatep, int flags, int * blockp, uint32_t premgen)
{
	uint32_t low_reader, low_writer, low_ywriter, low_lreader, limitrdnum;
	int rwtype, error = 0;
	int longreadset = 0, allreaders = 0, failed;
	uint32_t updatebits = 0, numneeded = 0;
	int prepost = flags & KW_UNLOCK_PREPOST;
	thread_t preth = THREAD_NULL;
	ksyn_waitq_element_t kwe;
	uthread_t uth;
	thread_t th;
	int woken = 0;
	int block = 1;
	uint32_t lowest[KSYN_QUEUE_MAX];	/* no need for upgrade as it is handled separately */
	kern_return_t kret = KERN_SUCCESS;
	ksyn_queue_t kq;
	int curthreturns = 0;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_START, (uint32_t)kwq->kw_addr, mgen, premgen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
	if (prepost != 0)
		preth = current_thread();

	kq = &kwq->kw_ksynqueues[KSYN_QUEUE_READ];
	kwq->kw_lastseqword = rw_wc;
	kwq->kw_lastunlockseq = (rw_wc & PTHRW_COUNT_MASK);
	kwq->kw_overlapwatch = 0;

	/* upgrade pending */
	if (is_rw_ubit_set(mgen)) {
#if __TESTPANICS__
		panic("NO UBIT SHOULD BE SET\n");
#endif /* __TESTPANICS__ */
		updatebits = PTH_RWL_EBIT | PTH_RWL_KBIT;
		if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0)
			updatebits |= PTH_RWL_WBIT;
		if (kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0)
			updatebits |= PTH_RWL_YBIT;

		if ((flags & KW_UNLOCK_PREPOST_UPGRADE) != 0) {
			/* upgrade thread calling the prepost */
			/* upgrade granted */
			block = 0;
			goto out;
		}

		if (kwq->kw_ksynqueues[KSYN_QUEUE_UPGRADE].ksynq_count > 0) {
			kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_UPGRADE], kwq);

			kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;
			kwe->kwe_psynchretval = updatebits;
			kwe->kwe_kwqqueue = NULL;
			kret = ksyn_wakeup_thread(kwq, kwe);
			if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
				panic("kwq_handle_unlock: panic waking up the upgrade thread \n");
			if (kret == KERN_NOT_WAITING) {
				kwq->kw_pre_intrcount = 1;	/* actually a count */
				kwq->kw_pre_intrseq = mgen;
				kwq->kw_pre_intrretbits = kwe->kwe_psynchretval;
				kwq->kw_pre_intrtype = PTH_RW_TYPE_UPGRADE;
			}
			error = 0;
		} else {
			panic("panic unable to find the upgrade thread\n");
		}
		goto out;
	}

	error = kwq_find_rw_lowest(kwq, flags, premgen, &rwtype, lowest);
#if __TESTPANICS__
	if (error != 0)
		panic("rwunlock: failed to slot next round of threads");
#endif /* __TESTPANICS__ */

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 1, rwtype, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	low_reader = lowest[KSYN_QUEUE_READ];
	low_lreader = lowest[KSYN_QUEUE_LREAD];
	low_writer = lowest[KSYN_QUEUE_WRITER];
	low_ywriter = lowest[KSYN_QUEUE_YWRITER];

	limitrdnum = 0;

	switch (rwtype & PTH_RW_TYPE_MASK) {
	case PTH_RW_TYPE_LREAD:
		longreadset = 1;
		/* FALLTHROUGH */
	case PTH_RW_TYPE_READ: {
		/* what about the preflight which is LREAD or READ ?? */
		if ((rwtype & PTH_RWSHFT_TYPE_MASK) != 0) {
			if (rwtype & PTH_RWSHFT_TYPE_WRITE)
				updatebits |= (PTH_RWL_WBIT | PTH_RWL_KBIT);
			if (rwtype & PTH_RWSHFT_TYPE_YWRITE)
				updatebits |= PTH_RWL_YBIT;
		}
		if (longreadset == 0) {
			switch (rwtype & (PTH_RWSHFT_TYPE_WRITE | PTH_RWSHFT_TYPE_YWRITE)) {
			case PTH_RWSHFT_TYPE_WRITE:
				limitrdnum = low_writer;
				if (((rwtype & PTH_RWSHFT_TYPE_LREAD) != 0) &&
					(is_seqlower(low_lreader, limitrdnum) != 0)) {
					longreadset = 1;
				}
				if (((flags & KW_UNLOCK_PREPOST_LREADLOCK) != 0) &&
					(is_seqlower(premgen, limitrdnum) != 0)) {
					longreadset = 1;
				}
				break;
			case PTH_RWSHFT_TYPE_YWRITE:
				/* low ywriter can still be ahead of low reader */
				limitrdnum = low_ywriter;
				if (((rwtype & PTH_RWSHFT_TYPE_LREAD) != 0) &&
					(is_seqlower(low_lreader, low_ywriter) != 0)) {
					longreadset = 1;
				}
				if (((flags & KW_UNLOCK_PREPOST_LREADLOCK) != 0) &&
					(is_seqlower(premgen, low_ywriter) != 0)) {
					longreadset = 1;
				}
				break;
			case (PTH_RWSHFT_TYPE_WRITE | PTH_RWSHFT_TYPE_YWRITE):
				if (is_seqlower(low_ywriter, low_writer) != 0)
					limitrdnum = low_ywriter;
				else
					limitrdnum = low_writer;
				if (((rwtype & PTH_RWSHFT_TYPE_LREAD) != 0) &&
					(is_seqlower(low_lreader, limitrdnum) != 0)) {
					longreadset = 1;
				}
				if (((flags & KW_UNLOCK_PREPOST_LREADLOCK) != 0) &&
					(is_seqlower(premgen, limitrdnum) != 0)) {
					longreadset = 1;
				}
				break;
			default: /* no writers at all */
				if ((rwtype & PTH_RWSHFT_TYPE_LREAD) != 0)
					longreadset = 1;
				else
					allreaders = 1;
				break;
			};
		}

		numneeded = 0;
		if (longreadset != 0) {
			updatebits |= PTH_RWL_LBIT;
			updatebits &= ~PTH_RWL_KBIT;
			if ((flags & (KW_UNLOCK_PREPOST_READLOCK | KW_UNLOCK_PREPOST_LREADLOCK)) != 0)
				numneeded += 1;
			numneeded += kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count;
			numneeded += kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count;
			updatebits += (numneeded << PTHRW_COUNT_SHIFT);
			kwq->kw_overlapwatch = 1;
		} else {
			/* no longread, evaluate number of readers */
			switch (rwtype & (PTH_RWSHFT_TYPE_WRITE | PTH_RWSHFT_TYPE_YWRITE)) {
			case PTH_RWSHFT_TYPE_WRITE:
				limitrdnum = low_writer;
				numneeded = ksyn_queue_count_tolowest(kq, limitrdnum);
				if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) && (is_seqlower(premgen, limitrdnum) != 0)) {
					curthreturns = 1;
					numneeded += 1;
				}
				break;
			case PTH_RWSHFT_TYPE_YWRITE:
				/* all readers go ahead of the yielding writer */
				numneeded += kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count;
				if ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) {
					curthreturns = 1;
					numneeded += 1;
				}
				break;
			case (PTH_RWSHFT_TYPE_WRITE | PTH_RWSHFT_TYPE_YWRITE):
				limitrdnum = low_writer;
				numneeded = ksyn_queue_count_tolowest(kq, limitrdnum);
				if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) && (is_seqlower(premgen, limitrdnum) != 0)) {
					curthreturns = 1;
					numneeded += 1;
				}
				break;
			default: /* no writers at all */
				/* no other waiters, only readers */
				kwq->kw_overlapwatch = 1;
				numneeded += kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count;
				if ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) {
					curthreturns = 1;
					numneeded += 1;
				}
			};

			updatebits += (numneeded << PTHRW_COUNT_SHIFT);
		}
		kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;

		if (curthreturns != 0) {
			block = 0;
			uth = current_uthread();
			kwe = &uth->uu_kwe;
			kwe->kwe_psynchretval = updatebits;
		}

		failed = ksyn_wakeupreaders(kwq, limitrdnum, longreadset, allreaders, updatebits, &woken);
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 2, woken, failed, 0);
#endif /* _PSYNCH_TRACE_ */

		if (failed != 0) {
			kwq->kw_pre_intrcount = failed;	/* actually a count */
			kwq->kw_pre_intrseq = limitrdnum;
			kwq->kw_pre_intrretbits = updatebits;
			if (longreadset)
				kwq->kw_pre_intrtype = PTH_RW_TYPE_LREAD;
			else
				kwq->kw_pre_intrtype = PTH_RW_TYPE_READ;
		}

		error = 0;

#if __TESTPANICS__
		if ((kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0) && ((updatebits & PTH_RWL_WBIT) == 0))
			panic("kwq_handle_unlock: writer pending but no writebit set %x\n", updatebits);
#endif /* __TESTPANICS__ */
	}
	break;

	case PTH_RW_TYPE_WRITE: {

		/* only one thread is going to be granted */
		updatebits |= (PTHRW_INC);
		updatebits |= PTH_RWL_KBIT | PTH_RWL_EBIT;

		if (((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0) && (low_writer == premgen)) {
			block = 0;
			if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0)
				updatebits |= PTH_RWL_WBIT;
			if ((rwtype & PTH_RWSHFT_TYPE_YWRITE) != 0)
				updatebits |= PTH_RWL_YBIT;
			th = preth;
			uth = get_bsdthread_info(th);
			kwe = &uth->uu_kwe;
			kwe->kwe_psynchretval = updatebits;
		} else {
			/* we are not granting writelock to the preposting thread */
			kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwq);

			/* if there are writers present or the preposting write thread then W bit is to be set */
			if ((kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0))
				updatebits |= PTH_RWL_WBIT;
			if ((rwtype & PTH_RWSHFT_TYPE_YWRITE) != 0)
				updatebits |= PTH_RWL_YBIT;
			kwe->kwe_psynchretval = updatebits;
			kwe->kwe_kwqqueue = NULL;
			/* setup next in the queue */
			kret = ksyn_wakeup_thread(kwq, kwe);
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 3, kret, 0, 0);
#endif /* _PSYNCH_TRACE_ */
#if __TESTPANICS__
			if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
				panic("kwq_handle_unlock: panic waking up writer\n");
#endif /* __TESTPANICS__ */
			if (kret == KERN_NOT_WAITING) {
				kwq->kw_pre_intrcount = 1;	/* actually a count */
				kwq->kw_pre_intrseq = low_writer;
				kwq->kw_pre_intrretbits = updatebits;
				kwq->kw_pre_intrtype = PTH_RW_TYPE_WRITE;
			}
			error = 0;
		}
		kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;
#if __TESTPANICS__
		if ((updatebits & (PTH_RWL_KBIT | PTH_RWL_EBIT)) != (PTH_RWL_KBIT | PTH_RWL_EBIT))
			panic("kwq_handle_unlock: writer lock granted but no ke set %x\n", updatebits);
#endif /* __TESTPANICS__ */
	}
	break;

	case PTH_RW_TYPE_YWRITE: {
		/* can reader locks be granted ahead of this write? */
		if ((rwtype & PTH_RWSHFT_TYPE_READ) != 0) {
			if ((rwtype & PTH_RWSHFT_TYPE_MASK) != 0) {
				if (rwtype & PTH_RWSHFT_TYPE_WRITE)
					updatebits |= (PTH_RWL_WBIT | PTH_RWL_KBIT);
				if (rwtype & PTH_RWSHFT_TYPE_YWRITE)
					updatebits |= PTH_RWL_YBIT;
			}

			if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0) {
				/* is lowest reader less than the low writer? */
				if (is_seqlower(low_reader, low_writer) == 0)
					goto yielditis;

				numneeded = ksyn_queue_count_tolowest(kq, low_writer);
				updatebits += (numneeded << PTHRW_COUNT_SHIFT);
				if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) && (is_seqlower(premgen, low_writer) != 0)) {
					uth = current_uthread();
					kwe = &uth->uu_kwe;
					/* add the preposting reader to the grant count */
					updatebits += PTHRW_INC;
					kwe->kwe_psynchretval = updatebits;
					block = 0;
				}

				kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;

				/* there will be readers to wakeup, no need to check for woken */
				failed = ksyn_wakeupreaders(kwq, low_writer, 0, 0, updatebits, NULL);
#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 2, woken, failed, 0);
#endif /* _PSYNCH_TRACE_ */
				if (failed != 0) {
					kwq->kw_pre_intrcount = failed;	/* actually a count */
					kwq->kw_pre_intrseq = low_writer;
					kwq->kw_pre_intrretbits = updatebits;
					kwq->kw_pre_intrtype = PTH_RW_TYPE_READ;
				}
				error = 0;
			} else {
				/* wakeup all readers */
				numneeded = kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count;
				updatebits += (numneeded << PTHRW_COUNT_SHIFT);
				if ((prepost != 0) && ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0)) {
					uth = current_uthread();
					kwe = &uth->uu_kwe;
					updatebits += PTHRW_INC;
					kwe->kwe_psynchretval = updatebits;
					block = 0;
				}
				kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;
				failed = ksyn_wakeupreaders(kwq, low_writer, 0, 1, updatebits, &woken);
#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 2, woken, failed, 0);
#endif /* _PSYNCH_TRACE_ */
				if (failed != 0) {
					kwq->kw_pre_intrcount = failed;	/* actually a count */
					kwq->kw_pre_intrseq = kwq->kw_highseq;
					kwq->kw_pre_intrretbits = updatebits;
					kwq->kw_pre_intrtype = PTH_RW_TYPE_READ;
				}
				error = 0;
			}
		} else {
yielditis:
			/* no reads, so granting yielding writes */
			updatebits |= PTHRW_INC;
			updatebits |= PTH_RWL_KBIT | PTH_RWL_EBIT;

			if (((flags & KW_UNLOCK_PREPOST_YWRLOCK) != 0) && (low_ywriter == premgen)) {
				/* preposting yielding write thread is being granted exclusive lock */
				block = 0;
				if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0)
					updatebits |= PTH_RWL_WBIT;
				else if (kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0)
					updatebits |= PTH_RWL_YBIT;
				th = preth;
				uth = get_bsdthread_info(th);
				kwe = &uth->uu_kwe;
				kwe->kwe_psynchretval = updatebits;
			} else {
				/* we are granting yield writelock to some other thread */
				kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER], kwq);

				if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0)
					updatebits |= PTH_RWL_WBIT;
				/* if there are ywriters present or the preposting ywrite thread then Y bit is to be set */
				else if ((kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_YWRLOCK) != 0))
					updatebits |= PTH_RWL_YBIT;

				kwe->kwe_psynchretval = updatebits;
				kwe->kwe_kwqqueue = NULL;

				kret = ksyn_wakeup_thread(kwq, kwe);
#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 3, kret, 0, 0);
#endif /* _PSYNCH_TRACE_ */
#if __TESTPANICS__
				if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
					panic("kwq_handle_unlock: panic waking up yielding writer\n");
#endif /* __TESTPANICS__ */
				if (kret == KERN_NOT_WAITING) {
					kwq->kw_pre_intrcount = 1;	/* actually a count */
					kwq->kw_pre_intrseq = low_ywriter;
					kwq->kw_pre_intrretbits = updatebits;
					kwq->kw_pre_intrtype = PTH_RW_TYPE_YWRITE;
				}
				error = 0;
			}
			kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;
		}
	}
	break;

	default:
		panic("rwunlock: invalid type for lock grants");
	};

out:
	if (updatep != NULL)
		*updatep = updatebits;
	if (blockp != NULL)
		*blockp = block;
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_END, (uint32_t)kwq->kw_addr, 0, updatebits, block, 0);
#endif /* _PSYNCH_TRACE_ */
	return(error);
}
int
kwq_handle_overlap(ksyn_wait_queue_t kwq, uint32_t lgenval, __unused uint32_t ugenval, uint32_t rw_wc, uint32_t *updatebitsp, __unused int flags, int * blockp)
{
	uint32_t highword = kwq->kw_nextseqword & PTHRW_COUNT_MASK;
	uint32_t lowword = kwq->kw_lastseqword & PTHRW_COUNT_MASK;
	uint32_t val = 0;
	int withinseq;

	/* overlap is set, so no need to check for valid state for overlap */

	withinseq = ((is_seqlower_eq(rw_wc, highword) != 0) || (is_seqhigher_eq(lowword, rw_wc) != 0));

	if (withinseq != 0) {
		if ((kwq->kw_nextseqword & PTH_RWL_LBIT) == 0) {
			/* if no writers ahead, overlap granted */
			if ((lgenval & PTH_RWL_WBIT) == 0) {
				goto grantoverlap;
			}
		} else {
			/* Lbit is set, and writers ahead does not count */
			goto grantoverlap;
		}
	}

	*blockp = 1;
	return(0);

grantoverlap:
	/* increase the next expected seq by one */
	kwq->kw_nextseqword += PTHRW_INC;
	/* set count by one & bits from the nextseq and add M bit */
	val = PTHRW_INC;
	val |= ((kwq->kw_nextseqword & PTHRW_BIT_MASK) | PTH_RWL_MBIT);
	*updatebitsp = val;
	*blockp = 0;
	return(0);
}
/* handle downgrade actions */
int
kwq_handle_downgrade(ksyn_wait_queue_t kwq, uint32_t mgen, __unused int flags, __unused uint32_t premgen, __unused int * blockp)
{
	uint32_t updatebits, lowriter = 0;
	int longreadset, allreaders, count;

	/* can handle downgrade now */
	updatebits = mgen;

	longreadset = 0;
	allreaders = 0;
	if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count > 0) {
		lowriter = kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_firstnum;
		if (kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count > 0) {
			if (is_seqlower(kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_firstnum, lowriter) != 0)
				longreadset = 1;
		}
	} else {
		allreaders = 1;
		if (kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count > 0) {
			lowriter = kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_firstnum;
			if (kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count > 0) {
				if (is_seqlower(kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_firstnum, lowriter) != 0)
					longreadset = 1;
			}
		}
	}

	count = ksyn_wakeupreaders(kwq, lowriter, longreadset, allreaders, updatebits, NULL);
	if (count != 0) {
		kwq->kw_pre_limrd = count;
		kwq->kw_pre_limrdseq = lowriter;
		kwq->kw_pre_limrdbits = lowriter;
		/* need to handle prepost */
	}
	return(0);
}
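
/*
 * KERN_NOT_WAITING from a wakeup means the chosen thread was already
 * aborted off the wait queue; the grant is parked in the kw_pre_intr*
 * fields so a thread with a covered sequence number can claim it on entry.
 * This is the counterpart of the "handle first the missed wakeups" checks
 * in the wait paths above.
 */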
/************* Indiv queue support routines ************************/
void
ksyn_queue_init(ksyn_queue_t kq)
{
	TAILQ_INIT(&kq->ksynq_kwelist);
	kq->ksynq_count = 0;
	kq->ksynq_firstnum = 0;
	kq->ksynq_lastnum = 0;
}
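
/*
 * SEQFIT insertion keeps a queue sorted by lockseq so the lowest waiter is
 * always first; FIRSTFIT (used for mutexes) appends in arrival order and
 * only maintains the first/last sequence bounds.
 */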
int
ksyn_queue_insert(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t mgen, struct uthread *uth, ksyn_waitq_element_t kwe, int fit)
{
	uint32_t lockseq = mgen & PTHRW_COUNT_MASK;
	ksyn_waitq_element_t q_kwe, r_kwe;
	int res = 0;
	uthread_t nuth = NULL;

	if (kq->ksynq_count == 0) {
		TAILQ_INSERT_HEAD(&kq->ksynq_kwelist, kwe, kwe_list);
		kq->ksynq_firstnum = lockseq;
		kq->ksynq_lastnum = lockseq;
		goto out;
	}

	if (fit == FIRSTFIT) {
		/* TBD: if retry bit is set for mutex, add it to the head */
		/* firstfit, arriving order */
		TAILQ_INSERT_TAIL(&kq->ksynq_kwelist, kwe, kwe_list);
		if (is_seqlower(lockseq, kq->ksynq_firstnum) != 0)
			kq->ksynq_firstnum = lockseq;
		if (is_seqhigher(lockseq, kq->ksynq_lastnum) != 0)
			kq->ksynq_lastnum = lockseq;
		goto out;
	}

	if ((lockseq == kq->ksynq_firstnum) || (lockseq == kq->ksynq_lastnum)) {
		/* During prepost when a thread is getting cancelled, we could have two with same seq */
		if (kwe->kwe_flags == KWE_THREAD_PREPOST) {
			q_kwe = ksyn_queue_find_seq(kwq, kq, lockseq, 0);
			if ((q_kwe != NULL) && ((nuth = (uthread_t)q_kwe->kwe_uth) != NULL) &&
			    ((nuth->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL)) {
				TAILQ_INSERT_TAIL(&kq->ksynq_kwelist, kwe, kwe_list);
				goto out;
			} else {
				__FAILEDUSERTEST__("ksyn_queue_insert: two threads with same lockseq ");
				res = EBUSY;
				goto out1;
			}
		} else {
			__FAILEDUSERTEST__("ksyn_queue_insert: two threads with same lockseq ");
			res = EBUSY;
			goto out1;
		}
	}

	/* check for next seq one */
	if (is_seqlower(kq->ksynq_lastnum, lockseq) != 0) {
		TAILQ_INSERT_TAIL(&kq->ksynq_kwelist, kwe, kwe_list);
		kq->ksynq_lastnum = lockseq;
		goto out;
	}

	if (is_seqlower(lockseq, kq->ksynq_firstnum) != 0) {
		TAILQ_INSERT_HEAD(&kq->ksynq_kwelist, kwe, kwe_list);
		kq->ksynq_firstnum = lockseq;
		goto out;
	}

	/* goto slow insert mode */
	TAILQ_FOREACH_SAFE(q_kwe, &kq->ksynq_kwelist, kwe_list, r_kwe) {
		if (is_seqhigher(q_kwe->kwe_lockseq, lockseq) != 0) {
			TAILQ_INSERT_BEFORE(q_kwe, kwe, kwe_list);
			goto out;
		}
	}

#if __TESTPANICS__
	panic("failed to insert \n");
#endif /* __TESTPANICS__ */

out:
	if (uth != NULL)
		kwe->kwe_uth = uth;
	kq->ksynq_count++;
	kwq->kw_inqueue++;
	update_low_high(kwq, lockseq);
out1:
	return(res);
}
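
/*
 * Illustrative example (hypothetical sequence numbers): with SEQFIT, inserting
 * lockseqs 0x300, 0x100, 0x200 into an empty queue yields the sorted order
 * 0x100, 0x200, 0x300, so wakeups proceed in sequence order. With FIRSTFIT the
 * list keeps arrival order (0x300, 0x100, 0x200) and only the first/last
 * sequence bounds are maintained.
 */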
ksyn_waitq_element_t
ksyn_queue_removefirst(ksyn_queue_t kq, ksyn_wait_queue_t kwq)
{
	ksyn_waitq_element_t kwe = NULL;
	ksyn_waitq_element_t q_kwe;
	uint32_t curseq;

	if (kq->ksynq_count != 0) {
		kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
		TAILQ_REMOVE(&kq->ksynq_kwelist, kwe, kwe_list);
		curseq = kwe->kwe_lockseq & PTHRW_COUNT_MASK;
		kq->ksynq_count--;
		kwq->kw_inqueue--;

		if (kq->ksynq_count != 0) {
			q_kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
			kq->ksynq_firstnum = (q_kwe->kwe_lockseq & PTHRW_COUNT_MASK);
		} else {
			kq->ksynq_firstnum = 0;
			kq->ksynq_lastnum = 0;
		}
		if (kwq->kw_inqueue == 0) {
			kwq->kw_lowseq = 0;
			kwq->kw_highseq = 0;
		} else {
			if (kwq->kw_lowseq == curseq)
				kwq->kw_lowseq = find_nextlowseq(kwq);
			if (kwq->kw_highseq == curseq)
				kwq->kw_highseq = find_nexthighseq(kwq);
		}
	}
	return(kwe);
}
void
ksyn_queue_removeitem(ksyn_wait_queue_t kwq, ksyn_queue_t kq, ksyn_waitq_element_t kwe)
{
	ksyn_waitq_element_t q_kwe;
	uint32_t curseq;

	if (kq->ksynq_count > 0) {
		TAILQ_REMOVE(&kq->ksynq_kwelist, kwe, kwe_list);
		kq->ksynq_count--;
		if (kq->ksynq_count != 0) {
			q_kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
			kq->ksynq_firstnum = (q_kwe->kwe_lockseq & PTHRW_COUNT_MASK);
			q_kwe = TAILQ_LAST(&kq->ksynq_kwelist, ksynq_kwelist_head);
			kq->ksynq_lastnum = (q_kwe->kwe_lockseq & PTHRW_COUNT_MASK);
		} else {
			kq->ksynq_firstnum = 0;
			kq->ksynq_lastnum = 0;
		}
		kwq->kw_inqueue--;

		curseq = kwe->kwe_lockseq & PTHRW_COUNT_MASK;
		if (kwq->kw_inqueue == 0) {
			kwq->kw_lowseq = 0;
			kwq->kw_highseq = 0;
		} else {
			if (kwq->kw_lowseq == curseq)
				kwq->kw_lowseq = find_nextlowseq(kwq);
			if (kwq->kw_highseq == curseq)
				kwq->kw_highseq = find_nexthighseq(kwq);
		}
	}
}
/* find the entry at the given sequence and, if requested, remove it from the queue */
ksyn_waitq_element_t
ksyn_queue_find_seq(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t seq, int remove)
{
	ksyn_waitq_element_t q_kwe, r_kwe;

	/* TBD: bail out if higher seq is seen */
	/* case where wrap in the tail of the queue exists */
	TAILQ_FOREACH_SAFE(q_kwe, &kq->ksynq_kwelist, kwe_list, r_kwe) {
		if ((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK) == seq) {
			if (remove != 0)
				ksyn_queue_removeitem(kwq, kq, q_kwe);
			return(q_kwe);
		}
	}
	return(NULL);
}
/* find the thread at the target sequence (or a broadcast/prepost at or above) */
ksyn_waitq_element_t
ksyn_queue_find_cvpreposeq(ksyn_queue_t kq, uint32_t cgen)
{
	ksyn_waitq_element_t q_kwe, r_kwe;
	uint32_t lgen = (cgen & PTHRW_COUNT_MASK);

	/* case where wrap in the tail of the queue exists */
	TAILQ_FOREACH_SAFE(q_kwe, &kq->ksynq_kwelist, kwe_list, r_kwe) {

		/* skip the lower entries */
		if (is_seqlower((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK), cgen) != 0)
			continue;

		switch (q_kwe->kwe_flags) {

		case KWE_THREAD_INWAIT:
			if ((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK) != lgen)
				break;
			/* fall thru */

		case KWE_THREAD_BROADCAST:
		case KWE_THREAD_PREPOST:
			return(q_kwe);
		}
	}
	return(NULL);
}
/* look for a thread at or below uptoseq, preferring an exact match at signalseq */
ksyn_waitq_element_t
ksyn_queue_find_signalseq(__unused ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t uptoseq, uint32_t signalseq)
{
	ksyn_waitq_element_t q_kwe, r_kwe, t_kwe = NULL;

	/* case where wrap in the tail of the queue exists */
	TAILQ_FOREACH_SAFE(q_kwe, &kq->ksynq_kwelist, kwe_list, r_kwe) {

		switch (q_kwe->kwe_flags) {

		case KWE_THREAD_PREPOST:
			if (is_seqhigher((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK), uptoseq))
				return(t_kwe);
			/* fall thru */

		case KWE_THREAD_BROADCAST:
			/* match any prepost at our same uptoseq or any broadcast above */
			if (is_seqlower((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK), uptoseq))
				continue;
			return(q_kwe);

		case KWE_THREAD_INWAIT:
			/*
			 * Match any (non-cancelled) thread at or below our upto sequence -
			 * but prefer an exact match to our signal sequence (if present) to
			 * keep exact matches happening.
			 */
			if (is_seqhigher((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK), uptoseq))
				return(t_kwe);

			if (q_kwe->kwe_kwqqueue == kwq) {
				uthread_t ut = q_kwe->kwe_uth;
				if ((ut->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) != UT_CANCEL) {
					/* if equal or higher than our signal sequence, return this one */
					if (is_seqhigher_eq((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK), signalseq))
						return(q_kwe);

					/* otherwise, just remember this eligible thread and move on */
					if (t_kwe == NULL)
						t_kwe = q_kwe;
				}
			}
			break;

		default:
			panic("ksyn_queue_find_signalseq(): unknown wait queue element type (%d)\n", q_kwe->kwe_flags);
			break;
		}
	}
	return(t_kwe);
}
void
ksyn_queue_move_tofree(ksyn_wait_queue_t ckwq, ksyn_queue_t kq, uint32_t upto, ksyn_queue_t kfreeq, int all, int release)
{
	ksyn_waitq_element_t kwe;
	int count = 0;
	uint32_t tseq = upto & PTHRW_COUNT_MASK;
#if _PSYNCH_TRACE_
	uthread_t ut;
#endif /* _PSYNCH_TRACE_ */

	ksyn_queue_init(kfreeq);

	/* free all the entries, must be only fakes.. */
	kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
	while (kwe != NULL) {
		if ((all == 0) && (is_seqhigher((kwe->kwe_lockseq & PTHRW_COUNT_MASK), tseq) != 0))
			break;
		if (kwe->kwe_flags == KWE_THREAD_INWAIT) {
			/*
			 * This scenario is typically noticed when the cvar is
			 * reinited and the new waiters are waiting. We can
			 * return them as spurious wait so the cvar state gets
			 * reset correctly.
			 */
#if _PSYNCH_TRACE_
			ut = (uthread_t)kwe->kwe_uth;
#endif /* _PSYNCH_TRACE_ */

			/* skip canceled ones */
			/* wake the rest */
			ksyn_queue_removeitem(ckwq, kq, kwe);
			/* set M bit to indicate to waking CV to return Inc val */
			kwe->kwe_psynchretval = PTHRW_INC | (PTH_RWS_CV_MBIT | PTH_RWL_MTX_WAIT);
			kwe->kwe_kwqqueue = NULL;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVHBROAD | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xcafecaf3, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
#endif /* _PSYNCH_TRACE_ */
			(void)ksyn_wakeup_thread(ckwq, kwe);
		} else {
			ksyn_queue_removeitem(ckwq, kq, kwe);
			TAILQ_INSERT_TAIL(&kfreeq->ksynq_kwelist, kwe, kwe_list);
			ckwq->kw_fakecount--;
			count++;
		}
		kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
	}

	if ((release != 0) && (count != 0)) {
		kwe = TAILQ_FIRST(&kfreeq->ksynq_kwelist);
		while (kwe != NULL) {
			TAILQ_REMOVE(&kfreeq->ksynq_kwelist, kwe, kwe_list);
			zfree(kwe_zone, kwe);
			kwe = TAILQ_FIRST(&kfreeq->ksynq_kwelist);
		}
	}
}
/*************************************************************************/

void
update_low_high(ksyn_wait_queue_t kwq, uint32_t lockseq)
{
	if (kwq->kw_inqueue == 1) {
		kwq->kw_lowseq = lockseq;
		kwq->kw_highseq = lockseq;
	} else {
		if (is_seqlower(lockseq, kwq->kw_lowseq) != 0)
			kwq->kw_lowseq = lockseq;
		if (is_seqhigher(lockseq, kwq->kw_highseq) != 0)
			kwq->kw_highseq = lockseq;
	}
}
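
/*
 * Example (values hypothetical): with kw_lowseq = 0x200 and kw_highseq =
 * 0x400, inserting lockseq 0x500 only moves kw_highseq to 0x500; inserting
 * 0x100 only moves kw_lowseq. The comparisons go through is_seqlower()/
 * is_seqhigher(), so the window tracks correctly across sequence wraparound.
 */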
uint32_t
find_nextlowseq(ksyn_wait_queue_t kwq)
{
	uint32_t numbers[4];
	int count = 0, i;
	uint32_t lowest;

	for (i = 0; i < KSYN_QUEUE_MAX; i++) {
		if (kwq->kw_ksynqueues[i].ksynq_count != 0) {
			numbers[count] = kwq->kw_ksynqueues[i].ksynq_firstnum;
			count++;
		}
	}
	if (count == 0)
		return(0);

	lowest = numbers[0];
	for (i = 1; i < count; i++) {
		if (is_seqlower(numbers[i], lowest) != 0)
			lowest = numbers[i];
	}
	return(lowest);
}
uint32_t
find_nexthighseq(ksyn_wait_queue_t kwq)
{
	uint32_t numbers[4];
	int count = 0, i;
	uint32_t highest;

	for (i = 0; i < KSYN_QUEUE_MAX; i++) {
		if (kwq->kw_ksynqueues[i].ksynq_count != 0) {
			numbers[count] = kwq->kw_ksynqueues[i].ksynq_lastnum;
			count++;
		}
	}
	if (count == 0)
		return(0);

	highest = numbers[0];
	for (i = 1; i < count; i++) {
		if (is_seqhigher(numbers[i], highest) != 0)
			highest = numbers[i];
	}
	return(highest);
}
int
is_seqlower(uint32_t x, uint32_t y)
{
	if (x < y) {
		if ((y - x) < (PTHRW_MAX_READERS / 2))
			return(1);
	} else {
		if ((x - y) > (PTHRW_MAX_READERS / 2))
			return(1);
	}
	return(0);
}

int
is_seqlower_eq(uint32_t x, uint32_t y)
{
	if (x == y)
		return(1);
	else
		return(is_seqlower(x, y));
}

int
is_seqhigher(uint32_t x, uint32_t y)
{
	if (x > y) {
		if ((x - y) < (PTHRW_MAX_READERS / 2))
			return(1);
	} else {
		if ((y - x) > (PTHRW_MAX_READERS / 2))
			return(1);
	}
	return(0);
}

int
is_seqhigher_eq(uint32_t x, uint32_t y)
{
	if (x == y)
		return(1);
	else
		return(is_seqhigher(x, y));
}
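
/*
 * Worked example of the windowed comparison (values hypothetical, assuming
 * both arguments are already masked with PTHRW_COUNT_MASK): for
 * x = 0xfffffc00 and y = 0x00000400, x > y numerically, but the backward
 * distance (x - y) exceeds PTHRW_MAX_READERS/2, so is_seqlower(x, y)
 * returns 1: the sequence space wrapped and x is treated as the older value.
 * Equivalently, the forward distance (uint32_t)(y - x) is only 0x800.
 */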
int
find_diff(uint32_t upto, uint32_t lowest)
{
	uint32_t diff;

	if (is_seqlower(upto, lowest) != 0)
		diff = diff_genseq(lowest, upto);
	else
		diff = diff_genseq(upto, lowest);
	diff = (diff >> PTHRW_COUNT_SHIFT);
	return(diff);
}
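
/*
 * Example (hypothetical, assuming diff_genseq() returns the raw generation
 * difference and PTHRW_INC == (1 << PTHRW_COUNT_SHIFT)): for a raw difference
 * of 5 * PTHRW_INC, the shift by PTHRW_COUNT_SHIFT reduces it to 5, i.e. the
 * number of waiter slots between the two sequence numbers.
 */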
int
find_seq_till(ksyn_wait_queue_t kwq, uint32_t upto, uint32_t nwaiters, uint32_t *countp)
{
	int i;
	uint32_t count = 0;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_FSEQTILL | DBG_FUNC_START, 0, 0, upto, nwaiters, 0);
#endif /* _PSYNCH_TRACE_ */

	for (i = 0; i < KSYN_QUEUE_MAX; i++) {
		count += ksyn_queue_count_tolowest(&kwq->kw_ksynqueues[i], upto);
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_FSEQTILL | DBG_FUNC_NONE, 0, 1, i, count, 0);
#endif /* _PSYNCH_TRACE_ */
		if (count >= nwaiters) {
			break;
		}
	}

	if (countp != NULL) {
		*countp = count;
	}
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_FSEQTILL | DBG_FUNC_END, 0, 0, count, nwaiters, 0);
#endif /* _PSYNCH_TRACE_ */
	if (count == 0)
		return(0);
	else if (count >= nwaiters)
		return(1);
	else
		return(0);
}
uint32_t
ksyn_queue_count_tolowest(ksyn_queue_t kq, uint32_t upto)
{
	uint32_t i = 0;
	ksyn_waitq_element_t kwe, newkwe;
	uint32_t curval;

	/* if nothing or the first num is greater than upto, return none */
	if ((kq->ksynq_count == 0) || (is_seqhigher(kq->ksynq_firstnum, upto) != 0))
		return(0);
	if (upto == kq->ksynq_firstnum)
		return(1);

	TAILQ_FOREACH_SAFE(kwe, &kq->ksynq_kwelist, kwe_list, newkwe) {
		curval = (kwe->kwe_lockseq & PTHRW_COUNT_MASK);
		if (upto == curval) {
			i++;
			break;
		} else if (is_seqhigher(curval, upto) != 0) {
			break;
		} else {
			/* seq is lower */
			i++;
		}
	}
	return(i);
}
/* handle the cond broadcast of a cvar; returns (via updatep) the number of woken threads and bits for the syscall return */
void
ksyn_handle_cvbroad(ksyn_wait_queue_t ckwq, uint32_t upto, uint32_t *updatep)
{
	kern_return_t kret;
	ksyn_queue_t kq;
	ksyn_waitq_element_t kwe, newkwe;
	uint32_t updatebits = 0;
	struct ksyn_queue kfreeq;
	uthread_t ut;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVHBROAD | DBG_FUNC_START, 0xcbcbcbc2, upto, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */

	ksyn_queue_init(&kfreeq);
	kq = &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER];

retry:
	TAILQ_FOREACH_SAFE(kwe, &kq->ksynq_kwelist, kwe_list, newkwe) {

		if (is_seqhigher((kwe->kwe_lockseq & PTHRW_COUNT_MASK), upto))	/* outside our range */
			break;

		/* now handle the one we found (inside the range) */
		switch (kwe->kwe_flags) {

		case KWE_THREAD_INWAIT:
			ut = (uthread_t)kwe->kwe_uth;

			/* skip canceled ones */
			if (kwe->kwe_kwqqueue != ckwq ||
			    (ut->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL)
				break;

			/* wake the rest */
			ksyn_queue_removeitem(ckwq, kq, kwe);
			kwe->kwe_psynchretval = PTH_RWL_MTX_WAIT;
			kwe->kwe_kwqqueue = NULL;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVHBROAD | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xcafecaf2, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
#endif /* _PSYNCH_TRACE_ */
			kret = ksyn_wakeup_thread(ckwq, kwe);
#if __TESTPANICS__
			if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
				panic("ksyn_handle_cvbroad: panic waking up waiters\n");
#endif /* __TESTPANICS__ */
			updatebits += PTHRW_INC;
			break;

		case KWE_THREAD_BROADCAST:
		case KWE_THREAD_PREPOST:
			ksyn_queue_removeitem(ckwq, kq, kwe);
			TAILQ_INSERT_TAIL(&kfreeq.ksynq_kwelist, kwe, kwe_list);
			ckwq->kw_fakecount--;
			break;

		default:
			panic("unknown kweflags\n");
			break;
		}
	}

	/* Need to enter a broadcast in the queue (if not already at L == S) */

	if ((ckwq->kw_lword & PTHRW_COUNT_MASK) != (ckwq->kw_sword & PTHRW_COUNT_MASK)) {

		newkwe = TAILQ_FIRST(&kfreeq.ksynq_kwelist);
		if (newkwe == NULL) {
			ksyn_wqunlock(ckwq);
			newkwe = (ksyn_waitq_element_t)zalloc(kwe_zone);
			TAILQ_INSERT_TAIL(&kfreeq.ksynq_kwelist, newkwe, kwe_list);
			ksyn_wqlock(ckwq);
			goto retry;
		}

		TAILQ_REMOVE(&kfreeq.ksynq_kwelist, newkwe, kwe_list);
		bzero(newkwe, sizeof(struct ksyn_waitq_element));
		newkwe->kwe_kwqqueue = ckwq;
		newkwe->kwe_flags = KWE_THREAD_BROADCAST;
		newkwe->kwe_lockseq = upto;
		newkwe->kwe_count = 0;
		newkwe->kwe_uth = NULL;
		newkwe->kwe_psynchretval = 0;

#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVHBROAD | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xfeedfeed, upto, 0, 0);
#endif /* _PSYNCH_TRACE_ */

		(void)ksyn_queue_insert(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], upto, NULL, newkwe, SEQFIT);
		ckwq->kw_fakecount++;
	}

	/* free up any remaining things stumbled across above */
	kwe = TAILQ_FIRST(&kfreeq.ksynq_kwelist);
	while (kwe != NULL) {
		TAILQ_REMOVE(&kfreeq.ksynq_kwelist, kwe, kwe_list);
		zfree(kwe_zone, kwe);
		kwe = TAILQ_FIRST(&kfreeq.ksynq_kwelist);
	}

	if (updatep != NULL)
		*updatep = updatebits;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVHBROAD | DBG_FUNC_END, 0xeeeeeeed, updatebits, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
}
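
/*
 * Note on the allocation dance above: zalloc() can block, so when no recycled
 * element is available on kfreeq the kwq lock is dropped, a fresh element is
 * allocated and parked on kfreeq, and the scan restarts from 'retry' since
 * the queue may have changed while unlocked. This is the usual
 * unlock-allocate-relock pattern; the broadcast entry is only added when
 * L != S, i.e. when not all waiters have been accounted for yet.
 */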
void
ksyn_cvupdate_fixup(ksyn_wait_queue_t ckwq, uint32_t *updatep, ksyn_queue_t kfreeq, int release)
{
	uint32_t updatebits = 0;

	if (updatep != NULL)
		updatebits = *updatep;
	if ((ckwq->kw_lword & PTHRW_COUNT_MASK) == (ckwq->kw_sword & PTHRW_COUNT_MASK)) {
		updatebits |= PTH_RWS_CV_CBIT;
		if (ckwq->kw_inqueue != 0) {
			/* FREE THE QUEUE */
			ksyn_queue_move_tofree(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], ckwq->kw_lword, kfreeq, 0, release);
#if __TESTPANICS__
			if (ckwq->kw_inqueue != 0)
				panic("ksyn_cvupdate_fixup: L == S, but entries in queue beyond S");
#endif /* __TESTPANICS__ */
		}
		ckwq->kw_lword = ckwq->kw_uword = ckwq->kw_sword = 0;
		ckwq->kw_kflags |= KSYN_KWF_ZEROEDOUT;
	} else if ((ckwq->kw_inqueue != 0) && (ckwq->kw_fakecount == ckwq->kw_inqueue)) {
		/* only fake entries are present in the queue */
		updatebits |= PTH_RWS_CV_PBIT;
	}
	if (updatep != NULL)
		*updatep = updatebits;
}
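
/*
 * Informal reading of the bits above: PTH_RWS_CV_CBIT reports that the cvar
 * is fully quiesced (L == S) and its kernel state has been zeroed out;
 * PTH_RWS_CV_PBIT reports that only fake (prepost/broadcast) entries remain
 * queued. Callers fold these into the value returned to userspace via
 * *updatep.
 */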
void
psynch_zoneinit(void)
{
	kwq_zone = (zone_t)zinit(sizeof(struct ksyn_wait_queue), 8192 * sizeof(struct ksyn_wait_queue), 4096, "ksyn_waitqueue zone");
	kwe_zone = (zone_t)zinit(sizeof(struct ksyn_waitq_element), 8192 * sizeof(struct ksyn_waitq_element), 4096, "ksyn_waitq_element zone");
}