/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995-2005 Apple Computer, Inc. All Rights Reserved */
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/systm.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>
#include <sys/pthread_internal.h>

#include <mach/mach_types.h>
#include <mach/vm_prot.h>
#include <mach/semaphore.h>
#include <mach/sync_policy.h>
#include <mach/task.h>
#include <kern/kern_types.h>
#include <kern/task.h>
#include <kern/clock.h>
#include <mach/kern_return.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/thread_call.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/processor.h>
#include <kern/affinity.h>
#include <kern/wait_queue.h>
#include <kern/mach_param.h>
#include <mach/mach_vm.h>
#include <mach/mach_param.h>
#include <mach/thread_policy.h>
#include <mach/message.h>
#include <mach/port.h>
#include <vm/vm_protos.h>
#include <vm/vm_map.h>
#include <mach/vm_region.h>

#include <libkern/OSAtomic.h>

#include <pexpert/pexpert.h>
#define __PSYNCH_DEBUG__ 0		/* debug panic actions */
#if (KDEBUG && STANDARD_KDEBUG)
#define _PSYNCH_TRACE_ 1		/* kdebug trace */
#endif

#define __TESTMODE__ 2		/* 0 - return error on user error conditions */
				/* 1 - log error on user error conditions */
				/* 2 - abort caller on user error conditions */
				/* 3 - panic on user error conditions */
static int __test_panics__;
static int __test_aborts__;
static int __test_prints__;

static inline void __FAILEDUSERTEST__(const char *str)
{
    proc_t p;

    if (__test_panics__ != 0)
        panic(str);

    if (__test_aborts__ != 0 || __test_prints__ != 0)
        p = current_proc();

    if (__test_prints__ != 0)
        printf("PSYNCH: pid[%d]: %s\n", p->p_pid, str);

    if (__test_aborts__ != 0)
        psignal(p, SIGABRT);
}
#if _PSYNCH_TRACE_
#define _PSYNCH_TRACE_MLWAIT	0x9000000
#define _PSYNCH_TRACE_MLDROP	0x9000004
#define _PSYNCH_TRACE_CVWAIT	0x9000008
#define _PSYNCH_TRACE_CVSIGNAL	0x900000c
#define _PSYNCH_TRACE_CVBROAD	0x9000010
#define _PSYNCH_TRACE_KMDROP	0x9000014
#define _PSYNCH_TRACE_RWRDLOCK	0x9000018
#define _PSYNCH_TRACE_RWLRDLOCK	0x900001c
#define _PSYNCH_TRACE_RWWRLOCK	0x9000020
#define _PSYNCH_TRACE_RWYWRLOCK	0x9000024
#define _PSYNCH_TRACE_RWUPGRADE	0x9000028
#define _PSYNCH_TRACE_RWDOWNGRADE	0x900002c
#define _PSYNCH_TRACE_RWUNLOCK	0x9000030
#define _PSYNCH_TRACE_RWUNLOCK2	0x9000034
#define _PSYNCH_TRACE_RWHANDLEU	0x9000038
#define _PSYNCH_TRACE_FSEQTILL	0x9000040
#define _PSYNCH_TRACE_CLRPRE	0x9000044
#define _PSYNCH_TRACE_CVHBROAD	0x9000048
#define _PSYNCH_TRACE_CVSEQ	0x900004c
#define _PSYNCH_TRACE_THWAKEUP	0x9000050
/* user-side trace points */
#define _PSYNCH_TRACE_UM_LOCK	0x9000060
#define _PSYNCH_TRACE_UM_UNLOCK	0x9000064
#define _PSYNCH_TRACE_UM_MHOLD	0x9000068
#define _PSYNCH_TRACE_UM_MDROP	0x900006c
#define _PSYNCH_TRACE_UM_CVWAIT	0x9000070
#define _PSYNCH_TRACE_UM_CVSIG	0x9000074
#define _PSYNCH_TRACE_UM_CVBRD	0x9000078
proc_t pthread_debug_proc = PROC_NULL;
static inline void __PTHREAD_TRACE_DEBUG(uint32_t debugid, uintptr_t arg1,
                                         uintptr_t arg2,
                                         uintptr_t arg3,
                                         uintptr_t arg4,
                                         uintptr_t arg5)
{
    proc_t p = current_proc();

    if ((pthread_debug_proc != NULL) && (p == pthread_debug_proc))
        KERNEL_DEBUG_CONSTANT(debugid, arg1, arg2, arg3, arg4, arg5);
}
#endif /* _PSYNCH_TRACE_ */
#define ECVCERORR	256
#define ECVPERORR	512

lck_mtx_t * pthread_list_mlock;

#define PTHHASH(addr)	(&pthashtbl[(addr) & pthhash])
extern LIST_HEAD(pthhashhead, ksyn_wait_queue) *pth_glob_hashtbl;
struct pthhashhead * pth_glob_hashtbl;
int pthhash;

LIST_HEAD(, ksyn_wait_queue) pth_free_list;
int num_total_kwq = 0;		/* number of kwq in use currently */
int num_infreekwq = 0;		/* number of kwq in free list */
int num_freekwq = 0;		/* number of kwq actually freed from the free list */
int num_reusekwq = 0;		/* number of kwq pulled back for reuse from free list */
int num_addedfreekwq = 0;	/* number of added free kwq from the last instance */
int num_lastfreekwqcount = 0;	/* the free count from the last time */

static int PTH_HASHSIZE = 100;

static zone_t kwq_zone;		/* zone for allocation of ksyn_queue */
static zone_t kwe_zone;		/* zone for allocation of ksyn_waitq_element */

#define SEQFIT 0
#define FIRSTFIT 1

struct ksyn_queue {
    TAILQ_HEAD(ksynq_kwelist_head, ksyn_waitq_element) ksynq_kwelist;
    uint32_t	ksynq_count;		/* number of entries in queue */
    uint32_t	ksynq_firstnum;		/* lowest seq in queue */
    uint32_t	ksynq_lastnum;		/* highest seq in queue */
};
typedef struct ksyn_queue * ksyn_queue_t;

#define KSYN_QUEUE_READ		0
#define KSYN_QUEUE_LREAD	1
#define KSYN_QUEUE_WRITER	2
#define KSYN_QUEUE_YWRITER	3
#define KSYN_QUEUE_UPGRADE	4
#define KSYN_QUEUE_MAX		5
struct ksyn_wait_queue {
    LIST_ENTRY(ksyn_wait_queue) kw_hash;
    LIST_ENTRY(ksyn_wait_queue) kw_list;
    user_addr_t kw_addr;
    uint64_t kw_object;		/* object backing in shared mode */
    uint64_t kw_offset;		/* offset inside the object in shared mode */
    int	kw_flags;		/* mutex, cvar options/flags */
    int	kw_pflags;		/* flags under listlock protection */
    struct timeval kw_ts;	/* timeval needed for upkeep before free */
    int	kw_iocount;		/* inuse reference */
    int	kw_dropcount;		/* current users unlocking... */

    int	kw_type;		/* queue type like mutex, cvar, etc */
    uint32_t kw_inqueue;	/* num of waiters held */
    uint32_t kw_fakecount;	/* number of error/prepost fakes */
    uint32_t kw_highseq;	/* highest seq in the queue */
    uint32_t kw_lowseq;		/* lowest seq in the queue */
    uint32_t kw_lword;		/* L value from userland */
    uint32_t kw_uword;		/* U word value from userland */
    uint32_t kw_sword;		/* S word value from userland */
    uint32_t kw_lastunlockseq;	/* the last seq that unlocked */
/* for CV to be used as the seq kernel has seen so far */
#define kw_cvkernelseq kw_lastunlockseq
    uint32_t kw_lastseqword;	/* the last seq that unlocked */
/* for mutex and cvar we need to track I bit values */
    uint32_t kw_nextseqword;	/* the last seq that unlocked; with num of waiters */
#define kw_initrecv kw_nextseqword	/* number of incoming waiters with Ibit seen so far */
    uint32_t kw_overlapwatch;	/* chance for overlaps */
#define kw_initcount kw_overlapwatch	/* number of incoming waiters with Ibit expected */
    uint32_t kw_initcountseq;	/* highest seq with Ibit on for mutex and cvar */
    uint32_t kw_pre_rwwc;	/* prepost count */
    uint32_t kw_pre_lockseq;	/* prepost target seq */
    uint32_t kw_pre_sseq;	/* prepost target sword, in cvar used for mutexowned */
    uint32_t kw_pre_intrcount;	/* prepost of missed wakeup due to intrs */
    uint32_t kw_pre_intrseq;	/* prepost of missed wakeup limit seq */
    uint32_t kw_pre_intrretbits;	/* return bits value for missed wakeup threads */
    uint32_t kw_pre_intrtype;	/* type of failed wakeups */

    int	kw_kflags;
    struct ksyn_queue kw_ksynqueues[KSYN_QUEUE_MAX];	/* queues to hold threads */
    lck_mtx_t kw_lock;		/* mutex lock protecting this structure */
};
typedef struct ksyn_wait_queue * ksyn_wait_queue_t;
#define PTHRW_INC		0x100
#define PTHRW_BIT_MASK		0x000000ff

#define PTHRW_COUNT_SHIFT	8
#define PTHRW_COUNT_MASK	0xffffff00
#define PTHRW_MAX_READERS	0xffffff00

/* New model bits on Lword */
#define PTH_RWL_KBIT	0x01	/* users cannot acquire in user mode */
#define PTH_RWL_EBIT	0x02	/* exclusive lock in progress */
#define PTH_RWL_WBIT	0x04	/* write waiters pending in kernel */
#define PTH_RWL_PBIT	0x04	/* prepost (cv) pending in kernel */
#define PTH_RWL_YBIT	0x08	/* yielding write waiters pending in kernel */
#define PTH_RWL_RETRYBIT 0x08	/* mutex retry wait */
#define PTH_RWL_LBIT	0x10	/* long read in progress */
#define PTH_RWL_MTXNONE	0x10	/* indicates the cvwait does not have mutex held */
#define PTH_RWL_UBIT	0x20	/* upgrade request pending */
#define PTH_RWL_MTX_WAIT 0x20	/* in cvar in mutex wait */
#define PTH_RWL_RBIT	0x40	/* reader pending in kernel (not used) */
#define PTH_RWL_MBIT	0x40	/* overlapping grants from kernel */
#define PTH_RWL_TRYLKBIT 0x40	/* trylock attempt (mutex only) */
#define PTH_RWL_IBIT	0x80	/* lock reset, held until first successful unlock */

/* UBIT values for mutex, cvar */
#define PTH_RWU_SBIT	0x01
#define PTH_RWU_BBIT	0x02

#define PTHRW_RWL_INIT	PTH_RWL_IBIT	/* reset state on the lock bits (L) */

/* New model bits on Sword */
#define PTH_RWS_SBIT	0x01	/* kernel transition seq not set yet */
#define PTH_RWS_IBIT	0x02	/* Sequence is not set on return from kernel */
#define PTH_RWS_CV_CBIT	PTH_RWS_SBIT	/* kernel has cleared all info w.r.t. CV */
#define PTH_RWS_CV_PBIT	PTH_RWS_IBIT	/* kernel has prepost/fake structs only, no waiters */
#define PTH_RWS_CV_MBIT	PTH_RWL_MBIT	/* to indicate prepost return */
#define PTH_RWS_WSVBIT	0x04	/* save W bit */
#define PTH_RWS_USVBIT	0x08	/* save U bit */
#define PTH_RWS_YSVBIT	0x10	/* save Y bit */
#define PTHRW_RWS_INIT	PTH_RWS_SBIT	/* reset state on the lock bits (S) */
#define PTHRW_RWS_SAVEMASK	(PTH_RWS_WSVBIT|PTH_RWS_USVBIT|PTH_RWS_YSVBIT)	/* save bits mask */
#define PTHRW_SW_Reset_BIT_MASK	0x000000fe	/* remove S bit and get rest of the bits */

#define PTHRW_UN_BIT_MASK	0x000000bf	/* remove overlap bit */

#define PTHREAD_MTX_TID_SWITCHING	(uint64_t)-1

/* new L word defns */
#define is_rwl_readinuser(x)	((((x) & (PTH_RWL_UBIT | PTH_RWL_KBIT)) == 0) || (((x) & PTH_RWL_LBIT) != 0))
#define is_rwl_ebit_set(x)	(((x) & PTH_RWL_EBIT) != 0)
#define is_rwl_lbit_set(x)	(((x) & PTH_RWL_LBIT) != 0)
#define is_rwl_readoverlap(x)	(((x) & PTH_RWL_MBIT) != 0)
#define is_rw_ubit_set(x)	(((x) & PTH_RWL_UBIT) != 0)

/* S word checks */
#define is_rws_setseq(x)	(((x) & PTH_RWS_SBIT))
#define is_rws_setunlockinit(x)	(((x) & PTH_RWS_IBIT))

/* first contended seq that kernel sees */
#define KW_MTXFIRST_KSEQ	0x200
#define KW_CVFIRST_KSEQ		1
#define KW_RWFIRST_KSEQ		0x200
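/*
 * Illustrative note (not from the original source): each of the L, U and S
 * words carries its sequence count in the upper 24 bits (PTHRW_COUNT_MASK)
 * and status bits in the low byte (PTHRW_BIT_MASK); one waiter advances a
 * count by PTHRW_INC. For example, an L word of 0x00000503 encodes a count
 * of 5 ((0x00000500 & PTHRW_COUNT_MASK) >> PTHRW_COUNT_SHIFT) with
 * PTH_RWL_KBIT | PTH_RWL_EBIT set in the low byte.
 */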
int is_seqlower(uint32_t x, uint32_t y);
int is_seqlower_eq(uint32_t x, uint32_t y);
int is_seqhigher(uint32_t x, uint32_t y);
int is_seqhigher_eq(uint32_t x, uint32_t y);
int find_diff(uint32_t upto, uint32_t lowest);

static inline int diff_genseq(uint32_t x, uint32_t y)
{
    if (x > y) {
        return(x - y);
    } else {
        return((PTHRW_MAX_READERS - y) + x + PTHRW_INC);
    }
}
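/*
 * Illustrative example (not from the original source): diff_genseq() is
 * wrap-aware. With y = 0xffffff00 (the highest count value) and x = 0x200,
 * x has wrapped past y, so the result is
 * (PTHRW_MAX_READERS - y) + x + PTHRW_INC = 0x300, i.e. the two sequences
 * are three PTHRW_INC steps apart.
 */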
#define TID_ZERO (uint64_t)0

/* bits needed in handling the rwlock unlock */
#define PTH_RW_TYPE_READ	0x01
#define PTH_RW_TYPE_LREAD	0x02
#define PTH_RW_TYPE_WRITE	0x04
#define PTH_RW_TYPE_YWRITE	0x08
#define PTH_RW_TYPE_UPGRADE	0x10
#define PTH_RW_TYPE_MASK	0xff
#define PTH_RW_TYPE_SHIFT	8

#define PTH_RWSHFT_TYPE_READ	0x0100
#define PTH_RWSHFT_TYPE_LREAD	0x0200
#define PTH_RWSHFT_TYPE_WRITE	0x0400
#define PTH_RWSHFT_TYPE_YWRITE	0x0800
#define PTH_RWSHFT_TYPE_MASK	0xff00

/*
 * Mutex protocol attributes
 */
#define PTHREAD_PRIO_NONE	0
#define PTHREAD_PRIO_INHERIT	1
#define PTHREAD_PRIO_PROTECT	2
#define PTHREAD_PROTOCOL_FLAGS_MASK	0x3

/*
 * Mutex type attributes
 */
#define PTHREAD_MUTEX_NORMAL		0
#define PTHREAD_MUTEX_ERRORCHECK	4
#define PTHREAD_MUTEX_RECURSIVE		8
#define PTHREAD_MUTEX_DEFAULT		PTHREAD_MUTEX_NORMAL
#define PTHREAD_TYPE_FLAGS_MASK		0xc

/*
 * Mutex pshared attributes
 */
#define PTHREAD_PROCESS_SHARED	0x10
#define PTHREAD_PROCESS_PRIVATE	0x20
#define PTHREAD_PSHARED_FLAGS_MASK	0x30

/*
 * Mutex policy attributes
 */
#define _PTHREAD_MUTEX_POLICY_NONE		0
#define _PTHREAD_MUTEX_POLICY_FAIRSHARE		0x040	/* 1 */
#define _PTHREAD_MUTEX_POLICY_FIRSTFIT		0x080	/* 2 */
#define _PTHREAD_MUTEX_POLICY_REALTIME		0x0c0	/* 3 */
#define _PTHREAD_MUTEX_POLICY_ADAPTIVE		0x100	/* 4 */
#define _PTHREAD_MUTEX_POLICY_PRIPROTECT	0x140	/* 5 */
#define _PTHREAD_MUTEX_POLICY_PRIINHERIT	0x180	/* 6 */
#define PTHREAD_POLICY_FLAGS_MASK		0x1c0

#define _PTHREAD_MTX_OPT_HOLDLOCK	0x200
#define _PTHREAD_MTX_OPT_NOMTX		0x400

#define _PTHREAD_MTX_OPT_NOTIFY		0x1000
#define _PTHREAD_MTX_OPT_MUTEX		0x2000	/* this is a mutex type */

#define _PTHREAD_RWLOCK_UPGRADE_TRY	0x10000

/* kw_pflags */
#define KSYN_WQ_INLIST	1
#define KSYN_WQ_INHASH	2
#define KSYN_WQ_SHARED	4
#define KSYN_WQ_WAITING	8	/* threads waiting for this wq to be available */
#define KSYN_WQ_FLIST	0x10	/* in free list to be freed after a short delay */

/* kw_kflags */
#define KSYN_KWF_INITCLEARED	1	/* the init status found and preposts cleared */
#define KSYN_KWF_ZEROEDOUT	2	/* the lword, etc are inited to 0 */

#define KSYN_CLEANUP_DEADLINE 10
int psynch_cleanupset;
thread_call_t psynch_thcall;

#define KSYN_WQTYPE_INWAIT	0x1000
#define KSYN_WQTYPE_INDROP	0x2000
#define KSYN_WQTYPE_MTX		0x1
#define KSYN_WQTYPE_CVAR	0x2
#define KSYN_WQTYPE_RWLOCK	0x4
#define KSYN_WQTYPE_SEMA	0x8
#define KSYN_WQTYPE_BARR	0x10
#define KSYN_WQTYPE_MASK	0x00ff

#define KSYN_MTX_MAX	0x0fffffff
#define KSYN_WQTYPE_MUTEXDROP	(KSYN_WQTYPE_INDROP | KSYN_WQTYPE_MTX)

#define KW_UNLOCK_PREPOST		0x01
#define KW_UNLOCK_PREPOST_UPGRADE	0x02
#define KW_UNLOCK_PREPOST_DOWNGRADE	0x04
#define KW_UNLOCK_PREPOST_READLOCK	0x08
#define KW_UNLOCK_PREPOST_LREADLOCK	0x10
#define KW_UNLOCK_PREPOST_WRLOCK	0x20
#define KW_UNLOCK_PREPOST_YWRLOCK	0x40

#define CLEAR_PREPOST_BITS(kwq) {\
        kwq->kw_pre_lockseq = 0; \
        kwq->kw_pre_sseq = PTHRW_RWS_INIT; \
        kwq->kw_pre_rwwc = 0; \
        }

#define CLEAR_INITCOUNT_BITS(kwq) {\
        kwq->kw_initcount = 0; \
        kwq->kw_initrecv = 0; \
        kwq->kw_initcountseq = 0; \
        }

#define CLEAR_INTR_PREPOST_BITS(kwq) {\
        kwq->kw_pre_intrcount = 0; \
        kwq->kw_pre_intrseq = 0; \
        kwq->kw_pre_intrretbits = 0; \
        kwq->kw_pre_intrtype = 0; \
        }

#define CLEAR_REINIT_BITS(kwq) {\
        if ((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_CVAR) { \
            if ((kwq->kw_inqueue != 0) && (kwq->kw_inqueue != kwq->kw_fakecount)) \
                panic("CV: entries in queue during reinit %d:%d\n", kwq->kw_inqueue, kwq->kw_fakecount); \
        }; \
        if ((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_RWLOCK) { \
            kwq->kw_nextseqword = PTHRW_RWS_INIT; \
            kwq->kw_overlapwatch = 0; \
        }; \
        kwq->kw_pre_lockseq = 0; \
        kwq->kw_pre_rwwc = 0; \
        kwq->kw_pre_sseq = PTHRW_RWS_INIT; \
        kwq->kw_lastunlockseq = PTHRW_RWL_INIT; \
        kwq->kw_lastseqword = PTHRW_RWS_INIT; \
        kwq->kw_pre_intrcount = 0; \
        kwq->kw_pre_intrseq = 0; \
        kwq->kw_pre_intrretbits = 0; \
        kwq->kw_pre_intrtype = 0; \
        kwq->kw_lword = 0; \
        kwq->kw_uword = 0; \
        kwq->kw_sword = PTHRW_RWS_INIT; \
        }
void pthread_list_lock(void);
void pthread_list_unlock(void);
void pthread_list_lock_spin(void);
void pthread_list_lock_convert_spin(void);
void ksyn_wqlock(ksyn_wait_queue_t kwq);
void ksyn_wqunlock(ksyn_wait_queue_t kwq);
ksyn_wait_queue_t ksyn_wq_hash_lookup(user_addr_t mutex, proc_t p, int flags, uint64_t object, uint64_t offset);
int ksyn_wqfind(user_addr_t mutex, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, uint64_t tid, int flags, int wqtype, ksyn_wait_queue_t * wq);
void ksyn_wqrelease(ksyn_wait_queue_t mkwq, ksyn_wait_queue_t ckwq, int qfreenow, int wqtype);
extern int ksyn_findobj(uint64_t mutex, uint64_t * object, uint64_t * offset);
static void UPDATE_CVKWQ(ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, uint64_t tid, int wqtype);
extern thread_t port_name_to_thread(mach_port_name_t port_name);

kern_return_t ksyn_block_thread_locked(ksyn_wait_queue_t kwq, uint64_t abstime, ksyn_waitq_element_t kwe, int log, thread_continue_t continuation, void * parameter);
kern_return_t ksyn_wakeup_thread(ksyn_wait_queue_t kwq, ksyn_waitq_element_t kwe);
void ksyn_freeallkwe(ksyn_queue_t kq);

uint32_t psynch_mutexdrop_internal(ksyn_wait_queue_t kwq, uint32_t lkseq, uint32_t ugen, int flags);
int kwq_handle_unlock(ksyn_wait_queue_t, uint32_t mgen, uint32_t rw_wc, uint32_t * updatep, int flags, int * blockp, uint32_t premgen);

void ksyn_queue_init(ksyn_queue_t kq);
int ksyn_queue_insert(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t mgen, struct uthread * uth, ksyn_waitq_element_t kwe, int firstfit);
ksyn_waitq_element_t ksyn_queue_removefirst(ksyn_queue_t kq, ksyn_wait_queue_t kwq);
void ksyn_queue_removeitem(ksyn_wait_queue_t kwq, ksyn_queue_t kq, ksyn_waitq_element_t kwe);
int ksyn_queue_move_tofree(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t upto, ksyn_queue_t freeq, int all, int release);
void update_low_high(ksyn_wait_queue_t kwq, uint32_t lockseq);
uint32_t find_nextlowseq(ksyn_wait_queue_t kwq);
uint32_t find_nexthighseq(ksyn_wait_queue_t kwq);

int find_seq_till(ksyn_wait_queue_t kwq, uint32_t upto, uint32_t nwaiters, uint32_t *countp);
uint32_t ksyn_queue_count_tolowest(ksyn_queue_t kq, uint32_t upto);

ksyn_waitq_element_t ksyn_queue_find_cvpreposeq(ksyn_queue_t kq, uint32_t cgen);
uint32_t ksyn_queue_cvcount_entries(ksyn_queue_t kq, uint32_t upto, uint32_t from, int * numwaitersp, int * numintrp, int * numprepop);
void ksyn_handle_cvbroad(ksyn_wait_queue_t ckwq, uint32_t upto, uint32_t *updatep);
void ksyn_cvupdate_fixup(ksyn_wait_queue_t ckwq, uint32_t *updatep, ksyn_queue_t kfreeq, int release);
ksyn_waitq_element_t ksyn_queue_find_signalseq(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t toseq, uint32_t lockseq);
ksyn_waitq_element_t ksyn_queue_find_threadseq(ksyn_wait_queue_t ckwq, ksyn_queue_t kq, thread_t th, uint32_t toseq);
void psynch_cvcontinue(void *, wait_result_t);
void psynch_mtxcontinue(void *, wait_result_t);

int ksyn_wakeupreaders(ksyn_wait_queue_t kwq, uint32_t limitread, int longreadset, int allreaders, uint32_t updatebits, int * wokenp);
int kwq_find_rw_lowest(ksyn_wait_queue_t kwq, int flags, uint32_t premgen, int * type, uint32_t lowest[]);
ksyn_waitq_element_t ksyn_queue_find_seq(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t seq, int remove);
int kwq_handle_overlap(ksyn_wait_queue_t kwq, uint32_t lgenval, uint32_t ugenval, uint32_t rw_wc, uint32_t *updatebitsp, int flags, int * blockp);
int kwq_handle_downgrade(ksyn_wait_queue_t kwq, uint32_t mgen, int flags, uint32_t premgen, int * blockp);
static void
UPDATE_CVKWQ(ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, __unused uint64_t tid, __unused int wqtype)
{
    if ((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_CVAR) {
        if ((kwq->kw_kflags & KSYN_KWF_ZEROEDOUT) != 0) {
            /* the values of L, U and S are cleared out due to L == S in previous transition */
            kwq->kw_lword = mgen;
            kwq->kw_uword = ugen;
            kwq->kw_sword = rw_wc;
            kwq->kw_kflags &= ~KSYN_KWF_ZEROEDOUT;
        } else {
            if (is_seqhigher((mgen & PTHRW_COUNT_MASK), (kwq->kw_lword & PTHRW_COUNT_MASK)) != 0)
                kwq->kw_lword = mgen;
            if (is_seqhigher((ugen & PTHRW_COUNT_MASK), (kwq->kw_uword & PTHRW_COUNT_MASK)) != 0)
                kwq->kw_uword = ugen;
            if ((rw_wc & PTH_RWS_CV_CBIT) != 0) {
                if (is_seqlower(kwq->kw_cvkernelseq, (rw_wc & PTHRW_COUNT_MASK)) != 0) {
                    kwq->kw_cvkernelseq = (rw_wc & PTHRW_COUNT_MASK);
                }
                if (is_seqhigher((rw_wc & PTHRW_COUNT_MASK), (kwq->kw_sword & PTHRW_COUNT_MASK)) != 0)
                    kwq->kw_sword = rw_wc;
            }
        }
    }
}
/* to protect the hashes, iocounts, freelist */
void
pthread_list_lock(void)
{
    lck_mtx_lock(pthread_list_mlock);
}

void
pthread_list_lock_spin(void)
{
    lck_mtx_lock_spin(pthread_list_mlock);
}

void
pthread_list_lock_convert_spin(void)
{
    lck_mtx_convert_spin(pthread_list_mlock);
}

void
pthread_list_unlock(void)
{
    lck_mtx_unlock(pthread_list_mlock);
}

/* to protect the indiv queue */
void
ksyn_wqlock(ksyn_wait_queue_t kwq)
{
    lck_mtx_lock(&kwq->kw_lock);
}

void
ksyn_wqunlock(ksyn_wait_queue_t kwq)
{
    lck_mtx_unlock(&kwq->kw_lock);
}
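/*
 * Illustrative usage sketch (not from the original source): the list lock
 * covers hash lookup and iocount manipulation; per-queue state is then
 * touched only under the kwq lock. Variable names here (uaddr etc.) are
 * hypothetical:
 *
 *	pthread_list_lock();
 *	kwq = ksyn_wq_hash_lookup(uaddr, p, flags, object, offset);
 *	kwq->kw_iocount++;
 *	pthread_list_unlock();
 *
 *	ksyn_wqlock(kwq);
 *	... operate on kwq->kw_ksynqueues ...
 *	ksyn_wqunlock(kwq);
 */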
/* routine to drop the mutex unlocks, used both for mutexunlock system call and drop during cond wait */
uint32_t
psynch_mutexdrop_internal(ksyn_wait_queue_t kwq, uint32_t lkseq, uint32_t ugen, int flags)
{
    uint32_t nextgen, low_writer, updatebits, returnbits = 0;
    int firstfit = flags & _PTHREAD_MUTEX_POLICY_FIRSTFIT;
    ksyn_waitq_element_t kwe = NULL;
    kern_return_t kret = KERN_SUCCESS;

    nextgen = (ugen + PTHRW_INC);

#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_START, (uint32_t)kwq->kw_addr, lkseq, ugen, flags, 0);
#endif /* _PSYNCH_TRACE_ */

    ksyn_wqlock(kwq);

redrive:

    if (kwq->kw_inqueue != 0) {
        updatebits = (kwq->kw_highseq & PTHRW_COUNT_MASK) | (PTH_RWL_EBIT | PTH_RWL_KBIT);
        kwq->kw_lastunlockseq = (ugen & PTHRW_COUNT_MASK);
        if (firstfit != 0) {
            /* first fit, pick any one */
            kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwq);
            kwe->kwe_psynchretval = updatebits;
            kwe->kwe_kwqqueue = NULL;

#if _PSYNCH_TRACE_
            __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xcafecaf1, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
#endif /* _PSYNCH_TRACE_ */

            kret = ksyn_wakeup_thread(kwq, kwe);
#if __TESTPANICS__
            if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
                panic("psynch_mutexdrop_internal: panic unable to wakeup firstfit mutex thread\n");
#endif /* __TESTPANICS__ */
            if (kret == KERN_NOT_WAITING)
                goto redrive;
        } else {
            /* handle fairshare */
            low_writer = kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_firstnum;
            low_writer &= PTHRW_COUNT_MASK;

            if (low_writer == nextgen) {
                /* next seq to be granted found */
                kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwq);

                /* since the grant could be cv, make sure mutex wait is set in case the thread interrupted out */
                kwe->kwe_psynchretval = updatebits | PTH_RWL_MTX_WAIT;
                kwe->kwe_kwqqueue = NULL;

#if _PSYNCH_TRACE_
                __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xcafecaf2, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
#endif /* _PSYNCH_TRACE_ */

                kret = ksyn_wakeup_thread(kwq, kwe);
#if __TESTPANICS__
                if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
                    panic("psynch_mutexdrop_internal: panic unable to wakeup fairshare mutex thread\n");
#endif /* __TESTPANICS__ */
                if (kret == KERN_NOT_WAITING) {
                    /* interrupt post */
                    kwq->kw_pre_intrcount = 1;
                    kwq->kw_pre_intrseq = nextgen;
                    kwq->kw_pre_intrretbits = updatebits;
                    kwq->kw_pre_intrtype = PTH_RW_TYPE_WRITE;
#if _PSYNCH_TRACE_
                    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfafafaf1, nextgen, kwq->kw_pre_intrretbits, 0);
#endif /* _PSYNCH_TRACE_ */
                }

            } else if (is_seqhigher(low_writer, nextgen) != 0) {
                kwq->kw_pre_rwwc++;

                if (kwq->kw_pre_rwwc > 1) {
                    __FAILEDUSERTEST__("psynch_mutexdrop_internal: prepost more than one (1)\n");
                    goto out;
                }

                kwq->kw_pre_lockseq = (nextgen & PTHRW_COUNT_MASK);
#if _PSYNCH_TRACE_
                __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef1, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
            } else {

                //__FAILEDUSERTEST__("psynch_mutexdrop_internal: FS mutex unlock sequence higher than the lowest one is queue\n");

                kwe = ksyn_queue_find_seq(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], (nextgen & PTHRW_COUNT_MASK), 1);
                if (kwe != NULL) {
                    /* next seq to be granted found */
                    /* since the grant could be cv, make sure mutex wait is set in case the thread interrupted out */
                    kwe->kwe_psynchretval = updatebits | PTH_RWL_MTX_WAIT;
                    kwe->kwe_kwqqueue = NULL;

#if _PSYNCH_TRACE_
                    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xcafecaf3, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
#endif /* _PSYNCH_TRACE_ */
                    kret = ksyn_wakeup_thread(kwq, kwe);
#if __TESTPANICS__
                    if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
                        panic("psynch_mutexdrop_internal: panic unable to wakeup fairshare mutex thread\n");
#endif /* __TESTPANICS__ */
                    if (kret == KERN_NOT_WAITING)
                        goto redrive;
                } else {
                    /* next seq to be granted not found, prepost */
                    kwq->kw_pre_rwwc++;

                    if (kwq->kw_pre_rwwc > 1) {
                        __FAILEDUSERTEST__("psynch_mutexdrop_internal: prepost more than one (2)\n");
                        goto out;
                    }

                    kwq->kw_pre_lockseq = (nextgen & PTHRW_COUNT_MASK);
#if _PSYNCH_TRACE_
                    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
                }
            }
        }
    } else {

        /* if firstfit the last one could be spurious */
        if (firstfit == 0) {
            kwq->kw_lastunlockseq = (ugen & PTHRW_COUNT_MASK);
            kwq->kw_pre_rwwc++;

            if (kwq->kw_pre_rwwc > 1) {
                __FAILEDUSERTEST__("psynch_mutexdrop_internal: prepost more than one (3)\n");
                goto out;
            }

            kwq->kw_pre_lockseq = (nextgen & PTHRW_COUNT_MASK);
#if _PSYNCH_TRACE_
            __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef3, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
        } else {
            /* first fit case */
#if _PSYNCH_TRACE_
            __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef3, kwq->kw_lastunlockseq, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
            kwq->kw_lastunlockseq = (ugen & PTHRW_COUNT_MASK);
            /* not set or the new lkseq is higher */
            if ((kwq->kw_pre_rwwc == 0) || (is_seqlower(kwq->kw_pre_lockseq, lkseq) == 0))
                kwq->kw_pre_lockseq = (lkseq & PTHRW_COUNT_MASK);
            kwq->kw_pre_rwwc = 1;
#if _PSYNCH_TRACE_
            __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef3, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */

            /* indicate prepost content in kernel */
            returnbits = lkseq | PTH_RWL_PBIT;
        }
    }

out:
    ksyn_wqunlock(kwq);

#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_KMDROP | DBG_FUNC_END, (uint32_t)kwq->kw_addr, 0xeeeeeeed, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
    ksyn_wqrelease(kwq, NULL, 1, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_MTX));

    return(returnbits);
}
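/*
 * Call-flow sketch (not from the original source): psynch_mutexdrop() below
 * and the cvwait mutex-drop path both funnel into
 * psynch_mutexdrop_internal(). When no waiter can be granted the lock, a
 * prepost is left in the kwq and lkseq | PTH_RWL_PBIT is handed back so
 * userland can see that a prepost is pending in the kernel.
 */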
/*
 * psynch_mutexwait: This system call is used for contended psynch mutexes to block.
 */
int
psynch_mutexwait(__unused proc_t p, struct psynch_mutexwait_args * uap, uint32_t * retval)
{
    user_addr_t mutex = uap->mutex;
    uint32_t mgen = uap->mgen;
    uint32_t ugen = uap->ugen;
    uint64_t tid = uap->tid;
    int flags = uap->flags;
    ksyn_wait_queue_t kwq;
    int error = 0;
    int ins_flags, retry;
    uthread_t uth;
    int firstfit = flags & _PTHREAD_MUTEX_POLICY_FIRSTFIT;
    uint32_t lockseq, updatebits = 0;
    ksyn_waitq_element_t kwe;
    kern_return_t kret;

#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_START, (uint32_t)mutex, mgen, ugen, flags, 0);
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_NONE, (uint32_t)mutex, mgen, ugen, (uint32_t)tid, 0);
#endif /* _PSYNCH_TRACE_ */

    uth = current_uthread();

    kwe = &uth->uu_kwe;
    kwe->kwe_lockseq = uap->mgen;
    kwe->kwe_uth = uth;
    kwe->kwe_psynchretval = 0;
    kwe->kwe_kwqqueue = NULL;
    lockseq = (uap->mgen & PTHRW_COUNT_MASK);

    if (firstfit == 0) {
        ins_flags = SEQFIT;
    } else {
        /* first fit */
        ins_flags = FIRSTFIT;
    }

    error = ksyn_wqfind(mutex, mgen, ugen, 0, tid, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_MTX), &kwq);
    if (error != 0) {
#if _PSYNCH_TRACE_
        __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_END, (uint32_t)mutex, 1, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
        return(error);
    }

    ksyn_wqlock(kwq);

    if ((mgen & PTH_RWL_RETRYBIT) != 0) {
        retry = 1;
        mgen &= ~PTH_RWL_RETRYBIT;
    }

    /* handle first the missed wakeups */
    if ((kwq->kw_pre_intrcount != 0) &&
        ((kwq->kw_pre_intrtype == PTH_RW_TYPE_WRITE)) &&
        (is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {
        kwq->kw_pre_intrcount--;
        kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
        if (kwq->kw_pre_intrcount == 0)
            CLEAR_INTR_PREPOST_BITS(kwq);
        ksyn_wqunlock(kwq);
        *retval = kwe->kwe_psynchretval;
#if _PSYNCH_TRACE_
        __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_NONE, (uint32_t)mutex, 0xfafafaf1, kwe->kwe_psynchretval, kwq->kw_pre_intrcount, 0);
#endif /* _PSYNCH_TRACE_ */
        goto out;
    }

    if ((kwq->kw_pre_rwwc != 0) && ((ins_flags == FIRSTFIT) || ((lockseq & PTHRW_COUNT_MASK) == (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)))) {
        /* got preposted lock */
        kwq->kw_pre_rwwc--;
        if (kwq->kw_pre_rwwc == 0) {
            CLEAR_PREPOST_BITS(kwq);
            kwq->kw_lastunlockseq = PTHRW_RWL_INIT;
            if (kwq->kw_inqueue == 0) {
                updatebits = lockseq | (PTH_RWL_KBIT | PTH_RWL_EBIT);
            } else {
                updatebits = (kwq->kw_highseq & PTHRW_COUNT_MASK) | (PTH_RWL_KBIT | PTH_RWL_EBIT);
            }
            updatebits &= ~PTH_RWL_MTX_WAIT;

            kwe->kwe_psynchretval = updatebits;

            if (updatebits == 0) {
                __FAILEDUSERTEST__("psynch_mutexwait(prepost): returning 0 lseq in mutexwait with no EBIT \n");
            }
            ksyn_wqunlock(kwq);
            *retval = updatebits;
#if _PSYNCH_TRACE_
            __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfefefef1, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
            goto out;
        } else {
            __FAILEDUSERTEST__("psynch_mutexwait: more than one prepost\n");
            kwq->kw_pre_lockseq += PTHRW_INC; /* look for next one */
            ksyn_wqunlock(kwq);
            error = EINVAL;
            goto out;
        }
    }

#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 0xfeedfeed, mgen, ins_flags, 0);
#endif /* _PSYNCH_TRACE_ */

    error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], mgen, uth, kwe, ins_flags);
    if (error != 0) {
        ksyn_wqunlock(kwq);
#if _PSYNCH_TRACE_
        __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_END, (uint32_t)mutex, 2, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
        goto out;
    }

    kret = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0, psynch_mtxcontinue, (void *)kwq);

    psynch_mtxcontinue((void *)kwq, kret);

    /* not expected to return from unix_syscall_return */
    panic("psynch_mtxcontinue returned from unix_syscall_return");

out:
    ksyn_wqrelease(kwq, NULL, 1, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_MTX));
#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_END, (uint32_t)mutex, 0xeeeeeeed, updatebits, error, 0);
#endif /* _PSYNCH_TRACE_ */

    return(error);
}
void
psynch_mtxcontinue(void * parameter, wait_result_t result)
{
    int error = 0;
    uint32_t updatebits = 0;
    uthread_t uth = current_uthread();
    ksyn_wait_queue_t kwq = (ksyn_wait_queue_t)parameter;
    ksyn_waitq_element_t kwe;

    kwe = &uth->uu_kwe;

    switch (result) {
        case THREAD_TIMED_OUT:
            error = ETIMEDOUT;
            break;
        case THREAD_INTERRUPTED:
            error = EINTR;
            break;
        default:
            error = 0;
            break;
    }

    if (error != 0) {
        ksyn_wqlock(kwq);

#if _PSYNCH_TRACE_
        __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 3, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
        if (kwe->kwe_kwqqueue != NULL)
            ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwe);
        ksyn_wqunlock(kwq);
    } else {
        updatebits = kwe->kwe_psynchretval;
        updatebits &= ~PTH_RWL_MTX_WAIT;
        uth->uu_rval[0] = updatebits;

        if (updatebits == 0)
            __FAILEDUSERTEST__("psynch_mutexwait: returning 0 lseq in mutexwait with no EBIT \n");
    }
    ksyn_wqrelease(kwq, NULL, 1, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_MTX));
#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_END, (uint32_t)kwq->kw_addr, 0xeeeeeeed, updatebits, error, 0);
#endif /* _PSYNCH_TRACE_ */

    unix_syscall_return(error);
}
/*
 * psynch_mutexdrop: This system call is used for unlock postings on contended psynch mutexes.
 */
int
psynch_mutexdrop(__unused proc_t p, struct psynch_mutexdrop_args * uap, uint32_t * retval)
{
    user_addr_t mutex = uap->mutex;
    uint32_t mgen = uap->mgen;
    uint32_t ugen = uap->ugen;
    uint64_t tid = uap->tid;
    int flags = uap->flags;
    ksyn_wait_queue_t kwq;
    uint32_t updateval;
    int error = 0;

    error = ksyn_wqfind(mutex, mgen, ugen, 0, tid, flags, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_MTX), &kwq);
    if (error != 0)
        return(error);

    updateval = psynch_mutexdrop_internal(kwq, mgen, ugen, flags);
    /* drops the kwq reference */

    *retval = updateval;
    return(0);
}
/*
 * psynch_cvbroad: This system call is used for broadcast posting on blocked waiters of psynch cvars.
 */
int
psynch_cvbroad(__unused proc_t p, struct psynch_cvbroad_args * uap, uint32_t * retval)
{
    user_addr_t cond = uap->cv;
    uint64_t cvlsgen = uap->cvlsgen;
    uint64_t cvudgen = uap->cvudgen;
    uint32_t cgen, cugen, csgen, diffgen;
    uint32_t uptoseq, fromseq;
    int flags = uap->flags;
    ksyn_wait_queue_t ckwq;
    int error = 0;
    uint32_t updatebits = 0;
    uint32_t count;
    struct ksyn_queue kfreeq;

    csgen = (uint32_t)((cvlsgen >> 32) & 0xffffffff);
    cgen = ((uint32_t)(cvlsgen & 0xffffffff));
    cugen = (uint32_t)((cvudgen >> 32) & 0xffffffff);
    diffgen = ((uint32_t)(cvudgen & 0xffffffff));
    count = (diffgen >> PTHRW_COUNT_SHIFT);
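    /*
     * Unpacking example (not from the original source): with
     * cvlsgen = 0x0000010000000305, csgen = 0x00000100 (high word) and
     * cgen = 0x00000305 (low word); with cvudgen = 0x0000020000000400,
     * cugen = 0x00000200 and diffgen = 0x00000400, so count = 4 pending
     * wakeups (diffgen >> PTHRW_COUNT_SHIFT).
     */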
#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_START, (uint32_t)cond, cgen, cugen, csgen, 0);
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_NONE, (uint32_t)cond, 0xcbcbcbc1, diffgen, flags, 0);
#endif /* _PSYNCH_TRACE_ */

    uptoseq = cgen & PTHRW_COUNT_MASK;
    fromseq = (cugen & PTHRW_COUNT_MASK) + PTHRW_INC;

    if (is_seqhigher(fromseq, uptoseq) || is_seqhigher((csgen & PTHRW_COUNT_MASK), uptoseq)) {
        __FAILEDUSERTEST__("cvbroad: invalid L, U and S values\n");
        return(EINVAL);
    }

    if (count > (uint32_t)task_threadmax) {
        __FAILEDUSERTEST__("cvbroad: difference greater than maximum possible thread count\n");
        return(EBUSY);
    }

    error = ksyn_wqfind(cond, cgen, cugen, csgen, 0, flags, (KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INDROP), &ckwq);
    if (error != 0) {
#if _PSYNCH_TRACE_
        __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_END, (uint32_t)cond, 0, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
        return(error);
    }

    ksyn_wqlock(ckwq);

    /* update L, U and S... */
    UPDATE_CVKWQ(ckwq, cgen, cugen, csgen, 0, KSYN_WQTYPE_CVAR);

    /* broadcast wakeups/prepost handling */
    ksyn_handle_cvbroad(ckwq, uptoseq, &updatebits);

    /* set C or P bits and free if needed */
    ckwq->kw_sword += (updatebits & PTHRW_COUNT_MASK);
    ksyn_cvupdate_fixup(ckwq, &updatebits, &kfreeq, 1);
    ksyn_wqunlock(ckwq);

    *retval = updatebits;

    ksyn_wqrelease(ckwq, NULL, 1, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_CVAR));
#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_END, (uint32_t)cond, 0xeeeeeeed, (uint32_t)*retval, error, 0);
#endif /* _PSYNCH_TRACE_ */

    return(error);
}
ksyn_waitq_element_t
ksyn_queue_find_threadseq(ksyn_wait_queue_t ckwq, __unused ksyn_queue_t kq, thread_t th, uint32_t upto)
{
    uthread_t uth = get_bsdthread_info(th);
    ksyn_waitq_element_t kwe = &uth->uu_kwe;

    if (kwe->kwe_kwqqueue != ckwq ||
        is_seqhigher((kwe->kwe_lockseq & PTHRW_COUNT_MASK), upto)) {
        /* the thread is not waiting in the cv (or wasn't when the wakeup happened) */
        return NULL;
    }
    return kwe;
}
/*
 * psynch_cvsignal: This system call is used for signalling the blocked waiters of psynch cvars.
 */
int
psynch_cvsignal(__unused proc_t p, struct psynch_cvsignal_args * uap, uint32_t * retval)
{
    user_addr_t cond = uap->cv;
    uint64_t cvlsgen = uap->cvlsgen;
    uint32_t cgen, csgen, signalseq, uptoseq;
    uint32_t cugen = uap->cvugen;
    int threadport = uap->thread_port;
    int flags = uap->flags;
    ksyn_wait_queue_t ckwq = NULL;
    ksyn_waitq_element_t kwe, nkwe = NULL;
    ksyn_queue_t kq;
    int error = 0;
    thread_t th = THREAD_NULL;
    uint32_t updatebits = 0;
    kern_return_t kret;
    struct ksyn_queue kfreeq;

    csgen = (uint32_t)((cvlsgen >> 32) & 0xffffffff);
    cgen = ((uint32_t)(cvlsgen & 0xffffffff));

#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_START, (uint32_t)cond, cgen, cugen, threadport, 0);
#endif /* _PSYNCH_TRACE_ */

    uptoseq = cgen & PTHRW_COUNT_MASK;
    signalseq = (cugen & PTHRW_COUNT_MASK) + PTHRW_INC;

    /* validate sane L, U, and S values */
    if (((threadport == 0) && (is_seqhigher(signalseq, uptoseq))) || is_seqhigher((csgen & PTHRW_COUNT_MASK), uptoseq)) {
        __FAILEDUSERTEST__("psync_cvsignal; invalid sequence numbers\n");
        error = EINVAL;
        goto out;
    }

    /* If we are looking for a specific thread, grab a reference for it */
    if (threadport != 0) {
        th = (thread_t)port_name_to_thread((mach_port_name_t)threadport);
        if (th == THREAD_NULL) {
            error = ESRCH;
            goto out;
        }
    }

    error = ksyn_wqfind(cond, cgen, cugen, csgen, 0, flags, (KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INDROP), &ckwq);
    if (error != 0) {
#if _PSYNCH_TRACE_
        __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_END, (uint32_t)cond, 0, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
        goto out;
    }

    ksyn_wqlock(ckwq);

retry:
    /* update L, U and S... */
    UPDATE_CVKWQ(ckwq, cgen, cugen, csgen, 0, KSYN_WQTYPE_CVAR);

    kq = &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER];

    /* Only bother if we aren't already balanced */
    if ((ckwq->kw_lword & PTHRW_COUNT_MASK) != (ckwq->kw_sword & PTHRW_COUNT_MASK)) {

        kwe = (th != NULL) ? ksyn_queue_find_threadseq(ckwq, kq, th, uptoseq) :
            ksyn_queue_find_signalseq(ckwq, kq, uptoseq, signalseq);
        if (kwe != NULL) {
            switch (kwe->kwe_flags) {

            case KWE_THREAD_BROADCAST:
                /* broadcasts swallow our signal */
                break;

            case KWE_THREAD_PREPOST:
                /* merge in with existing prepost at our same uptoseq */
                kwe->kwe_count += 1;
                break;

            case KWE_THREAD_INWAIT:
                if (is_seqlower((kwe->kwe_lockseq & PTHRW_COUNT_MASK), signalseq)) {
                    /*
                     * A valid thread in our range, but lower than our signal.
                     * Matching it may leave our match with nobody to wake it if/when
                     * it arrives (the signal originally meant for this thread might
                     * not successfully wake it).
                     *
                     * Convert to broadcast - may cause some spurious wakeups
                     * (allowed by spec), but avoids starvation (better choice).
                     */
#if _PSYNCH_TRACE_
                    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xc1c1c1c1, uptoseq, 0, 0);
#endif /* _PSYNCH_TRACE_ */
                    ksyn_handle_cvbroad(ckwq, uptoseq, &updatebits);
                } else {
                    ksyn_queue_removeitem(ckwq, kq, kwe);
                    kwe->kwe_psynchretval = PTH_RWL_MTX_WAIT;
                    kwe->kwe_kwqqueue = NULL;
#if _PSYNCH_TRACE_
                    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xcafecaf2, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
#endif /* _PSYNCH_TRACE_ */
                    kret = ksyn_wakeup_thread(ckwq, kwe);
#if __TESTPANICS__
                    if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
                        panic("ksyn_wakeup_thread: panic waking up condition waiter\n");
#endif /* __TESTPANICS__ */
                    updatebits += PTHRW_INC;
                }

                ckwq->kw_sword += (updatebits & PTHRW_COUNT_MASK);
                break;

            default:
                panic("unknown kweflags\n");
                break;
            }

        } else if (th != NULL) {
            /*
             * Could not find the thread, post a broadcast,
             * otherwise the waiter will be stuck. We used to send
             * ESRCH here, but that led to rare hangs.
             */
            ksyn_handle_cvbroad(ckwq, uptoseq, &updatebits);
            ckwq->kw_sword += (updatebits & PTHRW_COUNT_MASK);
        } else if (nkwe == NULL) {
            ksyn_wqunlock(ckwq);
            nkwe = (ksyn_waitq_element_t)zalloc(kwe_zone);
            ksyn_wqlock(ckwq);
            goto retry;

        } else {
            /* no eligible entries - add prepost */
            bzero(nkwe, sizeof(struct ksyn_waitq_element));
            nkwe->kwe_kwqqueue = ckwq;
            nkwe->kwe_flags = KWE_THREAD_PREPOST;
            nkwe->kwe_lockseq = uptoseq;
            nkwe->kwe_count = 1;
            nkwe->kwe_uth = NULL;
            nkwe->kwe_psynchretval = 0;

#if _PSYNCH_TRACE_
            __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xfeedfefe, uptoseq, 0, 0);
#endif /* _PSYNCH_TRACE_ */

            (void)ksyn_queue_insert(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], uptoseq, NULL, nkwe, SEQFIT);
            ckwq->kw_fakecount++;
            nkwe = NULL;
        }

        /* set C or P bits and free if needed */
        ksyn_cvupdate_fixup(ckwq, &updatebits, &kfreeq, 1);
    }

    ksyn_wqunlock(ckwq);
    if (nkwe != NULL)
        zfree(kwe_zone, nkwe);

    ksyn_wqrelease(ckwq, NULL, 1, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_CVAR));

out:
    if (th != NULL)
        thread_deallocate(th);
    if (error == 0)
        *retval = updatebits;
#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_END, (uint32_t)cond, 0xeeeeeeed, updatebits, error, 0);
#endif /* _PSYNCH_TRACE_ */

    return(error);
}
/*
 * psynch_cvwait: This system call is used for psynch cvar waiters to block in kernel.
 */
int
psynch_cvwait(__unused proc_t p, struct psynch_cvwait_args * uap, uint32_t * retval)
{
    user_addr_t cond = uap->cv;
    uint64_t cvlsgen = uap->cvlsgen;
    uint32_t cgen, csgen;
    uint32_t cugen = uap->cvugen;
    user_addr_t mutex = uap->mutex;
    uint64_t mugen = uap->mugen;
    uint32_t mgen, ugen;
    int flags = uap->flags;
    ksyn_wait_queue_t kwq, ckwq;
    int error = 0, local_error = 0;
    uint64_t abstime = 0;
    uint32_t lockseq, updatebits = 0;
    struct timespec ts;
    uthread_t uth;
    ksyn_waitq_element_t kwe, nkwe = NULL;
    struct ksyn_queue *kq, kfreeq;
    kern_return_t kret;

    /* for conformance reasons */
    __pthread_testcancel(0);

    csgen = (uint32_t)((cvlsgen >> 32) & 0xffffffff);
    cgen = ((uint32_t)(cvlsgen & 0xffffffff));
    ugen = (uint32_t)((mugen >> 32) & 0xffffffff);
    mgen = ((uint32_t)(mugen & 0xffffffff));

#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_START, (uint32_t)cond, cgen, cugen, csgen, 0);
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_NONE, (uint32_t)mutex, mgen, ugen, flags, 0);
#endif /* _PSYNCH_TRACE_ */

    lockseq = (cgen & PTHRW_COUNT_MASK);
    /*
     * In cvwait U word can be out of range as cond could be used only for
     * timeouts. However S word needs to be within bounds and validated at
     * user level as well.
     */
    if (is_seqhigher_eq((csgen & PTHRW_COUNT_MASK), lockseq) != 0) {
        __FAILEDUSERTEST__("psync_cvwait; invalid sequence numbers\n");
        return(EINVAL);
    }

    error = ksyn_wqfind(cond, cgen, cugen, csgen, 0, flags, KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INWAIT, &ckwq);
    if (error != 0) {
#if _PSYNCH_TRACE_
        __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_END, (uint32_t)cond, 1, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
        return(error);
    }

    if (mutex != (user_addr_t)0) {
        error = ksyn_wqfind(mutex, mgen, ugen, 0, 0, flags, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_MTX), &kwq);
        if (error != 0) {
            local_error = error;
#if _PSYNCH_TRACE_
            __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_END, (uint32_t)mutex, 2, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
            goto out;
        }

        (void)psynch_mutexdrop_internal(kwq, mgen, ugen, flags);
        /* drops kwq reference */
    }

    if (uap->sec != 0 || (uap->nsec & 0x3fffffff) != 0) {
        ts.tv_sec = uap->sec;
        ts.tv_nsec = (uap->nsec & 0x3fffffff);
        nanoseconds_to_absolutetime((uint64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec, &abstime);
        clock_absolutetime_interval_to_deadline(abstime, &abstime);
    }

    ksyn_wqlock(ckwq);

    /* update L, U and S... */
    UPDATE_CVKWQ(ckwq, cgen, cugen, csgen, 0, KSYN_WQTYPE_CVAR);

    /* Look for the sequence for prepost (or conflicting thread) */
    kq = &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER];
    kwe = ksyn_queue_find_cvpreposeq(kq, lockseq);

    if (kwe != NULL) {
        switch (kwe->kwe_flags) {

        case KWE_THREAD_INWAIT:
            ksyn_wqunlock(ckwq);
            __FAILEDUSERTEST__("cvwait: thread entry with same sequence already present\n");
            local_error = EBUSY;
            goto out;

        case KWE_THREAD_BROADCAST:
            break;

        case KWE_THREAD_PREPOST:
            if ((kwe->kwe_lockseq & PTHRW_COUNT_MASK) == lockseq) {
                /* we can safely consume a reference, so do so */
                if (--kwe->kwe_count == 0) {
                    ksyn_queue_removeitem(ckwq, kq, kwe);
                    ckwq->kw_fakecount--;
                    nkwe = kwe;
                }
            } else {
                /*
                 * consuming a prepost higher than our lock sequence is valid, but
                 * can leave the higher thread without a match. Convert the entry
                 * to a broadcast to compensate for this.
                 */
#if _PSYNCH_TRACE_
                __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xc2c2c2c2, kwe->kwe_lockseq, 0, 0);
#endif /* _PSYNCH_TRACE_ */

                ksyn_handle_cvbroad(ckwq, kwe->kwe_lockseq, &updatebits);
#if __TESTPANICS__
                if (updatebits != 0)
                    panic("psync_cvwait: convert pre-post to broadcast: woke up %d threads that shouldn't be there\n",
                          updatebits);
#endif /* __TESTPANICS__ */
            }

            break;

        default:
            panic("psync_cvwait: unexpected wait queue element type\n");
        }

#if _PSYNCH_TRACE_
        __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xfefefefe, kwe->kwe_lockseq, 0, 0);
#endif /* _PSYNCH_TRACE_ */

        updatebits = PTHRW_INC;
        ckwq->kw_sword += PTHRW_INC;

        /* set C or P bits and free if needed */
        ksyn_cvupdate_fixup(ckwq, &updatebits, &kfreeq, 1);

        local_error = 0;
        *retval = updatebits;

        ksyn_wqunlock(ckwq);

        if (nkwe != NULL)
            zfree(kwe_zone, nkwe);

        goto out;
    }

    uth = current_uthread();
    kwe = &uth->uu_kwe;
    kwe->kwe_kwqqueue = ckwq;
    kwe->kwe_flags = KWE_THREAD_INWAIT;
    kwe->kwe_lockseq = lockseq;
    kwe->kwe_count = 1;
    kwe->kwe_uth = uth;
    kwe->kwe_psynchretval = 0;

#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xfeedfeed, cgen, 0, 0);
#endif /* _PSYNCH_TRACE_ */

    error = ksyn_queue_insert(ckwq, kq, cgen, uth, kwe, SEQFIT);
    if (error != 0) {
        ksyn_wqunlock(ckwq);
        local_error = error;
        goto out;
    }

    kret = ksyn_block_thread_locked(ckwq, abstime, kwe, 1, psynch_cvcontinue, (void *)ckwq);
    /* lock dropped */

    psynch_cvcontinue(ckwq, kret);
    /* not expected to return from unix_syscall_return */
    panic("psynch_cvcontinue returned from unix_syscall_return");

out:
#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_END, (uint32_t)cond, 0xeeeeeeed, (uint32_t)*retval, local_error, 0);
#endif /* _PSYNCH_TRACE_ */
    ksyn_wqrelease(ckwq, NULL, 1, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_CVAR));
    return(local_error);
}
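/*
 * Timeout encoding note (not from the original source): a relative wait of
 * 1.5s arrives in psynch_cvwait() as uap->sec = 1 and uap->nsec = 500000000;
 * only the low 30 bits of nsec (mask 0x3fffffff) carry nanoseconds, and the
 * pair is folded into one absolute deadline via nanoseconds_to_absolutetime()
 * plus clock_absolutetime_interval_to_deadline().
 */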
void
psynch_cvcontinue(void * parameter, wait_result_t result)
{
    int error = 0, local_error = 0;
    uthread_t uth = current_uthread();
    ksyn_wait_queue_t ckwq = (ksyn_wait_queue_t)parameter;
    ksyn_waitq_element_t kwe;
    struct ksyn_queue kfreeq;

    switch (result) {
        case THREAD_TIMED_OUT:
            error = ETIMEDOUT;
            break;
        case THREAD_INTERRUPTED:
            error = EINTR;
            break;
        default:
            error = 0;
            break;
    }

#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_THWAKEUP | DBG_FUNC_NONE, 0xf4f3f2f1, (uintptr_t)uth, result, 0, 0);
#endif /* _PSYNCH_TRACE_ */

    local_error = error;
    kwe = &uth->uu_kwe;

    if (error != 0) {
        ksyn_wqlock(ckwq);
        /* just in case it got woken up as we were granting */
        uth->uu_rval[0] = kwe->kwe_psynchretval;

#if __TESTPANICS__
        if ((kwe->kwe_kwqqueue != NULL) && (kwe->kwe_kwqqueue != ckwq))
            panic("cvwait waiting on some other kwq\n");
#endif /* __TESTPANICS__ */

        if (kwe->kwe_kwqqueue != NULL) {
            ksyn_queue_removeitem(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwe);
            kwe->kwe_kwqqueue = NULL;
        }
        if ((kwe->kwe_psynchretval & PTH_RWL_MTX_WAIT) != 0) {
            /* the condition var granted.
             * reset the error so that the thread returns back.
             */
            local_error = 0;
            /* no need to set any bits just return as cvsig/broad covers this */
            ksyn_wqunlock(ckwq);
            goto out;
        }

        ckwq->kw_sword += PTHRW_INC;

        /* set C and P bits, in the local error */
        if ((ckwq->kw_lword & PTHRW_COUNT_MASK) == (ckwq->kw_sword & PTHRW_COUNT_MASK)) {
            local_error |= ECVCERORR;
            if (ckwq->kw_inqueue != 0) {
                (void)ksyn_queue_move_tofree(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], (ckwq->kw_lword & PTHRW_COUNT_MASK), &kfreeq, 1, 1);
            }
            ckwq->kw_lword = ckwq->kw_uword = ckwq->kw_sword = 0;
            ckwq->kw_kflags |= KSYN_KWF_ZEROEDOUT;
        } else {
            /* everything in the queue is a fake entry ? */
            if ((ckwq->kw_inqueue != 0) && (ckwq->kw_fakecount == ckwq->kw_inqueue)) {
                local_error |= ECVPERORR;
            }
        }
        ksyn_wqunlock(ckwq);

    } else {
        /* PTH_RWL_MTX_WAIT is removed */
        if ((kwe->kwe_psynchretval & PTH_RWS_CV_MBIT) != 0)
            uth->uu_rval[0] = PTHRW_INC | PTH_RWS_CV_CBIT;
        else
            uth->uu_rval[0] = 0;
        local_error = 0;
    }
out:
#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_END, (uint32_t)ckwq->kw_addr, 0xeeeeeeed, uth->uu_rval[0], local_error, 0);
#endif /* _PSYNCH_TRACE_ */
    ksyn_wqrelease(ckwq, NULL, 1, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_CVAR));

    unix_syscall_return(local_error);
}
/*
 * psynch_cvclrprepost: This system call clears pending prepost if present.
 */
int
psynch_cvclrprepost(__unused proc_t p, struct psynch_cvclrprepost_args * uap, __unused int * retval)
{
    user_addr_t cond = uap->cv;
    uint32_t cgen = uap->cvgen;
    uint32_t cugen = uap->cvugen;
    uint32_t csgen = uap->cvsgen;
    uint32_t pseq = uap->preposeq;
    uint32_t flags = uap->flags;
    int error = 0;
    ksyn_wait_queue_t ckwq = NULL;
    struct ksyn_queue kfreeq;

#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CLRPRE | DBG_FUNC_START, (uint32_t)cond, cgen, cugen, csgen, 0);
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CLRPRE | DBG_FUNC_NONE, (uint32_t)cond, 0xcececece, pseq, flags, 0);
#endif /* _PSYNCH_TRACE_ */

    if ((flags & _PTHREAD_MTX_OPT_MUTEX) == 0) {
        /* cvar */
        error = ksyn_wqfind(cond, cgen, cugen, csgen, 0, flags, (KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INDROP), &ckwq);
        if (error != 0) {
#if _PSYNCH_TRACE_
            __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CLRPRE | DBG_FUNC_END, (uint32_t)cond, 0, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
            return(error);
        }

        ksyn_wqlock(ckwq);
        (void)ksyn_queue_move_tofree(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], (pseq & PTHRW_COUNT_MASK), &kfreeq, 0, 1);
        ksyn_wqunlock(ckwq);
        ksyn_wqrelease(ckwq, NULL, 1, (KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INDROP));
    } else {
        /* mutex */
        error = ksyn_wqfind(cond, cgen, cugen, 0, 0, flags, (KSYN_WQTYPE_MTX | KSYN_WQTYPE_INDROP), &ckwq);
        if (error != 0) {
#if _PSYNCH_TRACE_
            __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CLRPRE | DBG_FUNC_END, (uint32_t)cond, 0, 0xdeadbeef, error, 0);
#endif /* _PSYNCH_TRACE_ */
            return(error);
        }

        ksyn_wqlock(ckwq);
        if (((flags & _PTHREAD_MUTEX_POLICY_FIRSTFIT) != 0) && (ckwq->kw_pre_rwwc != 0)) {
            if (is_seqlower_eq(ckwq->kw_pre_lockseq, cgen) != 0) {
                /* clear prepost */
                ckwq->kw_pre_rwwc = 0;
                ckwq->kw_pre_lockseq = 0;
            }
        }
        ksyn_wqunlock(ckwq);
        ksyn_wqrelease(ckwq, NULL, 1, (KSYN_WQTYPE_MTX | KSYN_WQTYPE_INDROP));
    }

#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CLRPRE | DBG_FUNC_END, (uint32_t)cond, 0xeeeeeeed, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
    return(0);
}
/* ***************** pthread_rwlock ************************ */

/*
 * psynch_rw_rdlock: This system call is used for psync rwlock readers to block.
 */
int
psynch_rw_rdlock(__unused proc_t p, struct psynch_rw_rdlock_args * uap, uint32_t * retval)
{
    user_addr_t rwlock = uap->rwlock;
    uint32_t lgen = uap->lgenval;
    uint32_t ugen = uap->ugenval;
    uint32_t rw_wc = uap->rw_wc;
    //uint64_t tid = uap->tid;
    int flags = uap->flags;
    int error = 0, block;
    uint32_t lockseq = 0, updatebits = 0, preseq = 0, prerw_wc = 0;
    ksyn_wait_queue_t kwq;
    uthread_t uth;
    int isinit = lgen & PTHRW_RWL_INIT;
    uint32_t returnbits = 0;
    ksyn_waitq_element_t kwe;
    kern_return_t kret;

#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
    uth = current_uthread();

    /* preserve the seq number */
    kwe = &uth->uu_kwe;
    kwe->kwe_lockseq = lgen;
    kwe->kwe_uth = uth;
    kwe->kwe_psynchretval = 0;
    kwe->kwe_kwqqueue = NULL;

    lockseq = lgen & PTHRW_COUNT_MASK;

    error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
    if (error != 0) {
#if _PSYNCH_TRACE_
        __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
        return(error);
    }

    ksyn_wqlock(kwq);

    if (isinit != 0) {
        lgen &= ~PTHRW_RWL_INIT;
        if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
            /* first to notice the reset of the lock, clear preposts */
            CLEAR_REINIT_BITS(kwq);
            kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
            __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
#endif /* _PSYNCH_TRACE_ */
        }
    }

    /* handle first the missed wakeups */
    if ((kwq->kw_pre_intrcount != 0) &&
        ((kwq->kw_pre_intrtype == PTH_RW_TYPE_READ) || (kwq->kw_pre_intrtype == PTH_RW_TYPE_LREAD)) &&
        (is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {

        kwq->kw_pre_intrcount--;
        kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
        if (kwq->kw_pre_intrcount == 0)
            CLEAR_INTR_PREPOST_BITS(kwq);
        ksyn_wqunlock(kwq);
        goto out;
    }

    /* handle overlap first as they are not counted against pre_rwwc */

    /* check for overlap and if no pending W bit (indicates writers) */
    if ((kwq->kw_overlapwatch != 0) && ((rw_wc & PTHRW_RWS_SAVEMASK) == 0) && ((lgen & PTH_RWL_WBIT) == 0)) {
#if _PSYNCH_TRACE_
        __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 10, kwq->kw_nextseqword, kwq->kw_lastseqword, 0);
#endif /* _PSYNCH_TRACE_ */
        error = kwq_handle_overlap(kwq, lgen, ugen, rw_wc, &updatebits, (KW_UNLOCK_PREPOST_READLOCK|KW_UNLOCK_PREPOST), &block);
#if __TESTPANICS__
        if (error != 0)
            panic("rw_rdlock: kwq_handle_overlap failed %d\n", error);
#endif /* __TESTPANICS__ */
        if (block == 0) {
            error = 0;
            kwe->kwe_psynchretval = updatebits;
#if _PSYNCH_TRACE_
            __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0xff, updatebits, 0xee, 0);
#endif /* _PSYNCH_TRACE_ */
            ksyn_wqunlock(kwq);
            goto out;
        }
    }

    if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
        __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
        kwq->kw_pre_rwwc--;
        if (kwq->kw_pre_rwwc == 0) {
            preseq = kwq->kw_pre_lockseq;
            prerw_wc = kwq->kw_pre_sseq;
            CLEAR_PREPOST_BITS(kwq);
            if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0) {
                kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
                __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
#endif /* _PSYNCH_TRACE_ */
            }
            error = kwq_handle_unlock(kwq, preseq, prerw_wc, &updatebits, (KW_UNLOCK_PREPOST_READLOCK|KW_UNLOCK_PREPOST), &block, lgen);
#if __TESTPANICS__
            if (error != 0)
                panic("rw_rdlock: kwq_handle_unlock failed %d\n", error);
#endif /* __TESTPANICS__ */
            if (block == 0) {
                ksyn_wqunlock(kwq);
                goto out;
            }
            /* insert to q and proceed as usual */
        }
    }

#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
    error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_READ], lgen, uth, kwe, SEQFIT);
#if __TESTPANICS__
    if (error != 0)
        panic("psynch_rw_rdlock: failed to enqueue\n");
#endif /* __TESTPANICS__ */
    kret = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0, THREAD_CONTINUE_NULL, NULL);
    /* drops the kwq lock */
    switch (kret) {
        case THREAD_TIMED_OUT:
            error = ETIMEDOUT;
            break;
        case THREAD_INTERRUPTED:
            error = EINTR;
            break;
        default:
            error = 0;
            break;
    }

out:
    if (error != 0) {
#if _PSYNCH_TRACE_
        __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 4, error, 0, 0);
#endif /* _PSYNCH_TRACE_ */
        ksyn_wqlock(kwq);
        if (kwe->kwe_kwqqueue != NULL)
            ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_READ], kwe);
        ksyn_wqunlock(kwq);
    } else {
        /* update bits */
        *retval = kwe->kwe_psynchretval;
        returnbits = kwe->kwe_psynchretval;
    }
    ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK));
#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, returnbits, error, 0);
#endif /* _PSYNCH_TRACE_ */
    return(error);
}
/*
 * psynch_rw_longrdlock: This system call is used for psync rwlock long readers to block.
 */
int
psynch_rw_longrdlock(__unused proc_t p, struct psynch_rw_longrdlock_args * uap, __unused uint32_t * retval)
{
    user_addr_t rwlock = uap->rwlock;
    uint32_t lgen = uap->lgenval;
    uint32_t ugen = uap->ugenval;
    uint32_t rw_wc = uap->rw_wc;
    //uint64_t tid = uap->tid;
    int flags = uap->flags;
    int isinit = lgen & PTHRW_RWL_INIT;
    uint32_t returnbits = 0;
    ksyn_waitq_element_t kwe;
    kern_return_t kret;

    ksyn_wait_queue_t kwq;
    int error = 0, block = 0;
    uthread_t uth;
    uint32_t lockseq = 0, updatebits = 0, preseq = 0, prerw_wc = 0;

#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
    uth = current_uthread();

    kwe = &uth->uu_kwe;
    kwe->kwe_lockseq = lgen;
    kwe->kwe_uth = uth;
    kwe->kwe_psynchretval = 0;
    kwe->kwe_kwqqueue = NULL;
    lockseq = (lgen & PTHRW_COUNT_MASK);

    error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
    if (error != 0) {
#if _PSYNCH_TRACE_
        __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
        return(error);
    }

    ksyn_wqlock(kwq);

    if (isinit != 0) {
        lgen &= ~PTHRW_RWL_INIT;
        if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
            /* first to notice the reset of the lock, clear preposts */
            CLEAR_REINIT_BITS(kwq);
            kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
            __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
#endif /* _PSYNCH_TRACE_ */
        }
    }

    /* handle first the missed wakeups */
    if ((kwq->kw_pre_intrcount != 0) &&
        (kwq->kw_pre_intrtype == PTH_RW_TYPE_LREAD) &&
        (is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {

        kwq->kw_pre_intrcount--;
        kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
        if (kwq->kw_pre_intrcount == 0)
            CLEAR_INTR_PREPOST_BITS(kwq);
        ksyn_wqunlock(kwq);
        goto out;
    }

    if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
        __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
        kwq->kw_pre_rwwc--;
        if (kwq->kw_pre_rwwc == 0) {
            preseq = kwq->kw_pre_lockseq;
            prerw_wc = kwq->kw_pre_sseq;
            CLEAR_PREPOST_BITS(kwq);
            if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0) {
                kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
                __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
#endif /* _PSYNCH_TRACE_ */
            }
            error = kwq_handle_unlock(kwq, preseq, prerw_wc, &updatebits, (KW_UNLOCK_PREPOST_LREADLOCK|KW_UNLOCK_PREPOST), &block, lgen);
#if __TESTPANICS__
            if (error != 0)
                panic("kwq_handle_unlock failed %d\n", error);
#endif /* __TESTPANICS__ */
            if (block == 0) {
                ksyn_wqunlock(kwq);
                goto out;
            }
            /* insert to q and proceed as usual */
        }
    }

#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
    error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_LREAD], lgen, uth, kwe, SEQFIT);
#if __TESTPANICS__
    if (error != 0)
        panic("psynch_rw_longrdlock: failed to enqueue\n");
#endif /* __TESTPANICS__ */

    kret = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0, THREAD_CONTINUE_NULL, NULL);
    /* drops the kwq lock */
    switch (kret) {
        case THREAD_TIMED_OUT:
            error = ETIMEDOUT;
            break;
        case THREAD_INTERRUPTED:
            error = EINTR;
            break;
        default:
            error = 0;
            break;
    }

out:
    if (error != 0) {
#if _PSYNCH_TRACE_
        __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
        ksyn_wqlock(kwq);
        if (kwe->kwe_kwqqueue != NULL)
            ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_LREAD], kwe);
        ksyn_wqunlock(kwq);
    } else {
        /* update bits */
        *retval = kwe->kwe_psynchretval;
        returnbits = kwe->kwe_psynchretval;
    }

    ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK));

#if _PSYNCH_TRACE_
    __PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0, returnbits, error, 0);
#endif /* _PSYNCH_TRACE_ */
    return(error);
}
/*
 *  psynch_rw_wrlock: This system call is used by psynch rwlock writers to block.
 */
int
psynch_rw_wrlock(__unused proc_t p, struct psynch_rw_wrlock_args * uap, uint32_t * retval)
{
	user_addr_t rwlock = uap->rwlock;
	uint32_t lgen = uap->lgenval;
	uint32_t ugen = uap->ugenval;
	uint32_t rw_wc = uap->rw_wc;
	//uint64_t tid = uap->tid;
	int flags = uap->flags;
	ksyn_wait_queue_t kwq;
	int error = 0, block = 0;
	uthread_t uth;
	kern_return_t kret;
	uint32_t lockseq = 0, updatebits = 0, preseq = 0, prerw_wc = 0;
	int isinit = lgen & PTHRW_RWL_INIT;
	uint32_t returnbits = 0;
	ksyn_waitq_element_t kwe;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
	uth = current_uthread();
	kwe = &uth->uu_kwe;
	kwe->kwe_lockseq = lgen;
	kwe->kwe_uth = uth;
	kwe->kwe_psynchretval = 0;
	kwe->kwe_kwqqueue = NULL;
	lockseq = (lgen & PTHRW_COUNT_MASK);

	error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	ksyn_wqlock(kwq);

	if (isinit != 0) {
		lgen &= ~PTHRW_RWL_INIT;
		if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
			/* first to notice the reset of the lock, clear preposts */
			CLEAR_REINIT_BITS(kwq);
			kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
#endif /* _PSYNCH_TRACE_ */
		}
	}

	/* handle the missed wakeups first */
	if ((kwq->kw_pre_intrcount != 0) &&
	    (kwq->kw_pre_intrtype == PTH_RW_TYPE_WRITE) &&
	    (is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {
		kwq->kw_pre_intrcount--;
		kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
		if (kwq->kw_pre_intrcount == 0)
			CLEAR_INTR_PREPOST_BITS(kwq);
		ksyn_wqunlock(kwq);
		goto out;
	}

	if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
		kwq->kw_pre_rwwc--;
		if (kwq->kw_pre_rwwc == 0) {
			preseq = kwq->kw_pre_lockseq;
			prerw_wc = kwq->kw_pre_sseq;
			CLEAR_PREPOST_BITS(kwq);
			if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0) {
				kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
#endif /* _PSYNCH_TRACE_ */
			}
			error = kwq_handle_unlock(kwq, preseq, prerw_wc, &updatebits, (KW_UNLOCK_PREPOST_WRLOCK|KW_UNLOCK_PREPOST), &block, lgen);
#if __TESTPANICS__
			if (error != 0)
				panic("rw_wrlock: kwq_handle_unlock failed %d\n", error);
#endif /* __TESTPANICS__ */
			if (block == 0) {
				ksyn_wqunlock(kwq);
				*retval = updatebits;
				goto out1;
			}
			/* insert to q and proceed as usual */
		}
	}

	/* No overlap watch needed, go ahead and block */
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], lgen, uth, kwe, SEQFIT);
#if __TESTPANICS__
	if (error != 0)
		panic("psynch_rw_wrlock: failed to enqueue\n");
#endif /* __TESTPANICS__ */
	kret = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0, THREAD_CONTINUE_NULL, NULL);
	/* drops the wq lock */
	switch (kret) {
	case THREAD_TIMED_OUT:
		error = ETIMEDOUT;
		break;
	case THREAD_INTERRUPTED:
		error = EINTR;
		break;
	default:
		error = 0;
		break;
	}

	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 4, error, 0, 0);
#endif /* _PSYNCH_TRACE_ */
		ksyn_wqlock(kwq);
		if (kwe->kwe_kwqqueue != NULL)
			ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwe);
		ksyn_wqunlock(kwq);
	} else {
		/* update bits */
		*retval = kwe->kwe_psynchretval;
		returnbits = kwe->kwe_psynchretval;
	}
out:
out1:
	ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK));
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, returnbits, error, 0);
#endif /* _PSYNCH_TRACE_ */
	return(error);
}
/*
 *  psynch_rw_yieldwrlock: This system call is used by psynch rwlock yielding writers to block.
 */
int
#ifdef NOTYET
psynch_rw_yieldwrlock(__unused proc_t p, struct psynch_rw_yieldwrlock_args * uap, __unused uint32_t * retval)
#else /* NOTYET */
psynch_rw_yieldwrlock(__unused proc_t p, __unused struct psynch_rw_yieldwrlock_args * uap, __unused uint32_t * retval)
#endif /* NOTYET */
{
#ifdef NOTYET
	user_addr_t rwlock = uap->rwlock;
	uint32_t lgen = uap->lgenval;
	uint32_t ugen = uap->ugenval;
	uint32_t rw_wc = uap->rw_wc;
	//uint64_t tid = uap->tid;
	int flags = uap->flags;
	ksyn_wait_queue_t kwq;
	int error = 0, block = 0;
	uthread_t uth;
	kern_return_t kret;
	int isinit = lgen & PTHRW_RWL_INIT;
	uint32_t returnbits = 0;
	ksyn_waitq_element_t kwe;
	uint32_t lockseq = 0, updatebits = 0, preseq = 0, prerw_wc = 0;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
	uth = current_uthread();
	kwe = &uth->uu_kwe;
	kwe->kwe_lockseq = lgen;
	kwe->kwe_uth = uth;
	kwe->kwe_psynchretval = 0;
	kwe->kwe_kwqqueue = NULL;
	lockseq = (lgen & PTHRW_COUNT_MASK);

	error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	ksyn_wqlock(kwq);

	if (isinit != 0) {
		lgen &= ~PTHRW_RWL_INIT;
		if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
			/* first to notice the reset of the lock, clear preposts */
			CLEAR_REINIT_BITS(kwq);
			kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
#endif /* _PSYNCH_TRACE_ */
		}
	}

	/* handle the missed wakeups first */
	if ((kwq->kw_pre_intrcount != 0) &&
	    (kwq->kw_pre_intrtype == PTH_RW_TYPE_YWRITE) &&
	    (is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {
		kwq->kw_pre_intrcount--;
		kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
		if (kwq->kw_pre_intrcount == 0)
			CLEAR_INTR_PREPOST_BITS(kwq);
		ksyn_wqunlock(kwq);
		goto out;
	}

	if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
		kwq->kw_pre_rwwc--;
		if (kwq->kw_pre_rwwc == 0) {
			preseq = kwq->kw_pre_lockseq;
			prerw_wc = kwq->kw_pre_sseq;
			CLEAR_PREPOST_BITS(kwq);
			if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0) {
				kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
#endif /* _PSYNCH_TRACE_ */
			}
			error = kwq_handle_unlock(kwq, preseq, prerw_wc, &updatebits, (KW_UNLOCK_PREPOST_YWRLOCK|KW_UNLOCK_PREPOST), &block, lgen);
#if __TESTPANICS__
			if (error != 0)
				panic("kwq_handle_unlock failed %d\n", error);
#endif /* __TESTPANICS__ */
			if (block == 0) {
				ksyn_wqunlock(kwq);
				*retval = updatebits;
				goto out;
			}
			/* insert to q and proceed as usual */
		}
	}

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER], lgen, uth, kwe, SEQFIT);
#if __TESTPANICS__
	if (error != 0)
		panic("psynch_rw_yieldwrlock: failed to enqueue\n");
#endif /* __TESTPANICS__ */
	kret = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0, THREAD_CONTINUE_NULL, NULL);
	switch (kret) {
	case THREAD_TIMED_OUT:
		error = ETIMEDOUT;
		break;
	case THREAD_INTERRUPTED:
		error = EINTR;
		break;
	default:
		error = 0;
		break;
	}

	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 4, error, 0, 0);
#endif /* _PSYNCH_TRACE_ */
		ksyn_wqlock(kwq);
		if (kwe->kwe_kwqqueue != NULL)
			ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER], kwe);
		ksyn_wqunlock(kwq);
	} else {
		/* update bits */
		*retval = kwe->kwe_psynchretval;
		returnbits = kwe->kwe_psynchretval;
	}
out:
	ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_RWLOCK));
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, returnbits, error, 0);
#endif /* _PSYNCH_TRACE_ */
	return(error);
#else /* NOTYET */
	return(ESRCH);
#endif /* NOTYET */
}
#ifdef NOTYET
/*
 *  psynch_rw_downgrade: This system call is used to wake up blocked readers who become eligible to run due to a downgrade.
 */
int
psynch_rw_downgrade(__unused proc_t p, struct psynch_rw_downgrade_args * uap, __unused int * retval)
{
	user_addr_t rwlock = uap->rwlock;
	uint32_t lgen = uap->lgenval;
	uint32_t ugen = uap->ugenval;
	uint32_t rw_wc = uap->rw_wc;
	//uint64_t tid = uap->tid;
	int flags = uap->flags;
	int isinit = lgen & PTHRW_RWL_INIT;
	ksyn_wait_queue_t kwq;
	int error = 0;
	int diff;
	uint32_t count = 0;
	uthread_t uth;
	uint32_t curgen = 0;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
	uth = current_uthread();

	curgen = (lgen & PTHRW_COUNT_MASK);

	error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_RWLOCK), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	ksyn_wqlock(kwq);

	if ((lgen & PTHRW_RWL_INIT) != 0) {
		lgen &= ~PTHRW_RWL_INIT;
		if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
			/* first to notice the reset of the lock, clear preposts */
			CLEAR_REINIT_BITS(kwq);
			kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
#endif /* _PSYNCH_TRACE_ */
		}
		isinit = 1;
	}

	/* if lastunlock seq is set, ensure the current one is not lower than that, as it would be spurious */
	if ((kwq->kw_lastunlockseq != PTHRW_RWL_INIT) && (is_seqlower(ugen, kwq->kw_lastunlockseq) != 0)) {
		/* spurious updatebits?? */
		error = 0;
		goto out;
	}

	/* If L-U != num of waiters, then it needs to be preposted or spurious */
	diff = find_diff(lgen, ugen);
	/* take count of the downgrade thread itself */
	diff--;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 1, kwq->kw_inqueue, curgen, 0);
#endif /* _PSYNCH_TRACE_ */
	if (find_seq_till(kwq, curgen, diff, &count) == 0) {
		if (count < (uint32_t)diff)
			goto prepost;
	}

	/* no prepost and all threads are in place, reset the bit */
	if ((isinit != 0) && ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0)) {
		kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	}

	/* can handle unlock now */
	CLEAR_PREPOST_BITS(kwq);

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	error = kwq_handle_downgrade(kwq, lgen, 0, 0, NULL);
#if __TESTPANICS__
	if (error != 0)
		panic("psynch_rw_downgrade: failed to wakeup\n");
#endif /* __TESTPANICS__ */

out:
	ksyn_wqunlock(kwq);
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_END, (uint32_t)rwlock, 0, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
	ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_RWLOCK));
	return(error);

prepost:
	kwq->kw_pre_rwwc = (rw_wc - count);
	kwq->kw_pre_lockseq = lgen;
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 1, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
	error = 0;
	goto out;
}
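/*
 * Worked example (annotation added in this edit; the numbers are invented for
 * illustration): the lock word L (lgen) and unlock word U (ugen) each advance
 * by one count per locker, so find_diff(lgen, ugen) is the number of lockers
 * the kernel still owes a grant. If diff computes to 3 but find_seq_till()
 * only locates count = 1 waiter at or below curgen, the other 2 have not
 * reached the kernel yet; the shortfall is recorded in kw_pre_rwwc /
 * kw_pre_lockseq and is consumed, one arrival at a time, by the prepost
 * checks at the top of the lock paths above.
 */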
/*
 *  psynch_rw_upgrade: This system call is used by a reader to block waiting for an upgrade to be granted.
 */
int
psynch_rw_upgrade(__unused proc_t p, struct psynch_rw_upgrade_args * uap, uint32_t * retval)
{
	user_addr_t rwlock = uap->rwlock;
	uint32_t lgen = uap->lgenval;
	uint32_t ugen = uap->ugenval;
	uint32_t rw_wc = uap->rw_wc;
	//uint64_t tid = uap->tid;
	int flags = uap->flags;
	ksyn_wait_queue_t kwq;
	int error = 0, block = 0;
	uthread_t uth;
	kern_return_t kret;
	uint32_t lockseq = 0, updatebits = 0, preseq = 0, prerw_wc = 0;
	int isinit = lgen & PTHRW_RWL_INIT;
	ksyn_waitq_element_t kwe;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
	uth = current_uthread();
	kwe = &uth->uu_kwe;
	kwe->kwe_lockseq = lgen;
	kwe->kwe_uth = uth;
	kwe->kwe_psynchretval = 0;
	kwe->kwe_kwqqueue = NULL;
	lockseq = (lgen & PTHRW_COUNT_MASK);

	error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_RWLOCK), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	ksyn_wqlock(kwq);

	if (isinit != 0) {
		lgen &= ~PTHRW_RWL_INIT;
		if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
			/* first to notice the reset of the lock, clear preposts */
			CLEAR_REINIT_BITS(kwq);
			kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
#endif /* _PSYNCH_TRACE_ */
		}
	}

	/* handle the missed wakeups first */
	if ((kwq->kw_pre_intrcount != 0) &&
	    ((kwq->kw_pre_intrtype == PTH_RW_TYPE_READ) || (kwq->kw_pre_intrtype == PTH_RW_TYPE_LREAD)) &&
	    (is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {
		kwq->kw_pre_intrcount--;
		kwe->kwe_psynchretval = kwq->kw_pre_intrretbits;
		if (kwq->kw_pre_intrcount == 0)
			CLEAR_INTR_PREPOST_BITS(kwq);
		ksyn_wqunlock(kwq);
		goto out;
	}

	if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
		kwq->kw_pre_rwwc--;
		if (kwq->kw_pre_rwwc == 0) {
			preseq = kwq->kw_pre_lockseq;
			prerw_wc = kwq->kw_pre_sseq;
			CLEAR_PREPOST_BITS(kwq);
			if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0) {
				kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
#endif /* _PSYNCH_TRACE_ */
			}
			error = kwq_handle_unlock(kwq, preseq, prerw_wc, &updatebits, (KW_UNLOCK_PREPOST_UPGRADE|KW_UNLOCK_PREPOST), &block, lgen);
#if __TESTPANICS__
			if (error != 0)
				panic("rw_upgrade: kwq_handle_unlock failed %d\n", error);
#endif /* __TESTPANICS__ */
			if (block == 0) {
				ksyn_wqunlock(kwq);
				goto out;
			}
			/* insert to q and proceed as usual */
		}
	}

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_UPGRADE], lgen, uth, kwe, SEQFIT);
#if __TESTPANICS__
	if (error != 0)
		panic("psynch_rw_upgrade: failed to enqueue\n");
#endif /* __TESTPANICS__ */

	kret = ksyn_block_thread_locked(kwq, (uint64_t)0, kwe, 0, THREAD_CONTINUE_NULL, NULL);
	/* drops the lock */
	switch (kret) {
	case THREAD_TIMED_OUT:
		error = ETIMEDOUT;
		break;
	case THREAD_INTERRUPTED:
		error = EINTR;
		break;
	default:
		error = 0;
		break;
	}

	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 4, error, 0, 0);
#endif /* _PSYNCH_TRACE_ */
		ksyn_wqlock(kwq);
		if (kwe->kwe_kwqqueue != NULL)
			ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_UPGRADE], kwe);
		ksyn_wqunlock(kwq);
	} else {
		*retval = kwe->kwe_psynchretval;
	}
out:
	ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_RWLOCK));
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
	return(error);
}
#else /* NOTYET */
int
psynch_rw_upgrade(__unused proc_t p, __unused struct psynch_rw_upgrade_args * uap, __unused uint32_t * retval)
{
	return(0);
}

int
psynch_rw_downgrade(__unused proc_t p, __unused struct psynch_rw_downgrade_args * uap, __unused int * retval)
{
	return(0);
}
#endif /* NOTYET */
/*
 *  psynch_rw_unlock: This system call is used for unlock state postings. This will grant the appropriate
 *			reader/writer variety of lock.
 */
int
psynch_rw_unlock(__unused proc_t p, struct psynch_rw_unlock_args * uap, uint32_t * retval)
{
	user_addr_t rwlock = uap->rwlock;
	uint32_t lgen = uap->lgenval;
	uint32_t ugen = uap->ugenval;
	uint32_t rw_wc = uap->rw_wc;
	uint32_t curgen;
	//uint64_t tid = uap->tid;
	int flags = uap->flags;
	uthread_t uth;
	ksyn_wait_queue_t kwq;
	uint32_t updatebits = 0;
	int error = 0, diff;
	uint32_t count = 0;
	int isinit = 0;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
	uth = current_uthread();

	error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_RWLOCK), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	curgen = lgen & PTHRW_COUNT_MASK;

	ksyn_wqlock(kwq);

	if ((lgen & PTHRW_RWL_INIT) != 0) {
		lgen &= ~PTHRW_RWL_INIT;
		if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
			/* first to notice the reset of the lock, clear preposts */
			CLEAR_REINIT_BITS(kwq);
			kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 1, 0);
#endif /* _PSYNCH_TRACE_ */
		}
		isinit = 1;
	}

	/* if lastunlock seq is set, ensure the current one is not lower than that, as it would be spurious */
	if ((kwq->kw_lastunlockseq != PTHRW_RWL_INIT) && (is_seqlower(ugen, kwq->kw_lastunlockseq) != 0)) {
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, (uint32_t)0xeeeeeeee, rw_wc, kwq->kw_lastunlockseq, 0);
#endif /* _PSYNCH_TRACE_ */
		error = 0;
		goto out;
	}

	/* If L-U != num of waiters, then it needs to be preposted or spurious */
	diff = find_diff(lgen, ugen);

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 1, kwq->kw_inqueue, curgen, 0);
#endif /* _PSYNCH_TRACE_ */
	if (find_seq_till(kwq, curgen, diff, &count) == 0) {
		if ((count == 0) || (count < (uint32_t)diff))
			goto prepost;
	}

	/* no prepost and all threads are in place, reset the bit */
	if ((isinit != 0) && ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0)) {
		kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, lgen, ugen, rw_wc, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	}

	/* can handle unlock now */
	CLEAR_PREPOST_BITS(kwq);

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	error = kwq_handle_unlock(kwq, lgen, rw_wc, &updatebits, 0, NULL, 0);
#if __TESTPANICS__
	if (error != 0)
		panic("psynch_rw_unlock: kwq_handle_unlock failed %d\n", error);
#endif /* __TESTPANICS__ */
out:
	if (error == 0) {
		/* update bits on successful unlock */
		*retval = updatebits;
	}

	ksyn_wqunlock(kwq);
	ksyn_wqrelease(kwq, NULL, 0, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_RWLOCK));
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0, updatebits, error, 0);
#endif /* _PSYNCH_TRACE_ */
	return(error);

prepost:
	/* update if the new seq is higher than prev prepost, or first set */
	if ((is_rws_setseq(kwq->kw_pre_sseq) != 0) ||
	    (is_seqhigher_eq((rw_wc & PTHRW_COUNT_MASK), (kwq->kw_pre_sseq & PTHRW_COUNT_MASK)) != 0)) {
		kwq->kw_pre_rwwc = (diff - count);
		kwq->kw_pre_lockseq = curgen;
		kwq->kw_pre_sseq = rw_wc;
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, rw_wc, count, 0);
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 4, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
		updatebits = lgen;	/* let this not do unlock handling */
	}
	error = 0;
	goto out;
}
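/*
 * NB (annotation added in this edit, not part of the original Apple source):
 * the prepost branch above only overwrites a pending prepost when the new
 * S word (rw_wc) is at least as recent as the recorded kw_pre_sseq, or when
 * no prepost sequence is set (is_rws_setseq()); an older in-flight unlock
 * therefore cannot clobber a newer one. Returning updatebits = lgen in that
 * case signals that no grant was performed yet.
 */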
/*
 *  psynch_rw_unlock2: This system call is used to wake up pending readers when an unlock grant from the kernel
 *			races with new reader arrivals.
 */
int
psynch_rw_unlock2(__unused proc_t p, __unused struct psynch_rw_unlock2_args * uap, __unused uint32_t * retval)
{
	return(ENOTSUP);
}
/* ************************************************************************** */
void
pth_global_hashinit(void)
{
	int arg;

	pth_glob_hashtbl = hashinit(PTH_HASHSIZE * 4, M_PROC, &pthhash);

	/*
	 * pthtest={0,1,2,3} (override default aborting behavior on pthread sync failures)
	 * 0 - just return errors
	 * 1 - print and return errors
	 * 2 - abort user, print and return errors
	 * 3 - panic
	 */
	if (!PE_parse_boot_argn("pthtest", &arg, sizeof(arg)))
		arg = __TESTMODE__;

	if (arg == 3) {
		__test_panics__ = 1;
		printf("Pthread support PANICS when sync kernel primitives misused\n");
	} else if (arg == 2) {
		__test_aborts__ = 1;
		__test_prints__ = 1;
		printf("Pthread support ABORTS when sync kernel primitives misused\n");
	} else if (arg == 1) {
		__test_prints__ = 1;
		printf("Pthread support LOGS when sync kernel primitives misused\n");
	}
}
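/*
 * Example (annotation added in this edit): the test mode is selected at boot;
 * for instance, booting with the boot-arg string "pthtest=2" makes misuse of
 * the psynch primitives print a message and abort the offending user process,
 * while "pthtest=3" panics the machine (see __FAILEDUSERTEST__). Without the
 * boot-arg, the compiled-in __TESTMODE__ default applies.
 */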
void
pth_proc_hashinit(proc_t p)
{
	p->p_pthhash = hashinit(PTH_HASHSIZE, M_PROC, &pthhash);
	if (p->p_pthhash == NULL)
		panic("pth_proc_hashinit: hash init returned 0\n");
}
ksyn_wait_queue_t
ksyn_wq_hash_lookup(user_addr_t mutex, proc_t p, int flags, uint64_t object, uint64_t objoffset)
{
	ksyn_wait_queue_t kwq;
	struct pthhashhead * hashptr;

	if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED) {
		hashptr = pth_glob_hashtbl;
		kwq = (&hashptr[object & pthhash])->lh_first;
		for (; kwq != NULL; kwq = kwq->kw_hash.le_next) {
			if ((kwq->kw_object == object) && (kwq->kw_offset == objoffset)) {
				return (kwq);
			}
		}
	} else {
		hashptr = p->p_pthhash;
		kwq = (&hashptr[mutex & pthhash])->lh_first;
		for (; kwq != NULL; kwq = kwq->kw_hash.le_next) {
			if (kwq->kw_addr == mutex) {
				return (kwq);
			}
		}
	}
	return (NULL);
}
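/*
 * NB (annotation added in this edit, not part of the original Apple source):
 * two keying schemes coexist here. PTHREAD_PROCESS_SHARED objects hash into
 * the global table by the backing VM (object, offset) pair obtained from
 * ksyn_findobj(), so different processes mapping the same page find the same
 * kwq; process-private objects hash into the per-process table by the raw
 * user address.
 */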
void
pth_proc_hashdelete(proc_t p)
{
	struct pthhashhead * hashptr;
	ksyn_wait_queue_t kwq;
	int hashsize = pthhash + 1;
	int i;

#if _PSYNCH_TRACE_
	if ((pthread_debug_proc != NULL) && (p == pthread_debug_proc))
		pthread_debug_proc = PROC_NULL;
#endif /* _PSYNCH_TRACE_ */
	hashptr = p->p_pthhash;
	p->p_pthhash = NULL;
	if (hashptr == NULL)
		return;

	pthread_list_lock();
	for (i = 0; i < hashsize; i++) {
		while ((kwq = LIST_FIRST(&hashptr[i])) != NULL) {
			if ((kwq->kw_pflags & KSYN_WQ_INHASH) != 0) {
				kwq->kw_pflags &= ~KSYN_WQ_INHASH;
				LIST_REMOVE(kwq, kw_hash);
			}
			if ((kwq->kw_pflags & KSYN_WQ_FLIST) != 0) {
				kwq->kw_pflags &= ~KSYN_WQ_FLIST;
				LIST_REMOVE(kwq, kw_list);
			}
			pthread_list_unlock();
			/* release fake entries if present for cvars */
			if (((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_CVAR) && (kwq->kw_inqueue != 0))
				ksyn_freeallkwe(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER]);
			lck_mtx_destroy(&kwq->kw_lock, pthread_lck_grp);
			zfree(kwq_zone, kwq);
			pthread_list_lock();
		}
	}
	pthread_list_unlock();
	FREE(hashptr, M_PROC);
}
/* no lock held for this as the waitqueue is getting freed */
void
ksyn_freeallkwe(ksyn_queue_t kq)
{
	ksyn_waitq_element_t kwe;

	/* free all the fake entries, dequeue rest */
	kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
	while (kwe != NULL) {
		if (kwe->kwe_flags != KWE_THREAD_INWAIT) {
			TAILQ_REMOVE(&kq->ksynq_kwelist, kwe, kwe_list);
			zfree(kwe_zone, kwe);
		} else {
			TAILQ_REMOVE(&kq->ksynq_kwelist, kwe, kwe_list);
		}
		kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
	}
}
/* find kernel waitqueue, if not present create one. Grants a reference */
int
ksyn_wqfind(user_addr_t mutex, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, uint64_t tid, int flags, int wqtype, ksyn_wait_queue_t * kwqp)
{
	ksyn_wait_queue_t kwq;
	ksyn_wait_queue_t nkwq;
	struct pthhashhead * hashptr;
	uint64_t object = 0, offset = 0;
	proc_t p = current_proc();
	int retry = mgen & PTH_RWL_RETRYBIT;
	struct ksyn_queue kfreeq;
	int i;

	if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED) {
		(void)ksyn_findobj(mutex, &object, &offset);
		hashptr = pth_glob_hashtbl;
	} else {
		hashptr = p->p_pthhash;
	}

	ksyn_queue_init(&kfreeq);

	if (((wqtype & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_MTX) && (retry != 0))
		mgen &= ~PTH_RWL_RETRYBIT;

loop:
	//pthread_list_lock_spin();
	pthread_list_lock();

	kwq = ksyn_wq_hash_lookup(mutex, p, flags, object, offset);

	if (kwq != NULL) {
		if ((kwq->kw_pflags & KSYN_WQ_FLIST) != 0) {
			LIST_REMOVE(kwq, kw_list);
			kwq->kw_pflags &= ~KSYN_WQ_FLIST;
		}
		if ((kwq->kw_type & KSYN_WQTYPE_MASK) != (wqtype & KSYN_WQTYPE_MASK)) {
			if ((kwq->kw_inqueue == 0) && (kwq->kw_pre_rwwc == 0) && (kwq->kw_pre_intrcount == 0)) {
				if (kwq->kw_iocount == 0) {
					/* retype the idle kwq in place */
					kwq->kw_addr = mutex;
					kwq->kw_flags = flags;
					kwq->kw_object = object;
					kwq->kw_offset = offset;
					kwq->kw_type = (wqtype & KSYN_WQTYPE_MASK);
					CLEAR_REINIT_BITS(kwq);
					CLEAR_INTR_PREPOST_BITS(kwq);
					CLEAR_PREPOST_BITS(kwq);
					kwq->kw_lword = mgen;
					kwq->kw_uword = ugen;
					kwq->kw_sword = rw_wc;
					kwq->kw_owner = tid;
				} else if ((kwq->kw_iocount == 1) && (kwq->kw_dropcount == kwq->kw_iocount)) {
					/* if all users are unlockers then wait for it to finish */
					kwq->kw_pflags |= KSYN_WQ_WAITING;
					/* wait for the wq to be free */
					(void)msleep(&kwq->kw_pflags, pthread_list_mlock, PDROP, "ksyn_wqfind", 0);
					/* does not have list lock */
					goto loop;
				} else {
					__FAILEDUSERTEST__("address already known to kernel for another (busy) synchronizer type\n");
					pthread_list_unlock();
					return (EBUSY);
				}
			} else {
				__FAILEDUSERTEST__("address already known to kernel for another (busy) synchronizer type(1)\n");
				pthread_list_unlock();
				return (EBUSY);
			}
		}
		kwq->kw_iocount++;
		if (wqtype == KSYN_WQTYPE_MUTEXDROP)
			kwq->kw_dropcount++;
		if (kwqp != NULL)
			*kwqp = kwq;
		pthread_list_unlock();
		return (0);
	}

	pthread_list_unlock();

	nkwq = (ksyn_wait_queue_t)zalloc(kwq_zone);
	bzero(nkwq, sizeof(struct ksyn_wait_queue));
	nkwq->kw_addr = mutex;
	nkwq->kw_flags = flags;
	nkwq->kw_iocount = 1;
	if (wqtype == KSYN_WQTYPE_MUTEXDROP)
		nkwq->kw_dropcount++;
	nkwq->kw_object = object;
	nkwq->kw_offset = offset;
	nkwq->kw_type = (wqtype & KSYN_WQTYPE_MASK);
	nkwq->kw_lastseqword = PTHRW_RWS_INIT;
	if (nkwq->kw_type == KSYN_WQTYPE_RWLOCK)
		nkwq->kw_nextseqword = PTHRW_RWS_INIT;

	nkwq->kw_pre_sseq = PTHRW_RWS_INIT;

	CLEAR_PREPOST_BITS(nkwq);
	CLEAR_INTR_PREPOST_BITS(nkwq);
	CLEAR_REINIT_BITS(nkwq);
	nkwq->kw_lword = mgen;
	nkwq->kw_uword = ugen;
	nkwq->kw_sword = rw_wc;
	nkwq->kw_owner = tid;

	for (i = 0; i < KSYN_QUEUE_MAX; i++)
		ksyn_queue_init(&nkwq->kw_ksynqueues[i]);

	lck_mtx_init(&nkwq->kw_lock, pthread_lck_grp, pthread_lck_attr);

	//pthread_list_lock_spin();
	pthread_list_lock();
	/* see whether it is already allocated */
	kwq = ksyn_wq_hash_lookup(mutex, p, flags, object, offset);

	if (kwq != NULL) {
		if ((kwq->kw_pflags & KSYN_WQ_FLIST) != 0) {
			LIST_REMOVE(kwq, kw_list);
			kwq->kw_pflags &= ~KSYN_WQ_FLIST;
		}
		if ((kwq->kw_type & KSYN_WQTYPE_MASK) != (wqtype & KSYN_WQTYPE_MASK)) {
			if ((kwq->kw_inqueue == 0) && (kwq->kw_pre_rwwc == 0) && (kwq->kw_pre_intrcount == 0)) {
				if (kwq->kw_iocount == 0) {
					kwq->kw_addr = mutex;
					kwq->kw_flags = flags;
					kwq->kw_object = object;
					kwq->kw_offset = offset;
					kwq->kw_type = (wqtype & KSYN_WQTYPE_MASK);
					CLEAR_REINIT_BITS(kwq);
					CLEAR_INTR_PREPOST_BITS(kwq);
					CLEAR_PREPOST_BITS(kwq);
					kwq->kw_lword = mgen;
					kwq->kw_uword = ugen;
					kwq->kw_sword = rw_wc;
					kwq->kw_owner = tid;
				} else if ((kwq->kw_iocount == 1) && (kwq->kw_dropcount == kwq->kw_iocount)) {
					kwq->kw_pflags |= KSYN_WQ_WAITING;
					/* wait for the wq to be free */
					(void)msleep(&kwq->kw_pflags, pthread_list_mlock, PDROP, "ksyn_wqfind", 0);

					lck_mtx_destroy(&nkwq->kw_lock, pthread_lck_grp);
					zfree(kwq_zone, nkwq);
					/* will acquire lock again */
					goto loop;
				} else {
					__FAILEDUSERTEST__("address already known to kernel for another [busy] synchronizer type(2)\n");
					pthread_list_unlock();
					lck_mtx_destroy(&nkwq->kw_lock, pthread_lck_grp);
					zfree(kwq_zone, nkwq);
					return (EBUSY);
				}
			} else {
				__FAILEDUSERTEST__("address already known to kernel for another [busy] synchronizer type(3)\n");
				pthread_list_unlock();
				lck_mtx_destroy(&nkwq->kw_lock, pthread_lck_grp);
				zfree(kwq_zone, nkwq);
				return (EBUSY);
			}
		}
		kwq->kw_iocount++;
		if (wqtype == KSYN_WQTYPE_MUTEXDROP)
			kwq->kw_dropcount++;
		if (kwqp != NULL)
			*kwqp = kwq;
		pthread_list_unlock();
		lck_mtx_destroy(&nkwq->kw_lock, pthread_lck_grp);
		zfree(kwq_zone, nkwq);
		return (0);
	}

	kwq = nkwq;
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVSEQ | DBG_FUNC_NONE, kwq->kw_lword, kwq->kw_uword, kwq->kw_sword, 0xffff, 0);
#endif /* _PSYNCH_TRACE_ */
	if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED) {
		kwq->kw_pflags |= KSYN_WQ_SHARED;
		LIST_INSERT_HEAD(&hashptr[kwq->kw_object & pthhash], kwq, kw_hash);
	} else {
		LIST_INSERT_HEAD(&hashptr[mutex & pthhash], kwq, kw_hash);
	}
	kwq->kw_pflags |= KSYN_WQ_INHASH;

	if (kwqp != NULL)
		*kwqp = kwq;
	pthread_list_unlock();
	return (0);
}
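/*
 * NB (annotation added in this edit, not part of the original Apple source):
 * the allocate path above is a classic optimistic pattern. The list lock is
 * dropped to zalloc() and initialize nkwq, then the hash is searched again
 * under the lock; if another thread raced in and inserted a kwq for the same
 * key, nkwq is destroyed and the winner is used, otherwise nkwq is inserted.
 * The msleep() on kw_pflags waits out a same-address kwq of a different type
 * whose remaining users are all unlockers (kw_dropcount == kw_iocount), after
 * which the lookup restarts.
 */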
/* Reference from find is dropped here. Starts the free process if needed */
void
ksyn_wqrelease(ksyn_wait_queue_t kwq, ksyn_wait_queue_t ckwq, int qfreenow, int wqtype)
{
	uint64_t deadline;
	struct timeval t;
	int sched = 0;
	ksyn_wait_queue_t free_elem = NULL;
	ksyn_wait_queue_t free_elem1 = NULL;

	//pthread_list_lock_spin();
	pthread_list_lock();
	if (kwq != NULL) {
		kwq->kw_iocount--;
		if (wqtype == KSYN_WQTYPE_MUTEXDROP) {
			kwq->kw_dropcount--;
		}
		if (kwq->kw_iocount == 0) {
			if ((kwq->kw_pflags & KSYN_WQ_WAITING) != 0) {
				/* someone is waiting for the waitqueue, wake them up */
				kwq->kw_pflags &= ~KSYN_WQ_WAITING;
				wakeup(&kwq->kw_pflags);
			}

			if ((kwq->kw_pre_rwwc == 0) && (kwq->kw_inqueue == 0) && (kwq->kw_pre_intrcount == 0)) {
				if (qfreenow == 0) {
					microuptime(&kwq->kw_ts);
					LIST_INSERT_HEAD(&pth_free_list, kwq, kw_list);
					kwq->kw_pflags |= KSYN_WQ_FLIST;
					sched = 1;
				} else {
					/* remove from the only list it is in, i.e. the hash */
					kwq->kw_pflags &= ~(KSYN_WQ_FLIST | KSYN_WQ_INHASH);
					LIST_REMOVE(kwq, kw_hash);
					lck_mtx_destroy(&kwq->kw_lock, pthread_lck_grp);
					free_elem = kwq;
				}
			}
		}
	}
	if (ckwq != NULL) {
		ckwq->kw_iocount--;
		if (wqtype == KSYN_WQTYPE_MUTEXDROP) {
			ckwq->kw_dropcount--;
		}
		if (ckwq->kw_iocount == 0) {
			if ((ckwq->kw_pflags & KSYN_WQ_WAITING) != 0) {
				/* someone is waiting for the waitqueue, wake them up */
				ckwq->kw_pflags &= ~KSYN_WQ_WAITING;
				wakeup(&ckwq->kw_pflags);
			}
			if ((ckwq->kw_pre_rwwc == 0) && (ckwq->kw_inqueue == 0) && (ckwq->kw_pre_intrcount == 0)) {
				if (qfreenow == 0) {
					/* mark for free if we can */
					microuptime(&ckwq->kw_ts);
					LIST_INSERT_HEAD(&pth_free_list, ckwq, kw_list);
					ckwq->kw_pflags |= KSYN_WQ_FLIST;
					sched = 1;
				} else {
					/* remove from the only list it is in, i.e. the hash */
					ckwq->kw_pflags &= ~(KSYN_WQ_FLIST | KSYN_WQ_INHASH);
					LIST_REMOVE(ckwq, kw_hash);
					lck_mtx_destroy(&ckwq->kw_lock, pthread_lck_grp);
					free_elem1 = ckwq;
				}
			}
		}
	}

	if (sched == 1 && psynch_cleanupset == 0) {
		psynch_cleanupset = 1;
		microuptime(&t);
		t.tv_sec += KSYN_CLEANUP_DEADLINE;
		deadline = tvtoabstime(&t);
		thread_call_enter_delayed(psynch_thcall, deadline);
	}
	pthread_list_unlock();
	if (free_elem != NULL)
		zfree(kwq_zone, free_elem);
	if (free_elem1 != NULL)
		zfree(kwq_zone, free_elem1);
}
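/*
 * NB (annotation added in this edit, not part of the original Apple source):
 * a kwq whose last reference is dropped is not freed immediately; it is
 * timestamped and parked on pth_free_list so that an immediate re-lock of
 * the same address can reuse it. psynch_wq_cleanup() (scheduled here via
 * thread_call_enter_delayed) reaps entries that stay idle for a full
 * KSYN_CLEANUP_DEADLINE interval.
 */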
/* responsible to free the waitqueues */
void
psynch_wq_cleanup(__unused void * param, __unused void * param1)
{
	ksyn_wait_queue_t kwq;
	struct timeval t;
	LIST_HEAD(, ksyn_wait_queue) freelist = {NULL};
	int count = 0, delayed = 0, diff;
	uint64_t deadline = 0;

	//pthread_list_lock_spin();
	pthread_list_lock();

	num_addedfreekwq = num_infreekwq - num_lastfreekwqcount;
	num_lastfreekwqcount = num_infreekwq;
	microuptime(&t);

	LIST_FOREACH(kwq, &pth_free_list, kw_list) {
		if ((kwq->kw_iocount != 0) || (kwq->kw_pre_rwwc != 0) || (kwq->kw_inqueue != 0) || (kwq->kw_pre_intrcount != 0)) {
			/* still in use */
			continue;
		}
		diff = t.tv_sec - kwq->kw_ts.tv_sec;
		if (diff < 0)
			diff *= -1;
		if (diff >= KSYN_CLEANUP_DEADLINE) {
			/* out of hash */
			kwq->kw_pflags &= ~(KSYN_WQ_FLIST | KSYN_WQ_INHASH);
			LIST_REMOVE(kwq, kw_hash);
			LIST_REMOVE(kwq, kw_list);
			LIST_INSERT_HEAD(&freelist, kwq, kw_list);
			count++;
		} else {
			delayed = 1;
		}
	}
	if (delayed != 0) {
		t.tv_sec += KSYN_CLEANUP_DEADLINE;
		deadline = tvtoabstime(&t);
		thread_call_enter_delayed(psynch_thcall, deadline);
		psynch_cleanupset = 1;
	} else {
		psynch_cleanupset = 0;
	}
	pthread_list_unlock();

	while ((kwq = LIST_FIRST(&freelist)) != NULL) {
		LIST_REMOVE(kwq, kw_list);
		lck_mtx_destroy(&kwq->kw_lock, pthread_lck_grp);
		zfree(kwq_zone, kwq);
	}
}
int
#if _PSYNCH_TRACE_
ksyn_block_thread_locked(ksyn_wait_queue_t kwq, uint64_t abstime, ksyn_waitq_element_t kwe, int mylog, thread_continue_t continuation, void * parameter)
#else
ksyn_block_thread_locked(ksyn_wait_queue_t kwq, uint64_t abstime, ksyn_waitq_element_t kwe, __unused int mylog, thread_continue_t continuation, void * parameter)
#endif
{
	kern_return_t kret;
	int error = 0;
#if _PSYNCH_TRACE_
	uthread_t uth = NULL;
#endif /* _PSYNCH_TRACE_ */

	kwe->kwe_kwqqueue = (void *)kwq;
	assert_wait_deadline(&kwe->kwe_psynchretval, THREAD_ABORTSAFE, abstime);
	ksyn_wqunlock(kwq);

	if (continuation == THREAD_CONTINUE_NULL)
		kret = thread_block(NULL);
	else
		kret = thread_block_parameter(continuation, parameter);

	switch (kret) {
	case THREAD_TIMED_OUT:
		error = ETIMEDOUT;
		break;
	case THREAD_INTERRUPTED:
		error = EINTR;
		break;
	}
#if _PSYNCH_TRACE_
	uth = current_uthread();
#if defined(__i386__)
	if (mylog != 0)
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_THWAKEUP | DBG_FUNC_NONE, 0xf4f3f2f1, (uint32_t)uth, kret, 0, 0);
#else
	if (mylog != 0)
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_THWAKEUP | DBG_FUNC_NONE, 0xeeeeeeee, kret, error, 0xeeeeeeee, 0);
#endif
#endif /* _PSYNCH_TRACE_ */

	return(error);
}
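/*
 * NB (annotation added in this edit, not part of the original Apple source):
 * the wait/wake channel is the address of kwe->kwe_psynchretval.
 * assert_wait_deadline() registers the thread on that event before the kwq
 * lock is dropped, and ksyn_wakeup_thread() below posts thread_wakeup_one()
 * on the same address, so a wakeup that beats the block is never lost; a
 * KERN_NOT_WAITING result on the wakeup side is what feeds the kw_pre_intr*
 * "missed wakeup" records consumed in the lock paths.
 */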
kern_return_t
ksyn_wakeup_thread(__unused ksyn_wait_queue_t kwq, ksyn_waitq_element_t kwe)
{
	kern_return_t kret;
#if _PSYNCH_TRACE_
	uthread_t uth = NULL;
#endif /* _PSYNCH_TRACE_ */

	kret = thread_wakeup_one((caddr_t)&kwe->kwe_psynchretval);

	if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
		panic("ksyn_wakeup_thread: panic waking up thread %x\n", kret);
#if _PSYNCH_TRACE_
#if defined(__i386__)
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_THWAKEUP | DBG_FUNC_NONE, 0xf1f2f3f4, (uint32_t)uth, kret, 0, 0);
#endif
#endif /* _PSYNCH_TRACE_ */

	return(kret);
}
/* find the true shared object/offset for shared mutexes */
int
ksyn_findobj(uint64_t mutex, uint64_t * objectp, uint64_t * offsetp)
{
	vm_page_info_basic_data_t info;
	kern_return_t kret;
	mach_msg_type_number_t count = VM_PAGE_INFO_BASIC_COUNT;

	kret = vm_map_page_info(current_map(), mutex, VM_PAGE_INFO_BASIC,
			(vm_page_info_t)&info, &count);

	if (kret != KERN_SUCCESS)
		return(EINVAL);

	if (objectp != NULL)
		*objectp = (uint64_t)info.object_id;
	if (offsetp != NULL)
		*offsetp = (uint64_t)info.offset;

	return(0);
}
/* lowest of kw_fr, kw_flr, kw_fwr, kw_fywr */
int
kwq_find_rw_lowest(ksyn_wait_queue_t kwq, int flags, uint32_t premgen, int * typep, uint32_t lowest[])
{
	uint32_t kw_fr, kw_flr, kw_fwr, kw_fywr, low;
	int type = 0, lowtype, typenum[4];
	uint32_t numbers[4];
	int count = 0, i;

	if ((kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0)) {
		type |= PTH_RWSHFT_TYPE_READ;
		/* read entries are present */
		if (kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count != 0) {
			kw_fr = kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_firstnum;
			if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) && (is_seqlower(premgen, kw_fr) != 0))
				kw_fr = premgen;
		} else
			kw_fr = premgen;

		lowest[KSYN_QUEUE_READ] = kw_fr;
		numbers[count] = kw_fr;
		typenum[count] = PTH_RW_TYPE_READ;
		count++;
	} else
		lowest[KSYN_QUEUE_READ] = 0;

	if ((kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_LREADLOCK) != 0)) {
		type |= PTH_RWSHFT_TYPE_LREAD;
		/* long read entries are present */
		if (kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count != 0) {
			kw_flr = kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_firstnum;
			if (((flags & KW_UNLOCK_PREPOST_LREADLOCK) != 0) && (is_seqlower(premgen, kw_flr) != 0))
				kw_flr = premgen;
		} else
			kw_flr = premgen;

		lowest[KSYN_QUEUE_LREAD] = kw_flr;
		numbers[count] = kw_flr;
		typenum[count] = PTH_RW_TYPE_LREAD;
		count++;
	} else
		lowest[KSYN_QUEUE_LREAD] = 0;

	if ((kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0)) {
		type |= PTH_RWSHFT_TYPE_WRITE;
		/* write entries are present */
		if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0) {
			kw_fwr = kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_firstnum;
			if (((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0) && (is_seqlower(premgen, kw_fwr) != 0))
				kw_fwr = premgen;
		} else
			kw_fwr = premgen;

		lowest[KSYN_QUEUE_WRITER] = kw_fwr;
		numbers[count] = kw_fwr;
		typenum[count] = PTH_RW_TYPE_WRITE;
		count++;
	} else
		lowest[KSYN_QUEUE_WRITER] = 0;

	if ((kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_YWRLOCK) != 0)) {
		type |= PTH_RWSHFT_TYPE_YWRITE;
		/* yielding write entries are present */
		if (kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0) {
			kw_fywr = kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_firstnum;
			if (((flags & KW_UNLOCK_PREPOST_YWRLOCK) != 0) && (is_seqlower(premgen, kw_fywr) != 0))
				kw_fywr = premgen;
		} else
			kw_fywr = premgen;

		lowest[KSYN_QUEUE_YWRITER] = kw_fywr;
		numbers[count] = kw_fywr;
		typenum[count] = PTH_RW_TYPE_YWRITE;
		count++;
	} else
		lowest[KSYN_QUEUE_YWRITER] = 0;

#if __TESTPANICS__
	if (count == 0)
		panic("nothing in the queue???\n");
#endif /* __TESTPANICS__ */

	low = numbers[0];
	lowtype = typenum[0];
	if (count > 1) {
		for (i = 1; i < count; i++) {
			if (is_seqlower(numbers[i], low) != 0) {
				low = numbers[i];
				lowtype = typenum[i];
			}
		}
	}
	if (typep != NULL)
		*typep = (type | lowtype);
	return(0);
}
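/*
 * Worked example (annotation added in this edit; the values are invented for
 * illustration): with readers queued from seq 0x14, writers from 0x10, and a
 * preposting reader at premgen 0x0c with KW_UNLOCK_PREPOST_READLOCK set, the
 * READ candidate becomes 0x0c (premgen is lower than the queue head), the
 * WRITE candidate is 0x10, and *typep comes back as
 * (PTH_RWSHFT_TYPE_READ | PTH_RWSHFT_TYPE_WRITE | PTH_RW_TYPE_READ), since
 * 0x0c is the lowest pending sequence overall.
 */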
/* wakeup readers and longreaders up to the writer limits */
int
ksyn_wakeupreaders(ksyn_wait_queue_t kwq, uint32_t limitread, int longreadset, int allreaders, uint32_t updatebits, int * wokenp)
{
	ksyn_waitq_element_t kwe = NULL;
	ksyn_queue_t kq;
	int failedwakeup = 0;
	int numwoken = 0;
	kern_return_t kret = KERN_SUCCESS;
	uint32_t lbits = 0;

	lbits = updatebits;
	if (longreadset != 0) {
		/* clear all read and longreads */
		while ((kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_READ], kwq)) != NULL) {
			kwe->kwe_psynchretval = lbits;
			kwe->kwe_kwqqueue = NULL;
			numwoken++;
			kret = ksyn_wakeup_thread(kwq, kwe);
#if __TESTPANICS__
			if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
				panic("ksyn_wakeupreaders: panic waking up readers\n");
#endif /* __TESTPANICS__ */
			if (kret == KERN_NOT_WAITING) {
				failedwakeup++;
			}
		}
		while ((kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_LREAD], kwq)) != NULL) {
			kwe->kwe_psynchretval = lbits;
			kwe->kwe_kwqqueue = NULL;
			numwoken++;
			kret = ksyn_wakeup_thread(kwq, kwe);
#if __TESTPANICS__
			if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
				panic("ksyn_wakeupreaders: panic waking up lreaders\n");
#endif /* __TESTPANICS__ */
			if (kret == KERN_NOT_WAITING) {
				failedwakeup++;
			}
		}
	} else {
		kq = &kwq->kw_ksynqueues[KSYN_QUEUE_READ];
		while ((kq->ksynq_count != 0) && (allreaders || (is_seqlower(kq->ksynq_firstnum, limitread) != 0))) {
			kwe = ksyn_queue_removefirst(kq, kwq);
			kwe->kwe_psynchretval = lbits;
			kwe->kwe_kwqqueue = NULL;
			numwoken++;
			kret = ksyn_wakeup_thread(kwq, kwe);
#if __TESTPANICS__
			if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
				panic("ksyn_wakeupreaders: panic waking up readers\n");
#endif /* __TESTPANICS__ */
			if (kret == KERN_NOT_WAITING) {
				failedwakeup++;
			}
		}
	}

	if (wokenp != NULL)
		*wokenp = numwoken;
	return(failedwakeup);
}
/* This handles the unlock grants for the next set on rw_unlock() or on arrival of all preposted waiters */
int
kwq_handle_unlock(ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t rw_wc, uint32_t * updatep, int flags, int * blockp, uint32_t premgen)
{
	uint32_t low_reader, low_writer, low_ywriter, low_lreader, limitrdnum;
	int rwtype, error = 0;
	int longreadset = 0, allreaders, failed;
	uint32_t updatebits = 0, numneeded = 0;
	int prepost = flags & KW_UNLOCK_PREPOST;
	thread_t preth = THREAD_NULL;
	ksyn_waitq_element_t kwe;
	uthread_t uth;
	thread_t th;
	int woken = 0;
	int block = 1;
	uint32_t lowest[KSYN_QUEUE_MAX];	/* no need for upgrade as it is handled separately */
	kern_return_t kret = KERN_SUCCESS;
	ksyn_queue_t kq;
	int curthreturns = 0;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_START, (uint32_t)kwq->kw_addr, mgen, premgen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
	if (prepost != 0) {
		preth = current_thread();
	}

	kq = &kwq->kw_ksynqueues[KSYN_QUEUE_READ];
	kwq->kw_lastseqword = rw_wc;
	kwq->kw_lastunlockseq = (rw_wc & PTHRW_COUNT_MASK);
	kwq->kw_overlapwatch = 0;

	/* upgrade pending */
	if (is_rw_ubit_set(mgen)) {
#if __TESTPANICS__
		panic("NO UBIT SHOULD BE SET\n");
#endif /* __TESTPANICS__ */
		updatebits = PTH_RWL_EBIT | PTH_RWL_KBIT;
		if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0)
			updatebits |= PTH_RWL_WBIT;
		if (kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0)
			updatebits |= PTH_RWL_YBIT;

		if ((flags & KW_UNLOCK_PREPOST_UPGRADE) != 0) {
			/* upgrade thread calling the prepost */
			/* upgrade granted */
			block = 0;
			goto out;
		}

		if (kwq->kw_ksynqueues[KSYN_QUEUE_UPGRADE].ksynq_count > 0) {
			kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_UPGRADE], kwq);

			kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;
			kwe->kwe_psynchretval = updatebits;
			kwe->kwe_kwqqueue = NULL;
			kret = ksyn_wakeup_thread(kwq, kwe);
			if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
				panic("kwq_handle_unlock: panic waking up the upgrade thread \n");
			if (kret == KERN_NOT_WAITING) {
				kwq->kw_pre_intrcount = 1;	/* actually a count */
				kwq->kw_pre_intrseq = mgen;
				kwq->kw_pre_intrretbits = kwe->kwe_psynchretval;
				kwq->kw_pre_intrtype = PTH_RW_TYPE_UPGRADE;
			}
			error = 0;
		} else {
#if __TESTPANICS__
			panic("panic unable to find the upgrade thread\n");
#endif /* __TESTPANICS__ */
			error = 0;
		}
		goto out;
	}

	error = kwq_find_rw_lowest(kwq, flags, premgen, &rwtype, lowest);
#if __TESTPANICS__
	if (error != 0)
		panic("rwunlock: failed to slot next round of threads");
#endif /* __TESTPANICS__ */

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 1, rwtype, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	low_reader = lowest[KSYN_QUEUE_READ];
	low_lreader = lowest[KSYN_QUEUE_LREAD];
	low_writer = lowest[KSYN_QUEUE_WRITER];
	low_ywriter = lowest[KSYN_QUEUE_YWRITER];

	updatebits = 0;

	switch (rwtype & PTH_RW_TYPE_MASK) {
	case PTH_RW_TYPE_LREAD:
		longreadset = 1;
		/* FALLTHROUGH */
	case PTH_RW_TYPE_READ: {
		/* what about the preflight which is LREAD or READ ?? */
		if ((rwtype & PTH_RWSHFT_TYPE_MASK) != 0) {
			if (rwtype & PTH_RWSHFT_TYPE_WRITE)
				updatebits |= (PTH_RWL_WBIT | PTH_RWL_KBIT);
			if (rwtype & PTH_RWSHFT_TYPE_YWRITE)
				updatebits |= PTH_RWL_YBIT;
		}
		limitrdnum = 0;
		if (longreadset == 0) {
			switch (rwtype & (PTH_RWSHFT_TYPE_WRITE | PTH_RWSHFT_TYPE_YWRITE)) {
			case PTH_RWSHFT_TYPE_WRITE:
				limitrdnum = low_writer;
				if (((rwtype & PTH_RWSHFT_TYPE_LREAD) != 0) &&
				    (is_seqlower(low_lreader, limitrdnum) != 0)) {
					longreadset = 1;
				}
				if (((flags & KW_UNLOCK_PREPOST_LREADLOCK) != 0) &&
				    (is_seqlower(premgen, limitrdnum) != 0)) {
					longreadset = 1;
				}
				break;
			case PTH_RWSHFT_TYPE_YWRITE:
				/* all read */
				if (((rwtype & PTH_RWSHFT_TYPE_LREAD) != 0) &&
				    (is_seqlower(low_lreader, low_ywriter) != 0)) {
					longreadset = 1;
				}
				if (((flags & KW_UNLOCK_PREPOST_LREADLOCK) != 0) &&
				    (is_seqlower(premgen, low_ywriter) != 0)) {
					longreadset = 1;
				}
				break;
			case (PTH_RWSHFT_TYPE_WRITE | PTH_RWSHFT_TYPE_YWRITE):
				if (is_seqlower(low_ywriter, low_writer) != 0) {
					limitrdnum = low_ywriter;
				} else
					limitrdnum = low_writer;
				if (((rwtype & PTH_RWSHFT_TYPE_LREAD) != 0) &&
				    (is_seqlower(low_lreader, limitrdnum) != 0)) {
					longreadset = 1;
				}
				if (((flags & KW_UNLOCK_PREPOST_LREADLOCK) != 0) &&
				    (is_seqlower(premgen, limitrdnum) != 0)) {
					longreadset = 1;
				}
				break;
			default: /* no writers at all */
				if ((rwtype & PTH_RWSHFT_TYPE_LREAD) != 0)
					longreadset = 1;
				break;
			};
		}
		allreaders = 0;
		if (longreadset != 0) {
			updatebits |= PTH_RWL_LBIT;
			updatebits &= ~PTH_RWL_KBIT;
			if ((flags & (KW_UNLOCK_PREPOST_READLOCK | KW_UNLOCK_PREPOST_LREADLOCK)) != 0)
				numneeded += 1;
			numneeded += kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count;
			numneeded += kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count;
			updatebits += (numneeded << PTHRW_COUNT_SHIFT);
			kwq->kw_overlapwatch = 1;
		} else {
			/* no longread, evaluate number of readers */
			switch (rwtype & (PTH_RWSHFT_TYPE_WRITE | PTH_RWSHFT_TYPE_YWRITE)) {
			case PTH_RWSHFT_TYPE_WRITE:
				limitrdnum = low_writer;
				numneeded = ksyn_queue_count_tolowest(kq, limitrdnum);
				if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) && (is_seqlower(premgen, limitrdnum) != 0)) {
					curthreturns = 1;
					numneeded += 1;
				}
				break;
			case PTH_RWSHFT_TYPE_YWRITE:
				/* all read */
				numneeded += kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count;
				if ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) {
					curthreturns = 1;
					numneeded += 1;
				}
				allreaders = 1;
				break;
			case (PTH_RWSHFT_TYPE_WRITE | PTH_RWSHFT_TYPE_YWRITE):
				limitrdnum = low_writer;
				numneeded = ksyn_queue_count_tolowest(kq, limitrdnum);
				if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) && (is_seqlower(premgen, limitrdnum) != 0)) {
					curthreturns = 1;
					numneeded += 1;
				}
				break;
			default: /* no writers at all */
				/* no other waiters only readers */
				kwq->kw_overlapwatch = 1;
				numneeded += kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count;
				if ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) {
					curthreturns = 1;
					numneeded += 1;
				}
				allreaders = 1;
				break;
			};

			updatebits += (numneeded << PTHRW_COUNT_SHIFT);
		}
		kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;

		if (curthreturns != 0) {
			block = 0;
			uth = current_uthread();
			kwe = &uth->uu_kwe;
			kwe->kwe_psynchretval = updatebits;
		}

		failed = ksyn_wakeupreaders(kwq, limitrdnum, longreadset, allreaders, updatebits, &woken);
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 2, woken, failed, 0);
#endif /* _PSYNCH_TRACE_ */

		if (failed != 0) {
			kwq->kw_pre_intrcount = failed;	/* actually a count */
			kwq->kw_pre_intrseq = limitrdnum;
			kwq->kw_pre_intrretbits = updatebits;
			if (longreadset)
				kwq->kw_pre_intrtype = PTH_RW_TYPE_LREAD;
			else
				kwq->kw_pre_intrtype = PTH_RW_TYPE_READ;
		}

		error = 0;

		if ((kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0) && ((updatebits & PTH_RWL_WBIT) == 0))
			panic("kwq_handle_unlock: writer pending but no writebit set %x\n", updatebits);
	}
	break;

	case PTH_RW_TYPE_WRITE: {
		/* only one thread is going to be granted */
		updatebits |= (PTHRW_INC);
		updatebits |= PTH_RWL_KBIT | PTH_RWL_EBIT;

		if (((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0) && (low_writer == premgen)) {
			block = 0;
			if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0)
				updatebits |= PTH_RWL_WBIT;
			if ((rwtype & PTH_RWSHFT_TYPE_YWRITE) != 0)
				updatebits |= PTH_RWL_YBIT;
			th = preth;
			uth = get_bsdthread_info(th);
			kwe = &uth->uu_kwe;
			kwe->kwe_psynchretval = updatebits;
		} else {
			/* we are not granting writelock to the preposting thread */
			kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwq);

			/* if there are writers present or the preposting write thread then W bit is to be set */
			if ((kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0))
				updatebits |= PTH_RWL_WBIT;
			if ((rwtype & PTH_RWSHFT_TYPE_YWRITE) != 0)
				updatebits |= PTH_RWL_YBIT;
			kwe->kwe_psynchretval = updatebits;
			kwe->kwe_kwqqueue = NULL;
			/* setup next in the queue */
			kret = ksyn_wakeup_thread(kwq, kwe);
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 3, kret, 0, 0);
#endif /* _PSYNCH_TRACE_ */
#if __TESTPANICS__
			if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
				panic("kwq_handle_unlock: panic waking up writer\n");
#endif /* __TESTPANICS__ */
			if (kret == KERN_NOT_WAITING) {
				kwq->kw_pre_intrcount = 1;	/* actually a count */
				kwq->kw_pre_intrseq = low_writer;
				kwq->kw_pre_intrretbits = updatebits;
				kwq->kw_pre_intrtype = PTH_RW_TYPE_WRITE;
			}
			error = 0;
		}
		kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;
		if ((updatebits & (PTH_RWL_KBIT | PTH_RWL_EBIT)) != (PTH_RWL_KBIT | PTH_RWL_EBIT))
			panic("kwq_handle_unlock: writer lock granted but no ke set %x\n", updatebits);
	}
	break;

	case PTH_RW_TYPE_YWRITE: {
		/* can reader locks be granted ahead of this write? */
		if ((rwtype & PTH_RWSHFT_TYPE_READ) != 0) {
			if ((rwtype & PTH_RWSHFT_TYPE_MASK) != 0) {
				if (rwtype & PTH_RWSHFT_TYPE_WRITE)
					updatebits |= (PTH_RWL_WBIT | PTH_RWL_KBIT);
				if (rwtype & PTH_RWSHFT_TYPE_YWRITE)
					updatebits |= PTH_RWL_YBIT;
			}

			if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0) {
				/* is lowest reader less than the low writer? */
				if (is_seqlower(low_reader, low_writer) == 0)
					goto yielditis;

				numneeded = ksyn_queue_count_tolowest(kq, low_writer);
				updatebits += (numneeded << PTHRW_COUNT_SHIFT);
				if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) && (is_seqlower(premgen, low_writer) != 0)) {
					uth = current_uthread();
					kwe = &uth->uu_kwe;
					/* add one more for the preposting reader */
					updatebits += PTHRW_INC;
					kwe->kwe_psynchretval = updatebits;
					block = 0;
				}

				kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;

				/* there will be readers to wake up, no need to check for woken */
				failed = ksyn_wakeupreaders(kwq, low_writer, 0, 0, updatebits, NULL);
#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 2, woken, failed, 0);
#endif /* _PSYNCH_TRACE_ */
				if (failed != 0) {
					kwq->kw_pre_intrcount = failed;	/* actually a count */
					kwq->kw_pre_intrseq = low_writer;
					kwq->kw_pre_intrretbits = updatebits;
					kwq->kw_pre_intrtype = PTH_RW_TYPE_READ;
				}
				error = 0;
			} else {
				/* wakeup all readers */
				numneeded = kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count;
				updatebits += (numneeded << PTHRW_COUNT_SHIFT);
				if ((prepost != 0) && ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0)) {
					uth = current_uthread();
					kwe = &uth->uu_kwe;
					updatebits += PTHRW_INC;
					kwe->kwe_psynchretval = updatebits;
					block = 0;
				}
				kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;
				failed = ksyn_wakeupreaders(kwq, low_writer, 0, 1, updatebits, &woken);
#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 2, woken, failed, 0);
#endif /* _PSYNCH_TRACE_ */
				if (failed != 0) {
					kwq->kw_pre_intrcount = failed;	/* actually a count */
					kwq->kw_pre_intrseq = kwq->kw_highseq;
					kwq->kw_pre_intrretbits = updatebits;
					kwq->kw_pre_intrtype = PTH_RW_TYPE_READ;
				}
				error = 0;
			}
		} else {
yielditis:
			/* no reads, so granting yielding writes */
			updatebits |= PTHRW_INC;
			updatebits |= PTH_RWL_KBIT | PTH_RWL_EBIT;

			if (((flags & KW_UNLOCK_PREPOST_YWRLOCK) != 0) && (low_ywriter == premgen)) {
				/* preposting yielding write thread is being granted exclusive lock */
				block = 0;
				if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0)
					updatebits |= PTH_RWL_WBIT;
				else if (kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0)
					updatebits |= PTH_RWL_YBIT;
				th = preth;
				uth = get_bsdthread_info(th);
				kwe = &uth->uu_kwe;
				kwe->kwe_psynchretval = updatebits;
			} else {
				/* we are granting yield writelock to some other thread */
				kwe = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER], kwq);

				if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0)
					updatebits |= PTH_RWL_WBIT;
				/* if there are ywriters present or the preposting ywrite thread then Y bit is to be set */
				else if ((kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_YWRLOCK) != 0))
					updatebits |= PTH_RWL_YBIT;

				kwe->kwe_psynchretval = updatebits;
				kwe->kwe_kwqqueue = NULL;

				kret = ksyn_wakeup_thread(kwq, kwe);
#if _PSYNCH_TRACE_
				__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq->kw_addr, 3, kret, 0, 0);
#endif /* _PSYNCH_TRACE_ */
#if __TESTPANICS__
				if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
					panic("kwq_handle_unlock: panic waking up readers\n");
#endif /* __TESTPANICS__ */
				if (kret == KERN_NOT_WAITING) {
					kwq->kw_pre_intrcount = 1;	/* actually a count */
					kwq->kw_pre_intrseq = low_ywriter;
					kwq->kw_pre_intrretbits = updatebits;
					kwq->kw_pre_intrtype = PTH_RW_TYPE_YWRITE;
				}
				error = 0;
			}
			kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;
		}
	}
	break;

	default:
		panic("rwunlock: invalid type for lock grants");
	};

out:
	if (updatep != NULL)
		*updatep = updatebits;
	if (blockp != NULL)
		*blockp = block;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_END, (uint32_t)kwq->kw_addr, 0, updatebits, block, 0);
#endif /* _PSYNCH_TRACE_ */
	return(error);
}
int
kwq_handle_overlap(ksyn_wait_queue_t kwq, uint32_t lgenval, __unused uint32_t ugenval, uint32_t rw_wc, uint32_t *updatebitsp, __unused int flags, int * blockp)
{
	uint32_t highword = kwq->kw_nextseqword & PTHRW_COUNT_MASK;
	uint32_t lowword = kwq->kw_lastseqword & PTHRW_COUNT_MASK;
	uint32_t val = 0;
	int withinseq;

	/* overlap is set, so no need to check for valid state for overlap */

	withinseq = ((is_seqlower_eq(rw_wc, highword) != 0) || (is_seqhigher_eq(lowword, rw_wc) != 0));

	if (withinseq != 0) {
		if ((kwq->kw_nextseqword & PTH_RWL_LBIT) == 0) {
			/* if no writers ahead, overlap granted */
			if ((lgenval & PTH_RWL_WBIT) == 0) {
				goto grantoverlap;
			}
		} else {
			/* Lbit is set, and writers ahead do not count */
			goto grantoverlap;
		}
	}

	*blockp = 1;
	return(0);

grantoverlap:
	/* increase the next expected seq by one */
	kwq->kw_nextseqword += PTHRW_INC;
	/* set count by one & bits from the nextseq and add M bit */
	val = PTHRW_INC;
	val |= ((kwq->kw_nextseqword & PTHRW_BIT_MASK) | PTH_RWL_MBIT);
	*updatebitsp = val;
	*blockp = 0;
	return(0);
}
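/*
 * NB (annotation added in this edit, not part of the original Apple source):
 * "overlap" lets a newly arriving reader join a read grant already in flight.
 * The request's S word (rw_wc) must fall inside the window bounded by
 * kw_lastseqword and kw_nextseqword; it is then granted without blocking by
 * bumping kw_nextseqword by PTHRW_INC and returning the next-sequence bits
 * plus PTH_RWL_MBIT, unless writers are pending (PTH_RWL_WBIT in lgenval) and
 * no long-read (L) bit overrides them.
 */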
/* handle downgrade actions */
int
kwq_handle_downgrade(ksyn_wait_queue_t kwq, uint32_t mgen, __unused int flags, __unused uint32_t premgen, __unused int * blockp)
{
	uint32_t updatebits, lowriter = 0;
	int longreadset, allreaders, count;

	/* can handle downgrade now */
	updatebits = mgen;

	longreadset = 0;
	allreaders = 0;
	if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count > 0) {
		lowriter = kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_firstnum;
		if (kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count > 0) {
			if (is_seqlower(kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_firstnum, lowriter) != 0)
				longreadset = 1;
		}
	} else {
		allreaders = 1;
		if (kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count > 0) {
			lowriter = kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_firstnum;
			if (kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count > 0) {
				if (is_seqlower(kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_firstnum, lowriter) != 0)
					longreadset = 1;
			}
		}
	}

	count = ksyn_wakeupreaders(kwq, lowriter, longreadset, allreaders, updatebits, NULL);
	if (count != 0) {
		kwq->kw_pre_limrd = count;
		kwq->kw_pre_limrdseq = lowriter;
		kwq->kw_pre_limrdbits = lowriter;
		/* need to handle prepost */
	}
	return(count);
}
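/*
 * Downgrade selection example: with the lowest waiting writer at seq
 * 0x800 and a long-read waiter at 0x400, longreadset is taken because
 * 0x400 sorts below 0x800, so the long readers are released as well;
 * with no writers or ywriters queued at all, allreaders is set instead
 * and ksyn_wakeupreaders() releases every waiting reader.
 */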
/************* Indiv queue support routines ************************/

void
ksyn_queue_init(ksyn_queue_t kq)
{
	TAILQ_INIT(&kq->ksynq_kwelist);
	kq->ksynq_count = 0;
	kq->ksynq_firstnum = 0;
	kq->ksynq_lastnum = 0;
}
int
ksyn_queue_insert(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t mgen, struct uthread * uth, ksyn_waitq_element_t kwe, int fit)
{
	uint32_t lockseq = mgen & PTHRW_COUNT_MASK;
	ksyn_waitq_element_t q_kwe, r_kwe;
	int res = 0;
	uthread_t nuth = NULL;

	if (kq->ksynq_count == 0) {
		TAILQ_INSERT_HEAD(&kq->ksynq_kwelist, kwe, kwe_list);
		kq->ksynq_firstnum = lockseq;
		kq->ksynq_lastnum = lockseq;
		goto out;
	}

	if (fit == FIRSTFIT) {
		/* TBD: if retry bit is set for mutex, add it to the head */
		/* firstfit, arriving order */
		TAILQ_INSERT_TAIL(&kq->ksynq_kwelist, kwe, kwe_list);
		if (is_seqlower(lockseq, kq->ksynq_firstnum) != 0)
			kq->ksynq_firstnum = lockseq;
		if (is_seqhigher(lockseq, kq->ksynq_lastnum) != 0)
			kq->ksynq_lastnum = lockseq;
		goto out;
	}

	if ((lockseq == kq->ksynq_firstnum) || (lockseq == kq->ksynq_lastnum)) {
		/* During prepost when a thread is getting cancelled, we could have two with same seq */
		if (kwe->kwe_flags == KWE_THREAD_PREPOST) {
			q_kwe = ksyn_queue_find_seq(kwq, kq, lockseq, 0);
			if ((q_kwe != NULL) && ((nuth = (uthread_t)q_kwe->kwe_uth) != NULL) &&
				((nuth->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL)) {
				TAILQ_INSERT_TAIL(&kq->ksynq_kwelist, kwe, kwe_list);
				goto out;
			} else {
				__FAILEDUSERTEST__("ksyn_queue_insert: two threads with same lockseq ");
				res = EBUSY;
				goto out1;
			}
		} else {
			__FAILEDUSERTEST__("ksyn_queue_insert: two threads with same lockseq ");
			res = EBUSY;
			goto out1;
		}
	}

	/* check for next seq one */
	if (is_seqlower(kq->ksynq_lastnum, lockseq) != 0) {
		TAILQ_INSERT_TAIL(&kq->ksynq_kwelist, kwe, kwe_list);
		kq->ksynq_lastnum = lockseq;
		goto out;
	}

	if (is_seqlower(lockseq, kq->ksynq_firstnum) != 0) {
		TAILQ_INSERT_HEAD(&kq->ksynq_kwelist, kwe, kwe_list);
		kq->ksynq_firstnum = lockseq;
		goto out;
	}

	/* goto slow insert mode */
	TAILQ_FOREACH_SAFE(q_kwe, &kq->ksynq_kwelist, kwe_list, r_kwe) {
		if (is_seqhigher(q_kwe->kwe_lockseq, lockseq) != 0) {
			TAILQ_INSERT_BEFORE(q_kwe, kwe, kwe_list);
			goto out;
		}
	}

#if __TESTPANICS__
	panic("ksyn_queue_insert: failed to insert");
#endif /* __TESTPANICS__ */

out:
	if (uth != NULL)
		kwe->kwe_uth = uth;
	kq->ksynq_count++;
	kwq->kw_inqueue++;
	update_low_high(kwq, lockseq);
out1:
	return(res);
}
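/*
 * Insert-policy illustration: with waiters already queued at sequences
 * 0x300 and 0x500, inserting 0x400 under SEQFIT lands between them via
 * the slow-insert walk above, while FIRSTFIT simply appends at the tail
 * in arrival order and only widens the first/last sequence bounds.
 */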
ksyn_waitq_element_t
ksyn_queue_removefirst(ksyn_queue_t kq, ksyn_wait_queue_t kwq)
{
	ksyn_waitq_element_t kwe = NULL;
	ksyn_waitq_element_t q_kwe;
	uint32_t curseq;

	if (kq->ksynq_count != 0) {
		kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
		TAILQ_REMOVE(&kq->ksynq_kwelist, kwe, kwe_list);
		curseq = kwe->kwe_lockseq & PTHRW_COUNT_MASK;
		kq->ksynq_count--;
		kwq->kw_inqueue--;

		if (kq->ksynq_count != 0) {
			q_kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
			kq->ksynq_firstnum = (q_kwe->kwe_lockseq & PTHRW_COUNT_MASK);
		} else {
			kq->ksynq_firstnum = 0;
			kq->ksynq_lastnum = 0;
		}
		if (kwq->kw_inqueue == 0) {
			kwq->kw_lowseq = 0;
			kwq->kw_highseq = 0;
		} else {
			if (kwq->kw_lowseq == curseq)
				kwq->kw_lowseq = find_nextlowseq(kwq);
			if (kwq->kw_highseq == curseq)
				kwq->kw_highseq = find_nexthighseq(kwq);
		}
	}
	return(kwe);
}
void
ksyn_queue_removeitem(ksyn_wait_queue_t kwq, ksyn_queue_t kq, ksyn_waitq_element_t kwe)
{
	ksyn_waitq_element_t q_kwe;
	uint32_t curseq;

	if (kq->ksynq_count > 0) {
		TAILQ_REMOVE(&kq->ksynq_kwelist, kwe, kwe_list);
		kq->ksynq_count--;
		if (kq->ksynq_count != 0) {
			q_kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
			kq->ksynq_firstnum = (q_kwe->kwe_lockseq & PTHRW_COUNT_MASK);
			q_kwe = TAILQ_LAST(&kq->ksynq_kwelist, ksynq_kwelist_head);
			kq->ksynq_lastnum = (q_kwe->kwe_lockseq & PTHRW_COUNT_MASK);
		} else {
			kq->ksynq_firstnum = 0;
			kq->ksynq_lastnum = 0;
		}
		kwq->kw_inqueue--;
		curseq = kwe->kwe_lockseq & PTHRW_COUNT_MASK;
		if (kwq->kw_inqueue == 0) {
			kwq->kw_lowseq = 0;
			kwq->kw_highseq = 0;
		} else {
			if (kwq->kw_lowseq == curseq)
				kwq->kw_lowseq = find_nextlowseq(kwq);
			if (kwq->kw_highseq == curseq)
				kwq->kw_highseq = find_nexthighseq(kwq);
		}
	}
}
/* find the thread at seq and optionally remove it from the queue */
ksyn_waitq_element_t
ksyn_queue_find_seq(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t seq, int remove)
{
	ksyn_waitq_element_t q_kwe, r_kwe;

	/* TBD: bail out if higher seq is seen */
	/* case where wrap in the tail of the queue exists */
	TAILQ_FOREACH_SAFE(q_kwe, &kq->ksynq_kwelist, kwe_list, r_kwe) {
		if ((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK) == seq) {
			if (remove != 0)
				ksyn_queue_removeitem(kwq, kq, q_kwe);
			return(q_kwe);
		}
	}
	return(NULL);
}
/* find the thread at the target sequence (or a broadcast/prepost at or above) */
ksyn_waitq_element_t
ksyn_queue_find_cvpreposeq(ksyn_queue_t kq, uint32_t cgen)
{
	ksyn_waitq_element_t q_kwe, r_kwe;
	uint32_t lgen = (cgen & PTHRW_COUNT_MASK);

	/* case where wrap in the tail of the queue exists */
	TAILQ_FOREACH_SAFE(q_kwe, &kq->ksynq_kwelist, kwe_list, r_kwe) {

		/* skip the lower entries */
		if (is_seqlower((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK), cgen) != 0)
			continue;

		switch (q_kwe->kwe_flags) {

		case KWE_THREAD_INWAIT:
			if ((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK) != lgen)
				break;
			/* fall thru */

		case KWE_THREAD_BROADCAST:
		case KWE_THREAD_PREPOST:
			return(q_kwe);
		}
	}
	return(NULL);
}
/* look for a thread to signal: prefer an exact match at signalseq, else any eligible waiter up to uptoseq */
ksyn_waitq_element_t
ksyn_queue_find_signalseq(__unused ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t uptoseq, uint32_t signalseq)
{
	ksyn_waitq_element_t q_kwe, r_kwe, t_kwe = NULL;

	/* case where wrap in the tail of the queue exists */
	TAILQ_FOREACH_SAFE(q_kwe, &kq->ksynq_kwelist, kwe_list, r_kwe) {

		switch (q_kwe->kwe_flags) {

		case KWE_THREAD_PREPOST:
			if (is_seqhigher((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK), uptoseq))
				return(t_kwe);
			/* fall thru */

		case KWE_THREAD_BROADCAST:
			/* match any prepost at our same uptoseq or any broadcast above */
			if (is_seqlower((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK), uptoseq))
				continue;
			return(q_kwe);

		case KWE_THREAD_INWAIT:
			/*
			 * Match any (non-cancelled) thread at or below our upto sequence -
			 * but prefer an exact match to our signal sequence (if present) to
			 * keep exact matches happening.
			 */
			if (is_seqhigher((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK), uptoseq))
				return(t_kwe);

			if (q_kwe->kwe_kwqqueue == kwq) {
				uthread_t ut = q_kwe->kwe_uth;
				if ((ut->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) != UT_CANCEL) {
					/* if equal or higher than our signal sequence, return this one */
					if (is_seqhigher_eq((q_kwe->kwe_lockseq & PTHRW_COUNT_MASK), signalseq))
						return(q_kwe);

					/* otherwise, just remember this eligible thread and move on */
					if (t_kwe == NULL)
						t_kwe = q_kwe;
				}
			}
			break;

		default:
			panic("ksyn_queue_find_signalseq(): unknown wait queue element type (%d)\n", q_kwe->kwe_flags);
			break;
		}
	}
	return(t_kwe);
}
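/*
 * Selection example for ksyn_queue_find_signalseq(): with eligible
 * waiters at 0x300 and 0x500, uptoseq 0x600 and signalseq 0x500, the
 * 0x300 waiter is remembered in t_kwe but the exact match at 0x500 is
 * returned; were signalseq 0x400 instead, the first at-or-above waiter
 * (0x500) would still be returned, with 0x300 held only as a fallback.
 */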
int
ksyn_queue_move_tofree(ksyn_wait_queue_t ckwq, ksyn_queue_t kq, uint32_t upto, ksyn_queue_t kfreeq, int all, int release)
{
	ksyn_waitq_element_t kwe;
	int count = 0;
	uint32_t tseq = upto & PTHRW_COUNT_MASK;
#if _PSYNCH_TRACE_
	uthread_t ut;
#endif /* _PSYNCH_TRACE_ */

	ksyn_queue_init(kfreeq);

	/* free all the entries, must be only fakes.. */
	kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
	while (kwe != NULL) {
		if ((all == 0) && (is_seqhigher((kwe->kwe_lockseq & PTHRW_COUNT_MASK), tseq) != 0))
			break;
		if (kwe->kwe_flags == KWE_THREAD_INWAIT) {
			/*
			 * This scenario is typically noticed when the cvar is
			 * reinited and the new waiters are waiting. We can
			 * return them as spurious wait so the cvar state gets
			 * reset correctly.
			 */
#if _PSYNCH_TRACE_
			ut = (uthread_t)kwe->kwe_uth;
#endif /* _PSYNCH_TRACE_ */

			/* skip canceled ones, wake the rest */
			ksyn_queue_removeitem(ckwq, kq, kwe);
			/* set M bit to indicate to waking CV to return Inc val */
			kwe->kwe_psynchretval = PTHRW_INC | (PTH_RWS_CV_MBIT | PTH_RWL_MTX_WAIT);
			kwe->kwe_kwqqueue = NULL;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVHBROAD | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xcafecaf3, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
#endif /* _PSYNCH_TRACE_ */
			(void)ksyn_wakeup_thread(ckwq, kwe);
		} else {
			ksyn_queue_removeitem(ckwq, kq, kwe);
			TAILQ_INSERT_TAIL(&kfreeq->ksynq_kwelist, kwe, kwe_list);
			ckwq->kw_fakecount--;
			count++;
		}
		kwe = TAILQ_FIRST(&kq->ksynq_kwelist);
	}

	if ((release != 0) && (count != 0)) {
		kwe = TAILQ_FIRST(&kfreeq->ksynq_kwelist);
		while (kwe != NULL) {
			TAILQ_REMOVE(&kfreeq->ksynq_kwelist, kwe, kwe_list);
			zfree(kwe_zone, kwe);
			kwe = TAILQ_FIRST(&kfreeq->ksynq_kwelist);
		}
	}
	return(count);
}
/*************************************************************************/

void
update_low_high(ksyn_wait_queue_t kwq, uint32_t lockseq)
{
	if (kwq->kw_inqueue == 1) {
		kwq->kw_lowseq = lockseq;
		kwq->kw_highseq = lockseq;
	} else {
		if (is_seqlower(lockseq, kwq->kw_lowseq) != 0)
			kwq->kw_lowseq = lockseq;
		if (is_seqhigher(lockseq, kwq->kw_highseq) != 0)
			kwq->kw_highseq = lockseq;
	}
}
uint32_t
find_nextlowseq(ksyn_wait_queue_t kwq)
{
	uint32_t numbers[KSYN_QUEUE_MAX];
	int count = 0, i;
	uint32_t lowest;

	for (i = 0; i < KSYN_QUEUE_MAX; i++) {
		if (kwq->kw_ksynqueues[i].ksynq_count != 0) {
			numbers[count] = kwq->kw_ksynqueues[i].ksynq_firstnum;
			count++;
		}
	}
	if (count == 0)
		return(0);

	lowest = numbers[0];
	for (i = 1; i < count; i++) {
		if (is_seqlower(numbers[i], lowest) != 0)
			lowest = numbers[i];
	}
	return(lowest);
}
uint32_t
find_nexthighseq(ksyn_wait_queue_t kwq)
{
	uint32_t numbers[KSYN_QUEUE_MAX];
	int count = 0, i;
	uint32_t highest;

	for (i = 0; i < KSYN_QUEUE_MAX; i++) {
		if (kwq->kw_ksynqueues[i].ksynq_count != 0) {
			numbers[count] = kwq->kw_ksynqueues[i].ksynq_lastnum;
			count++;
		}
	}
	if (count == 0)
		return(0);

	highest = numbers[0];
	for (i = 1; i < count; i++) {
		if (is_seqhigher(numbers[i], highest) != 0)
			highest = numbers[i];
	}
	return(highest);
}
int
is_seqlower(uint32_t x, uint32_t y)
{
	if (x < y) {
		if ((y - x) < (PTHRW_MAX_READERS / 2))
			return(1);
	} else {
		if ((x - y) > (PTHRW_MAX_READERS / 2))
			return(1);
	}
	return(0);
}

int
is_seqlower_eq(uint32_t x, uint32_t y)
{
	if (x == y)
		return(1);
	return(is_seqlower(x, y));
}

int
is_seqhigher(uint32_t x, uint32_t y)
{
	if (x > y) {
		if ((x - y) < (PTHRW_MAX_READERS / 2))
			return(1);
	} else {
		if ((y - x) > (PTHRW_MAX_READERS / 2))
			return(1);
	}
	return(0);
}

int
is_seqhigher_eq(uint32_t x, uint32_t y)
{
	if (x == y)
		return(1);
	return(is_seqhigher(x, y));
}
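/*
 * Wrap-around illustration for the sequence compares above, assuming
 * PTHRW_MAX_READERS spans the full 24-bit count range (its definition
 * sits earlier in this file): with x = 0xfffffe00 and y = 0x00000100,
 * x > y numerically, yet is_seqlower(x, y) returns 1 because (x - y)
 * exceeds half the range, meaning y is really ahead of x after the
 * counter wrapped.
 */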
int
find_diff(uint32_t upto, uint32_t lowest)
{
	uint32_t diff;

	if (upto == lowest)
		return(0);

	/* take wrap-around into account when computing the distance */
	if (is_seqlower(upto, lowest) != 0)
		diff = diff_genseq(lowest, upto);
	else
		diff = diff_genseq(upto, lowest);
	diff = (diff >> PTHRW_COUNT_SHIFT);
	return(diff);
}
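/*
 * find_diff() example, assuming the count occupies the high 24 bits and
 * PTHRW_COUNT_SHIFT == 8 (per the PTHRW layout defined earlier in this
 * file): with upto = 0x800 and lowest = 0x300, the raw distance is
 * 0x500 and the shift yields 5, the number of sequence increments
 * (i.e. waiters) between the two values.
 */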
int
find_seq_till(ksyn_wait_queue_t kwq, uint32_t upto, uint32_t nwaiters, uint32_t *countp)
{
	int i;
	uint32_t count = 0;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_FSEQTILL | DBG_FUNC_START, 0, 0, upto, nwaiters, 0);
#endif /* _PSYNCH_TRACE_ */

	for (i = 0; i < KSYN_QUEUE_MAX; i++) {
		count += ksyn_queue_count_tolowest(&kwq->kw_ksynqueues[i], upto);
#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_FSEQTILL | DBG_FUNC_NONE, 0, 1, i, count, 0);
#endif /* _PSYNCH_TRACE_ */
		if (count >= nwaiters) {
			break;
		}
	}

	if (countp != NULL) {
		*countp = count;
	}
#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_FSEQTILL | DBG_FUNC_END, 0, 0, count, nwaiters, 0);
#endif /* _PSYNCH_TRACE_ */
	if (count == 0)
		return(0);
	else if (count >= nwaiters)
		return(1);
	else
		return(0);
}
uint32_t
ksyn_queue_count_tolowest(ksyn_queue_t kq, uint32_t upto)
{
	uint32_t i = 0;
	ksyn_waitq_element_t kwe, newkwe;
	uint32_t curval;

	/* if nothing or the first num is greater than upto, return none */
	if ((kq->ksynq_count == 0) || (is_seqhigher(kq->ksynq_firstnum, upto) != 0))
		return(0);
	if (upto == kq->ksynq_firstnum)
		return(1);

	TAILQ_FOREACH_SAFE(kwe, &kq->ksynq_kwelist, kwe_list, newkwe) {
		curval = (kwe->kwe_lockseq & PTHRW_COUNT_MASK);
		if (upto == curval) {
			i++;
			break;
		} else if (is_seqhigher(curval, upto) != 0) {
			break;
		} else {
			/* seq is lower */
			i++;
		}
	}
	return(i);
}
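/*
 * Example: for a queue holding sequences 0x300, 0x500 and 0x800 with
 * upto = 0x500, ksyn_queue_count_tolowest() returns 2 - it counts the
 * 0x300 entry, counts and stops at the exact match 0x500, and never
 * reaches 0x800.
 */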
/* handles the cond broadcast of cvar and returns number of woken threads and bits for syscall return */
void
ksyn_handle_cvbroad(ksyn_wait_queue_t ckwq, uint32_t upto, uint32_t * updatep)
{
	kern_return_t kret;
	ksyn_queue_t kq;
	ksyn_waitq_element_t kwe, newkwe;
	uint32_t updatebits = 0;
	struct ksyn_queue kfreeq;
	uthread_t ut;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVHBROAD | DBG_FUNC_START, 0xcbcbcbc2, upto, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */

	ksyn_queue_init(&kfreeq);
	kq = &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER];

retry:
	TAILQ_FOREACH_SAFE(kwe, &kq->ksynq_kwelist, kwe_list, newkwe) {

		if (is_seqhigher((kwe->kwe_lockseq & PTHRW_COUNT_MASK), upto))	/* outside our range */
			break;

		/* now handle the one we found (inside the range) */
		switch (kwe->kwe_flags) {

		case KWE_THREAD_INWAIT:
			ut = (uthread_t)kwe->kwe_uth;

			/* skip canceled ones */
			if (kwe->kwe_kwqqueue != ckwq ||
			    (ut->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL)
				break;

			/* wake the rest */
			ksyn_queue_removeitem(ckwq, kq, kwe);
			kwe->kwe_psynchretval = PTH_RWL_MTX_WAIT;
			kwe->kwe_kwqqueue = NULL;
#if _PSYNCH_TRACE_
			__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVHBROAD | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xcafecaf2, (uint32_t)(thread_tid((struct thread *)(((struct uthread *)(kwe->kwe_uth))->uu_context.vc_thread))), kwe->kwe_psynchretval, 0);
#endif /* _PSYNCH_TRACE_ */
			kret = ksyn_wakeup_thread(ckwq, kwe);
#if __TESTPANICS__
			if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
				panic("ksyn_handle_cvbroad: panic waking up thread");
#endif /* __TESTPANICS__ */
			updatebits += PTHRW_INC;
			break;

		case KWE_THREAD_BROADCAST:
		case KWE_THREAD_PREPOST:
			ksyn_queue_removeitem(ckwq, kq, kwe);
			TAILQ_INSERT_TAIL(&kfreeq.ksynq_kwelist, kwe, kwe_list);
			ckwq->kw_fakecount--;
			break;

		default:
			panic("unknown kweflags\n");
			break;
		}
	}

	/* Need to enter a broadcast in the queue (if not already at L == S) */

	if ((ckwq->kw_lword & PTHRW_COUNT_MASK) != (ckwq->kw_sword & PTHRW_COUNT_MASK)) {

		newkwe = TAILQ_FIRST(&kfreeq.ksynq_kwelist);
		if (newkwe == NULL) {
			ksyn_wqunlock(ckwq);
			newkwe = (ksyn_waitq_element_t)zalloc(kwe_zone);
			TAILQ_INSERT_TAIL(&kfreeq.ksynq_kwelist, newkwe, kwe_list);
			ksyn_wqlock(ckwq);
			goto retry;
		}

		TAILQ_REMOVE(&kfreeq.ksynq_kwelist, newkwe, kwe_list);
		bzero(newkwe, sizeof(struct ksyn_waitq_element));
		newkwe->kwe_kwqqueue = ckwq;
		newkwe->kwe_flags = KWE_THREAD_BROADCAST;
		newkwe->kwe_lockseq = upto;
		newkwe->kwe_count = 0;
		newkwe->kwe_uth = NULL;
		newkwe->kwe_psynchretval = 0;

#if _PSYNCH_TRACE_
		__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVHBROAD | DBG_FUNC_NONE, (uint32_t)ckwq->kw_addr, 0xfeedfeed, upto, 0, 0);
#endif /* _PSYNCH_TRACE_ */

		(void)ksyn_queue_insert(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], upto, NULL, newkwe, SEQFIT);
		ckwq->kw_fakecount++;
	}

	/* free up any remaining things stumbled across above */
	kwe = TAILQ_FIRST(&kfreeq.ksynq_kwelist);
	while (kwe != NULL) {
		TAILQ_REMOVE(&kfreeq.ksynq_kwelist, kwe, kwe_list);
		zfree(kwe_zone, kwe);
		kwe = TAILQ_FIRST(&kfreeq.ksynq_kwelist);
	}

	if (updatep != NULL)
		*updatep = updatebits;

#if _PSYNCH_TRACE_
	__PTHREAD_TRACE_DEBUG(_PSYNCH_TRACE_CVHBROAD | DBG_FUNC_END, 0xeeeeeeed, updatebits, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
}
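/*
 * The zalloc() path above has to drop the wait-queue lock first, since
 * zone allocation may block; the fresh element is parked on the local
 * kfreeq, the lock is retaken, and the scan restarts from retry:
 * because the queue may have changed while unlocked.  Whatever is left
 * on kfreeq at the end is returned to kwe_zone.
 */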
void
ksyn_cvupdate_fixup(ksyn_wait_queue_t ckwq, uint32_t *updatep, ksyn_queue_t kfreeq, int release)
{
	uint32_t updatebits = 0;

	if (updatep != NULL)
		updatebits = *updatep;
	if ((ckwq->kw_lword & PTHRW_COUNT_MASK) == (ckwq->kw_sword & PTHRW_COUNT_MASK)) {
		updatebits |= PTH_RWS_CV_CBIT;
		if (ckwq->kw_inqueue != 0) {
			/* FREE THE QUEUE */
			(void)ksyn_queue_move_tofree(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], ckwq->kw_lword, kfreeq, 0, release);
#if __TESTPANICS__
			if (ckwq->kw_inqueue != 0)
				panic("ksyn_cvupdate_fixup: L == S, but entries in queue beyond S");
#endif /* __TESTPANICS__ */
		}
		ckwq->kw_lword = ckwq->kw_uword = ckwq->kw_sword = 0;
		ckwq->kw_kflags |= KSYN_KWF_ZEROEDOUT;
	} else if ((ckwq->kw_inqueue != 0) && (ckwq->kw_fakecount == ckwq->kw_inqueue)) {
		/* only fake entries are present in the queue */
		updatebits |= PTH_RWS_CV_PBIT;
	}
	if (updatep != NULL)
		*updatep = updatebits;
}
void
psynch_zoneinit(void)
{
	kwq_zone = (zone_t)zinit(sizeof(struct ksyn_wait_queue), 8192 * sizeof(struct ksyn_wait_queue), 4096, "ksyn_waitqueue zone");
	kwe_zone = (zone_t)zinit(sizeof(struct ksyn_waitq_element), 8192 * sizeof(struct ksyn_waitq_element), 4096, "ksyn_waitq_element zone");
}
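/*
 * zinit() arguments are (element size, zone max in bytes, allocation
 * chunk in bytes, name): each zone above is capped at 8192 elements and
 * grows in page-sized (4096-byte) steps, so wait queues and waitq
 * elements are carved from dedicated zones rather than general kalloc.
 */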