/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995-2005 Apple Computer, Inc. All Rights Reserved */
/*
 * pthread_support.c
 */

#if PSYNCH

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/systm.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/time.h>
#include <sys/acct.h>
#include <sys/kernel.h>
#include <sys/wait.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/stat.h>
#include <sys/lock.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>
#include <sys/pthread_internal.h>
#include <sys/vm.h>
#include <sys/user.h>

#include <mach/mach_types.h>
#include <mach/vm_prot.h>
#include <mach/semaphore.h>
#include <mach/sync_policy.h>
#include <mach/task.h>
#include <kern/kern_types.h>
#include <kern/task.h>
#include <kern/clock.h>
#include <mach/kern_return.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/thread_call.h>
#include <kern/kalloc.h>
#include <kern/processor.h>
#include <kern/affinity.h>
#include <kern/wait_queue.h>
#include <mach/mach_vm.h>
#include <mach/mach_param.h>
#include <mach/thread_policy.h>
#include <mach/message.h>
#include <mach/port.h>
#include <vm/vm_protos.h>
#include <vm/vm_map.h>
#include <mach/vm_region.h>

#include <libkern/OSAtomic.h>

#define _PSYNCH_TRACE_ 0		/* kdebug trace */
#define __TESTPANICS__ 0		/* panics for error conditions */
#define COND_MTX_WAITQUEUEMOVE 0	/* auto move from cvar wait queue to mutex waitqueue */

#if _PSYNCH_TRACE_
#define _PSYNCH_TRACE_MLWAIT 0x9000000
#define _PSYNCH_TRACE_MLDROP 0x9000004
#define _PSYNCH_TRACE_CVWAIT 0x9000008
#define _PSYNCH_TRACE_CVSIGNAL 0x900000c
#define _PSYNCH_TRACE_CVBROAD 0x9000010
#define _PSYNCH_TRACE_KMDROP 0x9000014
#define _PSYNCH_TRACE_RWRDLOCK 0x9000018
#define _PSYNCH_TRACE_RWLRDLOCK 0x900001c
#define _PSYNCH_TRACE_RWWRLOCK 0x9000020
#define _PSYNCH_TRACE_RWYWRLOCK 0x9000024
#define _PSYNCH_TRACE_RWUPGRADE 0x9000028
#define _PSYNCH_TRACE_RWDOWNGRADE 0x900002c
#define _PSYNCH_TRACE_RWUNLOCK 0x9000030
#define _PSYNCH_TRACE_RWUNLOCK2 0x9000034
#define _PSYNCH_TRACE_RWHANDLEU 0x9000038
#define _PSYNCH_TRACE_FSEQTILL 0x9000040
/* user side */
#define _PSYNCH_TRACE_UM_LOCK 0x9000060
#define _PSYNCH_TRACE_UM_UNLOCK 0x9000064
#define _PSYNCH_TRACE_UM_MHOLD 0x9000068
#define _PSYNCH_TRACE_UM_MDROP 0x900006c
#define _PSYNCH_TRACE_UM_CVWAIT 0x9000070
#define _PSYNCH_TRACE_UM_CVSIG 0x9000074
#define _PSYNCH_TRACE_UM_CVBRD 0x9000078

#endif /* _PSYNCH_TRACE_ */

lck_mtx_t * pthread_list_mlock;

#define PTHHASH(addr) (&pthashtbl[(addr) & pthhash])
extern LIST_HEAD(pthhashhead, ksyn_wait_queue) *pth_glob_hashtbl;
struct pthhashhead * pth_glob_hashtbl;
u_long pthhash;

LIST_HEAD(, ksyn_wait_queue) pth_free_list;

static int PTH_HASHSIZE = 100;
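/*
 * Lookup note (descriptive, partly inferred from the fields below): wait
 * queues live in pth_glob_hashtbl, hashed by the user address of the lock
 * word; process-shared objects appear to be identified instead by the
 * backing object/offset that ksyn_findobj resolves. kw_iocount keeps an
 * entry alive while in use, and retired entries sit on pth_free_list for
 * a short delay before being freed.
 */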

#define SEQFIT 0
#define FIRSTFIT 1

struct ksyn_queue {
    TAILQ_HEAD(, uthread) ksynq_uthlist;
    uint32_t ksynq_count;       /* number of entries in queue */
    uint32_t ksynq_firstnum;    /* lowest seq in queue */
    uint32_t ksynq_lastnum;     /* highest seq in queue */
};

#define KSYN_QUEUE_READ     0
#define KSYN_QUEUE_LREAD    1
#define KSYN_QUEUE_WRITER   2
#define KSYN_QUEUE_YWRITER  3
#define KSYN_QUEUE_UPGRADE  4
#define KSYN_QUEUE_MAX      5

struct ksyn_wait_queue {
    LIST_ENTRY(ksyn_wait_queue) kw_hash;
    LIST_ENTRY(ksyn_wait_queue) kw_list;
#if USE_WAITQUEUE
    struct wait_queue kw_wq;
#endif /* USE_WAITQUEUE */
    user_addr_t kw_addr;
    uint64_t kw_owner;
    uint64_t kw_object;         /* object backing in shared mode */
    uint64_t kw_offset;         /* offset inside the object in shared mode */
    int kw_flags;               /* mutex, cvar options/flags */
    int kw_pflags;              /* flags under listlock protection */
    struct timeval kw_ts;       /* timestamp needed for upkeep before free */
    int kw_iocount;             /* in-use reference */

    int kw_type;                /* queue type: mutex, cvar, etc. */
    uint32_t kw_inqueue;        /* number of waiters held */
    uint32_t kw_highseq;        /* highest seq in the queue */
    uint32_t kw_lowseq;         /* lowest seq in the queue */
    uint32_t kw_lastunlockseq;  /* the last seq that unlocked */
    uint32_t kw_pre_rwwc;       /* prepost count */
    uint32_t kw_pre_lockseq;    /* prepost target seq */
    uint32_t kw_pre_cvretval;   /* retval for cvwait on prepost */
    uint32_t kw_pre_limrd;      /* prepost read only (rwlock) */
    uint32_t kw_pre_limrdseq;   /* prepost limit seq for reads (rwlock) */
    uint32_t kw_pre_limrdbits;  /* seqbit needed for updates on prepost */
    uint32_t kw_pre_intrcount;  /* prepost of missed wakeups due to intrs */
    uint32_t kw_pre_intrseq;    /* prepost of missed wakeup limit seq */
    uint32_t kw_pre_intrretbits;/* return bits value for missed-wakeup threads */
    uint32_t kw_pre_intrtype;   /* type of failed wakeups */

    int kw_kflags;
    TAILQ_HEAD(, uthread) kw_uthlist;   /* list of uthreads */
    struct ksyn_queue kw_ksynqueues[KSYN_QUEUE_MAX];    /* queues to hold threads */
    lck_mtx_t kw_lock;          /* mutex lock protecting this structure */
    struct ksyn_wait_queue * kw_attq;   /* attached queue (cvar->mutex), needed in prepost */
};

typedef struct ksyn_queue * ksyn_queue_t;
typedef struct ksyn_wait_queue * ksyn_wait_queue_t;
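/*
 * Summary: a ksyn_wait_queue is the kernel-side state for one user-level
 * synchronizer (mutex, cvar, or rwlock); each kw_ksynqueues[] slot is a
 * sequence-ordered list of waiters, indexed by the KSYN_QUEUE_* constants
 * above (readers, long readers, writers, yielding writers, upgraders).
 */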

#define PTHRW_EBIT          0x01
#define PTHRW_LBIT          0x02
#define PTHRW_YBIT          0x04
#define PTHRW_WBIT          0x08
#define PTHRW_UBIT          0x10
#define PTHRW_RETRYBIT      0x20
/* same as 0x20, shadow W bit for rwlock */
#define PTHRW_SHADOW_W      0x20

#define PTHRW_TRYLKBIT      0x40
#define PTHRW_RW_HUNLOCK    0x40    /* returning read thread responsible to handle unlock */

#define PTHRW_MTX_NONE      0x80
#define PTHRW_RW_INIT       0x80    /* reset on the lock bits */
/* same as 0x80, spurious rwlock unlock ret from kernel */
#define PTHRW_RW_SPURIOUS   0x80

#define PTHRW_INC           0x100

#define PTHRW_BIT_MASK      0x000000ff

#define PTHRW_COUNT_SHIFT   8
#define PTHRW_COUNT_MASK    0xffffff00
#define PTHRW_MAX_READERS   0xffffff00

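/*
 * A generation word packs state and a sequence count: the low byte
 * (PTHRW_BIT_MASK) holds the E/L/Y/W/U state bits and the upper 24 bits
 * (PTHRW_COUNT_MASK) hold a counter advanced by PTHRW_INC per operation.
 * For example, a value of 0x309 is sequence 0x300 with PTHRW_EBIT and
 * PTHRW_WBIT set.
 */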
/* first contended seq that kernel sees */
#define KW_MTXFIRST_KSEQ    0x200
#define KW_CVFIRST_KSEQ     1
#define KW_RWFIRST_KSEQ     0x200

#define is_rw_ewubit_set(x) ((x & (PTHRW_EBIT | PTHRW_WBIT | PTHRW_UBIT)) != 0)
#define is_rw_lybit_set(x) ((x & (PTHRW_LBIT | PTHRW_YBIT)) != 0)
#define is_rw_ebit_set(x) ((x & PTHRW_EBIT) != 0)
#define is_rw_uebit_set(x) ((x & (PTHRW_EBIT | PTHRW_UBIT)) != 0)
#define is_rw_ubit_set(x) ((x & PTHRW_UBIT) != 0)
#define is_rw_either_ewyubit_set(x) ((x & (PTHRW_EBIT | PTHRW_WBIT | PTHRW_UBIT | PTHRW_YBIT)) != 0)


/* is x lower than y */
#define is_seqlower(x, y) ((x < y) || ((x - y) > (PTHRW_MAX_READERS/2)))
/* is x lower than or equal to y */
#define is_seqlower_eq(x, y) ((x <= y) || ((x - y) > (PTHRW_MAX_READERS/2)))

/* is x greater than y */
#define is_seqhigher(x, y) ((x > y) || ((y - x) > (PTHRW_MAX_READERS/2)))

static inline int diff_genseq(uint32_t x, uint32_t y) {
    if (x > y) {
        return(x-y);
    } else {
        return((PTHRW_MAX_READERS - y) + x + PTHRW_INC);
    }
}

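/*
 * The comparisons above treat sequence numbers as a wrapping space, so a
 * small value can rank "higher" than a large one once the counter rolls
 * over. Illustrative cases: is_seqlower(0x300, 0x500) is true (0x500 is
 * only 0x200 ahead), while is_seqhigher(0x100, 0xffffff00) is also true,
 * because 0x100 is interpreted as having wrapped past 0xffffff00.
 */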

#define TID_ZERO (uint64_t)0

/* bits needed in handling the rwlock unlock */
#define PTH_RW_TYPE_READ    0x01
#define PTH_RW_TYPE_LREAD   0x02
#define PTH_RW_TYPE_WRITE   0x04
#define PTH_RW_TYPE_YWRITE  0x08
#define PTH_RW_TYPE_UPGRADE 0x10
#define PTH_RW_TYPE_MASK    0xff
#define PTH_RW_TYPE_SHIFT   8

#define PTH_RWSHFT_TYPE_READ    0x0100
#define PTH_RWSHFT_TYPE_LREAD   0x0200
#define PTH_RWSHFT_TYPE_WRITE   0x0400
#define PTH_RWSHFT_TYPE_YWRITE  0x0800
#define PTH_RWSHFT_TYPE_MASK    0xff00
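/* the PTH_RWSHFT_* values are the PTH_RW_TYPE_* bits shifted left by
 * PTH_RW_TYPE_SHIFT (8), so a type and its shifted form can travel in
 * one word */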

/*
 * Mutex protocol attributes
 */
#define PTHREAD_PRIO_NONE       0
#define PTHREAD_PRIO_INHERIT    1
#define PTHREAD_PRIO_PROTECT    2
#define PTHREAD_PROTOCOL_FLAGS_MASK 0x3

/*
 * Mutex type attributes
 */
#define PTHREAD_MUTEX_NORMAL        0
#define PTHREAD_MUTEX_ERRORCHECK    4
#define PTHREAD_MUTEX_RECURSIVE     8
#define PTHREAD_MUTEX_DEFAULT       PTHREAD_MUTEX_NORMAL
#define PTHREAD_TYPE_FLAGS_MASK     0xc

/*
 * Mutex pshared attributes
 */
#define PTHREAD_PROCESS_SHARED  0x10
#define PTHREAD_PROCESS_PRIVATE 0x20
#define PTHREAD_PSHARED_FLAGS_MASK 0x30

/*
 * Mutex policy attributes
 */
#define _PTHREAD_MUTEX_POLICY_NONE      0
#define _PTHREAD_MUTEX_POLICY_FAIRSHARE 0x040   /* 1 */
#define _PTHREAD_MUTEX_POLICY_FIRSTFIT  0x080   /* 2 */
#define _PTHREAD_MUTEX_POLICY_REALTIME  0x0c0   /* 3 */
#define _PTHREAD_MUTEX_POLICY_ADAPTIVE  0x100   /* 4 */
#define _PTHREAD_MUTEX_POLICY_PRIPROTECT 0x140  /* 5 */
#define _PTHREAD_MUTEX_POLICY_PRIINHERIT 0x180  /* 6 */
#define PTHREAD_POLICY_FLAGS_MASK   0x1c0

#define _PTHREAD_MTX_OPT_HOLDLOCK   0x200
#define _PTHREAD_MTX_OPT_NOHOLDLOCK 0x400
#define _PTHREAD_MTX_OPT_LASTDROP   (_PTHREAD_MTX_OPT_HOLDLOCK | _PTHREAD_MTX_OPT_NOHOLDLOCK)

#define KSYN_WQ_INLIST  1
#define KSYN_WQ_INHASH  2
#define KSYN_WQ_SHARED  4
#define KSYN_WQ_FLIST   0x10    /* in free list to be freed after a short delay */

#define KSYN_CLEANUP_DEADLINE 10
int psynch_cleanupset;
thread_call_t psynch_thcall;

#define KSYN_WQTYPE_INWAIT  0x1000
#define KSYN_WQTYPE_MTX     0x1
#define KSYN_WQTYPE_CVAR    0x2
#define KSYN_WQTYPE_RWLOCK  0x4
#define KSYN_WQTYPE_SEMA    0x8
#define KSYN_WQTYPE_BARR    0x10
#define KSYN_WQTYPE_MASK    0xffff

#define KSYN_MTX_MAX 0x0fffffff

#define KW_UNLOCK_PREPOST           0x01
#define KW_UNLOCK_PREPOST_UPGRADE   0x02
#define KW_UNLOCK_PREPOST_DOWNGRADE 0x04
#define KW_UNLOCK_PREPOST_READLOCK  0x08
#define KW_UNLOCK_PREPOST_LREADLOCK 0x10
#define KW_UNLOCK_PREPOST_WRLOCK    0x20
#define KW_UNLOCK_PREPOST_YWRLOCK   0x40

#define CLEAR_PREPOST_BITS(kwq) {\
    kwq->kw_pre_lockseq = 0; \
    kwq->kw_pre_rwwc = 0; \
    kwq->kw_pre_cvretval = 0; \
}

#define CLEAR_READ_PREPOST_BITS(kwq) {\
    kwq->kw_pre_limrd = 0; \
    kwq->kw_pre_limrdseq = 0; \
    kwq->kw_pre_limrdbits = 0; \
}

#define CLEAR_INTR_PREPOST_BITS(kwq) {\
    kwq->kw_pre_intrcount = 0; \
    kwq->kw_pre_intrseq = 0; \
    kwq->kw_pre_intrretbits = 0; \
    kwq->kw_pre_intrtype = 0; \
}
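/*
 * "Prepost" state, as used throughout this file: when a wakeup (an
 * unlock, signal, or broadcast) arrives before the matching waiter has
 * entered the kernel, the wait queue records it (kw_pre_rwwc,
 * kw_pre_lockseq, and friends) so that a later waiter whose sequence
 * qualifies can consume it and return without blocking.
 */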

void pthread_list_lock(void);
void pthread_list_unlock(void);
void pthread_list_lock_spin(void);
void pthread_list_lock_convert_spin(void);
void ksyn_wqlock(ksyn_wait_queue_t kwq);
void ksyn_wqunlock(ksyn_wait_queue_t kwq);
ksyn_wait_queue_t ksyn_wq_hash_lookup(user_addr_t mutex, proc_t p, int flags, uint64_t object, uint64_t offset);
int ksyn_wqfind(user_addr_t mutex, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, uint64_t tid, int flags, int wqtype, ksyn_wait_queue_t * wq);
void ksyn_wqrelease(ksyn_wait_queue_t mkwq, ksyn_wait_queue_t ckwq);
int ksyn_block_thread_locked(ksyn_wait_queue_t kwq, uint64_t abstime, uthread_t uth);
kern_return_t ksyn_wakeup_thread(ksyn_wait_queue_t kwq, uthread_t uth);
void ksyn_move_wqthread(ksyn_wait_queue_t ckwq, ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t updateval, int diffgen, int nomutex);
extern thread_t port_name_to_thread(mach_port_name_t port_name);
extern int ksyn_findobj(uint64_t mutex, uint64_t * object, uint64_t * offset);
static void UPDATE_KWQ(ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, uint64_t tid, int wqtype, int retry);
void psynch_mutexdrop_internal(ksyn_wait_queue_t kwq, uint32_t lkseq, uint32_t ugen, int flags);

#if USE_WAITQUEUE
kern_return_t wait_queue_move_all(wait_queue_t from, event64_t eventfrom, wait_queue_t to, event64_t eventto);
kern_return_t wait_queue_move_thread(wait_queue_t from, event64_t eventfrom, thread_t th, wait_queue_t to, event64_t eventto, thread_t * mthp);
#endif /* USE_WAITQUEUE */
int kwq_handle_unlock(ksyn_wait_queue_t, uint32_t mgen, uint32_t * updatep, int flags, int * blockp, uint32_t premgen);
void ksyn_queue_init(ksyn_queue_t kq);
int ksyn_queue_insert(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t mgen, struct uthread * uth, int firstfit);
struct uthread * ksyn_queue_removefirst(ksyn_queue_t kq, ksyn_wait_queue_t kwq);
void ksyn_queue_removeitem(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uthread_t uth);
void update_low_high(ksyn_wait_queue_t kwq, uint32_t lockseq);
uint32_t find_nextlowseq(ksyn_wait_queue_t kwq);
uint32_t find_nexthighseq(ksyn_wait_queue_t kwq);
int find_seq_till(ksyn_wait_queue_t kwq, uint32_t upto, uint32_t nwaiters, uint32_t * countp);
int find_diff(uint32_t upto, uint32_t lowest);
uint32_t ksyn_queue_count_tolowest(ksyn_queue_t kq, uint32_t upto);
int ksyn_wakeupreaders(ksyn_wait_queue_t kwq, uint32_t limitread, int longreadset, int allreaders, uint32_t updatebits, int * wokenp);
int kwq_find_rw_lowest(ksyn_wait_queue_t kwq, int flags, uint32_t premgen, int * type, uint32_t lowest[]);
uthread_t ksyn_queue_find_seq(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t seq);
int kwq_handle_downgrade(ksyn_wait_queue_t kwq, uint32_t mgen, int flags, uint32_t premgen, int * blockp);


static void
UPDATE_KWQ(__unused ksyn_wait_queue_t kwq, __unused uint32_t mgen, __unused uint32_t ugen, __unused uint32_t rw_wc, __unused uint64_t tid, __unused int wqtype, __unused int retry)
{
}

/* to protect the hashes, iocounts, freelist */
void
pthread_list_lock(void)
{
    lck_mtx_lock(pthread_list_mlock);
}

void
pthread_list_lock_spin(void)
{
    lck_mtx_lock_spin(pthread_list_mlock);
}

void
pthread_list_lock_convert_spin(void)
{
    lck_mtx_convert_spin(pthread_list_mlock);
}


void
pthread_list_unlock(void)
{
    lck_mtx_unlock(pthread_list_mlock);
}

/* to protect the individual queue */
void
ksyn_wqlock(ksyn_wait_queue_t kwq)
{
    lck_mtx_lock(&kwq->kw_lock);
}

void
ksyn_wqunlock(ksyn_wait_queue_t kwq)
{
    lck_mtx_unlock(&kwq->kw_lock);
}

/* routine to drop the mutex unlocks; used both for the mutexunlock system call and for the drop during cond wait */
void
psynch_mutexdrop_internal(ksyn_wait_queue_t kwq, uint32_t lkseq, uint32_t ugen, int flags)
{
    uint32_t nextgen, low_writer, updatebits;
    int firstfit = flags & _PTHREAD_MUTEX_POLICY_FIRSTFIT;
    uthread_t uth;
    kern_return_t kret = KERN_SUCCESS;

    nextgen = (ugen + PTHRW_INC);

#if _PSYNCH_TRACE_
    KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_KMDROP | DBG_FUNC_START, kwq, lkseq, ugen, flags, 0);
#endif /* _PSYNCH_TRACE_ */

    ksyn_wqlock(kwq);

redrive:
#if _PSYNCH_TRACE_
    KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, kwq, 1, kwq->kw_inqueue, nextgen, 0);
#endif /* _PSYNCH_TRACE_ */
    if (kwq->kw_inqueue != 0) {
        updatebits = (kwq->kw_highseq & PTHRW_COUNT_MASK) | PTHRW_EBIT;
        kwq->kw_lastunlockseq = ugen;
        if (firstfit != 0) {
#if __TESTPANICS__
            panic("psynch_mutexdrop_internal: first fit mutex arrives, not enabled yet\n");
#endif /* __TESTPANICS__ */
            /* first fit: pick any one */
            uth = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwq);

            if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0)
                updatebits |= PTHRW_WBIT;
#if _PSYNCH_TRACE_
            KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, kwq, 2, uth, updatebits, 0);
#endif /* _PSYNCH_TRACE_ */

            uth->uu_psynchretval = updatebits;
            uth->uu_kwqqueue = NULL;

            kret = ksyn_wakeup_thread(kwq, uth);
            if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
                panic("psynch_mutexdrop_internal: unable to wake up firstfit mutex thread\n");
            if (kret == KERN_NOT_WAITING)
                goto redrive;
        } else {
            /* handle fairshare */
            low_writer = kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_firstnum;
            low_writer &= PTHRW_COUNT_MASK;

            if (low_writer == nextgen) {
#if _PSYNCH_TRACE_
                KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, kwq, 3, low_writer, nextgen, 0);
#endif /* _PSYNCH_TRACE_ */
                /* next seq to be granted found */
                uth = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwq);
                if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0)
                    updatebits |= PTHRW_WBIT;

                uth->uu_psynchretval = updatebits;
                uth->uu_kwqqueue = NULL;

                kret = ksyn_wakeup_thread(kwq, uth);
                if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
                    panic("psynch_mutexdrop_internal: unable to wake up fairshare mutex thread\n");
                if (kret == KERN_NOT_WAITING)
                    goto redrive;

            } else if (is_seqhigher(low_writer, nextgen) != 0) {
#if _PSYNCH_TRACE_
                KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, kwq, 4, low_writer, nextgen, 0);
#endif /* _PSYNCH_TRACE_ */
                kwq->kw_pre_rwwc++;
                kwq->kw_pre_lockseq = (nextgen & PTHRW_COUNT_MASK);
            } else {
#if __TESTPANICS__
                panic("psynch_mutexdrop_internal: FS mutex unlock sequence higher than the lowest one in queue\n");
#endif /* __TESTPANICS__ */
#if _PSYNCH_TRACE_
                KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, kwq, 5, low_writer, nextgen, 0);
#endif /* _PSYNCH_TRACE_ */
                uth = ksyn_queue_find_seq(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], nextgen);
                if (uth != NULL) {
                    /* next seq to be granted found */
                    if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0)
                        updatebits |= PTHRW_WBIT;

#if _PSYNCH_TRACE_
                    KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, kwq, 6, updatebits, 0, 0);
#endif /* _PSYNCH_TRACE_ */
                    uth->uu_psynchretval = updatebits;
                    uth->uu_kwqqueue = NULL;

                    kret = ksyn_wakeup_thread(kwq, uth);
                    if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
                        panic("psynch_mutexdrop_internal: unable to wake up fairshare mutex thread\n");
                    if (kret == KERN_NOT_WAITING)
                        goto redrive;
                } else {
                    /* next seq to be granted not found, prepost */
#if _PSYNCH_TRACE_
                    KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, kwq, 7, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
                    kwq->kw_pre_rwwc++;
                    kwq->kw_pre_lockseq = (nextgen & PTHRW_COUNT_MASK);
                }
            }
        }
    } else {
#if _PSYNCH_TRACE_
        KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, kwq, 8, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
        /* if firstfit, the last one could be spurious */
        if ((firstfit == 0) || ((lkseq & PTHRW_COUNT_MASK) != nextgen)) {
#if _PSYNCH_TRACE_
            KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, kwq, 9, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
            kwq->kw_lastunlockseq = ugen;
            kwq->kw_pre_rwwc++;
            kwq->kw_pre_lockseq = (nextgen & PTHRW_COUNT_MASK);
        }
    }

    ksyn_wqunlock(kwq);

#if _PSYNCH_TRACE_
    KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_KMDROP | DBG_FUNC_END, kwq, 0, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
    ksyn_wqrelease(kwq, NULL);
    return;
}
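/*
 * Sketch of the drop logic above (informal): with waiters queued, a
 * firstfit mutex grants the lock to whichever writer is first in the
 * queue, while the fairshare path only grants it to the waiter whose
 * sequence matches ugen + PTHRW_INC; an unlock that arrives before its
 * matching waiter is remembered as a prepost (kw_pre_rwwc /
 * kw_pre_lockseq) for that waiter to consume later.
 */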

/*
 * psynch_mutexwait: This system call is used for contended psynch mutexes to block.
 */
int
psynch_mutexwait(__unused proc_t p, struct psynch_mutexwait_args * uap, uint32_t * retval)
{
    user_addr_t mutex = uap->mutex;
    uint32_t mgen = uap->mgen;
    uint32_t ugen = uap->ugen;
    uint64_t tid = uap->tid;
    int flags = uap->flags;
    ksyn_wait_queue_t kwq;
    int error = 0;
    int ins_flags;
    uthread_t uth;
    int firstfit = flags & _PTHREAD_MUTEX_POLICY_FIRSTFIT;
    uint32_t lockseq, updatebits;

#if _PSYNCH_TRACE_
    KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_START, (uint32_t)mutex, mgen, ugen, flags, 0);
#endif /* _PSYNCH_TRACE_ */

    uth = current_uthread();

    uth->uu_lockseq = uap->mgen;
    lockseq = (uap->mgen & PTHRW_COUNT_MASK);

    if (firstfit == 0) {
        ins_flags = SEQFIT;
    } else {
        /* first fit */
        ins_flags = FIRSTFIT;
    }

    error = ksyn_wqfind(mutex, mgen, ugen, 0, tid, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_MTX), &kwq);
    if (error != 0) {
#if _PSYNCH_TRACE_
        KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_END, (uint32_t)mutex, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
        return(error);
    }

    ksyn_wqlock(kwq);

    if ((kwq->kw_pre_rwwc != 0) && ((ins_flags == FIRSTFIT) || (lockseq == kwq->kw_pre_lockseq))) {
        /* got preposted lock */
        kwq->kw_pre_rwwc--;
        if (kwq->kw_pre_rwwc == 0) {
            CLEAR_PREPOST_BITS(kwq);
            kwq->kw_lastunlockseq = 0;
        } else {
            panic("psynch_mutexwait: more than one prepost %d\n", (kwq->kw_pre_rwwc + 1));
            kwq->kw_pre_lockseq += PTHRW_INC; /* look for next one */
        }
        if (kwq->kw_inqueue == 0) {
            updatebits = lockseq | PTHRW_EBIT;
        } else {
            updatebits = (kwq->kw_highseq & PTHRW_COUNT_MASK) | (PTHRW_EBIT | PTHRW_WBIT);
        }

        uth->uu_psynchretval = updatebits;
#if __TESTPANICS__
        if ((updatebits & PTHRW_COUNT_MASK) == 0)
            panic("psynch_mutexwait: (prepost) returning 0 lseq in mutexwait with EBIT\n");
#endif /* __TESTPANICS__ */
        ksyn_wqunlock(kwq);
        *retval = updatebits;
        goto out;
    }

    error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], mgen, uth, ins_flags);
    if (error != 0)
        panic("psynch_mutexwait: failed to enqueue\n");

    error = ksyn_block_thread_locked(kwq, (uint64_t)0, uth);
    /* drops the wq lock */

    if (error != 0) {
        ksyn_wqlock(kwq);
#if _PSYNCH_TRACE_
        KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_NONE, (uint32_t)mutex, 2, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
        if (uth->uu_kwqqueue != NULL)
            ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], uth);
        ksyn_wqunlock(kwq);
    } else {
        updatebits = uth->uu_psynchretval;
        *retval = updatebits;
#if __TESTPANICS__
        if ((updatebits & PTHRW_COUNT_MASK) == 0)
            panic("psynch_mutexwait: returning 0 lseq in mutexwait with EBIT\n");
#endif /* __TESTPANICS__ */
    }
out:
    ksyn_wqrelease(kwq, NULL);
#if _PSYNCH_TRACE_
    KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_END, (uint32_t)mutex, 0, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */

    return(error);
}
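/*
 * Waiter-side protocol, in brief: if a prepost already covers this
 * thread's sequence it returns immediately with the computed updatebits;
 * otherwise the thread is queued (SEQFIT orders waiters by sequence,
 * FIRSTFIT relaxes that ordering) and blocks until
 * psynch_mutexdrop_internal hands it the lock.
 */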

/*
 * psynch_mutexdrop: This system call is used for unlock postings on contended psynch mutexes.
 */
int
psynch_mutexdrop(__unused proc_t p, struct psynch_mutexdrop_args * uap, __unused uint32_t * retval)
{
    user_addr_t mutex = uap->mutex;
    uint32_t mgen = uap->mgen;
    uint32_t lkseq = mgen & PTHRW_COUNT_MASK;
    uint32_t ugen = uap->ugen;
    uint64_t tid = uap->tid;
    int flags = uap->flags;
    ksyn_wait_queue_t kwq;
    int error = 0;

#if _PSYNCH_TRACE_
    KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_MLDROP | DBG_FUNC_START, (uint32_t)mutex, mgen, ugen, flags, 0);
#endif /* _PSYNCH_TRACE_ */
    error = ksyn_wqfind(mutex, mgen, ugen, 0, tid, flags, KSYN_WQTYPE_MTX, &kwq);
    if (error != 0) {
#if _PSYNCH_TRACE_
        KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_MLDROP | DBG_FUNC_END, (uint32_t)mutex, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
        return(error);
    }
    psynch_mutexdrop_internal(kwq, lkseq, ugen, flags);
    /* drops the kwq reference */
#if _PSYNCH_TRACE_
    KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_MLDROP | DBG_FUNC_END, (uint32_t)mutex, 0, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
    return(0);
}

/*
 * psynch_cvbroad: This system call is used for broadcast postings on blocked waiters of psynch cvars.
 */
int
psynch_cvbroad(__unused proc_t p, struct psynch_cvbroad_args * uap, int * retval)
{
    user_addr_t cond = uap->cv;
    uint32_t cgen = uap->cvgen;
    uint32_t diffgen = uap->diffgen;
    uint32_t mgen = uap->mgen;
    int flags = uap->flags;
    ksyn_wait_queue_t kwq, ckwq;
    int error = 0;
#if COND_MTX_WAITQUEUEMOVE
    int mutexowned = flags & _PTHREAD_MTX_OPT_HOLDLOCK;
    int nomutex = flags & _PTHREAD_MTX_OPT_NOHOLDLOCK;
    user_addr_t mutex = uap->mutex;
    uint32_t ugen = uap->ugen;
    uint64_t tid = uap->tid;
    uthread_t uth;
    kern_return_t kret = KERN_SUCCESS;
#else /* COND_MTX_WAITQUEUEMOVE */
    int nomutex = _PTHREAD_MTX_OPT_NOHOLDLOCK;
#endif /* COND_MTX_WAITQUEUEMOVE */
    uint32_t nextgen, ngen;
    int updatebits = 0;

#if _PSYNCH_TRACE_
    KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_START, (uint32_t)cond, (uint32_t)0, cgen, mgen, 0);
#endif /* _PSYNCH_TRACE_ */
    error = ksyn_wqfind(cond, cgen, cgen, 0, 0, flags, KSYN_WQTYPE_CVAR, &ckwq);
    if (error != 0) {
#if _PSYNCH_TRACE_
        KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_END, (uint32_t)cond, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
        return(error);
    }

#if COND_MTX_WAITQUEUEMOVE
    ngen = mgen + (PTHRW_INC * diffgen);
    if (nomutex == 0) {
        error = ksyn_wqfind(mutex, ngen, ugen, 0, tid, flags, KSYN_WQTYPE_MTX, &kwq);
        if (error != 0) {
            kwq = NULL;
            goto out;
        }
    }
#else /* COND_MTX_WAITQUEUEMOVE */
    nomutex = _PTHREAD_MTX_OPT_NOHOLDLOCK;
    kwq = NULL;
    ngen = 0;
#endif /* COND_MTX_WAITQUEUEMOVE */

    ksyn_wqlock(ckwq);
#if COND_MTX_WAITQUEUEMOVE
redrive:
#endif /* COND_MTX_WAITQUEUEMOVE */
    if (diffgen > ckwq->kw_inqueue) {
        ckwq->kw_pre_rwwc = diffgen - ckwq->kw_inqueue;
        ckwq->kw_pre_lockseq = cgen & PTHRW_BIT_MASK;
        updatebits = ckwq->kw_pre_rwwc;     /* unused mutex refs */
        nextgen = (mgen + (ckwq->kw_pre_rwwc * PTHRW_INC));
    } else {
        updatebits = 0;
        nextgen = mgen + PTHRW_INC;
    }

    if (ckwq->kw_inqueue != 0) {
#if COND_MTX_WAITQUEUEMOVE
        if (mutexowned != 0) {
#if _PSYNCH_TRACE_
            KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_NONE, (uint32_t)cond, 0, 1, ckwq->kw_inqueue, 0);
#endif /* _PSYNCH_TRACE_ */
            uth = ksyn_queue_removefirst(&ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], ckwq);
            uth->uu_psynchretval = ngen;
            uth->uu_kwqqueue = NULL;

            kret = ksyn_wakeup_thread(ckwq, uth);
            if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
                panic("cvbroad: failed to remove\n");
            if (kret == KERN_NOT_WAITING) {
                /*
                 * trying to wake one thread to return, so if
                 * this one failed to wake up, get the next one..
                 */
                goto redrive;
            }
            nextgen = nextgen + PTHRW_INC;
            diffgen -= 1;
        }
#else /* COND_MTX_WAITQUEUEMOVE */
        updatebits = 0;
#endif /* COND_MTX_WAITQUEUEMOVE */

        /* nomutex case, or in the mutexowned case after the first one */
        /* move them all to the mutex waitqueue */
        if ((ckwq->kw_inqueue != 0) && (diffgen > 0)) {
            /* at least one more posting needed and there are waiting threads */
            /* drops the ckwq lock */
#if _PSYNCH_TRACE_
            KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_NONE, (uint32_t)cond, 0, 2, diffgen, 0);
#endif /* _PSYNCH_TRACE_ */
            /* move threads from ckwq to kwq if COND_MTX_WAITQUEUEMOVE, else wake up */
            ksyn_move_wqthread(ckwq, kwq, nextgen, ngen, diffgen, nomutex);
        } else
            ksyn_wqunlock(ckwq);
    } else {
        /* no need for prepost as it is covered before */
        ksyn_wqunlock(ckwq);
    }

    if (error == 0) {
        *retval = updatebits;
    }

#if COND_MTX_WAITQUEUEMOVE
out:
#endif /* COND_MTX_WAITQUEUEMOVE */
    ksyn_wqrelease(ckwq, kwq);
#if _PSYNCH_TRACE_
    KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_END, (uint32_t)cond, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */

    return(error);
}

/*
 * psynch_cvsignal: This system call is used for signalling the blocked waiters of psynch cvars.
 */
int
psynch_cvsignal(__unused proc_t p, struct psynch_cvsignal_args * uap, int * retval)
{
    user_addr_t cond = uap->cv;
    uint32_t cgen = uap->cvgen;
    uint32_t cugen = uap->cvugen;
    uint32_t mgen = uap->mgen;
    int threadport = uap->thread_port;
    int flags = uap->flags;
    ksyn_wait_queue_t kwq, ckwq;
    int error = 0, kret;
    uthread_t uth;
#if USE_WAITQUEUE
    thread_t th = THREAD_NULL, mth;
#else /* USE_WAITQUEUE */
    thread_t th = THREAD_NULL;
#endif /* USE_WAITQUEUE */
#if COND_MTX_WAITQUEUEMOVE
    user_addr_t mutex = uap->mutex;
    uint32_t ugen = uap->ugen;
    int mutexowned = flags & _PTHREAD_MTX_OPT_HOLDLOCK;
    int nomutex = flags & _PTHREAD_MTX_OPT_NOHOLDLOCK;
#else /* COND_MTX_WAITQUEUEMOVE */
    int nomutex = _PTHREAD_MTX_OPT_NOHOLDLOCK;
#endif /* COND_MTX_WAITQUEUEMOVE */
    uint32_t retbits, ngen, lockseq;

    if (nomutex != 0)
        retbits = 0;
    else
        retbits = 1;
#if _PSYNCH_TRACE_
    KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_START, (uint32_t)cond, (uint32_t)0, cgen, mgen, 0);
    KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_NONE, (uint32_t)cond, (uint32_t)cugen, flags, mgen, 0);
#endif /* _PSYNCH_TRACE_ */

    error = ksyn_wqfind(cond, cgen, cugen, 0, 0, flags, KSYN_WQTYPE_CVAR, &ckwq);
    if (error != 0) {
        *retval = retbits;
#if _PSYNCH_TRACE_
        KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_END, (uint32_t)cond, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
        return(error);
    }

    if ((flags & _PTHREAD_MTX_OPT_LASTDROP) == _PTHREAD_MTX_OPT_LASTDROP) {

        ksyn_wqlock(ckwq);
        lockseq = cgen & PTHRW_COUNT_MASK;
        /* do we need to check lockseq here? this comes from the last waiter, so there may be a race */
        if ((ckwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, ckwq->kw_pre_lockseq) != 0)) {
            ckwq->kw_pre_rwwc--;
            if (ckwq->kw_pre_rwwc == 0)
                CLEAR_PREPOST_BITS(ckwq);
        }
        ksyn_wqunlock(ckwq);
        /* no mutex or thread is associated with this, just a notification */
        th = THREAD_NULL;
        error = 0;
        goto out;
    }

    ngen = mgen + PTHRW_INC;

#if COND_MTX_WAITQUEUEMOVE
    if (nomutex == 0) {
        /* the mutex was operated on, so look it up */
        error = ksyn_wqfind(mutex, ngen, ugen, 0, 0, flags, KSYN_WQTYPE_MTX, &kwq);
        if (error != 0) {
            *retval = retbits;
            kwq = NULL;
            goto out;
        }
    } else {
#endif /* COND_MTX_WAITQUEUEMOVE */
        kwq = NULL;
#if COND_MTX_WAITQUEUEMOVE
    }
#endif /* COND_MTX_WAITQUEUEMOVE */

    if (threadport != 0) {
        th = (thread_t)port_name_to_thread((mach_port_name_t)threadport);
        if (th == THREAD_NULL) {
            *retval = retbits;
            error = ESRCH;
            goto out;
        }
    }

    ksyn_wqlock(ckwq);
redrive:
    if (ckwq->kw_inqueue != 0) {
        *retval = 0;
#if COND_MTX_WAITQUEUEMOVE
        if ((mutexowned != 0) || (nomutex != 0)) {
#if _PSYNCH_TRACE_
            KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_NONE, (uint32_t)cond, 0, 1, ckwq->kw_inqueue, 0);
#endif /* _PSYNCH_TRACE_ */
            if (th != THREAD_NULL) {
                uth = get_bsdthread_info(th);
                if (nomutex != 0)
                    ngen |= PTHRW_MTX_NONE;
                uth->uu_psynchretval = ngen;
                uth->uu_kwqqueue = NULL;
                ksyn_queue_removeitem(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], uth);
                kret = ksyn_wakeup_thread(ckwq, uth);
                if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
                    panic("psynch_cvsignal: panic waking in cvsignal\n");
                if (kret == KERN_NOT_WAITING) {
                    if (threadport != 0) {
                        error = 0;
                    } else
                        goto redrive;
                }
            } else {
                uth = ksyn_queue_removefirst(&ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], ckwq);
                if (nomutex != 0)
                    ngen |= PTHRW_MTX_NONE;
                uth->uu_psynchretval = ngen;
                uth->uu_kwqqueue = NULL;
                kret = ksyn_wakeup_thread(ckwq, uth);
                if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
                    panic("psynch_cvsignal: panic waking in cvsignal\n");
                if (kret == KERN_NOT_WAITING) {
                    if (threadport != 0) {
                        error = 0;
                    } else
                        goto redrive;
                }
            }
            ksyn_wqunlock(ckwq);
        } else {
#endif /* COND_MTX_WAITQUEUEMOVE */
            /* need to move a thread to another queue */
#if _PSYNCH_TRACE_
            KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_NONE, (uint32_t)cond, 0, 2, ckwq->kw_inqueue, 0);
#endif /* _PSYNCH_TRACE_ */
            if (th != THREAD_NULL) {
                uth = get_bsdthread_info(th);
                /* if the given thread is not blocked in cvwait, return an error */
                if (uth->uu_kwqqueue != ckwq) {
                    error = EINVAL;
                    ksyn_wqunlock(ckwq);
                    goto out;
                }
                ksyn_queue_removeitem(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], uth);
            } else {
                uth = ksyn_queue_removefirst(&ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], ckwq);
                if (uth == NULL)
                    panic("psynch_cvsignal: null uthread after remove");
            }
#if COND_MTX_WAITQUEUEMOVE
            ksyn_wqunlock(ckwq);
#else /* COND_MTX_WAITQUEUEMOVE */
            uth->uu_psynchretval = 0;
            uth->uu_kwqqueue = NULL;
            kret = ksyn_wakeup_thread(ckwq, uth);
            if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
                panic("psynch_cvsignal: panic waking in cvsignal\n");
            if (kret == KERN_NOT_WAITING) {
                error = 0;
                if (threadport == 0)
                    goto redrive;
            }

            ksyn_wqunlock(ckwq);
            error = 0;
#endif /* COND_MTX_WAITQUEUEMOVE */

#if COND_MTX_WAITQUEUEMOVE
            ksyn_wqlock(kwq);
            ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], ngen, uth, SEQFIT);
#if USE_WAITQUEUE
            kret = wait_queue_move_thread(&ckwq->kw_wq, ckwq->kw_addr, th, &kwq->kw_wq, kwq->kw_addr, &mth);
            if (kret == KERN_SUCCESS) {
                if (mth != THREAD_NULL) {
                    uth = (struct uthread *)get_bsdthread_info(mth);
                    uth->uu_lockseq = ngen;
                    TAILQ_INSERT_TAIL(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_uthlist, uth, uu_mtxlist);
                }
            }
#else /* USE_WAITQUEUE */
            /* no need to move anything, just update the sequence */
            uth->uu_lockseq = ngen;
#endif /* USE_WAITQUEUE */
            ksyn_wqunlock(kwq);
        }
#endif /* COND_MTX_WAITQUEUEMOVE */
    } else {
        /* prepost */
#if _PSYNCH_TRACE_
        KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_NONE, (uint32_t)cond, 0, 3, ckwq->kw_inqueue, 0);
#endif /* _PSYNCH_TRACE_ */
        if (threadport != 0) {
            error = EINVAL;
            ksyn_wqunlock(ckwq);
            goto out;
        }

        ckwq->kw_pre_rwwc++;
        ckwq->kw_attq = kwq;
        ckwq->kw_pre_lockseq = cgen & PTHRW_BIT_MASK;
        ckwq->kw_pre_cvretval = ngen;
        *retval = retbits;
        ksyn_wqunlock(ckwq);
    }
    /* ckwq is unlocked here */

out:
    ksyn_wqrelease(ckwq, kwq);
    if (th != THREAD_NULL)
        thread_deallocate(th);
#if _PSYNCH_TRACE_
    KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_END, (uint32_t)cond, 0, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */

    return(error);
}

/*
 * psynch_cvwait: This system call is used for psynch cvar waiters to block in kernel.
 */
int
psynch_cvwait(__unused proc_t p, struct psynch_cvwait_args * uap, uint32_t * retval)
{
    user_addr_t cond = uap->cv;
    uint32_t cgen = uap->cvgen;
    uint32_t cugen = uap->cvugen;
    user_addr_t mutex = uap->mutex;
    uint32_t mgen = 0, ugen;
    int flags = 0;
    ksyn_wait_queue_t kwq, ckwq;
    int error = 0;
    uint64_t abstime = 0;
    uint32_t lockseq, updatebits;
    struct timespec ts;
    uthread_t uth;

    /* for conformance reasons */
    __pthread_testcancel(0);

#if _PSYNCH_TRACE_
    KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_START, (uint32_t)cond, (uint32_t)mutex, cgen, mgen, 0);
#endif /* _PSYNCH_TRACE_ */
    flags = 0;
    if ((uap->usec & 0xc0000000) != 0) {
        if (uap->usec & 0x40000000)
            flags |= PTHREAD_PROCESS_SHARED;
        if (uap->usec & 0x80000000)
            flags |= _PTHREAD_MUTEX_POLICY_FIRSTFIT;
    }
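    /*
     * Note on the encoding: uap->usec carries the timeout in its low 30
     * bits and repurposes the top two bits as flags (0x40000000 ->
     * PTHREAD_PROCESS_SHARED, 0x80000000 -> firstfit policy), which is
     * why the timeout math below masks with 0x3fffffff.
     */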

    error = ksyn_wqfind(cond, cgen, cugen, 0, 0, flags, KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INWAIT, &ckwq);
    if (error != 0) {
#if _PSYNCH_TRACE_
        KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_END, (uint32_t)cond, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
        return(error);
    }

    if (mutex != (user_addr_t)0) {
        mgen = uap->mgen;
        ugen = uap->ugen;

        error = ksyn_wqfind(mutex, mgen, ugen, 0, 0, flags, KSYN_WQTYPE_MTX, &kwq);
        if (error != 0)
            goto out;

        psynch_mutexdrop_internal(kwq, mgen, ugen, flags);
        /* drops kwq reference */
    }

    uth = current_uthread();
    uth->uu_lockseq = cgen;
    lockseq = (cgen & PTHRW_COUNT_MASK);

    if (uap->sec != 0 || (uap->usec & 0x3fffffff) != 0) {
        ts.tv_sec = uap->sec;
        ts.tv_nsec = (uap->usec & 0x3fffffff) * 1000;   /* microseconds to nanoseconds */
        nanoseconds_to_absolutetime((uint64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec, &abstime);
        clock_absolutetime_interval_to_deadline(abstime, &abstime);
    }
    ksyn_wqlock(ckwq);
    if ((ckwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, ckwq->kw_pre_lockseq) != 0)) {
#if _PSYNCH_TRACE_
        KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, 0, 1, 0, 0);
#endif /* _PSYNCH_TRACE_ */

#if COND_MTX_WAITQUEUEMOVE
        updatebits = ckwq->kw_pre_cvretval | PTHRW_MTX_NONE;
#else /* COND_MTX_WAITQUEUEMOVE */
        updatebits = 0;
#endif /* COND_MTX_WAITQUEUEMOVE */
        ckwq->kw_pre_rwwc--;
        if (ckwq->kw_pre_rwwc == 0)
            CLEAR_PREPOST_BITS(ckwq);
        *retval = updatebits;
        error = 0;
        ksyn_wqunlock(ckwq);
        goto out;

    } else {
#if _PSYNCH_TRACE_
        KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, 0, 2, cgen, 0);
#endif /* _PSYNCH_TRACE_ */
        error = ksyn_queue_insert(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], cgen, uth, FIRSTFIT);
        if (error != 0)
            panic("psynch_cvwait: failed to enqueue\n");
        error = ksyn_block_thread_locked(ckwq, abstime, uth);
        /* drops the lock */
    }

    if (error != 0) {
        ksyn_wqlock(ckwq);
#if _PSYNCH_TRACE_
        KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, 0, 3, error, 0);
#endif /* _PSYNCH_TRACE_ */
        if (uth->uu_kwqqueue != NULL) {
            ksyn_queue_removeitem(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], uth);
        }
        ksyn_wqunlock(ckwq);
    } else {
        *retval = uth->uu_psynchretval;
    }
out:
#if _PSYNCH_TRACE_
    KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_END, (uint32_t)cond, 0, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
    ksyn_wqrelease(ckwq, NULL);
    return(error);
}


/* ***************** pthread_rwlock ************************ */
/*
 * psynch_rw_rdlock: This system call is used for psynch rwlock readers to block.
 */
int
psynch_rw_rdlock(__unused proc_t p, struct psynch_rw_rdlock_args * uap, uint32_t * retval)
{
    user_addr_t rwlock = uap->rwlock;
    uint32_t lgen = uap->lgenval;
    uint32_t ugen = uap->ugenval;
    uint32_t rw_wc = uap->rw_wc;
    //uint64_t tid = uap->tid;
    int flags = uap->flags;
    int error = 0, block;
    uint32_t lockseq = 0, updatebits = 0, preseq = 0;
    ksyn_wait_queue_t kwq;
    uthread_t uth;

#if _PSYNCH_TRACE_
    KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
    uth = current_uthread();

    /* preserve the seq number */
    uth->uu_lockseq = lgen;
    lockseq = lgen & PTHRW_COUNT_MASK;

    error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
    if (error != 0) {
#if _PSYNCH_TRACE_
        KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
        return(error);
    }

    ksyn_wqlock(kwq);

    /* handle the missed wakeups first */
    if ((kwq->kw_pre_intrcount != 0) &&
        ((kwq->kw_pre_intrtype == PTH_RW_TYPE_READ) || (kwq->kw_pre_intrtype == PTH_RW_TYPE_LREAD)) &&
        (is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {

        kwq->kw_pre_intrcount--;
        uth->uu_psynchretval = kwq->kw_pre_intrretbits;
        if (kwq->kw_pre_intrcount == 0)
            CLEAR_INTR_PREPOST_BITS(kwq);
        ksyn_wqunlock(kwq);
        goto out;
    }

    /* handle unlock2/downgrade first */
    if ((kwq->kw_pre_limrd != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_limrdseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
        KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 1, kwq->kw_pre_limrd, kwq->kw_pre_limrdseq, 0);
#endif /* _PSYNCH_TRACE_ */
        kwq->kw_pre_limrd--;
        /* acquired the lock, so return */
        uth->uu_psynchretval = kwq->kw_pre_limrdbits;
        if (kwq->kw_pre_limrd == 0)
            CLEAR_READ_PREPOST_BITS(kwq);
        ksyn_wqunlock(kwq);
        goto out;
    }

    if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
        KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
        kwq->kw_pre_rwwc--;
        if (kwq->kw_pre_rwwc == 0) {
            preseq = kwq->kw_pre_lockseq;
            CLEAR_PREPOST_BITS(kwq);
            error = kwq_handle_unlock(kwq, preseq, &updatebits, (KW_UNLOCK_PREPOST_READLOCK|KW_UNLOCK_PREPOST), &block, lgen);
            if (error != 0)
                panic("kwq_handle_unlock failed %d\n", error);
            if (block == 0) {
                ksyn_wqunlock(kwq);
                goto out;
            }
            /* insert to the queue and proceed as usual */
        }
    }

#if _PSYNCH_TRACE_
    KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
    error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_READ], lgen, uth, SEQFIT);
    if (error != 0)
        panic("psynch_rw_rdlock: failed to enqueue\n");
    error = ksyn_block_thread_locked(kwq, (uint64_t)0, uth);
    /* drops the kwq lock */

out:
    if (error != 0) {
#if _PSYNCH_TRACE_
        KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 4, error, 0, 0);
#endif /* _PSYNCH_TRACE_ */
        ksyn_wqlock(kwq);
        if (uth->uu_kwqqueue != NULL)
            ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_READ], uth);
        ksyn_wqunlock(kwq);
    } else {
        /* update bits */
        *retval = uth->uu_psynchretval;
    }
    ksyn_wqrelease(kwq, NULL);
#if _PSYNCH_TRACE_
    KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
    return(error);
}
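/*
 * The same three-stage entry pattern recurs in each rwlock wait path
 * below (long read, write, yield-write): first consume any recorded
 * missed wakeup (kw_pre_intr*), then any unlock2/downgrade read grant
 * (kw_pre_limrd*), then any pending unlock prepost (kw_pre_rwwc); only
 * if none applies is the thread queued and blocked.
 */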

/*
 * psynch_rw_longrdlock: This system call is used for psynch rwlock long readers to block.
 */
int
psynch_rw_longrdlock(__unused proc_t p, struct psynch_rw_longrdlock_args * uap, uint32_t * retval)
{
    user_addr_t rwlock = uap->rwlock;
    uint32_t lgen = uap->lgenval;
    uint32_t ugen = uap->ugenval;
    uint32_t rw_wc = uap->rw_wc;
    //uint64_t tid = uap->tid;
    int flags = uap->flags;

    ksyn_wait_queue_t kwq;
    int error = 0, block = 0;
    uthread_t uth;
    uint32_t lockseq = 0, updatebits = 0, preseq = 0;

#if _PSYNCH_TRACE_
    KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
    uth = current_uthread();

    uth->uu_lockseq = lgen;
    lockseq = (lgen & PTHRW_COUNT_MASK);

    error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
    if (error != 0) {
#if _PSYNCH_TRACE_
        KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
        return(error);
    }

    ksyn_wqlock(kwq);

    /* handle the missed wakeups first */
    if ((kwq->kw_pre_intrcount != 0) &&
        (kwq->kw_pre_intrtype == PTH_RW_TYPE_LREAD) &&
        (is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {

        kwq->kw_pre_intrcount--;
        uth->uu_psynchretval = kwq->kw_pre_intrretbits;
        if (kwq->kw_pre_intrcount == 0)
            CLEAR_INTR_PREPOST_BITS(kwq);
        ksyn_wqunlock(kwq);
        goto out;
    }

    /* handle unlock2/downgrade first */
    if ((kwq->kw_pre_limrd != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_limrdseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
        KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 1, kwq->kw_pre_limrd, kwq->kw_pre_limrdseq, 0);
#endif /* _PSYNCH_TRACE_ */
        kwq->kw_pre_limrd--;
        if (kwq->kw_pre_limrd == 0)
            CLEAR_READ_PREPOST_BITS(kwq);
        /* not a read; proceed */
    }

    if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
        KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
        kwq->kw_pre_rwwc--;
        if (kwq->kw_pre_rwwc == 0) {
            preseq = kwq->kw_pre_lockseq;
            CLEAR_PREPOST_BITS(kwq);
            error = kwq_handle_unlock(kwq, preseq, &updatebits, (KW_UNLOCK_PREPOST_LREADLOCK|KW_UNLOCK_PREPOST), &block, lgen);
            if (error != 0)
                panic("kwq_handle_unlock failed %d\n", error);
            if (block == 0) {
                ksyn_wqunlock(kwq);
                goto out;
            }
            /* insert to the queue and proceed as usual */
        }
    }

#if _PSYNCH_TRACE_
    KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
    error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_LREAD], lgen, uth, SEQFIT);
    if (error != 0)
        panic("psynch_rw_longrdlock: failed to enqueue\n");

    error = ksyn_block_thread_locked(kwq, (uint64_t)0, uth);
    /* drops the kwq lock */
out:
    if (error != 0) {
#if _PSYNCH_TRACE_
        KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
        ksyn_wqlock(kwq);
        if (uth->uu_kwqqueue != NULL)
            ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_LREAD], uth);
        ksyn_wqunlock(kwq);
    } else {
        /* update bits */
        *retval = uth->uu_psynchretval;
    }

    ksyn_wqrelease(kwq, NULL);

#if _PSYNCH_TRACE_
    KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
    return(error);
}

/*
 * psynch_rw_wrlock: This system call is used for psynch rwlock writers to block.
 */
int
psynch_rw_wrlock(__unused proc_t p, struct psynch_rw_wrlock_args * uap, uint32_t * retval)
{
    user_addr_t rwlock = uap->rwlock;
    uint32_t lgen = uap->lgenval;
    uint32_t ugen = uap->ugenval;
    uint32_t rw_wc = uap->rw_wc;
    //uint64_t tid = uap->tid;
    int flags = uap->flags;
    int block;
    ksyn_wait_queue_t kwq;
    int error = 0;
    uthread_t uth;
    uint32_t lockseq = 0, updatebits = 0, preseq = 0;

#if _PSYNCH_TRACE_
    KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
    uth = current_uthread();

    uth->uu_lockseq = lgen;
    lockseq = (lgen & PTHRW_COUNT_MASK);

    error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
    if (error != 0) {
#if _PSYNCH_TRACE_
        KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
        return(error);
    }

    ksyn_wqlock(kwq);

    /* handle the missed wakeups first */
    if ((kwq->kw_pre_intrcount != 0) &&
        (kwq->kw_pre_intrtype == PTH_RW_TYPE_WRITE) &&
        (is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {

        kwq->kw_pre_intrcount--;
        uth->uu_psynchretval = kwq->kw_pre_intrretbits;
        if (kwq->kw_pre_intrcount == 0)
            CLEAR_INTR_PREPOST_BITS(kwq);
        ksyn_wqunlock(kwq);
        goto out;
    }

    /* handle unlock2/downgrade first */
    if ((kwq->kw_pre_limrd != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_limrdseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
        KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 1, kwq->kw_pre_limrd, kwq->kw_pre_limrdseq, 0);
#endif /* _PSYNCH_TRACE_ */
        kwq->kw_pre_limrd--;
        if (kwq->kw_pre_limrd == 0)
            CLEAR_READ_PREPOST_BITS(kwq);
        /* not a read; proceed */
    }

    if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
        KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
        kwq->kw_pre_rwwc--;
        if (kwq->kw_pre_rwwc == 0) {
            preseq = kwq->kw_pre_lockseq;
            CLEAR_PREPOST_BITS(kwq);
            error = kwq_handle_unlock(kwq, preseq, &updatebits, (KW_UNLOCK_PREPOST_WRLOCK|KW_UNLOCK_PREPOST), &block, lgen);
            if (error != 0)
                panic("kwq_handle_unlock failed %d\n", error);
            if (block == 0) {
                ksyn_wqunlock(kwq);
                goto out;
            }
            /* insert to the queue and proceed as usual */
        }
    }

#if _PSYNCH_TRACE_
    KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
    error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], lgen, uth, SEQFIT);
    if (error != 0)
        panic("psynch_rw_wrlock: failed to enqueue\n");

    error = ksyn_block_thread_locked(kwq, (uint64_t)0, uth);
    /* drops the wq lock */

out:
    if (error != 0) {
#if _PSYNCH_TRACE_
        KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 4, error, 0, 0);
#endif /* _PSYNCH_TRACE_ */
        ksyn_wqlock(kwq);
        if (uth->uu_kwqqueue != NULL)
            ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], uth);
        ksyn_wqunlock(kwq);
    } else {
        /* update bits */
        *retval = uth->uu_psynchretval;
    }

    ksyn_wqrelease(kwq, NULL);

#if _PSYNCH_TRACE_
    KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
    return(error);
}

/*
 * psynch_rw_yieldwrlock: This system call is used for psynch rwlock yielding writers to block.
 */
int
psynch_rw_yieldwrlock(__unused proc_t p, struct psynch_rw_yieldwrlock_args * uap, uint32_t * retval)
{
    user_addr_t rwlock = uap->rwlock;
    uint32_t lgen = uap->lgenval;
    uint32_t ugen = uap->ugenval;
    uint32_t rw_wc = uap->rw_wc;
    //uint64_t tid = uap->tid;
    int flags = uap->flags;
    int block;
    ksyn_wait_queue_t kwq;
    int error = 0;
    uthread_t uth;
    uint32_t lockseq = 0, updatebits = 0, preseq = 0;

#if _PSYNCH_TRACE_
    KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */

    uth = current_uthread();

    uth->uu_lockseq = lgen;
    lockseq = (lgen & PTHRW_COUNT_MASK);

    error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
    if (error != 0) {
#if _PSYNCH_TRACE_
        KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
        return(error);
    }

    ksyn_wqlock(kwq);

    /* handle the missed wakeups first */
    if ((kwq->kw_pre_intrcount != 0) &&
        (kwq->kw_pre_intrtype == PTH_RW_TYPE_YWRITE) &&
        (is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {

        kwq->kw_pre_intrcount--;
        uth->uu_psynchretval = kwq->kw_pre_intrretbits;
        if (kwq->kw_pre_intrcount == 0)
            CLEAR_INTR_PREPOST_BITS(kwq);
        ksyn_wqunlock(kwq);
        goto out;
    }

    /* handle unlock2/downgrade first */
    if ((kwq->kw_pre_limrd != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_limrdseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
        KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 1, kwq->kw_pre_limrd, kwq->kw_pre_limrdseq, 0);
#endif /* _PSYNCH_TRACE_ */
        kwq->kw_pre_limrd--;
        if (kwq->kw_pre_limrd == 0)
            CLEAR_READ_PREPOST_BITS(kwq);
        /* not a read; proceed */
    }

    if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
        KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
        kwq->kw_pre_rwwc--;
        if (kwq->kw_pre_rwwc == 0) {
            preseq = kwq->kw_pre_lockseq;
            CLEAR_PREPOST_BITS(kwq);
            error = kwq_handle_unlock(kwq, preseq, &updatebits, (KW_UNLOCK_PREPOST_YWRLOCK|KW_UNLOCK_PREPOST), &block, lgen);
            if (error != 0)
                panic("kwq_handle_unlock failed %d\n", error);
            if (block == 0) {
                ksyn_wqunlock(kwq);
                goto out;
            }
            /* insert to the queue and proceed as usual */
        }
    }

#if _PSYNCH_TRACE_
    KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
    error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER], lgen, uth, SEQFIT);
    if (error != 0)
        panic("psynch_rw_yieldwrlock: failed to enqueue\n");

    error = ksyn_block_thread_locked(kwq, (uint64_t)0, uth);
    /* drops the kwq lock */

out:
    if (error != 0) {
#if _PSYNCH_TRACE_
        KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 4, error, 0, 0);
#endif /* _PSYNCH_TRACE_ */
        ksyn_wqlock(kwq);
        if (uth->uu_kwqqueue != NULL)
            ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER], uth);
        ksyn_wqunlock(kwq);
    } else {
        /* update bits */
        *retval = uth->uu_psynchretval;
    }

    ksyn_wqrelease(kwq, NULL);

#if _PSYNCH_TRACE_
    KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
    return(error);
}
1614
1615
1616/*
1617 * psynch_rw_downgrade: This system call is used for wakeup blocked readers who are eligible to run due to downgrade.
1618 */
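/*
 * Worked example (illustrative only): suppose userspace snapshots rw_wc = 4
 * eligible readers at downgrade time, but find_seq_till() finds only
 * count = 1 of them queued in the kernel so far. The body below then
 * records kw_pre_limrd = 3 and kw_pre_limrdseq = lgen, so the three late
 * arrivals are granted their limited-read wakeups as they trickle in.
 */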
1619int
1620psynch_rw_downgrade(__unused proc_t p, struct psynch_rw_downgrade_args * uap, __unused int * retval)
1621{
1622 user_addr_t rwlock = uap->rwlock;
1623 uint32_t lgen = uap->lgenval;
1624 uint32_t ugen = uap->ugenval;
1625 uint32_t rw_wc = uap->rw_wc;
1626 //uint64_t tid = uap->tid;
1627 int flags = uap->flags;
1628 uint32_t count = 0;
1629
1630 ksyn_wait_queue_t kwq;
1631 int error=0;
1632 uthread_t uth;
1633 uint32_t curgen = 0;
1634
1635#if _PSYNCH_TRACE_
1636 KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
1637#endif /* _PSYNCH_TRACE_ */
1638 uth = current_uthread();
1639
1640 curgen = (lgen & PTHRW_COUNT_MASK);
1641
1642 error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
1643 if (error != 0) {
1644#if _PSYNCH_TRACE_
1645 KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
1646#endif /* _PSYNCH_TRACE_ */
1647 return(error);
1648 }
1649
1650 ksyn_wqlock(kwq);
1651
1652 if (is_seqlower(ugen, kwq->kw_lastunlockseq)!= 0) {
1653		/* stale unlock (ugen precedes the last handled unlock); ignore as spurious */
1654 goto out;
1655 }
1656 /* fast path for default case */
1657 if((rw_wc == kwq->kw_inqueue) && (kwq->kw_highseq == curgen))
1658 goto dounlock;
1659
1660 /* have we seen all the waiters? */
1661 if(rw_wc > kwq->kw_inqueue) {
1662 goto prepost;
1663 }
1664
1665 if (is_seqhigher(curgen, kwq->kw_highseq) != 0) {
1666 goto prepost;
1667 } else {
1668 if (find_seq_till(kwq, curgen, rw_wc, &count) == 0) {
1669 if (count < rw_wc) {
1670 kwq->kw_pre_limrd = rw_wc - count;
1671 kwq->kw_pre_limrdseq = lgen;
1672 kwq->kw_pre_limrdbits = lgen;
1673 /* found none ? */
1674 if (count == 0)
1675 goto out;
1676 }
1677 }
1678 }
1679
1680dounlock:
1681#if _PSYNCH_TRACE_
1682 KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
1683#endif /* _PSYNCH_TRACE_ */
1684 error = kwq_handle_downgrade(kwq, lgen, 0, 0, NULL);
1685
1686 if (error != 0)
1687 panic("psynch_rw_downgrade: failed to wakeup\n");
1688
1689out:
1690 ksyn_wqunlock(kwq);
1691#if _PSYNCH_TRACE_
1692 KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_END, (uint32_t)rwlock, 0, 0, error, 0);
1693#endif /* _PSYNCH_TRACE_ */
1694 ksyn_wqrelease(kwq, NULL);
1695
1696 return(error);
1697
1698prepost:
1699 kwq->kw_pre_rwwc = (rw_wc - count);
1700 kwq->kw_pre_lockseq = lgen;
1701#if _PSYNCH_TRACE_
1702 KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 1, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
1703#endif /* _PSYNCH_TRACE_ */
1704 error = 0;
1705 goto out;
1706}
1707
1708
1709/*
1710 * psynch_rw_upgrade: This system call is used by a reader to block waiting for an upgrade to be granted.
1711 */
1712int
1713psynch_rw_upgrade(__unused proc_t p, struct psynch_rw_upgrade_args * uap, uint32_t * retval)
1714{
1715 user_addr_t rwlock = uap->rwlock;
1716 uint32_t lgen = uap->lgenval;
1717 uint32_t ugen = uap->ugenval;
1718 uint32_t rw_wc = uap->rw_wc;
1719 //uint64_t tid = uap->tid;
1720 int flags = uap->flags;
1721 int block;
1722 ksyn_wait_queue_t kwq;
1723 int error=0;
1724 uthread_t uth;
1725 uint32_t lockseq = 0, updatebits = 0, preseq = 0;
1726
1727#if _PSYNCH_TRACE_
1728 KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
1729#endif /* _PSYNCH_TRACE_ */
1730 uth = current_uthread();
1731
1732 uth->uu_lockseq = lgen;
1733 lockseq = (lgen & PTHRW_COUNT_MASK);
1734
1735 error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
1736 if (error != 0) {
1737#if _PSYNCH_TRACE_
1738 KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
1739#endif /* _PSYNCH_TRACE_ */
1740 return(error);
1741 }
1742
1743 ksyn_wqlock(kwq);
1744
1745	/* first, handle any missed wakeups */
1746 if ((kwq->kw_pre_intrcount != 0) &&
1747 (kwq->kw_pre_intrtype == PTH_RW_TYPE_UPGRADE) &&
1748 (is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {
1749
1750 kwq->kw_pre_intrcount--;
1751 uth->uu_psynchretval = kwq->kw_pre_intrretbits;
1752 if (kwq->kw_pre_intrcount==0)
1753 CLEAR_INTR_PREPOST_BITS(kwq);
1754 ksyn_wqunlock(kwq);
1755 goto out;
1756 }
1757
1758 if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
1759#if _PSYNCH_TRACE_
1760 KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
1761#endif /* _PSYNCH_TRACE_ */
1762 kwq->kw_pre_rwwc--;
1763 if (kwq->kw_pre_rwwc == 0) {
1764 preseq = kwq->kw_pre_lockseq;
1765 CLEAR_PREPOST_BITS(kwq);
1766 error = kwq_handle_unlock(kwq, preseq, &updatebits, (KW_UNLOCK_PREPOST_UPGRADE|KW_UNLOCK_PREPOST), &block, lgen);
1767 if (error != 0)
1768 panic("kwq_handle_unlock failed %d\n",error);
1769 if (block == 0) {
1770 ksyn_wqunlock(kwq);
1771 goto out;
1772 }
1773			/* insert into the queue and proceed as usual */
1774 }
1775 }
1776
1777
1778#if _PSYNCH_TRACE_
1779 KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
1780#endif /* _PSYNCH_TRACE_ */
1781 error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_UPGRADE], lgen, uth, SEQFIT);
1782 if (error != 0)
1783 panic("psynch_rw_upgrade: failed to enqueue\n");
1784
1785
1786 error = ksyn_block_thread_locked(kwq, (uint64_t)0, uth);
1787 /* drops the lock */
1788
1789out:
1790 if (error != 0) {
1791#if _PSYNCH_TRACE_
1792 KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 4, error, 0, 0);
1793#endif /* _PSYNCH_TRACE_ */
1794 ksyn_wqlock(kwq);
1795 if (uth->uu_kwqqueue != NULL)
1796 ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_UPGRADE], uth);
1797 ksyn_wqunlock(kwq);
1798 } else {
1799 /* update bits */
1800 *retval = uth->uu_psynchretval;
1801 }
1802
1803 ksyn_wqrelease(kwq, NULL);
1804#if _PSYNCH_TRACE_
1805 KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
1806#endif /* _PSYNCH_TRACE_ */
1807 return(error);
1808}
1809
1810/*
1811 * psynch_rw_unlock: This system call is used to post unlock state. It grants the
1812 * appropriate reader/writer locks to the next eligible waiters.
1813 */
1814
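/*
 * Worked example (illustrative only): an unlock covering rw_wc = 3 waiters
 * arrives while only count = 2 of them are queued, so find_seq_till()
 * returns 0. The prepost path below stores kw_pre_rwwc = 1 and
 * kw_pre_lockseq = curgen, and reports PTHRW_RW_SPURIOUS so that this call
 * does not perform the unlock handling; the last waiter to arrive consumes
 * the prepost and re-drives kwq_handle_unlock() itself.
 */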
1815int
1816psynch_rw_unlock(__unused proc_t p, struct psynch_rw_unlock_args * uap, uint32_t * retval)
1817{
1818 user_addr_t rwlock = uap->rwlock;
1819 uint32_t lgen = uap->lgenval;
1820 uint32_t ugen = uap->ugenval;
1821 uint32_t rw_wc = uap->rw_wc;
1822 uint32_t curgen;
1823 //uint64_t tid = uap->tid;
1824 int flags = uap->flags;
1825 uthread_t uth;
1826 ksyn_wait_queue_t kwq;
1827 uint32_t updatebits = 0;
1828 int error=0;
1829 uint32_t count = 0;
1830
1831
1832#if _PSYNCH_TRACE_
1833 KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
1834#endif /* _PSYNCH_TRACE_ */
1835 uth = current_uthread();
1836
1837 error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_RWLOCK), &kwq);
1838 if (error != 0) {
1839#if _PSYNCH_TRACE_
1840 KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
1841#endif /* _PSYNCH_TRACE_ */
1842 return(error);
1843 }
1844
1845 curgen = lgen & PTHRW_COUNT_MASK;
1846
1847 ksyn_wqlock(kwq);
1848
1849 if ((lgen & PTHRW_RW_INIT) != 0) {
1850 kwq->kw_lastunlockseq = 0;
1851 lgen &= ~PTHRW_RW_INIT;
1852 } else if (is_seqlower(ugen, kwq->kw_lastunlockseq) != 0) {
1853		/* stale unlock; report spurious update bits */
1854 updatebits = PTHRW_RW_SPURIOUS;
1855 goto out;
1856 }
1857
1858
1859#if _PSYNCH_TRACE_
1860 KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 1, kwq->kw_inqueue, curgen, 0);
1861#endif /* _PSYNCH_TRACE_ */
1862 if (find_seq_till(kwq, curgen, rw_wc, &count) == 0) {
1863 if (count < rw_wc)
1864 goto prepost;
1865 }
1866
1867
1868 /* can handle unlock now */
1869
1870 CLEAR_PREPOST_BITS(kwq);
1871 kwq->kw_lastunlockseq = ugen;
1872
1873#if _PSYNCH_TRACE_
1874 KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, 0, 0, 0);
1875#endif /* _PSYNCH_TRACE_ */
1876 error = kwq_handle_unlock(kwq, lgen, &updatebits, 0, NULL, 0);
1877 if (error != 0)
1878 panic("psynch_rw_unlock: kwq_handle_unlock failed %d\n",error);
1879out:
1880 if (error == 0) {
1881		/* return the update bits to userspace */
1882 *retval = updatebits;
1883 }
1884 ksyn_wqunlock(kwq);
1885
1886 ksyn_wqrelease(kwq, NULL);
1887#if _PSYNCH_TRACE_
1888 KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0, 0, error, 0);
1889#endif /* _PSYNCH_TRACE_ */
1890
1891 return(error);
1892
1893prepost:
1894 kwq->kw_pre_rwwc = (rw_wc - count);
1895 kwq->kw_pre_lockseq = curgen;
1896 kwq->kw_lastunlockseq = ugen;
1897#if _PSYNCH_TRACE_
1898 KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, rw_wc, count, 0);
1899 KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 4, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
1900#endif /* _PSYNCH_TRACE_ */
1901 updatebits = (lgen | PTHRW_RW_SPURIOUS);/* let this not do unlock handling */
1902 error = 0;
1903 goto out;
1904}
1905
1906
1907/*
1908 * psynch_rw_unlock2: This system call wakes up pending readers when an unlock grant from the kernel
1909 * races with the arrival of new readers.
1910 */
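/*
 * Worked example (illustrative only): find_diff(lgen, ugen) yields, say,
 * diff = 3 outstanding read grants. If only count = 2 of those readers are
 * queued, the remaining one is preposted via kw_pre_limrd; queued readers
 * whose sequence is below limitread are woken immediately by
 * ksyn_wakeupreaders().
 */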
1911int
1912psynch_rw_unlock2(__unused proc_t p, struct psynch_rw_unlock2_args * uap, uint32_t * retval)
1913{
1914 user_addr_t rwlock = uap->rwlock;
1915 uint32_t lgen = uap->lgenval;
1916 uint32_t ugen = uap->ugenval;
1917 uint32_t rw_wc = uap->rw_wc;
1918 //uint64_t tid = uap->tid;
1919 int flags = uap->flags;
1920 uthread_t uth;
1921 uint32_t num_lreader, limitread, curgen, updatebits;
1922 ksyn_wait_queue_t kwq;
1923 int error=0, longreadset = 0;
1924 int diff;
1925 uint32_t count=0;
1926
1927#if _PSYNCH_TRACE_
1928 KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUNLOCK2 | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
1929#endif /* _PSYNCH_TRACE_ */
1930 uth = current_uthread();
1931
1932 error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_RWLOCK), &kwq);
1933 if (error != 0) {
1934#if _PSYNCH_TRACE_
1935 KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUNLOCK2 | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
1936#endif /* _PSYNCH_TRACE_ */
1937 return(error);
1938 }
1939
1940 ksyn_wqlock(kwq);
1941
1942 curgen = (lgen & PTHRW_COUNT_MASK);
1943 diff = find_diff(lgen, ugen);
1944
1945 limitread = lgen & PTHRW_COUNT_MASK;
1946
1947 if (find_seq_till(kwq, curgen, diff, &count) == 0) {
1948 kwq->kw_pre_limrd = diff - count;
1949 kwq->kw_pre_limrdseq = lgen;
1950 kwq->kw_pre_limrdbits = lgen;
1951 /* found none ? */
1952 if (count == 0)
1953 goto out;
1954 }
1955
1956 if (kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count != 0) {
1957 num_lreader = kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_firstnum;
1958 if (is_seqlower_eq(num_lreader, limitread) != 0)
1959 longreadset = 1;
1960 }
1961
1962 updatebits = lgen;
1963#if _PSYNCH_TRACE_
1964 KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUNLOCK2 | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
1965#endif /* _PSYNCH_TRACE_ */
1966 count = ksyn_wakeupreaders(kwq, limitread, longreadset, 0, updatebits, NULL);
1967
1968 if (count != 0) {
1969 if (kwq->kw_pre_limrd != 0) {
1970 kwq->kw_pre_limrd += count;
1971 } else {
1972 kwq->kw_pre_limrd = count;
1973 kwq->kw_pre_limrdseq = lgen;
1974 kwq->kw_pre_limrdbits = lgen;
1975 }
1976 }
1977 error = 0;
1978
1979out:
1980 if (error == 0) {
1981		/* return the thread's psynch return value to userspace */
1982 *retval = uth->uu_psynchretval;
1983 }
1984 ksyn_wqunlock(kwq);
1985
1986 ksyn_wqrelease(kwq, NULL);
1987#if _PSYNCH_TRACE_
1988 KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUNLOCK2 | DBG_FUNC_END, (uint32_t)rwlock, 0, 0, error, 0);
1989#endif /* _PSYNCH_TRACE_ */
1990
1991 return(error);
1992}
1993
1994
1995/* ************************************************************************** */
1996void
1997pth_global_hashinit()
1998{
1999 pth_glob_hashtbl = hashinit(PTH_HASHSIZE * 4, M_PROC, &pthhash);
2000}
2001
2002void
2003pth_proc_hashinit(proc_t p)
2004{
2005 p->p_pthhash = hashinit(PTH_HASHSIZE, M_PROC, &pthhash);
2006 if (p->p_pthhash == NULL)
2007 panic("pth_proc_hashinit: hash init returned 0\n");
2008}
2009
2010
2011ksyn_wait_queue_t
2012ksyn_wq_hash_lookup(user_addr_t mutex, proc_t p, int flags, uint64_t object, uint64_t objoffset)
2013{
2014 ksyn_wait_queue_t kwq;
2015 struct pthhashhead * hashptr;
2016
2017 if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED)
2018 {
2019 hashptr = pth_glob_hashtbl;
2020 kwq = (&hashptr[object & pthhash])->lh_first;
2021 if (kwq != 0) {
2022 for (; kwq != NULL; kwq = kwq->kw_hash.le_next) {
2023 if ((kwq->kw_object == object) &&(kwq->kw_offset == objoffset)) {
2024 return (kwq);
2025 }
2026 }
2027 }
2028 } else {
2029 hashptr = p->p_pthhash;
2030 kwq = (&hashptr[mutex & pthhash])->lh_first;
2031 if (kwq != 0)
2032 for (; kwq != NULL; kwq = kwq->kw_hash.le_next) {
2033 if (kwq->kw_addr == mutex) {
2034 return (kwq);
2035 }
2036 }
2037 }
2038 return(NULL);
2039}
2040
2041void
2042pth_proc_hashdelete(proc_t p)
2043{
2044 struct pthhashhead * hashptr;
2045 ksyn_wait_queue_t kwq;
2046 int hashsize = pthhash + 1;
2047 int i;
2048
2049 hashptr = p->p_pthhash;
2050 if (hashptr == NULL)
2051 return;
2052
2053 for(i= 0; i < hashsize; i++) {
2054 while ((kwq = LIST_FIRST(&hashptr[i])) != NULL) {
2055 pthread_list_lock();
2056 if ((kwq->kw_pflags & KSYN_WQ_INHASH) != 0) {
2057 kwq->kw_pflags &= ~KSYN_WQ_INHASH;
2058 LIST_REMOVE(kwq, kw_hash);
2059 }
2060 if ((kwq->kw_pflags & KSYN_WQ_FLIST) != 0) {
2061 kwq->kw_pflags &= ~KSYN_WQ_FLIST;
2062 LIST_REMOVE(kwq, kw_list);
2063 }
2064 pthread_list_unlock();
2065 lck_mtx_destroy(&kwq->kw_lock, pthread_lck_grp);
2066 kfree(kwq, sizeof(struct ksyn_wait_queue));
2067 }
2068 }
2069 FREE(p->p_pthhash, M_PROC);
2070 p->p_pthhash = NULL;
2071}
2072
2073
2074 /* find the kernel wait queue; if not present, create one. Grants a reference. */
2075int
2076ksyn_wqfind(user_addr_t mutex, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, uint64_t tid, int flags, int wqtype, ksyn_wait_queue_t * kwqp)
2077{
2078 ksyn_wait_queue_t kwq;
2079 ksyn_wait_queue_t nkwq;
2080 struct pthhashhead * hashptr;
2081 uint64_t object = 0, offset = 0;
2082 uint64_t hashhint;
2083 proc_t p = current_proc();
2084 int retry = mgen & PTHRW_RETRYBIT;
2085 int i;
2086
2087 if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED)
2088 {
2089 (void)ksyn_findobj(mutex, &object, &offset);
2090 hashhint = object;
2091 hashptr = pth_glob_hashtbl;
2092 } else {
2093 hashptr = p->p_pthhash;
2094 }
2095
2096 //pthread_list_lock_spin();
2097 pthread_list_lock();
2098
2099 kwq = ksyn_wq_hash_lookup(mutex, p, flags, object, offset);
2100
2101 if (kwq != NULL) {
2102 kwq->kw_iocount++;
2103 if ((kwq->kw_pflags & KSYN_WQ_FLIST) != 0) {
2104 LIST_REMOVE(kwq, kw_list);
2105 kwq->kw_pflags &= ~KSYN_WQ_FLIST;
2106 }
2107 UPDATE_KWQ(kwq, mgen, ugen, rw_wc, tid, wqtype, retry);
2108 if (kwqp != NULL)
2109 *kwqp = kwq;
2110 pthread_list_unlock();
2111 return (0);
2112 }
2113
2114 pthread_list_unlock();
2115
2116 nkwq = kalloc(sizeof(struct ksyn_wait_queue));
2117 bzero(nkwq, sizeof(struct ksyn_wait_queue));
2118 nkwq->kw_addr = mutex;
2119 nkwq->kw_flags = flags;
2120 nkwq->kw_iocount = 1;
2121 nkwq->kw_object = object;
2122 nkwq->kw_offset = offset;
2123 nkwq->kw_type = (wqtype & KSYN_WQTYPE_MASK);
2124 TAILQ_INIT(&nkwq->kw_uthlist);
2125
2126 for (i=0; i< KSYN_QUEUE_MAX; i++)
2127 ksyn_queue_init(&nkwq->kw_ksynqueues[i]);
2128
2129 UPDATE_KWQ(nkwq, mgen, ugen, rw_wc, tid, wqtype, retry);
2130#if USE_WAITQUEUE
2131 wait_queue_init(&nkwq->kw_wq, SYNC_POLICY_FIFO);
2132#endif /* USE_WAITQUEUE */
2133 lck_mtx_init(&nkwq->kw_lock, pthread_lck_grp, pthread_lck_attr);
2134
2135 //pthread_list_lock_spin();
2136 pthread_list_lock();
2137	/* check whether it was already allocated while the list lock was dropped */
2138 kwq = ksyn_wq_hash_lookup(mutex, p, flags, object, offset);
2139
2140 if (kwq != NULL) {
2141 kwq->kw_iocount++;
2142 if ((kwq->kw_pflags & KSYN_WQ_FLIST) != 0) {
2143 LIST_REMOVE(kwq, kw_list);
2144 kwq->kw_pflags &= ~KSYN_WQ_FLIST;
2145 }
2146 UPDATE_KWQ(kwq, mgen, ugen, rw_wc, tid, wqtype, retry);
2147 if (kwqp != NULL)
2148 *kwqp = kwq;
2149 pthread_list_unlock();
2150 lck_mtx_destroy(&nkwq->kw_lock, pthread_lck_grp);
2151 kfree(nkwq, sizeof(struct ksyn_wait_queue));
2152 return (0);
2153 }
2154 kwq = nkwq;
2155
2156 if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED)
2157 {
2158 kwq->kw_pflags |= KSYN_WQ_SHARED;
2159 LIST_INSERT_HEAD(&hashptr[kwq->kw_object & pthhash], kwq, kw_hash);
2160 } else
2161 LIST_INSERT_HEAD(&hashptr[mutex & pthhash], kwq, kw_hash);
2162
2163 kwq->kw_pflags |= KSYN_WQ_INHASH;
2164
2165 pthread_list_unlock();
2166
2167 if (kwqp != NULL)
2168 *kwqp = kwq;
2169 return (0);
2170}
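/*
 * ksyn_wqfind() above uses the classic drop-the-lock-to-allocate, then
 * re-check pattern: kalloc() may block, so the pthread list lock cannot be
 * held across the allocation, and a second lookup is needed to catch a
 * racing allocator. A minimal sketch of the pattern (illustration only;
 * lookup(), insert_hash(), alloc_kwq() and free_kwq() are stand-ins for
 * the hash and allocation steps used above):
 */
#if 0	/* illustration only, not built */
	pthread_list_lock();
	if ((kwq = lookup(key)) != NULL) {
		kwq->kw_iocount++;			/* grant a reference */
		pthread_list_unlock();
		return (kwq);
	}
	pthread_list_unlock();
	nkwq = alloc_kwq(key);				/* may block */
	pthread_list_lock();
	if ((kwq = lookup(key)) != NULL) {		/* lost the race */
		kwq->kw_iocount++;
		pthread_list_unlock();
		free_kwq(nkwq);				/* discard the unused copy */
		return (kwq);
	}
	insert_hash(key, nkwq);
	pthread_list_unlock();
	return (nkwq);
#endif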
2171
2172 /* The reference taken by ksyn_wqfind() is dropped here. Starts the free process if needed. */
2173void
2174ksyn_wqrelease(ksyn_wait_queue_t kwq, ksyn_wait_queue_t ckwq)
2175{
2176 uint64_t deadline;
2177 struct timeval t;
2178 int sched = 0;
2179
2180
2181 //pthread_list_lock_spin();
2182 pthread_list_lock();
2183 kwq->kw_iocount--;
2184 if (kwq->kw_iocount == 0) {
2185 if ((kwq->kw_pre_rwwc == 0) && (kwq->kw_inqueue == 0)) {
2186 microuptime(&kwq->kw_ts);
2187 LIST_INSERT_HEAD(&pth_free_list, kwq, kw_list);
2188 kwq->kw_pflags |= KSYN_WQ_FLIST;
2189 }
2190 sched = 1;
2191 }
2192 if (ckwq != NULL){
2193 ckwq->kw_iocount--;
2194 if ( ckwq->kw_iocount == 0) {
2195 if ((ckwq->kw_pre_rwwc == 0) && (ckwq->kw_inqueue == 0)) {
2196 /* mark for free if we can */
2197 microuptime(&ckwq->kw_ts);
2198 LIST_INSERT_HEAD(&pth_free_list, ckwq, kw_list);
2199 ckwq->kw_pflags |= KSYN_WQ_FLIST;
2200 }
2201 sched = 1;
2202 }
2203 }
2204
2205 if (sched == 1 && psynch_cleanupset == 0) {
2206 psynch_cleanupset = 1;
2207 microuptime(&t);
2208 t.tv_sec += KSYN_CLEANUP_DEADLINE;
2209
2210 deadline = tvtoabstime(&t);
2211 thread_call_enter_delayed(psynch_thcall, deadline);
2212 }
2213 pthread_list_unlock();
2214}
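/*
 * The deferred free above relies on a preallocated thread_call. A sketch
 * of the one-time setup assumed here (it lives outside this section) and
 * of arming a single cleanup pass KSYN_CLEANUP_DEADLINE seconds out:
 */
#if 0	/* illustration only, not built */
	/* one-time init, e.g. alongside pth_global_hashinit() */
	psynch_thcall = thread_call_allocate(psynch_wq_cleanup, NULL);

	/* arm exactly one pending pass; psynch_cleanupset guards re-arming */
	struct timeval t;
	microuptime(&t);
	t.tv_sec += KSYN_CLEANUP_DEADLINE;
	thread_call_enter_delayed(psynch_thcall, tvtoabstime(&t));
#endif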
2215
2216 /* responsible for freeing the wait queues */
2217void
2218psynch_wq_cleanup(__unused void * param, __unused void * param1)
2219{
2220 ksyn_wait_queue_t kwq;
2221 struct timeval t;
2222 LIST_HEAD(, ksyn_wait_queue) freelist = {NULL};
2223 int count = 0, delayed = 0, diff;
2224 uint64_t deadline = 0;
2225
2226 //pthread_list_lock_spin();
2227 pthread_list_lock();
2228
2229 microuptime(&t);
2230
2231 LIST_FOREACH(kwq, &pth_free_list, kw_list) {
2232
2233 if (count > 100) {
2234 delayed = 1;
2235 break;
2236 }
2237 if ((kwq->kw_iocount != 0) && (kwq->kw_inqueue != 0)) {
2238			/* still in use; leave it on the freelist for now */
2239 continue;
2240 }
2241 diff = t.tv_sec - kwq->kw_ts.tv_sec;
2242 if (diff < 0)
2243 diff *= -1;
2244 if (diff >= KSYN_CLEANUP_DEADLINE) {
2245 /* out of hash */
2246 kwq->kw_pflags &= ~(KSYN_WQ_FLIST | KSYN_WQ_INHASH);
2247 LIST_REMOVE(kwq, kw_hash);
2248 LIST_REMOVE(kwq, kw_list);
2249 LIST_INSERT_HEAD(&freelist, kwq, kw_list);
2250 count ++;
2251 } else {
2252 delayed = 1;
2253 }
2254
2255 }
2256 if (delayed != 0) {
2257 t.tv_sec += KSYN_CLEANUP_DEADLINE;
2258
2259 deadline = tvtoabstime(&t);
2260 thread_call_enter_delayed(psynch_thcall, deadline);
2261 psynch_cleanupset = 1;
2262 } else
2263 psynch_cleanupset = 0;
2264
2265 pthread_list_unlock();
2266
2267
2268 while ((kwq = LIST_FIRST(&freelist)) != NULL) {
2269 LIST_REMOVE(kwq, kw_list);
2270 lck_mtx_destroy(&kwq->kw_lock, pthread_lck_grp);
2271 kfree(kwq, sizeof(struct ksyn_wait_queue));
2272 }
2273}
2274
2275
2276int
2277ksyn_block_thread_locked(ksyn_wait_queue_t kwq, uint64_t abstime, uthread_t uth)
2278{
2279 kern_return_t kret;
2280 int error = 0;
2281
2282 uth->uu_kwqqueue = (void *)kwq;
2283#if USE_WAITQUEUE
2284 kret = wait_queue_assert_wait64(&kwq->kw_wq, kwq->kw_addr, THREAD_ABORTSAFE, abstime);
2285#else /* USE_WAITQUEUE */
2286 assert_wait_deadline(&uth->uu_psynchretval, THREAD_ABORTSAFE, abstime);
2287#endif /* USE_WAITQUEUE */
2288 ksyn_wqunlock(kwq);
2289
2290 kret = thread_block(NULL);
2291 switch (kret) {
2292 case THREAD_TIMED_OUT:
2293 error = ETIMEDOUT;
2294 break;
2295 case THREAD_INTERRUPTED:
2296 error = EINTR;
2297 break;
2298 }
2299 return(error);
2300}
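/*
 * The wait above follows the standard Mach assert-then-block pattern:
 * assert the wait while still holding the lock that protects the wait
 * condition, drop the lock, then block. A generic sketch (illustration
 * only; `lock', `event' and `abstime' are placeholders):
 */
#if 0	/* illustration only, not built */
	wait_result_t wr;

	lck_mtx_lock(lock);				/* protects the wait condition */
	/* ... conclude that this thread must wait ... */
	assert_wait_deadline(event, THREAD_ABORTSAFE, abstime);	/* abstime 0 == no timeout */
	lck_mtx_unlock(lock);				/* drop before blocking */
	wr = thread_block(THREAD_CONTINUE_NULL);
	switch (wr) {
	case THREAD_AWAKENED:		/* thread_wakeup_prim() hit our event */
		break;
	case THREAD_TIMED_OUT:		/* deadline expired; mapped to ETIMEDOUT above */
		break;
	case THREAD_INTERRUPTED:	/* aborted; mapped to EINTR above */
		break;
	}
#endif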
2301
2302kern_return_t
2303#if USE_WAITQUEUE
2304ksyn_wakeup_thread(ksyn_wait_queue_t kwq, uthread_t uth)
2305#else /* USE_WAITQUEUE */
2306ksyn_wakeup_thread(__unused ksyn_wait_queue_t kwq, uthread_t uth)
2307#endif /* USE_WAITQUEUE */
2308{
2309 thread_t th;
2310 kern_return_t kret;
2311 th = uth->uu_context.vc_thread;
2312
2313#if USE_WAITQUEUE
2314 kret = wait_queue_wakeup64_thread(&kwq->kw_wq, kwq->kw_addr, th, THREAD_AWAKENED);
2315#else /* USE_WAITQUEUE */
2316 kret = thread_wakeup_prim((caddr_t)&uth->uu_psynchretval, TRUE, THREAD_AWAKENED);
2317#endif /* USE_WAITQUEUE */
2318
2319 if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
2320 panic("ksyn_wakeup_thread: panic waking up thread %x\n", kret);
2321
2322
2323
2324 return(kret);
2325}
2326
2327/* move from one waitqueue to another */
2328#if COND_MTX_WAITQUEUEMOVE
2329void
2330ksyn_move_wqthread( ksyn_wait_queue_t ckwq, ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t updateval, int diffgen, int nomutex)
2331#else /* COND_MTX_WAITQUEUEMOVE */
2332void
2333ksyn_move_wqthread( ksyn_wait_queue_t ckwq, __unused ksyn_wait_queue_t kwq, __unused uint32_t mgen, uint32_t updateval, __unused int diffgen, int nomutex)
2334#endif /* COND_MTX_WAITQUEUEMOVE */
2335{
2336 kern_return_t kret;
2337 uthread_t uth;
2338#if COND_MTX_WAITQUEUEMOVE
2339 int count = 0, error, kret;
2340 uint32_t nextgen = mgen;
2341#endif /* COND_MTX_WAITQUEUEMOVE */
2342 struct ksyn_queue kq;
2343 uint32_t upgen;
2344
2345 ksyn_queue_init(&kq);
2346#if USE_WAITQUEUE
2347 /* TBD wq move */
2348 kret = wait_queue_move_all(&ckwq->kw_wq, ckwq->kw_addr, &kwq->kw_wq, kwq->kw_addr);
2349#else /* USE_WAITQUEUE */
2350 /* no need to move as the thread is blocked at uthread address */
2351 kret = KERN_SUCCESS;
2352#endif /* USE_WAITQUEUE */
2353
2354 if (nomutex != 0)
2355 upgen = updateval | PTHRW_MTX_NONE;
2356 else
2357 upgen = updateval;
2358
2359 if (kret== KERN_SUCCESS) {
2360redrive:
2361 while ((uth = ksyn_queue_removefirst(&ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], ckwq)) != NULL) {
2362 if (nomutex != 0) {
2363#if COND_MTX_WAITQUEUEMOVE
2364 uth->uu_psynchretval = upgen;
2365#else /* COND_MTX_WAITQUEUEMOVE */
2366 uth->uu_psynchretval = 0;
2367 uth->uu_kwqqueue = NULL;
2368 kret = ksyn_wakeup_thread(ckwq, uth);
2369 if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
2370				panic("ksyn_move_wqthread: failed waking up thread\n");
2371 if (kret == KERN_NOT_WAITING)
2372 goto redrive;
2373#endif /* COND_MTX_WAITQUEUEMOVE */
2374 }
2375#if COND_MTX_WAITQUEUEMOVE
2376 else {
2377 count++;
2378 if (count >diffgen)
2379 panic("movethread inserting more than expected\n");
2380 TAILQ_INSERT_TAIL(&kq.ksynq_uthlist, uth, uu_mtxlist);
2381 }
2382#endif /* COND_MTX_WAITQUEUEMOVE */
2383
2384 }
2385 ksyn_wqunlock(ckwq);
2386
2387#if COND_MTX_WAITQUEUEMOVE
2388 if ( (nomutex == 0) && (count > 0)) {
2389 ksyn_wqlock(kwq);
2390 uth = TAILQ_FIRST(&kq.ksynq_uthlist);
2391 while(uth != NULL) {
2392 TAILQ_REMOVE(&kq.ksynq_uthlist, uth, uu_mtxlist);
2393 error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], nextgen, uth, SEQFIT);
2394 if (error != 0) {
2395 panic("movethread insert failed\n");
2396 }
2397 uth->uu_lockseq = nextgen;
2398 nextgen += PTHRW_INC;
2399 uth = TAILQ_FIRST(&kq.ksynq_uthlist);
2400 }
2401 ksyn_wqunlock(kwq);
2402 }
2403#endif /* COND_MTX_WAITQUEUEMOVE */
2404 } else
2405		panic("movethread: wq move all failed\n");
2406 return;
2407}
2408
2409 /* find the true shared object/offset for shared mutexes */
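/*
 * Rationale (illustrative): process-shared sync objects cannot be keyed by
 * user address, since two processes typically map the same file at
 * different addresses. Keying by (VM object id, offset) makes both
 * mappings resolve to the same kernel wait queue:
 *
 *	proc A: mutex mapped at hypothetical address 0x7f001000 \
 *	                                                          -> (object, offset)
 *	proc B: same mutex mapped at hypothetical address 0xa0002000 /
 *
 * which is why ksyn_wq_hash_lookup() indexes the global hash on
 * kw_object/kw_offset and the per-process hash on kw_addr.
 */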
2410int
2411ksyn_findobj(uint64_t mutex, uint64_t * objectp, uint64_t * offsetp)
2412{
2413 vm_page_info_basic_data_t info;
2414 kern_return_t kret;
2415 mach_msg_type_number_t count = VM_PAGE_INFO_BASIC_COUNT;
2416
2417 kret = vm_map_page_info(current_map(), mutex, VM_PAGE_INFO_BASIC,
2418 (vm_page_info_t)&info, &count);
2419
2420 if (kret != KERN_SUCCESS)
2421 return(EINVAL);
2422
2423 if (objectp != NULL)
2424 *objectp = (uint64_t)info.object_id;
2425 if (offsetp != NULL)
2426 *offsetp = (uint64_t)info.offset;
2427
2428 return(0);
2429}
2430
2431
2432 /* find the lowest sequence among kw_fr, kw_flr, kw_fwr and kw_fywr */
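/*
 * Worked example (illustrative): the comparison below is wrap-aware, so
 * the lowest queue head is not simply the smallest integer. With a reader
 * head at 0xfffffe00 and a writer head at 0x00000200, is_seqlower() treats
 * the reader as older (the two values are within half the sequence space
 * across the 32-bit wrap), so the reader wins and PTH_RW_TYPE_READ is
 * reported as the lowest type.
 */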
2433int
2434kwq_find_rw_lowest(ksyn_wait_queue_t kwq, int flags, uint32_t premgen, int * typep, uint32_t lowest[])
2435{
2436
2437 uint32_t kw_fr, kw_flr, kw_fwr, kw_fywr, low;
2438 int type = 0, lowtype, typenum[4];
2439 uint32_t numbers[4];
2440 int count = 0, i;
2441
2442
2443 if ((kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0)) {
2444 type |= PTH_RWSHFT_TYPE_READ;
2445 /* read entries are present */
2446 if (kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count != 0) {
2447 kw_fr = kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_firstnum;
2448 if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) && (is_seqlower(premgen, kw_fr) != 0))
2449 kw_fr = premgen;
2450 } else
2451 kw_fr = premgen;
2452
2453 lowest[KSYN_QUEUE_READ] = kw_fr;
2454 numbers[count]= kw_fr;
2455 typenum[count] = PTH_RW_TYPE_READ;
2456 count++;
2457 } else
2458 lowest[KSYN_QUEUE_READ] = 0;
2459
2460 if ((kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_LREADLOCK) != 0)) {
2461 type |= PTH_RWSHFT_TYPE_LREAD;
2462		/* long-read entries are present */
2463 if (kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count != 0) {
2464 kw_flr = kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_firstnum;
2465 if (((flags & KW_UNLOCK_PREPOST_LREADLOCK) != 0) && (is_seqlower(premgen, kw_flr) != 0))
2466 kw_flr = premgen;
2467 } else
2468 kw_flr = premgen;
2469
2470 lowest[KSYN_QUEUE_LREAD] = kw_flr;
2471 numbers[count]= kw_flr;
2472 typenum[count] = PTH_RW_TYPE_LREAD;
2473 count++;
2474 } else
2475 lowest[KSYN_QUEUE_LREAD] = 0;
2476
2477
2478 if ((kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0)) {
2479 type |= PTH_RWSHFT_TYPE_WRITE;
2480		/* writer entries are present */
2481 if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0) {
2482 kw_fwr = kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_firstnum;
2483 if (((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0) && (is_seqlower(premgen, kw_fwr) != 0))
2484 kw_fwr = premgen;
2485 } else
2486 kw_fwr = premgen;
2487
2488 lowest[KSYN_QUEUE_WRITER] = kw_fwr;
2489 numbers[count]= kw_fwr;
2490 typenum[count] = PTH_RW_TYPE_WRITE;
2491 count++;
2492 } else
2493 lowest[KSYN_QUEUE_WRITER] = 0;
2494
2495 if ((kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_YWRLOCK) != 0)) {
2496 type |= PTH_RWSHFT_TYPE_YWRITE;
2497		/* yielding-writer entries are present */
2498 if (kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0) {
2499 kw_fywr = kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_firstnum;
2500 if (((flags & KW_UNLOCK_PREPOST_YWRLOCK) != 0) && (is_seqlower(premgen, kw_fywr) != 0))
2501 kw_fywr = premgen;
2502 } else
2503 kw_fywr = premgen;
2504
2505 lowest[KSYN_QUEUE_YWRITER] = kw_fywr;
2506 numbers[count]= kw_fywr;
2507 typenum[count] = PTH_RW_TYPE_YWRITE;
2508 count++;
2509 } else
2510 lowest[KSYN_QUEUE_YWRITER] = 0;
2511
2512
2513
2514 if (count == 0)
2515		panic("kwq_find_rw_lowest: no entries in any queue\n");
2516
2517 low = numbers[0];
2518 lowtype = typenum[0];
2519 if (count > 1) {
2520 for (i = 1; i< count; i++) {
2521 if(is_seqlower(numbers[i] , low) != 0) {
2522 low = numbers[i];
2523 lowtype = typenum[i];
2524 }
2525 }
2526 }
2527 type |= lowtype;
2528
2529 if (typep != 0)
2530 *typep = type;
2531 return(0);
2532}
2533
2534 /* wake up readers and long readers up to the writer limits */
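/*
 * Example (illustrative): with waiting readers at sequences 0x100, 0x200
 * and 0x400 and limitread = 0x300, only the first two are woken unless
 * `allreaders' is set. The PTHRW_RW_HUNLOCK bit in `updatebits' is handed
 * to exactly one woken thread (the `resetbit' logic below), so a single
 * returning waiter takes responsibility for the unlock handshake.
 */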
2535int
2536ksyn_wakeupreaders(ksyn_wait_queue_t kwq, uint32_t limitread, int longreadset, int allreaders, uint32_t updatebits, int * wokenp)
2537{
2538 uthread_t uth;
2539 ksyn_queue_t kq;
2540 int failedwakeup = 0;
2541 int numwoken = 0;
2542 kern_return_t kret = KERN_SUCCESS;
2543 int resetbit = updatebits & PTHRW_RW_HUNLOCK;
2544 uint32_t lbits = 0;
2545
2546 lbits = updatebits;
2547 if (longreadset != 0) {
2548 /* clear all read and longreads */
2549 while ((uth = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_READ], kwq)) != NULL) {
2550 uth->uu_psynchretval = lbits;
2551 /* set on one thread */
2552 if (resetbit != 0) {
2553 lbits &= ~PTHRW_RW_HUNLOCK;
2554 resetbit = 0;
2555 }
2556 numwoken++;
2557 uth->uu_kwqqueue = NULL;
2558 kret = ksyn_wakeup_thread(kwq, uth);
2559 if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
2560 panic("ksyn_wakeupreaders: panic waking up readers\n");
2561 if (kret == KERN_NOT_WAITING) {
2562 failedwakeup++;
2563 }
2564 }
2565 while ((uth = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_LREAD], kwq)) != NULL) {
2566 uth->uu_psynchretval = lbits;
2567 uth->uu_kwqqueue = NULL;
2568 if (resetbit != 0) {
2569 lbits &= ~PTHRW_RW_HUNLOCK;
2570 resetbit = 0;
2571 }
2572 numwoken++;
2573 kret = ksyn_wakeup_thread(kwq, uth);
2574 if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
2575 panic("ksyn_wakeupreaders: panic waking up lreaders\n");
2576 if (kret == KERN_NOT_WAITING) {
2577 failedwakeup++;
2578 }
2579 }
2580 } else {
2581 kq = &kwq->kw_ksynqueues[KSYN_QUEUE_READ];
2582 while ((kq->ksynq_count != 0) && (allreaders || (is_seqlower(kq->ksynq_firstnum, limitread) != 0))) {
2583 uth = ksyn_queue_removefirst(kq, kwq);
2584 uth->uu_psynchretval = lbits;
2585 if (resetbit != 0) {
2586 lbits &= ~PTHRW_RW_HUNLOCK;
2587 resetbit = 0;
2588 }
2589 numwoken++;
2590 uth->uu_kwqqueue = NULL;
2591 kret = ksyn_wakeup_thread(kwq, uth);
2592 if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
2593 panic("ksyn_wakeupreaders: panic waking up readers\n");
2594 if (kret == KERN_NOT_WAITING) {
2595 failedwakeup++;
2596 }
2597 }
2598 }
2599
2600 if (wokenp != NULL)
2601 *wokenp = numwoken;
2602 return(failedwakeup);
2603}
2604
2605
2606 /* Handles the unlock grants for the next set of waiters on rw_unlock() or on arrival of all preposted waiters */
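/*
 * Grant policy at a glance (illustrative summary of the switch below),
 * driven by the lowest pending type computed by kwq_find_rw_lowest():
 *
 *	PTH_RW_TYPE_READ/LREAD	wake readers up to the lowest queued writer
 *	PTH_RW_TYPE_WRITE	grant one writer exclusively (PTHRW_EBIT)
 *	PTH_RW_TYPE_YWRITE	prefer lower-sequenced readers, else grant
 *				the yielding writer exclusively
 *
 * with PTHRW_WBIT/PTHRW_YBIT reported back whenever writers remain queued.
 */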
2607int
2608kwq_handle_unlock(ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t * updatep, int flags, int * blockp, uint32_t premgen)
2609{
2610 uint32_t low_reader, low_writer, low_ywriter, low_lreader,limitrdnum;
2611 int rwtype, error=0;
2612 int longreadset = 0, allreaders, failed;
2613 uint32_t updatebits;
2614 int prepost = flags & KW_UNLOCK_PREPOST;
2615 thread_t preth = THREAD_NULL;
2616 uthread_t uth;
2617 thread_t th;
2618 int woken = 0;
2619 int block = 1;
2620	uint32_t lowest[KSYN_QUEUE_MAX];	/* no need for upgrade as it is handled separately */
2621 kern_return_t kret = KERN_SUCCESS;
2622
2623#if _PSYNCH_TRACE_
2624#if defined(__i386__)
2625 KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_START, (uint32_t)kwq, mgen, premgen, 0, 0);
2626#endif
2627#endif /* _PSYNCH_TRACE_ */
2628 if (prepost != 0) {
2629 preth = current_thread();
2630 }
2631
2632 /* upgrade pending */
2633 if (is_rw_ubit_set(mgen)) {
2634 if (prepost != 0) {
2635 if((flags & KW_UNLOCK_PREPOST_UPGRADE) != 0) {
2636 /* upgrade thread calling the prepost */
2637 /* upgrade granted */
2638 block = 0;
2639 goto out;
2640 }
2641
2642 }
2643 if (kwq->kw_ksynqueues[KSYN_QUEUE_UPGRADE].ksynq_count > 0) {
2644 uth = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_UPGRADE], kwq);
2645 uth->uu_psynchretval = (mgen | PTHRW_EBIT) & ~PTHRW_UBIT;
2646 uth->uu_kwqqueue = NULL;
2647 kret = ksyn_wakeup_thread(kwq, uth);
2648 if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
2649 panic("kwq_handle_unlock: panic waking up the upgrade thread \n");
2650 if (kret == KERN_NOT_WAITING) {
2651 kwq->kw_pre_intrcount = 1; /* actually a count */
2652 kwq->kw_pre_intrseq = mgen;
2653 kwq->kw_pre_intrretbits = uth->uu_psynchretval;
2654 kwq->kw_pre_intrtype = PTH_RW_TYPE_UPGRADE;
2655 }
2656 error = 0;
2657 } else {
2658			panic("kwq_handle_unlock: unable to find the upgrade thread\n");
2659 }
2660 ksyn_wqunlock(kwq);
2661 goto out;
2662 }
2663
2664 error = kwq_find_rw_lowest(kwq, flags, premgen, &rwtype, lowest);
2665 if (error != 0)
2666		panic("rwunlock: failed to slot the next round of threads\n");
2667
2668#if _PSYNCH_TRACE_
2669#if defined(__i386__)
2670 KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq, 1, rwtype, lowest, 0);
2671#endif
2672#endif /* _PSYNCH_TRACE_ */
2673 low_reader = lowest[KSYN_QUEUE_READ];
2674 low_lreader = lowest[KSYN_QUEUE_LREAD];
2675 low_writer = lowest[KSYN_QUEUE_WRITER];
2676 low_ywriter = lowest[KSYN_QUEUE_YWRITER];
2677
2678
2679 updatebits = mgen & ~( PTHRW_EBIT | PTHRW_WBIT |PTHRW_YBIT | PTHRW_UBIT | PTHRW_LBIT);
2680
2681 longreadset = 0;
2682 allreaders = 0;
2683 switch (rwtype & PTH_RW_TYPE_MASK) {
2684 case PTH_RW_TYPE_LREAD:
2685 longreadset = 1;
2686 case PTH_RW_TYPE_READ: {
2687 limitrdnum = 0;
2688 if (longreadset == 0) {
2689 switch (rwtype & (PTH_RWSHFT_TYPE_WRITE | PTH_RWSHFT_TYPE_YWRITE)) {
2690 case PTH_RWSHFT_TYPE_WRITE:
2691 limitrdnum = low_writer;
2692 if (((rwtype & PTH_RWSHFT_TYPE_LREAD) != 0) &&
2693 (is_seqlower(low_lreader, low_writer) != 0)) {
2694 longreadset = 1;
2695 }
2696
2697 break;
2698 case PTH_RWSHFT_TYPE_YWRITE:
2699 /* all read ? */
2700 if (((rwtype & PTH_RWSHFT_TYPE_LREAD) != 0) &&
2701 (is_seqlower(low_lreader, low_ywriter) != 0)) {
2702 longreadset = 1;
2703 } else
2704 allreaders = 1;
2705 break;
2706 case (PTH_RWSHFT_TYPE_WRITE | PTH_RWSHFT_TYPE_YWRITE):
2707 limitrdnum = low_writer;
2708 if (((rwtype & PTH_RWSHFT_TYPE_LREAD) != 0) &&
2709 (is_seqlower(low_lreader, low_ywriter) != 0)) {
2710 longreadset = 1;
2711 }
2712 break;
2713 default: /* no writers at all */
2714 if ((rwtype & PTH_RWSHFT_TYPE_LREAD) != 0)
2715 longreadset = 1;
2716 else
2717 allreaders = 1;
2718 };
2719
2720 }
2721
2722 if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0)
2723 updatebits |= PTHRW_WBIT;
2724 else if ((rwtype & PTH_RWSHFT_TYPE_YWRITE) != 0)
2725 updatebits |= PTHRW_YBIT;
2726
2727 if (longreadset == 0) {
2728 if((prepost != 0) &&
2729 ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) &&
2730 ((allreaders != 0) || (is_seqlower(premgen, limitrdnum) != 0))) {
2731 block = 0;
2732 uth = current_uthread();
2733 uth->uu_psynchretval = updatebits;
2734 }
2735 } else {
2736 updatebits |= PTHRW_LBIT;
2737 if ((prepost != 0) &&
2738 ((flags & (KW_UNLOCK_PREPOST_READLOCK | KW_UNLOCK_PREPOST_LREADLOCK)) != 0)) {
2739 block = 0;
2740 uth = current_uthread();
2741 uth->uu_psynchretval = updatebits;
2742 }
2743 }
2744
2745 if (prepost != 0) {
2746 updatebits |= PTHRW_RW_HUNLOCK;
2747 }
2748
2749 failed = ksyn_wakeupreaders(kwq, limitrdnum, longreadset, allreaders, updatebits, &woken);
2750 if (failed != 0) {
2751 kwq->kw_pre_intrcount = failed; /* actually a count */
2752 kwq->kw_pre_intrseq = limitrdnum;
2753 kwq->kw_pre_intrretbits = updatebits;
2754 if (longreadset)
2755 kwq->kw_pre_intrtype = PTH_RW_TYPE_LREAD;
2756 else
2757 kwq->kw_pre_intrtype = PTH_RW_TYPE_READ;
2758 }
2759
2760			/* if we woke up no one and the current thread is returning, ensure it handles the unlock */
2761 if ((prepost != 0) && (woken == 0) && (block == 0)&& ((updatebits & PTHRW_RW_HUNLOCK) != 0)) {
2762 uth = current_uthread();
2763 uth->uu_psynchretval = updatebits;
2764 }
2765
2766 error = 0;
2767
2768 }
2769 break;
2770
2771 case PTH_RW_TYPE_WRITE: {
2772 updatebits |= PTHRW_EBIT;
2773 if (((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0) && (low_writer == premgen)) {
2774 block = 0;
2775 if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0)
2776 updatebits |= PTHRW_WBIT;
2777 else if ((rwtype & PTH_RWSHFT_TYPE_YWRITE) != 0)
2778 updatebits |= PTHRW_YBIT;
2779 th = preth;
2780 uth = get_bsdthread_info(th);
2781 uth->uu_psynchretval = updatebits;
2782 } else {
2783 /* we are not granting writelock to the preposting thread */
2784 uth = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwq);
2785
2786			/* if there are writers present, or a preposting write thread, then the W bit is to be set */
2787 if ((kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0) )
2788 updatebits |= PTHRW_WBIT;
2789 else if ((rwtype & PTH_RWSHFT_TYPE_YWRITE) != 0)
2790 updatebits |= PTHRW_YBIT;
2791 uth->uu_psynchretval = updatebits;
2792 uth->uu_kwqqueue = NULL;
2793 /* setup next in the queue */
2794 kret = ksyn_wakeup_thread(kwq, uth);
2795 if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
2796 panic("kwq_handle_unlock: panic waking up writer\n");
2797 if (kret == KERN_NOT_WAITING) {
2798 kwq->kw_pre_intrcount = 1; /* actually a count */
2799 kwq->kw_pre_intrseq = low_writer;
2800 kwq->kw_pre_intrretbits = updatebits;
2801 kwq->kw_pre_intrtype = PTH_RW_TYPE_WRITE;
2802 }
2803 error = 0;
2804 }
2805
2806 }
2807 break;
2808
2809 case PTH_RW_TYPE_YWRITE: {
2810 /* can reader locks be granted ahead of this write? */
2811 if ((rwtype & PTH_RWSHFT_TYPE_READ) != 0) {
2812 if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0)
2813 updatebits |= PTHRW_WBIT;
2814			else if ((rwtype & PTH_RWSHFT_TYPE_YWRITE) != 0)
2815 updatebits |= PTHRW_YBIT;
2816
2817 if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0) {
2818 /* is lowest reader less than the low writer? */
2819 if (is_seqlower(low_reader,low_writer) == 0)
2820 goto yielditis;
2821 if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) && (is_seqlower(premgen, low_writer) != 0)) {
2822 uth = current_uthread();
2823 uth->uu_psynchretval = updatebits;
2824 block = 0;
2825 }
2826 if (prepost != 0) {
2827 updatebits |= PTHRW_RW_HUNLOCK;
2828 }
2829
2830				/* there will be readers to wake up; no need to check for woken */
2831 failed = ksyn_wakeupreaders(kwq, low_writer, 0, 0, updatebits, NULL);
2832 if (failed != 0) {
2833 kwq->kw_pre_intrcount = failed; /* actually a count */
2834 kwq->kw_pre_intrseq = low_writer;
2835 kwq->kw_pre_intrretbits = updatebits;
2836 kwq->kw_pre_intrtype = PTH_RW_TYPE_READ;
2837 }
2838 error = 0;
2839 } else {
2840 /* wakeup all readers */
2841 if ((prepost != 0) && ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0)) {
2842 uth = current_uthread();
2843 uth->uu_psynchretval = updatebits;
2844 block = 0;
2845 }
2846 if (prepost != 0) {
2847 updatebits |= PTHRW_RW_HUNLOCK;
2848 }
2849 failed = ksyn_wakeupreaders(kwq, low_writer, 0, 1, updatebits, &woken);
2850 if (failed != 0) {
2851 kwq->kw_pre_intrcount = failed; /* actually a count */
2852 kwq->kw_pre_intrseq = kwq->kw_highseq;
2853 kwq->kw_pre_intrretbits = updatebits;
2854 kwq->kw_pre_intrtype = PTH_RW_TYPE_READ;
2855 }
2856				/* if we woke up no one and the current thread is returning, ensure it handles the unlock */
2857 if ((prepost != 0) && (woken ==0) && (block == 0)&& ((updatebits & PTHRW_RW_HUNLOCK) != 0)) {
2858 uth = current_uthread();
2859 uth->uu_psynchretval = updatebits;
2860 }
2861 error = 0;
2862 }
2863 } else {
2864yielditis:
2865			/* no reads, so grant yielding writes */
2866 updatebits |= PTHRW_EBIT;
2867
2868 if (((flags & KW_UNLOCK_PREPOST_YWRLOCK) != 0) && (low_writer == premgen)) {
2869 /* preposting yielding write thread is being granted exclusive lock */
2870
2871 block = 0;
2872
2873 if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0)
2874 updatebits |= PTHRW_WBIT;
2875 else if (kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0)
2876 updatebits |= PTHRW_YBIT;
2877
2878 th = preth;
2879 uth = get_bsdthread_info(th);
2880 uth->uu_psynchretval = updatebits;
2881 } else {
2882 /* we are granting yield writelock to some other thread */
2883 uth = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER], kwq);
2884
2885 if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0)
2886 updatebits |= PTHRW_WBIT;
2887				/* if there are ywriters present, or a preposting ywrite thread, then the Y bit is to be set */
2888 else if ((kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_YWRLOCK) != 0) )
2889 updatebits |= PTHRW_YBIT;
2890
2891 uth->uu_psynchretval = updatebits;
2892 uth->uu_kwqqueue = NULL;
2893
2894 kret = ksyn_wakeup_thread(kwq, uth);
2895 if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
2896 panic("kwq_handle_unlock : panic waking up readers\n");
2897 if (kret == KERN_NOT_WAITING) {
2898 kwq->kw_pre_intrcount = 1; /* actually a count */
2899 kwq->kw_pre_intrseq = low_ywriter;
2900 kwq->kw_pre_intrretbits = updatebits;
2901 kwq->kw_pre_intrtype = PTH_RW_TYPE_YWRITE;
2902 }
2903 error = 0;
2904 }
2905 }
2906 }
2907 break;
2908
2909 default:
2910 panic("rwunlock: invalid type for lock grants");
2911
2912 };
2913
2914 if (updatep != NULL)
2915 *updatep = updatebits;
2916
2917out:
2918 if (blockp != NULL)
2919 *blockp = block;
2920#if _PSYNCH_TRACE_
2921#if defined(__i386__)
2922 KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_END, (uint32_t)kwq, 0, 0, block, 0);
2923#endif
2924#endif /* _PSYNCH_TRACE_ */
2925 return(error);
2926}
2927
2928
2929/* handle downgrade actions */
2930int
2931kwq_handle_downgrade(ksyn_wait_queue_t kwq, uint32_t mgen, __unused int flags, __unused uint32_t premgen, __unused int * blockp)
2932{
2933 uint32_t updatebits, lowriter = 0;
2934 int longreadset, allreaders, count;
2935
2936 /* can handle downgrade now */
2937 updatebits = mgen;
2938
2939 longreadset = 0;
2940 allreaders = 0;
2941 if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count > 0) {
2942 lowriter = kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_firstnum;
2943 if (kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count > 0) {
2944 if (is_seqlower(kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_firstnum, lowriter) != 0)
2945 longreadset = 1;
2946 }
2947 } else {
2948 allreaders = 1;
2949 if (kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count > 0) {
2950 lowriter = kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_firstnum;
2951 if (kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count > 0) {
2952 if (is_seqlower(kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_firstnum, lowriter) != 0)
2953 longreadset = 1;
2954 }
2955 }
2956 }
2957
2958 count = ksyn_wakeupreaders(kwq, lowriter, longreadset, allreaders, updatebits, NULL);
2959 if (count != 0) {
2960 kwq->kw_pre_limrd = count;
2961 kwq->kw_pre_limrdseq = lowriter;
2962 kwq->kw_pre_limrdbits = lowriter;
2963 /* need to handle prepost */
2964 }
2965 return(0);
2966}
2967/************* Individual queue support routines ************************/
2968void
2969ksyn_queue_init(ksyn_queue_t kq)
2970{
2971 TAILQ_INIT(&kq->ksynq_uthlist);
2972 kq->ksynq_count = 0;
2973 kq->ksynq_firstnum = 0;
2974 kq->ksynq_lastnum = 0;
2975}
2976
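/*
 * Insert policy, with a worked example (illustrative): FIRSTFIT appends in
 * arrival order, while SEQFIT keeps the queue sorted by lock sequence.
 * Inserting sequences in arrival order 0x300, 0x100, 0x200 under SEQFIT
 * yields the queue [0x100, 0x200, 0x300] with ksynq_firstnum = 0x100 and
 * ksynq_lastnum = 0x300; under FIRSTFIT the list stays
 * [0x300, 0x100, 0x200] and only the first/last bounds are updated.
 */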
2977
2978int
2979ksyn_queue_insert(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t mgen, struct uthread * uth, int fit)
2980{
2981 uint32_t lockseq = mgen & PTHRW_COUNT_MASK;
2982 struct uthread * q_uth, * r_uth;
2983
2984 if (kq->ksynq_count == 0) {
2985 TAILQ_INSERT_HEAD(&kq->ksynq_uthlist, uth, uu_mtxlist);
2986 kq->ksynq_firstnum = lockseq;
2987 kq->ksynq_lastnum = lockseq;
2988 goto out;
2989 }
2990
2991 if (fit == FIRSTFIT) {
2992 /* firstfit, arriving order */
2993 TAILQ_INSERT_TAIL(&kq->ksynq_uthlist, uth, uu_mtxlist);
2994 if (is_seqlower (lockseq, kq->ksynq_firstnum) != 0)
2995 kq->ksynq_firstnum = lockseq;
2996 if (is_seqhigher (lockseq, kq->ksynq_lastnum) != 0)
2997 kq->ksynq_lastnum = lockseq;
2998 goto out;
2999 }
3000
3001 if ((lockseq == kq->ksynq_firstnum) || (lockseq == kq->ksynq_lastnum))
3002		panic("ksyn_queue_insert: two threads with the same lockseq\n");
3003
3004	/* check whether it belongs at the tail (next in sequence) */
3005 if (is_seqlower(kq->ksynq_lastnum, lockseq) != 0) {
3006 TAILQ_INSERT_TAIL(&kq->ksynq_uthlist, uth, uu_mtxlist);
3007 kq->ksynq_lastnum = lockseq;
3008 goto out;
3009 }
3010
3011 if (is_seqlower(lockseq, kq->ksynq_firstnum) != 0) {
3012 TAILQ_INSERT_HEAD(&kq->ksynq_uthlist, uth, uu_mtxlist);
3013 kq->ksynq_firstnum = lockseq;
3014 goto out;
3015 }
3016
3017	/* fall back to slow, linear insert */
3018 TAILQ_FOREACH_SAFE(q_uth, &kq->ksynq_uthlist, uu_mtxlist, r_uth) {
3019 if (is_seqhigher(q_uth->uu_lockseq, lockseq) != 0) {
3020 TAILQ_INSERT_BEFORE(q_uth, uth, uu_mtxlist);
3021 goto out;
3022 }
3023 }
3024
3025	panic("ksyn_queue_insert: failed to insert\n");
3026out:
3027 kq->ksynq_count++;
3028 kwq->kw_inqueue++;
3029 update_low_high(kwq, lockseq);
3030 return(0);
3031}
3032
3033struct uthread *
3034ksyn_queue_removefirst(ksyn_queue_t kq, ksyn_wait_queue_t kwq)
3035{
3036 uthread_t uth = NULL;
3037 uthread_t q_uth;
3038 uint32_t curseq;
3039
3040 if (kq->ksynq_count != 0) {
3041 uth = TAILQ_FIRST(&kq->ksynq_uthlist);
3042 TAILQ_REMOVE(&kq->ksynq_uthlist, uth, uu_mtxlist);
3043 curseq = uth->uu_lockseq & PTHRW_COUNT_MASK;
3044 kq->ksynq_count--;
3045 kwq->kw_inqueue--;
3046
3047 if(kq->ksynq_count != 0) {
3048 q_uth = TAILQ_FIRST(&kq->ksynq_uthlist);
3049 kq->ksynq_firstnum = (q_uth->uu_lockseq & PTHRW_COUNT_MASK);
3050 } else {
3051 kq->ksynq_firstnum = 0;
3052 kq->ksynq_lastnum = 0;
3053
3054 }
3055 if (kwq->kw_inqueue == 0) {
3056 kwq->kw_lowseq = 0;
3057 kwq->kw_highseq = 0;
3058 } else {
3059 if (kwq->kw_lowseq == curseq)
3060 kwq->kw_lowseq = find_nextlowseq(kwq);
3061 if (kwq->kw_highseq == curseq)
3062 kwq->kw_highseq = find_nexthighseq(kwq);
3063 }
3064 }
3065 return(uth);
3066}
3067
3068void
3069ksyn_queue_removeitem(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uthread_t uth)
3070{
3071 uthread_t q_uth;
3072 uint32_t curseq;
3073
3074 if (kq->ksynq_count > 0) {
3075 TAILQ_REMOVE(&kq->ksynq_uthlist, uth, uu_mtxlist);
3076 kq->ksynq_count--;
3077 if(kq->ksynq_count != 0) {
3078 q_uth = TAILQ_FIRST(&kq->ksynq_uthlist);
3079 kq->ksynq_firstnum = (q_uth->uu_lockseq & PTHRW_COUNT_MASK);
3080 } else {
3081 kq->ksynq_firstnum = 0;
3082 kq->ksynq_lastnum = 0;
3083
3084 }
3085 kwq->kw_inqueue--;
3086 curseq = uth->uu_lockseq & PTHRW_COUNT_MASK;
3087 if (kwq->kw_inqueue == 0) {
3088 kwq->kw_lowseq = 0;
3089 kwq->kw_highseq = 0;
3090 } else {
3091 if (kwq->kw_lowseq == curseq)
3092 kwq->kw_lowseq = find_nextlowseq(kwq);
3093 if (kwq->kw_highseq == curseq)
3094 kwq->kw_highseq = find_nexthighseq(kwq);
3095 }
3096 }
3097}
3098
3099
3100void
3101update_low_high(ksyn_wait_queue_t kwq, uint32_t lockseq)
3102{
3103 if (kwq->kw_inqueue == 1) {
3104 kwq->kw_lowseq = lockseq;
3105 kwq->kw_highseq = lockseq;
3106 } else {
3107 if (is_seqlower(lockseq, kwq->kw_lowseq) != 0)
3108 kwq->kw_lowseq = lockseq;
3109 if (is_seqhigher(lockseq, kwq->kw_highseq) != 0)
3110 kwq->kw_highseq = lockseq;
3111 }
3112}
3113
3114uint32_t
3115find_nextlowseq(ksyn_wait_queue_t kwq)
3116{
3117 uint32_t numbers[4];
3118 int count = 0, i;
3119 uint32_t lowest;
3120
3121 for(i = 0; i< KSYN_QUEUE_MAX; i++) {
3122 if (kwq->kw_ksynqueues[i].ksynq_count != 0) {
3123 numbers[count]= kwq->kw_ksynqueues[i].ksynq_firstnum;
3124 count++;
3125 }
3126 }
3127
3128 if (count == 0)
3129 return(0);
3130 lowest = numbers[0];
3131 if (count > 1) {
3132 for (i = 1; i< count; i++) {
3133			if (is_seqlower(numbers[i], lowest) != 0)
3134				lowest = numbers[i];
3135
3136 }
3137 }
3138 return(lowest);
3139}
3140
3141uint32_t
3142find_nexthighseq(ksyn_wait_queue_t kwq)
3143{
3144 uint32_t numbers[4];
3145 int count = 0, i;
3146 uint32_t highest;
3147
3148 for(i = 0; i< KSYN_QUEUE_MAX; i++) {
3149 if (kwq->kw_ksynqueues[i].ksynq_count != 0) {
3150 numbers[count]= kwq->kw_ksynqueues[i].ksynq_lastnum;
3151 count++;
3152 }
3153 }
3154
3155
3156
3157 if (count == 0)
3158 return(0);
3159 highest = numbers[0];
3160 if (count > 1) {
3161 for (i = 1; i< count; i++) {
3162 if(is_seqhigher(numbers[i], highest) != 0)
3163 highest = numbers[i];
3164
3165 }
3166 }
3167 return(highest);
3168}
3169
3170int
3171find_diff(uint32_t upto, uint32_t lowest)
3172{
3173 uint32_t diff;
3174
3175 if (upto == lowest)
3176 return(0);
3177 diff = diff_genseq(upto, lowest);
3178 diff = (diff >> PTHRW_COUNT_SHIFT);
3179 return(diff);
3180}
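/*
 * Worked example (illustrative): sequence words advance by PTHRW_INC per
 * waiter, with the low-order bits reserved for flags, so a raw difference
 * must be shifted down to count waiters. Assuming
 * PTHRW_INC == (1 << PTHRW_COUNT_SHIFT):
 *
 *	upto = 0x500, lowest = 0x200
 *	diff_genseq(upto, lowest) -> 0x300
 *	0x300 >> PTHRW_COUNT_SHIFT -> 3 waiters between the two sequences
 */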
3181
3182
3183int
3184find_seq_till(ksyn_wait_queue_t kwq, uint32_t upto, uint32_t nwaiters, uint32_t *countp)
3185{
3186 int i;
3187 uint32_t count = 0;
3188
3189
3190#if _PSYNCH_TRACE_
3191 KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_FSEQTILL | DBG_FUNC_START, 0, 0, upto, nwaiters, 0);
3192#endif /* _PSYNCH_TRACE_ */
3193
3194 for (i= 0; i< KSYN_QUEUE_MAX; i++) {
3195 count += ksyn_queue_count_tolowest(&kwq->kw_ksynqueues[i], upto);
3196#if _PSYNCH_TRACE_
3197 KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_FSEQTILL | DBG_FUNC_NONE, 0, 1, i, count, 0);
3198#endif /* _PSYNCH_TRACE_ */
3199 if (count >= nwaiters) {
3200 break;
3201 }
3202 }
3203
3204 if (countp != NULL) {
3205 *countp = count;
3206 }
3207#if _PSYNCH_TRACE_
3208 KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_FSEQTILL | DBG_FUNC_END, 0, 0, count, nwaiters, 0);
3209#endif /* _PSYNCH_TRACE_ */
3210 if (count >= nwaiters)
3211 return(1);
3212 else
3213 return(0);
3214}
3215
3216
3217uint32_t
3218ksyn_queue_count_tolowest(ksyn_queue_t kq, uint32_t upto)
3219{
3220 uint32_t i = 0;
3221 uthread_t uth, newuth;
3222 uint32_t curval;
3223
3224 /* if nothing or the first num is greater than upto, return none */
3225 if ((kq->ksynq_count == 0) || (is_seqhigher(kq->ksynq_firstnum, upto) != 0))
3226 return(0);
3227 if (upto == kq->ksynq_firstnum)
3228 return(1);
3229
3230 TAILQ_FOREACH_SAFE(uth, &kq->ksynq_uthlist, uu_mtxlist, newuth) {
3231 curval = (uth->uu_lockseq & PTHRW_COUNT_MASK);
3232 if (upto == curval) {
3233 i++;
3234 break;
3235 } else if (is_seqhigher(curval, upto) != 0) {
3236 break;
3237 } else {
3238 /* seq is lower */
3239 i++;
3240 }
3241 }
3242 return(i);
3243}
3244
3245 /* find the thread by sequence number and remove it from the queue */
3246uthread_t
3247ksyn_queue_find_seq(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t seq)
3248{
3249 uthread_t q_uth, r_uth;
3250	/* handles the case where the sequence wraps in the tail of the queue */
3251 TAILQ_FOREACH_SAFE(q_uth, &kq->ksynq_uthlist, uu_mtxlist, r_uth) {
3252 if (q_uth->uu_lockseq == seq) {
3253 ksyn_queue_removeitem(kwq, kq, q_uth);
3254 return(q_uth);
3255 }
3256 }
3257 return(NULL);
3258}
3259
3260#endif /* PSYNCH */