/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995-2005 Apple Computer, Inc. All Rights Reserved */
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/systm.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>
#include <sys/pthread_internal.h>

#include <mach/mach_types.h>
#include <mach/vm_prot.h>
#include <mach/semaphore.h>
#include <mach/sync_policy.h>
#include <mach/task.h>
#include <kern/kern_types.h>
#include <kern/task.h>
#include <kern/clock.h>
#include <mach/kern_return.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/thread_call.h>
#include <kern/kalloc.h>
#include <kern/processor.h>
#include <kern/affinity.h>
#include <kern/wait_queue.h>
#include <mach/mach_vm.h>
#include <mach/mach_param.h>
#include <mach/thread_policy.h>
#include <mach/message.h>
#include <mach/port.h>
#include <vm/vm_protos.h>
#include <vm/vm_map.h>
#include <mach/vm_region.h>

#include <libkern/OSAtomic.h>
#define _PSYNCH_TRACE_ 0		/* kdebug trace */
#define __TESTPANICS__ 0		/* panics for error conditions */
#define COND_MTX_WAITQUEUEMOVE 0	/* auto move from cvar wait queue to mutex waitqueue */

#if _PSYNCH_TRACE_
#define _PSYNCH_TRACE_MLWAIT	0x9000000
#define _PSYNCH_TRACE_MLDROP	0x9000004
#define _PSYNCH_TRACE_CVWAIT	0x9000008
#define _PSYNCH_TRACE_CVSIGNAL	0x900000c
#define _PSYNCH_TRACE_CVBROAD	0x9000010
#define _PSYNCH_TRACE_KMDROP	0x9000014
#define _PSYNCH_TRACE_RWRDLOCK	0x9000018
#define _PSYNCH_TRACE_RWLRDLOCK	0x900001c
#define _PSYNCH_TRACE_RWWRLOCK	0x9000020
#define _PSYNCH_TRACE_RWYWRLOCK	0x9000024
#define _PSYNCH_TRACE_RWUPGRADE	0x9000028
#define _PSYNCH_TRACE_RWDOWNGRADE	0x900002c
#define _PSYNCH_TRACE_RWUNLOCK	0x9000030
#define _PSYNCH_TRACE_RWUNLOCK2	0x9000034
#define _PSYNCH_TRACE_RWHANDLEU	0x9000038
#define _PSYNCH_TRACE_FSEQTILL	0x9000040

#define _PSYNCH_TRACE_UM_LOCK	0x9000060
#define _PSYNCH_TRACE_UM_UNLOCK	0x9000064
#define _PSYNCH_TRACE_UM_MHOLD	0x9000068
#define _PSYNCH_TRACE_UM_MDROP	0x900006c
#define _PSYNCH_TRACE_UM_CVWAIT	0x9000070
#define _PSYNCH_TRACE_UM_CVSIG	0x9000074
#define _PSYNCH_TRACE_UM_CVBRD	0x9000078
#endif /* _PSYNCH_TRACE_ */
lck_mtx_t * pthread_list_mlock;

u_long pthhash;
#define PTHHASH(addr)	(&pthashtbl[(addr) & pthhash])
extern LIST_HEAD(pthhashhead, ksyn_wait_queue) *pth_glob_hashtbl;
struct pthhashhead * pth_glob_hashtbl;

LIST_HEAD(, ksyn_wait_queue) pth_free_list;

static int PTH_HASHSIZE = 100;
#define SEQFIT 0
#define FIRSTFIT 1

struct ksyn_queue {
	TAILQ_HEAD(, uthread) ksynq_uthlist;
	uint32_t	ksynq_count;		/* number of entries in queue */
	uint32_t	ksynq_firstnum;		/* lowest seq in queue */
	uint32_t	ksynq_lastnum;		/* highest seq in queue */
};
#define KSYN_QUEUE_READ		0
#define KSYN_QUEUE_LREAD	1
#define KSYN_QUEUE_WRITER	2
#define KSYN_QUEUE_YWRITER	3
#define KSYN_QUEUE_UPGRADE	4
#define KSYN_QUEUE_MAX		5
struct ksyn_wait_queue {
	LIST_ENTRY(ksyn_wait_queue) kw_hash;
	LIST_ENTRY(ksyn_wait_queue) kw_list;
#if USE_WAITQUEUE
	struct wait_queue kw_wq;
#endif /* USE_WAITQUEUE */
	user_addr_t kw_addr;		/* user address of the synchronizer */
	uint64_t kw_object;		/* object backing in shared mode */
	uint64_t kw_offset;		/* offset inside the object in shared mode */
	int	kw_flags;		/* mutex, cvar options/flags */
	int	kw_pflags;		/* flags under listlock protection */
	struct timeval kw_ts;		/* timeval needed for upkeep before free */
	int	kw_iocount;		/* inuse reference */

	int	kw_type;		/* queue type like mutex, cvar, etc */
	uint32_t kw_inqueue;		/* num of waiters held */
	uint32_t kw_highseq;		/* highest seq in the queue */
	uint32_t kw_lowseq;		/* lowest seq in the queue */
	uint32_t kw_lastunlockseq;	/* the last seq that unlocked */
	uint32_t kw_pre_rwwc;		/* prepost count */
	uint32_t kw_pre_lockseq;	/* prepost target seq */
	uint32_t kw_pre_cvretval;	/* retval for cwait on prepost */
	uint32_t kw_pre_limrd;		/* prepost read only (rwlock) */
	uint32_t kw_pre_limrdseq;	/* prepost limit seq for reads (rwlock) */
	uint32_t kw_pre_limrdbits;	/* seqbit needed for updates on prepost */
	uint32_t kw_pre_intrcount;	/* prepost of missed wakeup due to intrs */
	uint32_t kw_pre_intrseq;	/* prepost of missed wakeup limit seq */
	uint32_t kw_pre_intrretbits;	/* return bits value for missed wakeup threads */
	uint32_t kw_pre_intrtype;	/* type of failed wakeups */

	TAILQ_HEAD(, uthread) kw_uthlist;	/* List of uthreads */
	struct ksyn_queue kw_ksynqueues[KSYN_QUEUE_MAX];	/* queues to hold threads */
	lck_mtx_t kw_lock;		/* mutex lock protecting this structure */
	struct ksyn_wait_queue * kw_attq;	/* attached queue (cvar->mutex, needed in prepost) */
};

typedef struct ksyn_queue * ksyn_queue_t;
typedef struct ksyn_wait_queue * ksyn_wait_queue_t;
#define PTHRW_EBIT		0x01
#define PTHRW_LBIT		0x02
#define PTHRW_YBIT		0x04
#define PTHRW_WBIT		0x08
#define PTHRW_UBIT		0x10
#define PTHRW_RETRYBIT		0x20
/* same as 0x20, shadow W bit for rwlock */
#define PTHRW_SHADOW_W		0x20

#define PTHRW_TRYLKBIT		0x40
#define PTHRW_RW_HUNLOCK	0x40	/* returning read thread responsible to handle unlock */

#define PTHRW_MTX_NONE		0x80
#define PTHRW_RW_INIT		0x80	/* reset on the lock bits */
/* same as 0x80, spurious rwlock unlock ret from kernel */
#define PTHRW_RW_SPURIOUS	0x80

#define PTHRW_INC		0x100

#define PTHRW_BIT_MASK		0x000000ff
#define PTHRW_COUNT_SHIFT	8
#define PTHRW_COUNT_MASK	0xffffff00
#define PTHRW_MAX_READERS	0xffffff00
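/*
 * Worked example of the layout above: a generation word of 0x00000309
 * splits into a sequence part of 0x300 (word & PTHRW_COUNT_MASK, i.e.
 * three PTHRW_INC steps) and state bits of 0x09 (word & PTHRW_BIT_MASK,
 * here PTHRW_EBIT | PTHRW_WBIT).
 */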
/* first contended seq that kernel sees */
#define KW_MTXFIRST_KSEQ	0x200
#define KW_CVFIRST_KSEQ		1
#define KW_RWFIRST_KSEQ		0x200

#define is_rw_ewubit_set(x) ((x & (PTHRW_EBIT | PTHRW_WBIT | PTHRW_UBIT)) != 0)
#define is_rw_lybit_set(x) ((x & (PTHRW_LBIT | PTHRW_YBIT)) != 0)
#define is_rw_ebit_set(x) ((x & PTHRW_EBIT) != 0)
#define is_rw_uebit_set(x) ((x & (PTHRW_EBIT | PTHRW_UBIT)) != 0)
#define is_rw_ubit_set(x) ((x & PTHRW_UBIT) != 0)
#define is_rw_either_ewyubit_set(x) ((x & (PTHRW_EBIT | PTHRW_WBIT | PTHRW_UBIT | PTHRW_YBIT)) != 0)
/* is x lower than y */
#define is_seqlower(x, y) ((x < y) || ((x - y) > (PTHRW_MAX_READERS/2)))
/* is x lower than or equal to y */
#define is_seqlower_eq(x, y) ((x <= y) || ((x - y) > (PTHRW_MAX_READERS/2)))

/* is x greater than y */
#define is_seqhigher(x, y) ((x > y) || ((y - x) > (PTHRW_MAX_READERS/2)))

static inline int diff_genseq(uint32_t x, uint32_t y)
{
	if (x > y) {
		return(x - y);
	} else {
		return((PTHRW_MAX_READERS - y) + x + PTHRW_INC);
	}
}
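/*
 * Worked example of the wraparound math above: sequences compare modulo
 * the 24-bit counter range, so a freshly wrapped value still ranks above
 * one near the top of the range. is_seqhigher(0x200, 0xffffff00) holds
 * because (0xffffff00 - 0x200) exceeds PTHRW_MAX_READERS/2, and
 * diff_genseq(0x200, 0xffffff00) = (0xffffff00 - 0xffffff00) + 0x200
 * + 0x100 = 0x300, i.e. three PTHRW_INC steps through the wrap.
 */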
#define TID_ZERO (uint64_t)0

/* bits needed in handling the rwlock unlock */
#define PTH_RW_TYPE_READ	0x01
#define PTH_RW_TYPE_LREAD	0x02
#define PTH_RW_TYPE_WRITE	0x04
#define PTH_RW_TYPE_YWRITE	0x08
#define PTH_RW_TYPE_UPGRADE	0x10
#define PTH_RW_TYPE_MASK	0xff
#define PTH_RW_TYPE_SHIFT	8

#define PTH_RWSHFT_TYPE_READ	0x0100
#define PTH_RWSHFT_TYPE_LREAD	0x0200
#define PTH_RWSHFT_TYPE_WRITE	0x0400
#define PTH_RWSHFT_TYPE_YWRITE	0x0800
#define PTH_RWSHFT_TYPE_MASK	0xff00
/*
 * Mutex protocol attributes
 */
#define PTHREAD_PRIO_NONE	0
#define PTHREAD_PRIO_INHERIT	1
#define PTHREAD_PRIO_PROTECT	2
#define PTHREAD_PROTOCOL_FLAGS_MASK	0x3

/*
 * Mutex type attributes
 */
#define PTHREAD_MUTEX_NORMAL		0
#define PTHREAD_MUTEX_ERRORCHECK	4
#define PTHREAD_MUTEX_RECURSIVE		8
#define PTHREAD_MUTEX_DEFAULT		PTHREAD_MUTEX_NORMAL
#define PTHREAD_TYPE_FLAGS_MASK		0xc

/*
 * Mutex pshared attributes
 */
#define PTHREAD_PROCESS_SHARED		0x10
#define PTHREAD_PROCESS_PRIVATE		0x20
#define PTHREAD_PSHARED_FLAGS_MASK	0x30

/*
 * Mutex policy attributes
 */
#define _PTHREAD_MUTEX_POLICY_NONE		0
#define _PTHREAD_MUTEX_POLICY_FAIRSHARE		0x040	/* 1 */
#define _PTHREAD_MUTEX_POLICY_FIRSTFIT		0x080	/* 2 */
#define _PTHREAD_MUTEX_POLICY_REALTIME		0x0c0	/* 3 */
#define _PTHREAD_MUTEX_POLICY_ADAPTIVE		0x100	/* 4 */
#define _PTHREAD_MUTEX_POLICY_PRIPROTECT	0x140	/* 5 */
#define _PTHREAD_MUTEX_POLICY_PRIINHERIT	0x180	/* 6 */
#define PTHREAD_POLICY_FLAGS_MASK		0x1c0

#define _PTHREAD_MTX_OPT_HOLDLOCK	0x200
#define _PTHREAD_MTX_OPT_NOHOLDLOCK	0x400
#define _PTHREAD_MTX_OPT_LASTDROP	(_PTHREAD_MTX_OPT_HOLDLOCK | _PTHREAD_MTX_OPT_NOHOLDLOCK)
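/*
 * Note: both option bits set together (the _PTHREAD_MTX_OPT_LASTDROP
 * test in psynch_cvsignal below) marks a last-waiter drop that carries
 * no mutex or thread association and is treated as a pure notification.
 */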
#define KSYN_WQ_INLIST	1
#define KSYN_WQ_INHASH	2
#define KSYN_WQ_SHARED	4
#define KSYN_WQ_FLIST	0x10	/* in free list to be freed after a short delay */

#define KSYN_CLEANUP_DEADLINE 10
int psynch_cleanupset;
thread_call_t psynch_thcall;

#define KSYN_WQTYPE_INWAIT	0x1000
#define KSYN_WQTYPE_MTX		0x1
#define KSYN_WQTYPE_CVAR	0x2
#define KSYN_WQTYPE_RWLOCK	0x4
#define KSYN_WQTYPE_SEMA	0x8
#define KSYN_WQTYPE_BARR	0x10
#define KSYN_WQTYPE_MASK	0xffff

#define KSYN_MTX_MAX 0x0fffffff

#define KW_UNLOCK_PREPOST		0x01
#define KW_UNLOCK_PREPOST_UPGRADE	0x02
#define KW_UNLOCK_PREPOST_DOWNGRADE	0x04
#define KW_UNLOCK_PREPOST_READLOCK	0x08
#define KW_UNLOCK_PREPOST_LREADLOCK	0x10
#define KW_UNLOCK_PREPOST_WRLOCK	0x20
#define KW_UNLOCK_PREPOST_YWRLOCK	0x40
#define CLEAR_PREPOST_BITS(kwq) {\
			kwq->kw_pre_lockseq = 0; \
			kwq->kw_pre_rwwc = 0; \
			kwq->kw_pre_cvretval = 0; \
		}

#define CLEAR_READ_PREPOST_BITS(kwq) {\
			kwq->kw_pre_limrd = 0; \
			kwq->kw_pre_limrdseq = 0; \
			kwq->kw_pre_limrdbits = 0; \
		}

#define CLEAR_INTR_PREPOST_BITS(kwq) {\
			kwq->kw_pre_intrcount = 0; \
			kwq->kw_pre_intrseq = 0; \
			kwq->kw_pre_intrretbits = 0; \
			kwq->kw_pre_intrtype = 0; \
		}
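/*
 * Prepost in a nutshell: a wakeup that arrives before its waiter is
 * recorded rather than lost. For example, a cvsignal finding an empty
 * queue bumps kw_pre_rwwc and stores the target sequence in
 * kw_pre_lockseq; a later cvwait whose lockseq is at or below that
 * target consumes the prepost (decrementing kw_pre_rwwc and clearing
 * the fields above once it hits zero) instead of blocking.
 */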
void pthread_list_lock(void);
void pthread_list_unlock(void);
void pthread_list_lock_spin(void);
void pthread_list_lock_convert_spin(void);
void ksyn_wqlock(ksyn_wait_queue_t kwq);
void ksyn_wqunlock(ksyn_wait_queue_t kwq);
ksyn_wait_queue_t ksyn_wq_hash_lookup(user_addr_t mutex, proc_t p, int flags, uint64_t object, uint64_t offset);
int ksyn_wqfind(user_addr_t mutex, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, uint64_t tid, int flags, int wqtype, ksyn_wait_queue_t * wq);
void ksyn_wqrelease(ksyn_wait_queue_t mkwq, ksyn_wait_queue_t ckwq);
int ksyn_block_thread_locked(ksyn_wait_queue_t kwq, uint64_t abstime, uthread_t uth);
kern_return_t ksyn_wakeup_thread(ksyn_wait_queue_t kwq, uthread_t uth);
void ksyn_move_wqthread(ksyn_wait_queue_t ckwq, ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t updateval, int diffgen, int nomutex);
extern thread_t port_name_to_thread(mach_port_name_t port_name);
extern int ksyn_findobj(uint64_t mutex, uint64_t * object, uint64_t * offset);
static void UPDATE_KWQ(ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, uint64_t tid, int wqtype, int retry);
void psynch_mutexdrop_internal(ksyn_wait_queue_t kwq, uint32_t lkseq, uint32_t ugen, int flags);

#if USE_WAITQUEUE
kern_return_t wait_queue_move_all(wait_queue_t from, event64_t eventfrom, wait_queue_t to, event64_t eventto);
kern_return_t wait_queue_move_thread(wait_queue_t from, event64_t eventfrom, thread_t th, wait_queue_t to, event64_t eventto, thread_t * mthp);
#endif /* USE_WAITQUEUE */
int kwq_handle_unlock(ksyn_wait_queue_t, uint32_t mgen, uint32_t * updatep, int flags, int * blockp, uint32_t premgen);
void ksyn_queue_init(ksyn_queue_t kq);
int ksyn_queue_insert(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t mgen, struct uthread * uth, int firstfit);
struct uthread * ksyn_queue_removefirst(ksyn_queue_t kq, ksyn_wait_queue_t kwq);
void ksyn_queue_removeitem(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uthread_t uth);
void update_low_high(ksyn_wait_queue_t kwq, uint32_t lockseq);
uint32_t find_nextlowseq(ksyn_wait_queue_t kwq);
uint32_t find_nexthighseq(ksyn_wait_queue_t kwq);
int find_seq_till(ksyn_wait_queue_t kwq, uint32_t upto, uint32_t nwaiters, uint32_t * countp);
int find_diff(uint32_t upto, uint32_t lowest);
uint32_t ksyn_queue_count_tolowest(ksyn_queue_t kq, uint32_t upto);
int ksyn_wakeupreaders(ksyn_wait_queue_t kwq, uint32_t limitread, int longreadset, int allreaders, uint32_t updatebits, int * wokenp);
int kwq_find_rw_lowest(ksyn_wait_queue_t kwq, int flags, uint32_t premgen, int * type, uint32_t lowest[]);
uthread_t ksyn_queue_find_seq(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t seq);
int kwq_handle_downgrade(ksyn_wait_queue_t kwq, uint32_t mgen, int flags, uint32_t premgen, int * blockp);
static void
UPDATE_KWQ(__unused ksyn_wait_queue_t kwq, __unused uint32_t mgen, __unused uint32_t ugen, __unused uint32_t rw_wc, __unused uint64_t tid, __unused int wqtype, __unused int retry)
{
}
/* to protect the hashes, iocounts, freelist */
void
pthread_list_lock(void)
{
	lck_mtx_lock(pthread_list_mlock);
}

void
pthread_list_lock_spin(void)
{
	lck_mtx_lock_spin(pthread_list_mlock);
}

void
pthread_list_lock_convert_spin(void)
{
	lck_mtx_convert_spin(pthread_list_mlock);
}

void
pthread_list_unlock(void)
{
	lck_mtx_unlock(pthread_list_mlock);
}

/* to protect the indiv queue */
void
ksyn_wqlock(ksyn_wait_queue_t kwq)
{
	lck_mtx_lock(&kwq->kw_lock);
}

void
ksyn_wqunlock(ksyn_wait_queue_t kwq)
{
	lck_mtx_unlock(&kwq->kw_lock);
}
/* routine to drop the mutex unlocks, used both for mutexunlock system call and drop during cond wait */
void
psynch_mutexdrop_internal(ksyn_wait_queue_t kwq, uint32_t lkseq, uint32_t ugen, int flags)
{
	uint32_t nextgen, low_writer, updatebits;
	int firstfit = flags & _PTHREAD_MUTEX_POLICY_FIRSTFIT;
	uthread_t uth;
	kern_return_t kret = KERN_SUCCESS;

	nextgen = (ugen + PTHRW_INC);

#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_KMDROP | DBG_FUNC_START, kwq, lkseq, ugen, flags, 0);
#endif /* _PSYNCH_TRACE_ */

	ksyn_wqlock(kwq);

redrive:
#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, kwq, 1, kwq->kw_inqueue, nextgen, 0);
#endif /* _PSYNCH_TRACE_ */
	if (kwq->kw_inqueue != 0) {
		updatebits = (kwq->kw_highseq & PTHRW_COUNT_MASK) | PTHRW_EBIT;
		kwq->kw_lastunlockseq = ugen;
		if (firstfit != 0) {
#if __TESTPANICS__
			panic("psynch_mutexdrop_internal: first fit mutex arrives, not enabled yet\n");
#endif /* __TESTPANICS__ */
			/* first fit, pick any one */
			uth = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwq);

			if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0)
				updatebits |= PTHRW_WBIT;
#if _PSYNCH_TRACE_
			KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, kwq, 2, uth, updatebits, 0);
#endif /* _PSYNCH_TRACE_ */

			uth->uu_psynchretval = updatebits;
			uth->uu_kwqqueue = NULL;

			kret = ksyn_wakeup_thread(kwq, uth);
			if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
				panic("psynch_mutexdrop_internal: panic unable to wakeup firstfit mutex thread\n");
			if (kret == KERN_NOT_WAITING)
				goto redrive;
		} else {
			/* handle fairshare */
			low_writer = kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_firstnum;
			low_writer &= PTHRW_COUNT_MASK;

			if (low_writer == nextgen) {
#if _PSYNCH_TRACE_
				KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, kwq, 3, low_writer, nextgen, 0);
#endif /* _PSYNCH_TRACE_ */
				/* next seq to be granted found */
				uth = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwq);
				if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0)
					updatebits |= PTHRW_WBIT;

				uth->uu_psynchretval = updatebits;
				uth->uu_kwqqueue = NULL;

				kret = ksyn_wakeup_thread(kwq, uth);
				if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
					panic("psynch_mutexdrop_internal: panic unable to wakeup fairshare mutex thread\n");
				if (kret == KERN_NOT_WAITING)
					goto redrive;
			} else if (is_seqhigher(low_writer, nextgen) != 0) {
#if _PSYNCH_TRACE_
				KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, kwq, 4, low_writer, nextgen, 0);
#endif /* _PSYNCH_TRACE_ */
				kwq->kw_pre_rwwc++;
				kwq->kw_pre_lockseq = (nextgen & PTHRW_COUNT_MASK);
			} else {
#if __TESTPANICS__
				panic("psynch_mutexdrop_internal: FS mutex unlock sequence higher than the lowest one in queue\n");
#endif /* __TESTPANICS__ */
#if _PSYNCH_TRACE_
				KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, kwq, 5, low_writer, nextgen, 0);
#endif /* _PSYNCH_TRACE_ */
				uth = ksyn_queue_find_seq(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], nextgen);
				if (uth != NULL) {
					/* next seq to be granted found */
					if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0)
						updatebits |= PTHRW_WBIT;

#if _PSYNCH_TRACE_
					KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, kwq, 6, updatebits, 0, 0);
#endif /* _PSYNCH_TRACE_ */
					uth->uu_psynchretval = updatebits;
					uth->uu_kwqqueue = NULL;

					kret = ksyn_wakeup_thread(kwq, uth);
					if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
						panic("psynch_mutexdrop_internal: panic unable to wakeup fairshare mutex thread\n");
					if (kret == KERN_NOT_WAITING)
						goto redrive;
				} else {
					/* next seq to be granted not found, prepost */
#if _PSYNCH_TRACE_
					KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, kwq, 7, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
					kwq->kw_pre_rwwc++;
					kwq->kw_pre_lockseq = (nextgen & PTHRW_COUNT_MASK);
				}
			}
		}
	} else {
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, kwq, 8, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
		/* if firstfit the last one could be spurious */
		if ((firstfit == 0) || ((lkseq & PTHRW_COUNT_MASK) != nextgen)) {
#if _PSYNCH_TRACE_
			KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_KMDROP | DBG_FUNC_NONE, kwq, 9, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
			kwq->kw_lastunlockseq = ugen;
			kwq->kw_pre_rwwc++;
			kwq->kw_pre_lockseq = (nextgen & PTHRW_COUNT_MASK);
		}
	}

	ksyn_wqunlock(kwq);

#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_KMDROP | DBG_FUNC_END, kwq, 0, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	ksyn_wqrelease(kwq, NULL);
}
/*
 * psynch_mutexwait: This system call is used for contended psynch mutexes to block.
 */
int
psynch_mutexwait(__unused proc_t p, struct psynch_mutexwait_args * uap, uint32_t * retval)
{
	user_addr_t mutex = uap->mutex;
	uint32_t mgen = uap->mgen;
	uint32_t ugen = uap->ugen;
	uint64_t tid = uap->tid;
	int flags = uap->flags;
	ksyn_wait_queue_t kwq;
	int error = 0;
	int ins_flags;
	uthread_t uth;
	int firstfit = flags & _PTHREAD_MUTEX_POLICY_FIRSTFIT;
	uint32_t lockseq, updatebits;

#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_START, (uint32_t)mutex, mgen, ugen, flags, 0);
#endif /* _PSYNCH_TRACE_ */

	uth = current_uthread();

	uth->uu_lockseq = uap->mgen;
	lockseq = (uap->mgen & PTHRW_COUNT_MASK);

	if (firstfit == 0) {
		ins_flags = SEQFIT;
	} else {
		/* first fit */
		ins_flags = FIRSTFIT;
	}

	error = ksyn_wqfind(mutex, mgen, ugen, 0, tid, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_MTX), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_END, (uint32_t)mutex, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	ksyn_wqlock(kwq);

	if ((kwq->kw_pre_rwwc != 0) && ((ins_flags == FIRSTFIT) || (lockseq == kwq->kw_pre_lockseq))) {
		/* got preposted lock */
		kwq->kw_pre_rwwc--;
		if (kwq->kw_pre_rwwc == 0) {
			CLEAR_PREPOST_BITS(kwq);
			kwq->kw_lastunlockseq = 0;
		} else {
#if __TESTPANICS__
			panic("psynch_mutexwait: more than one prepost %d\n", (kwq->kw_pre_rwwc + 1));
#endif /* __TESTPANICS__ */
			kwq->kw_pre_lockseq += PTHRW_INC;	/* look for next one */
		}
		if (kwq->kw_inqueue == 0) {
			updatebits = lockseq | PTHRW_EBIT;
		} else {
			updatebits = (kwq->kw_highseq & PTHRW_COUNT_MASK) | (PTHRW_EBIT | PTHRW_WBIT);
		}

		uth->uu_psynchretval = updatebits;
		uth->uu_kwqqueue = NULL;
#if __TESTPANICS__
		if ((updatebits & PTHRW_COUNT_MASK) == 0)
			panic("psynch_mutexwait: (prepost)returning 0 lseq in mutexwait with EBIT\n");
#endif /* __TESTPANICS__ */
		ksyn_wqunlock(kwq);
		*retval = updatebits;
		goto out;
	}

	error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], mgen, uth, ins_flags);
	if (error != 0)
		panic("psynch_mutexwait: failed to enqueue\n");

	error = ksyn_block_thread_locked(kwq, (uint64_t)0, uth);
	/* drops the wq lock */

	if (error != 0) {
		ksyn_wqlock(kwq);
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_NONE, (uint32_t)mutex, 2, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		if (uth->uu_kwqqueue != NULL)
			ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], uth);
		ksyn_wqunlock(kwq);
	} else {
		updatebits = uth->uu_psynchretval;
		*retval = updatebits;
#if __TESTPANICS__
		if ((updatebits & PTHRW_COUNT_MASK) == 0)
			panic("psynch_mutexwait: returning 0 lseq in mutexwait with EBIT\n");
#endif /* __TESTPANICS__ */
	}
out:
	ksyn_wqrelease(kwq, NULL);
#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_MLWAIT | DBG_FUNC_END, (uint32_t)mutex, 0, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
	return(error);
}
/*
 * psynch_mutexdrop: This system call is used for unlock postings on contended psynch mutexes.
 */
int
psynch_mutexdrop(__unused proc_t p, struct psynch_mutexdrop_args * uap, __unused uint32_t * retval)
{
	user_addr_t mutex = uap->mutex;
	uint32_t mgen = uap->mgen;
	uint32_t lkseq = mgen & PTHRW_COUNT_MASK;
	uint32_t ugen = uap->ugen;
	uint64_t tid = uap->tid;
	int flags = uap->flags;
	ksyn_wait_queue_t kwq;
	int error = 0;

#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_MLDROP | DBG_FUNC_START, (uint32_t)mutex, mgen, ugen, flags, 0);
#endif /* _PSYNCH_TRACE_ */
	error = ksyn_wqfind(mutex, mgen, ugen, 0, tid, flags, KSYN_WQTYPE_MTX, &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_MLDROP | DBG_FUNC_END, (uint32_t)mutex, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}
	psynch_mutexdrop_internal(kwq, lkseq, ugen, flags);
	/* drops the kwq reference */
#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_MLDROP | DBG_FUNC_END, (uint32_t)mutex, 0, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
	return(0);
}
/*
 * psynch_cvbroad: This system call is used for broadcast posting on blocked waiters of psynch cvars.
 */
int
psynch_cvbroad(__unused proc_t p, struct psynch_cvbroad_args * uap, int * retval)
{
	user_addr_t cond = uap->cv;
	uint32_t cgen = uap->cvgen;
	uint32_t diffgen = uap->diffgen;
	uint32_t mgen = uap->mgen;
	int flags = uap->flags;
	ksyn_wait_queue_t kwq, ckwq;
	int error = 0;
#if COND_MTX_WAITQUEUEMOVE
	int mutexowned = flags & _PTHREAD_MTX_OPT_HOLDLOCK;
	int nomutex = flags & _PTHREAD_MTX_OPT_NOHOLDLOCK;
	user_addr_t mutex = uap->mutex;
	uint32_t ugen = uap->ugen;
	uint64_t tid = uap->tid;
	uthread_t uth;
	kern_return_t kret = KERN_SUCCESS;
#else /* COND_MTX_WAITQUEUEMOVE */
	int nomutex = _PTHREAD_MTX_OPT_NOHOLDLOCK;
#endif /* COND_MTX_WAITQUEUEMOVE */
	uint32_t nextgen, ngen;
	int updatebits = 0;

#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_START, (uint32_t)cond, (uint32_t)0, cgen, mgen, 0);
#endif /* _PSYNCH_TRACE_ */
	error = ksyn_wqfind(cond, cgen, cgen, 0, 0, flags, KSYN_WQTYPE_CVAR, &ckwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_END, (uint32_t)cond, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

#if COND_MTX_WAITQUEUEMOVE
	ngen = mgen + (PTHRW_INC * diffgen);
	if (nomutex == 0) {
		error = ksyn_wqfind(mutex, ngen, ugen, 0, tid, flags, KSYN_WQTYPE_MTX, &kwq);
		if (error != 0) {
			kwq = NULL;
			goto out;
		}
	} else
		kwq = NULL;
#else /* COND_MTX_WAITQUEUEMOVE */
	nomutex = _PTHREAD_MTX_OPT_NOHOLDLOCK;
	kwq = NULL;
	ngen = 0;
#endif /* COND_MTX_WAITQUEUEMOVE */

	ksyn_wqlock(ckwq);
#if COND_MTX_WAITQUEUEMOVE
redrive:
#endif /* COND_MTX_WAITQUEUEMOVE */
	if (diffgen > ckwq->kw_inqueue) {
		ckwq->kw_pre_rwwc = diffgen - ckwq->kw_inqueue;
		ckwq->kw_pre_lockseq = cgen & PTHRW_BIT_MASK;
		updatebits = ckwq->kw_pre_rwwc;		/* unused mutex refs */
		nextgen = (mgen + (ckwq->kw_pre_rwwc * PTHRW_INC));
	} else {
		updatebits = 0;
		nextgen = mgen + PTHRW_INC;
	}

	if (ckwq->kw_inqueue != 0) {
#if COND_MTX_WAITQUEUEMOVE
		if (mutexowned != 0) {
#if _PSYNCH_TRACE_
			KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_NONE, (uint32_t)cond, 0, 1, ckwq->kw_inqueue, 0);
#endif /* _PSYNCH_TRACE_ */
			uth = ksyn_queue_removefirst(&ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], ckwq);
			uth->uu_psynchretval = ngen;
			uth->uu_kwqqueue = NULL;

			kret = ksyn_wakeup_thread(ckwq, uth);
			if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
				panic("cvbroad: failed to remove\n");
			if (kret == KERN_NOT_WAITING) {
				/*
				 * trying to wake one thread to return, so if
				 * failed to wakeup get the next one..
				 */
				goto redrive;
			}
			nextgen = nextgen + PTHRW_INC;
			diffgen -= 1;
		}
#else /* COND_MTX_WAITQUEUEMOVE */
		updatebits = 0;
#endif /* COND_MTX_WAITQUEUEMOVE */

		/* nomutex case or in mutexowned case after the first one */
		/* move them all to the mutex waitqueue */
		if ((ckwq->kw_inqueue != 0) && (diffgen > 0)) {
			/* at least one more posting needed and there are waiting threads */
			/* drops the ckwq lock */
#if _PSYNCH_TRACE_
			KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_NONE, (uint32_t)cond, 0, 2, diffgen, 0);
#endif /* _PSYNCH_TRACE_ */
			/* move threads from ckwq to kwq if COND_MTX_WAITQUEUEMOVE, else wakeup */
			ksyn_move_wqthread(ckwq, kwq, nextgen, ngen, diffgen, nomutex);
		} else
			ksyn_wqunlock(ckwq);
	} else {
		/* no need for prepost as it is covered before */
		ksyn_wqunlock(ckwq);
	}

	if (error == 0) {
		*retval = updatebits;
	}

#if COND_MTX_WAITQUEUEMOVE
out:
#endif /* COND_MTX_WAITQUEUEMOVE */
	ksyn_wqrelease(ckwq, kwq);
#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVBROAD | DBG_FUNC_END, (uint32_t)cond, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */

	return(error);
}
/*
 * psynch_cvsignal: This system call is used for signalling the blocked waiters of psynch cvars.
 */
int
psynch_cvsignal(__unused proc_t p, struct psynch_cvsignal_args * uap, int * retval)
{
	user_addr_t cond = uap->cv;
	uint32_t cgen = uap->cvgen;
	uint32_t cugen = uap->cvugen;
	uint32_t mgen = uap->mgen;
	int threadport = uap->thread_port;
	int flags = uap->flags;
	ksyn_wait_queue_t kwq, ckwq;
	int error = 0;
	uthread_t uth;
	kern_return_t kret;
#if USE_WAITQUEUE
	thread_t th = THREAD_NULL, mth;
#else /* USE_WAITQUEUE */
	thread_t th = THREAD_NULL;
#endif /* USE_WAITQUEUE */
#if COND_MTX_WAITQUEUEMOVE
	user_addr_t mutex = uap->mutex;
	uint32_t ugen = uap->ugen;
	int mutexowned = flags & _PTHREAD_MTX_OPT_HOLDLOCK;
	int nomutex = flags & _PTHREAD_MTX_OPT_NOHOLDLOCK;
#else /* COND_MTX_WAITQUEUEMOVE */
	int nomutex = _PTHREAD_MTX_OPT_NOHOLDLOCK;
#endif /* COND_MTX_WAITQUEUEMOVE */
	uint32_t retbits, ngen, lockseq;

	kwq = NULL;
	retbits = 0;

#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_START, (uint32_t)cond, (uint32_t)0, cgen, mgen, 0);
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_NONE, (uint32_t)cond, (uint32_t)cugen, flags, mgen, 0);
#endif /* _PSYNCH_TRACE_ */

	error = ksyn_wqfind(cond, cgen, cugen, 0, 0, flags, KSYN_WQTYPE_CVAR, &ckwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_END, (uint32_t)cond, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	if ((flags & _PTHREAD_MTX_OPT_LASTDROP) == _PTHREAD_MTX_OPT_LASTDROP) {
		ksyn_wqlock(ckwq);
		lockseq = cgen & PTHRW_COUNT_MASK;
		/* do we need to check for lockseq as this is from last waiter, may be race ? */
		if ((ckwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, ckwq->kw_pre_lockseq) != 0)) {
			ckwq->kw_pre_rwwc--;
			if (ckwq->kw_pre_rwwc == 0)
				CLEAR_PREPOST_BITS(ckwq);
		}
		ksyn_wqunlock(ckwq);
		/* no mutex or thread is associated with this, just notification */
		error = 0;
		goto out;
	}

	ngen = mgen + PTHRW_INC;

#if COND_MTX_WAITQUEUEMOVE
	if (nomutex == 0) {
		/* mutex was not operated on, ignore it */
		error = ksyn_wqfind(mutex, ngen, ugen, 0, 0, flags, KSYN_WQTYPE_MTX, &kwq);
		if (error != 0) {
			kwq = NULL;
			goto out;
		}
	}
#endif /* COND_MTX_WAITQUEUEMOVE */

	if (threadport != 0) {
		th = (thread_t)port_name_to_thread((mach_port_name_t)threadport);
		if (th == THREAD_NULL) {
			error = ESRCH;
			goto out;
		}
	}

	ksyn_wqlock(ckwq);
redrive:
	if (ckwq->kw_inqueue != 0) {
#if COND_MTX_WAITQUEUEMOVE
		if ((mutexowned != 0) || (nomutex != 0)) {
#if _PSYNCH_TRACE_
			KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_NONE, (uint32_t)cond, 0, 1, ckwq->kw_inqueue, 0);
#endif /* _PSYNCH_TRACE_ */
			if (th != THREAD_NULL) {
				uth = get_bsdthread_info(th);
				if (nomutex != 0)
					ngen |= PTHRW_MTX_NONE;
				uth->uu_psynchretval = ngen;
				uth->uu_kwqqueue = NULL;
				ksyn_queue_removeitem(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], uth);
				kret = ksyn_wakeup_thread(ckwq, uth);
				if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
					panic("psynch_cvsignal: panic waking in cvsignal\n");
				if (kret == KERN_NOT_WAITING) {
					if (threadport != 0) {
						error = 0;
					} else
						goto redrive;
				}
			} else {
				uth = ksyn_queue_removefirst(&ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], ckwq);
				if (nomutex != 0)
					ngen |= PTHRW_MTX_NONE;
				uth->uu_psynchretval = ngen;
				uth->uu_kwqqueue = NULL;
				kret = ksyn_wakeup_thread(ckwq, uth);
				if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
					panic("psynch_cvsignal: panic waking in cvsignal\n");
				if (kret == KERN_NOT_WAITING) {
					if (threadport != 0) {
						error = 0;
					} else
						goto redrive;
				}
			}
			ksyn_wqunlock(ckwq);
		} else {
#endif /* COND_MTX_WAITQUEUEMOVE */
			/* need to move a thread to another queue */
#if _PSYNCH_TRACE_
			KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_NONE, (uint32_t)cond, 0, 2, ckwq->kw_inqueue, 0);
#endif /* _PSYNCH_TRACE_ */
			if (th != THREAD_NULL) {
				uth = get_bsdthread_info(th);
				/* if given thread not blocked in cvwait, return error */
				if (uth->uu_kwqqueue != ckwq) {
					error = EINVAL;
					ksyn_wqunlock(ckwq);
					goto out;
				}
				ksyn_queue_removeitem(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], uth);
			} else {
				uth = ksyn_queue_removefirst(&ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], ckwq);
				if (uth == NULL)
					panic("cvsign: null uthread after rem");
			}
#if COND_MTX_WAITQUEUEMOVE
			ksyn_wqunlock(ckwq);
#else /* COND_MTX_WAITQUEUEMOVE */
			uth->uu_psynchretval = 0;
			uth->uu_kwqqueue = NULL;
			kret = ksyn_wakeup_thread(ckwq, uth);
			if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
				panic("psynch_cvsignal: panic waking in cvsignal\n");
			if (kret == KERN_NOT_WAITING) {
				if (threadport == 0)
					goto redrive;
			}
			ksyn_wqunlock(ckwq);
			error = 0;
#endif /* COND_MTX_WAITQUEUEMOVE */

#if COND_MTX_WAITQUEUEMOVE
			ksyn_wqlock(kwq);
			ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], ngen, uth, SEQFIT);
#if USE_WAITQUEUE
			kret = wait_queue_move_thread(&ckwq->kw_wq, ckwq->kw_addr, th, &kwq->kw_wq, kwq->kw_addr, &mth);
			if (kret == KERN_SUCCESS) {
				if (mth != THREAD_NULL) {
					uth = (struct uthread *)get_bsdthread_info(mth);
					uth->uu_lockseq = ngen;
					TAILQ_INSERT_TAIL(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_uthlist, uth, uu_mtxlist);
				}
			}
#else /* USE_WAITQUEUE */
			/* no need to move anything, just update the sequence */
			uth->uu_lockseq = ngen;
#endif /* USE_WAITQUEUE */
			ksyn_wqunlock(kwq);
		}
#endif /* COND_MTX_WAITQUEUEMOVE */
	} else {
		/* no thread waiting */
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_NONE, (uint32_t)cond, 0, 3, ckwq->kw_inqueue, 0);
#endif /* _PSYNCH_TRACE_ */
		if (threadport != 0) {
			error = ESRCH;
			ksyn_wqunlock(ckwq);
			goto out;
		}
		ckwq->kw_pre_rwwc++;
		ckwq->kw_attq = kwq;
		ckwq->kw_pre_lockseq = cgen & PTHRW_BIT_MASK;
		ckwq->kw_pre_cvretval = ngen;
		ksyn_wqunlock(ckwq);
	}
	/* ckwq is unlocked here */

out:
	ksyn_wqrelease(ckwq, kwq);
	if (th != THREAD_NULL)
		thread_deallocate(th);
#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVSIGNAL | DBG_FUNC_END, (uint32_t)cond, 0, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
	return(error);
}
/*
 * psynch_cvwait: This system call is used for psynch cvar waiters to block in kernel.
 */
int
psynch_cvwait(__unused proc_t p, struct psynch_cvwait_args * uap, uint32_t * retval)
{
	user_addr_t cond = uap->cv;
	uint32_t cgen = uap->cvgen;
	uint32_t cugen = uap->cvugen;
	user_addr_t mutex = uap->mutex;
	uint32_t mgen = 0, ugen;
	int flags = 0;
	ksyn_wait_queue_t kwq, ckwq;
	int error = 0;
	uint64_t abstime = 0;
	uint32_t lockseq, updatebits;
	struct timespec ts;
	uthread_t uth;

	/* for conformance reasons */
	__pthread_testcancel(0);

#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_START, (uint32_t)cond, (uint32_t)mutex, cgen, mgen, 0);
#endif /* _PSYNCH_TRACE_ */

	if ((uap->usec & 0xc0000000) != 0) {
		if (uap->usec & 0x40000000)
			flags |= PTHREAD_PROCESS_SHARED;
		if (uap->usec & 0x80000000)
			flags |= _PTHREAD_MUTEX_POLICY_FIRSTFIT;
	}
	error = ksyn_wqfind(cond, cgen, cugen, 0, 0, flags, KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INWAIT, &ckwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_END, (uint32_t)cond, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	if (mutex != (user_addr_t)0) {
		mgen = uap->mgen;
		ugen = uap->ugen;
		error = ksyn_wqfind(mutex, mgen, ugen, 0, 0, flags, KSYN_WQTYPE_MTX, &kwq);
		if (error != 0) {
			goto out;
		}
		psynch_mutexdrop_internal(kwq, mgen, ugen, flags);
		/* drops kwq reference */
	}

	uth = current_uthread();
	uth->uu_lockseq = cgen;
	lockseq = (cgen & PTHRW_COUNT_MASK);

	if (uap->sec != 0 || (uap->usec & 0x3fffffff) != 0) {
		ts.tv_sec = uap->sec;
		ts.tv_nsec = (uap->usec & 0x3fffffff) * 1000;	/* low 30 bits carry microseconds */
		nanoseconds_to_absolutetime((uint64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec, &abstime);
		clock_absolutetime_interval_to_deadline(abstime, &abstime);
	}

	ksyn_wqlock(ckwq);
	if ((ckwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, ckwq->kw_pre_lockseq) != 0)) {
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, 0, 1, 0, 0);
#endif /* _PSYNCH_TRACE_ */

#if COND_MTX_WAITQUEUEMOVE
		updatebits = ckwq->kw_pre_cvretval | PTHRW_MTX_NONE;
#else /* COND_MTX_WAITQUEUEMOVE */
		updatebits = 0;
#endif /* COND_MTX_WAITQUEUEMOVE */
		ckwq->kw_pre_rwwc--;
		if (ckwq->kw_pre_rwwc == 0)
			CLEAR_PREPOST_BITS(ckwq);
		*retval = updatebits;
		error = 0;
		ksyn_wqunlock(ckwq);
		goto out;
	}

#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, 0, 2, cgen, 0);
#endif /* _PSYNCH_TRACE_ */
	error = ksyn_queue_insert(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], cgen, uth, FIRSTFIT);
	if (error != 0)
		panic("psynch_cvwait: failed to enqueue\n");
	error = ksyn_block_thread_locked(ckwq, abstime, uth);
	/* drops the lock */

	if (error != 0) {
		ksyn_wqlock(ckwq);
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, 0, 3, error, 0);
#endif /* _PSYNCH_TRACE_ */
		if (uth->uu_kwqqueue != NULL) {
			ksyn_queue_removeitem(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], uth);
		}
		ksyn_wqunlock(ckwq);
	} else {
		*retval = uth->uu_psynchretval;
	}
out:
#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_CVWAIT | DBG_FUNC_END, (uint32_t)cond, 0, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
	ksyn_wqrelease(ckwq, NULL);
	return(error);
}
/* ***************** pthread_rwlock ************************ */

/*
 * psynch_rw_rdlock: This system call is used for psynch rwlock readers to block.
 */
int
psynch_rw_rdlock(__unused proc_t p, struct psynch_rw_rdlock_args * uap, uint32_t * retval)
{
	user_addr_t rwlock = uap->rwlock;
	uint32_t lgen = uap->lgenval;
	uint32_t ugen = uap->ugenval;
	uint32_t rw_wc = uap->rw_wc;
	//uint64_t tid = uap->tid;
	int flags = uap->flags;
	int error = 0, block;
	uint32_t lockseq = 0, updatebits = 0, preseq = 0;
	ksyn_wait_queue_t kwq;
	uthread_t uth;

#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
	uth = current_uthread();

	/* preserve the seq number */
	uth->uu_lockseq = lgen;
	lockseq = lgen & PTHRW_COUNT_MASK;

	error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	ksyn_wqlock(kwq);

	/* handle first the missed wakeups */
	if ((kwq->kw_pre_intrcount != 0) &&
		((kwq->kw_pre_intrtype == PTH_RW_TYPE_READ) || (kwq->kw_pre_intrtype == PTH_RW_TYPE_LREAD)) &&
		(is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {

		kwq->kw_pre_intrcount--;
		uth->uu_psynchretval = kwq->kw_pre_intrretbits;
		if (kwq->kw_pre_intrcount == 0)
			CLEAR_INTR_PREPOST_BITS(kwq);
		ksyn_wqunlock(kwq);
		goto out;
	}

	/* handle unlock2/downgrade first */
	if ((kwq->kw_pre_limrd != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_limrdseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 1, kwq->kw_pre_limrd, kwq->kw_pre_limrdseq, 0);
#endif /* _PSYNCH_TRACE_ */
		kwq->kw_pre_limrd--;
		/* acquired the locks, so return */
		uth->uu_psynchretval = kwq->kw_pre_limrdbits;
		if (kwq->kw_pre_limrd == 0)
			CLEAR_READ_PREPOST_BITS(kwq);
		ksyn_wqunlock(kwq);
		goto out;
	}

	if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
		kwq->kw_pre_rwwc--;
		if (kwq->kw_pre_rwwc == 0) {
			preseq = kwq->kw_pre_lockseq;
			CLEAR_PREPOST_BITS(kwq);
			error = kwq_handle_unlock(kwq, preseq, &updatebits, (KW_UNLOCK_PREPOST_READLOCK|KW_UNLOCK_PREPOST), &block, lgen);
			if (error != 0)
				panic("kwq_handle_unlock failed %d\n", error);
			if (block == 0) {
				ksyn_wqunlock(kwq);
				goto out;
			}
			/* insert to q and proceed as usual */
		}
	}

#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_READ], lgen, uth, SEQFIT);
	if (error != 0)
		panic("psynch_rw_rdlock: failed to enqueue\n");
	error = ksyn_block_thread_locked(kwq, (uint64_t)0, uth);
	/* drops the kwq lock */

out:
	if (error != 0) {
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 4, error, 0, 0);
#endif /* _PSYNCH_TRACE_ */
		ksyn_wqlock(kwq);
		if (uth->uu_kwqqueue != NULL)
			ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_READ], uth);
		ksyn_wqunlock(kwq);
	} else {
		/* update bits */
		*retval = uth->uu_psynchretval;
	}
	ksyn_wqrelease(kwq, NULL);
#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
	return(error);
}
/*
 * psynch_rw_longrdlock: This system call is used for psynch rwlock long readers to block.
 */
int
psynch_rw_longrdlock(__unused proc_t p, struct psynch_rw_longrdlock_args * uap, uint32_t * retval)
{
	user_addr_t rwlock = uap->rwlock;
	uint32_t lgen = uap->lgenval;
	uint32_t ugen = uap->ugenval;
	uint32_t rw_wc = uap->rw_wc;
	//uint64_t tid = uap->tid;
	int flags = uap->flags;
	ksyn_wait_queue_t kwq;
	int error = 0, block = 0;
	uthread_t uth;
	uint32_t lockseq = 0, updatebits = 0, preseq = 0;

#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
	uth = current_uthread();

	uth->uu_lockseq = lgen;
	lockseq = (lgen & PTHRW_COUNT_MASK);

	error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	ksyn_wqlock(kwq);

	/* handle first the missed wakeups */
	if ((kwq->kw_pre_intrcount != 0) &&
		(kwq->kw_pre_intrtype == PTH_RW_TYPE_LREAD) &&
		(is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {

		kwq->kw_pre_intrcount--;
		uth->uu_psynchretval = kwq->kw_pre_intrretbits;
		if (kwq->kw_pre_intrcount == 0)
			CLEAR_INTR_PREPOST_BITS(kwq);
		ksyn_wqunlock(kwq);
		goto out;
	}

	/* handle unlock2/downgrade first */
	if ((kwq->kw_pre_limrd != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_limrdseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 1, kwq->kw_pre_limrd, kwq->kw_pre_limrdseq, 0);
#endif /* _PSYNCH_TRACE_ */
		kwq->kw_pre_limrd--;
		if (kwq->kw_pre_limrd == 0)
			CLEAR_READ_PREPOST_BITS(kwq);
		/* not a read, proceed */
	}

	if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
		kwq->kw_pre_rwwc--;
		if (kwq->kw_pre_rwwc == 0) {
			preseq = kwq->kw_pre_lockseq;
			CLEAR_PREPOST_BITS(kwq);
			error = kwq_handle_unlock(kwq, preseq, &updatebits, (KW_UNLOCK_PREPOST_LREADLOCK|KW_UNLOCK_PREPOST), &block, lgen);
			if (error != 0)
				panic("kwq_handle_unlock failed %d\n", error);
			if (block == 0) {
				ksyn_wqunlock(kwq);
				goto out;
			}
			/* insert to q and proceed as usual */
		}
	}

#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_LREAD], lgen, uth, SEQFIT);
	if (error != 0)
		panic("psynch_rw_longrdlock: failed to enqueue\n");

	error = ksyn_block_thread_locked(kwq, (uint64_t)0, uth);
	/* drops the kwq lock */

out:
	if (error != 0) {
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		ksyn_wqlock(kwq);
		if (uth->uu_kwqqueue != NULL)
			ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_LREAD], uth);
		ksyn_wqunlock(kwq);
	} else {
		/* update bits */
		*retval = uth->uu_psynchretval;
	}

	ksyn_wqrelease(kwq, NULL);

#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWLRDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
	return(error);
}
/*
 * psynch_rw_wrlock: This system call is used for psynch rwlock writers to block.
 */
int
psynch_rw_wrlock(__unused proc_t p, struct psynch_rw_wrlock_args * uap, uint32_t * retval)
{
	user_addr_t rwlock = uap->rwlock;
	uint32_t lgen = uap->lgenval;
	uint32_t ugen = uap->ugenval;
	uint32_t rw_wc = uap->rw_wc;
	//uint64_t tid = uap->tid;
	int flags = uap->flags;
	int error = 0, block = 0;
	ksyn_wait_queue_t kwq;
	uthread_t uth;
	uint32_t lockseq = 0, updatebits = 0, preseq = 0;

#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
	uth = current_uthread();

	uth->uu_lockseq = lgen;
	lockseq = (lgen & PTHRW_COUNT_MASK);

	error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	ksyn_wqlock(kwq);

	/* handle first the missed wakeups */
	if ((kwq->kw_pre_intrcount != 0) &&
		(kwq->kw_pre_intrtype == PTH_RW_TYPE_WRITE) &&
		(is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {

		kwq->kw_pre_intrcount--;
		uth->uu_psynchretval = kwq->kw_pre_intrretbits;
		if (kwq->kw_pre_intrcount == 0)
			CLEAR_INTR_PREPOST_BITS(kwq);
		ksyn_wqunlock(kwq);
		goto out;
	}

	/* handle unlock2/downgrade first */
	if ((kwq->kw_pre_limrd != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_limrdseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 1, kwq->kw_pre_limrd, kwq->kw_pre_limrdseq, 0);
#endif /* _PSYNCH_TRACE_ */
		kwq->kw_pre_limrd--;
		if (kwq->kw_pre_limrd == 0)
			CLEAR_READ_PREPOST_BITS(kwq);
		/* not a read, proceed */
	}

	if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
		kwq->kw_pre_rwwc--;
		if (kwq->kw_pre_rwwc == 0) {
			preseq = kwq->kw_pre_lockseq;
			CLEAR_PREPOST_BITS(kwq);
			error = kwq_handle_unlock(kwq, preseq, &updatebits, (KW_UNLOCK_PREPOST_WRLOCK|KW_UNLOCK_PREPOST), &block, lgen);
			if (error != 0)
				panic("kwq_handle_unlock failed %d\n", error);
			if (block == 0) {
				ksyn_wqunlock(kwq);
				goto out;
			}
			/* insert to q and proceed as usual */
		}
	}

#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], lgen, uth, SEQFIT);
	if (error != 0)
		panic("psynch_rw_wrlock: failed to enqueue\n");

	error = ksyn_block_thread_locked(kwq, (uint64_t)0, uth);
	/* drops the wq lock */

out:
	if (error != 0) {
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 4, error, 0, 0);
#endif /* _PSYNCH_TRACE_ */
		ksyn_wqlock(kwq);
		if (uth->uu_kwqqueue != NULL)
			ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], uth);
		ksyn_wqunlock(kwq);
	} else {
		/* update bits */
		*retval = uth->uu_psynchretval;
	}

	ksyn_wqrelease(kwq, NULL);

#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
	return(error);
}
/*
 * psynch_rw_yieldwrlock: This system call is used for psynch rwlock yielding writers to block.
 */
int
psynch_rw_yieldwrlock(__unused proc_t p, struct psynch_rw_yieldwrlock_args * uap, uint32_t * retval)
{
	user_addr_t rwlock = uap->rwlock;
	uint32_t lgen = uap->lgenval;
	uint32_t ugen = uap->ugenval;
	uint32_t rw_wc = uap->rw_wc;
	//uint64_t tid = uap->tid;
	int flags = uap->flags;
	int error = 0, block = 0;
	ksyn_wait_queue_t kwq;
	uthread_t uth;
	uint32_t lockseq = 0, updatebits = 0, preseq = 0;

#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */

	uth = current_uthread();

	uth->uu_lockseq = lgen;
	lockseq = (lgen & PTHRW_COUNT_MASK);

	error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	ksyn_wqlock(kwq);

	/* handle first the missed wakeups */
	if ((kwq->kw_pre_intrcount != 0) &&
		(kwq->kw_pre_intrtype == PTH_RW_TYPE_YWRITE) &&
		(is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {

		kwq->kw_pre_intrcount--;
		uth->uu_psynchretval = kwq->kw_pre_intrretbits;
		if (kwq->kw_pre_intrcount == 0)
			CLEAR_INTR_PREPOST_BITS(kwq);
		ksyn_wqunlock(kwq);
		goto out;
	}

	/* handle unlock2/downgrade first */
	if ((kwq->kw_pre_limrd != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_limrdseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 1, kwq->kw_pre_limrd, kwq->kw_pre_limrdseq, 0);
#endif /* _PSYNCH_TRACE_ */
		kwq->kw_pre_limrd--;
		if (kwq->kw_pre_limrd == 0)
			CLEAR_READ_PREPOST_BITS(kwq);
		/* not a read, proceed */
	}

	if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
		kwq->kw_pre_rwwc--;
		if (kwq->kw_pre_rwwc == 0) {
			preseq = kwq->kw_pre_lockseq;
			CLEAR_PREPOST_BITS(kwq);
			error = kwq_handle_unlock(kwq, preseq, &updatebits, (KW_UNLOCK_PREPOST_YWRLOCK|KW_UNLOCK_PREPOST), &block, lgen);
			if (error != 0)
				panic("kwq_handle_unlock failed %d\n", error);
			if (block == 0) {
				ksyn_wqunlock(kwq);
				goto out;
			}
			/* insert to q and proceed as usual */
		}
	}

#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER], lgen, uth, SEQFIT);
	if (error != 0)
		panic("psynch_rw_yieldwrlock: failed to enqueue\n");

	error = ksyn_block_thread_locked(kwq, (uint64_t)0, uth);
	/* drops the kwq lock */

out:
	if (error != 0) {
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 4, error, 0, 0);
#endif /* _PSYNCH_TRACE_ */
		ksyn_wqlock(kwq);
		if (uth->uu_kwqqueue != NULL)
			ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER], uth);
		ksyn_wqunlock(kwq);
	} else {
		/* update bits */
		*retval = uth->uu_psynchretval;
	}

	ksyn_wqrelease(kwq, NULL);

#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWYWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
	return(error);
}
/*
 * psynch_rw_downgrade: This system call is used to wake up blocked readers who are eligible to run due to downgrade.
 */
int
psynch_rw_downgrade(__unused proc_t p, struct psynch_rw_downgrade_args * uap, __unused int * retval)
{
	user_addr_t rwlock = uap->rwlock;
	uint32_t lgen = uap->lgenval;
	uint32_t ugen = uap->ugenval;
	uint32_t rw_wc = uap->rw_wc;
	//uint64_t tid = uap->tid;
	int flags = uap->flags;
	uint32_t count = 0;
	int error = 0;
	ksyn_wait_queue_t kwq;
	uthread_t uth;
	uint32_t curgen = 0;

#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
	uth = current_uthread();

	curgen = (lgen & PTHRW_COUNT_MASK);

	error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	ksyn_wqlock(kwq);

	if (is_seqlower(ugen, kwq->kw_lastunlockseq) != 0) {
		/* spurious updatebits?? */
		goto out;
	}
	/* fast path for default case */
	if ((rw_wc == kwq->kw_inqueue) && (kwq->kw_highseq == curgen))
		goto dounlock;

	/* have we seen all the waiters? */
	if (rw_wc > kwq->kw_inqueue) {
		goto prepost;
	}

	if (is_seqhigher(curgen, kwq->kw_highseq) != 0) {
		goto prepost;
	} else {
		if (find_seq_till(kwq, curgen, rw_wc, &count) == 0) {
			if (count < rw_wc) {
				kwq->kw_pre_limrd = rw_wc - count;
				kwq->kw_pre_limrdseq = lgen;
				kwq->kw_pre_limrdbits = lgen;
			}
		}
	}

dounlock:
#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	error = kwq_handle_downgrade(kwq, lgen, 0, 0, NULL);

	if (error != 0)
		panic("psynch_rw_downgrade: failed to wakeup\n");

out:
	ksyn_wqunlock(kwq);
#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_END, (uint32_t)rwlock, 0, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
	ksyn_wqrelease(kwq, NULL);
	return(error);

prepost:
	kwq->kw_pre_rwwc = (rw_wc - count);
	kwq->kw_pre_lockseq = lgen;
#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWDOWNGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 1, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
	error = 0;
	goto out;
}
/*
 * psynch_rw_upgrade: This system call is used by a reader to block waiting for upgrade to be granted.
 */
int
psynch_rw_upgrade(__unused proc_t p, struct psynch_rw_upgrade_args * uap, uint32_t * retval)
{
	user_addr_t rwlock = uap->rwlock;
	uint32_t lgen = uap->lgenval;
	uint32_t ugen = uap->ugenval;
	uint32_t rw_wc = uap->rw_wc;
	//uint64_t tid = uap->tid;
	int flags = uap->flags;
	int error = 0, block = 0;
	ksyn_wait_queue_t kwq;
	uthread_t uth;
	uint32_t lockseq = 0, updatebits = 0, preseq = 0;

#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
	uth = current_uthread();

	uth->uu_lockseq = lgen;
	lockseq = (lgen & PTHRW_COUNT_MASK);

	error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_RWLOCK), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	ksyn_wqlock(kwq);

	/* handle first the missed wakeups */
	if ((kwq->kw_pre_intrcount != 0) &&
		(kwq->kw_pre_intrtype == PTH_RW_TYPE_UPGRADE) &&
		(is_seqlower_eq(lockseq, (kwq->kw_pre_intrseq & PTHRW_COUNT_MASK)) != 0)) {

		kwq->kw_pre_intrcount--;
		uth->uu_psynchretval = kwq->kw_pre_intrretbits;
		if (kwq->kw_pre_intrcount == 0)
			CLEAR_INTR_PREPOST_BITS(kwq);
		ksyn_wqunlock(kwq);
		goto out;
	}

	if ((kwq->kw_pre_rwwc != 0) && (is_seqlower_eq(lockseq, (kwq->kw_pre_lockseq & PTHRW_COUNT_MASK)) != 0)) {
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 2, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
		kwq->kw_pre_rwwc--;
		if (kwq->kw_pre_rwwc == 0) {
			preseq = kwq->kw_pre_lockseq;
			CLEAR_PREPOST_BITS(kwq);
			error = kwq_handle_unlock(kwq, preseq, &updatebits, (KW_UNLOCK_PREPOST_UPGRADE|KW_UNLOCK_PREPOST), &block, lgen);
			if (error != 0)
				panic("kwq_handle_unlock failed %d\n", error);
			if (block == 0) {
				ksyn_wqunlock(kwq);
				goto out;
			}
			/* insert to q and proceed as usual */
		}
	}

#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_UPGRADE], lgen, uth, SEQFIT);
	if (error != 0)
		panic("psynch_rw_upgrade: failed to enqueue\n");

	error = ksyn_block_thread_locked(kwq, (uint64_t)0, uth);
	/* drops the lock */

out:
	if (error != 0) {
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_NONE, (uint32_t)rwlock, 4, error, 0, 0);
#endif /* _PSYNCH_TRACE_ */
		ksyn_wqlock(kwq);
		if (uth->uu_kwqqueue != NULL)
			ksyn_queue_removeitem(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_UPGRADE], uth);
		ksyn_wqunlock(kwq);
	} else {
		/* update bits */
		*retval = uth->uu_psynchretval;
	}

	ksyn_wqrelease(kwq, NULL);
#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUPGRADE | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
	return(error);
}
/*
 * psynch_rw_unlock: This system call is used for unlock state postings. This will grant appropriate
 *			reader/writer variety lock.
 */
int
psynch_rw_unlock(__unused proc_t p, struct psynch_rw_unlock_args * uap, uint32_t * retval)
{
	user_addr_t rwlock = uap->rwlock;
	uint32_t lgen = uap->lgenval;
	uint32_t ugen = uap->ugenval;
	uint32_t rw_wc = uap->rw_wc;
	uint32_t curgen;
	//uint64_t tid = uap->tid;
	int flags = uap->flags;
	uthread_t uth;
	ksyn_wait_queue_t kwq;
	uint32_t updatebits = 0;
	int error = 0;
	uint32_t count = 0;

#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
	uth = current_uthread();

	error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_RWLOCK), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	curgen = lgen & PTHRW_COUNT_MASK;

	ksyn_wqlock(kwq);

	if ((lgen & PTHRW_RW_INIT) != 0) {
		kwq->kw_lastunlockseq = 0;
		lgen &= ~PTHRW_RW_INIT;
	} else if (is_seqlower(ugen, kwq->kw_lastunlockseq) != 0) {
		/* spurious updatebits set */
		updatebits = PTHRW_RW_SPURIOUS;
		goto out;
	}

#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 1, kwq->kw_inqueue, curgen, 0);
#endif /* _PSYNCH_TRACE_ */
	if (find_seq_till(kwq, curgen, rw_wc, &count) == 0) {
		if (count < rw_wc)
			goto prepost;
	}

	/* can handle unlock now */
	CLEAR_PREPOST_BITS(kwq);
	kwq->kw_lastunlockseq = ugen;

#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 2, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	error = kwq_handle_unlock(kwq, lgen, &updatebits, 0, NULL, 0);
	if (error != 0)
		panic("psynch_rw_unlock: kwq_handle_unlock failed %d\n", error);
out:
	if (error == 0) {
		*retval = updatebits;
	}
	ksyn_wqunlock(kwq);

	ksyn_wqrelease(kwq, NULL);
#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
	return(error);

prepost:
	kwq->kw_pre_rwwc = (rw_wc - count);
	kwq->kw_pre_lockseq = curgen;
	kwq->kw_lastunlockseq = ugen;
#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 3, rw_wc, count, 0);
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 4, kwq->kw_pre_rwwc, kwq->kw_pre_lockseq, 0);
#endif /* _PSYNCH_TRACE_ */
	updatebits = (lgen | PTHRW_RW_SPURIOUS);	/* let this not do unlock handling */
	error = 0;
	goto out;
}
/*
 * psynch_rw_unlock2: This system call is used to wake up pending readers when an unlock grant from
 *			the kernel races with new reader arrivals.
 */
int
psynch_rw_unlock2(__unused proc_t p, struct psynch_rw_unlock2_args * uap, uint32_t * retval)
{
	user_addr_t rwlock = uap->rwlock;
	uint32_t lgen = uap->lgenval;
	uint32_t ugen = uap->ugenval;
	uint32_t rw_wc = uap->rw_wc;
	//uint64_t tid = uap->tid;
	int flags = uap->flags;
	uint32_t num_lreader, limitread, curgen, updatebits;
	ksyn_wait_queue_t kwq;
	int error = 0, longreadset = 0;
	int diff;
	uint32_t count = 0;
	uthread_t uth;

#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUNLOCK2 | DBG_FUNC_START, (uint32_t)rwlock, lgen, ugen, rw_wc, 0);
#endif /* _PSYNCH_TRACE_ */
	uth = current_uthread();

	error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags, (KSYN_WQTYPE_RWLOCK), &kwq);
	if (error != 0) {
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUNLOCK2 | DBG_FUNC_END, (uint32_t)rwlock, 1, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
		return(error);
	}

	ksyn_wqlock(kwq);

	curgen = (lgen & PTHRW_COUNT_MASK);
	diff = find_diff(lgen, ugen);

	limitread = lgen & PTHRW_COUNT_MASK;

	if (find_seq_till(kwq, curgen, diff, &count) == 0) {
		kwq->kw_pre_limrd = diff - count;
		kwq->kw_pre_limrdseq = lgen;
		kwq->kw_pre_limrdbits = lgen;
	}

	if (kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count != 0) {
		num_lreader = kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_firstnum;
		if (is_seqlower_eq(num_lreader, limitread) != 0)
			longreadset = 1;
	}

	updatebits = lgen;
#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUNLOCK2 | DBG_FUNC_NONE, (uint32_t)rwlock, 3, 0, 0, 0);
#endif /* _PSYNCH_TRACE_ */
	count = ksyn_wakeupreaders(kwq, limitread, longreadset, 0, updatebits, NULL);

	if (count != 0) {
		if (kwq->kw_pre_limrd != 0) {
			kwq->kw_pre_limrd += count;
		} else {
			kwq->kw_pre_limrd = count;
			kwq->kw_pre_limrdseq = lgen;
			kwq->kw_pre_limrdbits = lgen;
		}
	}
	error = 0;

	if (error == 0) {
		*retval = uth->uu_psynchretval;
	}
	ksyn_wqunlock(kwq);

	ksyn_wqrelease(kwq, NULL);
#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWUNLOCK2 | DBG_FUNC_END, (uint32_t)rwlock, 0, 0, error, 0);
#endif /* _PSYNCH_TRACE_ */
	return(error);
}
/* ************************************************************************** */
void
pth_global_hashinit()
{
	pth_glob_hashtbl = hashinit(PTH_HASHSIZE * 4, M_PROC, &pthhash);
}

void
pth_proc_hashinit(proc_t p)
{
	p->p_pthhash = hashinit(PTH_HASHSIZE, M_PROC, &pthhash);
	if (p->p_pthhash == NULL)
		panic("pth_proc_hashinit: hash init returned 0\n");
}
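
/*
 * Illustrative sketch (commentary added here, not functional code): both
 * tables use the mask hashinit() returned in 'pthhash' to pick a bucket.
 * Process-private objects hash on the user address, while a
 * PTHREAD_PROCESS_SHARED object hashes on its backing VM object id so every
 * process lands in the same global bucket:
 *
 *	bucket = &p->p_pthhash[mutex & pthhash];	(private)
 *	bucket = &pth_glob_hashtbl[object & pthhash];	(process-shared)
 */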
ksyn_wait_queue_t
ksyn_wq_hash_lookup(user_addr_t mutex, proc_t p, int flags, uint64_t object, uint64_t objoffset)
{
	ksyn_wait_queue_t kwq;
	struct pthhashhead * hashptr;

	if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED) {
		hashptr = pth_glob_hashtbl;
		kwq = (&hashptr[object & pthhash])->lh_first;
		for (; kwq != NULL; kwq = kwq->kw_hash.le_next) {
			if ((kwq->kw_object == object) && (kwq->kw_offset == objoffset)) {
				return (kwq);
			}
		}
	} else {
		hashptr = p->p_pthhash;
		kwq = (&hashptr[mutex & pthhash])->lh_first;
		for (; kwq != NULL; kwq = kwq->kw_hash.le_next) {
			if (kwq->kw_addr == mutex) {
				return (kwq);
			}
		}
	}
	return (NULL);
}
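
/*
 * Usage sketch (illustrative): lookups are only valid under the pthread list
 * lock, and a caller that wants to keep the entry must take an iocount
 * reference before dropping that lock, as ksyn_wqfind() below does:
 *
 *	pthread_list_lock();
 *	kwq = ksyn_wq_hash_lookup(mutex, p, flags, object, offset);
 *	if (kwq != NULL)
 *		kwq->kw_iocount++;
 *	pthread_list_unlock();
 */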
void
pth_proc_hashdelete(proc_t p)
{
	struct pthhashhead * hashptr;
	ksyn_wait_queue_t kwq;
	int hashsize = pthhash + 1;
	int i;

	hashptr = p->p_pthhash;
	if (hashptr == NULL)
		return;

	for (i = 0; i < hashsize; i++) {
		while ((kwq = LIST_FIRST(&hashptr[i])) != NULL) {
			pthread_list_lock();
			if ((kwq->kw_pflags & KSYN_WQ_INHASH) != 0) {
				kwq->kw_pflags &= ~KSYN_WQ_INHASH;
				LIST_REMOVE(kwq, kw_hash);
			}
			if ((kwq->kw_pflags & KSYN_WQ_FLIST) != 0) {
				kwq->kw_pflags &= ~KSYN_WQ_FLIST;
				LIST_REMOVE(kwq, kw_list);
			}
			pthread_list_unlock();
			lck_mtx_destroy(&kwq->kw_lock, pthread_lck_grp);
			kfree(kwq, sizeof(struct ksyn_wait_queue));
		}
	}
	FREE(p->p_pthhash, M_PROC);
	p->p_pthhash = NULL;
}
/* find kernel waitqueue, if not present create one. Grants a reference */
int
ksyn_wqfind(user_addr_t mutex, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, uint64_t tid, int flags, int wqtype, ksyn_wait_queue_t * kwqp)
{
	ksyn_wait_queue_t kwq;
	ksyn_wait_queue_t nkwq;
	struct pthhashhead * hashptr;
	uint64_t object = 0, offset = 0;
	proc_t p = current_proc();
	int retry = mgen & PTHRW_RETRYBIT;
	int i;

	if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED) {
		(void)ksyn_findobj(mutex, &object, &offset);
		hashptr = pth_glob_hashtbl;
	} else {
		hashptr = p->p_pthhash;
	}

	//pthread_list_lock_spin();
	pthread_list_lock();

	kwq = ksyn_wq_hash_lookup(mutex, p, flags, object, offset);
	if (kwq != NULL) {
		kwq->kw_iocount++;
		if ((kwq->kw_pflags & KSYN_WQ_FLIST) != 0) {
			LIST_REMOVE(kwq, kw_list);
			kwq->kw_pflags &= ~KSYN_WQ_FLIST;
		}
		UPDATE_KWQ(kwq, mgen, ugen, rw_wc, tid, wqtype, retry);
		if (kwqp != NULL)
			*kwqp = kwq;
		pthread_list_unlock();
		return (0);
	}

	pthread_list_unlock();

	nkwq = kalloc(sizeof(struct ksyn_wait_queue));
	bzero(nkwq, sizeof(struct ksyn_wait_queue));
	nkwq->kw_addr = mutex;
	nkwq->kw_flags = flags;
	nkwq->kw_iocount = 1;
	nkwq->kw_object = object;
	nkwq->kw_offset = offset;
	nkwq->kw_type = (wqtype & KSYN_WQTYPE_MASK);
	TAILQ_INIT(&nkwq->kw_uthlist);

	for (i = 0; i < KSYN_QUEUE_MAX; i++)
		ksyn_queue_init(&nkwq->kw_ksynqueues[i]);

	UPDATE_KWQ(nkwq, mgen, ugen, rw_wc, tid, wqtype, retry);
#if USE_WAITQUEUE
	wait_queue_init(&nkwq->kw_wq, SYNC_POLICY_FIFO);
#endif /* USE_WAITQUEUE */
	lck_mtx_init(&nkwq->kw_lock, pthread_lck_grp, pthread_lck_attr);

	//pthread_list_lock_spin();
	pthread_list_lock();
	/* see whether it is already allocated */
	kwq = ksyn_wq_hash_lookup(mutex, p, flags, object, offset);
	if (kwq != NULL) {
		/* lost the race; reference the winner and free our copy */
		kwq->kw_iocount++;
		if ((kwq->kw_pflags & KSYN_WQ_FLIST) != 0) {
			LIST_REMOVE(kwq, kw_list);
			kwq->kw_pflags &= ~KSYN_WQ_FLIST;
		}
		UPDATE_KWQ(kwq, mgen, ugen, rw_wc, tid, wqtype, retry);
		if (kwqp != NULL)
			*kwqp = kwq;
		pthread_list_unlock();
		lck_mtx_destroy(&nkwq->kw_lock, pthread_lck_grp);
		kfree(nkwq, sizeof(struct ksyn_wait_queue));
		return (0);
	}

	kwq = nkwq;
	if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED) {
		kwq->kw_pflags |= KSYN_WQ_SHARED;
		LIST_INSERT_HEAD(&hashptr[kwq->kw_object & pthhash], kwq, kw_hash);
	} else
		LIST_INSERT_HEAD(&hashptr[mutex & pthhash], kwq, kw_hash);

	kwq->kw_pflags |= KSYN_WQ_INHASH;

	pthread_list_unlock();

	if (kwqp != NULL)
		*kwqp = kwq;
	return (0);
}
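
/*
 * Usage sketch (illustrative): every successful ksyn_wqfind() must be
 * balanced by a ksyn_wqrelease() once the operation is complete, which is
 * the pattern the psynch_* syscalls above follow:
 *
 *	error = ksyn_wqfind(rwlock, lgen, ugen, rw_wc, TID_ZERO, flags,
 *			(KSYN_WQTYPE_RWLOCK), &kwq);
 *	if (error == 0) {
 *		ksyn_wqlock(kwq);
 *		...queue, wake or prepost...
 *		ksyn_wqunlock(kwq);
 *		ksyn_wqrelease(kwq, NULL);
 *	}
 */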
/* Reference from find is dropped here. Starts the free process if needed */
void
ksyn_wqrelease(ksyn_wait_queue_t kwq, ksyn_wait_queue_t ckwq)
{
	uint64_t deadline;
	struct timeval t;
	int sched = 0;

	//pthread_list_lock_spin();
	pthread_list_lock();
	kwq->kw_iocount--;
	if (kwq->kw_iocount == 0) {
		if ((kwq->kw_pre_rwwc == 0) && (kwq->kw_inqueue == 0)) {
			/* mark for free if we can */
			microuptime(&kwq->kw_ts);
			LIST_INSERT_HEAD(&pth_free_list, kwq, kw_list);
			kwq->kw_pflags |= KSYN_WQ_FLIST;
		}
		sched = 1;
	}
	if (ckwq != NULL) {
		ckwq->kw_iocount--;
		if (ckwq->kw_iocount == 0) {
			if ((ckwq->kw_pre_rwwc == 0) && (ckwq->kw_inqueue == 0)) {
				/* mark for free if we can */
				microuptime(&ckwq->kw_ts);
				LIST_INSERT_HEAD(&pth_free_list, ckwq, kw_list);
				ckwq->kw_pflags |= KSYN_WQ_FLIST;
			}
			sched = 1;
		}
	}

	if (sched == 1 && psynch_cleanupset == 0) {
		psynch_cleanupset = 1;
		microuptime(&t);
		t.tv_sec += KSYN_CLEANUP_DEADLINE;

		deadline = tvtoabstime(&t);
		thread_call_enter_delayed(psynch_thcall, deadline);
	}
	pthread_list_unlock();
}
/* responsible to free the waitqueues */
void
psynch_wq_cleanup(__unused void * param, __unused void * param1)
{
	ksyn_wait_queue_t kwq;
	struct timeval t;
	LIST_HEAD(, ksyn_wait_queue) freelist = {NULL};
	int count = 0, delayed = 0, diff;
	uint64_t deadline = 0;

	//pthread_list_lock_spin();
	pthread_list_lock();

	microuptime(&t);

	LIST_FOREACH(kwq, &pth_free_list, kw_list) {
		if ((kwq->kw_iocount != 0) && (kwq->kw_inqueue != 0)) {
			/* still in freelist ??? */
			continue;
		}
		diff = t.tv_sec - kwq->kw_ts.tv_sec;
		if (diff >= KSYN_CLEANUP_DEADLINE) {
			/* out of the hash and onto the local free list */
			kwq->kw_pflags &= ~(KSYN_WQ_FLIST | KSYN_WQ_INHASH);
			LIST_REMOVE(kwq, kw_hash);
			LIST_REMOVE(kwq, kw_list);
			LIST_INSERT_HEAD(&freelist, kwq, kw_list);
			count++;
		} else {
			delayed = 1;
		}
	}
	if (delayed != 0) {
		t.tv_sec += KSYN_CLEANUP_DEADLINE;

		deadline = tvtoabstime(&t);
		thread_call_enter_delayed(psynch_thcall, deadline);
		psynch_cleanupset = 1;
	} else
		psynch_cleanupset = 0;

	pthread_list_unlock();

	while ((kwq = LIST_FIRST(&freelist)) != NULL) {
		LIST_REMOVE(kwq, kw_list);
		lck_mtx_destroy(&kwq->kw_lock, pthread_lck_grp);
		kfree(kwq, sizeof(struct ksyn_wait_queue));
	}
}
int
ksyn_block_thread_locked(ksyn_wait_queue_t kwq, uint64_t abstime, uthread_t uth)
{
	kern_return_t kret;
	int error = 0;

	uth->uu_kwqqueue = (void *)kwq;
#if USE_WAITQUEUE
	kret = wait_queue_assert_wait64(&kwq->kw_wq, kwq->kw_addr, THREAD_ABORTSAFE, abstime);
#else /* USE_WAITQUEUE */
	assert_wait_deadline(&uth->uu_psynchretval, THREAD_ABORTSAFE, abstime);
#endif /* USE_WAITQUEUE */
	ksyn_wqunlock(kwq);

	kret = thread_block(NULL);
	switch (kret) {
		case THREAD_TIMED_OUT:
			error = ETIMEDOUT;
			break;
		case THREAD_INTERRUPTED:
			error = EINTR;
			break;
	}
	return(error);
}
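
/*
 * Sketch of the block/wake handshake (illustrative): the waiter parks on the
 * address of its own uu_psynchretval and the waker stores the return bits
 * there before issuing the wakeup, so the value is stable by the time
 * thread_block() returns:
 *
 *	waiter (kwq lock held; ksyn_block_thread_locked drops it):
 *		error = ksyn_block_thread_locked(kwq, (uint64_t)0, uth);
 *		retval = uth->uu_psynchretval;
 *
 *	waker (kwq lock held):
 *		uth->uu_psynchretval = updatebits;
 *		uth->uu_kwqqueue = NULL;
 *		kret = ksyn_wakeup_thread(kwq, uth);
 */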
#if USE_WAITQUEUE
kern_return_t
ksyn_wakeup_thread(ksyn_wait_queue_t kwq, uthread_t uth)
#else /* USE_WAITQUEUE */
kern_return_t
ksyn_wakeup_thread(__unused ksyn_wait_queue_t kwq, uthread_t uth)
#endif /* USE_WAITQUEUE */
{
	thread_t th;
	kern_return_t kret;

	th = uth->uu_context.vc_thread;

#if USE_WAITQUEUE
	kret = wait_queue_wakeup64_thread(&kwq->kw_wq, kwq->kw_addr, th, THREAD_AWAKENED);
#else /* USE_WAITQUEUE */
	kret = thread_wakeup_prim((caddr_t)&uth->uu_psynchretval, TRUE, THREAD_AWAKENED);
#endif /* USE_WAITQUEUE */

	if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
		panic("ksyn_wakeup_thread: panic waking up thread %x\n", kret);

	return(kret);
}
/* move from one waitqueue to another */
#if COND_MTX_WAITQUEUEMOVE
void
ksyn_move_wqthread(ksyn_wait_queue_t ckwq, ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t updateval, int diffgen, int nomutex)
#else /* COND_MTX_WAITQUEUEMOVE */
void
ksyn_move_wqthread(ksyn_wait_queue_t ckwq, __unused ksyn_wait_queue_t kwq, __unused uint32_t mgen, uint32_t updateval, __unused int diffgen, int nomutex)
#endif /* COND_MTX_WAITQUEUEMOVE */
{
	kern_return_t kret;
	uthread_t uth;
#if COND_MTX_WAITQUEUEMOVE
	int count = 0, error, kret;
	uint32_t nextgen = mgen;
#endif /* COND_MTX_WAITQUEUEMOVE */
	struct ksyn_queue kq;
	uint32_t upgen;

	ksyn_queue_init(&kq);
#if USE_WAITQUEUE
	kret = wait_queue_move_all(&ckwq->kw_wq, ckwq->kw_addr, &kwq->kw_wq, kwq->kw_addr);
#else /* USE_WAITQUEUE */
	/* no need to move as the thread is blocked at uthread address */
	kret = KERN_SUCCESS;
#endif /* USE_WAITQUEUE */

	if (nomutex != 0)
		upgen = updateval | PTHRW_MTX_NONE;
	else
		upgen = updateval;

	if (kret == KERN_SUCCESS) {
		while ((uth = ksyn_queue_removefirst(&ckwq->kw_ksynqueues[KSYN_QUEUE_WRITER], ckwq)) != NULL) {
#if COND_MTX_WAITQUEUEMOVE
			uth->uu_psynchretval = upgen;
#else /* COND_MTX_WAITQUEUEMOVE */
			uth->uu_psynchretval = 0;
			uth->uu_kwqqueue = NULL;
			kret = ksyn_wakeup_thread(ckwq, uth);
			if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
				panic("ksyn_move_wqthread: panic waking up \n");
			if (kret == KERN_NOT_WAITING)
				continue;
#endif /* COND_MTX_WAITQUEUEMOVE */
#if COND_MTX_WAITQUEUEMOVE
			count++;
			if (count > diffgen)
				panic("movethread inserting more than expected\n");
			TAILQ_INSERT_TAIL(&kq.ksynq_uthlist, uth, uu_mtxlist);
#endif /* COND_MTX_WAITQUEUEMOVE */
		}
	}

	ksyn_wqunlock(ckwq);

#if COND_MTX_WAITQUEUEMOVE
	if ((nomutex == 0) && (count > 0)) {
		ksyn_wqlock(kwq);
		uth = TAILQ_FIRST(&kq.ksynq_uthlist);
		while (uth != NULL) {
			TAILQ_REMOVE(&kq.ksynq_uthlist, uth, uu_mtxlist);
			error = ksyn_queue_insert(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], nextgen, uth, SEQFIT);
			if (error != 0)
				panic("movethread insert failed\n");
			uth->uu_lockseq = nextgen;
			nextgen += PTHRW_INC;
			uth = TAILQ_FIRST(&kq.ksynq_uthlist);
		}
		ksyn_wqunlock(kwq);
	}
#endif /* COND_MTX_WAITQUEUEMOVE */
	if (kret != KERN_SUCCESS)
		panic("movethread : wq move all failed\n");
	return;
}
/* find the true shared object/offset for shared mutexes */
int
ksyn_findobj(uint64_t mutex, uint64_t * objectp, uint64_t * offsetp)
{
	vm_page_info_basic_data_t info;
	kern_return_t kret;
	mach_msg_type_number_t count = VM_PAGE_INFO_BASIC_COUNT;

	kret = vm_map_page_info(current_map(), mutex, VM_PAGE_INFO_BASIC,
			(vm_page_info_t)&info, &count);

	if (kret != KERN_SUCCESS)
		return(EINVAL);

	if (objectp != NULL)
		*objectp = (uint64_t)info.object_id;
	if (offsetp != NULL)
		*offsetp = (uint64_t)info.offset;

	return(0);
}
/* lowest of kw_fr, kw_flr, kw_fwr, kw_fywr */
int
kwq_find_rw_lowest(ksyn_wait_queue_t kwq, int flags, uint32_t premgen, int * typep, uint32_t lowest[])
{
	uint32_t kw_fr, kw_flr, kw_fwr, kw_fywr, low;
	int type = 0, lowtype, typenum[4];
	uint32_t numbers[4];
	int count = 0, i;

	if ((kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0)) {
		type |= PTH_RWSHFT_TYPE_READ;
		/* read entries are present */
		if (kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count != 0) {
			kw_fr = kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_firstnum;
			if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) && (is_seqlower(premgen, kw_fr) != 0))
				kw_fr = premgen;
		} else
			kw_fr = premgen;

		lowest[KSYN_QUEUE_READ] = kw_fr;
		numbers[count] = kw_fr;
		typenum[count] = PTH_RW_TYPE_READ;
		count++;
	} else
		lowest[KSYN_QUEUE_READ] = 0;

	if ((kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_LREADLOCK) != 0)) {
		type |= PTH_RWSHFT_TYPE_LREAD;
		/* long read entries are present */
		if (kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count != 0) {
			kw_flr = kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_firstnum;
			if (((flags & KW_UNLOCK_PREPOST_LREADLOCK) != 0) && (is_seqlower(premgen, kw_flr) != 0))
				kw_flr = premgen;
		} else
			kw_flr = premgen;

		lowest[KSYN_QUEUE_LREAD] = kw_flr;
		numbers[count] = kw_flr;
		typenum[count] = PTH_RW_TYPE_LREAD;
		count++;
	} else
		lowest[KSYN_QUEUE_LREAD] = 0;

	if ((kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0)) {
		type |= PTH_RWSHFT_TYPE_WRITE;
		/* write entries are present */
		if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0) {
			kw_fwr = kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_firstnum;
			if (((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0) && (is_seqlower(premgen, kw_fwr) != 0))
				kw_fwr = premgen;
		} else
			kw_fwr = premgen;

		lowest[KSYN_QUEUE_WRITER] = kw_fwr;
		numbers[count] = kw_fwr;
		typenum[count] = PTH_RW_TYPE_WRITE;
		count++;
	} else
		lowest[KSYN_QUEUE_WRITER] = 0;

	if ((kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_YWRLOCK) != 0)) {
		type |= PTH_RWSHFT_TYPE_YWRITE;
		/* yielding write entries are present */
		if (kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0) {
			kw_fywr = kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_firstnum;
			if (((flags & KW_UNLOCK_PREPOST_YWRLOCK) != 0) && (is_seqlower(premgen, kw_fywr) != 0))
				kw_fywr = premgen;
		} else
			kw_fywr = premgen;

		lowest[KSYN_QUEUE_YWRITER] = kw_fywr;
		numbers[count] = kw_fywr;
		typenum[count] = PTH_RW_TYPE_YWRITE;
		count++;
	} else
		lowest[KSYN_QUEUE_YWRITER] = 0;

	if (count == 0)
		panic("nothing in the queue???\n");

	low = numbers[0];
	lowtype = typenum[0];
	if (count > 1) {
		for (i = 1; i < count; i++) {
			if (is_seqlower(numbers[i], low) != 0) {
				low = numbers[i];
				lowtype = typenum[i];
			}
		}
	}
	type |= lowtype;

	if (typep != 0)
		*typep = type;
	return(0);
}
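
/*
 * Worked example (illustrative): with one reader waiting at sequence 0x200,
 * one writer at 0x100 and no long readers or yielding writers, the routine
 * reports both kinds present and the writer as the winner, since 0x100 is
 * seq-lower than 0x200:
 *
 *	lowest[KSYN_QUEUE_READ]   = 0x200;
 *	lowest[KSYN_QUEUE_WRITER] = 0x100;
 *	*typep = PTH_RWSHFT_TYPE_READ | PTH_RWSHFT_TYPE_WRITE | PTH_RW_TYPE_WRITE;
 */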
/* wakeup readers and longreaders up to the writer limits */
int
ksyn_wakeupreaders(ksyn_wait_queue_t kwq, uint32_t limitread, int longreadset, int allreaders, uint32_t updatebits, int * wokenp)
{
	uthread_t uth;
	ksyn_queue_t kq;
	int failedwakeup = 0;
	int numwoken = 0;
	kern_return_t kret = KERN_SUCCESS;
	int resetbit = updatebits & PTHRW_RW_HUNLOCK;
	uint32_t lbits;

	lbits = updatebits;
	if (longreadset != 0) {
		/* clear all read and longreads */
		while ((uth = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_READ], kwq)) != NULL) {
			uth->uu_psynchretval = lbits;
			/* set on one thread only */
			if (resetbit != 0) {
				lbits &= ~PTHRW_RW_HUNLOCK;
				resetbit = 0;
			}
			numwoken++;
			uth->uu_kwqqueue = NULL;
			kret = ksyn_wakeup_thread(kwq, uth);
			if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
				panic("ksyn_wakeupreaders: panic waking up readers\n");
			if (kret == KERN_NOT_WAITING) {
				failedwakeup++;
			}
		}
		while ((uth = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_LREAD], kwq)) != NULL) {
			uth->uu_psynchretval = lbits;
			uth->uu_kwqqueue = NULL;
			if (resetbit != 0) {
				lbits &= ~PTHRW_RW_HUNLOCK;
				resetbit = 0;
			}
			numwoken++;
			kret = ksyn_wakeup_thread(kwq, uth);
			if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
				panic("ksyn_wakeupreaders: panic waking up lreaders\n");
			if (kret == KERN_NOT_WAITING) {
				failedwakeup++;
			}
		}
	} else {
		kq = &kwq->kw_ksynqueues[KSYN_QUEUE_READ];
		while ((kq->ksynq_count != 0) && (allreaders || (is_seqlower(kq->ksynq_firstnum, limitread) != 0))) {
			uth = ksyn_queue_removefirst(kq, kwq);
			uth->uu_psynchretval = lbits;
			if (resetbit != 0) {
				lbits &= ~PTHRW_RW_HUNLOCK;
				resetbit = 0;
			}
			numwoken++;
			uth->uu_kwqqueue = NULL;
			kret = ksyn_wakeup_thread(kwq, uth);
			if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
				panic("ksyn_wakeupreaders: panic waking up readers\n");
			if (kret == KERN_NOT_WAITING) {
				failedwakeup++;
			}
		}
	}

	if (wokenp != NULL)
		*wokenp = numwoken;
	return(failedwakeup);
}
/* This handles the unlock grants for the next set on rw_unlock() or on arrival of all preposted waiters */
int
kwq_handle_unlock(ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t * updatep, int flags, int * blockp, uint32_t premgen)
{
	uint32_t low_reader, low_writer, low_ywriter, low_lreader, limitrdnum;
	int rwtype, error = 0;
	int longreadset = 0, allreaders, failed;
	uint32_t updatebits;
	int prepost = flags & KW_UNLOCK_PREPOST;
	thread_t preth = THREAD_NULL;
	uthread_t uth;
	thread_t th;
	int woken = 0;
	int block = 1;
	uint32_t lowest[KSYN_QUEUE_MAX];	/* no need for upgrade as it is handled separately */
	kern_return_t kret = KERN_SUCCESS;

#if _PSYNCH_TRACE_
#if defined(__i386__)
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_START, (uint32_t)kwq, mgen, premgen, 0, 0);
#endif
#endif /* _PSYNCH_TRACE_ */
	if (prepost != 0)
		preth = current_thread();

	/* upgrade pending */
	if (is_rw_ubit_set(mgen)) {
		if ((flags & KW_UNLOCK_PREPOST_UPGRADE) != 0) {
			/* upgrade thread calling the prepost */
			/* upgrade granted */
			block = 0;
			goto out;
		}
		if (kwq->kw_ksynqueues[KSYN_QUEUE_UPGRADE].ksynq_count > 0) {
			uth = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_UPGRADE], kwq);
			uth->uu_psynchretval = (mgen | PTHRW_EBIT) & ~PTHRW_UBIT;
			uth->uu_kwqqueue = NULL;
			kret = ksyn_wakeup_thread(kwq, uth);
			if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
				panic("kwq_handle_unlock: panic waking up the upgrade thread \n");
			if (kret == KERN_NOT_WAITING) {
				kwq->kw_pre_intrcount = 1;	/* actually a count */
				kwq->kw_pre_intrseq = mgen;
				kwq->kw_pre_intrretbits = uth->uu_psynchretval;
				kwq->kw_pre_intrtype = PTH_RW_TYPE_UPGRADE;
			}
		} else {
			panic("panic unable to find the upgrade thread\n");
		}
		goto out;
	}

	error = kwq_find_rw_lowest(kwq, flags, premgen, &rwtype, lowest);
	if (error != 0)
		panic("rwunlock: cannot slot next round of threads");

#if _PSYNCH_TRACE_
#if defined(__i386__)
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_NONE, (uint32_t)kwq, 1, rwtype, lowest, 0);
#endif
#endif /* _PSYNCH_TRACE_ */
	low_reader = lowest[KSYN_QUEUE_READ];
	low_lreader = lowest[KSYN_QUEUE_LREAD];
	low_writer = lowest[KSYN_QUEUE_WRITER];
	low_ywriter = lowest[KSYN_QUEUE_YWRITER];

	updatebits = mgen & ~(PTHRW_EBIT | PTHRW_WBIT | PTHRW_YBIT | PTHRW_UBIT | PTHRW_LBIT);

	allreaders = 0;
	switch (rwtype & PTH_RW_TYPE_MASK) {
		case PTH_RW_TYPE_LREAD:
			longreadset = 1;
			/* FALLTHROUGH: long readers are granted like readers */
		case PTH_RW_TYPE_READ: {
			limitrdnum = 0;
			if (longreadset == 0) {
				switch (rwtype & (PTH_RWSHFT_TYPE_WRITE | PTH_RWSHFT_TYPE_YWRITE)) {
					case PTH_RWSHFT_TYPE_WRITE:
						limitrdnum = low_writer;
						if (((rwtype & PTH_RWSHFT_TYPE_LREAD) != 0) &&
						    (is_seqlower(low_lreader, low_writer) != 0)) {
							longreadset = 1;
						}
						break;
					case PTH_RWSHFT_TYPE_YWRITE:
						/* all readers; only a lower long reader changes that */
						if (((rwtype & PTH_RWSHFT_TYPE_LREAD) != 0) &&
						    (is_seqlower(low_lreader, low_ywriter) != 0)) {
							longreadset = 1;
						} else
							allreaders = 1;
						break;
					case (PTH_RWSHFT_TYPE_WRITE | PTH_RWSHFT_TYPE_YWRITE):
						limitrdnum = low_writer;
						if (((rwtype & PTH_RWSHFT_TYPE_LREAD) != 0) &&
						    (is_seqlower(low_lreader, low_ywriter) != 0)) {
							longreadset = 1;
						}
						break;
					default: /* no writers at all */
						if ((rwtype & PTH_RWSHFT_TYPE_LREAD) != 0)
							longreadset = 1;
						else
							allreaders = 1;
						break;
				};
			}

			if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0)
				updatebits |= PTHRW_WBIT;
			else if ((rwtype & PTH_RWSHFT_TYPE_YWRITE) != 0)
				updatebits |= PTHRW_YBIT;

			if (longreadset == 0) {
				if ((prepost != 0) &&
				    ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) &&
				    ((allreaders != 0) || (is_seqlower(premgen, limitrdnum) != 0))) {
					block = 0;
					uth = current_uthread();
					uth->uu_psynchretval = updatebits;
				}
			} else {
				updatebits |= PTHRW_LBIT;
				if ((prepost != 0) &&
				    ((flags & (KW_UNLOCK_PREPOST_READLOCK | KW_UNLOCK_PREPOST_LREADLOCK)) != 0)) {
					block = 0;
					uth = current_uthread();
					uth->uu_psynchretval = updatebits;
				}
			}

			if (prepost != 0)
				updatebits |= PTHRW_RW_HUNLOCK;

			failed = ksyn_wakeupreaders(kwq, limitrdnum, longreadset, allreaders, updatebits, &woken);
			if (failed != 0) {
				kwq->kw_pre_intrcount = failed;	/* actually a count */
				kwq->kw_pre_intrseq = limitrdnum;
				kwq->kw_pre_intrretbits = updatebits;
				if (longreadset != 0)
					kwq->kw_pre_intrtype = PTH_RW_TYPE_LREAD;
				else
					kwq->kw_pre_intrtype = PTH_RW_TYPE_READ;
			}

			/* if we woke up no one and the current thread is returning, ensure it is doing unlock */
			if ((prepost != 0) && (woken == 0) && (block == 0) && ((updatebits & PTHRW_RW_HUNLOCK) != 0)) {
				uth = current_uthread();
				uth->uu_psynchretval = updatebits;
			}
			error = 0;
		}
		break;

		case PTH_RW_TYPE_WRITE: {
			updatebits |= PTHRW_EBIT;
			if (((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0) && (low_writer == premgen)) {
				/* the preposting thread is itself the lowest writer */
				block = 0;
				if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0)
					updatebits |= PTHRW_WBIT;
				else if ((rwtype & PTH_RWSHFT_TYPE_YWRITE) != 0)
					updatebits |= PTHRW_YBIT;
				th = preth;
				uth = get_bsdthread_info(th);
				uth->uu_psynchretval = updatebits;
			} else {
				/* we are not granting writelock to the preposting thread */
				uth = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], kwq);

				/* if there are writers present or the preposting write thread then W bit is to be set */
				if ((kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0))
					updatebits |= PTHRW_WBIT;
				else if ((rwtype & PTH_RWSHFT_TYPE_YWRITE) != 0)
					updatebits |= PTHRW_YBIT;
				uth->uu_psynchretval = updatebits;
				uth->uu_kwqqueue = NULL;
				/* setup next in the queue */
				kret = ksyn_wakeup_thread(kwq, uth);
				if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
					panic("kwq_handle_unlock: panic waking up writer\n");
				if (kret == KERN_NOT_WAITING) {
					kwq->kw_pre_intrcount = 1;	/* actually a count */
					kwq->kw_pre_intrseq = low_writer;
					kwq->kw_pre_intrretbits = updatebits;
					kwq->kw_pre_intrtype = PTH_RW_TYPE_WRITE;
				}
				error = 0;
			}
		}
		break;

		case PTH_RW_TYPE_YWRITE: {
			/* can reader locks be granted ahead of this write? */
			if ((rwtype & PTH_RWSHFT_TYPE_READ) != 0) {
				if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0)
					updatebits |= PTHRW_WBIT;
				else if ((rwtype & PTH_RWSHFT_TYPE_YWRITE) != 0)
					updatebits |= PTHRW_YBIT;

				if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0) {
					/* is lowest reader less than the low writer? */
					if (is_seqlower(low_reader, low_writer) == 0)
						goto yieldit;
					if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) && (is_seqlower(premgen, low_writer) != 0)) {
						uth = current_uthread();
						uth->uu_psynchretval = updatebits;
						block = 0;
					}
					if (prepost != 0)
						updatebits |= PTHRW_RW_HUNLOCK;

					/* there will be readers to wake up, no need to check for woken */
					failed = ksyn_wakeupreaders(kwq, low_writer, 0, 0, updatebits, NULL);
					if (failed != 0) {
						kwq->kw_pre_intrcount = failed;	/* actually a count */
						kwq->kw_pre_intrseq = low_writer;
						kwq->kw_pre_intrretbits = updatebits;
						kwq->kw_pre_intrtype = PTH_RW_TYPE_READ;
					}
					error = 0;
				} else {
					/* wakeup all readers */
					if ((prepost != 0) && ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0)) {
						uth = current_uthread();
						uth->uu_psynchretval = updatebits;
						block = 0;
					}
					if (prepost != 0)
						updatebits |= PTHRW_RW_HUNLOCK;
					failed = ksyn_wakeupreaders(kwq, low_writer, 0, 1, updatebits, &woken);
					if (failed != 0) {
						kwq->kw_pre_intrcount = failed;	/* actually a count */
						kwq->kw_pre_intrseq = kwq->kw_highseq;
						kwq->kw_pre_intrretbits = updatebits;
						kwq->kw_pre_intrtype = PTH_RW_TYPE_READ;
					}
					/* if we woke up no one and the current thread is returning, ensure it is doing unlock */
					if ((prepost != 0) && (woken == 0) && (block == 0) && ((updatebits & PTHRW_RW_HUNLOCK) != 0)) {
						uth = current_uthread();
						uth->uu_psynchretval = updatebits;
					}
					error = 0;
				}
			} else {
yieldit:
				/* no reads, so granting yielding writes */
				updatebits |= PTHRW_EBIT;

				if (((flags & KW_UNLOCK_PREPOST_YWRLOCK) != 0) && (low_writer == premgen)) {
					/* preposting yielding write thread is being granted exclusive lock */
					block = 0;
					if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0)
						updatebits |= PTHRW_WBIT;
					else if (kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0)
						updatebits |= PTHRW_YBIT;
					th = preth;
					uth = get_bsdthread_info(th);
					uth->uu_psynchretval = updatebits;
				} else {
					/* we are granting yield writelock to some other thread */
					uth = ksyn_queue_removefirst(&kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER], kwq);

					if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0)
						updatebits |= PTHRW_WBIT;
					/* if there are ywriters present or the preposting ywrite thread then Y bit is to be set */
					else if ((kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count != 0) || ((flags & KW_UNLOCK_PREPOST_YWRLOCK) != 0))
						updatebits |= PTHRW_YBIT;

					uth->uu_psynchretval = updatebits;
					uth->uu_kwqqueue = NULL;

					kret = ksyn_wakeup_thread(kwq, uth);
					if ((kret != KERN_SUCCESS) && (kret != KERN_NOT_WAITING))
						panic("kwq_handle_unlock: panic waking up ywriter\n");
					if (kret == KERN_NOT_WAITING) {
						kwq->kw_pre_intrcount = 1;	/* actually a count */
						kwq->kw_pre_intrseq = low_ywriter;
						kwq->kw_pre_intrretbits = updatebits;
						kwq->kw_pre_intrtype = PTH_RW_TYPE_YWRITE;
					}
					error = 0;
				}
			}
		}
		break;

		default:
			panic("rwunlock: invalid type for lock grants");
	};

	if (updatep != NULL)
		*updatep = updatebits;
	if (blockp != NULL)
		*blockp = block;
out:
#if _PSYNCH_TRACE_
#if defined(__i386__)
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_RWHANDLEU | DBG_FUNC_END, (uint32_t)kwq, 0, 0, block, 0);
#endif
#endif /* _PSYNCH_TRACE_ */
	return(error);
}
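
/*
 * Note (illustrative summary, not from the original sources): the updatebits
 * word handed back to user space above is the caller's generation with the
 * state bits recomputed: PTHRW_EBIT for an exclusive grant, PTHRW_WBIT or
 * PTHRW_YBIT while writers or yielding writers remain queued, PTHRW_LBIT for
 * a long read grant, and PTHRW_RW_HUNLOCK while a prepost wakeup is in
 * flight.
 */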
/* handle downgrade actions */
int
kwq_handle_downgrade(ksyn_wait_queue_t kwq, uint32_t mgen, __unused int flags, __unused uint32_t premgen, __unused int * blockp)
{
	uint32_t updatebits, lowriter = 0;
	int longreadset, allreaders, count;

	/* can handle downgrade now */
	updatebits = mgen;

	longreadset = 0;
	allreaders = 0;
	if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_count > 0) {
		lowriter = kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_firstnum;
		if (kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count > 0) {
			if (is_seqlower(kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_firstnum, lowriter) != 0)
				longreadset = 1;
		}
	} else {
		allreaders = 1;
		if (kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_count > 0) {
			lowriter = kwq->kw_ksynqueues[KSYN_QUEUE_YWRITER].ksynq_firstnum;
			if (kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_count > 0) {
				if (is_seqlower(kwq->kw_ksynqueues[KSYN_QUEUE_LREAD].ksynq_firstnum, lowriter) != 0)
					longreadset = 1;
			}
		}
	}

	count = ksyn_wakeupreaders(kwq, lowriter, longreadset, allreaders, updatebits, NULL);
	if (count != 0) {
		kwq->kw_pre_limrd = count;
		kwq->kw_pre_limrdseq = lowriter;
		kwq->kw_pre_limrdbits = lowriter;	/* need to handle prepost */
	}
	return(0);
}
/************* Indiv queue support routines ************************/
void
ksyn_queue_init(ksyn_queue_t kq)
{
	TAILQ_INIT(&kq->ksynq_uthlist);
	kq->ksynq_count = 0;
	kq->ksynq_firstnum = 0;
	kq->ksynq_lastnum = 0;
}
int
ksyn_queue_insert(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t mgen, struct uthread * uth, int fit)
{
	uint32_t lockseq = mgen & PTHRW_COUNT_MASK;
	struct uthread * q_uth, * r_uth;

	if (kq->ksynq_count == 0) {
		TAILQ_INSERT_HEAD(&kq->ksynq_uthlist, uth, uu_mtxlist);
		kq->ksynq_firstnum = lockseq;
		kq->ksynq_lastnum = lockseq;
		goto out;
	}

	if (fit == FIRSTFIT) {
		/* firstfit, arriving order */
		TAILQ_INSERT_TAIL(&kq->ksynq_uthlist, uth, uu_mtxlist);
		if (is_seqlower(lockseq, kq->ksynq_firstnum) != 0)
			kq->ksynq_firstnum = lockseq;
		if (is_seqhigher(lockseq, kq->ksynq_lastnum) != 0)
			kq->ksynq_lastnum = lockseq;
		goto out;
	}

	if ((lockseq == kq->ksynq_firstnum) || (lockseq == kq->ksynq_lastnum))
		panic("ksyn_queue_insert: two threads with same lockseq ");

	/* check for next seq one */
	if (is_seqlower(kq->ksynq_lastnum, lockseq) != 0) {
		TAILQ_INSERT_TAIL(&kq->ksynq_uthlist, uth, uu_mtxlist);
		kq->ksynq_lastnum = lockseq;
		goto out;
	}

	if (is_seqlower(lockseq, kq->ksynq_firstnum) != 0) {
		TAILQ_INSERT_HEAD(&kq->ksynq_uthlist, uth, uu_mtxlist);
		kq->ksynq_firstnum = lockseq;
		goto out;
	}

	/* go to slow insert mode */
	TAILQ_FOREACH_SAFE(q_uth, &kq->ksynq_uthlist, uu_mtxlist, r_uth) {
		if (is_seqhigher(q_uth->uu_lockseq, lockseq) != 0) {
			TAILQ_INSERT_BEFORE(q_uth, uth, uu_mtxlist);
			goto out;
		}
	}

	panic("ksyn_queue_insert: failed to insert\n");
out:
	kq->ksynq_count++;
	kwq->kw_inqueue++;
	update_low_high(kwq, lockseq);
	return(0);
}
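
/*
 * Usage sketch (illustrative): SEQFIT keeps the queue sorted by lock
 * sequence number so grants can be handed out in sequence order, while
 * FIRSTFIT simply appends in arrival order.  The rwlock paths above queue
 * strictly by sequence:
 *
 *	error = ksyn_queue_insert(kwq,
 *			&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER], lgen, uth, SEQFIT);
 */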
uthread_t
ksyn_queue_removefirst(ksyn_queue_t kq, ksyn_wait_queue_t kwq)
{
	uthread_t uth = NULL;
	uthread_t q_uth;
	uint32_t curseq;

	if (kq->ksynq_count != 0) {
		uth = TAILQ_FIRST(&kq->ksynq_uthlist);
		TAILQ_REMOVE(&kq->ksynq_uthlist, uth, uu_mtxlist);
		curseq = uth->uu_lockseq & PTHRW_COUNT_MASK;
		kq->ksynq_count--;
		kwq->kw_inqueue--;

		if (kq->ksynq_count != 0) {
			q_uth = TAILQ_FIRST(&kq->ksynq_uthlist);
			kq->ksynq_firstnum = (q_uth->uu_lockseq & PTHRW_COUNT_MASK);
		} else {
			kq->ksynq_firstnum = 0;
			kq->ksynq_lastnum = 0;
		}
		if (kwq->kw_inqueue == 0) {
			kwq->kw_lowseq = 0;
			kwq->kw_highseq = 0;
		} else {
			if (kwq->kw_lowseq == curseq)
				kwq->kw_lowseq = find_nextlowseq(kwq);
			if (kwq->kw_highseq == curseq)
				kwq->kw_highseq = find_nexthighseq(kwq);
		}
	}
	return(uth);
}
void
ksyn_queue_removeitem(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uthread_t uth)
{
	uthread_t q_uth;
	uint32_t curseq;

	if (kq->ksynq_count > 0) {
		TAILQ_REMOVE(&kq->ksynq_uthlist, uth, uu_mtxlist);
		kq->ksynq_count--;
		if (kq->ksynq_count != 0) {
			q_uth = TAILQ_FIRST(&kq->ksynq_uthlist);
			kq->ksynq_firstnum = (q_uth->uu_lockseq & PTHRW_COUNT_MASK);
		} else {
			kq->ksynq_firstnum = 0;
			kq->ksynq_lastnum = 0;
		}
		kwq->kw_inqueue--;
		curseq = uth->uu_lockseq & PTHRW_COUNT_MASK;
		if (kwq->kw_inqueue == 0) {
			kwq->kw_lowseq = 0;
			kwq->kw_highseq = 0;
		} else {
			if (kwq->kw_lowseq == curseq)
				kwq->kw_lowseq = find_nextlowseq(kwq);
			if (kwq->kw_highseq == curseq)
				kwq->kw_highseq = find_nexthighseq(kwq);
		}
	}
}
void
update_low_high(ksyn_wait_queue_t kwq, uint32_t lockseq)
{
	if (kwq->kw_inqueue == 1) {
		kwq->kw_lowseq = lockseq;
		kwq->kw_highseq = lockseq;
	} else {
		if (is_seqlower(lockseq, kwq->kw_lowseq) != 0)
			kwq->kw_lowseq = lockseq;
		if (is_seqhigher(lockseq, kwq->kw_highseq) != 0)
			kwq->kw_highseq = lockseq;
	}
}
uint32_t
find_nextlowseq(ksyn_wait_queue_t kwq)
{
	uint32_t numbers[KSYN_QUEUE_MAX];
	int count = 0, i;
	uint32_t lowest;

	for (i = 0; i < KSYN_QUEUE_MAX; i++) {
		if (kwq->kw_ksynqueues[i].ksynq_count != 0) {
			numbers[count] = kwq->kw_ksynqueues[i].ksynq_firstnum;
			count++;
		}
	}

	if (count == 0)
		return(0);

	lowest = numbers[0];
	if (count > 1) {
		for (i = 1; i < count; i++) {
			if (is_seqlower(numbers[i], lowest) != 0)
				lowest = numbers[i];
		}
	}
	return(lowest);
}
uint32_t
find_nexthighseq(ksyn_wait_queue_t kwq)
{
	uint32_t numbers[KSYN_QUEUE_MAX];
	int count = 0, i;
	uint32_t highest;

	for (i = 0; i < KSYN_QUEUE_MAX; i++) {
		if (kwq->kw_ksynqueues[i].ksynq_count != 0) {
			numbers[count] = kwq->kw_ksynqueues[i].ksynq_lastnum;
			count++;
		}
	}

	if (count == 0)
		return(0);

	highest = numbers[0];
	if (count > 1) {
		for (i = 1; i < count; i++) {
			if (is_seqhigher(numbers[i], highest) != 0)
				highest = numbers[i];
		}
	}
	return(highest);
}
int
find_diff(uint32_t upto, uint32_t lowest)
{
	uint32_t diff;

	if (upto == lowest)
		return(0);
	diff = diff_genseq(upto, lowest);
	diff = (diff >> PTHRW_COUNT_SHIFT);
	return(diff);
}
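
/*
 * Worked example (illustrative, assuming PTHRW_INC == 1 << PTHRW_COUNT_SHIFT
 * as the generation-word layout implies): generation words advance by
 * PTHRW_INC per waiter, so the raw sequence difference is shifted down by
 * PTHRW_COUNT_SHIFT to recover a waiter count.  If upto leads lowest by
 * 4 * PTHRW_INC, diff_genseq() yields (4 << PTHRW_COUNT_SHIFT) and
 * find_diff() returns 4.
 */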
int
find_seq_till(ksyn_wait_queue_t kwq, uint32_t upto, uint32_t nwaiters, uint32_t *countp)
{
	int i;
	uint32_t count = 0;

#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_FSEQTILL | DBG_FUNC_START, 0, 0, upto, nwaiters, 0);
#endif /* _PSYNCH_TRACE_ */

	for (i = 0; i < KSYN_QUEUE_MAX; i++) {
		count += ksyn_queue_count_tolowest(&kwq->kw_ksynqueues[i], upto);
#if _PSYNCH_TRACE_
		KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_FSEQTILL | DBG_FUNC_NONE, 0, 1, i, count, 0);
#endif /* _PSYNCH_TRACE_ */
		if (count >= nwaiters) {
			break;
		}
	}

	if (countp != NULL) {
		*countp = count;
	}
#if _PSYNCH_TRACE_
	KERNEL_DEBUG_CONSTANT(_PSYNCH_TRACE_FSEQTILL | DBG_FUNC_END, 0, 0, count, nwaiters, 0);
#endif /* _PSYNCH_TRACE_ */
	if (count >= nwaiters)
		return(1);
	else
		return(0);
}
uint32_t
ksyn_queue_count_tolowest(ksyn_queue_t kq, uint32_t upto)
{
	uint32_t i = 0;
	uthread_t uth, newuth;
	uint32_t curval;

	/* if nothing or the first num is greater than upto, return none */
	if ((kq->ksynq_count == 0) || (is_seqhigher(kq->ksynq_firstnum, upto) != 0))
		return(0);
	if (upto == kq->ksynq_firstnum)
		return(1);

	TAILQ_FOREACH_SAFE(uth, &kq->ksynq_uthlist, uu_mtxlist, newuth) {
		curval = (uth->uu_lockseq & PTHRW_COUNT_MASK);
		if (upto == curval) {
			i++;
			break;
		} else if (is_seqhigher(curval, upto) != 0) {
			break;
		} else {
			/* seq is lower */
			i++;
		}
	}
	return(i);
}
/* find the thread by sequence number and remove it from the queue */
uthread_t
ksyn_queue_find_seq(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t seq)
{
	uthread_t q_uth, r_uth;

	/* case where wrap in the tail of the queue exists */
	TAILQ_FOREACH_SAFE(q_uth, &kq->ksynq_uthlist, uu_mtxlist, r_uth) {
		if (q_uth->uu_lockseq == seq) {
			ksyn_queue_removeitem(kwq, kq, q_uth);