/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995-2005 Apple Computer, Inc. All Rights Reserved */
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
//#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/systm.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/kdebug.h>
//#include <sys/sysproto.h>
//#include <sys/pthread_internal.h>

#include <mach/mach_types.h>
#include <mach/vm_prot.h>
#include <mach/semaphore.h>
#include <mach/sync_policy.h>
#include <mach/task.h>
#include <kern/kern_types.h>
#include <kern/task.h>
#include <kern/clock.h>
#include <mach/kern_return.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/thread_call.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/sched_prim.h>
#include <kern/processor.h>
#include <kern/block_hint.h>
#include <kern/turnstile.h>
//#include <kern/mach_param.h>
#include <mach/mach_vm.h>
#include <mach/mach_param.h>
#include <mach/thread_policy.h>
#include <mach/message.h>
#include <mach/port.h>
//#include <vm/vm_protos.h>
#include <vm/vm_map.h>
#include <mach/vm_region.h>

#include <libkern/OSAtomic.h>

#include <pexpert/pexpert.h>

#include "kern_internal.h"
#include "synch_internal.h"
#include "kern_trace.h"
typedef struct uthread *uthread_t;

//#define __FAILEDUSERTEST__(s) do { panic(s); } while (0)
#define __FAILEDUSERTEST__(s) do { printf("PSYNCH: pid[%d]: %s\n", proc_pid(current_proc()), s); } while (0)
#define __FAILEDUSERTEST2__(s, x...) do { printf("PSYNCH: pid[%d]: " s "\n", proc_pid(current_proc()), x); } while (0)

lck_mtx_t *pthread_list_mlock;

#define PTH_HASHSIZE 100

static LIST_HEAD(pthhashhead, ksyn_wait_queue) *pth_glob_hashtbl;
static unsigned long pthhash;

static LIST_HEAD(, ksyn_wait_queue) pth_free_list;

static zone_t kwq_zone;	/* zone for allocation of ksyn_queue */
static zone_t kwe_zone;	/* zone for allocation of ksyn_waitq_element */

#define SEQFIT 0
#define FIRSTFIT 1

struct ksyn_queue {
	TAILQ_HEAD(ksynq_kwelist_head, ksyn_waitq_element) ksynq_kwelist;
	uint32_t	ksynq_count;		/* number of entries in queue */
	uint32_t	ksynq_firstnum;		/* lowest seq in queue */
	uint32_t	ksynq_lastnum;		/* highest seq in queue */
};
typedef struct ksyn_queue *ksyn_queue_t;
typedef enum ksyn_queue_type {
	KSYN_QUEUE_READ = 0,
	KSYN_QUEUE_WRITE,
	KSYN_QUEUE_MAX,
} kwq_queue_type_t;

typedef enum kwq_intr_type {
	KWQ_INTR_NONE = 0,
	KWQ_INTR_READ = 0x1,
	KWQ_INTR_WRITE = 0x2,
} kwq_intr_type_t;
struct ksyn_wait_queue {
	LIST_ENTRY(ksyn_wait_queue) kw_hash;
	LIST_ENTRY(ksyn_wait_queue) kw_list;
	user_addr_t kw_addr;
	thread_t kw_owner;		/* current owner or THREAD_NULL, has a +1 */
	uint64_t kw_object;		/* object backing in shared mode */
	uint64_t kw_offset;		/* offset inside the object in shared mode */
	int	kw_pflags;		/* flags under listlock protection */
	struct timeval kw_ts;		/* timeval need for upkeep before free */
	int	kw_iocount;		/* inuse reference */
	int	kw_dropcount;		/* current users unlocking... */

	int	kw_type;		/* queue type like mutex, cvar, etc */
	uint32_t kw_inqueue;		/* num of waiters held */
	uint32_t kw_fakecount;		/* number of error/prepost fakes */
	uint32_t kw_highseq;		/* highest seq in the queue */
	uint32_t kw_lowseq;		/* lowest seq in the queue */
	uint32_t kw_lword;		/* L value from userland */
	uint32_t kw_uword;		/* U word value from userland */
	uint32_t kw_sword;		/* S word value from userland */
	uint32_t kw_lastunlockseq;	/* the last seq that unlocked */
/* for CV to be used as the seq kernel has seen so far */
#define kw_cvkernelseq kw_lastunlockseq
	uint32_t kw_lastseqword;	/* the last seq that unlocked */
/* for mutex and cvar we need to track I bit values */
	uint32_t kw_nextseqword;	/* the last seq that unlocked; with num of waiters */
	struct {
		uint32_t count;		/* prepost count */
		uint32_t lseq;		/* prepost target seq */
		uint32_t sseq;		/* prepost target sword, in cvar used for mutexowned */
	} kw_prepost;
	struct {
		kwq_intr_type_t type;	/* type of failed wakeups */
		uint32_t count;		/* prepost of missed wakeup due to intrs */
		uint32_t seq;		/* prepost of missed wakeup limit seq */
		uint32_t returnbits;	/* return bits value for missed wakeup threads */
	} kw_intr;

	int	kw_kflags;
	int	kw_qos_override;	/* QoS of max waiter during contention period */
	struct turnstile *kw_turnstile;
	struct ksyn_queue kw_ksynqueues[KSYN_QUEUE_MAX];	/* queues to hold threads */
	lck_spin_t kw_lock;		/* spinlock protecting this structure */
};
typedef struct ksyn_wait_queue * ksyn_wait_queue_t;
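
/*
 * Note: roughly speaking, kw_lword/kw_uword/kw_sword shadow the userland
 * L (lock), U (unlock) and S (sequence) generation words for the object,
 * and are compared with the wrap-aware is_seqlower()/is_seqhigher()
 * helpers rather than with plain integer comparisons.  kw_prepost records
 * a wakeup that arrived before its waiter reached the kernel; kw_intr
 * records a wakeup that was granted to a thread which had already been
 * interrupted, so a later arriving waiter can consume it instead.
 */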
#define TID_ZERO (uint64_t)0

/* bits needed in handling the rwlock unlock */
#define PTH_RW_TYPE_READ	0x01
#define PTH_RW_TYPE_WRITE	0x04
#define PTH_RW_TYPE_MASK	0xff
#define PTH_RW_TYPE_SHIFT	8

#define PTH_RWSHFT_TYPE_READ	0x0100
#define PTH_RWSHFT_TYPE_WRITE	0x0400
#define PTH_RWSHFT_TYPE_MASK	0xff00

/*
 * Mutex pshared attributes
 */
#define PTHREAD_PROCESS_SHARED		_PTHREAD_MTX_OPT_PSHARED
#define PTHREAD_PROCESS_PRIVATE		0x20
#define PTHREAD_PSHARED_FLAGS_MASK	0x30

/*
 * Mutex policy attributes
 */
#define _PTHREAD_MTX_OPT_POLICY_FAIRSHARE	0x040	/* 1 */
#define _PTHREAD_MTX_OPT_POLICY_FIRSTFIT	0x080	/* 2 */
#define _PTHREAD_MTX_OPT_POLICY_MASK		0x1c0

#define KSYN_WQ_INHASH	2
#define KSYN_WQ_SHARED	4
#define KSYN_WQ_WAITING	8	/* threads waiting for this wq to be available */
#define KSYN_WQ_FLIST	0x10	/* in free list to be freed after a short delay */

#define KSYN_KWF_INITCLEARED	0x1	/* the init status found and preposts cleared */
#define KSYN_KWF_ZEROEDOUT	0x2	/* the lword, etc are inited to 0 */
#define KSYN_KWF_QOS_APPLIED	0x4	/* QoS override applied to owner */
#define KSYN_KWF_OVERLAP_GUARD	0x8	/* overlap guard */

#define KSYN_CLEANUP_DEADLINE 10
static int psynch_cleanupset;
thread_call_t psynch_thcall;

#define KSYN_WQTYPE_INWAIT	0x1000
#define KSYN_WQTYPE_INDROP	0x2000
#define KSYN_WQTYPE_MTX		0x01
#define KSYN_WQTYPE_CVAR	0x02
#define KSYN_WQTYPE_RWLOCK	0x04
#define KSYN_WQTYPE_SEMA	0x08
#define KSYN_WQTYPE_MASK	0xff

#define KSYN_WQTYPE_MUTEXDROP	(KSYN_WQTYPE_INDROP | KSYN_WQTYPE_MTX)
static inline int
_kwq_type(ksyn_wait_queue_t kwq)
{
	return (kwq->kw_type & KSYN_WQTYPE_MASK);
}

static inline bool
_kwq_use_turnstile(ksyn_wait_queue_t kwq)
{
	// <rdar://problem/15926625> If we had writer-owner information from the
	// rwlock then we could use the turnstile to push on it. For now, only
	// plain mutexes use it.
	return (_kwq_type(kwq) == KSYN_WQTYPE_MTX);
}
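
/*
 * Note: as a consequence, only mutex-type kwqs hand a turnstile store to
 * the psynch_wait_* shims below (see ksyn_wait()); cvar and rwlock waiters
 * block on the global wait queue instead, since the kernel cannot name a
 * single owner thread to push priority on for those objects.
 */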
#define KW_UNLOCK_PREPOST		0x01
#define KW_UNLOCK_PREPOST_READLOCK	0x08
#define KW_UNLOCK_PREPOST_WRLOCK	0x20

static int ksyn_wq_hash_lookup(user_addr_t uaddr, proc_t p, int flags, ksyn_wait_queue_t *kwq, struct pthhashhead **hashptr, uint64_t object, uint64_t offset);
static int ksyn_wqfind(user_addr_t mutex, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, int flags, int wqtype, ksyn_wait_queue_t *wq);
static void ksyn_wqrelease(ksyn_wait_queue_t mkwq, int qfreenow, int wqtype);
static int ksyn_findobj(user_addr_t uaddr, uint64_t *objectp, uint64_t *offsetp);

static int _wait_result_to_errno(wait_result_t result);

static int ksyn_wait(ksyn_wait_queue_t, kwq_queue_type_t, uint32_t, int, uint64_t, uint16_t, thread_continue_t, block_hint_t);
static kern_return_t ksyn_signal(ksyn_wait_queue_t, kwq_queue_type_t, ksyn_waitq_element_t, uint32_t);
static void ksyn_freeallkwe(ksyn_queue_t kq);

static kern_return_t ksyn_mtxsignal(ksyn_wait_queue_t, ksyn_waitq_element_t kwe, uint32_t, thread_t *);

static int kwq_handle_unlock(ksyn_wait_queue_t, uint32_t mgen, uint32_t rw_wc, uint32_t *updatep, int flags, int *blockp, uint32_t premgen);

static void ksyn_queue_init(ksyn_queue_t kq);
static int ksyn_queue_insert(ksyn_wait_queue_t kwq, int kqi, ksyn_waitq_element_t kwe, uint32_t mgen, int firstfit);
static void ksyn_queue_remove_item(ksyn_wait_queue_t kwq, ksyn_queue_t kq, ksyn_waitq_element_t kwe);
static void ksyn_queue_free_items(ksyn_wait_queue_t kwq, int kqi, uint32_t upto, int all);

static void update_low_high(ksyn_wait_queue_t kwq, uint32_t lockseq);
static uint32_t find_nextlowseq(ksyn_wait_queue_t kwq);
static uint32_t find_nexthighseq(ksyn_wait_queue_t kwq);
static int find_seq_till(ksyn_wait_queue_t kwq, uint32_t upto, uint32_t nwaiters, uint32_t *countp);

static uint32_t ksyn_queue_count_tolowest(ksyn_queue_t kq, uint32_t upto);

static ksyn_waitq_element_t ksyn_queue_find_cvpreposeq(ksyn_queue_t kq, uint32_t cgen);
static void ksyn_handle_cvbroad(ksyn_wait_queue_t ckwq, uint32_t upto, uint32_t *updatep);
static void ksyn_cvupdate_fixup(ksyn_wait_queue_t ckwq, uint32_t *updatep);
static ksyn_waitq_element_t ksyn_queue_find_signalseq(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t toseq, uint32_t lockseq);

static void __dead2 psynch_cvcontinue(void *, wait_result_t);
static void __dead2 psynch_mtxcontinue(void *, wait_result_t);
static void __dead2 psynch_rw_rdcontinue(void *, wait_result_t);
static void __dead2 psynch_rw_wrcontinue(void *, wait_result_t);

static int ksyn_wakeupreaders(ksyn_wait_queue_t kwq, uint32_t limitread, int allreaders, uint32_t updatebits, int *wokenp);
static int kwq_find_rw_lowest(ksyn_wait_queue_t kwq, int flags, uint32_t premgen, int *type, uint32_t lowest[]);
static ksyn_waitq_element_t ksyn_queue_find_seq(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t seq);
static void
UPDATE_CVKWQ(ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t ugen, uint32_t rw_wc)
{
	int sinit = ((rw_wc & PTH_RWS_CV_CBIT) != 0);

	// assert((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_CVAR);

	if ((kwq->kw_kflags & KSYN_KWF_ZEROEDOUT) != 0) {
		/* the values of L,U and S are cleared out due to L==S in previous transition */
		kwq->kw_lword = mgen;
		kwq->kw_uword = ugen;
		kwq->kw_sword = rw_wc;
		kwq->kw_kflags &= ~KSYN_KWF_ZEROEDOUT;
	} else {
		if (is_seqhigher(mgen, kwq->kw_lword)) {
			kwq->kw_lword = mgen;
		}
		if (is_seqhigher(ugen, kwq->kw_uword)) {
			kwq->kw_uword = ugen;
		}
		if (sinit && is_seqhigher(rw_wc, kwq->kw_sword)) {
			kwq->kw_sword = rw_wc;
		}
	}
	if (sinit && is_seqlower(kwq->kw_cvkernelseq, rw_wc)) {
		kwq->kw_cvkernelseq = (rw_wc & PTHRW_COUNT_MASK);
	}
}
static void
_kwq_clear_preposted_wakeup(ksyn_wait_queue_t kwq)
{
	kwq->kw_prepost.lseq = 0;
	kwq->kw_prepost.sseq = PTHRW_RWS_INIT;
	kwq->kw_prepost.count = 0;
}

static void
_kwq_mark_preposted_wakeup(ksyn_wait_queue_t kwq, uint32_t count,
		uint32_t lseq, uint32_t sseq)
{
	kwq->kw_prepost.count = count;
	kwq->kw_prepost.lseq = lseq;
	kwq->kw_prepost.sseq = sseq;
}

static void
_kwq_clear_interrupted_wakeup(ksyn_wait_queue_t kwq)
{
	kwq->kw_intr.type = KWQ_INTR_NONE;
	kwq->kw_intr.count = 0;
	kwq->kw_intr.seq = 0;
	kwq->kw_intr.returnbits = 0;
}

static void
_kwq_mark_interruped_wakeup(ksyn_wait_queue_t kwq, kwq_intr_type_t type,
		uint32_t count, uint32_t lseq, uint32_t returnbits)
{
	kwq->kw_intr.count = count;
	kwq->kw_intr.seq = lseq;
	kwq->kw_intr.returnbits = returnbits;
	kwq->kw_intr.type = type;
}
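
/*
 * Note (illustrative): a typical sequence appears to be that a failed
 * wakeup (KERN_NOT_WAITING) is parked with something like
 *
 *	_kwq_mark_interruped_wakeup(kwq, KWQ_INTR_WRITE, 1, nextgen, updatebits);
 *
 * and the next contender entering the kernel consumes it via
 * _kwq_handle_interrupted_wakeup() before queueing itself.
 */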
static void
_kwq_destroy(ksyn_wait_queue_t kwq)
{
	if (kwq->kw_owner) {
		thread_deallocate(kwq->kw_owner);
	}
	lck_spin_destroy(&kwq->kw_lock, pthread_lck_grp);
	zfree(kwq_zone, kwq);
}
#define KWQ_SET_OWNER_TRANSFER_REF 0x1

static inline thread_t
_kwq_set_owner(ksyn_wait_queue_t kwq, thread_t new_owner, int flags)
{
	thread_t old_owner = kwq->kw_owner;
	if (old_owner == new_owner) {
		if (flags & KWQ_SET_OWNER_TRANSFER_REF) return new_owner;
		return THREAD_NULL;
	}
	if ((flags & KWQ_SET_OWNER_TRANSFER_REF) == 0) {
		thread_reference(new_owner);
	}
	kwq->kw_owner = new_owner;
	return old_owner;
}

static inline thread_t
_kwq_clear_owner(ksyn_wait_queue_t kwq)
{
	return _kwq_set_owner(kwq, THREAD_NULL, KWQ_SET_OWNER_TRANSFER_REF);
}

static inline void
_kwq_cleanup_old_owner(thread_t *thread)
{
	if (*thread) {
		thread_deallocate(*thread);
		*thread = THREAD_NULL;
	}
}
static void
CLEAR_REINIT_BITS(ksyn_wait_queue_t kwq)
{
	if ((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_CVAR) {
		if (kwq->kw_inqueue != 0 && kwq->kw_inqueue != kwq->kw_fakecount) {
			panic("CV:entries in queue during reinit %d:%d\n", kwq->kw_inqueue, kwq->kw_fakecount);
		}
	}
	if ((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_RWLOCK) {
		kwq->kw_nextseqword = PTHRW_RWS_INIT;
		kwq->kw_kflags &= ~KSYN_KWF_OVERLAP_GUARD;
	}
	_kwq_clear_preposted_wakeup(kwq);
	kwq->kw_lastunlockseq = PTHRW_RWL_INIT;
	kwq->kw_lastseqword = PTHRW_RWS_INIT;
	_kwq_clear_interrupted_wakeup(kwq);
	kwq->kw_sword = PTHRW_RWS_INIT;
}
static bool
_kwq_handle_preposted_wakeup(ksyn_wait_queue_t kwq, uint32_t type,
		uint32_t lseq, uint32_t *retval)
{
	if (kwq->kw_prepost.count == 0 ||
			!is_seqlower_eq(lseq, kwq->kw_prepost.lseq)) {
		return false;
	}

	kwq->kw_prepost.count--;
	if (kwq->kw_prepost.count > 0) {
		return false;
	}

	int error, should_block = 0;
	uint32_t updatebits = 0;
	uint32_t pp_lseq = kwq->kw_prepost.lseq;
	uint32_t pp_sseq = kwq->kw_prepost.sseq;
	_kwq_clear_preposted_wakeup(kwq);

	kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;

	error = kwq_handle_unlock(kwq, pp_lseq, pp_sseq, &updatebits,
			(type | KW_UNLOCK_PREPOST), &should_block, lseq);
	if (error) {
		panic("_kwq_handle_preposted_wakeup: kwq_handle_unlock failed %d",
				error);
	}

	if (should_block) {
		return false;
	}
	*retval = updatebits;
	return true;
}
static bool
_kwq_handle_overlap(ksyn_wait_queue_t kwq, uint32_t type, uint32_t lgenval,
		uint32_t rw_wc, uint32_t *retval)
{
	bool res = false;

	// overlaps only occur on read lockers
	if (type != PTH_RW_TYPE_READ) {
		return false;
	}

	// check for overlap and no pending W bit (indicates writers)
	if ((kwq->kw_kflags & KSYN_KWF_OVERLAP_GUARD) &&
			!is_rws_savemask_set(rw_wc) && !is_rwl_wbit_set(lgenval)) {
		/* overlap is set, so no need to check for valid state for overlap */

		if (is_seqlower_eq(rw_wc, kwq->kw_nextseqword) || is_seqhigher_eq(kwq->kw_lastseqword, rw_wc)) {
			/* increase the next expected seq by one */
			kwq->kw_nextseqword += PTHRW_INC;
			/* set count by one & bits from the nextseq and add M bit */
			*retval = PTHRW_INC | ((kwq->kw_nextseqword & PTHRW_BIT_MASK) | PTH_RWL_MBIT);
			res = true;
		}
	}
	return res;
}
static bool
_kwq_is_used(ksyn_wait_queue_t kwq)
{
	return (kwq->kw_inqueue != 0 || kwq->kw_prepost.count != 0 ||
			kwq->kw_intr.count != 0);
}
/*
 * consumes a pending interrupted waiter, returns true if the current
 * thread should return back to userspace because it was previously
 * interrupted.
 */
static bool
_kwq_handle_interrupted_wakeup(ksyn_wait_queue_t kwq, kwq_intr_type_t type,
		uint32_t lseq, uint32_t *retval)
{
	if (kwq->kw_intr.count != 0 && kwq->kw_intr.type == type &&
			(!kwq->kw_intr.seq || is_seqlower_eq(lseq, kwq->kw_intr.seq))) {
		kwq->kw_intr.count--;
		*retval = kwq->kw_intr.returnbits;
		if (kwq->kw_intr.returnbits == 0) {
			_kwq_clear_interrupted_wakeup(kwq);
		}
		return true;
	}
	return false;
}
static void
pthread_list_lock(void)
{
	lck_mtx_lock_spin(pthread_list_mlock);
}

static void
pthread_list_unlock(void)
{
	lck_mtx_unlock(pthread_list_mlock);
}

static void
ksyn_wqlock(ksyn_wait_queue_t kwq)
{
	lck_spin_lock(&kwq->kw_lock);
}

static void
ksyn_wqunlock(ksyn_wait_queue_t kwq)
{
	lck_spin_unlock(&kwq->kw_lock);
}
/* routine to drop the mutex unlocks, used both for mutexunlock system call and drop during cond wait */
static uint32_t
_psynch_mutexdrop_internal(ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t ugen,
		int flags)
{
	kern_return_t ret;
	uint32_t returnbits = 0;
	uint32_t updatebits = 0;
	int firstfit = (flags & _PTHREAD_MTX_OPT_POLICY_MASK) ==
			_PTHREAD_MTX_OPT_POLICY_FIRSTFIT;
	uint32_t nextgen = (ugen + PTHRW_INC);
	thread_t old_owner = THREAD_NULL;

	ksyn_wqlock(kwq);
	kwq->kw_lastunlockseq = (ugen & PTHRW_COUNT_MASK);

redrive:
	updatebits = (kwq->kw_highseq & PTHRW_COUNT_MASK) |
			(PTH_RWL_EBIT | PTH_RWL_KBIT);

	if (firstfit) {
		if (kwq->kw_inqueue == 0) {
			uint32_t count = kwq->kw_prepost.count + 1;
			// Increment the number of preposters we have waiting
			_kwq_mark_preposted_wakeup(kwq, count, mgen & PTHRW_COUNT_MASK, 0);
			// We don't know the current owner as we've determined this mutex
			// drop should have a preposted locker inbound into the kernel but
			// we have no way of knowing who it is. When it arrives, the lock
			// path will update the turnstile owner and return it to userspace.
			old_owner = _kwq_clear_owner(kwq);
			pthread_kern->psynch_wait_update_owner(kwq, THREAD_NULL,
					&kwq->kw_turnstile);
			PTHREAD_TRACE(psynch_mutex_kwqprepost, kwq->kw_addr,
					kwq->kw_prepost.lseq, count, 0);
		} else {
			// signal first waiter
			ret = ksyn_mtxsignal(kwq, NULL, updatebits, &old_owner);
			if (ret == KERN_NOT_WAITING) {
				// <rdar://problem/39093536> ksyn_mtxsignal attempts to signal
				// the thread but it sets up the turnstile inheritor first.
				// That means we can't redrive the mutex in a loop without
				// dropping the wq lock and cleaning up the turnstile state.
				ksyn_wqunlock(kwq);
				pthread_kern->psynch_wait_cleanup();
				_kwq_cleanup_old_owner(&old_owner);
				ksyn_wqlock(kwq);
				goto redrive;
			}
		}
	} else {
		bool prepost = false;
		if (kwq->kw_inqueue == 0) {
			// No waiters in the queue.
			prepost = true;
		} else {
			uint32_t low_writer = (kwq->kw_ksynqueues[KSYN_QUEUE_WRITE].ksynq_firstnum & PTHRW_COUNT_MASK);
			if (low_writer == nextgen) {
				/* next seq to be granted found */
				/* since the grant could be cv, make sure mutex wait is set in case the thread interrupted out */
				ret = ksyn_mtxsignal(kwq, NULL,
						updatebits | PTH_RWL_MTX_WAIT, &old_owner);
				if (ret == KERN_NOT_WAITING) {
					/* interrupt post */
					_kwq_mark_interruped_wakeup(kwq, KWQ_INTR_WRITE, 1,
							nextgen, updatebits);
				}
			} else if (is_seqhigher(low_writer, nextgen)) {
				prepost = true;
			} else {
				//__FAILEDUSERTEST__("psynch_mutexdrop_internal: FS mutex unlock sequence higher than the lowest one in queue\n");
				ksyn_waitq_element_t kwe;
				kwe = ksyn_queue_find_seq(kwq,
						&kwq->kw_ksynqueues[KSYN_QUEUE_WRITE], nextgen);
				if (kwe != NULL) {
					/* next seq to be granted found */
					/* since the grant could be cv, make sure mutex wait is set in case the thread interrupted out */
					ret = ksyn_mtxsignal(kwq, kwe,
							updatebits | PTH_RWL_MTX_WAIT, &old_owner);
					if (ret == KERN_NOT_WAITING) {
						goto redrive;
					}
				} else {
					prepost = true;
				}
			}
		}
		if (prepost) {
			if (kwq->kw_prepost.count != 0) {
				__FAILEDUSERTEST__("_psynch_mutexdrop_internal: multiple preposts\n");
			} else {
				_kwq_mark_preposted_wakeup(kwq, 1, nextgen & PTHRW_COUNT_MASK,
						0);
			}
			old_owner = _kwq_clear_owner(kwq);
			pthread_kern->psynch_wait_update_owner(kwq, THREAD_NULL,
					&kwq->kw_turnstile);
		}
	}

	ksyn_wqunlock(kwq);
	pthread_kern->psynch_wait_cleanup();
	_kwq_cleanup_old_owner(&old_owner);
	ksyn_wqrelease(kwq, 1, KSYN_WQTYPE_MUTEXDROP);
	return returnbits;
}
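
/*
 * Note: the drop path above differs by policy.  For first-fit, an empty
 * queue turns the drop into a prepost (any inbound locker may take it);
 * otherwise the first waiter is signalled and a failed signal is redriven
 * after the turnstile state is cleaned up.  For the default (fair-share)
 * policy the waiter whose sequence matches ugen + PTHRW_INC is picked, and
 * a missing waiter is recorded as either an interrupted wakeup or a
 * prepost.
 */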
static int
_ksyn_check_init(ksyn_wait_queue_t kwq, uint32_t lgenval)
{
	int res = (lgenval & PTHRW_RWL_INIT) != 0;
	if (res) {
		if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
			/* first to notice the reset of the lock, clear preposts */
			CLEAR_REINIT_BITS(kwq);
			kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
		}
	}
	return res;
}
/*
 * psynch_mutexwait: This system call is used for contended psynch mutexes to
 * block.
 */
int
_psynch_mutexwait(__unused proc_t p, user_addr_t mutex, uint32_t mgen,
		uint32_t ugen, uint64_t tid, uint32_t flags, uint32_t *retval)
{
	ksyn_wait_queue_t kwq;
	int error = 0;
	int firstfit = (flags & _PTHREAD_MTX_OPT_POLICY_MASK)
			== _PTHREAD_MTX_OPT_POLICY_FIRSTFIT;
	int ins_flags = SEQFIT;
	uint32_t lseq = (mgen & PTHRW_COUNT_MASK);
	uint32_t updatebits = 0;
	thread_t tid_th = THREAD_NULL, old_owner = THREAD_NULL;

	if (firstfit) {
		ins_flags = FIRSTFIT;
	}

	error = ksyn_wqfind(mutex, mgen, ugen, 0, flags,
			(KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_MTX), &kwq);
	if (error != 0) {
		return error;
	}

again:
	ksyn_wqlock(kwq);

	if (_kwq_handle_interrupted_wakeup(kwq, KWQ_INTR_WRITE, lseq, retval)) {
		old_owner = _kwq_set_owner(kwq, current_thread(), 0);
		pthread_kern->psynch_wait_update_owner(kwq, kwq->kw_owner,
				&kwq->kw_turnstile);
		ksyn_wqunlock(kwq);
		_kwq_cleanup_old_owner(&old_owner);
		goto out;
	}

	if (kwq->kw_prepost.count && (firstfit || (lseq == kwq->kw_prepost.lseq))) {
		/* got preposted lock */
		kwq->kw_prepost.count--;

		if (kwq->kw_prepost.count > 0) {
			__FAILEDUSERTEST__("psynch_mutexwait: more than one prepost\n");
			kwq->kw_prepost.lseq += PTHRW_INC; /* look for next one */
			ksyn_wqunlock(kwq);
			error = EBUSY;
			goto out;
		}
		_kwq_clear_preposted_wakeup(kwq);

		if (kwq->kw_inqueue == 0) {
			updatebits = lseq | (PTH_RWL_KBIT | PTH_RWL_EBIT);
		} else {
			updatebits = (kwq->kw_highseq & PTHRW_COUNT_MASK) |
					(PTH_RWL_KBIT | PTH_RWL_EBIT);
		}
		updatebits &= ~PTH_RWL_MTX_WAIT;

		if (updatebits == 0) {
			__FAILEDUSERTEST__("psynch_mutexwait(prepost): returning 0 lseq in mutexwait with no EBIT \n");
		}

		PTHREAD_TRACE(psynch_mutex_kwqprepost, kwq->kw_addr,
				kwq->kw_prepost.lseq, kwq->kw_prepost.count, 1);

		old_owner = _kwq_set_owner(kwq, current_thread(), 0);
		pthread_kern->psynch_wait_update_owner(kwq, kwq->kw_owner,
				&kwq->kw_turnstile);

		ksyn_wqunlock(kwq);
		_kwq_cleanup_old_owner(&old_owner);
		*retval = updatebits;
		goto out;
	}

	// mutexwait passes in an owner hint at the time userspace contended for
	// the mutex, however, the owner tid in the userspace data structure may be
	// unset or SWITCHING (-1), or it may correspond to a stale snapshot after
	// the lock has subsequently been unlocked by another thread.
	if (tid == thread_tid(kwq->kw_owner)) {
		// userspace and kernel agree
	} else if (tid == 0) {
		// contender came in before owner could write TID
		// let's assume that what the kernel knows is accurate
		// for all we know this waiter came in late in the kernel
	} else if (kwq->kw_lastunlockseq != PTHRW_RWL_INIT &&
			is_seqlower(ugen, kwq->kw_lastunlockseq)) {
		// owner is stale, someone has come in and unlocked since this
		// contender read the TID, so assume what is known in the kernel is
		// accurate
	} else if (tid == PTHREAD_MTX_TID_SWITCHING) {
		// userspace didn't know the owner because it was being unlocked, but
		// that unlocker hasn't reached the kernel yet. So assume what is known
		// in the kernel is accurate
	} else {
		// hint is being passed in for a specific thread, and we have no reason
		// not to trust it (like the kernel unlock sequence being higher)
		//
		// So resolve the hint to a thread_t if we haven't done so yet
		// and redrive as we dropped the lock
		if (tid_th == THREAD_NULL) {
			ksyn_wqunlock(kwq);
			tid_th = pthread_kern->task_findtid(current_task(), tid);
			if (tid_th == THREAD_NULL) tid = 0;
			goto again;
		}
		tid_th = _kwq_set_owner(kwq, tid_th, KWQ_SET_OWNER_TRANSFER_REF);
	}

	if (tid_th) {
		// We are on our way to block, and can't drop the spinlock anymore
		pthread_kern->thread_deallocate_safe(tid_th);
		tid_th = THREAD_NULL;
	}
	error = ksyn_wait(kwq, KSYN_QUEUE_WRITE, mgen, ins_flags, 0, 0,
			psynch_mtxcontinue, kThreadWaitPThreadMutex);
	// ksyn_wait drops wait queue lock
out:
	pthread_kern->psynch_wait_cleanup();
	ksyn_wqrelease(kwq, 1, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_MTX));
	if (tid_th) {
		thread_deallocate(tid_th);
	}
	return error;
}
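
/*
 * Note: the owner hint resolved above is only used to seed the turnstile
 * inheritor; when it cannot be trusted (stale, zero, or SWITCHING) the
 * kernel keeps whatever owner it already knows.  Resolving a tid to a
 * thread_t requires dropping the spinlock, hence the goto-again redrive
 * before the waiter is finally queued by ksyn_wait().
 */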
static void __dead2
psynch_mtxcontinue(void *parameter, wait_result_t result)
{
	uthread_t uth = current_uthread();
	ksyn_wait_queue_t kwq = parameter;
	ksyn_waitq_element_t kwe = pthread_kern->uthread_get_uukwe(uth);

	ksyn_wqlock(kwq);

	int error = _wait_result_to_errno(result);
	if (error != 0) {
		if (kwe->kwe_kwqqueue) {
			ksyn_queue_remove_item(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITE], kwe);
		}
	} else {
		uint32_t updatebits = kwe->kwe_psynchretval & ~PTH_RWL_MTX_WAIT;
		pthread_kern->uthread_set_returnval(uth, updatebits);

		if (updatebits == 0) {
			__FAILEDUSERTEST__("psynch_mutexwait: returning 0 lseq in mutexwait with no EBIT \n");
		}
	}

	pthread_kern->psynch_wait_complete(kwq, &kwq->kw_turnstile);

	ksyn_wqunlock(kwq);
	pthread_kern->psynch_wait_cleanup();
	ksyn_wqrelease(kwq, 1, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_MTX));
	pthread_kern->unix_syscall_return(error);
	__builtin_unreachable();
}
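
/*
 * Note: ksyn_wait() parks the thread with thread_block_parameter() and
 * never returns, so all post-wakeup work happens in continuations such as
 * this one: re-take the kwq lock, dequeue the element if it is still on a
 * queue (timeout/interrupt case), propagate kwe_psynchretval to userspace,
 * and exit through unix_syscall_return().
 */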
static void
_psynch_rw_continue(ksyn_wait_queue_t kwq, kwq_queue_type_t kqi,
		wait_result_t result)
{
	uthread_t uth = current_uthread();
	ksyn_waitq_element_t kwe = pthread_kern->uthread_get_uukwe(uth);

	ksyn_wqlock(kwq);

	int error = _wait_result_to_errno(result);
	if (error != 0) {
		if (kwe->kwe_kwqqueue) {
			ksyn_queue_remove_item(kwq, &kwq->kw_ksynqueues[kqi], kwe);
		}
	} else {
		pthread_kern->uthread_set_returnval(uth, kwe->kwe_psynchretval);
	}

	ksyn_wqunlock(kwq);
	ksyn_wqrelease(kwq, 0, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_RWLOCK));

	pthread_kern->unix_syscall_return(error);
	__builtin_unreachable();
}

static void __dead2
psynch_rw_rdcontinue(void *parameter, wait_result_t result)
{
	_psynch_rw_continue(parameter, KSYN_QUEUE_READ, result);
}

static void __dead2
psynch_rw_wrcontinue(void *parameter, wait_result_t result)
{
	_psynch_rw_continue(parameter, KSYN_QUEUE_WRITE, result);
}
/*
 * psynch_mutexdrop: This system call is used for unlock postings on contended psynch mutexes.
 */
int
_psynch_mutexdrop(__unused proc_t p, user_addr_t mutex, uint32_t mgen,
		uint32_t ugen, uint64_t tid __unused, uint32_t flags, uint32_t *retval)
{
	int res;
	ksyn_wait_queue_t kwq;

	res = ksyn_wqfind(mutex, mgen, ugen, 0, flags, KSYN_WQTYPE_MUTEXDROP, &kwq);
	if (res == 0) {
		uint32_t updateval = _psynch_mutexdrop_internal(kwq, mgen, ugen, flags);
		/* drops the kwq reference */
		if (retval) {
			*retval = updateval;
		}
	}

	return res;
}
static kern_return_t
ksyn_mtxsignal(ksyn_wait_queue_t kwq, ksyn_waitq_element_t kwe,
		uint32_t updateval, thread_t *old_owner)
{
	kern_return_t ret;

	if (!kwe) {
		kwe = TAILQ_FIRST(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITE].ksynq_kwelist);
		if (!kwe) {
			panic("ksyn_mtxsignal: panic signaling empty queue");
		}
	}

	PTHREAD_TRACE(psynch_mutex_kwqsignal | DBG_FUNC_START, kwq->kw_addr, kwe,
			thread_tid(kwe->kwe_thread), kwq->kw_inqueue);

	ret = ksyn_signal(kwq, KSYN_QUEUE_WRITE, kwe, updateval);
	if (ret == KERN_SUCCESS) {
		*old_owner = _kwq_set_owner(kwq, kwe->kwe_thread, 0);
	} else {
		*old_owner = _kwq_clear_owner(kwq);
	}
	PTHREAD_TRACE(psynch_mutex_kwqsignal | DBG_FUNC_END, kwq->kw_addr, kwe,
			ret, 0);
	return ret;
}
static void
ksyn_prepost(ksyn_wait_queue_t kwq, ksyn_waitq_element_t kwe, uint32_t state,
		uint32_t lockseq)
{
	bzero(kwe, sizeof(*kwe));
	kwe->kwe_state = state;
	kwe->kwe_lockseq = lockseq;
	kwe->kwe_count = 1;

	(void)ksyn_queue_insert(kwq, KSYN_QUEUE_WRITE, kwe, lockseq, SEQFIT);
	kwq->kw_fakecount++;
}
static void
ksyn_cvsignal(ksyn_wait_queue_t ckwq, thread_t th, uint32_t uptoseq,
		uint32_t signalseq, uint32_t *updatebits, int *broadcast,
		ksyn_waitq_element_t *nkwep)
{
	ksyn_waitq_element_t kwe = NULL;
	ksyn_waitq_element_t nkwe = NULL;
	ksyn_queue_t kq = &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITE];

	uptoseq &= PTHRW_COUNT_MASK;

	// Find the specified thread to wake.
	if (th != THREAD_NULL) {
		uthread_t uth = pthread_kern->get_bsdthread_info(th);
		kwe = pthread_kern->uthread_get_uukwe(uth);
		if (kwe->kwe_kwqqueue != ckwq ||
				is_seqhigher(kwe->kwe_lockseq, uptoseq)) {
			// Unless it's no longer waiting on this CV...
			kwe = NULL;
			// ...in which case we post a broadcast instead.
			*broadcast = 1;
			return;
		}
	} else {
		// If no thread was specified, find any thread to wake (with the right
		// sequence number).
		while (th == THREAD_NULL) {
			if (kwe == NULL) {
				kwe = ksyn_queue_find_signalseq(ckwq, kq, uptoseq, signalseq);
			}
			if (kwe == NULL && nkwe == NULL) {
				// No eligible entries; need to allocate a new
				// entry to prepost. Loop to rescan after
				// reacquiring the lock after allocation in
				// case anything new shows up.
				ksyn_wqunlock(ckwq);
				nkwe = (ksyn_waitq_element_t)zalloc(kwe_zone);
				ksyn_wqlock(ckwq);
			} else {
				break;
			}
		}
	}

	if (kwe != NULL) {
		// If we found a thread to wake...
		if (kwe->kwe_state == KWE_THREAD_INWAIT) {
			if (is_seqlower(kwe->kwe_lockseq, signalseq)) {
				/*
				 * A valid thread in our range, but lower than our signal.
				 * Matching it may leave our match with nobody to wake it if/when
				 * it arrives (the signal originally meant for this thread might
				 * not successfully wake it).
				 *
				 * Convert to broadcast - may cause some spurious wakeups
				 * (allowed by spec), but avoids starvation (better choice).
				 */
				*broadcast = 1;
			} else {
				(void)ksyn_signal(ckwq, KSYN_QUEUE_WRITE, kwe, PTH_RWL_MTX_WAIT);
				*updatebits += PTHRW_INC;
			}
		} else if (kwe->kwe_state == KWE_THREAD_PREPOST) {
			// Merge with existing prepost at same uptoseq.
			kwe->kwe_count += 1;
		} else if (kwe->kwe_state == KWE_THREAD_BROADCAST) {
			// Existing broadcasts subsume this signal.
		} else {
			panic("unknown kwe state\n");
		}
		if (nkwe) {
			/*
			 * If we allocated a new kwe above but then found a different kwe to
			 * use then we need to deallocate the spare one.
			 */
			zfree(kwe_zone, nkwe);
			nkwe = NULL;
		}
	} else if (nkwe != NULL) {
		// ... otherwise, insert the newly allocated prepost.
		ksyn_prepost(ckwq, nkwe, KWE_THREAD_PREPOST, uptoseq);
		nkwe = NULL;
	} else {
		panic("failed to allocate kwe\n");
	}

	*nkwep = nkwe;
}
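
/*
 * Note: three outcomes are possible here -- an in-kernel waiter is woken
 * directly, the signal is merged into (or recorded as) a prepost entry for
 * a waiter that has not arrived yet, or it is upgraded to a broadcast when
 * matching a specific waiter could strand another one.  Spurious wakeups
 * from the broadcast upgrade are permitted by the condition-variable spec.
 */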
static int
__psynch_cvsignal(user_addr_t cv, uint32_t cgen, uint32_t cugen,
		uint32_t csgen, uint32_t flags, int broadcast,
		mach_port_name_t threadport, uint32_t *retval)
{
	int error = 0;
	thread_t th = THREAD_NULL;
	ksyn_wait_queue_t kwq;

	uint32_t uptoseq = cgen & PTHRW_COUNT_MASK;
	uint32_t fromseq = (cugen & PTHRW_COUNT_MASK) + PTHRW_INC;

	// validate sane L, U, and S values
	if ((threadport == 0 && is_seqhigher(fromseq, uptoseq)) || is_seqhigher(csgen, uptoseq)) {
		__FAILEDUSERTEST__("cvbroad: invalid L, U and S values\n");
		return EINVAL;
	}

	if (threadport != 0) {
		th = port_name_to_thread((mach_port_name_t)threadport);
		if (th == THREAD_NULL) {
			return ESRCH;
		}
	}

	error = ksyn_wqfind(cv, cgen, cugen, csgen, flags, (KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INDROP), &kwq);
	if (error == 0) {
		uint32_t updatebits = 0;
		ksyn_waitq_element_t nkwe = NULL;

		ksyn_wqlock(kwq);

		// update L, U and S...
		UPDATE_CVKWQ(kwq, cgen, cugen, csgen);

		PTHREAD_TRACE(psynch_cvar_signal | DBG_FUNC_START, kwq->kw_addr,
				fromseq, uptoseq, broadcast);

		if (!broadcast) {
			// No need to signal if the CV is already balanced.
			if (diff_genseq(kwq->kw_lword, kwq->kw_sword)) {
				ksyn_cvsignal(kwq, th, uptoseq, fromseq, &updatebits,
						&broadcast, &nkwe);
				PTHREAD_TRACE(psynch_cvar_signal, kwq->kw_addr, broadcast, 0,0);
			}
		}

		if (broadcast) {
			ksyn_handle_cvbroad(kwq, uptoseq, &updatebits);
		}

		kwq->kw_sword += (updatebits & PTHRW_COUNT_MASK);
		// set C or P bits and free if needed
		ksyn_cvupdate_fixup(kwq, &updatebits);
		*retval = updatebits;

		PTHREAD_TRACE(psynch_cvar_signal | DBG_FUNC_END, kwq->kw_addr,
				updatebits, 0, 0);

		ksyn_wqunlock(kwq);

		pthread_kern->psynch_wait_cleanup();

		if (nkwe != NULL) {
			zfree(kwe_zone, nkwe);
		}

		ksyn_wqrelease(kwq, 1, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_CVAR));
	}

	if (th != THREAD_NULL) {
		thread_deallocate(th);
	}

	return error;
}
1086 _psynch_cvbroad(__unused proc_t p
, user_addr_t cv
, uint64_t cvlsgen
,
1087 uint64_t cvudgen
, uint32_t flags
, __unused user_addr_t mutex
,
1088 __unused
uint64_t mugen
, __unused
uint64_t tid
, uint32_t *retval
)
1090 uint32_t diffgen
= cvudgen
& 0xffffffff;
1091 uint32_t count
= diffgen
>> PTHRW_COUNT_SHIFT
;
1092 if (count
> pthread_kern
->get_task_threadmax()) {
1093 __FAILEDUSERTEST__("cvbroad: difference greater than maximum possible thread count\n");
1097 uint32_t csgen
= (cvlsgen
>> 32) & 0xffffffff;
1098 uint32_t cgen
= cvlsgen
& 0xffffffff;
1099 uint32_t cugen
= (cvudgen
>> 32) & 0xffffffff;
1101 return __psynch_cvsignal(cv
, cgen
, cugen
, csgen
, flags
, 1, 0, retval
);
/*
 * psynch_cvsignal: This system call is used for signalling the blocked waiters of psynch cvars.
 */
int
_psynch_cvsignal(__unused proc_t p, user_addr_t cv, uint64_t cvlsgen,
		uint32_t cvugen, int threadport, __unused user_addr_t mutex,
		__unused uint64_t mugen, __unused uint64_t tid, uint32_t flags,
		uint32_t *retval)
{
	uint32_t csgen = (cvlsgen >> 32) & 0xffffffff;
	uint32_t cgen = cvlsgen & 0xffffffff;

	return __psynch_cvsignal(cv, cgen, cvugen, csgen, flags, 0, threadport, retval);
}
/*
 * psynch_cvwait: This system call is used for psynch cvar waiters to block in kernel.
 */
int
_psynch_cvwait(__unused proc_t p, user_addr_t cv, uint64_t cvlsgen,
		uint32_t cvugen, user_addr_t mutex, uint64_t mugen, uint32_t flags,
		int64_t sec, uint32_t nsec, uint32_t *retval)
{
	int error = 0;
	uint32_t updatebits = 0;
	ksyn_wait_queue_t ckwq = NULL;
	ksyn_waitq_element_t kwe, nkwe = NULL;

	/* for conformance reasons */
	pthread_kern->__pthread_testcancel(0);

	uint32_t csgen = (cvlsgen >> 32) & 0xffffffff;
	uint32_t cgen = cvlsgen & 0xffffffff;
	uint32_t ugen = (mugen >> 32) & 0xffffffff;
	uint32_t mgen = mugen & 0xffffffff;

	uint32_t lockseq = (cgen & PTHRW_COUNT_MASK);

	/*
	 * In cvwait U word can be out of range as cv could be used only for
	 * timeouts. However S word needs to be within bounds and validated at
	 * user level as well.
	 */
	if (is_seqhigher_eq(csgen, lockseq) != 0) {
		__FAILEDUSERTEST__("psync_cvwait; invalid sequence numbers\n");
		return EINVAL;
	}

	PTHREAD_TRACE(psynch_cvar_kwait | DBG_FUNC_START, cv, mutex, cgen, 0);

	error = ksyn_wqfind(cv, cgen, cvugen, csgen, flags, KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INWAIT, &ckwq);
	if (error != 0) {
		return error;
	}

	if (mutex != 0) {
		uint32_t mutexrv = 0;
		error = _psynch_mutexdrop(NULL, mutex, mgen, ugen, 0, flags, &mutexrv);
		if (error != 0) {
			goto out;
		}
	}

	ksyn_wqlock(ckwq);

	// update L, U and S...
	UPDATE_CVKWQ(ckwq, cgen, cvugen, csgen);

	/* Look for the sequence for prepost (or conflicting thread) */
	ksyn_queue_t kq = &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITE];
	kwe = ksyn_queue_find_cvpreposeq(kq, lockseq);
	if (kwe != NULL) {
		if (kwe->kwe_state == KWE_THREAD_PREPOST) {
			if ((kwe->kwe_lockseq & PTHRW_COUNT_MASK) == lockseq) {
				/* we can safely consume a reference, so do so */
				if (--kwe->kwe_count == 0) {
					ksyn_queue_remove_item(ckwq, kq, kwe);
					ckwq->kw_fakecount--;
					nkwe = kwe;
				}
			} else {
				/*
				 * consuming a prepost higher than our lock sequence is valid, but
				 * can leave the higher thread without a match. Convert the entry
				 * to a broadcast to compensate for this.
				 */
				ksyn_handle_cvbroad(ckwq, kwe->kwe_lockseq, &updatebits);
#if __TESTPANICS__
				if (updatebits != 0)
					panic("psync_cvwait: convert pre-post to broadcast: woke up %d threads that shouldn't be there\n", updatebits);
#endif /* __TESTPANICS__ */
			}
		} else if (kwe->kwe_state == KWE_THREAD_BROADCAST) {
			// A pending broadcast already covers this waiter.
		} else if (kwe->kwe_state == KWE_THREAD_INWAIT) {
			__FAILEDUSERTEST__("cvwait: thread entry with same sequence already present\n");
			error = EBUSY;
		} else {
			panic("psync_cvwait: unexpected wait queue element type\n");
		}

		if (error == 0) {
			updatebits |= PTHRW_INC;
			ckwq->kw_sword += PTHRW_INC;

			/* set C or P bits and free if needed */
			ksyn_cvupdate_fixup(ckwq, &updatebits);
			*retval = updatebits;
		}
	} else {
		uint64_t abstime = 0;
		uint16_t kwe_flags = 0;

		if (sec != 0 || (nsec & 0x3fffffff) != 0) {
			struct timespec ts;
			ts.tv_sec = (__darwin_time_t)sec;
			ts.tv_nsec = (nsec & 0x3fffffff);
			nanoseconds_to_absolutetime(
					(uint64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec, &abstime);
			clock_absolutetime_interval_to_deadline(abstime, &abstime);
		}

		PTHREAD_TRACE(psynch_cvar_kwait, cv, mutex, kwe_flags, 1);

		error = ksyn_wait(ckwq, KSYN_QUEUE_WRITE, cgen, SEQFIT, abstime,
				kwe_flags, psynch_cvcontinue, kThreadWaitPThreadCondVar);
		// ksyn_wait drops wait queue lock
		goto out;
	}

	ksyn_wqunlock(ckwq);

	if (nkwe != NULL) {
		zfree(kwe_zone, nkwe);
	}
out:
	PTHREAD_TRACE(psynch_cvar_kwait | DBG_FUNC_END, cv, error, updatebits, 2);

	ksyn_wqrelease(ckwq, 1, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_CVAR));
	return error;
}
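
/*
 * Note: the wait is "atomic" with respect to the mutex because the mutex
 * drop is performed in the kernel (via _psynch_mutexdrop()) before the cv
 * kwq is examined, so any signal racing with this waiter either finds it
 * on the queue or leaves a prepost/broadcast entry that is consumed above.
 * An absolute deadline is only armed when sec/nsec are non-zero.
 */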
static void __dead2
psynch_cvcontinue(void *parameter, wait_result_t result)
{
	uthread_t uth = current_uthread();
	ksyn_wait_queue_t ckwq = parameter;
	ksyn_waitq_element_t kwe = pthread_kern->uthread_get_uukwe(uth);

	int error = _wait_result_to_errno(result);
	if (error != 0) {
		ksyn_wqlock(ckwq);
		/* just in case it got woken up as we were granting */
		int retval = kwe->kwe_psynchretval;
		pthread_kern->uthread_set_returnval(uth, retval);

		if (kwe->kwe_kwqqueue) {
			ksyn_queue_remove_item(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITE], kwe);
		}
		if ((kwe->kwe_psynchretval & PTH_RWL_MTX_WAIT) != 0) {
			/* the condition var granted.
			 * reset the error so that the thread returns back.
			 */
			error = 0;
			/* no need to set any bits just return as cvsig/broad covers this */
		} else {
			ckwq->kw_sword += PTHRW_INC;

			/* set C and P bits, in the local error */
			if ((ckwq->kw_lword & PTHRW_COUNT_MASK) == (ckwq->kw_sword & PTHRW_COUNT_MASK)) {
				PTHREAD_TRACE(psynch_cvar_zeroed, ckwq->kw_addr,
						ckwq->kw_lword, ckwq->kw_sword, ckwq->kw_inqueue);
				error |= ECVCLEARED;
				if (ckwq->kw_inqueue != 0) {
					ksyn_queue_free_items(ckwq, KSYN_QUEUE_WRITE, ckwq->kw_lword, 1);
				}
				ckwq->kw_lword = ckwq->kw_uword = ckwq->kw_sword = 0;
				ckwq->kw_kflags |= KSYN_KWF_ZEROEDOUT;
			} else {
				/* everything in the queue is a fake entry ? */
				if (ckwq->kw_inqueue != 0 && ckwq->kw_fakecount == ckwq->kw_inqueue) {
					error |= ECVPREPOST;
				}
			}
		}
		ksyn_wqunlock(ckwq);

		PTHREAD_TRACE(psynch_cvar_kwait | DBG_FUNC_END, ckwq->kw_addr,
				error, 0, 3);
	} else {
		int val = 0;
		// PTH_RWL_MTX_WAIT is removed
		if ((kwe->kwe_psynchretval & PTH_RWS_CV_MBIT) != 0) {
			val = PTHRW_INC | PTH_RWS_CV_CBIT;
		}
		PTHREAD_TRACE(psynch_cvar_kwait | DBG_FUNC_END, ckwq->kw_addr,
				val, 0, 4);
		pthread_kern->uthread_set_returnval(uth, val);
	}

	ksyn_wqrelease(ckwq, 1, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_CVAR));
	pthread_kern->unix_syscall_return(error);
	__builtin_unreachable();
}
/*
 * psynch_cvclrprepost: This system call clears pending prepost if present.
 */
int
_psynch_cvclrprepost(__unused proc_t p, user_addr_t cv, uint32_t cvgen,
		uint32_t cvugen, uint32_t cvsgen, __unused uint32_t prepocnt,
		uint32_t preposeq, uint32_t flags, int *retval)
{
	int error = 0;
	int mutex = (flags & _PTHREAD_MTX_OPT_MUTEX);
	int wqtype = (mutex ? KSYN_WQTYPE_MTX : KSYN_WQTYPE_CVAR) | KSYN_WQTYPE_INDROP;
	ksyn_wait_queue_t kwq = NULL;

	*retval = 0;

	error = ksyn_wqfind(cv, cvgen, cvugen, mutex ? 0 : cvsgen, flags, wqtype,
			&kwq);
	if (error != 0) {
		return error;
	}

	ksyn_wqlock(kwq);

	if (mutex) {
		int firstfit = (flags & _PTHREAD_MTX_OPT_POLICY_MASK)
				== _PTHREAD_MTX_OPT_POLICY_FIRSTFIT;
		if (firstfit && kwq->kw_prepost.count) {
			if (is_seqlower_eq(kwq->kw_prepost.lseq, cvgen)) {
				PTHREAD_TRACE(psynch_mutex_kwqprepost, kwq->kw_addr,
						kwq->kw_prepost.lseq, 0, 2);
				_kwq_clear_preposted_wakeup(kwq);
			}
		}
	} else {
		PTHREAD_TRACE(psynch_cvar_clrprepost, kwq->kw_addr, wqtype,
				preposeq, 0);
		ksyn_queue_free_items(kwq, KSYN_QUEUE_WRITE, preposeq, 0);
	}

	ksyn_wqunlock(kwq);
	ksyn_wqrelease(kwq, 1, wqtype);
	return error;
}
/* ***************** pthread_rwlock ************************ */

static int
__psynch_rw_lock(int type, user_addr_t rwlock, uint32_t lgenval,
		uint32_t ugenval, uint32_t rw_wc, int flags, uint32_t *retval)
{
	uint32_t lockseq = lgenval & PTHRW_COUNT_MASK;
	ksyn_wait_queue_t kwq;
	int error, prepost_type, kqi;
	thread_continue_t tc;

	if (type == PTH_RW_TYPE_READ) {
		prepost_type = KW_UNLOCK_PREPOST_READLOCK;
		kqi = KSYN_QUEUE_READ;
		tc = psynch_rw_rdcontinue;
	} else {
		prepost_type = KW_UNLOCK_PREPOST_WRLOCK;
		kqi = KSYN_QUEUE_WRITE;
		tc = psynch_rw_wrcontinue;
	}

	error = ksyn_wqfind(rwlock, lgenval, ugenval, rw_wc, flags,
			(KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_RWLOCK), &kwq);
	if (error != 0) {
		return error;
	}

	ksyn_wqlock(kwq);
	_ksyn_check_init(kwq, lgenval);
	if (_kwq_handle_interrupted_wakeup(kwq, type, lockseq, retval) ||
			// handle overlap first as they are not counted against pre_rwwc
			// handle_overlap uses the flags in lgenval (vs. lockseq)
			_kwq_handle_overlap(kwq, type, lgenval, rw_wc, retval) ||
			_kwq_handle_preposted_wakeup(kwq, prepost_type, lockseq, retval)) {
		ksyn_wqunlock(kwq);
		goto out;
	}

	block_hint_t block_hint = type == PTH_RW_TYPE_READ ?
			kThreadWaitPThreadRWLockRead : kThreadWaitPThreadRWLockWrite;
	error = ksyn_wait(kwq, kqi, lgenval, SEQFIT, 0, 0, tc, block_hint);
	// ksyn_wait drops wait queue lock
out:
	ksyn_wqrelease(kwq, 0, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_RWLOCK));
	return error;
}
/*
 * psynch_rw_rdlock: This system call is used for psync rwlock readers to block.
 */
int
_psynch_rw_rdlock(__unused proc_t p, user_addr_t rwlock, uint32_t lgenval,
		uint32_t ugenval, uint32_t rw_wc, int flags, uint32_t *retval)
{
	return __psynch_rw_lock(PTH_RW_TYPE_READ, rwlock, lgenval, ugenval, rw_wc,
			flags, retval);
}
/*
 * psynch_rw_longrdlock: This system call is used for psync rwlock long readers to block.
 */
int
_psynch_rw_longrdlock(__unused proc_t p, __unused user_addr_t rwlock,
		__unused uint32_t lgenval, __unused uint32_t ugenval,
		__unused uint32_t rw_wc, __unused int flags, __unused uint32_t *retval)
{
	return ESRCH;
}
/*
 * psynch_rw_wrlock: This system call is used for psync rwlock writers to block.
 */
int
_psynch_rw_wrlock(__unused proc_t p, user_addr_t rwlock, uint32_t lgenval,
		uint32_t ugenval, uint32_t rw_wc, int flags, uint32_t *retval)
{
	return __psynch_rw_lock(PTH_RW_TYPE_WRITE, rwlock, lgenval, ugenval,
			rw_wc, flags, retval);
}
/*
 * psynch_rw_yieldwrlock: This system call is used for psync rwlock yielding writers to block.
 */
int
_psynch_rw_yieldwrlock(__unused proc_t p, __unused user_addr_t rwlock,
		__unused uint32_t lgenval, __unused uint32_t ugenval,
		__unused uint32_t rw_wc, __unused int flags, __unused uint32_t *retval)
{
	return ESRCH;
}
/*
 * psynch_rw_unlock: This system call is used for unlock state postings. This will grant appropriate
 *			reader/writer variety lock.
 */
int
_psynch_rw_unlock(__unused proc_t p, user_addr_t rwlock, uint32_t lgenval,
		uint32_t ugenval, uint32_t rw_wc, int flags, uint32_t *retval)
{
	int error = 0;
	ksyn_wait_queue_t kwq;
	uint32_t updatebits = 0;
	int diff;
	uint32_t count = 0;
	uint32_t curgen = lgenval & PTHRW_COUNT_MASK;
	int clearedkflags = 0;

	error = ksyn_wqfind(rwlock, lgenval, ugenval, rw_wc, flags,
			(KSYN_WQTYPE_INDROP | KSYN_WQTYPE_RWLOCK), &kwq);
	if (error != 0) {
		return error;
	}

	ksyn_wqlock(kwq);
	int isinit = _ksyn_check_init(kwq, lgenval);

	/* if lastunlock seq is set, ensure the current one is not lower than that, as it would be spurious */
	if ((kwq->kw_lastunlockseq != PTHRW_RWL_INIT) &&
			(is_seqlower(ugenval, kwq->kw_lastunlockseq) != 0)) {
		error = 0;
		goto out;
	}

	/* If L-U != num of waiters, then it needs to be preposted or spr */
	diff = find_diff(lgenval, ugenval);

	if (find_seq_till(kwq, curgen, diff, &count) == 0) {
		if ((count == 0) || (count < (uint32_t)diff))
			goto prepost;
	}

	/* no prepost and all threads are in place, reset the bit */
	if ((isinit != 0) && ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0)) {
		kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
		clearedkflags = 1;
	}

	/* can handle unlock now */

	_kwq_clear_preposted_wakeup(kwq);

	error = kwq_handle_unlock(kwq, lgenval, rw_wc, &updatebits, 0, NULL, 0);
#if __TESTPANICS__
	if (error != 0)
		panic("psynch_rw_unlock: kwq_handle_unlock failed %d\n", error);
#endif /* __TESTPANICS__ */
out:
	if (error == 0) {
		*retval = updatebits;
	}

	// <rdar://problem/22244050> If any of the wakeups failed because they
	// already returned to userspace because of a signal then we need to ensure
	// that the reset state is not cleared when that thread returns. Otherwise,
	// _pthread_rwlock_lock will clear the interrupted state before it is read.
	if (clearedkflags != 0 && kwq->kw_intr.count > 0) {
		kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
	}

	ksyn_wqunlock(kwq);
	pthread_kern->psynch_wait_cleanup();
	ksyn_wqrelease(kwq, 0, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_RWLOCK));

	return error;

prepost:
	/* update if the new seq is higher than prev prepost, or first set */
	if (is_rws_sbit_set(kwq->kw_prepost.sseq) ||
			is_seqhigher_eq(rw_wc, kwq->kw_prepost.sseq)) {
		_kwq_mark_preposted_wakeup(kwq, diff - count, curgen, rw_wc);
		updatebits = lgenval;	/* let this not do unlock handling */
	}
	error = 0;
	goto out;
}
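
/*
 * Note: the unlock can only be granted once every waiter implied by the
 * L - U difference is actually queued in the kernel; otherwise the state
 * is parked as a preposted wakeup (see the prepost: label above) and the
 * grant is re-evaluated when the remaining waiters arrive.
 */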
/* ************************************************************************** */

void
pth_global_hashinit(void)
{
	pth_glob_hashtbl = hashinit(PTH_HASHSIZE * 4, M_PROC, &pthhash);
}

void
_pth_proc_hashinit(proc_t p)
{
	void *ptr = hashinit(PTH_HASHSIZE, M_PCB, &pthhash);
	if (ptr == NULL) {
		panic("pth_proc_hashinit: hash init returned 0\n");
	}

	pthread_kern->proc_set_pthhash(p, ptr);
}
static int
ksyn_wq_hash_lookup(user_addr_t uaddr, proc_t p, int flags,
		ksyn_wait_queue_t *out_kwq, struct pthhashhead **out_hashptr,
		uint64_t object, uint64_t offset)
{
	int res = 0;
	ksyn_wait_queue_t kwq;
	struct pthhashhead *hashptr;
	if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED) {
		hashptr = pth_glob_hashtbl;
		LIST_FOREACH(kwq, &hashptr[object & pthhash], kw_hash) {
			if (kwq->kw_object == object && kwq->kw_offset == offset) {
				break;
			}
		}
	} else {
		hashptr = pthread_kern->proc_get_pthhash(p);
		LIST_FOREACH(kwq, &hashptr[uaddr & pthhash], kw_hash) {
			if (kwq->kw_addr == uaddr) {
				break;
			}
		}
	}
	*out_kwq = kwq;
	*out_hashptr = hashptr;
	return res;
}
void
_pth_proc_hashdelete(proc_t p)
{
	struct pthhashhead *hashptr;
	ksyn_wait_queue_t kwq;
	unsigned long hashsize = pthhash + 1;
	unsigned long i;

	hashptr = pthread_kern->proc_get_pthhash(p);
	pthread_kern->proc_set_pthhash(p, NULL);
	if (hashptr == NULL) {
		return;
	}

	pthread_list_lock();
	for (i = 0; i < hashsize; i++) {
		while ((kwq = LIST_FIRST(&hashptr[i])) != NULL) {
			if ((kwq->kw_pflags & KSYN_WQ_INHASH) != 0) {
				kwq->kw_pflags &= ~KSYN_WQ_INHASH;
				LIST_REMOVE(kwq, kw_hash);
			}
			if ((kwq->kw_pflags & KSYN_WQ_FLIST) != 0) {
				kwq->kw_pflags &= ~KSYN_WQ_FLIST;
				LIST_REMOVE(kwq, kw_list);
			}
			pthread_list_unlock();
			/* release fake entries if present for cvars */
			if (((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_CVAR) && (kwq->kw_inqueue != 0))
				ksyn_freeallkwe(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITE]);
			_kwq_destroy(kwq);
			pthread_list_lock();
		}
	}
	pthread_list_unlock();
	FREE(hashptr, M_PROC);
}
/* no lock held for this as the waitqueue is getting freed */
void
ksyn_freeallkwe(ksyn_queue_t kq)
{
	ksyn_waitq_element_t kwe;
	while ((kwe = TAILQ_FIRST(&kq->ksynq_kwelist)) != NULL) {
		TAILQ_REMOVE(&kq->ksynq_kwelist, kwe, kwe_list);
		if (kwe->kwe_state != KWE_THREAD_INWAIT) {
			zfree(kwe_zone, kwe);
		}
	}
}
static void
_kwq_report_inuse(ksyn_wait_queue_t kwq)
{
	if (kwq->kw_prepost.count != 0) {
		__FAILEDUSERTEST2__("uaddr 0x%llx busy for synch type 0x%x [pre %d:0x%x:0x%x]",
				(uint64_t)kwq->kw_addr, kwq->kw_type, kwq->kw_prepost.count,
				kwq->kw_prepost.lseq, kwq->kw_prepost.sseq);
		PTHREAD_TRACE(psynch_mutex_kwqcollision, kwq->kw_addr,
				kwq->kw_type, 1, 0);
	}
	if (kwq->kw_intr.count != 0) {
		__FAILEDUSERTEST2__("uaddr 0x%llx busy for synch type 0x%x [intr %d:0x%x:0x%x:0x%x]",
				(uint64_t)kwq->kw_addr, kwq->kw_type, kwq->kw_intr.count,
				kwq->kw_intr.type, kwq->kw_intr.seq,
				kwq->kw_intr.returnbits);
		PTHREAD_TRACE(psynch_mutex_kwqcollision, kwq->kw_addr,
				kwq->kw_type, 2, 0);
	}
	if (kwq->kw_iocount) {
		__FAILEDUSERTEST2__("uaddr 0x%llx busy for synch type 0x%x [ioc %d:%d]",
				(uint64_t)kwq->kw_addr, kwq->kw_type, kwq->kw_iocount,
				kwq->kw_dropcount);
		PTHREAD_TRACE(psynch_mutex_kwqcollision, kwq->kw_addr,
				kwq->kw_type, 3, 0);
	}
	if (kwq->kw_inqueue) {
		__FAILEDUSERTEST2__("uaddr 0x%llx busy for synch type 0x%x [inq %d:%d]",
				(uint64_t)kwq->kw_addr, kwq->kw_type, kwq->kw_inqueue,
				kwq->kw_fakecount);
		PTHREAD_TRACE(psynch_mutex_kwqcollision, kwq->kw_addr, kwq->kw_type,
				4, 0);
	}
}
/* find kernel waitqueue, if not present create one. Grants a reference */
int
ksyn_wqfind(user_addr_t uaddr, uint32_t mgen, uint32_t ugen, uint32_t sgen,
		int flags, int wqtype, ksyn_wait_queue_t *kwqp)
{
	int res = 0;
	ksyn_wait_queue_t kwq = NULL;
	ksyn_wait_queue_t nkwq = NULL;
	struct pthhashhead *hashptr;
	proc_t p = current_proc();

	uint64_t object = 0, offset = 0;
	if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED) {
		res = ksyn_findobj(uaddr, &object, &offset);
		hashptr = pth_glob_hashtbl;
	} else {
		hashptr = pthread_kern->proc_get_pthhash(p);
	}

	while (res == 0) {
		pthread_list_lock();
		res = ksyn_wq_hash_lookup(uaddr, current_proc(), flags, &kwq, &hashptr,
				object, offset);
		if (res != 0) {
			pthread_list_unlock();
			break;
		}
		if (kwq == NULL && nkwq == NULL) {
			// Drop the lock to allocate a new kwq and retry.
			pthread_list_unlock();

			nkwq = (ksyn_wait_queue_t)zalloc(kwq_zone);
			bzero(nkwq, sizeof(struct ksyn_wait_queue));
			int i;
			for (i = 0; i < KSYN_QUEUE_MAX; i++) {
				ksyn_queue_init(&nkwq->kw_ksynqueues[i]);
			}
			lck_spin_init(&nkwq->kw_lock, pthread_lck_grp, pthread_lck_attr);
			continue;
		} else if (kwq == NULL && nkwq != NULL) {
			// Still not found, add the new kwq to the hash.
			kwq = nkwq;
			nkwq = NULL; // Don't free.
			if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED) {
				kwq->kw_pflags |= KSYN_WQ_SHARED;
				LIST_INSERT_HEAD(&hashptr[object & pthhash], kwq, kw_hash);
			} else {
				LIST_INSERT_HEAD(&hashptr[uaddr & pthhash], kwq, kw_hash);
			}
			kwq->kw_pflags |= KSYN_WQ_INHASH;
		} else if (kwq != NULL) {
			// Found an existing kwq, use it.
			if ((kwq->kw_pflags & KSYN_WQ_FLIST) != 0) {
				LIST_REMOVE(kwq, kw_list);
				kwq->kw_pflags &= ~KSYN_WQ_FLIST;
			}
			if ((kwq->kw_type & KSYN_WQTYPE_MASK) != (wqtype & KSYN_WQTYPE_MASK)) {
				if (!_kwq_is_used(kwq)) {
					if (kwq->kw_iocount == 0) {
						kwq->kw_type = 0; // mark for reinitialization
					} else if (kwq->kw_iocount == 1 &&
							kwq->kw_dropcount == kwq->kw_iocount) {
						/* if all users are unlockers then wait for it to finish */
						kwq->kw_pflags |= KSYN_WQ_WAITING;
						// Drop the lock and wait for the kwq to be free.
						(void)msleep(&kwq->kw_pflags, pthread_list_mlock,
								PDROP, "ksyn_wqfind", 0);
						continue;
					} else {
						_kwq_report_inuse(kwq);
						res = EINVAL;
					}
				} else {
					_kwq_report_inuse(kwq);
					res = EINVAL;
				}
			}
		}
		if (res == 0) {
			if (kwq->kw_type == 0) {
				kwq->kw_addr = uaddr;
				kwq->kw_object = object;
				kwq->kw_offset = offset;
				kwq->kw_type = (wqtype & KSYN_WQTYPE_MASK);
				CLEAR_REINIT_BITS(kwq);
				kwq->kw_lword = mgen;
				kwq->kw_uword = ugen;
				kwq->kw_sword = sgen;
				kwq->kw_owner = THREAD_NULL;
				kwq->kw_kflags = 0;
				kwq->kw_qos_override = THREAD_QOS_UNSPECIFIED;
				PTHREAD_TRACE(psynch_mutex_kwqallocate | DBG_FUNC_START, uaddr,
						kwq->kw_type, kwq, 0);
				PTHREAD_TRACE(psynch_mutex_kwqallocate | DBG_FUNC_END, uaddr,
						mgen, ugen, sgen);
			}
			kwq->kw_iocount++;
			if (wqtype == KSYN_WQTYPE_MUTEXDROP) {
				kwq->kw_dropcount++;
			}
		}
		pthread_list_unlock();
		break;
	}
	if (kwqp != NULL) {
		*kwqp = kwq;
	}
	if (nkwq) {
		lck_spin_destroy(&nkwq->kw_lock, pthread_lck_grp);
		zfree(kwq_zone, nkwq);
	}
	return res;
}
/* Reference from find is dropped here. Starts the free process if needed */
void
ksyn_wqrelease(ksyn_wait_queue_t kwq, int qfreenow, int wqtype)
{
	uint64_t deadline;
	ksyn_wait_queue_t free_elem = NULL;

	pthread_list_lock();
	if (wqtype == KSYN_WQTYPE_MUTEXDROP) {
		kwq->kw_dropcount--;
	}
	if (--kwq->kw_iocount == 0) {
		if ((kwq->kw_pflags & KSYN_WQ_WAITING) != 0) {
			/* some one is waiting for the waitqueue, wake them up */
			kwq->kw_pflags &= ~KSYN_WQ_WAITING;
			wakeup(&kwq->kw_pflags);
		}

		if (!_kwq_is_used(kwq)) {
			if (kwq->kw_turnstile) {
				panic("kw_turnstile still non-null upon release");
			}

			PTHREAD_TRACE(psynch_mutex_kwqdeallocate | DBG_FUNC_START,
					kwq->kw_addr, kwq->kw_type, qfreenow, 0);
			PTHREAD_TRACE(psynch_mutex_kwqdeallocate | DBG_FUNC_END,
					kwq->kw_addr, kwq->kw_lword, kwq->kw_uword, kwq->kw_sword);

			if (qfreenow == 0) {
				microuptime(&kwq->kw_ts);
				LIST_INSERT_HEAD(&pth_free_list, kwq, kw_list);
				kwq->kw_pflags |= KSYN_WQ_FLIST;
				if (psynch_cleanupset == 0) {
					struct timeval t;
					microuptime(&t);
					t.tv_sec += KSYN_CLEANUP_DEADLINE;
					deadline = tvtoabstime(&t);
					thread_call_enter_delayed(psynch_thcall, deadline);
					psynch_cleanupset = 1;
				}
			} else {
				kwq->kw_pflags &= ~KSYN_WQ_INHASH;
				LIST_REMOVE(kwq, kw_hash);
				free_elem = kwq;
			}
		}
	}
	pthread_list_unlock();
	if (free_elem != NULL) {
		_kwq_destroy(free_elem);
	}
}
/* responsible to free the waitqueues */
void
psynch_wq_cleanup(__unused void *param, __unused void *param1)
{
	ksyn_wait_queue_t kwq, tmp;
	struct timeval t;
	int reschedule = 0;
	uint64_t deadline = 0;
	LIST_HEAD(, ksyn_wait_queue) freelist;
	LIST_INIT(&freelist);

	pthread_list_lock();

	microuptime(&t);

	LIST_FOREACH(kwq, &pth_free_list, kw_list) {
		if (_kwq_is_used(kwq) || kwq->kw_iocount != 0) {
			// still in use
			continue;
		}
		__darwin_time_t diff = t.tv_sec - kwq->kw_ts.tv_sec;
		if (diff >= KSYN_CLEANUP_DEADLINE) {
			kwq->kw_pflags &= ~(KSYN_WQ_FLIST | KSYN_WQ_INHASH);
			LIST_REMOVE(kwq, kw_hash);
			LIST_REMOVE(kwq, kw_list);
			LIST_INSERT_HEAD(&freelist, kwq, kw_list);
		} else {
			reschedule = 1;
		}
	}
	if (reschedule != 0) {
		t.tv_sec += KSYN_CLEANUP_DEADLINE;
		deadline = tvtoabstime(&t);
		thread_call_enter_delayed(psynch_thcall, deadline);
		psynch_cleanupset = 1;
	} else {
		psynch_cleanupset = 0;
	}
	pthread_list_unlock();

	LIST_FOREACH_SAFE(kwq, &freelist, kw_list, tmp) {
		_kwq_destroy(kwq);
	}
}
static int
_wait_result_to_errno(wait_result_t result)
{
	int res = 0;
	switch (result) {
	case THREAD_TIMED_OUT:
		res = ETIMEDOUT;
		break;
	case THREAD_INTERRUPTED:
		res = EINTR;
		break;
	}
	return res;
}
int
ksyn_wait(ksyn_wait_queue_t kwq, kwq_queue_type_t kqi, uint32_t lockseq,
		int fit, uint64_t abstime, uint16_t kwe_flags,
		thread_continue_t continuation, block_hint_t block_hint)
{
	thread_t th = current_thread();
	uthread_t uth = pthread_kern->get_bsdthread_info(th);
	struct turnstile **tstore = NULL;
	int res;

	assert(continuation != THREAD_CONTINUE_NULL);

	ksyn_waitq_element_t kwe = pthread_kern->uthread_get_uukwe(uth);
	bzero(kwe, sizeof(*kwe));
	kwe->kwe_count = 1;
	kwe->kwe_lockseq = lockseq & PTHRW_COUNT_MASK;
	kwe->kwe_state = KWE_THREAD_INWAIT;
	kwe->kwe_kwqqueue = kwq;
	kwe->kwe_thread = th;
	kwe->kwe_flags = kwe_flags;

	res = ksyn_queue_insert(kwq, kqi, kwe, lockseq, fit);
	if (res != 0) {
		//panic("psynch_rw_wrlock: failed to enqueue\n"); // XXX
		ksyn_wqunlock(kwq);
		return res;
	}

	PTHREAD_TRACE(psynch_mutex_kwqwait, kwq->kw_addr, kwq->kw_inqueue,
			kwq->kw_prepost.count, kwq->kw_intr.count);

	if (_kwq_use_turnstile(kwq)) {
		// pthread mutexes and rwlocks both (at least sometimes) know their
		// owner and can use turnstiles. Otherwise, we pass NULL as the
		// tstore to the shims so they wait on the global waitq.
		tstore = &kwq->kw_turnstile;
	}

	pthread_kern->psynch_wait_prepare((uintptr_t)kwq, tstore, kwq->kw_owner,
			block_hint, abstime);

	ksyn_wqunlock(kwq);

	if (tstore) {
		pthread_kern->psynch_wait_update_complete(kwq->kw_turnstile);
	}

	thread_block_parameter(continuation, kwq);

	// NOT REACHED
	panic("ksyn_wait continuation returned");
	__builtin_unreachable();
}
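
/*
 * Note: the kwq spinlock is held on entry and is dropped in here, either
 * on the error path or just before blocking; callers therefore must not
 * touch the kwq after ksyn_wait() unless they still hold a find reference
 * (ksyn_wqfind/ksyn_wqrelease pair).
 */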
kern_return_t
ksyn_signal(ksyn_wait_queue_t kwq, kwq_queue_type_t kqi,
		ksyn_waitq_element_t kwe, uint32_t updateval)
{
	kern_return_t ret;
	struct turnstile **tstore = NULL;

	// If no wait element was specified, wake the first.
	if (!kwe) {
		kwe = TAILQ_FIRST(&kwq->kw_ksynqueues[kqi].ksynq_kwelist);
		if (!kwe) {
			panic("ksyn_signal: panic signaling empty queue");
		}
	}

	if (kwe->kwe_state != KWE_THREAD_INWAIT) {
		panic("ksyn_signal: panic signaling non-waiting element");
	}

	ksyn_queue_remove_item(kwq, &kwq->kw_ksynqueues[kqi], kwe);
	kwe->kwe_psynchretval = updateval;

	if (_kwq_use_turnstile(kwq)) {
		tstore = &kwq->kw_turnstile;
	}

	ret = pthread_kern->psynch_wait_wakeup(kwq, kwe, tstore);

	if (ret != KERN_SUCCESS && ret != KERN_NOT_WAITING) {
		panic("ksyn_signal: panic waking up thread %x\n", ret);
	}
	return ret;
}
static int
ksyn_findobj(user_addr_t uaddr, uint64_t *objectp, uint64_t *offsetp)
{
	kern_return_t ret;
	vm_page_info_basic_data_t info;
	mach_msg_type_number_t count = VM_PAGE_INFO_BASIC_COUNT;
	ret = pthread_kern->vm_map_page_info(pthread_kern->current_map(), uaddr,
			VM_PAGE_INFO_BASIC, (vm_page_info_t)&info, &count);
	if (ret != KERN_SUCCESS) {
		return EINVAL;
	}

	if (objectp != NULL) {
		*objectp = (uint64_t)info.object_id;
	}
	if (offsetp != NULL) {
		*offsetp = (uint64_t)info.offset;
	}

	return 0;
}
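
/*
 * Note (illustrative): process-shared objects are identified by the pair
 * (VM object id, offset) returned here, so two processes mapping the same
 * page hash to the same global kwq, roughly as in ksyn_wq_hash_lookup():
 *
 *	res = ksyn_findobj(uaddr, &object, &offset);
 *	LIST_FOREACH(kwq, &pth_glob_hashtbl[object & pthhash], kw_hash) ...
 *
 * while process-private objects are hashed by user address alone.
 */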
/* lowest of kw_fr, kw_flr, kw_fwr, kw_fywr */
static int
kwq_find_rw_lowest(ksyn_wait_queue_t kwq, int flags, uint32_t premgen,
		int *typep, uint32_t lowest[])
{
	uint32_t kw_fr, kw_fwr, low;
	int type = 0, lowtype, typenum[2] = { 0 };
	uint32_t numbers[2] = { 0 };
	int count = 0, i;

	if ((kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count != 0) ||
			((flags & KW_UNLOCK_PREPOST_READLOCK) != 0)) {
		type |= PTH_RWSHFT_TYPE_READ;
		/* read entries are present */
		if (kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count != 0) {
			kw_fr = kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_firstnum;
			if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) &&
					(is_seqlower(premgen, kw_fr) != 0))
				kw_fr = premgen;
		} else {
			kw_fr = premgen;
		}

		lowest[KSYN_QUEUE_READ] = kw_fr;
		numbers[count] = kw_fr;
		typenum[count] = PTH_RW_TYPE_READ;
		count++;
	} else {
		lowest[KSYN_QUEUE_READ] = 0;
	}

	if ((kwq->kw_ksynqueues[KSYN_QUEUE_WRITE].ksynq_count != 0) ||
			((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0)) {
		type |= PTH_RWSHFT_TYPE_WRITE;
		/* write entries are present */
		if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITE].ksynq_count != 0) {
			kw_fwr = kwq->kw_ksynqueues[KSYN_QUEUE_WRITE].ksynq_firstnum;
			if (((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0) &&
					(is_seqlower(premgen, kw_fwr) != 0))
				kw_fwr = premgen;
		} else {
			kw_fwr = premgen;
		}

		lowest[KSYN_QUEUE_WRITE] = kw_fwr;
		numbers[count] = kw_fwr;
		typenum[count] = PTH_RW_TYPE_WRITE;
		count++;
	} else {
		lowest[KSYN_QUEUE_WRITE] = 0;
	}

#if __TESTPANICS__
	if (count == 0)
		panic("nothing in the queue???\n");
#endif /* __TESTPANICS__ */

	low = numbers[0];
	lowtype = typenum[0];
	if (count > 1) {
		for (i = 1; i < count; i++) {
			if (is_seqlower(numbers[i], low) != 0) {
				low = numbers[i];
				lowtype = typenum[i];
			}
		}
	}
	type |= lowtype;

	if (typep != NULL) {
		*typep = type;
	}
	return 0;
}
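
/*
 * ksyn_wakeupreaders wakes queued readers whose sequence numbers are below
 * limitread (or all of them when allreaders is set). Wakeups that found no
 * waiting thread are counted in the return value so the caller can record
 * them as interrupted; the total number of wakeups attempted is returned
 * through wokenp.
 */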
/* wake up readers up to the writer limit */
static int
ksyn_wakeupreaders(ksyn_wait_queue_t kwq, uint32_t limitread, int allreaders,
		uint32_t updatebits, int *wokenp)
{
	ksyn_queue_t kq;
	int failedwakeup = 0;
	int numwoken = 0;
	kern_return_t kret = KERN_SUCCESS;
	uint32_t lbits = updatebits;

	kq = &kwq->kw_ksynqueues[KSYN_QUEUE_READ];
	while ((kq->ksynq_count != 0) &&
			(allreaders || (is_seqlower(kq->ksynq_firstnum, limitread) != 0))) {
		kret = ksyn_signal(kwq, KSYN_QUEUE_READ, NULL, lbits);
		if (kret == KERN_NOT_WAITING) {
			failedwakeup++;
		}
		numwoken++;
	}

	if (wokenp != NULL) {
		*wokenp = numwoken;
	}
	return failedwakeup;
}
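
/*
 * The grant decision below either releases a batch of readers up to the
 * lowest waiting writer, or hands the lock to a single writer. *updatep
 * carries the computed update bits back to the caller and *blockp reports
 * whether a preposting thread still needs to block.
 */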
/*
 * This handles the unlock grants for the next set on rw_unlock() or on
 * arrival of all preposted waiters.
 */
static int
kwq_handle_unlock(ksyn_wait_queue_t kwq, __unused uint32_t mgen, uint32_t rw_wc,
		uint32_t *updatep, int flags, int *blockp, uint32_t premgen)
{
	uint32_t low_writer, limitrdnum;
	int rwtype, error = 0;
	int allreaders, nfailed;
	uint32_t updatebits = 0, numneeded = 0;
	int prepost = flags & KW_UNLOCK_PREPOST;
	thread_t preth = THREAD_NULL;
	ksyn_waitq_element_t kwe;
	uthread_t uth;
	thread_t th;
	int woken = 0;
	int block = 1;
	uint32_t lowest[KSYN_QUEUE_MAX]; /* no need for upgrade as it is handled separately */
	kern_return_t kret = KERN_SUCCESS;
	ksyn_queue_t kq;
	int curthreturns = 0;

	if (prepost != 0) {
		preth = current_thread();
	}

	kq = &kwq->kw_ksynqueues[KSYN_QUEUE_READ];
	kwq->kw_lastseqword = rw_wc;
	kwq->kw_lastunlockseq = (rw_wc & PTHRW_COUNT_MASK);
	kwq->kw_kflags &= ~KSYN_KWF_OVERLAP_GUARD;

	error = kwq_find_rw_lowest(kwq, flags, premgen, &rwtype, lowest);
#if __TESTPANICS__
	if (error != 0)
		panic("rwunlock: cannot fail to slot next round of threads");
#endif /* __TESTPANICS__ */

	low_writer = lowest[KSYN_QUEUE_WRITE];
	allreaders = 0;

	switch (rwtype & PTH_RW_TYPE_MASK) {
	case PTH_RW_TYPE_READ: {
		/* what about the preflight which is LREAD or READ ?? */
		if ((rwtype & PTH_RWSHFT_TYPE_MASK) != 0) {
			if (rwtype & PTH_RWSHFT_TYPE_WRITE) {
				updatebits |= (PTH_RWL_WBIT | PTH_RWL_KBIT);
			}
		}
		limitrdnum = 0;
		if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0) {
			limitrdnum = low_writer;
		} else {
			allreaders = 1;
		}

		numneeded = 0;

		if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0) {
			limitrdnum = low_writer;
			numneeded = ksyn_queue_count_tolowest(kq, limitrdnum);
			if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) &&
					(is_seqlower(premgen, limitrdnum) != 0)) {
				curthreturns = 1;
				numneeded += 1;
			}
		} else {
			// no writers at all
			// no other waiters only readers
			kwq->kw_kflags |= KSYN_KWF_OVERLAP_GUARD;
			numneeded += kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count;
			if ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) {
				curthreturns = 1;
				numneeded += 1;
			}
		}

		updatebits += (numneeded << PTHRW_COUNT_SHIFT);

		kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;

		if (curthreturns != 0) {
			block = 0;
			uth = current_uthread();
			kwe = pthread_kern->uthread_get_uukwe(uth);
			kwe->kwe_psynchretval = updatebits;
		}

		nfailed = ksyn_wakeupreaders(kwq, limitrdnum, allreaders,
				updatebits, &woken);
		if (nfailed != 0) {
			_kwq_mark_interruped_wakeup(kwq, KWQ_INTR_READ, nfailed,
					limitrdnum, updatebits);
		}

		error = 0;

		if ((kwq->kw_ksynqueues[KSYN_QUEUE_WRITE].ksynq_count != 0) &&
				((updatebits & PTH_RWL_WBIT) == 0)) {
			panic("kwq_handle_unlock: writer pending but no writebit set %x\n", updatebits);
		}
	}
		break;

	case PTH_RW_TYPE_WRITE: {

		/* only one thread is going to be granted */
		updatebits |= (PTHRW_INC);
		updatebits |= PTH_RWL_KBIT | PTH_RWL_EBIT;

		if (((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0) && (low_writer == premgen)) {
			block = 0;
			if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITE].ksynq_count != 0) {
				updatebits |= PTH_RWL_WBIT;
			}
			th = preth;
			uth = pthread_kern->get_bsdthread_info(th);
			kwe = pthread_kern->uthread_get_uukwe(uth);
			kwe->kwe_psynchretval = updatebits;
		} else {
			/* we are not granting the writelock to the preposting thread */
			/* if there are writers present or a preposting write thread, the W bit is to be set */
			if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITE].ksynq_count > 1 ||
					(flags & KW_UNLOCK_PREPOST_WRLOCK) != 0) {
				updatebits |= PTH_RWL_WBIT;
			}
			/* set up the next in the queue */
			kret = ksyn_signal(kwq, KSYN_QUEUE_WRITE, NULL, updatebits);
			if (kret == KERN_NOT_WAITING) {
				_kwq_mark_interruped_wakeup(kwq, KWQ_INTR_WRITE, 1,
						low_writer, updatebits);
			}
			error = 0;
		}
		kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;
		if ((updatebits & (PTH_RWL_KBIT | PTH_RWL_EBIT)) !=
				(PTH_RWL_KBIT | PTH_RWL_EBIT)) {
			panic("kwq_handle_unlock: writer lock granted but K/E bits not set %x\n", updatebits);
		}
	}
		break;

	default:
		panic("rwunlock: invalid type for lock grants");
	}

	if (updatep != NULL) {
		*updatep = updatebits;
	}
	if (blockp != NULL) {
		*blockp = block;
	}
	return error;
}
/************* Individual queue support routines ************************/

void
ksyn_queue_init(ksyn_queue_t kq)
{
	TAILQ_INIT(&kq->ksynq_kwelist);
	kq->ksynq_count = 0;
	kq->ksynq_firstnum = 0;
	kq->ksynq_lastnum = 0;
}
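
/*
 * ksyn_queue_insert places a wait element on the per-kwq queue kqi. FIRSTFIT
 * inserts in arrival order (only the cached first/last sequence numbers are
 * updated); otherwise the list is kept ordered by lock sequence, with a
 * special case that lets a prepost share a sequence number with a cancelled
 * waiter. A rough sketch with hypothetical sequences: given [8, 16, 24],
 * inserting 20 lands before 24, while inserting 4 lands at the head and
 * updates ksynq_firstnum.
 */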
int
ksyn_queue_insert(ksyn_wait_queue_t kwq, int kqi, ksyn_waitq_element_t kwe,
		uint32_t mgen, int fit)
{
	ksyn_queue_t kq = &kwq->kw_ksynqueues[kqi];
	uint32_t lockseq = mgen & PTHRW_COUNT_MASK;
	int res = 0;

	if (kwe->kwe_kwqqueue != NULL) {
		panic("adding enqueued item to another queue");
	}

	if (kq->ksynq_count == 0) {
		TAILQ_INSERT_HEAD(&kq->ksynq_kwelist, kwe, kwe_list);
		kq->ksynq_firstnum = lockseq;
		kq->ksynq_lastnum = lockseq;
	} else if (fit == FIRSTFIT) {
		/* TBD: if retry bit is set for mutex, add it to the head */
		/* firstfit, arriving order */
		TAILQ_INSERT_TAIL(&kq->ksynq_kwelist, kwe, kwe_list);
		if (is_seqlower(lockseq, kq->ksynq_firstnum)) {
			kq->ksynq_firstnum = lockseq;
		}
		if (is_seqhigher(lockseq, kq->ksynq_lastnum)) {
			kq->ksynq_lastnum = lockseq;
		}
	} else if (lockseq == kq->ksynq_firstnum || lockseq == kq->ksynq_lastnum) {
		/* During prepost when a thread is getting cancelled, we could have
		 * two with the same seq */
		res = EBUSY;
		if (kwe->kwe_state == KWE_THREAD_PREPOST) {
			ksyn_waitq_element_t tmp = ksyn_queue_find_seq(kwq, kq, lockseq);
			if (tmp != NULL && tmp->kwe_uth != NULL &&
					pthread_kern->uthread_is_cancelled(tmp->kwe_uth)) {
				TAILQ_INSERT_TAIL(&kq->ksynq_kwelist, kwe, kwe_list);
				res = 0;
			}
		}
	} else if (is_seqlower(kq->ksynq_lastnum, lockseq)) { // XXX is_seqhigher
		TAILQ_INSERT_TAIL(&kq->ksynq_kwelist, kwe, kwe_list);
		kq->ksynq_lastnum = lockseq;
	} else if (is_seqlower(lockseq, kq->ksynq_firstnum)) {
		TAILQ_INSERT_HEAD(&kq->ksynq_kwelist, kwe, kwe_list);
		kq->ksynq_firstnum = lockseq;
	} else {
		ksyn_waitq_element_t q_kwe, r_kwe;

		res = ESRCH;
		TAILQ_FOREACH_SAFE(q_kwe, &kq->ksynq_kwelist, kwe_list, r_kwe) {
			if (is_seqhigher(q_kwe->kwe_lockseq, lockseq)) {
				TAILQ_INSERT_BEFORE(q_kwe, kwe, kwe_list);
				res = 0;
				break;
			}
		}
	}

	if (res == 0) {
		kwe->kwe_kwqqueue = kwq;
		kq->ksynq_count++;
		kwq->kw_inqueue++;
		update_low_high(kwq, lockseq);
	}
	return res;
}
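
/*
 * ksyn_queue_remove_item unlinks a wait element and refreshes the queue's
 * cached first/last sequence numbers as well as the kwq-wide low/high
 * sequence numbers that the wraparound-aware comparisons depend on. For
 * example, if the removed element held the current kw_lowseq, the next lowest
 * first-number across all queues is recomputed via find_nextlowseq().
 */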
void
ksyn_queue_remove_item(ksyn_wait_queue_t kwq, ksyn_queue_t kq,
		ksyn_waitq_element_t kwe)
{
	if (kq->ksynq_count == 0) {
		panic("removing item from empty queue");
	}

	if (kwe->kwe_kwqqueue != kwq) {
		panic("removing item from wrong queue");
	}

	TAILQ_REMOVE(&kq->ksynq_kwelist, kwe, kwe_list);
	kwe->kwe_list.tqe_next = NULL;
	kwe->kwe_list.tqe_prev = NULL;
	kwe->kwe_kwqqueue = NULL;

	if (--kq->ksynq_count > 0) {
		ksyn_waitq_element_t tmp;
		tmp = TAILQ_FIRST(&kq->ksynq_kwelist);
		kq->ksynq_firstnum = tmp->kwe_lockseq & PTHRW_COUNT_MASK;
		tmp = TAILQ_LAST(&kq->ksynq_kwelist, ksynq_kwelist_head);
		kq->ksynq_lastnum = tmp->kwe_lockseq & PTHRW_COUNT_MASK;
	} else {
		kq->ksynq_firstnum = 0;
		kq->ksynq_lastnum = 0;
	}

	if (--kwq->kw_inqueue > 0) {
		uint32_t curseq = kwe->kwe_lockseq & PTHRW_COUNT_MASK;
		if (kwq->kw_lowseq == curseq) {
			kwq->kw_lowseq = find_nextlowseq(kwq);
		}
		if (kwq->kw_highseq == curseq) {
			kwq->kw_highseq = find_nexthighseq(kwq);
		}
	} else {
		kwq->kw_lowseq = 0;
		kwq->kw_highseq = 0;
	}
}
ksyn_waitq_element_t
ksyn_queue_find_seq(__unused ksyn_wait_queue_t kwq, ksyn_queue_t kq,
		uint32_t seq)
{
	ksyn_waitq_element_t kwe;

	// XXX: should stop searching when higher sequence number is seen
	TAILQ_FOREACH(kwe, &kq->ksynq_kwelist, kwe_list) {
		if ((kwe->kwe_lockseq & PTHRW_COUNT_MASK) == seq) {
			return kwe;
		}
	}
	return NULL;
}
/* find the thread at the target sequence (or a broadcast/prepost at or above) */
ksyn_waitq_element_t
ksyn_queue_find_cvpreposeq(ksyn_queue_t kq, uint32_t cgen)
{
	ksyn_waitq_element_t result = NULL;
	ksyn_waitq_element_t kwe;
	uint32_t lgen = (cgen & PTHRW_COUNT_MASK);

	TAILQ_FOREACH(kwe, &kq->ksynq_kwelist, kwe_list) {
		if (is_seqhigher_eq(kwe->kwe_lockseq, cgen)) {
			result = kwe;

			// KWE_THREAD_INWAIT must be strictly equal
			if (kwe->kwe_state == KWE_THREAD_INWAIT &&
					(kwe->kwe_lockseq & PTHRW_COUNT_MASK) != lgen) {
				result = NULL;
			}
			break;
		}
	}
	return result;
}
/* look for a thread at signalseq, or the best eligible candidate up to uptoseq */
ksyn_waitq_element_t
ksyn_queue_find_signalseq(__unused ksyn_wait_queue_t kwq, ksyn_queue_t kq,
		uint32_t uptoseq, uint32_t signalseq)
{
	ksyn_waitq_element_t result = NULL;
	ksyn_waitq_element_t q_kwe, r_kwe;

	/* case where wrap in the tail of the queue exists */
	TAILQ_FOREACH_SAFE(q_kwe, &kq->ksynq_kwelist, kwe_list, r_kwe) {
		if (q_kwe->kwe_state == KWE_THREAD_PREPOST) {
			if (is_seqhigher(q_kwe->kwe_lockseq, uptoseq)) {
				return result;
			}
		}
		if (q_kwe->kwe_state == KWE_THREAD_PREPOST ||
				q_kwe->kwe_state == KWE_THREAD_BROADCAST) {
			/* match any prepost at our same uptoseq or any broadcast above */
			if (is_seqlower(q_kwe->kwe_lockseq, uptoseq)) {
				continue;
			}
			return q_kwe;
		} else if (q_kwe->kwe_state == KWE_THREAD_INWAIT) {
			/*
			 * Match any (non-cancelled) thread at or below our upto sequence -
			 * but prefer an exact match to our signal sequence (if present) to
			 * keep exact matches happening.
			 */
			if (is_seqhigher(q_kwe->kwe_lockseq, uptoseq)) {
				return result;
			}
			if (q_kwe->kwe_kwqqueue == kwq) {
				if (!pthread_kern->uthread_is_cancelled(q_kwe->kwe_uth)) {
					/* if equal or higher than our signal sequence, return this one */
					if (is_seqhigher_eq(q_kwe->kwe_lockseq, signalseq)) {
						return q_kwe;
					}

					/* otherwise, just remember this eligible thread and move on */
					if (result == NULL) {
						result = q_kwe;
					}
				}
			}
		} else {
			panic("ksyn_queue_find_signalseq(): unknown wait queue element type (%d)\n",
					q_kwe->kwe_state);
		}
	}
	return result;
}
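
/*
 * ksyn_queue_free_items drains a queue up to (and including) sequence `upto`,
 * or entirely when `all` is set. Real waiters are woken with the M bit so
 * userspace treats the wakeup as spurious, while fake entries (preposts and
 * broadcasts) are returned to the zone.
 */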
void
ksyn_queue_free_items(ksyn_wait_queue_t kwq, int kqi, uint32_t upto, int all)
{
	ksyn_waitq_element_t kwe;
	uint32_t tseq = upto & PTHRW_COUNT_MASK;
	ksyn_queue_t kq = &kwq->kw_ksynqueues[kqi];
	uint32_t freed = 0, signaled = 0;

	PTHREAD_TRACE(psynch_cvar_freeitems | DBG_FUNC_START, kwq->kw_addr,
			kqi, upto, all);

	while ((kwe = TAILQ_FIRST(&kq->ksynq_kwelist)) != NULL) {
		if (all == 0 && is_seqhigher(kwe->kwe_lockseq, tseq)) {
			break;
		}
		if (kwe->kwe_state == KWE_THREAD_INWAIT) {
			/*
			 * This scenario is typically noticed when the cvar is
			 * reinited and the new waiters are waiting. We can
			 * return them as spurious wait so the cvar state gets
			 * reset correctly.
			 */
			PTHREAD_TRACE(psynch_cvar_freeitems, kwq->kw_addr, kwe,
					kwq->kw_inqueue, 1);

			/* skip canceled ones */
			/* set M bit to indicate to waking CV to return Inc val */
			(void)ksyn_signal(kwq, kqi, kwe,
					PTHRW_INC | PTH_RWS_CV_MBIT | PTH_RWL_MTX_WAIT);
			signaled++;
		} else {
			PTHREAD_TRACE(psynch_cvar_freeitems, kwq->kw_addr, kwe,
					kwq->kw_inqueue, 2);
			ksyn_queue_remove_item(kwq, kq, kwe);
			zfree(kwe_zone, kwe);
			kwq->kw_fakecount--;
			freed++;
		}
	}

	PTHREAD_TRACE(psynch_cvar_freeitems | DBG_FUNC_END, kwq->kw_addr, freed,
			signaled, kwq->kw_inqueue);
}
/*************************************************************************/

static void
update_low_high(ksyn_wait_queue_t kwq, uint32_t lockseq)
{
	if (kwq->kw_inqueue == 1) {
		kwq->kw_lowseq = lockseq;
		kwq->kw_highseq = lockseq;
	} else {
		if (is_seqlower(lockseq, kwq->kw_lowseq)) {
			kwq->kw_lowseq = lockseq;
		}
		if (is_seqhigher(lockseq, kwq->kw_highseq)) {
			kwq->kw_highseq = lockseq;
		}
	}
}
static uint32_t
find_nextlowseq(ksyn_wait_queue_t kwq)
{
	uint32_t lowest = 0;
	int first = 1;
	int i;

	for (i = 0; i < KSYN_QUEUE_MAX; i++) {
		if (kwq->kw_ksynqueues[i].ksynq_count > 0) {
			uint32_t current = kwq->kw_ksynqueues[i].ksynq_firstnum;
			if (first || is_seqlower(current, lowest)) {
				lowest = current;
				first = 0;
			}
		}
	}

	return lowest;
}
static uint32_t
find_nexthighseq(ksyn_wait_queue_t kwq)
{
	uint32_t highest = 0;
	int first = 1;
	int i;

	for (i = 0; i < KSYN_QUEUE_MAX; i++) {
		if (kwq->kw_ksynqueues[i].ksynq_count > 0) {
			uint32_t current = kwq->kw_ksynqueues[i].ksynq_lastnum;
			if (first || is_seqhigher(current, highest)) {
				highest = current;
				first = 0;
			}
		}
	}

	return highest;
}
int
find_seq_till(ksyn_wait_queue_t kwq, uint32_t upto, uint32_t nwaiters,
		uint32_t *countp)
{
	int i;
	uint32_t count = 0;

	for (i = 0; i < KSYN_QUEUE_MAX; i++) {
		count += ksyn_queue_count_tolowest(&kwq->kw_ksynqueues[i], upto);
		if (count >= nwaiters) {
			break;
		}
	}

	if (countp != NULL) {
		*countp = count;
	}

	if (count == 0) {
		return 0;
	} else if (count >= nwaiters) {
		return 1;
	}
	return 0;
}
uint32_t
ksyn_queue_count_tolowest(ksyn_queue_t kq, uint32_t upto)
{
	uint32_t i = 0;
	ksyn_waitq_element_t kwe, newkwe;

	if (kq->ksynq_count == 0 || is_seqhigher(kq->ksynq_firstnum, upto)) {
		return 0;
	}
	if (upto == kq->ksynq_firstnum) {
		return 1;
	}
	TAILQ_FOREACH_SAFE(kwe, &kq->ksynq_kwelist, kwe_list, newkwe) {
		uint32_t curval = (kwe->kwe_lockseq & PTHRW_COUNT_MASK);
		if (is_seqhigher(curval, upto)) {
			break;
		}
		++i;
		if (upto == curval) {
			break;
		}
	}
	return i;
}
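
/*
 * The broadcast path below wakes every non-cancelled waiter at or below
 * `upto`, retires any fake entries found along the way, and, if the condvar's
 * L and S sequence words still differ, records a broadcast prepost so that
 * late arrivals up to `upto` are also released. *updatep accumulates
 * PTHRW_INC once per thread actually woken.
 */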
/* handles the cond broadcast of a cvar and returns the number of woken threads and bits for syscall return */
void
ksyn_handle_cvbroad(ksyn_wait_queue_t ckwq, uint32_t upto, uint32_t *updatep)
{
	ksyn_waitq_element_t kwe, newkwe;
	uint32_t updatebits = 0;
	ksyn_queue_t kq = &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITE];

	struct ksyn_queue kfreeq;
	ksyn_queue_init(&kfreeq);

	PTHREAD_TRACE(psynch_cvar_broadcast | DBG_FUNC_START, ckwq->kw_addr, upto,
			ckwq->kw_inqueue, 0);

retry:
	TAILQ_FOREACH_SAFE(kwe, &kq->ksynq_kwelist, kwe_list, newkwe) {
		if (is_seqhigher(kwe->kwe_lockseq, upto)) {
			// outside our range
			break;
		}

		if (kwe->kwe_state == KWE_THREAD_INWAIT) {
			// Wake only non-canceled threads waiting on this CV.
			if (!pthread_kern->uthread_is_cancelled(kwe->kwe_uth)) {
				PTHREAD_TRACE(psynch_cvar_broadcast, ckwq->kw_addr, kwe, 0, 1);
				(void)ksyn_signal(ckwq, KSYN_QUEUE_WRITE, kwe, PTH_RWL_MTX_WAIT);
				updatebits += PTHRW_INC;
			}
		} else if (kwe->kwe_state == KWE_THREAD_BROADCAST ||
				kwe->kwe_state == KWE_THREAD_PREPOST) {
			PTHREAD_TRACE(psynch_cvar_broadcast, ckwq->kw_addr, kwe, 0, 2);
			ksyn_queue_remove_item(ckwq, kq, kwe);
			TAILQ_INSERT_TAIL(&kfreeq.ksynq_kwelist, kwe, kwe_list);
			ckwq->kw_fakecount--;
		} else {
			panic("unknown kwe state\n");
		}
	}

	/* Need to enter a broadcast in the queue (if not already at L == S) */

	if (diff_genseq(ckwq->kw_lword, ckwq->kw_sword)) {
		PTHREAD_TRACE(psynch_cvar_broadcast, ckwq->kw_addr, ckwq->kw_lword,
				ckwq->kw_sword, 3);

		newkwe = TAILQ_FIRST(&kfreeq.ksynq_kwelist);
		if (newkwe == NULL) {
			ksyn_wqunlock(ckwq);
			newkwe = (ksyn_waitq_element_t)zalloc(kwe_zone);
			TAILQ_INSERT_TAIL(&kfreeq.ksynq_kwelist, newkwe, kwe_list);
			ksyn_wqlock(ckwq);
			goto retry;
		} else {
			TAILQ_REMOVE(&kfreeq.ksynq_kwelist, newkwe, kwe_list);
			ksyn_prepost(ckwq, newkwe, KWE_THREAD_BROADCAST, upto);
			PTHREAD_TRACE(psynch_cvar_broadcast, ckwq->kw_addr, newkwe, 0, 4);
		}
	}

	// free up any remaining things stumbled across above
	while ((kwe = TAILQ_FIRST(&kfreeq.ksynq_kwelist)) != NULL) {
		TAILQ_REMOVE(&kfreeq.ksynq_kwelist, kwe, kwe_list);
		zfree(kwe_zone, kwe);
	}

	PTHREAD_TRACE(psynch_cvar_broadcast | DBG_FUNC_END, ckwq->kw_addr,
			updatebits, 0, 0);

	if (updatep != NULL) {
		*updatep |= updatebits;
	}
}
void
ksyn_cvupdate_fixup(ksyn_wait_queue_t ckwq, uint32_t *updatebits)
{
	if ((ckwq->kw_lword & PTHRW_COUNT_MASK) == (ckwq->kw_sword & PTHRW_COUNT_MASK)) {
		if (ckwq->kw_inqueue != 0) {
			/* FREE THE QUEUE */
			ksyn_queue_free_items(ckwq, KSYN_QUEUE_WRITE, ckwq->kw_lword, 0);
#if __TESTPANICS__
			if (ckwq->kw_inqueue != 0)
				panic("ksyn_cvupdate_fixup: L == S, but entries in queue beyond S");
#endif /* __TESTPANICS__ */
		}
		ckwq->kw_lword = ckwq->kw_uword = ckwq->kw_sword = 0;
		ckwq->kw_kflags |= KSYN_KWF_ZEROEDOUT;
		*updatebits |= PTH_RWS_CV_CBIT;
	} else if (ckwq->kw_inqueue != 0 && ckwq->kw_fakecount == ckwq->kw_inqueue) {
		// only fake entries are present in the queue
		*updatebits |= PTH_RWS_CV_PBIT;
	}
}
void
psynch_zoneinit(void)
{
	kwq_zone = zinit(sizeof(struct ksyn_wait_queue),
			8192 * sizeof(struct ksyn_wait_queue), 4096, "ksyn_wait_queue");
	kwe_zone = zinit(sizeof(struct ksyn_waitq_element),
			8192 * sizeof(struct ksyn_waitq_element), 4096, "ksyn_waitq_element");
}
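
/*
 * _pthread_get_thread_kwq maps a thread back to the kwq it is currently
 * enqueued on (if any) via the wait element stored in its uthread; the
 * stackshot support below uses it to report what a blocked thread is
 * waiting on.
 */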
void *
_pthread_get_thread_kwq(thread_t thread)
{
	assert(thread);
	struct uthread * uthread = pthread_kern->get_bsdthread_info(thread);
	assert(uthread);
	ksyn_waitq_element_t kwe = pthread_kern->uthread_get_uukwe(uthread);
	assert(kwe);
	ksyn_wait_queue_t kwq = kwe->kwe_kwqqueue;
	return kwq;
}
/* This function is used by stackshot to determine why a thread is blocked, and report
 * who owns the object that the thread is blocked on. It should *only* be called if the
 * `block_hint' field in the relevant thread's struct is populated with something related
 * to pthread sync objects.
 */
void
_pthread_find_owner(thread_t thread,
		struct stackshot_thread_waitinfo * waitinfo)
{
	ksyn_wait_queue_t kwq = _pthread_get_thread_kwq(thread);
	switch (waitinfo->wait_type) {
	case kThreadWaitPThreadMutex:
		assert((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_MTX);
		waitinfo->owner = thread_tid(kwq->kw_owner);
		waitinfo->context = kwq->kw_addr;
		break;
	/* Owner of rwlock not stored in kernel space due to races. Punt
	 * and hope that the userspace address is helpful enough. */
	case kThreadWaitPThreadRWLockRead:
	case kThreadWaitPThreadRWLockWrite:
		assert((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_RWLOCK);
		waitinfo->owner = 0;
		waitinfo->context = kwq->kw_addr;
		break;
	/* Condvars don't have owners, so just give the userspace address. */
	case kThreadWaitPThreadCondVar:
		assert((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_CVAR);
		waitinfo->owner = 0;
		waitinfo->context = kwq->kw_addr;
		break;
	case kThreadWaitNone:
	default:
		waitinfo->owner = 0;
		waitinfo->context = 0;
		break;
	}
}