1 /*
2 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995-2005 Apple Computer, Inc. All Rights Reserved */
29 /*
30 * pthread_support.c
31 */
32
33 #include <sys/param.h>
34 #include <sys/queue.h>
35 #include <sys/resourcevar.h>
36 //#include <sys/proc_internal.h>
37 #include <sys/kauth.h>
38 #include <sys/systm.h>
39 #include <sys/timeb.h>
40 #include <sys/times.h>
41 #include <sys/time.h>
42 #include <sys/acct.h>
43 #include <sys/kernel.h>
44 #include <sys/wait.h>
45 #include <sys/signalvar.h>
46 #include <sys/syslog.h>
47 #include <sys/stat.h>
48 #include <sys/lock.h>
49 #include <sys/kdebug.h>
50 //#include <sys/sysproto.h>
51 //#include <sys/pthread_internal.h>
52 #include <sys/vm.h>
53 #include <sys/user.h>
54
55 #include <mach/mach_types.h>
56 #include <mach/vm_prot.h>
57 #include <mach/semaphore.h>
58 #include <mach/sync_policy.h>
59 #include <mach/task.h>
60 #include <kern/kern_types.h>
61 #include <kern/task.h>
62 #include <kern/clock.h>
63 #include <mach/kern_return.h>
64 #include <kern/thread.h>
65 #include <kern/sched_prim.h>
66 #include <kern/thread_call.h>
67 #include <kern/kalloc.h>
68 #include <kern/zalloc.h>
69 #include <kern/sched_prim.h>
70 #include <kern/processor.h>
71 #include <kern/block_hint.h>
72 #include <kern/turnstile.h>
73 //#include <kern/mach_param.h>
74 #include <mach/mach_vm.h>
75 #include <mach/mach_param.h>
76 #include <mach/thread_policy.h>
77 #include <mach/message.h>
78 #include <mach/port.h>
79 //#include <vm/vm_protos.h>
80 #include <vm/vm_map.h>
81 #include <mach/vm_region.h>
82
83 #include <libkern/OSAtomic.h>
84
85 #include <pexpert/pexpert.h>
86
87 #include "kern_internal.h"
88 #include "synch_internal.h"
89 #include "kern_trace.h"
90
91 typedef struct uthread *uthread_t;
92
93 //#define __FAILEDUSERTEST__(s) do { panic(s); } while (0)
94 #define __FAILEDUSERTEST__(s) do { printf("PSYNCH: pid[%d]: %s\n", proc_pid(current_proc()), s); } while (0)
95 #define __FAILEDUSERTEST2__(s, x...) do { printf("PSYNCH: pid[%d]: " s "\n", proc_pid(current_proc()), x); } while (0)
96
97 lck_mtx_t *pthread_list_mlock;
98
99 #define PTH_HASHSIZE 100
100
101 static LIST_HEAD(pthhashhead, ksyn_wait_queue) *pth_glob_hashtbl;
102 static unsigned long pthhash;
103
104 static LIST_HEAD(, ksyn_wait_queue) pth_free_list;
105
106 static zone_t kwq_zone; /* zone for allocation of ksyn_queue */
107 static zone_t kwe_zone; /* zone for allocation of ksyn_waitq_element */
108
109 #define SEQFIT 0
110 #define FIRSTFIT 1
111
112 struct ksyn_queue {
113 TAILQ_HEAD(ksynq_kwelist_head, ksyn_waitq_element) ksynq_kwelist;
114 uint32_t ksynq_count; /* number of entries in queue */
115 uint32_t ksynq_firstnum; /* lowest seq in queue */
116 uint32_t ksynq_lastnum; /* highest seq in queue */
117 };
118 typedef struct ksyn_queue *ksyn_queue_t;
119
120 typedef enum {
121 KSYN_QUEUE_READ = 0,
122 KSYN_QUEUE_WRITE,
123 KSYN_QUEUE_MAX,
124 } kwq_queue_type_t;
125
126 typedef enum {
127 KWQ_INTR_NONE = 0,
128 KWQ_INTR_READ = 0x1,
129 KWQ_INTR_WRITE = 0x2,
130 } kwq_intr_type_t;
131
132 struct ksyn_wait_queue {
133 LIST_ENTRY(ksyn_wait_queue) kw_hash;
134 LIST_ENTRY(ksyn_wait_queue) kw_list;
135 user_addr_t kw_addr;
136 thread_t kw_owner; /* current owner or THREAD_NULL, has a +1 */
137 uint64_t kw_object; /* object backing in shared mode */
138 uint64_t kw_offset; /* offset inside the object in shared mode */
139 int kw_pflags; /* flags under listlock protection */
140 struct timeval kw_ts; /* timeval needed for upkeep before free */
141 int kw_iocount; /* inuse reference */
142 int kw_dropcount; /* current users unlocking... */
143
144 int kw_type; /* queue type like mutex, cvar, etc */
145 uint32_t kw_inqueue; /* num of waiters held */
146 uint32_t kw_fakecount; /* number of error/prepost fakes */
147 uint32_t kw_highseq; /* highest seq in the queue */
148 uint32_t kw_lowseq; /* lowest seq in the queue */
149 uint32_t kw_lword; /* L value from userland */
150 uint32_t kw_uword; /* U word value from userland */
151 uint32_t kw_sword; /* S word value from userland */
152 uint32_t kw_lastunlockseq; /* the last seq that unlocked */
153 /* for CVs this field is reused as the seq the kernel has seen so far */
154 #define kw_cvkernelseq kw_lastunlockseq
155 uint32_t kw_lastseqword; /* the last seq that unlocked */
156 /* for mutex and cvar we need to track I bit values */
157 uint32_t kw_nextseqword; /* the last seq that unlocked; with num of waiters */
158 struct {
159 uint32_t count; /* prepost count */
160 uint32_t lseq; /* prepost target seq */
161 uint32_t sseq; /* prepost target sword, in cvar used for mutexowned */
162 } kw_prepost;
163 struct {
164 kwq_intr_type_t type; /* type of failed wakeups */
165 uint32_t count; /* prepost of missed wakeup due to intrs */
166 uint32_t seq; /* prepost of missed wakeup limit seq */
167 uint32_t returnbits; /* return bits value for missed wakeup threads */
168 } kw_intr;
169
170 int kw_kflags;
171 int kw_qos_override; /* QoS of max waiter during contention period */
172 struct turnstile *kw_turnstile;
173 struct ksyn_queue kw_ksynqueues[KSYN_QUEUE_MAX]; /* queues to hold threads */
174 lck_spin_t kw_lock; /* spinlock protecting this structure */
175 };
176 typedef struct ksyn_wait_queue * ksyn_wait_queue_t;
177
178 #define TID_ZERO (uint64_t)0
179
180 /* bits needed in handling the rwlock unlock */
181 #define PTH_RW_TYPE_READ 0x01
182 #define PTH_RW_TYPE_WRITE 0x04
183 #define PTH_RW_TYPE_MASK 0xff
184 #define PTH_RW_TYPE_SHIFT 8
185
186 #define PTH_RWSHFT_TYPE_READ 0x0100
187 #define PTH_RWSHFT_TYPE_WRITE 0x0400
188 #define PTH_RWSHFT_TYPE_MASK 0xff00
189
190 /*
191 * Mutex pshared attributes
192 */
193 #define PTHREAD_PROCESS_SHARED _PTHREAD_MTX_OPT_PSHARED
194 #define PTHREAD_PROCESS_PRIVATE 0x20
195 #define PTHREAD_PSHARED_FLAGS_MASK 0x30
196
197 /*
198 * Mutex policy attributes
199 */
200 #define _PTHREAD_MTX_OPT_POLICY_FAIRSHARE 0x040 /* 1 */
201 #define _PTHREAD_MTX_OPT_POLICY_FIRSTFIT 0x080 /* 2 */
202 #define _PTHREAD_MTX_OPT_POLICY_MASK 0x1c0
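/*
 * Illustrative example: the wait/drop paths below derive the queueing policy
 * with
 *     (flags & _PTHREAD_MTX_OPT_POLICY_MASK) == _PTHREAD_MTX_OPT_POLICY_FIRSTFIT
 * so a flags word carrying 0x080 in the policy field selects first-fit
 * handling, and anything else falls back to sequence-fit (fair-share)
 * queueing.
 */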
203
204 /* pflags */
205 #define KSYN_WQ_INHASH 2
206 #define KSYN_WQ_SHARED 4
207 #define KSYN_WQ_WAITING 8 /* threads waiting for this wq to be available */
208 #define KSYN_WQ_FLIST 0x10 /* in free list to be freed after a short delay */
209
210 /* kflags */
211 #define KSYN_KWF_INITCLEARED 0x1 /* the init status found and preposts cleared */
212 #define KSYN_KWF_ZEROEDOUT 0x2 /* the lword, etc are inited to 0 */
213 #define KSYN_KWF_QOS_APPLIED 0x4 /* QoS override applied to owner */
214 #define KSYN_KWF_OVERLAP_GUARD 0x8 /* overlap guard */
215
216 #define KSYN_CLEANUP_DEADLINE 10
217 static int psynch_cleanupset;
218 thread_call_t psynch_thcall;
219
220 #define KSYN_WQTYPE_INWAIT 0x1000
221 #define KSYN_WQTYPE_INDROP 0x2000
222 #define KSYN_WQTYPE_MTX 0x01
223 #define KSYN_WQTYPE_CVAR 0x02
224 #define KSYN_WQTYPE_RWLOCK 0x04
225 #define KSYN_WQTYPE_SEMA 0x08
226 #define KSYN_WQTYPE_MASK 0xff
227
228 #define KSYN_WQTYPE_MUTEXDROP (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_MTX)
229
230 static inline int
231 _kwq_type(ksyn_wait_queue_t kwq)
232 {
233 return (kwq->kw_type & KSYN_WQTYPE_MASK);
234 }
235
236 static inline bool
237 _kwq_use_turnstile(ksyn_wait_queue_t kwq)
238 {
239 // <rdar://problem/15926625> If we had writer-owner information from the
240 // rwlock then we could use the turnstile to push on it. For now, only
241 // plain mutexes use it.
242 return (_kwq_type(kwq) == KSYN_WQTYPE_MTX);
243 }
244
245 #define KW_UNLOCK_PREPOST 0x01
246 #define KW_UNLOCK_PREPOST_READLOCK 0x08
247 #define KW_UNLOCK_PREPOST_WRLOCK 0x20
248
249 static int ksyn_wq_hash_lookup(user_addr_t uaddr, proc_t p, int flags, ksyn_wait_queue_t *kwq, struct pthhashhead **hashptr, uint64_t object, uint64_t offset);
250 static int ksyn_wqfind(user_addr_t mutex, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, int flags, int wqtype , ksyn_wait_queue_t *wq);
251 static void ksyn_wqrelease(ksyn_wait_queue_t mkwq, int qfreenow, int wqtype);
252 static int ksyn_findobj(user_addr_t uaddr, uint64_t *objectp, uint64_t *offsetp);
253
254 static int _wait_result_to_errno(wait_result_t result);
255
256 static int ksyn_wait(ksyn_wait_queue_t, kwq_queue_type_t, uint32_t, int, uint64_t, uint16_t, thread_continue_t, block_hint_t);
257 static kern_return_t ksyn_signal(ksyn_wait_queue_t, kwq_queue_type_t, ksyn_waitq_element_t, uint32_t);
258 static void ksyn_freeallkwe(ksyn_queue_t kq);
259
260 static kern_return_t ksyn_mtxsignal(ksyn_wait_queue_t, ksyn_waitq_element_t kwe, uint32_t, thread_t *);
261
262 static int kwq_handle_unlock(ksyn_wait_queue_t, uint32_t mgen, uint32_t rw_wc, uint32_t *updatep, int flags, int *blockp, uint32_t premgen);
263
264 static void ksyn_queue_init(ksyn_queue_t kq);
265 static int ksyn_queue_insert(ksyn_wait_queue_t kwq, int kqi, ksyn_waitq_element_t kwe, uint32_t mgen, int firstfit);
266 static void ksyn_queue_remove_item(ksyn_wait_queue_t kwq, ksyn_queue_t kq, ksyn_waitq_element_t kwe);
267 static void ksyn_queue_free_items(ksyn_wait_queue_t kwq, int kqi, uint32_t upto, int all);
268
269 static void update_low_high(ksyn_wait_queue_t kwq, uint32_t lockseq);
270 static uint32_t find_nextlowseq(ksyn_wait_queue_t kwq);
271 static uint32_t find_nexthighseq(ksyn_wait_queue_t kwq);
272 static int find_seq_till(ksyn_wait_queue_t kwq, uint32_t upto, uint32_t nwaiters, uint32_t *countp);
273
274 static uint32_t ksyn_queue_count_tolowest(ksyn_queue_t kq, uint32_t upto);
275
276 static ksyn_waitq_element_t ksyn_queue_find_cvpreposeq(ksyn_queue_t kq, uint32_t cgen);
277 static void ksyn_handle_cvbroad(ksyn_wait_queue_t ckwq, uint32_t upto, uint32_t *updatep);
278 static void ksyn_cvupdate_fixup(ksyn_wait_queue_t ckwq, uint32_t *updatep);
279 static ksyn_waitq_element_t ksyn_queue_find_signalseq(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t toseq, uint32_t lockseq);
280
281 static void __dead2 psynch_cvcontinue(void *, wait_result_t);
282 static void __dead2 psynch_mtxcontinue(void *, wait_result_t);
283 static void __dead2 psynch_rw_rdcontinue(void *, wait_result_t);
284 static void __dead2 psynch_rw_wrcontinue(void *, wait_result_t);
285
286 static int ksyn_wakeupreaders(ksyn_wait_queue_t kwq, uint32_t limitread, int allreaders, uint32_t updatebits, int *wokenp);
287 static int kwq_find_rw_lowest(ksyn_wait_queue_t kwq, int flags, uint32_t premgen, int *type, uint32_t lowest[]);
288 static ksyn_waitq_element_t ksyn_queue_find_seq(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t seq);
289
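/*
 * Refresh the kwq's cached view of the condvar's L, U and S words from the
 * values passed in by userland, only ever moving the cached sequences forward
 * (or taking them wholesale if the state was zeroed out by a prior L == S
 * transition).
 */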
290 static void
291 UPDATE_CVKWQ(ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t ugen, uint32_t rw_wc)
292 {
293 int sinit = ((rw_wc & PTH_RWS_CV_CBIT) != 0);
294
295 // assert((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_CVAR);
296
297 if ((kwq->kw_kflags & KSYN_KWF_ZEROEDOUT) != 0) {
298 /* the values of L,U and S are cleared out due to L==S in previous transition */
299 kwq->kw_lword = mgen;
300 kwq->kw_uword = ugen;
301 kwq->kw_sword = rw_wc;
302 kwq->kw_kflags &= ~KSYN_KWF_ZEROEDOUT;
303 } else {
304 if (is_seqhigher(mgen, kwq->kw_lword)) {
305 kwq->kw_lword = mgen;
306 }
307 if (is_seqhigher(ugen, kwq->kw_uword)) {
308 kwq->kw_uword = ugen;
309 }
310 if (sinit && is_seqhigher(rw_wc, kwq->kw_sword)) {
311 kwq->kw_sword = rw_wc;
312 }
313 }
314 if (sinit && is_seqlower(kwq->kw_cvkernelseq, rw_wc)) {
315 kwq->kw_cvkernelseq = (rw_wc & PTHRW_COUNT_MASK);
316 }
317 }
318
319 static inline void
320 _kwq_clear_preposted_wakeup(ksyn_wait_queue_t kwq)
321 {
322 kwq->kw_prepost.lseq = 0;
323 kwq->kw_prepost.sseq = PTHRW_RWS_INIT;
324 kwq->kw_prepost.count = 0;
325 }
326
327 static inline void
328 _kwq_mark_preposted_wakeup(ksyn_wait_queue_t kwq, uint32_t count,
329 uint32_t lseq, uint32_t sseq)
330 {
331 kwq->kw_prepost.count = count;
332 kwq->kw_prepost.lseq = lseq;
333 kwq->kw_prepost.sseq = sseq;
334 }
335
336 static inline void
337 _kwq_clear_interrupted_wakeup(ksyn_wait_queue_t kwq)
338 {
339 kwq->kw_intr.type = KWQ_INTR_NONE;
340 kwq->kw_intr.count = 0;
341 kwq->kw_intr.seq = 0;
342 kwq->kw_intr.returnbits = 0;
343 }
344
345 static inline void
346 _kwq_mark_interruped_wakeup(ksyn_wait_queue_t kwq, kwq_intr_type_t type,
347 uint32_t count, uint32_t lseq, uint32_t returnbits)
348 {
349 kwq->kw_intr.count = count;
350 kwq->kw_intr.seq = lseq;
351 kwq->kw_intr.returnbits = returnbits;
352 kwq->kw_intr.type = type;
353 }
354
355 static void
356 _kwq_destroy(ksyn_wait_queue_t kwq)
357 {
358 if (kwq->kw_owner) {
359 thread_deallocate(kwq->kw_owner);
360 }
361 lck_spin_destroy(&kwq->kw_lock, pthread_lck_grp);
362 zfree(kwq_zone, kwq);
363 }
364
365 #define KWQ_SET_OWNER_TRANSFER_REF 0x1
366
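/*
 * Point kw_owner at new_owner. Returns the thread whose +1 reference the
 * caller must now drop (the displaced owner, or the surplus donated reference
 * when ownership is unchanged), or THREAD_NULL if there is nothing to
 * release. With KWQ_SET_OWNER_TRANSFER_REF the caller's reference on
 * new_owner is donated to the kwq instead of taking a new one.
 */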
367 static inline thread_t
368 _kwq_set_owner(ksyn_wait_queue_t kwq, thread_t new_owner, int flags)
369 {
370 thread_t old_owner = kwq->kw_owner;
371 if (old_owner == new_owner) {
372 if (flags & KWQ_SET_OWNER_TRANSFER_REF) return new_owner;
373 return THREAD_NULL;
374 }
375 if ((flags & KWQ_SET_OWNER_TRANSFER_REF) == 0) {
376 thread_reference(new_owner);
377 }
378 kwq->kw_owner = new_owner;
379 return old_owner;
380 }
381
382 static inline thread_t
383 _kwq_clear_owner(ksyn_wait_queue_t kwq)
384 {
385 return _kwq_set_owner(kwq, THREAD_NULL, KWQ_SET_OWNER_TRANSFER_REF);
386 }
387
388 static inline void
389 _kwq_cleanup_old_owner(thread_t *thread)
390 {
391 if (*thread) {
392 thread_deallocate(*thread);
393 *thread = THREAD_NULL;
394 }
395 }
396
397 static void
398 CLEAR_REINIT_BITS(ksyn_wait_queue_t kwq)
399 {
400 if ((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_CVAR) {
401 if (kwq->kw_inqueue != 0 && kwq->kw_inqueue != kwq->kw_fakecount) {
402 panic("CV:entries in queue durinmg reinit %d:%d\n",kwq->kw_inqueue, kwq->kw_fakecount);
403 }
404 };
405 if ((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_RWLOCK) {
406 kwq->kw_nextseqword = PTHRW_RWS_INIT;
407 kwq->kw_kflags &= ~KSYN_KWF_OVERLAP_GUARD;
408 };
409 _kwq_clear_preposted_wakeup(kwq);
410 kwq->kw_lastunlockseq = PTHRW_RWL_INIT;
411 kwq->kw_lastseqword = PTHRW_RWS_INIT;
412 _kwq_clear_interrupted_wakeup(kwq);
413 kwq->kw_lword = 0;
414 kwq->kw_uword = 0;
415 kwq->kw_sword = PTHRW_RWS_INIT;
416 }
417
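/*
 * Try to consume a preposted wakeup for a waiter at lseq. Only when the last
 * prepost of the batch is consumed does it run the unlock handling; returns
 * true, with the update bits in *retval, if the caller can return to
 * userspace without blocking.
 */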
418 static bool
419 _kwq_handle_preposted_wakeup(ksyn_wait_queue_t kwq, uint32_t type,
420 uint32_t lseq, uint32_t *retval)
421 {
422 if (kwq->kw_prepost.count == 0 ||
423 !is_seqlower_eq(lseq, kwq->kw_prepost.lseq)) {
424 return false;
425 }
426
427 kwq->kw_prepost.count--;
428 if (kwq->kw_prepost.count > 0) {
429 return false;
430 }
431
432 int error, should_block = 0;
433 uint32_t updatebits = 0;
434 uint32_t pp_lseq = kwq->kw_prepost.lseq;
435 uint32_t pp_sseq = kwq->kw_prepost.sseq;
436 _kwq_clear_preposted_wakeup(kwq);
437
438 kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
439
440 error = kwq_handle_unlock(kwq, pp_lseq, pp_sseq, &updatebits,
441 (type | KW_UNLOCK_PREPOST), &should_block, lseq);
442 if (error) {
443 panic("_kwq_handle_preposted_wakeup: kwq_handle_unlock failed %d",
444 error);
445 }
446
447 if (should_block) {
448 return false;
449 }
450 *retval = updatebits;
451 return true;
452 }
453
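/*
 * Read-lock overlap fast path: if the overlap guard is set and no writer is
 * pending, admit this reader immediately by bumping kw_nextseqword and
 * handing the grant bits back in *retval.
 */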
454 static bool
455 _kwq_handle_overlap(ksyn_wait_queue_t kwq, uint32_t type, uint32_t lgenval,
456 uint32_t rw_wc, uint32_t *retval)
457 {
458 int res = 0;
459
460 // overlaps only occur on read lockers
461 if (type != PTH_RW_TYPE_READ) {
462 return false;
463 }
464
465 // check for overlap and no pending W bit (indicates writers)
466 if ((kwq->kw_kflags & KSYN_KWF_OVERLAP_GUARD) &&
467 !is_rws_savemask_set(rw_wc) && !is_rwl_wbit_set(lgenval)) {
468 /* overlap is set, so no need to check for valid state for overlap */
469
470 if (is_seqlower_eq(rw_wc, kwq->kw_nextseqword) || is_seqhigher_eq(kwq->kw_lastseqword, rw_wc)) {
471 /* increase the next expected seq by one */
472 kwq->kw_nextseqword += PTHRW_INC;
473 /* set count by one & bits from the nextseq and add M bit */
474 *retval = PTHRW_INC | ((kwq->kw_nextseqword & PTHRW_BIT_MASK) | PTH_RWL_MBIT);
475 res = 1;
476 }
477 }
478 return res;
479 }
480
481 static inline bool
482 _kwq_is_used(ksyn_wait_queue_t kwq)
483 {
484 return (kwq->kw_inqueue != 0 || kwq->kw_prepost.count != 0 ||
485 kwq->kw_intr.count != 0);
486 }
487
488 /*
489 * consumes a pending interrupted waiter, returns true if the current
490 * thread should return back to userspace because it was previously
491 * interrupted.
492 */
493 static inline bool
494 _kwq_handle_interrupted_wakeup(ksyn_wait_queue_t kwq, kwq_intr_type_t type,
495 uint32_t lseq, uint32_t *retval)
496 {
497 if (kwq->kw_intr.count != 0 && kwq->kw_intr.type == type &&
498 (!kwq->kw_intr.seq || is_seqlower_eq(lseq, kwq->kw_intr.seq))) {
499 kwq->kw_intr.count--;
500 *retval = kwq->kw_intr.returnbits;
501 if (kwq->kw_intr.returnbits == 0) {
502 _kwq_clear_interrupted_wakeup(kwq);
503 }
504 return true;
505 }
506 return false;
507 }
508
509 static void
510 pthread_list_lock(void)
511 {
512 lck_mtx_lock_spin(pthread_list_mlock);
513 }
514
515 static void
516 pthread_list_unlock(void)
517 {
518 lck_mtx_unlock(pthread_list_mlock);
519 }
520
521 static void
522 ksyn_wqlock(ksyn_wait_queue_t kwq)
523 {
524 lck_spin_lock(&kwq->kw_lock);
525 }
526
527 static void
528 ksyn_wqunlock(ksyn_wait_queue_t kwq)
529 {
530 lck_spin_unlock(&kwq->kw_lock);
531 }
532
533 /* routine to handle the mutex unlock drop, used both for the mutexunlock system call and the drop during cond wait */
534 static uint32_t
535 _psynch_mutexdrop_internal(ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t ugen,
536 int flags)
537 {
538 kern_return_t ret;
539 uint32_t returnbits = 0;
540 uint32_t updatebits = 0;
541 int firstfit = (flags & _PTHREAD_MTX_OPT_POLICY_MASK) ==
542 _PTHREAD_MTX_OPT_POLICY_FIRSTFIT;
543 uint32_t nextgen = (ugen + PTHRW_INC);
544 thread_t old_owner = THREAD_NULL;
545
546 ksyn_wqlock(kwq);
547 kwq->kw_lastunlockseq = (ugen & PTHRW_COUNT_MASK);
548
549 redrive:
550 updatebits = (kwq->kw_highseq & PTHRW_COUNT_MASK) |
551 (PTH_RWL_EBIT | PTH_RWL_KBIT);
552
553 if (firstfit) {
554 if (kwq->kw_inqueue == 0) {
555 uint32_t count = kwq->kw_prepost.count + 1;
556 // Increment the number of preposters we have waiting
557 _kwq_mark_preposted_wakeup(kwq, count, mgen & PTHRW_COUNT_MASK, 0);
558 // We don't know the current owner as we've determined this mutex
559 // drop should have a preposted locker inbound into the kernel but
560 // we have no way of knowing who it is. When it arrives, the lock
561 // path will update the turnstile owner and return it to userspace.
562 old_owner = _kwq_clear_owner(kwq);
563 pthread_kern->psynch_wait_update_owner(kwq, THREAD_NULL,
564 &kwq->kw_turnstile);
565 PTHREAD_TRACE(psynch_mutex_kwqprepost, kwq->kw_addr,
566 kwq->kw_prepost.lseq, count, 0);
567 } else {
568 // signal first waiter
569 ret = ksyn_mtxsignal(kwq, NULL, updatebits, &old_owner);
570 if (ret == KERN_NOT_WAITING) {
571 // <rdar://problem/39093536> ksyn_mtxsignal attempts to signal
572 // the thread but it sets up the turnstile inheritor first.
573 // That means we can't redrive the mutex in a loop without
574 // dropping the wq lock and cleaning up the turnstile state.
575 ksyn_wqunlock(kwq);
576 pthread_kern->psynch_wait_cleanup();
577 _kwq_cleanup_old_owner(&old_owner);
578 ksyn_wqlock(kwq);
579 goto redrive;
580 }
581 }
582 } else {
583 bool prepost = false;
584 if (kwq->kw_inqueue == 0) {
585 // No waiters in the queue.
586 prepost = true;
587 } else {
588 uint32_t low_writer = (kwq->kw_ksynqueues[KSYN_QUEUE_WRITE].ksynq_firstnum & PTHRW_COUNT_MASK);
589 if (low_writer == nextgen) {
590 /* next seq to be granted found */
591 /* since the grant could be cv, make sure mutex wait is set in case the thread interrupted out */
592 ret = ksyn_mtxsignal(kwq, NULL,
593 updatebits | PTH_RWL_MTX_WAIT, &old_owner);
594 if (ret == KERN_NOT_WAITING) {
595 /* interrupt post */
596 _kwq_mark_interruped_wakeup(kwq, KWQ_INTR_WRITE, 1,
597 nextgen, updatebits);
598 }
599 } else if (is_seqhigher(low_writer, nextgen)) {
600 prepost = true;
601 } else {
602 //__FAILEDUSERTEST__("psynch_mutexdrop_internal: FS mutex unlock sequence higher than the lowest one in queue\n");
603 ksyn_waitq_element_t kwe;
604 kwe = ksyn_queue_find_seq(kwq,
605 &kwq->kw_ksynqueues[KSYN_QUEUE_WRITE], nextgen);
606 if (kwe != NULL) {
607 /* next seq to be granted found */
608 /* since the grant could be cv, make sure mutex wait is set in case the thread interrupted out */
609 ret = ksyn_mtxsignal(kwq, kwe,
610 updatebits | PTH_RWL_MTX_WAIT, &old_owner);
611 if (ret == KERN_NOT_WAITING) {
612 goto redrive;
613 }
614 } else {
615 prepost = true;
616 }
617 }
618 }
619 if (prepost) {
620 if (kwq->kw_prepost.count != 0) {
621 __FAILEDUSERTEST__("_psynch_mutexdrop_internal: multiple preposts\n");
622 } else {
623 _kwq_mark_preposted_wakeup(kwq, 1, nextgen & PTHRW_COUNT_MASK,
624 0);
625 }
626 old_owner = _kwq_clear_owner(kwq);
627 pthread_kern->psynch_wait_update_owner(kwq, THREAD_NULL,
628 &kwq->kw_turnstile);
629 }
630 }
631
632 ksyn_wqunlock(kwq);
633 pthread_kern->psynch_wait_cleanup();
634 _kwq_cleanup_old_owner(&old_owner);
635 ksyn_wqrelease(kwq, 1, KSYN_WQTYPE_MUTEXDROP);
636 return returnbits;
637 }
638
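/*
 * Returns nonzero when userspace has reinitialized the object (PTHRW_RWL_INIT
 * set in lgenval); the first caller to notice clears the stale prepost and
 * interrupt state and marks the kwq so this is only done once.
 */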
639 static int
640 _ksyn_check_init(ksyn_wait_queue_t kwq, uint32_t lgenval)
641 {
642 int res = (lgenval & PTHRW_RWL_INIT) != 0;
643 if (res) {
644 if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
645 /* first to notice the reset of the lock, clear preposts */
646 CLEAR_REINIT_BITS(kwq);
647 kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
648 }
649 }
650 return res;
651 }
652
653 /*
654 * psynch_mutexwait: This system call is used for contended psynch mutexes to
655 * block.
656 */
657 int
658 _psynch_mutexwait(__unused proc_t p, user_addr_t mutex, uint32_t mgen,
659 uint32_t ugen, uint64_t tid, uint32_t flags, uint32_t *retval)
660 {
661 ksyn_wait_queue_t kwq;
662 int error = 0;
663 int firstfit = (flags & _PTHREAD_MTX_OPT_POLICY_MASK)
664 == _PTHREAD_MTX_OPT_POLICY_FIRSTFIT;
665 int ins_flags = SEQFIT;
666 uint32_t lseq = (mgen & PTHRW_COUNT_MASK);
667 uint32_t updatebits = 0;
668 thread_t tid_th = THREAD_NULL, old_owner = THREAD_NULL;
669
670 if (firstfit) {
671 /* first fit */
672 ins_flags = FIRSTFIT;
673 }
674
675 error = ksyn_wqfind(mutex, mgen, ugen, 0, flags,
676 (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_MTX), &kwq);
677 if (error != 0) {
678 return error;
679 }
680
681 again:
682 ksyn_wqlock(kwq);
683
684 if (_kwq_handle_interrupted_wakeup(kwq, KWQ_INTR_WRITE, lseq, retval)) {
685 old_owner = _kwq_set_owner(kwq, current_thread(), 0);
686 pthread_kern->psynch_wait_update_owner(kwq, kwq->kw_owner,
687 &kwq->kw_turnstile);
688 ksyn_wqunlock(kwq);
689 _kwq_cleanup_old_owner(&old_owner);
690 goto out;
691 }
692
693 if (kwq->kw_prepost.count && (firstfit || (lseq == kwq->kw_prepost.lseq))) {
694 /* got preposted lock */
695 kwq->kw_prepost.count--;
696
697 if (!firstfit) {
698 if (kwq->kw_prepost.count > 0) {
699 __FAILEDUSERTEST__("psynch_mutexwait: more than one prepost\n");
700 kwq->kw_prepost.lseq += PTHRW_INC; /* look for next one */
701 ksyn_wqunlock(kwq);
702 error = EINVAL;
703 goto out;
704 }
705 _kwq_clear_preposted_wakeup(kwq);
706 }
707
708 if (kwq->kw_inqueue == 0) {
709 updatebits = lseq | (PTH_RWL_KBIT | PTH_RWL_EBIT);
710 } else {
711 updatebits = (kwq->kw_highseq & PTHRW_COUNT_MASK) |
712 (PTH_RWL_KBIT | PTH_RWL_EBIT);
713 }
714 updatebits &= ~PTH_RWL_MTX_WAIT;
715
716 if (updatebits == 0) {
717 __FAILEDUSERTEST__("psynch_mutexwait(prepost): returning 0 lseq in mutexwait with no EBIT \n");
718 }
719
720 PTHREAD_TRACE(psynch_mutex_kwqprepost, kwq->kw_addr,
721 kwq->kw_prepost.lseq, kwq->kw_prepost.count, 1);
722
723 old_owner = _kwq_set_owner(kwq, current_thread(), 0);
724 pthread_kern->psynch_wait_update_owner(kwq, kwq->kw_owner,
725 &kwq->kw_turnstile);
726
727 ksyn_wqunlock(kwq);
728 _kwq_cleanup_old_owner(&old_owner);
729 *retval = updatebits;
730 goto out;
731 }
732
733 // mutexwait passes in an owner hint at the time userspace contended for
734 // the mutex, however, the owner tid in the userspace data structure may be
735 // unset or SWITCHING (-1), or it may correspond to a stale snapshot after
736 // the lock has subsequently been unlocked by another thread.
737 if (tid == thread_tid(kwq->kw_owner)) {
738 // userspace and kernel agree
739 } else if (tid == 0) {
740 // contender came in before owner could write TID
741 // let's assume that what the kernel knows is accurate
742 // for all we know this waiter came in late in the kernel
743 } else if (kwq->kw_lastunlockseq != PTHRW_RWL_INIT &&
744 is_seqlower(ugen, kwq->kw_lastunlockseq)) {
745 // owner is stale, someone has come in and unlocked since this
746 // contended read the TID, so assume what is known in the kernel is
747 // accurate
748 } else if (tid == PTHREAD_MTX_TID_SWITCHING) {
749 // userspace didn't know the owner because it was being unlocked, but
750 // that unlocker hasn't reached the kernel yet. So assume what is known
751 // in the kernel is accurate
752 } else {
753 // hint is being passed in for a specific thread, and we have no reason
754 // not to trust it (like the kernel unlock sequence being higher)
755 //
756 // So resolve the hint to a thread_t if we haven't done so yet
757 // and redrive as we dropped the lock
758 if (tid_th == THREAD_NULL) {
759 ksyn_wqunlock(kwq);
760 tid_th = pthread_kern->task_findtid(current_task(), tid);
761 if (tid_th == THREAD_NULL) tid = 0;
762 goto again;
763 }
764 tid_th = _kwq_set_owner(kwq, tid_th, KWQ_SET_OWNER_TRANSFER_REF);
765 }
766
767 if (tid_th) {
768 // We are on our way to block, and can't drop the spinlock anymore
769 pthread_kern->thread_deallocate_safe(tid_th);
770 tid_th = THREAD_NULL;
771 }
772 error = ksyn_wait(kwq, KSYN_QUEUE_WRITE, mgen, ins_flags, 0, 0,
773 psynch_mtxcontinue, kThreadWaitPThreadMutex);
774 // ksyn_wait drops wait queue lock
775 out:
776 pthread_kern->psynch_wait_cleanup();
777 ksyn_wqrelease(kwq, 1, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_MTX));
778 if (tid_th) {
779 thread_deallocate(tid_th);
780 }
781 return error;
782 }
783
784 void __dead2
785 psynch_mtxcontinue(void *parameter, wait_result_t result)
786 {
787 uthread_t uth = current_uthread();
788 ksyn_wait_queue_t kwq = parameter;
789 ksyn_waitq_element_t kwe = pthread_kern->uthread_get_uukwe(uth);
790
791 ksyn_wqlock(kwq);
792
793 int error = _wait_result_to_errno(result);
794 if (error != 0) {
795 if (kwe->kwe_kwqqueue) {
796 ksyn_queue_remove_item(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITE], kwe);
797 }
798 } else {
799 uint32_t updatebits = kwe->kwe_psynchretval & ~PTH_RWL_MTX_WAIT;
800 pthread_kern->uthread_set_returnval(uth, updatebits);
801
802 if (updatebits == 0) {
803 __FAILEDUSERTEST__("psynch_mutexwait: returning 0 lseq in mutexwait with no EBIT \n");
804 }
805 }
806
807 pthread_kern->psynch_wait_complete(kwq, &kwq->kw_turnstile);
808
809 ksyn_wqunlock(kwq);
810 pthread_kern->psynch_wait_cleanup();
811 ksyn_wqrelease(kwq, 1, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_MTX));
812 pthread_kern->unix_syscall_return(error);
813 __builtin_unreachable();
814 }
815
816 static void __dead2
817 _psynch_rw_continue(ksyn_wait_queue_t kwq, kwq_queue_type_t kqi,
818 wait_result_t result)
819 {
820 uthread_t uth = current_uthread();
821 ksyn_waitq_element_t kwe = pthread_kern->uthread_get_uukwe(uth);
822
823 ksyn_wqlock(kwq);
824
825 int error = _wait_result_to_errno(result);
826 if (error != 0) {
827 if (kwe->kwe_kwqqueue) {
828 ksyn_queue_remove_item(kwq, &kwq->kw_ksynqueues[kqi], kwe);
829 }
830 } else {
831 pthread_kern->uthread_set_returnval(uth, kwe->kwe_psynchretval);
832 }
833
834 ksyn_wqunlock(kwq);
835 ksyn_wqrelease(kwq, 0, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_RWLOCK));
836
837 pthread_kern->unix_syscall_return(error);
838 __builtin_unreachable();
839 }
840
841 void __dead2
842 psynch_rw_rdcontinue(void *parameter, wait_result_t result)
843 {
844 _psynch_rw_continue(parameter, KSYN_QUEUE_READ, result);
845 }
846
847 void __dead2
848 psynch_rw_wrcontinue(void *parameter, wait_result_t result)
849 {
850 _psynch_rw_continue(parameter, KSYN_QUEUE_WRITE, result);
851 }
852
853 /*
854 * psynch_mutexdrop: This system call is used for unlock postings on contended psynch mutexes.
855 */
856 int
857 _psynch_mutexdrop(__unused proc_t p, user_addr_t mutex, uint32_t mgen,
858 uint32_t ugen, uint64_t tid __unused, uint32_t flags, uint32_t *retval)
859 {
860 int res;
861 ksyn_wait_queue_t kwq;
862
863 res = ksyn_wqfind(mutex, mgen, ugen, 0, flags, KSYN_WQTYPE_MUTEXDROP, &kwq);
864 if (res == 0) {
865 uint32_t updateval = _psynch_mutexdrop_internal(kwq, mgen, ugen, flags);
866 /* drops the kwq reference */
867 if (retval) {
868 *retval = updateval;
869 }
870 }
871
872 return res;
873 }
874
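/*
 * Wake one mutex waiter (the head of the write queue when kwe is NULL) with
 * updateval. On success the woken thread becomes the kwq owner; otherwise
 * ownership is cleared. The displaced owner, carrying a +1 reference, is
 * handed back through old_owner for the caller to release once the spinlock
 * is dropped.
 */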
875 static kern_return_t
876 ksyn_mtxsignal(ksyn_wait_queue_t kwq, ksyn_waitq_element_t kwe,
877 uint32_t updateval, thread_t *old_owner)
878 {
879 kern_return_t ret;
880
881 if (!kwe) {
882 kwe = TAILQ_FIRST(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITE].ksynq_kwelist);
883 if (!kwe) {
884 panic("ksyn_mtxsignal: panic signaling empty queue");
885 }
886 }
887
888 PTHREAD_TRACE(psynch_mutex_kwqsignal | DBG_FUNC_START, kwq->kw_addr, kwe,
889 thread_tid(kwe->kwe_thread), kwq->kw_inqueue);
890
891 ret = ksyn_signal(kwq, KSYN_QUEUE_WRITE, kwe, updateval);
892 if (ret == KERN_SUCCESS) {
893 *old_owner = _kwq_set_owner(kwq, kwe->kwe_thread, 0);
894 } else {
895 *old_owner = _kwq_clear_owner(kwq);
896 }
897 PTHREAD_TRACE(psynch_mutex_kwqsignal | DBG_FUNC_END, kwq->kw_addr, kwe,
898 ret, 0);
899 return ret;
900 }
901
902
903 static void
904 ksyn_prepost(ksyn_wait_queue_t kwq, ksyn_waitq_element_t kwe, uint32_t state,
905 uint32_t lockseq)
906 {
907 bzero(kwe, sizeof(*kwe));
908 kwe->kwe_state = state;
909 kwe->kwe_lockseq = lockseq;
910 kwe->kwe_count = 1;
911
912 (void)ksyn_queue_insert(kwq, KSYN_QUEUE_WRITE, kwe, lockseq, SEQFIT);
913 kwq->kw_fakecount++;
914 }
915
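/*
 * Find and signal one waiter on the condvar: the given thread, or the best
 * candidate at or below uptoseq. May convert the request into a broadcast,
 * merge with an existing prepost/broadcast entry, or allocate and insert a
 * new prepost entry when no eligible waiter is queued (briefly dropping the
 * kwq lock to allocate).
 */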
916 static void
917 ksyn_cvsignal(ksyn_wait_queue_t ckwq, thread_t th, uint32_t uptoseq,
918 uint32_t signalseq, uint32_t *updatebits, int *broadcast,
919 ksyn_waitq_element_t *nkwep)
920 {
921 ksyn_waitq_element_t kwe = NULL;
922 ksyn_waitq_element_t nkwe = NULL;
923 ksyn_queue_t kq = &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITE];
924
925 uptoseq &= PTHRW_COUNT_MASK;
926
927 // Find the specified thread to wake.
928 if (th != THREAD_NULL) {
929 uthread_t uth = pthread_kern->get_bsdthread_info(th);
930 kwe = pthread_kern->uthread_get_uukwe(uth);
931 if (kwe->kwe_kwqqueue != ckwq ||
932 is_seqhigher(kwe->kwe_lockseq, uptoseq)) {
933 // Unless it's no longer waiting on this CV...
934 kwe = NULL;
935 // ...in which case we post a broadcast instead.
936 *broadcast = 1;
937 return;
938 }
939 }
940
941 // If no thread was specified, find any thread to wake (with the right
942 // sequence number).
943 while (th == THREAD_NULL) {
944 if (kwe == NULL) {
945 kwe = ksyn_queue_find_signalseq(ckwq, kq, uptoseq, signalseq);
946 }
947 if (kwe == NULL && nkwe == NULL) {
948 // No eligible entries; need to allocate a new
949 // entry to prepost. Loop to rescan after
950 // reacquiring the lock after allocation in
951 // case anything new shows up.
952 ksyn_wqunlock(ckwq);
953 nkwe = (ksyn_waitq_element_t)zalloc(kwe_zone);
954 ksyn_wqlock(ckwq);
955 } else {
956 break;
957 }
958 }
959
960 if (kwe != NULL) {
961 // If we found a thread to wake...
962 if (kwe->kwe_state == KWE_THREAD_INWAIT) {
963 if (is_seqlower(kwe->kwe_lockseq, signalseq)) {
964 /*
965 * A valid thread in our range, but lower than our signal.
966 * Matching it may leave our match with nobody to wake it if/when
967 * it arrives (the signal originally meant for this thread might
968 * not successfully wake it).
969 *
970 * Convert to broadcast - may cause some spurious wakeups
971 * (allowed by spec), but avoids starvation (better choice).
972 */
973 *broadcast = 1;
974 } else {
975 (void)ksyn_signal(ckwq, KSYN_QUEUE_WRITE, kwe, PTH_RWL_MTX_WAIT);
976 *updatebits += PTHRW_INC;
977 }
978 } else if (kwe->kwe_state == KWE_THREAD_PREPOST) {
979 // Merge with existing prepost at same uptoseq.
980 kwe->kwe_count += 1;
981 } else if (kwe->kwe_state == KWE_THREAD_BROADCAST) {
982 // Existing broadcasts subsume this signal.
983 } else {
984 panic("unknown kwe state\n");
985 }
986 if (nkwe) {
987 /*
988 * If we allocated a new kwe above but then found a different kwe to
989 * use then we need to deallocate the spare one.
990 */
991 zfree(kwe_zone, nkwe);
992 nkwe = NULL;
993 }
994 } else if (nkwe != NULL) {
995 // ... otherwise, insert the newly allocated prepost.
996 ksyn_prepost(ckwq, nkwe, KWE_THREAD_PREPOST, uptoseq);
997 nkwe = NULL;
998 } else {
999 panic("failed to allocate kwe\n");
1000 }
1001
1002 *nkwep = nkwe;
1003 }
1004
1005 static int
1006 __psynch_cvsignal(user_addr_t cv, uint32_t cgen, uint32_t cugen,
1007 uint32_t csgen, uint32_t flags, int broadcast,
1008 mach_port_name_t threadport, uint32_t *retval)
1009 {
1010 int error = 0;
1011 thread_t th = THREAD_NULL;
1012 ksyn_wait_queue_t kwq;
1013
1014 uint32_t uptoseq = cgen & PTHRW_COUNT_MASK;
1015 uint32_t fromseq = (cugen & PTHRW_COUNT_MASK) + PTHRW_INC;
1016
1017 // validate sane L, U, and S values
1018 if ((threadport == 0 && is_seqhigher(fromseq, uptoseq)) || is_seqhigher(csgen, uptoseq)) {
1019 __FAILEDUSERTEST__("cvbroad: invalid L, U and S values\n");
1020 return EINVAL;
1021 }
1022
1023 if (threadport != 0) {
1024 th = port_name_to_thread((mach_port_name_t)threadport);
1025 if (th == THREAD_NULL) {
1026 return ESRCH;
1027 }
1028 }
1029
1030 error = ksyn_wqfind(cv, cgen, cugen, csgen, flags, (KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INDROP), &kwq);
1031 if (error == 0) {
1032 uint32_t updatebits = 0;
1033 ksyn_waitq_element_t nkwe = NULL;
1034
1035 ksyn_wqlock(kwq);
1036
1037 // update L, U and S...
1038 UPDATE_CVKWQ(kwq, cgen, cugen, csgen);
1039
1040 PTHREAD_TRACE(psynch_cvar_signal | DBG_FUNC_START, kwq->kw_addr,
1041 fromseq, uptoseq, broadcast);
1042
1043 if (!broadcast) {
1044 // No need to signal if the CV is already balanced.
1045 if (diff_genseq(kwq->kw_lword, kwq->kw_sword)) {
1046 ksyn_cvsignal(kwq, th, uptoseq, fromseq, &updatebits,
1047 &broadcast, &nkwe);
1048 PTHREAD_TRACE(psynch_cvar_signal, kwq->kw_addr, broadcast, 0,0);
1049 }
1050 }
1051
1052 if (broadcast) {
1053 ksyn_handle_cvbroad(kwq, uptoseq, &updatebits);
1054 }
1055
1056 kwq->kw_sword += (updatebits & PTHRW_COUNT_MASK);
1057 // set C or P bits and free if needed
1058 ksyn_cvupdate_fixup(kwq, &updatebits);
1059 *retval = updatebits;
1060
1061 PTHREAD_TRACE(psynch_cvar_signal | DBG_FUNC_END, kwq->kw_addr,
1062 updatebits, 0, 0);
1063
1064 ksyn_wqunlock(kwq);
1065
1066 pthread_kern->psynch_wait_cleanup();
1067
1068 if (nkwe != NULL) {
1069 zfree(kwe_zone, nkwe);
1070 }
1071
1072 ksyn_wqrelease(kwq, 1, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_CVAR));
1073 }
1074
1075 if (th != NULL) {
1076 thread_deallocate(th);
1077 }
1078
1079 return error;
1080 }
1081
1082 /*
1083 * psynch_cvbroad: This system call is used for broadcast posting on blocked waiters of psynch cvars.
1084 */
1085 int
1086 _psynch_cvbroad(__unused proc_t p, user_addr_t cv, uint64_t cvlsgen,
1087 uint64_t cvudgen, uint32_t flags, __unused user_addr_t mutex,
1088 __unused uint64_t mugen, __unused uint64_t tid, uint32_t *retval)
1089 {
1090 uint32_t diffgen = cvudgen & 0xffffffff;
1091 uint32_t count = diffgen >> PTHRW_COUNT_SHIFT;
1092 if (count > pthread_kern->get_task_threadmax()) {
1093 __FAILEDUSERTEST__("cvbroad: difference greater than maximum possible thread count\n");
1094 return EBUSY;
1095 }
1096
1097 uint32_t csgen = (cvlsgen >> 32) & 0xffffffff;
1098 uint32_t cgen = cvlsgen & 0xffffffff;
1099 uint32_t cugen = (cvudgen >> 32) & 0xffffffff;
1100
1101 return __psynch_cvsignal(cv, cgen, cugen, csgen, flags, 1, 0, retval);
1102 }
1103
1104 /*
1105 * psynch_cvsignal: This system call is used for signalling the blocked waiters of psynch cvars.
1106 */
1107 int
1108 _psynch_cvsignal(__unused proc_t p, user_addr_t cv, uint64_t cvlsgen,
1109 uint32_t cvugen, int threadport, __unused user_addr_t mutex,
1110 __unused uint64_t mugen, __unused uint64_t tid, uint32_t flags,
1111 uint32_t *retval)
1112 {
1113 uint32_t csgen = (cvlsgen >> 32) & 0xffffffff;
1114 uint32_t cgen = cvlsgen & 0xffffffff;
1115
1116 return __psynch_cvsignal(cv, cgen, cvugen, csgen, flags, 0, threadport, retval);
1117 }
1118
1119 /*
1120 * psynch_cvwait: This system call is used for psynch cvar waiters to block in kernel.
1121 */
1122 int
1123 _psynch_cvwait(__unused proc_t p, user_addr_t cv, uint64_t cvlsgen,
1124 uint32_t cvugen, user_addr_t mutex, uint64_t mugen, uint32_t flags,
1125 int64_t sec, uint32_t nsec, uint32_t *retval)
1126 {
1127 int error = 0;
1128 uint32_t updatebits = 0;
1129 ksyn_wait_queue_t ckwq = NULL;
1130 ksyn_waitq_element_t kwe, nkwe = NULL;
1131
1132 /* for conformance reasons */
1133 pthread_kern->__pthread_testcancel(0);
1134
1135 uint32_t csgen = (cvlsgen >> 32) & 0xffffffff;
1136 uint32_t cgen = cvlsgen & 0xffffffff;
1137 uint32_t ugen = (mugen >> 32) & 0xffffffff;
1138 uint32_t mgen = mugen & 0xffffffff;
1139
1140 uint32_t lockseq = (cgen & PTHRW_COUNT_MASK);
1141
1142 /*
1143 * In cvwait the U word can be out of range, since the cv may be used only
1144 * for timeouts. However, the S word needs to be within bounds and is
1145 * validated at user level as well.
1146 */
1147 if (is_seqhigher_eq(csgen, lockseq) != 0) {
1148 __FAILEDUSERTEST__("psynch_cvwait: invalid sequence numbers\n");
1149 return EINVAL;
1150 }
1151
1152 PTHREAD_TRACE(psynch_cvar_kwait | DBG_FUNC_START, cv, mutex, cgen, 0);
1153
1154 error = ksyn_wqfind(cv, cgen, cvugen, csgen, flags, KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INWAIT, &ckwq);
1155 if (error != 0) {
1156 return error;
1157 }
1158
1159 if (mutex != 0) {
1160 uint32_t mutexrv = 0;
1161 error = _psynch_mutexdrop(NULL, mutex, mgen, ugen, 0, flags, &mutexrv);
1162 if (error != 0) {
1163 goto out;
1164 }
1165 }
1166
1167 ksyn_wqlock(ckwq);
1168
1169 // update L, U and S...
1170 UPDATE_CVKWQ(ckwq, cgen, cvugen, csgen);
1171
1172 /* Look for the sequence for prepost (or a conflicting thread) */
1173 ksyn_queue_t kq = &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITE];
1174 kwe = ksyn_queue_find_cvpreposeq(kq, lockseq);
1175 if (kwe != NULL) {
1176 if (kwe->kwe_state == KWE_THREAD_PREPOST) {
1177 if ((kwe->kwe_lockseq & PTHRW_COUNT_MASK) == lockseq) {
1178 /* we can safely consume a reference, so do so */
1179 if (--kwe->kwe_count == 0) {
1180 ksyn_queue_remove_item(ckwq, kq, kwe);
1181 ckwq->kw_fakecount--;
1182 nkwe = kwe;
1183 }
1184 } else {
1185 /*
1186 * consuming a prepost higher than our lock sequence is valid, but
1187 * can leave the higher thread without a match. Convert the entry
1188 * to a broadcast to compensate for this.
1189 */
1190 ksyn_handle_cvbroad(ckwq, kwe->kwe_lockseq, &updatebits);
1191 #if __TESTPANICS__
1192 if (updatebits != 0)
1193 panic("psync_cvwait: convert pre-post to broadcast: woke up %d threads that shouldn't be there\n", updatebits);
1194 #endif /* __TESTPANICS__ */
1195 }
1196 } else if (kwe->kwe_state == KWE_THREAD_BROADCAST) {
1197 // XXX
1198 // Nothing to do.
1199 } else if (kwe->kwe_state == KWE_THREAD_INWAIT) {
1200 __FAILEDUSERTEST__("cvwait: thread entry with same sequence already present\n");
1201 error = EBUSY;
1202 } else {
1203 panic("psync_cvwait: unexpected wait queue element type\n");
1204 }
1205
1206 if (error == 0) {
1207 updatebits |= PTHRW_INC;
1208 ckwq->kw_sword += PTHRW_INC;
1209
1210 /* set C or P bits and free if needed */
1211 ksyn_cvupdate_fixup(ckwq, &updatebits);
1212 *retval = updatebits;
1213 }
1214 } else {
1215 uint64_t abstime = 0;
1216 uint16_t kwe_flags = 0;
1217
1218 if (sec != 0 || (nsec & 0x3fffffff) != 0) {
1219 struct timespec ts;
1220 ts.tv_sec = (__darwin_time_t)sec;
1221 ts.tv_nsec = (nsec & 0x3fffffff);
1222 nanoseconds_to_absolutetime(
1223 (uint64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec, &abstime);
1224 clock_absolutetime_interval_to_deadline(abstime, &abstime);
1225 }
1226
1227 PTHREAD_TRACE(psynch_cvar_kwait, cv, mutex, kwe_flags, 1);
1228
1229 error = ksyn_wait(ckwq, KSYN_QUEUE_WRITE, cgen, SEQFIT, abstime,
1230 kwe_flags, psynch_cvcontinue, kThreadWaitPThreadCondVar);
1231 // ksyn_wait drops wait queue lock
1232 }
1233
1234 ksyn_wqunlock(ckwq);
1235
1236 if (nkwe != NULL) {
1237 zfree(kwe_zone, nkwe);
1238 }
1239 out:
1240
1241 PTHREAD_TRACE(psynch_cvar_kwait | DBG_FUNC_END, cv, error, updatebits, 2);
1242
1243 ksyn_wqrelease(ckwq, 1, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_CVAR));
1244 return error;
1245 }
1246
1247
1248 void __dead2
1249 psynch_cvcontinue(void *parameter, wait_result_t result)
1250 {
1251 uthread_t uth = current_uthread();
1252 ksyn_wait_queue_t ckwq = parameter;
1253 ksyn_waitq_element_t kwe = pthread_kern->uthread_get_uukwe(uth);
1254
1255 int error = _wait_result_to_errno(result);
1256 if (error != 0) {
1257 ksyn_wqlock(ckwq);
1258 /* just in case it got woken up as we were granting */
1259 int retval = kwe->kwe_psynchretval;
1260 pthread_kern->uthread_set_returnval(uth, retval);
1261
1262 if (kwe->kwe_kwqqueue) {
1263 ksyn_queue_remove_item(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITE], kwe);
1264 }
1265 if ((kwe->kwe_psynchretval & PTH_RWL_MTX_WAIT) != 0) {
1266 /* the condition var was granted;
1267 * reset the error so that the thread returns normally.
1268 */
1269 error = 0;
1270 /* no need to set any bits just return as cvsig/broad covers this */
1271 } else {
1272 ckwq->kw_sword += PTHRW_INC;
1273
1274 /* set C and P bits, in the local error */
1275 if ((ckwq->kw_lword & PTHRW_COUNT_MASK) == (ckwq->kw_sword & PTHRW_COUNT_MASK)) {
1276 PTHREAD_TRACE(psynch_cvar_zeroed, ckwq->kw_addr,
1277 ckwq->kw_lword, ckwq->kw_sword, ckwq->kw_inqueue);
1278 error |= ECVCLEARED;
1279 if (ckwq->kw_inqueue != 0) {
1280 ksyn_queue_free_items(ckwq, KSYN_QUEUE_WRITE, ckwq->kw_lword, 1);
1281 }
1282 ckwq->kw_lword = ckwq->kw_uword = ckwq->kw_sword = 0;
1283 ckwq->kw_kflags |= KSYN_KWF_ZEROEDOUT;
1284 } else {
1285 /* is everything in the queue a fake entry? */
1286 if (ckwq->kw_inqueue != 0 && ckwq->kw_fakecount == ckwq->kw_inqueue) {
1287 error |= ECVPREPOST;
1288 }
1289 }
1290 }
1291 ksyn_wqunlock(ckwq);
1292
1293 PTHREAD_TRACE(psynch_cvar_kwait | DBG_FUNC_END, ckwq->kw_addr,
1294 error, 0, 3);
1295 } else {
1296 int val = 0;
1297 // PTH_RWL_MTX_WAIT is removed
1298 if ((kwe->kwe_psynchretval & PTH_RWS_CV_MBIT) != 0) {
1299 val = PTHRW_INC | PTH_RWS_CV_CBIT;
1300 }
1301 PTHREAD_TRACE(psynch_cvar_kwait | DBG_FUNC_END, ckwq->kw_addr,
1302 val, 0, 4);
1303 pthread_kern->uthread_set_returnval(uth, val);
1304 }
1305
1306 ksyn_wqrelease(ckwq, 1, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_CVAR));
1307 pthread_kern->unix_syscall_return(error);
1308 __builtin_unreachable();
1309 }
1310
1311 /*
1312 * psynch_cvclrprepost: This system call clears pending prepost if present.
1313 */
1314 int
1315 _psynch_cvclrprepost(__unused proc_t p, user_addr_t cv, uint32_t cvgen,
1316 uint32_t cvugen, uint32_t cvsgen, __unused uint32_t prepocnt,
1317 uint32_t preposeq, uint32_t flags, int *retval)
1318 {
1319 int error = 0;
1320 int mutex = (flags & _PTHREAD_MTX_OPT_MUTEX);
1321 int wqtype = (mutex ? KSYN_WQTYPE_MTX : KSYN_WQTYPE_CVAR) | KSYN_WQTYPE_INDROP;
1322 ksyn_wait_queue_t kwq = NULL;
1323
1324 *retval = 0;
1325
1326 error = ksyn_wqfind(cv, cvgen, cvugen, mutex ? 0 : cvsgen, flags, wqtype,
1327 &kwq);
1328 if (error != 0) {
1329 return error;
1330 }
1331
1332 ksyn_wqlock(kwq);
1333
1334 if (mutex) {
1335 int firstfit = (flags & _PTHREAD_MTX_OPT_POLICY_MASK)
1336 == _PTHREAD_MTX_OPT_POLICY_FIRSTFIT;
1337 if (firstfit && kwq->kw_prepost.count) {
1338 if (is_seqlower_eq(kwq->kw_prepost.lseq, cvgen)) {
1339 PTHREAD_TRACE(psynch_mutex_kwqprepost, kwq->kw_addr,
1340 kwq->kw_prepost.lseq, 0, 2);
1341 _kwq_clear_preposted_wakeup(kwq);
1342 }
1343 }
1344 } else {
1345 PTHREAD_TRACE(psynch_cvar_clrprepost, kwq->kw_addr, wqtype,
1346 preposeq, 0);
1347 ksyn_queue_free_items(kwq, KSYN_QUEUE_WRITE, preposeq, 0);
1348 }
1349
1350 ksyn_wqunlock(kwq);
1351 ksyn_wqrelease(kwq, 1, wqtype);
1352 return error;
1353 }
1354
1355 /* ***************** pthread_rwlock ************************ */
1356
1357 static int
1358 __psynch_rw_lock(int type, user_addr_t rwlock, uint32_t lgenval,
1359 uint32_t ugenval, uint32_t rw_wc, int flags, uint32_t *retval)
1360 {
1361 uint32_t lockseq = lgenval & PTHRW_COUNT_MASK;
1362 ksyn_wait_queue_t kwq;
1363 int error, prepost_type, kqi;
1364 thread_continue_t tc;
1365
1366 if (type == PTH_RW_TYPE_READ) {
1367 prepost_type = KW_UNLOCK_PREPOST_READLOCK;
1368 kqi = KSYN_QUEUE_READ;
1369 tc = psynch_rw_rdcontinue;
1370 } else {
1371 prepost_type = KW_UNLOCK_PREPOST_WRLOCK;
1372 kqi = KSYN_QUEUE_WRITE;
1373 tc = psynch_rw_wrcontinue;
1374 }
1375
1376 error = ksyn_wqfind(rwlock, lgenval, ugenval, rw_wc, flags,
1377 (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_RWLOCK), &kwq);
1378 if (error != 0) {
1379 return error;
1380 }
1381
1382 ksyn_wqlock(kwq);
1383 _ksyn_check_init(kwq, lgenval);
1384 if (_kwq_handle_interrupted_wakeup(kwq, type, lockseq, retval) ||
1385 // handle overlap first as they are not counted against pre_rwwc
1386 // handle_overlap uses the flags in lgenval (vs. lockseq)
1387 _kwq_handle_overlap(kwq, type, lgenval, rw_wc, retval) ||
1388 _kwq_handle_preposted_wakeup(kwq, prepost_type, lockseq, retval)) {
1389 ksyn_wqunlock(kwq);
1390 goto out;
1391 }
1392
1393 block_hint_t block_hint = type == PTH_RW_TYPE_READ ?
1394 kThreadWaitPThreadRWLockRead : kThreadWaitPThreadRWLockWrite;
1395 error = ksyn_wait(kwq, kqi, lgenval, SEQFIT, 0, 0, tc, block_hint);
1396 // ksyn_wait drops wait queue lock
1397 out:
1398 ksyn_wqrelease(kwq, 0, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_RWLOCK));
1399 return error;
1400 }
1401
1402 /*
1403 * psynch_rw_rdlock: This system call is used for psync rwlock readers to block.
1404 */
1405 int
1406 _psynch_rw_rdlock(__unused proc_t p, user_addr_t rwlock, uint32_t lgenval,
1407 uint32_t ugenval, uint32_t rw_wc, int flags, uint32_t *retval)
1408 {
1409 return __psynch_rw_lock(PTH_RW_TYPE_READ, rwlock, lgenval, ugenval, rw_wc,
1410 flags, retval);
1411 }
1412
1413 /*
1414 * psynch_rw_longrdlock: This system call is used for psync rwlock long readers to block.
1415 */
1416 int
1417 _psynch_rw_longrdlock(__unused proc_t p, __unused user_addr_t rwlock,
1418 __unused uint32_t lgenval, __unused uint32_t ugenval,
1419 __unused uint32_t rw_wc, __unused int flags, __unused uint32_t *retval)
1420 {
1421 return ESRCH;
1422 }
1423
1424
1425 /*
1426 * psynch_rw_wrlock: This system call is used for psync rwlock writers to block.
1427 */
1428 int
1429 _psynch_rw_wrlock(__unused proc_t p, user_addr_t rwlock, uint32_t lgenval,
1430 uint32_t ugenval, uint32_t rw_wc, int flags, uint32_t *retval)
1431 {
1432 return __psynch_rw_lock(PTH_RW_TYPE_WRITE, rwlock, lgenval, ugenval,
1433 rw_wc, flags, retval);
1434 }
1435
1436 /*
1437 * psynch_rw_yieldwrlock: This system call is used for psync rwlock yielding writers to block.
1438 */
1439 int
1440 _psynch_rw_yieldwrlock(__unused proc_t p, __unused user_addr_t rwlock,
1441 __unused uint32_t lgenval, __unused uint32_t ugenval,
1442 __unused uint32_t rw_wc, __unused int flags, __unused uint32_t *retval)
1443 {
1444 return ESRCH;
1445 }
1446
1447 /*
1448 * psynch_rw_unlock: This system call is used for unlock state postings. This will grant appropriate
1449 * reader/writer variety lock.
1450 */
1451 int
1452 _psynch_rw_unlock(__unused proc_t p, user_addr_t rwlock, uint32_t lgenval,
1453 uint32_t ugenval, uint32_t rw_wc, int flags, uint32_t *retval)
1454 {
1455 int error = 0;
1456 ksyn_wait_queue_t kwq;
1457 uint32_t updatebits = 0;
1458 int diff;
1459 uint32_t count = 0;
1460 uint32_t curgen = lgenval & PTHRW_COUNT_MASK;
1461 int clearedkflags = 0;
1462
1463 error = ksyn_wqfind(rwlock, lgenval, ugenval, rw_wc, flags,
1464 (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_RWLOCK), &kwq);
1465 if (error != 0) {
1466 return(error);
1467 }
1468
1469 ksyn_wqlock(kwq);
1470 int isinit = _ksyn_check_init(kwq, lgenval);
1471
1472 /* if lastunlock seq is set, ensure the current one is not lower than that, as it would be spurious */
1473 if ((kwq->kw_lastunlockseq != PTHRW_RWL_INIT) &&
1474 (is_seqlower(ugenval, kwq->kw_lastunlockseq)!= 0)) {
1475 error = 0;
1476 goto out;
1477 }
1478
1479 /* If L-U != num of waiters, then it needs to be preposted or spr */
1480 diff = find_diff(lgenval, ugenval);
1481
1482 if (find_seq_till(kwq, curgen, diff, &count) == 0) {
1483 if ((count == 0) || (count < (uint32_t)diff))
1484 goto prepost;
1485 }
1486
1487 /* no prepost and all threads are in place, reset the bit */
1488 if ((isinit != 0) && ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0)){
1489 kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
1490 clearedkflags = 1;
1491 }
1492
1493 /* can handle unlock now */
1494
1495 _kwq_clear_preposted_wakeup(kwq);
1496
1497 error = kwq_handle_unlock(kwq, lgenval, rw_wc, &updatebits, 0, NULL, 0);
1498 #if __TESTPANICS__
1499 if (error != 0)
1500 panic("psynch_rw_unlock: kwq_handle_unlock failed %d\n",error);
1501 #endif /* __TESTPANICS__ */
1502 out:
1503 if (error == 0) {
1504 /* update bits?? */
1505 *retval = updatebits;
1506 }
1507
1508 // <rdar://problem/22244050> If any of the wakeups failed because they
1509 // already returned to userspace because of a signal then we need to ensure
1510 // that the reset state is not cleared when that thread returns. Otherwise,
1511 // _pthread_rwlock_lock will clear the interrupted state before it is read.
1512 if (clearedkflags != 0 && kwq->kw_intr.count > 0) {
1513 kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
1514 }
1515
1516 ksyn_wqunlock(kwq);
1517 pthread_kern->psynch_wait_cleanup();
1518 ksyn_wqrelease(kwq, 0, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_RWLOCK));
1519
1520 return(error);
1521
1522 prepost:
1523 /* update if the new seq is higher than prev prepost, or first set */
1524 if (is_rws_sbit_set(kwq->kw_prepost.sseq) ||
1525 is_seqhigher_eq(rw_wc, kwq->kw_prepost.sseq)) {
1526 _kwq_mark_preposted_wakeup(kwq, diff - count, curgen, rw_wc);
1527 updatebits = lgenval; /* let this not do unlock handling */
1528 }
1529 error = 0;
1530 goto out;
1531 }
1532
1533
1534 /* ************************************************************************** */
1535 void
1536 pth_global_hashinit(void)
1537 {
1538 pth_glob_hashtbl = hashinit(PTH_HASHSIZE * 4, M_PROC, &pthhash);
1539 }
1540
1541 void
1542 _pth_proc_hashinit(proc_t p)
1543 {
1544 void *ptr = hashinit(PTH_HASHSIZE, M_PCB, &pthhash);
1545 if (ptr == NULL) {
1546 panic("pth_proc_hashinit: hash init returned 0\n");
1547 }
1548
1549 pthread_kern->proc_set_pthhash(p, ptr);
1550 }
1551
1552
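/*
 * Look up an existing kwq with the pthread list lock held: process-shared
 * objects are hashed globally by backing object/offset, private ones
 * per-process by user address. *out_kwq is set to the match, or NULL if none
 * was found.
 */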
1553 static int
1554 ksyn_wq_hash_lookup(user_addr_t uaddr, proc_t p, int flags,
1555 ksyn_wait_queue_t *out_kwq, struct pthhashhead **out_hashptr,
1556 uint64_t object, uint64_t offset)
1557 {
1558 int res = 0;
1559 ksyn_wait_queue_t kwq;
1560 struct pthhashhead *hashptr;
1561 if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED) {
1562 hashptr = pth_glob_hashtbl;
1563 LIST_FOREACH(kwq, &hashptr[object & pthhash], kw_hash) {
1564 if (kwq->kw_object == object && kwq->kw_offset == offset) {
1565 break;
1566 }
1567 }
1568 } else {
1569 hashptr = pthread_kern->proc_get_pthhash(p);
1570 LIST_FOREACH(kwq, &hashptr[uaddr & pthhash], kw_hash) {
1571 if (kwq->kw_addr == uaddr) {
1572 break;
1573 }
1574 }
1575 }
1576 *out_kwq = kwq;
1577 *out_hashptr = hashptr;
1578 return res;
1579 }
1580
1581 void
1582 _pth_proc_hashdelete(proc_t p)
1583 {
1584 struct pthhashhead * hashptr;
1585 ksyn_wait_queue_t kwq;
1586 unsigned long hashsize = pthhash + 1;
1587 unsigned long i;
1588
1589 hashptr = pthread_kern->proc_get_pthhash(p);
1590 pthread_kern->proc_set_pthhash(p, NULL);
1591 if (hashptr == NULL) {
1592 return;
1593 }
1594
1595 pthread_list_lock();
1596 for (i = 0; i < hashsize; i++) {
1597 while ((kwq = LIST_FIRST(&hashptr[i])) != NULL) {
1598 if ((kwq->kw_pflags & KSYN_WQ_INHASH) != 0) {
1599 kwq->kw_pflags &= ~KSYN_WQ_INHASH;
1600 LIST_REMOVE(kwq, kw_hash);
1601 }
1602 if ((kwq->kw_pflags & KSYN_WQ_FLIST) != 0) {
1603 kwq->kw_pflags &= ~KSYN_WQ_FLIST;
1604 LIST_REMOVE(kwq, kw_list);
1605 }
1606 pthread_list_unlock();
1607 /* release fake entries if present for cvars */
1608 if (((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_CVAR) && (kwq->kw_inqueue != 0))
1609 ksyn_freeallkwe(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITE]);
1610 _kwq_destroy(kwq);
1611 pthread_list_lock();
1612 }
1613 }
1614 pthread_list_unlock();
1615 FREE(hashptr, M_PROC);
1616 }
1617
1618 /* no lock held for this as the waitqueue is getting freed */
1619 void
1620 ksyn_freeallkwe(ksyn_queue_t kq)
1621 {
1622 ksyn_waitq_element_t kwe;
1623 while ((kwe = TAILQ_FIRST(&kq->ksynq_kwelist)) != NULL) {
1624 TAILQ_REMOVE(&kq->ksynq_kwelist, kwe, kwe_list);
1625 if (kwe->kwe_state != KWE_THREAD_INWAIT) {
1626 zfree(kwe_zone, kwe);
1627 }
1628 }
1629 }
1630
1631 static inline void
1632 _kwq_report_inuse(ksyn_wait_queue_t kwq)
1633 {
1634 if (kwq->kw_prepost.count != 0) {
1635 __FAILEDUSERTEST2__("uaddr 0x%llx busy for synch type 0x%x [pre %d:0x%x:0x%x]",
1636 (uint64_t)kwq->kw_addr, kwq->kw_type, kwq->kw_prepost.count,
1637 kwq->kw_prepost.lseq, kwq->kw_prepost.sseq);
1638 PTHREAD_TRACE(psynch_mutex_kwqcollision, kwq->kw_addr,
1639 kwq->kw_type, 1, 0);
1640 }
1641 if (kwq->kw_intr.count != 0) {
1642 __FAILEDUSERTEST2__("uaddr 0x%llx busy for synch type 0x%x [intr %d:0x%x:0x%x:0x%x]",
1643 (uint64_t)kwq->kw_addr, kwq->kw_type, kwq->kw_intr.count,
1644 kwq->kw_intr.type, kwq->kw_intr.seq,
1645 kwq->kw_intr.returnbits);
1646 PTHREAD_TRACE(psynch_mutex_kwqcollision, kwq->kw_addr,
1647 kwq->kw_type, 2, 0);
1648 }
1649 if (kwq->kw_iocount) {
1650 __FAILEDUSERTEST2__("uaddr 0x%llx busy for synch type 0x%x [ioc %d:%d]",
1651 (uint64_t)kwq->kw_addr, kwq->kw_type, kwq->kw_iocount,
1652 kwq->kw_dropcount);
1653 PTHREAD_TRACE(psynch_mutex_kwqcollision, kwq->kw_addr,
1654 kwq->kw_type, 3, 0);
1655 }
1656 if (kwq->kw_inqueue) {
1657 __FAILEDUSERTEST2__("uaddr 0x%llx busy for synch type 0x%x [inq %d:%d]",
1658 (uint64_t)kwq->kw_addr, kwq->kw_type, kwq->kw_inqueue,
1659 kwq->kw_fakecount);
1660 PTHREAD_TRACE(psynch_mutex_kwqcollision, kwq->kw_addr, kwq->kw_type,
1661 4, 0);
1662 }
1663 }
1664
1665 /* find kernel waitqueue, if not present create one. Grants a reference */
1666 int
1667 ksyn_wqfind(user_addr_t uaddr, uint32_t mgen, uint32_t ugen, uint32_t sgen,
1668 int flags, int wqtype, ksyn_wait_queue_t *kwqp)
1669 {
1670 int res = 0;
1671 ksyn_wait_queue_t kwq = NULL;
1672 ksyn_wait_queue_t nkwq = NULL;
1673 struct pthhashhead *hashptr;
1674 proc_t p = current_proc();
1675
1676 uint64_t object = 0, offset = 0;
1677 if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED) {
1678 res = ksyn_findobj(uaddr, &object, &offset);
1679 hashptr = pth_glob_hashtbl;
1680 } else {
1681 hashptr = pthread_kern->proc_get_pthhash(p);
1682 }
1683
1684 while (res == 0) {
1685 pthread_list_lock();
1686 res = ksyn_wq_hash_lookup(uaddr, current_proc(), flags, &kwq, &hashptr,
1687 object, offset);
1688 if (res != 0) {
1689 pthread_list_unlock();
1690 break;
1691 }
1692 if (kwq == NULL && nkwq == NULL) {
1693 // Drop the lock to allocate a new kwq and retry.
1694 pthread_list_unlock();
1695
1696 nkwq = (ksyn_wait_queue_t)zalloc(kwq_zone);
1697 bzero(nkwq, sizeof(struct ksyn_wait_queue));
1698 int i;
1699 for (i = 0; i < KSYN_QUEUE_MAX; i++) {
1700 ksyn_queue_init(&nkwq->kw_ksynqueues[i]);
1701 }
1702 lck_spin_init(&nkwq->kw_lock, pthread_lck_grp, pthread_lck_attr);
1703 continue;
1704 } else if (kwq == NULL && nkwq != NULL) {
1705 // Still not found, add the new kwq to the hash.
1706 kwq = nkwq;
1707 nkwq = NULL; // Don't free.
1708 if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED) {
1709 kwq->kw_pflags |= KSYN_WQ_SHARED;
1710 LIST_INSERT_HEAD(&hashptr[object & pthhash], kwq, kw_hash);
1711 } else {
1712 LIST_INSERT_HEAD(&hashptr[uaddr & pthhash], kwq, kw_hash);
1713 }
1714 kwq->kw_pflags |= KSYN_WQ_INHASH;
1715 } else if (kwq != NULL) {
1716 // Found an existing kwq, use it.
1717 if ((kwq->kw_pflags & KSYN_WQ_FLIST) != 0) {
1718 LIST_REMOVE(kwq, kw_list);
1719 kwq->kw_pflags &= ~KSYN_WQ_FLIST;
1720 }
1721 if ((kwq->kw_type & KSYN_WQTYPE_MASK) != (wqtype & KSYN_WQTYPE_MASK)) {
1722 if (!_kwq_is_used(kwq)) {
1723 if (kwq->kw_iocount == 0) {
1724 kwq->kw_type = 0; // mark for reinitialization
1725 } else if (kwq->kw_iocount == 1 &&
1726 kwq->kw_dropcount == kwq->kw_iocount) {
1727 /* if all users are unlockers then wait for it to finish */
1728 kwq->kw_pflags |= KSYN_WQ_WAITING;
1729 // Drop the lock and wait for the kwq to be free.
1730 (void)msleep(&kwq->kw_pflags, pthread_list_mlock,
1731 PDROP, "ksyn_wqfind", 0);
1732 continue;
1733 } else {
1734 _kwq_report_inuse(kwq);
1735 res = EINVAL;
1736 }
1737 } else {
1738 _kwq_report_inuse(kwq);
1739 res = EINVAL;
1740 }
1741 }
1742 }
1743 if (res == 0) {
1744 if (kwq->kw_type == 0) {
1745 kwq->kw_addr = uaddr;
1746 kwq->kw_object = object;
1747 kwq->kw_offset = offset;
1748 kwq->kw_type = (wqtype & KSYN_WQTYPE_MASK);
1749 CLEAR_REINIT_BITS(kwq);
1750 kwq->kw_lword = mgen;
1751 kwq->kw_uword = ugen;
1752 kwq->kw_sword = sgen;
1753 kwq->kw_owner = THREAD_NULL;
1754 kwq->kw_kflags = 0;
1755 kwq->kw_qos_override = THREAD_QOS_UNSPECIFIED;
1756 PTHREAD_TRACE(psynch_mutex_kwqallocate | DBG_FUNC_START, uaddr,
1757 kwq->kw_type, kwq, 0);
1758 PTHREAD_TRACE(psynch_mutex_kwqallocate | DBG_FUNC_END, uaddr,
1759 mgen, ugen, sgen);
1760 }
1761 kwq->kw_iocount++;
1762 if (wqtype == KSYN_WQTYPE_MUTEXDROP) {
1763 kwq->kw_dropcount++;
1764 }
1765 }
1766 pthread_list_unlock();
1767 break;
1768 }
1769 if (kwqp != NULL) {
1770 *kwqp = kwq;
1771 }
1772 if (nkwq) {
1773 _kwq_destroy(nkwq);
1774 }
1775 return res;
1776 }
1777
1778 /* Reference from find is dropped here. Starts the free process if needed */
1779 void
1780 ksyn_wqrelease(ksyn_wait_queue_t kwq, int qfreenow, int wqtype)
1781 {
1782 uint64_t deadline;
1783 ksyn_wait_queue_t free_elem = NULL;
1784
1785 pthread_list_lock();
1786 if (wqtype == KSYN_WQTYPE_MUTEXDROP) {
1787 kwq->kw_dropcount--;
1788 }
1789 if (--kwq->kw_iocount == 0) {
1790 if ((kwq->kw_pflags & KSYN_WQ_WAITING) != 0) {
1791 /* someone is waiting for the waitqueue; wake them up */
1792 kwq->kw_pflags &= ~KSYN_WQ_WAITING;
1793 wakeup(&kwq->kw_pflags);
1794 }
1795
1796 if (!_kwq_is_used(kwq)) {
1797 if (kwq->kw_turnstile) {
1798 panic("kw_turnstile still non-null upon release");
1799 }
1800
1801 PTHREAD_TRACE(psynch_mutex_kwqdeallocate | DBG_FUNC_START,
1802 kwq->kw_addr, kwq->kw_type, qfreenow, 0);
1803 PTHREAD_TRACE(psynch_mutex_kwqdeallocate | DBG_FUNC_END,
1804 kwq->kw_addr, kwq->kw_lword, kwq->kw_uword, kwq->kw_sword);
1805
1806 if (qfreenow == 0) {
1807 microuptime(&kwq->kw_ts);
1808 LIST_INSERT_HEAD(&pth_free_list, kwq, kw_list);
1809 kwq->kw_pflags |= KSYN_WQ_FLIST;
1810 if (psynch_cleanupset == 0) {
1811 struct timeval t;
1812 microuptime(&t);
1813 t.tv_sec += KSYN_CLEANUP_DEADLINE;
1814 deadline = tvtoabstime(&t);
1815 thread_call_enter_delayed(psynch_thcall, deadline);
1816 psynch_cleanupset = 1;
1817 }
1818 } else {
1819 kwq->kw_pflags &= ~KSYN_WQ_INHASH;
1820 LIST_REMOVE(kwq, kw_hash);
1821 free_elem = kwq;
1822 }
1823 }
1824 }
1825 pthread_list_unlock();
1826 if (free_elem != NULL) {
1827 _kwq_destroy(free_elem);
1828 }
1829 }
1830
1831 /* responsible for freeing the waitqueues */
1832 void
1833 psynch_wq_cleanup(__unused void *param, __unused void * param1)
1834 {
1835 ksyn_wait_queue_t kwq, tmp;
1836 struct timeval t;
1837 int reschedule = 0;
1838 uint64_t deadline = 0;
1839 LIST_HEAD(, ksyn_wait_queue) freelist;
1840 LIST_INIT(&freelist);
1841
1842 pthread_list_lock();
1843
1844 microuptime(&t);
1845
1846 LIST_FOREACH(kwq, &pth_free_list, kw_list) {
1847 if (_kwq_is_used(kwq) || kwq->kw_iocount != 0) {
1848 // still in use
1849 continue;
1850 }
1851 __darwin_time_t diff = t.tv_sec - kwq->kw_ts.tv_sec;
1852 if (diff < 0)
1853 diff *= -1;
1854 if (diff >= KSYN_CLEANUP_DEADLINE) {
1855 kwq->kw_pflags &= ~(KSYN_WQ_FLIST | KSYN_WQ_INHASH);
1856 LIST_REMOVE(kwq, kw_hash);
1857 LIST_REMOVE(kwq, kw_list);
1858 LIST_INSERT_HEAD(&freelist, kwq, kw_list);
1859 } else {
1860 reschedule = 1;
1861 }
1862
1863 }
1864 if (reschedule != 0) {
1865 t.tv_sec += KSYN_CLEANUP_DEADLINE;
1866 deadline = tvtoabstime(&t);
1867 thread_call_enter_delayed(psynch_thcall, deadline);
1868 psynch_cleanupset = 1;
1869 } else {
1870 psynch_cleanupset = 0;
1871 }
1872 pthread_list_unlock();
1873
1874 LIST_FOREACH_SAFE(kwq, &freelist, kw_list, tmp) {
1875 _kwq_destroy(kwq);
1876 }
1877 }
1878
1879 static int
1880 _wait_result_to_errno(wait_result_t result)
1881 {
1882 int res = 0;
1883 switch (result) {
1884 case THREAD_TIMED_OUT:
1885 res = ETIMEDOUT;
1886 break;
1887 case THREAD_INTERRUPTED:
1888 res = EINTR;
1889 break;
1890 }
1891 return res;
1892 }
1893
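/*
 * Block the current thread on a kwq.  On a successful enqueue this does not
 * return: the kwq lock is dropped and the thread blocks with the supplied
 * continuation, which runs after wakeup.  On enqueue failure the kwq is
 * unlocked and an errno is returned.  A hedged sketch of a call site (the
 * continuation name here is hypothetical; real call sites pass their own):
 *
 *     ksyn_wqlock(kwq);
 *     // ... decide that the caller must block ...
 *     res = ksyn_wait(kwq, KSYN_QUEUE_WRITE, lockseq, fit, abstime, 0,
 *                     psynch_mtx_continue, kThreadWaitPThreadMutex);
 *     // not reached unless the enqueue itself failed
 */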
1894 int
1895 ksyn_wait(ksyn_wait_queue_t kwq, kwq_queue_type_t kqi, uint32_t lockseq,
1896 int fit, uint64_t abstime, uint16_t kwe_flags,
1897 thread_continue_t continuation, block_hint_t block_hint)
1898 {
1899 thread_t th = current_thread();
1900 uthread_t uth = pthread_kern->get_bsdthread_info(th);
1901 struct turnstile **tstore = NULL;
1902 int res;
1903
1904 assert(continuation != THREAD_CONTINUE_NULL);
1905
1906 ksyn_waitq_element_t kwe = pthread_kern->uthread_get_uukwe(uth);
1907 bzero(kwe, sizeof(*kwe));
1908 kwe->kwe_count = 1;
1909 kwe->kwe_lockseq = lockseq & PTHRW_COUNT_MASK;
1910 kwe->kwe_state = KWE_THREAD_INWAIT;
1911 kwe->kwe_uth = uth;
1912 kwe->kwe_thread = th;
1913 kwe->kwe_flags = kwe_flags;
1914
1915 res = ksyn_queue_insert(kwq, kqi, kwe, lockseq, fit);
1916 if (res != 0) {
1917 //panic("psynch_rw_wrlock: failed to enqueue\n"); // XXX
1918 ksyn_wqunlock(kwq);
1919 return res;
1920 }
1921
1922 PTHREAD_TRACE(psynch_mutex_kwqwait, kwq->kw_addr, kwq->kw_inqueue,
1923 kwq->kw_prepost.count, kwq->kw_intr.count);
1924
1925 if (_kwq_use_turnstile(kwq)) {
1926 // pthread mutexes and rwlocks both (at least sometimes) know their
1927 // owner and can use turnstiles. Otherwise, we pass NULL as the
1928 // tstore to the shims so they wait on the global waitq.
1929 tstore = &kwq->kw_turnstile;
1930 }
1931
1932 pthread_kern->psynch_wait_prepare((uintptr_t)kwq, tstore, kwq->kw_owner,
1933 block_hint, abstime);
1934
1935 ksyn_wqunlock(kwq);
1936
1937 if (tstore) {
1938 pthread_kern->psynch_wait_update_complete(kwq->kw_turnstile);
1939 }
1940
1941 thread_block_parameter(continuation, kwq);
1942
1943 // NOT REACHED
1944 panic("ksyn_wait continuation returned");
1945 __builtin_unreachable();
1946 }
1947
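/*
 * Wake one waiter on the given internal queue; if kwe is NULL the first
 * element is chosen.  The caller must hold the kwq lock.  updateval is
 * delivered to the woken thread via kwe_psynchretval.  Returns KERN_SUCCESS
 * or KERN_NOT_WAITING (the target already timed out or was interrupted);
 * any other result panics below.
 */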
1948 kern_return_t
1949 ksyn_signal(ksyn_wait_queue_t kwq, kwq_queue_type_t kqi,
1950 ksyn_waitq_element_t kwe, uint32_t updateval)
1951 {
1952 kern_return_t ret;
1953 struct turnstile **tstore = NULL;
1954
1955 // If no wait element was specified, wake the first.
1956 if (!kwe) {
1957 kwe = TAILQ_FIRST(&kwq->kw_ksynqueues[kqi].ksynq_kwelist);
1958 if (!kwe) {
1959 panic("ksyn_signal: panic signaling empty queue");
1960 }
1961 }
1962
1963 if (kwe->kwe_state != KWE_THREAD_INWAIT) {
1964 panic("ksyn_signal: panic signaling non-waiting element");
1965 }
1966
1967 ksyn_queue_remove_item(kwq, &kwq->kw_ksynqueues[kqi], kwe);
1968 kwe->kwe_psynchretval = updateval;
1969
1970 if (_kwq_use_turnstile(kwq)) {
1971 tstore = &kwq->kw_turnstile;
1972 }
1973
1974 ret = pthread_kern->psynch_wait_wakeup(kwq, kwe, tstore);
1975
1976 if (ret != KERN_SUCCESS && ret != KERN_NOT_WAITING) {
1977 panic("ksyn_signal: panic waking up thread %x\n", ret);
1978 }
1979 return ret;
1980 }
1981
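/*
 * Resolve a user address to a (VM object id, offset) pair.  Process-shared
 * synchronizers are hashed by this identity (see ksyn_wqfind above), so
 * different mappings of the same shared page resolve to the same kernel
 * wait queue.  Returns EINVAL if the page cannot be queried.
 */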
1982 int
1983 ksyn_findobj(user_addr_t uaddr, uint64_t *objectp, uint64_t *offsetp)
1984 {
1985 kern_return_t ret;
1986 vm_page_info_basic_data_t info;
1987 mach_msg_type_number_t count = VM_PAGE_INFO_BASIC_COUNT;
1988 ret = pthread_kern->vm_map_page_info(pthread_kern->current_map(), uaddr,
1989 VM_PAGE_INFO_BASIC, (vm_page_info_t)&info, &count);
1990 if (ret != KERN_SUCCESS) {
1991 return EINVAL;
1992 }
1993
1994 if (objectp != NULL) {
1995 *objectp = (uint64_t)info.object_id;
1996 }
1997 if (offsetp != NULL) {
1998 *offsetp = (uint64_t)info.offset;
1999 }
2000
2001 return(0);
2002 }
2003
2004
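/*
 * The sequence helpers used below (is_seqlower, is_seqhigher, and friends)
 * are assumed to compare lock sequence numbers with wraparound in mind
 * rather than as plain integers, roughly along the lines of:
 *
 *     // sketch only, assuming the conventional wrap-aware comparison;
 *     // the real helpers are defined with the other PTHRW definitions
 *     static inline int is_seqlower(uint32_t x, uint32_t y)
 *     {
 *         return (x != y) && ((y - x) < 0x80000000u);
 *     }
 *
 * so "lowest"/"highest" in the routines below mean lowest/highest in that
 * wrap-aware order.
 */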
2005 /* find the lowest pending sequence among the read (kw_fr) and write (kw_fwr) queues */
2006 int
2007 kwq_find_rw_lowest(ksyn_wait_queue_t kwq, int flags, uint32_t premgen,
2008 int *typep, uint32_t lowest[])
2009 {
2010 uint32_t kw_fr, kw_fwr, low;
2011 int type = 0, lowtype, typenum[2] = { 0 };
2012 uint32_t numbers[2] = { 0 };
2013 int count = 0, i;
2014
2015 if ((kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count != 0) ||
2016 ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0)) {
2017 type |= PTH_RWSHFT_TYPE_READ;
2018 /* read entries are present */
2019 if (kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count != 0) {
2020 kw_fr = kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_firstnum;
2021 if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) &&
2022 (is_seqlower(premgen, kw_fr) != 0))
2023 kw_fr = premgen;
2024 } else
2025 kw_fr = premgen;
2026
2027 lowest[KSYN_QUEUE_READ] = kw_fr;
2028 numbers[count]= kw_fr;
2029 typenum[count] = PTH_RW_TYPE_READ;
2030 count++;
2031 } else
2032 lowest[KSYN_QUEUE_READ] = 0;
2033
2034 if ((kwq->kw_ksynqueues[KSYN_QUEUE_WRITE].ksynq_count != 0) ||
2035 ((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0)) {
2036 type |= PTH_RWSHFT_TYPE_WRITE;
2037 /* write entries are present */
2038 if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITE].ksynq_count != 0) {
2039 kw_fwr = kwq->kw_ksynqueues[KSYN_QUEUE_WRITE].ksynq_firstnum;
2040 if (((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0) &&
2041 (is_seqlower(premgen, kw_fwr) != 0))
2042 kw_fwr = premgen;
2043 } else
2044 kw_fwr = premgen;
2045
2046 lowest[KSYN_QUEUE_WRITE] = kw_fwr;
2047 numbers[count]= kw_fwr;
2048 typenum[count] = PTH_RW_TYPE_WRITE;
2049 count++;
2050 } else
2051 lowest[KSYN_QUEUE_WRITE] = 0;
2052
2053 #if __TESTPANICS__
2054 if (count == 0)
2055 panic("nothing in the queue???\n");
2056 #endif /* __TESTPANICS__ */
2057
2058 low = numbers[0];
2059 lowtype = typenum[0];
2060 if (count > 1) {
2061 for (i = 1; i< count; i++) {
2062 if (is_seqlower(numbers[i] , low) != 0) {
2063 low = numbers[i];
2064 lowtype = typenum[i];
2065 }
2066 }
2067 }
2068 type |= lowtype;
2069
2070 if (typep != 0)
2071 *typep = type;
2072 return(0);
2073 }
2074
2075 /* wake up readers, up to the writer limit */
2076 int
2077 ksyn_wakeupreaders(ksyn_wait_queue_t kwq, uint32_t limitread, int allreaders,
2078 uint32_t updatebits, int *wokenp)
2079 {
2080 ksyn_queue_t kq;
2081 int failedwakeup = 0;
2082 int numwoken = 0;
2083 kern_return_t kret = KERN_SUCCESS;
2084 uint32_t lbits = 0;
2085
2086 lbits = updatebits;
2087
2088 kq = &kwq->kw_ksynqueues[KSYN_QUEUE_READ];
2089 while ((kq->ksynq_count != 0) &&
2090 (allreaders || (is_seqlower(kq->ksynq_firstnum, limitread) != 0))) {
2091 kret = ksyn_signal(kwq, KSYN_QUEUE_READ, NULL, lbits);
2092 if (kret == KERN_NOT_WAITING) {
2093 failedwakeup++;
2094 }
2095 numwoken++;
2096 }
2097
2098 if (wokenp != NULL)
2099 *wokenp = numwoken;
2100 return(failedwakeup);
2101 }
2102
2103
2104 /*
2105 * This handles granting the lock to the next set of waiters on rw_unlock()
2106 * or on arrival of all preposted waiters.
2107 */
2108 int
2109 kwq_handle_unlock(ksyn_wait_queue_t kwq, __unused uint32_t mgen, uint32_t rw_wc,
2110 uint32_t *updatep, int flags, int *blockp, uint32_t premgen)
2111 {
2112 uint32_t low_writer, limitrdnum;
2113 int rwtype, error=0;
2114 int allreaders, nfailed;
2115 uint32_t updatebits = 0, numneeded = 0;
2116 int prepost = flags & KW_UNLOCK_PREPOST;
2117 thread_t preth = THREAD_NULL;
2118 ksyn_waitq_element_t kwe;
2119 uthread_t uth;
2120 thread_t th;
2121 int woken = 0;
2122 int block = 1;
2123 uint32_t lowest[KSYN_QUEUE_MAX]; /* no need for upgrade as it is handled separately */
2124 kern_return_t kret = KERN_SUCCESS;
2125 ksyn_queue_t kq;
2126 int curthreturns = 0;
2127
2128 if (prepost != 0) {
2129 preth = current_thread();
2130 }
2131
2132 kq = &kwq->kw_ksynqueues[KSYN_QUEUE_READ];
2133 kwq->kw_lastseqword = rw_wc;
2134 kwq->kw_lastunlockseq = (rw_wc & PTHRW_COUNT_MASK);
2135 kwq->kw_kflags &= ~KSYN_KWF_OVERLAP_GUARD;
2136
2137 error = kwq_find_rw_lowest(kwq, flags, premgen, &rwtype, lowest);
2138 #if __TESTPANICS__
2139 if (error != 0)
2140 panic("rwunlock: cannot fails to slot next round of threads");
2141 #endif /* __TESTPANICS__ */
2142
2143 low_writer = lowest[KSYN_QUEUE_WRITE];
2144
2145 allreaders = 0;
2146 updatebits = 0;
2147
2148 switch (rwtype & PTH_RW_TYPE_MASK) {
2149 case PTH_RW_TYPE_READ: {
2150 // XXX
2151 /* what about the preflight which is LREAD or READ ?? */
2152 if ((rwtype & PTH_RWSHFT_TYPE_MASK) != 0) {
2153 if (rwtype & PTH_RWSHFT_TYPE_WRITE) {
2154 updatebits |= (PTH_RWL_WBIT | PTH_RWL_KBIT);
2155 }
2156 }
2157 limitrdnum = 0;
2158 if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0) {
2159 limitrdnum = low_writer;
2160 } else {
2161 allreaders = 1;
2162 }
2163
2164 numneeded = 0;
2165
2166 if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0) {
2167 limitrdnum = low_writer;
2168 numneeded = ksyn_queue_count_tolowest(kq, limitrdnum);
2169 if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) && (is_seqlower(premgen, limitrdnum) != 0)) {
2170 curthreturns = 1;
2171 numneeded += 1;
2172 }
2173 } else {
2174 // no writers at all
2175 // no other waiters, only readers
2176 kwq->kw_kflags |= KSYN_KWF_OVERLAP_GUARD;
2177 numneeded += kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count;
2178 if ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) {
2179 curthreturns = 1;
2180 numneeded += 1;
2181 }
2182 }
2183
2184 updatebits += (numneeded << PTHRW_COUNT_SHIFT);
2185
2186 kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;
2187
2188 if (curthreturns != 0) {
2189 block = 0;
2190 uth = current_uthread();
2191 kwe = pthread_kern->uthread_get_uukwe(uth);
2192 kwe->kwe_psynchretval = updatebits;
2193 }
2194
2195
2196 nfailed = ksyn_wakeupreaders(kwq, limitrdnum, allreaders,
2197 updatebits, &woken);
2198 if (nfailed != 0) {
2199 _kwq_mark_interruped_wakeup(kwq, KWQ_INTR_READ, nfailed,
2200 limitrdnum, updatebits);
2201 }
2202
2203 error = 0;
2204
2205 if ((kwq->kw_ksynqueues[KSYN_QUEUE_WRITE].ksynq_count != 0) &&
2206 ((updatebits & PTH_RWL_WBIT) == 0)) {
2207 panic("kwq_handle_unlock: writer pending but no writebit set %x\n", updatebits);
2208 }
2209 }
2210 break;
2211
2212 case PTH_RW_TYPE_WRITE: {
2213
2214 /* only one thread is going to be granted */
2215 updatebits |= (PTHRW_INC);
2216 updatebits |= PTH_RWL_KBIT| PTH_RWL_EBIT;
2217
2218 if (((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0) && (low_writer == premgen)) {
2219 block = 0;
2220 if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITE].ksynq_count != 0) {
2221 updatebits |= PTH_RWL_WBIT;
2222 }
2223 th = preth;
2224 uth = pthread_kern->get_bsdthread_info(th);
2225 kwe = pthread_kern->uthread_get_uukwe(uth);
2226 kwe->kwe_psynchretval = updatebits;
2227 } else {
2228 /* we are not granting writelock to the preposting thread */
2229 /* if other writers are waiting, or a preposting write thread exists, set the W bit */
2230 if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITE].ksynq_count > 1 ||
2231 (flags & KW_UNLOCK_PREPOST_WRLOCK) != 0) {
2232 updatebits |= PTH_RWL_WBIT;
2233 }
2234 /* setup next in the queue */
2235 kret = ksyn_signal(kwq, KSYN_QUEUE_WRITE, NULL, updatebits);
2236 if (kret == KERN_NOT_WAITING) {
2237 _kwq_mark_interruped_wakeup(kwq, KWQ_INTR_WRITE, 1,
2238 low_writer, updatebits);
2239 }
2240 error = 0;
2241 }
2242 kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;
2243 if ((updatebits & (PTH_RWL_KBIT | PTH_RWL_EBIT)) !=
2244 (PTH_RWL_KBIT | PTH_RWL_EBIT)) {
2245 panic("kwq_handle_unlock: writer lock granted but no ke set %x\n", updatebits);
2246 }
2247 }
2248 break;
2249
2250 default:
2251 panic("rwunlock: invalid type for lock grants");
2252
2253 };
2254
2255 if (updatep != NULL)
2256 *updatep = updatebits;
2257 if (blockp != NULL)
2258 *blockp = block;
2259 return(error);
2260 }
2261
2262 /************* Indiv queue support routines ************************/
2263 void
2264 ksyn_queue_init(ksyn_queue_t kq)
2265 {
2266 TAILQ_INIT(&kq->ksynq_kwelist);
2267 kq->ksynq_count = 0;
2268 kq->ksynq_firstnum = 0;
2269 kq->ksynq_lastnum = 0;
2270 }
2271
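/*
 * Insert a wait element into one of the kwq's internal queues.  Three cases
 * are visible in the body: an empty queue takes the element directly; a
 * FIRSTFIT (mutex) queue keeps arrival order and only tracks the first/last
 * sequence numbers; otherwise the element is placed in sequence order, and a
 * duplicate sequence returns EBUSY unless the existing entry belongs to a
 * cancelled thread.
 */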
2272 int
2273 ksyn_queue_insert(ksyn_wait_queue_t kwq, int kqi, ksyn_waitq_element_t kwe,
2274 uint32_t mgen, int fit)
2275 {
2276 ksyn_queue_t kq = &kwq->kw_ksynqueues[kqi];
2277 uint32_t lockseq = mgen & PTHRW_COUNT_MASK;
2278 int res = 0;
2279
2280 if (kwe->kwe_kwqqueue != NULL) {
2281 panic("adding enqueued item to another queue");
2282 }
2283
2284 if (kq->ksynq_count == 0) {
2285 TAILQ_INSERT_HEAD(&kq->ksynq_kwelist, kwe, kwe_list);
2286 kq->ksynq_firstnum = lockseq;
2287 kq->ksynq_lastnum = lockseq;
2288 } else if (fit == FIRSTFIT) {
2289 /* TBD: if retry bit is set for mutex, add it to the head */
2290 /* firstfit, arriving order */
2291 TAILQ_INSERT_TAIL(&kq->ksynq_kwelist, kwe, kwe_list);
2292 if (is_seqlower(lockseq, kq->ksynq_firstnum)) {
2293 kq->ksynq_firstnum = lockseq;
2294 }
2295 if (is_seqhigher(lockseq, kq->ksynq_lastnum)) {
2296 kq->ksynq_lastnum = lockseq;
2297 }
2298 } else if (lockseq == kq->ksynq_firstnum || lockseq == kq->ksynq_lastnum) {
2299 /* During prepost, when a thread is getting cancelled, we could have
2300 * two entries with the same seq */
2301 res = EBUSY;
2302 if (kwe->kwe_state == KWE_THREAD_PREPOST) {
2303 ksyn_waitq_element_t tmp = ksyn_queue_find_seq(kwq, kq, lockseq);
2304 if (tmp != NULL && tmp->kwe_uth != NULL &&
2305 pthread_kern->uthread_is_cancelled(tmp->kwe_uth)) {
2306 TAILQ_INSERT_TAIL(&kq->ksynq_kwelist, kwe, kwe_list);
2307 res = 0;
2308 }
2309 }
2310 } else if (is_seqlower(kq->ksynq_lastnum, lockseq)) { // XXX is_seqhigher
2311 TAILQ_INSERT_TAIL(&kq->ksynq_kwelist, kwe, kwe_list);
2312 kq->ksynq_lastnum = lockseq;
2313 } else if (is_seqlower(lockseq, kq->ksynq_firstnum)) {
2314 TAILQ_INSERT_HEAD(&kq->ksynq_kwelist, kwe, kwe_list);
2315 kq->ksynq_firstnum = lockseq;
2316 } else {
2317 ksyn_waitq_element_t q_kwe, r_kwe;
2318
2319 res = ESRCH;
2320 TAILQ_FOREACH_SAFE(q_kwe, &kq->ksynq_kwelist, kwe_list, r_kwe) {
2321 if (is_seqhigher(q_kwe->kwe_lockseq, lockseq)) {
2322 TAILQ_INSERT_BEFORE(q_kwe, kwe, kwe_list);
2323 res = 0;
2324 break;
2325 }
2326 }
2327 }
2328
2329 if (res == 0) {
2330 kwe->kwe_kwqqueue = kwq;
2331 kq->ksynq_count++;
2332 kwq->kw_inqueue++;
2333 update_low_high(kwq, lockseq);
2334 }
2335 return res;
2336 }
2337
2338 void
2339 ksyn_queue_remove_item(ksyn_wait_queue_t kwq, ksyn_queue_t kq,
2340 ksyn_waitq_element_t kwe)
2341 {
2342 if (kq->ksynq_count == 0) {
2343 panic("removing item from empty queue");
2344 }
2345
2346 if (kwe->kwe_kwqqueue != kwq) {
2347 panic("removing item from wrong queue");
2348 }
2349
2350 TAILQ_REMOVE(&kq->ksynq_kwelist, kwe, kwe_list);
2351 kwe->kwe_list.tqe_next = NULL;
2352 kwe->kwe_list.tqe_prev = NULL;
2353 kwe->kwe_kwqqueue = NULL;
2354
2355 if (--kq->ksynq_count > 0) {
2356 ksyn_waitq_element_t tmp;
2357 tmp = TAILQ_FIRST(&kq->ksynq_kwelist);
2358 kq->ksynq_firstnum = tmp->kwe_lockseq & PTHRW_COUNT_MASK;
2359 tmp = TAILQ_LAST(&kq->ksynq_kwelist, ksynq_kwelist_head);
2360 kq->ksynq_lastnum = tmp->kwe_lockseq & PTHRW_COUNT_MASK;
2361 } else {
2362 kq->ksynq_firstnum = 0;
2363 kq->ksynq_lastnum = 0;
2364 }
2365
2366 if (--kwq->kw_inqueue > 0) {
2367 uint32_t curseq = kwe->kwe_lockseq & PTHRW_COUNT_MASK;
2368 if (kwq->kw_lowseq == curseq) {
2369 kwq->kw_lowseq = find_nextlowseq(kwq);
2370 }
2371 if (kwq->kw_highseq == curseq) {
2372 kwq->kw_highseq = find_nexthighseq(kwq);
2373 }
2374 } else {
2375 kwq->kw_lowseq = 0;
2376 kwq->kw_highseq = 0;
2377 }
2378 }
2379
2380 ksyn_waitq_element_t
2381 ksyn_queue_find_seq(__unused ksyn_wait_queue_t kwq, ksyn_queue_t kq,
2382 uint32_t seq)
2383 {
2384 ksyn_waitq_element_t kwe;
2385
2386 // XXX: should stop searching when higher sequence number is seen
2387 TAILQ_FOREACH(kwe, &kq->ksynq_kwelist, kwe_list) {
2388 if ((kwe->kwe_lockseq & PTHRW_COUNT_MASK) == seq) {
2389 return kwe;
2390 }
2391 }
2392 return NULL;
2393 }
2394
2395 /* find the thread at the target sequence (or a broadcast/prepost at or above) */
2396 ksyn_waitq_element_t
2397 ksyn_queue_find_cvpreposeq(ksyn_queue_t kq, uint32_t cgen)
2398 {
2399 ksyn_waitq_element_t result = NULL;
2400 ksyn_waitq_element_t kwe;
2401 uint32_t lgen = (cgen & PTHRW_COUNT_MASK);
2402
2403 TAILQ_FOREACH(kwe, &kq->ksynq_kwelist, kwe_list) {
2404 if (is_seqhigher_eq(kwe->kwe_lockseq, cgen)) {
2405 result = kwe;
2406
2407 // KWE_THREAD_INWAIT must be strictly equal
2408 if (kwe->kwe_state == KWE_THREAD_INWAIT &&
2409 (kwe->kwe_lockseq & PTHRW_COUNT_MASK) != lgen) {
2410 result = NULL;
2411 }
2412 break;
2413 }
2414 }
2415 return result;
2416 }
2417
2418 /* look for a thread to signal: prefer an exact match at signalseq, else the first eligible entry at or below uptoseq */
2419 ksyn_waitq_element_t
2420 ksyn_queue_find_signalseq(__unused ksyn_wait_queue_t kwq, ksyn_queue_t kq,
2421 uint32_t uptoseq, uint32_t signalseq)
2422 {
2423 ksyn_waitq_element_t result = NULL;
2424 ksyn_waitq_element_t q_kwe, r_kwe;
2425
2426 // XXX
2427 /* case where wrap in the tail of the queue exists */
2428 TAILQ_FOREACH_SAFE(q_kwe, &kq->ksynq_kwelist, kwe_list, r_kwe) {
2429 if (q_kwe->kwe_state == KWE_THREAD_PREPOST) {
2430 if (is_seqhigher(q_kwe->kwe_lockseq, uptoseq)) {
2431 return result;
2432 }
2433 }
2434 if (q_kwe->kwe_state == KWE_THREAD_PREPOST ||
2435 q_kwe->kwe_state == KWE_THREAD_BROADCAST) {
2436 /* match any prepost at our same uptoseq or any broadcast above */
2437 if (is_seqlower(q_kwe->kwe_lockseq, uptoseq)) {
2438 continue;
2439 }
2440 return q_kwe;
2441 } else if (q_kwe->kwe_state == KWE_THREAD_INWAIT) {
2442 /*
2443 * Match any (non-cancelled) thread at or below our upto sequence -
2444 * but prefer an exact match to our signal sequence (if present) to
2445 * keep exact matches happening.
2446 */
2447 if (is_seqhigher(q_kwe->kwe_lockseq, uptoseq)) {
2448 return result;
2449 }
2450 if (q_kwe->kwe_kwqqueue == kwq) {
2451 if (!pthread_kern->uthread_is_cancelled(q_kwe->kwe_uth)) {
2452 /* if equal or higher than our signal sequence, return this one */
2453 if (is_seqhigher_eq(q_kwe->kwe_lockseq, signalseq)) {
2454 return q_kwe;
2455 }
2456
2457 /* otherwise, just remember this eligible thread and move on */
2458 if (result == NULL) {
2459 result = q_kwe;
2460 }
2461 }
2462 }
2463 } else {
2464 panic("ksyn_queue_find_signalseq(): unknown wait queue element type (%d)\n", q_kwe->kwe_state);
2465 }
2466 }
2467 return result;
2468 }
2469
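/*
 * Flush queue entries at or below 'upto' (or all of them).  Fake entries
 * (preposts/broadcasts) are removed and freed; real waiters are signalled
 * with the MTX_WAIT/CV_MBIT bits so userspace treats the wakeup as spurious
 * and re-evaluates the condvar state.
 */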
2470 void
2471 ksyn_queue_free_items(ksyn_wait_queue_t kwq, int kqi, uint32_t upto, int all)
2472 {
2473 ksyn_waitq_element_t kwe;
2474 uint32_t tseq = upto & PTHRW_COUNT_MASK;
2475 ksyn_queue_t kq = &kwq->kw_ksynqueues[kqi];
2476 uint32_t freed = 0, signaled = 0;
2477
2478 PTHREAD_TRACE(psynch_cvar_freeitems | DBG_FUNC_START, kwq->kw_addr,
2479 kqi, upto, all);
2480
2481 while ((kwe = TAILQ_FIRST(&kq->ksynq_kwelist)) != NULL) {
2482 if (all == 0 && is_seqhigher(kwe->kwe_lockseq, tseq)) {
2483 break;
2484 }
2485 if (kwe->kwe_state == KWE_THREAD_INWAIT) {
2486 /*
2487 * This scenario typically occurs when the cvar has been
2488 * reinited and new waiters are already waiting. We can
2489 * wake them with a spurious-wakeup indication so the cvar
2490 * state gets reset correctly.
2491 */
2492
2493 PTHREAD_TRACE(psynch_cvar_freeitems, kwq->kw_addr, kwe,
2494 kwq->kw_inqueue, 1);
2495
2496 /* skip canceled ones */
2497 /* wake the rest */
2498 /* set M bit to indicate to the waking CV to return the Inc val */
2499 (void)ksyn_signal(kwq, kqi, kwe,
2500 PTHRW_INC | PTH_RWS_CV_MBIT | PTH_RWL_MTX_WAIT);
2501 signaled++;
2502 } else {
2503 PTHREAD_TRACE(psynch_cvar_freeitems, kwq->kw_addr, kwe,
2504 kwq->kw_inqueue, 2);
2505 ksyn_queue_remove_item(kwq, kq, kwe);
2506 zfree(kwe_zone, kwe);
2507 kwq->kw_fakecount--;
2508 freed++;
2509 }
2510 }
2511
2512 PTHREAD_TRACE(psynch_cvar_freeitems | DBG_FUNC_END, kwq->kw_addr, freed,
2513 signaled, kwq->kw_inqueue);
2514 }
2515
2516 /*************************************************************************/
2517
2518 void
2519 update_low_high(ksyn_wait_queue_t kwq, uint32_t lockseq)
2520 {
2521 if (kwq->kw_inqueue == 1) {
2522 kwq->kw_lowseq = lockseq;
2523 kwq->kw_highseq = lockseq;
2524 } else {
2525 if (is_seqlower(lockseq, kwq->kw_lowseq)) {
2526 kwq->kw_lowseq = lockseq;
2527 }
2528 if (is_seqhigher(lockseq, kwq->kw_highseq)) {
2529 kwq->kw_highseq = lockseq;
2530 }
2531 }
2532 }
2533
2534 uint32_t
2535 find_nextlowseq(ksyn_wait_queue_t kwq)
2536 {
2537 uint32_t lowest = 0;
2538 int first = 1;
2539 int i;
2540
2541 for (i = 0; i < KSYN_QUEUE_MAX; i++) {
2542 if (kwq->kw_ksynqueues[i].ksynq_count > 0) {
2543 uint32_t current = kwq->kw_ksynqueues[i].ksynq_firstnum;
2544 if (first || is_seqlower(current, lowest)) {
2545 lowest = current;
2546 first = 0;
2547 }
2548 }
2549 }
2550
2551 return lowest;
2552 }
2553
2554 uint32_t
2555 find_nexthighseq(ksyn_wait_queue_t kwq)
2556 {
2557 uint32_t highest = 0;
2558 int first = 1;
2559 int i;
2560
2561 for (i = 0; i < KSYN_QUEUE_MAX; i++) {
2562 if (kwq->kw_ksynqueues[i].ksynq_count > 0) {
2563 uint32_t current = kwq->kw_ksynqueues[i].ksynq_lastnum;
2564 if (first || is_seqhigher(current, highest)) {
2565 highest = current;
2566 first = 0;
2567 }
2568 }
2569 }
2570
2571 return highest;
2572 }
2573
2574 int
2575 find_seq_till(ksyn_wait_queue_t kwq, uint32_t upto, uint32_t nwaiters,
2576 uint32_t *countp)
2577 {
2578 int i;
2579 uint32_t count = 0;
2580
2581 for (i = 0; i< KSYN_QUEUE_MAX; i++) {
2582 count += ksyn_queue_count_tolowest(&kwq->kw_ksynqueues[i], upto);
2583 if (count >= nwaiters) {
2584 break;
2585 }
2586 }
2587
2588 if (countp != NULL) {
2589 *countp = count;
2590 }
2591
2592 if (count == 0) {
2593 return 0;
2594 } else if (count >= nwaiters) {
2595 return 1;
2596 } else {
2597 return 0;
2598 }
2599 }
2600
2601
2602 uint32_t
2603 ksyn_queue_count_tolowest(ksyn_queue_t kq, uint32_t upto)
2604 {
2605 uint32_t i = 0;
2606 ksyn_waitq_element_t kwe, newkwe;
2607
2608 if (kq->ksynq_count == 0 || is_seqhigher(kq->ksynq_firstnum, upto)) {
2609 return 0;
2610 }
2611 if (upto == kq->ksynq_firstnum) {
2612 return 1;
2613 }
2614 TAILQ_FOREACH_SAFE(kwe, &kq->ksynq_kwelist, kwe_list, newkwe) {
2615 uint32_t curval = (kwe->kwe_lockseq & PTHRW_COUNT_MASK);
2616 if (is_seqhigher(curval, upto)) {
2617 break;
2618 }
2619 ++i;
2620 if (upto == curval) {
2621 break;
2622 }
2623 }
2624 return i;
2625 }
2626
2627 /* handles the condvar broadcast: wakes eligible waiters up to 'upto' and accumulates the update bits for the syscall return in *updatep */
2628 void
2629 ksyn_handle_cvbroad(ksyn_wait_queue_t ckwq, uint32_t upto, uint32_t *updatep)
2630 {
2631 ksyn_waitq_element_t kwe, newkwe;
2632 uint32_t updatebits = 0;
2633 ksyn_queue_t kq = &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITE];
2634
2635 struct ksyn_queue kfreeq;
2636 ksyn_queue_init(&kfreeq);
2637
2638 PTHREAD_TRACE(psynch_cvar_broadcast | DBG_FUNC_START, ckwq->kw_addr, upto,
2639 ckwq->kw_inqueue, 0);
2640
2641 retry:
2642 TAILQ_FOREACH_SAFE(kwe, &kq->ksynq_kwelist, kwe_list, newkwe) {
2643 if (is_seqhigher(kwe->kwe_lockseq, upto)) {
2644 // outside our range
2645 break;
2646 }
2647
2648 if (kwe->kwe_state == KWE_THREAD_INWAIT) {
2649 // Wake only non-canceled threads waiting on this CV.
2650 if (!pthread_kern->uthread_is_cancelled(kwe->kwe_uth)) {
2651 PTHREAD_TRACE(psynch_cvar_broadcast, ckwq->kw_addr, kwe, 0, 1);
2652 (void)ksyn_signal(ckwq, KSYN_QUEUE_WRITE, kwe, PTH_RWL_MTX_WAIT);
2653 updatebits += PTHRW_INC;
2654 }
2655 } else if (kwe->kwe_state == KWE_THREAD_BROADCAST ||
2656 kwe->kwe_state == KWE_THREAD_PREPOST) {
2657 PTHREAD_TRACE(psynch_cvar_broadcast, ckwq->kw_addr, kwe,
2658 kwe->kwe_state, 2);
2659 ksyn_queue_remove_item(ckwq, kq, kwe);
2660 TAILQ_INSERT_TAIL(&kfreeq.ksynq_kwelist, kwe, kwe_list);
2661 ckwq->kw_fakecount--;
2662 } else {
2663 panic("unknown kwe state\n");
2664 }
2665 }
2666
2667 /* Need to enter a broadcast in the queue (if not already at L == S) */
2668
2669 if (diff_genseq(ckwq->kw_lword, ckwq->kw_sword)) {
2670 PTHREAD_TRACE(psynch_cvar_broadcast, ckwq->kw_addr, ckwq->kw_lword,
2671 ckwq->kw_sword, 3);
2672
2673 newkwe = TAILQ_FIRST(&kfreeq.ksynq_kwelist);
2674 if (newkwe == NULL) {
2675 ksyn_wqunlock(ckwq);
2676 newkwe = (ksyn_waitq_element_t)zalloc(kwe_zone);
2677 TAILQ_INSERT_TAIL(&kfreeq.ksynq_kwelist, newkwe, kwe_list);
2678 ksyn_wqlock(ckwq);
2679 goto retry;
2680 } else {
2681 TAILQ_REMOVE(&kfreeq.ksynq_kwelist, newkwe, kwe_list);
2682 ksyn_prepost(ckwq, newkwe, KWE_THREAD_BROADCAST, upto);
2683 PTHREAD_TRACE(psynch_cvar_broadcast, ckwq->kw_addr, newkwe, 0, 4);
2684 }
2685 }
2686
2687 // free up any remaining things stumbled across above
2688 while ((kwe = TAILQ_FIRST(&kfreeq.ksynq_kwelist)) != NULL) {
2689 TAILQ_REMOVE(&kfreeq.ksynq_kwelist, kwe, kwe_list);
2690 zfree(kwe_zone, kwe);
2691 }
2692
2693 PTHREAD_TRACE(psynch_cvar_broadcast | DBG_FUNC_END, ckwq->kw_addr,
2694 updatebits, 0, 0);
2695
2696 if (updatep != NULL) {
2697 *updatep |= updatebits;
2698 }
2699 }
2700
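/*
 * Bring a condvar kwq back to a consistent state after a signal/broadcast:
 * once L == S every generation has been consumed, so any leftover queue
 * entries are flushed, the sequence words are zeroed and the C bit is
 * reported back to userspace; if only fake (prepost/broadcast) entries
 * remain, the P bit is reported instead.
 */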
2701 void
2702 ksyn_cvupdate_fixup(ksyn_wait_queue_t ckwq, uint32_t *updatebits)
2703 {
2704 if ((ckwq->kw_lword & PTHRW_COUNT_MASK) == (ckwq->kw_sword & PTHRW_COUNT_MASK)) {
2705 if (ckwq->kw_inqueue != 0) {
2706 /* FREE THE QUEUE */
2707 ksyn_queue_free_items(ckwq, KSYN_QUEUE_WRITE, ckwq->kw_lword, 0);
2708 #if __TESTPANICS__
2709 if (ckwq->kw_inqueue != 0)
2710 panic("ksyn_cvupdate_fixup: L == S, but entries in queue beyond S");
2711 #endif /* __TESTPANICS__ */
2712 }
2713 ckwq->kw_lword = ckwq->kw_uword = ckwq->kw_sword = 0;
2714 ckwq->kw_kflags |= KSYN_KWF_ZEROEDOUT;
2715 *updatebits |= PTH_RWS_CV_CBIT;
2716 } else if (ckwq->kw_inqueue != 0 && ckwq->kw_fakecount == ckwq->kw_inqueue) {
2717 // only fake entries are present in the queue
2718 *updatebits |= PTH_RWS_CV_PBIT;
2719 }
2720 }
2721
2722 void
2723 psynch_zoneinit(void)
2724 {
2725 kwq_zone = zinit(sizeof(struct ksyn_wait_queue),
2726 8192 * sizeof(struct ksyn_wait_queue), 4096, "ksyn_wait_queue");
2727 kwe_zone = zinit(sizeof(struct ksyn_waitq_element),
2728 8192 * sizeof(struct ksyn_waitq_element), 4096, "ksyn_waitq_element");
2729 }
2730
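/*
 * Map a thread to the kernel wait queue it is currently enqueued on, or NULL
 * if it is not blocked on a psynch object.  Used by the stackshot support
 * below.
 */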
2731 void *
2732 _pthread_get_thread_kwq(thread_t thread)
2733 {
2734 assert(thread);
2735 struct uthread * uthread = pthread_kern->get_bsdthread_info(thread);
2736 assert(uthread);
2737 ksyn_waitq_element_t kwe = pthread_kern->uthread_get_uukwe(uthread);
2738 assert(kwe);
2739 ksyn_wait_queue_t kwq = kwe->kwe_kwqqueue;
2740 return kwq;
2741 }
2742
2743 /* This function is used by stackshot to determine why a thread is blocked, and report
2744 * who owns the object that the thread is blocked on. It should *only* be called if the
2745 * `block_hint' field in the relevant thread's struct is populated with something related
2746 * to pthread sync objects.
2747 */
2748 void
2749 _pthread_find_owner(thread_t thread,
2750 struct stackshot_thread_waitinfo * waitinfo)
2751 {
2752 ksyn_wait_queue_t kwq = _pthread_get_thread_kwq(thread);
2753 switch (waitinfo->wait_type) {
2754 case kThreadWaitPThreadMutex:
2755 assert((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_MTX);
2756 waitinfo->owner = thread_tid(kwq->kw_owner);
2757 waitinfo->context = kwq->kw_addr;
2758 break;
2759 /* Owner of rwlock not stored in kernel space due to races. Punt
2760 * and hope that the userspace address is helpful enough. */
2761 case kThreadWaitPThreadRWLockRead:
2762 case kThreadWaitPThreadRWLockWrite:
2763 assert((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_RWLOCK);
2764 waitinfo->owner = 0;
2765 waitinfo->context = kwq->kw_addr;
2766 break;
2767 /* Condvars don't have owners, so just give the userspace address. */
2768 case kThreadWaitPThreadCondVar:
2769 assert((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_CVAR);
2770 waitinfo->owner = 0;
2771 waitinfo->context = kwq->kw_addr;
2772 break;
2773 case kThreadWaitNone:
2774 default:
2775 waitinfo->owner = 0;
2776 waitinfo->context = 0;
2777 break;
2778 }
2779 }