[apple/libpthread.git] / kern / kern_synch.c (libpthread-416.60.2)
1 /*
2 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995-2005 Apple Computer, Inc. All Rights Reserved */
29 /*
30 * pthread_support.c
31 */
32
33 #include <sys/param.h>
34 #include <sys/queue.h>
35 #include <sys/resourcevar.h>
36 //#include <sys/proc_internal.h>
37 #include <sys/kauth.h>
38 #include <sys/systm.h>
39 #include <sys/timeb.h>
40 #include <sys/times.h>
41 #include <sys/time.h>
42 #include <sys/acct.h>
43 #include <sys/kernel.h>
44 #include <sys/wait.h>
45 #include <sys/signalvar.h>
46 #include <sys/syslog.h>
47 #include <sys/stat.h>
48 #include <sys/lock.h>
49 #include <sys/kdebug.h>
50 //#include <sys/sysproto.h>
51 //#include <sys/pthread_internal.h>
52 #include <sys/vm.h>
53 #include <sys/user.h>
54
55 #include <mach/mach_types.h>
56 #include <mach/vm_prot.h>
57 #include <mach/semaphore.h>
58 #include <mach/sync_policy.h>
59 #include <mach/task.h>
60 #include <kern/kern_types.h>
61 #include <kern/task.h>
62 #include <kern/clock.h>
63 #include <mach/kern_return.h>
64 #include <kern/thread.h>
65 #include <kern/sched_prim.h>
66 #include <kern/thread_call.h>
67 #include <kern/kalloc.h>
68 #include <kern/zalloc.h>
69 #include <kern/sched_prim.h>
70 #include <kern/processor.h>
71 #include <kern/block_hint.h>
72 #include <kern/turnstile.h>
73 //#include <kern/mach_param.h>
74 #include <mach/mach_vm.h>
75 #include <mach/mach_param.h>
76 #include <mach/thread_policy.h>
77 #include <mach/message.h>
78 #include <mach/port.h>
79 //#include <vm/vm_protos.h>
80 #include <vm/vm_map.h>
81 #include <mach/vm_region.h>
82
83 #include <libkern/OSAtomic.h>
84
85 #include <pexpert/pexpert.h>
86
87 #include "kern_internal.h"
88 #include "synch_internal.h"
89 #include "kern_trace.h"
90
91 typedef struct uthread *uthread_t;
92
93 //#define __FAILEDUSERTEST__(s) do { panic(s); } while (0)
94 #define __FAILEDUSERTEST__(s) do { printf("PSYNCH: pid[%d]: %s\n", proc_pid(current_proc()), s); } while (0)
95 #define __FAILEDUSERTEST2__(s, x...) do { printf("PSYNCH: pid[%d]: " s "\n", proc_pid(current_proc()), x); } while (0)
96
97 lck_mtx_t *pthread_list_mlock;
98
99 #define PTH_HASHSIZE 100
100
101 static LIST_HEAD(pthhashhead, ksyn_wait_queue) *pth_glob_hashtbl;
102 static unsigned long pthhash;
103
104 static LIST_HEAD(, ksyn_wait_queue) pth_free_list;
105
106 static zone_t kwq_zone; /* zone for allocation of ksyn_queue */
107 static zone_t kwe_zone; /* zone for allocation of ksyn_waitq_element */
108
109 #define SEQFIT 0
110 #define FIRSTFIT 1
111
112 struct ksyn_queue {
113 TAILQ_HEAD(ksynq_kwelist_head, ksyn_waitq_element) ksynq_kwelist;
114 uint32_t ksynq_count; /* number of entries in queue */
115 uint32_t ksynq_firstnum; /* lowest seq in queue */
116 uint32_t ksynq_lastnum; /* highest seq in queue */
117 };
118 typedef struct ksyn_queue *ksyn_queue_t;
119
120 typedef enum {
121 KSYN_QUEUE_READ = 0,
122 KSYN_QUEUE_WRITE,
123 KSYN_QUEUE_MAX,
124 } kwq_queue_type_t;
125
126 typedef enum {
127 KWQ_INTR_NONE = 0,
128 KWQ_INTR_READ = 0x1,
129 KWQ_INTR_WRITE = 0x2,
130 } kwq_intr_type_t;
131
132 struct ksyn_wait_queue {
133 LIST_ENTRY(ksyn_wait_queue) kw_hash;
134 LIST_ENTRY(ksyn_wait_queue) kw_list;
135 user_addr_t kw_addr;
136 thread_t kw_owner; /* current owner or THREAD_NULL, has a +1 */
137 uint64_t kw_object; /* object backing in shared mode */
138 uint64_t kw_offset; /* offset inside the object in shared mode */
139 int kw_pflags; /* flags under listlock protection */
140 struct timeval kw_ts; /* timeval needed for upkeep before free */
141 int kw_iocount; /* inuse reference */
142 int kw_dropcount; /* current users unlocking... */
143
144 int kw_type; /* queue type like mutex, cvar, etc */
145 uint32_t kw_inqueue; /* num of waiters held */
146 uint32_t kw_fakecount; /* number of error/prepost fakes */
147 uint32_t kw_highseq; /* highest seq in the queue */
148 uint32_t kw_lowseq; /* lowest seq in the queue */
149 uint32_t kw_lword; /* L value from userland */
150 uint32_t kw_uword; /* U word value from userland */
151 uint32_t kw_sword; /* S word value from userland */
152 uint32_t kw_lastunlockseq; /* the last seq that unlocked */
153 /* for CVs this is used as the seq the kernel has seen so far */
154 #define kw_cvkernelseq kw_lastunlockseq
155 uint32_t kw_lastseqword; /* the last seq that unlocked */
156 /* for mutex and cvar we need to track I bit values */
157 uint32_t kw_nextseqword; /* the last seq that unlocked; with num of waiters */
158 struct {
159 uint32_t count; /* prepost count */
160 uint32_t lseq; /* prepost target seq */
161 uint32_t sseq; /* prepost target sword, in cvar used for mutexowned */
162 } kw_prepost;
163 struct {
164 kwq_intr_type_t type; /* type of failed wakeups */
165 uint32_t count; /* prepost of missed wakeup due to intrs */
166 uint32_t seq; /* prepost of missed wakeup limit seq */
167 uint32_t returnbits; /* return bits value for missed wakeup threads */
168 } kw_intr;
169
170 int kw_kflags;
171 int kw_qos_override; /* QoS of max waiter during contention period */
172 struct turnstile *kw_turnstile;
173 struct ksyn_queue kw_ksynqueues[KSYN_QUEUE_MAX]; /* queues to hold threads */
174 lck_spin_t kw_lock; /* spinlock protecting this structure */
175 };
176 typedef struct ksyn_wait_queue * ksyn_wait_queue_t;
177
178 #define TID_ZERO (uint64_t)0
179
180 /* bits needed in handling the rwlock unlock */
181 #define PTH_RW_TYPE_READ 0x01
182 #define PTH_RW_TYPE_WRITE 0x04
183 #define PTH_RW_TYPE_MASK 0xff
184 #define PTH_RW_TYPE_SHIFT 8
185
186 #define PTH_RWSHFT_TYPE_READ 0x0100
187 #define PTH_RWSHFT_TYPE_WRITE 0x0400
188 #define PTH_RWSHFT_TYPE_MASK 0xff00
189
190 /*
191 * Mutex pshared attributes
192 */
193 #define PTHREAD_PROCESS_SHARED _PTHREAD_MTX_OPT_PSHARED
194 #define PTHREAD_PROCESS_PRIVATE 0x20
195 #define PTHREAD_PSHARED_FLAGS_MASK 0x30
196
197 /*
198 * Mutex policy attributes
199 */
200 #define _PTHREAD_MTX_OPT_POLICY_FAIRSHARE 0x040 /* 1 */
201 #define _PTHREAD_MTX_OPT_POLICY_FIRSTFIT 0x080 /* 2 */
202 #define _PTHREAD_MTX_OPT_POLICY_MASK 0x1c0
203
204 /* pflags */
205 #define KSYN_WQ_INHASH 2
206 #define KSYN_WQ_SHARED 4
207 #define KSYN_WQ_WAITING 8 /* threads waiting for this wq to be available */
208 #define KSYN_WQ_FLIST 0x10 /* in free list to be freed after a short delay */
209
210 /* kflags */
211 #define KSYN_KWF_INITCLEARED 0x1 /* the init status found and preposts cleared */
212 #define KSYN_KWF_ZEROEDOUT 0x2 /* the lword, etc are inited to 0 */
213 #define KSYN_KWF_QOS_APPLIED 0x4 /* QoS override applied to owner */
214 #define KSYN_KWF_OVERLAP_GUARD 0x8 /* overlap guard */
215
216 #define KSYN_CLEANUP_DEADLINE 10
217 static int psynch_cleanupset;
218 thread_call_t psynch_thcall;
219
220 #define KSYN_WQTYPE_INWAIT 0x1000
221 #define KSYN_WQTYPE_INDROP 0x2000
222 #define KSYN_WQTYPE_MTX 0x01
223 #define KSYN_WQTYPE_CVAR 0x02
224 #define KSYN_WQTYPE_RWLOCK 0x04
225 #define KSYN_WQTYPE_SEMA 0x08
226 #define KSYN_WQTYPE_MASK 0xff
227
228 #define KSYN_WQTYPE_MUTEXDROP (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_MTX)
229
230 static inline int
231 _kwq_type(ksyn_wait_queue_t kwq)
232 {
233 return (kwq->kw_type & KSYN_WQTYPE_MASK);
234 }
235
236 static inline bool
237 _kwq_use_turnstile(ksyn_wait_queue_t kwq)
238 {
239 // <rdar://problem/15926625> If we had writer-owner information from the
240 // rwlock then we could use the turnstile to push on it. For now, only
241 // plain mutexes use it.
242 return (_kwq_type(kwq) == KSYN_WQTYPE_MTX);
243 }
244
245 #define KW_UNLOCK_PREPOST 0x01
246 #define KW_UNLOCK_PREPOST_READLOCK 0x08
247 #define KW_UNLOCK_PREPOST_WRLOCK 0x20
248
249 static int ksyn_wq_hash_lookup(user_addr_t uaddr, proc_t p, int flags, ksyn_wait_queue_t *kwq, struct pthhashhead **hashptr, uint64_t object, uint64_t offset);
250 static int ksyn_wqfind(user_addr_t mutex, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, int flags, int wqtype , ksyn_wait_queue_t *wq);
251 static void ksyn_wqrelease(ksyn_wait_queue_t mkwq, int qfreenow, int wqtype);
252 static int ksyn_findobj(user_addr_t uaddr, uint64_t *objectp, uint64_t *offsetp);
253
254 static int _wait_result_to_errno(wait_result_t result);
255
256 static int ksyn_wait(ksyn_wait_queue_t, kwq_queue_type_t, uint32_t, int, uint64_t, uint16_t, thread_continue_t, block_hint_t);
257 static kern_return_t ksyn_signal(ksyn_wait_queue_t, kwq_queue_type_t, ksyn_waitq_element_t, uint32_t);
258 static void ksyn_freeallkwe(ksyn_queue_t kq);
259
260 static kern_return_t ksyn_mtxsignal(ksyn_wait_queue_t, ksyn_waitq_element_t kwe, uint32_t, thread_t *);
261
262 static int kwq_handle_unlock(ksyn_wait_queue_t, uint32_t mgen, uint32_t rw_wc, uint32_t *updatep, int flags, int *blockp, uint32_t premgen);
263
264 static void ksyn_queue_init(ksyn_queue_t kq);
265 static int ksyn_queue_insert(ksyn_wait_queue_t kwq, int kqi, ksyn_waitq_element_t kwe, uint32_t mgen, int firstfit);
266 static void ksyn_queue_remove_item(ksyn_wait_queue_t kwq, ksyn_queue_t kq, ksyn_waitq_element_t kwe);
267 static void ksyn_queue_free_items(ksyn_wait_queue_t kwq, int kqi, uint32_t upto, int all);
268
269 static void update_low_high(ksyn_wait_queue_t kwq, uint32_t lockseq);
270 static uint32_t find_nextlowseq(ksyn_wait_queue_t kwq);
271 static uint32_t find_nexthighseq(ksyn_wait_queue_t kwq);
272 static int find_seq_till(ksyn_wait_queue_t kwq, uint32_t upto, uint32_t nwaiters, uint32_t *countp);
273
274 static uint32_t ksyn_queue_count_tolowest(ksyn_queue_t kq, uint32_t upto);
275
276 static ksyn_waitq_element_t ksyn_queue_find_cvpreposeq(ksyn_queue_t kq, uint32_t cgen);
277 static void ksyn_handle_cvbroad(ksyn_wait_queue_t ckwq, uint32_t upto, uint32_t *updatep);
278 static void ksyn_cvupdate_fixup(ksyn_wait_queue_t ckwq, uint32_t *updatep);
279 static ksyn_waitq_element_t ksyn_queue_find_signalseq(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t toseq, uint32_t lockseq);
280
281 static void __dead2 psynch_cvcontinue(void *, wait_result_t);
282 static void __dead2 psynch_mtxcontinue(void *, wait_result_t);
283 static void __dead2 psynch_rw_rdcontinue(void *, wait_result_t);
284 static void __dead2 psynch_rw_wrcontinue(void *, wait_result_t);
285
286 static int ksyn_wakeupreaders(ksyn_wait_queue_t kwq, uint32_t limitread, int allreaders, uint32_t updatebits, int *wokenp);
287 static int kwq_find_rw_lowest(ksyn_wait_queue_t kwq, int flags, uint32_t premgen, int *type, uint32_t lowest[]);
288 static ksyn_waitq_element_t ksyn_queue_find_seq(ksyn_wait_queue_t kwq, ksyn_queue_t kq, uint32_t seq);
289
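/*
 * Refresh the cached L, U and S words of a CV kwq from the userland values.
 * If the kwq was zeroed out (previous transition hit L == S), take the values
 * as-is; otherwise only move each word forward. When the S word carries the
 * C bit, also advance kw_cvkernelseq.
 */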
290 static void
291 UPDATE_CVKWQ(ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t ugen, uint32_t rw_wc)
292 {
293 int sinit = ((rw_wc & PTH_RWS_CV_CBIT) != 0);
294
295 // assert((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_CVAR);
296
297 if ((kwq->kw_kflags & KSYN_KWF_ZEROEDOUT) != 0) {
298 /* the values of L,U and S are cleared out due to L==S in previous transition */
299 kwq->kw_lword = mgen;
300 kwq->kw_uword = ugen;
301 kwq->kw_sword = rw_wc;
302 kwq->kw_kflags &= ~KSYN_KWF_ZEROEDOUT;
303 } else {
304 if (is_seqhigher(mgen, kwq->kw_lword)) {
305 kwq->kw_lword = mgen;
306 }
307 if (is_seqhigher(ugen, kwq->kw_uword)) {
308 kwq->kw_uword = ugen;
309 }
310 if (sinit && is_seqhigher(rw_wc, kwq->kw_sword)) {
311 kwq->kw_sword = rw_wc;
312 }
313 }
314 if (sinit && is_seqlower(kwq->kw_cvkernelseq, rw_wc)) {
315 kwq->kw_cvkernelseq = (rw_wc & PTHRW_COUNT_MASK);
316 }
317 }
318
319 static inline void
320 _kwq_clear_preposted_wakeup(ksyn_wait_queue_t kwq)
321 {
322 kwq->kw_prepost.lseq = 0;
323 kwq->kw_prepost.sseq = PTHRW_RWS_INIT;
324 kwq->kw_prepost.count = 0;
325 }
326
327 static inline void
328 _kwq_mark_preposted_wakeup(ksyn_wait_queue_t kwq, uint32_t count,
329 uint32_t lseq, uint32_t sseq)
330 {
331 kwq->kw_prepost.count = count;
332 kwq->kw_prepost.lseq = lseq;
333 kwq->kw_prepost.sseq = sseq;
334 }
335
336 static inline void
337 _kwq_clear_interrupted_wakeup(ksyn_wait_queue_t kwq)
338 {
339 kwq->kw_intr.type = KWQ_INTR_NONE;
340 kwq->kw_intr.count = 0;
341 kwq->kw_intr.seq = 0;
342 kwq->kw_intr.returnbits = 0;
343 }
344
345 static inline void
346 _kwq_mark_interruped_wakeup(ksyn_wait_queue_t kwq, kwq_intr_type_t type,
347 uint32_t count, uint32_t lseq, uint32_t returnbits)
348 {
349 kwq->kw_intr.count = count;
350 kwq->kw_intr.seq = lseq;
351 kwq->kw_intr.returnbits = returnbits;
352 kwq->kw_intr.type = type;
353 }
354
355 static void
356 _kwq_destroy(ksyn_wait_queue_t kwq)
357 {
358 if (kwq->kw_owner) {
359 thread_deallocate(kwq->kw_owner);
360 }
361 lck_spin_destroy(&kwq->kw_lock, pthread_lck_grp);
362 zfree(kwq_zone, kwq);
363 }
364
365 #define KWQ_SET_OWNER_TRANSFER_REF 0x1
366
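/*
 * Set the kwq owner and return the previous owner so the caller can drop its
 * +1 reference once the spinlock has been released. Without
 * KWQ_SET_OWNER_TRANSFER_REF a fresh reference is taken on the new owner;
 * with it, the caller's reference is transferred (and handed straight back
 * if the owner is unchanged).
 */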
367 static inline thread_t
368 _kwq_set_owner(ksyn_wait_queue_t kwq, thread_t new_owner, int flags)
369 {
370 thread_t old_owner = kwq->kw_owner;
371 if (old_owner == new_owner) {
372 if (flags & KWQ_SET_OWNER_TRANSFER_REF) return new_owner;
373 return THREAD_NULL;
374 }
375 if ((flags & KWQ_SET_OWNER_TRANSFER_REF) == 0) {
376 thread_reference(new_owner);
377 }
378 kwq->kw_owner = new_owner;
379 return old_owner;
380 }
381
382 static inline thread_t
383 _kwq_clear_owner(ksyn_wait_queue_t kwq)
384 {
385 return _kwq_set_owner(kwq, THREAD_NULL, KWQ_SET_OWNER_TRANSFER_REF);
386 }
387
388 static inline void
389 _kwq_cleanup_old_owner(thread_t *thread)
390 {
391 if (*thread) {
392 thread_deallocate(*thread);
393 *thread = THREAD_NULL;
394 }
395 }
396
397 static void
398 CLEAR_REINIT_BITS(ksyn_wait_queue_t kwq)
399 {
400 if ((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_CVAR) {
401 if (kwq->kw_inqueue != 0 && kwq->kw_inqueue != kwq->kw_fakecount) {
402 panic("CV:entries in queue durinmg reinit %d:%d\n",kwq->kw_inqueue, kwq->kw_fakecount);
403 }
404 }
405 if ((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_RWLOCK) {
406 kwq->kw_nextseqword = PTHRW_RWS_INIT;
407 kwq->kw_kflags &= ~KSYN_KWF_OVERLAP_GUARD;
408 }
409 _kwq_clear_preposted_wakeup(kwq);
410 kwq->kw_lastunlockseq = PTHRW_RWL_INIT;
411 kwq->kw_lastseqword = PTHRW_RWS_INIT;
412 _kwq_clear_interrupted_wakeup(kwq);
413 kwq->kw_lword = 0;
414 kwq->kw_uword = 0;
415 kwq->kw_sword = PTHRW_RWS_INIT;
416 }
417
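/*
 * Consume a preposted (early) unlock for an arriving rwlock waiter. Only once
 * the prepost count drains to zero is the deferred unlock handling run;
 * returns true (with the update bits in *retval) when the caller can return
 * to userspace instead of blocking.
 */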
418 static bool
419 _kwq_handle_preposted_wakeup(ksyn_wait_queue_t kwq, uint32_t type,
420 uint32_t lseq, uint32_t *retval)
421 {
422 if (kwq->kw_prepost.count == 0 ||
423 !is_seqlower_eq(lseq, kwq->kw_prepost.lseq)) {
424 return false;
425 }
426
427 kwq->kw_prepost.count--;
428 if (kwq->kw_prepost.count > 0) {
429 return false;
430 }
431
432 int error, should_block = 0;
433 uint32_t updatebits = 0;
434 uint32_t pp_lseq = kwq->kw_prepost.lseq;
435 uint32_t pp_sseq = kwq->kw_prepost.sseq;
436 _kwq_clear_preposted_wakeup(kwq);
437
438 kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
439
440 error = kwq_handle_unlock(kwq, pp_lseq, pp_sseq, &updatebits,
441 (type | KW_UNLOCK_PREPOST), &should_block, lseq);
442 if (error) {
443 panic("_kwq_handle_preposted_wakeup: kwq_handle_unlock failed %d",
444 error);
445 }
446
447 if (should_block) {
448 return false;
449 }
450 *retval = updatebits;
451 return true;
452 }
453
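/*
 * Fast-path grant for read lockers while an overlapping read-grant window is
 * open: if the overlap guard is set and no writer is pending, bump the next
 * expected sequence and hand back a grant value with the M bit set.
 */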
454 static bool
455 _kwq_handle_overlap(ksyn_wait_queue_t kwq, uint32_t type, uint32_t lgenval,
456 uint32_t rw_wc, uint32_t *retval)
457 {
458 int res = 0;
459
460 // overlaps only occur on read lockers
461 if (type != PTH_RW_TYPE_READ) {
462 return false;
463 }
464
465 // check for overlap and no pending W bit (indicates writers)
466 if ((kwq->kw_kflags & KSYN_KWF_OVERLAP_GUARD) &&
467 !is_rws_savemask_set(rw_wc) && !is_rwl_wbit_set(lgenval)) {
468 /* overlap is set, so no need to check for valid state for overlap */
469
470 if (is_seqlower_eq(rw_wc, kwq->kw_nextseqword) || is_seqhigher_eq(kwq->kw_lastseqword, rw_wc)) {
471 /* increase the next expected seq by one */
472 kwq->kw_nextseqword += PTHRW_INC;
473 /* set count by one & bits from the nextseq and add M bit */
474 *retval = PTHRW_INC | ((kwq->kw_nextseqword & PTHRW_BIT_MASK) | PTH_RWL_MBIT);
475 res = 1;
476 }
477 }
478 return res;
479 }
480
481 static inline bool
482 _kwq_is_used(ksyn_wait_queue_t kwq)
483 {
484 return (kwq->kw_inqueue != 0 || kwq->kw_prepost.count != 0 ||
485 kwq->kw_intr.count != 0);
486 }
487
488 /*
489 * Consumes a pending interrupted waiter; returns true if the current
490 * thread should return to userspace because it was previously
491 * interrupted.
492 */
493 static inline bool
494 _kwq_handle_interrupted_wakeup(ksyn_wait_queue_t kwq, kwq_intr_type_t type,
495 uint32_t lseq, uint32_t *retval)
496 {
497 if (kwq->kw_intr.count != 0 && kwq->kw_intr.type == type &&
498 (!kwq->kw_intr.seq || is_seqlower_eq(lseq, kwq->kw_intr.seq))) {
499 kwq->kw_intr.count--;
500 *retval = kwq->kw_intr.returnbits;
501 if (kwq->kw_intr.returnbits == 0) {
502 _kwq_clear_interrupted_wakeup(kwq);
503 }
504 return true;
505 }
506 return false;
507 }
508
509 static void
510 pthread_list_lock(void)
511 {
512 lck_mtx_lock_spin(pthread_list_mlock);
513 }
514
515 static void
516 pthread_list_unlock(void)
517 {
518 lck_mtx_unlock(pthread_list_mlock);
519 }
520
521 static void
522 ksyn_wqlock(ksyn_wait_queue_t kwq)
523 {
524 lck_spin_lock(&kwq->kw_lock);
525 }
526
527 static void
528 ksyn_wqunlock(ksyn_wait_queue_t kwq)
529 {
530 lck_spin_unlock(&kwq->kw_lock);
531 }
532
533 /* routine to handle the mutex unlock posting, used both for the mutexunlock system call and for the drop during cond wait */
534 static uint32_t
535 _psynch_mutexdrop_internal(ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t ugen,
536 int flags)
537 {
538 kern_return_t ret;
539 uint32_t returnbits = 0;
540 uint32_t updatebits = 0;
541 int firstfit = (flags & _PTHREAD_MTX_OPT_POLICY_MASK) ==
542 _PTHREAD_MTX_OPT_POLICY_FIRSTFIT;
543 uint32_t nextgen = (ugen + PTHRW_INC);
544 thread_t old_owner = THREAD_NULL;
545
546 ksyn_wqlock(kwq);
547 kwq->kw_lastunlockseq = (ugen & PTHRW_COUNT_MASK);
548
549 redrive:
550 updatebits = (kwq->kw_highseq & PTHRW_COUNT_MASK) |
551 (PTH_RWL_EBIT | PTH_RWL_KBIT);
552
553 if (firstfit) {
554 if (kwq->kw_inqueue == 0) {
555 uint32_t count = kwq->kw_prepost.count + 1;
556 // Increment the number of preposters we have waiting
557 _kwq_mark_preposted_wakeup(kwq, count, mgen & PTHRW_COUNT_MASK, 0);
558 // We don't know the current owner as we've determined this mutex
559 // drop should have a preposted locker inbound into the kernel but
560 // we have no way of knowing who it is. When it arrives, the lock
561 // path will update the turnstile owner and return it to userspace.
562 old_owner = _kwq_clear_owner(kwq);
563 pthread_kern->psynch_wait_update_owner(kwq, THREAD_NULL,
564 &kwq->kw_turnstile);
565 PTHREAD_TRACE(psynch_mutex_kwqprepost, kwq->kw_addr,
566 kwq->kw_prepost.lseq, count, 0);
567 } else {
568 // signal first waiter
569 ret = ksyn_mtxsignal(kwq, NULL, updatebits, &old_owner);
570 if (ret == KERN_NOT_WAITING) {
571 // <rdar://problem/39093536> ksyn_mtxsignal attempts to signal
572 // the thread but it sets up the turnstile inheritor first.
573 // That means we can't redrive the mutex in a loop without
574 // dropping the wq lock and cleaning up the turnstile state.
575 ksyn_wqunlock(kwq);
576 pthread_kern->psynch_wait_cleanup();
577 _kwq_cleanup_old_owner(&old_owner);
578 ksyn_wqlock(kwq);
579 goto redrive;
580 }
581 }
582 } else {
583 bool prepost = false;
584 if (kwq->kw_inqueue == 0) {
585 // No waiters in the queue.
586 prepost = true;
587 } else {
588 uint32_t low_writer = (kwq->kw_ksynqueues[KSYN_QUEUE_WRITE].ksynq_firstnum & PTHRW_COUNT_MASK);
589 if (low_writer == nextgen) {
590 /* next seq to be granted found */
591 /* since the grant could be cv, make sure mutex wait is set in case the thread interrupted out */
592 ret = ksyn_mtxsignal(kwq, NULL,
593 updatebits | PTH_RWL_MTX_WAIT, &old_owner);
594 if (ret == KERN_NOT_WAITING) {
595 /* interrupt post */
596 _kwq_mark_interruped_wakeup(kwq, KWQ_INTR_WRITE, 1,
597 nextgen, updatebits);
598 }
599 } else if (is_seqhigher(low_writer, nextgen)) {
600 prepost = true;
601 } else {
602 //__FAILEDUSERTEST__("psynch_mutexdrop_internal: FS mutex unlock sequence higher than the lowest one in queue\n");
603 ksyn_waitq_element_t kwe;
604 kwe = ksyn_queue_find_seq(kwq,
605 &kwq->kw_ksynqueues[KSYN_QUEUE_WRITE], nextgen);
606 if (kwe != NULL) {
607 /* next seq to be granted found */
608 /* since the grant could be cv, make sure mutex wait is set in case the thread interrupted out */
609 ret = ksyn_mtxsignal(kwq, kwe,
610 updatebits | PTH_RWL_MTX_WAIT, &old_owner);
611 if (ret == KERN_NOT_WAITING) {
612 goto redrive;
613 }
614 } else {
615 prepost = true;
616 }
617 }
618 }
619 if (prepost) {
620 if (kwq->kw_prepost.count != 0) {
621 __FAILEDUSERTEST__("_psynch_mutexdrop_internal: multiple preposts\n");
622 } else {
623 _kwq_mark_preposted_wakeup(kwq, 1, nextgen & PTHRW_COUNT_MASK,
624 0);
625 }
626 old_owner = _kwq_clear_owner(kwq);
627 pthread_kern->psynch_wait_update_owner(kwq, THREAD_NULL,
628 &kwq->kw_turnstile);
629 }
630 }
631
632 ksyn_wqunlock(kwq);
633 pthread_kern->psynch_wait_cleanup();
634 _kwq_cleanup_old_owner(&old_owner);
635 ksyn_wqrelease(kwq, 1, KSYN_WQTYPE_MUTEXDROP);
636 return returnbits;
637 }
638
639 static int
640 _ksyn_check_init(ksyn_wait_queue_t kwq, uint32_t lgenval)
641 {
642 int res = (lgenval & PTHRW_RWL_INIT) != 0;
643 if (res) {
644 if ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) == 0) {
645 /* first to notice the reset of the lock, clear preposts */
646 CLEAR_REINIT_BITS(kwq);
647 kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
648 }
649 }
650 return res;
651 }
652
653 /*
654 * psynch_mutexwait: This system call is used to block on contended psynch
655 * mutexes.
656 */
657 int
658 _psynch_mutexwait(__unused proc_t p, user_addr_t mutex, uint32_t mgen,
659 uint32_t ugen, uint64_t tid, uint32_t flags, uint32_t *retval)
660 {
661 ksyn_wait_queue_t kwq;
662 int error = 0;
663 int firstfit = (flags & _PTHREAD_MTX_OPT_POLICY_MASK)
664 == _PTHREAD_MTX_OPT_POLICY_FIRSTFIT;
665 int ins_flags = SEQFIT;
666 uint32_t lseq = (mgen & PTHRW_COUNT_MASK);
667 uint32_t updatebits = 0;
668 thread_t tid_th = THREAD_NULL, old_owner = THREAD_NULL;
669
670 if (firstfit) {
671 /* first fit */
672 ins_flags = FIRSTFIT;
673 }
674
675 error = ksyn_wqfind(mutex, mgen, ugen, 0, flags,
676 (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_MTX), &kwq);
677 if (error != 0) {
678 return error;
679 }
680
681 again:
682 ksyn_wqlock(kwq);
683
684 if (_kwq_handle_interrupted_wakeup(kwq, KWQ_INTR_WRITE, lseq, retval)) {
685 old_owner = _kwq_set_owner(kwq, current_thread(), 0);
686 pthread_kern->psynch_wait_update_owner(kwq, kwq->kw_owner,
687 &kwq->kw_turnstile);
688 ksyn_wqunlock(kwq);
689 goto out;
690 }
691
692 if (kwq->kw_prepost.count && (firstfit || (lseq == kwq->kw_prepost.lseq))) {
693 /* got preposted lock */
694 kwq->kw_prepost.count--;
695
696 if (!firstfit) {
697 if (kwq->kw_prepost.count > 0) {
698 __FAILEDUSERTEST__("psynch_mutexwait: more than one prepost\n");
699 kwq->kw_prepost.lseq += PTHRW_INC; /* look for next one */
700 ksyn_wqunlock(kwq);
701 error = EINVAL;
702 goto out;
703 }
704 _kwq_clear_preposted_wakeup(kwq);
705 }
706
707 if (kwq->kw_inqueue == 0) {
708 updatebits = lseq | (PTH_RWL_KBIT | PTH_RWL_EBIT);
709 } else {
710 updatebits = (kwq->kw_highseq & PTHRW_COUNT_MASK) |
711 (PTH_RWL_KBIT | PTH_RWL_EBIT);
712 }
713 updatebits &= ~PTH_RWL_MTX_WAIT;
714
715 if (updatebits == 0) {
716 __FAILEDUSERTEST__("psynch_mutexwait(prepost): returning 0 lseq in mutexwait with no EBIT \n");
717 }
718
719 PTHREAD_TRACE(psynch_mutex_kwqprepost, kwq->kw_addr,
720 kwq->kw_prepost.lseq, kwq->kw_prepost.count, 1);
721
722 old_owner = _kwq_set_owner(kwq, current_thread(), 0);
723 pthread_kern->psynch_wait_update_owner(kwq, kwq->kw_owner,
724 &kwq->kw_turnstile);
725
726 ksyn_wqunlock(kwq);
727 *retval = updatebits;
728 goto out;
729 }
730
731 // mutexwait passes in an owner hint at the time userspace contended for
732 // the mutex, however, the owner tid in the userspace data structure may be
733 // unset or SWITCHING (-1), or it may correspond to a stale snapshot after
734 // the lock has subsequently been unlocked by another thread.
735 if (tid == thread_tid(kwq->kw_owner)) {
736 // userspace and kernel agree
737 } else if (tid == 0) {
738 // contender came in before owner could write TID
739 // let's assume that what the kernel knows is accurate
740 // for all we know this waiter came in late in the kernel
741 } else if (kwq->kw_lastunlockseq != PTHRW_RWL_INIT &&
742 is_seqlower(ugen, kwq->kw_lastunlockseq)) {
743 // owner is stale, someone has come in and unlocked since this
744 // contended read the TID, so assume what is known in the kernel is
745 // accurate
746 } else if (tid == PTHREAD_MTX_TID_SWITCHING) {
747 // userspace didn't know the owner because it was being unlocked, but
748 // that unlocker hasn't reached the kernel yet. So assume what is known
749 // in the kernel is accurate
750 } else {
751 // hint is being passed in for a specific thread, and we have no reason
752 // not to trust it (like the kernel unlock sequence being higher)
753 //
754 // So resolve the hint to a thread_t if we haven't done so yet
755 // and redrive as we dropped the lock
756 if (tid_th == THREAD_NULL) {
757 ksyn_wqunlock(kwq);
758 tid_th = pthread_kern->task_findtid(current_task(), tid);
759 if (tid_th == THREAD_NULL) tid = 0;
760 goto again;
761 }
762 tid_th = _kwq_set_owner(kwq, tid_th, KWQ_SET_OWNER_TRANSFER_REF);
763 }
764
765 if (tid_th) {
766 // We are on our way to block, and can't drop the spinlock anymore
767 pthread_kern->thread_deallocate_safe(tid_th);
768 tid_th = THREAD_NULL;
769 }
770 assert(old_owner == THREAD_NULL);
771 error = ksyn_wait(kwq, KSYN_QUEUE_WRITE, mgen, ins_flags, 0, 0,
772 psynch_mtxcontinue, kThreadWaitPThreadMutex);
773 // ksyn_wait drops wait queue lock
774 out:
775 pthread_kern->psynch_wait_cleanup();
776 ksyn_wqrelease(kwq, 1, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_MTX));
777 if (tid_th) {
778 thread_deallocate(tid_th);
779 }
780 if (old_owner) {
781 thread_deallocate(old_owner);
782 }
783 return error;
784 }
785
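/*
 * Continuation for psynch_mutexwait after blocking. Translates the wait
 * result into an errno; a waiter woken by signal/timeout removes itself from
 * the write queue, otherwise the grant bits (minus PTH_RWL_MTX_WAIT) are
 * returned to userspace.
 */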
786 void __dead2
787 psynch_mtxcontinue(void *parameter, wait_result_t result)
788 {
789 uthread_t uth = current_uthread();
790 ksyn_wait_queue_t kwq = parameter;
791 ksyn_waitq_element_t kwe = pthread_kern->uthread_get_uukwe(uth);
792
793 ksyn_wqlock(kwq);
794
795 int error = _wait_result_to_errno(result);
796 if (error != 0) {
797 if (kwe->kwe_kwqqueue) {
798 ksyn_queue_remove_item(kwq, &kwq->kw_ksynqueues[KSYN_QUEUE_WRITE], kwe);
799 }
800 } else {
801 uint32_t updatebits = kwe->kwe_psynchretval & ~PTH_RWL_MTX_WAIT;
802 pthread_kern->uthread_set_returnval(uth, updatebits);
803
804 if (updatebits == 0) {
805 __FAILEDUSERTEST__("psynch_mutexwait: returning 0 lseq in mutexwait with no EBIT \n");
806 }
807 }
808
809 pthread_kern->psynch_wait_complete(kwq, &kwq->kw_turnstile);
810
811 ksyn_wqunlock(kwq);
812 pthread_kern->psynch_wait_cleanup();
813 ksyn_wqrelease(kwq, 1, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_MTX));
814 pthread_kern->unix_syscall_return(error);
815 __builtin_unreachable();
816 }
817
818 static void __dead2
819 _psynch_rw_continue(ksyn_wait_queue_t kwq, kwq_queue_type_t kqi,
820 wait_result_t result)
821 {
822 uthread_t uth = current_uthread();
823 ksyn_waitq_element_t kwe = pthread_kern->uthread_get_uukwe(uth);
824
825 ksyn_wqlock(kwq);
826
827 int error = _wait_result_to_errno(result);
828 if (error != 0) {
829 if (kwe->kwe_kwqqueue) {
830 ksyn_queue_remove_item(kwq, &kwq->kw_ksynqueues[kqi], kwe);
831 }
832 } else {
833 pthread_kern->uthread_set_returnval(uth, kwe->kwe_psynchretval);
834 }
835
836 ksyn_wqunlock(kwq);
837 ksyn_wqrelease(kwq, 0, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_RWLOCK));
838
839 pthread_kern->unix_syscall_return(error);
840 __builtin_unreachable();
841 }
842
843 void __dead2
844 psynch_rw_rdcontinue(void *parameter, wait_result_t result)
845 {
846 _psynch_rw_continue(parameter, KSYN_QUEUE_READ, result);
847 }
848
849 void __dead2
850 psynch_rw_wrcontinue(void *parameter, wait_result_t result)
851 {
852 _psynch_rw_continue(parameter, KSYN_QUEUE_WRITE, result);
853 }
854
855 /*
856 * psynch_mutexdrop: This system call is used for unlock postings on contended psynch mutexes.
857 */
858 int
859 _psynch_mutexdrop(__unused proc_t p, user_addr_t mutex, uint32_t mgen,
860 uint32_t ugen, uint64_t tid __unused, uint32_t flags, uint32_t *retval)
861 {
862 int res;
863 ksyn_wait_queue_t kwq;
864
865 res = ksyn_wqfind(mutex, mgen, ugen, 0, flags, KSYN_WQTYPE_MUTEXDROP, &kwq);
866 if (res == 0) {
867 uint32_t updateval = _psynch_mutexdrop_internal(kwq, mgen, ugen, flags);
868 /* drops the kwq reference */
869 if (retval) {
870 *retval = updateval;
871 }
872 }
873
874 return res;
875 }
876
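/*
 * Wake one mutex waiter (the given kwe, or the head of the write queue) with
 * the supplied update bits. On success the woken thread becomes the kwq
 * owner, otherwise the owner is cleared; the previous owner (with its +1
 * reference) is returned through old_owner for the caller to release.
 */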
877 static kern_return_t
878 ksyn_mtxsignal(ksyn_wait_queue_t kwq, ksyn_waitq_element_t kwe,
879 uint32_t updateval, thread_t *old_owner)
880 {
881 kern_return_t ret;
882
883 if (!kwe) {
884 kwe = TAILQ_FIRST(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITE].ksynq_kwelist);
885 if (!kwe) {
886 panic("ksyn_mtxsignal: panic signaling empty queue");
887 }
888 }
889
890 PTHREAD_TRACE(psynch_mutex_kwqsignal | DBG_FUNC_START, kwq->kw_addr, kwe,
891 thread_tid(kwe->kwe_thread), kwq->kw_inqueue);
892
893 ret = ksyn_signal(kwq, KSYN_QUEUE_WRITE, kwe, updateval);
894 if (ret == KERN_SUCCESS) {
895 *old_owner = _kwq_set_owner(kwq, kwe->kwe_thread, 0);
896 } else {
897 *old_owner = _kwq_clear_owner(kwq);
898 }
899 PTHREAD_TRACE(psynch_mutex_kwqsignal | DBG_FUNC_END, kwq->kw_addr, kwe,
900 ret, 0);
901 return ret;
902 }
903
904
905 static void
906 ksyn_prepost(ksyn_wait_queue_t kwq, ksyn_waitq_element_t kwe, uint32_t state,
907 uint32_t lockseq)
908 {
909 bzero(kwe, sizeof(*kwe));
910 kwe->kwe_state = state;
911 kwe->kwe_lockseq = lockseq;
912 kwe->kwe_count = 1;
913
914 (void)ksyn_queue_insert(kwq, KSYN_QUEUE_WRITE, kwe, lockseq, SEQFIT);
915 kwq->kw_fakecount++;
916 }
917
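/*
 * Deliver a condvar signal: wake the specified thread if it is still waiting
 * in range, otherwise pick any eligible waiter up to uptoseq. If nobody is
 * waiting, a prepost entry is recorded (allocating one may briefly drop and
 * retake the kwq lock). The signal may be converted to a broadcast when that
 * is the only way to avoid leaving a waiter stranded.
 */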
918 static void
919 ksyn_cvsignal(ksyn_wait_queue_t ckwq, thread_t th, uint32_t uptoseq,
920 uint32_t signalseq, uint32_t *updatebits, int *broadcast,
921 ksyn_waitq_element_t *nkwep)
922 {
923 ksyn_waitq_element_t kwe = NULL;
924 ksyn_waitq_element_t nkwe = NULL;
925 ksyn_queue_t kq = &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITE];
926
927 uptoseq &= PTHRW_COUNT_MASK;
928
929 // Find the specified thread to wake.
930 if (th != THREAD_NULL) {
931 uthread_t uth = pthread_kern->get_bsdthread_info(th);
932 kwe = pthread_kern->uthread_get_uukwe(uth);
933 if (kwe->kwe_kwqqueue != ckwq ||
934 is_seqhigher(kwe->kwe_lockseq, uptoseq)) {
935 // Unless it's no longer waiting on this CV...
936 kwe = NULL;
937 // ...in which case we post a broadcast instead.
938 *broadcast = 1;
939 return;
940 }
941 }
942
943 // If no thread was specified, find any thread to wake (with the right
944 // sequence number).
945 while (th == THREAD_NULL) {
946 if (kwe == NULL) {
947 kwe = ksyn_queue_find_signalseq(ckwq, kq, uptoseq, signalseq);
948 }
949 if (kwe == NULL && nkwe == NULL) {
950 // No eligible entries; need to allocate a new
951 // entry to prepost. Loop to rescan after
952 // reacquiring the lock after allocation in
953 // case anything new shows up.
954 ksyn_wqunlock(ckwq);
955 nkwe = (ksyn_waitq_element_t)zalloc(kwe_zone);
956 ksyn_wqlock(ckwq);
957 } else {
958 break;
959 }
960 }
961
962 if (kwe != NULL) {
963 // If we found a thread to wake...
964 if (kwe->kwe_state == KWE_THREAD_INWAIT) {
965 if (is_seqlower(kwe->kwe_lockseq, signalseq)) {
966 /*
967 * A valid thread in our range, but lower than our signal.
968 * Matching it would consume this signal and could leave the thread
969 * the signal was originally meant for with nobody to wake it if/when
970 * that thread arrives.
971 *
972 * Convert to broadcast - may cause some spurious wakeups
973 * (allowed by spec), but avoids starvation (better choice).
974 */
975 *broadcast = 1;
976 } else {
977 (void)ksyn_signal(ckwq, KSYN_QUEUE_WRITE, kwe, PTH_RWL_MTX_WAIT);
978 *updatebits += PTHRW_INC;
979 }
980 } else if (kwe->kwe_state == KWE_THREAD_PREPOST) {
981 // Merge with existing prepost at same uptoseq.
982 kwe->kwe_count += 1;
983 } else if (kwe->kwe_state == KWE_THREAD_BROADCAST) {
984 // Existing broadcasts subsume this signal.
985 } else {
986 panic("unknown kwe state\n");
987 }
988 if (nkwe) {
989 /*
990 * If we allocated a new kwe above but then found a different kwe to
991 * use then we need to deallocate the spare one.
992 */
993 zfree(kwe_zone, nkwe);
994 nkwe = NULL;
995 }
996 } else if (nkwe != NULL) {
997 // ... otherwise, insert the newly allocated prepost.
998 ksyn_prepost(ckwq, nkwe, KWE_THREAD_PREPOST, uptoseq);
999 nkwe = NULL;
1000 } else {
1001 panic("failed to allocate kwe\n");
1002 }
1003
1004 *nkwep = nkwe;
1005 }
1006
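/*
 * Common backend for the cvsignal and cvbroad syscalls: validates the L, U
 * and S values, resolves the optional target thread port, then either signals
 * one waiter or broadcasts up to uptoseq and reports the update bits back.
 */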
1007 static int
1008 __psynch_cvsignal(user_addr_t cv, uint32_t cgen, uint32_t cugen,
1009 uint32_t csgen, uint32_t flags, int broadcast,
1010 mach_port_name_t threadport, uint32_t *retval)
1011 {
1012 int error = 0;
1013 thread_t th = THREAD_NULL;
1014 ksyn_wait_queue_t kwq;
1015
1016 uint32_t uptoseq = cgen & PTHRW_COUNT_MASK;
1017 uint32_t fromseq = (cugen & PTHRW_COUNT_MASK) + PTHRW_INC;
1018
1019 // validate sane L, U, and S values
1020 if ((threadport == 0 && is_seqhigher(fromseq, uptoseq)) || is_seqhigher(csgen, uptoseq)) {
1021 __FAILEDUSERTEST__("cvbroad: invalid L, U and S values\n");
1022 return EINVAL;
1023 }
1024
1025 if (threadport != 0) {
1026 th = port_name_to_thread((mach_port_name_t)threadport);
1027 if (th == THREAD_NULL) {
1028 return ESRCH;
1029 }
1030 }
1031
1032 error = ksyn_wqfind(cv, cgen, cugen, csgen, flags, (KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INDROP), &kwq);
1033 if (error == 0) {
1034 uint32_t updatebits = 0;
1035 ksyn_waitq_element_t nkwe = NULL;
1036
1037 ksyn_wqlock(kwq);
1038
1039 // update L, U and S...
1040 UPDATE_CVKWQ(kwq, cgen, cugen, csgen);
1041
1042 PTHREAD_TRACE(psynch_cvar_signal | DBG_FUNC_START, kwq->kw_addr,
1043 fromseq, uptoseq, broadcast);
1044
1045 if (!broadcast) {
1046 // No need to signal if the CV is already balanced.
1047 if (diff_genseq(kwq->kw_lword, kwq->kw_sword)) {
1048 ksyn_cvsignal(kwq, th, uptoseq, fromseq, &updatebits,
1049 &broadcast, &nkwe);
1050 PTHREAD_TRACE(psynch_cvar_signal, kwq->kw_addr, broadcast, 0,0);
1051 }
1052 }
1053
1054 if (broadcast) {
1055 ksyn_handle_cvbroad(kwq, uptoseq, &updatebits);
1056 }
1057
1058 kwq->kw_sword += (updatebits & PTHRW_COUNT_MASK);
1059 // set C or P bits and free if needed
1060 ksyn_cvupdate_fixup(kwq, &updatebits);
1061 *retval = updatebits;
1062
1063 PTHREAD_TRACE(psynch_cvar_signal | DBG_FUNC_END, kwq->kw_addr,
1064 updatebits, 0, 0);
1065
1066 ksyn_wqunlock(kwq);
1067
1068 pthread_kern->psynch_wait_cleanup();
1069
1070 if (nkwe != NULL) {
1071 zfree(kwe_zone, nkwe);
1072 }
1073
1074 ksyn_wqrelease(kwq, 1, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_CVAR));
1075 }
1076
1077 if (th != NULL) {
1078 thread_deallocate(th);
1079 }
1080
1081 return error;
1082 }
1083
1084 /*
1085 * psynch_cvbroad: This system call is used for broadcast posting on blocked waiters of psynch cvars.
1086 */
1087 int
1088 _psynch_cvbroad(__unused proc_t p, user_addr_t cv, uint64_t cvlsgen,
1089 uint64_t cvudgen, uint32_t flags, __unused user_addr_t mutex,
1090 __unused uint64_t mugen, __unused uint64_t tid, uint32_t *retval)
1091 {
1092 uint32_t diffgen = cvudgen & 0xffffffff;
1093 uint32_t count = diffgen >> PTHRW_COUNT_SHIFT;
1094 if (count > pthread_kern->get_task_threadmax()) {
1095 __FAILEDUSERTEST__("cvbroad: difference greater than maximum possible thread count\n");
1096 return EBUSY;
1097 }
1098
1099 uint32_t csgen = (cvlsgen >> 32) & 0xffffffff;
1100 uint32_t cgen = cvlsgen & 0xffffffff;
1101 uint32_t cugen = (cvudgen >> 32) & 0xffffffff;
1102
1103 return __psynch_cvsignal(cv, cgen, cugen, csgen, flags, 1, 0, retval);
1104 }
1105
1106 /*
1107 * psynch_cvsignal: This system call is used for signalling the blocked waiters of psynch cvars.
1108 */
1109 int
1110 _psynch_cvsignal(__unused proc_t p, user_addr_t cv, uint64_t cvlsgen,
1111 uint32_t cvugen, int threadport, __unused user_addr_t mutex,
1112 __unused uint64_t mugen, __unused uint64_t tid, uint32_t flags,
1113 uint32_t *retval)
1114 {
1115 uint32_t csgen = (cvlsgen >> 32) & 0xffffffff;
1116 uint32_t cgen = cvlsgen & 0xffffffff;
1117
1118 return __psynch_cvsignal(cv, cgen, cvugen, csgen, flags, 0, threadport, retval);
1119 }
1120
1121 /*
1122 * psynch_cvwait: This system call is used for psynch cvar waiters to block in kernel.
1123 */
1124 int
1125 _psynch_cvwait(__unused proc_t p, user_addr_t cv, uint64_t cvlsgen,
1126 uint32_t cvugen, user_addr_t mutex, uint64_t mugen, uint32_t flags,
1127 int64_t sec, uint32_t nsec, uint32_t *retval)
1128 {
1129 int error = 0;
1130 uint32_t updatebits = 0;
1131 ksyn_wait_queue_t ckwq = NULL;
1132 ksyn_waitq_element_t kwe, nkwe = NULL;
1133
1134 /* for conformance reasons */
1135 pthread_kern->__pthread_testcancel(0);
1136
1137 uint32_t csgen = (cvlsgen >> 32) & 0xffffffff;
1138 uint32_t cgen = cvlsgen & 0xffffffff;
1139 uint32_t ugen = (mugen >> 32) & 0xffffffff;
1140 uint32_t mgen = mugen & 0xffffffff;
1141
1142 uint32_t lockseq = (cgen & PTHRW_COUNT_MASK);
1143
1144 /*
1145 * In cvwait the U word can be out of range, as the cv could be used only
1146 * for timeouts. However, the S word needs to be within bounds and is
1147 * validated at user level as well.
1148 */
1149 if (is_seqhigher_eq(csgen, lockseq) != 0) {
1150 __FAILEDUSERTEST__("psync_cvwait: invalid sequence numbers\n");
1151 return EINVAL;
1152 }
1153
1154 PTHREAD_TRACE(psynch_cvar_kwait | DBG_FUNC_START, cv, mutex, cgen, 0);
1155
1156 error = ksyn_wqfind(cv, cgen, cvugen, csgen, flags, KSYN_WQTYPE_CVAR | KSYN_WQTYPE_INWAIT, &ckwq);
1157 if (error != 0) {
1158 return error;
1159 }
1160
1161 if (mutex != 0) {
1162 uint32_t mutexrv = 0;
1163 error = _psynch_mutexdrop(NULL, mutex, mgen, ugen, 0, flags, &mutexrv);
1164 if (error != 0) {
1165 goto out;
1166 }
1167 }
1168
1169 ksyn_wqlock(ckwq);
1170
1171 // update L, U and S...
1172 UPDATE_CVKWQ(ckwq, cgen, cvugen, csgen);
1173
1174 /* Look for the sequence for prepost (or a conflicting thread) */
1175 ksyn_queue_t kq = &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITE];
1176 kwe = ksyn_queue_find_cvpreposeq(kq, lockseq);
1177 if (kwe != NULL) {
1178 if (kwe->kwe_state == KWE_THREAD_PREPOST) {
1179 if ((kwe->kwe_lockseq & PTHRW_COUNT_MASK) == lockseq) {
1180 /* we can safely consume a reference, so do so */
1181 if (--kwe->kwe_count == 0) {
1182 ksyn_queue_remove_item(ckwq, kq, kwe);
1183 ckwq->kw_fakecount--;
1184 nkwe = kwe;
1185 }
1186 } else {
1187 /*
1188 * consuming a prepost higher than our lock sequence is valid, but
1189 * can leave the higher thread without a match. Convert the entry
1190 * to a broadcast to compensate for this.
1191 */
1192 ksyn_handle_cvbroad(ckwq, kwe->kwe_lockseq, &updatebits);
1193 #if __TESTPANICS__
1194 if (updatebits != 0)
1195 panic("psync_cvwait: convert pre-post to broadcast: woke up %d threads that shouldn't be there\n", updatebits);
1196 #endif /* __TESTPANICS__ */
1197 }
1198 } else if (kwe->kwe_state == KWE_THREAD_BROADCAST) {
1199 // XXX
1200 // Nothing to do.
1201 } else if (kwe->kwe_state == KWE_THREAD_INWAIT) {
1202 __FAILEDUSERTEST__("cvwait: thread entry with same sequence already present\n");
1203 error = EBUSY;
1204 } else {
1205 panic("psync_cvwait: unexpected wait queue element type\n");
1206 }
1207
1208 if (error == 0) {
1209 updatebits |= PTHRW_INC;
1210 ckwq->kw_sword += PTHRW_INC;
1211
1212 /* set C or P bits and free if needed */
1213 ksyn_cvupdate_fixup(ckwq, &updatebits);
1214 *retval = updatebits;
1215 }
1216 } else {
1217 uint64_t abstime = 0;
1218 uint16_t kwe_flags = 0;
1219
1220 if (sec != 0 || (nsec & 0x3fffffff) != 0) {
1221 struct timespec ts;
1222 ts.tv_sec = (__darwin_time_t)sec;
1223 ts.tv_nsec = (nsec & 0x3fffffff);
1224 nanoseconds_to_absolutetime(
1225 (uint64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec, &abstime);
1226 clock_absolutetime_interval_to_deadline(abstime, &abstime);
1227 }
1228
1229 PTHREAD_TRACE(psynch_cvar_kwait, cv, mutex, kwe_flags, 1);
1230
1231 error = ksyn_wait(ckwq, KSYN_QUEUE_WRITE, cgen, SEQFIT, abstime,
1232 kwe_flags, psynch_cvcontinue, kThreadWaitPThreadCondVar);
1233 // ksyn_wait drops wait queue lock
1234 }
1235
1236 ksyn_wqunlock(ckwq);
1237
1238 if (nkwe != NULL) {
1239 zfree(kwe_zone, nkwe);
1240 }
1241 out:
1242
1243 PTHREAD_TRACE(psynch_cvar_kwait | DBG_FUNC_END, cv, error, updatebits, 2);
1244
1245 ksyn_wqrelease(ckwq, 1, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_CVAR));
1246 return error;
1247 }
1248
1249
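/*
 * Continuation for psynch_cvwait after blocking. On signal/timeout the waiter
 * removes itself from the queue and fixes up the C/P accounting, unless the
 * grant raced in; on a normal wakeup it simply returns the grant bits.
 */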
1250 void __dead2
1251 psynch_cvcontinue(void *parameter, wait_result_t result)
1252 {
1253 uthread_t uth = current_uthread();
1254 ksyn_wait_queue_t ckwq = parameter;
1255 ksyn_waitq_element_t kwe = pthread_kern->uthread_get_uukwe(uth);
1256
1257 int error = _wait_result_to_errno(result);
1258 if (error != 0) {
1259 ksyn_wqlock(ckwq);
1260 /* just in case it got woken up as we were granting */
1261 int retval = kwe->kwe_psynchretval;
1262 pthread_kern->uthread_set_returnval(uth, retval);
1263
1264 if (kwe->kwe_kwqqueue) {
1265 ksyn_queue_remove_item(ckwq, &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITE], kwe);
1266 }
1267 if ((kwe->kwe_psynchretval & PTH_RWL_MTX_WAIT) != 0) {
1268 /* the condition var was granted.
1269 * reset the error so that the thread returns normally.
1270 */
1271 error = 0;
1272 /* no need to set any bits just return as cvsig/broad covers this */
1273 } else {
1274 ckwq->kw_sword += PTHRW_INC;
1275
1276 /* set C and P bits, in the local error */
1277 if ((ckwq->kw_lword & PTHRW_COUNT_MASK) == (ckwq->kw_sword & PTHRW_COUNT_MASK)) {
1278 PTHREAD_TRACE(psynch_cvar_zeroed, ckwq->kw_addr,
1279 ckwq->kw_lword, ckwq->kw_sword, ckwq->kw_inqueue);
1280 error |= ECVCLEARED;
1281 if (ckwq->kw_inqueue != 0) {
1282 ksyn_queue_free_items(ckwq, KSYN_QUEUE_WRITE, ckwq->kw_lword, 1);
1283 }
1284 ckwq->kw_lword = ckwq->kw_uword = ckwq->kw_sword = 0;
1285 ckwq->kw_kflags |= KSYN_KWF_ZEROEDOUT;
1286 } else {
1287 /* everything in the queue is a fake entry? */
1288 if (ckwq->kw_inqueue != 0 && ckwq->kw_fakecount == ckwq->kw_inqueue) {
1289 error |= ECVPREPOST;
1290 }
1291 }
1292 }
1293 ksyn_wqunlock(ckwq);
1294
1295 PTHREAD_TRACE(psynch_cvar_kwait | DBG_FUNC_END, ckwq->kw_addr,
1296 error, 0, 3);
1297 } else {
1298 int val = 0;
1299 // PTH_RWL_MTX_WAIT is removed
1300 if ((kwe->kwe_psynchretval & PTH_RWS_CV_MBIT) != 0) {
1301 val = PTHRW_INC | PTH_RWS_CV_CBIT;
1302 }
1303 PTHREAD_TRACE(psynch_cvar_kwait | DBG_FUNC_END, ckwq->kw_addr,
1304 val, 0, 4);
1305 pthread_kern->uthread_set_returnval(uth, val);
1306 }
1307
1308 ksyn_wqrelease(ckwq, 1, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_CVAR));
1309 pthread_kern->unix_syscall_return(error);
1310 __builtin_unreachable();
1311 }
1312
1313 /*
1314 * psynch_cvclrprepost: This system call clears pending prepost if present.
1315 */
1316 int
1317 _psynch_cvclrprepost(__unused proc_t p, user_addr_t cv, uint32_t cvgen,
1318 uint32_t cvugen, uint32_t cvsgen, __unused uint32_t prepocnt,
1319 uint32_t preposeq, uint32_t flags, int *retval)
1320 {
1321 int error = 0;
1322 int mutex = (flags & _PTHREAD_MTX_OPT_MUTEX);
1323 int wqtype = (mutex ? KSYN_WQTYPE_MTX : KSYN_WQTYPE_CVAR) | KSYN_WQTYPE_INDROP;
1324 ksyn_wait_queue_t kwq = NULL;
1325
1326 *retval = 0;
1327
1328 error = ksyn_wqfind(cv, cvgen, cvugen, mutex ? 0 : cvsgen, flags, wqtype,
1329 &kwq);
1330 if (error != 0) {
1331 return error;
1332 }
1333
1334 ksyn_wqlock(kwq);
1335
1336 if (mutex) {
1337 int firstfit = (flags & _PTHREAD_MTX_OPT_POLICY_MASK)
1338 == _PTHREAD_MTX_OPT_POLICY_FIRSTFIT;
1339 if (firstfit && kwq->kw_prepost.count) {
1340 if (is_seqlower_eq(kwq->kw_prepost.lseq, cvgen)) {
1341 PTHREAD_TRACE(psynch_mutex_kwqprepost, kwq->kw_addr,
1342 kwq->kw_prepost.lseq, 0, 2);
1343 _kwq_clear_preposted_wakeup(kwq);
1344 }
1345 }
1346 } else {
1347 PTHREAD_TRACE(psynch_cvar_clrprepost, kwq->kw_addr, wqtype,
1348 preposeq, 0);
1349 ksyn_queue_free_items(kwq, KSYN_QUEUE_WRITE, preposeq, 0);
1350 }
1351
1352 ksyn_wqunlock(kwq);
1353 ksyn_wqrelease(kwq, 1, wqtype);
1354 return error;
1355 }
1356
1357 /* ***************** pthread_rwlock ************************ */
1358
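/*
 * Common slow path for the rwlock reader and writer lock syscalls: after
 * looking up the kwq it first tries to consume an interrupted wakeup, an
 * overlapping read grant, or a prepost; failing that, the thread blocks on
 * the read or write queue with the matching continuation.
 */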
1359 static int
1360 __psynch_rw_lock(int type, user_addr_t rwlock, uint32_t lgenval,
1361 uint32_t ugenval, uint32_t rw_wc, int flags, uint32_t *retval)
1362 {
1363 uint32_t lockseq = lgenval & PTHRW_COUNT_MASK;
1364 ksyn_wait_queue_t kwq;
1365 int error, prepost_type, kqi;
1366 thread_continue_t tc;
1367
1368 if (type == PTH_RW_TYPE_READ) {
1369 prepost_type = KW_UNLOCK_PREPOST_READLOCK;
1370 kqi = KSYN_QUEUE_READ;
1371 tc = psynch_rw_rdcontinue;
1372 } else {
1373 prepost_type = KW_UNLOCK_PREPOST_WRLOCK;
1374 kqi = KSYN_QUEUE_WRITE;
1375 tc = psynch_rw_wrcontinue;
1376 }
1377
1378 error = ksyn_wqfind(rwlock, lgenval, ugenval, rw_wc, flags,
1379 (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_RWLOCK), &kwq);
1380 if (error != 0) {
1381 return error;
1382 }
1383
1384 ksyn_wqlock(kwq);
1385 _ksyn_check_init(kwq, lgenval);
1386 if (_kwq_handle_interrupted_wakeup(kwq, type, lockseq, retval) ||
1387 // handle overlap first as they are not counted against pre_rwwc
1388 // handle_overlap uses the flags in lgenval (vs. lockseq)
1389 _kwq_handle_overlap(kwq, type, lgenval, rw_wc, retval) ||
1390 _kwq_handle_preposted_wakeup(kwq, prepost_type, lockseq, retval)) {
1391 ksyn_wqunlock(kwq);
1392 goto out;
1393 }
1394
1395 block_hint_t block_hint = type == PTH_RW_TYPE_READ ?
1396 kThreadWaitPThreadRWLockRead : kThreadWaitPThreadRWLockWrite;
1397 error = ksyn_wait(kwq, kqi, lgenval, SEQFIT, 0, 0, tc, block_hint);
1398 // ksyn_wait drops wait queue lock
1399 out:
1400 ksyn_wqrelease(kwq, 0, (KSYN_WQTYPE_INWAIT | KSYN_WQTYPE_RWLOCK));
1401 return error;
1402 }
1403
1404 /*
1405 * psynch_rw_rdlock: This system call is used for psync rwlock readers to block.
1406 */
1407 int
1408 _psynch_rw_rdlock(__unused proc_t p, user_addr_t rwlock, uint32_t lgenval,
1409 uint32_t ugenval, uint32_t rw_wc, int flags, uint32_t *retval)
1410 {
1411 return __psynch_rw_lock(PTH_RW_TYPE_READ, rwlock, lgenval, ugenval, rw_wc,
1412 flags, retval);
1413 }
1414
1415 /*
1416 * psynch_rw_longrdlock: This system call is used for psync rwlock long readers to block.
1417 */
1418 int
1419 _psynch_rw_longrdlock(__unused proc_t p, __unused user_addr_t rwlock,
1420 __unused uint32_t lgenval, __unused uint32_t ugenval,
1421 __unused uint32_t rw_wc, __unused int flags, __unused uint32_t *retval)
1422 {
1423 return ESRCH;
1424 }
1425
1426
1427 /*
1428 * psynch_rw_wrlock: This system call is used for psync rwlock writers to block.
1429 */
1430 int
1431 _psynch_rw_wrlock(__unused proc_t p, user_addr_t rwlock, uint32_t lgenval,
1432 uint32_t ugenval, uint32_t rw_wc, int flags, uint32_t *retval)
1433 {
1434 return __psynch_rw_lock(PTH_RW_TYPE_WRITE, rwlock, lgenval, ugenval,
1435 rw_wc, flags, retval);
1436 }
1437
1438 /*
1439 * psynch_rw_yieldwrlock: This system call is used for psync rwlock yielding writers to block.
1440 */
1441 int
1442 _psynch_rw_yieldwrlock(__unused proc_t p, __unused user_addr_t rwlock,
1443 __unused uint32_t lgenval, __unused uint32_t ugenval,
1444 __unused uint32_t rw_wc, __unused int flags, __unused uint32_t *retval)
1445 {
1446 return ESRCH;
1447 }
1448
1449 /*
1450 * psynch_rw_unlock: This system call is used for unlock state postings. This will grant appropriate
1451 * reader/writer variety lock.
1452 */
1453 int
1454 _psynch_rw_unlock(__unused proc_t p, user_addr_t rwlock, uint32_t lgenval,
1455 uint32_t ugenval, uint32_t rw_wc, int flags, uint32_t *retval)
1456 {
1457 int error = 0;
1458 ksyn_wait_queue_t kwq;
1459 uint32_t updatebits = 0;
1460 int diff;
1461 uint32_t count = 0;
1462 uint32_t curgen = lgenval & PTHRW_COUNT_MASK;
1463 int clearedkflags = 0;
1464
1465 error = ksyn_wqfind(rwlock, lgenval, ugenval, rw_wc, flags,
1466 (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_RWLOCK), &kwq);
1467 if (error != 0) {
1468 return(error);
1469 }
1470
1471 ksyn_wqlock(kwq);
1472 int isinit = _ksyn_check_init(kwq, lgenval);
1473
1474 /* if lastunlock seq is set, ensure the current one is not lower than that, as it would be spurious */
1475 if ((kwq->kw_lastunlockseq != PTHRW_RWL_INIT) &&
1476 (is_seqlower(ugenval, kwq->kw_lastunlockseq)!= 0)) {
1477 error = 0;
1478 goto out;
1479 }
1480
1481 /* If L-U != num of waiters, then it needs to be preposted or spr */
1482 diff = find_diff(lgenval, ugenval);
1483
1484 if (find_seq_till(kwq, curgen, diff, &count) == 0) {
1485 if ((count == 0) || (count < (uint32_t)diff))
1486 goto prepost;
1487 }
1488
1489 /* no prepost and all threads are in place, reset the bit */
1490 if ((isinit != 0) && ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0)){
1491 kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
1492 clearedkflags = 1;
1493 }
1494
1495 /* can handle unlock now */
1496
1497 _kwq_clear_preposted_wakeup(kwq);
1498
1499 error = kwq_handle_unlock(kwq, lgenval, rw_wc, &updatebits, 0, NULL, 0);
1500 #if __TESTPANICS__
1501 if (error != 0)
1502 panic("psynch_rw_unlock: kwq_handle_unlock failed %d\n",error);
1503 #endif /* __TESTPANICS__ */
1504 out:
1505 if (error == 0) {
1506 /* update bits?? */
1507 *retval = updatebits;
1508 }
1509
1510 // <rdar://problem/22244050> If any of the wakeups failed because they
1511 // already returned to userspace because of a signal then we need to ensure
1512 // that the reset state is not cleared when that thread returns. Otherwise,
1513 // _pthread_rwlock_lock will clear the interrupted state before it is read.
1514 if (clearedkflags != 0 && kwq->kw_intr.count > 0) {
1515 kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
1516 }
1517
1518 ksyn_wqunlock(kwq);
1519 pthread_kern->psynch_wait_cleanup();
1520 ksyn_wqrelease(kwq, 0, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_RWLOCK));
1521
1522 return(error);
1523
1524 prepost:
1525 /* update if the new seq is higher than prev prepost, or first set */
1526 if (is_rws_sbit_set(kwq->kw_prepost.sseq) ||
1527 is_seqhigher_eq(rw_wc, kwq->kw_prepost.sseq)) {
1528 _kwq_mark_preposted_wakeup(kwq, diff - count, curgen, rw_wc);
1529 updatebits = lgenval; /* let this not do unlock handling */
1530 }
1531 error = 0;
1532 goto out;
1533 }
1534
1535
1536 /* ************************************************************************** */
1537 void
1538 pth_global_hashinit(void)
1539 {
1540 pth_glob_hashtbl = hashinit(PTH_HASHSIZE * 4, M_PROC, &pthhash);
1541 }
1542
1543 void
1544 _pth_proc_hashinit(proc_t p)
1545 {
1546 void *ptr = hashinit(PTH_HASHSIZE, M_PCB, &pthhash);
1547 if (ptr == NULL) {
1548 panic("pth_proc_hashinit: hash init returned 0\n");
1549 }
1550
1551 pthread_kern->proc_set_pthhash(p, ptr);
1552 }
1553
1554
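/*
 * Look up a kwq in either the global hash (process-shared, keyed by VM object
 * and offset) or the per-process hash (keyed by user address). Returns the
 * matching entry (or NULL) through out_kwq along with the hash table used.
 */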
1555 static int
1556 ksyn_wq_hash_lookup(user_addr_t uaddr, proc_t p, int flags,
1557 ksyn_wait_queue_t *out_kwq, struct pthhashhead **out_hashptr,
1558 uint64_t object, uint64_t offset)
1559 {
1560 int res = 0;
1561 ksyn_wait_queue_t kwq;
1562 struct pthhashhead *hashptr;
1563 if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED) {
1564 hashptr = pth_glob_hashtbl;
1565 LIST_FOREACH(kwq, &hashptr[object & pthhash], kw_hash) {
1566 if (kwq->kw_object == object && kwq->kw_offset == offset) {
1567 break;
1568 }
1569 }
1570 } else {
1571 hashptr = pthread_kern->proc_get_pthhash(p);
1572 LIST_FOREACH(kwq, &hashptr[uaddr & pthhash], kw_hash) {
1573 if (kwq->kw_addr == uaddr) {
1574 break;
1575 }
1576 }
1577 }
1578 *out_kwq = kwq;
1579 *out_hashptr = hashptr;
1580 return res;
1581 }
1582
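/*
 * Tear down a process's psynch hash at exit: unhash every kwq, free any fake
 * (prepost/broadcast) cvar entries still queued, destroy the kwq itself, and
 * finally release the hash table.
 */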
1583 void
1584 _pth_proc_hashdelete(proc_t p)
1585 {
1586 struct pthhashhead * hashptr;
1587 ksyn_wait_queue_t kwq;
1588 unsigned long hashsize = pthhash + 1;
1589 unsigned long i;
1590
1591 hashptr = pthread_kern->proc_get_pthhash(p);
1592 pthread_kern->proc_set_pthhash(p, NULL);
1593 if (hashptr == NULL) {
1594 return;
1595 }
1596
1597 pthread_list_lock();
1598 for (i = 0; i < hashsize; i++) {
1599 while ((kwq = LIST_FIRST(&hashptr[i])) != NULL) {
1600 if ((kwq->kw_pflags & KSYN_WQ_INHASH) != 0) {
1601 kwq->kw_pflags &= ~KSYN_WQ_INHASH;
1602 LIST_REMOVE(kwq, kw_hash);
1603 }
1604 if ((kwq->kw_pflags & KSYN_WQ_FLIST) != 0) {
1605 kwq->kw_pflags &= ~KSYN_WQ_FLIST;
1606 LIST_REMOVE(kwq, kw_list);
1607 }
1608 pthread_list_unlock();
1609 /* release fake entries if present for cvars */
1610 if (((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_CVAR) && (kwq->kw_inqueue != 0))
1611 ksyn_freeallkwe(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITE]);
1612 _kwq_destroy(kwq);
1613 pthread_list_lock();
1614 }
1615 }
1616 pthread_list_unlock();
1617 FREE(hashptr, M_PROC);
1618 }
1619
1620 /* no lock held for this as the waitqueue is getting freed */
1621 void
1622 ksyn_freeallkwe(ksyn_queue_t kq)
1623 {
1624 ksyn_waitq_element_t kwe;
1625 while ((kwe = TAILQ_FIRST(&kq->ksynq_kwelist)) != NULL) {
1626 TAILQ_REMOVE(&kq->ksynq_kwelist, kwe, kwe_list);
1627 if (kwe->kwe_state != KWE_THREAD_INWAIT) {
1628 zfree(kwe_zone, kwe);
1629 }
1630 }
1631 }
1632
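/*
 * Report (via __FAILEDUSERTEST2__ and tracepoints) why this kwq is still
 * considered busy when userspace tries to reuse its address as a different
 * synchronizer type.
 */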
1633 static inline void
1634 _kwq_report_inuse(ksyn_wait_queue_t kwq)
1635 {
1636 if (kwq->kw_prepost.count != 0) {
1637 __FAILEDUSERTEST2__("uaddr 0x%llx busy for synch type 0x%x [pre %d:0x%x:0x%x]",
1638 (uint64_t)kwq->kw_addr, kwq->kw_type, kwq->kw_prepost.count,
1639 kwq->kw_prepost.lseq, kwq->kw_prepost.sseq);
1640 PTHREAD_TRACE(psynch_mutex_kwqcollision, kwq->kw_addr,
1641 kwq->kw_type, 1, 0);
1642 }
1643 if (kwq->kw_intr.count != 0) {
1644 __FAILEDUSERTEST2__("uaddr 0x%llx busy for synch type 0x%x [intr %d:0x%x:0x%x:0x%x]",
1645 (uint64_t)kwq->kw_addr, kwq->kw_type, kwq->kw_intr.count,
1646 kwq->kw_intr.type, kwq->kw_intr.seq,
1647 kwq->kw_intr.returnbits);
1648 PTHREAD_TRACE(psynch_mutex_kwqcollision, kwq->kw_addr,
1649 kwq->kw_type, 2, 0);
1650 }
1651 if (kwq->kw_iocount) {
1652 __FAILEDUSERTEST2__("uaddr 0x%llx busy for synch type 0x%x [ioc %d:%d]",
1653 (uint64_t)kwq->kw_addr, kwq->kw_type, kwq->kw_iocount,
1654 kwq->kw_dropcount);
1655 PTHREAD_TRACE(psynch_mutex_kwqcollision, kwq->kw_addr,
1656 kwq->kw_type, 3, 0);
1657 }
1658 if (kwq->kw_inqueue) {
1659 __FAILEDUSERTEST2__("uaddr 0x%llx busy for synch type 0x%x [inq %d:%d]",
1660 (uint64_t)kwq->kw_addr, kwq->kw_type, kwq->kw_inqueue,
1661 kwq->kw_fakecount);
1662 PTHREAD_TRACE(psynch_mutex_kwqcollision, kwq->kw_addr, kwq->kw_type,
1663 4, 0);
1664 }
1665 }
1666
1667 /* find kernel waitqueue, if not present create one. Grants a reference */
1668 int
1669 ksyn_wqfind(user_addr_t uaddr, uint32_t mgen, uint32_t ugen, uint32_t sgen,
1670 int flags, int wqtype, ksyn_wait_queue_t *kwqp)
1671 {
1672 int res = 0;
1673 ksyn_wait_queue_t kwq = NULL;
1674 ksyn_wait_queue_t nkwq = NULL;
1675 struct pthhashhead *hashptr;
1676 proc_t p = current_proc();
1677
1678 uint64_t object = 0, offset = 0;
1679 if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED) {
1680 res = ksyn_findobj(uaddr, &object, &offset);
1681 hashptr = pth_glob_hashtbl;
1682 } else {
1683 hashptr = pthread_kern->proc_get_pthhash(p);
1684 }
1685
1686 while (res == 0) {
1687 pthread_list_lock();
1688 res = ksyn_wq_hash_lookup(uaddr, current_proc(), flags, &kwq, &hashptr,
1689 object, offset);
1690 if (res != 0) {
1691 pthread_list_unlock();
1692 break;
1693 }
1694 if (kwq == NULL && nkwq == NULL) {
1695 // Drop the lock to allocate a new kwq and retry.
1696 pthread_list_unlock();
1697
1698 nkwq = (ksyn_wait_queue_t)zalloc(kwq_zone);
1699 bzero(nkwq, sizeof(struct ksyn_wait_queue));
1700 int i;
1701 for (i = 0; i < KSYN_QUEUE_MAX; i++) {
1702 ksyn_queue_init(&nkwq->kw_ksynqueues[i]);
1703 }
1704 lck_spin_init(&nkwq->kw_lock, pthread_lck_grp, pthread_lck_attr);
1705 continue;
1706 } else if (kwq == NULL && nkwq != NULL) {
1707 // Still not found, add the new kwq to the hash.
1708 kwq = nkwq;
1709 nkwq = NULL; // Don't free.
1710 if ((flags & PTHREAD_PSHARED_FLAGS_MASK) == PTHREAD_PROCESS_SHARED) {
1711 kwq->kw_pflags |= KSYN_WQ_SHARED;
1712 LIST_INSERT_HEAD(&hashptr[object & pthhash], kwq, kw_hash);
1713 } else {
1714 LIST_INSERT_HEAD(&hashptr[uaddr & pthhash], kwq, kw_hash);
1715 }
1716 kwq->kw_pflags |= KSYN_WQ_INHASH;
1717 } else if (kwq != NULL) {
1718 // Found an existing kwq, use it.
1719 if ((kwq->kw_pflags & KSYN_WQ_FLIST) != 0) {
1720 LIST_REMOVE(kwq, kw_list);
1721 kwq->kw_pflags &= ~KSYN_WQ_FLIST;
1722 }
1723 if ((kwq->kw_type & KSYN_WQTYPE_MASK) != (wqtype & KSYN_WQTYPE_MASK)) {
1724 if (!_kwq_is_used(kwq)) {
1725 if (kwq->kw_iocount == 0) {
1726 kwq->kw_type = 0; // mark for reinitialization
1727 } else if (kwq->kw_iocount == 1 &&
1728 kwq->kw_dropcount == kwq->kw_iocount) {
1729 /* if all users are unlockers then wait for it to finish */
1730 kwq->kw_pflags |= KSYN_WQ_WAITING;
1731 // Drop the lock and wait for the kwq to be free.
1732 (void)msleep(&kwq->kw_pflags, pthread_list_mlock,
1733 PDROP, "ksyn_wqfind", 0);
1734 continue;
1735 } else {
1736 _kwq_report_inuse(kwq);
1737 res = EINVAL;
1738 }
1739 } else {
1740 _kwq_report_inuse(kwq);
1741 res = EINVAL;
1742 }
1743 }
1744 }
1745 if (res == 0) {
1746 if (kwq->kw_type == 0) {
1747 kwq->kw_addr = uaddr;
1748 kwq->kw_object = object;
1749 kwq->kw_offset = offset;
1750 kwq->kw_type = (wqtype & KSYN_WQTYPE_MASK);
1751 CLEAR_REINIT_BITS(kwq);
1752 kwq->kw_lword = mgen;
1753 kwq->kw_uword = ugen;
1754 kwq->kw_sword = sgen;
1755 kwq->kw_owner = THREAD_NULL;
1756 kwq->kw_kflags = 0;
1757 kwq->kw_qos_override = THREAD_QOS_UNSPECIFIED;
1758 PTHREAD_TRACE(psynch_mutex_kwqallocate | DBG_FUNC_START, uaddr,
1759 kwq->kw_type, kwq, 0);
1760 PTHREAD_TRACE(psynch_mutex_kwqallocate | DBG_FUNC_END, uaddr,
1761 mgen, ugen, sgen);
1762 }
1763 kwq->kw_iocount++;
1764 if (wqtype == KSYN_WQTYPE_MUTEXDROP) {
1765 kwq->kw_dropcount++;
1766 }
1767 }
1768 pthread_list_unlock();
1769 break;
1770 }
1771 if (kwqp != NULL) {
1772 *kwqp = kwq;
1773 }
1774 if (nkwq) {
1775 _kwq_destroy(nkwq);
1776 }
1777 return res;
1778 }
1779
1780 /* Reference from find is dropped here. Starts the free process if needed */
1781 void
1782 ksyn_wqrelease(ksyn_wait_queue_t kwq, int qfreenow, int wqtype)
1783 {
1784 uint64_t deadline;
1785 ksyn_wait_queue_t free_elem = NULL;
1786
1787 pthread_list_lock();
1788 if (wqtype == KSYN_WQTYPE_MUTEXDROP) {
1789 kwq->kw_dropcount--;
1790 }
1791 if (--kwq->kw_iocount == 0) {
1792 if ((kwq->kw_pflags & KSYN_WQ_WAITING) != 0) {
1793 /* someone is waiting for the waitqueue, wake them up */
1794 kwq->kw_pflags &= ~KSYN_WQ_WAITING;
1795 wakeup(&kwq->kw_pflags);
1796 }
1797
1798 if (!_kwq_is_used(kwq)) {
1799 if (kwq->kw_turnstile) {
1800 panic("kw_turnstile still non-null upon release");
1801 }
1802
1803 PTHREAD_TRACE(psynch_mutex_kwqdeallocate | DBG_FUNC_START,
1804 kwq->kw_addr, kwq->kw_type, qfreenow, 0);
1805 PTHREAD_TRACE(psynch_mutex_kwqdeallocate | DBG_FUNC_END,
1806 kwq->kw_addr, kwq->kw_lword, kwq->kw_uword, kwq->kw_sword);
1807
1808 if (qfreenow == 0) {
1809 microuptime(&kwq->kw_ts);
1810 LIST_INSERT_HEAD(&pth_free_list, kwq, kw_list);
1811 kwq->kw_pflags |= KSYN_WQ_FLIST;
1812 if (psynch_cleanupset == 0) {
1813 struct timeval t;
1814 microuptime(&t);
1815 t.tv_sec += KSYN_CLEANUP_DEADLINE;
1816 deadline = tvtoabstime(&t);
1817 thread_call_enter_delayed(psynch_thcall, deadline);
1818 psynch_cleanupset = 1;
1819 }
1820 } else {
1821 kwq->kw_pflags &= ~KSYN_WQ_INHASH;
1822 LIST_REMOVE(kwq, kw_hash);
1823 free_elem = kwq;
1824 }
1825 }
1826 }
1827 pthread_list_unlock();
1828 if (free_elem != NULL) {
1829 _kwq_destroy(free_elem);
1830 }
1831 }
1832
1833 /* responsible for freeing the waitqueues */
1834 void
1835 psynch_wq_cleanup(__unused void *param, __unused void * param1)
1836 {
1837 ksyn_wait_queue_t kwq, tmp;
1838 struct timeval t;
1839 int reschedule = 0;
1840 uint64_t deadline = 0;
1841 LIST_HEAD(, ksyn_wait_queue) freelist;
1842 LIST_INIT(&freelist);
1843
1844 pthread_list_lock();
1845
1846 microuptime(&t);
1847
1848 LIST_FOREACH(kwq, &pth_free_list, kw_list) {
1849 if (_kwq_is_used(kwq) || kwq->kw_iocount != 0) {
1850 // still in use
1851 continue;
1852 }
1853 __darwin_time_t diff = t.tv_sec - kwq->kw_ts.tv_sec;
1854 if (diff < 0)
1855 diff *= -1;
1856 if (diff >= KSYN_CLEANUP_DEADLINE) {
1857 kwq->kw_pflags &= ~(KSYN_WQ_FLIST | KSYN_WQ_INHASH);
1858 LIST_REMOVE(kwq, kw_hash);
1859 LIST_REMOVE(kwq, kw_list);
1860 LIST_INSERT_HEAD(&freelist, kwq, kw_list);
1861 } else {
1862 reschedule = 1;
1863 }
1864
1865 }
1866 if (reschedule != 0) {
1867 t.tv_sec += KSYN_CLEANUP_DEADLINE;
1868 deadline = tvtoabstime(&t);
1869 thread_call_enter_delayed(psynch_thcall, deadline);
1870 psynch_cleanupset = 1;
1871 } else {
1872 psynch_cleanupset = 0;
1873 }
1874 pthread_list_unlock();
1875
1876 LIST_FOREACH_SAFE(kwq, &freelist, kw_list, tmp) {
1877 _kwq_destroy(kwq);
1878 }
1879 }
1880
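/* map a wait_result_t from blocking to the errno reported to user space (0 for a normal wakeup) */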
1881 static int
1882 _wait_result_to_errno(wait_result_t result)
1883 {
1884 int res = 0;
1885 switch (result) {
1886 case THREAD_TIMED_OUT:
1887 res = ETIMEDOUT;
1888 break;
1889 case THREAD_INTERRUPTED:
1890 res = EINTR;
1891 break;
1892 }
1893 return res;
1894 }
1895
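/*
 * Enqueue the calling thread's wait element on the kwq and block with the
 * supplied continuation. Called with the kwq locked; the lock is dropped
 * before blocking and the continuation never returns here.
 */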
1896 int
1897 ksyn_wait(ksyn_wait_queue_t kwq, kwq_queue_type_t kqi, uint32_t lockseq,
1898 int fit, uint64_t abstime, uint16_t kwe_flags,
1899 thread_continue_t continuation, block_hint_t block_hint)
1900 {
1901 thread_t th = current_thread();
1902 uthread_t uth = pthread_kern->get_bsdthread_info(th);
1903 struct turnstile **tstore = NULL;
1904 int res;
1905
1906 assert(continuation != THREAD_CONTINUE_NULL);
1907
1908 ksyn_waitq_element_t kwe = pthread_kern->uthread_get_uukwe(uth);
1909 bzero(kwe, sizeof(*kwe));
1910 kwe->kwe_count = 1;
1911 kwe->kwe_lockseq = lockseq & PTHRW_COUNT_MASK;
1912 kwe->kwe_state = KWE_THREAD_INWAIT;
1913 kwe->kwe_uth = uth;
1914 kwe->kwe_thread = th;
1915 kwe->kwe_flags = kwe_flags;
1916
1917 res = ksyn_queue_insert(kwq, kqi, kwe, lockseq, fit);
1918 if (res != 0) {
1919 //panic("psynch_rw_wrlock: failed to enqueue\n"); // XXX
1920 ksyn_wqunlock(kwq);
1921 return res;
1922 }
1923
1924 PTHREAD_TRACE(psynch_mutex_kwqwait, kwq->kw_addr, kwq->kw_inqueue,
1925 kwq->kw_prepost.count, kwq->kw_intr.count);
1926
1927 if (_kwq_use_turnstile(kwq)) {
1928 // pthread mutexes and rwlocks both (at least sometimes) know their
1929 // owner and can use turnstiles. Otherwise, we pass NULL as the
1930 // tstore to the shims so they wait on the global waitq.
1931 tstore = &kwq->kw_turnstile;
1932 }
1933
1934 pthread_kern->psynch_wait_prepare((uintptr_t)kwq, tstore, kwq->kw_owner,
1935 block_hint, abstime);
1936
1937 ksyn_wqunlock(kwq);
1938
1939 if (tstore) {
1940 pthread_kern->psynch_wait_update_complete(kwq->kw_turnstile);
1941 }
1942
1943 thread_block_parameter(continuation, kwq);
1944
1945 // NOT REACHED
1946 panic("ksyn_wait continuation returned");
1947 __builtin_unreachable();
1948 }
1949
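/*
 * Wake one waiter (the first in the queue if kwe is NULL), removing it from
 * the queue and handing it updateval as its psynch return value.
 */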
1950 kern_return_t
1951 ksyn_signal(ksyn_wait_queue_t kwq, kwq_queue_type_t kqi,
1952 ksyn_waitq_element_t kwe, uint32_t updateval)
1953 {
1954 kern_return_t ret;
1955 struct turnstile **tstore = NULL;
1956
1957 // If no wait element was specified, wake the first.
1958 if (!kwe) {
1959 kwe = TAILQ_FIRST(&kwq->kw_ksynqueues[kqi].ksynq_kwelist);
1960 if (!kwe) {
1961 panic("ksyn_signal: panic signaling empty queue");
1962 }
1963 }
1964
1965 if (kwe->kwe_state != KWE_THREAD_INWAIT) {
1966 panic("ksyn_signal: panic signaling non-waiting element");
1967 }
1968
1969 ksyn_queue_remove_item(kwq, &kwq->kw_ksynqueues[kqi], kwe);
1970 kwe->kwe_psynchretval = updateval;
1971
1972 if (_kwq_use_turnstile(kwq)) {
1973 tstore = &kwq->kw_turnstile;
1974 }
1975
1976 ret = pthread_kern->psynch_wait_wakeup(kwq, kwe, tstore);
1977
1978 if (ret != KERN_SUCCESS && ret != KERN_NOT_WAITING) {
1979 panic("ksyn_signal: panic waking up thread %x\n", ret);
1980 }
1981 return ret;
1982 }
1983
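/* look up the VM object id and offset backing a user address (used to hash process-shared objects) */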
1984 int
1985 ksyn_findobj(user_addr_t uaddr, uint64_t *objectp, uint64_t *offsetp)
1986 {
1987 kern_return_t ret;
1988 vm_page_info_basic_data_t info;
1989 mach_msg_type_number_t count = VM_PAGE_INFO_BASIC_COUNT;
1990 ret = pthread_kern->vm_map_page_info(pthread_kern->current_map(), uaddr,
1991 VM_PAGE_INFO_BASIC, (vm_page_info_t)&info, &count);
1992 if (ret != KERN_SUCCESS) {
1993 return EINVAL;
1994 }
1995
1996 if (objectp != NULL) {
1997 *objectp = (uint64_t)info.object_id;
1998 }
1999 if (offsetp != NULL) {
2000 *offsetp = (uint64_t)info.offset;
2001 }
2002
2003 return(0);
2004 }
2005
2006
2007 /* find the lowest pending sequence among the read (kw_fr) and write (kw_fwr) queues */
2008 int
2009 kwq_find_rw_lowest(ksyn_wait_queue_t kwq, int flags, uint32_t premgen,
2010 int *typep, uint32_t lowest[])
2011 {
2012 uint32_t kw_fr, kw_fwr, low;
2013 int type = 0, lowtype, typenum[2] = { 0 };
2014 uint32_t numbers[2] = { 0 };
2015 int count = 0, i;
2016
2017 if ((kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count != 0) ||
2018 ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0)) {
2019 type |= PTH_RWSHFT_TYPE_READ;
2020 /* read entries are present */
2021 if (kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count != 0) {
2022 kw_fr = kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_firstnum;
2023 if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) &&
2024 (is_seqlower(premgen, kw_fr) != 0))
2025 kw_fr = premgen;
2026 } else
2027 kw_fr = premgen;
2028
2029 lowest[KSYN_QUEUE_READ] = kw_fr;
2030 numbers[count] = kw_fr;
2031 typenum[count] = PTH_RW_TYPE_READ;
2032 count++;
2033 } else
2034 lowest[KSYN_QUEUE_READ] = 0;
2035
2036 if ((kwq->kw_ksynqueues[KSYN_QUEUE_WRITE].ksynq_count != 0) ||
2037 ((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0)) {
2038 type |= PTH_RWSHFT_TYPE_WRITE;
2039 /* write entries are present */
2040 if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITE].ksynq_count != 0) {
2041 kw_fwr = kwq->kw_ksynqueues[KSYN_QUEUE_WRITE].ksynq_firstnum;
2042 if (((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0) &&
2043 (is_seqlower(premgen, kw_fwr) != 0))
2044 kw_fwr = premgen;
2045 } else
2046 kw_fwr = premgen;
2047
2048 lowest[KSYN_QUEUE_WRITE] = kw_fwr;
2049 numbers[count] = kw_fwr;
2050 typenum[count] = PTH_RW_TYPE_WRITE;
2051 count++;
2052 } else
2053 lowest[KSYN_QUEUE_WRITE] = 0;
2054
2055 #if __TESTPANICS__
2056 if (count == 0)
2057 panic("nothing in the queue???\n");
2058 #endif /* __TESTPANICS__ */
2059
2060 low = numbers[0];
2061 lowtype = typenum[0];
2062 if (count > 1) {
2063 for (i = 1; i < count; i++) {
2064 if (is_seqlower(numbers[i], low) != 0) {
2065 low = numbers[i];
2066 lowtype = typenum[i];
2067 }
2068 }
2069 }
2070 type |= lowtype;
2071
2072 if (typep != 0)
2073 *typep = type;
2074 return(0);
2075 }
2076
2077 /* wake up readers up to the writer limit */
2078 int
2079 ksyn_wakeupreaders(ksyn_wait_queue_t kwq, uint32_t limitread, int allreaders,
2080 uint32_t updatebits, int *wokenp)
2081 {
2082 ksyn_queue_t kq;
2083 int failedwakeup = 0;
2084 int numwoken = 0;
2085 kern_return_t kret = KERN_SUCCESS;
2086 uint32_t lbits = 0;
2087
2088 lbits = updatebits;
2089
2090 kq = &kwq->kw_ksynqueues[KSYN_QUEUE_READ];
2091 while ((kq->ksynq_count != 0) &&
2092 (allreaders || (is_seqlower(kq->ksynq_firstnum, limitread) != 0))) {
2093 kret = ksyn_signal(kwq, KSYN_QUEUE_READ, NULL, lbits);
2094 if (kret == KERN_NOT_WAITING) {
2095 failedwakeup++;
2096 }
2097 numwoken++;
2098 }
2099
2100 if (wokenp != NULL)
2101 *wokenp = numwoken;
2102 return(failedwakeup);
2103 }
2104
2105
2106 /*
2107 * This handles the unlock grants for next set on rw_unlock() or on arrival
2108 * of all preposted waiters.
2109 */
2110 int
2111 kwq_handle_unlock(ksyn_wait_queue_t kwq, __unused uint32_t mgen, uint32_t rw_wc,
2112 uint32_t *updatep, int flags, int *blockp, uint32_t premgen)
2113 {
2114 uint32_t low_writer, limitrdnum;
2115 int rwtype, error=0;
2116 int allreaders, nfailed;
2117 uint32_t updatebits = 0, numneeded = 0;
2118 int prepost = flags & KW_UNLOCK_PREPOST;
2119 thread_t preth = THREAD_NULL;
2120 ksyn_waitq_element_t kwe;
2121 uthread_t uth;
2122 thread_t th;
2123 int woken = 0;
2124 int block = 1;
2125 uint32_t lowest[KSYN_QUEUE_MAX]; /* no need for upgrade as it is handled separately */
2126 kern_return_t kret = KERN_SUCCESS;
2127 ksyn_queue_t kq;
2128 int curthreturns = 0;
2129
2130 if (prepost != 0) {
2131 preth = current_thread();
2132 }
2133
2134 kq = &kwq->kw_ksynqueues[KSYN_QUEUE_READ];
2135 kwq->kw_lastseqword = rw_wc;
2136 kwq->kw_lastunlockseq = (rw_wc & PTHRW_COUNT_MASK);
2137 kwq->kw_kflags &= ~KSYN_KWF_OVERLAP_GUARD;
2138
2139 error = kwq_find_rw_lowest(kwq, flags, premgen, &rwtype, lowest);
2140 #if __TESTPANICS__
2141 if (error != 0)
2142 panic("rwunlock: cannot fails to slot next round of threads");
2143 #endif /* __TESTPANICS__ */
2144
2145 low_writer = lowest[KSYN_QUEUE_WRITE];
2146
2147 allreaders = 0;
2148 updatebits = 0;
2149
2150 switch (rwtype & PTH_RW_TYPE_MASK) {
2151 case PTH_RW_TYPE_READ: {
2152 // XXX
2153 /* what about the preflight which is LREAD or READ ?? */
2154 if ((rwtype & PTH_RWSHFT_TYPE_MASK) != 0) {
2155 if (rwtype & PTH_RWSHFT_TYPE_WRITE) {
2156 updatebits |= (PTH_RWL_WBIT | PTH_RWL_KBIT);
2157 }
2158 }
2159 limitrdnum = 0;
2160 if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0) {
2161 limitrdnum = low_writer;
2162 } else {
2163 allreaders = 1;
2164 }
2165
2166 numneeded = 0;
2167
2168 if ((rwtype & PTH_RWSHFT_TYPE_WRITE) != 0) {
2169 limitrdnum = low_writer;
2170 numneeded = ksyn_queue_count_tolowest(kq, limitrdnum);
2171 if (((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) && (is_seqlower(premgen, limitrdnum) != 0)) {
2172 curthreturns = 1;
2173 numneeded += 1;
2174 }
2175 } else {
2176 // no writers at all
2177 // no other waiters only readers
2178 kwq->kw_kflags |= KSYN_KWF_OVERLAP_GUARD;
2179 numneeded += kwq->kw_ksynqueues[KSYN_QUEUE_READ].ksynq_count;
2180 if ((flags & KW_UNLOCK_PREPOST_READLOCK) != 0) {
2181 curthreturns = 1;
2182 numneeded += 1;
2183 }
2184 }
2185
2186 updatebits += (numneeded << PTHRW_COUNT_SHIFT);
2187
2188 kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;
2189
2190 if (curthreturns != 0) {
2191 block = 0;
2192 uth = current_uthread();
2193 kwe = pthread_kern->uthread_get_uukwe(uth);
2194 kwe->kwe_psynchretval = updatebits;
2195 }
2196
2197
2198 nfailed = ksyn_wakeupreaders(kwq, limitrdnum, allreaders,
2199 updatebits, &woken);
2200 if (nfailed != 0) {
2201 _kwq_mark_interruped_wakeup(kwq, KWQ_INTR_READ, nfailed,
2202 limitrdnum, updatebits);
2203 }
2204
2205 error = 0;
2206
2207 if ((kwq->kw_ksynqueues[KSYN_QUEUE_WRITE].ksynq_count != 0) &&
2208 ((updatebits & PTH_RWL_WBIT) == 0)) {
2209 panic("kwq_handle_unlock: writer pending but no writebit set %x\n", updatebits);
2210 }
2211 }
2212 break;
2213
2214 case PTH_RW_TYPE_WRITE: {
2215
2216 /* only one thread is going to be granted */
2217 updatebits |= (PTHRW_INC);
2218 updatebits |= PTH_RWL_KBIT| PTH_RWL_EBIT;
2219
2220 if (((flags & KW_UNLOCK_PREPOST_WRLOCK) != 0) && (low_writer == premgen)) {
2221 block = 0;
2222 if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITE].ksynq_count != 0) {
2223 updatebits |= PTH_RWL_WBIT;
2224 }
2225 th = preth;
2226 uth = pthread_kern->get_bsdthread_info(th);
2227 kwe = pthread_kern->uthread_get_uukwe(uth);
2228 kwe->kwe_psynchretval = updatebits;
2229 } else {
2230 /* we are not granting writelock to the preposting thread */
2231 /* if other writers are present, or a preposting write thread exists, the W bit must be set */
2232 if (kwq->kw_ksynqueues[KSYN_QUEUE_WRITE].ksynq_count > 1 ||
2233 (flags & KW_UNLOCK_PREPOST_WRLOCK) != 0) {
2234 updatebits |= PTH_RWL_WBIT;
2235 }
2236 /* setup next in the queue */
2237 kret = ksyn_signal(kwq, KSYN_QUEUE_WRITE, NULL, updatebits);
2238 if (kret == KERN_NOT_WAITING) {
2239 _kwq_mark_interruped_wakeup(kwq, KWQ_INTR_WRITE, 1,
2240 low_writer, updatebits);
2241 }
2242 error = 0;
2243 }
2244 kwq->kw_nextseqword = (rw_wc & PTHRW_COUNT_MASK) + updatebits;
2245 if ((updatebits & (PTH_RWL_KBIT | PTH_RWL_EBIT)) !=
2246 (PTH_RWL_KBIT | PTH_RWL_EBIT)) {
2247 panic("kwq_handle_unlock: writer lock granted but no ke set %x\n", updatebits);
2248 }
2249 }
2250 break;
2251
2252 default:
2253 panic("rwunlock: invalid type for lock grants");
2254
2255 }
2256
2257 if (updatep != NULL)
2258 *updatep = updatebits;
2259 if (blockp != NULL)
2260 *blockp = block;
2261 return(error);
2262 }
2263
2264 /************* Indiv queue support routines ************************/
2265 void
2266 ksyn_queue_init(ksyn_queue_t kq)
2267 {
2268 TAILQ_INIT(&kq->ksynq_kwelist);
2269 kq->ksynq_count = 0;
2270 kq->ksynq_firstnum = 0;
2271 kq->ksynq_lastnum = 0;
2272 }
2273
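/*
 * Insert a wait element into the per-kwq queue, either in arrival order
 * (FIRSTFIT) or sorted by lock sequence number.
 */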
2274 int
2275 ksyn_queue_insert(ksyn_wait_queue_t kwq, int kqi, ksyn_waitq_element_t kwe,
2276 uint32_t mgen, int fit)
2277 {
2278 ksyn_queue_t kq = &kwq->kw_ksynqueues[kqi];
2279 uint32_t lockseq = mgen & PTHRW_COUNT_MASK;
2280 int res = 0;
2281
2282 if (kwe->kwe_kwqqueue != NULL) {
2283 panic("adding enqueued item to another queue");
2284 }
2285
2286 if (kq->ksynq_count == 0) {
2287 TAILQ_INSERT_HEAD(&kq->ksynq_kwelist, kwe, kwe_list);
2288 kq->ksynq_firstnum = lockseq;
2289 kq->ksynq_lastnum = lockseq;
2290 } else if (fit == FIRSTFIT) {
2291 /* TBD: if retry bit is set for mutex, add it to the head */
2292 /* firstfit, arriving order */
2293 TAILQ_INSERT_TAIL(&kq->ksynq_kwelist, kwe, kwe_list);
2294 if (is_seqlower(lockseq, kq->ksynq_firstnum)) {
2295 kq->ksynq_firstnum = lockseq;
2296 }
2297 if (is_seqhigher(lockseq, kq->ksynq_lastnum)) {
2298 kq->ksynq_lastnum = lockseq;
2299 }
2300 } else if (lockseq == kq->ksynq_firstnum || lockseq == kq->ksynq_lastnum) {
2301 /* During prepost when a thread is getting cancelled, we could have
2302 * two with same seq */
2303 res = EBUSY;
2304 if (kwe->kwe_state == KWE_THREAD_PREPOST) {
2305 ksyn_waitq_element_t tmp = ksyn_queue_find_seq(kwq, kq, lockseq);
2306 if (tmp != NULL && tmp->kwe_uth != NULL &&
2307 pthread_kern->uthread_is_cancelled(tmp->kwe_uth)) {
2308 TAILQ_INSERT_TAIL(&kq->ksynq_kwelist, kwe, kwe_list);
2309 res = 0;
2310 }
2311 }
2312 } else if (is_seqlower(kq->ksynq_lastnum, lockseq)) { // XXX is_seqhigher
2313 TAILQ_INSERT_TAIL(&kq->ksynq_kwelist, kwe, kwe_list);
2314 kq->ksynq_lastnum = lockseq;
2315 } else if (is_seqlower(lockseq, kq->ksynq_firstnum)) {
2316 TAILQ_INSERT_HEAD(&kq->ksynq_kwelist, kwe, kwe_list);
2317 kq->ksynq_firstnum = lockseq;
2318 } else {
2319 ksyn_waitq_element_t q_kwe, r_kwe;
2320
2321 res = ESRCH;
2322 TAILQ_FOREACH_SAFE(q_kwe, &kq->ksynq_kwelist, kwe_list, r_kwe) {
2323 if (is_seqhigher(q_kwe->kwe_lockseq, lockseq)) {
2324 TAILQ_INSERT_BEFORE(q_kwe, kwe, kwe_list);
2325 res = 0;
2326 break;
2327 }
2328 }
2329 }
2330
2331 if (res == 0) {
2332 kwe->kwe_kwqqueue = kwq;
2333 kq->ksynq_count++;
2334 kwq->kw_inqueue++;
2335 update_low_high(kwq, lockseq);
2336 }
2337 return res;
2338 }
2339
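/* remove a wait element and refresh the queue's first/last and the kwq's low/high sequence numbers */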
2340 void
2341 ksyn_queue_remove_item(ksyn_wait_queue_t kwq, ksyn_queue_t kq,
2342 ksyn_waitq_element_t kwe)
2343 {
2344 if (kq->ksynq_count == 0) {
2345 panic("removing item from empty queue");
2346 }
2347
2348 if (kwe->kwe_kwqqueue != kwq) {
2349 panic("removing item from wrong queue");
2350 }
2351
2352 TAILQ_REMOVE(&kq->ksynq_kwelist, kwe, kwe_list);
2353 kwe->kwe_list.tqe_next = NULL;
2354 kwe->kwe_list.tqe_prev = NULL;
2355 kwe->kwe_kwqqueue = NULL;
2356
2357 if (--kq->ksynq_count > 0) {
2358 ksyn_waitq_element_t tmp;
2359 tmp = TAILQ_FIRST(&kq->ksynq_kwelist);
2360 kq->ksynq_firstnum = tmp->kwe_lockseq & PTHRW_COUNT_MASK;
2361 tmp = TAILQ_LAST(&kq->ksynq_kwelist, ksynq_kwelist_head);
2362 kq->ksynq_lastnum = tmp->kwe_lockseq & PTHRW_COUNT_MASK;
2363 } else {
2364 kq->ksynq_firstnum = 0;
2365 kq->ksynq_lastnum = 0;
2366 }
2367
2368 if (--kwq->kw_inqueue > 0) {
2369 uint32_t curseq = kwe->kwe_lockseq & PTHRW_COUNT_MASK;
2370 if (kwq->kw_lowseq == curseq) {
2371 kwq->kw_lowseq = find_nextlowseq(kwq);
2372 }
2373 if (kwq->kw_highseq == curseq) {
2374 kwq->kw_highseq = find_nexthighseq(kwq);
2375 }
2376 } else {
2377 kwq->kw_lowseq = 0;
2378 kwq->kw_highseq = 0;
2379 }
2380 }
2381
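/* find the wait element whose lock sequence exactly matches seq, if any */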
2382 ksyn_waitq_element_t
2383 ksyn_queue_find_seq(__unused ksyn_wait_queue_t kwq, ksyn_queue_t kq,
2384 uint32_t seq)
2385 {
2386 ksyn_waitq_element_t kwe;
2387
2388 // XXX: should stop searching when higher sequence number is seen
2389 TAILQ_FOREACH(kwe, &kq->ksynq_kwelist, kwe_list) {
2390 if ((kwe->kwe_lockseq & PTHRW_COUNT_MASK) == seq) {
2391 return kwe;
2392 }
2393 }
2394 return NULL;
2395 }
2396
2397 /* find the thread at the target sequence (or a broadcast/prepost at or above) */
2398 ksyn_waitq_element_t
2399 ksyn_queue_find_cvpreposeq(ksyn_queue_t kq, uint32_t cgen)
2400 {
2401 ksyn_waitq_element_t result = NULL;
2402 ksyn_waitq_element_t kwe;
2403 uint32_t lgen = (cgen & PTHRW_COUNT_MASK);
2404
2405 TAILQ_FOREACH(kwe, &kq->ksynq_kwelist, kwe_list) {
2406 if (is_seqhigher_eq(kwe->kwe_lockseq, cgen)) {
2407 result = kwe;
2408
2409 // KWE_THREAD_INWAIT must be strictly equal
2410 if (kwe->kwe_state == KWE_THREAD_INWAIT &&
2411 (kwe->kwe_lockseq & PTHRW_COUNT_MASK) != lgen) {
2412 result = NULL;
2413 }
2414 break;
2415 }
2416 }
2417 return result;
2418 }
2419
2420 /* look for a thread at lockseq */
2421 ksyn_waitq_element_t
2422 ksyn_queue_find_signalseq(__unused ksyn_wait_queue_t kwq, ksyn_queue_t kq,
2423 uint32_t uptoseq, uint32_t signalseq)
2424 {
2425 ksyn_waitq_element_t result = NULL;
2426 ksyn_waitq_element_t q_kwe, r_kwe;
2427
2428 // XXX
2429 /* case where wrap in the tail of the queue exists */
2430 TAILQ_FOREACH_SAFE(q_kwe, &kq->ksynq_kwelist, kwe_list, r_kwe) {
2431 if (q_kwe->kwe_state == KWE_THREAD_PREPOST) {
2432 if (is_seqhigher(q_kwe->kwe_lockseq, uptoseq)) {
2433 return result;
2434 }
2435 }
2436 if (q_kwe->kwe_state == KWE_THREAD_PREPOST ||
2437 q_kwe->kwe_state == KWE_THREAD_BROADCAST) {
2438 /* match any prepost at our same uptoseq or any broadcast above */
2439 if (is_seqlower(q_kwe->kwe_lockseq, uptoseq)) {
2440 continue;
2441 }
2442 return q_kwe;
2443 } else if (q_kwe->kwe_state == KWE_THREAD_INWAIT) {
2444 /*
2445 * Match any (non-cancelled) thread at or below our upto sequence -
2446 * but prefer an exact match to our signal sequence (if present) to
2447 * keep exact matches happening.
2448 */
2449 if (is_seqhigher(q_kwe->kwe_lockseq, uptoseq)) {
2450 return result;
2451 }
2452 if (q_kwe->kwe_kwqqueue == kwq) {
2453 if (!pthread_kern->uthread_is_cancelled(q_kwe->kwe_uth)) {
2454 /* if equal or higher than our signal sequence, return this one */
2455 if (is_seqhigher_eq(q_kwe->kwe_lockseq, signalseq)) {
2456 return q_kwe;
2457 }
2458
2459 /* otherwise, just remember this eligible thread and move on */
2460 if (result == NULL) {
2461 result = q_kwe;
2462 }
2463 }
2464 }
2465 } else {
2466 panic("ksyn_queue_find_signalseq(): unknown wait queue element type (%d)\n", q_kwe->kwe_state);
2467 }
2468 }
2469 return result;
2470 }
2471
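/*
 * Drain queue entries up to the given sequence (or all of them): real waiters
 * are woken with a spurious-wakeup status, fake (prepost/broadcast) entries are freed.
 */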
2472 void
2473 ksyn_queue_free_items(ksyn_wait_queue_t kwq, int kqi, uint32_t upto, int all)
2474 {
2475 ksyn_waitq_element_t kwe;
2476 uint32_t tseq = upto & PTHRW_COUNT_MASK;
2477 ksyn_queue_t kq = &kwq->kw_ksynqueues[kqi];
2478 uint32_t freed = 0, signaled = 0;
2479
2480 PTHREAD_TRACE(psynch_cvar_freeitems | DBG_FUNC_START, kwq->kw_addr,
2481 kqi, upto, all);
2482
2483 while ((kwe = TAILQ_FIRST(&kq->ksynq_kwelist)) != NULL) {
2484 if (all == 0 && is_seqhigher(kwe->kwe_lockseq, tseq)) {
2485 break;
2486 }
2487 if (kwe->kwe_state == KWE_THREAD_INWAIT) {
2488 /*
2489 * This scenario typically occurs when the cvar has been
2490 * reinited and new waiters are already waiting. Waking
2491 * them as spurious wakeups lets the cvar state get
2492 * reset correctly.
2493 */
2494
2495 PTHREAD_TRACE(psynch_cvar_freeitems, kwq->kw_addr, kwe,
2496 kwq->kw_inqueue, 1);
2497
2498 /* skip canceled ones */
2499 /* wake the rest */
2500 /* set M bit to indicate to the waking CV to return the Inc val */
2501 (void)ksyn_signal(kwq, kqi, kwe,
2502 PTHRW_INC | PTH_RWS_CV_MBIT | PTH_RWL_MTX_WAIT);
2503 signaled++;
2504 } else {
2505 PTHREAD_TRACE(psynch_cvar_freeitems, kwq->kw_addr, kwe,
2506 kwq->kw_inqueue, 2);
2507 ksyn_queue_remove_item(kwq, kq, kwe);
2508 zfree(kwe_zone, kwe);
2509 kwq->kw_fakecount--;
2510 freed++;
2511 }
2512 }
2513
2514 PTHREAD_TRACE(psynch_cvar_freeitems | DBG_FUNC_END, kwq->kw_addr, freed,
2515 signaled, kwq->kw_inqueue);
2516 }
2517
2518 /*************************************************************************/
2519
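/* account for a newly queued sequence number in the kwq's low/high window */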
2520 void
2521 update_low_high(ksyn_wait_queue_t kwq, uint32_t lockseq)
2522 {
2523 if (kwq->kw_inqueue == 1) {
2524 kwq->kw_lowseq = lockseq;
2525 kwq->kw_highseq = lockseq;
2526 } else {
2527 if (is_seqlower(lockseq, kwq->kw_lowseq)) {
2528 kwq->kw_lowseq = lockseq;
2529 }
2530 if (is_seqhigher(lockseq, kwq->kw_highseq)) {
2531 kwq->kw_highseq = lockseq;
2532 }
2533 }
2534 }
2535
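/* lowest first sequence number across all of the kwq's queues */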
2536 uint32_t
2537 find_nextlowseq(ksyn_wait_queue_t kwq)
2538 {
2539 uint32_t lowest = 0;
2540 int first = 1;
2541 int i;
2542
2543 for (i = 0; i < KSYN_QUEUE_MAX; i++) {
2544 if (kwq->kw_ksynqueues[i].ksynq_count > 0) {
2545 uint32_t current = kwq->kw_ksynqueues[i].ksynq_firstnum;
2546 if (first || is_seqlower(current, lowest)) {
2547 lowest = current;
2548 first = 0;
2549 }
2550 }
2551 }
2552
2553 return lowest;
2554 }
2555
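/* highest last sequence number across all of the kwq's queues */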
2556 uint32_t
2557 find_nexthighseq(ksyn_wait_queue_t kwq)
2558 {
2559 uint32_t highest = 0;
2560 int first = 1;
2561 int i;
2562
2563 for (i = 0; i < KSYN_QUEUE_MAX; i++) {
2564 if (kwq->kw_ksynqueues[i].ksynq_count > 0) {
2565 uint32_t current = kwq->kw_ksynqueues[i].ksynq_lastnum;
2566 if (first || is_seqhigher(current, highest)) {
2567 highest = current;
2568 first = 0;
2569 }
2570 }
2571 }
2572
2573 return highest;
2574 }
2575
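/* count waiters with sequence numbers up to 'upto' across all queues; returns 1 once at least nwaiters are found */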
2576 int
2577 find_seq_till(ksyn_wait_queue_t kwq, uint32_t upto, uint32_t nwaiters,
2578 uint32_t *countp)
2579 {
2580 int i;
2581 uint32_t count = 0;
2582
2583 for (i = 0; i < KSYN_QUEUE_MAX; i++) {
2584 count += ksyn_queue_count_tolowest(&kwq->kw_ksynqueues[i], upto);
2585 if (count >= nwaiters) {
2586 break;
2587 }
2588 }
2589
2590 if (countp != NULL) {
2591 *countp = count;
2592 }
2593
2594 if (count == 0) {
2595 return 0;
2596 } else if (count >= nwaiters) {
2597 return 1;
2598 } else {
2599 return 0;
2600 }
2601 }
2602
2603
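/* number of elements in the queue with sequence numbers at or below 'upto' */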
2604 uint32_t
2605 ksyn_queue_count_tolowest(ksyn_queue_t kq, uint32_t upto)
2606 {
2607 uint32_t i = 0;
2608 ksyn_waitq_element_t kwe, newkwe;
2609
2610 if (kq->ksynq_count == 0 || is_seqhigher(kq->ksynq_firstnum, upto)) {
2611 return 0;
2612 }
2613 if (upto == kq->ksynq_firstnum) {
2614 return 1;
2615 }
2616 TAILQ_FOREACH_SAFE(kwe, &kq->ksynq_kwelist, kwe_list, newkwe) {
2617 uint32_t curval = (kwe->kwe_lockseq & PTHRW_COUNT_MASK);
2618 if (is_seqhigher(curval, upto)) {
2619 break;
2620 }
2621 ++i;
2622 if (upto == curval) {
2623 break;
2624 }
2625 }
2626 return i;
2627 }
2628
2629 /* handles the cond broadcast of a cvar; returns the number of woken threads and the bits for the syscall return */
2630 void
2631 ksyn_handle_cvbroad(ksyn_wait_queue_t ckwq, uint32_t upto, uint32_t *updatep)
2632 {
2633 ksyn_waitq_element_t kwe, newkwe;
2634 uint32_t updatebits = 0;
2635 ksyn_queue_t kq = &ckwq->kw_ksynqueues[KSYN_QUEUE_WRITE];
2636
2637 struct ksyn_queue kfreeq;
2638 ksyn_queue_init(&kfreeq);
2639
2640 PTHREAD_TRACE(psynch_cvar_broadcast | DBG_FUNC_START, ckwq->kw_addr, upto,
2641 ckwq->kw_inqueue, 0);
2642
2643 retry:
2644 TAILQ_FOREACH_SAFE(kwe, &kq->ksynq_kwelist, kwe_list, newkwe) {
2645 if (is_seqhigher(kwe->kwe_lockseq, upto)) {
2646 // outside our range
2647 break;
2648 }
2649
2650 if (kwe->kwe_state == KWE_THREAD_INWAIT) {
2651 // Wake only non-canceled threads waiting on this CV.
2652 if (!pthread_kern->uthread_is_cancelled(kwe->kwe_uth)) {
2653 PTHREAD_TRACE(psynch_cvar_broadcast, ckwq->kw_addr, kwe, 0, 1);
2654 (void)ksyn_signal(ckwq, KSYN_QUEUE_WRITE, kwe, PTH_RWL_MTX_WAIT);
2655 updatebits += PTHRW_INC;
2656 }
2657 } else if (kwe->kwe_state == KWE_THREAD_BROADCAST ||
2658 kwe->kwe_state == KWE_THREAD_PREPOST) {
2659 PTHREAD_TRACE(psynch_cvar_broadcast, ckwq->kw_addr, kwe,
2660 kwe->kwe_state, 2);
2661 ksyn_queue_remove_item(ckwq, kq, kwe);
2662 TAILQ_INSERT_TAIL(&kfreeq.ksynq_kwelist, kwe, kwe_list);
2663 ckwq->kw_fakecount--;
2664 } else {
2665 panic("unknown kwe state\n");
2666 }
2667 }
2668
2669 /* Need to enter a broadcast in the queue (if not already at L == S) */
2670
2671 if (diff_genseq(ckwq->kw_lword, ckwq->kw_sword)) {
2672 PTHREAD_TRACE(psynch_cvar_broadcast, ckwq->kw_addr, ckwq->kw_lword,
2673 ckwq->kw_sword, 3);
2674
2675 newkwe = TAILQ_FIRST(&kfreeq.ksynq_kwelist);
2676 if (newkwe == NULL) {
2677 ksyn_wqunlock(ckwq);
2678 newkwe = (ksyn_waitq_element_t)zalloc(kwe_zone);
2679 TAILQ_INSERT_TAIL(&kfreeq.ksynq_kwelist, newkwe, kwe_list);
2680 ksyn_wqlock(ckwq);
2681 goto retry;
2682 } else {
2683 TAILQ_REMOVE(&kfreeq.ksynq_kwelist, newkwe, kwe_list);
2684 ksyn_prepost(ckwq, newkwe, KWE_THREAD_BROADCAST, upto);
2685 PTHREAD_TRACE(psynch_cvar_broadcast, ckwq->kw_addr, newkwe, 0, 4);
2686 }
2687 }
2688
2689 // free up any remaining things stumbled across above
2690 while ((kwe = TAILQ_FIRST(&kfreeq.ksynq_kwelist)) != NULL) {
2691 TAILQ_REMOVE(&kfreeq.ksynq_kwelist, kwe, kwe_list);
2692 zfree(kwe_zone, kwe);
2693 }
2694
2695 PTHREAD_TRACE(psynch_cvar_broadcast | DBG_FUNC_END, ckwq->kw_addr,
2696 updatebits, 0, 0);
2697
2698 if (updatep != NULL) {
2699 *updatep |= updatebits;
2700 }
2701 }
2702
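/*
 * If L == S there are no outstanding waiters: free any leftover queue entries,
 * zero the sequence words and report the C bit; if only fake (prepost/broadcast)
 * entries remain in the queue, report the P bit instead.
 */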
2703 void
2704 ksyn_cvupdate_fixup(ksyn_wait_queue_t ckwq, uint32_t *updatebits)
2705 {
2706 if ((ckwq->kw_lword & PTHRW_COUNT_MASK) == (ckwq->kw_sword & PTHRW_COUNT_MASK)) {
2707 if (ckwq->kw_inqueue != 0) {
2708 /* FREE THE QUEUE */
2709 ksyn_queue_free_items(ckwq, KSYN_QUEUE_WRITE, ckwq->kw_lword, 0);
2710 #if __TESTPANICS__
2711 if (ckwq->kw_inqueue != 0)
2712 panic("ksyn_cvupdate_fixup: L == S, but entries in queue beyond S");
2713 #endif /* __TESTPANICS__ */
2714 }
2715 ckwq->kw_lword = ckwq->kw_uword = ckwq->kw_sword = 0;
2716 ckwq->kw_kflags |= KSYN_KWF_ZEROEDOUT;
2717 *updatebits |= PTH_RWS_CV_CBIT;
2718 } else if (ckwq->kw_inqueue != 0 && ckwq->kw_fakecount == ckwq->kw_inqueue) {
2719 // only fake entries are present in the queue
2720 *updatebits |= PTH_RWS_CV_PBIT;
2721 }
2722 }
2723
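/* create the zalloc zones used for wait queues and wait queue elements */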
2724 void
2725 psynch_zoneinit(void)
2726 {
2727 kwq_zone = zinit(sizeof(struct ksyn_wait_queue),
2728 8192 * sizeof(struct ksyn_wait_queue), 4096, "ksyn_wait_queue");
2729 kwe_zone = zinit(sizeof(struct ksyn_waitq_element),
2730 8192 * sizeof(struct ksyn_waitq_element), 4096, "ksyn_waitq_element");
2731 }
2732
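/* return the kwq (if any) that the given thread's wait element is currently queued on */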
2733 void *
2734 _pthread_get_thread_kwq(thread_t thread)
2735 {
2736 assert(thread);
2737 struct uthread * uthread = pthread_kern->get_bsdthread_info(thread);
2738 assert(uthread);
2739 ksyn_waitq_element_t kwe = pthread_kern->uthread_get_uukwe(uthread);
2740 assert(kwe);
2741 ksyn_wait_queue_t kwq = kwe->kwe_kwqqueue;
2742 return kwq;
2743 }
2744
2745 /* This function is used by stackshot to determine why a thread is blocked, and report
2746 * who owns the object that the thread is blocked on. It should *only* be called if the
2747 * `block_hint' field in the relevant thread's struct is populated with something related
2748 * to pthread sync objects.
2749 */
2750 void
2751 _pthread_find_owner(thread_t thread,
2752 struct stackshot_thread_waitinfo * waitinfo)
2753 {
2754 ksyn_wait_queue_t kwq = _pthread_get_thread_kwq(thread);
2755 switch (waitinfo->wait_type) {
2756 case kThreadWaitPThreadMutex:
2757 assert((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_MTX);
2758 waitinfo->owner = thread_tid(kwq->kw_owner);
2759 waitinfo->context = kwq->kw_addr;
2760 break;
2761 /* Owner of rwlock not stored in kernel space due to races. Punt
2762 * and hope that the userspace address is helpful enough. */
2763 case kThreadWaitPThreadRWLockRead:
2764 case kThreadWaitPThreadRWLockWrite:
2765 assert((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_RWLOCK);
2766 waitinfo->owner = 0;
2767 waitinfo->context = kwq->kw_addr;
2768 break;
2769 /* Condvars don't have owners, so just give the userspace address. */
2770 case kThreadWaitPThreadCondVar:
2771 assert((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_CVAR);
2772 waitinfo->owner = 0;
2773 waitinfo->context = kwq->kw_addr;
2774 break;
2775 case kThreadWaitNone:
2776 default:
2777 waitinfo->owner = 0;
2778 waitinfo->context = 0;
2779 break;
2780 }
2781 }