/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */
/*
 *	File:	kern/sync_lock.c
 *	Author:	Joseph CaraDonna
 *
 *	Contains RT distributed lock synchronization services.
 */

#include <mach/mach_types.h>
#include <mach/lock_set_server.h>
#include <mach/task_server.h>

#include <kern/misc_protos.h>
#include <kern/kalloc.h>
#include <kern/sync_lock.h>
#include <kern/sched_prim.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_sync.h>
#include <kern/thread.h>
#include <kern/task.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

/*
 *	Ulock ownership MACROS
 *
 *	Assumes: ulock internal lock is held
 */

#define ulock_ownership_set(ul, th)				\
	MACRO_BEGIN						\
	thread_mtx_lock(th);					\
	enqueue(&th->held_ulocks, (queue_entry_t) (ul));	\
	thread_mtx_unlock(th);					\
	(ul)->holder = th;					\
	MACRO_END

#define ulock_ownership_clear(ul)				\
	MACRO_BEGIN						\
	thread_t th;						\
	th = (ul)->holder;					\
	if (th->active) {					\
		thread_mtx_lock(th);				\
		remqueue(&th->held_ulocks,			\
			 (queue_entry_t) (ul));			\
		thread_mtx_unlock(th);				\
	} else {						\
		remqueue(&th->held_ulocks,			\
			 (queue_entry_t) (ul));			\
	}							\
	(ul)->holder = THREAD_NULL;				\
	MACRO_END

/*
 *	Lock set ownership MACROS
 */

#define lock_set_ownership_set(ls, t)				\
	MACRO_BEGIN						\
	task_lock((t));						\
	enqueue_head(&(t)->lock_set_list, (queue_entry_t) (ls));\
	(t)->lock_sets_owned++;					\
	task_unlock((t));					\
	(ls)->owner = (t);					\
	MACRO_END

#define lock_set_ownership_clear(ls, t)				\
	MACRO_BEGIN						\
	task_lock((t));						\
	remqueue(&(t)->lock_set_list, (queue_entry_t) (ls));	\
	(t)->lock_sets_owned--;					\
	task_unlock((t));					\
	MACRO_END

unsigned int lock_set_event;
#define LOCK_SET_EVENT		((event64_t)&lock_set_event)

unsigned int lock_set_handoff;
#define LOCK_SET_HANDOFF	((event64_t)&lock_set_handoff)

/*
 *	ROUTINE:	lock_set_init		[private]
 *
 *	Initialize the lock_set subsystem.
 *
 *	For now, we don't have anything to do here.
 */
void
lock_set_init(void)
{
	return;
}


/*
 *	ROUTINE:	lock_set_create		[exported]
 *
 *	Creates a lock set.
 *	The port representing the lock set is returned as a parameter.
 */
kern_return_t
lock_set_create (
	task_t		task,
	lock_set_t	*new_lock_set,
	int		n_ulocks,
	int		policy)
{
	lock_set_t	lock_set = LOCK_SET_NULL;
	ulock_t		ulock;
	vm_size_t	size;
	int		x;

	*new_lock_set = LOCK_SET_NULL;

	if (task == TASK_NULL || n_ulocks <= 0 || policy > SYNC_POLICY_MAX)
		return KERN_INVALID_ARGUMENT;

	if (VM_MAX_ADDRESS/sizeof(struct ulock) - sizeof(struct lock_set) < (unsigned)n_ulocks)
		return KERN_RESOURCE_SHORTAGE;

	size = sizeof(struct lock_set) + (sizeof(struct ulock) * (n_ulocks - 1));
	lock_set = (lock_set_t) kalloc (size);

	if (lock_set == LOCK_SET_NULL)
		return KERN_RESOURCE_SHORTAGE;

	lock_set_lock_init(lock_set);
	lock_set->n_ulocks = n_ulocks;
	lock_set->ref_count = 1;

	/*
	 * Create and initialize the lock set port
	 */
	lock_set->port = ipc_port_alloc_kernel();
	if (lock_set->port == IP_NULL) {
		/* This will deallocate the lock set */
		lock_set_dereference(lock_set);
		return KERN_RESOURCE_SHORTAGE;
	}

	ipc_kobject_set (lock_set->port,
			(ipc_kobject_t) lock_set,
			IKOT_LOCK_SET);

	/*
	 * Initialize each ulock in the lock set
	 */
	for (x = 0; x < n_ulocks; x++) {
		ulock = (ulock_t) &lock_set->ulock_list[x];
		ulock_lock_init(ulock);
		ulock->lock_set	= lock_set;
		ulock->holder	= THREAD_NULL;
		ulock->blocked	= FALSE;
		ulock->unstable	= FALSE;
		ulock->ho_wait	= FALSE;
		wait_queue_init(&ulock->wait_queue, policy);
	}

	lock_set_ownership_set(lock_set, task);

	lock_set->active = TRUE;
	*new_lock_set = lock_set;

	return KERN_SUCCESS;
}

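/*
 * Usage sketch (illustrative only, not compiled here): how a user task
 * might reach lock_set_create() above through the MIG-generated lock_set
 * interface.  This assumes the conventional user-level prototypes and
 * SYNC_POLICY_FIFO from <mach/sync_policy.h>; error handling is
 * abbreviated.
 *
 *	lock_set_t	lset;
 *	kern_return_t	kr;
 *
 *	kr = lock_set_create(mach_task_self(), &lset, 4, SYNC_POLICY_FIFO);
 *	if (kr != KERN_SUCCESS)
 *		return kr;		(e.g. KERN_RESOURCE_SHORTAGE)
 *
 * The creating task becomes the lock set's owner; only that task may
 * later destroy it (see lock_set_destroy below).
 */
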
/*
 *	ROUTINE:	lock_set_destroy	[exported]
 *
 *	Destroys a lock set.  This call will only succeed if the
 *	specified task is the same task that created the lock set.
 *
 *	NOTES:
 *	- All threads currently blocked on the lock set's ulocks are awoken.
 *	- These threads will return with the KERN_LOCK_SET_DESTROYED error.
 */
kern_return_t
lock_set_destroy (task_t task, lock_set_t lock_set)
{
	ulock_t		ulock;
	int		i;

	if (task == TASK_NULL || lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_set->owner != task)
		return KERN_INVALID_RIGHT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	/*
	 * Deactivate lock set
	 */
	lock_set->active = FALSE;

	/*
	 * If a ulock is currently held in the target lock set:
	 *
	 * 1) Wake up all threads blocked on the ulock (if any).  Threads
	 *    may be blocked waiting normally, or waiting for a handoff.
	 *    Blocked threads will return with KERN_LOCK_SET_DESTROYED.
	 *
	 * 2) ulock ownership is cleared.
	 *    The thread currently holding the ulock has its ownership
	 *    revoked.
	 */
	for (i = 0; i < lock_set->n_ulocks; i++) {
		ulock = &lock_set->ulock_list[i];

		ulock_lock(ulock);

		if (ulock->accept_wait) {
			ulock->accept_wait = FALSE;
			wait_queue_wakeup64_one(&ulock->wait_queue,
						LOCK_SET_HANDOFF,
						THREAD_RESTART);
		}

		if (ulock->holder) {
			if (ulock->blocked) {
				ulock->blocked = FALSE;
				wait_queue_wakeup64_all(&ulock->wait_queue,
							LOCK_SET_EVENT,
							THREAD_RESTART);
			}
			if (ulock->ho_wait) {
				ulock->ho_wait = FALSE;
				wait_queue_wakeup64_one(&ulock->wait_queue,
							LOCK_SET_HANDOFF,
							THREAD_RESTART);
			}
			ulock_ownership_clear(ulock);
		}

		ulock_unlock(ulock);
	}

	lock_set_unlock(lock_set);
	lock_set_ownership_clear(lock_set, task);

	/*
	 * Deallocate
	 *
	 * Drop the lock set reference, which in turn destroys the
	 * lock set structure if the reference count goes to zero.
	 */
	ipc_port_dealloc_kernel(lock_set->port);
	lock_set_dereference(lock_set);

	return KERN_SUCCESS;
}

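/*
 * Illustrative only: a user-level waiter must be prepared for the lock
 * set disappearing underneath it while it is blocked in lock_acquire().
 * A hypothetical caller might handle that as:
 *
 *	kr = lock_acquire(lset, 0);
 *	switch (kr) {
 *	case KERN_SUCCESS:
 *	case KERN_LOCK_UNSTABLE:
 *		break;				(got the ulock)
 *	case KERN_LOCK_SET_DESTROYED:
 *		... stop using lset ...
 *		break;
 *	}
 *
 * The THREAD_RESTART wakeups issued above are what turn into the
 * KERN_LOCK_SET_DESTROYED return seen by those blocked threads.
 */
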
kern_return_t
lock_acquire (lock_set_t lock_set, int lock_id)
{
	ulock_t ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 * Block the current thread if the lock is already held.
	 */
	if (ulock->holder != THREAD_NULL) {
		int wait_result;

		if (ulock->holder == current_thread()) {
			ulock_unlock(ulock);
			return KERN_LOCK_OWNED_SELF;
		}

		ulock->blocked = TRUE;
		wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
						       LOCK_SET_EVENT,
						       THREAD_ABORTSAFE, 0);
		ulock_unlock(ulock);

		/*
		 * Block - Wait for lock to become available.
		 */
		if (wait_result == THREAD_WAITING)
			wait_result = thread_block(THREAD_CONTINUE_NULL);

		/*
		 * Check the result status:
		 *
		 * Check to see why thread was woken up.  In all cases, we
		 * already have been removed from the queue.
		 */
		switch (wait_result) {
		case THREAD_AWAKENED:
			/* lock transitioned from old locker to us */
			/* he already made us owner */
			return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
						   KERN_SUCCESS;

		case THREAD_INTERRUPTED:
			return KERN_ABORTED;

		case THREAD_RESTART:
			goto retry;	/* probably a dead lock_set */

		default:
			panic("lock_acquire\n");
		}
	}

	/*
	 * Assign lock ownership
	 */
	ulock_ownership_set(ulock, current_thread());
	ulock_unlock(ulock);

	return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}

kern_return_t
lock_release (lock_set_t lock_set, int lock_id)
{
	ulock_t ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];

	return (ulock_release_internal(ulock, current_thread()));
}

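/*
 * Usage sketch (illustrative only): the common acquire/release pairing
 * as seen from a user task, assuming the user-level lock_acquire() and
 * lock_release() stubs that front the two routines above.
 *
 *	kr = lock_acquire(lset, 0);
 *	if (kr == KERN_SUCCESS || kr == KERN_LOCK_UNSTABLE) {
 *		... critical section; if kr was KERN_LOCK_UNSTABLE,
 *		    the protected data may be inconsistent ...
 *		(void) lock_release(lset, 0);
 *	}
 *
 * lock_release() fails with KERN_INVALID_RIGHT if the calling thread is
 * not the current holder of the ulock.
 */
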
kern_return_t
lock_try (lock_set_t lock_set, int lock_id)
{
	ulock_t ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 * If the lock is already owned, we return without blocking.
	 *
	 * An ownership status is returned to inform the caller as to
	 * whether it already holds the lock or another thread does.
	 */
	if (ulock->holder != THREAD_NULL) {
		if (ulock->holder == current_thread()) {
			ulock_unlock(ulock);
			return KERN_LOCK_OWNED_SELF;
		}

		ulock_unlock(ulock);
		return KERN_LOCK_OWNED;
	}

	/*
	 * Add the ulock to the current thread's held_ulocks list.
	 */
	ulock_ownership_set(ulock, current_thread());
	ulock_unlock(ulock);

	return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}

kern_return_t
lock_make_stable (lock_set_t lock_set, int lock_id)
{
	ulock_t ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != current_thread()) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	ulock->unstable = FALSE;
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}

/*
 *	ROUTINE:	lock_make_unstable	[internal]
 *
 *	Marks the lock as unstable.
 *
 *	NOTES:
 *	- All future acquisitions of the lock will return with a
 *	  KERN_LOCK_UNSTABLE status, until the lock is made stable again.
 */
kern_return_t
lock_make_unstable (ulock_t ulock, thread_t thread)
{
	lock_set_t lock_set;

	lock_set = ulock->lock_set;
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != thread) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	ulock->unstable = TRUE;
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}

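/*
 * Illustrative only: the unstable state matters to the next acquirer.
 * When a holder terminates, ulock_release_all() below marks its ulocks
 * unstable before releasing them, so a hypothetical user-level acquirer
 * would typically do something like:
 *
 *	kr = lock_acquire(lset, 0);
 *	if (kr == KERN_LOCK_UNSTABLE) {
 *		... repair or re-initialize the protected state ...
 *		(void) lock_make_stable(lset, 0);
 *	}
 *
 * lock_make_stable() (above) clears the unstable flag only when called
 * by the current holder.
 */
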
/*
 *	ROUTINE:	ulock_release_internal	[internal]
 *
 *	Releases the ulock.
 *	If any threads are blocked waiting for the ulock, one is woken up.
 *
 */
kern_return_t
ulock_release_internal (ulock_t ulock, thread_t thread)
{
	lock_set_t lock_set;

	if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != thread) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	/*
	 * If we have a hint that threads might be waiting,
	 * try to transfer the lock ownership to a waiting thread
	 * and wake it up.
	 */
	if (ulock->blocked) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	wqthread;
		spl_t		s;

		s = splsched();
		wait_queue_lock(wq);
		wqthread = wait_queue_wakeup64_identity_locked(wq,
							LOCK_SET_EVENT,
							THREAD_AWAKENED,
							TRUE);
		/* wait_queue now unlocked, thread locked */

		if (wqthread != THREAD_NULL) {
			/*
			 * JMM - These ownership transfer macros have a
			 * locking/race problem.  To keep the thread from
			 * changing states on us (nullifying the ownership
			 * assignment) we need to keep the thread locked
			 * during the assignment.  But we can't because the
			 * macros take an activation lock, which is a mutex.
			 * Since this code was already broken before I got
			 * here, I will leave it for now.
			 */
			thread_unlock(wqthread);
			splx(s);

			/*
			 * Transfer ulock ownership
			 * from the current thread to the acquisition thread.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, wqthread);
			ulock_unlock(ulock);

			return KERN_SUCCESS;
		} else {
			ulock->blocked = FALSE;
			splx(s);
		}
	}

	/*
	 * Disown ulock
	 */
	ulock_ownership_clear(ulock);
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}

kern_return_t
lock_handoff (lock_set_t lock_set, int lock_id)
{
	ulock_t ulock;
	int wait_result;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);

	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != current_thread()) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	/*
	 * If the accepting thread (the receiver) is already waiting
	 * to accept the lock from the handoff thread (the sender),
	 * then perform the hand-off now.
	 */
	if (ulock->accept_wait) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	thread;
		spl_t		s;

		/*
		 * See who the lucky devil is, if he is still there waiting.
		 */
		s = splsched();
		wait_queue_lock(wq);
		thread = wait_queue_wakeup64_identity_locked(
				wq,
				LOCK_SET_HANDOFF,
				THREAD_AWAKENED,
				TRUE);
		/* wait queue unlocked, thread locked */

		/*
		 * Transfer lock ownership
		 */
		if (thread != THREAD_NULL) {
			/*
			 * JMM - These ownership transfer macros have a
			 * locking/race problem.  To keep the thread from
			 * changing states on us (nullifying the ownership
			 * assignment) we need to keep the thread locked
			 * during the assignment.  But we can't because the
			 * macros take a thread mutex lock.
			 *
			 * Since this code was already broken before I got
			 * here, I will leave it for now.
			 */
			thread_unlock(thread);
			splx(s);

			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, thread);
			ulock->accept_wait = FALSE;
			ulock_unlock(ulock);
			return KERN_SUCCESS;
		} else {

			/*
			 * OOPS.  The accepting thread must have been aborted
			 * and is racing back to clear the flag that says it
			 * is waiting for an accept.  He will clear it when we
			 * release the lock, so just fall through and wait for
			 * the next accept thread (that's the way it is
			 * specified).
			 */
			splx(s);
		}
	}

	/*
	 * Indicate that there is a hand-off thread waiting, and then wait
	 * for an accepting thread.
	 */
	ulock->ho_wait = TRUE;
	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
					       LOCK_SET_HANDOFF,
					       THREAD_ABORTSAFE, 0);
	ulock_unlock(ulock);

	if (wait_result == THREAD_WAITING)
		wait_result = thread_block(THREAD_CONTINUE_NULL);

	/*
	 * If the thread was woken up via some action other than
	 * lock_handoff_accept or lock_set_destroy (e.g. thread_terminate),
	 * then we need to clear the ulock's handoff state.
	 */
	switch (wait_result) {

	case THREAD_AWAKENED:
		return KERN_SUCCESS;

	case THREAD_INTERRUPTED:
		ulock_lock(ulock);
		assert(ulock->holder == current_thread());
		ulock->ho_wait = FALSE;
		ulock_unlock(ulock);
		return KERN_ABORTED;

	case THREAD_RESTART:
		goto retry;
	}

	panic("lock_handoff");
	return KERN_FAILURE;
}

kern_return_t
lock_handoff_accept (lock_set_t lock_set, int lock_id)
{
	ulock_t ulock;
	int wait_result;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 * If there is another accepting thread that beat us, just
	 * return with an error.
	 */
	if (ulock->accept_wait) {
		ulock_unlock(ulock);
		return KERN_ALREADY_WAITING;
	}

	if (ulock->holder == current_thread()) {
		ulock_unlock(ulock);
		return KERN_LOCK_OWNED_SELF;
	}

	/*
	 * If the handoff thread (the sender) is already waiting to
	 * hand-off the lock to the accepting thread (the receiver),
	 * then perform the hand-off now.
	 */
	if (ulock->ho_wait) {
		wait_queue_t wq = &ulock->wait_queue;

		/*
		 * See who the lucky devil is, if he is still there waiting.
		 */
		assert(ulock->holder != THREAD_NULL);

		if (wait_queue_wakeup64_thread(wq,
					       LOCK_SET_HANDOFF,
					       ulock->holder,
					       THREAD_AWAKENED) == KERN_SUCCESS) {
			/*
			 * Holder thread was still waiting to give it
			 * away.  Take over ownership.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, current_thread());
			ulock->ho_wait = FALSE;
			ulock_unlock(ulock);
			return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
						   KERN_SUCCESS;
		}

		/*
		 * OOPS.  The owner was aborted out of the handoff.
		 * He will clear his own flag when he gets back.
		 * In the meantime, we will wait as if we didn't
		 * even see his flag (by falling through).
		 */
	}

	ulock->accept_wait = TRUE;
	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
					       LOCK_SET_HANDOFF,
					       THREAD_ABORTSAFE, 0);
	ulock_unlock(ulock);

	if (wait_result == THREAD_WAITING)
		wait_result = thread_block(THREAD_CONTINUE_NULL);

	/*
	 * If the thread was woken up via some action other than
	 * lock_handoff or lock_set_destroy (e.g. thread_terminate),
	 * then we need to clear the ulock's handoff state.
	 */
	switch (wait_result) {

	case THREAD_AWAKENED:
		return KERN_SUCCESS;

	case THREAD_INTERRUPTED:
		ulock_lock(ulock);
		ulock->accept_wait = FALSE;
		ulock_unlock(ulock);
		return KERN_ABORTED;

	case THREAD_RESTART:
		goto retry;
	}

	panic("lock_handoff_accept");
	return KERN_FAILURE;
}

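/*
 * Usage sketch (illustrative only): the handoff pair as seen from user
 * space, assuming the user-level lock_handoff()/lock_handoff_accept()
 * stubs that front the two routines above.  Either side may block until
 * its partner arrives.
 *
 *	current holder (sender):
 *		kr = lock_handoff(lset, 0);
 *
 *	designated receiver:
 *		kr = lock_handoff_accept(lset, 0);
 *
 * On success the receiver returns holding the ulock and the sender no
 * longer owns it.  A second concurrent receiver gets KERN_ALREADY_WAITING,
 * and an aborted wait on either side returns KERN_ABORTED with the
 * handoff state cleared.
 */
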
/*
 *	Routine:	lock_set_reference
 *
 *	Take out a reference on a lock set.  This keeps the data structure
 *	in existence (but the lock set may be deactivated).
 */
void
lock_set_reference(lock_set_t lock_set)
{
	lock_set_lock(lock_set);
	lock_set->ref_count++;
	lock_set_unlock(lock_set);
}

/*
 *	Routine:	lock_set_dereference
 *
 *	Release a reference on a lock set.  If this is the last reference,
 *	the lock set data structure is deallocated.
 */
void
lock_set_dereference(lock_set_t lock_set)
{
	int	ref_count;
	int	size;

	lock_set_lock(lock_set);
	ref_count = --(lock_set->ref_count);
	lock_set_unlock(lock_set);

	if (ref_count == 0) {
		size = sizeof(struct lock_set) +
			(sizeof(struct ulock) * (lock_set->n_ulocks - 1));
		kfree(lock_set, size);
	}
}

void
ulock_release_all(
	thread_t		thread)
{
	ulock_t	ulock;

	while (!queue_empty(&thread->held_ulocks)) {
		ulock = (ulock_t)queue_first(&thread->held_ulocks);
		lock_make_unstable(ulock, thread);
		ulock_release_internal(ulock, thread);
	}
}