/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */
/*
 *	File:	kern/sync_lock.c
 *	Author:	Joseph CaraDonna
 *
 *	Contains RT distributed lock synchronization services.
 */

#include <mach/mach_types.h>
#include <mach/lock_set_server.h>
#include <mach/task_server.h>

#include <kern/misc_protos.h>
#include <kern/kalloc.h>
#include <kern/sync_lock.h>
#include <kern/sched_prim.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_sync.h>
#include <kern/thread.h>
#include <kern/task.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

/*
 *	Ulock ownership MACROS
 *
 *	Assumes: ulock internal lock is held
 */

#define ulock_ownership_set(ul, th)					\
	MACRO_BEGIN							\
	thread_mtx_lock(th);						\
	enqueue (&th->held_ulocks, (queue_entry_t) (ul));		\
	thread_mtx_unlock(th);						\
	(ul)->holder = th;						\
	MACRO_END

#define ulock_ownership_clear(ul)					\
	MACRO_BEGIN							\
	thread_t th;							\
	th = (ul)->holder;						\
	if (th->active) {						\
		thread_mtx_lock(th);					\
		remqueue(&th->held_ulocks,				\
			 (queue_entry_t) (ul));				\
		thread_mtx_unlock(th);					\
	} else {							\
		remqueue(&th->held_ulocks,				\
			 (queue_entry_t) (ul));				\
	}								\
	(ul)->holder = THREAD_NULL;					\
	MACRO_END

/*
 *	Lock set ownership MACROS
 */

#define lock_set_ownership_set(ls, t)					\
	MACRO_BEGIN							\
	task_lock((t));							\
	enqueue_head(&(t)->lock_set_list, (queue_entry_t) (ls));	\
	(t)->lock_sets_owned++;						\
	task_unlock((t));						\
	(ls)->owner = (t);						\
	MACRO_END

#define lock_set_ownership_clear(ls, t)					\
	MACRO_BEGIN							\
	task_lock((t));							\
	remqueue(&(t)->lock_set_list, (queue_entry_t) (ls));		\
	(t)->lock_sets_owned--;						\
	task_unlock((t));						\
	MACRO_END

unsigned int lock_set_event;
#define LOCK_SET_EVENT ((event64_t)&lock_set_event)

unsigned int lock_set_handoff;
#define LOCK_SET_HANDOFF ((event64_t)&lock_set_handoff)
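
/*
 *	Rough usage sketch (illustrative only): user tasks reach these
 *	services through the MIG-generated lock_set interface (user-side
 *	declarations assumed to be in <mach/lock_set.h>), along the
 *	lines of:
 *
 *		lock_set_t	lock_set;
 *
 *		lock_set_create(mach_task_self(), &lock_set, 4,
 *				SYNC_POLICY_FIFO);
 *		lock_acquire(lock_set, 0);   -- block until ulock 0 is held
 *		   ... critical section ...
 *		lock_release(lock_set, 0);   -- hand off to a waiter, if any
 *		lock_set_destroy(mach_task_self(), lock_set);
 */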

/*
 *	ROUTINE:	lock_set_init		[private]
 *
 *	Initialize the lock_set subsystem.
 *
 *	For now, we don't have anything to do here.
 */
void
lock_set_init(void)
{
	return;
}


/*
 *	ROUTINE:	lock_set_create		[exported]
 *
 *	Creates a lock set.
 *	The port representing the lock set is returned as a parameter.
 */
kern_return_t
lock_set_create (
	task_t		task,
	lock_set_t	*new_lock_set,
	int		n_ulocks,
	int		policy)
{
	lock_set_t	lock_set = LOCK_SET_NULL;
	ulock_t		ulock;
	vm_size_t	size;
	int		x;

	*new_lock_set = LOCK_SET_NULL;

	if (task == TASK_NULL || n_ulocks <= 0 || policy > SYNC_POLICY_MAX)
		return KERN_INVALID_ARGUMENT;

	if (VM_MAX_ADDRESS/sizeof(struct ulock) - sizeof(struct lock_set) < (unsigned)n_ulocks)
		return KERN_RESOURCE_SHORTAGE;

	size = sizeof(struct lock_set) + (sizeof(struct ulock) * (n_ulocks-1));
	lock_set = (lock_set_t) kalloc (size);

	if (lock_set == LOCK_SET_NULL)
		return KERN_RESOURCE_SHORTAGE;

	lock_set_lock_init(lock_set);
	lock_set->n_ulocks = n_ulocks;
	lock_set->ref_count = 1;

	/*
	 * Create and initialize the lock set port
	 */
	lock_set->port = ipc_port_alloc_kernel();
	if (lock_set->port == IP_NULL) {
		/* This will deallocate the lock set */
		lock_set_dereference(lock_set);
		return KERN_RESOURCE_SHORTAGE;
	}

	ipc_kobject_set (lock_set->port,
			 (ipc_kobject_t) lock_set,
			 IKOT_LOCK_SET);

	/*
	 * Initialize each ulock in the lock set
	 */

	for (x=0; x < n_ulocks; x++) {
		ulock = (ulock_t) &lock_set->ulock_list[x];
		ulock_lock_init(ulock);
		ulock->lock_set = lock_set;
		ulock->holder	= THREAD_NULL;
		ulock->blocked	= FALSE;
		ulock->unstable	= FALSE;
		ulock->ho_wait	= FALSE;
		wait_queue_init(&ulock->wait_queue, policy);
	}

	lock_set_ownership_set(lock_set, task);

	lock_set->active = TRUE;
	*new_lock_set = lock_set;

	return KERN_SUCCESS;
}

/*
 *	ROUTINE:	lock_set_destroy	[exported]
 *
 *	Destroys a lock set.  This call will only succeed if the
 *	specified task is the SAME task name specified at the lock set's
 *	creation.
 *
 *	NOTES:
 *	- All threads currently blocked on the lock set's ulocks are awoken.
 *	- These threads will return with the KERN_LOCK_SET_DESTROYED error.
 */
kern_return_t
lock_set_destroy (task_t task, lock_set_t lock_set)
{
	ulock_t		ulock;
	int		i;

	if (task == TASK_NULL || lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_set->owner != task)
		return KERN_INVALID_RIGHT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	/*
	 * Deactivate lock set
	 */
	lock_set->active = FALSE;

	/*
	 * If a ulock is currently held in the target lock set:
	 *
	 * 1) Wakeup all threads blocked on the ulock (if any).  Threads
	 *    may be blocked waiting normally, or waiting for a handoff.
	 *    Blocked threads will return with KERN_LOCK_SET_DESTROYED.
	 *
	 * 2) ulock ownership is cleared.
	 *    The thread currently holding the ulock is revoked of its
	 *    ownership.
	 */
	for (i = 0; i < lock_set->n_ulocks; i++) {
		ulock = &lock_set->ulock_list[i];

		ulock_lock(ulock);

		if (ulock->accept_wait) {
			ulock->accept_wait = FALSE;
			wait_queue_wakeup64_one(&ulock->wait_queue,
						LOCK_SET_HANDOFF,
						THREAD_RESTART);
		}

		if (ulock->holder) {
			if (ulock->blocked) {
				ulock->blocked = FALSE;
				wait_queue_wakeup64_all(&ulock->wait_queue,
							LOCK_SET_EVENT,
							THREAD_RESTART);
			}
			if (ulock->ho_wait) {
				ulock->ho_wait = FALSE;
				wait_queue_wakeup64_one(&ulock->wait_queue,
							LOCK_SET_HANDOFF,
							THREAD_RESTART);
			}
			ulock_ownership_clear(ulock);
		}

		ulock_unlock(ulock);
	}

	lock_set_unlock(lock_set);
	lock_set_ownership_clear(lock_set, task);

	/*
	 * Deallocate
	 *
	 * Drop the lock set reference, which in turn destroys the
	 * lock set structure if the reference count goes to zero.
	 */

	ipc_port_dealloc_kernel(lock_set->port);
	lock_set_dereference(lock_set);

	return KERN_SUCCESS;
}
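
/*
 *	ROUTINE:	lock_acquire		[exported]
 *
 *	Acquires the given ulock in the lock set, blocking (ABORTSAFE)
 *	until it becomes available.  Returns KERN_LOCK_OWNED_SELF if the
 *	caller already holds it, and KERN_LOCK_UNSTABLE instead of
 *	KERN_SUCCESS if the ulock has been marked unstable (see
 *	lock_make_unstable).
 */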
kern_return_t
lock_acquire (lock_set_t lock_set, int lock_id)
{
	ulock_t   ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 * Block the current thread if the lock is already held.
	 */

	if (ulock->holder != THREAD_NULL) {
		int wait_result;

		if (ulock->holder == current_thread()) {
			ulock_unlock(ulock);
			return KERN_LOCK_OWNED_SELF;
		}

		ulock->blocked = TRUE;
		wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
					LOCK_SET_EVENT,
					THREAD_ABORTSAFE, 0);
		ulock_unlock(ulock);

		/*
		 * Block - Wait for lock to become available.
		 */
		if (wait_result == THREAD_WAITING)
			wait_result = thread_block(THREAD_CONTINUE_NULL);

		/*
		 * Check the result status:
		 *
		 * Check to see why thread was woken up.  In all cases, we
		 * already have been removed from the queue.
		 */
		switch (wait_result) {
		case THREAD_AWAKENED:
			/* lock transitioned from old locker to us */
			/* he already made us owner */
			return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
						   KERN_SUCCESS;

		case THREAD_INTERRUPTED:
			return KERN_ABORTED;

		case THREAD_RESTART:
			goto retry;  /* probably a dead lock_set */

		default:
			panic("lock_acquire\n");
		}
	}

	/*
	 * Assign lock ownership
	 */
	ulock_ownership_set(ulock, current_thread());
	ulock_unlock(ulock);

	return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}
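
/*
 *	ROUTINE:	lock_release		[exported]
 *
 *	Releases the given ulock on behalf of the current thread.
 *	The actual work, including handing ownership to a waiting
 *	thread, is done by ulock_release_internal.
 */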
kern_return_t
lock_release (lock_set_t lock_set, int lock_id)
{
	ulock_t  ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];

	return (ulock_release_internal(ulock, current_thread()));
}
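
/*
 *	ROUTINE:	lock_try		[exported]
 *
 *	Attempts to acquire the given ulock without blocking.
 *	Returns KERN_LOCK_OWNED if another thread holds it, or
 *	KERN_LOCK_OWNED_SELF if the caller already does.
 */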
kern_return_t
lock_try (lock_set_t lock_set, int lock_id)
{
	ulock_t  ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 * If the lock is already owned, we return without blocking.
	 *
	 * An ownership status is returned to inform the caller as to
	 * whether it already holds the lock or another thread does.
	 */

	if (ulock->holder != THREAD_NULL) {
		if (ulock->holder == current_thread()) {
			ulock_unlock(ulock);
			return KERN_LOCK_OWNED_SELF;
		}

		ulock_unlock(ulock);
		return KERN_LOCK_OWNED;
	}

	/*
	 * Add the ulock to the current thread's held_ulocks list.
	 */

	ulock_ownership_set(ulock, current_thread());
	ulock_unlock(ulock);

	return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}
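
/*
 *	ROUTINE:	lock_make_stable	[exported]
 *
 *	Clears the unstable flag on a ulock held by the caller, so
 *	subsequent acquisitions no longer return KERN_LOCK_UNSTABLE.
 */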
kern_return_t
lock_make_stable (lock_set_t lock_set, int lock_id)
{
	ulock_t  ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != current_thread()) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	ulock->unstable = FALSE;
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}

/*
 *	ROUTINE:	lock_make_unstable	[internal]
 *
 *	Marks the lock as unstable.
 *
 *	NOTES:
 *	- All future acquisitions of the lock will return with a
 *	  KERN_LOCK_UNSTABLE status, until the lock is made stable again.
 */
kern_return_t
lock_make_unstable (ulock_t ulock, thread_t thread)
{
	lock_set_t	lock_set;

	lock_set = ulock->lock_set;
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != thread) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	ulock->unstable = TRUE;
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}

/*
 *	ROUTINE:	ulock_release_internal	[internal]
 *
 *	Releases the ulock.
 *	If any threads are blocked waiting for the ulock, one is woken-up.
 *
 */
kern_return_t
ulock_release_internal (ulock_t ulock, thread_t thread)
{
	lock_set_t	lock_set;

	if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != thread) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	/*
	 * If we have a hint that threads might be waiting,
	 * try to transfer the lock ownership to a waiting thread
	 * and wake it up.
	 */
	if (ulock->blocked) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	wqthread;
		spl_t		s;

		s = splsched();
		wait_queue_lock(wq);
		wqthread = wait_queue_wakeup64_identity_locked(wq,
							LOCK_SET_EVENT,
							THREAD_AWAKENED,
							TRUE);
		/* wait_queue now unlocked, thread locked */

		if (wqthread != THREAD_NULL) {
			/*
			 * JMM - These ownership transfer macros have a
			 * locking/race problem.  To keep the thread from
			 * changing states on us (nullifying the ownership
			 * assignment) we need to keep the thread locked
			 * during the assignment.  But we can't because the
			 * macros take an activation lock, which is a mutex.
			 * Since this code was already broken before I got
			 * here, I will leave it for now.
			 */
			thread_unlock(wqthread);
			splx(s);

			/*
			 * Transfer ulock ownership
			 * from the current thread to the acquisition thread.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, wqthread);
			ulock_unlock(ulock);

			return KERN_SUCCESS;
		} else {
			ulock->blocked = FALSE;
			splx(s);
		}
	}

	/*
	 * Disown ulock
	 */
	ulock_ownership_clear(ulock);
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}
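
/*
 *	ROUTINE:	lock_handoff		[exported]
 *
 *	Hands the caller's ulock directly to an accepting thread.
 *	If no accepting thread is waiting yet, the caller blocks
 *	(ABORTSAFE) until one arrives via lock_handoff_accept.
 *	The caller must hold the ulock (KERN_INVALID_RIGHT otherwise).
 */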
kern_return_t
lock_handoff (lock_set_t lock_set, int lock_id)
{
	ulock_t   ulock;
	int	  wait_result;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);

	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != current_thread()) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	/*
	 * If the accepting thread (the receiver) is already waiting
	 * to accept the lock from the handoff thread (the sender),
	 * then perform the hand-off now.
	 */

	if (ulock->accept_wait) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	thread;
		spl_t		s;

		/*
		 * See who the lucky devil is, if he is still there waiting.
		 */
		s = splsched();
		wait_queue_lock(wq);
		thread = wait_queue_wakeup64_identity_locked(
					wq,
					LOCK_SET_HANDOFF,
					THREAD_AWAKENED,
					TRUE);
		/* wait queue unlocked, thread locked */

		/*
		 * Transfer lock ownership
		 */
		if (thread != THREAD_NULL) {
			/*
			 * JMM - These ownership transfer macros have a
			 * locking/race problem.  To keep the thread from
			 * changing states on us (nullifying the ownership
			 * assignment) we need to keep the thread locked
			 * during the assignment.  But we can't because the
			 * macros take a thread mutex lock.
			 *
			 * Since this code was already broken before I got
			 * here, I will leave it for now.
			 */
			thread_unlock(thread);
			splx(s);

			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, thread);
			ulock->accept_wait = FALSE;
			ulock_unlock(ulock);
			return KERN_SUCCESS;
		} else {

			/*
			 * OOPS.  The accepting thread must have been aborted
			 * and is racing back to clear the flag that says it
			 * is waiting for an accept.  He will clear it when we
			 * release the lock, so just fall thru and wait for
			 * the next accept thread (that's the way it is
			 * specified).
			 */
			splx(s);
		}
	}

	/*
	 * Indicate that there is a hand-off thread waiting, and then wait
	 * for an accepting thread.
	 */
	ulock->ho_wait = TRUE;
	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
					LOCK_SET_HANDOFF,
					THREAD_ABORTSAFE, 0);
	ulock_unlock(ulock);

	if (wait_result == THREAD_WAITING)
		wait_result = thread_block(THREAD_CONTINUE_NULL);

	/*
	 * If the thread was woken-up via some action other than
	 * lock_handoff_accept or lock_set_destroy (i.e. thread_terminate),
	 * then we need to clear the ulock's handoff state.
	 */
	switch (wait_result) {

	case THREAD_AWAKENED:
		return KERN_SUCCESS;

	case THREAD_INTERRUPTED:
		ulock_lock(ulock);
		assert(ulock->holder == current_thread());
		ulock->ho_wait = FALSE;
		ulock_unlock(ulock);
		return KERN_ABORTED;

	case THREAD_RESTART:
		goto retry;
	}

	panic("lock_handoff");
	return KERN_FAILURE;
}
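
/*
 *	ROUTINE:	lock_handoff_accept	[exported]
 *
 *	Accepts a ulock handed off via lock_handoff.  If no handoff
 *	thread is waiting yet, the caller blocks (ABORTSAFE) until the
 *	holder offers the lock.  Only one accepting thread may wait on
 *	a ulock at a time (KERN_ALREADY_WAITING otherwise).
 */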
kern_return_t
lock_handoff_accept (lock_set_t lock_set, int lock_id)
{
	ulock_t   ulock;
	int	  wait_result;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 * If there is another accepting thread that beat us, just
	 * return with an error.
	 */
	if (ulock->accept_wait) {
		ulock_unlock(ulock);
		return KERN_ALREADY_WAITING;
	}

	if (ulock->holder == current_thread()) {
		ulock_unlock(ulock);
		return KERN_LOCK_OWNED_SELF;
	}

	/*
	 * If the handoff thread (the sender) is already waiting to
	 * hand-off the lock to the accepting thread (the receiver),
	 * then perform the hand-off now.
	 */
	if (ulock->ho_wait) {
		wait_queue_t	wq = &ulock->wait_queue;

		/*
		 * See who the lucky devil is, if he is still there waiting.
		 */
		assert(ulock->holder != THREAD_NULL);

		if (wait_queue_wakeup64_thread(wq,
					LOCK_SET_HANDOFF,
					ulock->holder,
					THREAD_AWAKENED) == KERN_SUCCESS) {
			/*
			 * Holder thread was still waiting to give it
			 * away.  Take over ownership.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, current_thread());
			ulock->ho_wait = FALSE;
			ulock_unlock(ulock);
			return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
						   KERN_SUCCESS;
		}

		/*
		 * OOPS.  The owner was aborted out of the handoff.
		 * He will clear his own flag when he gets back.
		 * In the meantime, we will wait as if we didn't
		 * even see his flag (by falling thru).
		 */
	}

	ulock->accept_wait = TRUE;
	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
					LOCK_SET_HANDOFF,
					THREAD_ABORTSAFE, 0);
	ulock_unlock(ulock);

	if (wait_result == THREAD_WAITING)
		wait_result = thread_block(THREAD_CONTINUE_NULL);

	/*
	 * If the thread was woken-up via some action other than
	 * lock_handoff_accept or lock_set_destroy (i.e. thread_terminate),
	 * then we need to clear the ulock's handoff state.
	 */
	switch (wait_result) {

	case THREAD_AWAKENED:
		return KERN_SUCCESS;

	case THREAD_INTERRUPTED:
		ulock_lock(ulock);
		ulock->accept_wait = FALSE;
		ulock_unlock(ulock);
		return KERN_ABORTED;

	case THREAD_RESTART:
		goto retry;
	}

	panic("lock_handoff_accept");
	return KERN_FAILURE;
}

/*
 *	Routine:	lock_set_reference
 *
 *	Take out a reference on a lock set.  This keeps the data structure
 *	in existence (but the lock set may be deactivated).
 */
void
lock_set_reference(lock_set_t lock_set)
{
	lock_set_lock(lock_set);
	lock_set->ref_count++;
	lock_set_unlock(lock_set);
}

/*
 *	Routine:	lock_set_dereference
 *
 *	Release a reference on a lock set.  If this is the last reference,
 *	the lock set data structure is deallocated.
 */
void
lock_set_dereference(lock_set_t lock_set)
{
	int	ref_count;
	int	size;

	lock_set_lock(lock_set);
	ref_count = --(lock_set->ref_count);
	lock_set_unlock(lock_set);

	if (ref_count == 0) {
		size =	sizeof(struct lock_set) +
			(sizeof(struct ulock) * (lock_set->n_ulocks - 1));
		kfree(lock_set, size);
	}
}
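
/*
 *	Routine:	ulock_release_all
 *
 *	Releases every ulock still on the given thread's held_ulocks
 *	list, marking each one unstable first so that later acquirers
 *	see KERN_LOCK_UNSTABLE.
 */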
void
ulock_release_all(
	thread_t		thread)
{
	ulock_t		ulock;

	while (!queue_empty(&thread->held_ulocks)) {
		ulock = (ulock_t)queue_first(&thread->held_ulocks);
		lock_make_unstable(ulock, thread);
		ulock_release_internal(ulock, thread);
	}
}