/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */
/*
 *	File:	kern/sync_lock.c
 *	Author:	Joseph CaraDonna
 *
 *	Contains RT distributed lock synchronization services.
 */

#include <mach/mach_types.h>
#include <mach/lock_set_server.h>
#include <mach/task_server.h>

#include <kern/misc_protos.h>
#include <kern/kalloc.h>
#include <kern/sync_lock.h>
#include <kern/sched_prim.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_sync.h>
#include <kern/thread.h>
#include <kern/task.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

/*
 *	Ulock ownership MACROS
 *
 *	Assumes: ulock internal lock is held
 */

#define ulock_ownership_set(ul, th)				\
	MACRO_BEGIN						\
	thread_mtx_lock(th);					\
	enqueue (&th->held_ulocks, (queue_entry_t) (ul));	\
	thread_mtx_unlock(th);					\
	(ul)->holder = th;					\
	MACRO_END

#define ulock_ownership_clear(ul)				\
	MACRO_BEGIN						\
	thread_t th;						\
	th = (ul)->holder;					\
	if (th->active) {					\
		thread_mtx_lock(th);				\
		remqueue(&th->held_ulocks,			\
			 (queue_entry_t) (ul));			\
		thread_mtx_unlock(th);				\
	} else {						\
		remqueue(&th->held_ulocks,			\
			 (queue_entry_t) (ul));			\
	}							\
	(ul)->holder = THREAD_NULL;				\
	MACRO_END

/*
 *	Lock set ownership MACROS
 */

#define lock_set_ownership_set(ls, t)				\
	MACRO_BEGIN						\
	task_lock((t));						\
	enqueue_head(&(t)->lock_set_list, (queue_entry_t) (ls));\
	(t)->lock_sets_owned++;					\
	task_unlock((t));					\
	(ls)->owner = (t);					\
	MACRO_END

#define lock_set_ownership_clear(ls, t)				\
	MACRO_BEGIN						\
	task_lock((t));						\
	remqueue(&(t)->lock_set_list, (queue_entry_t) (ls));	\
	(t)->lock_sets_owned--;					\
	task_unlock((t));					\
	MACRO_END

unsigned int lock_set_event;
#define LOCK_SET_EVENT ((event64_t)&lock_set_event)

unsigned int lock_set_handoff;
#define LOCK_SET_HANDOFF ((event64_t)&lock_set_handoff)

/*
 *	ROUTINE:	lock_set_init		[private]
 *
 *	Initialize the lock_set subsystem.
 *
 *	For now, we don't have anything to do here.
 */
void
lock_set_init(void)
{
	return;
}

/*
 *	ROUTINE:	lock_set_create		[exported]
 *
 *	Creates a lock set.
 *	The port representing the lock set is returned as a parameter.
 */
kern_return_t
lock_set_create (
	task_t		task,
	lock_set_t	*new_lock_set,
	int		n_ulocks,
	int		policy)
{
	lock_set_t	lock_set = LOCK_SET_NULL;
	ulock_t		ulock;
	int		size;
	int		x;

	*new_lock_set = LOCK_SET_NULL;

	if (task == TASK_NULL || n_ulocks <= 0 || policy > SYNC_POLICY_MAX)
		return KERN_INVALID_ARGUMENT;

	size = sizeof(struct lock_set) + (sizeof(struct ulock) * (n_ulocks-1));
	lock_set = (lock_set_t) kalloc (size);

	if (lock_set == LOCK_SET_NULL)
		return KERN_RESOURCE_SHORTAGE;

	lock_set_lock_init(lock_set);
	lock_set->n_ulocks = n_ulocks;
	lock_set->ref_count = 1;

	/*
	 * Create and initialize the lock set port
	 */
	lock_set->port = ipc_port_alloc_kernel();
	if (lock_set->port == IP_NULL) {
		/* This will deallocate the lock set */
		lock_set_dereference(lock_set);
		return KERN_RESOURCE_SHORTAGE;
	}

	ipc_kobject_set (lock_set->port,
			 (ipc_kobject_t) lock_set,
			 IKOT_LOCK_SET);

	/*
	 * Initialize each ulock in the lock set
	 */
	for (x = 0; x < n_ulocks; x++) {
		ulock = (ulock_t) &lock_set->ulock_list[x];
		ulock_lock_init(ulock);
		ulock->lock_set = lock_set;
		ulock->holder = THREAD_NULL;
		ulock->blocked = FALSE;
		ulock->unstable = FALSE;
		ulock->ho_wait = FALSE;
		wait_queue_init(&ulock->wait_queue, policy);
	}

	lock_set_ownership_set(lock_set, task);

	lock_set->active = TRUE;
	*new_lock_set = lock_set;

	return KERN_SUCCESS;
}
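
/*
 * Usage sketch (illustrative only, not part of this file): a user task
 * normally reaches this routine through the MIG-generated call declared
 * in <mach/lock_set.h>, e.g.:
 *
 *	lock_set_t	ls;
 *	kern_return_t	kr;
 *
 *	kr = lock_set_create(mach_task_self(), &ls, 4, SYNC_POLICY_FIFO);
 *	if (kr != KERN_SUCCESS)
 *		... handle KERN_INVALID_ARGUMENT or KERN_RESOURCE_SHORTAGE ...
 */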

/*
 *	ROUTINE:	lock_set_destroy	[exported]
 *
 *	Destroys a lock set.  This call will only succeed if the
 *	specified task is the same task that created the lock set
 *	(i.e. the lock set's owner).
 *
 *	NOTES:
 *	- All threads currently blocked on the lock set's ulocks are awoken.
 *	- These threads will return with the KERN_LOCK_SET_DESTROYED error.
 */
kern_return_t
lock_set_destroy (task_t task, lock_set_t lock_set)
{
	ulock_t	ulock;
	int	i;

	if (task == TASK_NULL || lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_set->owner != task)
		return KERN_INVALID_RIGHT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	/*
	 * Deactivate lock set
	 */
	lock_set->active = FALSE;

	/*
	 * If a ulock is currently held in the target lock set:
	 *
	 * 1) Wakeup all threads blocked on the ulock (if any).  Threads
	 *    may be blocked waiting normally, or waiting for a handoff.
	 *    Blocked threads will return with KERN_LOCK_SET_DESTROYED.
	 *
	 * 2) ulock ownership is cleared.
	 *    The thread currently holding the ulock is revoked of its
	 *    ownership.
	 */
	for (i = 0; i < lock_set->n_ulocks; i++) {
		ulock = &lock_set->ulock_list[i];

		ulock_lock(ulock);

		if (ulock->accept_wait) {
			ulock->accept_wait = FALSE;
			wait_queue_wakeup64_one(&ulock->wait_queue,
						LOCK_SET_HANDOFF,
						THREAD_RESTART);
		}

		if (ulock->holder) {
			if (ulock->blocked) {
				ulock->blocked = FALSE;
				wait_queue_wakeup64_all(&ulock->wait_queue,
							LOCK_SET_EVENT,
							THREAD_RESTART);
			}
			if (ulock->ho_wait) {
				ulock->ho_wait = FALSE;
				wait_queue_wakeup64_one(&ulock->wait_queue,
							LOCK_SET_HANDOFF,
							THREAD_RESTART);
			}
			ulock_ownership_clear(ulock);
		}

		ulock_unlock(ulock);
	}

	lock_set_unlock(lock_set);
	lock_set_ownership_clear(lock_set, task);

	/*
	 * Deallocate
	 *
	 * Drop the lock set reference, which in turn destroys the
	 * lock set structure if the reference count goes to zero.
	 */
	ipc_port_dealloc_kernel(lock_set->port);
	lock_set_dereference(lock_set);

	return KERN_SUCCESS;
}
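
/*
 * Usage sketch (illustrative only): only the creating (owning) task may
 * destroy the set; any threads still blocked on its ulocks are woken and
 * return KERN_LOCK_SET_DESTROYED from their own calls.
 *
 *	kr = lock_set_destroy(mach_task_self(), ls);
 */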

kern_return_t
lock_acquire (lock_set_t lock_set, int lock_id)
{
	ulock_t ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 * Block the current thread if the lock is already held.
	 */
	if (ulock->holder != THREAD_NULL) {
		int wait_result;

		if (ulock->holder == current_thread()) {
			ulock_unlock(ulock);
			return KERN_LOCK_OWNED_SELF;
		}

		ulock->blocked = TRUE;
		wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
						       LOCK_SET_EVENT,
						       THREAD_ABORTSAFE, 0);
		ulock_unlock(ulock);

		/*
		 * Block - Wait for lock to become available.
		 */
		if (wait_result == THREAD_WAITING)
			wait_result = thread_block(THREAD_CONTINUE_NULL);

		/*
		 * Check the result status:
		 *
		 * Check to see why thread was woken up.  In all cases, we
		 * already have been removed from the queue.
		 */
		switch (wait_result) {
		case THREAD_AWAKENED:
			/* lock transitioned from old locker to us */
			/* he already made us owner */
			return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
						   KERN_SUCCESS;

		case THREAD_INTERRUPTED:
			return KERN_ABORTED;

		case THREAD_RESTART:
			goto retry;  /* probably a dead lock_set */

		default:
			panic("lock_acquire\n");
		}
	}

	/*
	 * Assign lock ownership
	 */
	ulock_ownership_set(ulock, current_thread());
	ulock_unlock(ulock);

	return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}
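
/*
 * Usage sketch (illustrative only): a user thread serializes a critical
 * section on ulock 0 of the set created above:
 *
 *	kr = lock_acquire(ls, 0);	... may also return KERN_LOCK_UNSTABLE
 *	... critical section ...
 *	kr = lock_release(ls, 0);
 *
 * A KERN_LOCK_UNSTABLE return means a previous holder terminated while
 * holding the ulock; the caller should validate the protected state and
 * then call lock_make_stable() (see below).
 */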

kern_return_t
lock_release (lock_set_t lock_set, int lock_id)
{
	ulock_t ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];

	return (ulock_release_internal(ulock, current_thread()));
}

kern_return_t
lock_try (lock_set_t lock_set, int lock_id)
{
	ulock_t ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 * If the lock is already owned, we return without blocking.
	 *
	 * An ownership status is returned to inform the caller as to
	 * whether it already holds the lock or another thread does.
	 */
	if (ulock->holder != THREAD_NULL) {
		if (ulock->holder == current_thread()) {
			ulock_unlock(ulock);
			return KERN_LOCK_OWNED_SELF;
		}

		ulock_unlock(ulock);
		return KERN_LOCK_OWNED;
	}

	/*
	 * Add the ulock to the current thread's held_ulocks list.
	 */
	ulock_ownership_set(ulock, current_thread());
	ulock_unlock(ulock);

	return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}
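
/*
 * Usage sketch (illustrative only): non-blocking acquisition.
 *
 *	kr = lock_try(ls, 0);
 *	if (kr == KERN_SUCCESS || kr == KERN_LOCK_UNSTABLE)
 *		... we now hold ulock 0 ...
 *	else if (kr == KERN_LOCK_OWNED_SELF)
 *		... we already held it ...
 *	else
 *		... KERN_LOCK_OWNED: another thread holds it; we did not block ...
 */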

kern_return_t
lock_make_stable (lock_set_t lock_set, int lock_id)
{
	ulock_t ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != current_thread()) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	ulock->unstable = FALSE;
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}
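
/*
 * Usage sketch (illustrative only): recovering from an unstable ulock.
 * A ulock becomes unstable when its holder terminates without releasing
 * it (see lock_make_unstable and ulock_release_all below); the next
 * acquirer is expected to repair the protected data and then declare the
 * ulock stable again:
 *
 *	kr = lock_acquire(ls, 0);
 *	if (kr == KERN_LOCK_UNSTABLE) {
 *		... validate or repair the data the ulock protects ...
 *		lock_make_stable(ls, 0);
 *	}
 */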

/*
 *	ROUTINE:	lock_make_unstable	[internal]
 *
 *	Marks the lock as unstable.
 *
 *	NOTES:
 *	- All future acquisitions of the lock will return with a
 *	  KERN_LOCK_UNSTABLE status, until the lock is made stable again.
 */
kern_return_t
lock_make_unstable (ulock_t ulock, thread_t thread)
{
	lock_set_t lock_set;

	lock_set = ulock->lock_set;
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != thread) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	ulock->unstable = TRUE;
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}

/*
 *	ROUTINE:	ulock_release_internal	[internal]
 *
 *	Releases the ulock.
 *	If any threads are blocked waiting for the ulock, one is woken up.
 */
kern_return_t
ulock_release_internal (ulock_t ulock, thread_t thread)
{
	lock_set_t lock_set;

	if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != thread) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	/*
	 * If we have a hint that threads might be waiting,
	 * try to transfer the lock ownership to a waiting thread
	 * and wake it up.
	 */
	if (ulock->blocked) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	wqthread;
		spl_t		s;

		s = splsched();
		wait_queue_lock(wq);
		wqthread = wait_queue_wakeup64_identity_locked(wq,
							       LOCK_SET_EVENT,
							       THREAD_AWAKENED,
							       TRUE);
		/* wait_queue now unlocked, thread locked */

		if (wqthread != THREAD_NULL) {
			/*
			 * JMM - These ownership transfer macros have a
			 * locking/race problem.  To keep the thread from
			 * changing states on us (nullifying the ownership
			 * assignment) we need to keep the thread locked
			 * during the assignment.  But we can't because the
			 * macros take an activation lock, which is a mutex.
			 * Since this code was already broken before I got
			 * here, I will leave it for now.
			 */
			thread_unlock(wqthread);
			splx(s);

			/*
			 * Transfer ulock ownership
			 * from the current thread to the acquisition thread.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, wqthread);
			ulock_unlock(ulock);

			return KERN_SUCCESS;
		} else {
			ulock->blocked = FALSE;
			splx(s);
		}
	}

	/*
	 * Disown ulock
	 */
	ulock_ownership_clear(ulock);
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}

kern_return_t
lock_handoff (lock_set_t lock_set, int lock_id)
{
	ulock_t	ulock;
	int	wait_result;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);

	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != current_thread()) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	/*
	 * If the accepting thread (the receiver) is already waiting
	 * to accept the lock from the handoff thread (the sender),
	 * then perform the hand-off now.
	 */
	if (ulock->accept_wait) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	thread;
		spl_t		s;

		/*
		 * See who the lucky devil is, if he is still there waiting.
		 */
		s = splsched();
		wait_queue_lock(wq);
		thread = wait_queue_wakeup64_identity_locked(
				wq,
				LOCK_SET_HANDOFF,
				THREAD_AWAKENED,
				TRUE);
		/* wait queue unlocked, thread locked */

		/*
		 * Transfer lock ownership
		 */
		if (thread != THREAD_NULL) {
			/*
			 * JMM - These ownership transfer macros have a
			 * locking/race problem.  To keep the thread from
			 * changing states on us (nullifying the ownership
			 * assignment) we need to keep the thread locked
			 * during the assignment.  But we can't because the
			 * macros take a thread mutex lock.
			 *
			 * Since this code was already broken before I got
			 * here, I will leave it for now.
			 */
			thread_unlock(thread);
			splx(s);

			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, thread);
			ulock->accept_wait = FALSE;
			ulock_unlock(ulock);
			return KERN_SUCCESS;
		} else {
			/*
			 * OOPS.  The accepting thread must have been aborted
			 * and is racing back to clear the flag that says it
			 * is waiting for an accept.  It will clear the flag
			 * when we release the lock, so just fall through and
			 * wait for the next accepting thread (that's the way
			 * it is specified).
			 */
			splx(s);
		}
	}

	/*
	 * Indicate that there is a hand-off thread waiting, and then wait
	 * for an accepting thread.
	 */
	ulock->ho_wait = TRUE;
	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
					       LOCK_SET_HANDOFF,
					       THREAD_ABORTSAFE, 0);
	ulock_unlock(ulock);

	if (wait_result == THREAD_WAITING)
		wait_result = thread_block(THREAD_CONTINUE_NULL);

	/*
	 * If the thread was woken up via some action other than
	 * lock_handoff_accept or lock_set_destroy (e.g. thread_terminate),
	 * then we need to clear the ulock's handoff state.
	 */
	switch (wait_result) {

	case THREAD_AWAKENED:
		return KERN_SUCCESS;

	case THREAD_INTERRUPTED:
		ulock_lock(ulock);
		assert(ulock->holder == current_thread());
		ulock->ho_wait = FALSE;
		ulock_unlock(ulock);
		return KERN_ABORTED;

	case THREAD_RESTART:
		goto retry;
	}

	panic("lock_handoff");
	return KERN_FAILURE;
}

kern_return_t
lock_handoff_accept (lock_set_t lock_set, int lock_id)
{
	ulock_t	ulock;
	int	wait_result;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 * If there is another accepting thread that beat us, just
	 * return with an error.
	 */
	if (ulock->accept_wait) {
		ulock_unlock(ulock);
		return KERN_ALREADY_WAITING;
	}

	if (ulock->holder == current_thread()) {
		ulock_unlock(ulock);
		return KERN_LOCK_OWNED_SELF;
	}

	/*
	 * If the handoff thread (the sender) is already waiting to
	 * hand off the lock to the accepting thread (the receiver),
	 * then perform the hand-off now.
	 */
	if (ulock->ho_wait) {
		wait_queue_t wq = &ulock->wait_queue;

		/*
		 * See who the lucky devil is, if he is still there waiting.
		 */
		assert(ulock->holder != THREAD_NULL);

		if (wait_queue_wakeup64_thread(wq,
					       LOCK_SET_HANDOFF,
					       ulock->holder,
					       THREAD_AWAKENED) == KERN_SUCCESS) {
			/*
			 * Holder thread was still waiting to give it
			 * away.  Take over ownership.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, current_thread());
			ulock->ho_wait = FALSE;
			ulock_unlock(ulock);
			return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
						   KERN_SUCCESS;
		}

		/*
		 * OOPS.  The owner was aborted out of the handoff.
		 * It will clear its own flag when it gets back; in the
		 * meantime, we will wait as if we hadn't even seen the
		 * flag (by falling through).
		 */
	}

	ulock->accept_wait = TRUE;
	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
					       LOCK_SET_HANDOFF,
					       THREAD_ABORTSAFE, 0);
	ulock_unlock(ulock);

	if (wait_result == THREAD_WAITING)
		wait_result = thread_block(THREAD_CONTINUE_NULL);

	/*
	 * If the thread was woken up via some action other than
	 * lock_handoff or lock_set_destroy (e.g. thread_terminate),
	 * then we need to clear the ulock's handoff state.
	 */
	switch (wait_result) {

	case THREAD_AWAKENED:
		return KERN_SUCCESS;

	case THREAD_INTERRUPTED:
		ulock_lock(ulock);
		ulock->accept_wait = FALSE;
		ulock_unlock(ulock);
		return KERN_ABORTED;

	case THREAD_RESTART:
		goto retry;
	}

	panic("lock_handoff_accept");
	return KERN_FAILURE;
}
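
/*
 * Usage sketch (illustrative only): a hand-off pairs a sending thread that
 * holds the ulock with a receiving thread that wants it, transferring
 * ownership directly without an intervening release.
 *
 *	Sender (current holder of ulock 0):
 *		kr = lock_handoff(ls, 0);		blocks until a receiver accepts
 *
 *	Receiver (another thread):
 *		kr = lock_handoff_accept(ls, 0);	blocks until the holder hands off
 *
 * Whichever side arrives first waits for the other; once both have arrived,
 * ownership moves from sender to receiver and both calls return KERN_SUCCESS
 * (the receiver may see KERN_LOCK_UNSTABLE instead).
 */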

/*
 *	Routine:	lock_set_reference
 *
 *	Take out a reference on a lock set.  This keeps the data structure
 *	in existence (but the lock set may be deactivated).
 */
void
lock_set_reference(lock_set_t lock_set)
{
	lock_set_lock(lock_set);
	lock_set->ref_count++;
	lock_set_unlock(lock_set);
}

/*
 *	Routine:	lock_set_dereference
 *
 *	Release a reference on a lock set.  If this is the last reference,
 *	the lock set data structure is deallocated.
 */
void
lock_set_dereference(lock_set_t lock_set)
{
	int ref_count;
	int size;

	lock_set_lock(lock_set);
	ref_count = --(lock_set->ref_count);
	lock_set_unlock(lock_set);

	if (ref_count == 0) {
		size = sizeof(struct lock_set) +
		       (sizeof(struct ulock) * (lock_set->n_ulocks - 1));
		kfree(lock_set, size);
	}
}

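/*
 *	ROUTINE:	ulock_release_all	[internal]
 *
 *	Releases every ulock still held by the given thread, marking each
 *	one unstable first so that the next acquirer sees KERN_LOCK_UNSTABLE
 *	and knows to validate the protected state.  Typically invoked during
 *	thread teardown, walking the per-thread held_ulocks list maintained
 *	by the ownership macros above.
 */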
void
ulock_release_all(
	thread_t	thread)
{
	ulock_t	ulock;

	while (!queue_empty(&thread->held_ulocks)) {
		ulock = (ulock_t)queue_first(&thread->held_ulocks);
		lock_make_unstable(ulock, thread);
		ulock_release_internal(ulock, thread);
	}
}