/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */
/*
 *	File:	kern/sync_lock.c
 *	Author:	Joseph CaraDonna
 *
 *	Contains RT distributed lock synchronization services.
 */

#include <kern/etap_macros.h>
#include <kern/misc_protos.h>
#include <kern/sync_lock.h>
#include <kern/sched_prim.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_sync.h>
#include <kern/thread.h>
#include <kern/task.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

/*
 *	Ulock ownership MACROS
 *
 *	Assumes: ulock internal lock is held
 */

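/*
 *	ulock_ownership_set links the ulock onto the holding activation's
 *	held_ulocks queue and records that activation as the ulock's holder.
 *	ulock_ownership_clear removes the ulock from that queue and resets
 *	the holder to THR_ACT_NULL.
 */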
#define ulock_ownership_set(ul, th)				\
	MACRO_BEGIN						\
	thread_act_t _th_act;					\
	_th_act = (th)->top_act;				\
	act_lock(_th_act);					\
	enqueue (&_th_act->held_ulocks, (queue_entry_t) (ul));	\
	act_unlock(_th_act);					\
	(ul)->holder = _th_act;					\
	MACRO_END

#define ulock_ownership_clear(ul)				\
	MACRO_BEGIN						\
	thread_act_t _th_act;					\
	_th_act = (ul)->holder;					\
	if (_th_act->active) {					\
		act_lock(_th_act);				\
		remqueue(&_th_act->held_ulocks,			\
			 (queue_entry_t) (ul));			\
		act_unlock(_th_act);				\
	} else {						\
		remqueue(&_th_act->held_ulocks,			\
			 (queue_entry_t) (ul));			\
	}							\
	(ul)->holder = THR_ACT_NULL;				\
	MACRO_END

/*
 *	Lock set ownership MACROS
 */

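/*
 *	lock_set_ownership_set links the lock set onto the owning task's
 *	lock_set_list and bumps its lock_sets_owned count;
 *	lock_set_ownership_clear reverses both steps.
 */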
#define lock_set_ownership_set(ls, t)				\
	MACRO_BEGIN						\
	task_lock((t));						\
	enqueue_head(&(t)->lock_set_list, (queue_entry_t) (ls));\
	(t)->lock_sets_owned++;					\
	task_unlock((t));					\
	(ls)->owner = (t);					\
	MACRO_END

#define lock_set_ownership_clear(ls, t)				\
	MACRO_BEGIN						\
	task_lock((t));						\
	remqueue(&(t)->lock_set_list, (queue_entry_t) (ls));	\
	(t)->lock_sets_owned--;					\
	task_unlock((t));					\
	MACRO_END

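/*
 *	These globals are never read or written; only their kernel
 *	addresses are used, as the unique 64-bit events that ulock
 *	waiters (LOCK_SET_EVENT) and hand-off waiters (LOCK_SET_HANDOFF)
 *	block on.
 */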
unsigned int lock_set_event;
#define LOCK_SET_EVENT ((event64_t)&lock_set_event)

unsigned int lock_set_handoff;
#define LOCK_SET_HANDOFF ((event64_t)&lock_set_handoff)

/*
 *	ROUTINE:	lock_set_init		[private]
 *
 *	Initialize the lock_set subsystem.
 *
 *	For now, we don't have anything to do here.
 */
void
lock_set_init(void)
{
	return;
}


/*
 *	ROUTINE:	lock_set_create		[exported]
 *
 *	Creates a lock set.
 *	The port representing the lock set is returned as a parameter.
 */
kern_return_t
lock_set_create (
	task_t		task,
	lock_set_t	*new_lock_set,
	int		n_ulocks,
	int		policy)
{
	lock_set_t	lock_set = LOCK_SET_NULL;
	ulock_t		ulock;
	int		size;
	int		x;

	*new_lock_set = LOCK_SET_NULL;

	if (task == TASK_NULL || n_ulocks <= 0 || policy > SYNC_POLICY_MAX)
		return KERN_INVALID_ARGUMENT;

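	/*
	 *	The lock set and its ulocks share a single allocation:
	 *	struct lock_set ends in a one-entry ulock_list[] place
	 *	holder, so only (n_ulocks - 1) additional ulock structures
	 *	need to be appended.
	 */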
	size = sizeof(struct lock_set) + (sizeof(struct ulock) * (n_ulocks-1));
	lock_set = (lock_set_t) kalloc (size);

	if (lock_set == LOCK_SET_NULL)
		return KERN_RESOURCE_SHORTAGE;


	lock_set_lock_init(lock_set);
	lock_set->n_ulocks = n_ulocks;
	lock_set->ref_count = 1;

	/*
	 *  Create and initialize the lock set port
	 */
	lock_set->port = ipc_port_alloc_kernel();
	if (lock_set->port == IP_NULL) {
		/* This will deallocate the lock set */
		lock_set_dereference(lock_set);
		return KERN_RESOURCE_SHORTAGE;
	}

	ipc_kobject_set (lock_set->port,
			(ipc_kobject_t) lock_set,
			IKOT_LOCK_SET);

	/*
	 *  Initialize each ulock in the lock set
	 */

	for (x=0; x < n_ulocks; x++) {
		ulock = (ulock_t) &lock_set->ulock_list[x];
		ulock_lock_init(ulock);
		ulock->lock_set  = lock_set;
		ulock->holder	 = THR_ACT_NULL;
		ulock->blocked	 = FALSE;
		ulock->unstable	 = FALSE;
		ulock->ho_wait	 = FALSE;
		wait_queue_init(&ulock->wait_queue, policy);
	}

	lock_set_ownership_set(lock_set, task);

	lock_set->active = TRUE;
	*new_lock_set = lock_set;

	return KERN_SUCCESS;
}
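
/*
 *	Illustrative sketch of how a kernel-internal caller might drive
 *	the exported lock set interface (SYNC_POLICY_FIFO is assumed as
 *	the wait-queue policy):
 *
 *		lock_set_t	lock_set;
 *		kern_return_t	kr;
 *
 *		kr = lock_set_create(current_task(), &lock_set, 2,
 *				     SYNC_POLICY_FIFO);
 *		if (kr == KERN_SUCCESS) {
 *			kr = lock_acquire(lock_set, 0);
 *			... critical section protected by ulock 0 ...
 *			kr = lock_release(lock_set, 0);
 *			kr = lock_set_destroy(current_task(), lock_set);
 *		}
 */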

/*
 *	ROUTINE:	lock_set_destroy	[exported]
 *
 *	Destroys a lock set.  This call will only succeed if the
 *	specified task is the same task that was specified at the
 *	lock set's creation.
 *
 *	NOTES:
 *	- All threads currently blocked on the lock set's ulocks are awoken.
 *	- These threads will return with the KERN_LOCK_SET_DESTROYED error.
 */
kern_return_t
lock_set_destroy (task_t task, lock_set_t lock_set)
{
	thread_t	thread;
	ulock_t		ulock;
	int		i;

	if (task == TASK_NULL || lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_set->owner != task)
		return KERN_INVALID_RIGHT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	/*
	 *  Deactivate lock set
	 */
	lock_set->active = FALSE;

	/*
	 *  If a ulock is currently held in the target lock set:
	 *
	 *  1) Wakeup all threads blocked on the ulock (if any).  Threads
	 *     may be blocked waiting normally, or waiting for a handoff.
	 *     Blocked threads will return with KERN_LOCK_SET_DESTROYED.
	 *
	 *  2) ulock ownership is cleared.
	 *     The thread currently holding the ulock is revoked of its
	 *     ownership.
	 */
	for (i = 0; i < lock_set->n_ulocks; i++) {
		ulock = &lock_set->ulock_list[i];

		ulock_lock(ulock);

		if (ulock->accept_wait) {
			ulock->accept_wait = FALSE;
			wait_queue_wakeup64_one(&ulock->wait_queue,
					      LOCK_SET_HANDOFF,
					      THREAD_RESTART);
		}

		if (ulock->holder) {
			if (ulock->blocked) {
				ulock->blocked = FALSE;
				wait_queue_wakeup64_all(&ulock->wait_queue,
						      LOCK_SET_EVENT,
						      THREAD_RESTART);
			}
			if (ulock->ho_wait) {
				ulock->ho_wait = FALSE;
				wait_queue_wakeup64_one(&ulock->wait_queue,
						      LOCK_SET_HANDOFF,
						      THREAD_RESTART);
			}
			ulock_ownership_clear(ulock);
		}

		ulock_unlock(ulock);
	}

	lock_set_unlock(lock_set);
	lock_set_ownership_clear(lock_set, task);

	/*
	 *  Deallocate
	 *
	 *  Drop the lock set reference, which in turn destroys the
	 *  lock set structure if the reference count goes to zero.
	 */

	ipc_port_dealloc_kernel(lock_set->port);
	lock_set_dereference(lock_set);

	return KERN_SUCCESS;
}

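/*
 *	ROUTINE:	lock_acquire		[exported]
 *
 *	Acquires the given ulock, blocking (ABORTSAFE) until it is free
 *	or handed to us by the releasing thread.  Returns
 *	KERN_LOCK_OWNED_SELF if the caller already holds the ulock, and
 *	KERN_LOCK_UNSTABLE if a previous holder left the ulock unstable.
 */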
kern_return_t
lock_acquire (lock_set_t lock_set, int lock_id)
{
	ulock_t   ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 *  Block the current thread if the lock is already held.
	 */

	if (ulock->holder != THR_ACT_NULL) {
		int wait_result;

		if (ulock->holder == current_act()) {
			ulock_unlock(ulock);
			return KERN_LOCK_OWNED_SELF;
		}

		ulock->blocked = TRUE;
		wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
					LOCK_SET_EVENT,
					THREAD_ABORTSAFE);
		ulock_unlock(ulock);

		/*
		 *  Block - Wait for lock to become available.
		 */
		if (wait_result == THREAD_WAITING)
			wait_result = thread_block(THREAD_CONTINUE_NULL);

		/*
		 *  Check the result status:
		 *
		 *  Check to see why thread was woken up.  In all cases, we
		 *  already have been removed from the queue.
		 */
		switch (wait_result) {
		case THREAD_AWAKENED:
			/* lock transitioned from old locker to us */
			/* he already made us owner */
			return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
						   KERN_SUCCESS;

		case THREAD_INTERRUPTED:
			return KERN_ABORTED;

		case THREAD_RESTART:
			goto retry;  /* probably a dead lock_set */

		default:
			panic("lock_acquire\n");
		}
	}

	/*
	 *  Assign lock ownership
	 */
	ulock_ownership_set(ulock, current_thread());
	ulock_unlock(ulock);

	return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}

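/*
 *	ROUTINE:	lock_release		[exported]
 *
 *	Releases the given ulock on behalf of the current activation.
 *	The real work is done in lock_release_internal.
 */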
kern_return_t
lock_release (lock_set_t lock_set, int lock_id)
{
	ulock_t  ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];

	return (lock_release_internal(ulock, current_act()));
}

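/*
 *	ROUTINE:	lock_try		[exported]
 *
 *	Attempts to acquire the given ulock without blocking.  Returns
 *	KERN_LOCK_OWNED if another thread holds it, KERN_LOCK_OWNED_SELF
 *	if the caller already does.
 */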
kern_return_t
lock_try (lock_set_t lock_set, int lock_id)
{
	ulock_t  ulock;


	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;


	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 *  If the lock is already owned, we return without blocking.
	 *
	 *  An ownership status is returned to inform the caller as to
	 *  whether it already holds the lock or another thread does.
	 */

	if (ulock->holder != THR_ACT_NULL) {
		if (ulock->holder == current_act()) {
			ulock_unlock(ulock);
			return KERN_LOCK_OWNED_SELF;
		}

		ulock_unlock(ulock);
		return KERN_LOCK_OWNED;
	}

	/*
	 *  Add the ulock to the current activation's held_ulocks list.
	 */

	ulock_ownership_set(ulock, current_thread());
	ulock_unlock(ulock);

	return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}

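/*
 *	ROUTINE:	lock_make_stable	[exported]
 *
 *	Clears the ulock's unstable status.  Only the current holder may
 *	do so; otherwise KERN_INVALID_RIGHT is returned.
 */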
kern_return_t
lock_make_stable (lock_set_t lock_set, int lock_id)
{
	ulock_t  ulock;


	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;


	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != current_act()) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	ulock->unstable = FALSE;
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}

/*
 *	ROUTINE:	lock_make_unstable	[internal]
 *
 *	Marks the lock as unstable.
 *
 *	NOTES:
 *	- All future acquisitions of the lock will return with a
 *	  KERN_LOCK_UNSTABLE status, until the lock is made stable again.
 */
kern_return_t
lock_make_unstable (ulock_t ulock, thread_act_t thr_act)
{
	lock_set_t	lock_set;


	lock_set = ulock->lock_set;
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != thr_act) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	ulock->unstable = TRUE;
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}

/*
 *	ROUTINE:	lock_release_internal	[internal]
 *
 *	Releases the ulock.
 *	If any threads are blocked waiting for the ulock, one is woken-up.
 *
 */
kern_return_t
lock_release_internal (ulock_t ulock, thread_act_t thr_act)
{
	lock_set_t	lock_set;
	int		result;


	if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != thr_act) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	/*
	 *  If we have a hint that threads might be waiting,
	 *  try to transfer the lock ownership to a waiting thread
	 *  and wake it up.
	 */
	if (ulock->blocked) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	thread;
		spl_t		s;

		s = splsched();
		wait_queue_lock(wq);
		thread = wait_queue_wakeup64_identity_locked(wq,
							   LOCK_SET_EVENT,
							   THREAD_AWAKENED,
							   TRUE);
		/* wait_queue now unlocked, thread locked */

		if (thread != THREAD_NULL) {
			/*
			 * JMM - These ownership transfer macros have a
			 * locking/race problem.  To keep the thread from
			 * changing states on us (nullifying the ownership
			 * assignment) we need to keep the thread locked
			 * during the assignment.  But we can't because the
			 * macros take an activation lock, which is a mutex.
			 * Since this code was already broken before I got
			 * here, I will leave it for now.
			 */
			thread_unlock(thread);
			splx(s);

			/*
			 *  Transfer ulock ownership
			 *  from the current thread to the acquisition thread.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, thread);
			ulock_unlock(ulock);

			return KERN_SUCCESS;
		} else {
			ulock->blocked = FALSE;
			splx(s);
		}
	}

	/*
	 *  Disown ulock
	 */
	ulock_ownership_clear(ulock);
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}

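/*
 *	ROUTINE:	lock_handoff		[exported]
 *
 *	Hands the ulock directly to an accepting thread.  If an acceptor
 *	is already waiting (accept_wait), ownership is transferred now;
 *	otherwise the caller marks ho_wait and blocks (ABORTSAFE) until
 *	a lock_handoff_accept pairs with it.
 */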
kern_return_t
lock_handoff (lock_set_t lock_set, int lock_id)
{
	ulock_t  ulock;
	int	 wait_result;


	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);

	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != current_act()) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	/*
	 *  If the accepting thread (the receiver) is already waiting
	 *  to accept the lock from the handoff thread (the sender),
	 *  then perform the hand-off now.
	 */

	if (ulock->accept_wait) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	thread;
		spl_t		s;

		/*
		 *  See who the lucky devil is, if he is still there waiting.
		 */
		s = splsched();
		wait_queue_lock(wq);
		thread = wait_queue_wakeup64_identity_locked(
				   wq,
				   LOCK_SET_HANDOFF,
				   THREAD_AWAKENED,
				   TRUE);
		/* wait queue unlocked, thread locked */

		/*
		 *  Transfer lock ownership
		 */
		if (thread != THREAD_NULL) {
			/*
			 * JMM - These ownership transfer macros have a
			 * locking/race problem.  To keep the thread from
			 * changing states on us (nullifying the ownership
			 * assignment) we need to keep the thread locked
			 * during the assignment.  But we can't because the
			 * macros take an activation lock, which is a mutex.
			 * Since this code was already broken before I got
			 * here, I will leave it for now.
			 */
			thread_unlock(thread);
			splx(s);

			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, thread);
			ulock->accept_wait = FALSE;
			ulock_unlock(ulock);
			return KERN_SUCCESS;
		} else {

			/*
			 *  OOPS.  The accepting thread must have been
			 *  aborted and is racing back to clear the flag
			 *  that says it is waiting for an accept.  It will
			 *  clear the flag when we release the lock, so just
			 *  fall thru and wait for the next accept thread
			 *  (that's the way it is specified).
			 */
			splx(s);
		}
	}

	/*
	 *  Indicate that there is a hand-off thread waiting, and then wait
	 *  for an accepting thread.
	 */
	ulock->ho_wait = TRUE;
	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
					     LOCK_SET_HANDOFF,
					     THREAD_ABORTSAFE);
	ulock_unlock(ulock);

	if (wait_result == THREAD_WAITING)
		wait_result = thread_block(THREAD_CONTINUE_NULL);

	/*
	 *  If the thread was woken-up via some action other than
	 *  lock_handoff_accept or lock_set_destroy (i.e. thread_terminate),
	 *  then we need to clear the ulock's handoff state.
	 */
	switch (wait_result) {

	case THREAD_AWAKENED:
		return KERN_SUCCESS;

	case THREAD_INTERRUPTED:
		ulock_lock(ulock);
		assert(ulock->holder == current_act());
		ulock->ho_wait = FALSE;
		ulock_unlock(ulock);
		return KERN_ABORTED;

	case THREAD_RESTART:
		goto retry;

	default:
		panic("lock_handoff");
	}
}

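/*
 *	ROUTINE:	lock_handoff_accept	[exported]
 *
 *	Accepts ownership of the ulock from a handing-off thread.  If the
 *	holder is already waiting in lock_handoff (ho_wait), ownership is
 *	taken over immediately; otherwise the caller marks accept_wait
 *	and blocks (ABORTSAFE) until a lock_handoff pairs with it.  Only
 *	one acceptor may wait at a time (KERN_ALREADY_WAITING otherwise).
 */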
kern_return_t
lock_handoff_accept (lock_set_t lock_set, int lock_id)
{
	ulock_t  ulock;
	int	 wait_result;


	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 *  If there is another accepting thread that beat us, just
	 *  return with an error.
	 */
	if (ulock->accept_wait) {
		ulock_unlock(ulock);
		return KERN_ALREADY_WAITING;
	}

	if (ulock->holder == current_act()) {
		ulock_unlock(ulock);
		return KERN_LOCK_OWNED_SELF;
	}

	/*
	 *  If the handoff thread (the sender) is already waiting to
	 *  hand-off the lock to the accepting thread (the receiver),
	 *  then perform the hand-off now.
	 */
	if (ulock->ho_wait) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	thread;

		/*
		 *  See who the lucky devil is, if he is still there waiting.
		 */
		assert(ulock->holder != THR_ACT_NULL);
		thread = ulock->holder->thread;

		if (wait_queue_wakeup64_thread(wq,
					     LOCK_SET_HANDOFF,
					     thread,
					     THREAD_AWAKENED) == KERN_SUCCESS) {
			/*
			 * Holder thread was still waiting to give it
			 * away.  Take over ownership.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, current_thread());
			ulock->ho_wait = FALSE;
			ulock_unlock(ulock);
			return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
						   KERN_SUCCESS;
		}

		/*
		 *  OOPS.  The owner was aborted out of the handoff.
		 *  He will clear his own flag when he gets back.
		 *  In the meantime, we will wait as if we didn't
		 *  even see his flag (by falling thru).
		 */
	}

	ulock->accept_wait = TRUE;
	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
					     LOCK_SET_HANDOFF,
					     THREAD_ABORTSAFE);
	ulock_unlock(ulock);

	if (wait_result == THREAD_WAITING)
		wait_result = thread_block(THREAD_CONTINUE_NULL);

	/*
	 *  If the thread was woken-up via some action other than
	 *  lock_handoff or lock_set_destroy (i.e. thread_terminate),
	 *  then we need to clear the ulock's handoff state.
	 */
	switch (wait_result) {

	case THREAD_AWAKENED:
		return KERN_SUCCESS;

	case THREAD_INTERRUPTED:
		ulock_lock(ulock);
		ulock->accept_wait = FALSE;
		ulock_unlock(ulock);
		return KERN_ABORTED;

	case THREAD_RESTART:
		goto retry;

	default:
		panic("lock_handoff_accept");
	}
}

/*
 *	Routine:	lock_set_reference
 *
 *	Take out a reference on a lock set.  This keeps the data structure
 *	in existence (but the lock set may be deactivated).
 */
void
lock_set_reference(lock_set_t lock_set)
{
	lock_set_lock(lock_set);
	lock_set->ref_count++;
	lock_set_unlock(lock_set);
}

/*
 *	Routine:	lock_set_dereference
 *
 *	Release a reference on a lock set.  If this is the last reference,
 *	the lock set data structure is deallocated.
 */
void
lock_set_dereference(lock_set_t lock_set)
{
	int	ref_count;
	int	size;

	lock_set_lock(lock_set);
	ref_count = --(lock_set->ref_count);
	lock_set_unlock(lock_set);

	if (ref_count == 0) {
		size = sizeof(struct lock_set) +
			(sizeof(struct ulock) * (lock_set->n_ulocks - 1));
		kfree((vm_offset_t) lock_set, size);
	}
}