/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */
/*
 *	File:	kern/sync_lock.c
 *	Author:	Joseph CaraDonna
 *
 *	Contains RT distributed lock synchronization services.
 */

#include <mach/mach_types.h>
#include <mach/lock_set_server.h>
#include <mach/task_server.h>

#include <kern/misc_protos.h>
#include <kern/kalloc.h>
#include <kern/sync_lock.h>
#include <kern/sched_prim.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_sync.h>
#include <kern/thread.h>
#include <kern/task.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

/*
 *	Ulock ownership MACROS
 *
 *	Assumes: ulock internal lock is held
 */

#define ulock_ownership_set(ul, th)				\
	MACRO_BEGIN						\
	thread_mtx_lock(th);					\
	enqueue (&th->held_ulocks, (queue_entry_t) (ul));	\
	thread_mtx_unlock(th);					\
	(ul)->holder = th;					\
	MACRO_END

#define ulock_ownership_clear(ul)				\
	MACRO_BEGIN						\
	thread_t th;						\
	th = (ul)->holder;					\
	if (th->active) {					\
		thread_mtx_lock(th);				\
		remqueue(&th->held_ulocks,			\
			 (queue_entry_t) (ul));			\
		thread_mtx_unlock(th);				\
	} else {						\
		remqueue(&th->held_ulocks,			\
			 (queue_entry_t) (ul));			\
	}							\
	(ul)->holder = THREAD_NULL;				\
	MACRO_END

/*
 *	Lock set ownership MACROS
 */

#define lock_set_ownership_set(ls, t)				\
	MACRO_BEGIN						\
	task_lock((t));						\
	enqueue_head(&(t)->lock_set_list, (queue_entry_t) (ls));\
	(t)->lock_sets_owned++;					\
	task_unlock((t));					\
	(ls)->owner = (t);					\
	MACRO_END

#define lock_set_ownership_clear(ls, t)				\
	MACRO_BEGIN						\
	task_lock((t));						\
	remqueue(&(t)->lock_set_list, (queue_entry_t) (ls));	\
	(t)->lock_sets_owned--;					\
	task_unlock((t));					\
	MACRO_END

unsigned int lock_set_event;
#define LOCK_SET_EVENT ((event64_t)&lock_set_event)

unsigned int lock_set_handoff;
#define LOCK_SET_HANDOFF ((event64_t)&lock_set_handoff)

/*
 *	ROUTINE:	lock_set_init		[private]
 *
 *	Initialize the lock_set subsystem.
 *
 *	For now, we don't have anything to do here.
 */
void
lock_set_init(void)
{
	return;
}


/*
 *	ROUTINE:	lock_set_create		[exported]
 *
 *	Creates a lock set.
 *	The port representing the lock set is returned as a parameter.
 */
kern_return_t
lock_set_create (
	task_t		task,
	lock_set_t	*new_lock_set,
	int		n_ulocks,
	int		policy)
{
	lock_set_t	lock_set = LOCK_SET_NULL;
	ulock_t		ulock;
	vm_size_t	size;
	int		x;

	*new_lock_set = LOCK_SET_NULL;

	if (task == TASK_NULL || n_ulocks <= 0 || policy > SYNC_POLICY_MAX)
		return KERN_INVALID_ARGUMENT;

	if ((VM_MAX_ADDRESS - sizeof(struct lock_set))/sizeof(struct ulock) < (unsigned)n_ulocks)
		return KERN_RESOURCE_SHORTAGE;

	size = sizeof(struct lock_set) + (sizeof(struct ulock) * (n_ulocks-1));
	lock_set = (lock_set_t) kalloc (size);

	if (lock_set == LOCK_SET_NULL)
		return KERN_RESOURCE_SHORTAGE;


	lock_set_lock_init(lock_set);
	lock_set->n_ulocks = n_ulocks;
	lock_set->ref_count = 1;

	/*
	 * Create and initialize the lock set port
	 */
	lock_set->port = ipc_port_alloc_kernel();
	if (lock_set->port == IP_NULL) {
		/* This will deallocate the lock set */
		lock_set_dereference(lock_set);
		return KERN_RESOURCE_SHORTAGE;
	}

	ipc_kobject_set (lock_set->port,
			 (ipc_kobject_t) lock_set,
			 IKOT_LOCK_SET);

	/*
	 * Initialize each ulock in the lock set
	 */

	for (x=0; x < n_ulocks; x++) {
		ulock = (ulock_t) &lock_set->ulock_list[x];
		ulock_lock_init(ulock);
		ulock->lock_set = lock_set;
		ulock->holder	= THREAD_NULL;
		ulock->blocked	= FALSE;
		ulock->unstable	= FALSE;
		ulock->ho_wait	= FALSE;
		wait_queue_init(&ulock->wait_queue, policy);
	}

	lock_set_ownership_set(lock_set, task);

	lock_set->active = TRUE;
	*new_lock_set = lock_set;

	return KERN_SUCCESS;
}

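/*
 *	Illustrative usage sketch (not part of this implementation):
 *	user-space callers reach these routines through the MIG-generated
 *	stubs for the lock_set subsystem; the exact user-level prototypes
 *	shown below are assumed for illustration, not defined here.
 *
 *		mach_port_t	lock_set;
 *		kern_return_t	kr;
 *
 *		kr = lock_set_create(mach_task_self(), &lock_set,
 *				     4, SYNC_POLICY_FIFO);
 *		kr = lock_acquire(lock_set, 0);	  -- blocks if ulock 0 is held
 *		-- ... critical section protected by ulock 0 ...
 *		kr = lock_release(lock_set, 0);
 *		kr = lock_set_destroy(mach_task_self(), lock_set);
 */
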
/*
 *	ROUTINE:	lock_set_destroy	[exported]
 *
 *	Destroys a lock set.  This call will only succeed if the
 *	specified task is the same task that created the lock set.
 *
 *	NOTES:
 *	- All threads currently blocked on the lock set's ulocks are awoken.
 *	- These threads will return with the KERN_LOCK_SET_DESTROYED error.
 */
kern_return_t
lock_set_destroy (task_t task, lock_set_t lock_set)
{
	ulock_t		ulock;
	int		i;

	if (task == TASK_NULL || lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_set->owner != task)
		return KERN_INVALID_RIGHT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	/*
	 * Deactivate lock set
	 */
	lock_set->active = FALSE;

	/*
	 * If a ulock is currently held in the target lock set:
	 *
	 * 1) Wakeup all threads blocked on the ulock (if any).  Threads
	 *    may be blocked waiting normally, or waiting for a handoff.
	 *    Blocked threads will return with KERN_LOCK_SET_DESTROYED.
	 *
	 * 2) ulock ownership is cleared.
	 *    The thread currently holding the ulock is revoked of its
	 *    ownership.
	 */
	for (i = 0; i < lock_set->n_ulocks; i++) {
		ulock = &lock_set->ulock_list[i];

		ulock_lock(ulock);

		if (ulock->accept_wait) {
			ulock->accept_wait = FALSE;
			wait_queue_wakeup64_one(&ulock->wait_queue,
						LOCK_SET_HANDOFF,
						THREAD_RESTART);
		}

		if (ulock->holder) {
			if (ulock->blocked) {
				ulock->blocked = FALSE;
				wait_queue_wakeup64_all(&ulock->wait_queue,
							LOCK_SET_EVENT,
							THREAD_RESTART);
			}
			if (ulock->ho_wait) {
				ulock->ho_wait = FALSE;
				wait_queue_wakeup64_one(&ulock->wait_queue,
							LOCK_SET_HANDOFF,
							THREAD_RESTART);
			}
			ulock_ownership_clear(ulock);
		}

		ulock_unlock(ulock);
	}

	lock_set_unlock(lock_set);
	lock_set_ownership_clear(lock_set, task);

	/*
	 * Deallocate
	 *
	 * Drop the lock set reference, which in turn destroys the
	 * lock set structure if the reference count goes to zero.
	 */

	ipc_port_dealloc_kernel(lock_set->port);
	lock_set_dereference(lock_set);

	return KERN_SUCCESS;
}

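/*
 *	ROUTINE:	lock_acquire		[exported]
 *
 *	Acquires the ulock identified by lock_id within the lock set.
 *	If another thread currently holds the ulock, the caller blocks
 *	until ownership is handed to it.
 *
 *	NOTES:
 *	- Returns KERN_LOCK_OWNED_SELF if the caller already holds the ulock.
 *	- Returns KERN_LOCK_UNSTABLE if the acquired ulock is unstable.
 */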
kern_return_t
lock_acquire (lock_set_t lock_set, int lock_id)
{
	ulock_t	 ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 * Block the current thread if the lock is already held.
	 */

	if (ulock->holder != THREAD_NULL) {
		int wait_result;

		if (ulock->holder == current_thread()) {
			ulock_unlock(ulock);
			return KERN_LOCK_OWNED_SELF;
		}

		ulock->blocked = TRUE;
		wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
						       LOCK_SET_EVENT,
						       THREAD_ABORTSAFE, 0);
		ulock_unlock(ulock);

		/*
		 * Block - Wait for lock to become available.
		 */
		if (wait_result == THREAD_WAITING)
			wait_result = thread_block(THREAD_CONTINUE_NULL);

		/*
		 * Check why the thread was woken up.  In all cases, we
		 * have already been removed from the wait queue.
		 */
		switch (wait_result) {
		case THREAD_AWAKENED:
			/* lock transitioned from the old holder to us */
			/* the releasing thread already made us the owner */
			return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
				KERN_SUCCESS;

		case THREAD_INTERRUPTED:
			return KERN_ABORTED;

		case THREAD_RESTART:
			goto retry;  /* probably a dead lock_set */

		default:
			panic("lock_acquire\n");
		}
	}

	/*
	 * Assign lock ownership
	 */
	ulock_ownership_set(ulock, current_thread());
	ulock_unlock(ulock);

	return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}

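/*
 *	ROUTINE:	lock_release		[exported]
 *
 *	Releases the ulock identified by lock_id within the lock set
 *	on behalf of the current thread (see ulock_release_internal).
 */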
kern_return_t
lock_release (lock_set_t lock_set, int lock_id)
{
	ulock_t	 ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];

	return (ulock_release_internal(ulock, current_thread()));
}

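/*
 *	ROUTINE:	lock_try		[exported]
 *
 *	Attempts to acquire the ulock without blocking.
 *
 *	NOTES:
 *	- Returns KERN_LOCK_OWNED if another thread holds the ulock,
 *	  or KERN_LOCK_OWNED_SELF if the caller already holds it.
 */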
kern_return_t
lock_try (lock_set_t lock_set, int lock_id)
{
	ulock_t	 ulock;


	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;


	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 * If the lock is already owned, we return without blocking.
	 *
	 * An ownership status is returned to inform the caller as to
	 * whether it already holds the lock or another thread does.
	 */

	if (ulock->holder != THREAD_NULL) {
		if (ulock->holder == current_thread()) {
			ulock_unlock(ulock);
			return KERN_LOCK_OWNED_SELF;
		}

		ulock_unlock(ulock);
		return KERN_LOCK_OWNED;
	}

	/*
	 * Add the ulock to the current thread's held_ulocks list.
	 */

	ulock_ownership_set(ulock, current_thread());
	ulock_unlock(ulock);

	return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}

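/*
 *	ROUTINE:	lock_make_stable	[exported]
 *
 *	Clears the ulock's unstable state.  Only the current holder
 *	of the ulock may make it stable again.
 */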
kern_return_t
lock_make_stable (lock_set_t lock_set, int lock_id)
{
	ulock_t	 ulock;


	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;


	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != current_thread()) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	ulock->unstable = FALSE;
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}

/*
 *	ROUTINE:	lock_make_unstable	[internal]
 *
 *	Marks the lock as unstable.
 *
 *	NOTES:
 *	- All future acquisitions of the lock will return with a
 *	  KERN_LOCK_UNSTABLE status, until the lock is made stable again.
 */
kern_return_t
lock_make_unstable (ulock_t ulock, thread_t thread)
{
	lock_set_t	lock_set;

	lock_set = ulock->lock_set;
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != thread) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	ulock->unstable = TRUE;
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}

/*
 *	ROUTINE:	ulock_release_internal	[internal]
 *
 *	Releases the ulock.
 *	If any threads are blocked waiting for the ulock, one is woken up
 *	and granted ownership.
 */
kern_return_t
ulock_release_internal (ulock_t ulock, thread_t thread)
{
	lock_set_t	lock_set;

	if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != thread) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	/*
	 * If we have a hint that threads might be waiting,
	 * try to transfer the lock ownership to a waiting thread
	 * and wake it up.
	 */
	if (ulock->blocked) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	wqthread;
		spl_t		s;

		s = splsched();
		wait_queue_lock(wq);
		wqthread = wait_queue_wakeup64_identity_locked(wq,
							       LOCK_SET_EVENT,
							       THREAD_AWAKENED,
							       TRUE);
		/* wait_queue now unlocked, thread locked */

		if (wqthread != THREAD_NULL) {
			/*
			 * JMM - These ownership transfer macros have a
			 * locking/race problem.  To keep the thread from
			 * changing states on us (nullifying the ownership
			 * assignment) we need to keep the thread locked
			 * during the assignment.  But we can't because the
			 * macros take an activation lock, which is a mutex.
			 * Since this code was already broken before I got
			 * here, I will leave it for now.
			 */
			thread_unlock(wqthread);
			splx(s);

			/*
			 * Transfer ulock ownership
			 * from the current thread to the acquisition thread.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, wqthread);
			ulock_unlock(ulock);

			return KERN_SUCCESS;
		} else {
			ulock->blocked = FALSE;
			splx(s);
		}
	}

	/*
	 * Disown ulock
	 */
	ulock_ownership_clear(ulock);
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}

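/*
 *	ROUTINE:	lock_handoff		[exported]
 *
 *	Hands the ulock off directly to an accepting thread.
 *	If no accepting thread is waiting, the caller (which must be
 *	the current holder) blocks until one arrives or the lock set
 *	is destroyed.
 */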
kern_return_t
lock_handoff (lock_set_t lock_set, int lock_id)
{
	ulock_t	 ulock;
	int	 wait_result;


	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);

	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != current_thread()) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	/*
	 * If the accepting thread (the receiver) is already waiting
	 * to accept the lock from the handoff thread (the sender),
	 * then perform the hand-off now.
	 */

	if (ulock->accept_wait) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	thread;
		spl_t		s;

		/*
		 * See who the lucky devil is, if he is still there waiting.
		 */
		s = splsched();
		wait_queue_lock(wq);
		thread = wait_queue_wakeup64_identity_locked(
							wq,
							LOCK_SET_HANDOFF,
							THREAD_AWAKENED,
							TRUE);
		/* wait queue unlocked, thread locked */

		/*
		 * Transfer lock ownership
		 */
		if (thread != THREAD_NULL) {
			/*
			 * JMM - These ownership transfer macros have a
			 * locking/race problem.  To keep the thread from
			 * changing states on us (nullifying the ownership
			 * assignment) we need to keep the thread locked
			 * during the assignment.  But we can't because the
			 * macros take a thread mutex lock.
			 *
			 * Since this code was already broken before I got
			 * here, I will leave it for now.
			 */
			thread_unlock(thread);
			splx(s);

			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, thread);
			ulock->accept_wait = FALSE;
			ulock_unlock(ulock);
			return KERN_SUCCESS;
		} else {

			/*
			 * OOPS.  The accepting thread must have been aborted
			 * and is racing back to clear the flag that says it is
			 * waiting for an accept.  It will clear the flag when we
			 * release the lock, so just fall through and wait for
			 * the next accepting thread (that's the way it is
			 * specified).
			 */
			splx(s);
		}
	}

	/*
	 * Indicate that there is a hand-off thread waiting, and then wait
	 * for an accepting thread.
	 */
	ulock->ho_wait = TRUE;
	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
					       LOCK_SET_HANDOFF,
					       THREAD_ABORTSAFE, 0);
	ulock_unlock(ulock);

	if (wait_result == THREAD_WAITING)
		wait_result = thread_block(THREAD_CONTINUE_NULL);

	/*
	 * If the thread was woken up via some action other than
	 * lock_handoff_accept or lock_set_destroy (e.g. thread_terminate),
	 * then we need to clear the ulock's handoff state.
	 */
	switch (wait_result) {

	case THREAD_AWAKENED:
		return KERN_SUCCESS;

	case THREAD_INTERRUPTED:
		ulock_lock(ulock);
		assert(ulock->holder == current_thread());
		ulock->ho_wait = FALSE;
		ulock_unlock(ulock);
		return KERN_ABORTED;

	case THREAD_RESTART:
		goto retry;
	}

	panic("lock_handoff");
	return KERN_FAILURE;
}

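/*
 *	ROUTINE:	lock_handoff_accept	[exported]
 *
 *	Accepts a ulock handed off by its current holder.  If no holder
 *	is waiting to hand the ulock off, the caller blocks until the
 *	handoff occurs or the lock set is destroyed.  Only one accepting
 *	thread may wait per ulock (KERN_ALREADY_WAITING otherwise).
 */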
kern_return_t
lock_handoff_accept (lock_set_t lock_set, int lock_id)
{
	ulock_t	 ulock;
	int	 wait_result;


	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 * If there is another accepting thread that beat us, just
	 * return with an error.
	 */
	if (ulock->accept_wait) {
		ulock_unlock(ulock);
		return KERN_ALREADY_WAITING;
	}

	if (ulock->holder == current_thread()) {
		ulock_unlock(ulock);
		return KERN_LOCK_OWNED_SELF;
	}

	/*
	 * If the handoff thread (the sender) is already waiting to
	 * hand-off the lock to the accepting thread (the receiver),
	 * then perform the hand-off now.
	 */
	if (ulock->ho_wait) {
		wait_queue_t	wq = &ulock->wait_queue;

		/*
		 * See who the lucky devil is, if he is still there waiting.
		 */
		assert(ulock->holder != THREAD_NULL);

		if (wait_queue_wakeup64_thread(wq,
					       LOCK_SET_HANDOFF,
					       ulock->holder,
					       THREAD_AWAKENED) == KERN_SUCCESS) {
			/*
			 * Holder thread was still waiting to give it
			 * away.  Take over ownership.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, current_thread());
			ulock->ho_wait = FALSE;
			ulock_unlock(ulock);
			return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
				KERN_SUCCESS;
		}

		/*
		 * OOPS.  The owner was aborted out of the handoff.
		 * It will clear its own flag when it gets back.
		 * In the meantime, we will wait as if we didn't
		 * even see its flag (by falling through).
		 */
	}

	ulock->accept_wait = TRUE;
	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
					       LOCK_SET_HANDOFF,
					       THREAD_ABORTSAFE, 0);
	ulock_unlock(ulock);

	if (wait_result == THREAD_WAITING)
		wait_result = thread_block(THREAD_CONTINUE_NULL);

	/*
	 * If the thread was woken up via some action other than
	 * lock_handoff or lock_set_destroy (e.g. thread_terminate),
	 * then we need to clear the ulock's handoff state.
	 */
	switch (wait_result) {

	case THREAD_AWAKENED:
		return KERN_SUCCESS;

	case THREAD_INTERRUPTED:
		ulock_lock(ulock);
		ulock->accept_wait = FALSE;
		ulock_unlock(ulock);
		return KERN_ABORTED;

	case THREAD_RESTART:
		goto retry;
	}

	panic("lock_handoff_accept");
	return KERN_FAILURE;
}

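/*
 *	Illustrative sketch (not part of this implementation) of the
 *	handoff protocol implemented by lock_handoff/lock_handoff_accept,
 *	as seen by two cooperating user-space threads using the assumed
 *	MIG stubs:
 *
 *		Sender (current holder)		Receiver
 *		-----------------------		--------
 *		lock_acquire(set, id);
 *		lock_handoff(set, id);		lock_handoff_accept(set, id);
 *			(whichever side arrives first waits,
 *			 THREAD_ABORTSAFE, for the other)
 *						-- receiver now owns ulock id
 *						lock_release(set, id);
 *
 *	Ownership passes directly from sender to receiver; the ulock is
 *	never observed unowned in between.
 */
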
/*
 *	Routine:	lock_set_reference
 *
 *	Take out a reference on a lock set.  This keeps the data structure
 *	in existence (but the lock set may be deactivated).
 */
void
lock_set_reference(lock_set_t lock_set)
{
	lock_set_lock(lock_set);
	lock_set->ref_count++;
	lock_set_unlock(lock_set);
}

/*
 *	Routine:	lock_set_dereference
 *
 *	Release a reference on a lock set.  If this is the last reference,
 *	the lock set data structure is deallocated.
 */
void
lock_set_dereference(lock_set_t lock_set)
{
	int	ref_count;
	int	size;

	lock_set_lock(lock_set);
	ref_count = --(lock_set->ref_count);
	lock_set_unlock(lock_set);

	if (ref_count == 0) {
		size = sizeof(struct lock_set) +
			(sizeof(struct ulock) * (lock_set->n_ulocks - 1));
		kfree(lock_set, size);
	}
}

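/*
 *	ROUTINE:	ulock_release_all	[internal]
 *
 *	Releases every ulock still held by a terminating thread.
 *	Each ulock is marked unstable before it is released, since
 *	the holder is giving it up involuntarily.
 */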
void
ulock_release_all(
	thread_t		thread)
{
	ulock_t	ulock;

	while (!queue_empty(&thread->held_ulocks)) {
		ulock = (ulock_t)queue_first(&thread->held_ulocks);
		lock_make_unstable(ulock, thread);
		ulock_release_internal(ulock, thread);
	}
}