/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */
/*
 *	File:	kern/sync_lock.c
 *	Author:	Joseph CaraDonna
 *
 *	Contains RT distributed lock synchronization services.
 */

#include <mach/mach_types.h>
#include <mach/lock_set_server.h>
#include <mach/task_server.h>

#include <kern/misc_protos.h>
#include <kern/kalloc.h>
#include <kern/sync_lock.h>
#include <kern/sched_prim.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_sync.h>
#include <kern/thread.h>
#include <kern/task.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

/*
 *	Ulock ownership MACROS
 *
 *	Assumes: ulock internal lock is held
 */

#define ulock_ownership_set(ul, th)				\
	MACRO_BEGIN						\
	thread_mtx_lock(th);					\
	enqueue(&th->held_ulocks, (queue_entry_t) (ul));	\
	thread_mtx_unlock(th);					\
	(ul)->holder = th;					\
	MACRO_END

#define ulock_ownership_clear(ul)				\
	MACRO_BEGIN						\
	thread_t th;						\
	th = (ul)->holder;					\
	if ((th)->active) {					\
		thread_mtx_lock(th);				\
		remqueue(&th->held_ulocks,			\
			 (queue_entry_t) (ul));			\
		thread_mtx_unlock(th);				\
	} else {						\
		remqueue(&th->held_ulocks,			\
			 (queue_entry_t) (ul));			\
	}							\
	(ul)->holder = THREAD_NULL;				\
	MACRO_END

/*
 *	Lock set ownership MACROS
 */

#define lock_set_ownership_set(ls, t)					\
	MACRO_BEGIN							\
	task_lock((t));							\
	enqueue_head(&(t)->lock_set_list, (queue_entry_t) (ls));	\
	(t)->lock_sets_owned++;						\
	task_unlock((t));						\
	(ls)->owner = (t);						\
	MACRO_END

#define lock_set_ownership_clear(ls, t)					\
	MACRO_BEGIN							\
	task_lock((t));							\
	remqueue(&(t)->lock_set_list, (queue_entry_t) (ls));		\
	(t)->lock_sets_owned--;						\
	task_unlock((t));						\
	MACRO_END

unsigned int lock_set_event;
#define LOCK_SET_EVENT CAST_EVENT64_T(&lock_set_event)

unsigned int lock_set_handoff;
#define LOCK_SET_HANDOFF CAST_EVENT64_T(&lock_set_handoff)


lck_attr_t		lock_set_attr;
lck_grp_t		lock_set_grp;
static lck_grp_attr_t	lock_set_grp_attr;


/*
 *	ROUTINE:	lock_set_init		[private]
 *
 *	Initialize the lock_set subsystem.
 */
void
lock_set_init(void)
{
	lck_grp_attr_setdefault(&lock_set_grp_attr);
	lck_grp_init(&lock_set_grp, "lock_set", &lock_set_grp_attr);
	lck_attr_setdefault(&lock_set_attr);
}

/*
 *	ROUTINE:	lock_set_create		[exported]
 *
 *	Creates a lock set.
 *	The port representing the lock set is returned as a parameter.
 */
kern_return_t
lock_set_create (
	task_t		task,
	lock_set_t	*new_lock_set,
	int		n_ulocks,
	int		policy)
{
	lock_set_t	lock_set = LOCK_SET_NULL;
	ulock_t		ulock;
	vm_size_t	size;
	int		x;

	*new_lock_set = LOCK_SET_NULL;

	if (task == TASK_NULL || n_ulocks <= 0 || policy > SYNC_POLICY_MAX)
		return KERN_INVALID_ARGUMENT;

	if ((VM_MAX_ADDRESS - sizeof(struct lock_set))/sizeof(struct ulock) < (unsigned)n_ulocks)
		return KERN_RESOURCE_SHORTAGE;

	size = sizeof(struct lock_set) + (sizeof(struct ulock) * (n_ulocks - 1));
	lock_set = (lock_set_t) kalloc (size);

	if (lock_set == LOCK_SET_NULL)
		return KERN_RESOURCE_SHORTAGE;


	lock_set_lock_init(lock_set);
	lock_set->n_ulocks = n_ulocks;
	lock_set->ref_count = (task == kernel_task) ? 1 : 2; /* one for kernel, one for port */

	/*
	 *  Create and initialize the lock set port
	 */
	lock_set->port = ipc_port_alloc_kernel();
	if (lock_set->port == IP_NULL) {
		kfree(lock_set, size);
		return KERN_RESOURCE_SHORTAGE;
	}

	ipc_kobject_set (lock_set->port,
			 (ipc_kobject_t) lock_set,
			 IKOT_LOCK_SET);

	/*
	 *  Initialize each ulock in the lock set
	 */
	for (x = 0; x < n_ulocks; x++) {
		ulock = (ulock_t) &lock_set->ulock_list[x];
		ulock_lock_init(ulock);
		ulock->lock_set = lock_set;
		ulock->holder = THREAD_NULL;
		ulock->blocked = FALSE;
		ulock->unstable = FALSE;
		ulock->ho_wait = FALSE;
		ulock->accept_wait = FALSE;
		wait_queue_init(&ulock->wait_queue, policy);
	}

	lock_set_ownership_set(lock_set, task);

	lock_set->active = TRUE;
	*new_lock_set = lock_set;

	return KERN_SUCCESS;
}

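/*
 *	Illustrative sketch (editor's example, not compiled into the kernel):
 *	a typical lock set lifecycle for a kernel-resident caller, using only
 *	routines defined in this file.  The ulock count, the SYNC_POLICY_FIFO
 *	policy and the helper name lock_set_example are arbitrary choices for
 *	illustration; error handling is abbreviated.
 */
#if 0	/* example only */
static void
lock_set_example(task_t task)
{
	lock_set_t	lock_set;
	kern_return_t	kr;

	/* create a set of four ulocks with a FIFO wakeup policy */
	kr = lock_set_create(task, &lock_set, 4, SYNC_POLICY_FIFO);
	if (kr != KERN_SUCCESS)
		return;

	/* acquire ulock 0, possibly blocking, then release it */
	kr = lock_acquire(lock_set, 0);
	if (kr == KERN_SUCCESS || kr == KERN_LOCK_UNSTABLE)
		(void) lock_release(lock_set, 0);

	/* tear the lock set down; any waiters get KERN_LOCK_SET_DESTROYED */
	(void) lock_set_destroy(task, lock_set);
}
#endif
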
/*
 *	ROUTINE:	lock_set_destroy	[exported]
 *
 *	Destroys a lock set.  This call will only succeed if the
 *	specified task is the same task that was specified at the
 *	lock set's creation.
 *
 *	NOTES:
 *	- All threads currently blocked on the lock set's ulocks are awoken.
 *	- These threads will return with the KERN_LOCK_SET_DESTROYED error.
 */
kern_return_t
lock_set_destroy (task_t task, lock_set_t lock_set)
{
	ulock_t		ulock;
	int		i;

	if (task == TASK_NULL || lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_set->owner != task)
		return KERN_INVALID_RIGHT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	/*
	 *  Deactivate lock set
	 */
	lock_set->active = FALSE;

	/*
	 *  If a ulock is currently held in the target lock set:
	 *
	 *  1) Wakeup all threads blocked on the ulock (if any).  Threads
	 *     may be blocked waiting normally, or waiting for a handoff.
	 *     Blocked threads will return with KERN_LOCK_SET_DESTROYED.
	 *
	 *  2) ulock ownership is cleared.
	 *     The thread currently holding the ulock is revoked of its
	 *     ownership.
	 */
	for (i = 0; i < lock_set->n_ulocks; i++) {
		ulock = &lock_set->ulock_list[i];

		ulock_lock(ulock);

		if (ulock->accept_wait) {
			ulock->accept_wait = FALSE;
			wait_queue_wakeup64_one(&ulock->wait_queue,
						LOCK_SET_HANDOFF,
						THREAD_RESTART);
		}

		if (ulock->holder) {
			if (ulock->blocked) {
				ulock->blocked = FALSE;
				wait_queue_wakeup64_all(&ulock->wait_queue,
							LOCK_SET_EVENT,
							THREAD_RESTART);
			}
			if (ulock->ho_wait) {
				ulock->ho_wait = FALSE;
				wait_queue_wakeup64_one(&ulock->wait_queue,
							LOCK_SET_HANDOFF,
							THREAD_RESTART);
			}
			ulock_ownership_clear(ulock);
		}

		ulock_unlock(ulock);
	}

	lock_set_unlock(lock_set);
	lock_set_ownership_clear(lock_set, task);

	/*
	 *  Drop the lock set reference given to the containing task,
	 *  which in turn destroys the lock set structure if the reference
	 *  count goes to zero.
	 */
	lock_set_dereference(lock_set);

	return KERN_SUCCESS;
}

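/*
 *	ROUTINE:	lock_acquire		[exported]
 *
 *	Acquires the specified ulock, blocking (interruptibly) while another
 *	thread holds it.  Returns KERN_LOCK_OWNED_SELF if the caller already
 *	holds the ulock, and KERN_LOCK_UNSTABLE if the ulock was marked
 *	unstable (typically because a previous holder terminated while
 *	holding it).
 */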
kern_return_t
lock_acquire (lock_set_t lock_set, int lock_id)
{
	ulock_t		ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 *  Block the current thread if the lock is already held.
	 */

	if (ulock->holder != THREAD_NULL) {
		int wait_result;

		if (ulock->holder == current_thread()) {
			ulock_unlock(ulock);
			return KERN_LOCK_OWNED_SELF;
		}

		ulock->blocked = TRUE;
		wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
						       LOCK_SET_EVENT,
						       THREAD_ABORTSAFE, 0);
		ulock_unlock(ulock);

		/*
		 *  Block - Wait for lock to become available.
		 */
		if (wait_result == THREAD_WAITING)
			wait_result = thread_block(THREAD_CONTINUE_NULL);

		/*
		 *  Check why the thread was woken up.  In all cases, we
		 *  have already been removed from the wait queue.
		 */
		switch (wait_result) {
		case THREAD_AWAKENED:
			/* lock transitioned from old locker to us */
			/* he already made us owner */
			return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
						   KERN_SUCCESS;

		case THREAD_INTERRUPTED:
			return KERN_ABORTED;

		case THREAD_RESTART:
			goto retry;	/* probably a dead lock_set */

		default:
			panic("lock_acquire\n");
		}
	}

	/*
	 *  Assign lock ownership
	 */
	ulock_ownership_set(ulock, current_thread());
	ulock_unlock(ulock);

	return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}

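/*
 *	ROUTINE:	lock_release		[exported]
 *
 *	Releases the specified ulock on behalf of the calling thread.
 *	The actual work is done by ulock_release_internal().
 */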
kern_return_t
lock_release (lock_set_t lock_set, int lock_id)
{
	ulock_t		ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];

	return (ulock_release_internal(ulock, current_thread()));
}

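/*
 *	ROUTINE:	lock_try		[exported]
 *
 *	Attempts to acquire the specified ulock without blocking, returning
 *	KERN_LOCK_OWNED (or KERN_LOCK_OWNED_SELF) if it is already held.
 */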
kern_return_t
lock_try (lock_set_t lock_set, int lock_id)
{
	ulock_t		ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 *  If the lock is already owned, we return without blocking.
	 *
	 *  An ownership status is returned to inform the caller as to
	 *  whether it already holds the lock or another thread does.
	 */

	if (ulock->holder != THREAD_NULL) {

		if (ulock->holder == current_thread()) {
			ulock_unlock(ulock);
			return KERN_LOCK_OWNED_SELF;
		}

		ulock_unlock(ulock);
		return KERN_LOCK_OWNED;
	}

	/*
	 *  The lock is free; assign ownership to the current thread
	 *  (this adds the ulock to the thread's held_ulocks list).
	 */

	ulock_ownership_set(ulock, current_thread());
	ulock_unlock(ulock);

	return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}

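/*
 *	ROUTINE:	lock_make_stable	[exported]
 *
 *	Marks the ulock as stable again.  Only the current holder of the
 *	ulock may do this.
 */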
kern_return_t
lock_make_stable (lock_set_t lock_set, int lock_id)
{
	ulock_t		ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != current_thread()) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	ulock->unstable = FALSE;
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}

/*
 *	ROUTINE:	lock_make_unstable	[internal]
 *
 *	Marks the lock as unstable.
 *
 *	NOTES:
 *	- All future acquisitions of the lock will return with a
 *	  KERN_LOCK_UNSTABLE status, until the lock is made stable again.
 */
kern_return_t
lock_make_unstable (ulock_t ulock, thread_t thread)
{
	lock_set_t	lock_set;

	lock_set = ulock->lock_set;
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != thread) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	ulock->unstable = TRUE;
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}

/*
 *	ROUTINE:	ulock_release_internal	[internal]
 *
 *	Releases the ulock.
 *	If any threads are blocked waiting for the ulock, one is woken up.
 */
kern_return_t
ulock_release_internal (ulock_t ulock, thread_t thread)
{
	lock_set_t	lock_set;

	if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != thread) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	/*
	 *  If we have a hint that threads might be waiting,
	 *  try to transfer the lock ownership to a waiting thread
	 *  and wake it up.
	 */
	if (ulock->blocked) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	wqthread;
		spl_t		s;

		s = splsched();
		wait_queue_lock(wq);
		wqthread = wait_queue_wakeup64_identity_locked(wq,
							       LOCK_SET_EVENT,
							       THREAD_AWAKENED,
							       TRUE);
		/* wait_queue now unlocked, thread locked */

		if (wqthread != THREAD_NULL) {
			thread_unlock(wqthread);
			splx(s);

			/*
			 *  Transfer ulock ownership
			 *  from the current thread to the acquisition thread.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, wqthread);
			ulock_unlock(ulock);

			return KERN_SUCCESS;
		} else {
			ulock->blocked = FALSE;
			splx(s);
		}
	}

	/*
	 *  Disown ulock
	 */
	ulock_ownership_clear(ulock);
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}

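/*
 *	ROUTINE:	lock_handoff		[exported]
 *
 *	Hands the ulock off to an accepting thread.  If no accepting thread
 *	is waiting, the caller blocks (interruptibly) until one arrives or
 *	the lock set is destroyed.
 */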
kern_return_t
lock_handoff (lock_set_t lock_set, int lock_id)
{
	ulock_t		ulock;
	int		wait_result;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);

	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != current_thread()) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	/*
	 *  If the accepting thread (the receiver) is already waiting
	 *  to accept the lock from the handoff thread (the sender),
	 *  then perform the hand-off now.
	 */

	if (ulock->accept_wait) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	thread;
		spl_t		s;

		/*
		 *  See who the lucky devil is, if he is still there waiting.
		 */
		s = splsched();
		wait_queue_lock(wq);
		thread = wait_queue_wakeup64_identity_locked(
						wq,
						LOCK_SET_HANDOFF,
						THREAD_AWAKENED,
						TRUE);
		/* wait queue unlocked, thread locked */

		/*
		 *  Transfer lock ownership
		 */
		if (thread != THREAD_NULL) {
			/*
			 *  The thread we are transferring to will try
			 *  to take the lock on the ulock, and therefore
			 *  will wait for us to complete the handoff even
			 *  though we set it running.
			 */
			thread_unlock(thread);
			splx(s);

			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, thread);
			ulock->accept_wait = FALSE;
			ulock_unlock(ulock);
			return KERN_SUCCESS;
		} else {

			/*
			 *  OOPS.  The accepting thread must have been
			 *  aborted, and is racing back to clear the flag
			 *  that says it is waiting for an accept.  It will
			 *  clear the flag when we release the lock, so just
			 *  fall through and wait for the next accepting
			 *  thread (that's the way it is specified).
			 */
			splx(s);
		}
	}

	/*
	 *  Indicate that there is a hand-off thread waiting, and then wait
	 *  for an accepting thread.
	 */
	ulock->ho_wait = TRUE;
	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
					       LOCK_SET_HANDOFF,
					       THREAD_ABORTSAFE, 0);
	ulock_unlock(ulock);

	if (wait_result == THREAD_WAITING)
		wait_result = thread_block(THREAD_CONTINUE_NULL);

	/*
	 *  If the thread was woken up by some action other than
	 *  lock_handoff_accept or lock_set_destroy (e.g. thread_terminate),
	 *  then we need to clear the ulock's handoff state.
	 */
	switch (wait_result) {

	case THREAD_AWAKENED:
		/*
		 *  Take the ulock lock to synchronize with the
		 *  thread that is accepting ownership.
		 */
		ulock_lock(ulock);
		assert(ulock->holder != current_thread());
		ulock_unlock(ulock);
		return KERN_SUCCESS;

	case THREAD_INTERRUPTED:
		ulock_lock(ulock);
		assert(ulock->holder == current_thread());
		ulock->ho_wait = FALSE;
		ulock_unlock(ulock);
		return KERN_ABORTED;

	case THREAD_RESTART:
		goto retry;
	}

	panic("lock_handoff");
	return KERN_FAILURE;
}

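/*
 *	ROUTINE:	lock_handoff_accept	[exported]
 *
 *	Accepts ownership of the ulock from a handing-off thread.  If no
 *	handoff is pending, the caller blocks (interruptibly) until the
 *	holder initiates one or the lock set is destroyed.
 */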
kern_return_t
lock_handoff_accept (lock_set_t lock_set, int lock_id)
{
	ulock_t		ulock;
	int		wait_result;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 *  If there is another accepting thread that beat us, just
	 *  return with an error.
	 */
	if (ulock->accept_wait) {
		ulock_unlock(ulock);
		return KERN_ALREADY_WAITING;
	}

	if (ulock->holder == current_thread()) {
		ulock_unlock(ulock);
		return KERN_LOCK_OWNED_SELF;
	}

	/*
	 *  If the handoff thread (the sender) is already waiting to
	 *  hand-off the lock to the accepting thread (the receiver),
	 *  then perform the hand-off now.
	 */
	if (ulock->ho_wait) {
		wait_queue_t	wq = &ulock->wait_queue;

		/*
		 *  See who the lucky devil is, if he is still there waiting.
		 */
		assert(ulock->holder != THREAD_NULL);

		if (wait_queue_wakeup64_thread(wq,
					       LOCK_SET_HANDOFF,
					       ulock->holder,
					       THREAD_AWAKENED) == KERN_SUCCESS) {
			/*
			 *  Holder thread was still waiting to give it
			 *  away.  Take over ownership.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, current_thread());
			ulock->ho_wait = FALSE;
			ulock_unlock(ulock);
			return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
						   KERN_SUCCESS;
		}

		/*
		 *  OOPS.  The owner was aborted out of the handoff.
		 *  He will clear his own flag when he gets back.
		 *  In the meantime, we will wait as if we didn't
		 *  even see his flag (by falling through).
		 */
	}

	ulock->accept_wait = TRUE;
	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
					       LOCK_SET_HANDOFF,
					       THREAD_ABORTSAFE, 0);
	ulock_unlock(ulock);

	if (wait_result == THREAD_WAITING)
		wait_result = thread_block(THREAD_CONTINUE_NULL);

	/*
	 *  If the thread was woken up by some action other than
	 *  lock_handoff or lock_set_destroy (e.g. thread_terminate),
	 *  then we need to clear the ulock's handoff state.
	 */
	switch (wait_result) {

	case THREAD_AWAKENED:
		/*
		 *  Take the lock to synchronize with the thread handing
		 *  off the lock to us.  We don't want to continue until
		 *  they complete the handoff.
		 */
		ulock_lock(ulock);
		assert(ulock->accept_wait == FALSE);
		assert(ulock->holder == current_thread());
		ulock_unlock(ulock);
		return KERN_SUCCESS;

	case THREAD_INTERRUPTED:
		ulock_lock(ulock);
		ulock->accept_wait = FALSE;
		ulock_unlock(ulock);
		return KERN_ABORTED;

	case THREAD_RESTART:
		goto retry;
	}

	panic("lock_handoff_accept");
	return KERN_FAILURE;
}

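/*
 *	Illustrative sketch (editor's example, not compiled into the kernel):
 *	how a sender/receiver pair is expected to use the handoff primitives
 *	above on ulock 0 of a shared lock set.  The function names are
 *	hypothetical; each side may block until its partner arrives.
 */
#if 0	/* example only */
static void
handoff_sender(lock_set_t lock_set)
{
	/* must hold ulock 0 before handing it off; blocks until accepted */
	if (lock_acquire(lock_set, 0) == KERN_SUCCESS)
		(void) lock_handoff(lock_set, 0);
}

static void
handoff_receiver(lock_set_t lock_set)
{
	/* blocks until the holder hands ulock 0 off, then releases it */
	if (lock_handoff_accept(lock_set, 0) == KERN_SUCCESS)
		(void) lock_release(lock_set, 0);
}
#endif
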
/*
 *	Routine:	lock_set_reference
 *
 *	Take out a reference on a lock set.  This keeps the data structure
 *	in existence (but the lock set may be deactivated).
 */
void
lock_set_reference(lock_set_t lock_set)
{
	lock_set_lock(lock_set);
	lock_set->ref_count++;
	lock_set_unlock(lock_set);
}

/*
 *	Routine:	lock_set_dereference
 *
 *	Release a reference on a lock set.  If this is the last reference,
 *	the lock set data structure is deallocated.
 */
void
lock_set_dereference(lock_set_t lock_set)
{
	int	ref_count;
	int	size;

	lock_set_lock(lock_set);
	ref_count = --(lock_set->ref_count);
	lock_set_unlock(lock_set);

	if (ref_count == 0) {
		ipc_port_dealloc_kernel(lock_set->port);
		size = (int)(sizeof(struct lock_set) +
			     (sizeof(struct ulock) * (lock_set->n_ulocks - 1)));
		kfree(lock_set, size);
	}
}

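/*
 *	Routine:	ulock_release_all	[internal]
 *
 *	Marks unstable and releases every ulock still held by the given
 *	thread (e.g. when that thread is terminating).
 */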
void
ulock_release_all(
	thread_t	thread)
{
	ulock_t	ulock;

	while (!queue_empty(&thread->held_ulocks)) {
		ulock = (ulock_t)queue_first(&thread->held_ulocks);
		lock_make_unstable(ulock, thread);
		ulock_release_internal(ulock, thread);
	}
}