/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */
/*
 *      File:   kern/sync_lock.c
 *      Author: Joseph CaraDonna
 *
 *      Contains RT distributed lock synchronization services.
 */

#include <mach/mach_types.h>
#include <mach/lock_set_server.h>
#include <mach/task_server.h>

#include <kern/misc_protos.h>
#include <kern/kalloc.h>
#include <kern/sync_lock.h>
#include <kern/sched_prim.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_sync.h>
#include <kern/thread.h>
#include <kern/task.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

/*
 *      Ulock ownership MACROS
 *
 *      Assumes: ulock internal lock is held
 */

#define ulock_ownership_set(ul, th)                             \
        MACRO_BEGIN                                             \
        thread_mtx_lock(th);                                    \
        enqueue(&th->held_ulocks, (queue_entry_t)(ul));         \
        thread_mtx_unlock(th);                                  \
        (ul)->holder = th;                                      \
        MACRO_END

#define ulock_ownership_clear(ul)                               \
        MACRO_BEGIN                                             \
        thread_t th;                                            \
        th = (ul)->holder;                                      \
        if (th->active) {                                       \
                thread_mtx_lock(th);                            \
                remqueue(&th->held_ulocks,                      \
                         (queue_entry_t)(ul));                  \
                thread_mtx_unlock(th);                          \
        } else {                                                \
                remqueue(&th->held_ulocks,                      \
                         (queue_entry_t)(ul));                  \
        }                                                       \
        (ul)->holder = THREAD_NULL;                             \
        MACRO_END

/*
 *      Lock set ownership MACROS
 */

#define lock_set_ownership_set(ls, t)                           \
        MACRO_BEGIN                                             \
        task_lock((t));                                         \
        enqueue_head(&(t)->lock_set_list, (queue_entry_t)(ls)); \
        (t)->lock_sets_owned++;                                 \
        task_unlock((t));                                       \
        (ls)->owner = (t);                                      \
        MACRO_END

#define lock_set_ownership_clear(ls, t)                         \
        MACRO_BEGIN                                             \
        task_lock((t));                                         \
        remqueue(&(t)->lock_set_list, (queue_entry_t)(ls));     \
        (t)->lock_sets_owned--;                                 \
        task_unlock((t));                                       \
        MACRO_END

unsigned int lock_set_event;
#define LOCK_SET_EVENT ((event64_t)&lock_set_event)

unsigned int lock_set_handoff;
#define LOCK_SET_HANDOFF ((event64_t)&lock_set_handoff)

/*
 *      ROUTINE:        lock_set_init           [private]
 *
 *      Initialize the lock_set subsystem.
 *
 *      For now, we don't have anything to do here.
 */
void
lock_set_init(void)
{
        return;
}


/*
 *      ROUTINE:        lock_set_create         [exported]
 *
 *      Creates a lock set.
 *      The port representing the lock set is returned as a parameter.
 */
kern_return_t
lock_set_create (
        task_t          task,
        lock_set_t      *new_lock_set,
        int             n_ulocks,
        int             policy)
{
        lock_set_t      lock_set = LOCK_SET_NULL;
        ulock_t         ulock;
        int             size;
        int             x;

        *new_lock_set = LOCK_SET_NULL;

        if (task == TASK_NULL || n_ulocks <= 0 || policy > SYNC_POLICY_MAX)
                return KERN_INVALID_ARGUMENT;

        size = sizeof(struct lock_set) + (sizeof(struct ulock) * (n_ulocks - 1));
        lock_set = (lock_set_t) kalloc (size);

        if (lock_set == LOCK_SET_NULL)
                return KERN_RESOURCE_SHORTAGE;

        lock_set_lock_init(lock_set);
        lock_set->n_ulocks = n_ulocks;
        lock_set->ref_count = 1;

        /*
         *  Create and initialize the lock set port
         */
        lock_set->port = ipc_port_alloc_kernel();
        if (lock_set->port == IP_NULL) {
                /* This will deallocate the lock set */
                lock_set_dereference(lock_set);
                return KERN_RESOURCE_SHORTAGE;
        }

        ipc_kobject_set (lock_set->port,
                        (ipc_kobject_t) lock_set,
                        IKOT_LOCK_SET);

        /*
         *  Initialize each ulock in the lock set
         */
        for (x = 0; x < n_ulocks; x++) {
                ulock = (ulock_t) &lock_set->ulock_list[x];
                ulock_lock_init(ulock);
                ulock->lock_set = lock_set;
                ulock->holder   = THREAD_NULL;
                ulock->blocked  = FALSE;
                ulock->unstable = FALSE;
                ulock->ho_wait  = FALSE;
                wait_queue_init(&ulock->wait_queue, policy);
        }

        lock_set_ownership_set(lock_set, task);

        lock_set->active = TRUE;
        *new_lock_set = lock_set;

        return KERN_SUCCESS;
}
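
/*
 *      Usage sketch (illustrative comment only, not kernel logic): a user
 *      task reaches lock_set_create()/lock_set_destroy() through the
 *      MIG-generated user-level stubs.  The stub availability and the
 *      minimal error handling below are assumptions, not requirements.
 *
 *          lock_set_t    lset;
 *          kern_return_t kr;
 *
 *          kr = lock_set_create(mach_task_self(), &lset, 4, SYNC_POLICY_FIFO);
 *          if (kr != KERN_SUCCESS)
 *              return kr;
 *          ... ulocks 0..3 may now be used via lock_acquire() and friends ...
 *          kr = lock_set_destroy(mach_task_self(), lset);
 */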

/*
 *      ROUTINE:        lock_set_destroy        [exported]
 *
 *      Destroys a lock set.  This call will only succeed if the
 *      specified task is the same task that created the lock set.
 *
 *      NOTES:
 *      - All threads currently blocked on the lock set's ulocks are awoken.
 *      - These threads will return with the KERN_LOCK_SET_DESTROYED error.
 */
kern_return_t
lock_set_destroy (task_t task, lock_set_t lock_set)
{
        ulock_t         ulock;
        int             i;

        if (task == TASK_NULL || lock_set == LOCK_SET_NULL)
                return KERN_INVALID_ARGUMENT;

        if (lock_set->owner != task)
                return KERN_INVALID_RIGHT;

        lock_set_lock(lock_set);
        if (!lock_set->active) {
                lock_set_unlock(lock_set);
                return KERN_LOCK_SET_DESTROYED;
        }

        /*
         *  Deactivate lock set
         */
        lock_set->active = FALSE;

        /*
         *  If a ulock is currently held in the target lock set:
         *
         *  1) Wake up all threads blocked on the ulock (if any).  Threads
         *     may be blocked waiting normally, or waiting for a handoff.
         *     Blocked threads will return with KERN_LOCK_SET_DESTROYED.
         *
         *  2) ulock ownership is cleared.
         *     The thread currently holding the ulock has its ownership
         *     revoked.
         */
        for (i = 0; i < lock_set->n_ulocks; i++) {
                ulock = &lock_set->ulock_list[i];

                ulock_lock(ulock);

                if (ulock->accept_wait) {
                        ulock->accept_wait = FALSE;
                        wait_queue_wakeup64_one(&ulock->wait_queue,
                                                LOCK_SET_HANDOFF,
                                                THREAD_RESTART);
                }

                if (ulock->holder) {
                        if (ulock->blocked) {
                                ulock->blocked = FALSE;
                                wait_queue_wakeup64_all(&ulock->wait_queue,
                                                        LOCK_SET_EVENT,
                                                        THREAD_RESTART);
                        }
                        if (ulock->ho_wait) {
                                ulock->ho_wait = FALSE;
                                wait_queue_wakeup64_one(&ulock->wait_queue,
                                                        LOCK_SET_HANDOFF,
                                                        THREAD_RESTART);
                        }
                        ulock_ownership_clear(ulock);
                }

                ulock_unlock(ulock);
        }

        lock_set_unlock(lock_set);
        lock_set_ownership_clear(lock_set, task);

        /*
         *  Deallocate
         *
         *  Drop the lock set reference, which in turn destroys the
         *  lock set structure if the reference count goes to zero.
         */

        ipc_port_dealloc_kernel(lock_set->port);
        lock_set_dereference(lock_set);

        return KERN_SUCCESS;
}

kern_return_t
lock_acquire (lock_set_t lock_set, int lock_id)
{
        ulock_t         ulock;

        if (lock_set == LOCK_SET_NULL)
                return KERN_INVALID_ARGUMENT;

        if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
                return KERN_INVALID_ARGUMENT;

 retry:
        lock_set_lock(lock_set);
        if (!lock_set->active) {
                lock_set_unlock(lock_set);
                return KERN_LOCK_SET_DESTROYED;
        }

        ulock = (ulock_t) &lock_set->ulock_list[lock_id];
        ulock_lock(ulock);
        lock_set_unlock(lock_set);

        /*
         *  Block the current thread if the lock is already held.
         */

        if (ulock->holder != THREAD_NULL) {
                int wait_result;

                if (ulock->holder == current_thread()) {
                        ulock_unlock(ulock);
                        return KERN_LOCK_OWNED_SELF;
                }

                ulock->blocked = TRUE;
                wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
                                                       LOCK_SET_EVENT,
                                                       THREAD_ABORTSAFE, 0);
                ulock_unlock(ulock);

                /*
                 *  Block - Wait for lock to become available.
                 */
                if (wait_result == THREAD_WAITING)
                        wait_result = thread_block(THREAD_CONTINUE_NULL);

                /*
                 *  Check the result status:
                 *
                 *  See why the thread was woken up.  In all cases, we have
                 *  already been removed from the wait queue.
                 */
                switch (wait_result) {
                case THREAD_AWAKENED:
                        /* lock transitioned from old locker to us */
                        /* the old holder already made us the owner */
                        return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
                                                   KERN_SUCCESS;

                case THREAD_INTERRUPTED:
                        return KERN_ABORTED;

                case THREAD_RESTART:
                        goto retry;     /* probably a dead lock_set */

                default:
                        panic("lock_acquire");
                }
        }

        /*
         *  Assign lock ownership
         */
        ulock_ownership_set(ulock, current_thread());
        ulock_unlock(ulock);

        return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}
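
/*
 *      Usage sketch (illustrative comment only, assuming the user-level MIG
 *      stubs and a previously created lock set "lset"): the expected pairing
 *      of lock_acquire() and lock_release() on ulock 0.
 *
 *          kr = lock_acquire(lset, 0);
 *          if (kr == KERN_SUCCESS || kr == KERN_LOCK_UNSTABLE) {
 *              ... critical section; on KERN_LOCK_UNSTABLE, repair the
 *              ... protected data and call lock_make_stable(lset, 0) ...
 *              kr = lock_release(lset, 0);
 *          }
 */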

kern_return_t
lock_release (lock_set_t lock_set, int lock_id)
{
        ulock_t         ulock;

        if (lock_set == LOCK_SET_NULL)
                return KERN_INVALID_ARGUMENT;

        if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
                return KERN_INVALID_ARGUMENT;

        ulock = (ulock_t) &lock_set->ulock_list[lock_id];

        return (ulock_release_internal(ulock, current_thread()));
}

kern_return_t
lock_try (lock_set_t lock_set, int lock_id)
{
        ulock_t         ulock;

        if (lock_set == LOCK_SET_NULL)
                return KERN_INVALID_ARGUMENT;

        if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
                return KERN_INVALID_ARGUMENT;

        lock_set_lock(lock_set);
        if (!lock_set->active) {
                lock_set_unlock(lock_set);
                return KERN_LOCK_SET_DESTROYED;
        }

        ulock = (ulock_t) &lock_set->ulock_list[lock_id];
        ulock_lock(ulock);
        lock_set_unlock(lock_set);

        /*
         *  If the lock is already owned, we return without blocking.
         *
         *  An ownership status is returned to inform the caller as to
         *  whether it already holds the lock or another thread does.
         */

        if (ulock->holder != THREAD_NULL) {
                if (ulock->holder == current_thread()) {
                        ulock_unlock(ulock);
                        return KERN_LOCK_OWNED_SELF;
                }

                ulock_unlock(ulock);
                return KERN_LOCK_OWNED;
        }

        /*
         *  Add the ulock to the acquiring thread's held_ulocks list.
         */

        ulock_ownership_set(ulock, current_thread());
        ulock_unlock(ulock);

        return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}
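
/*
 *      Usage sketch (illustrative comment only, assuming the user-level MIG
 *      stub): lock_try() is the non-blocking probe, so callers branch on the
 *      ownership status instead of sleeping.
 *
 *          kr = lock_try(lset, 0);
 *          if (kr == KERN_SUCCESS || kr == KERN_LOCK_UNSTABLE) {
 *              ... the ulock was acquired; pair with lock_release(lset, 0) ...
 *          } else if (kr == KERN_LOCK_OWNED_SELF) {
 *              ... this thread already held the ulock before the call ...
 *          } else if (kr == KERN_LOCK_OWNED) {
 *              ... another thread holds the ulock; nothing was acquired ...
 *          }
 */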

kern_return_t
lock_make_stable (lock_set_t lock_set, int lock_id)
{
        ulock_t         ulock;

        if (lock_set == LOCK_SET_NULL)
                return KERN_INVALID_ARGUMENT;

        if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
                return KERN_INVALID_ARGUMENT;

        lock_set_lock(lock_set);
        if (!lock_set->active) {
                lock_set_unlock(lock_set);
                return KERN_LOCK_SET_DESTROYED;
        }

        ulock = (ulock_t) &lock_set->ulock_list[lock_id];
        ulock_lock(ulock);
        lock_set_unlock(lock_set);

        if (ulock->holder != current_thread()) {
                ulock_unlock(ulock);
                return KERN_INVALID_RIGHT;
        }

        ulock->unstable = FALSE;
        ulock_unlock(ulock);

        return KERN_SUCCESS;
}

/*
 *      ROUTINE:        lock_make_unstable      [internal]
 *
 *      Marks the lock as unstable.
 *
 *      NOTES:
 *      - All future acquisitions of the lock will return with a
 *        KERN_LOCK_UNSTABLE status, until the lock is made stable again.
 */
kern_return_t
lock_make_unstable (ulock_t ulock, thread_t thread)
{
        lock_set_t      lock_set;

        lock_set = ulock->lock_set;
        lock_set_lock(lock_set);
        if (!lock_set->active) {
                lock_set_unlock(lock_set);
                return KERN_LOCK_SET_DESTROYED;
        }

        ulock_lock(ulock);
        lock_set_unlock(lock_set);

        if (ulock->holder != thread) {
                ulock_unlock(ulock);
                return KERN_INVALID_RIGHT;
        }

        ulock->unstable = TRUE;
        ulock_unlock(ulock);

        return KERN_SUCCESS;
}

/*
 *      ROUTINE:        ulock_release_internal  [internal]
 *
 *      Releases the ulock.
 *      If any threads are blocked waiting for the ulock, one is woken up.
 */
kern_return_t
ulock_release_internal (ulock_t ulock, thread_t thread)
{
        lock_set_t      lock_set;

        if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
                return KERN_INVALID_ARGUMENT;

        lock_set_lock(lock_set);
        if (!lock_set->active) {
                lock_set_unlock(lock_set);
                return KERN_LOCK_SET_DESTROYED;
        }
        ulock_lock(ulock);
        lock_set_unlock(lock_set);

        if (ulock->holder != thread) {
                ulock_unlock(ulock);
                return KERN_INVALID_RIGHT;
        }

        /*
         *  If we have a hint that threads might be waiting,
         *  try to transfer the lock ownership to a waiting thread
         *  and wake it up.
         */
        if (ulock->blocked) {
                wait_queue_t    wq = &ulock->wait_queue;
                thread_t        wqthread;
                spl_t           s;

                s = splsched();
                wait_queue_lock(wq);
                wqthread = wait_queue_wakeup64_identity_locked(wq,
                                                               LOCK_SET_EVENT,
                                                               THREAD_AWAKENED,
                                                               TRUE);
                /* wait_queue now unlocked, thread locked */

                if (wqthread != THREAD_NULL) {
                        /*
                         *  JMM - These ownership transfer macros have a
                         *  locking/race problem.  To keep the thread from
                         *  changing states on us (nullifying the ownership
                         *  assignment) we need to keep the thread locked
                         *  during the assignment.  But we can't, because
                         *  the macros take the thread mutex.
                         *  Since this code was already broken before I got
                         *  here, I will leave it for now.
                         */
                        thread_unlock(wqthread);
                        splx(s);

                        /*
                         *  Transfer ulock ownership
                         *  from the current thread to the acquiring thread.
                         */
                        ulock_ownership_clear(ulock);
                        ulock_ownership_set(ulock, wqthread);
                        ulock_unlock(ulock);

                        return KERN_SUCCESS;
                } else {
                        ulock->blocked = FALSE;
                        splx(s);
                }
        }

        /*
         *  Disown ulock
         */
        ulock_ownership_clear(ulock);
        ulock_unlock(ulock);

        return KERN_SUCCESS;
}

kern_return_t
lock_handoff (lock_set_t lock_set, int lock_id)
{
        ulock_t         ulock;
        int             wait_result;

        if (lock_set == LOCK_SET_NULL)
                return KERN_INVALID_ARGUMENT;

        if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
                return KERN_INVALID_ARGUMENT;

 retry:
        lock_set_lock(lock_set);

        if (!lock_set->active) {
                lock_set_unlock(lock_set);
                return KERN_LOCK_SET_DESTROYED;
        }

        ulock = (ulock_t) &lock_set->ulock_list[lock_id];
        ulock_lock(ulock);
        lock_set_unlock(lock_set);

        if (ulock->holder != current_thread()) {
                ulock_unlock(ulock);
                return KERN_INVALID_RIGHT;
        }

        /*
         *  If the accepting thread (the receiver) is already waiting
         *  to accept the lock from the handoff thread (the sender),
         *  then perform the hand-off now.
         */

        if (ulock->accept_wait) {
                wait_queue_t    wq = &ulock->wait_queue;
                thread_t        thread;
                spl_t           s;

                /*
                 *  See who the lucky devil is, if he is still there waiting.
                 */
                s = splsched();
                wait_queue_lock(wq);
                thread = wait_queue_wakeup64_identity_locked(
                                        wq,
                                        LOCK_SET_HANDOFF,
                                        THREAD_AWAKENED,
                                        TRUE);
                /* wait queue unlocked, thread locked */

                /*
                 *  Transfer lock ownership
                 */
                if (thread != THREAD_NULL) {
                        /*
                         *  JMM - These ownership transfer macros have a
                         *  locking/race problem.  To keep the thread from
                         *  changing states on us (nullifying the ownership
                         *  assignment) we need to keep the thread locked
                         *  during the assignment.  But we can't, because the
                         *  macros take the thread mutex.
                         *
                         *  Since this code was already broken before I got
                         *  here, I will leave it for now.
                         */
                        thread_unlock(thread);
                        splx(s);

                        ulock_ownership_clear(ulock);
                        ulock_ownership_set(ulock, thread);
                        ulock->accept_wait = FALSE;
                        ulock_unlock(ulock);
                        return KERN_SUCCESS;
                } else {

                        /*
                         *  OOPS.  The accepting thread must have been aborted
                         *  and is racing back to clear the flag that says it
                         *  is waiting for an accept.  It will clear the flag
                         *  when we release the lock, so just fall thru and
                         *  wait for the next accepting thread (that's the way
                         *  it is specified).
                         */
                        splx(s);
                }
        }

        /*
         *  Indicate that there is a hand-off thread waiting, and then wait
         *  for an accepting thread.
         */
        ulock->ho_wait = TRUE;
        wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
                                               LOCK_SET_HANDOFF,
                                               THREAD_ABORTSAFE, 0);
        ulock_unlock(ulock);

        if (wait_result == THREAD_WAITING)
                wait_result = thread_block(THREAD_CONTINUE_NULL);

        /*
         *  If the thread was woken up via some action other than
         *  lock_handoff_accept or lock_set_destroy (e.g. thread_terminate),
         *  then we need to clear the ulock's handoff state.
         */
        switch (wait_result) {

        case THREAD_AWAKENED:
                return KERN_SUCCESS;

        case THREAD_INTERRUPTED:
                ulock_lock(ulock);
                assert(ulock->holder == current_thread());
                ulock->ho_wait = FALSE;
                ulock_unlock(ulock);
                return KERN_ABORTED;

        case THREAD_RESTART:
                goto retry;
        }

        panic("lock_handoff");
        return KERN_FAILURE;
}

kern_return_t
lock_handoff_accept (lock_set_t lock_set, int lock_id)
{
        ulock_t         ulock;
        int             wait_result;

        if (lock_set == LOCK_SET_NULL)
                return KERN_INVALID_ARGUMENT;

        if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
                return KERN_INVALID_ARGUMENT;

 retry:
        lock_set_lock(lock_set);
        if (!lock_set->active) {
                lock_set_unlock(lock_set);
                return KERN_LOCK_SET_DESTROYED;
        }

        ulock = (ulock_t) &lock_set->ulock_list[lock_id];
        ulock_lock(ulock);
        lock_set_unlock(lock_set);

        /*
         *  If there is another accepting thread that beat us, just
         *  return with an error.
         */
        if (ulock->accept_wait) {
                ulock_unlock(ulock);
                return KERN_ALREADY_WAITING;
        }

        if (ulock->holder == current_thread()) {
                ulock_unlock(ulock);
                return KERN_LOCK_OWNED_SELF;
        }

        /*
         *  If the handoff thread (the sender) is already waiting to
         *  hand-off the lock to the accepting thread (the receiver),
         *  then perform the hand-off now.
         */
        if (ulock->ho_wait) {
                wait_queue_t    wq = &ulock->wait_queue;

                /*
                 *  See who the lucky devil is, if he is still there waiting.
                 */
                assert(ulock->holder != THREAD_NULL);

                if (wait_queue_wakeup64_thread(wq,
                                               LOCK_SET_HANDOFF,
                                               ulock->holder,
                                               THREAD_AWAKENED) == KERN_SUCCESS) {
                        /*
                         *  Holder thread was still waiting to give it
                         *  away.  Take over ownership.
                         */
                        ulock_ownership_clear(ulock);
                        ulock_ownership_set(ulock, current_thread());
                        ulock->ho_wait = FALSE;
                        ulock_unlock(ulock);
                        return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
                                                   KERN_SUCCESS;
                }

                /*
                 *  OOPS.  The owner was aborted out of the handoff.
                 *  He will clear his own flag when he gets back.
                 *  In the meantime, we will wait as if we didn't
                 *  even see his flag (by falling thru).
                 */
        }

        ulock->accept_wait = TRUE;
        wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
                                               LOCK_SET_HANDOFF,
                                               THREAD_ABORTSAFE, 0);
        ulock_unlock(ulock);

        if (wait_result == THREAD_WAITING)
                wait_result = thread_block(THREAD_CONTINUE_NULL);

        /*
         *  If the thread was woken up via some action other than
         *  lock_handoff or lock_set_destroy (e.g. thread_terminate),
         *  then we need to clear the ulock's handoff state.
         */
        switch (wait_result) {

        case THREAD_AWAKENED:
                return KERN_SUCCESS;

        case THREAD_INTERRUPTED:
                ulock_lock(ulock);
                ulock->accept_wait = FALSE;
                ulock_unlock(ulock);
                return KERN_ABORTED;

        case THREAD_RESTART:
                goto retry;
        }

        panic("lock_handoff_accept");
        return KERN_FAILURE;
}
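
/*
 *      Usage sketch (illustrative comment only, assuming the user-level MIG
 *      stubs): lock_handoff() and lock_handoff_accept() form a rendezvous;
 *      whichever side arrives first waits (interruptibly) for the other.
 *
 *          Current holder (sender):
 *              kr = lock_handoff(lset, 0);
 *              ... KERN_SUCCESS means ownership now belongs to the acceptor ...
 *
 *          Accepting thread (receiver):
 *              kr = lock_handoff_accept(lset, 0);
 *              ... KERN_SUCCESS (or KERN_LOCK_UNSTABLE) means this thread now
 *              ... owns ulock 0 and should eventually lock_release() it ...
 */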

/*
 *      Routine:        lock_set_reference
 *
 *      Take out a reference on a lock set.  This keeps the data structure
 *      in existence (but the lock set may be deactivated).
 */
void
lock_set_reference(lock_set_t lock_set)
{
        lock_set_lock(lock_set);
        lock_set->ref_count++;
        lock_set_unlock(lock_set);
}

/*
 *      Routine:        lock_set_dereference
 *
 *      Release a reference on a lock set.  If this is the last reference,
 *      the lock set data structure is deallocated.
 */
void
lock_set_dereference(lock_set_t lock_set)
{
        int     ref_count;
        int     size;

        lock_set_lock(lock_set);
        ref_count = --(lock_set->ref_count);
        lock_set_unlock(lock_set);

        if (ref_count == 0) {
                size = sizeof(struct lock_set) +
                       (sizeof(struct ulock) * (lock_set->n_ulocks - 1));
                kfree(lock_set, size);
        }
}

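/*
 *      ROUTINE:        ulock_release_all       [internal]
 *
 *      Marks unstable and releases every ulock still held by the given
 *      thread (typically invoked on the thread's behalf during teardown),
 *      waking one waiter per ulock via ulock_release_internal().
 */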
void
ulock_release_all(
        thread_t                thread)
{
        ulock_t         ulock;

        while (!queue_empty(&thread->held_ulocks)) {
                ulock = (ulock_t)queue_first(&thread->held_ulocks);
                lock_make_unstable(ulock, thread);
                ulock_release_internal(ulock, thread);
        }
}