/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	File:	kern/sync_lock.c
 *	Author:	Joseph CaraDonna
 *
 *	Contains RT distributed lock synchronization services.
 */
#include <mach/mach_types.h>
#include <mach/lock_set_server.h>
#include <mach/task_server.h>

#include <kern/misc_protos.h>
#include <kern/kalloc.h>
#include <kern/sync_lock.h>
#include <kern/sched_prim.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_sync.h>
#include <kern/thread.h>
#include <kern/task.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
/*
 *	Ulock ownership MACROS
 *
 *	Assumes: ulock internal lock is held
 */

#define ulock_ownership_set(ul, th)				\
	MACRO_BEGIN						\
	thread_mtx_lock(th);					\
	enqueue (&th->held_ulocks, (queue_entry_t) (ul));	\
	thread_mtx_unlock(th);					\
	(ul)->holder = th;					\
	MACRO_END

#define ulock_ownership_clear(ul)				\
	MACRO_BEGIN						\
	thread_t th;						\
	th = (ul)->holder;					\
	if (th->active) {					\
		thread_mtx_lock(th);				\
		remqueue(&th->held_ulocks, (queue_entry_t) (ul)); \
		thread_mtx_unlock(th);				\
	} else {						\
		remqueue(&th->held_ulocks, (queue_entry_t) (ul)); \
	}							\
	(ul)->holder = THREAD_NULL;				\
	MACRO_END
/*
 *	Lock set ownership MACROS
 */

#define lock_set_ownership_set(ls, t)				\
	MACRO_BEGIN						\
	task_lock((t));						\
	enqueue_head(&(t)->lock_set_list, (queue_entry_t) (ls));\
	(t)->lock_sets_owned++;					\
	task_unlock((t));					\
	(ls)->owner = (t);					\
	MACRO_END

#define lock_set_ownership_clear(ls, t)				\
	MACRO_BEGIN						\
	task_lock((t));						\
	remqueue(&(t)->lock_set_list, (queue_entry_t) (ls));	\
	(t)->lock_sets_owned--;					\
	task_unlock((t));					\
	MACRO_END
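
/*
 * Note: these bookkeeping queues feed the cleanup paths.  A thread's
 * held_ulocks list is how ulock_release_all() (at the bottom of this file)
 * finds the ulocks a terminating thread still owns, and a task's
 * lock_set_list / lock_sets_owned count is how task teardown finds the
 * lock sets the task still owns.
 */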
unsigned int		lock_set_event;
#define LOCK_SET_EVENT		((event64_t)&lock_set_event)

unsigned int		lock_set_handoff;
#define LOCK_SET_HANDOFF	((event64_t)&lock_set_handoff)
/*
 *	ROUTINE:	lock_set_init		[private]
 *
 *	Initialize the lock_set subsystem.
 *
 *	For now, we don't have anything to do here.
 */
void
lock_set_init(void)
{
	return;
}
/*
 *	ROUTINE:	lock_set_create		[exported]
 *
 *	Creates a lock set.
 *	The port representing the lock set is returned as a parameter.
 */
kern_return_t
lock_set_create (
	task_t		task,
	lock_set_t	*new_lock_set,
	int		n_ulocks,
	int		policy)
{
	lock_set_t	lock_set = LOCK_SET_NULL;
	ulock_t		ulock;
	int		size;
	int		x;

	*new_lock_set = LOCK_SET_NULL;

	if (task == TASK_NULL || n_ulocks <= 0 || policy > SYNC_POLICY_MAX)
		return KERN_INVALID_ARGUMENT;

	size = sizeof(struct lock_set) + (sizeof(struct ulock) * (n_ulocks-1));
	lock_set = (lock_set_t) kalloc (size);

	if (lock_set == LOCK_SET_NULL)
		return KERN_RESOURCE_SHORTAGE;

	lock_set_lock_init(lock_set);
	lock_set->n_ulocks = n_ulocks;
	lock_set->ref_count = 1;

	/*
	 *  Create and initialize the lock set port
	 */
	lock_set->port = ipc_port_alloc_kernel();
	if (lock_set->port == IP_NULL) {
		/* This will deallocate the lock set */
		lock_set_dereference(lock_set);
		return KERN_RESOURCE_SHORTAGE;
	}

	ipc_kobject_set (lock_set->port,
			 (ipc_kobject_t) lock_set,
			 IKOT_LOCK_SET);

	/*
	 *  Initialize each ulock in the lock set
	 */
	for (x = 0; x < n_ulocks; x++) {
		ulock = (ulock_t) &lock_set->ulock_list[x];
		ulock_lock_init(ulock);
		ulock->lock_set = lock_set;
		ulock->holder	= THREAD_NULL;
		ulock->blocked	= FALSE;
		ulock->unstable	= FALSE;
		ulock->ho_wait	= FALSE;
		wait_queue_init(&ulock->wait_queue, policy);
	}

	lock_set_ownership_set(lock_set, task);

	lock_set->active = TRUE;
	*new_lock_set = lock_set;

	return KERN_SUCCESS;
}
/*
 *	ROUTINE:	lock_set_destroy	[exported]
 *
 *	Destroys a lock set.  This call will only succeed if the
 *	specified task is the SAME task name specified at the lock set's
 *	creation.
 *
 *	NOTES:
 *	- All threads currently blocked on the lock set's ulocks are awoken.
 *	- These threads will return with the KERN_LOCK_SET_DESTROYED error.
 */
kern_return_t
lock_set_destroy (task_t task, lock_set_t lock_set)
{
	ulock_t		ulock;
	int		i;

	if (task == TASK_NULL || lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_set->owner != task)
		return KERN_INVALID_RIGHT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	/*
	 *  Deactivate lock set
	 */
	lock_set->active = FALSE;

	/*
	 *  If a ulock is currently held in the target lock set:
	 *
	 *  1) Wakeup all threads blocked on the ulock (if any).  Threads
	 *     may be blocked waiting normally, or waiting for a handoff.
	 *     Blocked threads will return with KERN_LOCK_SET_DESTROYED.
	 *
	 *  2) ulock ownership is cleared.
	 *     The thread currently holding the ulock is revoked of its
	 *     ownership.
	 */
	for (i = 0; i < lock_set->n_ulocks; i++) {
		ulock = &lock_set->ulock_list[i];

		ulock_lock(ulock);

		if (ulock->accept_wait) {
			ulock->accept_wait = FALSE;
			wait_queue_wakeup64_one(&ulock->wait_queue,
						LOCK_SET_HANDOFF,
						THREAD_RESTART);
		}

		if (ulock->holder != THREAD_NULL) {
			if (ulock->blocked) {
				ulock->blocked = FALSE;
				wait_queue_wakeup64_all(&ulock->wait_queue,
							LOCK_SET_EVENT,
							THREAD_RESTART);
			}

			if (ulock->ho_wait) {
				ulock->ho_wait = FALSE;
				wait_queue_wakeup64_one(&ulock->wait_queue,
							LOCK_SET_HANDOFF,
							THREAD_RESTART);
			}

			ulock_ownership_clear(ulock);
		}

		ulock_unlock(ulock);
	}

	lock_set_unlock(lock_set);
	lock_set_ownership_clear(lock_set, task);

	/*
	 *  Drop the lock set reference, which in turn destroys the
	 *  lock set structure if the reference count goes to zero.
	 */
	ipc_port_dealloc_kernel(lock_set->port);
	lock_set_dereference(lock_set);

	return KERN_SUCCESS;
}
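
/*
 * Usage sketch (user-space side, illustrative only; not part of this
 * file): only the owning task may destroy the set, and any threads
 * blocked on its ulocks return KERN_LOCK_SET_DESTROYED.
 *
 *	kr = lock_set_destroy(mach_task_self(), lset);
 */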
kern_return_t
lock_acquire (lock_set_t lock_set, int lock_id)
{
	ulock_t		ulock;
	int		wait_result;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 *  Block the current thread if the lock is already held.
	 */
	if (ulock->holder != THREAD_NULL) {
		if (ulock->holder == current_thread()) {
			ulock_unlock(ulock);
			return KERN_LOCK_OWNED_SELF;
		}

		ulock->blocked = TRUE;
		wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
						       LOCK_SET_EVENT,
						       THREAD_ABORTSAFE, 0);
		ulock_unlock(ulock);

		/*
		 *  Block - Wait for lock to become available.
		 */
		if (wait_result == THREAD_WAITING)
			wait_result = thread_block(THREAD_CONTINUE_NULL);

		/*
		 *  Check the result status:
		 *
		 *  Check to see why thread was woken up.  In all cases, we
		 *  already have been removed from the queue.
		 */
		switch (wait_result) {
		case THREAD_AWAKENED:
			/* lock transitioned from old locker to us */
			/* he already made us owner */
			return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
						   KERN_SUCCESS;

		case THREAD_INTERRUPTED:
			return KERN_ABORTED;

		case THREAD_RESTART:
			goto retry;	/* probably a dead lock_set */

		default:
			panic("lock_acquire\n");
		}
	}

	/*
	 *  Assign lock ownership
	 */
	ulock_ownership_set(ulock, current_thread());
	ulock_unlock(ulock);

	return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}
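
/*
 * Usage sketch (user-space side, illustrative only; not part of this
 * file): the usual acquire/release pairing around a critical section.
 * update_shared_state() is a placeholder for whatever the caller protects.
 *
 *	kr = lock_acquire(lset, 0);
 *	if (kr == KERN_SUCCESS || kr == KERN_LOCK_UNSTABLE) {
 *		update_shared_state();
 *		(void) lock_release(lset, 0);
 *	}
 */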
kern_return_t
lock_release (lock_set_t lock_set, int lock_id)
{
	ulock_t		ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];

	return (ulock_release_internal(ulock, current_thread()));
}
kern_return_t
lock_try (lock_set_t lock_set, int lock_id)
{
	ulock_t		ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 *  If the lock is already owned, we return without blocking.
	 *
	 *  An ownership status is returned to inform the caller as to
	 *  whether it already holds the lock or another thread does.
	 */
	if (ulock->holder != THREAD_NULL) {
		if (ulock->holder == current_thread()) {
			ulock_unlock(ulock);
			return KERN_LOCK_OWNED_SELF;
		}

		ulock_unlock(ulock);
		return KERN_LOCK_OWNED;
	}

	/*
	 *  Add the ulock to the current thread's held_ulocks list.
	 */
	ulock_ownership_set(ulock, current_thread());
	ulock_unlock(ulock);

	return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}
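
/*
 * Usage sketch (user-space side, illustrative only; not part of this
 * file): lock_try() is the non-blocking variant.  KERN_LOCK_OWNED means
 * another thread holds the ulock; KERN_LOCK_OWNED_SELF means the caller
 * already does.  do_other_work() and update_shared_state() are placeholders.
 *
 *	kr = lock_try(lset, 0);
 *	if (kr == KERN_SUCCESS || kr == KERN_LOCK_UNSTABLE) {
 *		update_shared_state();
 *		(void) lock_release(lset, 0);
 *	} else if (kr == KERN_LOCK_OWNED) {
 *		do_other_work();
 *	}
 */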
kern_return_t
lock_make_stable (lock_set_t lock_set, int lock_id)
{
	ulock_t		ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != current_thread()) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	ulock->unstable = FALSE;
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}
/*
 *	ROUTINE:	lock_make_unstable	[internal]
 *
 *	Marks the lock as unstable.
 *
 *	NOTES:
 *	- All future acquisitions of the lock will return with a
 *	  KERN_LOCK_UNSTABLE status, until the lock is made stable again.
 */
kern_return_t
lock_make_unstable (ulock_t ulock, thread_t thread)
{
	lock_set_t	lock_set;

	lock_set = ulock->lock_set;
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != thread) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	ulock->unstable = TRUE;
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}
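
/*
 * Usage sketch (user-space side, illustrative only; not part of this
 * file): KERN_LOCK_UNSTABLE tells a new acquirer that a previous holder
 * was terminated while holding the ulock, so the protected data may be
 * inconsistent.  After repairing it (repair_shared_state() is a
 * placeholder) the new holder clears the condition with lock_make_stable().
 *
 *	kr = lock_acquire(lset, 0);
 *	if (kr == KERN_LOCK_UNSTABLE) {
 *		repair_shared_state();
 *		(void) lock_make_stable(lset, 0);
 *		kr = KERN_SUCCESS;
 *	}
 *	if (kr == KERN_SUCCESS)
 *		(void) lock_release(lset, 0);
 */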
/*
 *	ROUTINE:	ulock_release_internal	[internal]
 *
 *	Releases the ulock.
 *	If any threads are blocked waiting for the ulock, one is woken-up.
 */
kern_return_t
ulock_release_internal (ulock_t ulock, thread_t thread)
{
	lock_set_t	lock_set;

	if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != thread) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	/*
	 *  If we have a hint that threads might be waiting,
	 *  try to transfer the lock ownership to a waiting thread
	 *  and wake it up.
	 */
	if (ulock->blocked) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	wqthread;
		spl_t		s;

		s = splsched();
		wait_queue_lock(wq);
		wqthread = wait_queue_wakeup64_identity_locked(wq,
							       LOCK_SET_EVENT,
							       THREAD_AWAKENED,
							       TRUE);
		/* wait_queue now unlocked, thread locked */

		if (wqthread != THREAD_NULL) {
			/*
			 * JMM - These ownership transfer macros have a
			 * locking/race problem.  To keep the thread from
			 * changing states on us (nullifying the ownership
			 * assignment) we need to keep the thread locked
			 * during the assignment.  But we can't because the
			 * macros take an activation lock, which is a mutex.
			 * Since this code was already broken before I got
			 * here, I will leave it for now.
			 */
			thread_unlock(wqthread);
			splx(s);

			/*
			 *  Transfer ulock ownership
			 *  from the current thread to the acquisition thread.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, wqthread);
			ulock_unlock(ulock);

			return KERN_SUCCESS;
		} else {
			ulock->blocked = FALSE;
			splx(s);
		}
	}

	/*
	 *  Disown ulock
	 */
	ulock_ownership_clear(ulock);
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}
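
/*
 * Note: when waiters are present, ownership is handed directly to the one
 * thread that wait_queue_wakeup64_identity_locked() selects instead of
 * waking every waiter and letting them race to re-acquire; the blocked
 * hint is cleared only when no waiter is actually found on the queue.
 */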
kern_return_t
lock_handoff (lock_set_t lock_set, int lock_id)
{
	ulock_t		ulock;
	int		wait_result;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);

	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != current_thread()) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	/*
	 *  If the accepting thread (the receiver) is already waiting
	 *  to accept the lock from the handoff thread (the sender),
	 *  then perform the hand-off now.
	 */
	if (ulock->accept_wait) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	thread;
		spl_t		s;

		/*
		 *  See who the lucky devil is, if he is still there waiting.
		 */
		s = splsched();
		wait_queue_lock(wq);
		thread = wait_queue_wakeup64_identity_locked(
					wq,
					LOCK_SET_HANDOFF,
					THREAD_AWAKENED,
					TRUE);
		/* wait queue unlocked, thread locked */

		/*
		 *  Transfer lock ownership
		 */
		if (thread != THREAD_NULL) {
			/*
			 * JMM - These ownership transfer macros have a
			 * locking/race problem.  To keep the thread from
			 * changing states on us (nullifying the ownership
			 * assignment) we need to keep the thread locked
			 * during the assignment.  But we can't because the
			 * macros take a thread mutex lock.
			 *
			 * Since this code was already broken before I got
			 * here, I will leave it for now.
			 */
			thread_unlock(thread);
			splx(s);

			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, thread);
			ulock->accept_wait = FALSE;
			ulock_unlock(ulock);
			return KERN_SUCCESS;
		} else {
			/*
			 * OOPS.  The accepting thread must have been aborted
			 * and is racing back to clear the flag that says it
			 * is waiting for an accept.  It will clear the flag
			 * when we release the lock, so just fall through and
			 * wait for the next accept thread (that's the way it
			 * is specified).
			 */
			splx(s);
		}
	}

	/*
	 *  Indicate that there is a hand-off thread waiting, and then wait
	 *  for an accepting thread.
	 */
	ulock->ho_wait = TRUE;
	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
					       LOCK_SET_HANDOFF,
					       THREAD_ABORTSAFE, 0);
	ulock_unlock(ulock);

	if (wait_result == THREAD_WAITING)
		wait_result = thread_block(THREAD_CONTINUE_NULL);

	/*
	 *  If the thread was woken-up via some action other than
	 *  lock_handoff_accept or lock_set_destroy (i.e. thread_terminate),
	 *  then we need to clear the ulock's handoff state.
	 */
	switch (wait_result) {

	case THREAD_AWAKENED:
		return KERN_SUCCESS;

	case THREAD_INTERRUPTED:
		ulock_lock(ulock);
		assert(ulock->holder == current_thread());
		ulock->ho_wait = FALSE;
		ulock_unlock(ulock);
		return KERN_ABORTED;

	case THREAD_RESTART:
		goto retry;
	}

	panic("lock_handoff");
	return KERN_FAILURE;
}
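
/*
 * Note: lock_handoff() is the sending half of the hand-off protocol; the
 * receiving half, lock_handoff_accept(), follows.  The sender must already
 * hold the ulock and blocks until a receiver accepts it, the wait is
 * aborted, or the lock set is destroyed.
 */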
kern_return_t
lock_handoff_accept (lock_set_t lock_set, int lock_id)
{
	ulock_t		ulock;
	int		wait_result;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 *  If there is another accepting thread that beat us, just
	 *  return with an error.
	 */
	if (ulock->accept_wait) {
		ulock_unlock(ulock);
		return KERN_ALREADY_WAITING;
	}

	if (ulock->holder == current_thread()) {
		ulock_unlock(ulock);
		return KERN_LOCK_OWNED_SELF;
	}

	/*
	 *  If the handoff thread (the sender) is already waiting to
	 *  hand-off the lock to the accepting thread (the receiver),
	 *  then perform the hand-off now.
	 */
	if (ulock->ho_wait) {
		wait_queue_t	wq = &ulock->wait_queue;

		/*
		 *  See who the lucky devil is, if he is still there waiting.
		 */
		assert(ulock->holder != THREAD_NULL);

		if (wait_queue_wakeup64_thread(wq,
					       LOCK_SET_HANDOFF,
					       ulock->holder,
					       THREAD_AWAKENED) == KERN_SUCCESS) {
			/*
			 * Holder thread was still waiting to give it
			 * away.  Take over ownership.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, current_thread());
			ulock->ho_wait = FALSE;
			ulock_unlock(ulock);

			return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
						   KERN_SUCCESS;
		}

		/*
		 * OOPS.  The owner was aborted out of the handoff.
		 * He will clear his own flag when he gets back.
		 * In the meantime, we will wait as if we didn't
		 * even see his flag (by falling through).
		 */
	}

	ulock->accept_wait = TRUE;
	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
					       LOCK_SET_HANDOFF,
					       THREAD_ABORTSAFE, 0);
	ulock_unlock(ulock);

	if (wait_result == THREAD_WAITING)
		wait_result = thread_block(THREAD_CONTINUE_NULL);

	/*
	 *  If the thread was woken-up via some action other than
	 *  lock_handoff or lock_set_destroy (i.e. thread_terminate),
	 *  then we need to clear the ulock's handoff state.
	 */
	switch (wait_result) {

	case THREAD_AWAKENED:
		return KERN_SUCCESS;

	case THREAD_INTERRUPTED:
		ulock_lock(ulock);
		ulock->accept_wait = FALSE;
		ulock_unlock(ulock);
		return KERN_ABORTED;

	case THREAD_RESTART:
		goto retry;
	}

	panic("lock_handoff_accept");
	return KERN_FAILURE;
}
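
/*
 * Usage sketch (user-space side, illustrative only; not part of this
 * file): a directed hand-off between two cooperating threads.  Whichever
 * side arrives first blocks until the other shows up for the same lock_id.
 *
 *	current holder:		kr = lock_handoff(lset, 0);
 *	accepting thread:	kr = lock_handoff_accept(lset, 0);
 *
 * On success the accepting thread owns ulock 0 and the former holder's
 * lock_handoff() call returns KERN_SUCCESS.
 */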
/*
 *	Routine:	lock_set_reference
 *
 *	Take out a reference on a lock set.  This keeps the data structure
 *	in existence (but the lock set may be deactivated).
 */
void
lock_set_reference(lock_set_t lock_set)
{
	lock_set_lock(lock_set);
	lock_set->ref_count++;
	lock_set_unlock(lock_set);
}
/*
 *	Routine:	lock_set_dereference
 *
 *	Release a reference on a lock set.  If this is the last reference,
 *	the lock set data structure is deallocated.
 */
void
lock_set_dereference(lock_set_t lock_set)
{
	int	ref_count;
	int	size;

	lock_set_lock(lock_set);
	ref_count = --(lock_set->ref_count);
	lock_set_unlock(lock_set);

	if (ref_count == 0) {
		size = sizeof(struct lock_set) +
			(sizeof(struct ulock) * (lock_set->n_ulocks - 1));
		kfree(lock_set, size);
	}
}
void
ulock_release_all(
	thread_t		thread)
{
	ulock_t		ulock;

	while (!queue_empty(&thread->held_ulocks)) {
		ulock = (ulock_t)queue_first(&thread->held_ulocks);
		lock_make_unstable(ulock, thread);
		ulock_release_internal(ulock, thread);
	}
}
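
/*
 * Note: this is the thread-teardown path.  A terminating thread that still
 * holds ulocks first marks each one unstable (so the next acquirer sees
 * KERN_LOCK_UNSTABLE) and then releases it, which may hand ownership
 * straight to a waiting thread.
 */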