/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 *	File:	kern/sync_lock.c
 *	Author:	Joseph CaraDonna
 *
 *	Contains RT distributed lock synchronization services.
 */
#include <mach/mach_types.h>
#include <mach/lock_set_server.h>
#include <mach/task_server.h>

#include <kern/misc_protos.h>
#include <kern/kalloc.h>
#include <kern/sync_lock.h>
#include <kern/sched_prim.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_sync.h>
#include <kern/thread.h>
#include <kern/task.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
/*
 *	Ulock ownership MACROS
 *
 *	Assumes: ulock internal lock is held
 */

#define ulock_ownership_set(ul, th) \
    MACRO_BEGIN \
    thread_mtx_lock(th); \
    enqueue(&th->held_ulocks, (queue_entry_t) (ul)); \
    thread_mtx_unlock(th); \
    (ul)->holder = th; \
    MACRO_END

#define ulock_ownership_clear(ul) \
    MACRO_BEGIN \
    thread_t th; \
    th = (ul)->holder; \
    if (th->active) { \
        thread_mtx_lock(th); \
        remqueue(&th->held_ulocks, \
                 (queue_entry_t) (ul)); \
        thread_mtx_unlock(th); \
    } else { \
        remqueue(&th->held_ulocks, \
                 (queue_entry_t) (ul)); \
    } \
    (ul)->holder = THREAD_NULL; \
    MACRO_END
/*
 *	Lock set ownership MACROS
 */

#define lock_set_ownership_set(ls, t) \
    MACRO_BEGIN \
    task_lock((t)); \
    enqueue_head(&(t)->lock_set_list, (queue_entry_t) (ls)); \
    (t)->lock_sets_owned++; \
    task_unlock((t)); \
    (ls)->owner = (t); \
    MACRO_END

#define lock_set_ownership_clear(ls, t) \
    MACRO_BEGIN \
    task_lock((t)); \
    remqueue(&(t)->lock_set_list, (queue_entry_t) (ls)); \
    (t)->lock_sets_owned--; \
    task_unlock((t)); \
    MACRO_END
unsigned int lock_set_event;
#define LOCK_SET_EVENT		((event64_t)&lock_set_event)

unsigned int lock_set_handoff;
#define LOCK_SET_HANDOFF	((event64_t)&lock_set_handoff)
/*
 *	ROUTINE:	lock_set_init		[private]
 *
 *	Initialize the lock_set subsystem.
 *
 *	For now, we don't have anything to do here.
 */

void
lock_set_init(void)
{
    return;
}
/*
 *	ROUTINE:	lock_set_create		[exported]
 *
 *	Creates a lock set.
 *	The port representing the lock set is returned as a parameter.
 */
kern_return_t
lock_set_create (
    task_t      task,
    lock_set_t  *new_lock_set,
    int         n_ulocks,
    int         policy)
{
    lock_set_t  lock_set = LOCK_SET_NULL;
    ulock_t     ulock;
    vm_size_t   size;
    int         x;

    *new_lock_set = LOCK_SET_NULL;

    if (task == TASK_NULL || n_ulocks <= 0 || policy > SYNC_POLICY_MAX)
        return KERN_INVALID_ARGUMENT;

    if (VM_MAX_ADDRESS/sizeof(struct ulock) - sizeof(struct lock_set) < (unsigned)n_ulocks)
        return KERN_RESOURCE_SHORTAGE;

    size = sizeof(struct lock_set) + (sizeof(struct ulock) * (n_ulocks-1));
    lock_set = (lock_set_t) kalloc (size);

    if (lock_set == LOCK_SET_NULL)
        return KERN_RESOURCE_SHORTAGE;

    lock_set_lock_init(lock_set);
    lock_set->n_ulocks = n_ulocks;
    lock_set->ref_count = 1;

    /*
     *  Create and initialize the lock set port
     */
    lock_set->port = ipc_port_alloc_kernel();
    if (lock_set->port == IP_NULL) {
        /* This will deallocate the lock set */
        lock_set_dereference(lock_set);
        return KERN_RESOURCE_SHORTAGE;
    }

    ipc_kobject_set (lock_set->port,
                     (ipc_kobject_t) lock_set,
                     IKOT_LOCK_SET);

    /*
     *  Initialize each ulock in the lock set
     */
    for (x = 0; x < n_ulocks; x++) {
        ulock = (ulock_t) &lock_set->ulock_list[x];
        ulock_lock_init(ulock);
        ulock->lock_set = lock_set;
        ulock->holder   = THREAD_NULL;
        ulock->blocked  = FALSE;
        ulock->unstable = FALSE;
        ulock->ho_wait  = FALSE;
        wait_queue_init(&ulock->wait_queue, policy);
    }

    lock_set_ownership_set(lock_set, task);

    lock_set->active = TRUE;
    *new_lock_set = lock_set;

    return KERN_SUCCESS;
}
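
/*
 * A minimal user-side sketch of this interface, assuming the MIG-generated
 * user-level stubs (lock_set_create/lock_set_destroy on the task, and the
 * per-ulock lock_* calls) pulled in via <mach/mach.h>, a single-ulock set,
 * and FIFO policy.  Error handling is abbreviated.
 *
 *	#include <mach/mach.h>
 *
 *	lock_set_t    ls;
 *	kern_return_t kr;
 *
 *	kr = lock_set_create(mach_task_self(), &ls, 1, SYNC_POLICY_FIFO);
 *	if (kr == KERN_SUCCESS) {
 *		kr = lock_acquire(ls, 0);   // may also return KERN_LOCK_UNSTABLE
 *		// ... critical section ...
 *		(void) lock_release(ls, 0);
 *		(void) lock_set_destroy(mach_task_self(), ls);
 *	}
 */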
/*
 *	ROUTINE:	lock_set_destroy	[exported]
 *
 *	Destroys a lock set.  This call will only succeed if the
 *	specified task is the SAME task name specified at the lock set's
 *	creation.
 *
 *	NOTES:
 *	- All threads currently blocked on the lock set's ulocks are awoken.
 *	- These threads will return with the KERN_LOCK_SET_DESTROYED error.
 */
kern_return_t
lock_set_destroy (task_t task, lock_set_t lock_set)
{
    ulock_t  ulock;
    int      i;

    if (task == TASK_NULL || lock_set == LOCK_SET_NULL)
        return KERN_INVALID_ARGUMENT;

    if (lock_set->owner != task)
        return KERN_INVALID_RIGHT;

    lock_set_lock(lock_set);
    if (!lock_set->active) {
        lock_set_unlock(lock_set);
        return KERN_LOCK_SET_DESTROYED;
    }

    /*
     *  Deactivate lock set
     */
    lock_set->active = FALSE;

    /*
     *  If a ulock is currently held in the target lock set:
     *
     *  1) Wakeup all threads blocked on the ulock (if any).  Threads
     *     may be blocked waiting normally, or waiting for a handoff.
     *     Blocked threads will return with KERN_LOCK_SET_DESTROYED.
     *
     *  2) ulock ownership is cleared.
     *     The thread currently holding the ulock is revoked of its
     *     ownership.
     */
    for (i = 0; i < lock_set->n_ulocks; i++) {
        ulock = &lock_set->ulock_list[i];

        ulock_lock(ulock);

        if (ulock->accept_wait) {
            ulock->accept_wait = FALSE;
            wait_queue_wakeup64_one(&ulock->wait_queue,
                                    LOCK_SET_HANDOFF,
                                    THREAD_RESTART);
        }

        if (ulock->holder) {
            if (ulock->blocked) {
                ulock->blocked = FALSE;
                wait_queue_wakeup64_all(&ulock->wait_queue,
                                        LOCK_SET_EVENT,
                                        THREAD_RESTART);
            }
            if (ulock->ho_wait) {
                ulock->ho_wait = FALSE;
                wait_queue_wakeup64_one(&ulock->wait_queue,
                                        LOCK_SET_HANDOFF,
                                        THREAD_RESTART);
            }
            ulock_ownership_clear(ulock);
        }

        ulock_unlock(ulock);
    }

    lock_set_unlock(lock_set);
    lock_set_ownership_clear(lock_set, task);

    /*
     *  Deallocate
     *
     *  Drop the lock set reference, which in turn destroys the
     *  lock set structure if the reference count goes to zero.
     */
    ipc_port_dealloc_kernel(lock_set->port);
    lock_set_dereference(lock_set);

    return KERN_SUCCESS;
}
kern_return_t
lock_acquire (lock_set_t lock_set, int lock_id)
{
    ulock_t  ulock;

    if (lock_set == LOCK_SET_NULL)
        return KERN_INVALID_ARGUMENT;

    if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
        return KERN_INVALID_ARGUMENT;

 retry:
    lock_set_lock(lock_set);
    if (!lock_set->active) {
        lock_set_unlock(lock_set);
        return KERN_LOCK_SET_DESTROYED;
    }

    ulock = (ulock_t) &lock_set->ulock_list[lock_id];
    ulock_lock(ulock);
    lock_set_unlock(lock_set);

    /*
     *  Block the current thread if the lock is already held.
     */
    if (ulock->holder != THREAD_NULL) {
        int wait_result;

        if (ulock->holder == current_thread()) {
            ulock_unlock(ulock);
            return KERN_LOCK_OWNED_SELF;
        }

        ulock->blocked = TRUE;
        wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
                                               LOCK_SET_EVENT,
                                               THREAD_ABORTSAFE, 0);
        ulock_unlock(ulock);

        /*
         *  Block - Wait for lock to become available.
         */
        if (wait_result == THREAD_WAITING)
            wait_result = thread_block(THREAD_CONTINUE_NULL);

        /*
         *  Check the result status:
         *
         *  Check to see why thread was woken up.  In all cases, we
         *  already have been removed from the queue.
         */
        switch (wait_result) {
        case THREAD_AWAKENED:
            /* lock transitioned from old locker to us */
            /* he already made us owner */
            return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
                                       KERN_SUCCESS;

        case THREAD_INTERRUPTED:
            return KERN_ABORTED;

        case THREAD_RESTART:
            goto retry;  /* probably a dead lock_set */

        default:
            panic("lock_acquire\n");
        }
    }

    /*
     *  Assign lock ownership
     */
    ulock_ownership_set(ulock, current_thread());
    ulock_unlock(ulock);

    return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}
kern_return_t
lock_release (lock_set_t lock_set, int lock_id)
{
    ulock_t  ulock;

    if (lock_set == LOCK_SET_NULL)
        return KERN_INVALID_ARGUMENT;

    if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
        return KERN_INVALID_ARGUMENT;

    ulock = (ulock_t) &lock_set->ulock_list[lock_id];

    return (ulock_release_internal(ulock, current_thread()));
}
kern_return_t
lock_try (lock_set_t lock_set, int lock_id)
{
    ulock_t  ulock;

    if (lock_set == LOCK_SET_NULL)
        return KERN_INVALID_ARGUMENT;

    if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
        return KERN_INVALID_ARGUMENT;

    lock_set_lock(lock_set);
    if (!lock_set->active) {
        lock_set_unlock(lock_set);
        return KERN_LOCK_SET_DESTROYED;
    }

    ulock = (ulock_t) &lock_set->ulock_list[lock_id];
    ulock_lock(ulock);
    lock_set_unlock(lock_set);

    /*
     *  If the lock is already owned, we return without blocking.
     *
     *  An ownership status is returned to inform the caller as to
     *  whether it already holds the lock or another thread does.
     */
    if (ulock->holder != THREAD_NULL) {
        lock_set_unlock(lock_set);

        if (ulock->holder == current_thread()) {
            ulock_unlock(ulock);
            return KERN_LOCK_OWNED_SELF;
        }

        ulock_unlock(ulock);
        return KERN_LOCK_OWNED;
    }

    /*
     *  Add the ulock to the lock set's held_ulocks list.
     */
    ulock_ownership_set(ulock, current_thread());
    ulock_unlock(ulock);

    return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}
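
/*
 * A minimal caller-side sketch, assuming the user-level stubs and the
 * lock set "ls" (lock id 0) from the earlier example: lock_try never
 * blocks, so a caller can poll and fall back to the blocking
 * lock_acquire path when another thread owns the ulock.
 *
 *	kern_return_t kr = lock_try(ls, 0);
 *	if (kr == KERN_LOCK_OWNED)          // another thread holds it
 *		kr = lock_acquire(ls, 0);   // block until it is given to us
 *	if (kr == KERN_SUCCESS || kr == KERN_LOCK_UNSTABLE) {
 *		// ... critical section ...
 *		(void) lock_release(ls, 0);
 *	}
 */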
kern_return_t
lock_make_stable (lock_set_t lock_set, int lock_id)
{
    ulock_t  ulock;

    if (lock_set == LOCK_SET_NULL)
        return KERN_INVALID_ARGUMENT;

    if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
        return KERN_INVALID_ARGUMENT;

    lock_set_lock(lock_set);
    if (!lock_set->active) {
        lock_set_unlock(lock_set);
        return KERN_LOCK_SET_DESTROYED;
    }

    ulock = (ulock_t) &lock_set->ulock_list[lock_id];
    ulock_lock(ulock);
    lock_set_unlock(lock_set);

    if (ulock->holder != current_thread()) {
        ulock_unlock(ulock);
        return KERN_INVALID_RIGHT;
    }

    ulock->unstable = FALSE;
    ulock_unlock(ulock);

    return KERN_SUCCESS;
}
/*
 *	ROUTINE:	lock_make_unstable	[internal]
 *
 *	Marks the lock as unstable.
 *
 *	NOTES:
 *	- All future acquisitions of the lock will return with a
 *	  KERN_LOCK_UNSTABLE status, until the lock is made stable again.
 */
kern_return_t
lock_make_unstable (ulock_t ulock, thread_t thread)
{
    lock_set_t  lock_set;

    lock_set = ulock->lock_set;
    lock_set_lock(lock_set);
    if (!lock_set->active) {
        lock_set_unlock(lock_set);
        return KERN_LOCK_SET_DESTROYED;
    }

    ulock_lock(ulock);
    lock_set_unlock(lock_set);

    if (ulock->holder != thread) {
        ulock_unlock(ulock);
        return KERN_INVALID_RIGHT;
    }

    ulock->unstable = TRUE;
    ulock_unlock(ulock);

    return KERN_SUCCESS;
}
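
/*
 * A minimal sketch of how a user-level client might react to the unstable
 * state, assuming the user-level stubs and the lock set "ls" from the
 * earlier example: when a holder dies, the next acquirer is told the data
 * the lock protects may be inconsistent, repairs it, and then calls
 * lock_make_stable.
 *
 *	kern_return_t kr = lock_acquire(ls, 0);
 *	if (kr == KERN_LOCK_UNSTABLE) {
 *		// previous holder terminated while holding the ulock;
 *		// restore whatever invariants the lock protects ...
 *		(void) lock_make_stable(ls, 0);
 *	}
 *	// ... critical section ...
 *	(void) lock_release(ls, 0);
 */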
/*
 *	ROUTINE:	ulock_release_internal	[internal]
 *
 *	Releases the ulock.
 *	If any threads are blocked waiting for the ulock, one is woken-up.
 */
kern_return_t
ulock_release_internal (ulock_t ulock, thread_t thread)
{
    lock_set_t  lock_set;

    if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
        return KERN_INVALID_ARGUMENT;

    lock_set_lock(lock_set);
    if (!lock_set->active) {
        lock_set_unlock(lock_set);
        return KERN_LOCK_SET_DESTROYED;
    }

    ulock_lock(ulock);
    lock_set_unlock(lock_set);

    if (ulock->holder != thread) {
        ulock_unlock(ulock);
        return KERN_INVALID_RIGHT;
    }

    /*
     *  If we have a hint that threads might be waiting,
     *  try to transfer the lock ownership to a waiting thread
     *  and wake it up.
     */
    if (ulock->blocked) {
        wait_queue_t  wq = &ulock->wait_queue;
        thread_t      wqthread;
        spl_t         s;

        s = splsched();
        wait_queue_lock(wq);
        wqthread = wait_queue_wakeup64_identity_locked(wq,
                                                       LOCK_SET_EVENT,
                                                       THREAD_AWAKENED,
                                                       TRUE);
        /* wait_queue now unlocked, thread locked */

        if (wqthread != THREAD_NULL) {
            /*
             * JMM - These ownership transfer macros have a
             * locking/race problem.  To keep the thread from
             * changing states on us (nullifying the ownership
             * assignment) we need to keep the thread locked
             * during the assignment.  But we can't because the
             * macros take an activation lock, which is a mutex.
             * Since this code was already broken before I got
             * here, I will leave it for now.
             */
            thread_unlock(wqthread);
            splx(s);

            /*
             *  Transfer ulock ownership
             *  from the current thread to the acquisition thread.
             */
            ulock_ownership_clear(ulock);
            ulock_ownership_set(ulock, wqthread);
            ulock_unlock(ulock);

            return KERN_SUCCESS;
        } else {
            ulock->blocked = FALSE;
            splx(s);
        }
    }

    /*
     *  Disown ulock
     */
    ulock_ownership_clear(ulock);
    ulock_unlock(ulock);

    return KERN_SUCCESS;
}
kern_return_t
lock_handoff (lock_set_t lock_set, int lock_id)
{
    ulock_t  ulock;
    int      wait_result;

    if (lock_set == LOCK_SET_NULL)
        return KERN_INVALID_ARGUMENT;

    if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
        return KERN_INVALID_ARGUMENT;

 retry:
    lock_set_lock(lock_set);

    if (!lock_set->active) {
        lock_set_unlock(lock_set);
        return KERN_LOCK_SET_DESTROYED;
    }

    ulock = (ulock_t) &lock_set->ulock_list[lock_id];
    ulock_lock(ulock);
    lock_set_unlock(lock_set);

    if (ulock->holder != current_thread()) {
        ulock_unlock(ulock);
        return KERN_INVALID_RIGHT;
    }

    /*
     *  If the accepting thread (the receiver) is already waiting
     *  to accept the lock from the handoff thread (the sender),
     *  then perform the hand-off now.
     */
    if (ulock->accept_wait) {
        wait_queue_t  wq = &ulock->wait_queue;
        thread_t      thread;
        spl_t         s;

        /*
         *  See who the lucky devil is, if he is still there waiting.
         */
        s = splsched();
        wait_queue_lock(wq);
        thread = wait_queue_wakeup64_identity_locked(
                                wq,
                                LOCK_SET_HANDOFF,
                                THREAD_AWAKENED,
                                TRUE);
        /* wait queue unlocked, thread locked */

        /*
         *  Transfer lock ownership
         */
        if (thread != THREAD_NULL) {
            /*
             * JMM - These ownership transfer macros have a
             * locking/race problem.  To keep the thread from
             * changing states on us (nullifying the ownership
             * assignment) we need to keep the thread locked
             * during the assignment.  But we can't because the
             * macros take a thread mutex lock.
             *
             * Since this code was already broken before I got
             * here, I will leave it for now.
             */
            thread_unlock(thread);
            splx(s);

            ulock_ownership_clear(ulock);
            ulock_ownership_set(ulock, thread);
            ulock->accept_wait = FALSE;
            ulock_unlock(ulock);
            return KERN_SUCCESS;
        } else {
            /*
             * OOPS. The accepting thread must have been aborted,
             * and is racing back to clear the flag that says it is
             * waiting for an accept.  He will clear it when we
             * release the lock, so just fall thru and wait for
             * the next accept thread (that's the way it is
             * specified).
             */
            splx(s);
        }
    }

    /*
     * Indicate that there is a hand-off thread waiting, and then wait
     * for an accepting thread.
     */
    ulock->ho_wait = TRUE;
    wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
                                           LOCK_SET_HANDOFF,
                                           THREAD_ABORTSAFE, 0);
    ulock_unlock(ulock);

    if (wait_result == THREAD_WAITING)
        wait_result = thread_block(THREAD_CONTINUE_NULL);

    /*
     *  If the thread was woken-up via some action other than
     *  lock_handoff_accept or lock_set_destroy (i.e. thread_terminate),
     *  then we need to clear the ulock's handoff state.
     */
    switch (wait_result) {

    case THREAD_AWAKENED:
        return KERN_SUCCESS;

    case THREAD_INTERRUPTED:
        ulock_lock(ulock);
        assert(ulock->holder == current_thread());
        ulock->ho_wait = FALSE;
        ulock_unlock(ulock);
        return KERN_ABORTED;

    case THREAD_RESTART:
        goto retry;
    }

    panic("lock_handoff");
    return KERN_FAILURE;
}
kern_return_t
lock_handoff_accept (lock_set_t lock_set, int lock_id)
{
    ulock_t  ulock;
    int      wait_result;

    if (lock_set == LOCK_SET_NULL)
        return KERN_INVALID_ARGUMENT;

    if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
        return KERN_INVALID_ARGUMENT;

 retry:
    lock_set_lock(lock_set);
    if (!lock_set->active) {
        lock_set_unlock(lock_set);
        return KERN_LOCK_SET_DESTROYED;
    }

    ulock = (ulock_t) &lock_set->ulock_list[lock_id];
    ulock_lock(ulock);
    lock_set_unlock(lock_set);

    /*
     * If there is another accepting thread that beat us, just
     * return with an error.
     */
    if (ulock->accept_wait) {
        ulock_unlock(ulock);
        return KERN_ALREADY_WAITING;
    }

    if (ulock->holder == current_thread()) {
        ulock_unlock(ulock);
        return KERN_LOCK_OWNED_SELF;
    }

    /*
     *  If the handoff thread (the sender) is already waiting to
     *  hand-off the lock to the accepting thread (the receiver),
     *  then perform the hand-off now.
     */
    if (ulock->ho_wait) {
        wait_queue_t  wq = &ulock->wait_queue;

        /*
         *  See who the lucky devil is, if he is still there waiting.
         */
        assert(ulock->holder != THREAD_NULL);

        if (wait_queue_wakeup64_thread(wq,
                                       LOCK_SET_HANDOFF,
                                       ulock->holder,
                                       THREAD_AWAKENED) == KERN_SUCCESS) {
            /*
             * Holder thread was still waiting to give it
             * away.  Take over ownership.
             */
            ulock_ownership_clear(ulock);
            ulock_ownership_set(ulock, current_thread());
            ulock->ho_wait = FALSE;
            ulock_unlock(ulock);
            return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
                                       KERN_SUCCESS;
        }

        /*
         * OOPS.  The owner was aborted out of the handoff.
         * He will clear his own flag when he gets back.
         * In the meantime, we will wait as if we didn't
         * even see his flag (by falling thru).
         */
    }

    ulock->accept_wait = TRUE;
    wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
                                           LOCK_SET_HANDOFF,
                                           THREAD_ABORTSAFE, 0);
    ulock_unlock(ulock);

    if (wait_result == THREAD_WAITING)
        wait_result = thread_block(THREAD_CONTINUE_NULL);

    /*
     *  If the thread was woken-up via some action other than
     *  lock_handoff_accept or lock_set_destroy (i.e. thread_terminate),
     *  then we need to clear the ulock's handoff state.
     */
    switch (wait_result) {

    case THREAD_AWAKENED:
        return KERN_SUCCESS;

    case THREAD_INTERRUPTED:
        ulock_lock(ulock);
        ulock->accept_wait = FALSE;
        ulock_unlock(ulock);
        return KERN_ABORTED;

    case THREAD_RESTART:
        goto retry;
    }

    panic("lock_handoff_accept");
    return KERN_FAILURE;
}
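
/*
 * A minimal two-thread sketch of the handoff protocol, assuming the
 * user-level stubs and the lock set "ls" from the earlier example: the
 * current holder directs the ulock at a specific accepting thread rather
 * than at whichever waiter wins the wakeup race on lock_release.
 *
 *	// Thread A (current holder): give the ulock away; blocks until an
 *	// accepting thread arrives if none is waiting yet.
 *	kr = lock_handoff(ls, 0);
 *
 *	// Thread B (receiver): take the ulock from a handoff; blocks until
 *	// the holder calls lock_handoff, then returns owning the ulock.
 *	kr = lock_handoff_accept(ls, 0);
 */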
/*
 *	Routine:	lock_set_reference
 *
 *	Take out a reference on a lock set.  This keeps the data structure
 *	in existence (but the lock set may be deactivated).
 */
void
lock_set_reference(lock_set_t lock_set)
{
    lock_set_lock(lock_set);
    lock_set->ref_count++;
    lock_set_unlock(lock_set);
}

/*
 *	Routine:	lock_set_dereference
 *
 *	Release a reference on a lock set.  If this is the last reference,
 *	the lock set data structure is deallocated.
 */
void
lock_set_dereference(lock_set_t lock_set)
{
    int  ref_count;
    int  size;

    lock_set_lock(lock_set);
    ref_count = --(lock_set->ref_count);
    lock_set_unlock(lock_set);

    if (ref_count == 0) {
        size = sizeof(struct lock_set) +
               (sizeof(struct ulock) * (lock_set->n_ulocks - 1));
        kfree(lock_set, size);
    }
}
void
ulock_release_all(
    thread_t  thread)
{
    ulock_t  ulock;

    while (!queue_empty(&thread->held_ulocks)) {
        ulock = (ulock_t)queue_first(&thread->held_ulocks);
        lock_make_unstable(ulock, thread);
        ulock_release_internal(ulock, thread);
    }
}