/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 *	File:	kern/sync_lock.c
 *	Author:	Joseph CaraDonna
 *
 *	Contains RT distributed lock synchronization services.
 */

#include <mach/mach_types.h>
#include <mach/lock_set_server.h>
#include <mach/task_server.h>

#include <kern/misc_protos.h>
#include <kern/kalloc.h>
#include <kern/sync_lock.h>
#include <kern/sched_prim.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_sync.h>
#include <kern/thread.h>
#include <kern/task.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

/*
 *	Ulock ownership MACROS
 *
 *	Assumes: ulock internal lock is held
 */

#define ulock_ownership_set(ul, th)                             \
    MACRO_BEGIN                                                 \
    thread_mtx_lock(th);                                        \
    enqueue(&th->held_ulocks, (queue_entry_t)(ul));             \
    thread_mtx_unlock(th);                                      \
    (ul)->holder = th;                                          \
    MACRO_END

#define ulock_ownership_clear(ul)                               \
    MACRO_BEGIN                                                 \
    thread_t th;                                                \
    th = (ul)->holder;                                          \
    if ((th)->active) {                                         \
        thread_mtx_lock(th);                                    \
        remqueue(&th->held_ulocks,                              \
                 (queue_entry_t)(ul));                          \
        thread_mtx_unlock(th);                                  \
    } else {                                                    \
        remqueue(&th->held_ulocks,                              \
                 (queue_entry_t)(ul));                          \
    }                                                           \
    (ul)->holder = THREAD_NULL;                                 \
    MACRO_END

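/*
 * Note: held_ulocks is a per-thread queue linking every ulock the thread
 * currently holds.  The ownership macros above keep that queue consistent
 * so that the ulocks a thread still holds can be found, marked unstable,
 * and released when the thread goes away (see the release loop at the end
 * of this file).
 */
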
/*
 *	Lock set ownership MACROS
 */

#define lock_set_ownership_set(ls, t)                           \
    MACRO_BEGIN                                                 \
    task_lock((t));                                             \
    enqueue_head(&(t)->lock_set_list, (queue_entry_t)(ls));     \
    (t)->lock_sets_owned++;                                     \
    task_unlock((t));                                           \
    (ls)->owner = (t);                                          \
    MACRO_END

#define lock_set_ownership_clear(ls, t)                         \
    MACRO_BEGIN                                                 \
    task_lock((t));                                             \
    remqueue(&(t)->lock_set_list, (queue_entry_t)(ls));         \
    (t)->lock_sets_owned--;                                     \
    task_unlock((t));                                           \
    MACRO_END

unsigned int lock_set_event;
#define LOCK_SET_EVENT ((event64_t)&lock_set_event)

unsigned int lock_set_handoff;
#define LOCK_SET_HANDOFF ((event64_t)&lock_set_handoff)

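/*
 * Note: these two globals exist only so that their addresses can serve as
 * unique wait events.  Every wait and wakeup on a ulock in this file pairs
 * one of these events with the ulock's own wait_queue, e.g.
 * wait_queue_assert_wait64(&ulock->wait_queue, LOCK_SET_EVENT, ...), so a
 * normal-acquire wakeup can never be confused with a handoff wakeup.
 */
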
/*
 *	ROUTINE:	lock_set_init		[private]
 *
 *	Initialize the lock_set subsystem.
 *
 *	For now, we don't have anything to do here.
 */
void
lock_set_init(void)
{
    return;
}

/*
 *	ROUTINE:	lock_set_create		[exported]
 *
 *	Creates a lock set.
 *	The port representing the lock set is returned as a parameter.
 */
kern_return_t
lock_set_create (
    task_t      task,
    lock_set_t  *new_lock_set,
    int         n_ulocks,
    int         policy)
{
    lock_set_t  lock_set = LOCK_SET_NULL;
    ulock_t     ulock;
    vm_size_t   size;
    int         x;

    *new_lock_set = LOCK_SET_NULL;

    if (task == TASK_NULL || n_ulocks <= 0 || policy > SYNC_POLICY_MAX)
        return KERN_INVALID_ARGUMENT;

    if (VM_MAX_ADDRESS/sizeof(struct ulock) - sizeof(struct lock_set) < (unsigned)n_ulocks)
        return KERN_RESOURCE_SHORTAGE;

    size = sizeof(struct lock_set) + (sizeof(struct ulock) * (n_ulocks-1));
    lock_set = (lock_set_t) kalloc (size);

    if (lock_set == LOCK_SET_NULL)
        return KERN_RESOURCE_SHORTAGE;

    lock_set_lock_init(lock_set);
    lock_set->n_ulocks = n_ulocks;
    lock_set->ref_count = 1;

    /*
     * Create and initialize the lock set port
     */
    lock_set->port = ipc_port_alloc_kernel();
    if (lock_set->port == IP_NULL) {
        /* This will deallocate the lock set */
        lock_set_dereference(lock_set);
        return KERN_RESOURCE_SHORTAGE;
    }

    ipc_kobject_set (lock_set->port,
                     (ipc_kobject_t) lock_set,
                     IKOT_LOCK_SET);

    /*
     * Initialize each ulock in the lock set
     */
    for (x = 0; x < n_ulocks; x++) {
        ulock = (ulock_t) &lock_set->ulock_list[x];
        ulock_lock_init(ulock);
        ulock->lock_set = lock_set;
        ulock->holder   = THREAD_NULL;
        ulock->blocked  = FALSE;
        ulock->unstable = FALSE;
        ulock->ho_wait  = FALSE;
        wait_queue_init(&ulock->wait_queue, policy);
    }

    lock_set_ownership_set(lock_set, task);

    lock_set->active = TRUE;
    *new_lock_set = lock_set;

    return KERN_SUCCESS;
}

/*
 *	ROUTINE:	lock_set_destroy	[exported]
 *
 *	Destroys a lock set.  This call will only succeed if the
 *	specified task is the SAME task name specified at the lock set's
 *	creation.
 *
 *	NOTES:
 *	- All threads currently blocked on the lock set's ulocks are awoken.
 *	- These threads will return with the KERN_LOCK_SET_DESTROYED error.
 */
kern_return_t
lock_set_destroy (task_t task, lock_set_t lock_set)
{
    ulock_t     ulock;
    int         i;

    if (task == TASK_NULL || lock_set == LOCK_SET_NULL)
        return KERN_INVALID_ARGUMENT;

    if (lock_set->owner != task)
        return KERN_INVALID_RIGHT;

    lock_set_lock(lock_set);
    if (!lock_set->active) {
        lock_set_unlock(lock_set);
        return KERN_LOCK_SET_DESTROYED;
    }

    /*
     *  Deactivate lock set
     */
    lock_set->active = FALSE;

    /*
     *  If a ulock is currently held in the target lock set:
     *
     *  1) Wakeup all threads blocked on the ulock (if any).  Threads
     *     may be blocked waiting normally, or waiting for a handoff.
     *     Blocked threads will return with KERN_LOCK_SET_DESTROYED.
     *
     *  2) ulock ownership is cleared.
     *     The thread currently holding the ulock is revoked of its
     *     ownership.
     */
    for (i = 0; i < lock_set->n_ulocks; i++) {
        ulock = &lock_set->ulock_list[i];

        ulock_lock(ulock);

        if (ulock->accept_wait) {
            ulock->accept_wait = FALSE;
            wait_queue_wakeup64_one(&ulock->wait_queue,
                                    LOCK_SET_HANDOFF,
                                    THREAD_RESTART);
        }

        if (ulock->holder) {
            if (ulock->blocked) {
                ulock->blocked = FALSE;
                wait_queue_wakeup64_all(&ulock->wait_queue,
                                        LOCK_SET_EVENT,
                                        THREAD_RESTART);
            }
            if (ulock->ho_wait) {
                ulock->ho_wait = FALSE;
                wait_queue_wakeup64_one(&ulock->wait_queue,
                                        LOCK_SET_HANDOFF,
                                        THREAD_RESTART);
            }
            ulock_ownership_clear(ulock);
        }

        ulock_unlock(ulock);
    }

    lock_set_unlock(lock_set);
    lock_set_ownership_clear(lock_set, task);

    /*
     *  Drop the lock set reference, which in turn destroys the
     *  lock set structure if the reference count goes to zero.
     */
    ipc_port_dealloc_kernel(lock_set->port);
    lock_set_dereference(lock_set);

    return KERN_SUCCESS;
}

kern_return_t
lock_acquire (lock_set_t lock_set, int lock_id)
{
    ulock_t   ulock;

    if (lock_set == LOCK_SET_NULL)
        return KERN_INVALID_ARGUMENT;

    if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
        return KERN_INVALID_ARGUMENT;

 retry:
    lock_set_lock(lock_set);
    if (!lock_set->active) {
        lock_set_unlock(lock_set);
        return KERN_LOCK_SET_DESTROYED;
    }

    ulock = (ulock_t) &lock_set->ulock_list[lock_id];
    ulock_lock(ulock);
    lock_set_unlock(lock_set);

    /*
     * Block the current thread if the lock is already held.
     */
    if (ulock->holder != THREAD_NULL) {
        int wait_result;

        if (ulock->holder == current_thread()) {
            ulock_unlock(ulock);
            return KERN_LOCK_OWNED_SELF;
        }

        ulock->blocked = TRUE;
        wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
                                               LOCK_SET_EVENT,
                                               THREAD_ABORTSAFE, 0);
        ulock_unlock(ulock);

        /*
         *  Block - Wait for lock to become available.
         */
        if (wait_result == THREAD_WAITING)
            wait_result = thread_block(THREAD_CONTINUE_NULL);

        /*
         *  Check the result status:
         *
         *  Check to see why thread was woken up.  In all cases, we
         *  already have been removed from the queue.
         */
        switch (wait_result) {
        case THREAD_AWAKENED:
            /* lock transitioned from old locker to us */
            /* he already made us owner */
            return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
                                       KERN_SUCCESS;

        case THREAD_INTERRUPTED:
            return KERN_ABORTED;

        case THREAD_RESTART:
            goto retry;  /* probably a dead lock_set */

        default:
            panic("lock_acquire\n");
        }
    }

    /*
     * Assign lock ownership
     */
    ulock_ownership_set(ulock, current_thread());
    ulock_unlock(ulock);

    return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}

kern_return_t
lock_release (lock_set_t lock_set, int lock_id)
{
    ulock_t   ulock;

    if (lock_set == LOCK_SET_NULL)
        return KERN_INVALID_ARGUMENT;

    if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
        return KERN_INVALID_ARGUMENT;

    ulock = (ulock_t) &lock_set->ulock_list[lock_id];

    return (ulock_release_internal(ulock, current_thread()));
}

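/*
 * Usage sketch (not part of this file): lock_acquire/lock_release bracket a
 * critical section, addressing a ulock by its index within the set.  As
 * above, the user-level stub names are assumptions based on the MIG
 * lock_set interface.
 *
 *	kern_return_t kr;
 *
 *	kr = lock_acquire(lock_set, 0);   // may block, ordered by the policy
 *	switch (kr) {
 *	case KERN_SUCCESS:
 *	case KERN_LOCK_UNSTABLE:          // a previous holder died holding it
 *		// ... critical section ...
 *		(void) lock_release(lock_set, 0);
 *		break;
 *	case KERN_LOCK_OWNED_SELF:        // calling thread already holds it
 *	default:
 *		break;
 *	}
 */
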
kern_return_t
lock_try (lock_set_t lock_set, int lock_id)
{
    ulock_t   ulock;

    if (lock_set == LOCK_SET_NULL)
        return KERN_INVALID_ARGUMENT;

    if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
        return KERN_INVALID_ARGUMENT;

    lock_set_lock(lock_set);
    if (!lock_set->active) {
        lock_set_unlock(lock_set);
        return KERN_LOCK_SET_DESTROYED;
    }

    ulock = (ulock_t) &lock_set->ulock_list[lock_id];
    ulock_lock(ulock);
    lock_set_unlock(lock_set);

    /*
     * If the lock is already owned, we return without blocking.
     *
     * An ownership status is returned to inform the caller as to
     * whether it already holds the lock or another thread does.
     */
    if (ulock->holder != THREAD_NULL) {
        if (ulock->holder == current_thread()) {
            ulock_unlock(ulock);
            return KERN_LOCK_OWNED_SELF;
        }

        ulock_unlock(ulock);
        return KERN_LOCK_OWNED;
    }

    /*
     * Add the ulock to the current thread's held_ulocks list.
     */
    ulock_ownership_set(ulock, current_thread());
    ulock_unlock(ulock);

    return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}

kern_return_t
lock_make_stable (lock_set_t lock_set, int lock_id)
{
    ulock_t   ulock;

    if (lock_set == LOCK_SET_NULL)
        return KERN_INVALID_ARGUMENT;

    if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
        return KERN_INVALID_ARGUMENT;

    lock_set_lock(lock_set);
    if (!lock_set->active) {
        lock_set_unlock(lock_set);
        return KERN_LOCK_SET_DESTROYED;
    }

    ulock = (ulock_t) &lock_set->ulock_list[lock_id];
    ulock_lock(ulock);
    lock_set_unlock(lock_set);

    if (ulock->holder != current_thread()) {
        ulock_unlock(ulock);
        return KERN_INVALID_RIGHT;
    }

    ulock->unstable = FALSE;
    ulock_unlock(ulock);

    return KERN_SUCCESS;
}

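/*
 * Usage sketch (not part of this file): KERN_LOCK_UNSTABLE tells the new
 * holder that a previous holder was terminated while holding the ulock, so
 * any data the lock protects may be inconsistent.  A typical recovery path
 * repairs the data and then declares the lock stable again (stub names are
 * assumptions, as above; repair_shared_state() is a hypothetical helper).
 *
 *	kr = lock_acquire(lock_set, id);
 *	if (kr == KERN_LOCK_UNSTABLE) {
 *		repair_shared_state();              // hypothetical helper
 *		(void) lock_make_stable(lock_set, id);
 *	}
 *	// ... critical section ...
 *	(void) lock_release(lock_set, id);
 */
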
/*
 *	ROUTINE:	lock_make_unstable	[internal]
 *
 *	Marks the lock as unstable.
 *
 *	NOTES:
 *	- All future acquisitions of the lock will return with a
 *	  KERN_LOCK_UNSTABLE status, until the lock is made stable again.
 */
kern_return_t
lock_make_unstable (ulock_t ulock, thread_t thread)
{
    lock_set_t  lock_set;

    lock_set = ulock->lock_set;
    lock_set_lock(lock_set);
    if (!lock_set->active) {
        lock_set_unlock(lock_set);
        return KERN_LOCK_SET_DESTROYED;
    }

    ulock_lock(ulock);
    lock_set_unlock(lock_set);

    if (ulock->holder != thread) {
        ulock_unlock(ulock);
        return KERN_INVALID_RIGHT;
    }

    ulock->unstable = TRUE;
    ulock_unlock(ulock);

    return KERN_SUCCESS;
}

/*
 *	ROUTINE:	ulock_release_internal	[internal]
 *
 *	Releases the ulock.
 *	If any threads are blocked waiting for the ulock, one is woken-up.
 */
kern_return_t
ulock_release_internal (ulock_t ulock, thread_t thread)
{
    lock_set_t  lock_set;

    if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
        return KERN_INVALID_ARGUMENT;

    lock_set_lock(lock_set);
    if (!lock_set->active) {
        lock_set_unlock(lock_set);
        return KERN_LOCK_SET_DESTROYED;
    }
    ulock_lock(ulock);
    lock_set_unlock(lock_set);

    if (ulock->holder != thread) {
        ulock_unlock(ulock);
        return KERN_INVALID_RIGHT;
    }

    /*
     *  If we have a hint that threads might be waiting,
     *  try to transfer the lock ownership to a waiting thread
     *  and wake it up.
     */
    if (ulock->blocked) {
        wait_queue_t    wq = &ulock->wait_queue;
        thread_t        wqthread;
        spl_t           s;

        s = splsched();
        wait_queue_lock(wq);
        wqthread = wait_queue_wakeup64_identity_locked(wq,
                                                       LOCK_SET_EVENT,
                                                       THREAD_AWAKENED,
                                                       TRUE);
        /* wait_queue now unlocked, thread locked */

        if (wqthread != THREAD_NULL) {
            /*
             * JMM - These ownership transfer macros have a
             * locking/race problem.  To keep the thread from
             * changing states on us (nullifying the ownership
             * assignment) we need to keep the thread locked
             * during the assignment.  But we can't because the
             * macros take an activation lock, which is a mutex.
             * Since this code was already broken before I got
             * here, I will leave it for now.
             */
            thread_unlock(wqthread);
            splx(s);

            /*
             *  Transfer ulock ownership
             *  from the current thread to the acquisition thread.
             */
            ulock_ownership_clear(ulock);
            ulock_ownership_set(ulock, wqthread);
            ulock_unlock(ulock);

            return KERN_SUCCESS;
        } else {
            ulock->blocked = FALSE;
            splx(s);
        }
    }

    /*
     *  Disown ulock
     */
    ulock_ownership_clear(ulock);
    ulock_unlock(ulock);

    return KERN_SUCCESS;
}

kern_return_t
lock_handoff (lock_set_t lock_set, int lock_id)
{
    ulock_t   ulock;
    int       wait_result;

    if (lock_set == LOCK_SET_NULL)
        return KERN_INVALID_ARGUMENT;

    if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
        return KERN_INVALID_ARGUMENT;

 retry:
    lock_set_lock(lock_set);

    if (!lock_set->active) {
        lock_set_unlock(lock_set);
        return KERN_LOCK_SET_DESTROYED;
    }

    ulock = (ulock_t) &lock_set->ulock_list[lock_id];
    ulock_lock(ulock);
    lock_set_unlock(lock_set);

    if (ulock->holder != current_thread()) {
        ulock_unlock(ulock);
        return KERN_INVALID_RIGHT;
    }

    /*
     *  If the accepting thread (the receiver) is already waiting
     *  to accept the lock from the handoff thread (the sender),
     *  then perform the hand-off now.
     */
    if (ulock->accept_wait) {
        wait_queue_t    wq = &ulock->wait_queue;
        thread_t        thread;
        spl_t           s;

        /*
         *  See who the lucky devil is, if he is still there waiting.
         */
        s = splsched();
        wait_queue_lock(wq);
        thread = wait_queue_wakeup64_identity_locked(
                                wq,
                                LOCK_SET_HANDOFF,
                                THREAD_AWAKENED,
                                TRUE);
        /* wait queue unlocked, thread locked */

        /*
         *  Transfer lock ownership
         */
        if (thread != THREAD_NULL) {
            /*
             * JMM - These ownership transfer macros have a
             * locking/race problem.  To keep the thread from
             * changing states on us (nullifying the ownership
             * assignment) we need to keep the thread locked
             * during the assignment.  But we can't because the
             * macros take a thread mutex lock.
             *
             * Since this code was already broken before I got
             * here, I will leave it for now.
             */
            thread_unlock(thread);
            splx(s);

            ulock_ownership_clear(ulock);
            ulock_ownership_set(ulock, thread);
            ulock->accept_wait = FALSE;
            ulock_unlock(ulock);
            return KERN_SUCCESS;
        } else {
            /*
             * OOPS.  The accepting thread must have been aborted
             * and is racing back to clear the flag that says it is
             * waiting for an accept.  He will clear it when we
             * release the lock, so just fall thru and wait for
             * the next accept thread (that's the way it is
             * specified).
             */
            splx(s);
        }
    }

    /*
     * Indicate that there is a hand-off thread waiting, and then wait
     * for an accepting thread.
     */
    ulock->ho_wait = TRUE;
    wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
                                           LOCK_SET_HANDOFF,
                                           THREAD_ABORTSAFE, 0);
    ulock_unlock(ulock);

    if (wait_result == THREAD_WAITING)
        wait_result = thread_block(THREAD_CONTINUE_NULL);

    /*
     *  If the thread was woken-up via some action other than
     *  lock_handoff_accept or lock_set_destroy (i.e. thread_terminate),
     *  then we need to clear the ulock's handoff state.
     */
    switch (wait_result) {

    case THREAD_AWAKENED:
        return KERN_SUCCESS;

    case THREAD_INTERRUPTED:
        ulock_lock(ulock);
        assert(ulock->holder == current_thread());
        ulock->ho_wait = FALSE;
        ulock_unlock(ulock);
        return KERN_ABORTED;

    case THREAD_RESTART:
        goto retry;
    }

    panic("lock_handoff");
    return KERN_FAILURE;
}

kern_return_t
lock_handoff_accept (lock_set_t lock_set, int lock_id)
{
    ulock_t   ulock;
    int       wait_result;

    if (lock_set == LOCK_SET_NULL)
        return KERN_INVALID_ARGUMENT;

    if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
        return KERN_INVALID_ARGUMENT;

 retry:
    lock_set_lock(lock_set);
    if (!lock_set->active) {
        lock_set_unlock(lock_set);
        return KERN_LOCK_SET_DESTROYED;
    }

    ulock = (ulock_t) &lock_set->ulock_list[lock_id];
    ulock_lock(ulock);
    lock_set_unlock(lock_set);

    /*
     * If there is another accepting thread that beat us, just
     * return with an error.
     */
    if (ulock->accept_wait) {
        ulock_unlock(ulock);
        return KERN_ALREADY_WAITING;
    }

    if (ulock->holder == current_thread()) {
        ulock_unlock(ulock);
        return KERN_LOCK_OWNED_SELF;
    }

    /*
     *  If the handoff thread (the sender) is already waiting to
     *  hand-off the lock to the accepting thread (the receiver),
     *  then perform the hand-off now.
     */
    if (ulock->ho_wait) {
        wait_queue_t    wq = &ulock->wait_queue;

        /*
         *  See who the lucky devil is, if he is still there waiting.
         */
        assert(ulock->holder != THREAD_NULL);

        if (wait_queue_wakeup64_thread(wq,
                                       LOCK_SET_HANDOFF,
                                       ulock->holder,
                                       THREAD_AWAKENED) == KERN_SUCCESS) {
            /*
             * Holder thread was still waiting to give it
             * away.  Take over ownership.
             */
            ulock_ownership_clear(ulock);
            ulock_ownership_set(ulock, current_thread());
            ulock->ho_wait = FALSE;
            ulock_unlock(ulock);

            return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
                                       KERN_SUCCESS;
        }

        /*
         * OOPS.  The owner was aborted out of the handoff.
         * He will clear his own flag when he gets back.
         * In the meantime, we will wait as if we didn't
         * even see his flag (by falling thru).
         */
    }

    ulock->accept_wait = TRUE;
    wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
                                           LOCK_SET_HANDOFF,
                                           THREAD_ABORTSAFE, 0);
    ulock_unlock(ulock);

    if (wait_result == THREAD_WAITING)
        wait_result = thread_block(THREAD_CONTINUE_NULL);

    /*
     *  If the thread was woken-up via some action other than
     *  lock_handoff_accept or lock_set_destroy (i.e. thread_terminate),
     *  then we need to clear the ulock's handoff state.
     */
    switch (wait_result) {

    case THREAD_AWAKENED:
        return KERN_SUCCESS;

    case THREAD_INTERRUPTED:
        ulock_lock(ulock);
        ulock->accept_wait = FALSE;
        ulock_unlock(ulock);
        return KERN_ABORTED;

    case THREAD_RESTART:
        goto retry;
    }

    panic("lock_handoff_accept");
    return KERN_FAILURE;
}

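/*
 * Usage sketch (not part of this file): lock_handoff and lock_handoff_accept
 * form a rendezvous.  The current holder offers the ulock with lock_handoff
 * and blocks until some thread accepts; a prospective taker blocks in
 * lock_handoff_accept until a holder offers.  Whichever side arrives second
 * completes the ownership transfer.  Stub names are assumptions, as above.
 *
 *	// Thread A (currently holds ulock `id`):
 *	kr = lock_handoff(lock_set, id);        // returns once B has the lock
 *
 *	// Thread B:
 *	kr = lock_handoff_accept(lock_set, id); // returns holding the lock
 */
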
/*
 *	Routine:	lock_set_reference
 *
 *	Take out a reference on a lock set.  This keeps the data structure
 *	in existence (but the lock set may be deactivated).
 */
void
lock_set_reference(lock_set_t lock_set)
{
    lock_set_lock(lock_set);
    lock_set->ref_count++;
    lock_set_unlock(lock_set);
}

/*
 *	Routine:	lock_set_dereference
 *
 *	Release a reference on a lock set.  If this is the last reference,
 *	the lock set data structure is deallocated.
 */
void
lock_set_dereference(lock_set_t lock_set)
{
    int     ref_count;
    int     size;

    lock_set_lock(lock_set);
    ref_count = --(lock_set->ref_count);
    lock_set_unlock(lock_set);

    if (ref_count == 0) {
        size = sizeof(struct lock_set) +
               (sizeof(struct ulock) * (lock_set->n_ulocks - 1));
        kfree(lock_set, size);
    }
}

void
ulock_release_all(
    thread_t    thread)
{
    ulock_t ulock;

    while (!queue_empty(&thread->held_ulocks)) {
        ulock = (ulock_t)queue_first(&thread->held_ulocks);
        lock_make_unstable(ulock, thread);
        ulock_release_internal(ulock, thread);
    }
}