/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	File:	kern/sync_lock.c
 *	Author:	Joseph CaraDonna
 *
 *	Contains RT distributed lock synchronization services.
 */
#include <mach/mach_types.h>
#include <mach/lock_set_server.h>
#include <mach/task_server.h>

#include <kern/misc_protos.h>
#include <kern/kalloc.h>
#include <kern/sync_lock.h>
#include <kern/sched_prim.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_sync.h>
#include <kern/thread.h>
#include <kern/task.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
/*
 *	Ulock ownership MACROS
 *
 *	Assumes: ulock internal lock is held
 */

#define ulock_ownership_set(ul, th)				\
	MACRO_BEGIN						\
	thread_mtx_lock(th);					\
	enqueue(&th->held_ulocks, (queue_entry_t)(ul));		\
	thread_mtx_unlock(th);					\
	(ul)->holder = th;					\
	MACRO_END

#define ulock_ownership_clear(ul)				\
	MACRO_BEGIN						\
	thread_t th;						\
	th = (ul)->holder;					\
	if ((th)->active) {					\
		thread_mtx_lock(th);				\
		remqueue(&th->held_ulocks, (queue_entry_t)(ul));\
		thread_mtx_unlock(th);				\
	} else {						\
		remqueue(&th->held_ulocks, (queue_entry_t)(ul));\
	}							\
	(ul)->holder = THREAD_NULL;				\
	MACRO_END
/*
 *	Lock set ownership MACROS
 */

#define lock_set_ownership_set(ls, t)				\
	MACRO_BEGIN						\
	task_lock((t));						\
	enqueue_head(&(t)->lock_set_list, (queue_entry_t)(ls));\
	(t)->lock_sets_owned++;					\
	task_unlock((t));					\
	(ls)->owner = (t);					\
	MACRO_END

#define lock_set_ownership_clear(ls, t)				\
	MACRO_BEGIN						\
	task_lock((t));						\
	remqueue(&(t)->lock_set_list, (queue_entry_t)(ls));	\
	(t)->lock_sets_owned--;					\
	task_unlock((t));					\
	MACRO_END
unsigned int lock_set_event;
#define LOCK_SET_EVENT ((event64_t)&lock_set_event)

unsigned int lock_set_handoff;
#define LOCK_SET_HANDOFF ((event64_t)&lock_set_handoff)
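
/*
 * Note: lock_set_event and lock_set_handoff exist only so that their
 * kernel addresses can serve as unique 64-bit wait-queue event tokens.
 * Threads blocked in lock_acquire() wait on LOCK_SET_EVENT, while
 * handoff senders and receivers rendezvous on LOCK_SET_HANDOFF; the
 * values stored in the two globals are never read.
 */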
/*
 *	ROUTINE:	lock_set_init		[private]
 *
 *	Initialize the lock_set subsystem.
 *	For now, we don't have anything to do here.
 */
void
lock_set_init(void)
{
	return;
}
/*
 *	ROUTINE:	lock_set_create		[exported]
 *
 *	Creates a lock set.
 *	The port representing the lock set is returned as a parameter.
 */
kern_return_t
lock_set_create (
	task_t		task,
	lock_set_t	*new_lock_set,
	int		n_ulocks,
	int		policy)
{
	lock_set_t	lock_set = LOCK_SET_NULL;
	ulock_t		ulock;
	vm_size_t	size;
	int		x;

	*new_lock_set = LOCK_SET_NULL;

	if (task == TASK_NULL || n_ulocks <= 0 || policy > SYNC_POLICY_MAX)
		return KERN_INVALID_ARGUMENT;

	size = sizeof(struct lock_set) + (sizeof(struct ulock) * (n_ulocks - 1));
	lock_set = (lock_set_t) kalloc(size);

	if (lock_set == LOCK_SET_NULL)
		return KERN_RESOURCE_SHORTAGE;

	lock_set_lock_init(lock_set);
	lock_set->n_ulocks = n_ulocks;
	lock_set->ref_count = 1;

	/*
	 *  Create and initialize the lock set port
	 */
	lock_set->port = ipc_port_alloc_kernel();
	if (lock_set->port == IP_NULL) {
		/* This will deallocate the lock set */
		lock_set_dereference(lock_set);
		return KERN_RESOURCE_SHORTAGE;
	}

	ipc_kobject_set(lock_set->port,
			(ipc_kobject_t) lock_set,
			IKOT_LOCK_SET);

	/*
	 *  Initialize each ulock in the lock set
	 */
	for (x = 0; x < n_ulocks; x++) {
		ulock = (ulock_t) &lock_set->ulock_list[x];
		ulock_lock_init(ulock);
		ulock->lock_set	= lock_set;
		ulock->holder	= THREAD_NULL;
		ulock->blocked	= FALSE;
		ulock->unstable	= FALSE;
		ulock->ho_wait	= FALSE;
		wait_queue_init(&ulock->wait_queue, policy);
	}

	lock_set_ownership_set(lock_set, task);

	lock_set->active = TRUE;
	*new_lock_set = lock_set;

	return KERN_SUCCESS;
}
/*
 *	ROUTINE:	lock_set_destroy	[exported]
 *
 *	Destroys a lock set.  This call will only succeed if the
 *	specified task is the SAME task name specified at the lock set's
 *	creation.
 *
 *	NOTES:
 *	- All threads currently blocked on the lock set's ulocks are awoken.
 *	- These threads will return with the KERN_LOCK_SET_DESTROYED error.
 */
kern_return_t
lock_set_destroy (task_t task, lock_set_t lock_set)
{
	ulock_t		ulock;
	int		i;

	if (task == TASK_NULL || lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_set->owner != task)
		return KERN_INVALID_RIGHT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	/*
	 *  Deactivate lock set
	 */
	lock_set->active = FALSE;

	/*
	 *  If a ulock is currently held in the target lock set:
	 *
	 *  1) Wakeup all threads blocked on the ulock (if any).  Threads
	 *     may be blocked waiting normally, or waiting for a handoff.
	 *     Blocked threads will return with KERN_LOCK_SET_DESTROYED.
	 *
	 *  2) ulock ownership is cleared.
	 *     The thread currently holding the ulock is revoked of its
	 *     ownership.
	 */
	for (i = 0; i < lock_set->n_ulocks; i++) {
		ulock = &lock_set->ulock_list[i];

		ulock_lock(ulock);

		if (ulock->accept_wait) {
			ulock->accept_wait = FALSE;
			wait_queue_wakeup64_one(&ulock->wait_queue,
						LOCK_SET_HANDOFF,
						THREAD_RESTART);
		}

		if (ulock->holder) {
			if (ulock->blocked) {
				ulock->blocked = FALSE;
				wait_queue_wakeup64_all(&ulock->wait_queue,
							LOCK_SET_EVENT,
							THREAD_RESTART);
			}

			if (ulock->ho_wait) {
				ulock->ho_wait = FALSE;
				wait_queue_wakeup64_one(&ulock->wait_queue,
							LOCK_SET_HANDOFF,
							THREAD_RESTART);
			}

			ulock_ownership_clear(ulock);
		}

		ulock_unlock(ulock);
	}

	lock_set_unlock(lock_set);
	lock_set_ownership_clear(lock_set, task);

	/*
	 *  Deallocate
	 *
	 *  Drop the lock set reference, which in turn destroys the
	 *  lock set structure if the reference count goes to zero.
	 */
	ipc_port_dealloc_kernel(lock_set->port);
	lock_set_dereference(lock_set);

	return KERN_SUCCESS;
}
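
/*
 *	ROUTINE:	lock_acquire		[exported]
 *
 *	Acquires the ulock identified by lock_id within the lock set,
 *	blocking (abort-safely) if another thread currently holds it.
 */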
kern_return_t
lock_acquire (lock_set_t lock_set, int lock_id)
{
	ulock_t	  ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 *  Block the current thread if the lock is already held.
	 */
	if (ulock->holder != THREAD_NULL) {
		int wait_result;

		if (ulock->holder == current_thread()) {
			ulock_unlock(ulock);
			return KERN_LOCK_OWNED_SELF;
		}

		ulock->blocked = TRUE;
		wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
						       LOCK_SET_EVENT,
						       THREAD_ABORTSAFE, 0);
		ulock_unlock(ulock);

		/*
		 *  Block - Wait for lock to become available.
		 */
		if (wait_result == THREAD_WAITING)
			wait_result = thread_block(THREAD_CONTINUE_NULL);

		/*
		 *  Check the result status:
		 *
		 *  Check to see why thread was woken up.  In all cases, we
		 *  already have been removed from the queue.
		 */
		switch (wait_result) {
		case THREAD_AWAKENED:
			/* lock transitioned from old locker to us */
			/* he already made us owner */
			return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
						   KERN_SUCCESS;

		case THREAD_INTERRUPTED:
			return KERN_ABORTED;

		case THREAD_RESTART:
			goto retry;  /* probably a dead lock_set */

		default:
			panic("lock_acquire\n");
		}
	}

	/*
	 *  Assign lock ownership
	 */
	ulock_ownership_set(ulock, current_thread());
	ulock_unlock(ulock);

	return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}
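
/*
 *	ROUTINE:	lock_release		[exported]
 *
 *	Releases the ulock identified by lock_id, provided the calling
 *	thread is its current holder.
 */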
kern_return_t
lock_release (lock_set_t lock_set, int lock_id)
{
	ulock_t	  ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];

	return (ulock_release_internal(ulock, current_thread()));
}
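
/*
 *	ROUTINE:	lock_try		[exported]
 *
 *	Attempts to acquire the ulock identified by lock_id without
 *	blocking; reports whether the caller or another thread already
 *	holds it.
 */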
kern_return_t
lock_try (lock_set_t lock_set, int lock_id)
{
	ulock_t	  ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 *  If the lock is already owned, we return without blocking.
	 *
	 *  An ownership status is returned to inform the caller as to
	 *  whether it already holds the lock or another thread does.
	 */
	if (ulock->holder != THREAD_NULL) {
		if (ulock->holder == current_thread()) {
			ulock_unlock(ulock);
			return KERN_LOCK_OWNED_SELF;
		}

		ulock_unlock(ulock);
		return KERN_LOCK_OWNED;
	}

	/*
	 *  Add the ulock to the calling thread's held_ulocks list and
	 *  take ownership.
	 */
	ulock_ownership_set(ulock, current_thread());
	ulock_unlock(ulock);

	return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}
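
/*
 *	ROUTINE:	lock_make_stable	[exported]
 *
 *	Marks the ulock identified by lock_id as stable again; only the
 *	current holder may do so.
 */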
kern_return_t
lock_make_stable (lock_set_t lock_set, int lock_id)
{
	ulock_t	  ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != current_thread()) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	ulock->unstable = FALSE;
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}
/*
 *	ROUTINE:	lock_make_unstable	[internal]
 *
 *	Marks the lock as unstable.
 *
 *	NOTES:
 *	- All future acquisitions of the lock will return with a
 *	  KERN_LOCK_UNSTABLE status, until the lock is made stable again.
 */
kern_return_t
lock_make_unstable (ulock_t ulock, thread_t thread)
{
	lock_set_t	lock_set;

	lock_set = ulock->lock_set;
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != thread) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	ulock->unstable = TRUE;
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}
/*
 *	ROUTINE:	ulock_release_internal	[internal]
 *
 *	Releases the ulock.
 *	If any threads are blocked waiting for the ulock, one is woken up.
 */
kern_return_t
ulock_release_internal (ulock_t ulock, thread_t thread)
{
	lock_set_t	lock_set;

	if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != thread) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	/*
	 *  If we have a hint that threads might be waiting,
	 *  try to transfer the lock ownership to a waiting thread
	 *  and wake it up.
	 */
	if (ulock->blocked) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	wqthread;
		spl_t		s;

		s = splsched();
		wait_queue_lock(wq);
		wqthread = wait_queue_wakeup64_identity_locked(wq,
							LOCK_SET_EVENT,
							THREAD_AWAKENED,
							TRUE);
		/* wait_queue now unlocked, thread locked */

		if (wqthread != THREAD_NULL) {
			/*
			 * JMM - These ownership transfer macros have a
			 * locking/race problem.  To keep the thread from
			 * changing states on us (nullifying the ownership
			 * assignment) we need to keep the thread locked
			 * during the assignment.  But we can't because the
			 * macros take an activation lock, which is a mutex.
			 * Since this code was already broken before I got
			 * here, I will leave it for now.
			 */
			thread_unlock(wqthread);
			splx(s);

			/*
			 *  Transfer ulock ownership
			 *  from the current thread to the acquisition thread.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, wqthread);
			ulock_unlock(ulock);

			return KERN_SUCCESS;
		} else {
			ulock->blocked = FALSE;
			splx(s);
		}
	}

	/*
	 *  Disown ulock
	 */
	ulock_ownership_clear(ulock);
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}
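
/*
 *	ROUTINE:	lock_handoff		[exported]
 *
 *	Hands the held ulock identified by lock_id directly to an
 *	accepting thread, waiting for one if necessary.
 */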
kern_return_t
lock_handoff (lock_set_t lock_set, int lock_id)
{
	ulock_t	  ulock;
	int	  wait_result;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);

	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != current_thread()) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	/*
	 *  If the accepting thread (the receiver) is already waiting
	 *  to accept the lock from the handoff thread (the sender),
	 *  then perform the hand-off now.
	 */
	if (ulock->accept_wait) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	thread;
		spl_t		s;

		/*
		 *  See who the lucky devil is, if he is still there waiting.
		 */
		s = splsched();
		wait_queue_lock(wq);
		thread = wait_queue_wakeup64_identity_locked(
					wq,
					LOCK_SET_HANDOFF,
					THREAD_AWAKENED,
					TRUE);
		/* wait queue unlocked, thread locked */

		/*
		 *  Transfer lock ownership
		 */
		if (thread != THREAD_NULL) {
			/*
			 * JMM - These ownership transfer macros have a
			 * locking/race problem.  To keep the thread from
			 * changing states on us (nullifying the ownership
			 * assignment) we need to keep the thread locked
			 * during the assignment.  But we can't because the
			 * macros take a thread mutex lock.
			 *
			 * Since this code was already broken before I got
			 * here, I will leave it for now.
			 */
			thread_unlock(thread);
			splx(s);

			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, thread);
			ulock->accept_wait = FALSE;
			ulock_unlock(ulock);
			return KERN_SUCCESS;
		} else {
			/*
			 * OOPS.  The accepting thread must have been aborted
			 * and is racing back to clear the flag that says it is
			 * waiting for an accept.  It will clear the flag when
			 * we release the lock, so just fall through and wait
			 * for the next accepting thread (that's the way it is
			 * specified).
			 */
			splx(s);
		}
	}

	/*
	 * Indicate that there is a hand-off thread waiting, and then wait
	 * for an accepting thread.
	 */
	ulock->ho_wait = TRUE;
	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
					       LOCK_SET_HANDOFF,
					       THREAD_ABORTSAFE, 0);
	ulock_unlock(ulock);

	if (wait_result == THREAD_WAITING)
		wait_result = thread_block(THREAD_CONTINUE_NULL);

	/*
	 *  If the thread was woken up via some action other than
	 *  lock_handoff_accept or lock_set_destroy (e.g., thread_terminate),
	 *  then we need to clear the ulock's handoff state.
	 */
	switch (wait_result) {

	case THREAD_AWAKENED:
		return KERN_SUCCESS;

	case THREAD_INTERRUPTED:
		ulock_lock(ulock);
		assert(ulock->holder == current_thread());
		ulock->ho_wait = FALSE;
		ulock_unlock(ulock);
		return KERN_ABORTED;

	case THREAD_RESTART:
		goto retry;
	}

	panic("lock_handoff");
	return KERN_SUCCESS;
}
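
/*
 *	ROUTINE:	lock_handoff_accept	[exported]
 *
 *	Accepts a ulock handed off by its current holder via lock_handoff,
 *	waiting for the sender if necessary.
 */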
kern_return_t
lock_handoff_accept (lock_set_t lock_set, int lock_id)
{
	ulock_t	  ulock;
	int	  wait_result;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 * If there is another accepting thread that beat us, just
	 * return with an error.
	 */
	if (ulock->accept_wait) {
		ulock_unlock(ulock);
		return KERN_ALREADY_WAITING;
	}

	if (ulock->holder == current_thread()) {
		ulock_unlock(ulock);
		return KERN_LOCK_OWNED_SELF;
	}

	/*
	 *  If the handoff thread (the sender) is already waiting to
	 *  hand-off the lock to the accepting thread (the receiver),
	 *  then perform the hand-off now.
	 */
	if (ulock->ho_wait) {
		wait_queue_t	wq = &ulock->wait_queue;

		/*
		 *  See who the lucky devil is, if he is still there waiting.
		 */
		assert(ulock->holder != THREAD_NULL);

		if (wait_queue_wakeup64_thread(wq,
					       LOCK_SET_HANDOFF,
					       ulock->holder,
					       THREAD_AWAKENED) == KERN_SUCCESS) {
			/*
			 * Holder thread was still waiting to give it
			 * away.  Take over ownership.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, current_thread());
			ulock->ho_wait = FALSE;
			ulock_unlock(ulock);
			return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
						   KERN_SUCCESS;
		}

		/*
		 * OOPS.  The owner was aborted out of the handoff.
		 * He will clear his own flag when he gets back.
		 * In the meantime, we will wait as if we didn't
		 * even see his flag (by falling through).
		 */
	}

	ulock->accept_wait = TRUE;
	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
					       LOCK_SET_HANDOFF,
					       THREAD_ABORTSAFE, 0);
	ulock_unlock(ulock);

	if (wait_result == THREAD_WAITING)
		wait_result = thread_block(THREAD_CONTINUE_NULL);

	/*
	 *  If the thread was woken up via some action other than
	 *  lock_handoff_accept or lock_set_destroy (e.g., thread_terminate),
	 *  then we need to clear the ulock's handoff state.
	 */
	switch (wait_result) {

	case THREAD_AWAKENED:
		return KERN_SUCCESS;

	case THREAD_INTERRUPTED:
		ulock_lock(ulock);
		ulock->accept_wait = FALSE;
		ulock_unlock(ulock);
		return KERN_ABORTED;

	case THREAD_RESTART:
		goto retry;
	}

	panic("lock_handoff_accept");
	return KERN_SUCCESS;
}
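
#if 0
/*
 * Illustrative sketch (not part of this file): the hand-off protocol as
 * seen from user space, using the MIG-generated calls.  The header, the
 * assumption that ulock 0 exists, and the error handling are made up for
 * illustration only.
 */
#include <mach/mach.h>

/* Sender: must hold ulock 0; lock_handoff blocks until a receiver accepts. */
static void
handoff_sender(lock_set_t lock_set)
{
	if (lock_acquire(lock_set, 0) == KERN_SUCCESS)
		(void) lock_handoff(lock_set, 0);
}

/* Receiver: blocks until the holder hands ulock 0 over, then owns it. */
static void
handoff_receiver(lock_set_t lock_set)
{
	if (lock_handoff_accept(lock_set, 0) == KERN_SUCCESS)
		(void) lock_release(lock_set, 0);
}
#endif	/* example: pairing lock_handoff with lock_handoff_accept */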
/*
 *	Routine:	lock_set_reference
 *
 *	Take out a reference on a lock set.  This keeps the data structure
 *	in existence (but the lock set may be deactivated).
 */
void
lock_set_reference(lock_set_t lock_set)
{
	lock_set_lock(lock_set);
	lock_set->ref_count++;
	lock_set_unlock(lock_set);
}
/*
 *	Routine:	lock_set_dereference
 *
 *	Release a reference on a lock set.  If this is the last reference,
 *	the lock set data structure is deallocated.
 */
void
lock_set_dereference(lock_set_t lock_set)
{
	int	ref_count;
	int	size;

	lock_set_lock(lock_set);
	ref_count = --(lock_set->ref_count);
	lock_set_unlock(lock_set);

	if (ref_count == 0) {
		size = sizeof(struct lock_set) +
			(sizeof(struct ulock) * (lock_set->n_ulocks - 1));
		kfree(lock_set, size);
	}
}
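
/*
 *	Routine:	ulock_release_all
 *
 *	Releases (and marks unstable) every ulock still held by a
 *	terminating thread, so that waiters are not left blocked.
 */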
void
ulock_release_all(thread_t thread)
{
	ulock_t	  ulock;

	while (!queue_empty(&thread->held_ulocks)) {
		ulock = (ulock_t) queue_first(&thread->held_ulocks);
		lock_make_unstable(ulock, thread);
		ulock_release_internal(ulock, thread);
	}
}