/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	File:	kern/sync_lock.c
 *	Author:	Joseph CaraDonna
 *
 *	Contains RT distributed lock synchronization services.
 */
#include <kern/etap_macros.h>
#include <kern/misc_protos.h>
#include <kern/sync_lock.h>
#include <kern/sched_prim.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_sync.h>
#include <kern/etap_macros.h>
#include <kern/thread.h>
#include <kern/task.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
/*
 *	Ulock ownership MACROS
 *
 *	Assumes: ulock internal lock is held
 */

#define ulock_ownership_set(ul, th)                                     \
        MACRO_BEGIN                                                     \
        thread_act_t _th_act;                                           \
        _th_act = (th)->top_act;                                        \
        act_lock(_th_act);                                              \
        enqueue (&_th_act->held_ulocks, (queue_entry_t) (ul));          \
        act_unlock(_th_act);                                            \
        (ul)->holder = _th_act;                                         \
        MACRO_END

#define ulock_ownership_clear(ul)                                       \
        MACRO_BEGIN                                                     \
        thread_act_t _th_act;                                           \
        _th_act = (ul)->holder;                                         \
        if (_th_act->active) {                                          \
                act_lock(_th_act);                                      \
                remqueue(&_th_act->held_ulocks,                         \
                         (queue_entry_t) (ul));                         \
                act_unlock(_th_act);                                    \
        } else {                                                        \
                remqueue(&_th_act->held_ulocks,                         \
                         (queue_entry_t) (ul));                         \
        }                                                               \
        (ul)->holder = THR_ACT_NULL;                                    \
        MACRO_END
/*
 *	Lock set ownership MACROS
 */

#define lock_set_ownership_set(ls, t)                                   \
        MACRO_BEGIN                                                     \
        task_lock((t));                                                 \
        enqueue_head(&(t)->lock_set_list, (queue_entry_t) (ls));        \
        (t)->lock_sets_owned++;                                         \
        task_unlock((t));                                               \
        (ls)->owner = (t);                                              \
        MACRO_END

#define lock_set_ownership_clear(ls, t)                                 \
        MACRO_BEGIN                                                     \
        task_lock((t));                                                 \
        remqueue(&(t)->lock_set_list, (queue_entry_t) (ls));            \
        (t)->lock_sets_owned--;                                         \
        task_unlock((t));                                               \
        MACRO_END
unsigned int lock_set_event;
#define LOCK_SET_EVENT ((event64_t)&lock_set_event)

unsigned int lock_set_handoff;
#define LOCK_SET_HANDOFF ((event64_t)&lock_set_handoff)
/*
 *	ROUTINE:	lock_set_init		[private]
 *
 *	Initialize the lock_set subsystem.
 *
 *	For now, we don't have anything to do here.
 */
void
lock_set_init(void)
{
        return;
}
/*
 *	ROUTINE:	lock_set_create		[exported]
 *
 *	Creates a lock set.
 *	The port representing the lock set is returned as a parameter.
 */
kern_return_t
lock_set_create (
        task_t          task,
        lock_set_t      *new_lock_set,
        int             n_ulocks,
        int             policy)
{
        lock_set_t      lock_set = LOCK_SET_NULL;
        ulock_t         ulock;
        int             size;
        int             x;

        *new_lock_set = LOCK_SET_NULL;

        if (task == TASK_NULL || n_ulocks <= 0 || policy > SYNC_POLICY_MAX)
                return KERN_INVALID_ARGUMENT;

        size = sizeof(struct lock_set) + (sizeof(struct ulock) * (n_ulocks-1));
        lock_set = (lock_set_t) kalloc (size);

        if (lock_set == LOCK_SET_NULL)
                return KERN_RESOURCE_SHORTAGE;

        lock_set_lock_init(lock_set);
        lock_set->n_ulocks = n_ulocks;
        lock_set->ref_count = 1;

        /*
         * Create and initialize the lock set port
         */
        lock_set->port = ipc_port_alloc_kernel();
        if (lock_set->port == IP_NULL) {
                /* This will deallocate the lock set */
                lock_set_dereference(lock_set);
                return KERN_RESOURCE_SHORTAGE;
        }

        ipc_kobject_set (lock_set->port,
                        (ipc_kobject_t) lock_set,
                        IKOT_LOCK_SET);

        /*
         * Initialize each ulock in the lock set
         */
        for (x = 0; x < n_ulocks; x++) {
                ulock = (ulock_t) &lock_set->ulock_list[x];
                ulock_lock_init(ulock);
                ulock->lock_set = lock_set;
                ulock->holder   = THR_ACT_NULL;
                ulock->blocked  = FALSE;
                ulock->unstable = FALSE;
                ulock->ho_wait  = FALSE;
                wait_queue_init(&ulock->wait_queue, policy);
        }

        lock_set_ownership_set(lock_set, task);

        lock_set->active = TRUE;
        *new_lock_set = lock_set;

        return KERN_SUCCESS;
}
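/*
 * Illustrative sketch (not part of this file): how a user-space client
 * might create a lock set through the exported Mach interface.  The
 * example_* helper is hypothetical, and the sketch assumes the
 * MIG-generated user-level prototype for lock_set_create(); check the
 * user Mach headers on your system before relying on it.
 */
#if 0   /* user-space example only -- never compiled into the kernel */
#include <mach/mach.h>

static kern_return_t
example_create_lock_set(lock_set_t *ls)
{
        /* One lock set holding 4 ulocks, FIFO wakeup policy. */
        return lock_set_create(mach_task_self(), ls, 4, SYNC_POLICY_FIFO);
}
#endif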
/*
 *	ROUTINE:	lock_set_destroy	[exported]
 *
 *	Destroys a lock set.  This call will only succeed if the
 *	specified task is the SAME task name specified at the lock set's
 *	creation.
 *
 *	NOTES:
 *	- All threads currently blocked on the lock set's ulocks are awoken.
 *	- These threads will return with the KERN_LOCK_SET_DESTROYED error.
 */
kern_return_t
lock_set_destroy (task_t task, lock_set_t lock_set)
{
        ulock_t         ulock;
        int             i;

        if (task == TASK_NULL || lock_set == LOCK_SET_NULL)
                return KERN_INVALID_ARGUMENT;

        if (lock_set->owner != task)
                return KERN_INVALID_RIGHT;

        lock_set_lock(lock_set);
        if (!lock_set->active) {
                lock_set_unlock(lock_set);
                return KERN_LOCK_SET_DESTROYED;
        }

        /*
         * Deactivate lock set
         */
        lock_set->active = FALSE;

        /*
         * If a ulock is currently held in the target lock set:
         *
         * 1) Wakeup all threads blocked on the ulock (if any).  Threads
         *    may be blocked waiting normally, or waiting for a handoff.
         *    Blocked threads will return with KERN_LOCK_SET_DESTROYED.
         *
         * 2) ulock ownership is cleared.
         *    The thread currently holding the ulock is revoked of its
         *    ownership.
         */
        for (i = 0; i < lock_set->n_ulocks; i++) {
                ulock = &lock_set->ulock_list[i];

                ulock_lock(ulock);

                if (ulock->accept_wait) {
                        ulock->accept_wait = FALSE;
                        wait_queue_wakeup64_one(&ulock->wait_queue,
                                                LOCK_SET_HANDOFF,
                                                THREAD_RESTART);
                }

                if (ulock->holder) {
                        if (ulock->blocked) {
                                ulock->blocked = FALSE;
                                wait_queue_wakeup64_all(&ulock->wait_queue,
                                                        LOCK_SET_EVENT,
                                                        THREAD_RESTART);
                        }

                        if (ulock->ho_wait) {
                                ulock->ho_wait = FALSE;
                                wait_queue_wakeup64_one(&ulock->wait_queue,
                                                        LOCK_SET_HANDOFF,
                                                        THREAD_RESTART);
                        }

                        ulock_ownership_clear(ulock);
                }

                ulock_unlock(ulock);
        }

        lock_set_unlock(lock_set);
        lock_set_ownership_clear(lock_set, task);

        /*
         * Deallocate
         *
         * Drop the lock set reference, which in turn destroys the
         * lock set structure if the reference count goes to zero.
         */
        ipc_port_dealloc_kernel(lock_set->port);
        lock_set_dereference(lock_set);

        return KERN_SUCCESS;
}
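/*
 * Illustrative sketch (not part of this file): tearing down a lock set
 * from user space.  Only the owning task's destroy succeeds; a thread
 * still blocked on one of the set's ulocks is woken and sees
 * KERN_LOCK_SET_DESTROYED.  The example_* helpers are hypothetical and
 * assume the MIG-generated user-level prototypes for these routines.
 */
#if 0   /* user-space example only -- never compiled into the kernel */
#include <mach/mach.h>

static void
example_teardown(lock_set_t ls)
{
        kern_return_t kr = lock_set_destroy(mach_task_self(), ls);

        if (kr == KERN_INVALID_RIGHT) {
                /* Some other task created this lock set; we may not destroy it. */
        }
}

/* A waiter blocked in lock_acquire() observes the destruction: */
static kern_return_t
example_waiter(lock_set_t ls)
{
        kern_return_t kr = lock_acquire(ls, 0);

        if (kr == KERN_LOCK_SET_DESTROYED) {
                /* The set vanished while we were blocked; give up. */
        }
        return kr;
}
#endif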
kern_return_t
lock_acquire (lock_set_t lock_set, int lock_id)
{
        ulock_t ulock;

        if (lock_set == LOCK_SET_NULL)
                return KERN_INVALID_ARGUMENT;

        if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
                return KERN_INVALID_ARGUMENT;

 retry:
        lock_set_lock(lock_set);
        if (!lock_set->active) {
                lock_set_unlock(lock_set);
                return KERN_LOCK_SET_DESTROYED;
        }

        ulock = (ulock_t) &lock_set->ulock_list[lock_id];
        ulock_lock(ulock);
        lock_set_unlock(lock_set);

        /*
         * Block the current thread if the lock is already held.
         */
        if (ulock->holder != THR_ACT_NULL) {
                int wait_result;

                if (ulock->holder == current_act()) {
                        ulock_unlock(ulock);
                        return KERN_LOCK_OWNED_SELF;
                }

                ulock->blocked = TRUE;
                wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
                                                       LOCK_SET_EVENT,
                                                       THREAD_ABORTSAFE);
                ulock_unlock(ulock);

                /*
                 * Block - Wait for lock to become available.
                 */
                if (wait_result == THREAD_WAITING)
                        wait_result = thread_block(THREAD_CONTINUE_NULL);

                /*
                 * Check the result status:
                 *
                 * Check to see why thread was woken up.  In all cases, we
                 * already have been removed from the queue.
                 */
                switch (wait_result) {
                case THREAD_AWAKENED:
                        /* lock transitioned from old locker to us */
                        /* he already made us owner */
                        return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
                                                   KERN_SUCCESS;

                case THREAD_INTERRUPTED:
                        return KERN_ABORTED;

                case THREAD_RESTART:
                        goto retry;     /* probably a dead lock_set */

                default:
                        panic("lock_acquire\n");
                }
        }

        /*
         * Assign lock ownership
         */
        ulock_ownership_set(ulock, current_thread());
        ulock_unlock(ulock);

        return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}
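/*
 * Illustrative sketch (not part of this file): the basic user-space
 * acquire/release pattern, distinguishing the "already mine" and
 * "unstable" return codes.  The example_with_lock() helper is
 * hypothetical and assumes the MIG-generated user-level prototypes for
 * lock_acquire()/lock_release().
 */
#if 0   /* user-space example only -- never compiled into the kernel */
#include <mach/mach.h>

static kern_return_t
example_with_lock(lock_set_t ls, int lock_id, void (*critical)(void))
{
        kern_return_t kr = lock_acquire(ls, lock_id);   /* may block */

        switch (kr) {
        case KERN_SUCCESS:
                break;
        case KERN_LOCK_OWNED_SELF:
                /* We already hold this ulock; don't release it twice. */
                return kr;
        case KERN_LOCK_UNSTABLE:
                /* Previous holder died mid-update; data may be inconsistent. */
                break;
        default:
                return kr;      /* destroyed set, bad id, aborted wait, ... */
        }

        critical();
        return lock_release(ls, lock_id);
}
#endif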
kern_return_t
lock_release (lock_set_t lock_set, int lock_id)
{
        ulock_t ulock;

        if (lock_set == LOCK_SET_NULL)
                return KERN_INVALID_ARGUMENT;

        if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
                return KERN_INVALID_ARGUMENT;

        ulock = (ulock_t) &lock_set->ulock_list[lock_id];

        return (lock_release_internal(ulock, current_act()));
}
kern_return_t
lock_try (lock_set_t lock_set, int lock_id)
{
        ulock_t ulock;

        if (lock_set == LOCK_SET_NULL)
                return KERN_INVALID_ARGUMENT;

        if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
                return KERN_INVALID_ARGUMENT;

        lock_set_lock(lock_set);
        if (!lock_set->active) {
                lock_set_unlock(lock_set);
                return KERN_LOCK_SET_DESTROYED;
        }

        ulock = (ulock_t) &lock_set->ulock_list[lock_id];
        ulock_lock(ulock);
        lock_set_unlock(lock_set);

        /*
         * If the lock is already owned, we return without blocking.
         *
         * An ownership status is returned to inform the caller as to
         * whether it already holds the lock or another thread does.
         */
        if (ulock->holder != THR_ACT_NULL) {
                lock_set_unlock(lock_set);

                if (ulock->holder == current_act()) {
                        ulock_unlock(ulock);
                        return KERN_LOCK_OWNED_SELF;
                }

                ulock_unlock(ulock);
                return KERN_LOCK_OWNED;
        }

        /*
         * Add the ulock to the lock set's held_ulocks list.
         */
        ulock_ownership_set(ulock, current_thread());
        ulock_unlock(ulock);

        return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}
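/*
 * Illustrative sketch (not part of this file): non-blocking acquisition
 * with lock_try().  Unlike lock_acquire(), a held lock is reported back
 * immediately as KERN_LOCK_OWNED (or KERN_LOCK_OWNED_SELF) instead of
 * blocking the caller.  The example_try_then_work() helper is
 * hypothetical and assumes the MIG-generated user-level prototype.
 */
#if 0   /* user-space example only -- never compiled into the kernel */
#include <mach/mach.h>

static boolean_t
example_try_then_work(lock_set_t ls, int lock_id, void (*work)(void))
{
        kern_return_t kr = lock_try(ls, lock_id);

        if (kr != KERN_SUCCESS && kr != KERN_LOCK_UNSTABLE)
                return FALSE;           /* somebody else owns it; do not wait */

        work();
        (void) lock_release(ls, lock_id);
        return TRUE;
}
#endif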
kern_return_t
lock_make_stable (lock_set_t lock_set, int lock_id)
{
        ulock_t ulock;

        if (lock_set == LOCK_SET_NULL)
                return KERN_INVALID_ARGUMENT;

        if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
                return KERN_INVALID_ARGUMENT;

        lock_set_lock(lock_set);
        if (!lock_set->active) {
                lock_set_unlock(lock_set);
                return KERN_LOCK_SET_DESTROYED;
        }

        ulock = (ulock_t) &lock_set->ulock_list[lock_id];
        ulock_lock(ulock);
        lock_set_unlock(lock_set);

        if (ulock->holder != current_act()) {
                ulock_unlock(ulock);
                return KERN_INVALID_RIGHT;
        }

        ulock->unstable = FALSE;
        ulock_unlock(ulock);

        return KERN_SUCCESS;
}
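/*
 * Illustrative sketch (not part of this file): recovering from an
 * unstable ulock.  When a holder terminates without releasing, the next
 * acquirer is handed the lock with KERN_LOCK_UNSTABLE; after repairing
 * the protected data it can call lock_make_stable() so later acquirers
 * see KERN_SUCCESS again.  The example_* helper is hypothetical and
 * assumes the MIG-generated user-level prototypes.
 */
#if 0   /* user-space example only -- never compiled into the kernel */
#include <mach/mach.h>

static kern_return_t
example_acquire_with_recovery(lock_set_t ls, int lock_id,
                              void (*repair_shared_state)(void))
{
        kern_return_t kr = lock_acquire(ls, lock_id);

        if (kr == KERN_LOCK_UNSTABLE) {
                /* Previous holder died mid-critical-section: fix things up. */
                repair_shared_state();
                kr = lock_make_stable(ls, lock_id);
        }
        return kr;
}
#endif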
/*
 *	ROUTINE:	lock_make_unstable	[internal]
 *
 *	Marks the lock as unstable.
 *
 *	NOTES:
 *	- All future acquisitions of the lock will return with a
 *	  KERN_LOCK_UNSTABLE status, until the lock is made stable again.
 */
kern_return_t
lock_make_unstable (ulock_t ulock, thread_act_t thr_act)
{
        lock_set_t      lock_set;

        lock_set = ulock->lock_set;
        lock_set_lock(lock_set);
        if (!lock_set->active) {
                lock_set_unlock(lock_set);
                return KERN_LOCK_SET_DESTROYED;
        }

        ulock_lock(ulock);
        lock_set_unlock(lock_set);

        if (ulock->holder != thr_act) {
                ulock_unlock(ulock);
                return KERN_INVALID_RIGHT;
        }

        ulock->unstable = TRUE;
        ulock_unlock(ulock);

        return KERN_SUCCESS;
}
/*
 *	ROUTINE:	lock_release_internal	[internal]
 *
 *	Releases the ulock.
 *	If any threads are blocked waiting for the ulock, one is woken-up.
 */
kern_return_t
lock_release_internal (ulock_t ulock, thread_act_t thr_act)
{
        lock_set_t      lock_set;

        if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
                return KERN_INVALID_ARGUMENT;

        lock_set_lock(lock_set);
        if (!lock_set->active) {
                lock_set_unlock(lock_set);
                return KERN_LOCK_SET_DESTROYED;
        }
        ulock_lock(ulock);
        lock_set_unlock(lock_set);

        if (ulock->holder != thr_act) {
                ulock_unlock(ulock);
                return KERN_INVALID_RIGHT;
        }

        /*
         * If we have a hint that threads might be waiting,
         * try to transfer the lock ownership to a waiting thread
         * and wake it up.
         */
        if (ulock->blocked) {
                wait_queue_t    wq = &ulock->wait_queue;
                thread_t        thread;
                spl_t           s;

                s = splsched();
                wait_queue_lock(wq);
                thread = wait_queue_wakeup64_identity_locked(wq,
                                                             LOCK_SET_EVENT,
                                                             THREAD_AWAKENED,
                                                             TRUE);
                /* wait_queue now unlocked, thread locked */

                if (thread != THREAD_NULL) {
                        /*
                         * JMM - These ownership transfer macros have a
                         * locking/race problem.  To keep the thread from
                         * changing states on us (nullifying the ownership
                         * assignment) we need to keep the thread locked
                         * during the assignment.  But we can't because the
                         * macros take an activation lock, which is a mutex.
                         * Since this code was already broken before I got
                         * here, I will leave it for now.
                         */
                        thread_unlock(thread);
                        splx(s);

                        /*
                         * Transfer ulock ownership
                         * from the current thread to the acquisition thread.
                         */
                        ulock_ownership_clear(ulock);
                        ulock_ownership_set(ulock, thread);
                        ulock_unlock(ulock);

                        return KERN_SUCCESS;
                } else {
                        ulock->blocked = FALSE;
                        splx(s);
                }
        }

        /*
         * Disown ulock
         */
        ulock_ownership_clear(ulock);
        ulock_unlock(ulock);

        return KERN_SUCCESS;
}
kern_return_t
lock_handoff (lock_set_t lock_set, int lock_id)
{
        ulock_t ulock;
        int     wait_result;

        if (lock_set == LOCK_SET_NULL)
                return KERN_INVALID_ARGUMENT;

        if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
                return KERN_INVALID_ARGUMENT;

 retry:
        lock_set_lock(lock_set);

        if (!lock_set->active) {
                lock_set_unlock(lock_set);
                return KERN_LOCK_SET_DESTROYED;
        }

        ulock = (ulock_t) &lock_set->ulock_list[lock_id];
        ulock_lock(ulock);
        lock_set_unlock(lock_set);

        if (ulock->holder != current_act()) {
                ulock_unlock(ulock);
                return KERN_INVALID_RIGHT;
        }

        /*
         * If the accepting thread (the receiver) is already waiting
         * to accept the lock from the handoff thread (the sender),
         * then perform the hand-off now.
         */
        if (ulock->accept_wait) {
                wait_queue_t    wq = &ulock->wait_queue;
                thread_t        thread;
                spl_t           s;

                /*
                 * See who the lucky devil is, if he is still there waiting.
                 */
                s = splsched();
                wait_queue_lock(wq);
                thread = wait_queue_wakeup64_identity_locked(
                                wq,
                                LOCK_SET_HANDOFF,
                                THREAD_AWAKENED,
                                TRUE);
                /* wait queue unlocked, thread locked */

                /*
                 * Transfer lock ownership
                 */
                if (thread != THREAD_NULL) {
                        /*
                         * JMM - These ownership transfer macros have a
                         * locking/race problem.  To keep the thread from
                         * changing states on us (nullifying the ownership
                         * assignment) we need to keep the thread locked
                         * during the assignment.  But we can't because the
                         * macros take an activation lock, which is a mutex.
                         * Since this code was already broken before I got
                         * here, I will leave it for now.
                         */
                        thread_unlock(thread);
                        splx(s);

                        ulock_ownership_clear(ulock);
                        ulock_ownership_set(ulock, thread);
                        ulock->accept_wait = FALSE;
                        ulock_unlock(ulock);
                        return KERN_SUCCESS;
                } else {
                        /*
                         * OOPS.  The accepting thread must have been aborted
                         * and is racing back to clear the flag that says it is
                         * waiting for an accept.  He will clear it when we
                         * release the lock, so just fall thru and wait for
                         * the next accept thread (that's the way it is
                         * specified).
                         */
                        splx(s);
                }
        }

        /*
         * Indicate that there is a hand-off thread waiting, and then wait
         * for an accepting thread.
         */
        ulock->ho_wait = TRUE;
        wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
                                               LOCK_SET_HANDOFF,
                                               THREAD_ABORTSAFE);
        ulock_unlock(ulock);

        if (wait_result == THREAD_WAITING)
                wait_result = thread_block(THREAD_CONTINUE_NULL);

        /*
         * If the thread was woken-up via some action other than
         * lock_handoff_accept or lock_set_destroy (i.e. thread_terminate),
         * then we need to clear the ulock's handoff state.
         */
        switch (wait_result) {

        case THREAD_AWAKENED:
                return KERN_SUCCESS;

        case THREAD_INTERRUPTED:
                ulock_lock(ulock);
                assert(ulock->holder == current_act());
                ulock->ho_wait = FALSE;
                ulock_unlock(ulock);
                return KERN_ABORTED;

        case THREAD_RESTART:
                goto retry;

        default:
                panic("lock_handoff");
        }
}
kern_return_t
lock_handoff_accept (lock_set_t lock_set, int lock_id)
{
        ulock_t ulock;
        int     wait_result;

        if (lock_set == LOCK_SET_NULL)
                return KERN_INVALID_ARGUMENT;

        if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
                return KERN_INVALID_ARGUMENT;

 retry:
        lock_set_lock(lock_set);
        if (!lock_set->active) {
                lock_set_unlock(lock_set);
                return KERN_LOCK_SET_DESTROYED;
        }

        ulock = (ulock_t) &lock_set->ulock_list[lock_id];
        ulock_lock(ulock);
        lock_set_unlock(lock_set);

        /*
         * If there is another accepting thread that beat us, just
         * return with an error.
         */
        if (ulock->accept_wait) {
                ulock_unlock(ulock);
                return KERN_ALREADY_WAITING;
        }

        if (ulock->holder == current_act()) {
                ulock_unlock(ulock);
                return KERN_LOCK_OWNED_SELF;
        }

        /*
         * If the handoff thread (the sender) is already waiting to
         * hand-off the lock to the accepting thread (the receiver),
         * then perform the hand-off now.
         */
        if (ulock->ho_wait) {
                wait_queue_t    wq = &ulock->wait_queue;
                thread_t        thread;

                /*
                 * See who the lucky devil is, if he is still there waiting.
                 */
                assert(ulock->holder != THR_ACT_NULL);
                thread = ulock->holder->thread;

                if (wait_queue_wakeup64_thread(wq,
                                               LOCK_SET_HANDOFF,
                                               thread,
                                               THREAD_AWAKENED) == KERN_SUCCESS) {
                        /*
                         * Holder thread was still waiting to give it
                         * away.  Take over ownership.
                         */
                        ulock_ownership_clear(ulock);
                        ulock_ownership_set(ulock, current_thread());
                        ulock->ho_wait = FALSE;
                        ulock_unlock(ulock);

                        return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
                                                   KERN_SUCCESS;
                }

                /*
                 * OOPS.  The owner was aborted out of the handoff.
                 * He will clear his own flag when he gets back.
                 * In the meantime, we will wait as if we didn't
                 * even see his flag (by falling thru).
                 */
        }

        ulock->accept_wait = TRUE;
        wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
                                               LOCK_SET_HANDOFF,
                                               THREAD_ABORTSAFE);
        ulock_unlock(ulock);

        if (wait_result == THREAD_WAITING)
                wait_result = thread_block(THREAD_CONTINUE_NULL);

        /*
         * If the thread was woken-up via some action other than
         * lock_handoff_accept or lock_set_destroy (i.e. thread_terminate),
         * then we need to clear the ulock's handoff state.
         */
        switch (wait_result) {

        case THREAD_AWAKENED:
                return KERN_SUCCESS;

        case THREAD_INTERRUPTED:
                ulock_lock(ulock);
                ulock->accept_wait = FALSE;
                ulock_unlock(ulock);
                return KERN_ABORTED;

        case THREAD_RESTART:
                goto retry;

        default:
                panic("lock_handoff_accept");
        }
}
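/*
 * Illustrative sketch (not part of this file): a directed hand-off
 * between two cooperating user-space threads.  The current holder parks
 * in lock_handoff() until some thread calls lock_handoff_accept() on the
 * same ulock (whichever side arrives first waits for the other).  The
 * example_* helpers are hypothetical and assume the MIG-generated
 * user-level prototypes for both calls.
 */
#if 0   /* user-space example only -- never compiled into the kernel */
#include <mach/mach.h>

/* Sender: currently holds ls[lock_id] and wants to pass it on directly. */
static kern_return_t
example_sender(lock_set_t ls, int lock_id)
{
        /* Blocks until an accepting thread shows up. */
        return lock_handoff(ls, lock_id);
}

/* Receiver: blocks until the holder hands the ulock over. */
static kern_return_t
example_receiver(lock_set_t ls, int lock_id)
{
        kern_return_t kr = lock_handoff_accept(ls, lock_id);

        if (kr == KERN_ALREADY_WAITING) {
                /* Another accepting thread beat us to it. */
        }
        return kr;
}
#endif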
/*
 *	Routine:	lock_set_reference
 *
 *	Take out a reference on a lock set.  This keeps the data structure
 *	in existence (but the lock set may be deactivated).
 */
void
lock_set_reference(lock_set_t lock_set)
{
        lock_set_lock(lock_set);
        lock_set->ref_count++;
        lock_set_unlock(lock_set);
}
/*
 *	Routine:	lock_set_dereference
 *
 *	Release a reference on a lock set.  If this is the last reference,
 *	the lock set data structure is deallocated.
 */
void
lock_set_dereference(lock_set_t lock_set)
{
        int     ref_count;
        int     size;

        lock_set_lock(lock_set);
        ref_count = --(lock_set->ref_count);
        lock_set_unlock(lock_set);

        if (ref_count == 0) {
                size = sizeof(struct lock_set) +
                        (sizeof(struct ulock) * (lock_set->n_ulocks - 1));
                kfree((vm_offset_t) lock_set, size);
        }
}