/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	File:	kern/sync_lock.c
 *	Author:	Joseph CaraDonna
 *
 *	Contains RT distributed lock synchronization services.
 */
#include <kern/etap_macros.h>
#include <kern/misc_protos.h>
#include <kern/sync_lock.h>
#include <kern/sched_prim.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_sync.h>
#include <kern/etap_macros.h>
#include <kern/thread.h>
#include <kern/task.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
/*
 *	Ulock ownership MACROS
 *
 *	Assumes: ulock internal lock is held
 */

#define ulock_ownership_set(ul, th)					\
	MACRO_BEGIN							\
	thread_act_t _th_act;						\
	_th_act = (th)->top_act;					\
	act_lock(_th_act);						\
	enqueue (&_th_act->held_ulocks, (queue_entry_t) (ul));		\
	act_unlock(_th_act);						\
	(ul)->holder = _th_act;						\
	MACRO_END

#define ulock_ownership_clear(ul)					\
	MACRO_BEGIN							\
	thread_act_t _th_act;						\
	_th_act = (ul)->holder;						\
	if (_th_act->active) {						\
		act_lock(_th_act);					\
		remqueue(&_th_act->held_ulocks,				\
			 (queue_entry_t) (ul));				\
		act_unlock(_th_act);					\
	} else {							\
		remqueue(&_th_act->held_ulocks,				\
			 (queue_entry_t) (ul));				\
	}								\
	(ul)->holder = THR_ACT_NULL;					\
	MACRO_END
/*
 *	Lock set ownership MACROS
 */

#define lock_set_ownership_set(ls, t)					\
	MACRO_BEGIN							\
	task_lock((t));							\
	enqueue_head(&(t)->lock_set_list, (queue_entry_t) (ls));	\
	(t)->lock_sets_owned++;						\
	task_unlock((t));						\
	(ls)->owner = (t);						\
	MACRO_END

#define lock_set_ownership_clear(ls, t)					\
	MACRO_BEGIN							\
	task_lock((t));							\
	remqueue(&(t)->lock_set_list, (queue_entry_t) (ls));		\
	(t)->lock_sets_owned--;						\
	task_unlock((t));						\
	MACRO_END
unsigned int		lock_set_event;
#define LOCK_SET_EVENT	((event_t)&lock_set_event)

unsigned int		lock_set_handoff;
#define LOCK_SET_HANDOFF ((event_t)&lock_set_handoff)

/*
 *	ROUTINE:	lock_set_init		[private]
 *
 *	Initialize the lock_set subsystem.
 *
 *	For now, we don't have anything to do here.
 */
void
lock_set_init(void)
{
	return;
}

/*
 *	ROUTINE:	lock_set_create		[exported]
 *
 *	Creates a lock set.
 *	The port representing the lock set is returned as a parameter.
 */
kern_return_t
lock_set_create (
	task_t		task,
	lock_set_t	*new_lock_set,
	int		n_ulocks,
	int		policy)
{
	lock_set_t	lock_set = LOCK_SET_NULL;
	ulock_t		ulock;
	int		size;
	int		x;

	*new_lock_set = LOCK_SET_NULL;

	if (task == TASK_NULL || n_ulocks <= 0 || policy > SYNC_POLICY_MAX)
		return KERN_INVALID_ARGUMENT;

	size = sizeof(struct lock_set) + (sizeof(struct ulock) * (n_ulocks-1));
	lock_set = (lock_set_t) kalloc (size);

	if (lock_set == LOCK_SET_NULL)
		return KERN_RESOURCE_SHORTAGE;

	lock_set_lock_init(lock_set);
	lock_set->n_ulocks = n_ulocks;
	lock_set->ref_count = 1;

	/*
	 *  Create and initialize the lock set port
	 */
	lock_set->port = ipc_port_alloc_kernel();
	if (lock_set->port == IP_NULL) {
		/* This will deallocate the lock set */
		lock_set_dereference(lock_set);
		return KERN_RESOURCE_SHORTAGE;
	}

	ipc_kobject_set (lock_set->port,
			 (ipc_kobject_t) lock_set,
			 IKOT_LOCK_SET);

	/*
	 *  Initialize each ulock in the lock set
	 */
	for (x = 0; x < n_ulocks; x++) {
		ulock = (ulock_t) &lock_set->ulock_list[x];
		ulock_lock_init(ulock);
		ulock->lock_set = lock_set;
		ulock->holder	= THR_ACT_NULL;
		ulock->blocked	= FALSE;
		ulock->unstable	= FALSE;
		ulock->ho_wait	= FALSE;
		wait_queue_init(&ulock->wait_queue, policy);
	}

	lock_set_ownership_set(lock_set, task);

	lock_set->active = TRUE;
	*new_lock_set = lock_set;

	return KERN_SUCCESS;
}

/*
 *	ROUTINE:	lock_set_destroy	[exported]
 *
 *	Destroys a lock set.  This call will only succeed if the
 *	specified task is the SAME task name specified at the lock set's
 *	creation.
 *
 *	NOTES:
 *	- All threads currently blocked on the lock set's ulocks are awoken.
 *	- These threads will return with the KERN_LOCK_SET_DESTROYED error.
 */
kern_return_t
lock_set_destroy (task_t task, lock_set_t lock_set)
{
	ulock_t		ulock;
	int		i;

	if (task == TASK_NULL || lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_set->owner != task)
		return KERN_INVALID_RIGHT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	/*
	 *  Deactivate lock set
	 */
	lock_set->active = FALSE;

	/*
	 *  If a ulock is currently held in the target lock set:
	 *
	 *  1) Wakeup all threads blocked on the ulock (if any).  Threads
	 *     may be blocked waiting normally, or waiting for a handoff.
	 *     Blocked threads will return with KERN_LOCK_SET_DESTROYED.
	 *
	 *  2) ulock ownership is cleared.
	 *     The thread currently holding the ulock is revoked of its
	 *     ownership.
	 */
	for (i = 0; i < lock_set->n_ulocks; i++) {
		ulock = &lock_set->ulock_list[i];

		ulock_lock(ulock);

		if (ulock->accept_wait) {
			ulock->accept_wait = FALSE;
			wait_queue_wakeup_one(&ulock->wait_queue,
					      LOCK_SET_HANDOFF,
					      THREAD_RESTART);
		}

		if (ulock->holder) {
			if (ulock->blocked) {
				ulock->blocked = FALSE;
				wait_queue_wakeup_all(&ulock->wait_queue,
						      LOCK_SET_EVENT,
						      THREAD_RESTART);
			}
			if (ulock->ho_wait) {
				ulock->ho_wait = FALSE;
				wait_queue_wakeup_one(&ulock->wait_queue,
						      LOCK_SET_HANDOFF,
						      THREAD_RESTART);
			}
			ulock_ownership_clear(ulock);
		}

		ulock_unlock(ulock);
	}

	lock_set_unlock(lock_set);
	lock_set_ownership_clear(lock_set, task);

	/*
	 *  Deallocate
	 *
	 *  Drop the lock set reference, which in turn destroys the
	 *  lock set structure if the reference count goes to zero.
	 */
	ipc_port_dealloc_kernel(lock_set->port);
	lock_set_dereference(lock_set);

	return KERN_SUCCESS;
}

kern_return_t
lock_acquire (lock_set_t lock_set, int lock_id)
{
	ulock_t	 ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 *  Block the current thread if the lock is already held.
	 */

	if (ulock->holder != THR_ACT_NULL) {
		int wait_result;

		/* XXX - redundant: the lock set was already unlocked above */
		lock_set_unlock(lock_set);

		if (ulock->holder == current_act()) {
			ulock_unlock(ulock);
			return KERN_LOCK_OWNED_SELF;
		}

		ulock->blocked = TRUE;
		(void)wait_queue_assert_wait(&ulock->wait_queue,
					     LOCK_SET_EVENT,
					     THREAD_ABORTSAFE);
		ulock_unlock(ulock);

		/*
		 *  Block - Wait for lock to become available.
		 */

		wait_result = thread_block((void (*)(void))0);

		/*
		 *  Check the result status:
		 *
		 *  Check to see why thread was woken up.  In all cases, we
		 *  already have been removed from the queue.
		 */
		switch (wait_result) {
		case THREAD_AWAKENED:
			/* lock transitioned from old locker to us */
			/* he already made us owner */
			return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
						   KERN_SUCCESS;

		case THREAD_INTERRUPTED:
			return KERN_ABORTED;

		case THREAD_RESTART:
			goto retry;	/* probably a dead lock_set */

		default:
			panic("lock_acquire\n");
		}
	}

	/*
	 *  Assign lock ownership
	 */
	ulock_ownership_set(ulock, current_thread());
	ulock_unlock(ulock);

	return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}

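/*
 * Illustrative sketch, not part of the original file: the basic
 * acquire/release pairing for one ulock in a set, including the status
 * codes lock_acquire() can return.  lock_release() is defined below;
 * the name example_acquire_release is hypothetical.
 */
#if 0
static kern_return_t
example_acquire_release(lock_set_t lock_set, int lock_id)
{
	kern_return_t kr;

	kr = lock_acquire(lock_set, lock_id);
	switch (kr) {
	case KERN_LOCK_UNSTABLE:	/* previous holder died; data may be inconsistent */
	case KERN_SUCCESS:
		/* ... critical section ... */
		return lock_release(lock_set, lock_id);

	case KERN_LOCK_OWNED_SELF:	/* calling thread already holds this ulock */
	default:
		return kr;
	}
}
#endif
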
kern_return_t
lock_release (lock_set_t lock_set, int lock_id)
{
	ulock_t	 ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];

	return (lock_release_internal(ulock, current_act()));
}

kern_return_t
lock_try (lock_set_t lock_set, int lock_id)
{
	ulock_t	 ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 *  If the lock is already owned, we return without blocking.
	 *
	 *  An ownership status is returned to inform the caller as to
	 *  whether it already holds the lock or another thread does.
	 */

	if (ulock->holder != THR_ACT_NULL) {
		lock_set_unlock(lock_set);

		if (ulock->holder == current_act()) {
			ulock_unlock(ulock);
			return KERN_LOCK_OWNED_SELF;
		}

		ulock_unlock(ulock);
		return KERN_LOCK_OWNED;
	}

	/*
	 *  Add the ulock to the lock set's held_ulocks list.
	 */
	ulock_ownership_set(ulock, current_thread());
	ulock_unlock(ulock);

	return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}

kern_return_t
lock_make_stable (lock_set_t lock_set, int lock_id)
{
	ulock_t	 ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != current_act()) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	ulock->unstable = FALSE;
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}

/*
 *	ROUTINE:	lock_make_unstable	[internal]
 *
 *	Marks the lock as unstable.
 *
 *	NOTES:
 *	- All future acquisitions of the lock will return with a
 *	  KERN_LOCK_UNSTABLE status, until the lock is made stable again.
 */
kern_return_t
lock_make_unstable (ulock_t ulock, thread_act_t thr_act)
{
	lock_set_t	lock_set;

	lock_set = ulock->lock_set;
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != thr_act) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	ulock->unstable = TRUE;
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}

/*
 *	ROUTINE:	lock_release_internal	[internal]
 *
 *	Releases the ulock.
 *	If any threads are blocked waiting for the ulock, one is woken-up.
 */
kern_return_t
lock_release_internal (ulock_t ulock, thread_act_t thr_act)
{
	lock_set_t	lock_set;

	if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != thr_act) {
		ulock_unlock(ulock);
		lock_set_unlock(lock_set);
		return KERN_INVALID_RIGHT;
	}

	/*
	 *  If we have a hint that threads might be waiting,
	 *  try to transfer the lock ownership to a waiting thread
	 *  and wake it up.
	 */
	if (ulock->blocked) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	thread;
		spl_t		s;

		s = splsched();
		wait_queue_lock(wq);
		thread = wait_queue_wakeup_identity_locked(wq,
							   LOCK_SET_EVENT,
							   THREAD_AWAKENED,
							   TRUE);
		/* wait_queue now unlocked, thread locked */

		if (thread != THREAD_NULL) {
			/*
			 * JMM - These ownership transfer macros have a
			 * locking/race problem.  To keep the thread from
			 * changing states on us (nullifying the ownership
			 * assignment) we need to keep the thread locked
			 * during the assignment.  But we can't because the
			 * macros take an activation lock, which is a mutex.
			 * Since this code was already broken before I got
			 * here, I will leave it for now.
			 */
			thread_unlock(thread);
			splx(s);

			/*
			 *  Transfer ulock ownership
			 *  from the current thread to the acquisition thread.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, thread);
			ulock_unlock(ulock);

			return KERN_SUCCESS;
		} else {
			ulock->blocked = FALSE;
			splx(s);
		}
	}

	/*
	 *  Disown ulock
	 */
	ulock_ownership_clear(ulock);
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}

kern_return_t
lock_handoff (lock_set_t lock_set, int lock_id)
{
	ulock_t	 ulock;
	int	 wait_result;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);

	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != current_act()) {
		ulock_unlock(ulock);
		lock_set_unlock(lock_set);
		return KERN_INVALID_RIGHT;
	}

	/*
	 *  If the accepting thread (the receiver) is already waiting
	 *  to accept the lock from the handoff thread (the sender),
	 *  then perform the hand-off now.
	 */

	if (ulock->accept_wait) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	thread;
		spl_t		s;

		/*
		 *  See who the lucky devil is, if he is still there waiting.
		 */
		s = splsched();
		wait_queue_lock(wq);
		thread = wait_queue_wakeup_identity_locked(
					wq,
					LOCK_SET_HANDOFF,
					THREAD_AWAKENED,
					TRUE);
		/* wait queue unlocked, thread locked */

		/*
		 *  Transfer lock ownership
		 */
		if (thread != THREAD_NULL) {
			/*
			 * JMM - These ownership transfer macros have a
			 * locking/race problem.  To keep the thread from
			 * changing states on us (nullifying the ownership
			 * assignment) we need to keep the thread locked
			 * during the assignment.  But we can't because the
			 * macros take an activation lock, which is a mutex.
			 * Since this code was already broken before I got
			 * here, I will leave it for now.
			 */
			thread_unlock(thread);
			splx(s);

			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, thread);
			ulock->accept_wait = FALSE;
			ulock_unlock(ulock);

			return KERN_SUCCESS;
		} else {
			/*
			 *  OOPS.  The accepting thread must have been aborted.
			 *  and is racing back to clear the flag that says is
			 *  waiting for an accept.  He will clear it when we
			 *  release the lock, so just fall thru and wait for
			 *  the next accept thread (that's the way it is
			 *  specified).
			 */
			splx(s);
		}
	}

	/*
	 *  Indicate that there is a hand-off thread waiting, and then wait
	 *  for an accepting thread.
	 */
	ulock->ho_wait = TRUE;
	(void)wait_queue_assert_wait(&ulock->wait_queue,
				     LOCK_SET_HANDOFF,
				     THREAD_ABORTSAFE);
	ulock_unlock(ulock);

	ETAP_SET_REASON(current_thread(), BLOCKED_ON_LOCK_HANDOFF);
	wait_result = thread_block((void (*)(void))0);

	/*
	 *  If the thread was woken-up via some action other than
	 *  lock_handoff_accept or lock_set_destroy (i.e. thread_terminate),
	 *  then we need to clear the ulock's handoff state.
	 */
	switch (wait_result) {

	case THREAD_AWAKENED:
		return KERN_SUCCESS;

	case THREAD_INTERRUPTED:
		ulock_lock(ulock);
		assert(ulock->holder == current_act());
		ulock->ho_wait = FALSE;
		ulock_unlock(ulock);
		return KERN_ABORTED;

	case THREAD_RESTART:
		goto retry;

	default:
		panic("lock_handoff");
	}
}

kern_return_t
lock_handoff_accept (lock_set_t lock_set, int lock_id)
{
	ulock_t	 ulock;
	int	 wait_result;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 *  If there is another accepting thread that beat us, just
	 *  return with an error.
	 */
	if (ulock->accept_wait) {
		ulock_unlock(ulock);
		return KERN_ALREADY_WAITING;
	}

	if (ulock->holder == current_act()) {
		ulock_unlock(ulock);
		return KERN_LOCK_OWNED_SELF;
	}

	/*
	 *  If the handoff thread (the sender) is already waiting to
	 *  hand-off the lock to the accepting thread (the receiver),
	 *  then perform the hand-off now.
	 */
	if (ulock->ho_wait) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	thread;

		/*
		 *  See who the lucky devil is, if he is still there waiting.
		 */
		assert(ulock->holder != THR_ACT_NULL);
		thread = ulock->holder->thread;

		if (wait_queue_wakeup_thread(wq,
					     LOCK_SET_HANDOFF,
					     thread,
					     THREAD_AWAKENED) == KERN_SUCCESS) {
			/*
			 *  Holder thread was still waiting to give it
			 *  away.  Take over ownership.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, current_thread());
			ulock->ho_wait = FALSE;
			ulock_unlock(ulock);

			return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
						   KERN_SUCCESS;
		}

		/*
		 *  OOPS.  The owner was aborted out of the handoff.
		 *  He will clear his own flag when he gets back.
		 *  In the meantime, we will wait as if we didn't
		 *  even see his flag (by falling thru).
		 */
	}

	ulock->accept_wait = TRUE;
	(void)wait_queue_assert_wait(&ulock->wait_queue,
				     LOCK_SET_HANDOFF,
				     THREAD_ABORTSAFE);
	ulock_unlock(ulock);

	ETAP_SET_REASON(current_thread(), BLOCKED_ON_LOCK_HANDOFF);
	wait_result = thread_block((void (*)(void))0);

	/*
	 *  If the thread was woken-up via some action other than
	 *  lock_handoff_accept or lock_set_destroy (i.e. thread_terminate),
	 *  then we need to clear the ulock's handoff state.
	 */
	switch (wait_result) {

	case THREAD_AWAKENED:
		return KERN_SUCCESS;

	case THREAD_INTERRUPTED:
		ulock_lock(ulock);
		ulock->accept_wait = FALSE;
		ulock_unlock(ulock);
		return KERN_ABORTED;

	case THREAD_RESTART:
		goto retry;

	default:
		panic("lock_handoff_accept");
	}
}

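/*
 * Illustrative sketch, not part of the original file: the two sides of a
 * directed hand-off.  The current holder blocks in lock_handoff() until an
 * accepting thread arrives; the receiver blocks in lock_handoff_accept()
 * until a holder offers the lock.  The example_ functions are hypothetical
 * and would run on different threads.
 */
#if 0
/* Sender: must currently hold lock_id, otherwise KERN_INVALID_RIGHT. */
static kern_return_t
example_handoff_sender(lock_set_t lock_set, int lock_id)
{
	return lock_handoff(lock_set, lock_id);
}

/* Receiver: returns KERN_ALREADY_WAITING if another acceptor got there first. */
static kern_return_t
example_handoff_receiver(lock_set_t lock_set, int lock_id)
{
	kern_return_t kr;

	kr = lock_handoff_accept(lock_set, lock_id);
	if (kr == KERN_SUCCESS || kr == KERN_LOCK_UNSTABLE) {
		/* We are now the holder; release when done. */
		/* ... critical section ... */
		kr = lock_release(lock_set, lock_id);
	}
	return kr;
}
#endif
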
/*
 *	Routine:	lock_set_reference
 *
 *	Take out a reference on a lock set.  This keeps the data structure
 *	in existence (but the lock set may be deactivated).
 */
void
lock_set_reference(lock_set_t lock_set)
{
	lock_set_lock(lock_set);
	lock_set->ref_count++;
	lock_set_unlock(lock_set);
}

/*
 *	Routine:	lock_set_dereference
 *
 *	Release a reference on a lock set.  If this is the last reference,
 *	the lock set data structure is deallocated.
 */
void
lock_set_dereference(lock_set_t lock_set)
{
	int	ref_count;
	int	size;

	lock_set_lock(lock_set);
	ref_count = --(lock_set->ref_count);
	lock_set_unlock(lock_set);

	if (ref_count == 0) {
		size = sizeof(struct lock_set) +
			(sizeof(struct ulock) * (lock_set->n_ulocks - 1));
		kfree((vm_offset_t) lock_set, size);
	}
}