/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 *	File:	kern/sync_lock.c
 *	Author:	Joseph CaraDonna
 *
 *	Contains RT distributed lock synchronization services.
 */
#include <mach/mach_types.h>
#include <mach/lock_set_server.h>
#include <mach/task_server.h>

#include <kern/misc_protos.h>
#include <kern/kalloc.h>
#include <kern/sync_lock.h>
#include <kern/sched_prim.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_sync.h>
#include <kern/thread.h>
#include <kern/task.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
/*
 *	Ulock ownership MACROS
 *
 *	Assumes: ulock internal lock is held
 */

#define ulock_ownership_set(ul, th)				\
	MACRO_BEGIN						\
	thread_mtx_lock(th);					\
	enqueue (&th->held_ulocks, (queue_entry_t) (ul));	\
	thread_mtx_unlock(th);					\
	(ul)->holder = th;					\
	MACRO_END

#define ulock_ownership_clear(ul)				\
	MACRO_BEGIN						\
	thread_t th;						\
	th = (ul)->holder;					\
	if (th->active) {					\
		thread_mtx_lock(th);				\
		remqueue(&th->held_ulocks,			\
			 (queue_entry_t) (ul));			\
		thread_mtx_unlock(th);				\
	} else {						\
		remqueue(&th->held_ulocks,			\
			 (queue_entry_t) (ul));			\
	}							\
	(ul)->holder = THREAD_NULL;				\
	MACRO_END

/*
 *	Lock set ownership MACROS
 */

#define lock_set_ownership_set(ls, t)				\
	MACRO_BEGIN						\
	task_lock((t));						\
	enqueue_head(&(t)->lock_set_list, (queue_entry_t) (ls));\
	(t)->lock_sets_owned++;					\
	task_unlock((t));					\
	(ls)->owner = (t);					\
	MACRO_END

#define lock_set_ownership_clear(ls, t)				\
	MACRO_BEGIN						\
	task_lock((t));						\
	remqueue(&(t)->lock_set_list, (queue_entry_t) (ls));	\
	(t)->lock_sets_owned--;					\
	task_unlock((t));					\
	MACRO_END
unsigned int lock_set_event;
#define LOCK_SET_EVENT CAST_EVENT64_T(&lock_set_event)

unsigned int lock_set_handoff;
#define LOCK_SET_HANDOFF CAST_EVENT64_T(&lock_set_handoff)


lck_attr_t		lock_set_attr;
lck_grp_t		lock_set_grp;
static lck_grp_attr_t	lock_set_grp_attr;

/*
 *	ROUTINE:	lock_set_init		[private]
 *
 *	Initialize the lock_set subsystem.
 */
void
lock_set_init(void)
{
	lck_grp_attr_setdefault(&lock_set_grp_attr);
	lck_grp_init(&lock_set_grp, "lock_set", &lock_set_grp_attr);
	lck_attr_setdefault(&lock_set_attr);
}
/*
 *	ROUTINE:	lock_set_create		[exported]
 *
 *	Creates a lock set.
 *	The port representing the lock set is returned as a parameter.
 */
kern_return_t
lock_set_create (
	task_t		task,
	lock_set_t	*new_lock_set,
	int		n_ulocks,
	int		policy)
{
	lock_set_t	lock_set = LOCK_SET_NULL;
	ulock_t		ulock;
	vm_size_t	size;
	int		x;

	*new_lock_set = LOCK_SET_NULL;

	if (task == TASK_NULL || n_ulocks <= 0 || policy > SYNC_POLICY_MAX)
		return KERN_INVALID_ARGUMENT;

	if ((VM_MAX_ADDRESS - sizeof(struct lock_set))/sizeof(struct ulock) < (unsigned)n_ulocks)
		return KERN_RESOURCE_SHORTAGE;

	size = sizeof(struct lock_set) + (sizeof(struct ulock) * (n_ulocks - 1));
	lock_set = (lock_set_t) kalloc (size);

	if (lock_set == LOCK_SET_NULL)
		return KERN_RESOURCE_SHORTAGE;

	lock_set_lock_init(lock_set);
	lock_set->n_ulocks = n_ulocks;
	lock_set->ref_count = (task == kernel_task) ? 1 : 2; /* one for kernel, one for port */

	/*
	 *  Create and initialize the lock set port
	 */
	lock_set->port = ipc_port_alloc_kernel();
	if (lock_set->port == IP_NULL) {
		kfree(lock_set, size);
		return KERN_RESOURCE_SHORTAGE;
	}

	ipc_kobject_set (lock_set->port,
			 (ipc_kobject_t) lock_set,
			 IKOT_LOCK_SET);

	/*
	 *  Initialize each ulock in the lock set
	 */
	for (x = 0; x < n_ulocks; x++) {
		ulock = (ulock_t) &lock_set->ulock_list[x];
		ulock_lock_init(ulock);
		ulock->lock_set    = lock_set;
		ulock->holder	   = THREAD_NULL;
		ulock->blocked	   = FALSE;
		ulock->unstable	   = FALSE;
		ulock->ho_wait	   = FALSE;
		ulock->accept_wait = FALSE;
		wait_queue_init(&ulock->wait_queue, policy);
	}

	lock_set_ownership_set(lock_set, task);

	lock_set->active = TRUE;
	*new_lock_set = lock_set;

	return KERN_SUCCESS;
}
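
/*
 * Illustrative sketch: creating and destroying a small lock set from kernel
 * code.  The helper name example_lock_set() is hypothetical;
 * lock_set_create()/lock_set_destroy() are the routines defined in this
 * file, and SYNC_POLICY_FIFO comes from <mach/sync_policy.h>.
 */
#if 0
static kern_return_t
example_lock_set(task_t task)
{
	lock_set_t	lock_set;
	kern_return_t	kr;

	/* a lock set of 4 ulocks with FIFO wakeup ordering */
	kr = lock_set_create(task, &lock_set, 4, SYNC_POLICY_FIFO);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... hand the lock set (or its port) out and use it ... */

	/* only the owning task may destroy the set */
	return lock_set_destroy(task, lock_set);
}
#endif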
/*
 *	ROUTINE:	lock_set_destroy	[exported]
 *
 *	Destroys a lock set.  This call will only succeed if the
 *	specified task is the same task that was specified at the lock set's
 *	creation.
 *
 *	NOTES:
 *	- All threads currently blocked on the lock set's ulocks are awoken.
 *	- These threads will return with the KERN_LOCK_SET_DESTROYED error.
 */
kern_return_t
lock_set_destroy (task_t task, lock_set_t lock_set)
{
	ulock_t		ulock;
	int		i;

	if (task == TASK_NULL || lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_set->owner != task)
		return KERN_INVALID_RIGHT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	/*
	 *  Deactivate lock set
	 */
	lock_set->active = FALSE;

	/*
	 *  If a ulock is currently held in the target lock set:
	 *
	 *  1) Wakeup all threads blocked on the ulock (if any).  Threads
	 *     may be blocked waiting normally, or waiting for a handoff.
	 *     Blocked threads will return with KERN_LOCK_SET_DESTROYED.
	 *
	 *  2) ulock ownership is cleared.
	 *     The thread currently holding the ulock is revoked of its
	 *     ownership.
	 */
	for (i = 0; i < lock_set->n_ulocks; i++) {
		ulock = &lock_set->ulock_list[i];

		ulock_lock(ulock);

		if (ulock->accept_wait) {
			ulock->accept_wait = FALSE;
			wait_queue_wakeup64_one(&ulock->wait_queue,
						LOCK_SET_HANDOFF,
						THREAD_RESTART);
		}

		if (ulock->holder) {
			if (ulock->blocked) {
				ulock->blocked = FALSE;
				wait_queue_wakeup64_all(&ulock->wait_queue,
							LOCK_SET_EVENT,
							THREAD_RESTART);
			}
			if (ulock->ho_wait) {
				ulock->ho_wait = FALSE;
				wait_queue_wakeup64_one(&ulock->wait_queue,
							LOCK_SET_HANDOFF,
							THREAD_RESTART);
			}
			ulock_ownership_clear(ulock);
		}

		ulock_unlock(ulock);
	}

	lock_set_unlock(lock_set);
	lock_set_ownership_clear(lock_set, task);

	/*
	 *  Drop the lock set reference given to the containing task,
	 *  which in turn destroys the lock set structure if the reference
	 *  count goes to zero.
	 */
	lock_set_dereference(lock_set);

	return KERN_SUCCESS;
}
kern_return_t
lock_acquire (lock_set_t lock_set, int lock_id)
{
	ulock_t	 ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 *  Block the current thread if the lock is already held.
	 */
	if (ulock->holder != THREAD_NULL) {
		int wait_result;

		if (ulock->holder == current_thread()) {
			ulock_unlock(ulock);
			return KERN_LOCK_OWNED_SELF;
		}

		ulock->blocked = TRUE;
		wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
						       LOCK_SET_EVENT,
						       THREAD_ABORTSAFE, 0);
		ulock_unlock(ulock);

		/*
		 *  Block - Wait for lock to become available.
		 */
		if (wait_result == THREAD_WAITING)
			wait_result = thread_block(THREAD_CONTINUE_NULL);

		/*
		 *  Check the result status:
		 *
		 *  Check to see why thread was woken up.  In all cases, we
		 *  already have been removed from the queue.
		 */
		switch (wait_result) {
		case THREAD_AWAKENED:
			/* lock transitioned from the old holder to us */
			/* it already made us the owner */
			return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
						   KERN_SUCCESS;

		case THREAD_INTERRUPTED:
			return KERN_ABORTED;

		case THREAD_RESTART:
			goto retry;	/* probably a dead lock_set */

		default:
			panic("lock_acquire\n");
		}
	}

	/*
	 *  Assign lock ownership
	 */
	ulock_ownership_set(ulock, current_thread());
	ulock_unlock(ulock);

	return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}
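
/*
 * Illustrative sketch: a blocking acquire of one ulock paired with
 * lock_release(), showing how a caller might interpret the return codes
 * above.  The helper name example_with_ulock() and the do_work() callback
 * are hypothetical.
 */
#if 0
static kern_return_t
example_with_ulock(lock_set_t lock_set, int lock_id, void (*do_work)(void))
{
	kern_return_t kr;

	kr = lock_acquire(lock_set, lock_id);
	if (kr == KERN_LOCK_OWNED_SELF)
		return kr;	/* already the holder; avoid a double release */
	if (kr != KERN_SUCCESS && kr != KERN_LOCK_UNSTABLE)
		return kr;	/* KERN_ABORTED, KERN_LOCK_SET_DESTROYED, ... */

	/* KERN_LOCK_UNSTABLE means a previous holder died; see lock_make_stable() */
	do_work();

	return lock_release(lock_set, lock_id);
}
#endif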
kern_return_t
lock_release (lock_set_t lock_set, int lock_id)
{
	ulock_t	 ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];

	return (ulock_release_internal(ulock, current_thread()));
}
kern_return_t
lock_try (lock_set_t lock_set, int lock_id)
{
	ulock_t	 ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 *  If the lock is already owned, we return without blocking.
	 *
	 *  An ownership status is returned to inform the caller as to
	 *  whether it already holds the lock or another thread does.
	 */
	if (ulock->holder != THREAD_NULL) {
		if (ulock->holder == current_thread()) {
			ulock_unlock(ulock);
			return KERN_LOCK_OWNED_SELF;
		}

		ulock_unlock(ulock);
		return KERN_LOCK_OWNED;
	}

	/*
	 *  Add the ulock to the current thread's held_ulocks list.
	 */
	ulock_ownership_set(ulock, current_thread());
	ulock_unlock(ulock);

	return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}
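
/*
 * Illustrative sketch: polling a ulock with lock_try() instead of blocking
 * in lock_acquire().  The helper name example_poll_lock() is hypothetical;
 * the return codes are the ones produced by lock_try() above.
 */
#if 0
static boolean_t
example_poll_lock(lock_set_t lock_set, int lock_id)
{
	switch (lock_try(lock_set, lock_id)) {
	case KERN_SUCCESS:		/* we just took ownership */
	case KERN_LOCK_UNSTABLE:	/* took ownership, previous holder died */
		return TRUE;
	case KERN_LOCK_OWNED_SELF:	/* we were already the holder */
	case KERN_LOCK_OWNED:		/* another thread holds it */
	default:
		return FALSE;
	}
}
#endif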
kern_return_t
lock_make_stable (lock_set_t lock_set, int lock_id)
{
	ulock_t	 ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != current_thread()) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	ulock->unstable = FALSE;
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}
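
/*
 * Illustrative sketch: recovering after a holder died while owning a ulock.
 * ulock_release_all() (at the end of this file) marks such a ulock unstable,
 * so the next lock_acquire() returns KERN_LOCK_UNSTABLE.  The names
 * example_recover() and repair_protected_data() are hypothetical
 * caller-supplied stand-ins.
 */
#if 0
static kern_return_t
example_recover(lock_set_t lock_set, int lock_id)
{
	kern_return_t kr;

	kr = lock_acquire(lock_set, lock_id);
	if (kr == KERN_LOCK_UNSTABLE) {
		/* we hold the ulock, but the protected data is suspect */
		repair_protected_data();

		/* clear the unstable state; only the holder may do this */
		kr = lock_make_stable(lock_set, lock_id);
	}
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... critical section ... */

	return lock_release(lock_set, lock_id);
}
#endif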
/*
 *	ROUTINE:	lock_make_unstable	[internal]
 *
 *	Marks the lock as unstable.
 *
 *	NOTES:
 *	- All future acquisitions of the lock will return with a
 *	  KERN_LOCK_UNSTABLE status, until the lock is made stable again.
 */
kern_return_t
lock_make_unstable (ulock_t ulock, thread_t thread)
{
	lock_set_t	lock_set;

	lock_set = ulock->lock_set;
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != thread) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	ulock->unstable = TRUE;
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}
/*
 *	ROUTINE:	ulock_release_internal	[internal]
 *
 *	Releases the ulock.
 *	If any threads are blocked waiting for the ulock, one is woken up.
 */
kern_return_t
ulock_release_internal (ulock_t ulock, thread_t thread)
{
	lock_set_t	lock_set;

	if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != thread) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	/*
	 *  If we have a hint that threads might be waiting,
	 *  try to transfer the lock ownership to a waiting thread
	 *  and wake it up.
	 */
	if (ulock->blocked) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	wqthread;
		spl_t		s;

		s = splsched();
		wait_queue_lock(wq);
		wqthread = wait_queue_wakeup64_identity_locked(wq,
							LOCK_SET_EVENT,
							THREAD_AWAKENED,
							TRUE);
		/* wait_queue now unlocked, thread locked */

		if (wqthread != THREAD_NULL) {
			thread_unlock(wqthread);
			splx(s);

			/*
			 *  Transfer ulock ownership
			 *  from the current thread to the acquisition thread.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, wqthread);
			ulock_unlock(ulock);

			return KERN_SUCCESS;
		} else {
			ulock->blocked = FALSE;
			splx(s);
		}
	}

	/*
	 *  Disown ulock
	 */
	ulock_ownership_clear(ulock);
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}
kern_return_t
lock_handoff (lock_set_t lock_set, int lock_id)
{
	ulock_t	 ulock;
	int	 wait_result;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);

	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != current_thread()) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	/*
	 *  If the accepting thread (the receiver) is already waiting
	 *  to accept the lock from the handoff thread (the sender),
	 *  then perform the hand-off now.
	 */
	if (ulock->accept_wait) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	thread;
		spl_t		s;

		/*
		 *  See who the lucky devil is, if it is still there waiting.
		 */
		s = splsched();
		wait_queue_lock(wq);
		thread = wait_queue_wakeup64_identity_locked(
					wq,
					LOCK_SET_HANDOFF,
					THREAD_AWAKENED,
					TRUE);
		/* wait queue unlocked, thread locked */

		/*
		 *  Transfer lock ownership
		 */
		if (thread != THREAD_NULL) {
			/*
			 * The thread we are transferring to will try
			 * to take the lock on the ulock, and therefore
			 * will wait for us to complete the handoff even
			 * though we set the thread running.
			 */
			thread_unlock(thread);
			splx(s);

			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, thread);
			ulock->accept_wait = FALSE;
			ulock_unlock(ulock);
			return KERN_SUCCESS;
		} else {
			/*
			 * OOPS.  The accepting thread must have been aborted
			 * and is racing back to clear the flag that says it
			 * is waiting for an accept.  It will clear the flag
			 * when we release the lock, so just fall through and
			 * wait for the next accepting thread (that's the way
			 * it is specified).
			 */
			splx(s);
		}
	}

	/*
	 * Indicate that there is a hand-off thread waiting, and then wait
	 * for an accepting thread.
	 */
	ulock->ho_wait = TRUE;
	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
					       LOCK_SET_HANDOFF,
					       THREAD_ABORTSAFE, 0);
	ulock_unlock(ulock);

	if (wait_result == THREAD_WAITING)
		wait_result = thread_block(THREAD_CONTINUE_NULL);

	/*
	 *  If the thread was woken up via some action other than
	 *  lock_handoff_accept or lock_set_destroy (i.e. thread_terminate),
	 *  then we need to clear the ulock's handoff state.
	 */
	switch (wait_result) {

	case THREAD_AWAKENED:
		/*
		 * We take the ulock lock to synchronize with the
		 * thread that is accepting ownership.
		 */
		ulock_lock(ulock);
		assert(ulock->holder != current_thread());
		ulock_unlock(ulock);
		return KERN_SUCCESS;

	case THREAD_INTERRUPTED:
		ulock_lock(ulock);
		assert(ulock->holder == current_thread());
		ulock->ho_wait = FALSE;
		ulock_unlock(ulock);
		return KERN_ABORTED;

	case THREAD_RESTART:
		goto retry;
	}

	panic("lock_handoff");
	return KERN_FAILURE;	/* not reached */
}
kern_return_t
lock_handoff_accept (lock_set_t lock_set, int lock_id)
{
	ulock_t	 ulock;
	int	 wait_result;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 * If there is another accepting thread that beat us, just
	 * return with an error.
	 */
	if (ulock->accept_wait) {
		ulock_unlock(ulock);
		return KERN_ALREADY_WAITING;
	}

	if (ulock->holder == current_thread()) {
		ulock_unlock(ulock);
		return KERN_LOCK_OWNED_SELF;
	}

	/*
	 * If the handoff thread (the sender) is already waiting to
	 * hand-off the lock to the accepting thread (the receiver),
	 * then perform the hand-off now.
	 */
	if (ulock->ho_wait) {
		wait_queue_t	wq = &ulock->wait_queue;

		/*
		 * See who the lucky devil is, if it is still there waiting.
		 */
		assert(ulock->holder != THREAD_NULL);

		if (wait_queue_wakeup64_thread(wq,
					       LOCK_SET_HANDOFF,
					       ulock->holder,
					       THREAD_AWAKENED) == KERN_SUCCESS) {
			/*
			 * Holder thread was still waiting to give it
			 * away.  Take over ownership.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, current_thread());
			ulock->ho_wait = FALSE;
			ulock_unlock(ulock);
			return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
						   KERN_SUCCESS;
		}

		/*
		 * OOPS.  The owner was aborted out of the handoff.
		 * It will clear its own flag when it gets back.
		 * In the meantime, we will wait as if we didn't
		 * even see its flag (by falling through).
		 */
	}

	/*
	 * Indicate that we are waiting to accept the lock, and then wait
	 * for the hand-off.
	 */
	ulock->accept_wait = TRUE;
	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
					       LOCK_SET_HANDOFF,
					       THREAD_ABORTSAFE, 0);
	ulock_unlock(ulock);

	if (wait_result == THREAD_WAITING)
		wait_result = thread_block(THREAD_CONTINUE_NULL);

	/*
	 *  If the thread was woken up via some action other than
	 *  lock_handoff_accept or lock_set_destroy (i.e. thread_terminate),
	 *  then we need to clear the ulock's handoff state.
	 */
	switch (wait_result) {

	case THREAD_AWAKENED:
		/*
		 * Take the lock to synchronize with the thread handing
		 * off the lock to us.  We don't want to continue until
		 * they complete the handoff.
		 */
		ulock_lock(ulock);
		assert(ulock->accept_wait == FALSE);
		assert(ulock->holder == current_thread());
		ulock_unlock(ulock);
		return KERN_SUCCESS;

	case THREAD_INTERRUPTED:
		ulock_lock(ulock);
		ulock->accept_wait = FALSE;
		ulock_unlock(ulock);
		return KERN_ABORTED;

	case THREAD_RESTART:
		goto retry;
	}

	panic("lock_handoff_accept");
	return KERN_FAILURE;	/* not reached */
}
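
/*
 * Illustrative sketch: the two sides of the hand-off protocol implemented
 * by lock_handoff() and lock_handoff_accept() above.  Whichever side
 * arrives first blocks until the other shows up.  The helper names
 * example_hand_off()/example_accept() are hypothetical.
 */
#if 0
/* runs on the thread that currently holds ulock `lock_id` */
static kern_return_t
example_hand_off(lock_set_t lock_set, int lock_id)
{
	/* blocks until an accepting thread takes ownership (or aborts) */
	return lock_handoff(lock_set, lock_id);
}

/* runs on the thread that wants to receive ownership */
static kern_return_t
example_accept(lock_set_t lock_set, int lock_id)
{
	kern_return_t kr;

	kr = lock_handoff_accept(lock_set, lock_id);
	if (kr == KERN_SUCCESS || kr == KERN_LOCK_UNSTABLE) {
		/* we are now the holder; release when finished */
		kr = lock_release(lock_set, lock_id);
	}
	return kr;
}
#endif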
/*
 *	Routine:	lock_set_reference
 *
 *	Take out a reference on a lock set.  This keeps the data structure
 *	in existence (but the lock set may be deactivated).
 */
void
lock_set_reference(lock_set_t lock_set)
{
	lock_set_lock(lock_set);
	lock_set->ref_count++;
	lock_set_unlock(lock_set);
}
/*
 *	Routine:	lock_set_dereference
 *
 *	Release a reference on a lock set.  If this is the last reference,
 *	the lock set data structure is deallocated.
 */
void
lock_set_dereference(lock_set_t lock_set)
{
	int	ref_count;
	int	size;

	lock_set_lock(lock_set);
	ref_count = --(lock_set->ref_count);
	lock_set_unlock(lock_set);

	if (ref_count == 0) {
		ipc_port_dealloc_kernel(lock_set->port);
		size = (int)(sizeof(struct lock_set) +
			(sizeof(struct ulock) * (lock_set->n_ulocks - 1)));
		kfree(lock_set, size);
	}
}
/*
 * Release all ulocks held by a (terminating) thread, marking each one
 * unstable before releasing it so the next acquirer is warned.
 */
void
ulock_release_all(
	thread_t	thread)
{
	ulock_t		ulock;

	while (!queue_empty(&thread->held_ulocks)) {
		ulock = (ulock_t)queue_first(&thread->held_ulocks);
		lock_make_unstable(ulock, thread);
		ulock_release_internal(ulock, thread);
	}
}