- ulock_t ulock;
-
-
- if (lock_set == LOCK_SET_NULL)
- return KERN_INVALID_ARGUMENT;
-
- if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
- return KERN_INVALID_ARGUMENT;
-
-
- lock_set_lock(lock_set);
- if (!lock_set->active) {
- lock_set_unlock(lock_set);
- return KERN_LOCK_SET_DESTROYED;
- }
-
- ulock = (ulock_t) &lock_set->ulock_list[lock_id];
- ulock_lock(ulock);
- lock_set_unlock(lock_set);
-
- if (ulock->holder != current_act()) {
- ulock_unlock(ulock);
- return KERN_INVALID_RIGHT;
- }
-
- ulock->unstable = FALSE;
- ulock_unlock(ulock);
-
- return KERN_SUCCESS;
-}
-
-/*
- * ROUTINE: lock_make_unstable [internal]
- *
- * Marks the lock as unstable.
- *
- * NOTES:
- * - All future acquisitions of the lock will return with a
- * KERN_LOCK_UNSTABLE status, until the lock is made stable again.
- */
-kern_return_t
-lock_make_unstable (ulock_t ulock, thread_act_t thr_act)
-{
- lock_set_t lock_set;
-
-
- /*
-  * Lock ordering: take the owning lock_set's lock first so the
-  * set cannot be destroyed out from under us while we reach the
-  * individual ulock.
-  */
- lock_set = ulock->lock_set;
- lock_set_lock(lock_set);
- if (!lock_set->active) {
- lock_set_unlock(lock_set);
- return KERN_LOCK_SET_DESTROYED;
- }
-
- /*
-  * Hand-over-hand: acquire the ulock, then drop the set lock.
-  * Holding the ulock alone is sufficient for the state change below.
-  */
- ulock_lock(ulock);
- lock_set_unlock(lock_set);
-
- /* Only the current holder may change the lock's stability state. */
- if (ulock->holder != thr_act) {
- ulock_unlock(ulock);
- return KERN_INVALID_RIGHT;
- }
-
- /*
-  * Mark unstable: per the routine header, subsequent acquisitions
-  * will report KERN_LOCK_UNSTABLE until the lock is made stable again.
-  */
- ulock->unstable = TRUE;
- ulock_unlock(ulock);
-
- return KERN_SUCCESS;
-}
-
-/*
- * ROUTINE: lock_release_internal [internal]
- *
- * Releases the ulock.
- * If any threads are blocked waiting for the ulock, one is woken up.
- *
- */
-kern_return_t
-lock_release_internal (ulock_t ulock, thread_act_t thr_act)
-{
- lock_set_t lock_set;
- int result;
-
-
- if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
- return KERN_INVALID_ARGUMENT;
-
- lock_set_lock(lock_set);
- if (!lock_set->active) {
- lock_set_unlock(lock_set);
- return KERN_LOCK_SET_DESTROYED;
- }
- ulock_lock(ulock);
- lock_set_unlock(lock_set);
-
- if (ulock->holder != thr_act) {
- ulock_unlock(ulock);
- return KERN_INVALID_RIGHT;
- }
-
- /*
- * If we have a hint that threads might be waiting,
- * try to transfer the lock ownership to a waiting thread
- * and wake it up.
- */
- if (ulock->blocked) {
- wait_queue_t wq = &ulock->wait_queue;
- thread_t thread;
- spl_t s;
-
- s = splsched();
- wait_queue_lock(wq);
- thread = wait_queue_wakeup64_identity_locked(wq,
- LOCK_SET_EVENT,
- THREAD_AWAKENED,
- TRUE);
- /* wait_queue now unlocked, thread locked */
-
- if (thread != THREAD_NULL) {
- /*
- * JMM - These ownership transfer macros have a
- * locking/race problem. To keep the thread from
- * changing states on us (nullifying the ownership
- * assignment) we need to keep the thread locked
- * during the assignment. But we can't because the
- * macros take an activation lock, which is a mutex.
- * Since this code was already broken before I got
- * here, I will leave it for now.
- */
- thread_unlock(thread);
- splx(s);
-
- /*
- * Transfer ulock ownership
- * from the current thread to the acquisition thread.
- */
- ulock_ownership_clear(ulock);
- ulock_ownership_set(ulock, thread);
- ulock_unlock(ulock);
-
- return KERN_SUCCESS;
- } else {
- ulock->blocked = FALSE;
- splx(s);
- }
- }
-
- /*
- * Disown ulock
- */
- ulock_ownership_clear(ulock);
- ulock_unlock(ulock);
-
- return KERN_SUCCESS;