X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/43866e378188c25dd1e2208016ab3cbeb086ae6c..ff6e181ae92fc6f1e89841290f461d1f2f9badd9:/osfmk/kern/sync_lock.c

diff --git a/osfmk/kern/sync_lock.c b/osfmk/kern/sync_lock.c
index d11aa1f89..bf4f7d942 100644
--- a/osfmk/kern/sync_lock.c
+++ b/osfmk/kern/sync_lock.c
@@ -1,10 +1,8 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
  *
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
- *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -33,13 +31,16 @@
  *	Contains RT distributed lock synchronization services.
  */

-#include <kern/etap_macros.h>
+#include <mach/mach_types.h>
+#include <mach/lock_set_server.h>
+#include <mach/task_server.h>
+
 #include <kern/misc_protos.h>
+#include <kern/kalloc.h>
 #include <kern/sync_lock.h>
 #include <kern/sched_prim.h>
 #include <kern/ipc_kobject.h>
 #include <kern/ipc_sync.h>
-#include <kern/etap_macros.h>
 #include <kern/thread.h>
 #include <kern/task.h>
@@ -54,28 +55,26 @@

 #define ulock_ownership_set(ul, th)				\
 	MACRO_BEGIN						\
-	thread_act_t _th_act;					\
-	_th_act = (th)->top_act;				\
-	act_lock(_th_act);					\
-	enqueue (&_th_act->held_ulocks, (queue_entry_t) (ul));	\
-	act_unlock(_th_act);					\
-	(ul)->holder = _th_act;					\
+	thread_mtx_lock(th);					\
+	enqueue (&th->held_ulocks, (queue_entry_t) (ul));	\
+	thread_mtx_unlock(th);					\
+	(ul)->holder = th;					\
 	MACRO_END

 #define ulock_ownership_clear(ul)				\
 	MACRO_BEGIN						\
-	thread_act_t _th_act;					\
-	_th_act = (ul)->holder;					\
-	if (_th_act->active) {					\
-		act_lock(_th_act);				\
-		remqueue(&_th_act->held_ulocks,			\
+	thread_t th;						\
+	th = (ul)->holder;					\
+	if (th->active) {					\
+		thread_mtx_lock(th);				\
+		remqueue(&th->held_ulocks,			\
 			 (queue_entry_t) (ul));			\
-		act_unlock(_th_act);				\
+		thread_mtx_unlock(th);				\
 	} else {						\
-		remqueue(&_th_act->held_ulocks,			\
+		remqueue(&th->held_ulocks,			\
 			 (queue_entry_t) (ul));			\
 	}							\
-	(ul)->holder = THR_ACT_NULL;				\
+	(ul)->holder = THREAD_NULL;				\
 	MACRO_END

 /*
@@ -175,7 +174,7 @@ lock_set_create (
 		ulock = (ulock_t) &lock_set->ulock_list[x];
 		ulock_lock_init(ulock);
 		ulock->lock_set = lock_set;
-		ulock->holder	 = THR_ACT_NULL;
+		ulock->holder	 = THREAD_NULL;
 		ulock->blocked	 = FALSE;
 		ulock->unstable	 = FALSE;
 		ulock->ho_wait	 = FALSE;
@@ -204,7 +203,6 @@
 kern_return_t
 lock_set_destroy (task_t task, lock_set_t lock_set)
 {
-	thread_t	thread;
 	ulock_t		ulock;
 	int		i;
@@ -309,10 +307,10 @@ lock_acquire (lock_set_t lock_set, int lock_id)
 	 * Block the current thread if the lock is already held.
 	 */

-	if (ulock->holder != THR_ACT_NULL) {
+	if (ulock->holder != THREAD_NULL) {
 		int wait_result;

-		if (ulock->holder == current_act()) {
+		if (ulock->holder == current_thread()) {
 			ulock_unlock(ulock);
 			return KERN_LOCK_OWNED_SELF;
 		}
@@ -320,7 +318,7 @@ lock_acquire (lock_set_t lock_set, int lock_id)
 		ulock->blocked = TRUE;
 		wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
						       LOCK_SET_EVENT,
-						       THREAD_ABORTSAFE);
+						       THREAD_ABORTSAFE, 0);
 		ulock_unlock(ulock);

 		/*
@@ -375,7 +373,7 @@ lock_release (lock_set_t lock_set, int lock_id)

 	ulock = (ulock_t) &lock_set->ulock_list[lock_id];

-	return (lock_release_internal(ulock, current_act()));
+	return (ulock_release_internal(ulock, current_thread()));
 }

 kern_return_t
@@ -408,10 +406,10 @@ lock_try (lock_set_t lock_set, int lock_id)
 	 * whether it already holds the lock or another thread does.
 	 */

-	if (ulock->holder != THR_ACT_NULL) {
+	if (ulock->holder != THREAD_NULL) {
 		lock_set_unlock(lock_set);

-		if (ulock->holder == current_act()) {
+		if (ulock->holder == current_thread()) {
 			ulock_unlock(ulock);
 			return KERN_LOCK_OWNED_SELF;
 		}
@@ -453,7 +451,7 @@ lock_make_stable (lock_set_t lock_set, int lock_id)
 	ulock_lock(ulock);
 	lock_set_unlock(lock_set);

-	if (ulock->holder != current_act()) {
+	if (ulock->holder != current_thread()) {
 		ulock_unlock(ulock);
 		return KERN_INVALID_RIGHT;
 	}
@@ -474,11 +472,10 @@
 *	KERN_LOCK_UNSTABLE status, until the lock is made stable again.
 */
 kern_return_t
-lock_make_unstable (ulock_t ulock, thread_act_t thr_act)
+lock_make_unstable (ulock_t ulock, thread_t thread)
 {
 	lock_set_t	lock_set;
-
 	lock_set = ulock->lock_set;
 	lock_set_lock(lock_set);

 	if (!lock_set->active) {
@@ -489,7 +486,7 @@
 	ulock_lock(ulock);
 	lock_set_unlock(lock_set);

-	if (ulock->holder != thr_act) {
+	if (ulock->holder != thread) {
 		ulock_unlock(ulock);
 		return KERN_INVALID_RIGHT;
 	}
@@ -501,18 +498,16 @@
 }

 /*
- *	ROUTINE:	lock_release_internal	[internal]
+ *	ROUTINE:	ulock_release_internal	[internal]
 *
 *	Releases the ulock.
 *	If any threads are blocked waiting for the ulock, one is woken-up.
 *
 */
 kern_return_t
-lock_release_internal (ulock_t ulock, thread_act_t thr_act)
+ulock_release_internal (ulock_t ulock, thread_t thread)
 {
 	lock_set_t	lock_set;
-	int		result;
-
 	if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
 		return KERN_INVALID_ARGUMENT;
@@ -525,7 +520,7 @@ lock_release_internal (ulock_t ulock, thread_act_t thr_act)
 	ulock_lock(ulock);
 	lock_set_unlock(lock_set);

-	if (ulock->holder != thr_act) {
+	if (ulock->holder != thread) {
 		ulock_unlock(ulock);
 		return KERN_INVALID_RIGHT;
 	}
@@ -537,18 +532,18 @@ lock_release_internal (ulock_t ulock, thread_act_t thr_act)
 	 */
 	if (ulock->blocked) {
 		wait_queue_t	wq = &ulock->wait_queue;
-		thread_t	thread;
+		thread_t	wqthread;
 		spl_t		s;

 		s = splsched();
 		wait_queue_lock(wq);
-		thread = wait_queue_wakeup64_identity_locked(wq,
+		wqthread = wait_queue_wakeup64_identity_locked(wq,
							     LOCK_SET_EVENT,
							     THREAD_AWAKENED,
							     TRUE);
 		/* wait_queue now unlocked, thread locked */

-		if (thread != THREAD_NULL) {
+		if (wqthread != THREAD_NULL) {
 			/*
 			 *  JMM - These ownership transfer macros have a
 			 *  locking/race problem.  To keep the thread from
@@ -559,7 +554,7 @@ lock_release_internal (ulock_t ulock, thread_act_t thr_act)
 			 *  Since this code was already broken before I got
 			 *  here, I will leave it for now.
 			 */
-			thread_unlock(thread);
+			thread_unlock(wqthread);
 			splx(s);

 			/*
@@ -567,7 +562,7 @@ lock_release_internal (ulock_t ulock, thread_act_t thr_act)
 			 * from the current thread to the acquisition thread.
 			 */
 			ulock_ownership_clear(ulock);
-			ulock_ownership_set(ulock, thread);
+			ulock_ownership_set(ulock, wqthread);
 			ulock_unlock(ulock);

 			return KERN_SUCCESS;
@@ -611,7 +606,7 @@ lock_handoff (lock_set_t lock_set, int lock_id)
 	ulock_lock(ulock);
 	lock_set_unlock(lock_set);

-	if (ulock->holder != current_act()) {
+	if (ulock->holder != current_thread()) {
 		ulock_unlock(ulock);
 		return KERN_INVALID_RIGHT;
 	}
@@ -649,7 +644,8 @@ lock_handoff (lock_set_t lock_set, int lock_id)
 			 * changing states on us (nullifying the ownership
 			 * assignment) we need to keep the thread locked
 			 * during the assignment.  But we can't because the
-			 * macros take an activation lock, which is a mutex.
+			 * macros take a thread mutex lock.
+			 *
 			 * Since this code was already broken before I got
 			 * here, I will leave it for now.
 			 */
@@ -682,7 +678,7 @@ lock_handoff (lock_set_t lock_set, int lock_id)
 		ulock->ho_wait = TRUE;
 		wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
						       LOCK_SET_HANDOFF,
-						       THREAD_ABORTSAFE);
+						       THREAD_ABORTSAFE, 0);
 		ulock_unlock(ulock);

 		if (wait_result == THREAD_WAITING)
@@ -700,17 +696,17 @@ lock_handoff (lock_set_t lock_set, int lock_id)

 		case THREAD_INTERRUPTED:
 			ulock_lock(ulock);
-			assert(ulock->holder == current_act());
+			assert(ulock->holder == current_thread());
 			ulock->ho_wait = FALSE;
 			ulock_unlock(ulock);
 			return KERN_ABORTED;

 		case THREAD_RESTART:
 			goto retry;
-
-		default:
-			panic("lock_handoff");
 		}
+
+	panic("lock_handoff");
+	return KERN_FAILURE;
 }

 kern_return_t
@@ -746,7 +742,7 @@ lock_handoff_accept (lock_set_t lock_set, int lock_id)
 		return KERN_ALREADY_WAITING;
 	}

-	if (ulock->holder == current_act()) {
+	if (ulock->holder == current_thread()) {
 		ulock_unlock(ulock);
 		return KERN_LOCK_OWNED_SELF;
 	}
@@ -758,17 +754,15 @@ lock_handoff_accept (lock_set_t lock_set, int lock_id)
 	 */
 	if (ulock->ho_wait) {
 		wait_queue_t wq = &ulock->wait_queue;
-		thread_t thread;

 		/*
 		 * See who the lucky devil is, if he is still there waiting.
 		 */
-		assert(ulock->holder != THR_ACT_NULL);
-		thread = ulock->holder->thread;
+		assert(ulock->holder != THREAD_NULL);

 		if (wait_queue_wakeup64_thread(wq,
 					       LOCK_SET_HANDOFF,
-					       thread,
+					       ulock->holder,
 					       THREAD_AWAKENED) == KERN_SUCCESS) {
 			/*
 			 * Holder thread was still waiting to give it
@@ -793,7 +787,7 @@ lock_handoff_accept (lock_set_t lock_set, int lock_id)
 	ulock->accept_wait = TRUE;
 	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
					       LOCK_SET_HANDOFF,
-					       THREAD_ABORTSAFE);
+					       THREAD_ABORTSAFE, 0);
 	ulock_unlock(ulock);

 	if (wait_result == THREAD_WAITING)
@@ -817,10 +811,10 @@ lock_handoff_accept (lock_set_t lock_set, int lock_id)

 	case THREAD_RESTART:
 		goto retry;
-
-	default:
-		panic("lock_handoff_accept");
 	}
+
+	panic("lock_handoff_accept");
+	return KERN_FAILURE;
 }

 /*
@@ -856,6 +850,19 @@ lock_set_dereference(lock_set_t lock_set)
 	if (ref_count == 0) {
 		size = sizeof(struct lock_set) +
 			(sizeof(struct ulock) * (lock_set->n_ulocks - 1));
-		kfree((vm_offset_t) lock_set, size);
+		kfree(lock_set, size);
+	}
+}
+
+void
+ulock_release_all(
+	thread_t		thread)
+{
+	ulock_t		ulock;
+
+	while (!queue_empty(&thread->held_ulocks)) {
+		ulock = (ulock_t)queue_first(&thread->held_ulocks);
+		lock_make_unstable(ulock, thread);
+		ulock_release_internal(ulock, thread);
 	}
 }
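
Note on usage: the kernel paths patched above are driven from user space through the MIG-generated lock-set routines. The following is a minimal sketch, not part of the patch, assuming the user-side stubs declared in <mach/lock_set.h> and the policy constants from <mach/sync_policy.h>; error handling is trimmed. It also shows the user-visible effect of the ulock_release_all() routine added at the end of the diff: a ulock released on behalf of a terminated holder reaches the next acquirer with KERN_LOCK_UNSTABLE.

#include <mach/mach.h>
#include <mach/lock_set.h>
#include <mach/sync_policy.h>

int main(void)
{
	lock_set_t    lset;
	kern_return_t kr;

	/* A set containing one ulock, FIFO wakeup order. */
	kr = lock_set_create(mach_task_self(), &lset, 1, SYNC_POLICY_FIFO);
	if (kr != KERN_SUCCESS)
		return 1;

	kr = lock_acquire(lset, 0);	/* blocks; lock_try() would poll */
	if (kr == KERN_LOCK_UNSTABLE) {
		/*
		 * A previous holder terminated while holding the ulock
		 * (the ulock_release_all() path), so the protected data
		 * may be inconsistent.  We still own the lock: repair
		 * the data, then clear the unstable condition.
		 */
		lock_make_stable(lset, 0);
	}

	/* ... critical section ... */

	lock_release(lset, 0);
	lock_set_destroy(mach_task_self(), lset);
	return 0;
}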
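
The handoff hunks (lock_handoff / lock_handoff_accept) implement a directed transfer: the holder blocks until an acceptor arrives, and ownership moves to the acceptor without the ulock ever passing through an unheld state. A hypothetical two-thread pairing, under the same header assumptions as the sketch above:

#include <mach/mach.h>
#include <mach/lock_set.h>
#include <mach/sync_policy.h>
#include <pthread.h>

static lock_set_t lset;

/* Holder side: take ulock 0, then hand it straight to an acceptor. */
static void *handoff_side(void *arg)
{
	(void)arg;
	lock_acquire(lset, 0);
	/* ... publish state protected by ulock 0 ... */
	lock_handoff(lset, 0);		/* blocks until someone accepts */
	return NULL;
}

/* Acceptor side: returns from the accept call owning ulock 0. */
static void *accept_side(void *arg)
{
	(void)arg;
	lock_handoff_accept(lset, 0);	/* blocks until the holder hands off */
	/* ... consume state under the ulock ... */
	lock_release(lset, 0);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	if (lock_set_create(mach_task_self(), &lset, 1,
			    SYNC_POLICY_FIFO) != KERN_SUCCESS)
		return 1;

	pthread_create(&a, NULL, handoff_side, NULL);
	pthread_create(&b, NULL, accept_side, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	lock_set_destroy(mach_task_self(), lset);
	return 0;
}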