X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/d7e50217d7adf6e52786a38bcaa4cd698cb9a79e..d1ecb069dfe24481e4a83f44cb5217a2b06746d7:/osfmk/kern/sync_lock.c

diff --git a/osfmk/kern/sync_lock.c b/osfmk/kern/sync_lock.c
index d11aa1f89..174381f5f 100644
--- a/osfmk/kern/sync_lock.c
+++ b/osfmk/kern/sync_lock.c
@@ -1,16 +1,19 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
- *
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
  *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
@@ -20,7 +23,7 @@
  * Please see the License for the specific language governing rights and
  * limitations under the License.
  *
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
@@ -33,13 +36,16 @@
  *	Contains RT distributed lock synchronization services.
  */
 
-#include 
+#include 
+#include 
+#include 
+
 #include 
+#include 
 #include 
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 
@@ -54,28 +60,26 @@
 
 #define ulock_ownership_set(ul, th)				\
 	MACRO_BEGIN						\
-	thread_act_t _th_act;					\
-	_th_act = (th)->top_act;				\
-	act_lock(_th_act);					\
-	enqueue (&_th_act->held_ulocks, (queue_entry_t) (ul));	\
-	act_unlock(_th_act);					\
-	(ul)->holder = _th_act;					\
+	thread_mtx_lock(th);					\
+	enqueue (&th->held_ulocks, (queue_entry_t) (ul));	\
+	thread_mtx_unlock(th);					\
+	(ul)->holder = th;					\
 	MACRO_END
 
 #define ulock_ownership_clear(ul)				\
 	MACRO_BEGIN						\
-	thread_act_t _th_act;					\
-	_th_act = (ul)->holder;					\
-	if (_th_act->active) {					\
-		act_lock(_th_act);				\
-		remqueue(&_th_act->held_ulocks,			\
+	thread_t th;						\
+	th = (ul)->holder;					\
+	if ((th)->active) {					\
+		thread_mtx_lock(th);				\
+		remqueue(&th->held_ulocks,			\
 			 (queue_entry_t) (ul));			\
-		act_unlock(_th_act);				\
+		thread_mtx_unlock(th);				\
 	} else {						\
-		remqueue(&_th_act->held_ulocks,			\
+		remqueue(&th->held_ulocks,			\
 			 (queue_entry_t) (ul));			\
 	}							\
-	(ul)->holder = THR_ACT_NULL;				\
+	(ul)->holder = THREAD_NULL;				\
 	MACRO_END
 
 /*
@@ -100,22 +104,29 @@
 	MACRO_END
 
 unsigned int lock_set_event;
-#define LOCK_SET_EVENT ((event64_t)&lock_set_event)
+#define LOCK_SET_EVENT CAST_EVENT64_T(&lock_set_event)
 
 unsigned int lock_set_handoff;
-#define LOCK_SET_HANDOFF ((event64_t)&lock_set_handoff)
+#define LOCK_SET_HANDOFF CAST_EVENT64_T(&lock_set_handoff)
+
+
+lck_attr_t		lock_set_attr;
+lck_grp_t		lock_set_grp;
+static lck_grp_attr_t	lock_set_grp_attr;
+
+
 
 /*
  *	ROUTINE:	lock_set_init		[private]
  *
  *	Initialize the lock_set subsystem.
- *
- *	For now, we don't have anything to do here.
  */
 void
 lock_set_init(void)
 {
-	return;
+	lck_grp_attr_setdefault(&lock_set_grp_attr);
+	lck_grp_init(&lock_set_grp, "lock_set", &lock_set_grp_attr);
+	lck_attr_setdefault(&lock_set_attr);
 }
 
 
@@ -134,7 +145,7 @@ lock_set_create (
 {
 	lock_set_t	lock_set = LOCK_SET_NULL;
 	ulock_t		ulock;
-	int		size;
+	vm_size_t	size;
 	int		x;
 
 	*new_lock_set = LOCK_SET_NULL;
@@ -142,6 +153,9 @@ lock_set_create (
 	if (task == TASK_NULL || n_ulocks <= 0 || policy > SYNC_POLICY_MAX)
 		return KERN_INVALID_ARGUMENT;
 
+	if ((VM_MAX_ADDRESS - sizeof(struct lock_set))/sizeof(struct ulock) < (unsigned)n_ulocks)
+		return KERN_RESOURCE_SHORTAGE;
+
 	size = sizeof(struct lock_set) + (sizeof(struct ulock) * (n_ulocks-1));
 	lock_set = (lock_set_t) kalloc (size);
 
@@ -151,15 +165,14 @@ lock_set_create (
 
 	lock_set_lock_init(lock_set);
 	lock_set->n_ulocks = n_ulocks;
-	lock_set->ref_count = 1;
+	lock_set->ref_count = (task == kernel_task) ? 1 : 2; /* one for kernel, one for port */
 
 	/*
 	 *  Create and initialize the lock set port
 	 */
 	lock_set->port = ipc_port_alloc_kernel();
 	if (lock_set->port == IP_NULL) {
-		/* This will deallocate the lock set */
-		lock_set_dereference(lock_set);
+		kfree(lock_set, size);
 		return KERN_RESOURCE_SHORTAGE;
 	}
 
@@ -175,10 +188,11 @@ lock_set_create (
 		ulock = (ulock_t) &lock_set->ulock_list[x];
 		ulock_lock_init(ulock);
 		ulock->lock_set = lock_set;
-		ulock->holder = THR_ACT_NULL;
+		ulock->holder = THREAD_NULL;
 		ulock->blocked = FALSE;
 		ulock->unstable = FALSE;
 		ulock->ho_wait = FALSE;
+		ulock->accept_wait = FALSE;
 		wait_queue_init(&ulock->wait_queue, policy);
 	}
 
@@ -204,7 +218,6 @@ lock_set_create (
 kern_return_t
 lock_set_destroy (task_t task, lock_set_t lock_set)
 {
-	thread_t	thread;
 	ulock_t		ulock;
 	int		i;
 
@@ -271,13 +284,10 @@ lock_set_destroy (task_t task, lock_set_t lock_set)
 	lock_set_ownership_clear(lock_set, task);
 
 	/*
-	 * Deallocate
-	 *
-	 * Drop the lock set reference, which inturn destroys the
-	 * lock set structure if the reference count goes to zero.
+	 * Drop the lock set reference given to the containing task,
+	 * which in turn destroys the lock set structure if the reference
+	 * count goes to zero.
 	 */
-
-	ipc_port_dealloc_kernel(lock_set->port);
 	lock_set_dereference(lock_set);
 
 	return KERN_SUCCESS;
@@ -309,10 +319,10 @@ lock_acquire (lock_set_t lock_set, int lock_id)
 	 *  Block the current thread if the lock is already held.
 	 */
 
-	if (ulock->holder != THR_ACT_NULL) {
+	if (ulock->holder != THREAD_NULL) {
 		int wait_result;
 
-		if (ulock->holder == current_act()) {
+		if (ulock->holder == current_thread()) {
 			ulock_unlock(ulock);
 			return KERN_LOCK_OWNED_SELF;
 		}
@@ -320,7 +330,7 @@ lock_acquire (lock_set_t lock_set, int lock_id)
 		ulock->blocked = TRUE;
 		wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
 					LOCK_SET_EVENT,
-					THREAD_ABORTSAFE);
+					THREAD_ABORTSAFE, 0);
 		ulock_unlock(ulock);
 
 		/*
@@ -375,7 +385,7 @@ lock_release (lock_set_t lock_set, int lock_id)
 
 	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
 
-	return (lock_release_internal(ulock, current_act()));
+	return (ulock_release_internal(ulock, current_thread()));
 }
 
 kern_return_t
@@ -408,10 +418,10 @@ lock_try (lock_set_t lock_set, int lock_id)
 	 *  whether it already holds the lock or another thread does.
 	 */
 
-	if (ulock->holder != THR_ACT_NULL) {
+	if (ulock->holder != THREAD_NULL) {
 		lock_set_unlock(lock_set);
 
-		if (ulock->holder == current_act()) {
+		if (ulock->holder == current_thread()) {
 			ulock_unlock(ulock);
 			return KERN_LOCK_OWNED_SELF;
 		}
@@ -453,7 +463,7 @@ lock_make_stable (lock_set_t lock_set, int lock_id)
 	ulock_lock(ulock);
 	lock_set_unlock(lock_set);
 
-	if (ulock->holder != current_act()) {
+	if (ulock->holder != current_thread()) {
 		ulock_unlock(ulock);
 		return KERN_INVALID_RIGHT;
 	}
@@ -474,11 +484,10 @@ lock_make_stable (lock_set_t lock_set, int lock_id)
  *	KERN_LOCK_UNSTABLE status, until the lock is made stable again.
  */
 kern_return_t
-lock_make_unstable (ulock_t ulock, thread_act_t thr_act)
+lock_make_unstable (ulock_t ulock, thread_t thread)
 {
 	lock_set_t	lock_set;
 
-
 	lock_set = ulock->lock_set;
 	lock_set_lock(lock_set);
 	if (!lock_set->active) {
@@ -489,7 +498,7 @@ lock_make_unstable (ulock_t ulock, thread_act_t thr_act)
 	ulock_lock(ulock);
 	lock_set_unlock(lock_set);
 
-	if (ulock->holder != thr_act) {
+	if (ulock->holder != thread) {
 		ulock_unlock(ulock);
 		return KERN_INVALID_RIGHT;
 	}
@@ -501,18 +510,16 @@ lock_make_unstable (ulock_t ulock, thread_act_t thr_act)
 }
 
 /*
- *	ROUTINE:	lock_release_internal	[internal]
+ *	ROUTINE:	ulock_release_internal	[internal]
  *
  *	Releases the ulock.
  *	If any threads are blocked waiting for the ulock, one is woken-up.
  *
  */
 kern_return_t
-lock_release_internal (ulock_t ulock, thread_act_t thr_act)
+ulock_release_internal (ulock_t ulock, thread_t thread)
 {
 	lock_set_t	lock_set;
-	int		result;
-
 	if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
 		return KERN_INVALID_ARGUMENT;
 
@@ -525,7 +532,7 @@ lock_release_internal (ulock_t ulock, thread_act_t thr_act)
 	ulock_lock(ulock);
 	lock_set_unlock(lock_set);
 
-	if (ulock->holder != thr_act) {
+	if (ulock->holder != thread) {
 		ulock_unlock(ulock);
 		return KERN_INVALID_RIGHT;
 	}
@@ -537,29 +544,19 @@ lock_release_internal (ulock_t ulock, thread_act_t thr_act)
 	 */
 	if (ulock->blocked) {
 		wait_queue_t	wq = &ulock->wait_queue;
-		thread_t	thread;
+		thread_t	wqthread;
 		spl_t		s;
 
 		s = splsched();
 		wait_queue_lock(wq);
-		thread = wait_queue_wakeup64_identity_locked(wq,
+		wqthread = wait_queue_wakeup64_identity_locked(wq,
 					LOCK_SET_EVENT,
 					THREAD_AWAKENED,
 					TRUE);
 		/* wait_queue now unlocked, thread locked */
 
-		if (thread != THREAD_NULL) {
-			/*
-			 * JMM - These ownership transfer macros have a
-			 * locking/race problem.  To keep the thread from
-			 * changing states on us (nullifying the ownership
-			 * assignment) we need to keep the thread locked
-			 * during the assignment.  But we can't because the
-			 * macros take an activation lock, which is a mutex.
-			 * Since this code was already broken before I got
-			 * here, I will leave it for now.
-			 */
-			thread_unlock(thread);
+		if (wqthread != THREAD_NULL) {
+			thread_unlock(wqthread);
 			splx(s);
 
 			/*
@@ -567,7 +564,7 @@ lock_release_internal (ulock_t ulock, thread_act_t thr_act)
 			 * from the current thread to the acquisition thread.
 			 */
 			ulock_ownership_clear(ulock);
-			ulock_ownership_set(ulock, thread);
+			ulock_ownership_set(ulock, wqthread);
 			ulock_unlock(ulock);
 
 			return KERN_SUCCESS;
@@ -611,7 +608,7 @@ lock_handoff (lock_set_t lock_set, int lock_id)
 	ulock_lock(ulock);
 	lock_set_unlock(lock_set);
 
-	if (ulock->holder != current_act()) {
+	if (ulock->holder != current_thread()) {
 		ulock_unlock(ulock);
 		return KERN_INVALID_RIGHT;
 	}
@@ -643,15 +640,11 @@ lock_handoff (lock_set_t lock_set, int lock_id)
 		 *  Transfer lock ownership
 		 */
 		if (thread != THREAD_NULL) {
-			/*
-			 * JMM - These ownership transfer macros have a
-			 * locking/race problem.  To keep the thread from
-			 * changing states on us (nullifying the ownership
-			 * assignment) we need to keep the thread locked
-			 * during the assignment.  But we can't because the
-			 * macros take an activation lock, which is a mutex.
-			 * Since this code was already broken before I got
-			 * here, I will leave it for now.
+			/*
+			 * The thread we are transferring to will try
+			 * to take the lock on the ulock, and therefore
+			 * will wait for us to complete the handoff even
+			 * though we set the thread running.
 			 */
 			thread_unlock(thread);
 			splx(s);
@@ -682,7 +675,7 @@ lock_handoff (lock_set_t lock_set, int lock_id)
 	ulock->ho_wait = TRUE;
 	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
 				LOCK_SET_HANDOFF,
-				THREAD_ABORTSAFE);
+				THREAD_ABORTSAFE, 0);
 	ulock_unlock(ulock);
 
 	if (wait_result == THREAD_WAITING)
@@ -695,22 +688,30 @@ lock_handoff (lock_set_t lock_set, int lock_id)
 	 */
 
 	switch (wait_result) {
+
 	case THREAD_AWAKENED:
+		/*
+		 * We take the ulock lock to synchronize with the
+		 * thread that is accepting ownership.
+		 */
+		ulock_lock(ulock);
+		assert(ulock->holder != current_thread());
+		ulock_unlock(ulock);
 		return KERN_SUCCESS;
 
 	case THREAD_INTERRUPTED:
 		ulock_lock(ulock);
-		assert(ulock->holder == current_act());
+		assert(ulock->holder == current_thread());
 		ulock->ho_wait = FALSE;
 		ulock_unlock(ulock);
 		return KERN_ABORTED;
 
 	case THREAD_RESTART:
 		goto retry;
-
-	default:
-		panic("lock_handoff");
 	}
+
+	panic("lock_handoff");
+	return KERN_FAILURE;
 }
 
 kern_return_t
@@ -746,7 +747,7 @@ lock_handoff_accept (lock_set_t lock_set, int lock_id)
 		return KERN_ALREADY_WAITING;
 	}
 
-	if (ulock->holder == current_act()) {
+	if (ulock->holder == current_thread()) {
 		ulock_unlock(ulock);
 		return KERN_LOCK_OWNED_SELF;
 	}
@@ -758,17 +759,15 @@ lock_handoff_accept (lock_set_t lock_set, int lock_id)
 	 */
 	if (ulock->ho_wait) {
 		wait_queue_t	wq = &ulock->wait_queue;
-		thread_t	thread;
 
 		/*
 		 *  See who the lucky devil is, if he is still there waiting.
 		 */
-		assert(ulock->holder != THR_ACT_NULL);
-		thread = ulock->holder->thread;
+		assert(ulock->holder != THREAD_NULL);
 
 		if (wait_queue_wakeup64_thread(wq,
 					LOCK_SET_HANDOFF,
-					thread,
+					ulock->holder,
 					THREAD_AWAKENED) == KERN_SUCCESS) {
 			/*
 			 * Holder thread was still waiting to give it
@@ -793,7 +792,7 @@ lock_handoff_accept (lock_set_t lock_set, int lock_id)
 	ulock->accept_wait = TRUE;
 	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
 				LOCK_SET_HANDOFF,
-				THREAD_ABORTSAFE);
+				THREAD_ABORTSAFE, 0);
 	ulock_unlock(ulock);
 
 	if (wait_result == THREAD_WAITING)
@@ -807,6 +806,15 @@ lock_handoff_accept (lock_set_t lock_set, int lock_id)
 	switch (wait_result) {
 
 	case THREAD_AWAKENED:
+		/*
+		 * Take the lock to synchronize with the thread handing
+		 * off the lock to us.  We don't want to continue until
+		 * they complete the handoff.
+		 */
+		ulock_lock(ulock);
+		assert(ulock->accept_wait == FALSE);
+		assert(ulock->holder == current_thread());
+		ulock_unlock(ulock);
 		return KERN_SUCCESS;
 
 	case THREAD_INTERRUPTED:
@@ -817,10 +825,10 @@ lock_handoff_accept (lock_set_t lock_set, int lock_id)
 
 	case THREAD_RESTART:
 		goto retry;
-
-	default:
-		panic("lock_handoff_accept");
 	}
+
+	panic("lock_handoff_accept");
+	return KERN_FAILURE;
 }
 
 /*
@@ -854,8 +862,22 @@ lock_set_dereference(lock_set_t lock_set)
 	lock_set_unlock(lock_set);
 
 	if (ref_count == 0) {
-		size = sizeof(struct lock_set) +
-			(sizeof(struct ulock) * (lock_set->n_ulocks - 1));
-		kfree((vm_offset_t) lock_set, size);
+		ipc_port_dealloc_kernel(lock_set->port);
+		size = (int)(sizeof(struct lock_set) +
+			(sizeof(struct ulock) * (lock_set->n_ulocks - 1)));
+		kfree(lock_set, size);
+	}
+}
+
+void
+ulock_release_all(
+	thread_t		thread)
+{
+	ulock_t		ulock;
+
+	while (!queue_empty(&thread->held_ulocks)) {
+		ulock = (ulock_t)queue_first(&thread->held_ulocks);
+		lock_make_unstable(ulock, thread);
+		ulock_release_internal(ulock, thread);
 	}
 }
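
The new ulock_release_all() routine above drains every ulock still held by a thread: each one is first marked unstable via lock_make_unstable(), so later acquirers see KERN_LOCK_UNSTABLE, and is then handed back through ulock_release_internal(), waking one waiter if any are blocked. A minimal caller sketch follows; it is illustrative only, the function name example_thread_teardown() is hypothetical and not part of this diff, while thread->held_ulocks, queue_empty(), and ulock_release_all() come from the patch itself.

/*
 * Illustrative sketch (not part of the patch): a hypothetical thread
 * teardown path that lets ulock_release_all() clean up any ulocks the
 * exiting thread still owns.
 */
static void
example_thread_teardown(thread_t thread)
{
	/* Only walk the list if the thread actually holds ulocks. */
	if (!queue_empty(&thread->held_ulocks))
		ulock_release_all(thread);
}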