X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/1c79356b52d46aa6b508fb032f5ae709b1f2897b..89b3af67bb32e691275bf6fa803d1834b2284115:/osfmk/kern/sync_lock.c

diff --git a/osfmk/kern/sync_lock.c b/osfmk/kern/sync_lock.c
index 5849f6883..7bf5387f3 100644
--- a/osfmk/kern/sync_lock.c
+++ b/osfmk/kern/sync_lock.c
@@ -1,23 +1,29 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  *
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
@@ -30,13 +36,16 @@
  *  Contains RT distributed lock synchronization services.
  */
 
-#include 
+#include 
+#include 
+#include 
+
 #include 
+#include 
 #include 
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 
@@ -51,28 +60,26 @@
 
 #define ulock_ownership_set(ul, th) \
     MACRO_BEGIN \
-    thread_act_t _th_act; \
-    _th_act = (th)->top_act; \
-    act_lock(_th_act); \
-    enqueue (&_th_act->held_ulocks, (queue_entry_t) (ul)); \
-    act_unlock(_th_act); \
-    (ul)->holder = _th_act; \
+    thread_mtx_lock(th); \
+    enqueue (&th->held_ulocks, (queue_entry_t) (ul)); \
+    thread_mtx_unlock(th); \
+    (ul)->holder = th; \
     MACRO_END
 
 #define ulock_ownership_clear(ul) \
     MACRO_BEGIN \
-    thread_act_t _th_act; \
-    _th_act = (ul)->holder; \
-    if (_th_act->active) { \
-        act_lock(_th_act); \
-        remqueue(&_th_act->held_ulocks, \
+    thread_t th; \
+    th = (ul)->holder; \
+    if (th->active) { \
+        thread_mtx_lock(th); \
+        remqueue(&th->held_ulocks, \
             (queue_entry_t) (ul)); \
-        act_unlock(_th_act); \
+        thread_mtx_unlock(th); \
     } else { \
-        remqueue(&_th_act->held_ulocks, \
+        remqueue(&th->held_ulocks, \
             (queue_entry_t) (ul)); \
     } \
-    (ul)->holder = THR_ACT_NULL; \
+    (ul)->holder = THREAD_NULL; \
     MACRO_END
 
 /*
@@ -97,10 +104,10 @@ MACRO_END
 
 
 unsigned int lock_set_event;
-#define LOCK_SET_EVENT ((event_t)&lock_set_event)
+#define LOCK_SET_EVENT ((event64_t)&lock_set_event)
 
 unsigned int lock_set_handoff;
-#define LOCK_SET_HANDOFF ((event_t)&lock_set_handoff)
+#define LOCK_SET_HANDOFF ((event64_t)&lock_set_handoff)
 
 /*
  *  ROUTINE: lock_set_init [private]
@@ -131,7 +138,7 @@ lock_set_create (
 {
     lock_set_t lock_set = LOCK_SET_NULL;
     ulock_t ulock;
-    int size;
+    vm_size_t size;
     int x;
 
     *new_lock_set = LOCK_SET_NULL;
@@ -139,6 +146,9 @@ lock_set_create (
     if (task == TASK_NULL || n_ulocks <= 0 || policy > SYNC_POLICY_MAX)
        return KERN_INVALID_ARGUMENT;
 
+    if (VM_MAX_ADDRESS/sizeof(struct ulock) - sizeof(struct lock_set) < (unsigned)n_ulocks)
+       return KERN_RESOURCE_SHORTAGE;
+
     size = sizeof(struct lock_set) + (sizeof(struct ulock) * (n_ulocks-1));
     lock_set = (lock_set_t) kalloc (size);
 
@@ -172,7 +182,7 @@ lock_set_create (
        ulock = (ulock_t) &lock_set->ulock_list[x];
        ulock_lock_init(ulock);
        ulock->lock_set = lock_set;
-       ulock->holder = THR_ACT_NULL;
+       ulock->holder = THREAD_NULL;
        ulock->blocked = FALSE;
        ulock->unstable = FALSE;
        ulock->ho_wait = FALSE;
@@ -201,7 +211,6 @@ lock_set_create (
 kern_return_t
 lock_set_destroy (task_t task, lock_set_t lock_set)
 {
-    thread_t thread;
     ulock_t ulock;
     int i;
 
@@ -240,7 +249,7 @@ lock_set_destroy (task_t task, lock_set_t lock_set)
 
        if (ulock->accept_wait) {
            ulock->accept_wait = FALSE;
-           wait_queue_wakeup_one(&ulock->wait_queue,
+           wait_queue_wakeup64_one(&ulock->wait_queue,
                          LOCK_SET_HANDOFF,
                          THREAD_RESTART);
        }
@@ -248,13 +257,13 @@ lock_set_destroy (task_t task, lock_set_t lock_set)
        if (ulock->holder) {
            if (ulock->blocked) {
                ulock->blocked = FALSE;
-               wait_queue_wakeup_all(&ulock->wait_queue,
+               wait_queue_wakeup64_all(&ulock->wait_queue,
                              LOCK_SET_EVENT,
                              THREAD_RESTART);
            }
            if (ulock->ho_wait) {
                ulock->ho_wait = FALSE;
-               wait_queue_wakeup_one(&ulock->wait_queue,
+               wait_queue_wakeup64_one(&ulock->wait_queue,
                              LOCK_SET_HANDOFF,
                              THREAD_RESTART);
            }
@@ -306,27 +315,25 @@ lock_acquire (lock_set_t lock_set, int lock_id)
     *  Block the current thread if the lock is already held.
     */
 
-   if (ulock->holder != THR_ACT_NULL) {
+   if (ulock->holder != THREAD_NULL) {
        int wait_result;
 
-       lock_set_unlock(lock_set);
-
-       if (ulock->holder == current_act()) {
+       if (ulock->holder == current_thread()) {
            ulock_unlock(ulock);
            return KERN_LOCK_OWNED_SELF;
        }
 
        ulock->blocked = TRUE;
-       wait_queue_assert_wait(&ulock->wait_queue,
+       wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
                       LOCK_SET_EVENT,
-                      THREAD_ABORTSAFE);
+                      THREAD_ABORTSAFE, 0);
        ulock_unlock(ulock);
 
        /*
         *  Block - Wait for lock to become available.
         */
-
-       wait_result = thread_block((void (*)(void))0);
+       if (wait_result == THREAD_WAITING)
+           wait_result = thread_block(THREAD_CONTINUE_NULL);
 
        /*
         *  Check the result status:
@@ -374,7 +381,7 @@ lock_release (lock_set_t lock_set, int lock_id)
 
    ulock = (ulock_t) &lock_set->ulock_list[lock_id];
 
-   return (lock_release_internal(ulock, current_act()));
+   return (ulock_release_internal(ulock, current_thread()));
 }
 
 kern_return_t
@@ -407,10 +414,10 @@ lock_try (lock_set_t lock_set, int lock_id)
     *  whether it already holds the lock or another thread does.
     */
 
-   if (ulock->holder != THR_ACT_NULL) {
+   if (ulock->holder != THREAD_NULL) {
        lock_set_unlock(lock_set);
 
-       if (ulock->holder == current_act()) {
+       if (ulock->holder == current_thread()) {
            ulock_unlock(ulock);
            return KERN_LOCK_OWNED_SELF;
        }
@@ -452,7 +459,7 @@ lock_make_stable (lock_set_t lock_set, int lock_id)
    ulock_lock(ulock);
    lock_set_unlock(lock_set);
 
-   if (ulock->holder != current_act()) {
+   if (ulock->holder != current_thread()) {
        ulock_unlock(ulock);
        return KERN_INVALID_RIGHT;
    }
@@ -473,11 +480,10 @@ lock_make_stable (lock_set_t lock_set, int lock_id)
  *  KERN_LOCK_UNSTABLE status, until the lock is made stable again.
  */
 kern_return_t
-lock_make_unstable (ulock_t ulock, thread_act_t thr_act)
+lock_make_unstable (ulock_t ulock, thread_t thread)
 {
    lock_set_t lock_set;
 
-
    lock_set = ulock->lock_set;
    lock_set_lock(lock_set);
    if (!lock_set->active) {
@@ -488,7 +494,7 @@ lock_make_unstable (ulock_t ulock, thread_act_t thr_act)
    ulock_lock(ulock);
    lock_set_unlock(lock_set);
 
-   if (ulock->holder != thr_act) {
+   if (ulock->holder != thread) {
        ulock_unlock(ulock);
        return KERN_INVALID_RIGHT;
    }
@@ -500,18 +506,16 @@ lock_make_unstable (ulock_t ulock, thread_act_t thr_act)
 }
 
 /*
- *  ROUTINE: lock_release_internal [internal]
+ *  ROUTINE: ulock_release_internal [internal]
  *
  *  Releases the ulock.
  *  If any threads are blocked waiting for the ulock, one is woken-up.
  *
  */
 kern_return_t
-lock_release_internal (ulock_t ulock, thread_act_t thr_act)
+ulock_release_internal (ulock_t ulock, thread_t thread)
 {
    lock_set_t lock_set;
-   int result;
-
    if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
        return KERN_INVALID_ARGUMENT;
 
@@ -524,9 +528,8 @@ lock_release_internal (ulock_t ulock, thread_act_t thr_act)
    ulock_lock(ulock);
    lock_set_unlock(lock_set);
 
-   if (ulock->holder != thr_act) {
+   if (ulock->holder != thread) {
        ulock_unlock(ulock);
-       lock_set_unlock(lock_set);
        return KERN_INVALID_RIGHT;
    }
 
@@ -537,18 +540,18 @@ lock_release_internal (ulock_t ulock, thread_act_t thr_act)
     */
    if (ulock->blocked) {
        wait_queue_t wq = &ulock->wait_queue;
-       thread_t thread;
+       thread_t wqthread;
        spl_t s;
 
        s = splsched();
        wait_queue_lock(wq);
-       thread = wait_queue_wakeup_identity_locked(wq,
+       wqthread = wait_queue_wakeup64_identity_locked(wq,
                           LOCK_SET_EVENT,
                           THREAD_AWAKENED,
                           TRUE);
        /* wait_queue now unlocked, thread locked */
 
-       if (thread != THREAD_NULL) {
+       if (wqthread != THREAD_NULL) {
            /*
             * JMM - These ownership transfer macros have a
             * locking/race problem.  To keep the thread from
@@ -559,7 +562,7 @@ lock_release_internal (ulock_t ulock, thread_act_t thr_act)
             * Since this code was already broken before I got
             * here, I will leave it for now.
             */
-           thread_unlock(thread);
+           thread_unlock(wqthread);
            splx(s);
 
            /*
@@ -567,7 +570,7 @@ lock_release_internal (ulock_t ulock, thread_act_t thr_act)
             * from the current thread to the acquisition thread.
             */
            ulock_ownership_clear(ulock);
-           ulock_ownership_set(ulock, thread);
+           ulock_ownership_set(ulock, wqthread);
            ulock_unlock(ulock);
 
            return KERN_SUCCESS;
@@ -611,9 +614,8 @@ lock_handoff (lock_set_t lock_set, int lock_id)
    ulock_lock(ulock);
    lock_set_unlock(lock_set);
 
-   if (ulock->holder != current_act()) {
+   if (ulock->holder != current_thread()) {
        ulock_unlock(ulock);
-       lock_set_unlock(lock_set);
        return KERN_INVALID_RIGHT;
    }
 
@@ -633,7 +635,7 @@ lock_handoff (lock_set_t lock_set, int lock_id)
         */
        s = splsched();
        wait_queue_lock(wq);
-       thread = wait_queue_wakeup_identity_locked(
+       thread = wait_queue_wakeup64_identity_locked(
                    wq,
                    LOCK_SET_HANDOFF,
                    THREAD_AWAKENED,
@@ -650,7 +652,8 @@ lock_handoff (lock_set_t lock_set, int lock_id)
             * changing states on us (nullifying the ownership
             * assignment) we need to keep the thread locked
             * during the assignment.  But we can't because the
-            * macros take an activation lock, which is a mutex.
+            * macros take a thread mutex lock.
+            *
             * Since this code was already broken before I got
             * here, I will leave it for now.
             */
@@ -681,13 +684,13 @@ lock_handoff (lock_set_t lock_set, int lock_id)
     *  for an accepting thread.
     */
    ulock->ho_wait = TRUE;
-   wait_queue_assert_wait(&ulock->wait_queue,
+   wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
                   LOCK_SET_HANDOFF,
-                  THREAD_ABORTSAFE);
+                  THREAD_ABORTSAFE, 0);
 
    ulock_unlock(ulock);
-   ETAP_SET_REASON(current_thread(), BLOCKED_ON_LOCK_HANDOFF);
-   wait_result = thread_block((void (*)(void))0);
+   if (wait_result == THREAD_WAITING)
+       wait_result = thread_block(THREAD_CONTINUE_NULL);
 
    /*
     *  If the thread was woken-up via some action other than
@@ -701,17 +704,17 @@ lock_handoff (lock_set_t lock_set, int lock_id)
 
    case THREAD_INTERRUPTED:
        ulock_lock(ulock);
-       assert(ulock->holder == current_act());
+       assert(ulock->holder == current_thread());
        ulock->ho_wait = FALSE;
        ulock_unlock(ulock);
        return KERN_ABORTED;
 
    case THREAD_RESTART:
        goto retry;
-
-   default:
-       panic("lock_handoff");
    }
+
+   panic("lock_handoff");
+   return KERN_FAILURE;
 }
 
 kern_return_t
@@ -747,7 +750,7 @@ lock_handoff_accept (lock_set_t lock_set, int lock_id)
        return KERN_ALREADY_WAITING;
    }
 
-   if (ulock->holder == current_act()) {
+   if (ulock->holder == current_thread()) {
        ulock_unlock(ulock);
        return KERN_LOCK_OWNED_SELF;
    }
@@ -759,17 +762,15 @@ lock_handoff_accept (lock_set_t lock_set, int lock_id)
     */
    if (ulock->ho_wait) {
        wait_queue_t wq = &ulock->wait_queue;
-       thread_t thread;
 
        /*
        *  See who the lucky devil is, if he is still there waiting.
        */
-       assert(ulock->holder != THR_ACT_NULL);
-       thread = ulock->holder->thread;
+       assert(ulock->holder != THREAD_NULL);
 
-       if (wait_queue_wakeup_thread(wq,
+       if (wait_queue_wakeup64_thread(wq,
                   LOCK_SET_HANDOFF,
-                  thread,
+                  ulock->holder,
                   THREAD_AWAKENED) == KERN_SUCCESS) {
            /*
             * Holder thread was still waiting to give it
@@ -792,13 +793,13 @@ lock_handoff_accept (lock_set_t lock_set, int lock_id)
    }
 
    ulock->accept_wait = TRUE;
-   wait_queue_assert_wait(&ulock->wait_queue,
+   wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
                   LOCK_SET_HANDOFF,
-                  THREAD_ABORTSAFE);
+                  THREAD_ABORTSAFE, 0);
 
    ulock_unlock(ulock);
-   ETAP_SET_REASON(current_thread(), BLOCKED_ON_LOCK_HANDOFF);
-   wait_result = thread_block((void (*)(void))0);
+   if (wait_result == THREAD_WAITING)
+       wait_result = thread_block(THREAD_CONTINUE_NULL);
 
    /*
    *  If the thread was woken-up via some action other than
@@ -818,10 +819,10 @@ lock_handoff_accept (lock_set_t lock_set, int lock_id)
 
    case THREAD_RESTART:
        goto retry;
-
-   default:
-       panic("lock_handoff_accept");
    }
+
+   panic("lock_handoff_accept");
+   return KERN_FAILURE;
 }
 
 /*
@@ -857,6 +858,19 @@ lock_set_dereference(lock_set_t lock_set)
    if (ref_count == 0) {
        size = sizeof(struct lock_set) +
            (sizeof(struct ulock) * (lock_set->n_ulocks - 1));
-       kfree((vm_offset_t) lock_set, size);
+       kfree(lock_set, size);
+   }
+}
+
+void
+ulock_release_all(
+   thread_t        thread)
+{
+   ulock_t     ulock;
+
+   while (!queue_empty(&thread->held_ulocks)) {
+       ulock = (ulock_t)queue_first(&thread->held_ulocks);
+       lock_make_unstable(ulock, thread);
+       ulock_release_internal(ulock, thread);
    }
 }
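
The most common conversion in the hunks above replaces the old unconditional wait_queue_assert_wait()/thread_block() pairing with wait_queue_assert_wait64(), whose return value gates the call to thread_block(). A minimal sketch of the new pattern, condensed from the lock_acquire() hunk (surrounding locking and result handling elided):

    /* Assert the wait first; only block if the assert actually queued us. */
    int wait_result;

    ulock->blocked = TRUE;
    wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
                                           LOCK_SET_EVENT,
                                           THREAD_ABORTSAFE, 0);
    ulock_unlock(ulock);

    if (wait_result == THREAD_WAITING)
        wait_result = thread_block(THREAD_CONTINUE_NULL);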
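The bounds check added to lock_set_create() keeps the later size computation, sizeof(struct lock_set) + sizeof(struct ulock) * (n_ulocks - 1), from overflowing before it reaches kalloc(). The standalone sketch below shows the same idea in portable C; the SIZE_MAX bound and the function name are illustrative assumptions, not the kernel's actual VM_MAX_ADDRESS-based check:

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative overflow guard, mirroring the intent of the
     * lock_set_create() check: reject counts whose array size would
     * overflow header + elem * (n - 1). */
    static int
    ulock_array_size_fits(size_t header, size_t elem, unsigned int n)
    {
        return n != 0 && (size_t)(n - 1) <= (SIZE_MAX - header) / elem;
    }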
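The new ulock_release_all() at the end of the diff walks the held_ulocks queue that the reworked ownership macros now maintain under the thread mutex, marking each ulock unstable and then releasing it. A hypothetical caller on a thread-teardown path might look like the sketch below; the wrapper name and call site are illustrative only and are not part of this diff:

    /* Illustrative only: drop any ulocks a terminating thread still holds. */
    static void
    thread_drop_held_ulocks(thread_t thread)
    {
        if (!queue_empty(&thread->held_ulocks))
            ulock_release_all(thread);
    }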