*
* @APPLE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
#include <kern/etap_macros.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
+#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/xpr.h>
#include <kern/debug.h>
#ifdef __ppc__
#include <ppc/Firmware.h>
-#include <ppc/POWERMAC/mp/MPPlugIn.h>
#endif
+#include <sys/kdebug.h>
+
#define ANY_LOCK_DEBUG (USLOCK_DEBUG || LOCK_DEBUG || MUTEX_DEBUG)
/*
usimple_lock_t l,
etap_event_t event)
{
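+	/*
+	 *	When MACHINE_SIMPLE_LOCK is defined, the
+	 *	machine-dependent layer supplies the simple
+	 *	lock primitives; defer to it here and in the
+	 *	lock, unlock and try operations below.
+	 */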
+#ifndef MACHINE_SIMPLE_LOCK
USLDBG(usld_lock_init(l, event));
ETAPCALL(etap_simplelock_init((l),(event)));
hw_lock_init(&l->interlock);
+#else
+ simple_lock_init((simple_lock_t)l,event);
+#endif
}
usimple_lock(
usimple_lock_t l)
{
+#ifndef MACHINE_SIMPLE_LOCK
int i;
pc_t pc;
#if ETAP_LOCK_TRACE
ETAP_TIME_CLEAR(start_wait_time);
#endif /* ETAP_LOCK_TRACE */
-#ifdef __ppc__
- if(!hw_lock_to(&l->interlock, LockTimeOut)) { /* Try to get the lock with a timeout */
-
+ if(!hw_lock_to(&l->interlock, LockTimeOut)) /* Try to get the lock with a timeout */
panic("simple lock deadlock detection - l=%08X, cpu=%d, ret=%08X", l, cpu_number(), pc);
-#else /* __ppc__ */
- while (!hw_lock_try(&l->interlock)) {
- ETAPCALL(if (no_miss_info++ == 0)
- start_wait_time = etap_simplelock_miss(l));
- while (hw_lock_held(&l->interlock)) {
- /*
- * Spin watching the lock value in cache,
- * without consuming external bus cycles.
- * On most SMP architectures, the atomic
- * instruction(s) used by hw_lock_try
- * cost much, much more than an ordinary
- * memory read.
- */
-#if USLOCK_DEBUG
- if (count++ > max_lock_loops
-#if MACH_KDB && NCPUS > 1
- && l != &kdb_lock
-#endif /* MACH_KDB && NCPUS > 1 */
- ) {
- if (l == &printf_lock) {
- return;
- }
- mp_disable_preemption();
- panic("simple lock deadlock detection - l=%08X (=%08X), cpu=%d, ret=%08X",
- l, *hw_lock_addr(l->interlock), cpu_number(), pc);
- count = 0;
- mp_enable_preemption();
- }
-#endif /* USLOCK_DEBUG */
- }
-#endif /* 0 */
- }
ETAPCALL(etap_simplelock_hold(l, pc, start_wait_time));
USLDBG(usld_lock_post(l, pc));
+#else
+ simple_lock((simple_lock_t)l);
+#endif
}
usimple_unlock(
usimple_lock_t l)
{
+#ifndef MACHINE_SIMPLE_LOCK
pc_t pc;
// checkNMI(); /* (TEST/DEBUG) */
OBTAIN_PC(pc, l);
USLDBG(usld_unlock(l, pc));
ETAPCALL(etap_simplelock_unlock(l));
+#ifdef __ppc__
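+	/* Complete all prior stores before the lock word is released. */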
+ sync();
+#endif
hw_lock_unlock(&l->interlock);
+#else
+ simple_unlock_rwmb((simple_lock_t)l);
+#endif
}
usimple_lock_try(
usimple_lock_t l)
{
+#ifndef MACHINE_SIMPLE_LOCK
pc_t pc;
unsigned int success;
etap_time_t zero_time;
ETAPCALL(etap_simplelock_hold(l, pc, zero_time));
}
return success;
+#else
+ return(simple_lock_try((simple_lock_t)l));
+#endif
}
#if ETAP_LOCK_TRACE
ETAP_SET_REASON(current_thread(),
BLOCKED_ON_COMPLEX_LOCK);
thread_sleep_simple_lock((event_t) l,
- simple_lock_addr(l->interlock), FALSE);
- simple_lock(&l->interlock);
+ simple_lock_addr(l->interlock),
+ THREAD_UNINT);
+ /* interlock relocked */
}
}
l->want_write = TRUE;
ETAP_SET_REASON(current_thread(),
BLOCKED_ON_COMPLEX_LOCK);
thread_sleep_simple_lock((event_t) l,
- simple_lock_addr(l->interlock), FALSE);
- simple_lock(&l->interlock);
+ simple_lock_addr(l->interlock),
+ THREAD_UNINT);
+ /* interlock relocked */
}
}
if (l->can_sleep && (l->want_write || l->want_upgrade)) {
l->waiting = TRUE;
thread_sleep_simple_lock((event_t) l,
- simple_lock_addr(l->interlock), FALSE);
- simple_lock(&l->interlock);
+ simple_lock_addr(l->interlock),
+ THREAD_UNINT);
+ /* interlock relocked */
}
}
if (l->can_sleep && l->read_count != 0) {
l->waiting = TRUE;
thread_sleep_simple_lock((event_t) l,
- simple_lock_addr(l->interlock), FALSE);
- simple_lock(&l->interlock);
+ simple_lock_addr(l->interlock),
+ THREAD_UNINT);
+ /* interlock relocked */
}
}
kfree((vm_offset_t)m, sizeof(mutex_t));
}
-
/*
- * mutex_lock_wait: Invoked if the assembler routine mutex_lock () fails
- * because the mutex is already held by another thread. Called with the
- * interlock locked and returns with the interlock unlocked.
+ * mutex_lock_wait
+ *
+ * Invoked to wait for the mutex when it is contended.
+ *
+ * Called with the interlock locked and
+ * returns it unlocked.
*/
-
void
mutex_lock_wait (
- mutex_t * m)
+ mutex_t *mutex,
+ thread_t holder)
{
- m->waiters++;
- ETAP_SET_REASON(current_thread(), BLOCKED_ON_MUTEX_LOCK);
- thread_sleep_interlock ((event_t) m, &m->interlock, THREAD_UNINT);
+ thread_t self = current_thread();
+#if !defined(i386)
+ integer_t priority;
+ spl_t s = splsched();
+
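+	/*
+	 *	Compute the promotion priority: the waiter's
+	 *	effective priority, clamped into the
+	 *	[BASEPRI_DEFAULT, MINPRI_KERNEL] band.
+	 */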
+ priority = self->sched_pri;
+ if (priority < self->priority)
+ priority = self->priority;
+ if (priority > MINPRI_KERNEL)
+ priority = MINPRI_KERNEL;
+ else
+ if (priority < BASEPRI_DEFAULT)
+ priority = BASEPRI_DEFAULT;
+
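+	/*
+	 *	Promote the holder to at least that priority
+	 *	so it can run and release the mutex.
+	 */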
+ assert(holder->thread == holder); /* XXX */
+ thread_lock(holder);
+ if (mutex->promoted_pri == 0)
+ holder->promotions++;
+ if (holder->priority < MINPRI_KERNEL) {
+ holder->sched_mode |= TH_MODE_PROMOTED;
+ if ( mutex->promoted_pri < priority &&
+ holder->sched_pri < priority ) {
+ KERNEL_DEBUG_CONSTANT(
+ MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
+ holder->sched_pri, priority, (int)holder, (int)mutex, 0);
+
+ set_sched_pri(holder, priority);
+ }
+ }
+ thread_unlock(holder);
+ splx(s);
+
+ if (mutex->promoted_pri < priority)
+ mutex->promoted_pri = priority;
+#endif
+
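+	/*
+	 *	Record the mutex as a pending promoter for
+	 *	this thread, counting ourselves as a waiter
+	 *	only once per mutex.
+	 */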
+ if (self->pending_promoter[self->pending_promoter_index] == NULL) {
+ self->pending_promoter[self->pending_promoter_index] = mutex;
+ mutex->waiters++;
+ }
+ else
+ if (self->pending_promoter[self->pending_promoter_index] != mutex) {
+ self->pending_promoter[++self->pending_promoter_index] = mutex;
+ mutex->waiters++;
+ }
+
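+	/* Assert the wait, drop the interlock and block. */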
+ assert_wait(mutex, THREAD_UNINT);
+ interlock_unlock(&mutex->interlock);
+
+ thread_block(THREAD_CONTINUE_NULL);
}
/*
- * mutex_unlock_wakeup: Invoked if the assembler routine mutex_unlock ()
- * fails because there are thread(s) waiting for this mutex. Called and
- * returns with the interlock locked.
+ * mutex_lock_acquire
+ *
+ * Invoked on acquiring the mutex when there is
+ * contention.
+ *
+ * Returns the current number of waiters.
+ *
+ * Called with the interlock locked.
*/
+int
+mutex_lock_acquire(
+ mutex_t *mutex)
+{
+ thread_t thread = current_thread();
+
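+	/*
+	 *	The mutex is ours: retire our pending promoter
+	 *	entry and the waiter count that went with it.
+	 */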
+ if (thread->pending_promoter[thread->pending_promoter_index] == mutex) {
+ thread->pending_promoter[thread->pending_promoter_index] = NULL;
+ if (thread->pending_promoter_index > 0)
+ thread->pending_promoter_index--;
+ mutex->waiters--;
+ }
+
+#if !defined(i386)
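+	/*
+	 *	If waiters remain, adopt the promotion at the
+	 *	mutex's promoted priority; otherwise let the
+	 *	promotion lapse.
+	 */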
+ if (mutex->waiters > 0) {
+ integer_t priority = mutex->promoted_pri;
+ spl_t s = splsched();
+
+ thread_lock(thread);
+ thread->promotions++;
+ if (thread->priority < MINPRI_KERNEL) {
+ thread->sched_mode |= TH_MODE_PROMOTED;
+ if (thread->sched_pri < priority) {
+ KERNEL_DEBUG_CONSTANT(
+ MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
+ thread->sched_pri, priority, 0, (int)mutex, 0);
+
+ set_sched_pri(thread, priority);
+ }
+ }
+ thread_unlock(thread);
+ splx(s);
+ }
+ else
+ mutex->promoted_pri = 0;
+#endif
+
+ return (mutex->waiters);
+}
+/*
+ * mutex_unlock_wakeup
+ *
+ * Invoked on unlock when there is contention.
+ *
+ * Called with the interlock locked.
+ */
void
mutex_unlock_wakeup (
- mutex_t * m)
+ mutex_t *mutex,
+ thread_t holder)
+{
+#if !defined(i386)
+ thread_t thread = current_thread();
+
+ if (thread->top_act != holder)
+ panic("mutex_unlock_wakeup: mutex %x holder %x\n", mutex, holder);
+
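+	/*
+	 *	Drop one promotion; if it was our last, clear
+	 *	TH_MODE_PROMOTED and recompute (or depress)
+	 *	our scheduled priority.
+	 */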
+ if (thread->promotions > 0) {
+ spl_t s = splsched();
+
+ thread_lock(thread);
+ if ( --thread->promotions == 0 &&
+ (thread->sched_mode & TH_MODE_PROMOTED) ) {
+ thread->sched_mode &= ~TH_MODE_PROMOTED;
+ if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
+ KERNEL_DEBUG_CONSTANT(
+ MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE,
+ thread->sched_pri, DEPRESSPRI, 0, (int)mutex, 0);
+
+ set_sched_pri(thread, DEPRESSPRI);
+ }
+ else {
+ if (thread->priority < thread->sched_pri) {
+ KERNEL_DEBUG_CONSTANT(
+ MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) |
+ DBG_FUNC_NONE,
+ thread->sched_pri, thread->priority,
+ 0, (int)mutex, 0);
+ }
+
+ compute_priority(thread, FALSE);
+ }
+ }
+ thread_unlock(thread);
+ splx(s);
+ }
+#endif
+
+ assert(mutex->waiters > 0);
+ thread_wakeup_one(mutex);
+}
+
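+/*
+ * mutex_preblock_wait
+ *
+ * Invoked before blocking to assert the wait
+ * and promote the holder, using only try-locks.
+ *
+ * Returns FALSE if a needed lock could not be
+ * acquired without blocking.
+ */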
+boolean_t
+mutex_preblock_wait(
+ mutex_t *mutex,
+ thread_t thread,
+ thread_t holder)
{
- assert(m->waiters);
- m->waiters--;
- thread_wakeup_one ((event_t) m);
+ wait_result_t wresult;
+ integer_t priority;
+ wait_queue_t wq;
+
+ assert(holder == NULL || holder->thread == holder);
+
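+	/*
+	 *	Only try-locks are used here; if the wait
+	 *	queue or the holder's thread lock is busy,
+	 *	fail so the caller can retry.
+	 */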
+ wq = wait_event_wait_queue((event_t)mutex);
+ if (!wait_queue_lock_try(wq))
+ return (FALSE);
+
+ if (holder != NULL && !thread_lock_try(holder)) {
+ wait_queue_unlock(wq);
+ return (FALSE);
+ }
+
+ wresult = wait_queue_assert_wait64_locked(wq, (uint32_t)mutex,
+ THREAD_UNINT, thread);
+ wait_queue_unlock(wq);
+ assert(wresult == THREAD_WAITING);
+
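+	/*
+	 *	Clamp the waiter's effective priority into
+	 *	the kernel band, as in mutex_lock_wait.
+	 */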
+ priority = thread->sched_pri;
+ if (priority < thread->priority)
+ priority = thread->priority;
+ if (priority > MINPRI_KERNEL)
+ priority = MINPRI_KERNEL;
+ else
+ if (priority < BASEPRI_DEFAULT)
+ priority = BASEPRI_DEFAULT;
+
+ if (holder != NULL) {
+ if (mutex->promoted_pri == 0)
+ holder->promotions++;
+ if (holder->priority < MINPRI_KERNEL) {
+ holder->sched_mode |= TH_MODE_PROMOTED;
+ if ( mutex->promoted_pri < priority &&
+ holder->sched_pri < priority ) {
+ KERNEL_DEBUG_CONSTANT(
+ MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
+ holder->sched_pri, priority,
+ (int)holder, (int)mutex, 0);
+
+ set_sched_pri(holder, priority);
+ }
+ }
+ thread_unlock(holder);
+ }
+
+ if (mutex->promoted_pri < priority)
+ mutex->promoted_pri = priority;
+
+ if (thread->pending_promoter[thread->pending_promoter_index] == NULL) {
+ thread->pending_promoter[thread->pending_promoter_index] = mutex;
+ mutex->waiters++;
+ }
+ else
+ if (thread->pending_promoter[thread->pending_promoter_index] != mutex) {
+ thread->pending_promoter[++thread->pending_promoter_index] = mutex;
+ mutex->waiters++;
+ }
+
+ KERNEL_DEBUG_CONSTANT(
+ MACHDBG_CODE(DBG_MACH_SCHED,MACH_PREBLOCK_MUTEX) | DBG_FUNC_NONE,
+ (int)thread, thread->sched_pri, (int)mutex, 0, 0);
+
+ return (TRUE);
}
/*
void
mutex_pause(void)
{
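+	/*
+	 *	Back off briefly: a short, uninterruptible
+	 *	timed wait lets the mutex holder run and
+	 *	release the lock.
+	 */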
-	int wait_result;
-	assert_wait_timeout( 1, THREAD_INTERRUPTIBLE);
+	wait_result_t wait_result;
+
+	wait_result = assert_wait_timeout( 1, THREAD_UNINT);
+	assert(wait_result == THREAD_WAITING);
ETAP_SET_REASON(current_thread(), BLOCKED_ON_MUTEX_LOCK);
- wait_result = thread_block((void (*)(void))0);
- if (wait_result != THREAD_TIMED_OUT)
- thread_cancel_timer();
+
+ wait_result = thread_block(THREAD_CONTINUE_NULL);
+ assert(wait_result == THREAD_TIMED_OUT);
}
#if MACH_KDB