/*
* Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR
 * NON-INFRINGEMENT.  Please see the License for the specific language
 * governing rights and limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
#ifdef KERNEL_PRIVATE
* with that port may wake up any thread from any of those portsets,
* or one that was waiting locally on the port itself.
*/
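/*
 * Illustrative sketch (not part of this change): a wait queue is joined to
 * one or more sets with wait_queue_link(), which allocates one of the link
 * structures declared below per membership.  Names follow the declarations
 * in this header; error handling is omitted.
 */
#if 0	/* example only -- not compiled */
	wait_queue_t		port_wq;	/* the port's own wait queue */
	wait_queue_set_t	set_a, set_b;	/* portset wait queues */

	wait_queue_link(port_wq, set_a);
	wait_queue_link(port_wq, set_b);
	/* a wakeup posted on port_wq may now satisfy a waiter found through
	 * set_a, set_b, or a thread waiting on port_wq directly */
#endif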
-typedef struct wait_queue_link {
+typedef struct _wait_queue_link {
WaitQueueElement wql_element; /* element on master */
queue_chain_t wql_setlinks; /* element on set */
wait_queue_set_t wql_setqueue; /* set queue */
(((wq)->wq_type & ~1) == _WAIT_QUEUE_inited)
#define wait_queue_empty(wq) (queue_empty(&(wq)->wq_queue))
+
#define wait_queue_held(wq) (hw_lock_held(&(wq)->wq_interlock))
#define wait_queue_lock_try(wq) (hw_lock_try(&(wq)->wq_interlock))
+/* For x86, the hardware timeout is in TSC units. */
+#if defined(i386)
+#define hwLockTimeOut LockTimeOutTSC
+#else
+#define hwLockTimeOut LockTimeOut
+#endif
/*
* Double the standard lock timeout, because wait queues tend
* to iterate over a number of threads - locking each. If there is
* a problem with a thread lock, it normally times out at the wait
* queue level first, hiding the real problem.
*/
-#define wait_queue_lock(wq) \
- ((void) (!hw_lock_to(&(wq)->wq_interlock, LockTimeOut * 2) ? \
- panic("wait queue deadlock - wq=0x%x, cpu=%d\n", \
- wq, cpu_number()) : 0))
-#define wait_queue_unlock(wq) \
- (assert(wait_queue_held(wq)), hw_lock_unlock(&(wq)->wq_interlock))
+static inline void wait_queue_lock(wait_queue_t wq) {
+ if (!hw_lock_to(&(wq)->wq_interlock, hwLockTimeOut * 2))
+		panic("wait queue deadlock - wq=%p, cpu=%d\n", wq, cpu_number());
+}
+
+static inline void wait_queue_unlock(wait_queue_t wq) {
+ assert(wait_queue_held(wq));
+#if defined(__i386__)
+ /* DRK: On certain x86 systems, this spinlock is susceptible to
+ * lock starvation. Hence use an unlock variant which performs
+ * a cacheline flush to minimize cache affinity on acquisition.
+ */
+ i386_lock_unlock_with_flush(&(wq)->wq_interlock);
+#else
+ hw_lock_unlock(&(wq)->wq_interlock);
+#endif
+}
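
/*
 * Illustrative sketch (not part of this change), showing the pattern the
 * doubled timeout above accounts for: a wakeup walks the queue with the
 * wait queue interlock held, taking each waiter's thread lock in turn.
 * Names follow the declarations in this header and kern/queue.h; this is a
 * simplified example, not the actual wakeup implementation.
 */
#if 0	/* example only -- not compiled */
	wait_queue_element_t	wq_element;
	thread_t		t;

	wait_queue_lock(wq);			/* bounded by hwLockTimeOut * 2 */
	queue_iterate(&wq->wq_queue, wq_element, wait_queue_element_t, wqe_links) {
		/* the real code also skips WAIT_QUEUE_LINK elements here */
		t = (thread_t) wq_element;
		thread_lock(t);			/* per-thread lock taken while wq is held */
		/* ... decide whether this waiter matches the event ... */
		thread_unlock(t);
	}
	wait_queue_unlock(wq);
#endif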
#define wqs_lock(wqs) wait_queue_lock(&(wqs)->wqs_wait_queue)
#define wqs_unlock(wqs) wait_queue_unlock(&(wqs)->wqs_wait_queue)
uint64_t deadline,
thread_t thread);
-/* peek to see which thread would be chosen for a wakeup - but keep on queue */
-__private_extern__ void wait_queue_peek64_locked(
- wait_queue_t wait_queue,
- event64_t event,
- thread_t *thread,
- wait_queue_t *found_queue);
-
-/* peek to see which thread would be chosen for a wakeup - but keep on queue */
+/* pull a thread from its wait queue */
__private_extern__ void wait_queue_pull_thread_locked(
wait_queue_t wait_queue,
thread_t thread,