/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
-#ifndef _KERN_WAIT_QUEUE_H_
-#define _KERN_WAIT_QUEUE_H_
-#include <sys/appleapiopts.h>
+#ifdef KERNEL_PRIVATE
-#ifdef __APPLE_API_PRIVATE
+#ifndef _KERN_WAIT_QUEUE_H_
+#define _KERN_WAIT_QUEUE_H_
+#include <mach/mach_types.h>
#include <mach/sync_policy.h>
#include <mach/kern_return.h> /* for kern_return_t */
#include <kern/kern_types.h> /* for wait_queue_t */
-#ifdef MACH_KERNEL_PRIVATE
+#include <sys/cdefs.h>
+
+#ifdef MACH_KERNEL_PRIVATE
#include <kern/lock.h>
#include <kern/queue.h>
+#include <mach/branch_predicates.h>
-
+#include <machine/cpu_number.h>
+#include <machine/machine_routines.h> /* machine_timeout_suspended() */
/*
* wait_queue_t
* This is the definition of the common event wait queue
unsigned int /* flags */
/* boolean_t */ wq_type:16, /* only public field */
wq_fifo:1, /* fifo wakeup policy? */
- wq_isprepost:1, /* is waitq preposted? set only */
+ wq_prepost:1, /* waitq supports prepost? set only */
:0; /* force to long boundary */
hw_lock_data_t wq_interlock; /* interlock */
queue_head_t wq_queue; /* queue of elements */
+/*
+ * wait_queue_set_t
+ * A wait queue that other wait queues can be linked into.  This change
+ * replaces the old prepost refcount with an explicit queue of preposted
+ * links (wqs_preposts), walked via the wql_* macros below.
+ */
typedef struct wait_queue_set {
WaitQueue wqs_wait_queue; /* our wait queue */
queue_head_t wqs_setlinks; /* links from set perspective */
- unsigned int wqs_refcount; /* refcount for preposting */
+ queue_head_t wqs_preposts; /* preposted links */
} WaitQueueSet;
#define wqs_type wqs_wait_queue.wq_type
#define wqs_fifo wqs_wait_queue.wq_fifo
-#define wqs_isprepost wqs_wait_queue.wq_isprepost
+#define wqs_prepost wqs_wait_queue.wq_prepost
#define wqs_queue wqs_wait_queue.wq_queue
/*
* with that port may wake up any thread from any of those portsets,
* or one that was waiting locally on the port itself.
*/
+/*
+ * wait_queue_link_t
+ * Link object joining one wait queue (master) to one wait queue set.
+ * NOTE(review): struct tag renamed to _wait_queue_link — presumably to
+ * avoid colliding with the wait_queue_link() function declared below;
+ * confirm against the matching wait_queue.c change.
+ */
-typedef struct wait_queue_link {
+typedef struct _wait_queue_link {
WaitQueueElement wql_element; /* element on master */
queue_chain_t wql_setlinks; /* element on set */
+ queue_chain_t wql_preposts; /* element on set prepost list */
wait_queue_set_t wql_setqueue; /* set queue */
} WaitQueueLink;
(((wq)->wq_type & ~1) == _WAIT_QUEUE_inited)
#define wait_queue_empty(wq) (queue_empty(&(wq)->wq_queue))
+
#define wait_queue_held(wq) (hw_lock_held(&(wq)->wq_interlock))
#define wait_queue_lock_try(wq) (hw_lock_try(&(wq)->wq_interlock))
+/* For x86, the hardware timeout is in TSC units. */
+#if defined(i386) || defined(x86_64)
+#define hwLockTimeOut LockTimeOutTSC
+#else
+#define hwLockTimeOut LockTimeOut
+#endif
/*
* Double the standard lock timeout, because wait queues tend
* to iterate over a number of threads - locking each. If there is
* a problem with a thread lock, it normally times out at the wait
* queue level first, hiding the real problem.
*/
-#define wait_queue_lock(wq) \
- ((void) (!hw_lock_to(&(wq)->wq_interlock, LockTimeOut * 2) ? \
- panic("wait queue deadlock - wq=0x%x, cpu=%d\n", \
- wq, cpu_number()) : 0))
-#define wait_queue_unlock(wq) \
- (assert(wait_queue_held(wq)), hw_lock_unlock(&(wq)->wq_interlock))
+/*
+ * Acquire a wait queue's interlock, spinning with a hardware timeout
+ * (2x the normal lock timeout -- see comment above).  If the timeout
+ * expires while machine_timeout_suspended() reports that timeouts are
+ * suspended (e.g. under debugger control), keep retrying instead of
+ * panicking; otherwise panic, so a wedged queue is diagnosed here
+ * rather than masquerading as a thread-lock timeout.
+ */
+static inline void wait_queue_lock(wait_queue_t wq) {
+ if (__improbable(hw_lock_to(&(wq)->wq_interlock, hwLockTimeOut * 2) == 0)) {
+ boolean_t wql_acquired = FALSE;
+
+ while (machine_timeout_suspended()) {
+#if defined(__i386__) || defined(__x86_64__)
+/*
+ * i386/x86_64 return with preemption disabled on a timeout for
+ * diagnostic purposes.
+ */
+ mp_enable_preemption();
+#endif
+ if ((wql_acquired = hw_lock_to(&(wq)->wq_interlock, hwLockTimeOut * 2)))
+ break;
+ }
+ if (wql_acquired == FALSE)
+ panic("wait queue deadlock - wq=%p, cpu=%d\n", wq, cpu_number());
+ }
+}
+
+/*
+ * Release a wait queue interlock previously taken by wait_queue_lock().
+ * Asserts the interlock is actually held before dropping it.
+ */
+static inline void wait_queue_unlock(wait_queue_t wq) {
+ assert(wait_queue_held(wq));
+ hw_lock_unlock(&(wq)->wq_interlock);
+}
#define wqs_lock(wqs) wait_queue_lock(&(wqs)->wqs_wait_queue)
#define wqs_unlock(wqs) wait_queue_unlock(&(wqs)->wqs_wait_queue)
#define wqs_lock_try(wqs) wait_queue__try_lock(&(wqs)->wqs_wait_queue)
+/*
+ * NOTE(review): wqs_lock_try above expands to wait_queue__try_lock,
+ * which is not defined in this header (only wait_queue_lock_try is) --
+ * confirm whether this macro is dead code or a long-standing misspelling.
+ */
+/* set has at least one preposted link pending? */
+#define wqs_is_preposted(wqs) ((wqs)->wqs_prepost && !queue_empty(&(wqs)->wqs_preposts))
+
+/* link membership on the prepost list is flagged by non-NULL next */
+#define wql_is_preposted(wql) ((wql)->wql_preposts.next != NULL)
+#define wql_clear_prepost(wql) ((wql)->wql_preposts.next = (wql)->wql_preposts.prev = NULL)
#define wait_queue_assert_possible(thread) \
((thread)->wait_queue == WAIT_QUEUE_NULL)
+/* bootstrap interface - can allocate/link wait_queues and sets after calling this */
+__private_extern__ void wait_queue_bootstrap(void);
+
/******** Decomposed interfaces (to build higher level constructs) ***********/
/* assert intent to wait on a locked wait queue */
wait_queue_t wait_queue,
event64_t wait_event,
wait_interrupt_t interruptible,
- boolean_t unlock);
+ uint64_t deadline,
+ thread_t thread);
-/* peek to see which thread would be chosen for a wakeup - but keep on queue */
-__private_extern__ void wait_queue_peek64_locked(
- wait_queue_t wait_queue,
- event64_t event,
- thread_t *thread,
- wait_queue_t *found_queue);
-
-/* peek to see which thread would be chosen for a wakeup - but keep on queue */
+/* pull a thread from its wait queue */
__private_extern__ void wait_queue_pull_thread_locked(
wait_queue_t wait_queue,
thread_t thread,
wait_result_t result,
boolean_t unlock);
-#endif /* MACH_KERNEL_PRIVATE */
+__private_extern__ uint32_t num_wait_queues;
+__private_extern__ struct wait_queue *wait_queues;
+/* The Jenkins "one at a time" hash.
+ * TBD: There may be some value to unrolling here,
+ * depending on the architecture.
+ */
+/*
+ * Mixes exactly sizeof(char *) bytes starting at 'key' into a 32-bit
+ * value.  Used by wait_hash() below, which passes (char *)&event, so
+ * this hashes the raw bytes of a pointer-sized event value, then the
+ * caller reduces it modulo num_wait_queues.
+ * NOTE(review): plain 'char' may be signed, so hash += key[i] can
+ * sign-extend individual bytes; the result is still deterministic on a
+ * given platform, but confirm cross-arch hash agreement is not assumed.
+ */
+static inline uint32_t wq_hash(char *key)
+{
+ uint32_t hash = 0;
+ size_t i, length = sizeof(char *);
+
+ for (i = 0; i < length; i++) {
+ hash += key[i];
+ hash += (hash << 10);
+ hash ^= (hash >> 6);
+ }
+
+ /* final avalanche of the one-at-a-time hash */
+ hash += (hash << 3);
+ hash ^= (hash >> 11);
+ hash += (hash << 15);
+
+ return hash;
+}
+
+/* TBD: It should be possible to eliminate the divide here */
+#define wait_hash(event) \
+ (wq_hash((char *)&event) % (num_wait_queues))
+
+#endif /* MACH_KERNEL_PRIVATE */
+
+__BEGIN_DECLS
-#ifdef __APPLE_API_UNSTABLE
/******** Semi-Public interfaces (not a part of a higher construct) ************/
+extern unsigned int wait_queue_set_size(void);
+extern unsigned int wait_queue_link_size(void);
+
extern kern_return_t wait_queue_init(
wait_queue_t wait_queue,
int policy);
extern wait_queue_set_t wait_queue_set_alloc(
int policy);
+extern kern_return_t wait_queue_set_init(
+ wait_queue_set_t set_queue,
+ int policy);
+
extern kern_return_t wait_queue_set_free(
wait_queue_set_t set_queue);
extern kern_return_t wait_queue_link_free(
wait_queue_link_t link_element);
-#endif /* __APPLE_API_UNSTABLE */
-
-#ifdef __APPLE_API_EVOLVING
-
-extern wait_queue_t wait_queue_alloc(
- int policy);
+extern kern_return_t wait_queue_link(
+ wait_queue_t wait_queue,
+ wait_queue_set_t set_queue);
-extern kern_return_t wait_queue_free(
- wait_queue_t wait_queue);
+extern kern_return_t wait_queue_link_noalloc(
+ wait_queue_t wait_queue,
+ wait_queue_set_t set_queue,
+ wait_queue_link_t link);
-extern kern_return_t wait_queue_link(
+extern boolean_t wait_queue_member(
wait_queue_t wait_queue,
wait_queue_set_t set_queue);
extern kern_return_t wait_queue_set_unlink_all(
wait_queue_set_t set_queue);
+#ifdef XNU_KERNEL_PRIVATE
+extern kern_return_t wait_queue_set_unlink_one(
+ wait_queue_set_t set_queue,
+ wait_queue_link_t link);
+
+extern wait_queue_link_t wait_queue_link_allocate(void);
+
+#endif /* XNU_KERNEL_PRIVATE */
+
+/* legacy API */
+kern_return_t wait_queue_sub_init(
+ wait_queue_set_t set_queue,
+ int policy);
+
+kern_return_t wait_queue_sub_clearrefs(
+ wait_queue_set_t wq_set);
+
+extern kern_return_t wait_subqueue_unlink_all(
+ wait_queue_set_t set_queue);
+
+extern wait_queue_t wait_queue_alloc(
+ int policy);
+
+extern kern_return_t wait_queue_free(
+ wait_queue_t wait_queue);
+
/* assert intent to wait on <wait_queue,event64> pair */
extern wait_result_t wait_queue_assert_wait64(
wait_queue_t wait_queue,
event64_t wait_event,
- wait_interrupt_t interruptible);
+ wait_interrupt_t interruptible,
+ uint64_t deadline);
/* wakeup the most appropriate thread waiting on <wait_queue,event64> pair */
extern kern_return_t wait_queue_wakeup64_one(
thread_t thread,
wait_result_t result);
-#endif /* __APPLE_API_EVOLVING */
-
/*
* Compatibility Wait Queue APIs based on pointer events instead of 64bit
* integer events.
extern wait_result_t wait_queue_assert_wait(
wait_queue_t wait_queue,
event_t wait_event,
- wait_interrupt_t interruptible);
+ wait_interrupt_t interruptible,
+ uint64_t deadline);
/* wakeup the most appropriate thread waiting on <wait_queue,event> pair */
extern kern_return_t wait_queue_wakeup_one(
wait_queue_t wait_queue,
event_t wake_event,
- wait_result_t result);
+ wait_result_t result,
+ int priority);
/* wakeup all the threads waiting on <wait_queue,event> pair */
extern kern_return_t wait_queue_wakeup_all(
thread_t thread,
wait_result_t result);
-#endif /* __APPLE_API_PRIVATE */
+__END_DECLS
+
+#endif /* _KERN_WAIT_QUEUE_H_ */
-#endif /* _KERN_WAIT_QUEUE_H_ */
+#endif /* KERNEL_PRIVATE */