/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
+
+#ifdef KERNEL_PRIVATE
+
#ifndef _KERN_WAIT_QUEUE_H_
#define _KERN_WAIT_QUEUE_H_
-#include <kern/kern_types.h> /* for wait_queue_t */
+#include <mach/mach_types.h>
#include <mach/sync_policy.h>
#include <mach/kern_return.h> /* for kern_return_t */
+#include <kern/kern_types.h> /* for wait_queue_t */
+
+#include <sys/cdefs.h>
+
+#ifdef MACH_KERNEL_PRIVATE
#include <kern/lock.h>
#include <kern/queue.h>
+#include <mach/branch_predicates.h>
+#include <machine/cpu_number.h>
+#include <machine/machine_routines.h> /* machine_timeout_suspended() */
/*
* wait_queue_t
* This is the definition of the common event wait queue
* NOTE: Hardware locks are used to protect event wait
* queues since interrupt code is free to post events to
* them.
- * WARNING: Cannot change this data structure without updating SIZEOF_WAITQUEUE
*/
typedef struct wait_queue {
+ unsigned int /* flags */
+ /* boolean_t */ wq_type:16, /* only public field */
+ wq_fifo:1, /* fifo wakeup policy? */
+ wq_prepost:1, /* waitq supports prepost? set only */
+ :0; /* force to long boundary */
hw_lock_data_t wq_interlock; /* interlock */
- unsigned int /* flags */
- /* boolean_t */ wq_fifo:1, /* fifo wakeup policy? */
- wq_issub:1, /* is waitq linked? */
- wq_isprepost:1, /* is waitq preposted? sub only */
- :0; /* force to long boundary */
- queue_head_t wq_queue; /* queue of elements */
+ queue_head_t wq_queue; /* queue of elements */
} WaitQueue;
-#define SIZEOF_WAITQUEUE 16 /* 16 bytes for wq */
-#define SIZEOF_WAITQUEUE_SUB 28 /* 24 byets for wqs */
-#define SIZEOF_WAITQUEUE_ELEMENT 16 /* 16 byets per wqe */
-#define SIZEOF_WAITQUEUE_LINK 28 /* 28 byets per wqe */
-
-#ifdef MACH_KERNEL_PRIVATE
-
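
An illustrative sketch of typical consumption (not from this header: my_obj is
hypothetical; wait_queue_init() and the SYNC_POLICY_* constants are declared
later in this file and in <mach/sync_policy.h>):

        struct my_obj {
                struct wait_queue mo_waitq;     /* threads block on this object */
                int               mo_state;
        };

        /* the embedded queue must be initialized before any thread waits */
        kern_return_t kr = wait_queue_init(&obj->mo_waitq, SYNC_POLICY_FIFO);
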
/*
- * wait_queue_sub_t
- * This is the common definition for a subordinate wait queue.
+ * wait_queue_set_t
+ * This is the common definition for a set wait queue.
* These can be linked as members/elements of multiple regular
* wait queues. They have an additional set of linkages to
* identify the linkage structures that point to them.
- * WARNING: Cannot change this data structure without updating SIZEOF_WAITQUEUE_SUB
*/
-typedef struct wait_queue_sub {
- WaitQueue wqs_wait_queue; /* our wait queue */
- queue_head_t wqs_sublinks; /* links from sub perspective */
- unsigned int wqs_refcount; /* refcount for preposting */
-} WaitQueueSub;
-
-
-#define WAIT_QUEUE_SUB_NULL ((wait_queue_sub_t)0)
+typedef struct wait_queue_set {
+ WaitQueue wqs_wait_queue; /* our wait queue */
+ queue_head_t wqs_setlinks; /* links from set perspective */
+ queue_head_t wqs_preposts; /* preposted links */
+} WaitQueueSet;
+#define wqs_type wqs_wait_queue.wq_type
+#define wqs_fifo wqs_wait_queue.wq_fifo
+#define wqs_prepost wqs_wait_queue.wq_prepost
+#define wqs_queue wqs_wait_queue.wq_queue
/*
 * wait_queue_element_t
 * This structure describes the elements on an event wait
 * queue. It is the common first fields in a thread shuttle
* and wait_queue_link_t. In that way, a wait queue can
* consist of both thread shuttle elements and links off of
- * to other (subordinate) wait queues.
+ * to other (set) wait queues.
*
- * WARNING: The first three fields of the thread shuttle
- * definition does not use this definition yet. Any change in
+ * WARNING: These fields correspond to fields in the thread
+ * shuttle (run queue links and run queue pointer). Any change in
* the layout here will have to be matched with a change there.
- * WARNING: Cannot change this data structure without updating SIZEOF_WAITQUEUE_ELEMENT
*/
typedef struct wait_queue_element {
queue_chain_t wqe_links; /* link of elements on this queue */
+ void * wqe_type; /* Identifies link vs. thread */
wait_queue_t wqe_queue; /* queue this element is on */
- event_t wqe_event; /* event this element is waiting for */
-} *wait_queue_element_t;
+} WaitQueueElement;
+typedef WaitQueueElement *wait_queue_element_t;
/*
* wait_queue_link_t
- * Specialized wait queue element type for linking subordinate
+ * Specialized wait queue element type for linking set
* event waits queues onto a wait queue. In this way, an event
* can be constructed so that any thread waiting on any number
 * of associated wait queues can handle the event, while letting
 * the thread only be linked on the single wait queue it blocked on.
 *
 * One use: ports in multiple portsets. Each thread is queued up
 * on the portset that it specifically blocked on during a receive
 * operation. Each port's event queue links in all the portset
 * event queues of which it is a member. An IPC event post associated
* with that port may wake up any thread from any of those portsets,
* or one that was waiting locally on the port itself.
- * WARNING: Cannot change this data structure without updating SIZEOF_WAITQUEUE_LINK
*/
-typedef struct wait_queue_link {
- struct wait_queue_element wql_element; /* element on master */
- queue_chain_t wql_sublinks; /* element on sub */
- wait_queue_sub_t wql_subqueue; /* sub queue */
+typedef struct _wait_queue_link {
+ WaitQueueElement wql_element; /* element on master */
+ queue_chain_t wql_setlinks; /* element on set */
+ queue_chain_t wql_preposts; /* element on set prepost list */
+ wait_queue_set_t wql_setqueue; /* set queue */
} WaitQueueLink;
-
-#define WAIT_QUEUE_LINK_NULL ((wait_queue_link_t)0)
-
#define wql_links wql_element.wqe_links
+#define wql_type wql_element.wqe_type
#define wql_queue wql_element.wqe_queue
-#define wql_event wql_element.wqe_event
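
Each link is thus threaded onto two lists at once - the master queue (via
wql_links) and the owning set (via wql_setlinks) - so either side can find
and dismantle the association. A field-level sketch (illustrative):

        wait_queue_link_t wql = wait_queue_link_alloc(SYNC_POLICY_FIFO);

        /* after wait_queue_link_noalloc(wq, wqs, wql):             */
        /*   wql->wql_queue    == wq   - master queue it sits on    */
        /*   wql->wql_setqueue == wqs  - set it points back to      */
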
-#define wait_queue_empty(wq) (queue_empty(&(wq)->wq_queue))
+#define _WAIT_QUEUE_inited 0xf1d0
+#define _WAIT_QUEUE_SET_inited 0xf1d1
-#define wait_queue_held(wq) (hw_lock_held(&(wq)->wq_interlock))
+#define wait_queue_is_queue(wq) \
+ ((wq)->wq_type == _WAIT_QUEUE_inited)
-#define wait_queue_is_sub(wqs) ((wqs)->wqs_wait_queue.wq_issub)
-#define wqs_lock(wqs) wait_queue_lock(&(wqs)->wqs_wait_queue)
-#define wqs_unlock(wqs) wait_queue_unlock(&(wqs)->wqs_wait_queue)
-#define wqs_lock_try(wqs) wait_queue__try_lock(&(wqs)->wqs_wait_queue)
+#define wait_queue_is_set(wqs) \
+ ((wqs)->wqs_type == _WAIT_QUEUE_SET_inited)
-extern int wait_queue_subordinate;
-#define WAIT_QUEUE_SUBORDINATE &_wait_queue_subordinate
+#define wait_queue_is_valid(wq) \
+ (((wq)->wq_type & ~1) == _WAIT_QUEUE_inited)
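
The two tags differ only in bit 0 (0xf1d0 vs. 0xf1d1), which is why
wait_queue_is_valid() can accept either kind by masking that bit off. A
minimal sketch of the intended checks (illustrative only):

        assert(wait_queue_is_valid(wq));        /* initialized queue or set */
        if (wait_queue_is_queue(wq))
                wait_queue_lock(wq);            /* plain queue: lock directly */
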
-extern void wait_queue_init(
- wait_queue_t wait_queue,
- int policy);
+#define wait_queue_empty(wq) (queue_empty(&(wq)->wq_queue))
-extern kern_return_t wait_queue_link(
- wait_queue_t wait_queue,
- wait_queue_sub_t subordinate_queue);
+#define wait_queue_held(wq) (hw_lock_held(&(wq)->wq_interlock))
+#define wait_queue_lock_try(wq) (hw_lock_try(&(wq)->wq_interlock))
-extern kern_return_t wait_queue_unlink(
- wait_queue_t wait_queue,
- wait_queue_sub_t subordinate_queue);
-extern void wait_queue_unlink_one(
- wait_queue_t wait_queue,
- wait_queue_sub_t *subordinate_queue_pointer);
+/* For x86, the hardware timeout is in TSC units. */
+#if defined(__i386__) || defined(__x86_64__)
+#define hwLockTimeOut LockTimeOutTSC
+#else
+#define hwLockTimeOut LockTimeOut
+#endif
+/*
+ * Double the standard lock timeout, because wait queues tend
+ * to iterate over a number of threads - locking each. If there is
+ * a problem with a thread lock, it normally times out at the wait
+ * queue level first, hiding the real problem.
+ */
-extern boolean_t wait_queue_member_queue(
- wait_queue_t wait_queue,
- wait_queue_sub_t subordinate_queue);
+static inline void wait_queue_lock(wait_queue_t wq) {
+ if (__improbable(hw_lock_to(&(wq)->wq_interlock, hwLockTimeOut * 2) == 0)) {
+ boolean_t wql_acquired = FALSE;
-extern kern_return_t clear_wait_queue_internal(
- thread_t thread,
- int result);
+ while (machine_timeout_suspended()) {
+#if defined(__i386__) || defined(__x86_64__)
+/*
+ * i386/x86_64 return with preemption disabled on a timeout for
+ * diagnostic purposes.
+ */
+ mp_enable_preemption();
+#endif
+ if ((wql_acquired = hw_lock_to(&(wq)->wq_interlock, hwLockTimeOut * 2)))
+ break;
+ }
+ if (wql_acquired == FALSE)
+ panic("wait queue deadlock - wq=%p, cpu=%d\n", wq, cpu_number());
+ }
+}
+
+static inline void wait_queue_unlock(wait_queue_t wq) {
+ assert(wait_queue_held(wq));
+ hw_lock_unlock(&(wq)->wq_interlock);
+}
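
A sketch of the expected discipline around these (illustrative): the
interlock is a spinning hardware lock, so the critical section must stay
short and must not block.

        wait_queue_lock(wq);                    /* spins; panics on deadlock */
        if (!wait_queue_empty(wq)) {
                /* examine/unlink elements here - no blocking calls */
        }
        wait_queue_unlock(wq);
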
-extern kern_return_t wait_queue_remove(
- thread_t thread);
+#define wqs_lock(wqs) wait_queue_lock(&(wqs)->wqs_wait_queue)
+#define wqs_unlock(wqs) wait_queue_unlock(&(wqs)->wqs_wait_queue)
+#define wqs_lock_try(wqs) wait_queue_lock_try(&(wqs)->wqs_wait_queue)
+#define wqs_is_preposted(wqs) ((wqs)->wqs_prepost && !queue_empty(&(wqs)->wqs_preposts))
+
+#define wql_is_preposted(wql) ((wql)->wql_preposts.next != NULL)
+#define wql_clear_prepost(wql) ((wql)->wql_preposts.next = (wql)->wql_preposts.prev = NULL)
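
Prepost membership is encoded in the chain pointers themselves - a NULL next
pointer marks "not on any prepost list" - so no separate flag bit is needed.
A consumption sketch (illustrative; queue_remove() is the kern/queue.h macro):

        if (wql_is_preposted(wql)) {
                queue_remove(&wqs->wqs_preposts, wql,
                             wait_queue_link_t, wql_preposts);
                wql_clear_prepost(wql);         /* restore the NULL marker */
        }
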
#define wait_queue_assert_possible(thread) \
((thread)->wait_queue == WAIT_QUEUE_NULL)
-
+/* bootstrap interface - can allocate/link wait_queues and sets after calling this */
+__private_extern__ void wait_queue_bootstrap(void);
/******** Decomposed interfaces (to build higher level constructs) ***********/
-extern void wait_queue_lock(
- wait_queue_t wait_queue);
-
-extern void wait_queue_unlock(
- wait_queue_t wait_queue);
-
-extern boolean_t wait_queue_lock_try(
- wait_queue_t wait_queue);
-
/* assert intent to wait on a locked wait queue */
-extern boolean_t wait_queue_assert_wait_locked(
+__private_extern__ wait_result_t wait_queue_assert_wait64_locked(
wait_queue_t wait_queue,
- event_t wait_event,
- int interruptible,
- boolean_t unlock);
-
-/* peek to see which thread would be chosen for a wakeup - but keep on queue */
-extern void wait_queue_peek_locked(
- wait_queue_t wait_queue,
- event_t event,
- thread_t *thread,
- wait_queue_t *found_queue);
+ event64_t wait_event,
+ wait_interrupt_t interruptible,
+ uint64_t deadline,
+ thread_t thread);
-/* peek to see which thread would be chosen for a wakeup - but keep on queue */
-extern void wait_queue_pull_thread_locked(
+/* pull a thread from its wait queue */
+__private_extern__ void wait_queue_pull_thread_locked(
wait_queue_t wait_queue,
thread_t thread,
boolean_t unlock);
/* wakeup all threads waiting for a particular event on locked queue */
-extern kern_return_t wait_queue_wakeup_one_locked(
+__private_extern__ kern_return_t wait_queue_wakeup64_all_locked(
wait_queue_t wait_queue,
- event_t wake_event,
- int result,
+ event64_t wake_event,
+ wait_result_t result,
boolean_t unlock);
/* wakeup one thread waiting for a particular event on locked queue */
-extern kern_return_t wait_queue_wakeup_one_locked(
+__private_extern__ kern_return_t wait_queue_wakeup64_one_locked(
wait_queue_t wait_queue,
- event_t wake_event,
- int result,
+ event64_t wake_event,
+ wait_result_t result,
boolean_t unlock);
-/* return the identity of a thread that is waiting for <wait_queue, event> */
-extern thread_t wait_queue_recommend_locked(
- wait_queue_t wait_queue,
- event_t wake_event);
-
/* return identity of a thread awakened for a particular <wait_queue,event> */
-extern thread_t wait_queue_wakeup_identity_locked(
+__private_extern__ thread_t wait_queue_wakeup64_identity_locked(
wait_queue_t wait_queue,
- event_t wake_event,
- int result,
+ event64_t wake_event,
+ wait_result_t result,
boolean_t unlock);
/* wakeup thread iff its still waiting for a particular event on locked queue */
-extern kern_return_t wait_queue_wakeup_thread_locked(
+__private_extern__ kern_return_t wait_queue_wakeup64_thread_locked(
wait_queue_t wait_queue,
- event_t wake_event,
+ event64_t wake_event,
thread_t thread,
- int result,
+ wait_result_t result,
boolean_t unlock);
-#endif /* MACH_KERNEL_PRIVATE */
+__private_extern__ uint32_t num_wait_queues;
+__private_extern__ struct wait_queue *wait_queues;
+/* The Jenkins "one at a time" hash.
+ * TBD: There may be some value to unrolling here,
+ * depending on the architecture.
+ */
+static inline uint32_t wq_hash(char *key)
+{
+ uint32_t hash = 0;
+ size_t i, length = sizeof(char *);
+
+ for (i = 0; i < length; i++) {
+ hash += key[i];
+ hash += (hash << 10);
+ hash ^= (hash >> 6);
+ }
+
+ hash += (hash << 3);
+ hash ^= (hash >> 11);
+ hash += (hash << 15);
+
+ return hash;
+}
+
+/* TBD: It should be possible to eliminate the divide here */
+#define wait_hash(event) \
+ (wq_hash((char *)&event) % (num_wait_queues))
+
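Note that only the pointer bytes are mixed (length is sizeof(char *));
wait_hash() then reduces the hash to an index into the global wait_queues[]
table. A hypothetical bucket-lookup helper (illustrative; the real callers
live in the scheduler):

        static wait_queue_t
        wait_event_to_queue(event_t event)
        {
                return &wait_queues[wait_hash(event)];
        }
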
+#endif /* MACH_KERNEL_PRIVATE */
+
+__BEGIN_DECLS
+
+/******** Semi-Public interfaces (not a part of a higher construct) ************/
+
+extern unsigned int wait_queue_set_size(void);
+extern unsigned int wait_queue_link_size(void);
+
+extern kern_return_t wait_queue_init(
+ wait_queue_t wait_queue,
+ int policy);
+
+extern wait_queue_set_t wait_queue_set_alloc(
+ int policy);
+
+extern kern_return_t wait_queue_set_init(
+ wait_queue_set_t set_queue,
+ int policy);
+
+extern kern_return_t wait_queue_set_free(
+ wait_queue_set_t set_queue);
+
+extern wait_queue_link_t wait_queue_link_alloc(
+ int policy);
+
+extern kern_return_t wait_queue_link_free(
+ wait_queue_link_t link_element);
+
+extern kern_return_t wait_queue_link(
+ wait_queue_t wait_queue,
+ wait_queue_set_t set_queue);
+
+extern kern_return_t wait_queue_link_noalloc(
+ wait_queue_t wait_queue,
+ wait_queue_set_t set_queue,
+ wait_queue_link_t link);
+
+extern boolean_t wait_queue_member(
+ wait_queue_t wait_queue,
+ wait_queue_set_t set_queue);
+
+extern kern_return_t wait_queue_unlink(
+ wait_queue_t wait_queue,
+ wait_queue_set_t set_queue);
+
+extern kern_return_t wait_queue_unlink_all(
+ wait_queue_t wait_queue);
+
+extern kern_return_t wait_queue_set_unlink_all(
+ wait_queue_set_t set_queue);
+
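A typical composition, sketched under the assumption that the caller checks
every kern_return_t (illustrative; port_waitq is a hypothetical member queue):

        wait_queue_set_t set;
        kern_return_t kr;

        set = wait_queue_set_alloc(SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST);
        kr = wait_queue_link(port_waitq, set);  /* queue joins the set */

        /* set now sees events posted to port_waitq */

        kr = wait_queue_unlink(port_waitq, set);
        kr = wait_queue_set_free(set);
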
+#ifdef XNU_KERNEL_PRIVATE
+extern kern_return_t wait_queue_set_unlink_one(
+ wait_queue_set_t set_queue,
+ wait_queue_link_t link);
+
+extern wait_queue_link_t wait_queue_link_allocate(void);
+
+#endif /* XNU_KERNEL_PRIVATE */
+
+/* legacy API */
+kern_return_t wait_queue_sub_init(
+ wait_queue_set_t set_queue,
+ int policy);
+
+kern_return_t wait_queue_sub_clearrefs(
+ wait_queue_set_t wq_set);
+
+extern kern_return_t wait_subqueue_unlink_all(
+ wait_queue_set_t set_queue);
extern wait_queue_t wait_queue_alloc(
- int policy);
+ int policy);
-extern void wait_queue_free(
+extern kern_return_t wait_queue_free(
wait_queue_t wait_queue);
-/******** Standalone interfaces (not a part of a higher construct) ************/
+/* assert intent to wait on <wait_queue,event64> pair */
+extern wait_result_t wait_queue_assert_wait64(
+ wait_queue_t wait_queue,
+ event64_t wait_event,
+ wait_interrupt_t interruptible,
+ uint64_t deadline);
+
+/* wakeup the most appropriate thread waiting on <wait_queue,event64> pair */
+extern kern_return_t wait_queue_wakeup64_one(
+ wait_queue_t wait_queue,
+ event64_t wake_event,
+ wait_result_t result);
+
+/* wakeup all the threads waiting on <wait_queue,event64> pair */
+extern kern_return_t wait_queue_wakeup64_all(
+ wait_queue_t wait_queue,
+ event64_t wake_event,
+ wait_result_t result);
+
+/* wakeup a specified thread waiting iff waiting on <wait_queue,event64> pair */
+extern kern_return_t wait_queue_wakeup64_thread(
+ wait_queue_t wait_queue,
+ event64_t wake_event,
+ thread_t thread,
+ wait_result_t result);
+
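Waiting is two-phase - assert intent, then block - while the waker posts the
matching <queue, event64> pair. A hedged sketch (thread_block(), THREAD_UNINT,
THREAD_WAITING and THREAD_AWAKENED come from the wider scheduling interface,
assumed here; MY_EVENT is hypothetical):

        /* waiter */
        wait_result_t wr = wait_queue_assert_wait64(wq, MY_EVENT,
                                                    THREAD_UNINT, 0);
        if (wr == THREAD_WAITING)
                wr = thread_block(THREAD_CONTINUE_NULL);

        /* waker */
        (void) wait_queue_wakeup64_one(wq, MY_EVENT, THREAD_AWAKENED);
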
+/*
+ * Compatibility Wait Queue APIs based on pointer events instead of 64bit
+ * integer events.
+ */
/* assert intent to wait on <wait_queue,event> pair */
-extern boolean_t wait_queue_assert_wait(
+extern wait_result_t wait_queue_assert_wait(
wait_queue_t wait_queue,
event_t wait_event,
- int interruptible);
+ wait_interrupt_t interruptible,
+ uint64_t deadline);
/* wakeup the most appropriate thread waiting on <wait_queue,event> pair */
-extern kern_return_t wait_queue_wakeup_one(
+extern kern_return_t wait_queue_wakeup_one(
wait_queue_t wait_queue,
event_t wake_event,
- int result);
+ wait_result_t result,
+ int priority);
/* wakeup all the threads waiting on <wait_queue,event> pair */
-extern kern_return_t wait_queue_wakeup_all(
+extern kern_return_t wait_queue_wakeup_all(
wait_queue_t wait_queue,
event_t wake_event,
- int result);
+ wait_result_t result);
/* wakeup a specified thread waiting iff waiting on <wait_queue,event> pair */
-extern kern_return_t wait_queue_wakeup_thread(
+extern kern_return_t wait_queue_wakeup_thread(
wait_queue_t wait_queue,
event_t wake_event,
thread_t thread,
- int result);
+ wait_result_t result);
+
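An event_t is conventionally the kernel address of the object being waited
on; a minimal pairing sketch (illustrative, reusing the hypothetical mo_state
field from the earlier sketch):

        wait_queue_assert_wait(wq, (event_t)&obj->mo_state, THREAD_UNINT, 0);
        /* elsewhere, the producer: */
        wait_queue_wakeup_all(wq, (event_t)&obj->mo_state, THREAD_AWAKENED);
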
+__END_DECLS
+
+#endif /* _KERN_WAIT_QUEUE_H_ */
-#endif /* _KERN_WAIT_QUEUE_H_ */
+#endif /* KERNEL_PRIVATE */