/*
- * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
- *
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* Please see the License for the specific language governing rights and
* limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_FREE_COPYRIGHT@
#include <kern/wait_queue.h>
+/* forward declarations */
+static boolean_t wait_queue_member_locked(
+ wait_queue_t wq,
+ wait_queue_set_t wq_set);
+
+void wait_queue_unlink_one(
+ wait_queue_t wq,
+ wait_queue_set_t *wq_setp);
+
+kern_return_t wait_queue_set_unlink_all_nofree(
+ wait_queue_set_t wq_set);
+
/*
* Routine: wait_queue_init
* Purpose:
if (wq != WAIT_QUEUE_NULL) {
ret = wait_queue_init(wq, policy);
if (ret != KERN_SUCCESS) {
- kfree((vm_offset_t)wq, sizeof(struct wait_queue));
+ kfree(wq, sizeof(struct wait_queue));
wq = WAIT_QUEUE_NULL;
}
}
return KERN_INVALID_ARGUMENT;
if (!queue_empty(&wq->wq_queue))
return KERN_FAILURE;
- kfree((vm_offset_t)wq, sizeof(struct wait_queue));
+ kfree(wq, sizeof(struct wait_queue));
return KERN_SUCCESS;
}
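/*
 * A minimal sketch of the updated kfree() idiom used throughout this
 * change: the prototype is assumed to be
 *	void kfree(void *data, vm_size_t size);
 * (from <kern/kalloc.h>), so callers now pass the pointer directly
 * instead of casting through vm_offset_t.  Hypothetical caller:
 */
static void
kfree_usage_sketch(void)
{
	wait_queue_t wq = (wait_queue_t) kalloc(sizeof(struct wait_queue));

	if (wq != WAIT_QUEUE_NULL)
		kfree(wq, sizeof(struct wait_queue));	/* no cast needed now */
}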
return KERN_SUCCESS;
}
-/* legacy API */
+
kern_return_t
wait_queue_sub_init(
wait_queue_set_t wqset,
return wait_queue_set_init(wqset, policy);
}
+kern_return_t
+wait_queue_sub_clearrefs(
+ wait_queue_set_t wq_set)
+{
+ if (!wait_queue_is_set(wq_set))
+ return KERN_INVALID_ARGUMENT;
+
+ wqs_lock(wq_set);
+ wq_set->wqs_refcount = 0;
+ wqs_unlock(wq_set);
+ return KERN_SUCCESS;
+}
+
/*
* Routine: wait_queue_set_alloc
* Purpose:
ret = wait_queue_set_init(wq_set, policy);
if (ret != KERN_SUCCESS) {
- kfree((vm_offset_t)wq_set, sizeof(struct wait_queue_set));
+ kfree(wq_set, sizeof(struct wait_queue_set));
wq_set = WAIT_QUEUE_SET_NULL;
}
}
if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
return KERN_FAILURE;
- kfree((vm_offset_t)wq_set, sizeof(struct wait_queue_set));
+ kfree(wq_set, sizeof(struct wait_queue_set));
return KERN_SUCCESS;
}
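/*
 * A minimal lifecycle sketch for wait queue sets (hypothetical caller;
 * SYNC_POLICY_FIFO assumed as the allocation policy).  Note that
 * wait_queue_set_free() returns KERN_FAILURE while queues are still
 * linked to the set, and wait_queue_sub_clearrefs() only resets the
 * accumulated pre-post reference count.
 */
static void
set_lifecycle_sketch(void)
{
	wait_queue_set_t wq_set = wait_queue_set_alloc(SYNC_POLICY_FIFO);

	if (wq_set == WAIT_QUEUE_SET_NULL)
		return;

	(void) wait_queue_sub_clearrefs(wq_set);	/* drop collected refs */

	if (wait_queue_set_free(wq_set) != KERN_SUCCESS) {
		/* KERN_FAILURE: queues are still linked to this set */
	}
}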
-kern_return_t
-wait_queue_sub_clearrefs(
- wait_queue_set_t wq_set)
-{
- if (!wait_queue_is_set(wq_set))
- return KERN_INVALID_ARGUMENT;
-
- wqs_lock(wq_set);
- wq_set->wqs_refcount = 0;
- wqs_unlock(wq_set);
- return KERN_SUCCESS;
-}
/*
*
* The wait queue is locked
* The set queue is just that, a set queue
*/
-__private_extern__ boolean_t
+static boolean_t
wait_queue_member_locked(
wait_queue_t wq,
wait_queue_set_t wq_set)
wait_queue_link_t wql;
kern_return_t ret;
- wql = (wait_queue_link_t) kalloc(sizeof(struct wait_queue_link));
+ wql = (wait_queue_link_t) kalloc(sizeof(struct _wait_queue_link));
if (wql == WAIT_QUEUE_LINK_NULL)
return KERN_RESOURCE_SHORTAGE;
ret = wait_queue_link_noalloc(wq, wq_set, wql);
if (ret != KERN_SUCCESS)
- kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
+ kfree(wql, sizeof(struct _wait_queue_link));
return ret;
}
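/*
 * A sketch of the link/unlink pairing (hypothetical caller):
 * wait_queue_link() above allocates the link element itself (now
 * sized as struct _wait_queue_link) and frees it again if the noalloc
 * variant fails; wait_queue_unlink() releases it when the pairing is
 * torn down.
 */
static kern_return_t
link_usage_sketch(wait_queue_t wq, wait_queue_set_t wq_set)
{
	kern_return_t kr;

	kr = wait_queue_link(wq, wq_set);
	if (kr != KERN_SUCCESS)
		return kr;		/* e.g. KERN_RESOURCE_SHORTAGE */

	/* ... wakeups posted to wq are now also visible to the set ... */

	return wait_queue_unlink(wq, wq_set);
}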
wqs_unlock(wq_set);
wait_queue_unlock(wq);
splx(s);
- kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
+ kfree(wql, sizeof(struct _wait_queue_link));
return KERN_SUCCESS;
}
}
while (!queue_empty(links)) {
wql = (wait_queue_link_t) dequeue(links);
- kfree((vm_offset_t) wql, sizeof(struct wait_queue_link));
+ kfree(wql, sizeof(struct _wait_queue_link));
}
return(KERN_SUCCESS);
wait_queue_link_t wql;
wait_queue_t wq;
queue_t q;
- kern_return_t kret;
spl_t s;
if (!wait_queue_is_set(wq_set)) {
queue_t q;
queue_head_t links_queue_head;
queue_t links = &links_queue_head;
- kern_return_t kret;
spl_t s;
if (!wait_queue_is_set(wq_set)) {
while (!queue_empty (links)) {
wql = (wait_queue_link_t) dequeue(links);
- kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
+ kfree(wql, sizeof(struct _wait_queue_link));
}
return(KERN_SUCCESS);
}
wqs_unlock(wq_set);
wait_queue_unlock(wq);
splx(s);
- kfree((vm_offset_t)wql,sizeof(struct wait_queue_link));
+ kfree(wql, sizeof(struct _wait_queue_link));
*wq_setp = wq_set;
return;
}
wait_queue_t wq,
event64_t event,
wait_interrupt_t interruptible,
+ uint64_t deadline,
thread_t thread)
{
wait_result_t wait_result;
*/
wait_result = thread_mark_wait_locked(thread, interruptible);
if (wait_result == THREAD_WAITING) {
- if (thread->vm_privilege)
+ if (thread->options & TH_OPT_VMPRIV)
enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
else
enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);
+
thread->wait_event = event;
thread->wait_queue = wq;
+
+ if (deadline != 0) {
+ if (!timer_call_enter(&thread->wait_timer, deadline))
+ thread->wait_timer_active++;
+ thread->wait_timer_is_set = TRUE;
+ }
}
return(wait_result);
}
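/*
 * The old thread->vm_privilege boolean is now the TH_OPT_VMPRIV bit in
 * thread->options, hence the flag test above; a hypothetical helper
 * spelling out the same idiom (the caller-visible use of the new
 * deadline argument is sketched after the assert_wait wrappers below):
 */
static boolean_t
thread_is_vm_privileged_sketch(thread_t thread)
{
	return (thread->options & TH_OPT_VMPRIV) ? TRUE : FALSE;
}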
wait_queue_assert_wait(
wait_queue_t wq,
event_t event,
- wait_interrupt_t interruptible)
+ wait_interrupt_t interruptible,
+ uint64_t deadline)
{
spl_t s;
wait_result_t ret;
- thread_t cur_thread = current_thread();
+ thread_t thread = current_thread();
/* If it is an invalid wait queue, you can't wait on it */
- if (!wait_queue_is_valid(wq)) {
- thread_t thread = current_thread();
+ if (!wait_queue_is_valid(wq))
return (thread->wait_result = THREAD_RESTART);
- }
s = splsched();
wait_queue_lock(wq);
- thread_lock(cur_thread);
- ret = wait_queue_assert_wait64_locked(
- wq, (event64_t)((uint32_t)event),
- interruptible, cur_thread);
- thread_unlock(cur_thread);
+ thread_lock(thread);
+ ret = wait_queue_assert_wait64_locked(wq, (event64_t)((uint32_t)event),
+ interruptible, deadline, thread);
+ thread_unlock(thread);
wait_queue_unlock(wq);
splx(s);
return(ret);
wait_queue_assert_wait64(
wait_queue_t wq,
event64_t event,
- wait_interrupt_t interruptible)
+ wait_interrupt_t interruptible,
+ uint64_t deadline)
{
spl_t s;
wait_result_t ret;
- thread_t cur_thread = current_thread();
+ thread_t thread = current_thread();
/* If it is an invalid wait queue, you can't wait on it */
- if (!wait_queue_is_valid(wq)) {
- thread_t thread = current_thread();
+ if (!wait_queue_is_valid(wq))
return (thread->wait_result = THREAD_RESTART);
- }
s = splsched();
wait_queue_lock(wq);
- thread_lock(cur_thread);
- ret = wait_queue_assert_wait64_locked(wq, event, interruptible, cur_thread);
- thread_unlock(cur_thread);
+ thread_lock(thread);
+ ret = wait_queue_assert_wait64_locked(wq, event, interruptible, deadline, thread);
+ thread_unlock(thread);
wait_queue_unlock(wq);
splx(s);
return(ret);
}
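/*
 * A minimal sketch of the new deadline-aware wait API: callers convert
 * a relative timeout into an absolute deadline and pass it through;
 * deadline == 0 keeps the old wait-forever behaviour.  Assumes
 * clock_interval_to_deadline() and thread_block() from the usual kern
 * interfaces, and NSEC_PER_MSEC from <mach/clock_types.h>.
 */
static wait_result_t
timed_wait_sketch(wait_queue_t wq, event_t event)
{
	uint64_t deadline;
	wait_result_t wr;

	clock_interval_to_deadline(10, NSEC_PER_MSEC, &deadline);	/* ~10ms */

	wr = wait_queue_assert_wait(wq, event, THREAD_ABORTSAFE, deadline);
	if (wr == THREAD_WAITING)
		wr = thread_block(THREAD_CONTINUE_NULL);

	return wr;	/* THREAD_TIMED_OUT if the deadline fired */
}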
-
/*
* Routine: _wait_queue_select64_all
* Purpose:
queue_t q = &wake_queue_head;
kern_return_t res;
- assert(wait_queue_held(wq));
+// assert(wait_queue_held(wq));
+// if (!wq->wq_interlock.lock_data) { /* (BRINGUP) */
+// panic("wait_queue_wakeup64_all_locked: lock not held on %p\n", wq); /* (BRINGUP) */
+// }
+
queue_init(q);
/*
res = KERN_NOT_WAITING;
while (!queue_empty (q)) {
thread_t thread = (thread_t) dequeue(q);
- res = thread_go_locked(thread, result);
+ res = thread_go(thread, result);
assert(res == KERN_SUCCESS);
thread_unlock(thread);
}
s = splsched();
wait_queue_lock(wq);
+// if (!wq->wq_interlock.lock_data) { /* (BRINGUP) */
+// panic("wait_queue_wakeup_all: we did not get the lock on %p\n", wq); /* (BRINGUP) */
+// }
ret = wait_queue_wakeup64_all_locked(
wq, (event64_t)((uint32_t)event),
result, TRUE);
* the event we are posting to this queue, pull
* it off the queue and stick it in our wake_queue.
*/
- thread_t t = (thread_t)wq_element;
-
+ t = (thread_t)wq_element;
if (t->wait_event == event) {
thread_lock(t);
remqueue(q, (queue_entry_t) t);
t->at_safe_point = FALSE;
return t; /* still locked */
}
+
+ t = THREAD_NULL;
}
wq_element = wqe_next;
}
return THREAD_NULL;
}
-/*
- * Routine: wait_queue_peek64_locked
- * Purpose:
- * Select the best thread from a wait queue that meet the
- * supplied criteria, but leave it on the queue it was
- * found on. The thread, and the actual wait_queue the
- * thread was found on are identified.
- * Conditions:
- * at splsched
- * wait queue locked
- * possibly recursive
- * Returns:
- * a locked thread - if one found
- * a locked waitq - the one the thread was found on
- * Note:
- * Both the waitq the thread was actually found on, and
- * the supplied wait queue, are locked after this.
- */
-__private_extern__ void
-wait_queue_peek64_locked(
- wait_queue_t wq,
- event64_t event,
- thread_t *tp,
- wait_queue_t *wqp)
-{
- wait_queue_element_t wq_element;
- wait_queue_element_t wqe_next;
- thread_t t;
- queue_t q;
-
- assert(wq->wq_fifo);
-
- *tp = THREAD_NULL;
-
- q = &wq->wq_queue;
-
- wq_element = (wait_queue_element_t) queue_first(q);
- while (!queue_end(q, (queue_entry_t)wq_element)) {
- WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
- wqe_next = (wait_queue_element_t)
- queue_next((queue_t) wq_element);
-
- /*
- * We may have to recurse if this is a compound wait queue.
- */
- if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
- wait_queue_link_t wql = (wait_queue_link_t)wq_element;
- wait_queue_t set_queue;
-
- /*
- * We have to check the set wait queue.
- */
- set_queue = (wait_queue_t)wql->wql_setqueue;
- wait_queue_lock(set_queue);
- if (! wait_queue_empty(set_queue)) {
- wait_queue_peek64_locked(set_queue, event, tp, wqp);
- }
- if (*tp != THREAD_NULL) {
- if (*wqp != set_queue)
- wait_queue_unlock(set_queue);
- return; /* thread and its waitq locked */
- }
-
- wait_queue_unlock(set_queue);
- } else {
-
- /*
- * Otherwise, its a thread. If it is waiting on
- * the event we are posting to this queue, return
- * it locked, but leave it on the queue.
- */
- thread_t t = (thread_t)wq_element;
-
- if (t->wait_event == event) {
- thread_lock(t);
- *tp = t;
- *wqp = wq;
- return;
- }
- }
- wq_element = wqe_next;
- }
-}
/*
* Routine: wait_queue_pull_thread_locked
* Purpose:
- * Pull a thread that was previously "peeked" off the wait
- * queue and (possibly) unlock the waitq.
+ * Pull a thread off its wait queue and (possibly) unlock
+ * the waitq.
* Conditions:
* at splsched
* wait queue locked
assert(wait_queue_held(wq));
-
thread = _wait_queue_select64_one(wq, event);
if (unlock)
wait_queue_unlock(wq);
if (thread) {
- res = thread_go_locked(thread, result);
+ res = thread_go(thread, result);
assert(res == KERN_SUCCESS);
}
return thread; /* still locked if not NULL */
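/*
 * A sketch of how the identify-style wakeup built on the selection
 * above is typically consumed: the woken thread comes back still
 * locked, so the caller (at splsched, holding the wait queue lock on
 * entry) must thread_unlock() it.  Assumes the
 * wait_queue_wakeup64_identify_locked() interface as declared in
 * <kern/wait_queue.h>.
 */
static void
identify_wakeup_sketch(wait_queue_t wq, event64_t event)
{
	thread_t thread;

	/* caller is at splsched and holds the lock on wq */
	thread = wait_queue_wakeup64_identify_locked(wq, event,
						THREAD_AWAKENED, TRUE);	/* unlocks wq */
	if (thread != THREAD_NULL)
		thread_unlock(thread);	/* returned still locked */
}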
if (thread) {
kern_return_t res;
- res = thread_go_locked(thread, result);
+ res = thread_go(thread, result);
assert(res == KERN_SUCCESS);
thread_unlock(thread);
return res;
if (thread) {
kern_return_t res;
- res = thread_go_locked(thread, result);
+ res = thread_go(thread, result);
assert(res == KERN_SUCCESS);
thread_unlock(thread);
splx(s);
if (thread) {
kern_return_t res;
- res = thread_go_locked(thread, result);
+ res = thread_go(thread, result);
assert(res == KERN_SUCCESS);
thread_unlock(thread);
splx(s);
if (res != KERN_SUCCESS)
return KERN_NOT_WAITING;
- res = thread_go_locked(thread, result);
+ res = thread_go(thread, result);
assert(res == KERN_SUCCESS);
thread_unlock(thread);
return res;
wait_queue_unlock(wq);
if (res == KERN_SUCCESS) {
- res = thread_go_locked(thread, result);
+ res = thread_go(thread, result);
assert(res == KERN_SUCCESS);
thread_unlock(thread);
splx(s);
wait_queue_unlock(wq);
if (res == KERN_SUCCESS) {
- res = thread_go_locked(thread, result);
+ res = thread_go(thread, result);
assert(res == KERN_SUCCESS);
thread_unlock(thread);
splx(s);