X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/55e303ae13a4cf49d70f2294092726f2fffb9ef2..4a3eedf9ecc9bbe3f3a5c6ce5e53ad199d639d32:/osfmk/kern/wait_queue.c

diff --git a/osfmk/kern/wait_queue.c b/osfmk/kern/wait_queue.c
index 48203732a..187ac8b13 100644
--- a/osfmk/kern/wait_queue.c
+++ b/osfmk/kern/wait_queue.c
@@ -1,16 +1,19 @@
 /*
- * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
- *
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
  *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
@@ -20,7 +23,7 @@
  * Please see the License for the specific language governing rights and
  * limitations under the License.
  *
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_FREE_COPYRIGHT@
@@ -72,6 +75,18 @@
 
 #include <kern/wait_queue.h>
+/* forward declarations */
+static boolean_t wait_queue_member_locked(
+			wait_queue_t wq,
+			wait_queue_set_t wq_set);
+
+void wait_queue_unlink_one(
+			wait_queue_t wq,
+			wait_queue_set_t *wq_setp);
+
+kern_return_t wait_queue_set_unlink_all_nofree(
+			wait_queue_set_t wq_set);
+
 
 /*
  *	Routine:	wait_queue_init
  *	Purpose:
@@ -117,7 +132,7 @@ wait_queue_alloc(
 	if (wq != WAIT_QUEUE_NULL) {
 		ret = wait_queue_init(wq, policy);
 		if (ret != KERN_SUCCESS) {
-			kfree((vm_offset_t)wq, sizeof(struct wait_queue));
+			kfree(wq, sizeof(struct wait_queue));
 			wq = WAIT_QUEUE_NULL;
 		}
 	}
@@ -139,7 +154,7 @@ wait_queue_free(
 		return KERN_INVALID_ARGUMENT;
 	if (!queue_empty(&wq->wq_queue))
 		return KERN_FAILURE;
-	kfree((vm_offset_t)wq, sizeof(struct wait_queue));
+	kfree(wq, sizeof(struct wait_queue));
 	return KERN_SUCCESS;
 }
 
@@ -172,7 +187,7 @@ wait_queue_set_init(
 	return KERN_SUCCESS;
 }
 
-/* legacy API */
+
 kern_return_t
 wait_queue_sub_init(
 	wait_queue_set_t wqset,
@@ -181,6 +196,19 @@ wait_queue_sub_init(
 	return wait_queue_set_init(wqset, policy);
 }
 
+kern_return_t
+wait_queue_sub_clearrefs(
+	wait_queue_set_t wq_set)
+{
+	if (!wait_queue_is_set(wq_set))
+		return KERN_INVALID_ARGUMENT;
+
+	wqs_lock(wq_set);
+	wq_set->wqs_refcount = 0;
+	wqs_unlock(wq_set);
+	return KERN_SUCCESS;
+}
+
 /*
  *	Routine:	wait_queue_set_alloc
  *	Purpose:
@@ -204,7 +232,7 @@ wait_queue_set_alloc(
 
 		ret = wait_queue_set_init(wq_set, policy);
 		if (ret != KERN_SUCCESS) {
-			kfree((vm_offset_t)wq_set, sizeof(struct wait_queue_set));
+			kfree(wq_set, sizeof(struct wait_queue_set));
 			wq_set = WAIT_QUEUE_SET_NULL;
 		}
 	}
@@ -228,22 +256,10 @@ wait_queue_set_free(
 	if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
 		return KERN_FAILURE;
 
-	kfree((vm_offset_t)wq_set, sizeof(struct wait_queue_set));
+	kfree(wq_set, sizeof(struct wait_queue_set));
 	return KERN_SUCCESS;
 }
 
-kern_return_t
-wait_queue_sub_clearrefs(
-	wait_queue_set_t wq_set)
-{
-	if (!wait_queue_is_set(wq_set))
-		return KERN_INVALID_ARGUMENT;
-
-	wqs_lock(wq_set);
-	wq_set->wqs_refcount = 0;
-	wqs_unlock(wq_set);
-	return KERN_SUCCESS;
-}
 
 /*
  *
@@ -325,7 +341,7 @@ MACRO_END
  *	The wait queue is locked
  *	The set queue is just that, a set queue
  */
-__private_extern__ boolean_t
+static boolean_t
 wait_queue_member_locked(
 	wait_queue_t wq,
 	wait_queue_set_t wq_set)
@@ -462,13 +478,13 @@ wait_queue_link(
 	wait_queue_link_t wql;
 	kern_return_t ret;
 
-	wql = (wait_queue_link_t) kalloc(sizeof(struct wait_queue_link));
+	wql = (wait_queue_link_t) kalloc(sizeof(struct _wait_queue_link));
 	if (wql == WAIT_QUEUE_LINK_NULL)
 		return KERN_RESOURCE_SHORTAGE;
 
 	ret = wait_queue_link_noalloc(wq, wq_set, wql);
 	if (ret != KERN_SUCCESS)
-		kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
+		kfree(wql, sizeof(struct _wait_queue_link));
 
 	return ret;
 }
@@ -535,7 +551,7 @@ wait_queue_unlink(
 			wqs_unlock(wq_set);
 			wait_queue_unlock(wq);
 			splx(s);
-			kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
+			kfree(wql, sizeof(struct _wait_queue_link));
 			return KERN_SUCCESS;
 		}
 	}
@@ -655,7 +671,7 @@ wait_queue_unlink_all(
 
 	while(!queue_empty(links)) {
 		wql = (wait_queue_link_t) dequeue(links);
-		kfree((vm_offset_t) wql, sizeof(struct wait_queue_link));
+		kfree(wql, sizeof(struct _wait_queue_link));
 	}
 
 	return(KERN_SUCCESS);
@@ -678,7 +694,6 @@ wait_queue_set_unlink_all_nofree(
 	wait_queue_link_t wql;
 	wait_queue_t wq;
 	queue_t q;
-	kern_return_t kret;
 	spl_t s;
 
 	if (!wait_queue_is_set(wq_set)) {
@@ -738,7 +753,6 @@ wait_queue_set_unlink_all(
 	queue_t q;
 	queue_head_t links_queue_head;
 	queue_t links = &links_queue_head;
-	kern_return_t kret;
 	spl_t s;
 
 	if (!wait_queue_is_set(wq_set)) {
@@ -774,7 +788,7 @@ retry:
 
 	while (!queue_empty (links)) {
 		wql = (wait_queue_link_t) dequeue(links);
-		kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
+		kfree(wql, sizeof(struct _wait_queue_link));
 	}
 	return(KERN_SUCCESS);
 }
@@ -813,7 +827,7 @@ wait_queue_unlink_one(
 			wqs_unlock(wq_set);
 			wait_queue_unlock(wq);
 			splx(s);
-			kfree((vm_offset_t)wql,sizeof(struct wait_queue_link));
+			kfree(wql,sizeof(struct _wait_queue_link));
 			*wq_setp = wq_set;
 			return;
 		}
@@ -843,6 +857,7 @@ wait_queue_assert_wait64_locked(
 	wait_queue_t wq,
 	event64_t event,
 	wait_interrupt_t interruptible,
+	uint64_t deadline,
 	thread_t thread)
 {
 	wait_result_t wait_result;
@@ -865,12 +880,19 @@ wait_queue_assert_wait64_locked(
 	 */
 	wait_result = thread_mark_wait_locked(thread, interruptible);
 	if (wait_result == THREAD_WAITING) {
-		if (thread->vm_privilege)
+		if (thread->options & TH_OPT_VMPRIV)
 			enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
 		else
 			enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);
+
 		thread->wait_event = event;
 		thread->wait_queue = wq;
+
+		if (deadline != 0) {
+			if (!timer_call_enter(&thread->wait_timer, deadline))
+				thread->wait_timer_active++;
+			thread->wait_timer_is_set = TRUE;
+		}
 	}
 	return(wait_result);
 }
@@ -888,25 +910,23 @@ wait_result_t
 wait_queue_assert_wait(
 	wait_queue_t wq,
 	event_t event,
-	wait_interrupt_t interruptible)
+	wait_interrupt_t interruptible,
+	uint64_t deadline)
 {
 	spl_t s;
 	wait_result_t ret;
-	thread_t cur_thread = current_thread();
+	thread_t thread = current_thread();
 
 	/* If it is an invalid wait queue, you can't wait on it */
-	if (!wait_queue_is_valid(wq)) {
-		thread_t thread = current_thread();
+	if (!wait_queue_is_valid(wq))
 		return (thread->wait_result = THREAD_RESTART);
-	}
 
 	s = splsched();
 	wait_queue_lock(wq);
-	thread_lock(cur_thread);
-	ret = wait_queue_assert_wait64_locked(
-			wq, (event64_t)((uint32_t)event),
-			interruptible, cur_thread);
-	thread_unlock(cur_thread);
+	thread_lock(thread);
+	ret = wait_queue_assert_wait64_locked(wq, (event64_t)((uint32_t)event),
+											interruptible, deadline, thread);
+	thread_unlock(thread);
 	wait_queue_unlock(wq);
 	splx(s);
 	return(ret);
@@ -924,29 +944,27 @@ wait_result_t
 wait_queue_assert_wait64(
 	wait_queue_t wq,
 	event64_t event,
-	wait_interrupt_t interruptible)
+	wait_interrupt_t interruptible,
+	uint64_t deadline)
 {
 	spl_t s;
 	wait_result_t ret;
-	thread_t cur_thread = current_thread();
+	thread_t thread = current_thread();
 
 	/* If it is an invalid wait queue, you cant wait on it */
-	if (!wait_queue_is_valid(wq)) {
-		thread_t thread = current_thread();
+	if (!wait_queue_is_valid(wq))
 		return (thread->wait_result = THREAD_RESTART);
-	}
 
 	s = splsched();
 	wait_queue_lock(wq);
-	thread_lock(cur_thread);
-	ret = wait_queue_assert_wait64_locked(wq, event, interruptible, cur_thread);
-	thread_unlock(cur_thread);
+	thread_lock(thread);
+	ret = wait_queue_assert_wait64_locked(wq, event, interruptible, deadline, thread);
+	thread_unlock(thread);
 	wait_queue_unlock(wq);
 	splx(s);
 	return(ret);
 }
 
-
 /*
  *	Routine:	_wait_queue_select64_all
  *	Purpose:
@@ -1047,7 +1065,11 @@ wait_queue_wakeup64_all_locked(
 	queue_t q = &wake_queue_head;
 	kern_return_t res;
 
-	assert(wait_queue_held(wq));
+//	assert(wait_queue_held(wq));
+//	if(!wq->wq_interlock.lock_data) {		/* (BRINGUP */
+//		panic("wait_queue_wakeup64_all_locked: lock not held on %p\n", wq);	/* (BRINGUP) */
+//	}
+
 	queue_init(q);
 
 	/*
@@ -1065,7 +1087,7 @@ wait_queue_wakeup64_all_locked(
 	res = KERN_NOT_WAITING;
 	while (!queue_empty (q)) {
 		thread_t thread = (thread_t) dequeue(q);
-		res = thread_go_locked(thread, result);
+		res = thread_go(thread, result);
 		assert(res == KERN_SUCCESS);
 		thread_unlock(thread);
 	}
@@ -1099,6 +1121,9 @@ wait_queue_wakeup_all(
 
 	s = splsched();
 	wait_queue_lock(wq);
+//	if(!wq->wq_interlock.lock_data) {		/* (BRINGUP */
+//		panic("wait_queue_wakeup_all: we did not get the lock on %p\n", wq);	/* (BRINGUP) */
+//	}
 	ret = wait_queue_wakeup64_all_locked(
 				wq, (event64_t)((uint32_t)event),
 				result, TRUE);
@@ -1199,8 +1224,7 @@ _wait_queue_select64_one(
 			 * the event we are posting to this queue, pull
 			 * it off the queue and stick it in out wake_queue.
 			 */
-			thread_t t = (thread_t)wq_element;
-
+			t = (thread_t)wq_element;
 			if (t->wait_event == event) {
 				thread_lock(t);
 				remqueue(q, (queue_entry_t) t);
@@ -1209,101 +1233,20 @@ _wait_queue_select64_one(
 				t->at_safe_point = FALSE;
 				return t;	/* still locked */
 			}
+
+			t = THREAD_NULL;
 		}
 		wq_element = wqe_next;
 	}
 	return THREAD_NULL;
 }
 
-/*
- *	Routine:	wait_queue_peek64_locked
- *	Purpose:
- *		Select the best thread from a wait queue that meet the
- *		supplied criteria, but leave it on the queue it was
- *		found on.  The thread, and the actual wait_queue the
- *		thread was found on are identified.
- *	Conditions:
- *		at splsched
- *		wait queue locked
- *		possibly recursive
- *	Returns:
- *		a locked thread - if one found
- *		a locked waitq - the one the thread was found on
- *	Note:
- *		Both the waitq the thread was actually found on, and
- *		the supplied wait queue, are locked after this.
- */
-__private_extern__ void
-wait_queue_peek64_locked(
-	wait_queue_t wq,
-	event64_t event,
-	thread_t *tp,
-	wait_queue_t *wqp)
-{
-	wait_queue_element_t wq_element;
-	wait_queue_element_t wqe_next;
-	thread_t t;
-	queue_t q;
-
-	assert(wq->wq_fifo);
-
-	*tp = THREAD_NULL;
-
-	q = &wq->wq_queue;
-
-	wq_element = (wait_queue_element_t) queue_first(q);
-	while (!queue_end(q, (queue_entry_t)wq_element)) {
-		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
-		wqe_next = (wait_queue_element_t)
-			   queue_next((queue_t) wq_element);
-
-		/*
-		 * We may have to recurse if this is a compound wait queue.
-		 */
-		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
-			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
-			wait_queue_t set_queue;
-
-			/*
-			 * We have to check the set wait queue.
-			 */
-			set_queue = (wait_queue_t)wql->wql_setqueue;
-			wait_queue_lock(set_queue);
-			if (! wait_queue_empty(set_queue)) {
-				wait_queue_peek64_locked(set_queue, event, tp, wqp);
-			}
-			if (*tp != THREAD_NULL) {
-				if (*wqp != set_queue)
-					wait_queue_unlock(set_queue);
-				return;		/* thread and its waitq locked */
-			}
-
-			wait_queue_unlock(set_queue);
-		} else {
-
-			/*
-			 * Otherwise, its a thread.  If it is waiting on
-			 * the event we are posting to this queue, return
-			 * it locked, but leave it on the queue.
-			 */
-			thread_t t = (thread_t)wq_element;
-
-			if (t->wait_event == event) {
-				thread_lock(t);
-				*tp = t;
-				*wqp = wq;
-				return;
-			}
-		}
-		wq_element = wqe_next;
-	}
-}
 
 /*
  *	Routine:	wait_queue_pull_thread_locked
  *	Purpose:
- *		Pull a thread that was previously "peeked" off the wait
- *		queue and (possibly) unlock the waitq.
+ *		Pull a thread off its wait queue and (possibly) unlock
+ *		the waitq.
  *	Conditions:
  *		at splsched
  *		wait queue locked
@@ -1422,13 +1365,12 @@ wait_queue_wakeup64_identity_locked(
 
 	assert(wait_queue_held(wq));
 
-
 	thread = _wait_queue_select64_one(wq, event);
 	if (unlock)
 		wait_queue_unlock(wq);
 
 	if (thread) {
-		res = thread_go_locked(thread, result);
+		res = thread_go(thread, result);
 		assert(res == KERN_SUCCESS);
 	}
 	return thread;  /* still locked if not NULL */
@@ -1467,7 +1409,7 @@ wait_queue_wakeup64_one_locked(
 	if (thread) {
 		kern_return_t res;
 
-		res = thread_go_locked(thread, result);
+		res = thread_go(thread, result);
 		assert(res == KERN_SUCCESS);
 		thread_unlock(thread);
 		return res;
@@ -1508,7 +1450,7 @@ wait_queue_wakeup_one(
 	if (thread) {
 		kern_return_t res;
 
-		res = thread_go_locked(thread, result);
+		res = thread_go(thread, result);
 		assert(res == KERN_SUCCESS);
 		thread_unlock(thread);
 		splx(s);
@@ -1550,7 +1492,7 @@ wait_queue_wakeup64_one(
 	if (thread) {
 		kern_return_t res;
 
-		res = thread_go_locked(thread, result);
+		res = thread_go(thread, result);
 		assert(res == KERN_SUCCESS);
 		thread_unlock(thread);
 		splx(s);
@@ -1604,7 +1546,7 @@ wait_queue_wakeup64_thread_locked(
 	if (res != KERN_SUCCESS)
 		return KERN_NOT_WAITING;
 
-	res = thread_go_locked(thread, result);
+	res = thread_go(thread, result);
 	assert(res == KERN_SUCCESS);
 	thread_unlock(thread);
 	return res;
@@ -1649,7 +1591,7 @@ wait_queue_wakeup_thread(
 	wait_queue_unlock(wq);
 
 	if (res == KERN_SUCCESS) {
-		res = thread_go_locked(thread, result);
+		res = thread_go(thread, result);
 		assert(res == KERN_SUCCESS);
 		thread_unlock(thread);
 		splx(s);
@@ -1698,7 +1640,7 @@ wait_queue_wakeup64_thread(
 	wait_queue_unlock(wq);
 
 	if (res == KERN_SUCCESS) {
-		res = thread_go_locked(thread, result);
+		res = thread_go(thread, result);
 		assert(res == KERN_SUCCESS);
 		thread_unlock(thread);
 		splx(s);
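
Notes on the interface changes above, with illustrative sketches.

The central change is the new uint64_t deadline argument on wait_queue_assert_wait, wait_queue_assert_wait64, and wait_queue_assert_wait64_locked. When deadline is non-zero, the locked variant arms thread->wait_timer through timer_call_enter, so a wait can time out without the caller managing a separate timer; deadline == 0 keeps the old wait-forever behavior, which is why existing call sites convert mechanically. A minimal caller-side sketch of the new signature (the 30 ms interval, the helper name, and the include list are illustrative, not from this file):

	#include <kern/clock.h>
	#include <kern/sched_prim.h>
	#include <kern/wait_queue.h>

	/* Sketch: block on 'event' with a 30 ms timeout (values illustrative). */
	static wait_result_t
	example_wait_with_deadline(wait_queue_t wq, event_t event)
	{
		uint64_t	deadline;
		wait_result_t	wres;

		/* Convert a 30 ms interval into an absolute deadline. */
		clock_interval_to_deadline(30, 1000 * 1000 /* ns per ms */, &deadline);

		wres = wait_queue_assert_wait(wq, event, THREAD_UNINT, deadline);
		if (wres == THREAD_WAITING)
			wres = thread_block(THREAD_CONTINUE_NULL);

		/* THREAD_TIMED_OUT here means thread->wait_timer fired first. */
		return wres;
	}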
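
The kfree sites that change from struct wait_queue_link to struct _wait_queue_link track a rename of the link type; the linkage API itself is unchanged. wait_queue_link kallocs one such link object and wait_queue_unlink kfrees it, which is exactly what the resized kfree calls above reflect. A hedged sketch of the set-linkage life cycle (error handling elided; each of these calls really returns a kern_return_t worth checking):

	#include <mach/sync_policy.h>
	#include <kern/wait_queue.h>

	/* Sketch: link a wait queue into a wait queue set, then tear down. */
	static void
	example_set_linkage(void)
	{
		wait_queue_t		wq;
		wait_queue_set_t	wqset;

		wq = wait_queue_alloc(SYNC_POLICY_FIFO);
		wqset = wait_queue_set_alloc(SYNC_POLICY_FIFO);

		/* Allocates the struct _wait_queue_link joining wq to wqset. */
		(void) wait_queue_link(wq, wqset);

		/* ... wakeups posted to wq are now visible to waiters on wqset ... */

		/* Frees the link allocated by wait_queue_link(). */
		(void) wait_queue_unlink(wq, wqset);

		(void) wait_queue_set_free(wqset);
		(void) wait_queue_free(wq);
	}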
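
The repeated thread_go_locked to thread_go substitutions are a rename of the scheduler primitive that makes a selected thread runnable; the wait_queue_wakeup_* entry points keep their signatures and still return KERN_NOT_WAITING when no thread is blocked on the event. For symmetry with the wait sketch above, the producer side might look like this (illustrative):

	#include <kern/wait_queue.h>

	/* Sketch: wake one thread blocked on 'event', if any. */
	static void
	example_wakeup_one(wait_queue_t wq, event_t event)
	{
		kern_return_t kr;

		kr = wait_queue_wakeup_one(wq, event, THREAD_AWAKENED);
		if (kr == KERN_NOT_WAITING) {
			/* No thread was blocked on this event; nothing was woken. */
		}
	}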