X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/9bccf70c0258c7cac2dcb80011b2a964d884c552..b7266188b87f3620ec3f9f717e57194a7dd989fe:/osfmk/kern/thread_call.c

diff --git a/osfmk/kern/thread_call.c b/osfmk/kern/thread_call.c
index b69dcdd30..92f0b642b 100644
--- a/osfmk/kern/thread_call.c
+++ b/osfmk/kern/thread_call.c
@@ -1,421 +1,346 @@
 /*
- * Copyright (c) 1993-1995, 1999-2000 Apple Computer, Inc.
- * All rights reserved.
+ * Copyright (c) 1993-1995, 1999-2008 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  *
- * @APPLE_LICENSE_HEADER_END@
- */
-/*
- * Thread-based callout module.
- *
- * HISTORY
- *
- * 10 July 1999 (debo)
- *  Pulled into Mac OS X (microkernel).
- *
- * 3 July 1993 (debo)
- *  Created.
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 
 #include
+#include
+#include
+#include
 #include
 #include
 #include
 #include
+#include
+
+#include
 #include
 #include
 #include
 #include
 
-#define internal_call_num	768
+#include
 
-#define thread_call_thread_min	4
+decl_simple_lock_data(static,thread_call_lock)
 
-static
-thread_call_data_t
-	internal_call_storage[internal_call_num];
+static zone_t		thread_call_zone;
 
-decl_simple_lock_data(static,thread_call_lock)
+struct thread_call_group {
+	queue_head_t		pending_queue;
+	uint32_t		pending_count;
 
-static
-timer_call_data_t
-	thread_call_delayed_timer;
+	queue_head_t		delayed_queue;
 
-static
-queue_head_t
-	internal_call_free_queue,
-	pending_call_queue, delayed_call_queue;
+	timer_call_data_t	delayed_timer;
 
-static
-struct wait_queue
-	call_thread_idle_queue;
+	struct wait_queue	idle_wqueue;
+	struct wait_queue	daemon_wqueue;
+	uint32_t		idle_count, active_count;
+};
 
-static
-thread_t
-	activate_thread;
+typedef struct thread_call_group	*thread_call_group_t;
 
-static
-boolean_t
-	activate_thread_awake;
-
-static struct {
-	int		pending_num,
-			pending_hiwat;
-	int		active_num,
-			active_hiwat,
-			active_lowat;
-	int		delayed_num,
-			delayed_hiwat;
-	int		idle_thread_num;
-	int		thread_num,
-			thread_hiwat,
-			thread_lowat;
-} thread_calls;
+static struct thread_call_group	thread_call_group0;
 
-static boolean_t
-	thread_call_initialized = FALSE;
+static boolean_t	thread_call_daemon_awake;
 
-static __inline__ thread_call_t
-	_internal_call_allocate(void);
+#define thread_call_thread_min	4
 
-static __inline__ void
-_internal_call_release(
-	thread_call_t		call
-);
+#define internal_call_count	768
 
-static __inline__ void
-_pending_call_enqueue(
-	thread_call_t		call
-),
-_pending_call_dequeue(
-	thread_call_t		call
-),
-_delayed_call_enqueue(
-	thread_call_t		call
-),
-_delayed_call_dequeue(
-	thread_call_t		call
-);
+static thread_call_data_t	internal_call_storage[internal_call_count];
+static queue_head_t		thread_call_internal_queue;
 
-static void __inline__
-_set_delayed_call_timer(
-	thread_call_t		call
-);
-
-static boolean_t
-_remove_from_pending_queue(
-	thread_call_func_t	func,
-	thread_call_param_t	param0,
-	boolean_t		remove_all
-),
-_remove_from_delayed_queue(
-	thread_call_func_t	func,
-	thread_call_param_t	param0,
-	boolean_t		remove_all
-);
+static __inline__ thread_call_t	_internal_call_allocate(void);
 
-static __inline__ void
-	_call_thread_wake(void);
+static __inline__ void	_internal_call_release(
+				thread_call_t		call);
 
-static void
-	_call_thread(void),
-	_activate_thread(void);
+static __inline__ boolean_t	_pending_call_enqueue(
+					thread_call_t		call,
+					thread_call_group_t	group),
+				_delayed_call_enqueue(
+					thread_call_t		call,
+					thread_call_group_t	group,
+					uint64_t		deadline),
+				_call_dequeue(
+					thread_call_t		call,
+					thread_call_group_t	group);
 
-static void
-_delayed_call_timer(
-	timer_call_param_t	p0,
-	timer_call_param_t	p1
-);
+static __inline__ void	thread_call_wake(
+				thread_call_group_t	group);
+
+static __inline__ void	_set_delayed_call_timer(
+				thread_call_t		call,
+				thread_call_group_t	group);
+
+static boolean_t	_remove_from_pending_queue(
+				thread_call_func_t	func,
+				thread_call_param_t	param0,
+				boolean_t		remove_all),
+			_remove_from_delayed_queue(
+				thread_call_func_t	func,
+				thread_call_param_t	param0,
+				boolean_t		remove_all);
+
+static void		thread_call_daemon(
+				thread_call_group_t	group),
+			thread_call_thread(
+				thread_call_group_t	group);
+
+static void		thread_call_delayed_timer(
+				timer_call_param_t	p0,
+				timer_call_param_t	p1);
 
 #define qe(x)		((queue_entry_t)(x))
 #define TC(x)		((thread_call_t)(x))
 
 /*
- * Routine:	thread_call_initialize [public]
- *
- * Description:	Initialize this module, called
- *		early during system initialization.
- *
- * Preconditions:	None.
+ * thread_call_initialize:
  *
- * Postconditions:	None.
+ *	Initialize this module, called
+ *	early during system initialization.
  */
-
 void
 thread_call_initialize(void)
 {
-	thread_call_t		call;
-	spl_t			s;
+	thread_call_t		call;
+	thread_call_group_t	group = &thread_call_group0;
+	kern_return_t		result;
+	thread_t		thread;
+	int			i;
+	spl_t			s;
 
-	if (thread_call_initialized)
-		panic("thread_call_initialize");
+	i = sizeof (thread_call_data_t);
+	thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
 
-	simple_lock_init(&thread_call_lock, ETAP_MISC_TIMER);
+	simple_lock_init(&thread_call_lock, 0);
 
 	s = splsched();
 	simple_lock(&thread_call_lock);
 
-	queue_init(&pending_call_queue);
-	queue_init(&delayed_call_queue);
+	queue_init(&group->pending_queue);
+	queue_init(&group->delayed_queue);
+
+	timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);
 
-	queue_init(&internal_call_free_queue);
+	wait_queue_init(&group->idle_wqueue, SYNC_POLICY_FIFO);
+	wait_queue_init(&group->daemon_wqueue, SYNC_POLICY_FIFO);
+
+	queue_init(&thread_call_internal_queue);
 	for (
 			call = internal_call_storage;
-			call < &internal_call_storage[internal_call_num];
+			call < &internal_call_storage[internal_call_count];
 			call++) {
-		enqueue_tail(&internal_call_free_queue, qe(call));
+		enqueue_tail(&thread_call_internal_queue, qe(call));
 	}
 
-	timer_call_setup(&thread_call_delayed_timer, _delayed_call_timer, NULL);
-
-	wait_queue_init(&call_thread_idle_queue, SYNC_POLICY_FIFO);
-	thread_calls.thread_lowat = thread_call_thread_min;
-
-	activate_thread_awake = TRUE;
-	thread_call_initialized = TRUE;
+	thread_call_daemon_awake = TRUE;
 
 	simple_unlock(&thread_call_lock);
 	splx(s);
 
-	activate_thread = kernel_thread_with_priority(
-						kernel_task, MAXPRI_KERNEL - 2,
-						_activate_thread, TRUE, TRUE);
+	result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, group, BASEPRI_PREEMPT + 1, &thread);
+	if (result != KERN_SUCCESS)
+		panic("thread_call_initialize");
+
+	thread_deallocate(thread);
 }
 
 void
 thread_call_setup(
 	thread_call_t			call,
 	thread_call_func_t		func,
-	thread_call_param_t		param0
-)
+	thread_call_param_t		param0)
 {
 	call_entry_setup(call, func, param0);
 }
 
 /*
- * Routine:	_internal_call_allocate [private, inline]
- *
- * Purpose:	Allocate an internal callout entry.
+ * _internal_call_allocate:
  *
- * Preconditions:	thread_call_lock held.
+ *	Allocate an internal callout entry.
  *
- * Postconditions:	None.
+ *	Called with thread_call_lock held.
  */
-
 static __inline__ thread_call_t
 _internal_call_allocate(void)
 {
	thread_call_t		call;
 
-	if (queue_empty(&internal_call_free_queue))
+	if (queue_empty(&thread_call_internal_queue))
		panic("_internal_call_allocate");
 
-	call = TC(dequeue_head(&internal_call_free_queue));
+	call = TC(dequeue_head(&thread_call_internal_queue));
 
	return (call);
 }
 
 /*
- * Routine:	_internal_call_release [private, inline]
+ * _internal_call_release:
  *
- * Purpose:	Release an internal callout entry which
- *		is no longer pending (or delayed).
+ *	Release an internal callout entry which
+ *	is no longer pending (or delayed).
  *
- * Preconditions:	thread_call_lock held.
- *
- * Postconditions:	None.
+ *	Called with thread_call_lock held.
  */
-
-static __inline__
-void
+static __inline__ void
 _internal_call_release(
-	thread_call_t		call
-)
+	thread_call_t		call)
 {
	if (	call >= internal_call_storage			&&
-			call < &internal_call_storage[internal_call_num] )
-		enqueue_tail(&internal_call_free_queue, qe(call));
+			call < &internal_call_storage[internal_call_count] )
+		enqueue_head(&thread_call_internal_queue, qe(call));
 }
 
 /*
- * Routine:	_pending_call_enqueue [private, inline]
+ * _pending_call_enqueue:
  *
- * Purpose:	Place an entry at the end of the
- *		pending queue, to be executed soon.
+ *	Place an entry at the end of the
+ *	pending queue, to be executed soon.
  *
- * Preconditions:	thread_call_lock held.
+ *	Returns TRUE if the entry was already
+ *	on a queue.
  *
- * Postconditions:	None.
+ *	Called with thread_call_lock held.
  */
-
-static __inline__
-void
+static __inline__ boolean_t
 _pending_call_enqueue(
-	thread_call_t		call
-)
+	thread_call_t		call,
+	thread_call_group_t	group)
 {
-	enqueue_tail(&pending_call_queue, qe(call));
-	if (++thread_calls.pending_num > thread_calls.pending_hiwat)
-		thread_calls.pending_hiwat = thread_calls.pending_num;
+	queue_t		old_queue;
 
-	call->state = PENDING;
-}
+	old_queue = call_entry_enqueue_tail(call, &group->pending_queue);
 
-/*
- * Routine:	_pending_call_dequeue [private, inline]
- *
- * Purpose:	Remove an entry from the pending queue,
- *		effectively unscheduling it.
- *
- * Preconditions:	thread_call_lock held.
- *
- * Postconditions:	None.
- */
+	group->pending_count++;
 
-static __inline__
-void
-_pending_call_dequeue(
-	thread_call_t		call
-)
-{
-	(void)remque(qe(call));
-	thread_calls.pending_num--;
-
-	call->state = IDLE;
+	return (old_queue != NULL);
 }
 
 /*
- * Routine:	_delayed_call_enqueue [private, inline]
+ * _delayed_call_enqueue:
  *
- * Purpose:	Place an entry on the delayed queue,
- *		after existing entries with an earlier
- *		(or identical) deadline.
+ *	Place an entry on the delayed queue,
+ *	after existing entries with an earlier
+ *	(or identical) deadline.
  *
- * Preconditions:	thread_call_lock held.
+ *	Returns TRUE if the entry was already
+ *	on a queue.
  *
- * Postconditions:	None.
+ *	Called with thread_call_lock held.
  */
-
-static __inline__
-void
+static __inline__ boolean_t
 _delayed_call_enqueue(
-	thread_call_t		call
-)
+	thread_call_t		call,
+	thread_call_group_t	group,
+	uint64_t		deadline)
 {
-	thread_call_t		current;
-
-	current = TC(queue_first(&delayed_call_queue));
-
-	while (TRUE) {
-		if (	queue_end(&delayed_call_queue, qe(current))	||
-					call->deadline < current->deadline	) {
-			current = TC(queue_prev(qe(current)));
-			break;
-		}
-
-		current = TC(queue_next(qe(current)));
-	}
+	queue_t		old_queue;
 
-	insque(qe(call), qe(current));
-	if (++thread_calls.delayed_num > thread_calls.delayed_hiwat)
-		thread_calls.delayed_hiwat = thread_calls.delayed_num;
-
-	call->state = DELAYED;
+	old_queue = call_entry_enqueue_deadline(call, &group->delayed_queue, deadline);
+
+	if (old_queue == &group->pending_queue)
+		group->pending_count--;
+
+	return (old_queue != NULL);
 }
 
 /*
- * Routine:	_delayed_call_dequeue [private, inline]
+ * _call_dequeue:
  *
- * Purpose:	Remove an entry from the delayed queue,
- *		effectively unscheduling it.
+ *	Remove an entry from a queue.
  *
- * Preconditions:	thread_call_lock held.
+ *	Returns TRUE if the entry was on a queue.
  *
- * Postconditions:	None.
+ *	Called with thread_call_lock held.
  */
-
-static __inline__
-void
-_delayed_call_dequeue(
-	thread_call_t		call
-)
+static __inline__ boolean_t
+_call_dequeue(
+	thread_call_t		call,
+	thread_call_group_t	group)
 {
-	(void)remque(qe(call));
-	thread_calls.delayed_num--;
-
-	call->state = IDLE;
+	queue_t		old_queue;
+
+	old_queue = call_entry_dequeue(call);
+
+	if (old_queue == &group->pending_queue)
+		group->pending_count--;
+
+	return (old_queue != NULL);
 }
 
 /*
- * Routine:	_set_delayed_call_timer [private]
+ * _set_delayed_call_timer:
  *
- * Purpose:	Reset the timer so that it
- *		next expires when the entry is due.
+ *	Reset the timer so that it
+ *	next expires when the entry is due.
  *
- * Preconditions:	thread_call_lock held.
- *
- * Postconditions:	None.
+ *	Called with thread_call_lock held.
  */
-
 static __inline__ void
 _set_delayed_call_timer(
-	thread_call_t		call
-)
+	thread_call_t		call,
+	thread_call_group_t	group)
 {
-	timer_call_enter(&thread_call_delayed_timer, call->deadline);
+	timer_call_enter(&group->delayed_timer, call->deadline);
 }
 
 /*
- * Routine:	_remove_from_pending_queue [private]
+ * _remove_from_pending_queue:
  *
- * Purpose:	Remove the first (or all) matching
- *		entries from the pending queue,
- *		effectively unscheduling them.
- *		Returns whether any matching entries
- *		were found.
+ *	Remove the first (or all) matching
+ *	entries from the pending queue.
  *
- * Preconditions:	thread_call_lock held.
+ *	Returns TRUE if any matching entries
+ *	were found.
  *
- * Postconditions:	None.
+ *	Called with thread_call_lock held.
  */
-
-static
-boolean_t
+static boolean_t
 _remove_from_pending_queue(
	thread_call_func_t		func,
	thread_call_param_t		param0,
-	boolean_t			remove_all
-)
+	boolean_t			remove_all)
 {
-	boolean_t		call_removed = FALSE;
-	thread_call_t		call;
+	boolean_t		call_removed = FALSE;
+	thread_call_t		call;
+	thread_call_group_t	group = &thread_call_group0;
 
-	call = TC(queue_first(&pending_call_queue));
+	call = TC(queue_first(&group->pending_queue));
 
-	while (!queue_end(&pending_call_queue, qe(call))) {
+	while (!queue_end(&group->pending_queue, qe(call))) {
		if (	call->func == func		&&
				call->param0 == param0		) {
			thread_call_t	next = TC(queue_next(qe(call)));
 
-			_pending_call_dequeue(call);
+			_call_dequeue(call, group);
 
			_internal_call_release(call);
@@ -433,38 +358,34 @@ _remove_from_pending_queue(
 }
 
 /*
- * Routine:	_remove_from_delayed_queue [private]
+ * _remove_from_delayed_queue:
  *
- * Purpose:	Remove the first (or all) matching
- *		entries from the delayed queue,
- *		effectively unscheduling them.
- *		Returns whether any matching entries
- *		were found.
+ *	Remove the first (or all) matching
+ *	entries from the delayed queue.
  *
- * Preconditions:	thread_call_lock held.
+ *	Returns TRUE if any matching entries
+ *	were found.
  *
- * Postconditions:	None.
+ *	Called with thread_call_lock held.
  */
-
-static
-boolean_t
+static boolean_t
 _remove_from_delayed_queue(
	thread_call_func_t		func,
	thread_call_param_t		param0,
-	boolean_t			remove_all
-)
+	boolean_t			remove_all)
 {
-	boolean_t		call_removed = FALSE;
-	thread_call_t		call;
+	boolean_t		call_removed = FALSE;
+	thread_call_t		call;
+	thread_call_group_t	group = &thread_call_group0;
 
-	call = TC(queue_first(&delayed_call_queue));
+	call = TC(queue_first(&group->delayed_queue));
 
-	while (!queue_end(&delayed_call_queue, qe(call))) {
+	while (!queue_end(&group->delayed_queue, qe(call))) {
		if (	call->func == func		&&
				call->param0 == param0		) {
			thread_call_t	next = TC(queue_next(qe(call)));
 
-			_delayed_call_dequeue(call);
+			_call_dequeue(call, group);
 
			_internal_call_release(call);
@@ -481,38 +402,32 @@ _remove_from_delayed_queue(
	return (call_removed);
 }
 
+#ifndef	__LP64__
+
 /*
- * Routine:	thread_call_func [public]
+ * thread_call_func:
  *
- * Purpose:	Schedule a function callout.
- *		Guarantees { function, argument }
- *		uniqueness if unique_call is TRUE.
+ *	Enqueue a function callout.
  *
- * Preconditions:	Callable from an interrupt context
- *			below splsched.
- *
- * Postconditions:	None.
+ *	Guarantees { function, argument }
+ *	uniqueness if unique_call is TRUE.
  */
-
 void
 thread_call_func(
	thread_call_func_t		func,
	thread_call_param_t		param,
-	boolean_t			unique_call
-)
+	boolean_t			unique_call)
 {
-	thread_call_t		call;
-	int			s;
+	thread_call_t		call;
+	thread_call_group_t	group = &thread_call_group0;
+	spl_t			s;
 
-	if (!thread_call_initialized)
-		panic("thread_call_func");
-
	s = splsched();
	simple_lock(&thread_call_lock);
 
-	call = TC(queue_first(&pending_call_queue));
+	call = TC(queue_first(&group->pending_queue));
 
-	while (unique_call && !queue_end(&pending_call_queue, qe(call))) {
+	while (unique_call && !queue_end(&group->pending_queue, qe(call))) {
		if (	call->func == func		&&
				call->param0 == param		) {
			break;
@@ -521,47 +436,40 @@ thread_call_func(
		call = TC(queue_next(qe(call)));
	}
 
-	if (!unique_call || queue_end(&pending_call_queue, qe(call))) {
+	if (!unique_call || queue_end(&group->pending_queue, qe(call))) {
		call = _internal_call_allocate();
		call->func		= func;
		call->param0		= param;
-		call->param1		= 0;
+		call->param1		= NULL;
 
-		_pending_call_enqueue(call);
+		_pending_call_enqueue(call, group);
 
-		if (thread_calls.active_num <= 0)
-			_call_thread_wake();
+		if (group->active_count == 0)
+			thread_call_wake(group);
	}
 
	simple_unlock(&thread_call_lock);
	splx(s);
 }
 
+#endif	/* __LP64__ */
+
 /*
- * Routine:	thread_call_func_delayed [public]
- *
- * Purpose:	Schedule a function callout to
- *		occur at the stated time.
+ * thread_call_func_delayed:
  *
- * Preconditions:	Callable from an interrupt context
- *			below splsched.
- *
- * Postconditions:	None.
+ *	Enqueue a function callout to
+ *	occur at the stated time.
  */
-
 void
 thread_call_func_delayed(
	thread_call_func_t		func,
	thread_call_param_t		param,
-	uint64_t			deadline
-)
+	uint64_t			deadline)
 {
-	thread_call_t		call;
-	int			s;
+	thread_call_t		call;
+	thread_call_group_t	group = &thread_call_group0;
+	spl_t			s;
 
-	if (!thread_call_initialized)
-		panic("thread_call_func_delayed");
-
	s = splsched();
	simple_lock(&thread_call_lock);
@@ -569,44 +477,36 @@ thread_call_func_delayed(
	call->func		= func;
	call->param0		= param;
	call->param1		= 0;
-	call->deadline		= deadline;
 
-	_delayed_call_enqueue(call);
+	_delayed_call_enqueue(call, group, deadline);
 
-	if (queue_first(&delayed_call_queue) == qe(call))
-		_set_delayed_call_timer(call);
+	if (queue_first(&group->delayed_queue) == qe(call))
+		_set_delayed_call_timer(call, group);
 
	simple_unlock(&thread_call_lock);
	splx(s);
 }
 
 /*
- * Routine:	thread_call_func_cancel [public]
+ * thread_call_func_cancel:
  *
- * Purpose:	Unschedule a function callout.
- *		Removes one (or all)
- *		{ function, argument }
- *		instance(s) from either (or both)
- *		the pending and the delayed queue,
- *		in that order.  Returns a boolean
- *		indicating whether any calls were
- *		cancelled.
+ *	Dequeue a function callout.
  *
- * Preconditions:	Callable from an interrupt context
- *			below splsched.
+ *	Removes one (or all) { function, argument }
+ *	instance(s) from either (or both)
+ *	the pending and the delayed queue,
+ *	in that order.
  *
- * Postconditions:	None.
+ *	Returns TRUE if any calls were cancelled.
  */
-
 boolean_t
 thread_call_func_cancel(
	thread_call_func_t		func,
	thread_call_param_t		param,
-	boolean_t			cancel_all
-)
+	boolean_t			cancel_all)
 {
	boolean_t		result;
-	int			s;
+	spl_t			s;
 
	s = splsched();
	simple_lock(&thread_call_lock);
@@ -625,53 +525,37 @@ thread_call_func_cancel(
 }
 
 /*
- * Routine:	thread_call_allocate [public]
- *
- * Purpose:	Allocate an external callout
- *		entry.
+ * thread_call_allocate:
  *
- * Preconditions:	None.
- *
- * Postconditions:	None.
+ *	Allocate a callout entry.
  */
-
 thread_call_t
 thread_call_allocate(
	thread_call_func_t		func,
-	thread_call_param_t		param0
-)
+	thread_call_param_t		param0)
 {
-	thread_call_t		call = (void *)kalloc(sizeof (thread_call_data_t));
-
-	call->func		= func;
-	call->param0		= param0;
-	call->state		= IDLE;
-
+	thread_call_t	call = zalloc(thread_call_zone);
+
+	call_entry_setup(call, func, param0);
+
	return (call);
 }
 
 /*
- * Routine:	thread_call_free [public]
+ * thread_call_free:
  *
- * Purpose:	Free an external callout
- *		entry.
- *
- * Preconditions:	None.
- *
- * Postconditions:	None.
+ *	Free a callout entry.
  */
-
 boolean_t
 thread_call_free(
-	thread_call_t		call
-)
+	thread_call_t		call)
 {
-	int		s;
+	spl_t		s;
 
	s = splsched();
	simple_lock(&thread_call_lock);
 
-	if (call->state != IDLE) {
+	if (call->queue != NULL) {
		simple_unlock(&thread_call_lock);
		splx(s);
@@ -681,46 +565,35 @@ thread_call_free(
	simple_unlock(&thread_call_lock);
	splx(s);
 
-	kfree((vm_offset_t)call, sizeof (thread_call_data_t));
+	zfree(thread_call_zone, call);
 
	return (TRUE);
 }
 
 /*
- * Routine:	thread_call_enter [public]
- *
- * Purpose:	Schedule an external callout
- *		entry to occur "soon".  Returns a
- *		boolean indicating whether the call
- *		had been already scheduled.
+ * thread_call_enter:
  *
- * Preconditions:	Callable from an interrupt context
- *			below splsched.
+ *	Enqueue a callout entry to occur "soon".
  *
- * Postconditions:	None.
+ *	Returns TRUE if the call was
+ *	already on a queue.
  */
-
 boolean_t
 thread_call_enter(
-	thread_call_t		call
-)
+	thread_call_t		call)
 {
-	boolean_t	result = TRUE;
-	int		s;
-
+	boolean_t		result = TRUE;
+	thread_call_group_t	group = &thread_call_group0;
+	spl_t			s;
+
	s = splsched();
	simple_lock(&thread_call_lock);
 
-	if (call->state != PENDING) {
-		if (call->state == DELAYED)
-			_delayed_call_dequeue(call);
-		else if (call->state == IDLE)
-			result = FALSE;
-
-		_pending_call_enqueue(call);
+	if (call->queue != &group->pending_queue) {
+		result = _pending_call_enqueue(call, group);
 
-		if (thread_calls.active_num <= 0)
-			_call_thread_wake();
+		if (group->active_count == 0)
+			thread_call_wake(group);
	}
 
	call->param1 = 0;
@@ -734,26 +607,21 @@ thread_call_enter(
 boolean_t
 thread_call_enter1(
	thread_call_t			call,
-	thread_call_param_t		param1
-)
+	thread_call_param_t		param1)
 {
-	boolean_t	result = TRUE;
-	int		s;
-
+	boolean_t		result = TRUE;
+	thread_call_group_t	group = &thread_call_group0;
+	spl_t			s;
+
	s = splsched();
	simple_lock(&thread_call_lock);
 
-	if (call->state != PENDING) {
-		if (call->state == DELAYED)
-			_delayed_call_dequeue(call);
-		else if (call->state == IDLE)
-			result = FALSE;
-
-		_pending_call_enqueue(call);
-
-		if (thread_calls.active_num <= 0)
-			_call_thread_wake();
-	}
+	if (call->queue != &group->pending_queue) {
+		result = _pending_call_enqueue(call, group);
+
+		if (group->active_count == 0)
+			thread_call_wake(group);
+	}
 
	call->param1 = param1;
@@ -764,45 +632,32 @@ thread_call_enter1(
 }
 
 /*
- * Routine:	thread_call_enter_delayed [public]
- *
- * Purpose:	Schedule an external callout
- *		entry to occur at the stated time.
- *		Returns a boolean indicating whether
- *		the call had been already scheduled.
+ * thread_call_enter_delayed:
  *
- * Preconditions:	Callable from an interrupt context
- *			below splsched.
+ *	Enqueue a callout entry to occur
+ *	at the stated time.
  *
- * Postconditions:	None.
+ *	Returns TRUE if the call was
+ *	already on a queue.
  */
-
 boolean_t
 thread_call_enter_delayed(
	thread_call_t		call,
-	uint64_t		deadline
-)
+	uint64_t		deadline)
 {
-	boolean_t	result = TRUE;
-	int		s;
+	boolean_t		result = TRUE;
+	thread_call_group_t	group = &thread_call_group0;
+	spl_t			s;
 
	s = splsched();
	simple_lock(&thread_call_lock);
 
-	if (call->state == PENDING)
-		_pending_call_dequeue(call);
-	else if (call->state == DELAYED)
-		_delayed_call_dequeue(call);
-	else if (call->state == IDLE)
-		result = FALSE;
-
-	call->param1	= 0;
-	call->deadline	= deadline;
+	result = _delayed_call_enqueue(call, group, deadline);
 
-	_delayed_call_enqueue(call);
+	if (queue_first(&group->delayed_queue) == qe(call))
+		_set_delayed_call_timer(call, group);
 
-	if (queue_first(&delayed_call_queue) == qe(call))
-		_set_delayed_call_timer(call);
+	call->param1 = 0;
 
	simple_unlock(&thread_call_lock);
	splx(s);
@@ -814,29 +669,21 @@ boolean_t
 thread_call_enter1_delayed(
	thread_call_t			call,
	thread_call_param_t		param1,
-	uint64_t			deadline
-)
+	uint64_t			deadline)
 {
-	boolean_t	result = TRUE;
-	int		s;
+	boolean_t		result = TRUE;
+	thread_call_group_t	group = &thread_call_group0;
+	spl_t			s;
 
	s = splsched();
	simple_lock(&thread_call_lock);
 
-	if (call->state == PENDING)
-		_pending_call_dequeue(call);
-	else if (call->state == DELAYED)
-		_delayed_call_dequeue(call);
-	else if (call->state == IDLE)
-		result = FALSE;
+	result = _delayed_call_enqueue(call, group, deadline);
 
-	call->param1	= param1;
-	call->deadline	= deadline;
+	if (queue_first(&group->delayed_queue) == qe(call))
		_set_delayed_call_timer(call, group);
 
-	_delayed_call_enqueue(call);
-
-	if (queue_first(&delayed_call_queue) == qe(call))
-		_set_delayed_call_timer(call);
+	call->param1 = param1;
 
	simple_unlock(&thread_call_lock);
	splx(s);
@@ -845,36 +692,25 @@ thread_call_enter1_delayed(
 }
 
 /*
- * Routine:	thread_call_cancel [public]
- *
- * Purpose:	Unschedule a callout entry.
- *		Returns a boolean indicating
- *		whether the call had actually
- *		been scheduled.
+ * thread_call_cancel:
  *
- * Preconditions:	Callable from an interrupt context
- *			below splsched.
+ *	Dequeue a callout entry.
  *
- * Postconditions:	None.
+ *	Returns TRUE if the call was
+ *	on a queue.
  */
-
 boolean_t
 thread_call_cancel(
-	thread_call_t		call
-)
+	thread_call_t		call)
 {
-	boolean_t	result = TRUE;
-	int		s;
-
+	boolean_t		result;
+	thread_call_group_t	group = &thread_call_group0;
+	spl_t			s;
+
	s = splsched();
	simple_lock(&thread_call_lock);
-
-	if (call->state == PENDING)
-		_pending_call_dequeue(call);
-	else if (call->state == DELAYED)
-		_delayed_call_dequeue(call);
-	else
-		result = FALSE;
+
+	result = _call_dequeue(call, group);
 
	simple_unlock(&thread_call_lock);
	splx(s);
@@ -882,32 +718,29 @@ thread_call_cancel(
	return (result);
 }
 
+#ifndef	__LP64__
+
 /*
- * Routine:	thread_call_is_delayed [public]
- *
- * Purpose:	Returns a boolean indicating
- *		whether a call is currently scheduled
- *		to occur at a later time.  Optionally
- *		returns the expiration time.
+ * thread_call_is_delayed:
  *
- * Preconditions:	Callable from an interrupt context
- *			below splsched.
+ *	Returns TRUE if the call is
+ *	currently on a delayed queue.
  *
- * Postconditions:	None.
+ *	Optionally returns the expiration time.
  */
-
 boolean_t
 thread_call_is_delayed(
	thread_call_t		call,
	uint64_t		*deadline)
 {
-	boolean_t	result = FALSE;
-	int		s;
+	boolean_t		result = FALSE;
+	thread_call_group_t	group = &thread_call_group0;
+	spl_t			s;
 
	s = splsched();
	simple_lock(&thread_call_lock);
 
-	if (call->state == DELAYED) {
+	if (call->queue == &group->delayed_queue) {
		if (deadline != NULL)
			*deadline = call->deadline;
		result = TRUE;
@@ -919,272 +752,213 @@ thread_call_is_delayed(
	return (result);
 }
 
+#endif	/* __LP64__ */
+
 /*
- * Routine:	_call_thread_wake [private, inline]
- *
- * Purpose:	Wake a callout thread to service
- *		pending callout entries.  May wake
- *		the activate thread in order to
- *		create additional callout threads.
+ * thread_call_wake:
  *
- * Preconditions:	thread_call_lock held.
+ *	Wake a call thread to service
+ *	pending call entries.  May wake
+ *	the daemon thread in order to
+ *	create additional call threads.
  *
- * Postconditions:	None.
+ *	Called with thread_call_lock held.
  */
-
-static __inline__
-void
-_call_thread_wake(void)
+static __inline__ void
+thread_call_wake(
+	thread_call_group_t	group)
 {
-	if (wait_queue_wakeup_one(
-					&call_thread_idle_queue, &call_thread_idle_queue,
-										THREAD_AWAKENED) == KERN_SUCCESS) {
-		thread_calls.idle_thread_num--;
-
-		if (++thread_calls.active_num > thread_calls.active_hiwat)
-			thread_calls.active_hiwat = thread_calls.active_num;
+	if (group->idle_count > 0 && wait_queue_wakeup_one(&group->idle_wqueue, NULL, THREAD_AWAKENED) == KERN_SUCCESS) {
+		group->idle_count--; group->active_count++;
	}
	else
-	if (!activate_thread_awake) {
-		clear_wait(activate_thread, THREAD_AWAKENED);
-		activate_thread_awake = TRUE;
+	if (!thread_call_daemon_awake) {
+		thread_call_daemon_awake = TRUE;
+		wait_queue_wakeup_one(&group->daemon_wqueue, NULL, THREAD_AWAKENED);
	}
 }
 
 /*
- * Routine:	call_thread_block [private]
- *
- * Purpose:	Hook via thread dispatch on
- *		the occasion of a callout blocking.
+ * sched_call_thread:
  *
- * Preconditions:	splsched.
- *
- * Postconditions:	None.
+ *	Call out invoked by the scheduler.
  */
-
-void
-call_thread_block(void)
+static void
+sched_call_thread(
+	int		type,
+__unused	thread_t	thread)
 {
-	simple_lock(&thread_call_lock);
-
-	if (--thread_calls.active_num < thread_calls.active_lowat)
-		thread_calls.active_lowat = thread_calls.active_num;
+	thread_call_group_t	group = &thread_call_group0;
 
-	if (	thread_calls.active_num <= 0	&&
-			thread_calls.pending_num > 0	)
-		_call_thread_wake();
-
-	simple_unlock(&thread_call_lock);
-}
+	simple_lock(&thread_call_lock);
 
-/*
- * Routine:	call_thread_unblock [private]
- *
- * Purpose:	Hook via thread wakeup on
- *		the occasion of a callout unblocking.
- *
- * Preconditions:	splsched.
- *
- * Postconditions:	None.
- */
+	switch (type) {
 
-void
-call_thread_unblock(void)
-{
-	simple_lock(&thread_call_lock);
+		case SCHED_CALL_BLOCK:
+			if (--group->active_count == 0 && group->pending_count > 0)
+				thread_call_wake(group);
+			break;
 
-	if (++thread_calls.active_num > thread_calls.active_hiwat)
-		thread_calls.active_hiwat = thread_calls.active_num;
+		case SCHED_CALL_UNBLOCK:
+			group->active_count++;
+			break;
+	}
 
	simple_unlock(&thread_call_lock);
 }
 
 /*
- * Routine:	_call_thread [private]
- *
- * Purpose:	Executed by a callout thread.
- *
- * Preconditions:	None.
- *
- * Postconditions:	None.
+ * thread_call_thread:
  */
-
-static
-void
-_call_thread_continue(void)
+static void
+thread_call_thread(
+	thread_call_group_t	group)
 {
	thread_t		self = current_thread();
 
	(void) splsched();
	simple_lock(&thread_call_lock);
 
-	self->active_callout = TRUE;
+	thread_sched_call(self, sched_call_thread);
 
-	while (thread_calls.pending_num > 0) {
+	while (group->pending_count > 0) {
		thread_call_t			call;
		thread_call_func_t		func;
		thread_call_param_t		param0, param1;
 
-		call = TC(dequeue_head(&pending_call_queue));
-		thread_calls.pending_num--;
+		call = TC(dequeue_head(&group->pending_queue));
+		group->pending_count--;
 
		func = call->func;
		param0 = call->param0;
		param1 = call->param1;
 
-		call->state = IDLE;
+		call->queue = NULL;
 
		_internal_call_release(call);
 
		simple_unlock(&thread_call_lock);
		(void) spllo();
 
+		KERNEL_DEBUG_CONSTANT(
+				MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
+				func, param0, param1, 0, 0);
+
		(*func)(param0, param1);
 
-		(void)thread_funnel_set(self->funnel_lock, FALSE);
+		(void)thread_funnel_set(self->funnel_lock, FALSE);		/* XXX */
 
		(void) splsched();
		simple_lock(&thread_call_lock);
	}
 
-	self->active_callout = FALSE;
+	thread_sched_call(self, NULL);
+	group->active_count--;
 
-	if (--thread_calls.active_num < thread_calls.active_lowat)
-		thread_calls.active_lowat = thread_calls.active_num;
-
-	if (thread_calls.idle_thread_num < thread_calls.thread_lowat) {
-		thread_calls.idle_thread_num++;
+	if (group->idle_count < thread_call_thread_min) {
+		group->idle_count++;
 
-		wait_queue_assert_wait(
-			&call_thread_idle_queue, &call_thread_idle_queue,
-			THREAD_INTERRUPTIBLE);
+		wait_queue_assert_wait(&group->idle_wqueue, NULL, THREAD_UNINT, 0);
 
		simple_unlock(&thread_call_lock);
		(void) spllo();
 
-		thread_block(_call_thread_continue);
+		thread_block_parameter((thread_continue_t)thread_call_thread, group);
		/* NOTREACHED */
	}
-
-	thread_calls.thread_num--;
-
+
	simple_unlock(&thread_call_lock);
	(void) spllo();
 
-	(void) thread_terminate(self->top_act);
+	thread_terminate(self);
	/* NOTREACHED */
 }
 
-static
-void
-_call_thread(void)
-{
-	thread_t	self = current_thread();
-
-	stack_privilege(self);
-
-	_call_thread_continue();
-	/* NOTREACHED */
-}
-
 /*
- * Routine:	_activate_thread [private]
- *
- * Purpose:	Executed by the activate thread.
- *
- * Preconditions:	None.
- *
- * Postconditions:	Never terminates.
+ * thread_call_daemon:
  */
-
-static
-void
-_activate_thread_continue(void)
+static void
+thread_call_daemon_continue(
+	thread_call_group_t	group)
 {
+	kern_return_t	result;
+	thread_t	thread;
+
	(void) splsched();
	simple_lock(&thread_call_lock);
 
-	while (	thread_calls.active_num <= 0	&&
-			thread_calls.pending_num > 0	) {
-
-		if (++thread_calls.active_num > thread_calls.active_hiwat)
-			thread_calls.active_hiwat = thread_calls.active_num;
-
-		if (++thread_calls.thread_num > thread_calls.thread_hiwat)
-			thread_calls.thread_hiwat = thread_calls.thread_num;
+	while (group->active_count == 0 && group->pending_count > 0) {
+		group->active_count++;
 
		simple_unlock(&thread_call_lock);
		(void) spllo();
 
-		(void) kernel_thread_with_priority(
-					kernel_task, MAXPRI_KERNEL - 1,
-					_call_thread, TRUE, TRUE);
+		result = kernel_thread_start_priority((thread_continue_t)thread_call_thread, group, BASEPRI_PREEMPT, &thread);
+		if (result != KERN_SUCCESS)
+			panic("thread_call_daemon");
+
+		thread_deallocate(thread);
+
		(void) splsched();
		simple_lock(&thread_call_lock);
	}
-
-	assert_wait(&activate_thread_awake, THREAD_INTERRUPTIBLE);
-	activate_thread_awake = FALSE;
+
+	thread_call_daemon_awake = FALSE;
+	wait_queue_assert_wait(&group->daemon_wqueue, NULL, THREAD_UNINT, 0);
 
	simple_unlock(&thread_call_lock);
	(void) spllo();
 
-	thread_block(_activate_thread_continue);
+	thread_block_parameter((thread_continue_t)thread_call_daemon_continue, group);
	/* NOTREACHED */
 }
 
-static
-void
-_activate_thread(void)
+static void
+thread_call_daemon(
+	thread_call_group_t	group)
 {
-	thread_t	self = current_thread();
+	thread_t	self = current_thread();
 
-	self->vm_privilege = TRUE;
+	self->options |= TH_OPT_VMPRIV;
	vm_page_free_reserve(2);	/* XXX */
 
-	stack_privilege(self);
-
-	_activate_thread_continue();
+	thread_call_daemon_continue(group);
	/* NOTREACHED */
 }
 
-static
-void
-_delayed_call_timer(
-	timer_call_param_t		p0,
-	timer_call_param_t		p1
+static void
+thread_call_delayed_timer(
	timer_call_param_t		p0,
+	__unused timer_call_param_t	p1
 )
 {
-	uint64_t		timestamp;
-	thread_call_t		call;
-	boolean_t		new_pending = FALSE;
-	int			s;
+	thread_call_t		call;
+	thread_call_group_t	group = p0;
+	boolean_t		new_pending = FALSE;
+	uint64_t		timestamp;
 
-	s = splsched();
	simple_lock(&thread_call_lock);
 
-	clock_get_uptime(&timestamp);
+	timestamp = mach_absolute_time();
 
-	call = TC(queue_first(&delayed_call_queue));
+	call = TC(queue_first(&group->delayed_queue));
 
-	while (!queue_end(&delayed_call_queue, qe(call))) {
+	while (!queue_end(&group->delayed_queue, qe(call))) {
		if (call->deadline <= timestamp) {
-			_delayed_call_dequeue(call);
-
-			_pending_call_enqueue(call);
+			_pending_call_enqueue(call, group);
			new_pending = TRUE;
		}
		else
			break;
 
-		call = TC(queue_first(&delayed_call_queue));
+		call = TC(queue_first(&group->delayed_queue));
	}
 
-	if (!queue_end(&delayed_call_queue, qe(call)))
-		_set_delayed_call_timer(call);
+	if (!queue_end(&group->delayed_queue, qe(call)))
		_set_delayed_call_timer(call, group);
 
-	if (new_pending && thread_calls.active_num <= 0)
-		_call_thread_wake();
+	if (new_pending && group->active_count == 0)
		thread_call_wake(group);
 
	simple_unlock(&thread_call_lock);
-	splx(s);
 }
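
Two illustrative notes follow; they are editorial sketches, not part of the diff above.

First, the client-visible shape of the API is unchanged by this patch: a caller still allocates a callout entry, arms it against an absolute deadline, and cancels/frees it. A minimal sketch of that usage, assuming a kext-style caller; my_timeout_fn, my_ctx, and the one-second interval are hypothetical, and clock_interval_to_deadline() is the usual kern/clock.h helper (an assumption about the caller's environment) for producing the uint64_t deadline that thread_call_enter_delayed() expects:

#include <kern/thread_call.h>
#include <kern/clock.h>
#include <mach/clock_types.h>

static thread_call_t my_call;	/* hypothetical client state */

/* Runs on one of the group's callout threads (BASEPRI_PREEMPT).
 * param0 was fixed by thread_call_allocate(); param1 is set per
 * invocation by thread_call_enter1*() and zeroed otherwise. */
static void
my_timeout_fn(thread_call_param_t param0, thread_call_param_t param1)
{
	(void)param0; (void)param1;
}

static void
my_arm(void *my_ctx)
{
	uint64_t	deadline;

	/* A freshly allocated entry is idle: call->queue == NULL. */
	my_call = thread_call_allocate(my_timeout_fn, my_ctx);

	/* Absolute deadline roughly one second out; arming an entry
	 * that is already enqueued just moves it and returns TRUE. */
	clock_interval_to_deadline(1, NSEC_PER_SEC, &deadline);
	thread_call_enter_delayed(my_call, deadline);
}

static void
my_disarm(void)
{
	/* Dequeue if pending or delayed first, because
	 * thread_call_free() refuses (returns FALSE) while the
	 * entry is still on a queue. */
	thread_call_cancel(my_call);
	thread_call_free(my_call);
}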
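Second, the core structural change: the old code tracked each entry's lifecycle in an explicit call->state (IDLE/PENDING/DELAYED) plus hand-rolled insque/remque scans, while the new code derives the same information from call->queue as maintained by the call_entry_* primitives, whose return value (the queue the entry previously occupied) directly yields the "was it already scheduled?" results. A user-space model of the deadline-ordered enqueue contract, using hypothetical entry/entry_list types in place of the kernel's queue machinery:

#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>

TAILQ_HEAD(entry_list, entry);

struct entry {
	TAILQ_ENTRY(entry)	link;
	uint64_t		deadline;
	struct entry_list	*queue;		/* NULL when idle */
};

/* Model of call_entry_enqueue_deadline(): insert after existing
 * entries with an earlier-or-equal deadline, and return the queue
 * the entry was previously on (NULL if it was idle), which is what
 * _delayed_call_enqueue() turns into its TRUE/FALSE result. */
static struct entry_list *
enqueue_deadline(struct entry *e, struct entry_list *q, uint64_t deadline)
{
	struct entry_list	*old_queue = e->queue;
	struct entry		*cur;

	if (old_queue != NULL)
		TAILQ_REMOVE(old_queue, e, link);

	e->deadline = deadline;

	TAILQ_FOREACH(cur, q, link) {
		if (deadline < cur->deadline)
			break;		/* strict '<' keeps FIFO order for equal deadlines */
	}
	if (cur != NULL)
		TAILQ_INSERT_BEFORE(cur, e, link);
	else
		TAILQ_INSERT_TAIL(q, e, link);

	e->queue = q;
	return (old_queue);
}

The same convention explains _pending_call_enqueue() and _call_dequeue() above: each returns whether old_queue was non-NULL, and the group's pending_count is adjusted whenever an entry enters or leaves the pending queue.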