X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/1c79356b52d46aa6b508fb032f5ae709b1f2897b..21362eb3e66fd2c787aee132bce100a44d71a99c:/osfmk/kern/thread_call.c?ds=sidebyside

diff --git a/osfmk/kern/thread_call.c b/osfmk/kern/thread_call.c
index 693c80084..c5f1f44be 100644
--- a/osfmk/kern/thread_call.c
+++ b/osfmk/kern/thread_call.c
@@ -1,49 +1,52 @@
 /*
- * Copyright (c) 1993-1995, 1999-2000 Apple Computer, Inc.
+ * Copyright (c) 1993-1995, 1999-2005 Apple Computer, Inc.
  * All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  *
- * @APPLE_LICENSE_HEADER_END@
- */
-/*
- * Thread-based callout module.
- *
- * HISTORY
- *
- * 10 July 1999 (debo)
- *  Pulled into Mac OS X (microkernel).
- *
- * 3 July 1993 (debo)
- *  Created.
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 
 #include
+#include
+#include
+#include
 #include
 #include
 #include
 #include
+#include
+
+#include
 #include
 #include
 #include
+#include
+
 #define internal_call_num       768
 
 #define thread_call_thread_min  4
@@ -56,20 +59,16 @@ decl_simple_lock_data(static,thread_call_lock)
 
 static
 timer_call_data_t
-    thread_call_delayed_timers[NCPUS];
-
-static
-queue_head_t
-    internal_call_free_queue,
-    pending_call_queue, delayed_call_queue;
+    thread_call_delaytimer;
 
 static
 queue_head_t
-    idle_thread_queue;
+    thread_call_xxx_queue,
+    thread_call_pending_queue, thread_call_delayed_queue;
 
 static
-thread_t
-    activate_thread;
+struct wait_queue
+    call_thread_waitqueue;
 
 static
 boolean_t
@@ -79,17 +78,15 @@ static struct {
     int     pending_num,
             pending_hiwat;
     int     active_num,
-            active_hiwat;
+            active_hiwat,
+            active_lowat;
     int     delayed_num,
             delayed_hiwat;
     int     idle_thread_num;
     int     thread_num,
             thread_hiwat,
             thread_lowat;
-} thread_calls;
-
-static boolean_t
-    thread_call_initialized = FALSE;
+} thread_call_vars;
 
 static __inline__ thread_call_t
     _internal_call_allocate(void);
@@ -113,7 +110,7 @@ _delayed_call_dequeue(
     thread_call_t       call
 );
 
-static void __inline__
+static __inline__ void
 _set_delayed_call_timer(
     thread_call_t       call
 );
@@ -160,46 +157,43 @@ _delayed_call_timer(
 void
 thread_call_initialize(void)
 {
-    thread_call_t       call;
-    spl_t               s;
-    int                 i;
-
-    if (thread_call_initialized)
-        panic("thread_call_initialize");
+    kern_return_t       result;
+    thread_t            thread;
+    thread_call_t       call;
+    spl_t               s;
 
-    simple_lock_init(&thread_call_lock, ETAP_MISC_TIMER);
+    simple_lock_init(&thread_call_lock, 0);
 
     s = splsched();
     simple_lock(&thread_call_lock);
 
-    queue_init(&pending_call_queue);
-    queue_init(&delayed_call_queue);
+    queue_init(&thread_call_pending_queue);
+    queue_init(&thread_call_delayed_queue);
 
-    queue_init(&internal_call_free_queue);
+    queue_init(&thread_call_xxx_queue);
     for (
             call = internal_call_storage;
             call < &internal_call_storage[internal_call_num];
             call++) {
 
-        enqueue_tail(&internal_call_free_queue, qe(call));
+        enqueue_tail(&thread_call_xxx_queue, qe(call));
     }
 
-    for (i = 0; i < NCPUS; i++) {
-        timer_call_setup(&thread_call_delayed_timers[i],
-                                        _delayed_call_timer, NULL);
-    }
+    timer_call_setup(&thread_call_delaytimer, _delayed_call_timer, NULL);
 
-    queue_init(&idle_thread_queue);
-    thread_calls.thread_lowat = thread_call_thread_min;
+    wait_queue_init(&call_thread_waitqueue, SYNC_POLICY_FIFO);
+    thread_call_vars.thread_lowat = thread_call_thread_min;
 
     activate_thread_awake = TRUE;
-    thread_call_initialized = TRUE;
 
     simple_unlock(&thread_call_lock);
     splx(s);
 
-    activate_thread = kernel_thread_with_priority(kernel_task,
-                                MAXPRI_KERNBAND-2, _activate_thread, TRUE);
+    result = kernel_thread_start_priority((thread_continue_t)_activate_thread, NULL, MAXPRI_KERNEL - 2, &thread);
+    if (result != KERN_SUCCESS)
+        panic("thread_call_initialize");
+
+    thread_deallocate(thread);
 }
 
 void
@@ -227,10 +221,10 @@ _internal_call_allocate(void)
 {
     thread_call_t       call;
 
-    if (queue_empty(&internal_call_free_queue))
+    if (queue_empty(&thread_call_xxx_queue))
         panic("_internal_call_allocate");
 
-    call = TC(dequeue_head(&internal_call_free_queue));
+    call = TC(dequeue_head(&thread_call_xxx_queue));
 
     return (call);
 }
@@ -254,7 +248,7 @@ _internal_call_release(
 {
     if (    call >= internal_call_storage &&
             call < &internal_call_storage[internal_call_num] )
-        enqueue_tail(&internal_call_free_queue, qe(call));
+        enqueue_head(&thread_call_xxx_queue, qe(call));
 }
 
 /*
@@ -274,9 +268,9 @@ _pending_call_enqueue(
     thread_call_t       call
 )
 {
-    enqueue_tail(&pending_call_queue, qe(call));
-    if (++thread_calls.pending_num > thread_calls.pending_hiwat)
-        thread_calls.pending_hiwat = thread_calls.pending_num;
+    enqueue_tail(&thread_call_pending_queue, qe(call));
+    if (++thread_call_vars.pending_num > thread_call_vars.pending_hiwat)
+        thread_call_vars.pending_hiwat = thread_call_vars.pending_num;
 
     call->state = PENDING;
 }
@@ -299,7 +293,7 @@ _pending_call_dequeue(
 )
 {
     (void)remque(qe(call));
-    thread_calls.pending_num--;
+    thread_call_vars.pending_num--;
 
     call->state = IDLE;
 }
@@ -324,12 +318,11 @@ _delayed_call_enqueue(
 {
     thread_call_t       current;
 
-    current = TC(queue_first(&delayed_call_queue));
+    current = TC(queue_first(&thread_call_delayed_queue));
 
     while (TRUE) {
-        if (    queue_end(&delayed_call_queue, qe(current))    ||
-                CMP_ABSOLUTETIME(&call->deadline,
-                                    &current->deadline) < 0    ) {
+        if (    queue_end(&thread_call_delayed_queue, qe(current))    ||
+                call->deadline < current->deadline    ) {
             current = TC(queue_prev(qe(current)));
             break;
         }
@@ -338,8 +331,8 @@ _delayed_call_enqueue(
     }
 
     insque(qe(call), qe(current));
-    if (++thread_calls.delayed_num > thread_calls.delayed_hiwat)
-        thread_calls.delayed_hiwat = thread_calls.delayed_num;
+    if (++thread_call_vars.delayed_num > thread_call_vars.delayed_hiwat)
+        thread_call_vars.delayed_hiwat = thread_call_vars.delayed_num;
 
     call->state = DELAYED;
 }
@@ -362,7 +355,7 @@ _delayed_call_dequeue(
 )
 {
     (void)remque(qe(call));
-    thread_calls.delayed_num--;
+    thread_call_vars.delayed_num--;
 
     call->state = IDLE;
 }
@@ -383,9 +376,7 @@ _set_delayed_call_timer(
     thread_call_t       call
 )
 {
-    timer_call_t    timer = &thread_call_delayed_timers[cpu_number()];
-
-    timer_call_enter(timer, call->deadline);
+    timer_call_enter(&thread_call_delaytimer, call->deadline);
 }
 
 /*
@@ -413,9 +404,9 @@ _remove_from_pending_queue(
     boolean_t           call_removed = FALSE;
     thread_call_t       call;
 
-    call = TC(queue_first(&pending_call_queue));
+    call = TC(queue_first(&thread_call_pending_queue));
 
-    while (!queue_end(&pending_call_queue, qe(call))) {
+    while (!queue_end(&thread_call_pending_queue, qe(call))) {
         if (    call->func == func          &&
                 call->param0 == param0      ) {
             thread_call_t   next = TC(queue_next(qe(call)));
@@ -462,9 +453,9 @@ _remove_from_delayed_queue(
     boolean_t           call_removed = FALSE;
     thread_call_t       call;
 
-    call = TC(queue_first(&delayed_call_queue));
+    call = TC(queue_first(&thread_call_delayed_queue));
 
-    while (!queue_end(&delayed_call_queue, qe(call))) {
+    while (!queue_end(&thread_call_delayed_queue, qe(call))) {
         if (    call->func == func          &&
                 call->param0 == param0      ) {
             thread_call_t   next = TC(queue_next(qe(call)));
@@ -507,17 +498,14 @@ thread_call_func(
 )
 {
     thread_call_t       call;
-    int                 s;
+    spl_t               s;
 
-    if (!thread_call_initialized)
-        panic("thread_call_func");
-
     s = splsched();
     simple_lock(&thread_call_lock);
 
-    call = TC(queue_first(&pending_call_queue));
+    call = TC(queue_first(&thread_call_pending_queue));
 
-    while (unique_call && !queue_end(&pending_call_queue, qe(call))) {
+    while (unique_call && !queue_end(&thread_call_pending_queue, qe(call))) {
         if (    call->func == func          &&
                 call->param0 == param       ) {
             break;
@@ -526,7 +514,7 @@ thread_call_func(
         call = TC(queue_next(qe(call)));
     }
 
-    if (!unique_call || queue_end(&pending_call_queue, qe(call))) {
+    if (!unique_call || queue_end(&thread_call_pending_queue, qe(call))) {
         call = _internal_call_allocate();
         call->func = func;
         call->param0 = param;
@@ -534,7 +522,8 @@ thread_call_func(
 
         _pending_call_enqueue(call);
 
-        _call_thread_wake();
+        if (thread_call_vars.active_num <= 0)
+            _call_thread_wake();
     }
 
     simple_unlock(&thread_call_lock);
@@ -557,15 +546,12 @@ void
 thread_call_func_delayed(
     thread_call_func_t      func,
     thread_call_param_t     param,
-    AbsoluteTime            deadline
+    uint64_t                deadline
 )
 {
     thread_call_t       call;
-    int                 s;
+    spl_t               s;
 
-    if (!thread_call_initialized)
-        panic("thread_call_func_delayed");
-
     s = splsched();
     simple_lock(&thread_call_lock);
 
@@ -577,7 +563,7 @@ thread_call_func_delayed(
 
     _delayed_call_enqueue(call);
 
-    if (queue_first(&delayed_call_queue) == qe(call))
+    if (queue_first(&thread_call_delayed_queue) == qe(call))
         _set_delayed_call_timer(call);
 
     simple_unlock(&thread_call_lock);
@@ -610,7 +596,7 @@ thread_call_func_cancel(
 )
 {
     boolean_t       result;
-    int             s;
+    spl_t           s;
 
     s = splsched();
     simple_lock(&thread_call_lock);
@@ -670,7 +656,7 @@ thread_call_free(
     thread_call_t       call
 )
 {
-    int             s;
+    spl_t           s;
 
     s = splsched();
     simple_lock(&thread_call_lock);
@@ -685,7 +671,7 @@ thread_call_free(
     simple_unlock(&thread_call_lock);
     splx(s);
 
-    kfree((vm_offset_t)call, sizeof (thread_call_data_t));
+    kfree(call, sizeof (thread_call_data_t));
 
     return (TRUE);
 }
@@ -710,7 +696,7 @@ thread_call_enter(
 )
 {
     boolean_t       result = TRUE;
-    int             s;
+    spl_t           s;
 
     s = splsched();
     simple_lock(&thread_call_lock);
@@ -722,8 +708,9 @@ thread_call_enter(
         result = FALSE;
 
         _pending_call_enqueue(call);
-
-        _call_thread_wake();
+
+        if (thread_call_vars.active_num <= 0)
+            _call_thread_wake();
     }
 
     call->param1 = 0;
@@ -741,7 +728,7 @@ thread_call_enter1(
 )
 {
     boolean_t       result = TRUE;
-    int             s;
+    spl_t           s;
 
     s = splsched();
     simple_lock(&thread_call_lock);
@@ -754,7 +741,8 @@ thread_call_enter1(
 
         _pending_call_enqueue(call);
 
-        _call_thread_wake();
+        if (thread_call_vars.active_num <= 0)
+            _call_thread_wake();
     }
 
     call->param1 = param1;
@@ -782,11 +770,11 @@ thread_call_enter1(
 boolean_t
 thread_call_enter_delayed(
     thread_call_t       call,
-    AbsoluteTime        deadline
+    uint64_t            deadline
 )
 {
     boolean_t       result = TRUE;
-    int             s;
+    spl_t           s;
 
     s = splsched();
     simple_lock(&thread_call_lock);
@@ -803,7 +791,7 @@ thread_call_enter_delayed(
 
     _delayed_call_enqueue(call);
 
-    if (queue_first(&delayed_call_queue) == qe(call))
+    if (queue_first(&thread_call_delayed_queue) == qe(call))
         _set_delayed_call_timer(call);
 
     simple_unlock(&thread_call_lock);
@@ -816,11 +804,11 @@ boolean_t
 thread_call_enter1_delayed(
     thread_call_t       call,
     thread_call_param_t param1,
-    AbsoluteTime        deadline
+    uint64_t            deadline
 )
 {
     boolean_t       result = TRUE;
-    int             s;
+    spl_t           s;
 
     s = splsched();
     simple_lock(&thread_call_lock);
@@ -837,7 +825,7 @@ thread_call_enter1_delayed(
 
     _delayed_call_enqueue(call);
 
-    if (queue_first(&delayed_call_queue) == qe(call))
+    if (queue_first(&thread_call_delayed_queue) == qe(call))
         _set_delayed_call_timer(call);
 
     simple_unlock(&thread_call_lock);
@@ -866,7 +854,7 @@ thread_call_cancel(
 )
 {
     boolean_t       result = TRUE;
-    int             s;
+    spl_t           s;
 
     s = splsched();
     simple_lock(&thread_call_lock);
@@ -901,10 +889,10 @@ thread_call_cancel(
 boolean_t
 thread_call_is_delayed(
     thread_call_t       call,
-    AbsoluteTime        *deadline)
+    uint64_t            *deadline)
 {
     boolean_t       result = FALSE;
-    int             s;
+    spl_t           s;
 
     s = splsched();
     simple_lock(&thread_call_lock);
@@ -922,11 +910,11 @@ thread_call_is_delayed(
 }
 
 /*
- * Routine:    _call_thread_wake    [private]
+ * Routine:    _call_thread_wake    [private, inline]
  *
  * Purpose:    Wake a callout thread to service
- *             newly pending callout entries.  May wake
- *             the activate thread to either wake or
+ *             pending callout entries.  May wake
+ *             the activate thread in order to
  *             create additional callout threads.
 *
 * Preconditions:    thread_call_lock held.
@@ -938,30 +926,66 @@ static __inline__
 void
 _call_thread_wake(void)
 {
-    thread_t        thread_to_wake;
+    if (wait_queue_wakeup_one(&call_thread_waitqueue, NULL, THREAD_AWAKENED) == KERN_SUCCESS) {
+        thread_call_vars.idle_thread_num--;
 
-    if (!queue_empty(&idle_thread_queue)) {
-        queue_remove_first(
-                &idle_thread_queue, thread_to_wake, thread_t, wait_link);
-        clear_wait(thread_to_wake, THREAD_AWAKENED);
-        thread_calls.idle_thread_num--;
+        if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
+            thread_call_vars.active_hiwat = thread_call_vars.active_num;
     }
     else
-        thread_to_wake = THREAD_NULL;
-
-    if (!activate_thread_awake &&
-        (thread_to_wake == THREAD_NULL || thread_calls.thread_num <
-            (thread_calls.active_num + thread_calls.pending_num))) {
-        clear_wait(activate_thread, THREAD_AWAKENED);
+    if (!activate_thread_awake) {
+        thread_wakeup_one(&activate_thread_awake);
         activate_thread_awake = TRUE;
     }
 }
 
-#if defined (__i386__)
-#define NO_CONTINUATIONS    (1)
-#else
-#define NO_CONTINUATIONS    (0)
-#endif
+/*
+ * Routine:    call_thread_block    [private]
+ *
+ * Purpose:    Hook via thread dispatch on
+ *             the occasion of a callout blocking.
+ *
+ * Preconditions:    splsched.
+ *
+ * Postconditions:   None.
+ */
+
+void
+call_thread_block(void)
+{
+    simple_lock(&thread_call_lock);
+
+    if (--thread_call_vars.active_num < thread_call_vars.active_lowat)
+        thread_call_vars.active_lowat = thread_call_vars.active_num;
+
+    if (    thread_call_vars.active_num <= 0   &&
+            thread_call_vars.pending_num > 0       )
+        _call_thread_wake();
+
+    simple_unlock(&thread_call_lock);
+}
+
+/*
+ * Routine:    call_thread_unblock    [private]
+ *
+ * Purpose:    Hook via thread wakeup on
+ *             the occasion of a callout unblocking.
+ *
+ * Preconditions:    splsched.
+ *
+ * Postconditions:   None.
+ */
+
+void
+call_thread_unblock(void)
+{
+    simple_lock(&thread_call_lock);
+
+    if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
+        thread_call_vars.active_hiwat = thread_call_vars.active_num;
+
+    simple_unlock(&thread_call_lock);
+}
 
 /*
  * Routine:    _call_thread    [private]
@@ -979,19 +1003,18 @@ _call_thread_continue(void)
 {
     thread_t        self = current_thread();
 
-#if NO_CONTINUATIONS
-    loop:
-#endif
     (void) splsched();
     simple_lock(&thread_call_lock);
 
-    while (thread_calls.pending_num > 0) {
+    self->options |= TH_OPT_CALLOUT;
+
+    while (thread_call_vars.pending_num > 0) {
         thread_call_t           call;
         thread_call_func_t      func;
         thread_call_param_t     param0, param1;
 
-        call = TC(dequeue_head(&pending_call_queue));
-        thread_calls.pending_num--;
+        call = TC(dequeue_head(&thread_call_pending_queue));
+        thread_call_vars.pending_num--;
 
         func = call->func;
         param0 = call->param0;
@@ -1001,50 +1024,44 @@ _call_thread_continue(void)
 
         _internal_call_release(call);
 
-        if (++thread_calls.active_num > thread_calls.active_hiwat)
-            thread_calls.active_hiwat = thread_calls.active_num;
-
-        if (thread_calls.pending_num > 0)
-            _call_thread_wake();
-
         simple_unlock(&thread_call_lock);
         (void) spllo();
 
+        KERNEL_DEBUG_CONSTANT(
+                MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
+                (int)func, (int)param0, (int)param1, 0, 0);
+
         (*func)(param0, param1);
 
         (void)thread_funnel_set(self->funnel_lock, FALSE);
 
         (void) splsched();
         simple_lock(&thread_call_lock);
-
-        thread_calls.active_num--;
     }
+
+    self->options &= ~TH_OPT_CALLOUT;
+
+    if (--thread_call_vars.active_num < thread_call_vars.active_lowat)
+        thread_call_vars.active_lowat = thread_call_vars.active_num;
 
-    if ((thread_calls.thread_num - thread_calls.active_num) <=
-                thread_calls.thread_lowat) {
-        queue_enter(&idle_thread_queue, self,
-                                        thread_t, wait_link);
-        thread_calls.idle_thread_num++;
+    if (thread_call_vars.idle_thread_num < thread_call_vars.thread_lowat) {
+        thread_call_vars.idle_thread_num++;
 
-        assert_wait(&idle_thread_queue, THREAD_INTERRUPTIBLE);
+        wait_queue_assert_wait(&call_thread_waitqueue, NULL, THREAD_UNINT, 0);
 
         simple_unlock(&thread_call_lock);
         (void) spllo();
 
-#if NO_CONTINUATIONS
-        thread_block((void (*)(void)) 0);
-        goto loop;
-#else
-        thread_block(_call_thread_continue);
-#endif
+        thread_block((thread_continue_t)_call_thread_continue);
         /* NOTREACHED */
     }
 
-    thread_calls.thread_num--;
+    thread_call_vars.thread_num--;
     simple_unlock(&thread_call_lock);
     (void) spllo();
 
-    (void) thread_terminate(self->top_act);
+    thread_terminate(self);
     /* NOTREACHED */
 }
 
@@ -1052,10 +1069,6 @@ static
 void
 _call_thread(void)
 {
-    thread_t    self = current_thread();
-
-    stack_privilege(self);
-
     _call_thread_continue();
     /* NOTREACHED */
 }
@@ -1074,45 +1087,33 @@ static
 void
 _activate_thread_continue(void)
 {
-#if NO_CONTINUATIONS
-    loop:
-#endif
+    kern_return_t   result;
+    thread_t        thread;
+
     (void) splsched();
     simple_lock(&thread_call_lock);
 
-    if (thread_calls.thread_num <
-            (thread_calls.active_num + thread_calls.pending_num)) {
+    while (     thread_call_vars.active_num <= 0   &&
+                thread_call_vars.pending_num > 0       ) {
+
+        if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
+            thread_call_vars.active_hiwat = thread_call_vars.active_num;
 
-        if (++thread_calls.thread_num > thread_calls.thread_hiwat)
-            thread_calls.thread_hiwat = thread_calls.thread_num;
+        if (++thread_call_vars.thread_num > thread_call_vars.thread_hiwat)
+            thread_call_vars.thread_hiwat = thread_call_vars.thread_num;
 
         simple_unlock(&thread_call_lock);
         (void) spllo();
 
-        (void) kernel_thread_with_priority(kernel_task,
-                                    MAXPRI_KERNBAND-1, _call_thread, TRUE);
-#if NO_CONTINUATIONS
-        thread_block((void (*)(void)) 0);
-        goto loop;
-#else
-        thread_block(_activate_thread_continue);
-#endif
-        /* NOTREACHED */
-    }
-    else if (thread_calls.pending_num > 0) {
-        _call_thread_wake();
+        result = kernel_thread_start_priority((thread_continue_t)_call_thread, NULL, MAXPRI_KERNEL - 1, &thread);
+        if (result != KERN_SUCCESS)
+            panic("activate_thread");
 
-        simple_unlock(&thread_call_lock);
-        (void) spllo();
+        thread_deallocate(thread);
 
-#if NO_CONTINUATIONS
-        thread_block((void (*)(void)) 0);
-        goto loop;
-#else
-        thread_block(_activate_thread_continue);
-#endif
-        /* NOTREACHED */
-    }
+        (void) splsched();
+        simple_lock(&thread_call_lock);
+    }
 
     assert_wait(&activate_thread_awake, THREAD_INTERRUPTIBLE);
     activate_thread_awake = FALSE;
@@ -1120,12 +1121,7 @@ _activate_thread_continue(void)
     simple_unlock(&thread_call_lock);
     (void) spllo();
 
-#if NO_CONTINUATIONS
-    thread_block((void (*)(void)) 0);
-    goto loop;
-#else
-    thread_block(_activate_thread_continue);
-#endif
+    thread_block((thread_continue_t)_activate_thread_continue);
     /* NOTREACHED */
 }
 
@@ -1133,11 +1129,10 @@ static
 void
 _activate_thread(void)
 {
-    thread_t    self = current_thread();
+    thread_t     self = current_thread();
 
-    self->vm_privilege = TRUE;
+    self->options |= TH_OPT_VMPRIV;
     vm_page_free_reserve(2);    /* XXX */
-    stack_privilege(self);
 
     _activate_thread_continue();
     /* NOTREACHED */
@@ -1146,24 +1141,24 @@ _activate_thread(void)
 static
 void
 _delayed_call_timer(
-    timer_call_param_t      p0,
-    timer_call_param_t      p1
+    __unused timer_call_param_t     p0,
+    __unused timer_call_param_t     p1
 )
 {
-    AbsoluteTime        timestamp;
+    uint64_t            timestamp;
     thread_call_t       call;
     boolean_t           new_pending = FALSE;
-    int                 s;
+    spl_t               s;
 
     s = splsched();
     simple_lock(&thread_call_lock);
 
     clock_get_uptime(&timestamp);
 
-    call = TC(queue_first(&delayed_call_queue));
+    call = TC(queue_first(&thread_call_delayed_queue));
 
-    while (!queue_end(&delayed_call_queue, qe(call))) {
-        if (CMP_ABSOLUTETIME(&call->deadline, &timestamp) <= 0) {
+    while (!queue_end(&thread_call_delayed_queue, qe(call))) {
+        if (call->deadline <= timestamp) {
             _delayed_call_dequeue(call);
 
             _pending_call_enqueue(call);
@@ -1172,13 +1167,13 @@ _delayed_call_timer(
         else
             break;
 
-        call = TC(queue_first(&delayed_call_queue));
+        call = TC(queue_first(&thread_call_delayed_queue));
     }
 
-    if (!queue_end(&delayed_call_queue, qe(call)))
+    if (!queue_end(&thread_call_delayed_queue, qe(call)))
         _set_delayed_call_timer(call);
 
-    if (new_pending)
+    if (new_pending && thread_call_vars.active_num <= 0)
         _call_thread_wake();
 
     simple_unlock(&thread_call_lock);
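
The caller-visible change in the hunks above is that delayed callouts now take a plain uint64_t absolute deadline (and spl_t interrupt levels) in place of the old AbsoluteTime type, and that the per-CPU delayed timers collapse into the single thread_call_delaytimer. As a rough caller-side sketch only (it is not part of this patch; thread_call_allocate() and clock_interval_to_deadline() are assumed from the kern/thread_call.h and kern/clock.h interfaces of the same era, and the example_* names are hypothetical), a client of the updated interface might look like:

#include <kern/thread_call.h>
#include <kern/clock.h>
#include <mach/clock_types.h>

/* Hypothetical callout: param0 is fixed at allocation time,
   param1 is supplied by each thread_call_enter1_delayed() call. */
static void
example_callout(thread_call_param_t param0, thread_call_param_t param1)
{
    /* deferred work runs here, on one of the callout threads */
}

static thread_call_t    example_call;

static void
example_schedule(void)
{
    uint64_t    deadline;

    /* Allocate once; the same thread_call structure is reused. */
    example_call = thread_call_allocate(example_callout, NULL);

    /* Deadlines are absolute, so convert "5 seconds from now" first. */
    clock_interval_to_deadline(5, NSEC_PER_SEC, &deadline);

    /* Places the call on thread_call_delayed_queue; if it becomes the
       head of that queue, _set_delayed_call_timer() re-arms the single
       thread_call_delaytimer introduced by this patch. */
    thread_call_enter1_delayed(example_call, NULL, deadline);
}

static void
example_teardown(void)
{
    /* Cancel if still pending or delayed, then release the structure. */
    if (example_call != NULL) {
        thread_call_cancel(example_call);
        thread_call_free(example_call);
        example_call = NULL;
    }
}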