/*
- * Copyright (c) 1993-1995, 1999-2000 Apple Computer, Inc.
- * All rights reserved.
+ * Copyright (c) 1993-1995, 1999-2007 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
- */
-/*
- * Thread-based callout module.
- *
- * HISTORY
- *
- * 10 July 1999 (debo)
- * Pulled into Mac OS X (microkernel).
- *
- * 3 July 1993 (debo)
- * Created.
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
#include <mach/mach_types.h>
+#include <mach/thread_act.h>
+#include <kern/kern_types.h>
+#include <kern/kalloc.h>
#include <kern/sched_prim.h>
#include <kern/clock.h>
#include <kern/task.h>
#include <kern/thread.h>
+#include <kern/wait_queue.h>
+
+#include <vm/vm_pageout.h>
#include <kern/thread_call.h>
#include <kern/call_entry.h>
#include <kern/timer_call.h>
+#include <sys/kdebug.h>
+
#define internal_call_num 768
#define thread_call_thread_min 4
static
timer_call_data_t
- thread_call_delayed_timers[NCPUS];
+ thread_call_delaytimer;
static
queue_head_t
- internal_call_free_queue,
- pending_call_queue, delayed_call_queue;
+ thread_call_xxx_queue,
+ thread_call_pending_queue, thread_call_delayed_queue;
static
-queue_head_t
- idle_thread_queue;
-
-static
-thread_t
- activate_thread;
+struct wait_queue
+ call_thread_waitqueue;
static
boolean_t
int pending_num,
pending_hiwat;
int active_num,
- active_hiwat;
+ active_hiwat,
+ active_lowat;
int delayed_num,
delayed_hiwat;
int idle_thread_num;
int thread_num,
thread_hiwat,
thread_lowat;
-} thread_calls;
-
-static boolean_t
- thread_call_initialized = FALSE;
+} thread_call_vars;
static __inline__ thread_call_t
_internal_call_allocate(void);
thread_call_t call
);
-static void __inline__
+static __inline__ void
_set_delayed_call_timer(
thread_call_t call
);
boolean_t remove_all
);
-static __inline__ void
+static inline void
_call_thread_wake(void);
static void
void
thread_call_initialize(void)
{
- thread_call_t call;
- spl_t s;
- int i;
-
- if (thread_call_initialized)
- panic("thread_call_initialize");
+ kern_return_t result;
+ thread_t thread;
+ thread_call_t call;
+ spl_t s;
- simple_lock_init(&thread_call_lock, ETAP_MISC_TIMER);
+ simple_lock_init(&thread_call_lock, 0);
s = splsched();
simple_lock(&thread_call_lock);
- queue_init(&pending_call_queue);
- queue_init(&delayed_call_queue);
+ queue_init(&thread_call_pending_queue);
+ queue_init(&thread_call_delayed_queue);
- queue_init(&internal_call_free_queue);
+ queue_init(&thread_call_xxx_queue);
for (
call = internal_call_storage;
call < &internal_call_storage[internal_call_num];
call++) {
- enqueue_tail(&internal_call_free_queue, qe(call));
+ enqueue_tail(&thread_call_xxx_queue, qe(call));
}
- for (i = 0; i < NCPUS; i++) {
- timer_call_setup(&thread_call_delayed_timers[i],
- _delayed_call_timer, NULL);
- }
+ timer_call_setup(&thread_call_delaytimer, _delayed_call_timer, NULL);
- queue_init(&idle_thread_queue);
- thread_calls.thread_lowat = thread_call_thread_min;
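+ /* Idle callout threads wait FIFO on this queue until new work arrives. */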
+ wait_queue_init(&call_thread_waitqueue, SYNC_POLICY_FIFO);
+ thread_call_vars.thread_lowat = thread_call_thread_min;
activate_thread_awake = TRUE;
- thread_call_initialized = TRUE;
simple_unlock(&thread_call_lock);
splx(s);
- activate_thread = kernel_thread_with_priority(kernel_task,
- MAXPRI_KERNBAND-2, _activate_thread, TRUE);
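+ /* Create the activate thread, which creates additional callout threads on demand. */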
+ result = kernel_thread_start_priority((thread_continue_t)_activate_thread, NULL, MAXPRI_KERNEL - 2, &thread);
+ if (result != KERN_SUCCESS)
+ panic("thread_call_initialize");
+
+ thread_deallocate(thread);
}
void
{
thread_call_t call;
- if (queue_empty(&internal_call_free_queue))
+ if (queue_empty(&thread_call_xxx_queue))
panic("_internal_call_allocate");
- call = TC(dequeue_head(&internal_call_free_queue));
+ call = TC(dequeue_head(&thread_call_xxx_queue));
return (call);
}
{
if ( call >= internal_call_storage &&
call < &internal_call_storage[internal_call_num] )
- enqueue_tail(&internal_call_free_queue, qe(call));
+ enqueue_head(&thread_call_xxx_queue, qe(call));
}
/*
thread_call_t call
)
{
- enqueue_tail(&pending_call_queue, qe(call));
- if (++thread_calls.pending_num > thread_calls.pending_hiwat)
- thread_calls.pending_hiwat = thread_calls.pending_num;
+ enqueue_tail(&thread_call_pending_queue, qe(call));
+ if (++thread_call_vars.pending_num > thread_call_vars.pending_hiwat)
+ thread_call_vars.pending_hiwat = thread_call_vars.pending_num;
call->state = PENDING;
}
)
{
(void)remque(qe(call));
- thread_calls.pending_num--;
+ thread_call_vars.pending_num--;
call->state = IDLE;
}
{
thread_call_t current;
- current = TC(queue_first(&delayed_call_queue));
+ current = TC(queue_first(&thread_call_delayed_queue));
while (TRUE) {
- if ( queue_end(&delayed_call_queue, qe(current)) ||
- CMP_ABSOLUTETIME(&call->deadline,
- &current->deadline) < 0 ) {
+ if ( queue_end(&thread_call_delayed_queue, qe(current)) ||
+ call->deadline < current->deadline ) {
current = TC(queue_prev(qe(current)));
break;
}
}
insque(qe(call), qe(current));
- if (++thread_calls.delayed_num > thread_calls.delayed_hiwat)
- thread_calls.delayed_hiwat = thread_calls.delayed_num;
+ if (++thread_call_vars.delayed_num > thread_call_vars.delayed_hiwat)
+ thread_call_vars.delayed_hiwat = thread_call_vars.delayed_num;
call->state = DELAYED;
}
)
{
(void)remque(qe(call));
- thread_calls.delayed_num--;
+ thread_call_vars.delayed_num--;
call->state = IDLE;
}
thread_call_t call
)
{
- timer_call_t timer = &thread_call_delayed_timers[cpu_number()];
-
- timer_call_enter(timer, call->deadline);
+ timer_call_enter(&thread_call_delaytimer, call->deadline);
}
/*
boolean_t call_removed = FALSE;
thread_call_t call;
- call = TC(queue_first(&pending_call_queue));
+ call = TC(queue_first(&thread_call_pending_queue));
- while (!queue_end(&pending_call_queue, qe(call))) {
+ while (!queue_end(&thread_call_pending_queue, qe(call))) {
if ( call->func == func &&
call->param0 == param0 ) {
thread_call_t next = TC(queue_next(qe(call)));
boolean_t call_removed = FALSE;
thread_call_t call;
- call = TC(queue_first(&delayed_call_queue));
+ call = TC(queue_first(&thread_call_delayed_queue));
- while (!queue_end(&delayed_call_queue, qe(call))) {
+ while (!queue_end(&thread_call_delayed_queue, qe(call))) {
if ( call->func == func &&
call->param0 == param0 ) {
thread_call_t next = TC(queue_next(qe(call)));
)
{
thread_call_t call;
- int s;
+ spl_t s;
- if (!thread_call_initialized)
- panic("thread_call_func");
-
s = splsched();
simple_lock(&thread_call_lock);
- call = TC(queue_first(&pending_call_queue));
+ call = TC(queue_first(&thread_call_pending_queue));
- while (unique_call && !queue_end(&pending_call_queue, qe(call))) {
+ while (unique_call && !queue_end(&thread_call_pending_queue, qe(call))) {
if ( call->func == func &&
call->param0 == param ) {
break;
call = TC(queue_next(qe(call)));
}
- if (!unique_call || queue_end(&pending_call_queue, qe(call))) {
+ if (!unique_call || queue_end(&thread_call_pending_queue, qe(call))) {
call = _internal_call_allocate();
call->func = func;
call->param0 = param;
- call->param1 = 0;
+ call->param1 = NULL;
_pending_call_enqueue(call);
- _call_thread_wake();
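+ /* Wake a callout thread only when none is currently active to service the queue. */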
+ if (thread_call_vars.active_num <= 0)
+ _call_thread_wake();
}
simple_unlock(&thread_call_lock);
thread_call_func_delayed(
thread_call_func_t func,
thread_call_param_t param,
- AbsoluteTime deadline
+ uint64_t deadline
)
{
thread_call_t call;
- int s;
+ spl_t s;
- if (!thread_call_initialized)
- panic("thread_call_func_delayed");
-
s = splsched();
simple_lock(&thread_call_lock);
_delayed_call_enqueue(call);
- if (queue_first(&delayed_call_queue) == qe(call))
+ if (queue_first(&thread_call_delayed_queue) == qe(call))
_set_delayed_call_timer(call);
simple_unlock(&thread_call_lock);
)
{
boolean_t result;
- int s;
+ spl_t s;
s = splsched();
simple_lock(&thread_call_lock);
thread_call_t call
)
{
- int s;
+ spl_t s;
s = splsched();
simple_lock(&thread_call_lock);
simple_unlock(&thread_call_lock);
splx(s);
- kfree((vm_offset_t)call, sizeof (thread_call_data_t));
+ kfree(call, sizeof (thread_call_data_t));
return (TRUE);
}
)
{
boolean_t result = TRUE;
- int s;
+ spl_t s;
s = splsched();
simple_lock(&thread_call_lock);
result = FALSE;
_pending_call_enqueue(call);
-
- _call_thread_wake();
+
+ if (thread_call_vars.active_num <= 0)
+ _call_thread_wake();
}
call->param1 = 0;
)
{
boolean_t result = TRUE;
- int s;
+ spl_t s;
s = splsched();
simple_lock(&thread_call_lock);
_pending_call_enqueue(call);
- _call_thread_wake();
+ if (thread_call_vars.active_num <= 0)
+ _call_thread_wake();
}
call->param1 = param1;
boolean_t
thread_call_enter_delayed(
thread_call_t call,
- AbsoluteTime deadline
+ uint64_t deadline
)
{
boolean_t result = TRUE;
- int s;
+ spl_t s;
s = splsched();
simple_lock(&thread_call_lock);
_delayed_call_enqueue(call);
- if (queue_first(&delayed_call_queue) == qe(call))
+ if (queue_first(&thread_call_delayed_queue) == qe(call))
_set_delayed_call_timer(call);
simple_unlock(&thread_call_lock);
thread_call_enter1_delayed(
thread_call_t call,
thread_call_param_t param1,
- AbsoluteTime deadline
+ uint64_t deadline
)
{
boolean_t result = TRUE;
- int s;
+ spl_t s;
s = splsched();
simple_lock(&thread_call_lock);
_delayed_call_enqueue(call);
- if (queue_first(&delayed_call_queue) == qe(call))
+ if (queue_first(&thread_call_delayed_queue) == qe(call))
_set_delayed_call_timer(call);
simple_unlock(&thread_call_lock);
)
{
boolean_t result = TRUE;
- int s;
+ spl_t s;
s = splsched();
simple_lock(&thread_call_lock);
boolean_t
thread_call_is_delayed(
thread_call_t call,
- AbsoluteTime *deadline)
+ uint64_t *deadline)
{
boolean_t result = FALSE;
- int s;
+ spl_t s;
s = splsched();
simple_lock(&thread_call_lock);
}
/*
- * Routine: _call_thread_wake [private]
+ * Routine: _call_thread_wake [private, inline]
*
* Purpose: Wake a callout thread to service
- * newly pending callout entries. May wake
- * the activate thread to either wake or
+ * pending callout entries. May wake
+ * the activate thread in order to
* create additional callout threads.
*
* Preconditions: thread_call_lock held.
* Postconditions: None.
*/
-static __inline__
-void
+static inline void
_call_thread_wake(void)
{
- thread_t thread_to_wake;
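+ /* Wake an idle callout thread if one is parked; otherwise wake the activate thread so it can create one. */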
+ if (wait_queue_wakeup_one(&call_thread_waitqueue, NULL, THREAD_AWAKENED) == KERN_SUCCESS) {
+ thread_call_vars.idle_thread_num--;
- if (!queue_empty(&idle_thread_queue)) {
- queue_remove_first(
- &idle_thread_queue, thread_to_wake, thread_t, wait_link);
- clear_wait(thread_to_wake, THREAD_AWAKENED);
- thread_calls.idle_thread_num--;
+ if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
+ thread_call_vars.active_hiwat = thread_call_vars.active_num;
}
else
- thread_to_wake = THREAD_NULL;
-
- if (!activate_thread_awake &&
- (thread_to_wake == THREAD_NULL || thread_calls.thread_num <
- (thread_calls.active_num + thread_calls.pending_num))) {
- clear_wait(activate_thread, THREAD_AWAKENED);
+ if (!activate_thread_awake) {
+ thread_wakeup_one(&activate_thread_awake);
activate_thread_awake = TRUE;
}
}
-#if defined (__i386__)
-#define NO_CONTINUATIONS (1)
-#else
-#define NO_CONTINUATIONS (0)
-#endif
+/*
+ * sched_call_thread:
+ *
+ * Call out invoked by the scheduler.
+ */
+
+static void
+sched_call_thread(
+ int type,
+__unused thread_t thread)
+{
+ simple_lock(&thread_call_lock);
+
+ switch (type) {
+
+ case SCHED_CALL_BLOCK:
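+ /* A callout thread has blocked; if none remain active while work is pending, wake another. */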
+ if (--thread_call_vars.active_num < thread_call_vars.active_lowat)
+ thread_call_vars.active_lowat = thread_call_vars.active_num;
+
+ if ( thread_call_vars.active_num <= 0 &&
+ thread_call_vars.pending_num > 0 )
+ _call_thread_wake();
+ break;
+
+ case SCHED_CALL_UNBLOCK:
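+ /* A blocked callout thread is runnable again and counts as active once more. */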
+ if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
+ thread_call_vars.active_hiwat = thread_call_vars.active_num;
+ break;
+ }
+
+ simple_unlock(&thread_call_lock);
+}
/*
* Routine: _call_thread [private]
{
thread_t self = current_thread();
-#if NO_CONTINUATIONS
- loop:
-#endif
(void) splsched();
simple_lock(&thread_call_lock);
- while (thread_calls.pending_num > 0) {
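+ /* Register the scheduler callout so blocking and unblocking inside a callout keeps the active count current. */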
+ thread_sched_call(self, sched_call_thread);
+
+ while (thread_call_vars.pending_num > 0) {
thread_call_t call;
thread_call_func_t func;
thread_call_param_t param0, param1;
- call = TC(dequeue_head(&pending_call_queue));
- thread_calls.pending_num--;
+ call = TC(dequeue_head(&thread_call_pending_queue));
+ thread_call_vars.pending_num--;
func = call->func;
param0 = call->param0;
_internal_call_release(call);
- if (++thread_calls.active_num > thread_calls.active_hiwat)
- thread_calls.active_hiwat = thread_calls.active_num;
-
- if (thread_calls.pending_num > 0)
- _call_thread_wake();
-
simple_unlock(&thread_call_lock);
(void) spllo();
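+ /* Record the callout invocation in the kernel trace buffer. */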
+ KERNEL_DEBUG_CONSTANT(
+ MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
+ (int)func, (int)param0, (int)param1, 0, 0);
+
(*func)(param0, param1);
(void)thread_funnel_set(self->funnel_lock, FALSE);
(void) splsched();
simple_lock(&thread_call_lock);
-
- thread_calls.active_num--;
}
+
+ thread_sched_call(self, NULL);
+
+ if (--thread_call_vars.active_num < thread_call_vars.active_lowat)
+ thread_call_vars.active_lowat = thread_call_vars.active_num;
- if ((thread_calls.thread_num - thread_calls.active_num) <=
- thread_calls.thread_lowat) {
- queue_enter(&idle_thread_queue, self, thread_t, wait_link);
- thread_calls.idle_thread_num++;
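+ /* Park as an idle server while the idle pool is below the low-water mark; otherwise terminate. */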
+ if (thread_call_vars.idle_thread_num < thread_call_vars.thread_lowat) {
+ thread_call_vars.idle_thread_num++;
- assert_wait(&idle_thread_queue, THREAD_INTERRUPTIBLE);
+ wait_queue_assert_wait(&call_thread_waitqueue, NULL, THREAD_UNINT, 0);
simple_unlock(&thread_call_lock);
(void) spllo();
-#if NO_CONTINUATIONS
- thread_block((void (*)(void)) 0);
- goto loop;
-#else
- thread_block(_call_thread_continue);
-#endif
+ thread_block((thread_continue_t)_call_thread_continue);
/* NOTREACHED */
}
- thread_calls.thread_num--;
+ thread_call_vars.thread_num--;
simple_unlock(&thread_call_lock);
(void) spllo();
- (void) thread_terminate(self->top_act);
+ thread_terminate(self);
/* NOTREACHED */
}
void
_call_thread(void)
{
- thread_t self = current_thread();
-
- stack_privilege(self);
-
_call_thread_continue();
/* NOTREACHED */
}
void
_activate_thread_continue(void)
{
-#if NO_CONTINUATIONS
- loop:
-#endif
+ kern_return_t result;
+ thread_t thread;
+
(void) splsched();
simple_lock(&thread_call_lock);
- if (thread_calls.thread_num <
- (thread_calls.active_num + thread_calls.pending_num)) {
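+ /* Create additional callout threads while work is pending and none are active. */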
+ while ( thread_call_vars.active_num <= 0 &&
+ thread_call_vars.pending_num > 0 ) {
+
+ if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
+ thread_call_vars.active_hiwat = thread_call_vars.active_num;
- if (++thread_calls.thread_num > thread_calls.thread_hiwat)
- thread_calls.thread_hiwat = thread_calls.thread_num;
+ if (++thread_call_vars.thread_num > thread_call_vars.thread_hiwat)
+ thread_call_vars.thread_hiwat = thread_call_vars.thread_num;
simple_unlock(&thread_call_lock);
(void) spllo();
- (void) kernel_thread_with_priority(kernel_task,
- MAXPRI_KERNBAND-1, _call_thread, TRUE);
-#if NO_CONTINUATIONS
- thread_block((void (*)(void)) 0);
- goto loop;
-#else
- thread_block(_activate_thread_continue);
-#endif
- /* NOTREACHED */
- }
- else if (thread_calls.pending_num > 0) {
- _call_thread_wake();
+ result = kernel_thread_start_priority((thread_continue_t)_call_thread, NULL, MAXPRI_KERNEL - 1, &thread);
+ if (result != KERN_SUCCESS)
+ panic("activate_thread");
- simple_unlock(&thread_call_lock);
- (void) spllo();
+ thread_deallocate(thread);
-#if NO_CONTINUATIONS
- thread_block((void (*)(void)) 0);
- goto loop;
-#else
- thread_block(_activate_thread_continue);
-#endif
- /* NOTREACHED */
- }
+ (void) splsched();
+ simple_lock(&thread_call_lock);
+ }
assert_wait(&activate_thread_awake, THREAD_INTERRUPTIBLE);
activate_thread_awake = FALSE;
simple_unlock(&thread_call_lock);
(void) spllo();
-#if NO_CONTINUATIONS
- thread_block((void (*)(void)) 0);
- goto loop;
-#else
- thread_block(_activate_thread_continue);
-#endif
+ thread_block((thread_continue_t)_activate_thread_continue);
/* NOTREACHED */
}
void
_activate_thread(void)
{
- thread_t self = current_thread();
+ thread_t self = current_thread();
- self->vm_privilege = TRUE;
+ self->options |= TH_OPT_VMPRIV;
vm_page_free_reserve(2); /* XXX */
- stack_privilege(self);
_activate_thread_continue();
/* NOTREACHED */
static
void
_delayed_call_timer(
- timer_call_param_t p0,
- timer_call_param_t p1
+ __unused timer_call_param_t p0,
+ __unused timer_call_param_t p1
)
{
- AbsoluteTime timestamp;
+ uint64_t timestamp;
thread_call_t call;
boolean_t new_pending = FALSE;
- int s;
+ spl_t s;
s = splsched();
simple_lock(&thread_call_lock);
clock_get_uptime(&timestamp);
- call = TC(queue_first(&delayed_call_queue));
+ call = TC(queue_first(&thread_call_delayed_queue));
- while (!queue_end(&delayed_call_queue, qe(call))) {
- if (CMP_ABSOLUTETIME(&call->deadline, &timestamp) <= 0) {
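+ /* Move each delayed call whose deadline has passed onto the pending queue. */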
+ while (!queue_end(&thread_call_delayed_queue, qe(call))) {
+ if (call->deadline <= timestamp) {
_delayed_call_dequeue(call);
_pending_call_enqueue(call);
else
break;
- call = TC(queue_first(&delayed_call_queue));
+ call = TC(queue_first(&thread_call_delayed_queue));
}
- if (!queue_end(&delayed_call_queue, qe(call)))
+ if (!queue_end(&thread_call_delayed_queue, qe(call)))
_set_delayed_call_timer(call);
- if (new_pending)
+ if (new_pending && thread_call_vars.active_num <= 0)
_call_thread_wake();
simple_unlock(&thread_call_lock);