/*
- * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2003-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>
+#include <chud/chud_thread.h>
#include <machine/machine_routines.h>
+#include <libkern/OSAtomic.h>
+
+#if KPC
+#include <kern/kpc.h>
+#endif
+
+#if KPERF
+#include <kperf/kperf.h>
+#endif
+
// include the correct file to find real_ncpus
#if defined(__i386__) || defined(__x86_64__)
# include <i386/mp.h>
-#endif // i386 or x86_64
+#else
+// fall back on declaring it extern. The linker will sort us out.
+extern unsigned int real_ncpus;
+#endif
-#if defined(__ppc__) || defined(__ppc64__)
-# include <ppc/cpu_internal.h>
-#endif // ppc or ppc64
+// Mask for supported options
+#define T_CHUD_BIND_OPT_MASK (-1UL)
+#if 0
#pragma mark **** thread binding ****
+#endif
+/*
+ * This method will bind a given thread to the requested CPU starting at the
+ * next time quantum. If the thread is the current thread, this method will
+ * force a thread_block(). The result is that if you call this method on the
+ * current thread, you will be on the requested CPU when this method returns.
+ */
__private_extern__ kern_return_t
-chudxnu_bind_thread(thread_t thread, int cpu)
+chudxnu_bind_thread(thread_t thread, int cpu, __unused int options)
{
processor_t proc = NULL;
-
- if(cpu >= real_ncpus) // sanity check
+
+ if(cpu < 0 || (unsigned int)cpu >= real_ncpus) // sanity check
return KERN_FAILURE;
+
+ // temporary restriction until after phase 2 of the scheduler
+ if(thread != current_thread())
+ return KERN_FAILURE;
proc = cpu_to_processor(cpu);
+ /*
+	 * Potentially racy, but mainly to prevent binding to a
+	 * shutdown processor.
+ */
if(proc && !(proc->state == PROCESSOR_OFF_LINE) &&
- !(proc->state == PROCESSOR_SHUTDOWN)) {
- /* disallow bind to shutdown processor */
- thread_bind(thread, proc);
- if(thread==current_thread()) {
+ !(proc->state == PROCESSOR_SHUTDOWN)) {
+
+ thread_bind(proc);
+
+ /*
+ * If we're trying to bind the current thread, and
+ * we're not on the target cpu, and not at interrupt
+ * context, block the current thread to force a
+ * reschedule on the target CPU.
+ */
+ if(thread == current_thread() &&
+ !ml_at_interrupt_context() && cpu_number() != cpu) {
(void)thread_block(THREAD_CONTINUE_NULL);
}
return KERN_SUCCESS;
}
__private_extern__ kern_return_t
-chudxnu_unbind_thread(thread_t thread)
+chudxnu_unbind_thread(thread_t thread, __unused int options)
{
- thread_bind(thread, PROCESSOR_NULL);
+ if(thread == current_thread())
+ thread_bind(PROCESSOR_NULL);
return KERN_SUCCESS;
}
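+
+/*
+ * Illustrative usage sketch, not part of this file: given the current
+ * restriction to the calling thread, a sampler that must execute on a
+ * specific CPU might do the following (target_cpu is a hypothetical
+ * int in [0, real_ncpus); the options arguments are currently unused):
+ *
+ *	if (chudxnu_bind_thread(current_thread(), target_cpu, 0) == KERN_SUCCESS) {
+ *		// now running on target_cpu, unless called at interrupt context
+ *		chudxnu_unbind_thread(current_thread(), 0);
+ *	}
+ */
+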
+__private_extern__ boolean_t
+chudxnu_thread_get_idle(thread_t thread) {
+ /*
+ * Instantaneous snapshot of the idle state of
+ * a given thread.
+ *
+ * Should be called only on an interrupted or
+ * suspended thread to avoid a race.
+ */
+ return ((thread->state & TH_IDLE) == TH_IDLE);
+}
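+
+/*
+ * Illustrative sketch, not part of this file: interrupt-time samplers
+ * typically use this to avoid attributing samples to the idle thread:
+ *
+ *	if (!chudxnu_thread_get_idle(thread))
+ *		record_sample(thread);	// record_sample() is hypothetical
+ */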
+
+__private_extern__ int
+chudxnu_thread_get_scheduler_state(thread_t thread) {
+ /*
+ * Instantaneous snapshot of the scheduler state of
+ * a given thread.
+ *
+ * MUST ONLY be called on an interrupted or
+ * locked thread, to avoid a race.
+ */
+
+ int state = 0;
+	/* volatile loads: snapshot each field exactly once */
+	int schedulerState = *(volatile int *)&(thread->state);
+	processor_t lastProcessor = *(volatile processor_t *)&(thread->last_processor);
+
+ if ((PROCESSOR_NULL != lastProcessor) && (thread == lastProcessor->active_thread)) {
+ state |= CHUDXNU_TS_RUNNING;
+ }
+
+ if (schedulerState & TH_RUN) {
+ state |= CHUDXNU_TS_RUNNABLE;
+ }
+
+ if (schedulerState & TH_WAIT) {
+ state |= CHUDXNU_TS_WAIT;
+ }
+
+ if (schedulerState & TH_UNINT) {
+ state |= CHUDXNU_TS_UNINT;
+ }
+
+ if (schedulerState & TH_SUSP) {
+ state |= CHUDXNU_TS_SUSP;
+ }
+
+ if (schedulerState & TH_TERMINATE) {
+ state |= CHUDXNU_TS_TERMINATE;
+ }
+
+ if (schedulerState & TH_IDLE) {
+ state |= CHUDXNU_TS_IDLE;
+ }
+
+ return state;
+}
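+
+/*
+ * Illustrative sketch, not part of this file: a profiler that has
+ * interrupted a thread can classify it from the returned mask:
+ *
+ *	int ts = chudxnu_thread_get_scheduler_state(thread);
+ *	if ((ts & CHUDXNU_TS_RUNNING) && !(ts & CHUDXNU_TS_IDLE)) {
+ *		// on core doing real work
+ *	} else if (ts & CHUDXNU_TS_WAIT) {
+ *		// blocked; CHUDXNU_TS_UNINT says whether the wait is interruptible
+ *	}
+ */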
+
+#if 0
#pragma mark **** task and thread info ****
+#endif
-__private_extern__
-boolean_t chudxnu_is_64bit_task(task_t task)
+__private_extern__ boolean_t
+chudxnu_is_64bit_task(task_t task)
{
return (task_has_64BitAddr(task));
}
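+
+/*
+ * Illustrative sketch, not part of this file: a user-space backtracer
+ * would pick its pointer and frame width off this, e.g.
+ *
+ *	size_t frame_width = chudxnu_is_64bit_task(task) ? 8 : 4;
+ */
+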
vm_size_t size, size_needed;
void *addr;
- if (pset == PROCESSOR_SET_NULL)
+ if (pset == PROCESSOR_SET_NULL || pset != &pset0)
return (KERN_INVALID_ARGUMENT);
- size = 0; addr = 0;
+ size = 0; addr = NULL;
for (;;) {
- pset_lock(pset);
- if (!pset->active) {
- pset_unlock(pset);
-
- return (KERN_FAILURE);
- }
+ lck_mtx_lock(&tasks_threads_lock);
if (type == THING_TASK)
- maxthings = pset->task_count;
+ maxthings = tasks_count;
else
- maxthings = pset->thread_count;
+ maxthings = threads_count;
/* do we have the memory we need? */
if (size_needed <= size)
break;
- /* unlock the pset and allocate more memory */
- pset_unlock(pset);
+ lck_mtx_unlock(&tasks_threads_lock);
if (size != 0)
kfree(addr, size);
case THING_TASK:
{
- task_t task, *tasks = (task_t *)addr;
+ task_t task, *task_list = (task_t *)addr;
- for (task = (task_t)queue_first(&pset->tasks);
- !queue_end(&pset->tasks, (queue_entry_t)task);
- task = (task_t)queue_next(&task->pset_tasks)) {
+ for (task = (task_t)queue_first(&tasks);
+ !queue_end(&tasks, (queue_entry_t)task);
+ task = (task_t)queue_next(&task->tasks)) {
task_reference_internal(task);
- tasks[actual++] = task;
+ task_list[actual++] = task;
}
break;
case THING_THREAD:
{
- thread_t thread, *threads = (thread_t *)addr;
+ thread_t thread, *thread_list = (thread_t *)addr;
- for (i = 0, thread = (thread_t)queue_first(&pset->threads);
- !queue_end(&pset->threads, (queue_entry_t)thread);
- thread = (thread_t)queue_next(&thread->pset_threads)) {
+ for (i = 0, thread = (thread_t)queue_first(&threads);
+ !queue_end(&threads, (queue_entry_t)thread);
+ thread = (thread_t)queue_next(&thread->threads)) {
thread_reference_internal(thread);
- threads[actual++] = thread;
+ thread_list[actual++] = thread;
}
break;
}
}
- pset_unlock(pset);
+ lck_mtx_unlock(&tasks_threads_lock);
if (actual < maxthings)
size_needed = actual * sizeof (mach_port_t);
if (actual == 0) {
/* no things, so return null pointer and deallocate memory */
- *thing_list = 0;
+ *thing_list = NULL;
*count = 0;
if (size != 0)
case THING_TASK:
{
- task_t *tasks = (task_t *)addr;
+ task_t *task_list = (task_t *)addr;
for (i = 0; i < actual; i++)
- task_deallocate(tasks[i]);
+ task_deallocate(task_list[i]);
break;
}
case THING_THREAD:
{
- thread_t *threads = (thread_t *)addr;
+ thread_t *thread_list = (thread_t *)addr;
for (i = 0; i < actual; i++)
- thread_deallocate(threads[i]);
+ thread_deallocate(thread_list[i]);
break;
}
}
mach_msg_type_number_t *count)
{
mach_msg_type_number_t actual;
- thread_t *threads;
+ thread_t *thread_list;
thread_t thread;
vm_size_t size, size_needed;
void *addr;
if (task == TASK_NULL)
return (KERN_INVALID_ARGUMENT);
- size = 0; addr = 0;
+ size = 0; addr = NULL;
for (;;) {
task_lock(task);
}
/* OK, have memory and the task is locked & active */
- threads = (thread_t *)addr;
+ thread_list = (thread_t *)addr;
i = j = 0;
for (thread = (thread_t)queue_first(&task->threads); i < actual;
++i, thread = (thread_t)queue_next(&thread->task_threads)) {
thread_reference_internal(thread);
- threads[j++] = thread;
+ thread_list[j++] = thread;
}
assert(queue_end(&task->threads, (queue_entry_t)thread));
if (actual == 0) {
/* no threads, so return null pointer and deallocate memory */
- *threads_out = 0;
+ *threads_out = NULL;
*count = 0;
if (size != 0)
newaddr = kalloc(size_needed);
if (newaddr == 0) {
for (i = 0; i < actual; ++i)
- thread_deallocate(threads[i]);
+ thread_deallocate(thread_list[i]);
kfree(addr, size);
return (KERN_RESOURCE_SHORTAGE);
}
bcopy(addr, newaddr, size_needed);
kfree(addr, size);
- threads = (thread_t *)newaddr;
+ thread_list = (thread_t *)newaddr;
}
- *threads_out = threads;
+ *threads_out = thread_list;
*count = actual;
}
task_array_t *task_list,
mach_msg_type_number_t *count)
{
- return chudxnu_private_processor_set_things(&default_pset, (mach_port_t **)task_list, count, THING_TASK);
+ return chudxnu_private_processor_set_things(&pset0, (mach_port_t **)task_list, count, THING_TASK);
}
__private_extern__ kern_return_t
return KERN_FAILURE;
}
}
-
__private_extern__ kern_return_t
chudxnu_all_threads(
thread_array_t *thread_list,
mach_msg_type_number_t *count)
{
- return chudxnu_private_processor_set_things(&default_pset, (mach_port_t **)thread_list, count, THING_THREAD);
+ return chudxnu_private_processor_set_things(&pset0, (mach_port_t **)thread_list, count, THING_THREAD);
}
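+
+/*
+ * Illustrative sketch, not part of this file: chudxnu_all_tasks() and
+ * chudxnu_all_threads() hand the caller a reference on every entry, so
+ * each one must be dropped and the array freed when done:
+ *
+ *	thread_array_t list;
+ *	mach_msg_type_number_t count, i;
+ *	if (chudxnu_all_threads(&list, &count) == KERN_SUCCESS && count > 0) {
+ *		for (i = 0; i < count; i++) {
+ *			// ... examine list[i] ...
+ *			thread_deallocate(list[i]);
+ *		}
+ *		kfree(list, count * sizeof(thread_t));
+ *	}
+ *
+ * The kfree() size assumes the trimmed allocation made above; real
+ * callers may release the buffer through a different path.
+ */
+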
__private_extern__ kern_return_t
return thread_info(thread, flavor, thread_info_out, thread_info_count);
}
-__private_extern__ kern_return_t
-chudxnu_thread_last_context_switch(thread_t thread, uint64_t *timestamp)
+
+/* thread marking stuff */
+
+__private_extern__ boolean_t
+chudxnu_thread_get_marked(thread_t thread)
{
- *timestamp = thread->last_switch;
- return KERN_SUCCESS;
+ if(thread)
+ return ((thread->t_chud & T_CHUD_MARKED) != 0);
+ return FALSE;
+}
+
+__private_extern__ boolean_t
+chudxnu_thread_set_marked(thread_t thread, boolean_t new_value)
+{
+ boolean_t old_val;
+
+ if(thread) {
+ if(new_value) {
+ // set the marked bit
+ old_val = OSBitOrAtomic(T_CHUD_MARKED, &(thread->t_chud));
+ } else {
+ // clear the marked bit
+ old_val = OSBitAndAtomic(~T_CHUD_MARKED, &(thread->t_chud));
+ }
+ return (old_val & T_CHUD_MARKED) == T_CHUD_MARKED;
+ }
+ return FALSE;
+}
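+
+/*
+ * Illustrative sketch, not part of this file: since the previous value
+ * is returned atomically, the marked bit doubles as a test-and-set,
+ * ensuring one-time processing of a thread:
+ *
+ *	if (!chudxnu_thread_set_marked(thread, TRUE)) {
+ *		// first to mark it; do the one-time work here
+ *	}
+ */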
+
+/* XXX: good thing this code is experimental... */
+
+/* external handler */
+extern void (*chudxnu_thread_ast_handler)(thread_t);
+void (*chudxnu_thread_ast_handler)(thread_t) = NULL;
+
+/* AST callback to dispatch to AppleProfile */
+extern void chudxnu_thread_ast(thread_t);
+void
+chudxnu_thread_ast(thread_t thread)
+{
+#if KPC
+ /* check for PMC work */
+ kpc_thread_ast_handler(thread);
+#endif
+
+#if KPERF
+ /* check for kperf work */
+ kperf_thread_ast_handler(thread);
+#endif
+
+	/* read the handler pointer once, for atomicity wrt kdebug events */
+ void (*handler)(thread_t) = chudxnu_thread_ast_handler;
+ if( handler )
+ handler( thread );
+
+ thread->t_chud = 0;
+}
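+
+/*
+ * Illustrative sketch, not part of this file: an external profiling
+ * component hooks the AST path by publishing its callback through the
+ * exported pointer (my_ast_handler is hypothetical):
+ *
+ *	static void my_ast_handler(thread_t thread) { ... }
+ *	...
+ *	chudxnu_thread_ast_handler = my_ast_handler;
+ *
+ * Reading the pointer into a local above is what makes unhooking safe
+ * to observe from the AST path.
+ */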
+
+/* Get and set bits on the thread and trigger an AST handler */
+void chudxnu_set_thread_ast( thread_t thread );
+void
+chudxnu_set_thread_ast( thread_t thread )
+{
+ /* FIXME: only call this on current thread from an interrupt handler for now... */
+ if( thread != current_thread() )
+ panic( "unsafe AST set" );
+
+ act_set_kperf(thread);
+}
+
+/* get and set the thread bits */
+extern uint32_t chudxnu_get_thread_bits( thread_t thread );
+extern void chudxnu_set_thread_bits( thread_t thread, uint32_t bits );
+
+uint32_t
+chudxnu_get_thread_bits( thread_t thread )
+{
+ return thread->t_chud;
+}
+
+void
+chudxnu_set_thread_bits( thread_t thread, uint32_t bits )
+{
+ thread->t_chud = bits;
+}
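+
+/*
+ * Illustrative sketch, not part of this file: an interrupt handler can
+ * stash state in the t_chud bits and request an AST so that
+ * chudxnu_thread_ast() runs when the interrupted thread resumes:
+ *
+ *	chudxnu_set_thread_bits(thread, chudxnu_get_thread_bits(thread) | T_CHUD_MARKED);
+ *	chudxnu_set_thread_ast(thread);	// thread must be current_thread()
+ */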
+
+/* Get and set the thread dirty bits, so CHUD can track whether a
+ * thread has been dispatched since it last looked. The caller must
+ * hold the thread lock.
+ */
+boolean_t
+chudxnu_thread_get_dirty(thread_t thread)
+{
+	return (thread->c_switch != thread->chud_c_switch);
}
+void
+chudxnu_thread_set_dirty(thread_t thread, boolean_t makedirty)
+{
+ if( makedirty )
+ thread->chud_c_switch = thread->c_switch - 1;
+ else
+ thread->chud_c_switch = thread->c_switch;
+}
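+
+/*
+ * Illustrative sketch, not part of this file: a periodic sampler can
+ * use the dirty bit to skip threads that have not been dispatched
+ * since its last visit (thread lock held, as required above; sample()
+ * is hypothetical):
+ *
+ *	if (chudxnu_thread_get_dirty(thread)) {
+ *		sample(thread);
+ *		chudxnu_thread_set_dirty(thread, FALSE);
+ *	}
+ */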