/*
- * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2003-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <libkern/OSAtomic.h>
+#if KPC
+#include <kern/kpc.h>
+#endif
+
+#if KPERF
+#include <kperf/kperf.h>
+#endif
+
// include the correct file to find real_ncpus
#if defined(__i386__) || defined(__x86_64__)
# include <i386/mp.h>
-#elif defined(__ppc__) || defined(__ppc64__)
-# include <ppc/cpu_internal.h>
-#elif defined(__arm__)
-# include <arm/cpu_internal.h>
#else
// fall back on declaring it extern. The linker will sort us out.
extern unsigned int real_ncpus;
#endif
// Mask for supported options
#define T_CHUD_BIND_OPT_MASK (-1UL)
+#if 0
#pragma mark **** thread binding ****
+#endif
/*
* This method will bind a given thread to the requested CPU starting at the
* reschedule on the target CPU.
*/
if(thread == current_thread() &&
- !(ml_at_interrupt_context() && cpu_number() == cpu)) {
+ !ml_at_interrupt_context() && cpu_number() != cpu) {
(void)thread_block(THREAD_CONTINUE_NULL);
}
return KERN_SUCCESS;
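+/*
+ * Usage sketch (hypothetical caller, not part of this change; the
+ * three-argument signature is an assumption based on the options mask
+ * defined above):
+ *
+ *	// Bind the calling thread to CPU 0 and block until it runs there.
+ *	kern_return_t kr = chudxnu_bind_thread(current_thread(), 0, 0);
+ */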
return ((thread->state & TH_IDLE) == TH_IDLE);
}
+__private_extern__ int
+chudxnu_thread_get_scheduler_state(thread_t thread) {
+ /*
+ * Instantaneous snapshot of the scheduler state of
+ * a given thread.
+ *
+ * MUST ONLY be called on an interrupted or
+ * locked thread, to avoid a race.
+ */
+
+ int state = 0;
+ int schedulerState = (volatile int)(thread->state);
+ processor_t lastProcessor = (volatile processor_t)(thread->last_processor);
+
+ if ((PROCESSOR_NULL != lastProcessor) && (thread == lastProcessor->active_thread)) {
+ state |= CHUDXNU_TS_RUNNING;
+ }
+
+ if (schedulerState & TH_RUN) {
+ state |= CHUDXNU_TS_RUNNABLE;
+ }
+
+ if (schedulerState & TH_WAIT) {
+ state |= CHUDXNU_TS_WAIT;
+ }
+
+ if (schedulerState & TH_UNINT) {
+ state |= CHUDXNU_TS_UNINT;
+ }
+
+ if (schedulerState & TH_SUSP) {
+ state |= CHUDXNU_TS_SUSP;
+ }
+
+ if (schedulerState & TH_TERMINATE) {
+ state |= CHUDXNU_TS_TERMINATE;
+ }
+
+ if (schedulerState & TH_IDLE) {
+ state |= CHUDXNU_TS_IDLE;
+ }
+
+ return state;
+}
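+
+/*
+ * Usage sketch (hypothetical caller): per the comment above, the thread
+ * must be interrupted or locked, e.g. sampling the current thread from
+ * an interrupt handler:
+ *
+ *	int state = chudxnu_thread_get_scheduler_state(current_thread());
+ *	if ((state & CHUDXNU_TS_RUNNING) && !(state & CHUDXNU_TS_IDLE)) {
+ *		// record a sample for this thread
+ *	}
+ */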
+
+#if 0
#pragma mark **** task and thread info ****
+#endif
__private_extern__ boolean_t
chudxnu_is_64bit_task(task_t task)
size = 0; addr = NULL;
for (;;) {
- mutex_lock(&tasks_threads_lock);
+ lck_mtx_lock(&tasks_threads_lock);
if (type == THING_TASK)
maxthings = tasks_count;
if (size_needed <= size)
break;
- mutex_unlock(&tasks_threads_lock);
+ lck_mtx_unlock(&tasks_threads_lock);
if (size != 0)
kfree(addr, size);
}
}
- mutex_unlock(&tasks_threads_lock);
+ lck_mtx_unlock(&tasks_threads_lock);
if (actual < maxthings)
size_needed = actual * sizeof (mach_port_t);
}
-__private_extern__ kern_return_t
-chudxnu_thread_last_context_switch(thread_t thread, uint64_t *timestamp)
-{
- *timestamp = thread->last_switch;
- return KERN_SUCCESS;
-}
-
/* thread marking stuff */
__private_extern__ boolean_t
if(thread) {
if(new_value) {
// set the marked bit
- old_val = OSBitOrAtomic(T_CHUD_MARKED, (UInt32 *) &(thread->t_chud));
+ old_val = OSBitOrAtomic(T_CHUD_MARKED, &(thread->t_chud));
} else {
// clear the marked bit
- old_val = OSBitAndAtomic(~T_CHUD_MARKED, (UInt32 *) &(thread->t_chud));
+ old_val = OSBitAndAtomic(~T_CHUD_MARKED, &(thread->t_chud));
}
return (old_val & T_CHUD_MARKED) == T_CHUD_MARKED;
}
return FALSE;
}
+/* XXX: good thing this code is experimental... */
+
+/* external handler */
+extern void (*chudxnu_thread_ast_handler)(thread_t);
+void (*chudxnu_thread_ast_handler)(thread_t) = NULL;
+
+/* AST callback to dispatch to AppleProfile */
+extern void chudxnu_thread_ast(thread_t);
+void
+chudxnu_thread_ast(thread_t thread)
+{
+#if KPC
+ /* check for PMC work */
+ kpc_thread_ast_handler(thread);
+#endif
+
+#if KPERF
+ /* check for kperf work */
+ kperf_thread_ast_handler(thread);
+#endif
+
+ /* atomicness for kdebug events */
+ void (*handler)(thread_t) = chudxnu_thread_ast_handler;
+ if( handler )
+ handler( thread );
+
+ thread->t_chud = 0;
+}
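+
+/*
+ * Sketch of how a client such as AppleProfile might hook the AST (the
+ * registration path is an assumption, not shown in this change):
+ *
+ *	static void my_ast_handler(thread_t thread) { ... }
+ *	...
+ *	chudxnu_thread_ast_handler = my_ast_handler;
+ */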
+
+/* Get and set bits on the thread and trigger an AST handler */
+void chudxnu_set_thread_ast( thread_t thread );
+void
+chudxnu_set_thread_ast( thread_t thread )
+{
+ /* FIXME: only call this on current thread from an interrupt handler for now... */
+ if( thread != current_thread() )
+ panic( "unsafe AST set" );
+
+ act_set_kperf(thread);
+}
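+
+/*
+ * Example (hypothetical): from an interrupt handler, flag the interrupted
+ * thread so that chudxnu_thread_ast() runs before it returns to user space:
+ *
+ *	if (ml_at_interrupt_context() && thread == current_thread())
+ *		chudxnu_set_thread_ast(thread);
+ */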
+
+/* get and set the thread bits */
+extern uint32_t chudxnu_get_thread_bits( thread_t thread );
+extern void chudxnu_set_thread_bits( thread_t thread, uint32_t bits );
+
+uint32_t
+chudxnu_get_thread_bits( thread_t thread )
+{
+ return thread->t_chud;
+}
+
+void
+chudxnu_set_thread_bits( thread_t thread, uint32_t bits )
+{
+ thread->t_chud = bits;
+}
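+
+/*
+ * Example (hypothetical): the accessors above read and write the raw
+ * t_chud word, e.g. setting the marked bit non-atomically:
+ *
+ *	uint32_t bits = chudxnu_get_thread_bits(thread);
+ *	chudxnu_set_thread_bits(thread, bits | T_CHUD_MARKED);
+ */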
+
+/* Get and set the thread dirty bits, so that CHUD can track whether the
+ * thread has been dispatched since it last looked. The caller must hold
+ * the thread lock.
+ */
+boolean_t
+chudxnu_thread_get_dirty(thread_t thread)
+{
+ if( thread->c_switch != thread->chud_c_switch )
+ return TRUE;
+ else
+ return FALSE;
+}
+
+void
+chudxnu_thread_set_dirty(thread_t thread, boolean_t makedirty)
+{
+ if( makedirty )
+ thread->chud_c_switch = thread->c_switch - 1;
+ else
+ thread->chud_c_switch = thread->c_switch;
+}