#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <machine/atomic.h>
+#include <kern/cambria_layout.h>
#include <kern/simple_lock.h>
#include <kern/sched_prim.h> /* for thread_wakeup() */
#include <kern/thread_call.h>
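+/*
+ * dtrace_membar_producer()/dtrace_membar_consumer() provide the store and
+ * load ordering the DTrace framework relies on. "dmb ish" is a data memory
+ * barrier over the inner-shareable domain, which covers every CPU the
+ * kernel schedules on, so it is issued unconditionally now that all arm64
+ * targets are treated as SMP.
+ */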
inline void
dtrace_membar_producer(void)
{
-#if __ARM_SMP__
__asm__ volatile ("dmb ish" : : : "memory");
-#else
- __asm__ volatile ("nop" : : : "memory");
-#endif
}
inline void
dtrace_membar_consumer(void)
{
-#if __ARM_SMP__
__asm__ volatile ("dmb ish" : : : "memory");
-#else
- __asm__ volatile ("nop" : : : "memory");
-#endif
}
/*
return ml_at_interrupt_context() ? 1 : 0;
}
-#if __ARM_SMP__
/*
* MP coordination
*/
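+/*
+ * thread_wakeup() on &dt_xc_sync below unblocks the thread that issued the
+ * cross-call once the handlers on the remote CPUs have finished.
+ */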
thread_wakeup((event_t) &dt_xc_sync);
}
}
-#endif
/*
 * dtrace_xcall() is not called from probe context.
 */
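+/*
+ * f(arg) is invoked on the CPU named by "cpu", or on every CPU when
+ * cpu == DTRACE_CPUALL.
+ */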
void
dtrace_xcall(processorid_t cpu, dtrace_xcall_t f, void *arg)
{
-#if __ARM_SMP__
/* Only one dtrace_xcall in flight allowed */
lck_mtx_lock(&dt_xc_lock);
lck_mtx_unlock(&dt_xc_lock);
return;
-#else
-#pragma unused(cpu)
- /* On uniprocessor systems, the cpu should always be either ourselves or all */
- ASSERT(cpu == CPU->cpu_id || cpu == DTRACE_CPUALL);
-
- (*f)(arg);
- return;
-#endif
}
/*
return (uint64_t)get_saved_state_reg(regs, reg);
}
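+/*
+ * There is no VM (guest) register state to sample on this architecture, so
+ * flag the access as an illegal operation and return 0; the consumer then
+ * sees an error instead of a junk value.
+ */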
+uint64_t
+dtrace_getvmreg(uint_t ndx)
+{
+#pragma unused(ndx)
+ DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
+ return 0;
+}
+
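+/*
+ * A standard arm64 frame record is two 8-byte slots: the caller's frame
+ * pointer at [fp] and the saved return address at [fp + 8], which is the
+ * slot RETURN_OFFSET64 indexes below.
+ */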
#define RETURN_OFFSET64 8
static int
dtrace_getustack_common(uint64_t * pcstack, int pcstack_limit, user_addr_t pc,
user_addr_t sp)
{
+ volatile uint16_t *flags = (volatile uint16_t *) &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
int ret = 0;
ASSERT(pcstack == NULL || pcstack_limit > 0);
pc = dtrace_fuword64((sp + RETURN_OFFSET64));
sp = dtrace_fuword64(sp);
+
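+ /*
+  * In probe context dtrace_fuword64() must not take a user fault; a failed
+  * access instead sets CPU_DTRACE_FAULT in the per-CPU flags checked here.
+  */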
+ /* Truncate ustack if the iterator causes a fault. */
+ if (*flags & CPU_DTRACE_FAULT) {
+ *flags &= ~CPU_DTRACE_FAULT;
+ break;
+ }
}
return ret;
}
void
dtrace_getupcstack(uint64_t * pcstack, int pcstack_limit)
{
- thread_t thread = current_thread();
- savearea_t *regs;
- user_addr_t pc, sp, fp;
+ thread_t thread = current_thread();
+ savearea_t *regs;
+ user_addr_t pc, sp, fp;
volatile uint16_t *flags = (volatile uint16_t *) &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
int n;
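+ /* Seed the unwind from the thread's saved user-mode register state. */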
pc = get_saved_state_pc(regs);
sp = get_saved_state_sp(regs);
- fp = get_saved_state_fp(regs);
+
+ {
+ fp = get_saved_state_fp(regs);
+ }
if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
*pcstack++ = (uint64_t) pc;
sp = dtrace_fuword64(sp);
}
-#if 0
- /* XXX ARMTODO*/
- /*
- * This is totally bogus: if we faulted, we're going to clear
- * the fault and break. This is to deal with the apparently
- * broken Java stacks on x86.
- */
+ /* Truncate ustack if the iterator causes a fault. */
if (*flags & CPU_DTRACE_FAULT) {
*flags &= ~CPU_DTRACE_FAULT;
break;
}
-#endif
}
zero: