#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <libkern/OSAtomic.h>
+#include <kern/kern_types.h>
+#include <kern/timer_call.h>
#include <kern/thread_call.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
/*
* pid/proc
*/
-#define proc_t struct proc
+/* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */
+#define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */
/* Not called from probe context */
proc_t *
return PROC_NULL;
}
- task_suspend(p->task);
+ task_suspend_internal(p->task);
proc_lock(p);
proc_unlock(p);
- task_resume(p->task);
+ task_resume_internal(p->task);
proc_rele(p);
}
* cpuvar
*/
lck_mtx_t cpu_lock;
+lck_mtx_t cyc_lock;
lck_mtx_t mod_lock;
-cpu_t *cpu_list;
+dtrace_cpu_t *cpu_list;
cpu_core_t *cpu_core; /* XXX TLB lockdown? */
/*
return kauth_cred_issuser(cr); /* XXX TODO: HAS_PRIVILEGE(cr, priv); */
}
+/* XXX Get around const poisoning using structure assigns */
gid_t
-crgetgid(const cred_t *cr) { return cr->cr_groups[0]; }
+crgetgid(const cred_t *cr) { cred_t copy_cr = *cr; return kauth_cred_getgid(&copy_cr); }
uid_t
-crgetuid(const cred_t *cr) { return cr->cr_uid; }
+crgetuid(const cred_t *cr) { cred_t copy_cr = *cr; return kauth_cred_getuid(&copy_cr); }
/*
* "cyclic"
*/
-/* osfmk/kern/timer_call.h */
-typedef void *call_entry_param_t;
-typedef void (*call_entry_func_t)(
- call_entry_param_t param0,
- call_entry_param_t param1);
-
-typedef struct call_entry {
- queue_chain_t q_link;
- call_entry_func_t func;
- call_entry_param_t param0;
- call_entry_param_t param1;
- uint64_t deadline;
- enum {
- IDLE,
- PENDING,
- DELAYED } state;
-} call_entry_data_t;
-
-
-typedef struct call_entry *timer_call_t;
-typedef void *timer_call_param_t;
-typedef void (*timer_call_func_t)(
- timer_call_param_t param0,
- timer_call_param_t param1);
-
-extern void
-timer_call_setup(
- timer_call_t call,
- timer_call_func_t func,
- timer_call_param_t param0);
-
-extern boolean_t
-timer_call_enter1(
- timer_call_t call,
- timer_call_param_t param1,
- uint64_t deadline);
-
-extern boolean_t
-timer_call_cancel(
- timer_call_t call);
-
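+/*
+ * Each cyclic is emulated by a self-rearming timer_call pinned to one CPU.
+ * The wrapper records its CPU and suspend state so that pending timers can
+ * be cancelled when that CPU goes offline and rearmed when it returns.
+ */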
typedef struct wrap_timer_call {
- cyc_handler_t hdlr;
- cyc_time_t when;
- uint64_t deadline;
- struct call_entry call;
+ /* node attributes */
+ cyc_handler_t hdlr;
+ cyc_time_t when;
+ uint64_t deadline;
+ int cpuid;
+ boolean_t suspended;
+ struct timer_call call;
+
+ /* next item in the linked list */
+ LIST_ENTRY(wrap_timer_call) entries;
} wrap_timer_call_t;
-#define WAKEUP_REAPER 0x7FFFFFFFFFFFFFFFLL
-#define NEARLY_FOREVER 0x7FFFFFFFFFFFFFFELL
+#define WAKEUP_REAPER 0x7FFFFFFFFFFFFFFFLL
+#define NEARLY_FOREVER 0x7FFFFFFFFFFFFFFELL
+
+/* CPU going online/offline notifications */
+void (*dtrace_cpu_state_changed_hook)(int, boolean_t) = NULL;
+void dtrace_cpu_state_changed(int, boolean_t);
+
+void
+dtrace_install_cpu_hooks(void) {
+ dtrace_cpu_state_changed_hook = dtrace_cpu_state_changed;
+}
+
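+/*
+ * Runs on the CPU whose state is changing: cancels every cyclic timer
+ * armed on an offlining CPU, and rearms each one against a fresh deadline
+ * when the CPU comes back online.
+ */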
+void
+dtrace_cpu_state_changed(int cpuid, boolean_t is_running) {
+#pragma unused(cpuid)
+ wrap_timer_call_t *wrapTC = NULL;
+ boolean_t suspend = (is_running ? FALSE : TRUE);
+ dtrace_icookie_t s;
+
+ /* Ensure that we're not going to leave the CPU */
+ s = dtrace_interrupt_disable();
+ assert(cpuid == cpu_number());
+
+ LIST_FOREACH(wrapTC, &(cpu_list[cpu_number()].cpu_cyc_list), entries) {
+ assert(wrapTC->cpuid == cpu_number());
+ if (suspend) {
+ assert(!wrapTC->suspended);
+ /* If this fails, we'll panic anyway, so let's do this now. */
+ if (!timer_call_cancel(&wrapTC->call))
+ panic("timer_call_set_suspend() failed to cancel a timer call");
+ wrapTC->suspended = TRUE;
+ } else {
+ /* Rearm the timer, but ensure it was suspended first. */
+ assert(wrapTC->suspended);
+ clock_deadline_for_periodic_event(wrapTC->when.cyt_interval, mach_absolute_time(),
+ &wrapTC->deadline);
+ timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline,
+ TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
+ wrapTC->suspended = FALSE;
+ }
+ }
+
+ /* Restore the previous interrupt state. */
+ dtrace_interrupt_enable(s);
+}
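+/*
+ * Timer callback: invoke the cyclic's handler, then compute the next
+ * periodic deadline and rearm the underlying timer_call.
+ */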
static void
_timer_call_apply_cyclic( void *ignore, void *vTChdl )
(*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg );
clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline) );
- timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline );
-
- /* Did timer_call_remove_cyclic request a wakeup call when this timer call was re-armed? */
- if (wrapTC->when.cyt_interval == WAKEUP_REAPER)
- thread_wakeup((event_t)wrapTC);
+ timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL );
}
static cyclic_id_t
timer_call_add_cyclic(wrap_timer_call_t *wrapTC, cyc_handler_t *handler, cyc_time_t *when)
{
uint64_t now;
+ dtrace_icookie_t s;
timer_call_setup( &(wrapTC->call), _timer_call_apply_cyclic, NULL );
wrapTC->hdlr = *handler;
wrapTC->deadline = now;
clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) );
- timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline );
+
+ /* Insert the timer into the list of running timers on this CPU, and start it. */
+ s = dtrace_interrupt_disable();
+ wrapTC->cpuid = cpu_number();
+ LIST_INSERT_HEAD(&cpu_list[wrapTC->cpuid].cpu_cyc_list, wrapTC, entries);
+ timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline,
+ TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
+ wrapTC->suspended = FALSE;
+ dtrace_interrupt_enable(s);
return (cyclic_id_t)wrapTC;
}
+/*
+ * Executed on the CPU the timer is running on.
+ */
static void
timer_call_remove_cyclic(cyclic_id_t cyclic)
{
wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)cyclic;
- while (!timer_call_cancel(&(wrapTC->call))) {
- int ret = assert_wait(wrapTC, THREAD_UNINT);
- ASSERT(ret == THREAD_WAITING);
+ assert(wrapTC);
+ assert(cpu_number() == wrapTC->cpuid);
- wrapTC->when.cyt_interval = WAKEUP_REAPER;
+ if (!timer_call_cancel(&wrapTC->call))
+ panic("timer_call_remove_cyclic() failed to cancel a timer call");
- ret = thread_block(THREAD_CONTINUE_NULL);
- ASSERT(ret == THREAD_AWAKENED);
- }
+ LIST_REMOVE(wrapTC, entries);
}
static void *
{
ASSERT( cyclic != CYCLIC_NONE );
- timer_call_remove_cyclic( cyclic );
+ /* Removing a timer call must be done on the CPU the timer is running on. */
+ wrap_timer_call_t *wrapTC = (wrap_timer_call_t *) cyclic;
+ dtrace_xcall(wrapTC->cpuid, (dtrace_xcall_t) timer_call_remove_cyclic, (void*) cyclic);
+
_FREE((void *)cyclic, M_TEMP);
}
t = (char *)cyc_list;
t += sizeof(cyc_omni_handler_t);
- cyc_list = (cyclic_id_list_t)t;
+ cyc_list = (cyclic_id_list_t)(uintptr_t)t;
t += sizeof(cyclic_id_t)*NCPU;
t += (sizeof(wrap_timer_call_t))*cpu_number();
- wrapTC = (wrap_timer_call_t *)t;
+ wrapTC = (wrap_timer_call_t *)(uintptr_t)t;
cyc_list[cpu_number()] = timer_call_add_cyclic(wrapTC, &cH, &cT);
}
t = (char *)cyc_list;
t += sizeof(cyc_omni_handler_t);
- cyc_list = (cyclic_id_list_t)t;
+ cyc_list = (cyclic_id_list_t)(uintptr_t)t;
- cid = cyc_list[cpu_number()];
- oarg = timer_call_get_cyclic_arg(cid);
-
- timer_call_remove_cyclic( cid );
- (omni->cyo_offline)(omni->cyo_arg, CPU, oarg);
+ /*
+ * If the processor was offline when dtrace started, we did not allocate
+ * a cyclic timer for this CPU.
+ */
+ if ((cid = cyc_list[cpu_number()]) != CYCLIC_NONE) {
+ oarg = timer_call_get_cyclic_arg(cid);
+ timer_call_remove_cyclic(cid);
+ (omni->cyo_offline)(omni->cyo_arg, CPU, oarg);
+ }
}
void
* and clock drift on later invocations is not a worry.
*/
uint64_t deadline = mach_absolute_time() + nanos;
-
+ /* DRK: consider using a lower priority callout here */
thread_call_enter_delayed(call, deadline);
return call;
int
ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
- char *name, int **data, uint_t *nelements)
+ const char *name, int **data, uint_t *nelements)
{
#pragma unused(match_dev,dip,flags)
unsigned int i;
}
int
-ddi_driver_major(dev_info_t *devi) { return (int)major(devi); }
+ddi_driver_major(dev_info_t *devi) { return (int)major(CAST_DOWN_EXPLICIT(int,devi)); }
int
ddi_create_minor_node(dev_info_t *dip, const char *name, int spec_type,
minor_t minor_num, const char *node_type, int flag)
{
#pragma unused(spec_type,node_type,flag)
- dev_t dev = makedev( (uint32_t)dip, minor_num );
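+ /* Derive the major number from the dev_info_t rather than truncating the
+ * pointer value itself, which is not a valid major number on LP64. */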
+ dev_t dev = makedev( ddi_driver_major(dip), minor_num );
if (NULL == devfs_make_node( dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, name, 0 ))
return DDI_FAILURE;
void* dt_kmem_alloc_aligned(size_t size, size_t align, int kmflag)
{
- void* buf;
- intptr_t p;
- void** buf_backup;
+ void *mem, **addr_to_free;
+ intptr_t mem_aligned;
+ size_t *size_to_free, hdr_size;
- buf = dt_kmem_alloc(align + sizeof(void*) + size, kmflag);
+ /* Must be a power of two. */
+ assert(align != 0);
+ assert((align & (align - 1)) == 0);
- if(!buf)
+ /*
+ * We are going to add a header to the allocation. It contains
+ * the address to free and the total size of the buffer.
+ */
+ hdr_size = sizeof(size_t) + sizeof(void*);
+ mem = dt_kmem_alloc(size + align + hdr_size, kmflag);
+ if (mem == NULL)
return NULL;
- p = (intptr_t)buf;
- p += sizeof(void*); /* now we have enough room to store the backup */
- p = P2ROUNDUP(p, align); /* and now we're aligned */
+ mem_aligned = (intptr_t) (((intptr_t) mem + align + hdr_size) & ~(align - 1));
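+ /*
+ * Because align is a power of two, rounding (mem + align + hdr_size) down
+ * to an align boundary always leaves at least hdr_size bytes between mem
+ * and mem_aligned, so the header fits just below the returned pointer.
+ */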
+
+ /* Write the address to free in the header. */
+ addr_to_free = (void**) (mem_aligned - sizeof(void*));
+ *addr_to_free = mem;
- buf_backup = (void**)(p - sizeof(void*));
- *buf_backup = buf; /* back up the address we need to free */
+ /* Write the size to free in the header. */
+ size_to_free = (size_t*) (mem_aligned - hdr_size);
+ *size_to_free = size + align + hdr_size;
- return (void*)p;
+ return (void*) mem_aligned;
}
void* dt_kmem_zalloc_aligned(size_t size, size_t align, int kmflag)
void dt_kmem_free_aligned(void* buf, size_t size)
{
#pragma unused(size)
- intptr_t p;
- void** buf_backup;
+ intptr_t ptr = (intptr_t) buf;
+ void **addr_to_free = (void**) (ptr - sizeof(void*));
+ size_t *size_to_free = (size_t*) (ptr - (sizeof(size_t) + sizeof(void*)));
- p = (intptr_t)buf;
- p -= sizeof(void*);
- buf_backup = (void**)(p);
+ if (buf == NULL)
+ return;
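+
+ /* The header stored by dt_kmem_alloc_aligned() just below the aligned
+ * pointer supplies the original allocation address and its total size. */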
- dt_kmem_free(*buf_backup, size + ((char*)buf - (char*)*buf_backup));
+ dt_kmem_free(*addr_to_free, *size_to_free);
}
/*
*/
kmem_cache_t *
kmem_cache_create(
- char *name, /* descriptive name for this cache */
+ const char *name, /* descriptive name for this cache */
size_t bufsize, /* size of the objects it manages */
size_t align, /* required object alignment */
int (*constructor)(void *, void *, int), /* object constructor */
p->blist = bl = blist_create( size );
blist_free(bl, 0, size);
- if (base) blist_alloc( bl, (daddr_t)base ); /* Chomp off initial ID(s) */
+ if (base) blist_alloc( bl, (daddr_t)(uintptr_t)base ); /* Chomp off initial ID(s) */
return (vmem_t *)p;
}
panic("vmem_alloc: failure after blist_resize!");
}
- return (void *)p;
+ return (void *)(uintptr_t)p;
}
void
{
struct blist_hdl *p = (struct blist_hdl *)vmp;
- blist_free( p->blist, (daddr_t)vaddr, (daddr_t)size );
+ blist_free( p->blist, (daddr_t)(uintptr_t)vaddr, (daddr_t)size );
}
void
hrtime_t
dtrace_gethrestime(void)
{
- uint32_t secs, nanosecs;
+ clock_sec_t secs;
+ clock_nsec_t nanosecs;
uint64_t secs64, ns64;
clock_get_calendar_nanotime_nowait(&secs, &nanosecs);
uint32_t
dtrace_cas32(uint32_t *target, uint32_t cmp, uint32_t new)
{
- if (OSCompareAndSwap( cmp, new, (unsigned long *)target ))
+ if (OSCompareAndSwap( (UInt32)cmp, (UInt32)new, (volatile UInt32 *)target ))
return cmp;
else
return ~cmp; /* Must return something *other* than cmp */
void *
dtrace_casptr(void *target, void *cmp, void *new)
{
-#if defined(__LP64__)
-#error dtrace_casptr implementation missing for LP64
-#else
- if (OSCompareAndSwap( (uint32_t)cmp, (uint32_t)new, (unsigned long *)target ))
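+ /* OSCompareAndSwapPtr is pointer-width aware, so a single implementation
+ * now covers both ILP32 and LP64. */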
+ if (OSCompareAndSwapPtr( cmp, new, (void**)target ))
return cmp;
else
return (void *)(~(uintptr_t)cmp); /* Must return something *other* than cmp */
-#endif
}
/*
ASSERT(kaddr + size >= kaddr);
- if (ml_at_interrupt_context() || /* Avoid possible copyio page fault on int stack, which panics! */
- 0 != recover || /* Avoid reentrancy into copyio facility. */
- uaddr + size < uaddr || /* Avoid address wrap. */
+ if ( uaddr + size < uaddr || /* Avoid address wrap. */
KERN_FAILURE == dtrace_copyio_preflight(uaddr)) /* Machine specific setup/constraints. */
{
DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
}
void
-dtrace_copyin(user_addr_t src, uintptr_t dst, size_t len)
+dtrace_copyin(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags)
{
+#pragma unused(flags)
+
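+ /* The flags argument follows the newer DTrace copy interface; faults are
+ * still reported through the per-CPU DTrace flags, so it is unused here. */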
if (dtrace_copycheck( src, dst, len )) {
if (copyin((const user_addr_t)src, (char *)dst, (vm_size_t)len)) {
DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
}
void
-dtrace_copyinstr(user_addr_t src, uintptr_t dst, size_t len)
+dtrace_copyinstr(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags)
{
+#pragma unused(flags)
+
size_t actual;
if (dtrace_copycheck( src, dst, len )) {
}
void
-dtrace_copyout(uintptr_t src, user_addr_t dst, size_t len)
+dtrace_copyout(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags)
{
+#pragma unused(flags)
+
if (dtrace_copycheck( dst, src, len )) {
if (copyout((const void *)src, dst, (vm_size_t)len)) {
DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
}
void
-dtrace_copyoutstr(uintptr_t src, user_addr_t dst, size_t len)
+dtrace_copyoutstr(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags)
{
+#pragma unused(flags)
+
size_t actual;
if (dtrace_copycheck( dst, src, len )) {
return( DTRACE_CPUFLAG_ISSET(CPU_DTRACE_NOFAULT) ? TRUE : FALSE );
}
-void
-dtrace_vpanic(const char *format, va_list alist)
-{
- vuprintf( format, alist );
- panic("dtrace_vpanic");
-}
-
#define TOTTY 0x02
extern int prf(const char *, va_list, int, struct tty *); /* bsd/kern/subr_prf.h */
* 2002-01-24 gvdl Initial implementation of strstr
*/
-__private_extern__ char *
+__private_extern__ const char *
strstr(const char *in, const char *str)
{
char c;
c = *str++;
if (!c)
- return (char *) in; // Trivial empty string case
+ return (const char *) in; // Trivial empty string case
len = strlen(str);
do {
} while (sc != c);
} while (strncmp(in, str, len) != 0);
- return (char *) (in - 1);
+ return (const char *) (in - 1);
}
/*
int
dtrace_getstackdepth(int aframes)
{
- struct frame *fp = (struct frame *)dtrace_getfp();
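+ /* Use the compiler builtin to obtain this function's frame pointer. */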
+ struct frame *fp = (struct frame *)__builtin_frame_address(0);
struct frame *nextfp, *minfp, *stacktop;
int depth = 0;
int on_intr;
if ((on_intr = CPU_ON_INTR(CPU)) != 0)
stacktop = (struct frame *)dtrace_get_cpu_int_stack_top();
else
- stacktop = (struct frame *)(dtrace_get_kernel_stack(current_thread()) + KERNEL_STACK_SIZE);
+ stacktop = (struct frame *)(dtrace_get_kernel_stack(current_thread()) + kernel_stack_size);
minfp = fp;
vm_offset_t kstack_base = dtrace_get_kernel_stack(current_thread());
minfp = (struct frame *)kstack_base;
- stacktop = (struct frame *)(kstack_base + KERNEL_STACK_SIZE);
+ stacktop = (struct frame *)(kstack_base + kernel_stack_size);
on_intr = 0;
continue;