#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <libkern/OSAtomic.h>
+#include <kern/kern_types.h>
+#include <kern/timer_call.h>
#include <kern/thread_call.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
return PROC_NULL;
}
- task_suspend(p->task);
+ task_suspend_internal(p->task);
proc_lock(p);
proc_unlock(p);
- task_resume(p->task);
+ task_resume_internal(p->task);
proc_rele(p);
}
* cpuvar
*/
lck_mtx_t cpu_lock;
+lck_mtx_t cyc_lock;
lck_mtx_t mod_lock;
-cpu_t *cpu_list;
+dtrace_cpu_t *cpu_list;
cpu_core_t *cpu_core; /* XXX TLB lockdown? */
/*
return kauth_cred_issuser(cr); /* XXX TODO: HAS_PRIVILEGE(cr, priv); */
}
+/* XXX Get around const poisoning using structure assigns */
gid_t
-crgetgid(const cred_t *cr) { return cr->cr_groups[0]; }
+crgetgid(const cred_t *cr) { cred_t copy_cr = *cr; return kauth_cred_getgid(&copy_cr); }
uid_t
-crgetuid(const cred_t *cr) { return cr->cr_uid; }
+crgetuid(const cred_t *cr) { cred_t copy_cr = *cr; return kauth_cred_getuid(&copy_cr); }
/*
* "cyclic"
*/
-/* osfmk/kern/timer_call.h */
-typedef void *call_entry_param_t;
-typedef void (*call_entry_func_t)(
- call_entry_param_t param0,
- call_entry_param_t param1);
-
-typedef struct call_entry {
- queue_chain_t q_link;
- call_entry_func_t func;
- call_entry_param_t param0;
- call_entry_param_t param1;
- uint64_t deadline;
- enum {
- IDLE,
- PENDING,
- DELAYED } state;
-} call_entry_data_t;
-
-
-typedef struct call_entry *timer_call_t;
-typedef void *timer_call_param_t;
-typedef void (*timer_call_func_t)(
- timer_call_param_t param0,
- timer_call_param_t param1);
-
-extern void
-timer_call_setup(
- timer_call_t call,
- timer_call_func_t func,
- timer_call_param_t param0);
-
-extern boolean_t
-timer_call_enter1(
- timer_call_t call,
- timer_call_param_t param1,
- uint64_t deadline);
-
-extern boolean_t
-timer_call_cancel(
- timer_call_t call);
-
typedef struct wrap_timer_call {
- cyc_handler_t hdlr;
- cyc_time_t when;
- uint64_t deadline;
- struct call_entry call;
+ /* node attributes */
+ cyc_handler_t hdlr;
+ cyc_time_t when;
+ uint64_t deadline;
+ int cpuid; /* CPU this cyclic is bound to */
+ boolean_t suspended; /* TRUE while that CPU is offline */
+ struct timer_call call;
+
+ /* next item in the linked list */
+ LIST_ENTRY(wrap_timer_call) entries;
} wrap_timer_call_t;
-#define WAKEUP_REAPER 0x7FFFFFFFFFFFFFFFLL
-#define NEARLY_FOREVER 0x7FFFFFFFFFFFFFFELL
+#define WAKEUP_REAPER 0x7FFFFFFFFFFFFFFFLL
+#define NEARLY_FOREVER 0x7FFFFFFFFFFFFFFELL
+
+/* CPU going online/offline notifications */
+void (*dtrace_cpu_state_changed_hook)(int, boolean_t) = NULL;
+void dtrace_cpu_state_changed(int, boolean_t);
+
+void
+dtrace_install_cpu_hooks(void) {
+ dtrace_cpu_state_changed_hook = dtrace_cpu_state_changed;
+}
+
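+/*
+ * Invoked on the CPU whose state is changing: when the CPU goes offline,
+ * cancel every cyclic timer call bound to it; when it comes back online,
+ * re-arm each one with a fresh periodic deadline.
+ */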
+void
+dtrace_cpu_state_changed(int cpuid, boolean_t is_running) {
+#pragma unused(cpuid)
+ wrap_timer_call_t *wrapTC = NULL;
+ boolean_t suspend = (is_running ? FALSE : TRUE);
+ dtrace_icookie_t s;
+
+ /* Ensure that we're not going to leave the CPU */
+ s = dtrace_interrupt_disable();
+ assert(cpuid == cpu_number());
+
+ LIST_FOREACH(wrapTC, &(cpu_list[cpu_number()].cpu_cyc_list), entries) {
+ assert(wrapTC->cpuid == cpu_number());
+ if (suspend) {
+ assert(!wrapTC->suspended);
+ /* If this fails, we'll panic anyway, so let's do this now. */
+ if (!timer_call_cancel(&wrapTC->call))
+    panic("dtrace_cpu_state_changed() failed to cancel a timer call");
+ wrapTC->suspended = TRUE;
+ } else {
+ /* Rearm the timer, but ensure it was suspended first. */
+ assert(wrapTC->suspended);
+ clock_deadline_for_periodic_event(wrapTC->when.cyt_interval, mach_absolute_time(),
+ &wrapTC->deadline);
+ timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline,
+ TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
+ wrapTC->suspended = FALSE;
+ }
+ }
+
+ /* Restore the previous interrupt state. */
+ dtrace_interrupt_enable(s);
+}
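+/*
+ * timer_call callback for a cyclic: run the registered handler, then
+ * compute the next periodic deadline and re-arm the timer call.
+ */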
static void
_timer_call_apply_cyclic( void *ignore, void *vTChdl )
(*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg );
clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline) );
- timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline );
-
- /* Did timer_call_remove_cyclic request a wakeup call when this timer call was re-armed? */
- if (wrapTC->when.cyt_interval == WAKEUP_REAPER)
- thread_wakeup((event_t)wrapTC);
+ timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL );
}
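+/*
+ * Create a cyclic from a cyc_handler_t/cyc_time_t pair: the backing
+ * timer_call is armed on, and bound to, the calling CPU, and linked onto
+ * that CPU's cpu_cyc_list so it can be suspended and resumed across
+ * offline/online transitions.
+ */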
static cyclic_id_t
timer_call_add_cyclic(wrap_timer_call_t *wrapTC, cyc_handler_t *handler, cyc_time_t *when)
{
uint64_t now;
+ dtrace_icookie_t s;
timer_call_setup( &(wrapTC->call), _timer_call_apply_cyclic, NULL );
wrapTC->hdlr = *handler;
wrapTC->deadline = now;
clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) );
- timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline );
+
+ /* Insert the timer to the list of the running timers on this CPU, and start it. */
+ s = dtrace_interrupt_disable();
+ wrapTC->cpuid = cpu_number();
+ LIST_INSERT_HEAD(&cpu_list[wrapTC->cpuid].cpu_cyc_list, wrapTC, entries);
+ timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline,
+ TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
+ wrapTC->suspended = FALSE;
+ dtrace_interrupt_enable(s);
return (cyclic_id_t)wrapTC;
}
+/*
+ * Executed on the CPU the timer is running on.
+ */
static void
timer_call_remove_cyclic(cyclic_id_t cyclic)
{
wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)cyclic;
- while (!timer_call_cancel(&(wrapTC->call))) {
- int ret = assert_wait(wrapTC, THREAD_UNINT);
- ASSERT(ret == THREAD_WAITING);
+ assert(wrapTC);
+ assert(cpu_number() == wrapTC->cpuid);
- wrapTC->when.cyt_interval = WAKEUP_REAPER;
+ if (!timer_call_cancel(&wrapTC->call))
+ panic("timer_call_remove_cyclic() failed to cancel a timer call");
- ret = thread_block(THREAD_CONTINUE_NULL);
- ASSERT(ret == THREAD_AWAKENED);
- }
+ LIST_REMOVE(wrapTC, entries);
}
static void *
{
ASSERT( cyclic != CYCLIC_NONE );
- timer_call_remove_cyclic( cyclic );
+ /* Removing a timer call must be done on the CPU the timer is running on. */
+ wrap_timer_call_t *wrapTC = (wrap_timer_call_t *) cyclic;
+ dtrace_xcall(wrapTC->cpuid, (dtrace_xcall_t) timer_call_remove_cyclic, (void*) cyclic);
+
_FREE((void *)cyclic, M_TEMP);
}
t += sizeof(cyc_omni_handler_t);
cyc_list = (cyclic_id_list_t)(uintptr_t)t;
- cid = cyc_list[cpu_number()];
- oarg = timer_call_get_cyclic_arg(cid);
-
- timer_call_remove_cyclic( cid );
- (omni->cyo_offline)(omni->cyo_arg, CPU, oarg);
+ /*
+ * If the processor was offline when dtrace started, we did not allocate
+ * a cyclic timer for this CPU.
+ */
+ if ((cid = cyc_list[cpu_number()]) != CYCLIC_NONE) {
+ oarg = timer_call_get_cyclic_arg(cid);
+ timer_call_remove_cyclic(cid);
+ (omni->cyo_offline)(omni->cyo_arg, CPU, oarg);
+ }
}
void
* and clock drift on later invocations is not a worry.
*/
uint64_t deadline = mach_absolute_time() + nanos;
-
+ /* DRK: consider using a lower priority callout here */
thread_call_enter_delayed(call, deadline);
return call;
void* dt_kmem_alloc_aligned(size_t size, size_t align, int kmflag)
{
- void* buf;
- intptr_t p;
- void** buf_backup;
+ void *mem, **addr_to_free;
+ intptr_t mem_aligned;
+ size_t *size_to_free, hdr_size;
- buf = dt_kmem_alloc(align + sizeof(void*) + size, kmflag);
+ /* Must be a power of two. */
+ assert(align != 0);
+ assert((align & (align - 1)) == 0);
- if(!buf)
+ /*
+ * We are going to add a header to the allocation. It contains
+ * the address to free and the total size of the buffer.
+ */
+ hdr_size = sizeof(size_t) + sizeof(void*);
+ mem = dt_kmem_alloc(size + align + hdr_size, kmflag);
+ if (mem == NULL)
return NULL;
- p = (intptr_t)buf;
- p += sizeof(void*); /* now we have enough room to store the backup */
- p = P2ROUNDUP(p, align); /* and now we're aligned */
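+ /*
+  * Round up past the header to the next align boundary. Over-allocating
+  * by align + hdr_size guarantees the header fits below the aligned
+  * address and size bytes fit above it.
+  */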
+ mem_aligned = (intptr_t) (((intptr_t) mem + align + hdr_size) & ~(align - 1));
- buf_backup = (void**)(p - sizeof(void*));
- *buf_backup = buf; /* back up the address we need to free */
+ /* Write the address to free in the header. */
+ addr_to_free = (void**) (mem_aligned - sizeof(void*));
+ *addr_to_free = mem;
- return (void*)p;
+ /* Write the size to free in the header. */
+ size_to_free = (size_t*) (mem_aligned - hdr_size);
+ *size_to_free = size + align + hdr_size;
+
+ return (void*) mem_aligned;
}
void* dt_kmem_zalloc_aligned(size_t size, size_t align, int kmflag)
void dt_kmem_free_aligned(void* buf, size_t size)
{
#pragma unused(size)
- intptr_t p;
- void** buf_backup;
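+ /* Recover the header written by dt_kmem_alloc_aligned(): the original
+  * allocation address and total size live just below the aligned pointer. */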
+ intptr_t ptr = (intptr_t) buf;
+ void **addr_to_free = (void**) (ptr - sizeof(void*));
+ size_t *size_to_free = (size_t*) (ptr - (sizeof(size_t) + sizeof(void*)));
- p = (intptr_t)buf;
- p -= sizeof(void*);
- buf_backup = (void**)(p);
+ if (buf == NULL)
+ return;
- dt_kmem_free(*buf_backup, size + ((char*)buf - (char*)*buf_backup));
+ dt_kmem_free(*addr_to_free, *size_to_free);
}
/*
ASSERT(kaddr + size >= kaddr);
- if (ml_at_interrupt_context() || /* Avoid possible copyio page fault on int stack, which panics! */
- 0 != recover || /* Avoid reentrancy into copyio facility. */
- uaddr + size < uaddr || /* Avoid address wrap. */
+ if ( uaddr + size < uaddr || /* Avoid address wrap. */
KERN_FAILURE == dtrace_copyio_preflight(uaddr)) /* Machine specific setup/constraints. */
{
DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
return( DTRACE_CPUFLAG_ISSET(CPU_DTRACE_NOFAULT) ? TRUE : FALSE );
}
-void
-dtrace_vpanic(const char *format, va_list alist)
-{
- vuprintf( format, alist );
- panic("dtrace_vpanic");
-}
-
#define TOTTY 0x02
extern int prf(const char *, va_list, int, struct tty *); /* bsd/kern/subr_prf.h */