#include <vm/pmap.h>
#include <vm/vm_map.h> /* All the bits we care about are guarded by MACH_KERNEL_PRIVATE :-( */
-
/*
* pid/proc
*/
return PROC_NULL;
}
- task_suspend(p->task);
+ task_suspend_internal(p->task);
proc_lock(p);
proc_unlock(p);
- task_resume(p->task);
+ task_resume_internal(p->task);
proc_rele(p);
}
/*
 * cpuvar
*/
lck_mtx_t cpu_lock;
+lck_mtx_t cyc_lock;
lck_mtx_t mod_lock;
dtrace_cpu_t *cpu_list;
*/
typedef struct wrap_timer_call {
- cyc_handler_t hdlr;
- cyc_time_t when;
- uint64_t deadline;
- struct timer_call call;
+ /* node attributes */
+ cyc_handler_t hdlr;
+ cyc_time_t when;
+ uint64_t deadline;
+ int cpuid;
+ boolean_t suspended;
+ struct timer_call call;
+
+ /* next item in the linked list */
+ LIST_ENTRY(wrap_timer_call) entries;
} wrap_timer_call_t;
#define WAKEUP_REAPER 0x7FFFFFFFFFFFFFFFLL
#define NEARLY_FOREVER 0x7FFFFFFFFFFFFFFELL
+
+
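+/*
+ * An omni cyclic is backed by one wrap_timer_call_t per CPU, kept in the
+ * flexible array member below; each slot is initialized on its own CPU
+ * via dtrace_xcall(DTRACE_CPUALL, ...).
+ */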
+typedef struct cyc_list {
+ cyc_omni_handler_t cyl_omni;
+ wrap_timer_call_t cyl_wrap_by_cpus[];
+} cyc_list_t;
+
+/* CPU going online/offline notifications */
+void (*dtrace_cpu_state_changed_hook)(int, boolean_t) = NULL;
+void dtrace_cpu_state_changed(int, boolean_t);
+
+void
+dtrace_install_cpu_hooks(void)
+{
+ dtrace_cpu_state_changed_hook = dtrace_cpu_state_changed;
+}
+
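+/*
+ * Runs (via the hook above) on the CPU whose state is changing: with
+ * interrupts disabled, it walks that CPU's cyclic list and either cancels
+ * (suspend) or re-arms (resume) each timer call.
+ */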
+void
+dtrace_cpu_state_changed(int cpuid, boolean_t is_running)
+{
+#pragma unused(cpuid)
+ wrap_timer_call_t *wrapTC = NULL;
+	boolean_t suspend = !is_running;
+ dtrace_icookie_t s;
+
+ /* Ensure that we're not going to leave the CPU */
+ s = dtrace_interrupt_disable();
+ assert(cpuid == cpu_number());
+
+ LIST_FOREACH(wrapTC, &(cpu_list[cpu_number()].cpu_cyc_list), entries) {
+ assert(wrapTC->cpuid == cpu_number());
+ if (suspend) {
+ assert(!wrapTC->suspended);
+			/* Cancellation must succeed here; a failure is fatal anyway, so panic now. */
+			if (!timer_call_cancel(&wrapTC->call))
+				panic("dtrace_cpu_state_changed() failed to cancel a timer call");
+ wrapTC->suspended = TRUE;
+ } else {
+ /* Rearm the timer, but ensure it was suspended first. */
+ assert(wrapTC->suspended);
+ clock_deadline_for_periodic_event(wrapTC->when.cyt_interval, mach_absolute_time(),
+ &wrapTC->deadline);
+ timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline,
+ TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
+ wrapTC->suspended = FALSE;
+ }
+	}
+
+ /* Restore the previous interrupt state. */
+ dtrace_interrupt_enable(s);
+}
static void
_timer_call_apply_cyclic( void *ignore, void *vTChdl )
clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline) );
timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL );
-
- /* Did timer_call_remove_cyclic request a wakeup call when this timer call was re-armed? */
- if (wrapTC->when.cyt_interval == WAKEUP_REAPER)
- thread_wakeup((event_t)wrapTC);
}
static cyclic_id_t
timer_call_add_cyclic(wrap_timer_call_t *wrapTC, cyc_handler_t *handler, cyc_time_t *when)
{
uint64_t now;
+ dtrace_icookie_t s;
timer_call_setup( &(wrapTC->call), _timer_call_apply_cyclic, NULL );
wrapTC->hdlr = *handler;
wrapTC->deadline = now;
clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) );
- timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL );
+
+ /* Insert the timer to the list of the running timers on this CPU, and start it. */
+ s = dtrace_interrupt_disable();
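+	/*
+	 * With interrupts off, this thread cannot migrate, so cpu_number() is
+	 * stable and the per-CPU list can be updated without taking a lock.
+	 */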
+ wrapTC->cpuid = cpu_number();
+ LIST_INSERT_HEAD(&cpu_list[wrapTC->cpuid].cpu_cyc_list, wrapTC, entries);
+ timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline,
+ TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
+ wrapTC->suspended = FALSE;
+ dtrace_interrupt_enable(s);
return (cyclic_id_t)wrapTC;
}
+/*
+ * Executed on the CPU the timer is running on.
+ */
static void
-timer_call_remove_cyclic(cyclic_id_t cyclic)
+timer_call_remove_cyclic(wrap_timer_call_t *wrapTC)
{
- wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)cyclic;
+ assert(wrapTC);
+ assert(cpu_number() == wrapTC->cpuid);
- while (!timer_call_cancel(&(wrapTC->call))) {
- int ret = assert_wait(wrapTC, THREAD_UNINT);
- ASSERT(ret == THREAD_WAITING);
+ if (!timer_call_cancel(&wrapTC->call))
+ panic("timer_call_remove_cyclic() failed to cancel a timer call");
- wrapTC->when.cyt_interval = WAKEUP_REAPER;
-
- ret = thread_block(THREAD_CONTINUE_NULL);
- ASSERT(ret == THREAD_AWAKENED);
- }
+ LIST_REMOVE(wrapTC, entries);
}
static void *
-timer_call_get_cyclic_arg(cyclic_id_t cyclic)
-{
- wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)cyclic;
-
+timer_call_get_cyclic_arg(wrap_timer_call_t *wrapTC)
+{
return (wrapTC ? wrapTC->hdlr.cyh_arg : NULL);
}
cyclic_id_t
cyclic_timer_add(cyc_handler_t *handler, cyc_time_t *when)
{
void
cyclic_timer_remove(cyclic_id_t cyclic)
{
ASSERT( cyclic != CYCLIC_NONE );
- timer_call_remove_cyclic( cyclic );
+ /* Removing a timer call must be done on the CPU the timer is running on. */
+ wrap_timer_call_t *wrapTC = (wrap_timer_call_t *) cyclic;
+ dtrace_xcall(wrapTC->cpuid, (dtrace_xcall_t) timer_call_remove_cyclic, (void*) cyclic);
+
_FREE((void *)cyclic, M_TEMP);
}
static void
-_cyclic_add_omni(cyclic_id_list_t cyc_list)
+_cyclic_add_omni(cyc_list_t *cyc_list)
{
cyc_time_t cT;
cyc_handler_t cH;
- wrap_timer_call_t *wrapTC;
- cyc_omni_handler_t *omni = (cyc_omni_handler_t *)cyc_list;
- char *t;
-
- (omni->cyo_online)(omni->cyo_arg, CPU, &cH, &cT);
-
- t = (char *)cyc_list;
- t += sizeof(cyc_omni_handler_t);
- cyc_list = (cyclic_id_list_t)(uintptr_t)t;
+ cyc_omni_handler_t *omni = &cyc_list->cyl_omni;
- t += sizeof(cyclic_id_t)*NCPU;
- t += (sizeof(wrap_timer_call_t))*cpu_number();
- wrapTC = (wrap_timer_call_t *)(uintptr_t)t;
+ (omni->cyo_online)(omni->cyo_arg, CPU, &cH, &cT);
- cyc_list[cpu_number()] = timer_call_add_cyclic(wrapTC, &cH, &cT);
+ wrap_timer_call_t *wrapTC = &cyc_list->cyl_wrap_by_cpus[cpu_number()];
+ timer_call_add_cyclic(wrapTC, &cH, &cT);
}
cyclic_id_list_t
cyclic_add_omni(cyc_omni_handler_t *omni)
{
- cyclic_id_list_t cyc_list =
- _MALLOC( (sizeof(wrap_timer_call_t))*NCPU +
- sizeof(cyclic_id_t)*NCPU +
- sizeof(cyc_omni_handler_t), M_TEMP, M_ZERO | M_WAITOK);
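+	/*
+	 * sizeof(cyc_list_t) covers only the header; the flexible array member
+	 * adds one wrap_timer_call_t slot per possible CPU.
+	 */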
+ cyc_list_t *cyc_list =
+ _MALLOC(sizeof(cyc_list_t) + NCPU * sizeof(wrap_timer_call_t), M_TEMP, M_ZERO | M_WAITOK);
+
if (NULL == cyc_list)
- return (cyclic_id_list_t)CYCLIC_NONE;
+ return NULL;
+
+ cyc_list->cyl_omni = *omni;
- *(cyc_omni_handler_t *)cyc_list = *omni;
dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)_cyclic_add_omni, (void *)cyc_list);
- return cyc_list;
+ return (cyclic_id_list_t)cyc_list;
}
static void
-_cyclic_remove_omni(cyclic_id_list_t cyc_list)
+_cyclic_remove_omni(cyc_list_t *cyc_list)
{
- cyc_omni_handler_t *omni = (cyc_omni_handler_t *)cyc_list;
+ cyc_omni_handler_t *omni = &cyc_list->cyl_omni;
void *oarg;
- cyclic_id_t cid;
- char *t;
-
- t = (char *)cyc_list;
- t += sizeof(cyc_omni_handler_t);
- cyc_list = (cyclic_id_list_t)(uintptr_t)t;
-
- cid = cyc_list[cpu_number()];
- oarg = timer_call_get_cyclic_arg(cid);
+ wrap_timer_call_t *wrapTC;
- timer_call_remove_cyclic( cid );
- (omni->cyo_offline)(omni->cyo_arg, CPU, oarg);
+	/*
+	 * If the processor was offline when dtrace started, no cyclic timer was
+	 * ever initialized for this CPU: its slot is still zero-filled. (The
+	 * address of an array slot is never NULL, so test the handler instead.)
+	 */
+	wrapTC = &cyc_list->cyl_wrap_by_cpus[cpu_number()];
+	if (wrapTC->hdlr.cyh_func != NULL) {
+		oarg = timer_call_get_cyclic_arg(wrapTC);
+		timer_call_remove_cyclic(wrapTC);
+		(omni->cyo_offline)(omni->cyo_arg, CPU, oarg);
+	}
}
void
cyclic_remove_omni(cyclic_id_list_t cyc_list)
{
- ASSERT( cyc_list != (cyclic_id_list_t)CYCLIC_NONE );
+ ASSERT(cyc_list != NULL);
dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)_cyclic_remove_omni, (void *)cyc_list);
_FREE(cyc_list, M_TEMP);
#pragma unused(devi)
}
-#define NSOFT_STATES 32 /* XXX No more than 32 clients at a time, please. */
-static void *soft[NSOFT_STATES];
-
-int
-ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
-{
-#pragma unused(n_items)
- int i;
-
- for (i = 0; i < NSOFT_STATES; ++i) soft[i] = _MALLOC(size, M_TEMP, M_ZERO | M_WAITOK);
- *(size_t *)state_p = size;
- return 0;
-}
-
-int
-ddi_soft_state_zalloc(void *state, int item)
-{
-#pragma unused(state)
- if (item < NSOFT_STATES)
- return DDI_SUCCESS;
- else
- return DDI_FAILURE;
-}
-
-void *
-ddi_get_soft_state(void *state, int item)
-{
-#pragma unused(state)
- ASSERT(item < NSOFT_STATES);
- return soft[item];
-}
-
-int
-ddi_soft_state_free(void *state, int item)
-{
- ASSERT(item < NSOFT_STATES);
- bzero( soft[item], (size_t)state );
- return DDI_SUCCESS;
-}
-
-void
-ddi_soft_state_fini(void **state_p)
-{
-#pragma unused(state_p)
- int i;
-
- for (i = 0; i < NSOFT_STATES; ++i) _FREE( soft[i], M_TEMP );
-}
static unsigned int gRegisteredProps = 0;
static struct {
void* dt_kmem_alloc_aligned(size_t size, size_t align, int kmflag)
{
- void* buf;
- intptr_t p;
- void** buf_backup;
+ void *mem, **addr_to_free;
+ intptr_t mem_aligned;
+ size_t *size_to_free, hdr_size;
- buf = dt_kmem_alloc(align + sizeof(void*) + size, kmflag);
+ /* Must be a power of two. */
+ assert(align != 0);
+ assert((align & (align - 1)) == 0);
- if(!buf)
+ /*
+ * We are going to add a header to the allocation. It contains
+ * the address to free and the total size of the buffer.
+ */
+ hdr_size = sizeof(size_t) + sizeof(void*);
+ mem = dt_kmem_alloc(size + align + hdr_size, kmflag);
+ if (mem == NULL)
return NULL;
- p = (intptr_t)buf;
- p += sizeof(void*); /* now we have enough room to store the backup */
- p = P2ROUNDUP(p, align); /* and now we're aligned */
+ mem_aligned = (intptr_t) (((intptr_t) mem + align + hdr_size) & ~(align - 1));
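+	/*
+	 * Resulting layout:
+	 *
+	 *   mem --> | padding | size_to_free | addr_to_free | aligned buffer |
+	 *                                                    ^-- mem_aligned
+	 *
+	 * Rounding (mem + align + hdr_size) down to a multiple of align leaves
+	 * at least hdr_size bytes for the header and size bytes for the buffer.
+	 */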
+
+ /* Write the address to free in the header. */
+ addr_to_free = (void**) (mem_aligned - sizeof(void*));
+ *addr_to_free = mem;
- buf_backup = (void**)(p - sizeof(void*));
- *buf_backup = buf; /* back up the address we need to free */
+ /* Write the size to free in the header. */
+ size_to_free = (size_t*) (mem_aligned - hdr_size);
+ *size_to_free = size + align + hdr_size;
- return (void*)p;
+ return (void*) mem_aligned;
}
void* dt_kmem_zalloc_aligned(size_t size, size_t align, int kmflag)
void dt_kmem_free_aligned(void* buf, size_t size)
{
#pragma unused(size)
- intptr_t p;
- void** buf_backup;
+ intptr_t ptr = (intptr_t) buf;
+ void **addr_to_free = (void**) (ptr - sizeof(void*));
+ size_t *size_to_free = (size_t*) (ptr - (sizeof(size_t) + sizeof(void*)));
- p = (intptr_t)buf;
- p -= sizeof(void*);
- buf_backup = (void**)(p);
+ if (buf == NULL)
+ return;
- dt_kmem_free(*buf_backup, size + ((char*)buf - (char*)*buf_backup));
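+	/* Free the original allocation using the header written at alloc time. */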
+ dt_kmem_free(*addr_to_free, *size_to_free);
}
/*