#define super IOEventSource
OSDefineMetaClassAndStructors(IOTimerEventSource, IOEventSource)
-OSMetaClassDefineReservedUnused(IOTimerEventSource, 0);
-OSMetaClassDefineReservedUnused(IOTimerEventSource, 1);
-OSMetaClassDefineReservedUnused(IOTimerEventSource, 2);
+OSMetaClassDefineReservedUsed(IOTimerEventSource, 0);
+OSMetaClassDefineReservedUsed(IOTimerEventSource, 1);
+OSMetaClassDefineReservedUsed(IOTimerEventSource, 2);
OSMetaClassDefineReservedUnused(IOTimerEventSource, 3);
OSMetaClassDefineReservedUnused(IOTimerEventSource, 4);
OSMetaClassDefineReservedUnused(IOTimerEventSource, 5);
// Timeout handler function. This function is called by the kernel when
// the timeout interval expires.
//
+
+static __inline__ void
+InvokeAction(IOTimerEventSource::Action action, IOTimerEventSource * ts,
+ OSObject * owner, IOWorkLoop * workLoop)
+{
+ bool trace = (gIOKitTrace & kIOTraceTimers) ? true : false;
+
+ if (trace)
+ IOTimeStampStartConstant(IODBG_TIMES(IOTIMES_ACTION),
+ VM_KERNEL_ADDRHIDE(action), VM_KERNEL_ADDRHIDE(owner));
+
+ (*action)(owner, ts);
+
+#if CONFIG_DTRACE
+ DTRACE_TMR3(iotescallout__expire, Action, action, OSObject, owner, void, workLoop);
+#endif
+
+ if (trace)
+ IOTimeStampEndConstant(IODBG_TIMES(IOTIMES_ACTION),
+ VM_KERNEL_ADDRHIDE(action), VM_KERNEL_ADDRHIDE(owner));
+}
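
For context, the Action that InvokeAction dispatches is the ordinary two-argument IOTimerEventSource callback. A minimal handler is sketched below purely for illustration; the function name and the 100 ms re-arm interval are hypothetical, not part of this change.

    #include <IOKit/IOTimerEventSource.h>

    // Illustrative sketch: a callback matching IOTimerEventSource::Action,
    // i.e. void (*)(OSObject *owner, IOTimerEventSource *sender).
    static void myTimerFired(OSObject *owner, IOTimerEventSource *sender)
    {
        // Reached via InvokeAction, either from the thread call with the
        // work loop gate closed or, for the signaled variant further down,
        // from checkForWork() on the work loop thread.
        if (!owner || !sender)
            return;

        // ... do the periodic work, then optionally re-arm:
        sender->setTimeoutMS(100);
    }
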
+
void IOTimerEventSource::timeout(void *self)
{
IOTimerEventSource *me = (IOTimerEventSource *) self;
doit = (Action) me->action;
if (doit && me->enabled && AbsoluteTime_to_scalar(&me->abstime))
{
- bool trace = (gIOKitTrace & kIOTraceTimers) ? true : false;
-
- if (trace)
- IOTimeStampStartConstant(IODBG_TIMES(IOTIMES_ACTION),
- VM_KERNEL_UNSLIDE(doit), (uintptr_t) me->owner);
-
- (*doit)(me->owner, me);
-#if CONFIG_DTRACE
- DTRACE_TMR3(iotescallout__expire, Action, doit, OSObject, me->owner, void, me->workLoop);
-#endif
-
- if (trace)
- IOTimeStampEndConstant(IODBG_TIMES(IOTIMES_ACTION),
- VM_KERNEL_UNSLIDE(doit), (uintptr_t) me->owner);
+ InvokeAction(doit, me, me->owner, me->workLoop);
}
IOStatisticsOpenGate();
wl->openGate();
doit = (Action) me->action;
if (doit && (me->reserved->calloutGeneration == count))
{
- bool trace = (gIOKitTrace & kIOTraceTimers) ? true : false;
-
- if (trace)
- IOTimeStampStartConstant(IODBG_TIMES(IOTIMES_ACTION),
- VM_KERNEL_UNSLIDE(doit), (uintptr_t) me->owner);
-
- (*doit)(me->owner, me);
-#if CONFIG_DTRACE
- DTRACE_TMR3(iotescallout__expire, Action, doit, OSObject, me->owner, void, me->workLoop);
-#endif
-
- if (trace)
- IOTimeStampEndConstant(IODBG_TIMES(IOTIMES_ACTION),
- VM_KERNEL_UNSLIDE(doit), (uintptr_t) me->owner);
+ InvokeAction(doit, me, me->owner, me->workLoop);
}
IOStatisticsOpenGate();
wl->openGate();
me->release();
}
+// -- work loop delivery
+
+bool IOTimerEventSource::checkForWork()
+{
+ Action doit;
+
+ if (reserved
+ && (reserved->calloutGenerationSignaled == reserved->calloutGeneration)
+ && enabled && (doit = (Action) action))
+ {
+ reserved->calloutGenerationSignaled = ~reserved->calloutGeneration;
+ InvokeAction(doit, this, owner, workLoop);
+ }
+
+ return false;
+}
+
+void IOTimerEventSource::timeoutSignaled(void * self, void * c)
+{
+ IOTimerEventSource *me = (IOTimerEventSource *) self;
+
+ me->reserved->calloutGenerationSignaled = (SInt32)(long) c;
+ if (me->enabled) me->signalWorkAvailable();
+}
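
These two routines carry the new THREAD_CALL_OPTIONS_SIGNAL path: the thread call only records the callout generation and signals the work loop, and checkForWork() later runs the action on the work loop thread itself, so no callout thread blocks on the gate. A hedged sketch of opting in (the driver context, work loop, and handler are assumptions, not taken from this diff):

    // Sketch: inside an IOService start() routine, with myTimerFired as above.
    IOWorkLoop *wl = getWorkLoop();
    IOTimerEventSource *timer = IOTimerEventSource::timerEventSource(
        kIOTimerEventSourceOptionsPriorityWorkLoop,   // signaled, work-loop delivery
        this, myTimerFired);
    if (wl && timer && wl->addEventSource(timer) == kIOReturnSuccess)
        timer->setTimeoutMS(50);                      // hypothetical initial period
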
+
+// --
+
void IOTimerEventSource::setTimeoutFunc()
{
+ thread_call_priority_t pri;
+ uint32_t options;
+
+ if (reserved) panic("setTimeoutFunc already %p, %p", this, reserved);
+
// reserved != 0 means IOTimerEventSource::timeoutAndRelease is being used,
// not a subclassed implementation
reserved = IONew(ExpansionData, 1);
- calloutEntry = (void *) thread_call_allocate((thread_call_func_t) &IOTimerEventSource::timeoutAndRelease,
- (thread_call_param_t) this);
+ reserved->calloutGenerationSignaled = ~reserved->calloutGeneration;
+ options = abstime;
+ abstime = 0;
+
+ thread_call_options_t tcoptions = 0;
+ thread_call_func_t func = NULL;
+
+ switch (kIOTimerEventSourceOptionsPriorityMask & options)
+ {
+ case kIOTimerEventSourceOptionsPriorityHigh:
+ pri = THREAD_CALL_PRIORITY_HIGH;
+ func = &IOTimerEventSource::timeoutAndRelease;
+ break;
+
+ case kIOTimerEventSourceOptionsPriorityKernel:
+ pri = THREAD_CALL_PRIORITY_KERNEL;
+ func = &IOTimerEventSource::timeoutAndRelease;
+ break;
+
+ case kIOTimerEventSourceOptionsPriorityKernelHigh:
+ pri = THREAD_CALL_PRIORITY_KERNEL_HIGH;
+ func = &IOTimerEventSource::timeoutAndRelease;
+ break;
+
+ case kIOTimerEventSourceOptionsPriorityUser:
+ pri = THREAD_CALL_PRIORITY_USER;
+ func = &IOTimerEventSource::timeoutAndRelease;
+ break;
+
+ case kIOTimerEventSourceOptionsPriorityLow:
+ pri = THREAD_CALL_PRIORITY_LOW;
+ func = &IOTimerEventSource::timeoutAndRelease;
+ break;
+
+ case kIOTimerEventSourceOptionsPriorityWorkLoop:
+ pri = THREAD_CALL_PRIORITY_KERNEL;
+ tcoptions |= THREAD_CALL_OPTIONS_SIGNAL;
+ if (kIOTimerEventSourceOptionsAllowReenter & options) break;
+ func = &IOTimerEventSource::timeoutSignaled;
+ break;
+
+ default:
+ break;
+ }
+
+ assertf(func, "IOTimerEventSource options 0x%x", options);
+ if (!func) return; // init will fail
+
+ if (THREAD_CALL_OPTIONS_SIGNAL & tcoptions) flags |= kActive;
+ else flags |= kPassive;
+
+ if (!(kIOTimerEventSourceOptionsAllowReenter & options)) tcoptions |= THREAD_CALL_OPTIONS_ONCE;
+
+ calloutEntry = (void *) thread_call_allocate_with_options(func,
+ (thread_call_param_t) this, pri, tcoptions);
+ assert(calloutEntry);
}
bool IOTimerEventSource::init(OSObject *inOwner, Action inAction)
return true;
}
+bool IOTimerEventSource::init(uint32_t options, OSObject *inOwner, Action inAction)
+{
+ abstime = options;
+ return (init(inOwner, inAction));
+}
+
IOTimerEventSource *
-IOTimerEventSource::timerEventSource(OSObject *inOwner, Action inAction)
+IOTimerEventSource::timerEventSource(uint32_t inOptions, OSObject *inOwner, Action inAction)
{
IOTimerEventSource *me = new IOTimerEventSource;
- if (me && !me->init(inOwner, inAction)) {
+ if (me && !me->init(inOptions, inOwner, inAction)) {
me->release();
return 0;
}
return me;
}
+#define _thread_call_cancel(tc) ((kActive & flags) ? thread_call_cancel_wait((tc)) : thread_call_cancel((tc)))
+
+IOTimerEventSource *
+IOTimerEventSource::timerEventSource(OSObject *inOwner, Action inAction)
+{
+ return (IOTimerEventSource::timerEventSource(
+ kIOTimerEventSourceOptionsPriorityKernelHigh,
+ inOwner, inAction));
+}
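
With this wrapper the long-standing two-argument factory keeps working and now simply selects kIOTimerEventSourceOptionsPriorityKernelHigh. Callers wanting different behaviour pass options explicitly; for example (owner and handler are placeholders):

    // Same behaviour as the legacy call: kernel-high priority thread call,
    // allocated with THREAD_CALL_OPTIONS_ONCE (no re-entry of the handler).
    IOTimerEventSource *plain = IOTimerEventSource::timerEventSource(owner, handler);

    // Explicit options: low-priority thread call that may re-enter the handler.
    IOTimerEventSource *low = IOTimerEventSource::timerEventSource(
        kIOTimerEventSourceOptionsPriorityLow | kIOTimerEventSourceOptionsAllowReenter,
        owner, handler);
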
+
void IOTimerEventSource::free()
{
if (calloutEntry) {
+ __assert_only bool freed;
+
cancelTimeout();
- thread_call_free((thread_call_t) calloutEntry);
+
+ freed = thread_call_free((thread_call_t) calloutEntry);
+ assert(freed);
}
if (reserved)
{
if (reserved)
reserved->calloutGeneration++;
- bool active = thread_call_cancel((thread_call_t) calloutEntry);
+ bool active = _thread_call_cancel((thread_call_t) calloutEntry);
AbsoluteTime_to_scalar(&abstime) = 0;
- if (active && reserved)
+ if (active && reserved && (kPassive & flags))
{
release();
workLoop->release();
{
if (reserved)
reserved->calloutGeneration++;
- bool active = thread_call_cancel((thread_call_t) calloutEntry);
+ bool active = _thread_call_cancel((thread_call_t) calloutEntry);
super::disable();
- if (active && reserved)
+ if (active && reserved && (kPassive & flags))
{
release();
workLoop->release();
IOReturn IOTimerEventSource::setTimeout(AbsoluteTime interval)
{
AbsoluteTime end;
- clock_get_uptime(&end);
- ADD_ABSOLUTETIME(&end, &interval);
+ clock_absolutetime_interval_to_deadline(interval, &end);

return wakeAtTime(end);
}
+
+IOReturn IOTimerEventSource::setTimeout(uint32_t options,
+ AbsoluteTime abstime, AbsoluteTime leeway)
+{
+ AbsoluteTime end;
+
+ if (kIOTimeOptionsContinuous & options)
+ clock_continuoustime_interval_to_deadline(abstime, &end);
+ else
+ clock_absolutetime_interval_to_deadline(abstime, &end);
+
+ return wakeAtTime(options, end, leeway);
+}
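
The new three-argument setTimeout converts the interval on the requested timebase and forwards the options and leeway to wakeAtTime(). A usage sketch, with arbitrary interval and leeway values and an assumed 'timer' pointer:

    // Sketch: roughly one-second timeout that tolerates up to 100 ms of
    // coalescing slack, measured on the continuous (sleep-inclusive) clock.
    uint64_t interval, leeway;
    nanoseconds_to_absolutetime(NSEC_PER_SEC, &interval);
    nanoseconds_to_absolutetime(100 * NSEC_PER_MSEC, &leeway);
    timer->setTimeout(kIOTimeOptionsWithLeeway | kIOTimeOptionsContinuous,
                      interval, leeway);
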
IOReturn IOTimerEventSource::wakeAtTimeTicks(UInt32 ticks)
}
IOReturn IOTimerEventSource::wakeAtTime(AbsoluteTime inAbstime)
+{
+ return wakeAtTime(0, inAbstime, 0);
+}
+
+IOReturn IOTimerEventSource::wakeAtTime(uint32_t options, AbsoluteTime inAbstime, AbsoluteTime leeway)
{
if (!action)
return kIOReturnNoResources;
abstime = inAbstime;
if ( enabled && AbsoluteTime_to_scalar(&inAbstime) && AbsoluteTime_to_scalar(&abstime) && workLoop )
{
+ uint32_t tcoptions = 0;
+
+ if (kIOTimeOptionsWithLeeway & options) tcoptions |= THREAD_CALL_DELAY_LEEWAY;
+ if (kIOTimeOptionsContinuous & options) tcoptions |= THREAD_CALL_CONTINUOUS;
+
if (reserved)
{
- retain();
- workLoop->retain();
+ if (kPassive & flags)
+ {
+ retain();
+ workLoop->retain();
+ }
reserved->workLoop = workLoop;
reserved->calloutGeneration++;
- if (thread_call_enter1_delayed((thread_call_t) calloutEntry,
- (void *)(uintptr_t) reserved->calloutGeneration, inAbstime))
+ if (thread_call_enter_delayed_with_leeway((thread_call_t) calloutEntry,
+ (void *)(uintptr_t) reserved->calloutGeneration, inAbstime, leeway, tcoptions)
+ && (kPassive & flags))
{
release();
workLoop->release();
}
}
else
- thread_call_enter_delayed((thread_call_t) calloutEntry, inAbstime);
+ {
+ thread_call_enter_delayed_with_leeway((thread_call_t) calloutEntry,
+ NULL, inAbstime, leeway, tcoptions);
+ }
}
return kIOReturnSuccess;