#include <mach/mach_types.h>
-#include <kern/lock.h>
#include <kern/spl.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <mach/mach_traps.h>
#include <mach/mach_time.h>
+#include <sys/kdebug.h>
+
uint32_t hz_tick_interval = 1;
static struct clock_calend {
uint64_t epoch;
uint64_t offset;
+ uint64_t epoch_absolute;
int32_t adjdelta; /* Nanosecond time delta for this adjustment period */
uint64_t adjstart; /* Absolute time value for start of this adjustment period */
static void calend_adjust_call(void);
static uint32_t calend_adjust(void);
-static thread_call_data_t calend_wakecall;
-
-extern void IOKitResetTime(void);
-
void _clock_delay_until_deadline(uint64_t interval,
uint64_t deadline);
+void _clock_delay_until_deadline_with_leeway(uint64_t interval,
+ uint64_t deadline,
+ uint64_t leeway);
static uint64_t clock_boottime; /* Seconds boottime epoch */
clock_lock_init();
timer_call_setup(&calend_adjcall, (timer_call_func_t)calend_adjust_call, NULL);
- thread_call_setup(&calend_wakecall, (thread_call_func_t)IOKitResetTime, NULL);
clock_oldconfig();
}
clock_get_calendar_microtime(
clock_sec_t *secs,
clock_usec_t *microsecs)
+{
+ clock_get_calendar_absolute_and_microtime(secs, microsecs, NULL);
+}
+
+/*
+ * clock_get_calendar_absolute_and_microtime:
+ *
+ * Returns the current calendar value in seconds, with
+ * microseconds as the fraction. Additionally returns the
+ * current mach_absolute_time via 'abstime' when that
+ * pointer is not NULL.
+ */
+void
+clock_get_calendar_absolute_and_microtime(
+ clock_sec_t *secs,
+ clock_usec_t *microsecs,
+ uint64_t *abstime)
{
uint64_t now;
spl_t s;
clock_lock();
now = mach_absolute_time();
+ if (abstime)
+ *abstime = now;
if (clock_calend.adjdelta < 0) {
uint32_t t32;
clock_sec_t sys;
clock_usec_t microsys;
clock_sec_t newsecs;
+ clock_usec_t newmicrosecs;
spl_t s;
- newsecs = (microsecs < 500*USEC_PER_SEC)? secs: secs + 1;
+ newsecs = secs;
+ newmicrosecs = microsecs;
s = splclock();
clock_lock();
nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);
+ clock_interval_to_absolutetime_interval((uint32_t) secs, NSEC_PER_SEC, &clock_calend.epoch_absolute);
+ clock_calend.epoch_absolute += clock_calend.offset;
+
/*
* Cancel any adjustment in progress.
*/
/*
* Set the new value for the platform clock.
*/
- PESetGMTTimeOfDay(newsecs);
+ PESetUTCTimeOfDay(newsecs, newmicrosecs);
splx(s);
*
* Also sends host notifications.
*/
+
+/* Cumulative absolute-time units the system has spent asleep
+ * (accumulated across epoch changes -- see clock_initialize_calendar). */
+uint64_t mach_absolutetime_asleep;
+/* Absolute-time duration of the most recent sleep interval; computed as
+ * the epoch delta on wake. NOTE(review): presumed mach_absolute_time
+ * units -- confirm against nanoseconds_to_absolutetime usage. */
+uint64_t mach_absolutetime_last_sleep;
+
void
clock_initialize_calendar(void)
{
- clock_sec_t sys, secs = PEGetGMTTimeOfDay();
- clock_usec_t microsys, microsecs = 0;
+ clock_sec_t sys, secs;
+ clock_usec_t microsys, microsecs;
+ uint64_t new_epoch;
spl_t s;
+ PEGetUTCTimeOfDay(&secs, µsecs);
+
s = splclock();
clock_lock();
/*
* Set the new calendar epoch.
*/
+
clock_calend.epoch = secs;
nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);
+ clock_interval_to_absolutetime_interval((uint32_t) secs, NSEC_PER_SEC, &new_epoch);
+ new_epoch += clock_calend.offset;
+
+ if (clock_calend.epoch_absolute)
+ {
+ mach_absolutetime_last_sleep = new_epoch - clock_calend.epoch_absolute;
+ mach_absolutetime_asleep += mach_absolutetime_last_sleep;
+ KERNEL_DEBUG_CONSTANT(
+ MACHDBG_CODE(DBG_MACH_CLOCK,MACH_EPOCH_CHANGE) | DBG_FUNC_NONE,
+ (uintptr_t) mach_absolutetime_last_sleep,
+ (uintptr_t) mach_absolutetime_asleep,
+ (uintptr_t) (mach_absolutetime_last_sleep >> 32),
+ (uintptr_t) (mach_absolutetime_asleep >> 32),
+ 0);
+ }
+ clock_calend.epoch_absolute = new_epoch;
+
/*
* Cancel any adjustment in progress.
*/
interval = calend_set_adjustment(secs, microsecs);
if (interval != 0) {
calend_adjdeadline = mach_absolute_time() + interval;
- if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_CRITICAL))
+ if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_SYS_CRITICAL))
calend_adjactive++;
}
else
/*
* Compute the total adjustment time in nanoseconds.
*/
- total = (int64_t)*secs * NSEC_PER_SEC + *microsecs * NSEC_PER_USEC;
+ total = ((int64_t)*secs * (int64_t)NSEC_PER_SEC) + (*microsecs * (int64_t)NSEC_PER_USEC);
/*
* Disable commpage gettimeofday().
* Positive adjustment. If greater than the preset 'big'
* threshold, slew at a faster rate, capping if necessary.
*/
- if (total > calend_adjbig)
+ if (total > (int64_t) calend_adjbig)
delta *= 10;
if (delta > total)
delta = (int32_t)total;
* greater than the preset 'big' threshold, slew at a faster
* rate, capping if necessary.
*/
- if (total < -calend_adjbig)
+ if (total < (int64_t) -calend_adjbig)
delta *= 10;
delta = -delta;
if (delta < total)
* remaining uncorrected time from it.
*/
if (ototal != 0) {
- *secs = (long)(ototal / NSEC_PER_SEC);
- *microsecs = (int)((ototal % NSEC_PER_SEC) / NSEC_PER_USEC);
+ *secs = (long)(ototal / (long)NSEC_PER_SEC);
+ *microsecs = (int)((ototal % (int)NSEC_PER_SEC) / (int)NSEC_PER_USEC);
}
else
*secs = *microsecs = 0;
if (interval != 0) {
clock_deadline_for_periodic_event(interval, mach_absolute_time(), &calend_adjdeadline);
- if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_CRITICAL))
+ if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_SYS_CRITICAL))
calend_adjactive++;
}
}
return (interval);
}
-/*
- * clock_wakeup_calendar:
- *
- * Interface to power management, used
- * to initiate the reset of the calendar
- * on wake from sleep event.
- */
-void
-clock_wakeup_calendar(void)
-{
- thread_call_enter(&calend_wakecall);
-}
-
/*
* Wait / delay routines.
*/
uint64_t deadline = args->deadline;
wait_result_t wresult;
- wresult = assert_wait_deadline((event_t)mach_wait_until_trap, THREAD_ABORTSAFE, deadline);
+ wresult = assert_wait_deadline_with_leeway((event_t)mach_wait_until_trap, THREAD_ABORTSAFE,
+ TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
if (wresult == THREAD_WAITING)
wresult = thread_block(mach_wait_until_continue);
uint64_t interval,
uint64_t deadline)
{
+ _clock_delay_until_deadline_with_leeway(interval, deadline, 0);
+}
+
+/*
+ * Identical to _clock_delay_until_deadline(), but additionally
+ * accepts a leeway value, giving the timer subsystem latitude
+ * to coalesce the wakeup with other pending timers.
+ */
+void
+_clock_delay_until_deadline_with_leeway(
+ uint64_t interval,
+ uint64_t deadline,
+ uint64_t leeway)
+{
if (interval == 0)
return;
ml_get_interrupts_enabled() == FALSE ) {
machine_delay_until(interval, deadline);
} else {
- assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline);
+ /*
+ * For now, assume a leeway request of 0 means the client does not want a leeway
+ * value. We may want to change this interpretation in the future.
+ */
+
+ if (leeway) {
+ assert_wait_deadline_with_leeway((event_t)clock_delay_until, THREAD_UNINT, TIMEOUT_URGENCY_LEEWAY, deadline, leeway);
+ } else {
+ assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline);
+ }
thread_block(THREAD_CONTINUE_NULL);
}
}
-
void
delay_for_interval(
uint32_t interval,
_clock_delay_until_deadline(abstime, mach_absolute_time() + abstime);
}
+/*
+ * delay_for_interval_with_leeway:
+ *
+ * Block the calling thread for approximately 'interval' units,
+ * where 'scale_factor' converts caller units into time (as
+ * interpreted by clock_interval_to_absolutetime_interval; e.g.
+ * NSEC_PER_SEC-style factors -- confirm against callers).
+ * 'leeway' (same units) is slop the timer subsystem may use to
+ * coalesce the wakeup; 0 means no leeway requested.
+ */
+void
+delay_for_interval_with_leeway(
+ uint32_t interval,
+ uint32_t leeway,
+ uint32_t scale_factor)
+{
+ uint64_t abstime_interval;
+ uint64_t abstime_leeway;
+
+ /* Convert both the requested delay and the allowable leeway
+  * from caller units into absolute-time units. */
+ clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime_interval);
+ clock_interval_to_absolutetime_interval(leeway, scale_factor, &abstime_leeway);
+
+ /* Deadline is 'interval' from now; the leeway is passed through
+  * so the wait may be deferred up to that amount. */
+ _clock_delay_until_deadline_with_leeway(abstime_interval, mach_absolute_time() + abstime_interval, abstime_leeway);
+}
+
void
delay(
int usec)
}
#endif /* CONFIG_DTRACE */
+