* real-time clock.
*/
-#include <libkern/OSTypes.h>
-
#include <mach/mach_types.h>
#include <kern/clock.h>
#include <kern/macro_help.h>
#include <kern/spl.h>
+#include <kern/host_notify.h>
+
#include <machine/mach_param.h> /* HZ */
+#include <machine/commpage.h>
#include <ppc/proc_reg.h>
#include <pexpert/pexpert.h>
kern_return_t calend_gettime(
mach_timespec_t *cur_time);
-kern_return_t calend_settime(
- mach_timespec_t *cur_time);
-
kern_return_t calend_getattr(
clock_flavor_t flavor,
clock_attr_t attr,
struct clock_ops calend_ops = {
calend_config, calend_init,
- calend_gettime, calend_settime,
+ calend_gettime, 0,
calend_getattr, 0,
0,
};
/* local data declarations */
-static struct rtclock {
- mach_timespec_t calend_offset;
- boolean_t calend_is_set;
+static struct rtclock_calend {
+ uint32_t epoch;
+ uint32_t microepoch;
- mach_timebase_info_data_t timebase_const;
+ uint64_t epoch1;
- struct rtclock_timer {
- uint64_t deadline;
- boolean_t is_set;
- } timer[NCPUS];
+ int64_t adjtotal;
+ int32_t adjdelta;
+} rtclock_calend;
- clock_timer_func_t timer_expire;
+static boolean_t rtclock_initialized;
- timer_call_data_t alarm_timer;
+static uint64_t rtclock_tick_deadline[NCPUS];
- /* debugging */
- uint64_t last_abstime[NCPUS];
- int last_decr[NCPUS];
+#define NSEC_PER_HZ (NSEC_PER_SEC / HZ)
+static uint32_t rtclock_tick_interval;
- decl_simple_lock_data(,lock) /* real-time clock device lock */
-} rtclock;
+static uint32_t rtclock_sec_divisor;
-static boolean_t rtclock_initialized;
+static mach_timebase_info_data_t rtclock_timebase_const;
-static uint64_t rtclock_tick_deadline[NCPUS];
-static uint64_t rtclock_tick_interval;
+static boolean_t rtclock_timebase_initialized;
+
+static struct rtclock_timer {
+ uint64_t deadline;
+ uint32_t
+ /*boolean_t*/ is_set:1,
+ has_expired:1,
+ :0;
+} rtclock_timer[NCPUS];
+
+static clock_timer_func_t rtclock_timer_expire;
+
+static timer_call_data_t rtclock_alarm_timer;
static void timespec_to_absolutetime(
- mach_timespec_t timespec,
- uint64_t *result);
+ mach_timespec_t *ts,
+ uint64_t *result);
static int deadline_to_decrementer(
- uint64_t deadline,
- uint64_t now);
+ uint64_t deadline,
+ uint64_t now);
-static void rtclock_alarm_timer(
+static void rtclock_alarm_expire(
timer_call_param_t p0,
timer_call_param_t p1);
/* global data declarations */
-#define RTC_TICKPERIOD (NSEC_PER_SEC / HZ)
-
#define DECREMENTER_MAX 0x7FFFFFFFUL
#define DECREMENTER_MIN 0xAUL
natural_t rtclock_decrementer_min;
+decl_simple_lock_data(static,rtclock_lock)
+
/*
* Macros to lock/unlock real-time clock device.
*/
#define LOCK_RTC(s) \
MACRO_BEGIN \
(s) = splclock(); \
- simple_lock(&rtclock.lock); \
+ simple_lock(&rtclock_lock); \
MACRO_END
#define UNLOCK_RTC(s) \
MACRO_BEGIN \
- simple_unlock(&rtclock.lock); \
+ simple_unlock(&rtclock_lock); \
splx(s); \
MACRO_END
timebase_callback(
struct timebase_freq_t *freq)
{
- natural_t numer, denom;
- int n;
+ uint32_t numer, denom;
+ uint64_t abstime;
spl_t s;
- denom = freq->timebase_num;
- n = 9;
- while (!(denom % 10)) {
- if (n < 1)
- break;
- denom /= 10;
- n--;
- }
+ if ( freq->timebase_den < 1 || freq->timebase_den > 4 ||
+ freq->timebase_num < freq->timebase_den )
+ panic("rtclock timebase_callback: invalid constant %d / %d",
+ freq->timebase_num, freq->timebase_den);
- numer = freq->timebase_den;
- while (n-- > 0) {
- numer *= 10;
- }
+ denom = freq->timebase_num;
+ numer = freq->timebase_den * NSEC_PER_SEC;
LOCK_RTC(s);
- rtclock.timebase_const.numer = numer;
- rtclock.timebase_const.denom = denom;
+ if (!rtclock_timebase_initialized) {
+ commpage_set_timestamp(0,0,0,0);
+
+ rtclock_timebase_const.numer = numer;
+ rtclock_timebase_const.denom = denom;
+ rtclock_sec_divisor = freq->timebase_num / freq->timebase_den;
+
+ nanoseconds_to_absolutetime(NSEC_PER_HZ, &abstime);
+ rtclock_tick_interval = abstime;
+ }
+ else {
+ UNLOCK_RTC(s);
+ printf("rtclock timebase_callback: late old %d / %d new %d / %d",
+ rtclock_timebase_const.numer, rtclock_timebase_const.denom,
+ numer, denom);
+ return;
+ }
UNLOCK_RTC(s);
+
+ clock_timebase_init();
}
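
The constants derived above drive every conversion in this file: nanoseconds = abstime * numer / denom, and rtclock_sec_divisor is simply timebase ticks per second. A minimal user-space sketch of the same derivation, assuming a hypothetical 25 MHz timebase (the frequency is illustrative, not from this source):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC	1000000000

int main(void)
{
	uint32_t timebase_num = 25000000, timebase_den = 1;	/* hypothetical */

	/* Same derivation as timebase_callback(). */
	uint32_t denom = timebase_num;
	uint32_t numer = timebase_den * NSEC_PER_SEC;
	uint32_t sec_divisor = timebase_num / timebase_den;

	/* One second of timebase ticks converts to exactly NSEC_PER_SEC. */
	uint64_t abstime = sec_divisor;
	printf("%llu ns\n", (unsigned long long)(abstime * numer / denom));
	return 0;
}
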
/*
if (cpu_number() != master_cpu)
return(1);
- timer_call_setup(&rtclock.alarm_timer, rtclock_alarm_timer, NULL);
+ timer_call_setup(&rtclock_alarm_timer, rtclock_alarm_expire, NULL);
- simple_lock_init(&rtclock.lock, ETAP_MISC_RT_CLOCK);
+ simple_lock_init(&rtclock_lock, ETAP_MISC_RT_CLOCK);
PE_register_timebase_callback(timebase_callback);
panic("sysclk_init on cpu %d, rtc not initialized\n", mycpu);
}
/* Set decrementer and hence our next tick due */
- clock_get_uptime(&abstime);
+ abstime = mach_absolute_time();
rtclock_tick_deadline[mycpu] = abstime;
rtclock_tick_deadline[mycpu] += rtclock_tick_interval;
decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
mtdec(decr);
- rtclock.last_decr[mycpu] = decr;
return(1);
}
- /*
- * Initialize non-zero clock structure values.
- */
- clock_interval_to_absolutetime_interval(RTC_TICKPERIOD, 1,
- &rtclock_tick_interval);
/* Set decrementer and our next tick due */
- clock_get_uptime(&abstime);
+ abstime = mach_absolute_time();
rtclock_tick_deadline[mycpu] = abstime;
rtclock_tick_deadline[mycpu] += rtclock_tick_interval;
decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
mtdec(decr);
- rtclock.last_decr[mycpu] = decr;
rtclock_initialized = TRUE;
return (1);
}
-#define UnsignedWide_to_scalar(x) (*(uint64_t *)(x))
-#define scalar_to_UnsignedWide(x) (*(UnsignedWide *)(x))
-
-/*
- * Perform a full 64 bit by 32 bit unsigned multiply,
- * yielding a 96 bit product. The most significant
- * portion of the product is returned as a 64 bit
- * quantity, with the lower portion as a 32 bit word.
- */
-static void
-umul_64by32(
- UnsignedWide now64,
- uint32_t mult32,
- UnsignedWide *result64,
- uint32_t *result32)
-{
- uint32_t mid, mid2;
-
- asm volatile(" mullw %0,%1,%2" :
- "=r" (*result32) :
- "r" (now64.lo), "r" (mult32));
-
- asm volatile(" mullw %0,%1,%2" :
- "=r" (mid2) :
- "r" (now64.hi), "r" (mult32));
- asm volatile(" mulhwu %0,%1,%2" :
- "=r" (mid) :
- "r" (now64.lo), "r" (mult32));
-
- asm volatile(" mulhwu %0,%1,%2" :
- "=r" (result64->hi) :
- "r" (now64.hi), "r" (mult32));
-
- asm volatile(" addc %0,%2,%3;
- addze %1,%4" :
- "=r" (result64->lo), "=r" (result64->hi) :
- "r" (mid), "r" (mid2), "1" (result64->hi));
-}
-
-/*
- * Perform a partial 64 bit by 32 bit unsigned multiply,
- * yielding a 64 bit product. Only the least significant
- * 64 bits of the product are calculated and returned.
- */
-static void
-umul_64by32to64(
- UnsignedWide now64,
- uint32_t mult32,
- UnsignedWide *result64)
-{
- uint32_t mid, mid2;
-
- asm volatile(" mullw %0,%1,%2" :
- "=r" (result64->lo) :
- "r" (now64.lo), "r" (mult32));
-
- asm volatile(" mullw %0,%1,%2" :
- "=r" (mid2) :
- "r" (now64.hi), "r" (mult32));
- asm volatile(" mulhwu %0,%1,%2" :
- "=r" (mid) :
- "r" (now64.lo), "r" (mult32));
-
- asm volatile(" add %0,%1,%2" :
- "=r" (result64->hi) :
- "r" (mid), "r" (mid2));
-}
-
-/*
- * Perform an unsigned division of a 96 bit value
- * by a 32 bit value, yielding a 96 bit quotient.
- * The most significant portion of the product is
- * returned as a 64 bit quantity, with the lower
- * portion as a 32 bit word.
- */
-static void
-udiv_96by32(
- UnsignedWide now64,
- uint32_t now32,
- uint32_t div32,
- UnsignedWide *result64,
- uint32_t *result32)
-{
- UnsignedWide t64;
-
- if (now64.hi > 0 || now64.lo >= div32) {
- UnsignedWide_to_scalar(result64) =
- UnsignedWide_to_scalar(&now64) / div32;
-
- umul_64by32to64(*result64, div32, &t64);
-
- UnsignedWide_to_scalar(&t64) =
- UnsignedWide_to_scalar(&now64) - UnsignedWide_to_scalar(&t64);
-
- *result32 = (((uint64_t)t64.lo << 32) | now32) / div32;
- }
- else {
- UnsignedWide_to_scalar(result64) =
- (((uint64_t)now64.lo << 32) | now32) / div32;
-
- *result32 = result64->lo;
- result64->lo = result64->hi;
- result64->hi = 0;
- }
-}
-
-/*
- * Perform an unsigned division of a 96 bit value
- * by a 32 bit value, yielding a 64 bit quotient.
- * Any higher order bits of the quotient are simply
- * discarded.
- */
-static void
-udiv_96by32to64(
- UnsignedWide now64,
- uint32_t now32,
- uint32_t div32,
- UnsignedWide *result64)
-{
- UnsignedWide t64;
-
- if (now64.hi > 0 || now64.lo >= div32) {
- UnsignedWide_to_scalar(result64) =
- UnsignedWide_to_scalar(&now64) / div32;
-
- umul_64by32to64(*result64, div32, &t64);
-
- UnsignedWide_to_scalar(&t64) =
- UnsignedWide_to_scalar(&now64) - UnsignedWide_to_scalar(&t64);
-
- result64->hi = result64->lo;
- result64->lo = (((uint64_t)t64.lo << 32) | now32) / div32;
- }
- else {
- UnsignedWide_to_scalar(result64) =
- (((uint64_t)now64.lo << 32) | now32) / div32;
- }
-}
-
-/*
- * Perform an unsigned division of a 96 bit value
- * by a 32 bit value, yielding a 32 bit quotient,
- * and a 32 bit remainder. Any higher order bits
- * of the quotient are simply discarded.
- */
-static void
-udiv_96by32to32and32(
- UnsignedWide now64,
- uint32_t now32,
- uint32_t div32,
- uint32_t *result32,
- uint32_t *remain32)
-{
- UnsignedWide t64, u64;
-
- if (now64.hi > 0 || now64.lo >= div32) {
- UnsignedWide_to_scalar(&t64) =
- UnsignedWide_to_scalar(&now64) / div32;
-
- umul_64by32to64(t64, div32, &t64);
-
- UnsignedWide_to_scalar(&t64) =
- UnsignedWide_to_scalar(&now64) - UnsignedWide_to_scalar(&t64);
-
- UnsignedWide_to_scalar(&t64) = ((uint64_t)t64.lo << 32) | now32;
-
- UnsignedWide_to_scalar(&u64) =
- UnsignedWide_to_scalar(&t64) / div32;
-
- *result32 = u64.lo;
-
- umul_64by32to64(u64, div32, &u64);
-
- *remain32 = UnsignedWide_to_scalar(&t64) -
- UnsignedWide_to_scalar(&u64);
- }
- else {
- UnsignedWide_to_scalar(&t64) = ((uint64_t)now64.lo << 32) | now32;
-
- UnsignedWide_to_scalar(&u64) =
- UnsignedWide_to_scalar(&t64) / div32;
-
- *result32 = u64.lo;
-
- umul_64by32to64(u64, div32, &u64);
-
- *remain32 = UnsignedWide_to_scalar(&t64) -
- UnsignedWide_to_scalar(&u64);
- }
-}
-
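
These removed helpers existed because scaling a full 64-bit timebase value by numer/denom needs a 96-bit intermediate on 32-bit PowerPC. The replacement code peels off whole seconds with rtclock_sec_divisor before scaling the sub-second remainder, so every intermediate stays inside 64 bits. A sketch of the equivalence, with a hypothetical divisor:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC	1000000000ULL

int main(void)
{
	uint32_t divisor = 25000000;		/* hypothetical ticks/sec */
	uint64_t abstime = 1234567891ULL;

	/* Old shape: one full-width scaling.  abstime * NSEC_PER_SEC
	 * overflows 64 bits once abstime exceeds ~18.4e9 ticks (about
	 * 12 minutes of uptime at 25 MHz); hence the 96-bit asm helpers. */
	uint64_t direct = abstime * NSEC_PER_SEC / divisor;

	/* New shape: remainder < divisor < 2^32, so the scaled remainder
	 * can never overflow, for any abstime. */
	uint64_t secs = abstime / divisor;
	uint64_t rem  = abstime - secs * divisor;
	uint64_t split = secs * NSEC_PER_SEC + rem * NSEC_PER_SEC / divisor;

	printf("direct %llu == split %llu\n",
	    (unsigned long long)direct, (unsigned long long)split);
	return 0;
}
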
-/*
- * Get the clock device time. This routine is responsible
- * for converting the device's machine dependent time value
- * into a canonical mach_timespec_t value.
- *
- * SMP configurations - *the processor clocks are synchronised*
- */
kern_return_t
-sysclk_gettime_internal(
- mach_timespec_t *time) /* OUT */
+sysclk_gettime(
+ mach_timespec_t *time) /* OUT */
{
- UnsignedWide now;
- UnsignedWide t64;
- uint32_t t32;
- uint32_t numer, denom;
-
- numer = rtclock.timebase_const.numer;
- denom = rtclock.timebase_const.denom;
-
- clock_get_uptime((uint64_t *)&now);
-
- umul_64by32(now, numer, &t64, &t32);
+ uint64_t now, t64;
+ uint32_t divisor;
- udiv_96by32(t64, t32, denom, &t64, &t32);
+ now = mach_absolute_time();
- udiv_96by32to32and32(t64, t32, NSEC_PER_SEC,
- &time->tv_sec, &time->tv_nsec);
+ time->tv_sec = t64 = now / (divisor = rtclock_sec_divisor);
+ now -= (t64 * divisor);
+ time->tv_nsec = (now * NSEC_PER_SEC) / divisor;
return (KERN_SUCCESS);
}
-kern_return_t
-sysclk_gettime(
- mach_timespec_t *time) /* OUT */
+void
+clock_get_system_microtime(
+ uint32_t *secs,
+ uint32_t *microsecs)
{
- UnsignedWide now;
- UnsignedWide t64;
- uint32_t t32;
- uint32_t numer, denom;
- spl_t s;
+ uint64_t now, t64;
+ uint32_t divisor;
- LOCK_RTC(s);
- numer = rtclock.timebase_const.numer;
- denom = rtclock.timebase_const.denom;
- UNLOCK_RTC(s);
+ now = mach_absolute_time();
- clock_get_uptime((uint64_t *)&now);
-
- umul_64by32(now, numer, &t64, &t32);
+ *secs = t64 = now / (divisor = rtclock_sec_divisor);
+ now -= (t64 * divisor);
+ *microsecs = (now * USEC_PER_SEC) / divisor;
+}
- udiv_96by32(t64, t32, denom, &t64, &t32);
+void
+clock_get_system_nanotime(
+ uint32_t *secs,
+ uint32_t *nanosecs)
+{
+ uint64_t now, t64;
+ uint32_t divisor;
- udiv_96by32to32and32(t64, t32, NSEC_PER_SEC,
- &time->tv_sec, &time->tv_nsec);
+ now = mach_absolute_time();
- return (KERN_SUCCESS);
+ *secs = t64 = now / (divisor = rtclock_sec_divisor);
+ now -= (t64 * divisor);
+ *nanosecs = (now * NSEC_PER_SEC) / divisor;
}
/*
*/
kern_return_t
sysclk_getattr(
- clock_flavor_t flavor,
- clock_attr_t attr, /* OUT */
+ clock_flavor_t flavor,
+ clock_attr_t attr, /* OUT */
mach_msg_type_number_t *count) /* IN/OUT */
{
- spl_t s;
+ spl_t s;
if (*count != 1)
return (KERN_FAILURE);
+
switch (flavor) {
case CLOCK_GET_TIME_RES: /* >0 res */
case CLOCK_ALARM_MINRES:
case CLOCK_ALARM_MAXRES:
LOCK_RTC(s);
- *(clock_res_t *) attr = RTC_TICKPERIOD;
+ *(clock_res_t *) attr = NSEC_PER_HZ;
UNLOCK_RTC(s);
break;
default:
return (KERN_INVALID_VALUE);
}
+
return (KERN_SUCCESS);
}
sysclk_setalarm(
mach_timespec_t *deadline)
{
- uint64_t abstime;
+ uint64_t abstime;
- timespec_to_absolutetime(*deadline, &abstime);
- timer_call_enter(&rtclock.alarm_timer, abstime);
+ timespec_to_absolutetime(deadline, &abstime);
+ timer_call_enter(&rtclock_alarm_timer, abstime);
}
/*
*/
kern_return_t
calend_gettime(
- mach_timespec_t *curr_time) /* OUT */
+ mach_timespec_t *time) /* OUT */
{
- spl_t s;
-
- LOCK_RTC(s);
- if (!rtclock.calend_is_set) {
- UNLOCK_RTC(s);
- return (KERN_FAILURE);
- }
-
- (void) sysclk_gettime_internal(curr_time);
- ADD_MACH_TIMESPEC(curr_time, &rtclock.calend_offset);
- UNLOCK_RTC(s);
-
- return (KERN_SUCCESS);
-}
-
-/*
- * Set the current clock time.
- */
-kern_return_t
-calend_settime(
- mach_timespec_t *new_time)
-{
- mach_timespec_t curr_time;
- spl_t s;
-
- LOCK_RTC(s);
- (void) sysclk_gettime_internal(&curr_time);
- rtclock.calend_offset = *new_time;
- SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
- rtclock.calend_is_set = TRUE;
- UNLOCK_RTC(s);
-
- PESetGMTTimeOfDay(new_time->tv_sec);
+ clock_get_calendar_nanotime(
+ &time->tv_sec, &time->tv_nsec);
return (KERN_SUCCESS);
}
*/
kern_return_t
calend_getattr(
- clock_flavor_t flavor,
- clock_attr_t attr, /* OUT */
+ clock_flavor_t flavor,
+ clock_attr_t attr, /* OUT */
mach_msg_type_number_t *count) /* IN/OUT */
{
- spl_t s;
+ spl_t s;
if (*count != 1)
return (KERN_FAILURE);
+
switch (flavor) {
case CLOCK_GET_TIME_RES: /* >0 res */
LOCK_RTC(s);
- *(clock_res_t *) attr = RTC_TICKPERIOD;
+ *(clock_res_t *) attr = NSEC_PER_HZ;
UNLOCK_RTC(s);
break;
default:
return (KERN_INVALID_VALUE);
}
+
return (KERN_SUCCESS);
}
void
-clock_adjust_calendar(
- clock_res_t nsec)
+clock_get_calendar_microtime(
+ uint32_t *secs,
+ uint32_t *microsecs)
{
- spl_t s;
+ uint32_t epoch, microepoch;
+ uint64_t now, t64;
+ spl_t s = splclock();
+
+ simple_lock(&rtclock_lock);
+
+ if (rtclock_calend.adjdelta >= 0) {
+ uint32_t divisor;
+
+ now = mach_absolute_time();
+
+ epoch = rtclock_calend.epoch;
+ microepoch = rtclock_calend.microepoch;
+
+ simple_unlock(&rtclock_lock);
+
+ *secs = t64 = now / (divisor = rtclock_sec_divisor);
+ now -= (t64 * divisor);
+ *microsecs = (now * USEC_PER_SEC) / divisor;
+
+ if ((*microsecs += microepoch) >= USEC_PER_SEC) {
+ *microsecs -= USEC_PER_SEC;
+ epoch += 1;
+ }
+
+ *secs += epoch;
+ }
+ else {
+ uint32_t delta, t32;
+
+ delta = -rtclock_calend.adjdelta;
+
+ t64 = mach_absolute_time() - rtclock_calend.epoch1;
+
+ *secs = rtclock_calend.epoch;
+ *microsecs = rtclock_calend.microepoch;
+
+ simple_unlock(&rtclock_lock);
+
+ t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;
+
+ if (t32 > delta)
+ *microsecs += (t32 - delta);
+
+ if (*microsecs >= USEC_PER_SEC) {
+ *microsecs -= USEC_PER_SEC;
+ *secs += 1;
+ }
+ }
+
+ splx(s);
+}
+
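
Calendar time is now kept as an epoch pair: when no negative slew is pending, rtclock_calend.epoch/microepoch hold the calendar time at absolute time zero, so a read is just uptime plus epoch with a microsecond carry. (During a negative slew they instead hold the calendar time at the epoch1 snapshot, and elapsed time is credited only beyond the pending delta so time never steps backwards.) A sketch of the fast path, with hypothetical values:

#include <stdio.h>
#include <stdint.h>

#define USEC_PER_SEC	1000000

int main(void)
{
	/* Hypothetical snapshot: calendar epoch and current uptime. */
	uint32_t epoch = 1060000000, microepoch = 900000;
	uint32_t divisor = 25000000;		/* ticks/sec */
	uint64_t now = 3750000000ULL;		/* uptime, in ticks */

	/* Same fast path as clock_get_calendar_microtime(): uptime plus
	 * epoch, with carry from microseconds into seconds. */
	uint64_t t64 = now / divisor;
	uint32_t secs = (uint32_t)t64;
	uint32_t usecs =
	    (uint32_t)(((now - t64 * divisor) * USEC_PER_SEC) / divisor);

	if ((usecs += microepoch) >= USEC_PER_SEC) {
		usecs -= USEC_PER_SEC;
		epoch += 1;
	}
	secs += epoch;

	printf("%u.%06u\n", secs, usecs);
	return 0;
}
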
+/* This is only called from the gettimeofday() syscall. As a side
+ * effect, it updates the commpage timestamp. Otherwise it is
+ * identical to clock_get_calendar_microtime(). Because most
+ * gettimeofday() calls are handled by the commpage in user mode,
+ * this routine should be infrequently used except when slowing down
+ * the clock.
+ */
+void
+clock_gettimeofday(
+ uint32_t *secs_p,
+ uint32_t *microsecs_p)
+{
+ uint32_t epoch, microepoch;
+ uint32_t secs, microsecs;
+ uint64_t now, t64, secs_64, usec_64;
+ spl_t s = splclock();
+
+ simple_lock(&rtclock_lock);
+
+ if (rtclock_calend.adjdelta >= 0) {
+ now = mach_absolute_time();
+
+ epoch = rtclock_calend.epoch;
+ microepoch = rtclock_calend.microepoch;
+
+ secs = secs_64 = now / rtclock_sec_divisor;
+ t64 = now - (secs_64 * rtclock_sec_divisor);
+ microsecs = usec_64 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;
+
+ if ((microsecs += microepoch) >= USEC_PER_SEC) {
+ microsecs -= USEC_PER_SEC;
+ epoch += 1;
+ }
+
+ secs += epoch;
+
+ /* adjust "now" to be absolute time at _start_ of usecond */
+ now -= t64 - ((usec_64 * rtclock_sec_divisor) / USEC_PER_SEC);
+
+ commpage_set_timestamp(now,secs,microsecs,rtclock_sec_divisor);
+ }
+ else {
+ uint32_t delta, t32;
+
+ delta = -rtclock_calend.adjdelta;
+
+ now = mach_absolute_time() - rtclock_calend.epoch1;
+
+ secs = rtclock_calend.epoch;
+ microsecs = rtclock_calend.microepoch;
+
+ t32 = (now * USEC_PER_SEC) / rtclock_sec_divisor;
+
+ if (t32 > delta)
+ microsecs += (t32 - delta);
+
+ if (microsecs >= USEC_PER_SEC) {
+ microsecs -= USEC_PER_SEC;
+ secs += 1;
+ }
+ /* no need to disable timestamp, it is already off */
+ }
+
+ simple_unlock(&rtclock_lock);
+ splx(s);
+
+ *secs_p = secs;
+ *microsecs_p = microsecs;
+}
+
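
The commpage timestamp published here is what lets most gettimeofday() calls complete in user mode: the kernel exports the absolute time at the start of the current microsecond plus the matching calendar time and divisor, and user code adds the ticks elapsed since the stamp. A sketch of the consumer side under an assumed stamp layout (the real commpage format is not shown in this source):

#include <stdio.h>
#include <stdint.h>

#define USEC_PER_SEC	1000000

/* Hypothetical published stamp; the kernel invalidates it (divisor == 0)
 * while the clock is slewing, forcing callers back to the syscall. */
struct stamp {
	uint64_t abstime;	/* absolute time at start of the usecond */
	uint32_t secs, usecs;	/* calendar time at that instant */
	uint32_t divisor;	/* timebase ticks per second */
};

static int fast_gettimeofday(const struct stamp *st, uint64_t now,
    uint32_t *secs, uint32_t *usecs)
{
	if (st->divisor == 0)
		return -1;		/* stamp disabled: take the syscall */

	uint64_t delta = ((now - st->abstime) * USEC_PER_SEC) / st->divisor;
	*secs = st->secs + (uint32_t)((st->usecs + delta) / USEC_PER_SEC);
	*usecs = (uint32_t)((st->usecs + delta) % USEC_PER_SEC);
	return 0;
}

int main(void)
{
	struct stamp st = { 1000000000ULL, 1060000000, 999999, 25000000 };
	uint32_t secs, usecs;

	/* 50 ticks past the stamp = 2 us; carries into the next second. */
	if (fast_gettimeofday(&st, st.abstime + 50, &secs, &usecs) == 0)
		printf("%u.%06u\n", secs, usecs);
	return 0;
}
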
+void
+clock_get_calendar_nanotime(
+ uint32_t *secs,
+ uint32_t *nanosecs)
+{
+ uint32_t epoch, nanoepoch;
+ uint64_t now, t64;
+ spl_t s = splclock();
+
+ simple_lock(&rtclock_lock);
+
+ if (rtclock_calend.adjdelta >= 0) {
+ uint32_t divisor;
+
+ now = mach_absolute_time();
+
+ epoch = rtclock_calend.epoch;
+ nanoepoch = rtclock_calend.microepoch * NSEC_PER_USEC;
+
+ simple_unlock(&rtclock_lock);
+
+ *secs = t64 = now / (divisor = rtclock_sec_divisor);
+ now -= (t64 * divisor);
+ *nanosecs = ((now * USEC_PER_SEC) / divisor) * NSEC_PER_USEC;
+
+ if ((*nanosecs += nanoepoch) >= NSEC_PER_SEC) {
+ *nanosecs -= NSEC_PER_SEC;
+ epoch += 1;
+ }
+
+ *secs += epoch;
+ }
+ else {
+ uint32_t delta, t32;
+
+ delta = -rtclock_calend.adjdelta;
+
+ t64 = mach_absolute_time() - rtclock_calend.epoch1;
+
+ *secs = rtclock_calend.epoch;
+ *nanosecs = rtclock_calend.microepoch * NSEC_PER_USEC;
+
+ simple_unlock(&rtclock_lock);
+
+ t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;
+
+ if (t32 > delta)
+ *nanosecs += ((t32 - delta) * NSEC_PER_USEC);
+
+ if (*nanosecs >= NSEC_PER_SEC) {
+ *nanosecs -= NSEC_PER_SEC;
+ *secs += 1;
+ }
+ }
+
+ splx(s);
+}
+
+void
+clock_set_calendar_microtime(
+ uint32_t secs,
+ uint32_t microsecs)
+{
+ uint32_t sys, microsys;
+ uint32_t newsecs;
+ spl_t s;
+
+ newsecs = (microsecs < 500*USEC_PER_SEC)?
+ secs: secs + 1;
LOCK_RTC(s);
- if (rtclock.calend_is_set)
- ADD_MACH_TIMESPEC_NSEC(&rtclock.calend_offset, nsec);
+ commpage_set_timestamp(0,0,0,0);
+
+ clock_get_system_microtime(&sys, &microsys);
+ if ((int32_t)(microsecs -= microsys) < 0) {
+ microsecs += USEC_PER_SEC;
+ secs -= 1;
+ }
+
+ secs -= sys;
+
+ rtclock_calend.epoch = secs;
+ rtclock_calend.microepoch = microsecs;
+ rtclock_calend.epoch1 = 0;
+ rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
UNLOCK_RTC(s);
+
+ PESetGMTTimeOfDay(newsecs);
+
+ host_notify_calendar_change();
}
-void
-clock_initialize_calendar(void)
+#define tickadj (40) /* "standard" skew, us / tick */
+#define bigadj (USEC_PER_SEC) /* use 10x skew above bigadj us */
+
+uint32_t
+clock_set_calendar_adjtime(
+ int32_t *secs,
+ int32_t *microsecs)
+{
+ int64_t total, ototal;
+ uint32_t interval = 0;
+ spl_t s;
+
+ total = (int64_t)*secs * USEC_PER_SEC + *microsecs;
+
+ LOCK_RTC(s);
+ commpage_set_timestamp(0,0,0,0);
+
+ ototal = rtclock_calend.adjtotal;
+
+ if (rtclock_calend.adjdelta < 0) {
+ uint64_t now, t64;
+ uint32_t delta, t32;
+ uint32_t sys, microsys;
+
+ delta = -rtclock_calend.adjdelta;
+
+ sys = rtclock_calend.epoch;
+ microsys = rtclock_calend.microepoch;
+
+ now = mach_absolute_time();
+
+ t64 = now - rtclock_calend.epoch1;
+ t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;
+
+ if (t32 > delta)
+ microsys += (t32 - delta);
+
+ if (microsys >= USEC_PER_SEC) {
+ microsys -= USEC_PER_SEC;
+ sys += 1;
+ }
+
+ rtclock_calend.epoch = sys;
+ rtclock_calend.microepoch = microsys;
+
+ sys = t64 = now / rtclock_sec_divisor;
+ now -= (t64 * rtclock_sec_divisor);
+ microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;
+
+ if ((int32_t)(rtclock_calend.microepoch -= microsys) < 0) {
+ rtclock_calend.microepoch += USEC_PER_SEC;
+ sys += 1;
+ }
+
+ rtclock_calend.epoch -= sys;
+ }
+
+ if (total != 0) {
+ int32_t delta = tickadj;
+
+ if (total > 0) {
+ if (total > bigadj)
+ delta *= 10;
+ if (delta > total)
+ delta = total;
+
+ rtclock_calend.epoch1 = 0;
+ }
+ else {
+ uint64_t now, t64;
+ uint32_t sys, microsys;
+
+ if (total < -bigadj)
+ delta *= 10;
+ delta = -delta;
+ if (delta < total)
+ delta = total;
+
+ rtclock_calend.epoch1 = now = mach_absolute_time();
+
+ sys = t64 = now / rtclock_sec_divisor;
+ now -= (t64 * rtclock_sec_divisor);
+ microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;
+
+ if ((rtclock_calend.microepoch += microsys) >= USEC_PER_SEC) {
+ rtclock_calend.microepoch -= USEC_PER_SEC;
+ sys += 1;
+ }
+
+ rtclock_calend.epoch += sys;
+ }
+
+ rtclock_calend.adjtotal = total;
+ rtclock_calend.adjdelta = delta;
+
+ interval = rtclock_tick_interval;
+ }
+ else {
+ rtclock_calend.epoch1 = 0;
+ rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
+ }
+
+ UNLOCK_RTC(s);
+
+ if (ototal == 0)
+ *secs = *microsecs = 0;
+ else {
+ *secs = ototal / USEC_PER_SEC;
+ *microsecs = ototal % USEC_PER_SEC;
+ }
+
+ return (interval);
+}
+
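
The skew rate chosen above is tickadj (40 us) per clock tick, scaled 10x while the outstanding correction exceeds bigadj (one second), and clamped so the final tick lands exactly on the requested total. A standalone sketch of just that selection logic:

#include <stdio.h>
#include <stdint.h>

#define USEC_PER_SEC	1000000
#define tickadj		(40)		/* us of skew applied per tick */
#define bigadj		(USEC_PER_SEC)	/* use 10x skew above this, us */

/* Per-tick delta for an outstanding adjustment of `total' us, mirroring
 * the selection in clock_set_calendar_adjtime() for nonzero totals. */
static int32_t adj_delta(int64_t total)
{
	int32_t delta = tickadj;

	if (total > 0) {
		if (total > bigadj)
			delta *= 10;
		if (delta > total)
			delta = (int32_t)total;
	}
	else {
		if (total < -bigadj)
			delta *= 10;
		delta = -delta;
		if (delta < total)
			delta = (int32_t)total;
	}
	return delta;
}

int main(void)
{
	printf("%d %d %d %d\n",
	    adj_delta(25), adj_delta(500000),
	    adj_delta(3 * USEC_PER_SEC), adj_delta(-3 * USEC_PER_SEC));
	/* 25 40 400 -400: small requests finish in one tick, large ones
	 * slew at 40 or 400 us per tick. */
	return 0;
}
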
+uint32_t
+clock_adjust_calendar(void)
{
- mach_timespec_t curr_time;
- long seconds = PEGetGMTTimeOfDay();
- spl_t s;
+ uint32_t micronew, interval = 0;
+ int32_t delta;
+ spl_t s;
LOCK_RTC(s);
- (void) sysclk_gettime_internal(&curr_time);
- if (curr_time.tv_nsec < 500*USEC_PER_SEC)
- rtclock.calend_offset.tv_sec = seconds;
+ commpage_set_timestamp(0,0,0,0);
+
+ delta = rtclock_calend.adjdelta;
+
+ if (delta > 0) {
+ micronew = rtclock_calend.microepoch + delta;
+ if (micronew >= USEC_PER_SEC) {
+ micronew -= USEC_PER_SEC;
+ rtclock_calend.epoch += 1;
+ }
+
+ rtclock_calend.microepoch = micronew;
+
+ rtclock_calend.adjtotal -= delta;
+ if (delta > rtclock_calend.adjtotal)
+ rtclock_calend.adjdelta = rtclock_calend.adjtotal;
+ }
else
- rtclock.calend_offset.tv_sec = seconds + 1;
- rtclock.calend_offset.tv_nsec = 0;
- SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
- rtclock.calend_is_set = TRUE;
+ if (delta < 0) {
+ uint64_t now, t64;
+ uint32_t t32;
+
+ now = mach_absolute_time();
+
+ t64 = now - rtclock_calend.epoch1;
+
+ rtclock_calend.epoch1 = now;
+
+ t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;
+
+ micronew = rtclock_calend.microepoch + t32 + delta;
+ if (micronew >= USEC_PER_SEC) {
+ micronew -= USEC_PER_SEC;
+ rtclock_calend.epoch += 1;
+ }
+
+ rtclock_calend.microepoch = micronew;
+
+ rtclock_calend.adjtotal -= delta;
+ if (delta < rtclock_calend.adjtotal)
+ rtclock_calend.adjdelta = rtclock_calend.adjtotal;
+
+ if (rtclock_calend.adjdelta == 0) {
+ uint32_t sys, microsys;
+
+ sys = t64 = now / rtclock_sec_divisor;
+ now -= (t64 * rtclock_sec_divisor);
+ microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;
+
+ if ((int32_t)(rtclock_calend.microepoch -= microsys) < 0) {
+ rtclock_calend.microepoch += USEC_PER_SEC;
+ sys += 1;
+ }
+
+ rtclock_calend.epoch -= sys;
+
+ rtclock_calend.epoch1 = 0;
+ }
+ }
+
+ if (rtclock_calend.adjdelta != 0)
+ interval = rtclock_tick_interval;
+
UNLOCK_RTC(s);
+
+ return (interval);
}
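
clock_adjust_calendar() runs once per tick while a correction is pending; each call folds adjdelta into the epoch and shrinks adjtotal, so a request converges in roughly adjtotal / adjdelta ticks. A sketch of the positive-delta arm's convergence:

#include <stdio.h>
#include <stdint.h>

/* Tick-by-tick convergence of a pending +1.5 ms adjustment at 40 us per
 * tick, mirroring the positive-delta arm of clock_adjust_calendar(). */
int main(void)
{
	int64_t adjtotal = 1500;	/* us outstanding (hypothetical) */
	int32_t adjdelta = 40;		/* us applied per tick */
	int ticks = 0;

	while (adjdelta != 0) {
		adjtotal -= adjdelta;
		if (adjdelta > adjtotal)	/* final, shorter step */
			adjdelta = (int32_t)adjtotal;
		ticks++;
	}
	printf("converged in %d ticks\n", ticks);	/* 38 ticks */
	return 0;
}
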
-mach_timespec_t
-clock_get_calendar_offset(void)
+void
+clock_initialize_calendar(void)
{
- mach_timespec_t result = MACH_TIMESPEC_ZERO;
- spl_t s;
+ uint32_t sys, microsys;
+ uint32_t microsecs = 0, secs = PEGetGMTTimeOfDay();
+ spl_t s;
LOCK_RTC(s);
- if (rtclock.calend_is_set)
- result = rtclock.calend_offset;
+ commpage_set_timestamp(0,0,0,0);
+
+ clock_get_system_microtime(&sys, &microsys);
+ if ((int32_t)(microsecs -= microsys) < 0) {
+ microsecs += USEC_PER_SEC;
+ secs -= 1;
+ }
+
+ secs -= sys;
+
+ rtclock_calend.epoch = secs;
+ rtclock_calend.microepoch = microsecs;
+ rtclock_calend.epoch1 = 0;
+ rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
UNLOCK_RTC(s);
- return (result);
+ host_notify_calendar_change();
}
void
clock_timebase_info(
mach_timebase_info_t info)
{
- spl_t s;
+ spl_t s;
LOCK_RTC(s);
- *info = rtclock.timebase_const;
+ rtclock_timebase_initialized = TRUE;
+ *info = rtclock_timebase_const;
UNLOCK_RTC(s);
}
s = splclock();
mycpu = cpu_number();
- mytimer = &rtclock.timer[mycpu];
- clock_get_uptime(&abstime);
- rtclock.last_abstime[mycpu] = abstime;
+ mytimer = &rtclock_timer[mycpu];
mytimer->deadline = deadline;
mytimer->is_set = TRUE;
- if ( mytimer->deadline < rtclock_tick_deadline[mycpu] ) {
- decr = deadline_to_decrementer(mytimer->deadline, abstime);
- if ( rtclock_decrementer_min != 0 &&
- rtclock_decrementer_min < (natural_t)decr )
- decr = rtclock_decrementer_min;
-
- mtdec(decr);
- rtclock.last_decr[mycpu] = decr;
-
- KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
- | DBG_FUNC_NONE, decr, 2, 0, 0, 0);
+ if (!mytimer->has_expired) {
+ abstime = mach_absolute_time();
+ if ( mytimer->deadline < rtclock_tick_deadline[mycpu] ) {
+ decr = deadline_to_decrementer(mytimer->deadline, abstime);
+ if ( rtclock_decrementer_min != 0 &&
+ rtclock_decrementer_min < (natural_t)decr )
+ decr = rtclock_decrementer_min;
+
+ mtdec(decr);
+
+ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
+ | DBG_FUNC_NONE, decr, 2, 0, 0, 0);
+ }
}
splx(s);
}
spl_t s;
LOCK_RTC(s);
- if (rtclock.timer_expire == NULL)
- rtclock.timer_expire = func;
+ if (rtclock_timer_expire == NULL)
+ rtclock_timer_expire = func;
UNLOCK_RTC(s);
}
spl_t old_spl)
{
uint64_t abstime;
- int decr[3], mycpu = cpu_number();
- struct rtclock_timer *mytimer = &rtclock.timer[mycpu];
+ int decr1, decr2, mycpu = cpu_number();
+ struct rtclock_timer *mytimer = &rtclock_timer[mycpu];
/*
* We may receive interrupts too early, we must reject them.
return;
}
- decr[1] = decr[2] = DECREMENTER_MAX;
+ decr1 = decr2 = DECREMENTER_MAX;
- clock_get_uptime(&abstime);
- rtclock.last_abstime[mycpu] = abstime;
+ abstime = mach_absolute_time();
if ( rtclock_tick_deadline[mycpu] <= abstime ) {
clock_deadline_for_periodic_event(rtclock_tick_interval, abstime,
&rtclock_tick_deadline[mycpu]);
hertz_tick(USER_MODE(ssp->save_srr1), ssp->save_srr0);
}
- clock_get_uptime(&abstime);
- rtclock.last_abstime[mycpu] = abstime;
+ abstime = mach_absolute_time();
if ( mytimer->is_set &&
mytimer->deadline <= abstime ) {
- mytimer->is_set = FALSE;
- (*rtclock.timer_expire)(abstime);
+ mytimer->has_expired = TRUE; mytimer->is_set = FALSE;
+ (*rtclock_timer_expire)(abstime);
+ mytimer->has_expired = FALSE;
}
- clock_get_uptime(&abstime);
- rtclock.last_abstime[mycpu] = abstime;
- decr[1] = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
+ abstime = mach_absolute_time();
+ decr1 = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
if (mytimer->is_set)
- decr[2] = deadline_to_decrementer(mytimer->deadline, abstime);
+ decr2 = deadline_to_decrementer(mytimer->deadline, abstime);
- if (decr[1] > decr[2])
- decr[1] = decr[2];
+ if (decr1 > decr2)
+ decr1 = decr2;
if ( rtclock_decrementer_min != 0 &&
- rtclock_decrementer_min < (natural_t)decr[1] )
- decr[1] = rtclock_decrementer_min;
+ rtclock_decrementer_min < (natural_t)decr1 )
+ decr1 = rtclock_decrementer_min;
- mtdec(decr[1]);
- rtclock.last_decr[mycpu] = decr[1];
+ mtdec(decr1);
KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
- | DBG_FUNC_NONE, decr[1], 3, 0, 0, 0);
+ | DBG_FUNC_NONE, decr1, 3, 0, 0, 0);
}
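
The new has_expired bit is an interlock between this handler and the set-deadline path: while the expiration callback runs, re-arming the timer records the new deadline but skips the decrementer write, and the handler reprograms the hardware once after the callback returns. A condensed model of the handshake (hardware access stubbed out; names mirror the kernel's):

#include <stdio.h>

struct timer {
	unsigned long long deadline;
	unsigned is_set:1, has_expired:1;
};

static void program_decrementer(unsigned long long deadline)
{
	printf("decrementer armed for %llu\n", deadline);
}

/* Set-deadline path: skip the hardware write during expiration. */
static void timer_set_deadline(struct timer *t, unsigned long long deadline)
{
	t->deadline = deadline;
	t->is_set = 1;
	if (!t->has_expired)
		program_decrementer(deadline);
}

/* Interrupt path: the callback may re-arm freely; the decrementer is
 * reprogrammed exactly once, after the callback returns. */
static void decrementer_intr(struct timer *t, void (*expire)(struct timer *))
{
	t->has_expired = 1; t->is_set = 0;
	(*expire)(t);
	t->has_expired = 0;
	if (t->is_set)
		program_decrementer(t->deadline);
}

static void expire_and_rearm(struct timer *t)
{
	timer_set_deadline(t, 12345);	/* no hardware write here */
}

int main(void)
{
	struct timer t = { 0, 0, 0 };
	decrementer_intr(&t, expire_and_rearm);	/* arms once, at the end */
	return 0;
}
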
static void
-rtclock_alarm_timer(
+rtclock_alarm_expire(
timer_call_param_t p0,
timer_call_param_t p1)
{
clock_alarm_intr(SYSTEM_CLOCK, &timestamp);
}
-void
-clock_get_uptime(
- uint64_t *result0)
-{
- UnsignedWide *result = (UnsignedWide *)result0;
- uint32_t hi, lo, hic;
-
- do {
- asm volatile(" mftbu %0" : "=r" (hi));
- asm volatile(" mftb %0" : "=r" (lo));
- asm volatile(" mftbu %0" : "=r" (hic));
- } while (hic != hi);
-
- result->lo = lo;
- result->hi = hi;
-}
-
static int
deadline_to_decrementer(
- uint64_t deadline,
- uint64_t now)
+ uint64_t deadline,
+ uint64_t now)
{
- uint64_t delt;
+ uint64_t delt;
if (deadline <= now)
return DECREMENTER_MIN;
static void
timespec_to_absolutetime(
- mach_timespec_t timespec,
- uint64_t *result0)
+ mach_timespec_t *ts,
+ uint64_t *result)
{
- UnsignedWide *result = (UnsignedWide *)result0;
- UnsignedWide t64;
- uint32_t t32;
- uint32_t numer, denom;
- spl_t s;
-
- LOCK_RTC(s);
- numer = rtclock.timebase_const.numer;
- denom = rtclock.timebase_const.denom;
- UNLOCK_RTC(s);
-
- asm volatile(" mullw %0,%1,%2" :
- "=r" (t64.lo) :
- "r" (timespec.tv_sec), "r" (NSEC_PER_SEC));
-
- asm volatile(" mulhwu %0,%1,%2" :
- "=r" (t64.hi) :
- "r" (timespec.tv_sec), "r" (NSEC_PER_SEC));
+ uint32_t divisor;
- UnsignedWide_to_scalar(&t64) += timespec.tv_nsec;
-
- umul_64by32(t64, denom, &t64, &t32);
-
- udiv_96by32(t64, t32, numer, &t64, &t32);
-
- result->hi = t64.lo;
- result->lo = t32;
+ *result = ((uint64_t)ts->tv_sec * (divisor = rtclock_sec_divisor)) +
+ ((uint64_t)ts->tv_nsec * divisor) / NSEC_PER_SEC;
}
void
uint32_t scale_factor,
uint64_t *result)
{
- uint64_t abstime;
+ uint64_t abstime;
clock_get_uptime(result);
clock_interval_to_absolutetime_interval(
uint32_t interval,
uint32_t scale_factor,
- uint64_t *result0)
+ uint64_t *result)
{
- UnsignedWide *result = (UnsignedWide *)result0;
- UnsignedWide t64;
- uint32_t t32;
- uint32_t numer, denom;
- spl_t s;
-
- LOCK_RTC(s);
- numer = rtclock.timebase_const.numer;
- denom = rtclock.timebase_const.denom;
- UNLOCK_RTC(s);
-
- asm volatile(" mullw %0,%1,%2" :
- "=r" (t64.lo) :
- "r" (interval), "r" (scale_factor));
- asm volatile(" mulhwu %0,%1,%2" :
- "=r" (t64.hi) :
- "r" (interval), "r" (scale_factor));
-
- umul_64by32(t64, denom, &t64, &t32);
-
- udiv_96by32(t64, t32, numer, &t64, &t32);
-
- result->hi = t64.lo;
- result->lo = t32;
+ uint64_t nanosecs = (uint64_t)interval * scale_factor;
+ uint64_t t64;
+ uint32_t divisor;
+
+ *result = (t64 = nanosecs / NSEC_PER_SEC) *
+ (divisor = rtclock_sec_divisor);
+ nanosecs -= (t64 * NSEC_PER_SEC);
+ *result += (nanosecs * divisor) / NSEC_PER_SEC;
}
void
uint64_t abstime,
uint64_t *result)
{
- UnsignedWide t64;
- uint32_t t32;
- uint32_t numer, denom;
- spl_t s;
+ uint64_t t64;
+ uint32_t divisor;
- LOCK_RTC(s);
- numer = rtclock.timebase_const.numer;
- denom = rtclock.timebase_const.denom;
- UNLOCK_RTC(s);
-
- UnsignedWide_to_scalar(&t64) = abstime;
-
- umul_64by32(t64, numer, &t64, &t32);
-
- udiv_96by32to64(t64, t32, denom, (void *)result);
+ *result = (t64 = abstime / (divisor = rtclock_sec_divisor)) * NSEC_PER_SEC;
+ abstime -= (t64 * divisor);
+ *result += (abstime * NSEC_PER_SEC) / divisor;
}
void
nanoseconds_to_absolutetime(
- uint64_t nanoseconds,
+ uint64_t nanosecs,
uint64_t *result)
{
- UnsignedWide t64;
- uint32_t t32;
- uint32_t numer, denom;
- spl_t s;
-
- LOCK_RTC(s);
- numer = rtclock.timebase_const.numer;
- denom = rtclock.timebase_const.denom;
- UNLOCK_RTC(s);
-
- UnsignedWide_to_scalar(&t64) = nanoseconds;
-
- umul_64by32(t64, denom, &t64, &t32);
+ uint64_t t64;
+ uint32_t divisor;
- udiv_96by32to64(t64, t32, numer, (void *)result);
+ *result = (t64 = nanosecs / NSEC_PER_SEC) *
+ (divisor = rtclock_sec_divisor);
+ nanosecs -= (t64 * NSEC_PER_SEC);
+ *result += (nanosecs * divisor) / NSEC_PER_SEC;
}
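
Both conversion directions now share the same split-by-whole-seconds shape; composing them round-trips exactly whenever NSEC_PER_SEC is a multiple of the divisor, and is off by under one timebase tick otherwise. A sketch, again with a hypothetical 25 MHz divisor:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC	1000000000ULL

static uint32_t divisor = 25000000;	/* hypothetical ticks/sec */

/* Same shape as absolutetime_to_nanoseconds(). */
static uint64_t abs_to_ns(uint64_t abstime)
{
	uint64_t t64 = abstime / divisor;
	return t64 * NSEC_PER_SEC +
	    ((abstime - t64 * divisor) * NSEC_PER_SEC) / divisor;
}

/* Same shape as nanoseconds_to_absolutetime(). */
static uint64_t ns_to_abs(uint64_t nanosecs)
{
	uint64_t t64 = nanosecs / NSEC_PER_SEC;
	return t64 * divisor +
	    ((nanosecs - t64 * NSEC_PER_SEC) * divisor) / NSEC_PER_SEC;
}

int main(void)
{
	uint64_t abstime = 1234567891ULL;

	/* Exact here, since 1e9 ns / 25e6 ticks is a whole 40 ns/tick. */
	printf("%llu -> %llu ns -> %llu\n",
	    (unsigned long long)abstime,
	    (unsigned long long)abs_to_ns(abstime),
	    (unsigned long long)ns_to_abs(abs_to_ns(abstime)));
	return 0;
}
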
/*
clock_interval_to_deadline(interval, scale_factor, &end);
do {
- clock_get_uptime(&now);
+ now = mach_absolute_time();
} while (now < end);
}
uint64_t now;
do {
- clock_get_uptime(&now);
+ now = mach_absolute_time();
} while (now < deadline);
}