+ uint32_t epoch, microepoch;
+ uint64_t now, t64;
+ spl_t s = splclock();
+
+ simple_lock(&rtclock_lock);
+
+ if (rtclock_calend.adjdelta >= 0) {
+ uint32_t divisor;
+
+ now = mach_absolute_time();
+
+ epoch = rtclock_calend.epoch;
+ microepoch = rtclock_calend.microepoch;
+
+ simple_unlock(&rtclock_lock);
+
+ *secs = t64 = now / (divisor = rtclock_sec_divisor);
+ now -= (t64 * divisor);
+ *microsecs = (now * USEC_PER_SEC) / divisor;
+
+ TIME_ADD(*secs, epoch, *microsecs, microepoch, USEC_PER_SEC);
+ }
+ else {
+ uint32_t delta, t32;
+
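+ /*
+ * The calendar is being slowed: epoch/microepoch hold the calendar
+ * value latched when the slowdown began (at absolute time epoch1),
+ * not an offset from the system clock, and the calendar only
+ * advances once more than "delta" microseconds of absolute time
+ * have elapsed since epoch1.
+ */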
+ delta = -rtclock_calend.adjdelta;
+
+ now = mach_absolute_time();
+
+ *secs = rtclock_calend.epoch;
+ *microsecs = rtclock_calend.microepoch;
+
+ if (now > rtclock_calend.epoch1) {
+ t64 = now - rtclock_calend.epoch1;
+
+ t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;
+
+ if (t32 > delta)
+ TIME_ADD(*secs, 0, *microsecs, (t32 - delta), USEC_PER_SEC);
+ }
+
+ simple_unlock(&rtclock_lock);
+ }
+
+ splx(s);
+}
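+
+/*
+ * For reference: TIME_ADD() and TIME_SUB() are assumed to add or
+ * subtract a (seconds, fraction) pair into a running (seconds,
+ * fraction) pair, propagating a single carry or borrow at "unit".
+ * Roughly (a sketch for illustration only, not part of this change):
+ *
+ *	if (((rfrac) += (frac)) >= (unit)) {
+ *		(rfrac) -= (unit);
+ *		(rsecs) += 1;
+ *	}
+ *	(rsecs) += (secs);
+ */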
+
+/*
+ * This is only called from the gettimeofday() syscall.  As a side
+ * effect, it updates the commpage timestamp.  Otherwise it is
+ * identical to clock_get_calendar_microtime().  Because most
+ * gettimeofday() calls are handled by the commpage in user mode,
+ * this routine should be called infrequently except when the
+ * clock is being slowed down.
+ */
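+/*
+ * For reference, a rough sketch (not part of this change) of how a
+ * user-mode gettimeofday() fast path could consume the timestamp
+ * published below via commpage_set_timestamp(); the commpage field
+ * names are illustrative only:
+ *
+ *	elapsed = mach_absolute_time() - cp_abs_base;
+ *	secs    = cp_secs_base + elapsed / cp_divisor;
+ *	usecs   = cp_usecs_base +
+ *			((elapsed % cp_divisor) * USEC_PER_SEC) / cp_divisor;
+ *	if (usecs >= USEC_PER_SEC) {
+ *		usecs -= USEC_PER_SEC;
+ *		secs++;
+ *	}
+ */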
+void
+clock_gettimeofday(
+ uint32_t *secs_p,
+ uint32_t *microsecs_p)
+{
+ uint32_t epoch, microepoch;
+ uint32_t secs, microsecs;
+ uint64_t now, t64, secs_64, usec_64;
+ spl_t s = splclock();
+
+ simple_lock(&rtclock_lock);
+
+ if (rtclock_calend.adjdelta >= 0) {
+ now = mach_absolute_time();
+
+ epoch = rtclock_calend.epoch;
+ microepoch = rtclock_calend.microepoch;
+
+ secs = secs_64 = now / rtclock_sec_divisor;
+ t64 = now - (secs_64 * rtclock_sec_divisor);
+ microsecs = usec_64 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;
+
+ TIME_ADD(secs, epoch, microsecs, microepoch, USEC_PER_SEC);
+
+ /* adjust "now" to be absolute time at _start_ of usecond */
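+ /*
+ * t64 is the timebase remainder past the last whole second, and
+ * (usec_64 * divisor) / USEC_PER_SEC is that remainder truncated to
+ * whole microseconds; their difference is the sub-microsecond
+ * residue, so backing it out makes "now" correspond exactly to the
+ * (secs, microsecs) pair published to the commpage below.
+ */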
+ now -= t64 - ((usec_64 * rtclock_sec_divisor) / USEC_PER_SEC);
+
+ commpage_set_timestamp(now,secs,microsecs,rtclock_sec_divisor);
+ }
+ else {
+ uint32_t delta, t32;
+
+ delta = -rtclock_calend.adjdelta;
+
+ now = mach_absolute_time();
+
+ secs = rtclock_calend.epoch;
+ microsecs = rtclock_calend.microepoch;
+
+ if (now > rtclock_calend.epoch1) {
+ t64 = now - rtclock_calend.epoch1;
+
+ t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;
+
+ if (t32 > delta)
+ TIME_ADD(secs, 0, microsecs, (t32 - delta), USEC_PER_SEC);
+ }
+
+ /* no need to disable timestamp, it is already off */
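+ /*
+ * (It was presumably disabled when the slowdown began; with the
+ * commpage timestamp off, user-mode gettimeofday() falls through
+ * to this syscall path and so observes the slowed calendar.)
+ */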
+ }
+
+ simple_unlock(&rtclock_lock);
+ splx(s);
+
+ *secs_p = secs;
+ *microsecs_p = microsecs;
+}
+
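+/*
+ * Note: the calendar state is kept at microsecond resolution, so the
+ * nanosecond value returned here is always a whole multiple of
+ * NSEC_PER_USEC.
+ */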
+void
+clock_get_calendar_nanotime(
+ uint32_t *secs,
+ uint32_t *nanosecs)
+{
+ uint32_t epoch, nanoepoch;
+ uint64_t now, t64;
+ spl_t s = splclock();
+
+ simple_lock(&rtclock_lock);
+
+ if (rtclock_calend.adjdelta >= 0) {
+ uint32_t divisor;
+
+ now = mach_absolute_time();
+
+ epoch = rtclock_calend.epoch;
+ nanoepoch = rtclock_calend.microepoch * NSEC_PER_USEC;
+
+ simple_unlock(&rtclock_lock);
+
+ *secs = t64 = now / (divisor = rtclock_sec_divisor);
+ now -= (t64 * divisor);
+ *nanosecs = ((now * USEC_PER_SEC) / divisor) * NSEC_PER_USEC;
+
+ TIME_ADD(*secs, epoch, *nanosecs, nanoepoch, NSEC_PER_SEC);
+ }
+ else {
+ uint32_t delta, t32;
+
+ delta = -rtclock_calend.adjdelta;
+
+ now = mach_absolute_time();
+
+ *secs = rtclock_calend.epoch;
+ *nanosecs = rtclock_calend.microepoch * NSEC_PER_USEC;
+
+ if (now > rtclock_calend.epoch1) {
+ t64 = now - rtclock_calend.epoch1;
+
+ t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;
+
+ if (t32 > delta)
+ TIME_ADD(*secs, 0, *nanosecs, ((t32 - delta) * NSEC_PER_USEC), NSEC_PER_SEC);
+ }
+
+ simple_unlock(&rtclock_lock);
+ }
+
+ splx(s);
+}
+
+void
+clock_set_calendar_microtime(
+ uint32_t secs,
+ uint32_t microsecs)
+{
+ uint32_t sys, microsys;
+ uint32_t newsecs;
+ spl_t s;
+
+ /* round to the nearest whole second for the platform clock */
+ newsecs = (microsecs < USEC_PER_SEC / 2)?
+ secs: secs + 1;
+
+ s = splclock();
+ simple_lock(&rtclock_lock);
+
+ commpage_set_timestamp(0,0,0,0);
+
+ /*
+ * Cancel any adjustment in progress.
+ */
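+ /*
+ * While the clock is being slowed, epoch/microepoch hold the
+ * calendar value latched at epoch1 rather than an offset from the
+ * system clock.  To cancel the adjustment, first compute the
+ * current effective calendar time (the latched value plus any
+ * elapsed time beyond "delta"), then re-express it as an epoch by
+ * subtracting the current system uptime.
+ */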
+ if (rtclock_calend.adjdelta < 0) {
+ uint64_t now, t64;
+ uint32_t delta, t32;
+
+ delta = -rtclock_calend.adjdelta;
+
+ sys = rtclock_calend.epoch;
+ microsys = rtclock_calend.microepoch;
+
+ now = mach_absolute_time();
+
+ if (now > rtclock_calend.epoch1)
+ t64 = now - rtclock_calend.epoch1;
+ else
+ t64 = 0;
+
+ t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;
+
+ if (t32 > delta)
+ TIME_ADD(sys, 0, microsys, (t32 - delta), USEC_PER_SEC);
+
+ rtclock_calend.epoch = sys;
+ rtclock_calend.microepoch = microsys;
+
+ sys = t64 = now / rtclock_sec_divisor;
+ now -= (t64 * rtclock_sec_divisor);
+ microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;
+
+ TIME_SUB(rtclock_calend.epoch, sys, rtclock_calend.microepoch, microsys, USEC_PER_SEC);
+ }
+
+ rtclock_calend.epoch1 = 0;
+ rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
+
+ /*
+ * Calculate the new calendar epoch based on
+ * the new value and the system clock.
+ */
+ clock_get_system_microtime(&sys, &microsys);
+ TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);
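+ /*
+ * "secs"/"microsecs" now hold the requested calendar time minus the
+ * current system uptime, i.e. the new epoch: adding the system
+ * clock back to it reproduces the requested calendar time.
+ */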
+
+ /*
+ * Adjust the boottime based on the delta: the difference between
+ * the new and old epoch is the amount by which the calendar has
+ * just been shifted, and the recorded boot time shifts with it.
+ */
+ rtclock_boottime += secs - rtclock_calend.epoch;
+
+ /*
+ * Set the new calendar epoch.
+ */
+ rtclock_calend.epoch = secs;
+ rtclock_calend.microepoch = microsecs;
+
+ simple_unlock(&rtclock_lock);
+
+ /*
+ * Set the new value for the platform clock.
+ */
+ PESetGMTTimeOfDay(newsecs);
+
+ splx(s);
+
+ /*
+ * Send host notifications.
+ */
+ host_notify_calendar_change();
+}
+
+#define tickadj (40) /* "standard" skew, us / tick */
+#define bigadj (USEC_PER_SEC) /* use 10x skew above bigadj us */
+
+uint32_t
+clock_set_calendar_adjtime(
+ int32_t *secs,
+ int32_t *microsecs)
+{
+ int64_t total, ototal;
+ uint32_t interval = 0;
+ spl_t s;
+
+ total = (int64_t)*secs * USEC_PER_SEC + *microsecs;