/*
- * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* the cpu clock counted by the timestamp MSR.
*/
-#include <platforms.h>
-#include <mach_kdb.h>
#include <mach/mach_types.h>
#include <kern/misc_protos.h>
#include <kern/spl.h>
#include <kern/assert.h>
+#include <kern/timer_queue.h>
#include <mach/vm_prot.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h> /* for kernel_map */
-#include <i386/ipl.h>
-#include <i386/pit.h>
#include <architecture/i386/pio.h>
-#include <i386/misc_protos.h>
-#include <i386/proc_reg.h>
#include <i386/machine_cpu.h>
-#include <i386/mp.h>
#include <i386/cpuid.h>
-#include <i386/cpu_data.h>
#include <i386/cpu_threads.h>
-#include <i386/perfmon.h>
+#include <i386/mp.h>
#include <i386/machine_routines.h>
+#include <i386/pal_routines.h>
+#include <i386/proc_reg.h>
+#include <i386/misc_protos.h>
#include <pexpert/pexpert.h>
#include <machine/limits.h>
#include <machine/commpage.h>
#include <sys/kdebug.h>
#include <i386/tsc.h>
-#include <i386/hpet.h>
-#include <i386/rtclock.h>
-
-#define MAX(a,b) (((a)>(b))?(a):(b))
-#define MIN(a,b) (((a)>(b))?(b):(a))
-
-#define NSEC_PER_HZ (NSEC_PER_SEC / 100) /* nsec per tick */
-
+#include <i386/rtclock_protos.h>
#define UI_CPUFREQ_ROUNDING_FACTOR 10000000
-int rtclock_config(void);
-
int rtclock_init(void);
-uint64_t rtc_decrementer_min;
-
-void rtclock_intr(x86_saved_state_t *regs);
-static uint64_t maxDec; /* longest interval our hardware timer can handle (nsec) */
-
-/* XXX this should really be in a header somewhere */
-extern clock_timer_func_t rtclock_timer_expire;
+uint64_t tsc_rebase_abs_time = 0;
static void rtc_set_timescale(uint64_t cycles);
static uint64_t rtc_export_speed(uint64_t cycles);
-extern void rtc_nanotime_store(
- uint64_t tsc,
- uint64_t nsec,
- uint32_t scale,
- uint32_t shift,
- rtc_nanotime_t *dst);
-
-extern void rtc_nanotime_load(
- rtc_nanotime_t *src,
- rtc_nanotime_t *dst);
-
-rtc_nanotime_t rtc_nanotime_info;
-
-/*
- * tsc_to_nanoseconds:
- *
- * Basic routine to convert a raw 64 bit TSC value to a
- * 64 bit nanosecond value. The conversion is implemented
- * based on the scale factor and an implicit 32 bit shift.
- */
-static inline uint64_t
-_tsc_to_nanoseconds(uint64_t value)
-{
- asm volatile("movl %%edx,%%esi ;"
- "mull %%ecx ;"
- "movl %%edx,%%edi ;"
- "movl %%esi,%%eax ;"
- "mull %%ecx ;"
- "addl %%edi,%%eax ;"
- "adcl $0,%%edx "
- : "+A" (value) : "c" (rtc_nanotime_info.scale) : "esi", "edi");
-
- return (value);
-}
-
-uint64_t
-tsc_to_nanoseconds(uint64_t value)
-{
- return _tsc_to_nanoseconds(value);
-}
-
-static uint32_t
-deadline_to_decrementer(
- uint64_t deadline,
- uint64_t now)
-{
- uint64_t delta;
-
- if (deadline <= now)
- return rtc_decrementer_min;
- else {
- delta = deadline - now;
- return MIN(MAX(rtc_decrementer_min,delta),maxDec);
- }
-}
-
-static void
-rtc_lapic_start_ticking(void)
+void
+rtc_timer_start(void)
{
- uint64_t abstime;
- uint64_t first_tick;
- cpu_data_t *cdp = current_cpu_datap();
-
- abstime = mach_absolute_time();
- rtclock_tick_interval = NSEC_PER_HZ;
-
- first_tick = abstime + rtclock_tick_interval;
- cdp->rtclock_intr_deadline = first_tick;
-
/*
* Force a complete re-evaluation of timer deadlines.
*/
- cdp->rtcPop = EndOfAllTime;
- etimer_resync_deadlines();
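+	/*
+	 * Writing EndOfAllTime to rtcDeadline invalidates the recorded
+	 * hardware deadline, so the resync below (declared via the newly
+	 * added <kern/timer_queue.h>) reprograms the timer unconditionally.
+	 */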
+ x86_lcpu()->rtcDeadline = EndOfAllTime;
+ timer_resync_deadlines();
}
-/*
- * Configure the real-time clock device. Return success (1)
- * or failure (0).
- */
-
-int
-rtclock_config(void)
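+/*
+ * _absolutetime_to_microtime:
+ *
+ * Split an absolute time (nanoseconds on x86) into whole seconds and
+ * microseconds, returning the leftover nanoseconds within the current
+ * second so callers can reuse the remainder.
+ */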
+static inline uint32_t
+_absolutetime_to_microtime(uint64_t abstime, clock_sec_t *secs, clock_usec_t *microsecs)
{
- /* nothing to do */
- return (1);
+ uint32_t remain;
+ *secs = abstime / (uint64_t)NSEC_PER_SEC;
+ remain = (uint32_t)(abstime % (uint64_t)NSEC_PER_SEC);
+ *microsecs = remain / NSEC_PER_USEC;
+ return remain;
}
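+/*
+ * _absolutetime_to_nanotime:
+ *
+ * As above, but split into whole seconds and residual nanoseconds.
+ */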
+static inline void
+_absolutetime_to_nanotime(uint64_t abstime, clock_sec_t *secs, clock_usec_t *nanosecs)
+{
+ *secs = abstime / (uint64_t)NSEC_PER_SEC;
+ *nanosecs = (clock_usec_t)(abstime % (uint64_t)NSEC_PER_SEC);
+}
/*
- * Nanotime/mach_absolutime_time
+ * Nanotime/mach_absolute_time
* be guaranteed by the caller.
*/
static inline void
-rtc_nanotime_set_commpage(rtc_nanotime_t *rntp)
+rtc_nanotime_set_commpage(pal_rtc_nanotime_t *rntp)
{
commpage_set_nanotime(rntp->tsc_base, rntp->ns_base, rntp->scale, rntp->shift);
}
/*
* rtc_nanotime_init:
*
- * Intialize the nanotime info from the base time. Since
- * the base value might be from a lower resolution clock,
- * we compare it to the TSC derived value, and use the
- * greater of the two values.
+ * Initialize the nanotime info from the base time.
*/
static inline void
-_rtc_nanotime_init(rtc_nanotime_t *rntp, uint64_t base)
+_rtc_nanotime_init(pal_rtc_nanotime_t *rntp, uint64_t base)
{
- uint64_t nsecs, tsc = rdtsc64();
+ uint64_t tsc = rdtsc64();
- nsecs = _tsc_to_nanoseconds(tsc);
- rtc_nanotime_store(tsc, MAX(nsecs, base), rntp->scale, rntp->shift, rntp);
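+	/*
+	 * Publish the (tsc, base) pair; _pal_rtc_nanotime_store() is assumed
+	 * to bump the structure's generation count so that concurrent readers
+	 * never observe a torn update.
+	 */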
+ _pal_rtc_nanotime_store(tsc, base, rntp->scale, rntp->shift, rntp);
}
-static void
+void
rtc_nanotime_init(uint64_t base)
{
- rtc_nanotime_t *rntp = &rtc_nanotime_info;
-
- _rtc_nanotime_init(rntp, base);
- rtc_nanotime_set_commpage(rntp);
+ _rtc_nanotime_init(&pal_rtc_nanotime_info, base);
+ rtc_nanotime_set_commpage(&pal_rtc_nanotime_info);
}
/*
- * rtc_nanotime_init:
+ * rtc_nanotime_init_commpage:
*
* Call back from the commpage initialization to
* cause the commpage data to be filled in once the
{
spl_t s = splclock();
- rtc_nanotime_set_commpage(&rtc_nanotime_info);
-
+ rtc_nanotime_set_commpage(&pal_rtc_nanotime_info);
splx(s);
}
/*
- * rtc_nanotime_update:
- *
- * Update the nanotime info from the base time. Since
- * the base value might be from a lower resolution clock,
- * we compare it to the TSC derived value, and use the
- * greater of the two values.
+ * rtc_nanotime_read:
*
- * N.B. In comparison to the above init routine, this assumes
- * that the TSC has remained monotonic compared to the tsc_base
- * value, which is not the case after S3 sleep.
+ * Returns the current nanotime value, accessible from any
+ * context.
*/
-static inline void
-_rtc_nanotime_update(rtc_nanotime_t *rntp, uint64_t base)
+static inline uint64_t
+rtc_nanotime_read(void)
{
- uint64_t nsecs, tsc = rdtsc64();
-
- nsecs = rntp->ns_base + _tsc_to_nanoseconds(tsc - rntp->tsc_base);
- rtc_nanotime_store(tsc, MAX(nsecs, base), rntp->scale, rntp->shift, rntp);
-}
-
-static void
-rtc_nanotime_update(
- uint64_t base)
-{
- rtc_nanotime_t *rntp = &rtc_nanotime_info;
-
- assert(!ml_get_interrupts_enabled());
-
- _rtc_nanotime_update(rntp, base);
- rtc_nanotime_set_commpage(rntp);
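+	/*
+	 * The lock-free retry loop formerly here (reloading until tsc_base
+	 * is stable) is now expected to live inside the PAL's
+	 * _rtc_nanotime_read().
+	 */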
+ return _rtc_nanotime_read(&pal_rtc_nanotime_info);
}
/*
- * rtc_nanotime_read:
+ * rtc_clock_napped:
*
- * Returns the current nanotime value, accessable from any
- * context.
+ * Invoked from power management when we exit from a low C-State (>= C4)
+ * and the TSC has stopped counting. The nanotime data is updated using
+ * the provided nanotime base and TSC base values.
*/
-static uint64_t
-rtc_nanotime_read(void)
+void
+rtc_clock_napped(uint64_t base, uint64_t tsc_base)
{
- rtc_nanotime_t rnt, *rntp = &rtc_nanotime_info;
- uint64_t result;
-
- do {
- rtc_nanotime_load(rntp, &rnt);
- result = rnt.ns_base + _tsc_to_nanoseconds(rdtsc64() - rnt.tsc_base);
- } while (rntp->tsc_base != rnt.tsc_base);
+ pal_rtc_nanotime_t *rntp = &pal_rtc_nanotime_info;
+ uint64_t oldnsecs;
+ uint64_t newnsecs;
+ uint64_t tsc;
- return (result);
+ assert(!ml_get_interrupts_enabled());
+ tsc = rdtsc64();
+ oldnsecs = rntp->ns_base + _rtc_tsc_to_nanoseconds(tsc - rntp->tsc_base, rntp);
+ newnsecs = base + _rtc_tsc_to_nanoseconds(tsc - tsc_base, rntp);
+
+ /*
+ * Only update the base values if time using the new base values
+ * is later than the time using the old base values.
+ */
+ if (oldnsecs < newnsecs) {
+ _pal_rtc_nanotime_store(tsc_base, base, rntp->scale, rntp->shift, rntp);
+ rtc_nanotime_set_commpage(rntp);
+ }
}
/*
- * rtc_clock_napped:
- *
- * Invoked from power manangement when we have awoken from a nap (C3/C4)
- * during which the TSC lost counts. The nanotime data is updated according
- * to the provided nanosecond base value.
- *
- * The caller must guarantee non-reentrancy.
+ * Invoked from power management to correct the SFLM TSC entry drift problem:
+ * a small delta is added to the tsc_base. This is equivalent to nudging
+ * time backwards. We require the delta to be on the order of a TSC
+ * quantum, which won't cause callers of mach_absolute_time() to see time
+ * going backwards!
*/
void
-rtc_clock_napped(
- uint64_t base)
+rtc_clock_adjust(uint64_t tsc_base_delta)
{
- rtc_nanotime_update(base);
+ pal_rtc_nanotime_t *rntp = &pal_rtc_nanotime_info;
+
+ assert(!ml_get_interrupts_enabled());
+ assert(tsc_base_delta < 100ULL); /* i.e. it's small */
+ _rtc_nanotime_adjust(tsc_base_delta, rntp);
+ rtc_nanotime_set_commpage(rntp);
}
void
rtc_clock_stepped(__unused uint32_t new_frequency,
__unused uint32_t old_frequency)
{
- panic("rtc_clock_stepping unsupported");
+ panic("rtc_clock_stepped unsupported");
}
/*
* rtc_sleep_wakeup:
*
- * Invoked from power manageent when we have awoken from a sleep (S3)
- * and the TSC has been reset. The nanotime data is updated based on
- * the HPET value.
+ * Invoked from power management when we have awoken from a sleep (S3)
+ * and the TSC has been reset, or from Deep Idle (S0) sleep when the TSC
+ * has progressed. The nanotime data is updated based on the passed-in value.
*
* The caller must guarantee non-reentrancy.
*/
void
-rtc_sleep_wakeup(void)
+rtc_sleep_wakeup(
+ uint64_t base)
{
- boolean_t istate;
-
- istate = ml_set_interrupts_enabled(FALSE);
+ /* Set fixed configuration for lapic timers */
+ rtc_timer->rtc_config();
/*
* Reset nanotime.
* The timestamp counter will have been reset
* but nanotime (uptime) marches onward.
*/
- rtc_nanotime_init(tmrCvt(rdHPET(), hpetCvtt2n));
-
- /* Restart tick interrupts from the LAPIC timer */
- rtc_lapic_start_ticking();
+ rtc_nanotime_init(base);
+}
- ml_set_interrupts_enabled(istate);
+void
+rtc_decrementer_configure(void)
+{
+ rtc_timer->rtc_config();
+}
+/*
+ * rtclock_early_init() is called very early at boot to
+ * establish mach_absolute_time() and set it to zero.
+ */
+void
+rtclock_early_init(void)
+{
+ assert(tscFreq);
+ rtc_set_timescale(tscFreq);
}
/*
if (cpu_number() == master_cpu) {
assert(tscFreq);
- rtc_set_timescale(tscFreq);
/*
* Adjust and set the exported cpu speed.
gPEClockFrequencyInfo.cpu_frequency_min_hz = cycles;
gPEClockFrequencyInfo.cpu_frequency_max_hz = cycles;
- /*
- * Compute the longest interval we can represent.
- */
- maxDec = tmrCvt(0x7fffffffULL, busFCvtt2n);
- kprintf("maxDec: %lld\n", maxDec);
-
- /* Minimum interval is 1usec */
- rtc_decrementer_min = deadline_to_decrementer(NSEC_PER_USEC, 0ULL);
- /* Point LAPIC interrupts to hardclock() */
- lapic_set_timer_func((i386_intr_func_t) rtclock_intr);
-
+ rtc_timer_init();
clock_timebase_init();
ml_init_lock_timeout();
+ ml_init_delay_spin_threshold(10);
}
- rtc_lapic_start_ticking();
+ /* Set fixed configuration for lapic timers */
+ rtc_timer->rtc_config();
+ rtc_timer_start();
return (1);
}
static void
rtc_set_timescale(uint64_t cycles)
{
- rtc_nanotime_info.scale = ((uint64_t)NSEC_PER_SEC << 32) / cycles;
- rtc_nanotime_info.shift = 32;
+ pal_rtc_nanotime_t *rntp = &pal_rtc_nanotime_info;
+ uint32_t shift = 0;
+
+	/* The "scale" factor will overflow unless cycles > SLOW_TSC_THRESHOLD. */
+
+	while (cycles <= SLOW_TSC_THRESHOLD) {
+ shift++;
+ cycles <<= 1;
+ }
+
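+	/*
+	 * Readers compute, roughly, ns = ns_base +
+	 * ((((tsc - tsc_base) << shift) * scale) >> 32). Worked example for
+	 * a hypothetical 2 GHz TSC: shift = 0 and
+	 * scale = (10^9 << 32) / (2 * 10^9) = 2^31, so the multiply-and-shift
+	 * halves the cycle count, i.e. two cycles per nanosecond.
+	 */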
+ rntp->scale = (uint32_t)(((uint64_t)NSEC_PER_SEC << 32) / cycles);
+
+ rntp->shift = shift;
+
+ /*
+ * On some platforms, the TSC is not reset at warm boot. But the
+ * rebase time must be relative to the current boot so we can't use
+ * mach_absolute_time(). Instead, we convert the TSC delta since boot
+ * to nanoseconds.
+ */
+ if (tsc_rebase_abs_time == 0)
+ tsc_rebase_abs_time = _rtc_tsc_to_nanoseconds(
+ rdtsc64() - tsc_at_boot, rntp);
rtc_nanotime_init(0);
}
static uint64_t
rtc_export_speed(uint64_t cyc_per_sec)
{
+ pal_rtc_nanotime_t *rntp = &pal_rtc_nanotime_info;
uint64_t cycles;
+ if (rntp->shift != 0)
+ printf("Slow TSC, rtc_nanotime.shift == %d\n", rntp->shift);
+
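+	/*
+	 * Rounds to the nearest multiple of UI_CPUFREQ_ROUNDING_FACTOR
+	 * (10 MHz): e.g. a measured 2,394,123,456 Hz is exported as
+	 * 2,390,000,000 Hz.
+	 */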
/* Round: */
cycles = ((cyc_per_sec + (UI_CPUFREQ_ROUNDING_FACTOR/2))
/ UI_CPUFREQ_ROUNDING_FACTOR)
void
clock_get_system_microtime(
- uint32_t *secs,
- uint32_t *microsecs)
+ clock_sec_t *secs,
+ clock_usec_t *microsecs)
{
uint64_t now = rtc_nanotime_read();
- uint32_t remain;
-
- asm volatile(
- "divl %3"
- : "=a" (*secs), "=d" (remain)
- : "A" (now), "r" (NSEC_PER_SEC));
- asm volatile(
- "divl %3"
- : "=a" (*microsecs)
- : "0" (remain), "d" (0), "r" (NSEC_PER_USEC));
+
+ _absolutetime_to_microtime(now, secs, microsecs);
}
void
clock_get_system_nanotime(
- uint32_t *secs,
- uint32_t *nanosecs)
+ clock_sec_t *secs,
+ clock_nsec_t *nanosecs)
{
uint64_t now = rtc_nanotime_read();
- asm volatile(
- "divl %3"
- : "=a" (*secs), "=d" (*nanosecs)
- : "A" (now), "r" (NSEC_PER_SEC));
+ _absolutetime_to_nanotime(now, secs, nanosecs);
}
void
-clock_gettimeofday_set_commpage(
- uint64_t abstime,
- uint64_t epoch,
- uint64_t offset,
- uint32_t *secs,
- uint32_t *microsecs)
+clock_gettimeofday_set_commpage(uint64_t abstime, uint64_t sec, uint64_t frac, uint64_t scale, uint64_t tick_per_sec)
{
- uint64_t now = abstime;
- uint32_t remain;
-
- now += offset;
-
- asm volatile(
- "divl %3"
- : "=a" (*secs), "=d" (remain)
- : "A" (now), "r" (NSEC_PER_SEC));
- asm volatile(
- "divl %3"
- : "=a" (*microsecs)
- : "0" (remain), "d" (0), "r" (NSEC_PER_USEC));
-
- *secs += epoch;
-
- commpage_set_timestamp(abstime - remain, *secs, NSEC_PER_SEC);
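+	/*
+	 * No conversion is performed here any more: the caller now supplies
+	 * the seconds, fractional part, scale, and tick rate, and this shim
+	 * simply forwards them to the commpage.
+	 */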
+ commpage_set_timestamp(abstime, sec, frac, scale, tick_per_sec);
}
void
info->numer = info->denom = 1;
}
-void
-clock_set_timer_func(
- clock_timer_func_t func)
-{
- if (rtclock_timer_expire == NULL)
- rtclock_timer_expire = func;
-}
-
/*
* Real-time clock device interrupt.
*/
{
uint64_t rip;
boolean_t user_mode = FALSE;
- uint64_t abstime;
- uint32_t latency;
- cpu_data_t *pp = current_cpu_datap();
assert(get_preemption_level() > 0);
assert(!ml_get_interrupts_enabled());
- abstime = rtc_nanotime_read();
- latency = (uint32_t) abstime - pp->rtcPop;
-
if (is_saved_state64(tregs) == TRUE) {
x86_saved_state64_t *regs;
regs = saved_state64(tregs);
- user_mode = TRUE;
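+		/*
+		 * The low two bits of the CS selector are the privilege
+		 * level; nonzero means the timer interrupted user mode.
+		 */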
+ if (regs->isf.cs & 0x03)
+ user_mode = TRUE;
rip = regs->isf.rip;
} else {
x86_saved_state32_t *regs;
rip = regs->eip;
}
- /* Log the interrupt service latency (-ve value expected by tool) */
- KERNEL_DEBUG_CONSTANT(
- MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) | DBG_FUNC_NONE,
- -latency, (uint32_t)rip, user_mode, 0, 0);
-
-	/* call the generic etimer */
+	/* call the generic timer */
- etimer_intr(user_mode, rip);
+ timer_intr(user_mode, rip);
}
+
/*
* Request timer pop from the hardware
*/
-int
-setPop(
- uint64_t time)
-{
- uint64_t now;
- uint32_t decr;
- uint64_t count;
-
- now = rtc_nanotime_read(); /* The time in nanoseconds */
- decr = deadline_to_decrementer(time, now);
-
- count = tmrCvt(decr, busFCvtn2t);
- lapic_set_timer(TRUE, one_shot, divide_by_1, (uint32_t) count);
-
- return decr; /* Pass back what we set */
-}
-
-
-void
-resetPop(void)
+uint64_t
+setPop(uint64_t time)
{
uint64_t now;
- uint32_t decr;
- uint64_t count;
- cpu_data_t *cdp = current_cpu_datap();
+ uint64_t pop;
- now = rtc_nanotime_read();
+ /* 0 and EndOfAllTime are special-cases for "clear the timer" */
+ if (time == 0 || time == EndOfAllTime) {
+ time = EndOfAllTime;
+ now = 0;
+ pop = rtc_timer->rtc_set(0, 0);
+ } else {
+ now = rtc_nanotime_read(); /* The time in nanoseconds */
+ pop = rtc_timer->rtc_set(time, now);
+ }
- decr = deadline_to_decrementer(cdp->rtcPop, now);
+ /* Record requested and actual deadlines set */
+ x86_lcpu()->rtcDeadline = time;
+ x86_lcpu()->rtcPop = pop;
- count = tmrCvt(decr, busFCvtn2t);
- lapic_set_timer(TRUE, one_shot, divide_by_1, (uint32_t)count);
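+	/* Return the delay until the pop actually programmed, or 0 if cleared */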
+ return pop - now;
}
-
uint64_t
mach_absolute_time(void)
{
return rtc_nanotime_read();
}
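+/*
+ * mach_approximate_time() returns the same value as mach_absolute_time()
+ * here, presumably because x86 has no cheaper low-resolution timebase
+ * worth reading instead.
+ */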
+uint64_t
+mach_approximate_time(void)
+{
+ return rtc_nanotime_read();
+}
+
void
clock_interval_to_absolutetime_interval(
uint32_t interval,
void
absolutetime_to_microtime(
uint64_t abstime,
- uint32_t *secs,
- uint32_t *microsecs)
-{
- uint32_t remain;
-
- asm volatile(
- "divl %3"
- : "=a" (*secs), "=d" (remain)
- : "A" (abstime), "r" (NSEC_PER_SEC));
- asm volatile(
- "divl %3"
- : "=a" (*microsecs)
- : "0" (remain), "d" (0), "r" (NSEC_PER_USEC));
-}
-
-void
-absolutetime_to_nanotime(
- uint64_t abstime,
- uint32_t *secs,
- uint32_t *nanosecs)
+ clock_sec_t *secs,
+ clock_usec_t *microsecs)
{
- asm volatile(
- "divl %3"
- : "=a" (*secs), "=d" (*nanosecs)
- : "A" (abstime), "r" (NSEC_PER_SEC));
+ _absolutetime_to_microtime(abstime, secs, microsecs);
}
void
nanotime_to_absolutetime(
- uint32_t secs,
- uint32_t nanosecs,
+ clock_sec_t secs,
+ clock_nsec_t nanosecs,
uint64_t *result)
{
*result = ((uint64_t)secs * NSEC_PER_SEC) + nanosecs;
void
machine_delay_until(
+ uint64_t interval,
uint64_t deadline)
{
- uint64_t now;
-
- do {
+ (void)interval;
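+	/* interval is unused; spin with pipeline-friendly pauses until deadline */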
+ while (mach_absolute_time() < deadline) {
cpu_pause();
- now = mach_absolute_time();
- } while (now < deadline);
+ }
}