git.saurik.com Git - apple/xnu.git/blobdiff - osfmk/kern/clock.c (xnu-2782.40.9)
index b9d0a075baa9da9c1fd2ff7e1252e105533335b7..1bd578496eebf317883bf4ded4492c61e978fc90 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
@@ -33,7 +33,6 @@
 
 #include <mach/mach_types.h>
 
-#include <kern/lock.h>
 #include <kern/spl.h>
 #include <kern/sched_prim.h>
 #include <kern/thread.h>
 #include <mach/mach_traps.h>
 #include <mach/mach_time.h>
 
-decl_simple_lock_data(static,clock_lock)
+uint32_t       hz_tick_interval = 1;
+
+
+decl_simple_lock_data(,clock_lock)
+
+#define clock_lock()   \
+       simple_lock(&clock_lock)
+
+#define clock_unlock() \
+       simple_unlock(&clock_lock)
+
+#define clock_lock_init()      \
+       simple_lock_init(&clock_lock, 0)
+
 
 /*
  *     Time of day (calendar) variables.
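
The new clock_lock()/clock_unlock() wrappers above standardize how the calendar state is protected. A minimal sketch of the intended discipline, assuming the usual spl/simple-lock kernel primitives (illustrative only, not part of the diff):

    spl_t   s;

    s = splclock();         /* mask clock interrupts on this CPU */
    clock_lock();           /* expands to simple_lock(&clock_lock) */

    /* ... read or update clock_calend here ... */

    clock_unlock();         /* expands to simple_unlock(&clock_lock) */
    splx(s);                /* restore the previous interrupt level */

Every routine below that touches clock_calend follows this pattern.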
@@ -59,9 +71,34 @@ decl_simple_lock_data(static,clock_lock)
  *     where CONV converts absolute time units into seconds and a fraction.
  */
 static struct clock_calend {
-       uint64_t                        epoch;
-       uint64_t                        offset;
-}                                      clock_calend;
+       uint64_t        epoch;
+       uint64_t        offset;
+
+       int32_t         adjdelta;       /* Nanosecond time delta for this adjustment period */
+       uint64_t        adjstart;       /* Absolute time value for start of this adjustment period */
+       uint32_t        adjoffset;      /* Absolute time offset for this adjustment period as absolute value */
+} clock_calend;
+
+#if    CONFIG_DTRACE
+
+/*
+ *     Unlocked calendar flipflop; this is used to track a clock_calend such
+ *     that we can safely access a snapshot of a valid  clock_calend structure
+ *     without needing to take any locks to do it.
+ *
+ *     The trick is to use a generation count and set the low bit when it is
+ *     being updated/read; by doing this, we guarantee, through use of the
+ *     hw_atomic functions, that the generation is incremented when the bit
+ *     is cleared atomically (by using a 1 bit add).
+ */
+static struct unlocked_clock_calend {
+       struct clock_calend     calend;         /* copy of calendar */
+       uint32_t                gen;            /* generation count */
+} flipflop[ 2];
+
+static void clock_track_calend_nowait(void);
+
+#endif
 
 /*
  *     Calendar adjustment variables and values.
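
The flipflop comment above describes a seqlock-style protocol: the writer sets the low bit of the generation count while it copies the calendar, and a reader retries until it sees an even, unchanged count; the real reader and writer (clock_get_calendar_nanotime_nowait() and clock_track_calend_nowait()) appear at the end of this diff. A simplified single-buffer sketch of the same idea using C11 atomics in place of the kernel's hw_atomic calls (illustrative only; it glosses over the formal data-race rules that the kernel version handles with its own barriers):

    #include <stdatomic.h>
    #include <stdint.h>

    struct snap { uint64_t epoch, offset; };

    static struct snap shared;          /* data guarded by the generation count */
    static _Atomic uint32_t gen;        /* even = stable, odd = update in progress */

    static void
    writer_update(struct snap v)
    {
            atomic_fetch_or(&gen, 1);   /* flag "update in progress" */
            shared = v;
            atomic_fetch_add(&gen, 1);  /* clear the low bit, bump the generation */
    }

    static struct snap
    reader_snapshot(void)
    {
            struct snap v;
            uint32_t g;

            do {
                    g = atomic_load(&gen) & ~(uint32_t)1;   /* generation without the busy bit */
                    v = shared;
            } while (atomic_load(&gen) != g);               /* retry if a writer was active */

            return v;
    }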
@@ -70,21 +107,16 @@ static struct clock_calend {
 #define calend_adjskew         (40 * NSEC_PER_USEC)    /* "standard" skew, ns / period */
 #define        calend_adjbig           (NSEC_PER_SEC)                  /* use 10x skew above adjbig ns */
 
-static uint64_t                        calend_adjstart;                /* Absolute time value for start of this adjustment period */
-static uint32_t                        calend_adjoffset;               /* Absolute time offset for this adjustment period as absolute value */
-
-static int32_t                 calend_adjdelta;                /* Nanosecond time delta for this adjustment period */
-static int64_t                 calend_adjtotal;                /* Nanosecond remaining total adjustment */
-
-static uint64_t                        calend_adjdeadline;             /* Absolute time value for next adjustment period */
-static uint32_t                        calend_adjinterval;             /* Absolute time interval of adjustment period */
+static int64_t                         calend_adjtotal;                /* Nanosecond remaining total adjustment */
+static uint64_t                                calend_adjdeadline;             /* Absolute time value for next adjustment period */
+static uint32_t                                calend_adjinterval;             /* Absolute time interval of adjustment period */
 
 static timer_call_data_t       calend_adjcall;
 static uint32_t                                calend_adjactive;
 
 static uint32_t                calend_set_adjustment(
-                                               int32_t                 *secs,
-                                               int32_t                 *microsecs);
+                                               long                    *secs,
+                                               int                             *microsecs);
 
 static void                    calend_adjust_call(void);
 static uint32_t                calend_adjust(void);
@@ -93,6 +125,9 @@ static thread_call_data_t    calend_wakecall;
 
 extern void    IOKitResetTime(void);
 
+void _clock_delay_until_deadline(uint64_t              interval,
+                                                                uint64_t               deadline);
+
 static uint64_t                clock_boottime;                         /* Seconds boottime epoch */
 
 #define TIME_ADD(rsecs, secs, rfrac, frac, unit)       \
@@ -106,7 +141,7 @@ MACRO_END
 
 #define TIME_SUB(rsecs, secs, rfrac, frac, unit)       \
 MACRO_BEGIN                                                                                    \
-       if ((int32_t)((rfrac) -= (frac)) < 0) {                 \
+       if ((int)((rfrac) -= (frac)) < 0) {                             \
                (rfrac) += (unit);                                                      \
                (rsecs) -= 1;                                                           \
        }                                                                                               \
@@ -121,17 +156,12 @@ MACRO_END
 void
 clock_config(void)
 {
-       simple_lock_init(&clock_lock, 0);
+       clock_lock_init();
 
        timer_call_setup(&calend_adjcall, (timer_call_func_t)calend_adjust_call, NULL);
        thread_call_setup(&calend_wakecall, (thread_call_func_t)IOKitResetTime, NULL);
 
        clock_oldconfig();
-
-       /*
-        * Initialize the timer callouts.
-        */
-       timer_call_initialize();
 }
 
 /*
@@ -159,7 +189,10 @@ clock_timebase_init(void)
        uint64_t        abstime;
 
        nanoseconds_to_absolutetime(calend_adjperiod, &abstime);
-       calend_adjinterval = abstime;
+       calend_adjinterval = (uint32_t)abstime;
+
+       nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
+       hz_tick_interval = (uint32_t)abstime;
 
        sched_timebase_init();
 }
@@ -195,27 +228,55 @@ mach_timebase_info_trap(
  */
 void
 clock_get_calendar_microtime(
-       uint32_t                        *secs,
-       uint32_t                        *microsecs)
+       clock_sec_t                     *secs,
+       clock_usec_t            *microsecs)
+{
+       clock_get_calendar_absolute_and_microtime(secs, microsecs, NULL);
+}
+
+/*
+ *     clock_get_calendar_absolute_and_microtime:
+ *
+ *     Returns the current calendar value,
+ *     microseconds as the fraction. Also
+ *     returns mach_absolute_time if abstime
+ *     is not NULL.
+ */
+void
+clock_get_calendar_absolute_and_microtime(
+       clock_sec_t                     *secs,
+       clock_usec_t            *microsecs,
+       uint64_t                *abstime)
 {
        uint64_t                now;
        spl_t                   s;
 
        s = splclock();
-       simple_lock(&clock_lock);
+       clock_lock();
 
        now = mach_absolute_time();
+       if (abstime)
+               *abstime = now;
 
-       if (calend_adjdelta < 0) {
+       if (clock_calend.adjdelta < 0) {
                uint32_t        t32;
 
-               if (now > calend_adjstart) {
-                       t32 = now - calend_adjstart;
+               /* 
+                * Since offset is decremented during a negative adjustment,
+                * ensure that time increases monotonically without going
+                * temporarily backwards.
+                * If the delta has not yet passed, now is set to the start
+                * of the current adjustment period; otherwise, we're between
+                * the expiry of the delta and the next call to calend_adjust(),
+                * and we offset accordingly.
+                */
+               if (now > clock_calend.adjstart) {
+                       t32 = (uint32_t)(now - clock_calend.adjstart);
 
-                       if (t32 > calend_adjoffset)
-                               now -= calend_adjoffset;
+                       if (t32 > clock_calend.adjoffset)
+                               now -= clock_calend.adjoffset;
                        else
-                               now = calend_adjstart;
+                               now = clock_calend.adjstart;
                }
        }
 
@@ -223,9 +284,9 @@ clock_get_calendar_microtime(
 
        absolutetime_to_microtime(now, secs, microsecs);
 
-       *secs += clock_calend.epoch;
+       *secs += (clock_sec_t)clock_calend.epoch;
 
-       simple_unlock(&clock_lock);
+       clock_unlock();
        splx(s);
 }
 
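
The negative-adjustment clamp used above (and repeated in the nanotime and gettimeofday paths below) can be read as a small pure function: once the adjustment period has fully elapsed, subtract that period's absolute-time offset; while still inside the period, report the period's start so the calendar never appears to run backwards. A standalone restatement with plain integer types (illustrative only):

    #include <stdint.h>

    /* Clamp 'now' while a negative calendar adjustment is pending. */
    static uint64_t
    clamp_for_negative_adjustment(uint64_t now, uint64_t adjstart, uint32_t adjoffset)
    {
            if (now > adjstart) {
                    uint32_t t32 = (uint32_t)(now - adjstart);

                    if (t32 > adjoffset)
                            now -= adjoffset;   /* period expired: back out its full offset */
                    else
                            now = adjstart;     /* still inside the period: hold at its start */
            }
            return now;
    }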
@@ -241,38 +302,39 @@ clock_get_calendar_microtime(
  */
 void
 clock_get_calendar_nanotime(
-       uint32_t                        *secs,
-       uint32_t                        *nanosecs)
+       clock_sec_t                     *secs,
+       clock_nsec_t            *nanosecs)
 {
        uint64_t                now;
        spl_t                   s;
 
        s = splclock();
-       simple_lock(&clock_lock);
+       clock_lock();
 
        now = mach_absolute_time();
 
-       if (calend_adjdelta < 0) {
+       if (clock_calend.adjdelta < 0) {
                uint32_t        t32;
 
-               if (now > calend_adjstart) {
-                       t32 = now - calend_adjstart;
+               if (now > clock_calend.adjstart) {
+                       t32 = (uint32_t)(now - clock_calend.adjstart);
 
-                       if (t32 > calend_adjoffset)
-                               now -= calend_adjoffset;
+                       if (t32 > clock_calend.adjoffset)
+                               now -= clock_calend.adjoffset;
                        else
-                               now = calend_adjstart;
+                               now = clock_calend.adjstart;
                }
        }
 
        now += clock_calend.offset;
 
        absolutetime_to_microtime(now, secs, nanosecs);
+
        *nanosecs *= NSEC_PER_USEC;
 
-       *secs += clock_calend.epoch;
+       *secs += (clock_sec_t)clock_calend.epoch;
 
-       simple_unlock(&clock_lock);
+       clock_unlock();
        splx(s);
 }
 
@@ -289,40 +351,40 @@ clock_get_calendar_nanotime(
  */
 void
 clock_gettimeofday(
-       uint32_t                        *secs,
-       uint32_t                        *microsecs)
+       clock_sec_t             *secs,
+       clock_usec_t    *microsecs)
 {
        uint64_t                now;
        spl_t                   s;
 
        s = splclock();
-       simple_lock(&clock_lock);
+       clock_lock();
 
        now = mach_absolute_time();
 
-       if (calend_adjdelta >= 0) {
+       if (clock_calend.adjdelta >= 0) {
                clock_gettimeofday_set_commpage(now, clock_calend.epoch, clock_calend.offset, secs, microsecs);
        }
        else {
                uint32_t        t32;
 
-               if (now > calend_adjstart) {
-                       t32 = now - calend_adjstart;
+               if (now > clock_calend.adjstart) {
+                       t32 = (uint32_t)(now - clock_calend.adjstart);
 
-                       if (t32 > calend_adjoffset)
-                               now -= calend_adjoffset;
+                       if (t32 > clock_calend.adjoffset)
+                               now -= clock_calend.adjoffset;
                        else
-                               now = calend_adjstart;
+                               now = clock_calend.adjstart;
                }
 
                now += clock_calend.offset;
 
                absolutetime_to_microtime(now, secs, microsecs);
 
-               *secs += clock_calend.epoch;
+               *secs += (clock_sec_t)clock_calend.epoch;
        }
 
-       simple_unlock(&clock_lock);
+       clock_unlock();
        splx(s);
 }
 
@@ -340,20 +402,22 @@ clock_gettimeofday(
  */
 void
 clock_set_calendar_microtime(
-       uint32_t                        secs,
-       uint32_t                        microsecs)
+       clock_sec_t                     secs,
+       clock_usec_t            microsecs)
 {
-       uint32_t                sys, microsys;
-       uint32_t                newsecs;
-       spl_t                   s;
+       clock_sec_t                     sys;
+       clock_usec_t            microsys;
+       clock_sec_t                     newsecs;
+    clock_usec_t        newmicrosecs;
+       spl_t                           s;
 
-       newsecs = (microsecs < 500*USEC_PER_SEC)?
-                                               secs: secs + 1;
+    newsecs = secs;
+    newmicrosecs = microsecs;
 
        s = splclock();
-       simple_lock(&clock_lock);
+       clock_lock();
 
-    commpage_set_timestamp(0,0,0);
+       commpage_disable_timestamp();
 
        /*
         *      Calculate the new calendar epoch based on
@@ -371,19 +435,20 @@ clock_set_calendar_microtime(
         *      Set the new calendar epoch.
         */
        clock_calend.epoch = secs;
+
        nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);
 
        /*
         *      Cancel any adjustment in progress.
         */
-       calend_adjdelta = calend_adjtotal = 0;
+       calend_adjtotal = clock_calend.adjdelta = 0;
 
-       simple_unlock(&clock_lock);
+       clock_unlock();
 
        /*
         *      Set the new value for the platform clock.
         */
-       PESetGMTTimeOfDay(newsecs);
+       PESetUTCTimeOfDay(newsecs, newmicrosecs);
 
        splx(s);
 
@@ -391,6 +456,10 @@ clock_set_calendar_microtime(
         *      Send host notifications.
         */
        host_notify_calendar_change();
+       
+#if CONFIG_DTRACE
+       clock_track_calend_nowait();
+#endif
 }
 
 /*
@@ -405,16 +474,18 @@ clock_set_calendar_microtime(
 void
 clock_initialize_calendar(void)
 {
-       uint32_t                sys, microsys;
-       uint32_t                microsecs = 0, secs = PEGetGMTTimeOfDay();
-       spl_t                   s;
+       clock_sec_t                     sys, secs;
+       clock_usec_t            microsys, microsecs;
+       spl_t                           s;
+
+    PEGetUTCTimeOfDay(&secs, &microsecs);
 
        s = splclock();
-       simple_lock(&clock_lock);
+       clock_lock();
 
-    commpage_set_timestamp(0,0,0);
+       commpage_disable_timestamp();
 
-       if ((int32_t)secs >= (int32_t)clock_boottime) {
+       if ((long)secs >= (long)clock_boottime) {
                /*
                 *      Initialize the boot time based on the platform clock.
                 */
@@ -432,21 +503,26 @@ clock_initialize_calendar(void)
                 *      Set the new calendar epoch.
                 */
                clock_calend.epoch = secs;
+
                nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);
 
                /*
                 *       Cancel any adjustment in progress.
                 */
-               calend_adjdelta = calend_adjtotal = 0;
+               calend_adjtotal = clock_calend.adjdelta = 0;
        }
 
-       simple_unlock(&clock_lock);
+       clock_unlock();
        splx(s);
 
        /*
         *      Send host notifications.
         */
        host_notify_calendar_change();
+       
+#if CONFIG_DTRACE
+       clock_track_calend_nowait();
+#endif
 }
 
 /*
@@ -456,11 +532,19 @@ clock_initialize_calendar(void)
  */
 void
 clock_get_boottime_nanotime(
-       uint32_t                        *secs,
-       uint32_t                        *nanosecs)
+       clock_sec_t                     *secs,
+       clock_nsec_t            *nanosecs)
 {
-       *secs = clock_boottime;
+       spl_t   s;
+
+       s = splclock();
+       clock_lock();
+
+       *secs = (clock_sec_t)clock_boottime;
        *nanosecs = 0;
+
+       clock_unlock();
+       splx(s);
 }
 
 /*
@@ -473,86 +557,146 @@ clock_get_boottime_nanotime(
  */
 void
 clock_adjtime(
-       int32_t         *secs,
-       int32_t         *microsecs)
+       long            *secs,
+       int                     *microsecs)
 {
        uint32_t        interval;
        spl_t           s;
 
        s = splclock();
-       simple_lock(&clock_lock);
+       clock_lock();
 
        interval = calend_set_adjustment(secs, microsecs);
        if (interval != 0) {
                calend_adjdeadline = mach_absolute_time() + interval;
-               if (!timer_call_enter(&calend_adjcall, calend_adjdeadline))
+               if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_SYS_CRITICAL))
                        calend_adjactive++;
        }
        else
        if (timer_call_cancel(&calend_adjcall))
                calend_adjactive--;
 
-       simple_unlock(&clock_lock);
+       clock_unlock();
        splx(s);
 }
 
 static uint32_t
 calend_set_adjustment(
-       int32_t                         *secs,
-       int32_t                         *microsecs)
+       long                    *secs,
+       int                             *microsecs)
 {
        uint64_t                now, t64;
        int64_t                 total, ototal;
        uint32_t                interval = 0;
 
-       total = (int64_t)*secs * NSEC_PER_SEC + *microsecs * NSEC_PER_USEC;
+       /* 
+        * Compute the total adjustment time in nanoseconds.
+        */
+       total = ((int64_t)*secs * (int64_t)NSEC_PER_SEC) + (*microsecs * (int64_t)NSEC_PER_USEC);
 
-    commpage_set_timestamp(0,0,0);
+       /* 
+        * Disable commpage gettimeofday().
+        */
+       commpage_disable_timestamp();
 
+       /* 
+        * Get current absolute time.
+        */
        now = mach_absolute_time();
 
+       /* 
+        * Save the old adjustment total for later return.
+        */
        ototal = calend_adjtotal;
 
+       /*
+        * Is a new correction specified?
+        */
        if (total != 0) {
+               /*
+                * Set delta to the standard, small, adjustment skew.
+                */
                int32_t         delta = calend_adjskew;
 
                if (total > 0) {
-                       if (total > calend_adjbig)
+                       /*
+                        * Positive adjustment. If greater than the preset 'big' 
+                        * threshold, slew at a faster rate, capping if necessary.
+                        */
+                       if (total > (int64_t) calend_adjbig)
                                delta *= 10;
                        if (delta > total)
-                               delta = total;
+                               delta = (int32_t)total;
 
+                       /* 
+                        * Convert the delta back from ns to absolute time and store in adjoffset.
+                        */
                        nanoseconds_to_absolutetime((uint64_t)delta, &t64);
-                       calend_adjoffset = t64;
+                       clock_calend.adjoffset = (uint32_t)t64;
                }
                else {
-                       if (total < -calend_adjbig)
+                       /*
+                        * Negative adjustment; therefore, negate the delta. If 
+                        * greater than the preset 'big' threshold, slew at a faster 
+                        * rate, capping if necessary.
+                        */
+                       if (total < (int64_t) -calend_adjbig)
                                delta *= 10;
                        delta = -delta;
                        if (delta < total)
-                               delta = total;
-
-                       calend_adjstart = now;
-
+                               delta = (int32_t)total;
+
+                       /* 
+                        * Save the current absolute time. Subsequent time operations occurring
+                        * during this negative correction can make use of this value to ensure 
+                        * that time increases monotonically.
+                        */
+                       clock_calend.adjstart = now;
+
+                       /* 
+                        * Convert the delta back from ns to absolute time and store in adjoffset.
+                        */
                        nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
-                       calend_adjoffset = t64;
+                       clock_calend.adjoffset = (uint32_t)t64;
                }
 
+               /* 
+                * Store the total adjustment time in ns. 
+                */
                calend_adjtotal = total;
-               calend_adjdelta = delta;
+               
+               /* 
+                * Store the delta for this adjustment period in ns. 
+                */
+               clock_calend.adjdelta = delta;
 
+               /* 
+                * Set the interval in absolute time for later return. 
+                */
                interval = calend_adjinterval;
        }
-       else
-               calend_adjdelta = calend_adjtotal = 0;
+       else {
+               /* 
+                * No change; clear any prior adjustment.
+                */
+               calend_adjtotal = clock_calend.adjdelta = 0;
+       }
 
+       /* 
+        * If a prior correction was in progress, return the
+        * remaining uncorrected time from it. 
+        */
        if (ototal != 0) {
-               *secs = ototal / NSEC_PER_SEC;
-               *microsecs = (ototal % NSEC_PER_SEC) / NSEC_PER_USEC;
+               *secs = (long)(ototal / (long)NSEC_PER_SEC);
+               *microsecs = (int)((ototal % (int)NSEC_PER_SEC) / (int)NSEC_PER_USEC);
        }
        else
                *secs = *microsecs = 0;
 
+#if CONFIG_DTRACE
+       clock_track_calend_nowait();
+#endif
+       
        return (interval);
 }
 
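
The skew selection in calend_set_adjustment() above boils down to a few lines: start from the standard per-period slew (calend_adjskew, 40 microseconds of change per adjustment period), slew ten times faster once the outstanding correction exceeds calend_adjbig (one second), and never overshoot the remaining total. A standalone restatement of just that computation, with the constants written out in nanoseconds (illustrative only):

    #include <stdint.h>

    #define ADJSKEW_NS   (40 * 1000)        /* 40 usec of slew per adjustment period */
    #define ADJBIG_NS    (1000000000LL)     /* 10x slew once |total| exceeds 1 second */

    /* Pick the per-period delta (ns) for an outstanding correction of 'total' ns. */
    static int32_t
    pick_adjustment_delta(int64_t total)
    {
            int32_t delta = ADJSKEW_NS;

            if (total == 0)
                    return 0;               /* nothing outstanding: no slew */

            if (total > 0) {
                    if (total > ADJBIG_NS)
                            delta *= 10;
                    if (delta > total)
                            delta = (int32_t)total;     /* cap at what is left */
            } else {
                    if (total < -ADJBIG_NS)
                            delta *= 10;
                    delta = -delta;
                    if (delta < total)
                            delta = (int32_t)total;     /* cap at what is left (negative) */
            }
            return delta;
    }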
@@ -563,20 +707,19 @@ calend_adjust_call(void)
        spl_t           s;
 
        s = splclock();
-       simple_lock(&clock_lock);
+       clock_lock();
 
        if (--calend_adjactive == 0) {
                interval = calend_adjust();
                if (interval != 0) {
-                       clock_deadline_for_periodic_event(interval, mach_absolute_time(),
-                                                                                                                               &calend_adjdeadline);
+                       clock_deadline_for_periodic_event(interval, mach_absolute_time(), &calend_adjdeadline);
 
-                       if (!timer_call_enter(&calend_adjcall, calend_adjdeadline))
+                       if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_SYS_CRITICAL))
                                calend_adjactive++;
                }
        }
 
-       simple_unlock(&clock_lock);
+       clock_unlock();
        splx(s);
 }
 
@@ -587,42 +730,46 @@ calend_adjust(void)
        int32_t                 delta;
        uint32_t                interval = 0;
 
-    commpage_set_timestamp(0,0,0);
+       commpage_disable_timestamp();
 
        now = mach_absolute_time();
 
-       delta = calend_adjdelta;
+       delta = clock_calend.adjdelta;
 
        if (delta > 0) {
-               clock_calend.offset += calend_adjoffset;
+               clock_calend.offset += clock_calend.adjoffset;
 
                calend_adjtotal -= delta;
                if (delta > calend_adjtotal) {
-                       calend_adjdelta = delta = calend_adjtotal;
+                       clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;
 
                        nanoseconds_to_absolutetime((uint64_t)delta, &t64);
-                       calend_adjoffset = t64;
+                       clock_calend.adjoffset = (uint32_t)t64;
                }
        }
        else
-       if (delta < 0) {
-               clock_calend.offset -= calend_adjoffset;
+               if (delta < 0) {
+                       clock_calend.offset -= clock_calend.adjoffset;
 
-               calend_adjtotal -= delta;
-               if (delta < calend_adjtotal) {
-                       calend_adjdelta = delta = calend_adjtotal;
+                       calend_adjtotal -= delta;
+                       if (delta < calend_adjtotal) {
+                               clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;
 
-                       nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
-                       calend_adjoffset = t64;
-               }
+                               nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
+                               clock_calend.adjoffset = (uint32_t)t64;
+                       }
 
-               if (calend_adjdelta != 0)
-                       calend_adjstart = now;
-       }
+                       if (clock_calend.adjdelta != 0)
+                               clock_calend.adjstart = now;
+               }
 
-       if (calend_adjdelta != 0)
+       if (clock_calend.adjdelta != 0)
                interval = calend_adjinterval;
 
+#if CONFIG_DTRACE
+       clock_track_calend_nowait();
+#endif
+
        return (interval);
 }
 
@@ -651,6 +798,15 @@ mach_wait_until_continue(
        /*NOTREACHED*/
 }
 
+/*
+ * mach_wait_until_trap: Suspend execution of the calling thread until the specified time has passed
+ *
+ * Parameters:    args->deadline          Absolute time (in mach_absolute_time() units) to wait until
+ *
+ * Returns:        0                      Success
+ *                !0                      Not success           
+ *
+ */
 kern_return_t
 mach_wait_until_trap(
        struct mach_wait_until_trap_args        *args)
@@ -658,7 +814,8 @@ mach_wait_until_trap(
        uint64_t                deadline = args->deadline;
        wait_result_t   wresult;
 
-       wresult = assert_wait_deadline((event_t)mach_wait_until_trap, THREAD_ABORTSAFE, deadline);
+       wresult = assert_wait_deadline_with_leeway((event_t)mach_wait_until_trap, THREAD_ABORTSAFE,
+                                                  TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
        if (wresult == THREAD_WAITING)
                wresult = thread_block(mach_wait_until_continue);
 
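
For context, this trap backs the user-space mach_wait_until() call in <mach/mach_time.h>, whose deadline argument is expressed in mach_absolute_time() units. A small user-space sketch of sleeping until a deadline 100 ms away (error handling elided; illustrative only):

    #include <mach/mach_time.h>
    #include <stdint.h>

    int
    main(void)
    {
            mach_timebase_info_data_t tb;
            uint64_t now, delta_abs;

            mach_timebase_info(&tb);            /* ns = abs * numer / denom */
            now = mach_absolute_time();

            /* Convert 100 ms to absolute-time units and wait for that deadline. */
            delta_abs = (100ULL * 1000 * 1000) * tb.denom / tb.numer;
            mach_wait_until(now + delta_abs);

            return 0;
    }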
@@ -674,27 +831,44 @@ clock_delay_until(
        if (now >= deadline)
                return;
 
-       if (    (deadline - now) < (8 * sched_cswtime)  ||
+       _clock_delay_until_deadline(deadline - now, deadline);
+}
+
+/*
+ * Preserve the original precise interval that the client
+ * requested for comparison to the spin threshold.
+ */
+void
+_clock_delay_until_deadline(
+       uint64_t                interval,
+       uint64_t                deadline)
+{
+
+       if (interval == 0)
+               return;
+
+       if (    ml_delay_should_spin(interval)  ||
                        get_preemption_level() != 0                             ||
-                       ml_get_interrupts_enabled() == FALSE    )
-               machine_delay_until(deadline);
-       else {
-               assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline - sched_cswtime);
+                       ml_get_interrupts_enabled() == FALSE    ) {
+               machine_delay_until(interval, deadline);
+       } else {
+               assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline);
 
                thread_block(THREAD_CONTINUE_NULL);
        }
 }
 
+
 void
 delay_for_interval(
        uint32_t                interval,
        uint32_t                scale_factor)
 {
-       uint64_t                end;
+       uint64_t                abstime;
 
-       clock_interval_to_deadline(interval, scale_factor, &end);
+       clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
 
-       clock_delay_until(end);
+       _clock_delay_until_deadline(abstime, mach_absolute_time() + abstime);
 }
 
 void
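
The point of the new _clock_delay_until_deadline() split is that delay_for_interval() converts the caller's interval to absolute time exactly once and passes both that interval and the derived deadline down, so the spin-versus-block decision (ml_delay_should_spin) is made on the length the caller actually requested rather than on whatever happens to remain of the deadline. Spelled out for a hypothetical 10 ms delay (illustrative only):

    uint64_t abstime, deadline;

    /* What delay_for_interval(10, NSEC_PER_MSEC) now does internally: */
    clock_interval_to_absolutetime_interval(10, NSEC_PER_MSEC, &abstime);
    deadline = mach_absolute_time() + abstime;
    _clock_delay_until_deadline(abstime, deadline);   /* interval preserved for the spin check */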
@@ -753,3 +927,103 @@ clock_deadline_for_periodic_event(
                        *deadline = abstime + interval;
        }
 }
+
+#if    CONFIG_DTRACE
+
+/*
+ * clock_get_calendar_nanotime_nowait
+ *
+ * Description:        Non-blocking version of clock_get_calendar_nanotime()
+ *
+ * Notes:      This function operates by separately tracking calendar time
+ *             updates using a two element structure to copy the calendar
+ *             state, which may be asynchronously modified.  It utilizes
+ *             barrier instructions in the tracking process and in the local
+ *             stable snapshot process in order to ensure that a consistent
+ *             snapshot is used to perform the calculation.
+ */
+void
+clock_get_calendar_nanotime_nowait(
+       clock_sec_t                     *secs,
+       clock_nsec_t            *nanosecs)
+{
+       int i = 0;
+       uint64_t                now;
+       struct unlocked_clock_calend stable;
+
+       for (;;) {
+               stable = flipflop[i];           /* take snapshot */
+
+               /*
+                * Use a barrier instruction to ensure atomicity.  We AND
+                * off the "in progress" bit to get the current generation
+                * count.
+                */
+               (void)hw_atomic_and(&stable.gen, ~(uint32_t)1);
+
+               /*
+                * If an update _is_ in progress, the generation count will be
+                * off by one, if it _was_ in progress, it will be off by two,
+                * and if we caught it at a good time, it will be equal (and
+                * our snapshot is therefore stable).
+                */
+               if (flipflop[i].gen == stable.gen)
+                       break;
+
+               /* Switch to the other element of the flipflop, and try again. */
+               i ^= 1;
+       }
+
+       now = mach_absolute_time();
+
+       if (stable.calend.adjdelta < 0) {
+               uint32_t        t32;
+
+               if (now > stable.calend.adjstart) {
+                       t32 = (uint32_t)(now - stable.calend.adjstart);
+
+                       if (t32 > stable.calend.adjoffset)
+                               now -= stable.calend.adjoffset;
+                       else
+                               now = stable.calend.adjstart;
+               }
+       }
+
+       now += stable.calend.offset;
+
+       absolutetime_to_microtime(now, secs, nanosecs);
+       *nanosecs *= NSEC_PER_USEC;
+
+       *secs += (clock_sec_t)stable.calend.epoch;
+}
+
+static void 
+clock_track_calend_nowait(void)
+{
+       int i;
+
+       for (i = 0; i < 2; i++) {
+               struct clock_calend tmp = clock_calend;
+
+               /*
+                * Set the low bit of the generation count; since we use a
+                * barrier instruction to do this, we are guaranteed that this
+                * will flag an update in progress to an async caller trying
+                * to examine the contents.
+                */
+               (void)hw_atomic_or(&flipflop[i].gen, 1);
+
+               flipflop[i].calend = tmp;
+
+               /*
+                * Increment the generation count to clear the low bit to
+                * signal completion.  If a caller compares the generation
+                * count after taking a copy while in progress, the count
+                * will be off by two.
+                */
+               (void)hw_atomic_add(&flipflop[i].gen, 1);
+       }
+}
+
+#endif /* CONFIG_DTRACE */
+