diff --git a/osfmk/kern/clock.c b/osfmk/kern/clock.c
index 0a256f2204fd8fb82b67293b9a3081d6c6595aed..1264c6661af4f4050d935af2ec4e4f433bf30f42 100644
--- a/osfmk/kern/clock.c
+++ b/osfmk/kern/clock.c
@@ -1,8 +1,8 @@
 /*
- * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
 #include <sys/kdebug.h>
 #include <sys/timex.h>
 #include <kern/arithmetic_128.h>
+#include <os/log.h>
 
-uint32_t       hz_tick_interval = 1;
+#if HIBERNATION && HAS_CONTINUOUS_HWCLOCK
+// On ARM64, the hwclock keeps ticking across a normal S2R so we use it to reset the
+// system clock after a normal wake. However, on hibernation we cut power to the hwclock,
+// so we have to add an offset to the hwclock to compute continuous_time after hibernate resume.
+uint64_t hwclock_conttime_offset = 0;
+#endif /* HIBERNATION && HAS_CONTINUOUS_HWCLOCK */
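
For orientation, a minimal sketch of the relationship described above; read_hwclock() is a hypothetical stand-in for the platform's hardware-clock read, not an XNU API:

        /* Illustrative only: after a plain S2R the hwclock alone is
         * continuous time; after hibernate resume, the saved offset bridges
         * the interval during which the hwclock was unpowered. */
        static uint64_t
        sketch_continuous_time(void)
        {
                return read_hwclock() + hwclock_conttime_offset;
        }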
 
+#if HIBERNATION_USES_LEGACY_CLOCK || !HAS_CONTINUOUS_HWCLOCK
+#define ENABLE_LEGACY_CLOCK_CODE 1
+#endif /* HIBERNATION_USES_LEGACY_CLOCK || !HAS_CONTINUOUS_HWCLOCK */
 
-decl_simple_lock_data(,clock_lock)
-lck_grp_attr_t * settime_lock_grp_attr;
-lck_grp_t * settime_lock_grp;
-lck_attr_t * settime_lock_attr;
-lck_mtx_t settime_lock;
+#if HIBERNATION_USES_LEGACY_CLOCK
+#include <IOKit/IOHibernatePrivate.h>
+#endif /* HIBERNATION_USES_LEGACY_CLOCK */
 
-#define clock_lock()   \
-       simple_lock(&clock_lock)
+uint32_t        hz_tick_interval = 1;
+#if ENABLE_LEGACY_CLOCK_CODE
+static uint64_t has_monotonic_clock = 0;
+#endif /* ENABLE_LEGACY_CLOCK_CODE */
 
-#define clock_unlock() \
-       simple_unlock(&clock_lock)
+SIMPLE_LOCK_DECLARE(clock_lock, 0);
+
+static LCK_GRP_DECLARE(settime_lock_grp, "settime");
+static LCK_MTX_DECLARE(settime_lock, &settime_lock_grp);
 
-#define clock_lock_init()      \
-       simple_lock_init(&clock_lock, 0)
+#define clock_lock()    \
+       simple_lock(&clock_lock, LCK_GRP_NULL)
+
+#define clock_unlock()  \
+       simple_unlock(&clock_lock)
 
 #ifdef kdp_simple_lock_is_acquired
-boolean_t kdp_clock_is_locked()
+boolean_t
+kdp_clock_is_locked()
 {
        return kdp_simple_lock_is_acquired(&clock_lock);
 }
 #endif
 
 struct bintime {
-       time_t  sec;
+       time_t  sec;
        uint64_t frac;
 };
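
A note on the representation: struct bintime is 64.64 fixed point; frac holds the fraction of a second scaled by 2^64. A hedged sketch of the conversion constant the helpers below rely on:

        /* Sketch: 18446744073 == floor(2^64 / NSEC_PER_SEC), so multiplying
         * nanoseconds by it yields a 64-bit binary fraction of a second. */
        uint64_t ns   = 500000000;                      /* 0.5 s           */
        uint64_t frac = ns * (uint64_t)18446744073LL;   /* ~2^63, i.e. 0.5 */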
 
@@ -122,8 +137,9 @@ bintime_addx(struct bintime *_bt, uint64_t _x)
 
        _u = _bt->frac;
        _bt->frac += _x;
-       if (_u > _bt->frac)
+       if (_u > _bt->frac) {
                _bt->sec++;
+       }
 }
 
 static __inline void
@@ -133,14 +149,15 @@ bintime_subx(struct bintime *_bt, uint64_t _x)
 
        _u = _bt->frac;
        _bt->frac -= _x;
-       if (_u < _bt->frac)
+       if (_u < _bt->frac) {
                _bt->sec--;
+       }
 }
 
 static __inline void
 bintime_addns(struct bintime *bt, uint64_t ns)
 {
-       bt->sec += ns/ (uint64_t)NSEC_PER_SEC;
+       bt->sec += ns / (uint64_t)NSEC_PER_SEC;
        ns = ns % (uint64_t)NSEC_PER_SEC;
        if (ns) {
                /* 18446744073 = int(2^64 / NSEC_PER_SEC) */
@@ -152,7 +169,7 @@ bintime_addns(struct bintime *bt, uint64_t ns)
 static __inline void
 bintime_subns(struct bintime *bt, uint64_t ns)
 {
-       bt->sec -= ns/ (uint64_t)NSEC_PER_SEC;
+       bt->sec -= ns / (uint64_t)NSEC_PER_SEC;
        ns = ns % (uint64_t)NSEC_PER_SEC;
        if (ns) {
                /* 18446744073 = int(2^64 / NSEC_PER_SEC) */
@@ -164,19 +181,20 @@ bintime_subns(struct bintime *bt, uint64_t ns)
 static __inline void
 bintime_addxns(struct bintime *bt, uint64_t a, int64_t xns)
 {
-       uint64_t uxns = (xns > 0)?(uint64_t )xns:(uint64_t)-xns;
+       uint64_t uxns = (xns > 0)?(uint64_t)xns:(uint64_t)-xns;
        uint64_t ns = multi_overflow(a, uxns);
        if (xns > 0) {
-               if (ns)
+               if (ns) {
                        bintime_addns(bt, ns);
+               }
                ns = (a * uxns) / (uint64_t)NSEC_PER_SEC;
                bintime_addx(bt, ns);
-       }
-       else{
-               if (ns)
+       } else {
+               if (ns) {
                        bintime_subns(bt, ns);
+               }
                ns = (a * uxns) / (uint64_t)NSEC_PER_SEC;
-               bintime_subx(bt,ns);
+               bintime_subx(bt, ns);
        }
 }
 
@@ -188,8 +206,9 @@ bintime_add(struct bintime *_bt, const struct bintime *_bt2)
 
        _u = _bt->frac;
        _bt->frac += _bt2->frac;
-       if (_u > _bt->frac)
+       if (_u > _bt->frac) {
                _bt->sec++;
+       }
        _bt->sec += _bt2->sec;
 }
 
@@ -200,15 +219,15 @@ bintime_sub(struct bintime *_bt, const struct bintime *_bt2)
 
        _u = _bt->frac;
        _bt->frac -= _bt2->frac;
-       if (_u < _bt->frac)
+       if (_u < _bt->frac) {
                _bt->sec--;
+       }
        _bt->sec -= _bt2->sec;
 }
 
 static __inline void
 clock2bintime(const clock_sec_t *secs, const clock_usec_t *microsecs, struct bintime *_bt)
 {
-
        _bt->sec = *secs;
        /* 18446744073709 = int(2^64 / 1000000) */
        _bt->frac = *microsecs * (uint64_t)18446744073709LL;
@@ -217,7 +236,6 @@ clock2bintime(const clock_sec_t *secs, const clock_usec_t *microsecs, struct bin
 static __inline void
 bintime2usclock(const struct bintime *_bt, clock_sec_t *secs, clock_usec_t *microsecs)
 {
-
        *secs = _bt->sec;
        *microsecs = ((uint64_t)USEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32;
 }
@@ -225,11 +243,11 @@ bintime2usclock(const struct bintime *_bt, clock_sec_t *secs, clock_usec_t *micr
 static __inline void
 bintime2nsclock(const struct bintime *_bt, clock_sec_t *secs, clock_usec_t *nanosecs)
 {
-
        *secs = _bt->sec;
        *nanosecs = ((uint64_t)NSEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32;
 }
 
+#if ENABLE_LEGACY_CLOCK_CODE
 static __inline void
 bintime2absolutetime(const struct bintime *_bt, uint64_t *abs)
 {
@@ -237,6 +255,16 @@ bintime2absolutetime(const struct bintime *_bt, uint64_t *abs)
        nsec = (uint64_t) _bt->sec * (uint64_t)NSEC_PER_SEC + (((uint64_t)NSEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32);
        nanoseconds_to_absolutetime(nsec, abs);
 }
+
+struct latched_time {
+       uint64_t monotonic_time_usec;
+       uint64_t mach_time;
+};
+
+extern int
+kernel_sysctlbyname(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
+
+#endif /* ENABLE_LEGACY_CLOCK_CODE */
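
The struct and extern above feed the legacy sleep-time path; the usage pattern, as it appears later in clock_initialize_calendar(), is:

        /* Latch the monotonic clock together with the mach_absolute_time it
         * was sampled at, so the pair can be compared against a later sample
         * to measure time slept. */
        struct latched_time mt;
        size_t size = sizeof(mt);
        if (kernel_sysctlbyname("kern.monotonicclock_usecs", &mt, &size, NULL, 0) == 0) {
                /* mt.monotonic_time_usec and mt.mach_time form a matched pair */
        }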
 /*
  *     Time of day (calendar) variables.
  *
@@ -245,32 +273,37 @@ bintime2absolutetime(const struct bintime *_bt, uint64_t *abs)
  *     TOD <- bintime + delta*scale
  *
  *     where :
- *     bintime is a cumulative offset that includes bootime and scaled time elapsed betweed bootime and last scale update.
+ *      bintime is a cumulative offset that includes boottime and scaled time elapsed between boottime and last scale update.
  *     delta is ticks elapsed since last scale update.
  *     scale is computed according to an adjustment provided by ntp_kern.
  */
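
Expressed as code, the update rule above corresponds to what get_scaled_time() and its callers do below:

        /* TOD <- bintime + delta*scale */
        uint64_t delta = mach_absolute_time() - clock_calend.offset_count;
        struct bintime bt = scale_delta(delta, clock_calend.tick_scale_x,
            clock_calend.s_scale_ns, clock_calend.s_adj_nsx);
        bintime_add(&bt, &clock_calend.bintime);    /* bt now holds TOD */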
 static struct clock_calend {
-       uint64_t                s_scale_ns; /* scale to apply for each second elapsed, it converts in ns */
-       int64_t                 s_adj_nsx; /* additional adj to apply for each second elapsed, it is expressed in 64 bit frac of ns */
-       uint64_t                tick_scale_x; /* scale to apply for each tick elapsed, it converts in 64 bit frac of s */
-       uint64_t                offset_count; /* abs time from which apply current scales */
-       struct bintime          offset; /* cumulative offset expressed in (sec, 64 bits frac of a second) */
-       struct bintime          bintime; /* cumulative offset (it includes bootime) expressed in (sec, 64 bits frac of a second) */
-       struct bintime          boottime; /* boot time expressed in (sec, 64 bits frac of a second) */
-       struct bintime          basesleep;
+       uint64_t                s_scale_ns; /* scale to apply for each second elapsed, it converts in ns */
+       int64_t                 s_adj_nsx; /* additional adj to apply for each second elapsed, it is expressed in 64 bit frac of ns */
+       uint64_t                tick_scale_x; /* scale to apply for each tick elapsed, it converts in 64 bit frac of s */
+       uint64_t                offset_count; /* abs time from which apply current scales */
+       struct bintime          offset; /* cumulative offset expressed in (sec, 64 bits frac of a second) */
+       struct bintime          bintime; /* cumulative offset (it includes boottime) expressed in (sec, 64 bits frac of a second) */
+       struct bintime          boottime; /* boot time expressed in (sec, 64 bits frac of a second) */
+#if ENABLE_LEGACY_CLOCK_CODE
+       struct bintime          basesleep;
+#endif /* ENABLE_LEGACY_CLOCK_CODE */
 } clock_calend;
 
 static uint64_t ticks_per_sec; /* ticks in a second (expressed in abs time) */
 
 #if DEVELOPMENT || DEBUG
-clock_sec_t last_utc_sec = 0;
-clock_usec_t last_utc_usec = 0;
-clock_sec_t max_utc_sec = 0;
-clock_sec_t last_sys_sec = 0;
-clock_usec_t last_sys_usec = 0;
+extern int g_should_log_clock_adjustments;
+
+static void print_all_clock_variables(const char*, clock_sec_t* pmu_secs, clock_usec_t* pmu_usec, clock_sec_t* sys_secs, clock_usec_t* sys_usec, struct clock_calend* calend_cp);
+static void print_all_clock_variables_internal(const char *, struct clock_calend* calend_cp);
+#else
+#define print_all_clock_variables(...) do { } while (0)
+#define print_all_clock_variables_internal(...) do { } while (0)
 #endif
 
-#if    CONFIG_DTRACE
+#if     CONFIG_DTRACE
+
 
 /*
  *     Unlocked calendar flipflop; this is used to track a clock_calend such
@@ -279,13 +312,13 @@ clock_usec_t last_sys_usec = 0;
  *
  *     The trick is to use a generation count and set the low bit when it is
  *     being updated/read; by doing this, we guarantee, through use of the
- *     hw_atomic functions, that the generation is incremented when the bit
+ *     os_atomic functions, that the generation is incremented when the bit
  *     is cleared atomically (by using a 1 bit add).
  */
 static struct unlocked_clock_calend {
-       struct clock_calend     calend;         /* copy of calendar */
-       uint32_t                gen;            /* generation count */
-} flipflop[ 2];
+       struct clock_calend     calend;         /* copy of calendar */
+       uint32_t                gen;            /* generation count */
+} flipflop[2];
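
A hedged sketch of the reader side of this scheme (illustrative only; the writer side lives in clock_track_calend_nowait()):

        /* Seqlock-style snapshot: retry while the generation's low bit is
         * set (update in progress) or the generation changed mid-copy. */
        struct clock_calend snap;
        uint32_t gen;
        do {
                gen  = flipflop[0].gen;
                snap = flipflop[0].calend;
        } while ((gen & 1) || gen != flipflop[0].gen);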
 
 static void clock_track_calend_nowait(void);
 
@@ -298,22 +331,22 @@ void _clock_delay_until_deadline_with_leeway(uint64_t interval, uint64_t deadlin
 static uint64_t clock_boottime;
 static uint32_t clock_boottime_usec;
 
-#define TIME_ADD(rsecs, secs, rfrac, frac, unit)       \
-MACRO_BEGIN                                                                                    \
-       if (((rfrac) += (frac)) >= (unit)) {                    \
-               (rfrac) -= (unit);                                                      \
-               (rsecs) += 1;                                                           \
-       }                                                                                               \
-       (rsecs) += (secs);                                                              \
+#define TIME_ADD(rsecs, secs, rfrac, frac, unit)        \
+MACRO_BEGIN                                                                                     \
+       if (((rfrac) += (frac)) >= (unit)) {                    \
+               (rfrac) -= (unit);                                                      \
+               (rsecs) += 1;                                                           \
+       }                                                                                               \
+       (rsecs) += (secs);                                                              \
 MACRO_END
 
-#define TIME_SUB(rsecs, secs, rfrac, frac, unit)       \
-MACRO_BEGIN                                                                                    \
-       if ((int)((rfrac) -= (frac)) < 0) {                             \
-               (rfrac) += (unit);                                                      \
-               (rsecs) -= 1;                                                           \
-       }                                                                                               \
-       (rsecs) -= (secs);                                                              \
+#define TIME_SUB(rsecs, secs, rfrac, frac, unit)        \
+MACRO_BEGIN                                                                                     \
+       if ((int)((rfrac) -= (frac)) < 0) {                             \
+               (rfrac) += (unit);                                                      \
+               (rsecs) -= 1;                                                           \
+       }                                                                                               \
+       (rsecs) -= (secs);                                                              \
 MACRO_END
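
For example, TIME_ADD carries fractional overflow into the seconds field:

        clock_sec_t  rsecs = 2;
        clock_usec_t rfrac = 600000;
        TIME_ADD(rsecs, 1, rfrac, 700000, USEC_PER_SEC);
        /* rfrac wrapped past one second: rsecs == 4, rfrac == 300000,
         * i.e. 2.6 s + 1.7 s = 4.3 s. */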
 
 /*
@@ -324,14 +357,6 @@ MACRO_END
 void
 clock_config(void)
 {
-
-       clock_lock_init();
-
-       settime_lock_grp_attr = lck_grp_attr_alloc_init();
-       settime_lock_grp = lck_grp_alloc_init("settime grp", settime_lock_grp_attr);
-       settime_lock_attr = lck_attr_alloc_init();
-       lck_mtx_init(&settime_lock, settime_lock_grp, settime_lock_attr);
-
        clock_oldconfig();
 
        ntp_init();
@@ -361,7 +386,7 @@ clock_init(void)
 void
 clock_timebase_init(void)
 {
-       uint64_t        abstime;
+       uint64_t        abstime;
 
        nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
        hz_tick_interval = (uint32_t)abstime;
@@ -378,14 +403,14 @@ kern_return_t
 mach_timebase_info_trap(
        struct mach_timebase_info_trap_args *args)
 {
-       mach_vm_address_t                       out_info_addr = args->info;
-       mach_timebase_info_data_t       info;
+       mach_vm_address_t                       out_info_addr = args->info;
+       mach_timebase_info_data_t       info = {};
 
        clock_timebase_info(&info);
 
-       copyout((void *)&info, out_info_addr, sizeof (info));
+       copyout((void *)&info, out_info_addr, sizeof(info));
 
-       return (KERN_SUCCESS);
+       return KERN_SUCCESS;
 }
 
 /*
@@ -400,8 +425,8 @@ mach_timebase_info_trap(
  */
 void
 clock_get_calendar_microtime(
-       clock_sec_t             *secs,
-       clock_usec_t            *microsecs)
+       clock_sec_t             *secs,
+       clock_usec_t            *microsecs)
 {
        clock_get_calendar_absolute_and_microtime(secs, microsecs, NULL);
 }
@@ -466,7 +491,7 @@ get_scale_factors_from_adj(int64_t adjustment, uint64_t* tick_scale_x, uint64_t*
         * Keep it as additional adjustment for the next sec.
         */
        frac = (adjustment > 0)? ((uint32_t) adjustment) : -((uint32_t) (-adjustment));
-       *s_adj_nsx = (frac>0)? frac << 32 : -( (-frac) << 32);
+       *s_adj_nsx = (frac > 0)? ((uint64_t) frac) << 32 : -(((uint64_t) (-frac)) << 32);
 
        return;
 }
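
The widening casts added here matter: frac's declaration is not shown in this hunk, but shifting a 32-bit value left by 32 is undefined behavior in C (the shift count equals the type width), so the value must be widened before the shift. A sketch:

        int32_t  frac = 5;
        /* uint64_t bad = frac << 32;  -- UB: frac promotes to 32-bit int */
        uint64_t good = ((uint64_t)frac) << 32;  /* 5 * 2^32, well defined */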
@@ -493,18 +518,18 @@ scale_delta(uint64_t delta, uint64_t tick_scale_x, uint64_t s_scale_ns, int64_t
         * s_adj_nsx -> additional adj expressed in 64 bit frac of ns to apply to each sec.
         */
        if (delta > ticks_per_sec) {
-               sec = (delta/ticks_per_sec);
+               sec = (delta / ticks_per_sec);
                new_ns = sec * s_scale_ns;
                bintime_addns(&bt, new_ns);
                if (s_adj_nsx) {
                        if (sec == 1) {
                                /* shortcut, no overflow can occur */
-                               if (s_adj_nsx > 0)
-                                       bintime_addx(&bt, (uint64_t)s_adj_nsx/ (uint64_t)NSEC_PER_SEC);
-                               else
-                                       bintime_subx(&bt, (uint64_t)-s_adj_nsx/ (uint64_t)NSEC_PER_SEC);
-                       }
-                       else{
+                               if (s_adj_nsx > 0) {
+                                       bintime_addx(&bt, (uint64_t)s_adj_nsx / (uint64_t)NSEC_PER_SEC);
+                               } else {
+                                       bintime_subx(&bt, (uint64_t)-s_adj_nsx / (uint64_t)NSEC_PER_SEC);
+                               }
+                       } else {
                                /*
                                 * s_adj_nsx is 64 bit frac of ns.
                                 * sec*s_adj_nsx might overflow in int64_t.
@@ -514,10 +539,10 @@ scale_delta(uint64_t delta, uint64_t tick_scale_x, uint64_t s_scale_ns, int64_t
                        }
                }
                delta = (delta % ticks_per_sec);
-        }
+       }
 
        over = multi_overflow(tick_scale_x, delta);
-       if(over){
+       if (over) {
                bt.sec += over;
        }
 
@@ -551,16 +576,17 @@ get_scaled_time(uint64_t now)
 
 static void
 clock_get_calendar_absolute_and_microtime_locked(
-       clock_sec_t             *secs,
-       clock_usec_t            *microsecs,
-       uint64_t                *abstime)
+       clock_sec_t             *secs,
+       clock_usec_t            *microsecs,
+       uint64_t                *abstime)
 {
        uint64_t now;
        struct bintime bt;
 
        now  = mach_absolute_time();
-       if (abstime)
+       if (abstime) {
                *abstime = now;
+       }
 
        bt = get_scaled_time(now);
        bintime_add(&bt, &clock_calend.bintime);
@@ -569,16 +595,17 @@ clock_get_calendar_absolute_and_microtime_locked(
 
 static void
 clock_get_calendar_absolute_and_nanotime_locked(
-       clock_sec_t             *secs,
-       clock_usec_t            *nanosecs,
-       uint64_t                *abstime)
+       clock_sec_t             *secs,
+       clock_usec_t            *nanosecs,
+       uint64_t                *abstime)
 {
        uint64_t now;
        struct bintime bt;
 
        now  = mach_absolute_time();
-       if (abstime)
+       if (abstime) {
                *abstime = now;
+       }
 
        bt = get_scaled_time(now);
        bintime_add(&bt, &clock_calend.bintime);
@@ -595,11 +622,11 @@ clock_get_calendar_absolute_and_nanotime_locked(
  */
 void
 clock_get_calendar_absolute_and_microtime(
-       clock_sec_t             *secs,
-       clock_usec_t            *microsecs,
-       uint64_t                *abstime)
+       clock_sec_t             *secs,
+       clock_usec_t            *microsecs,
+       uint64_t                *abstime)
 {
-       spl_t                   s;
+       spl_t                   s;
 
        s = splclock();
        clock_lock();
@@ -622,10 +649,10 @@ clock_get_calendar_absolute_and_microtime(
  */
 void
 clock_get_calendar_nanotime(
-       clock_sec_t             *secs,
-       clock_nsec_t            *nanosecs)
+       clock_sec_t             *secs,
+       clock_nsec_t            *nanosecs)
 {
-       spl_t                   s;
+       spl_t                   s;
 
        s = splclock();
        clock_lock();
@@ -649,21 +676,21 @@ clock_get_calendar_nanotime(
  */
 void
 clock_gettimeofday(
-       clock_sec_t     *secs,
-       clock_usec_t    *microsecs)
+       clock_sec_t     *secs,
+       clock_usec_t    *microsecs)
 {
        clock_gettimeofday_and_absolute_time(secs, microsecs, NULL);
 }
 
 void
 clock_gettimeofday_and_absolute_time(
-       clock_sec_t     *secs,
-       clock_usec_t    *microsecs,
-       uint64_t        *mach_time)
+       clock_sec_t     *secs,
+       clock_usec_t    *microsecs,
+       uint64_t        *mach_time)
 {
-       uint64_t                now;
-       spl_t                   s;
-       struct bintime  bt;
+       uint64_t                now;
+       spl_t                   s;
+       struct bintime  bt;
 
        s = splclock();
        clock_lock();
@@ -697,19 +724,19 @@ clock_gettimeofday_and_absolute_time(
  */
 void
 clock_set_calendar_microtime(
-       clock_sec_t             secs,
-       clock_usec_t            microsecs)
+       clock_sec_t             secs,
+       clock_usec_t            microsecs)
 {
-       uint64_t                absolutesys;
-       clock_sec_t             newsecs;
-       clock_sec_t             oldsecs;
-       clock_usec_t            newmicrosecs;
-       clock_usec_t            oldmicrosecs;
-       uint64_t                commpage_value;
-       spl_t                   s;
-       struct bintime          bt;
-       clock_sec_t             deltasecs;
-       clock_usec_t            deltamicrosecs;
+       uint64_t                absolutesys;
+       clock_sec_t             newsecs;
+       clock_sec_t             oldsecs;
+       clock_usec_t            newmicrosecs;
+       clock_usec_t            oldmicrosecs;
+       uint64_t                commpage_value;
+       spl_t                   s;
+       struct bintime          bt;
+       clock_sec_t             deltasecs;
+       clock_usec_t            deltamicrosecs;
 
        newsecs = secs;
        newmicrosecs = microsecs;
@@ -727,6 +754,9 @@ clock_set_calendar_microtime(
        s = splclock();
        clock_lock();
 
+#if DEVELOPMENT || DEBUG
+       struct clock_calend clock_calend_cp = clock_calend;
+#endif
        commpage_disable_timestamp();
 
        /*
@@ -734,29 +764,35 @@ clock_set_calendar_microtime(
         */
        clock_get_calendar_absolute_and_microtime_locked(&oldsecs, &oldmicrosecs, &absolutesys);
 
+#if DEVELOPMENT || DEBUG
+       if (g_should_log_clock_adjustments) {
+               os_log(OS_LOG_DEFAULT, "%s wall %lu s %d u computed with %llu abs\n",
+                   __func__, (unsigned long)oldsecs, oldmicrosecs, absolutesys);
+               os_log(OS_LOG_DEFAULT, "%s requested %lu s %d u\n",
+                   __func__, (unsigned long)secs, microsecs );
+       }
+#endif
+
        if (oldsecs < secs || (oldsecs == secs && oldmicrosecs < microsecs)) {
                // moving forwards
                deltasecs = secs;
                deltamicrosecs = microsecs;
 
                TIME_SUB(deltasecs, oldsecs, deltamicrosecs, oldmicrosecs, USEC_PER_SEC);
-               TIME_ADD(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
 
+               TIME_ADD(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
                clock2bintime(&deltasecs, &deltamicrosecs, &bt);
                bintime_add(&clock_calend.boottime, &bt);
-               bintime_add(&clock_calend.basesleep, &bt);
-
        } else {
                // moving backwards
                deltasecs = oldsecs;
                deltamicrosecs = oldmicrosecs;
 
                TIME_SUB(deltasecs, secs, deltamicrosecs, microsecs, USEC_PER_SEC);
-               TIME_SUB(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
 
+               TIME_SUB(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
                clock2bintime(&deltasecs, &deltamicrosecs, &bt);
                bintime_sub(&clock_calend.boottime, &bt);
-               bintime_sub(&clock_calend.basesleep, &bt);
        }
 
        clock_calend.bintime = clock_calend.boottime;
@@ -766,6 +802,10 @@ clock_set_calendar_microtime(
 
        clock_gettimeofday_set_commpage(absolutesys, bt.sec, bt.frac, clock_calend.tick_scale_x, ticks_per_sec);
 
+#if DEVELOPMENT || DEBUG
+       struct clock_calend clock_calend_cp1 = clock_calend;
+#endif
+
        commpage_value = clock_boottime * USEC_PER_SEC + clock_boottime_usec;
 
        clock_unlock();
@@ -775,8 +815,22 @@ clock_set_calendar_microtime(
         *      Set the new value for the platform clock.
         *      This call might block, so interrupts must be enabled.
         */
+#if DEVELOPMENT || DEBUG
+       uint64_t now_b = mach_absolute_time();
+#endif
+
        PESetUTCTimeOfDay(newsecs, newmicrosecs);
 
+#if DEVELOPMENT || DEBUG
+       uint64_t now_a = mach_absolute_time();
+       if (g_should_log_clock_adjustments) {
+               os_log(OS_LOG_DEFAULT, "%s mach bef PESet %llu mach aft %llu \n", __func__, now_b, now_a);
+       }
+#endif
+
+       print_all_clock_variables_internal(__func__, &clock_calend_cp);
+       print_all_clock_variables_internal(__func__, &clock_calend_cp1);
+
        commpage_update_boottime(commpage_value);
 
        /*
@@ -825,7 +879,6 @@ clock_get_calendar_uptime(clock_sec_t *secs)
 void
 clock_update_calendar(void)
 {
-
        uint64_t now, delta;
        struct bintime bt;
        spl_t s;
@@ -857,6 +910,12 @@ clock_update_calendar(void)
         */
        ntp_update_second(&adjustment, clock_calend.bintime.sec);
 
+#if DEVELOPMENT || DEBUG
+       if (g_should_log_clock_adjustments) {
+               os_log(OS_LOG_DEFAULT, "%s adjustment %lld\n", __func__, adjustment);
+       }
+#endif
+
        /*
         * recomputing scale factors.
         */
@@ -864,56 +923,157 @@ clock_update_calendar(void)
 
        clock_gettimeofday_set_commpage(now, clock_calend.bintime.sec, clock_calend.bintime.frac, clock_calend.tick_scale_x, ticks_per_sec);
 
+#if DEVELOPMENT || DEBUG
+       struct clock_calend calend_cp = clock_calend;
+#endif
+
        clock_unlock();
        splx(s);
+
+       print_all_clock_variables(__func__, NULL, NULL, NULL, NULL, &calend_cp);
+}
+
+
+#if DEVELOPMENT || DEBUG
+
+void
+print_all_clock_variables_internal(const char* func, struct clock_calend* clock_calend_cp)
+{
+       clock_sec_t     offset_secs;
+       clock_usec_t    offset_microsecs;
+       clock_sec_t     bintime_secs;
+       clock_usec_t    bintime_microsecs;
+       clock_sec_t     bootime_secs;
+       clock_usec_t    bootime_microsecs;
+
+       if (!g_should_log_clock_adjustments) {
+               return;
+       }
+
+       bintime2usclock(&clock_calend_cp->offset, &offset_secs, &offset_microsecs);
+       bintime2usclock(&clock_calend_cp->bintime, &bintime_secs, &bintime_microsecs);
+       bintime2usclock(&clock_calend_cp->boottime, &bootime_secs, &bootime_microsecs);
+
+       os_log(OS_LOG_DEFAULT, "%s s_scale_ns %llu s_adj_nsx %lld tick_scale_x %llu offset_count %llu\n",
+           func, clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx,
+           clock_calend_cp->tick_scale_x, clock_calend_cp->offset_count);
+       os_log(OS_LOG_DEFAULT, "%s offset.sec %ld offset.frac %llu offset_secs %lu offset_microsecs %d\n",
+           func, clock_calend_cp->offset.sec, clock_calend_cp->offset.frac,
+           (unsigned long)offset_secs, offset_microsecs);
+       os_log(OS_LOG_DEFAULT, "%s bintime.sec %ld bintime.frac %llu bintime_secs %lu bintime_microsecs %d\n",
+           func, clock_calend_cp->bintime.sec, clock_calend_cp->bintime.frac,
+           (unsigned long)bintime_secs, bintime_microsecs);
+       os_log(OS_LOG_DEFAULT, "%s bootime.sec %ld bootime.frac %llu bootime_secs %lu bootime_microsecs %d\n",
+           func, clock_calend_cp->boottime.sec, clock_calend_cp->boottime.frac,
+           (unsigned long)bootime_secs, bootime_microsecs);
+
+#if !HAS_CONTINUOUS_HWCLOCK
+       clock_sec_t     basesleep_secs;
+       clock_usec_t    basesleep_microsecs;
+
+       bintime2usclock(&clock_calend_cp->basesleep, &basesleep_secs, &basesleep_microsecs);
+       os_log(OS_LOG_DEFAULT, "%s basesleep.sec %ld basesleep.frac %llu basesleep_secs %lu basesleep_microsecs %d\n",
+           func, clock_calend_cp->basesleep.sec, clock_calend_cp->basesleep.frac,
+           (unsigned long)basesleep_secs, basesleep_microsecs);
+#endif
+}
+
+
+void
+print_all_clock_variables(const char* func, clock_sec_t* pmu_secs, clock_usec_t* pmu_usec, clock_sec_t* sys_secs, clock_usec_t* sys_usec, struct clock_calend* clock_calend_cp)
+{
+       if (!g_should_log_clock_adjustments) {
+               return;
+       }
+
+       struct bintime  bt;
+       clock_sec_t     wall_secs;
+       clock_usec_t    wall_microsecs;
+       uint64_t now;
+       uint64_t delta;
+
+       if (pmu_secs) {
+               os_log(OS_LOG_DEFAULT, "%s PMU %lu s %d u \n", func, (unsigned long)*pmu_secs, *pmu_usec);
+       }
+       if (sys_secs) {
+               os_log(OS_LOG_DEFAULT, "%s sys %lu s %d u \n", func, (unsigned long)*sys_secs, *sys_usec);
+       }
+
+       print_all_clock_variables_internal(func, clock_calend_cp);
+
+       now = mach_absolute_time();
+       delta = now - clock_calend_cp->offset_count;
+
+       bt = scale_delta(delta, clock_calend_cp->tick_scale_x, clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx);
+       bintime_add(&bt, &clock_calend_cp->bintime);
+       bintime2usclock(&bt, &wall_secs, &wall_microsecs);
+
+       os_log(OS_LOG_DEFAULT, "%s wall %lu s %d u computed with %llu abs\n",
+           func, (unsigned long)wall_secs, wall_microsecs, now);
 }
 
+
+#endif /* DEVELOPMENT || DEBUG */
+
+
 /*
  *     clock_initialize_calendar:
  *
  *     Set the calendar and related clocks
- *     from the platform clock at boot or
- *     wake event.
+ *     from the platform clock at boot.
  *
  *     Also sends host notifications.
  */
-
 void
 clock_initialize_calendar(void)
 {
-       clock_sec_t             sys;  // sleepless time since boot in seconds
-       clock_sec_t             secs; // Current UTC time
-       clock_sec_t             utc_offset_secs; // Difference in current UTC time and sleepless time since boot
-       clock_usec_t            microsys;  
-       clock_usec_t            microsecs; 
-       clock_usec_t            utc_offset_microsecs; 
-       spl_t                   s;
-       struct bintime          bt;
-
+       clock_sec_t             sys;  // sleepless time since boot in seconds
+       clock_sec_t             secs; // Current UTC time
+       clock_sec_t             utc_offset_secs; // Difference in current UTC time and sleepless time since boot
+       clock_usec_t            microsys;
+       clock_usec_t            microsecs;
+       clock_usec_t            utc_offset_microsecs;
+       spl_t                   s;
+       struct bintime          bt;
+#if ENABLE_LEGACY_CLOCK_CODE
+       struct bintime          monotonic_bt;
+       struct latched_time     monotonic_time;
+       uint64_t                monotonic_usec_total;
+       clock_sec_t             sys2, monotonic_sec;
+       clock_usec_t            microsys2, monotonic_usec;
+       size_t                  size;
+
+#endif /* ENABLE_LEGACY_CLOCK_CODE */
+       // Get the UTC time and the corresponding sys time
        PEGetUTCTimeOfDay(&secs, &microsecs);
+       clock_get_system_microtime(&sys, &microsys);
+
+#if ENABLE_LEGACY_CLOCK_CODE
+       /*
+        * If the platform has a monotonic clock, use kern.monotonicclock_usecs
+        * to estimate the sleep/wake time, otherwise use the UTC time to estimate
+        * the sleep time.
+        */
+       size = sizeof(monotonic_time);
+       if (kernel_sysctlbyname("kern.monotonicclock_usecs", &monotonic_time, &size, NULL, 0) != 0) {
+               has_monotonic_clock = 0;
+               os_log(OS_LOG_DEFAULT, "%s system does not have monotonic clock\n", __func__);
+       } else {
+               has_monotonic_clock = 1;
+               monotonic_usec_total = monotonic_time.monotonic_time_usec;
+               absolutetime_to_microtime(monotonic_time.mach_time, &sys2, &microsys2);
+               os_log(OS_LOG_DEFAULT, "%s system has monotonic clock\n", __func__);
+       }
+#endif /* ENABLE_LEGACY_CLOCK_CODE */
 
        s = splclock();
        clock_lock();
 
        commpage_disable_timestamp();
 
-       /*
-        *      Calculate the new calendar epoch based on
-        *      the platform clock and the system clock.
-        */
-       clock_get_system_microtime(&sys, &microsys);
        utc_offset_secs = secs;
        utc_offset_microsecs = microsecs;
 
-#if DEVELOPMENT || DEBUG
-       last_utc_sec = secs;
-       last_utc_usec = microsecs;
-       last_sys_sec = sys;
-       last_sys_usec = microsys;
-       if (secs > max_utc_sec)
-               max_utc_sec = secs;
-#endif
-
        /*
         * We normally expect the UTC clock to be always-on and produce
         * greater readings than the tick counter.  There may be corner cases
@@ -923,13 +1083,16 @@ clock_initialize_calendar(void)
         * in-line with the tick counter measurements as a best effort in that case.
         */
        if ((sys > secs) || ((sys == secs) && (microsys > microsecs))) {
+               os_log(OS_LOG_DEFAULT, "%s WARNING: UTC time is less than sys time, (%lu s %d u) UTC (%lu s %d u) sys\n",
+                   __func__, (unsigned long) secs, microsecs, (unsigned long)sys, microsys);
                secs = utc_offset_secs = sys;
                microsecs = utc_offset_microsecs = microsys;
        }
 
+       // UTC - sys
        // This macro stores the subtraction result in utc_offset_secs and utc_offset_microsecs
        TIME_SUB(utc_offset_secs, sys, utc_offset_microsecs, microsys, USEC_PER_SEC);
-
+       // This function converts utc_offset_secs and utc_offset_microsecs to bintime
        clock2bintime(&utc_offset_secs, &utc_offset_microsecs, &bt);
 
        /*
@@ -952,110 +1115,318 @@ clock_initialize_calendar(void)
        clock_calend.s_scale_ns = NSEC_PER_SEC;
        clock_calend.s_adj_nsx = 0;
 
-       clock_calend.basesleep = bt;
+#if ENABLE_LEGACY_CLOCK_CODE
+       if (has_monotonic_clock) {
+               monotonic_sec = monotonic_usec_total / (clock_sec_t)USEC_PER_SEC;
+               monotonic_usec = monotonic_usec_total % (clock_usec_t)USEC_PER_SEC;
+
+               // monotonic clock - sys
+               // This macro stores the subtraction result in monotonic_sec and monotonic_usec
+               TIME_SUB(monotonic_sec, sys2, monotonic_usec, microsys2, USEC_PER_SEC);
+               clock2bintime(&monotonic_sec, &monotonic_usec, &monotonic_bt);
 
+               // set basesleep to the difference between monotonic clock and sys
+               clock_calend.basesleep = monotonic_bt;
+       }
+#endif /* ENABLE_LEGACY_CLOCK_CODE */
        commpage_update_mach_continuous_time(mach_absolutetime_asleep);
 
+#if DEVELOPMENT || DEBUG
+       struct clock_calend clock_calend_cp = clock_calend;
+#endif
+
        clock_unlock();
        splx(s);
 
+       print_all_clock_variables(__func__, &secs, &microsecs, &sys, &microsys, &clock_calend_cp);
+
        /*
         *      Send host notifications.
         */
        host_notify_calendar_change();
-       
+
 #if CONFIG_DTRACE
        clock_track_calend_nowait();
 #endif
 }
 
+#if HAS_CONTINUOUS_HWCLOCK
 
-void
-clock_wakeup_calendar(void)
+static void
+scale_sleep_time(void)
 {
-       clock_sec_t             sys;  // sleepless time since boot in seconds
-       clock_sec_t             secs; // Current UTC time
-       clock_usec_t            microsys;  
-       clock_usec_t            microsecs; 
-       spl_t                   s;
-       struct bintime          utc_offset_bt, last_sleep_bt;
+       /* Apply the current NTP frequency adjustment to the time slept.
+        * The frequency adjustment remains stable between calls to ntp_adjtime(),
+        * and should thus provide a reasonable approximation of the total adjustment
+        * required for the time slept. */
+       struct bintime sleep_time;
+       uint64_t tick_scale_x, s_scale_ns;
+       int64_t s_adj_nsx;
+       int64_t sleep_adj = ntp_get_freq();
+       if (sleep_adj) {
+               get_scale_factors_from_adj(sleep_adj, &tick_scale_x, &s_scale_ns, &s_adj_nsx);
+               sleep_time = scale_delta(mach_absolutetime_last_sleep, tick_scale_x, s_scale_ns, s_adj_nsx);
+       } else {
+               tick_scale_x = (uint64_t)1 << 63;
+               tick_scale_x /= ticks_per_sec;
+               tick_scale_x *= 2;
+               sleep_time.sec = mach_absolutetime_last_sleep / ticks_per_sec;
+               sleep_time.frac = (mach_absolutetime_last_sleep % ticks_per_sec) * tick_scale_x;
+       }
+       bintime_add(&clock_calend.offset, &sleep_time);
+       bintime_add(&clock_calend.bintime, &sleep_time);
+}
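
In the fallback branch above, the identity scale 2^64 / ticks_per_sec cannot be computed directly in 64-bit arithmetic (2^64 itself overflows), so it is derived as (2^63 / ticks_per_sec) * 2:

        /* Sketch: identity conversion from ticks to a 64.64 fraction. */
        uint64_t tick_scale_x = (((uint64_t)1 << 63) / ticks_per_sec) * 2;
        /* sleep_time.sec  = ticks / ticks_per_sec;                  */
        /* sleep_time.frac = (ticks % ticks_per_sec) * tick_scale_x; */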
 
-       PEGetUTCTimeOfDay(&secs, &microsecs);
+static void
+clock_wakeup_calendar_hwclock(void)
+{
+       spl_t   s;
 
        s = splclock();
        clock_lock();
 
        commpage_disable_timestamp();
 
-       /*
-        * Calculate the new calendar epoch based on
-        * the platform clock and the system clock.
-        */
-       clock_get_system_microtime(&sys, &microsys);
+       uint64_t abstime = mach_absolute_time();
+       uint64_t total_sleep_time = mach_continuous_time() - abstime;
 
-#if DEVELOPMENT || DEBUG
-       last_utc_sec = secs;
-       last_utc_usec = microsecs;
-       last_sys_sec = sys;
-       last_sys_usec = microsys;
-       if (secs > max_utc_sec)
-               max_utc_sec = secs;
+       mach_absolutetime_last_sleep = total_sleep_time - mach_absolutetime_asleep;
+       mach_absolutetime_asleep = total_sleep_time;
+
+       scale_sleep_time();
+
+       KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_EPOCH_CHANGE),
+           (uintptr_t)mach_absolutetime_last_sleep,
+           (uintptr_t)mach_absolutetime_asleep,
+           (uintptr_t)(mach_absolutetime_last_sleep >> 32),
+           (uintptr_t)(mach_absolutetime_asleep >> 32));
+
+       commpage_update_mach_continuous_time(mach_absolutetime_asleep);
+#if HIBERNATION
+       commpage_update_mach_continuous_time_hw_offset(hwclock_conttime_offset);
 #endif
+       adjust_cont_time_thread_calls();
+
+       clock_unlock();
+       splx(s);
+
+       host_notify_calendar_change();
+
+#if CONFIG_DTRACE
+       clock_track_calend_nowait();
+#endif
+}
+
+#endif /* HAS_CONTINUOUS_HWCLOCK */
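
The arithmetic in clock_wakeup_calendar_hwclock() reduces sleep accounting to two subtractions, since continuous time includes time asleep while absolute time does not:

        /* total_sleep = mach_continuous_time() - mach_absolute_time();       */
        /* last_sleep  = total_sleep - mach_absolutetime_asleep;   this wake  */
        /* asleep      = total_sleep;                        cumulative total */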
+
+#if ENABLE_LEGACY_CLOCK_CODE
+
+static void
+clock_wakeup_calendar_legacy(void)
+{
+       clock_sec_t             wake_sys_sec;
+       clock_usec_t            wake_sys_usec;
+       clock_sec_t             wake_sec;
+       clock_usec_t            wake_usec;
+       clock_sec_t             wall_time_sec;
+       clock_usec_t            wall_time_usec;
+       clock_sec_t             diff_sec;
+       clock_usec_t            diff_usec;
+       clock_sec_t             var_s;
+       clock_usec_t            var_us;
+       spl_t                   s;
+       struct bintime          bt, last_sleep_bt;
+       struct latched_time     monotonic_time;
+       uint64_t                monotonic_usec_total;
+       uint64_t                wake_abs;
+       size_t                  size;
 
        /*
-        * We normally expect the UTC clock to be always-on and produce
-        * greater readings than the tick counter.  There may be corner cases
-        * due to differing clock resolutions (UTC clock is likely lower) and
-        * errors reading the UTC clock (some implementations return 0 on error)
-        * in which that doesn't hold true.  Bring the UTC measurements in-line
-        * with the tick counter measurements as a best effort in that case.
+        * If the platform has a monotonic clock, use it to
+        * compute the sleep time. The monotonic clock has no modifiable
+        * offset, so neither the kernel nor userspace can change its time;
+        * it can only increase monotonically.
+        * During sleep mach_absolute_time (sys time) does not tick,
+        * so the sleep time is the difference between the current
+        * (monotonic - sys) value and the one stored at the last wake:
+        *
+        * basesleep = (monotonic - sys) ---> computed at last wake
+        * sleep_time = (monotonic - sys) - basesleep
+        *
+        * If the platform does not support a monotonic clock, we set the wall
+        * time to whatever the UTC clock returns.
+        * Setting the wall time to UTC time implies that we lose all the
+        * adjustments made while awake through adjtime/ntp_adjtime.
+        * The UTC time is the monotonic clock + an offset that can be set
+        * by the kernel.
+        * The time slept in this case is the difference between wall time
+        * and UTC at wake.
+        *
+        * IMPORTANT:
+        * We assume that only the kernel sets the offset of the PMU/RTC, and
+        * that it does so only through the settimeofday interface.
         */
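A worked example of the formulas above: if basesleep recorded (monotonic - sys) = 100 s at the last wake, and this wake observes monotonic = 1000 s with sys = 870 s, then (monotonic - sys) = 130 s, so the time slept this cycle is 130 - 100 = 30 s.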
-       if ((sys > secs) || ((sys == secs) && (microsys > microsecs))) {
-               secs = sys;
-               microsecs = microsys;
+       if (has_monotonic_clock) {
+#if DEVELOPMENT || DEBUG
+               /*
+                * Just for debugging, get the wake UTC time.
+                */
+               PEGetUTCTimeOfDay(&var_s, &var_us);
+#endif
+               /*
+                * Get monotonic time with corresponding sys time
+                */
+               size = sizeof(monotonic_time);
+               if (kernel_sysctlbyname("kern.monotonicclock_usecs", &monotonic_time, &size, NULL, 0) != 0) {
+                       panic("%s: could not call kern.monotonicclock_usecs", __func__);
+               }
+               wake_abs = monotonic_time.mach_time;
+               absolutetime_to_microtime(wake_abs, &wake_sys_sec, &wake_sys_usec);
+
+               monotonic_usec_total = monotonic_time.monotonic_time_usec;
+               wake_sec = monotonic_usec_total / (clock_sec_t)USEC_PER_SEC;
+               wake_usec = monotonic_usec_total % (clock_usec_t)USEC_PER_SEC;
+       } else {
+               /*
+                * Get UTC time and corresponding sys time
+                */
+               PEGetUTCTimeOfDay(&wake_sec, &wake_usec);
+               wake_abs = mach_absolute_time();
+               absolutetime_to_microtime(wake_abs, &wake_sys_sec, &wake_sys_usec);
        }
 
-       // This macro stores the subtraction result in secs and microsecs
-       TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);
-       clock2bintime(&secs, &microsecs, &utc_offset_bt);
+#if DEVELOPMENT || DEBUG
+       os_log(OS_LOG_DEFAULT, "time at wake %lu s %d u from %s clock, abs %llu\n", (unsigned long)wake_sec, wake_usec, (has_monotonic_clock)?"monotonic":"UTC", wake_abs);
+       if (has_monotonic_clock) {
+               os_log(OS_LOG_DEFAULT, "UTC time %lu s %d u\n", (unsigned long)var_s, var_us);
+       }
+#endif /* DEVELOPMENT || DEBUG */
+
+       s = splclock();
+       clock_lock();
+
+       commpage_disable_timestamp();
+
+#if DEVELOPMENT || DEBUG
+       struct clock_calend clock_calend_cp1 = clock_calend;
+#endif /* DEVELOPMENT || DEBUG */
 
        /*
-        * Safety belt: the UTC clock will likely have a lower resolution than the tick counter.
-        * It's also possible that the device didn't fully transition to the powered-off state on
-        * the most recent sleep, so the tick counter may not have reset or may have only briefly
-        * tured off.  In that case it's possible for the difference between the UTC clock and the
-        * tick counter to be less than the previously recorded value in clock.calend.basesleep.
-        * In that case simply record that we slept for 0 ticks.
-        */ 
-       if ((utc_offset_bt.sec > clock_calend.basesleep.sec) ||
-           ((utc_offset_bt.sec == clock_calend.basesleep.sec) && (utc_offset_bt.frac > clock_calend.basesleep.frac))) {
-
-               last_sleep_bt = utc_offset_bt;
-               bintime_sub(&last_sleep_bt, &clock_calend.basesleep);
-               clock_calend.basesleep = utc_offset_bt;
-
-               bintime2absolutetime(&last_sleep_bt, &mach_absolutetime_last_sleep);
-               mach_absolutetime_asleep += mach_absolutetime_last_sleep;
-
-               bintime_add(&clock_calend.offset, &last_sleep_bt);
-               bintime_add(&clock_calend.bintime, &last_sleep_bt);
-       } else
+        * We normally expect the UTC/monotonic clock to be always-on and produce
+        * greater readings than the sys counter.  There may be corner cases
+        * due to differing clock resolutions (UTC/monotonic clock is likely lower)
+        * and errors reading the UTC/monotonic clock (some implementations return 0
+        * on error) in which that doesn't hold true.
+        */
+       if ((wake_sys_sec > wake_sec) || ((wake_sys_sec == wake_sec) && (wake_sys_usec > wake_usec))) {
+               os_log_error(OS_LOG_DEFAULT, "WARNING: %s clock is less than sys clock at wake: %lu s %d u vs %lu s %d u, defaulting sleep time to zero\n", (has_monotonic_clock)?"monotonic":"UTC", (unsigned long)wake_sec, wake_usec, (unsigned long)wake_sys_sec, wake_sys_usec);
                mach_absolutetime_last_sleep = 0;
+               goto done;
+       }
 
-       KERNEL_DEBUG_CONSTANT(
-                 MACHDBG_CODE(DBG_MACH_CLOCK,MACH_EPOCH_CHANGE) | DBG_FUNC_NONE,
-                 (uintptr_t) mach_absolutetime_last_sleep,
-                 (uintptr_t) mach_absolutetime_asleep,
-                 (uintptr_t) (mach_absolutetime_last_sleep >> 32),
-                 (uintptr_t) (mach_absolutetime_asleep >> 32),
-                 0);
+       if (has_monotonic_clock) {
+               /*
+                * compute the difference monotonic - sys;
+                * we already checked that monotonic time is
+                * greater than sys.
+                */
+               diff_sec = wake_sec;
+               diff_usec = wake_usec;
+               // This macro stores the subtraction result in diff_sec and diff_usec
+               TIME_SUB(diff_sec, wake_sys_sec, diff_usec, wake_sys_usec, USEC_PER_SEC);
+               // This function converts diff_sec and diff_usec to bintime
+               clock2bintime(&diff_sec, &diff_usec, &bt);
+
+               /*
+                * Safety belt: the monotonic clock will likely have a lower resolution than the sys counter.
+                * It's also possible that the device didn't fully transition to the powered-off state on
+                * the most recent sleep, so the sys counter may not have reset or may have only briefly
+                * turned off.  In that case it's possible for the difference between the monotonic clock and the
+                * sys counter to be less than the previously recorded value in clock.calend.basesleep.
+                * In that case simply record that we slept for 0 ticks.
+                */
+               if ((bt.sec > clock_calend.basesleep.sec) ||
+                   ((bt.sec == clock_calend.basesleep.sec) && (bt.frac > clock_calend.basesleep.frac))) {
+                       //last_sleep is the difference between (current monotonic - abs) and (last wake monotonic - abs)
+                       last_sleep_bt = bt;
+                       bintime_sub(&last_sleep_bt, &clock_calend.basesleep);
+
+                       bintime2absolutetime(&last_sleep_bt, &mach_absolutetime_last_sleep);
+                       mach_absolutetime_asleep += mach_absolutetime_last_sleep;
+
+                       //set basesleep to current monotonic - abs
+                       clock_calend.basesleep = bt;
+
+                       //update wall time
+                       bintime_add(&clock_calend.offset, &last_sleep_bt);
+                       bintime_add(&clock_calend.bintime, &last_sleep_bt);
+
+                       bintime2usclock(&last_sleep_bt, &var_s, &var_us);
+                       os_log(OS_LOG_DEFAULT, "time_slept (%lu s %d u)\n", (unsigned long) var_s, var_us);
+               } else {
+                       bintime2usclock(&clock_calend.basesleep, &var_s, &var_us);
+                       os_log_error(OS_LOG_DEFAULT, "WARNING: last wake monotonic-sys time (%lu s %d u) is greater than current monotonic-sys time (%lu s %d u), defaulting sleep time to zero\n", (unsigned long) var_s, var_us, (unsigned long) diff_sec, diff_usec);
+
+                       mach_absolutetime_last_sleep = 0;
+               }
+       } else {
+               /*
+                * set the wall time to UTC value
+                */
+               bt = get_scaled_time(wake_abs);
+               bintime_add(&bt, &clock_calend.bintime);
+               bintime2usclock(&bt, &wall_time_sec, &wall_time_usec);
+
+               if (wall_time_sec > wake_sec || (wall_time_sec == wake_sec && wall_time_usec > wake_usec)) {
+                       os_log(OS_LOG_DEFAULT, "WARNING: wall time (%lu s %d u) is greater than current UTC time (%lu s %d u), defaulting sleep time to zero\n", (unsigned long) wall_time_sec, wall_time_usec, (unsigned long) wake_sec, wake_usec);
+
+                       mach_absolutetime_last_sleep = 0;
+               } else {
+                       diff_sec = wake_sec;
+                       diff_usec = wake_usec;
+                       // This macro stores the subtraction result in diff_sec and diff_usec
+                       TIME_SUB(diff_sec, wall_time_sec, diff_usec, wall_time_usec, USEC_PER_SEC);
+                       // This function converts diff_sec and diff_usec to bintime
+                       clock2bintime(&diff_sec, &diff_usec, &bt);
+
+                       //time slept in this case is the difference between PMU/RTC and wall time
+                       last_sleep_bt = bt;
+
+                       bintime2absolutetime(&last_sleep_bt, &mach_absolutetime_last_sleep);
+                       mach_absolutetime_asleep += mach_absolutetime_last_sleep;
+
+                       //update wall time
+                       bintime_add(&clock_calend.offset, &last_sleep_bt);
+                       bintime_add(&clock_calend.bintime, &last_sleep_bt);
+
+                       bintime2usclock(&last_sleep_bt, &var_s, &var_us);
+                       os_log(OS_LOG_DEFAULT, "time_slept (%lu s %d u)\n", (unsigned long)var_s, var_us);
+               }
+       }
+done:
+       KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_EPOCH_CHANGE),
+           (uintptr_t)mach_absolutetime_last_sleep,
+           (uintptr_t)mach_absolutetime_asleep,
+           (uintptr_t)(mach_absolutetime_last_sleep >> 32),
+           (uintptr_t)(mach_absolutetime_asleep >> 32));
 
        commpage_update_mach_continuous_time(mach_absolutetime_asleep);
        adjust_cont_time_thread_calls();
 
+#if DEVELOPMENT || DEBUG
+       struct clock_calend clock_calend_cp = clock_calend;
+#endif
+
        clock_unlock();
        splx(s);
 
+#if DEVELOPMENT || DEBUG
+       if (g_should_log_clock_adjustments) {
+               print_all_clock_variables("clock_wakeup_calendar: BEFORE", NULL, NULL, NULL, NULL, &clock_calend_cp1);
+               print_all_clock_variables("clock_wakeup_calendar: AFTER", NULL, NULL, NULL, NULL, &clock_calend_cp);
+       }
+#endif /* DEVELOPMENT || DEBUG */
+
        host_notify_calendar_change();
 
 #if CONFIG_DTRACE
@@ -1063,7 +1434,26 @@ clock_wakeup_calendar(void)
 #endif
 }
 
+#endif /* ENABLE_LEGACY_CLOCK_CODE */
 
+void
+clock_wakeup_calendar(void)
+{
+#if HAS_CONTINUOUS_HWCLOCK
+#if HIBERNATION_USES_LEGACY_CLOCK
+       if (gIOHibernateState) {
+               // if we're resuming from hibernation, we have to take the legacy wakeup path
+               return clock_wakeup_calendar_legacy();
+       }
+#endif /* HIBERNATION_USES_LEGACY_CLOCK */
+       // use the hwclock wakeup path
+       return clock_wakeup_calendar_hwclock();
+#elif ENABLE_LEGACY_CLOCK_CODE
+       return clock_wakeup_calendar_legacy();
+#else
+#error "can't determine which clock code to run"
+#endif
+}
 
 /*
  *     clock_get_boottime_nanotime:
@@ -1072,10 +1462,10 @@ clock_wakeup_calendar(void)
  */
 void
 clock_get_boottime_nanotime(
-       clock_sec_t                     *secs,
-       clock_nsec_t            *nanosecs)
+       clock_sec_t                     *secs,
+       clock_nsec_t            *nanosecs)
 {
-       spl_t   s;
+       spl_t   s;
 
        s = splclock();
        clock_lock();
@@ -1094,10 +1484,10 @@ clock_get_boottime_nanotime(
  */
 void
 clock_get_boottime_microtime(
-       clock_sec_t                     *secs,
-       clock_usec_t            *microsecs)
+       clock_sec_t                     *secs,
+       clock_usec_t            *microsecs)
 {
-       spl_t   s;
+       spl_t   s;
 
        s = splclock();
        clock_lock();
@@ -1115,8 +1505,8 @@ clock_get_boottime_microtime(
  */
 static void
 mach_wait_until_continue(
-       __unused void   *parameter,
-       wait_result_t   wresult)
+       __unused void   *parameter,
+       wait_result_t   wresult)
 {
        thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
        /*NOTREACHED*/
@@ -1128,32 +1518,35 @@ mach_wait_until_continue(
  * Parameters:    args->deadline          Amount of time to wait
  *
  * Returns:        0                      Success
- *                !0                      Not success           
+ *                !0                      Not success
  *
  */
 kern_return_t
 mach_wait_until_trap(
-       struct mach_wait_until_trap_args        *args)
+       struct mach_wait_until_trap_args        *args)
 {
-       uint64_t                deadline = args->deadline;
-       wait_result_t   wresult;
+       uint64_t                deadline = args->deadline;
+       wait_result_t   wresult;
+
 
        wresult = assert_wait_deadline_with_leeway((event_t)mach_wait_until_trap, THREAD_ABORTSAFE,
-                                                  TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
-       if (wresult == THREAD_WAITING)
+           TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
+       if (wresult == THREAD_WAITING) {
                wresult = thread_block(mach_wait_until_continue);
+       }
 
-       return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
+       return (wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS;
 }
 
 void
 clock_delay_until(
-       uint64_t                deadline)
+       uint64_t                deadline)
 {
-       uint64_t                now = mach_absolute_time();
+       uint64_t                now = mach_absolute_time();
 
-       if (now >= deadline)
+       if (now >= deadline) {
                return;
+       }
 
        _clock_delay_until_deadline(deadline - now, deadline);
 }
@@ -1164,8 +1557,8 @@ clock_delay_until(
  */
 void
 _clock_delay_until_deadline(
-       uint64_t                interval,
-       uint64_t                deadline)
+       uint64_t                interval,
+       uint64_t                deadline)
 {
        _clock_delay_until_deadline_with_leeway(interval, deadline, 0);
 }
@@ -1176,17 +1569,17 @@ _clock_delay_until_deadline(
  */
 void
 _clock_delay_until_deadline_with_leeway(
-       uint64_t                interval,
-       uint64_t                deadline,
-       uint64_t                leeway)
+       uint64_t                interval,
+       uint64_t                deadline,
+       uint64_t                leeway)
 {
-
-       if (interval == 0)
+       if (interval == 0) {
                return;
+       }
 
-       if (    ml_delay_should_spin(interval)  ||
-                       get_preemption_level() != 0                             ||
-                       ml_get_interrupts_enabled() == FALSE    ) {
+       if (ml_delay_should_spin(interval) ||
+           get_preemption_level() != 0 ||
+           ml_get_interrupts_enabled() == FALSE) {
                machine_delay_until(interval, deadline);
        } else {
                /*
@@ -1206,10 +1599,10 @@ _clock_delay_until_deadline_with_leeway(
 
 void
 delay_for_interval(
-       uint32_t                interval,
-       uint32_t                scale_factor)
+       uint32_t                interval,
+       uint32_t                scale_factor)
 {
-       uint64_t                abstime;
+       uint64_t                abstime;
 
        clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
 
@@ -1218,12 +1611,12 @@ delay_for_interval(
 
 void
 delay_for_interval_with_leeway(
-       uint32_t                interval,
-       uint32_t                leeway,
-       uint32_t                scale_factor)
+       uint32_t                interval,
+       uint32_t                leeway,
+       uint32_t                scale_factor)
 {
-       uint64_t                abstime_interval;
-       uint64_t                abstime_leeway;
+       uint64_t                abstime_interval;
+       uint64_t                abstime_leeway;
 
        clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime_interval);
        clock_interval_to_absolutetime_interval(leeway, scale_factor, &abstime_leeway);
@@ -1233,7 +1626,7 @@ delay_for_interval_with_leeway(
 
 void
 delay(
-       int             usec)
+       int             usec)
 {
        delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
 }
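The scale_factor parameter is simply "nanoseconds per unit of interval", so the standard NSEC_PER_* constants select the unit; delay() itself is the legacy microseconds entry point. A few illustrative calls, assuming an in-kernel caller:

	delay_for_interval(5, NSEC_PER_MSEC);		/* 5 ms: 5 x 1,000,000 ns */
	delay_for_interval(5000, NSEC_PER_USEC);	/* also 5 ms */
	delay(5000);					/* delay() takes microseconds */

	/* With leeway, the wakeup may be coalesced with other timers: */
	delay_for_interval_with_leeway(5, 1, NSEC_PER_MSEC);	/* 5 ms, up to 1 ms leeway */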
@@ -1243,115 +1636,158 @@ delay(
  */
 void
 clock_interval_to_deadline(
-       uint32_t                        interval,
-       uint32_t                        scale_factor,
-       uint64_t                        *result)
+       uint32_t                        interval,
+       uint32_t                        scale_factor,
+       uint64_t                        *result)
 {
-       uint64_t        abstime;
+       uint64_t        abstime;
 
        clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
 
-       *result = mach_absolute_time() + abstime;
+       if (os_add_overflow(mach_absolute_time(), abstime, result)) {
+               *result = UINT64_MAX;
+       }
+}
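This and the following deadline constructors all switch from an unchecked `now + interval` to os_add_overflow(), the <os/overflow.h> wrapper over the compiler's checked-arithmetic builtins, so that an oversized interval saturates to UINT64_MAX (effectively "never") instead of wrapping into the past and firing immediately. The pattern in isolation, as a sketch:

	#include <os/overflow.h>

	/* Saturating 64-bit add: clamp to UINT64_MAX on overflow rather
	 * than wrapping. A wrapped deadline would compare as already
	 * expired. */
	static uint64_t
	saturating_add_u64(uint64_t a, uint64_t b)
	{
		uint64_t sum;
		if (os_add_overflow(a, b, &sum)) {
			sum = UINT64_MAX;
		}
		return sum;
	}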
+
+void
+nanoseconds_to_deadline(
+       uint64_t                        interval,
+       uint64_t                        *result)
+{
+       uint64_t        abstime;
+
+       nanoseconds_to_absolutetime(interval, &abstime);
+
+       if (os_add_overflow(mach_absolute_time(), abstime, result)) {
+               *result = UINT64_MAX;
+       }
 }
 
 void
 clock_absolutetime_interval_to_deadline(
-       uint64_t                        abstime,
-       uint64_t                        *result)
+       uint64_t                        abstime,
+       uint64_t                        *result)
 {
-       *result = mach_absolute_time() + abstime;
+       if (os_add_overflow(mach_absolute_time(), abstime, result)) {
+               *result = UINT64_MAX;
+       }
 }
 
 void
 clock_continuoustime_interval_to_deadline(
-       uint64_t                        conttime,
-       uint64_t                        *result)
+       uint64_t                        conttime,
+       uint64_t                        *result)
 {
-       *result = mach_continuous_time() + conttime;
+       if (os_add_overflow(mach_continuous_time(), conttime, result)) {
+               *result = UINT64_MAX;
+       }
 }
 
 void
 clock_get_uptime(
-       uint64_t        *result)
+       uint64_t        *result)
 {
        *result = mach_absolute_time();
 }
 
 void
 clock_deadline_for_periodic_event(
-       uint64_t                        interval,
-       uint64_t                        abstime,
-       uint64_t                        *deadline)
+       uint64_t                        interval,
+       uint64_t                        abstime,
+       uint64_t                        *deadline)
 {
        assert(interval != 0);
 
-       *deadline += interval;
+       // *deadline += interval;
+       if (os_add_overflow(*deadline, interval, deadline)) {
+               *deadline = UINT64_MAX;
+       }
 
        if (*deadline <= abstime) {
-               *deadline = abstime + interval;
-               abstime = mach_absolute_time();
+               // *deadline = abstime + interval;
+               if (os_add_overflow(abstime, interval, deadline)) {
+                       *deadline = UINT64_MAX;
+               }
 
-               if (*deadline <= abstime)
-                       *deadline = abstime + interval;
+               abstime = mach_absolute_time();
+               if (*deadline <= abstime) {
+                       // *deadline = abstime + interval;
+                       if (os_add_overflow(abstime, interval, deadline)) {
+                               *deadline = UINT64_MAX;
+                       }
+               }
        }
 }
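A hypothetical periodic caller, to make the contract concrete: the routine advances *deadline by one interval, and if that still lands at or before the caller's "now", it re-anchors to now, so a caller that oversleeps several periods skips them rather than firing back-to-back to catch up. do_periodic_work() and the 10 ms period are assumptions for illustration:

	uint64_t interval, deadline;

	clock_interval_to_absolutetime_interval(10, NSEC_PER_MSEC, &interval);
	clock_interval_to_deadline(10, NSEC_PER_MSEC, &deadline);

	for (;;) {
		clock_delay_until(deadline);
		do_periodic_work();		/* assumed callback */

		/* Next period; missed periods are skipped, not replayed. */
		clock_deadline_for_periodic_event(interval,
		    mach_absolute_time(), &deadline);
	}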
 
 uint64_t
 mach_continuous_time(void)
 {
-       while(1) {      
+#if HIBERNATION && HAS_CONTINUOUS_HWCLOCK
+       return ml_get_hwclock() + hwclock_conttime_offset;
+#elif HAS_CONTINUOUS_HWCLOCK
+       return ml_get_hwclock();
+#else
+       while (1) {
                uint64_t read1 = mach_absolutetime_asleep;
                uint64_t absolute = mach_absolute_time();
                OSMemoryBarrier();
                uint64_t read2 = mach_absolutetime_asleep;
 
-               if(__builtin_expect(read1 == read2, 1)) {
+               if (__builtin_expect(read1 == read2, 1)) {
                        return absolute + read1;
                }
        }
+#endif
 }
 
 uint64_t
 mach_continuous_approximate_time(void)
 {
-       while(1) {
+#if HAS_CONTINUOUS_HWCLOCK
+       return mach_continuous_time();
+#else
+       while (1) {
                uint64_t read1 = mach_absolutetime_asleep;
                uint64_t absolute = mach_approximate_time();
                OSMemoryBarrier();
                uint64_t read2 = mach_absolutetime_asleep;
 
-               if(__builtin_expect(read1 == read2, 1)) {
+               if (__builtin_expect(read1 == read2, 1)) {
                        return absolute + read1;
                }
        }
+#endif
 }
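On platforms without a continuous hardware clock, both routines use the same lock-free retry: read mach_absolutetime_asleep, read the clock, then re-read the sleep offset across a barrier, looping if a sleep/wake transition moved it in between. User space sees the same pair of clocks via <mach/mach_time.h>; a small sketch of the relationship:

	#include <mach/mach_time.h>

	/* mach_absolute_time() excludes time spent asleep where the
	 * underlying clock stops; mach_continuous_time() includes it,
	 * so their difference, sampled back-to-back, approximates the
	 * accumulated sleep time. */
	uint64_t abs_now  = mach_absolute_time();
	uint64_t cont_now = mach_continuous_time();
	uint64_t asleep   = cont_now - abs_now;	/* ~mach_absolutetime_asleep */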
 
 /*
  * continuoustime_to_absolutetime
  * Must be called with interrupts disabled
  * Returned value is only valid until the next update to
- * mach_continuous_time 
+ * mach_continuous_time
  */
 uint64_t
-continuoustime_to_absolutetime(uint64_t conttime) {
-       if (conttime <= mach_absolutetime_asleep)
+continuoustime_to_absolutetime(uint64_t conttime)
+{
+       if (conttime <= mach_absolutetime_asleep) {
                return 0;
-       else
+       } else {
                return conttime - mach_absolutetime_asleep;
+       }
 }
 
 /*
  * absolutetime_to_continuoustime
  * Must be called with interrupts disabled
  * Returned value is only valid until the next update to
- * mach_continuous_time 
+ * mach_continuous_time
  */
 uint64_t
-absolutetime_to_continuoustime(uint64_t abstime) {
+absolutetime_to_continuoustime(uint64_t abstime)
+{
        return abstime + mach_absolutetime_asleep;
 }
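Both conversions are only coherent while mach_absolutetime_asleep cannot change underneath them, hence the interrupts-disabled requirement in the comments above. A hypothetical kernel caller might bracket them with the existing machine-routines primitive; `cont_deadline` is an assumed local:

	/* Hold off interrupts so a sleep/wake transition cannot update
	 * mach_absolutetime_asleep mid-conversion. */
	boolean_t istate = ml_set_interrupts_enabled(FALSE);
	uint64_t abs_deadline = continuoustime_to_absolutetime(cont_deadline);
	(void) ml_set_interrupts_enabled(istate);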
 
-#if    CONFIG_DTRACE
+#if     CONFIG_DTRACE
 
 /*
  * clock_get_calendar_nanotime_nowait
@@ -1367,23 +1803,23 @@ absolutetime_to_continuoustime(uint64_t abstime) {
  */
 void
 clock_get_calendar_nanotime_nowait(
-       clock_sec_t                     *secs,
-       clock_nsec_t            *nanosecs)
+       clock_sec_t                     *secs,
+       clock_nsec_t            *nanosecs)
 {
        int i = 0;
-       uint64_t                now;
+       uint64_t                now;
        struct unlocked_clock_calend stable;
        struct bintime bt;
 
        for (;;) {
-               stable = flipflop[i];           /* take snapshot */
+               stable = flipflop[i];           /* take snapshot */
 
                /*
                 * Use a barrier instruction to ensure atomicity.  We AND
                 * off the "in progress" bit to get the current generation
                 * count.
                 */
-               (void)hw_atomic_and(&stable.gen, ~(uint32_t)1);
+               os_atomic_andnot(&stable.gen, 1, relaxed);
 
                /*
                 * If an update _is_ in progress, the generation count will be
@@ -1391,8 +1827,9 @@ clock_get_calendar_nanotime_nowait(
                 * and if we caught it at a good time, it will be equal (and
                 * our snapshot is therefore stable).
                 */
-               if (flipflop[i].gen == stable.gen)
+               if (flipflop[i].gen == stable.gen) {
                        break;
+               }
 
                /* Switch to the other element of the flipflop, and try again. */
                i ^= 1;
@@ -1407,7 +1844,7 @@ clock_get_calendar_nanotime_nowait(
        bintime2nsclock(&bt, secs, nanosecs);
 }
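The flipflop is a seqlock in miniature: the writer below sets the low generation bit while an update is in flight, and this reader retries whenever its before/after generation reads disagree. A self-contained sketch of the technique in its classic single-slot form, not a line-for-line copy of the two-element flipflop used here:

	#include <stdatomic.h>
	#include <stdint.h>

	struct snap {
		_Atomic uint32_t gen;	/* low bit set => update in progress */
		uint64_t sec, frac;	/* payload */
	};

	/* Writer: odd generation marks the update window. */
	static void
	snap_write(struct snap *s, uint64_t sec, uint64_t frac)
	{
		atomic_fetch_or(&s->gen, 1);	/* enter: gen goes odd */
		s->sec = sec;
		s->frac = frac;
		atomic_fetch_add(&s->gen, 1);	/* leave: gen even, advanced */
	}

	/* Reader: retry until the generation was even and unchanged
	 * across the payload reads. */
	static void
	snap_read(struct snap *s, uint64_t *sec, uint64_t *frac)
	{
		uint32_t g1, g2;
		do {
			g1 = atomic_load(&s->gen) & ~1u;
			*sec = s->sec;
			*frac = s->frac;
			g2 = atomic_load(&s->gen);
		} while (g1 != g2);
	}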
 
-static void 
+static void
 clock_track_calend_nowait(void)
 {
        int i;
@@ -1421,7 +1858,7 @@ clock_track_calend_nowait(void)
                 * will flag an update in progress to an async caller trying
                 * to examine the contents.
                 */
-               (void)hw_atomic_or(&flipflop[i].gen, 1);
+               os_atomic_or(&flipflop[i].gen, 1, relaxed);
 
                flipflop[i].calend = tmp;
 
@@ -1431,9 +1868,8 @@ clock_track_calend_nowait(void)
                 * count after taking a copy while in progress, the count
                 * will be off by two.
                 */
-               (void)hw_atomic_add(&flipflop[i].gen, 1);
+               os_atomic_inc(&flipflop[i].gen, relaxed);
        }
 }
 
-#endif /* CONFIG_DTRACE */
-
+#endif  /* CONFIG_DTRACE */