/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)time.h	8.5 (Berkeley) 5/4/95
 */
#include <mach/mach_types.h>

#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <kern/host_notify.h>
#include <kern/thread_call.h>
#include <libkern/OSAtomic.h>

#include <IOKit/IOPlatformExpert.h>

#include <machine/commpage.h>
#include <machine/config.h>
#include <machine/machine_routines.h>

#include <mach/mach_traps.h>
#include <mach/mach_time.h>

#include <sys/kdebug.h>
#include <sys/timex.h>
#include <kern/arithmetic_128.h>
#if HIBERNATION && HAS_CONTINUOUS_HWCLOCK
// On ARM64, the hwclock keeps ticking across a normal S2R, so we use it to reset the
// system clock after a normal wake. However, on hibernation we cut power to the hwclock,
// so we have to add an offset to the hwclock to compute continuous_time after hibernate resume.
uint64_t hwclock_conttime_offset = 0;
#endif /* HIBERNATION && HAS_CONTINUOUS_HWCLOCK */
#if HIBERNATION_USES_LEGACY_CLOCK || !HAS_CONTINUOUS_HWCLOCK
#define ENABLE_LEGACY_CLOCK_CODE 1
#endif /* HIBERNATION_USES_LEGACY_CLOCK || !HAS_CONTINUOUS_HWCLOCK */

#if HIBERNATION_USES_LEGACY_CLOCK
#include <IOKit/IOHibernatePrivate.h>
#endif /* HIBERNATION_USES_LEGACY_CLOCK */

uint32_t hz_tick_interval = 1;
#if ENABLE_LEGACY_CLOCK_CODE
static uint64_t has_monotonic_clock = 0;
#endif /* ENABLE_LEGACY_CLOCK_CODE */

SIMPLE_LOCK_DECLARE(clock_lock, 0);

static LCK_GRP_DECLARE(settime_lock_grp, "settime");
static LCK_MTX_DECLARE(settime_lock, &settime_lock_grp);
#define clock_lock()	\
	simple_lock(&clock_lock, LCK_GRP_NULL)

#define clock_unlock()	\
	simple_unlock(&clock_lock)

#ifdef kdp_simple_lock_is_acquired
boolean_t
kdp_clock_is_locked()
{
	return kdp_simple_lock_is_acquired(&clock_lock);
}
#endif
static __inline void
bintime_addx(struct bintime *_bt, uint64_t _x)
{
	uint64_t _u;

	_u = _bt->frac;
	_bt->frac += _x;
	/* detect carry out of the fraction by the wrap-around */
	if (_u > _bt->frac) {
		_bt->sec++;
	}
}
static __inline void
bintime_subx(struct bintime *_bt, uint64_t _x)
{
	uint64_t _u;

	_u = _bt->frac;
	_bt->frac -= _x;
	/* detect borrow out of the fraction by the wrap-around */
	if (_u < _bt->frac) {
		_bt->sec--;
	}
}
static __inline void
bintime_addns(struct bintime *bt, uint64_t ns)
{
	bt->sec += ns / (uint64_t)NSEC_PER_SEC;
	ns = ns % (uint64_t)NSEC_PER_SEC;

	/* 18446744073 = int(2^64 / NSEC_PER_SEC) */
	ns = ns * (uint64_t)18446744073LL;
	bintime_addx(bt, ns);
}
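/*
 * Illustrative note (not part of the original source): bintime keeps time as
 * (sec, frac), where frac counts 1/2^64-ths of a second.  One nanosecond is
 * 2^64 / 10^9 ~= 18446744073.709... of those units, so the remainder that is
 * already reduced modulo NSEC_PER_SEC is converted by multiplying with the
 * truncated constant 18446744073; the truncation costs well under one
 * nanosecond per call.  A minimal stand-alone sketch of that conversion,
 * assuming only <stdint.h>-style types (the helper name is hypothetical):
 */
#if 0   /* example only, never compiled */
static uint64_t
example_ns_remainder_to_frac(uint64_t ns_remainder /* must be < NSEC_PER_SEC */)
{
	/* 18446744073 = floor(2^64 / 1e9); the product stays below 2^64 */
	return ns_remainder * 18446744073ULL;
}
#endif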
static __inline void
bintime_subns(struct bintime *bt, uint64_t ns)
{
	bt->sec -= ns / (uint64_t)NSEC_PER_SEC;
	ns = ns % (uint64_t)NSEC_PER_SEC;

	/* 18446744073 = int(2^64 / NSEC_PER_SEC) */
	ns = ns * (uint64_t)18446744073LL;
	bintime_subx(bt, ns);
}
static __inline void
bintime_addxns(struct bintime *bt, uint64_t a, int64_t xns)
{
	uint64_t uxns = (xns > 0)?(uint64_t)xns:(uint64_t)-xns;
	uint64_t ns = multi_overflow(a, uxns);

	if (xns > 0) {
		if (ns) {
			bintime_addns(bt, ns);
		}
		ns = (a * uxns) / (uint64_t)NSEC_PER_SEC;
		bintime_addx(bt, ns);
	} else {
		if (ns) {
			bintime_subns(bt, ns);
		}
		ns = (a * uxns) / (uint64_t)NSEC_PER_SEC;
		bintime_subx(bt, ns);
	}
}
static __inline void
bintime_add(struct bintime *_bt, const struct bintime *_bt2)
{
	uint64_t _u;

	_u = _bt->frac;
	_bt->frac += _bt2->frac;
	if (_u > _bt->frac) {
		_bt->sec++;
	}
	_bt->sec += _bt2->sec;
}
static __inline void
bintime_sub(struct bintime *_bt, const struct bintime *_bt2)
{
	uint64_t _u;

	_u = _bt->frac;
	_bt->frac -= _bt2->frac;
	if (_u < _bt->frac) {
		_bt->sec--;
	}
	_bt->sec -= _bt2->sec;
}
static __inline void
clock2bintime(const clock_sec_t *secs, const clock_usec_t *microsecs, struct bintime *_bt)
{
	_bt->sec = *secs;
	/* 18446744073709 = int(2^64 / 1000000) */
	_bt->frac = *microsecs * (uint64_t)18446744073709LL;
}
static __inline void
bintime2usclock(const struct bintime *_bt, clock_sec_t *secs, clock_usec_t *microsecs)
{
	*secs = _bt->sec;
	*microsecs = ((uint64_t)USEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32;
}
static __inline void
bintime2nsclock(const struct bintime *_bt, clock_sec_t *secs, clock_usec_t *nanosecs)
{
	*secs = _bt->sec;
	*nanosecs = ((uint64_t)NSEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32;
}
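/*
 * Illustrative note (not part of the original source): the two conversions
 * above keep only the top 32 bits of the 64-bit fraction, multiply by the
 * target unit (USEC_PER_SEC or NSEC_PER_SEC), and shift right by 32 again,
 * i.e. units ~= (frac >> 32) * unit / 2^32.  Dropping the low 32 bits of frac
 * discards less than 2^-32 of a second (~0.23 ns), which is below the
 * resolution of either output.  Sketch, assuming <stdint.h> types:
 */
#if 0   /* example only, never compiled */
static uint32_t
example_frac_to_usec(uint64_t frac)
{
	/* 1000000 * (2^32 - 1) fits comfortably in 64 bits */
	return (uint32_t)(((uint64_t)1000000 * (uint32_t)(frac >> 32)) >> 32);
}
#endif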
#if ENABLE_LEGACY_CLOCK_CODE
static __inline void
bintime2absolutetime(const struct bintime *_bt, uint64_t *abs)
{
	uint64_t nsec;

	nsec = (uint64_t) _bt->sec * (uint64_t)NSEC_PER_SEC + (((uint64_t)NSEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32);
	nanoseconds_to_absolutetime(nsec, abs);
}

struct latched_time {
	uint64_t monotonic_time_usec;
	uint64_t mach_time;
};

extern int
kernel_sysctlbyname(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen);

#endif /* ENABLE_LEGACY_CLOCK_CODE */
/*
 * Time of day (calendar) variables.
 *
 * Algorithm:
 *
 * TOD <- bintime + delta*scale
 *
 * where:
 *	bintime is a cumulative offset that includes boottime and the scaled time elapsed between boottime and the last scale update.
 *	delta is ticks elapsed since the last scale update.
 *	scale is computed according to an adjustment provided by ntp_kern.
 */
static struct clock_calend {
	uint64_t            s_scale_ns;   /* scale to apply for each second elapsed, it converts in ns */
	int64_t             s_adj_nsx;    /* additional adj to apply for each second elapsed, it is expressed in 64 bit frac of ns */
	uint64_t            tick_scale_x; /* scale to apply for each tick elapsed, it converts in 64 bit frac of s */
	uint64_t            offset_count; /* abs time from which to apply the current scales */
	struct bintime      offset;       /* cumulative offset expressed in (sec, 64 bits frac of a second) */
	struct bintime      bintime;      /* cumulative offset (it includes boottime) expressed in (sec, 64 bits frac of a second) */
	struct bintime      boottime;     /* boot time expressed in (sec, 64 bits frac of a second) */
#if ENABLE_LEGACY_CLOCK_CODE
	struct bintime      basesleep;
#endif /* ENABLE_LEGACY_CLOCK_CODE */
} clock_calend;

static uint64_t ticks_per_sec; /* ticks in a second (expressed in abs time) */
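/*
 * Illustrative sketch (not part of the original source) of the read path
 * described above: the calendar time at an arbitrary point is the cumulative
 * bintime plus the ticks elapsed since the last scale update, scaled with the
 * current factors.  Field and helper names are the ones declared in this
 * file; the wrapper name itself is hypothetical.
 */
#if 0   /* example only, never compiled */
static struct bintime
example_read_tod(void)
{
	uint64_t now = mach_absolute_time();
	uint64_t delta = now - clock_calend.offset_count;   /* ticks since last scale update */
	struct bintime tod = scale_delta(delta, clock_calend.tick_scale_x,
	    clock_calend.s_scale_ns, clock_calend.s_adj_nsx);
	bintime_add(&tod, &clock_calend.bintime);            /* add cumulative offset + boottime */
	return tod;
}
#endif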
#if DEVELOPMENT || DEBUG
extern int g_should_log_clock_adjustments;

static void print_all_clock_variables(const char*, clock_sec_t* pmu_secs, clock_usec_t* pmu_usec, clock_sec_t* sys_secs, clock_usec_t* sys_usec, struct clock_calend* calend_cp);
static void print_all_clock_variables_internal(const char *, struct clock_calend* calend_cp);
#else
#define print_all_clock_variables(...) do { } while (0)
#define print_all_clock_variables_internal(...) do { } while (0)
#endif
/*
 *	Unlocked calendar flipflop; this is used to track a clock_calend such
 *	that we can safely access a snapshot of a valid clock_calend structure
 *	without needing to take any locks to do it.
 *
 *	The trick is to use a generation count and set the low bit when it is
 *	being updated/read; by doing this, we guarantee, through use of the
 *	os_atomic functions, that the generation is incremented when the bit
 *	is cleared atomically (by using a 1 bit add).
 */
static struct unlocked_clock_calend {
	struct clock_calend calend;             /* copy of calendar */
	uint32_t            gen;                /* generation count */
} flipflop[2];

static void clock_track_calend_nowait(void);
void _clock_delay_until_deadline(uint64_t interval, uint64_t deadline);
void _clock_delay_until_deadline_with_leeway(uint64_t interval, uint64_t deadline, uint64_t leeway);

/* Boottime variables */
static uint64_t clock_boottime;
static uint32_t clock_boottime_usec;
#define TIME_ADD(rsecs, secs, rfrac, frac, unit)        \
	do {                                            \
		if (((rfrac) += (frac)) >= (unit)) {    \
			(rfrac) -= (unit);              \
			(rsecs) += 1;                   \
		}                                       \
		(rsecs) += (secs);                      \
	} while (0)

#define TIME_SUB(rsecs, secs, rfrac, frac, unit)        \
	do {                                            \
		if ((int)((rfrac) -= (frac)) < 0) {     \
			(rfrac) += (unit);              \
			(rsecs) -= 1;                   \
		}                                       \
		(rsecs) -= (secs);                      \
	} while (0)
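/*
 * Illustrative usage (not part of the original source): both macros operate
 * in place on a (seconds, fraction) pair and carry/borrow into the seconds
 * field, e.g. when computing a microsecond-resolution delta between two
 * timestamps.  The variable names below are hypothetical.
 */
#if 0   /* example only, never compiled */
	clock_sec_t  delta_s  = new_s;          /* start from the later time */
	clock_usec_t delta_us = new_us;
	TIME_SUB(delta_s, old_s, delta_us, old_us, USEC_PER_SEC);
	/* delta_s/delta_us now hold (new - old); a negative microsecond
	 * intermediate result borrows one second via the (int) cast. */
#endif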
/*
 *	clock_config:
 *
 *	Called once at boot to configure the clock subsystem.
 */
void
clock_config(void)
{
	nanoseconds_to_absolutetime((uint64_t)NSEC_PER_SEC, &ticks_per_sec);
}
/*
 *	clock_init:
 *
 *	Called on a processor each time started.
 */
/*
 *	clock_timebase_init:
 *
 *	Called by machine dependent code
 *	to initialize areas dependent on the
 *	timebase value.  May be called multiple
 *	times during start up.
 */
void
clock_timebase_init(void)
{
	uint64_t        abstime;

	nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
	hz_tick_interval = (uint32_t)abstime;

	sched_timebase_init();
}
/*
 *	mach_timebase_info_trap:
 *
 *	User trap returns timebase constant.
 */
kern_return_t
mach_timebase_info_trap(
	struct mach_timebase_info_trap_args *args)
{
	mach_vm_address_t           out_info_addr = args->info;
	mach_timebase_info_data_t   info = {};

	clock_timebase_info(&info);

	copyout((void *)&info, out_info_addr, sizeof(info));

	return KERN_SUCCESS;
}
/*
 *	clock_get_calendar_microtime:
 *
 *	Returns the current calendar value,
 *	microseconds as the fraction.
 */
void
clock_get_calendar_microtime(
	clock_sec_t             *secs,
	clock_usec_t            *microsecs)
{
	clock_get_calendar_absolute_and_microtime(secs, microsecs, NULL);
}
/*
 *	get_scale_factors_from_adj:
 *
 *	computes scale factors from the value given in adjustment.
 *
 *	Part of the code has been taken from tc_windup of FreeBSD
 *	written by Poul-Henning Kamp <phk@FreeBSD.ORG>, Julien Ridoux and
 *	Konstantin Belousov.
 *	https://github.com/freebsd/freebsd/blob/master/sys/kern/kern_tc.c
 */
static void
get_scale_factors_from_adj(int64_t adjustment, uint64_t* tick_scale_x, uint64_t* s_scale_ns, int64_t* s_adj_nsx)
{
	uint64_t scale;
	int64_t nano, frac;

	/*-
	 * Calculating the scaling factor.  We want the number of 1/2^64
	 * fractions of a second per period of the hardware counter, taking
	 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
	 * processing provides us with.
	 *
	 * The th_adjustment is nanoseconds per second with 32 bit binary
	 * fraction and we want 64 bit binary fraction of second:
	 *
	 *	 x = a * 2^32 / 10^9 = a * 4.294967296
	 *
	 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
	 * we can only multiply by about 850 without overflowing, that
	 * leaves no suitably precise fractions for multiply before divide.
	 *
	 * Divide before multiply with a fraction of 2199/512 results in a
	 * systematic undercompensation of 10PPM of th_adjustment.  On a
	 * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
	 *
	 * We happily sacrifice the lowest of the 64 bits of our result
	 * to the goddess of code clarity.
	 */
	scale = (uint64_t)1 << 63;
	scale += (adjustment / 1024) * 2199;
	scale /= ticks_per_sec;
	*tick_scale_x = scale * 2;

	/*
	 * hi part of adj:
	 * it contains ns (without fraction) to add to the next sec.
	 * Get ns scale factor for the next sec.
	 */
	nano = (adjustment > 0)? adjustment >> 32 : -((-adjustment) >> 32);
	scale = (uint64_t) NSEC_PER_SEC;
	scale += nano;
	*s_scale_ns = scale;

	/*
	 * lo part of adj:
	 * it contains 32 bit frac of ns to add to the next sec.
	 * Keep it as additional adjustment for the next sec.
	 */
	frac = (adjustment > 0)? ((uint32_t) adjustment) : -((uint32_t) (-adjustment));
	*s_adj_nsx = (frac > 0)? ((uint64_t) frac) << 32 : -(((uint64_t) (-frac)) << 32);
}
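/*
 * Illustrative check (not part of the original source): with a zero NTP
 * adjustment the factors reduce to the identity mapping, which is also how
 * clock_initialize_calendar() seeds them further down in this file:
 *
 *   tick_scale_x = 2 * ((2^63 + 0) / ticks_per_sec) ~= 2^64 / ticks_per_sec
 *                  (64-bit fractions of a second per elapsed tick)
 *   s_scale_ns   = NSEC_PER_SEC + 0   (nanoseconds per fully elapsed second)
 *   s_adj_nsx    = 0                  (no sub-nanosecond residue per second)
 *
 * A +5000 PPM adjustment (5,000,000 ns per second, expressed in 32.32 fixed
 * point) instead stretches each accounted second to roughly
 * NSEC_PER_SEC + 5,000,000 ns and scales tick_scale_x up by about 0.5%.
 */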
/*
 *	scale_delta:
 *
 *	returns a bintime struct representing delta scaled according to the
 *	scale factors provided to this function.
 */
static struct bintime
scale_delta(uint64_t delta, uint64_t tick_scale_x, uint64_t s_scale_ns, int64_t s_adj_nsx)
{
	uint64_t sec, new_ns, over;
	struct bintime bt;

	bt.sec = 0;
	bt.frac = 0;

	/*
	 * If more than one second is elapsed,
	 * scale fully elapsed seconds using scale factors for seconds.
	 * s_scale_ns -> scales sec to ns.
	 * s_adj_nsx -> additional adj expressed in 64 bit frac of ns to apply to each sec.
	 */
	if (delta > ticks_per_sec) {
		sec = (delta / ticks_per_sec);
		new_ns = sec * s_scale_ns;
		bintime_addns(&bt, new_ns);

		if (s_adj_nsx) {
			if (sec == 1) {
				/* shortcut, no overflow can occur */
				if (s_adj_nsx > 0) {
					bintime_addx(&bt, (uint64_t)s_adj_nsx / (uint64_t)NSEC_PER_SEC);
				} else {
					bintime_subx(&bt, (uint64_t)-s_adj_nsx / (uint64_t)NSEC_PER_SEC);
				}
			} else {
				/*
				 * s_adj_nsx is 64 bit frac of ns.
				 * sec*s_adj_nsx might overflow in int64_t.
				 * use bintime_addxns to not lose overflowed ns.
				 */
				bintime_addxns(&bt, sec, s_adj_nsx);
			}
		}
		delta = (delta % ticks_per_sec);
	}

	over = multi_overflow(tick_scale_x, delta);
	if (over) {
		bt.sec += over;
	}

	/*
	 * scale elapsed ticks using the scale factor for ticks.
	 */
	bintime_addx(&bt, delta * tick_scale_x);

	return bt;
}
/*
 *	get_scaled_time:
 *
 *	returns the scaled time of the time elapsed from the last time
 *	scale factors were updated to now.
 */
static struct bintime
get_scaled_time(uint64_t now)
{
	uint64_t delta;

	/*
	 * Compute ticks elapsed since last scale update.
	 * This time will be scaled according to the value given by ntp kern.
	 */
	delta = now - clock_calend.offset_count;

	return scale_delta(delta, clock_calend.tick_scale_x, clock_calend.s_scale_ns, clock_calend.s_adj_nsx);
}
static void
clock_get_calendar_absolute_and_microtime_locked(
	clock_sec_t             *secs,
	clock_usec_t            *microsecs,
	uint64_t                *abstime)
{
	uint64_t now;
	struct bintime bt;

	now = mach_absolute_time();
	if (abstime) {
		*abstime = now;
	}

	bt = get_scaled_time(now);
	bintime_add(&bt, &clock_calend.bintime);
	bintime2usclock(&bt, secs, microsecs);
}
static void
clock_get_calendar_absolute_and_nanotime_locked(
	clock_sec_t             *secs,
	clock_usec_t            *nanosecs,
	uint64_t                *abstime)
{
	uint64_t now;
	struct bintime bt;

	now = mach_absolute_time();
	if (abstime) {
		*abstime = now;
	}

	bt = get_scaled_time(now);
	bintime_add(&bt, &clock_calend.bintime);
	bintime2nsclock(&bt, secs, nanosecs);
}
/*
 *	clock_get_calendar_absolute_and_microtime:
 *
 *	Returns the current calendar value,
 *	microseconds as the fraction. Also
 *	returns mach_absolute_time if abstime
 *	is not NULL.
 */
void
clock_get_calendar_absolute_and_microtime(
	clock_sec_t             *secs,
	clock_usec_t            *microsecs,
	uint64_t                *abstime)
{
	spl_t                   s;

	s = splclock();
	clock_lock();

	clock_get_calendar_absolute_and_microtime_locked(secs, microsecs, abstime);

	clock_unlock();
	splx(s);
}
/*
 *	clock_get_calendar_nanotime:
 *
 *	Returns the current calendar value,
 *	nanoseconds as the fraction.
 *
 *	Since we do not have an interface to
 *	set the calendar with resolution greater
 *	than a microsecond, we honor that here.
 */
void
clock_get_calendar_nanotime(
	clock_sec_t             *secs,
	clock_nsec_t            *nanosecs)
{
	spl_t                   s;

	s = splclock();
	clock_lock();

	clock_get_calendar_absolute_and_nanotime_locked(secs, nanosecs, NULL);

	clock_unlock();
	splx(s);
}
/*
 *	clock_gettimeofday:
 *
 *	Kernel interface for commpage implementation of
 *	gettimeofday() syscall.
 *
 *	Returns the current calendar value, and updates the
 *	commpage info as appropriate.  Because most calls to
 *	gettimeofday() are handled in user mode by the commpage,
 *	this routine should be used infrequently.
 */
void
clock_gettimeofday(
	clock_sec_t     *secs,
	clock_usec_t    *microsecs)
{
	clock_gettimeofday_and_absolute_time(secs, microsecs, NULL);
}
void
clock_gettimeofday_and_absolute_time(
	clock_sec_t     *secs,
	clock_usec_t    *microsecs,
	uint64_t        *mach_time)
{
	uint64_t        now;
	spl_t           s;
	struct bintime  bt;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();
	bt = get_scaled_time(now);
	bintime_add(&bt, &clock_calend.bintime);
	bintime2usclock(&bt, secs, microsecs);

	clock_gettimeofday_set_commpage(now, bt.sec, bt.frac, clock_calend.tick_scale_x, ticks_per_sec);

	clock_unlock();
	splx(s);

	if (mach_time) {
		*mach_time = now;
	}
}
/*
 *	clock_set_calendar_microtime:
 *
 *	Sets the current calendar value by
 *	recalculating the epoch and offset
 *	from the system clock.
 *
 *	Also adjusts the boottime to keep the
 *	value consistent, writes the new
 *	calendar value to the platform clock,
 *	and sends calendar change notifications.
 */
void
clock_set_calendar_microtime(
	clock_sec_t             secs,
	clock_usec_t            microsecs)
{
	uint64_t                absolutesys;
	clock_sec_t             newsecs;
	clock_sec_t             oldsecs;
	clock_usec_t            newmicrosecs;
	clock_usec_t            oldmicrosecs;
	uint64_t                commpage_value;
	spl_t                   s;
	struct bintime          bt;
	clock_sec_t             deltasecs;
	clock_usec_t            deltamicrosecs;

	newsecs = secs;
	newmicrosecs = microsecs;

	/*
	 * settime_lock mtx is used to avoid that racing settimeofdays update the wall clock and
	 * the platform clock concurrently.
	 *
	 * clock_lock cannot be used for this race because it is acquired from interrupt context
	 * and it needs interrupts disabled, while updating the platform clock instead needs to be
	 * called with interrupts enabled.
	 */
	lck_mtx_lock(&settime_lock);

	s = splclock();
	clock_lock();

#if DEVELOPMENT || DEBUG
	struct clock_calend clock_calend_cp = clock_calend;
#endif
	commpage_disable_timestamp();

	/*
	 *	Adjust the boottime based on the delta.
	 */
	clock_get_calendar_absolute_and_microtime_locked(&oldsecs, &oldmicrosecs, &absolutesys);

#if DEVELOPMENT || DEBUG
	if (g_should_log_clock_adjustments) {
		os_log(OS_LOG_DEFAULT, "%s wall %lu s %d u computed with %llu abs\n",
		    __func__, (unsigned long)oldsecs, oldmicrosecs, absolutesys);
		os_log(OS_LOG_DEFAULT, "%s requested %lu s %d u\n",
		    __func__, (unsigned long)secs, microsecs);
	}
#endif

	if (oldsecs < secs || (oldsecs == secs && oldmicrosecs < microsecs)) {
		// moving forwards
		deltasecs = secs;
		deltamicrosecs = microsecs;

		TIME_SUB(deltasecs, oldsecs, deltamicrosecs, oldmicrosecs, USEC_PER_SEC);

		TIME_ADD(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
		clock2bintime(&deltasecs, &deltamicrosecs, &bt);
		bintime_add(&clock_calend.boottime, &bt);
	} else {
		// moving backwards
		deltasecs = oldsecs;
		deltamicrosecs = oldmicrosecs;

		TIME_SUB(deltasecs, secs, deltamicrosecs, microsecs, USEC_PER_SEC);

		TIME_SUB(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
		clock2bintime(&deltasecs, &deltamicrosecs, &bt);
		bintime_sub(&clock_calend.boottime, &bt);
	}

	clock_calend.bintime = clock_calend.boottime;
	bintime_add(&clock_calend.bintime, &clock_calend.offset);

	clock2bintime((clock_sec_t *) &secs, (clock_usec_t *) &microsecs, &bt);

	clock_gettimeofday_set_commpage(absolutesys, bt.sec, bt.frac, clock_calend.tick_scale_x, ticks_per_sec);

#if DEVELOPMENT || DEBUG
	struct clock_calend clock_calend_cp1 = clock_calend;
#endif

	commpage_value = clock_boottime * USEC_PER_SEC + clock_boottime_usec;

	clock_unlock();
	splx(s);

	/*
	 *	Set the new value for the platform clock.
	 *	This call might block, so interrupts must be enabled.
	 */
#if DEVELOPMENT || DEBUG
	uint64_t now_b = mach_absolute_time();
#endif

	PESetUTCTimeOfDay(newsecs, newmicrosecs);

#if DEVELOPMENT || DEBUG
	uint64_t now_a = mach_absolute_time();
	if (g_should_log_clock_adjustments) {
		os_log(OS_LOG_DEFAULT, "%s mach bef PESet %llu mach aft %llu \n", __func__, now_b, now_a);
	}
#endif

	print_all_clock_variables_internal(__func__, &clock_calend_cp);
	print_all_clock_variables_internal(__func__, &clock_calend_cp1);

	commpage_update_boottime(commpage_value);

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();
	host_notify_calendar_set();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif

	lck_mtx_unlock(&settime_lock);
}
uint64_t mach_absolutetime_asleep = 0;
uint64_t mach_absolutetime_last_sleep = 0;

void
clock_get_calendar_uptime(clock_sec_t *secs)
{
	uint64_t now;
	spl_t s;
	struct bintime bt;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	bt = get_scaled_time(now);
	bintime_add(&bt, &clock_calend.offset);

	*secs = bt.sec;

	clock_unlock();
	splx(s);
}
/*
 * clock_update_calendar:
 *
 * called by ntp timer to update scale factors.
 */
void
clock_update_calendar(void)
{
	uint64_t now, delta;
	struct bintime bt;
	spl_t s;
	int64_t adjustment;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	/*
	 * scale the time elapsed since the last update and
	 * add it to offset.
	 */
	bt = get_scaled_time(now);
	bintime_add(&clock_calend.offset, &bt);

	/*
	 * update the base from which to apply the next scale factors.
	 */
	delta = now - clock_calend.offset_count;
	clock_calend.offset_count += delta;

	clock_calend.bintime = clock_calend.offset;
	bintime_add(&clock_calend.bintime, &clock_calend.boottime);

	/*
	 * recompute next adjustment.
	 */
	ntp_update_second(&adjustment, clock_calend.bintime.sec);

#if DEVELOPMENT || DEBUG
	if (g_should_log_clock_adjustments) {
		os_log(OS_LOG_DEFAULT, "%s adjustment %lld\n", __func__, adjustment);
	}
#endif

	/*
	 * recompute scale factors.
	 */
	get_scale_factors_from_adj(adjustment, &clock_calend.tick_scale_x, &clock_calend.s_scale_ns, &clock_calend.s_adj_nsx);

	clock_gettimeofday_set_commpage(now, clock_calend.bintime.sec, clock_calend.bintime.frac, clock_calend.tick_scale_x, ticks_per_sec);

#if DEVELOPMENT || DEBUG
	struct clock_calend calend_cp = clock_calend;
#endif

	clock_unlock();
	splx(s);

	print_all_clock_variables(__func__, NULL, NULL, NULL, NULL, &calend_cp);
}
#if DEVELOPMENT || DEBUG

void
print_all_clock_variables_internal(const char* func, struct clock_calend* clock_calend_cp)
{
	clock_sec_t     offset_secs;
	clock_usec_t    offset_microsecs;
	clock_sec_t     bintime_secs;
	clock_usec_t    bintime_microsecs;
	clock_sec_t     bootime_secs;
	clock_usec_t    bootime_microsecs;

	if (!g_should_log_clock_adjustments) {
		return;
	}

	bintime2usclock(&clock_calend_cp->offset, &offset_secs, &offset_microsecs);
	bintime2usclock(&clock_calend_cp->bintime, &bintime_secs, &bintime_microsecs);
	bintime2usclock(&clock_calend_cp->boottime, &bootime_secs, &bootime_microsecs);

	os_log(OS_LOG_DEFAULT, "%s s_scale_ns %llu s_adj_nsx %lld tick_scale_x %llu offset_count %llu\n",
	    func, clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx,
	    clock_calend_cp->tick_scale_x, clock_calend_cp->offset_count);
	os_log(OS_LOG_DEFAULT, "%s offset.sec %ld offset.frac %llu offset_secs %lu offset_microsecs %d\n",
	    func, clock_calend_cp->offset.sec, clock_calend_cp->offset.frac,
	    (unsigned long)offset_secs, offset_microsecs);
	os_log(OS_LOG_DEFAULT, "%s bintime.sec %ld bintime.frac %llu bintime_secs %lu bintime_microsecs %d\n",
	    func, clock_calend_cp->bintime.sec, clock_calend_cp->bintime.frac,
	    (unsigned long)bintime_secs, bintime_microsecs);
	os_log(OS_LOG_DEFAULT, "%s bootime.sec %ld bootime.frac %llu bootime_secs %lu bootime_microsecs %d\n",
	    func, clock_calend_cp->boottime.sec, clock_calend_cp->boottime.frac,
	    (unsigned long)bootime_secs, bootime_microsecs);

#if !HAS_CONTINUOUS_HWCLOCK
	clock_sec_t     basesleep_secs;
	clock_usec_t    basesleep_microsecs;

	bintime2usclock(&clock_calend_cp->basesleep, &basesleep_secs, &basesleep_microsecs);
	os_log(OS_LOG_DEFAULT, "%s basesleep.sec %ld basesleep.frac %llu basesleep_secs %lu basesleep_microsecs %d\n",
	    func, clock_calend_cp->basesleep.sec, clock_calend_cp->basesleep.frac,
	    (unsigned long)basesleep_secs, basesleep_microsecs);
#endif /* !HAS_CONTINUOUS_HWCLOCK */
}

void
print_all_clock_variables(const char* func, clock_sec_t* pmu_secs, clock_usec_t* pmu_usec, clock_sec_t* sys_secs, clock_usec_t* sys_usec, struct clock_calend* clock_calend_cp)
{
	if (!g_should_log_clock_adjustments) {
		return;
	}

	struct bintime  bt;
	clock_sec_t     wall_secs;
	clock_usec_t    wall_microsecs;
	uint64_t        now;
	uint64_t        delta;

	if (pmu_secs) {
		os_log(OS_LOG_DEFAULT, "%s PMU %lu s %d u \n", func, (unsigned long)*pmu_secs, *pmu_usec);
	}
	if (sys_secs) {
		os_log(OS_LOG_DEFAULT, "%s sys %lu s %d u \n", func, (unsigned long)*sys_secs, *sys_usec);
	}

	print_all_clock_variables_internal(func, clock_calend_cp);

	now = mach_absolute_time();
	delta = now - clock_calend_cp->offset_count;

	bt = scale_delta(delta, clock_calend_cp->tick_scale_x, clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx);
	bintime_add(&bt, &clock_calend_cp->bintime);
	bintime2usclock(&bt, &wall_secs, &wall_microsecs);

	os_log(OS_LOG_DEFAULT, "%s wall %lu s %d u computed with %llu abs\n",
	    func, (unsigned long)wall_secs, wall_microsecs, now);
}

#endif /* DEVELOPMENT || DEBUG */
/*
 *	clock_initialize_calendar:
 *
 *	Set the calendar and related clocks
 *	from the platform clock at boot.
 *
 *	Also sends host notifications.
 */
void
clock_initialize_calendar(void)
{
	clock_sec_t     sys;                  // sleepless time since boot in seconds
	clock_sec_t     secs;                 // Current UTC time
	clock_sec_t     utc_offset_secs;      // Difference between current UTC time and sleepless time since boot
	clock_usec_t    microsys;
	clock_usec_t    microsecs;
	clock_usec_t    utc_offset_microsecs;
	spl_t           s;
	struct bintime  bt;
#if ENABLE_LEGACY_CLOCK_CODE
	struct bintime  monotonic_bt;
	struct latched_time monotonic_time;
	uint64_t        monotonic_usec_total;
	clock_sec_t     sys2, monotonic_sec;
	clock_usec_t    microsys2, monotonic_usec;
	size_t          size;
#endif /* ENABLE_LEGACY_CLOCK_CODE */

	// Get the UTC time and corresponding sys time
	PEGetUTCTimeOfDay(&secs, &microsecs);
	clock_get_system_microtime(&sys, &microsys);

#if ENABLE_LEGACY_CLOCK_CODE
	/*
	 * If the platform has a monotonic clock, use kern.monotonicclock_usecs
	 * to estimate the sleep/wake time, otherwise use the UTC time to estimate
	 * the sleep time.
	 */
	size = sizeof(monotonic_time);
	if (kernel_sysctlbyname("kern.monotonicclock_usecs", &monotonic_time, &size, NULL, 0) != 0) {
		has_monotonic_clock = 0;
		os_log(OS_LOG_DEFAULT, "%s system does not have monotonic clock\n", __func__);
	} else {
		has_monotonic_clock = 1;
		monotonic_usec_total = monotonic_time.monotonic_time_usec;
		absolutetime_to_microtime(monotonic_time.mach_time, &sys2, &microsys2);
		os_log(OS_LOG_DEFAULT, "%s system has monotonic clock\n", __func__);
	}
#endif /* ENABLE_LEGACY_CLOCK_CODE */

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	utc_offset_secs = secs;
	utc_offset_microsecs = microsecs;

	/*
	 * We normally expect the UTC clock to be always-on and produce
	 * greater readings than the tick counter.  There may be corner cases
	 * due to differing clock resolutions (the UTC clock is likely lower) and
	 * errors reading the UTC clock (some implementations return 0
	 * on error) in which that doesn't hold true.  Bring the UTC measurements
	 * in line with the tick counter measurements as a best effort in that case.
	 */
	if ((sys > secs) || ((sys == secs) && (microsys > microsecs))) {
		os_log(OS_LOG_DEFAULT, "%s WARNING: UTC time is less than sys time, (%lu s %d u) UTC (%lu s %d u) sys\n",
		    __func__, (unsigned long)secs, microsecs, (unsigned long)sys, microsys);
		secs = utc_offset_secs = sys;
		microsecs = utc_offset_microsecs = microsys;
	}

	// UTC - sys
	// This macro stores the subtraction result in utc_offset_secs and utc_offset_microsecs
	TIME_SUB(utc_offset_secs, sys, utc_offset_microsecs, microsys, USEC_PER_SEC);
	// This function converts utc_offset_secs and utc_offset_microsecs into bintime
	clock2bintime(&utc_offset_secs, &utc_offset_microsecs, &bt);

	/*
	 *	Initialize the boot time based on the platform clock.
	 */
	clock_boottime = secs;
	clock_boottime_usec = microsecs;
	commpage_update_boottime(clock_boottime * USEC_PER_SEC + clock_boottime_usec);

	nanoseconds_to_absolutetime((uint64_t)NSEC_PER_SEC, &ticks_per_sec);
	clock_calend.boottime = bt;
	clock_calend.bintime = bt;
	clock_calend.offset.sec = 0;
	clock_calend.offset.frac = 0;

	clock_calend.tick_scale_x = (uint64_t)1 << 63;
	clock_calend.tick_scale_x /= ticks_per_sec;
	clock_calend.tick_scale_x *= 2;

	clock_calend.s_scale_ns = NSEC_PER_SEC;
	clock_calend.s_adj_nsx = 0;

#if ENABLE_LEGACY_CLOCK_CODE
	if (has_monotonic_clock) {
		monotonic_sec = monotonic_usec_total / (clock_sec_t)USEC_PER_SEC;
		monotonic_usec = monotonic_usec_total % (clock_usec_t)USEC_PER_SEC;

		// monotonic clock - sys
		// This macro stores the subtraction result in monotonic_sec and monotonic_usec
		TIME_SUB(monotonic_sec, sys2, monotonic_usec, microsys2, USEC_PER_SEC);
		clock2bintime(&monotonic_sec, &monotonic_usec, &monotonic_bt);

		// set basesleep to the difference monotonic clock - sys
		clock_calend.basesleep = monotonic_bt;
	}
#endif /* ENABLE_LEGACY_CLOCK_CODE */
	commpage_update_mach_continuous_time(mach_absolutetime_asleep);

#if DEVELOPMENT || DEBUG
	struct clock_calend clock_calend_cp = clock_calend;
#endif

	clock_unlock();
	splx(s);

	print_all_clock_variables(__func__, &secs, &microsecs, &sys, &microsys, &clock_calend_cp);

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}
#if HAS_CONTINUOUS_HWCLOCK

static void
scale_sleep_time(void)
{
	/* Apply the current NTP frequency adjustment to the time slept.
	 * The frequency adjustment remains stable between calls to ntp_adjtime(),
	 * and should thus provide a reasonable approximation of the total adjustment
	 * required for the time slept. */
	struct bintime sleep_time;
	uint64_t tick_scale_x, s_scale_ns;
	int64_t s_adj_nsx;
	int64_t sleep_adj = ntp_get_freq();

	if (sleep_adj) {
		get_scale_factors_from_adj(sleep_adj, &tick_scale_x, &s_scale_ns, &s_adj_nsx);
		sleep_time = scale_delta(mach_absolutetime_last_sleep, tick_scale_x, s_scale_ns, s_adj_nsx);
	} else {
		tick_scale_x = (uint64_t)1 << 63;
		tick_scale_x /= ticks_per_sec;
		tick_scale_x *= 2;
		sleep_time.sec = mach_absolutetime_last_sleep / ticks_per_sec;
		sleep_time.frac = (mach_absolutetime_last_sleep % ticks_per_sec) * tick_scale_x;
	}
	bintime_add(&clock_calend.offset, &sleep_time);
	bintime_add(&clock_calend.bintime, &sleep_time);
}
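/*
 * Illustrative note (not part of the original source): when no NTP frequency
 * adjustment is active, the slept interval is converted with the nominal
 * factors only, i.e. seconds = ticks / ticks_per_sec and the tick remainder
 * becomes 64-bit fractions via the same 2 * (2^63 / ticks_per_sec) scale used
 * elsewhere in this file.  Sketch with hypothetical names:
 */
#if 0   /* example only, never compiled */
static struct bintime
example_unadjusted_sleep_bt(uint64_t slept_ticks)
{
	struct bintime bt;
	uint64_t scale = ((uint64_t)1 << 63) / ticks_per_sec * 2;

	bt.sec  = slept_ticks / ticks_per_sec;
	bt.frac = (slept_ticks % ticks_per_sec) * scale;
	return bt;
}
#endif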
static void
clock_wakeup_calendar_hwclock(void)
{
	spl_t s;

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	uint64_t abstime = mach_absolute_time();
	uint64_t total_sleep_time = mach_continuous_time() - abstime;

	mach_absolutetime_last_sleep = total_sleep_time - mach_absolutetime_asleep;
	mach_absolutetime_asleep = total_sleep_time;

	scale_sleep_time();

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_EPOCH_CHANGE),
	    (uintptr_t)mach_absolutetime_last_sleep,
	    (uintptr_t)mach_absolutetime_asleep,
	    (uintptr_t)(mach_absolutetime_last_sleep >> 32),
	    (uintptr_t)(mach_absolutetime_asleep >> 32));

	commpage_update_mach_continuous_time(mach_absolutetime_asleep);
#if HIBERNATION
	commpage_update_mach_continuous_time_hw_offset(hwclock_conttime_offset);
#endif
	adjust_cont_time_thread_calls();

	clock_unlock();
	splx(s);

	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}

#endif /* HAS_CONTINUOUS_HWCLOCK */
#if ENABLE_LEGACY_CLOCK_CODE

static void
clock_wakeup_calendar_legacy(void)
{
	clock_sec_t             wake_sys_sec;
	clock_usec_t            wake_sys_usec;
	clock_sec_t             wake_sec;
	clock_usec_t            wake_usec;
	clock_sec_t             wall_time_sec;
	clock_usec_t            wall_time_usec;
	clock_sec_t             diff_sec;
	clock_usec_t            diff_usec;
	clock_sec_t             var_s;
	clock_usec_t            var_us;
	spl_t                   s;
	struct bintime          bt, last_sleep_bt;
	struct latched_time     monotonic_time;
	uint64_t                monotonic_usec_total;
	uint64_t                wake_abs;
	size_t                  size;

	/*
	 * If the platform has the monotonic clock, use that to
	 * compute the sleep time. The monotonic clock does not have an offset
	 * that can be modified, so neither the kernel nor userspace can change
	 * the time of this clock; it can only monotonically increase over time.
	 * During sleep mach_absolute_time (sys time) does not tick,
	 * so the sleep time is the difference between the current monotonic time
	 * less the absolute time and the previous difference stored at wake time.
	 *
	 * basesleep = (monotonic - sys) ---> computed at last wake
	 * sleep_time = (monotonic - sys) - basesleep
	 *
	 * If the platform does not support the monotonic clock, we set the wall time
	 * to what the UTC clock returns us.
	 * Setting the wall time to UTC time implies that we lose all the adjustments
	 * done during wake time through adjtime/ntp_adjtime.
	 * The UTC time is the monotonic clock + an offset that can be set
	 * by the kernel.
	 * The time slept in this case is the difference between wall time and UTC
	 * at wake.
	 *
	 * IMPORTANT:
	 * We assume that only the kernel is setting the offset of the PMU/RTC and that
	 * it is doing it only through the settimeofday interface.
	 */
	if (has_monotonic_clock) {
#if DEVELOPMENT || DEBUG
		/*
		 * Just for debugging, get the wake UTC time.
		 */
		PEGetUTCTimeOfDay(&var_s, &var_us);
#endif
		/*
		 * Get monotonic time with corresponding sys time
		 */
		size = sizeof(monotonic_time);
		if (kernel_sysctlbyname("kern.monotonicclock_usecs", &monotonic_time, &size, NULL, 0) != 0) {
			panic("%s: could not call kern.monotonicclock_usecs", __func__);
		}
		wake_abs = monotonic_time.mach_time;
		absolutetime_to_microtime(wake_abs, &wake_sys_sec, &wake_sys_usec);

		monotonic_usec_total = monotonic_time.monotonic_time_usec;
		wake_sec = monotonic_usec_total / (clock_sec_t)USEC_PER_SEC;
		wake_usec = monotonic_usec_total % (clock_usec_t)USEC_PER_SEC;
	} else {
		/*
		 * Get UTC time and corresponding sys time
		 */
		PEGetUTCTimeOfDay(&wake_sec, &wake_usec);
		wake_abs = mach_absolute_time();
		absolutetime_to_microtime(wake_abs, &wake_sys_sec, &wake_sys_usec);
	}

#if DEVELOPMENT || DEBUG
	os_log(OS_LOG_DEFAULT, "time at wake %lu s %d u from %s clock, abs %llu\n", (unsigned long)wake_sec, wake_usec, (has_monotonic_clock)?"monotonic":"UTC", wake_abs);
	if (has_monotonic_clock) {
		os_log(OS_LOG_DEFAULT, "UTC time %lu s %d u\n", (unsigned long)var_s, var_us);
	}
#endif /* DEVELOPMENT || DEBUG */

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

#if DEVELOPMENT || DEBUG
	struct clock_calend clock_calend_cp1 = clock_calend;
#endif /* DEVELOPMENT || DEBUG */

	/*
	 * We normally expect the UTC/monotonic clock to be always-on and produce
	 * greater readings than the sys counter.  There may be corner cases
	 * due to differing clock resolutions (the UTC/monotonic clock is likely lower) and
	 * errors reading the UTC/monotonic clock (some implementations return 0
	 * on error) in which that doesn't hold true.
	 */
	if ((wake_sys_sec > wake_sec) || ((wake_sys_sec == wake_sec) && (wake_sys_usec > wake_usec))) {
		os_log_error(OS_LOG_DEFAULT, "WARNING: %s clock is less than sys clock at wake: %lu s %d u vs %lu s %d u, defaulting sleep time to zero\n", (has_monotonic_clock)?"monotonic":"UTC", (unsigned long)wake_sec, wake_usec, (unsigned long)wake_sys_sec, wake_sys_usec);
		mach_absolutetime_last_sleep = 0;
		goto done;
	}

	if (has_monotonic_clock) {
		/*
		 * compute the difference monotonic - sys;
		 * we already checked that monotonic time is
		 * greater than sys.
		 */
		diff_sec = wake_sec;
		diff_usec = wake_usec;
		// This macro stores the subtraction result in diff_sec and diff_usec
		TIME_SUB(diff_sec, wake_sys_sec, diff_usec, wake_sys_usec, USEC_PER_SEC);
		// This function converts diff_sec and diff_usec into bintime
		clock2bintime(&diff_sec, &diff_usec, &bt);

		/*
		 * Safety belt: the monotonic clock will likely have a lower resolution than the sys counter.
		 * It's also possible that the device didn't fully transition to the powered-off state on
		 * the most recent sleep, so the sys counter may not have reset or may have only briefly
		 * turned off.  In that case it's possible for the difference between the monotonic clock and the
		 * sys counter to be less than the previously recorded value in clock_calend.basesleep.
		 * In that case simply record that we slept for 0 ticks.
		 */
		if ((bt.sec > clock_calend.basesleep.sec) ||
		    ((bt.sec == clock_calend.basesleep.sec) && (bt.frac > clock_calend.basesleep.frac))) {
			// last_sleep is the difference between (current monotonic - abs) and (last wake monotonic - abs)
			last_sleep_bt = bt;
			bintime_sub(&last_sleep_bt, &clock_calend.basesleep);

			bintime2absolutetime(&last_sleep_bt, &mach_absolutetime_last_sleep);
			mach_absolutetime_asleep += mach_absolutetime_last_sleep;

			// set basesleep to current monotonic - abs
			clock_calend.basesleep = bt;

			// update wall time
			bintime_add(&clock_calend.offset, &last_sleep_bt);
			bintime_add(&clock_calend.bintime, &last_sleep_bt);

			bintime2usclock(&last_sleep_bt, &var_s, &var_us);
			os_log(OS_LOG_DEFAULT, "time_slept (%lu s %d u)\n", (unsigned long) var_s, var_us);
		} else {
			bintime2usclock(&clock_calend.basesleep, &var_s, &var_us);
			os_log_error(OS_LOG_DEFAULT, "WARNING: last wake monotonic-sys time (%lu s %d u) is greater than current monotonic-sys time (%lu s %d u), defaulting sleep time to zero\n", (unsigned long) var_s, var_us, (unsigned long) diff_sec, diff_usec);

			mach_absolutetime_last_sleep = 0;
		}
	} else {
		/*
		 * set the wall time to the UTC value
		 */
		bt = get_scaled_time(wake_abs);
		bintime_add(&bt, &clock_calend.bintime);
		bintime2usclock(&bt, &wall_time_sec, &wall_time_usec);

		if (wall_time_sec > wake_sec || (wall_time_sec == wake_sec && wall_time_usec > wake_usec)) {
			os_log(OS_LOG_DEFAULT, "WARNING: wall time (%lu s %d u) is greater than current UTC time (%lu s %d u), defaulting sleep time to zero\n", (unsigned long) wall_time_sec, wall_time_usec, (unsigned long) wake_sec, wake_usec);

			mach_absolutetime_last_sleep = 0;
		} else {
			diff_sec = wake_sec;
			diff_usec = wake_usec;
			// This macro stores the subtraction result in diff_sec and diff_usec
			TIME_SUB(diff_sec, wall_time_sec, diff_usec, wall_time_usec, USEC_PER_SEC);
			// This function converts diff_sec and diff_usec into bintime
			clock2bintime(&diff_sec, &diff_usec, &bt);

			// time slept in this case is the difference between PMU/RTC and wall time
			last_sleep_bt = bt;

			bintime2absolutetime(&last_sleep_bt, &mach_absolutetime_last_sleep);
			mach_absolutetime_asleep += mach_absolutetime_last_sleep;

			// update wall time
			bintime_add(&clock_calend.offset, &last_sleep_bt);
			bintime_add(&clock_calend.bintime, &last_sleep_bt);

			bintime2usclock(&last_sleep_bt, &var_s, &var_us);
			os_log(OS_LOG_DEFAULT, "time_slept (%lu s %d u)\n", (unsigned long)var_s, var_us);
		}
	}

done:
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_EPOCH_CHANGE),
	    (uintptr_t)mach_absolutetime_last_sleep,
	    (uintptr_t)mach_absolutetime_asleep,
	    (uintptr_t)(mach_absolutetime_last_sleep >> 32),
	    (uintptr_t)(mach_absolutetime_asleep >> 32));

	commpage_update_mach_continuous_time(mach_absolutetime_asleep);
	adjust_cont_time_thread_calls();

#if DEVELOPMENT || DEBUG
	struct clock_calend clock_calend_cp = clock_calend;
#endif

	clock_unlock();
	splx(s);

#if DEVELOPMENT || DEBUG
	if (g_should_log_clock_adjustments) {
		print_all_clock_variables("clock_wakeup_calendar: BEFORE", NULL, NULL, NULL, NULL, &clock_calend_cp1);
		print_all_clock_variables("clock_wakeup_calendar: AFTER", NULL, NULL, NULL, NULL, &clock_calend_cp);
	}
#endif /* DEVELOPMENT || DEBUG */

	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}

#endif /* ENABLE_LEGACY_CLOCK_CODE */
void
clock_wakeup_calendar(void)
{
#if HAS_CONTINUOUS_HWCLOCK
#if HIBERNATION_USES_LEGACY_CLOCK
	if (gIOHibernateState) {
		// if we're resuming from hibernation, we have to take the legacy wakeup path
		return clock_wakeup_calendar_legacy();
	}
#endif /* HIBERNATION_USES_LEGACY_CLOCK */
	// use the hwclock wakeup path
	return clock_wakeup_calendar_hwclock();
#elif ENABLE_LEGACY_CLOCK_CODE
	return clock_wakeup_calendar_legacy();
#else
#error "can't determine which clock code to run"
#endif
}
/*
 *	clock_get_boottime_nanotime:
 *
 *	Return the boottime, used by sysctl.
 */
void
clock_get_boottime_nanotime(
	clock_sec_t             *secs,
	clock_nsec_t            *nanosecs)
{
	spl_t   s;

	s = splclock();
	clock_lock();

	*secs = (clock_sec_t)clock_boottime;
	*nanosecs = (clock_nsec_t)clock_boottime_usec * NSEC_PER_USEC;

	clock_unlock();
	splx(s);
}

/*
 *	clock_get_boottime_microtime:
 *
 *	Return the boottime, used by sysctl.
 */
void
clock_get_boottime_microtime(
	clock_sec_t             *secs,
	clock_usec_t            *microsecs)
{
	spl_t   s;

	s = splclock();
	clock_lock();

	*secs = (clock_sec_t)clock_boottime;
	*microsecs = (clock_nsec_t)clock_boottime_usec;

	clock_unlock();
	splx(s);
}
/*
 *	Wait / delay routines.
 */
static void
mach_wait_until_continue(
	__unused void   *parameter,
	wait_result_t   wresult)
{
	thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
	/*NOTREACHED*/
}
/*
 * mach_wait_until_trap: Suspend execution of calling thread until the specified time has passed
 *
 * Parameters:	args->deadline		Amount of time to wait
 *
 * Returns:	0			Success
 */
kern_return_t
mach_wait_until_trap(
	struct mach_wait_until_trap_args *args)
{
	uint64_t        deadline = args->deadline;
	wait_result_t   wresult;

	wresult = assert_wait_deadline_with_leeway((event_t)mach_wait_until_trap, THREAD_ABORTSAFE,
	    TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
	if (wresult == THREAD_WAITING) {
		wresult = thread_block(mach_wait_until_continue);
	}

	return (wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS;
}
void
clock_delay_until(
	uint64_t                deadline)
{
	uint64_t                now = mach_absolute_time();

	if (now >= deadline) {
		return;
	}

	_clock_delay_until_deadline(deadline - now, deadline);
}

/*
 * Preserve the original precise interval that the client
 * requested for comparison to the spin threshold.
 */
void
_clock_delay_until_deadline(
	uint64_t                interval,
	uint64_t                deadline)
{
	_clock_delay_until_deadline_with_leeway(interval, deadline, 0);
}

/*
 * Like _clock_delay_until_deadline, but it accepts a
 * leeway value.
 */
void
_clock_delay_until_deadline_with_leeway(
	uint64_t                interval,
	uint64_t                deadline,
	uint64_t                leeway)
{
	if (interval == 0) {
		return;
	}

	if (ml_delay_should_spin(interval) ||
	    get_preemption_level() != 0 ||
	    ml_get_interrupts_enabled() == FALSE) {
		machine_delay_until(interval, deadline);
	} else {
		/*
		 * For now, assume a leeway request of 0 means the client does not want a leeway
		 * value. We may want to change this interpretation in the future.
		 */
		if (leeway) {
			assert_wait_deadline_with_leeway((event_t)clock_delay_until, THREAD_UNINT, TIMEOUT_URGENCY_LEEWAY, deadline, leeway);
		} else {
			assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline);
		}

		thread_block(THREAD_CONTINUE_NULL);
	}
}
void
delay_for_interval(
	uint32_t                interval,
	uint32_t                scale_factor)
{
	uint64_t                abstime;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	_clock_delay_until_deadline(abstime, mach_absolute_time() + abstime);
}
void
delay_for_interval_with_leeway(
	uint32_t                interval,
	uint32_t                leeway,
	uint32_t                scale_factor)
{
	uint64_t                abstime_interval;
	uint64_t                abstime_leeway;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime_interval);
	clock_interval_to_absolutetime_interval(leeway, scale_factor, &abstime_leeway);

	_clock_delay_until_deadline_with_leeway(abstime_interval, mach_absolute_time() + abstime_interval, abstime_leeway);
}
void
delay(
	int             usec)
{
	delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
}
/*
 *	Miscellaneous routines.
 */
void
clock_interval_to_deadline(
	uint32_t                interval,
	uint32_t                scale_factor,
	uint64_t                *result)
{
	uint64_t        abstime;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	if (os_add_overflow(mach_absolute_time(), abstime, result)) {
		*result = UINT64_MAX;
	}
}
void
nanoseconds_to_deadline(
	uint64_t                interval,
	uint64_t                *result)
{
	uint64_t        abstime;

	nanoseconds_to_absolutetime(interval, &abstime);

	if (os_add_overflow(mach_absolute_time(), abstime, result)) {
		*result = UINT64_MAX;
	}
}
void
clock_absolutetime_interval_to_deadline(
	uint64_t                abstime,
	uint64_t                *result)
{
	if (os_add_overflow(mach_absolute_time(), abstime, result)) {
		*result = UINT64_MAX;
	}
}
void
clock_continuoustime_interval_to_deadline(
	uint64_t                conttime,
	uint64_t                *result)
{
	if (os_add_overflow(mach_continuous_time(), conttime, result)) {
		*result = UINT64_MAX;
	}
}

void
clock_get_uptime(
	uint64_t        *result)
{
	*result = mach_absolute_time();
}
void
clock_deadline_for_periodic_event(
	uint64_t                interval,
	uint64_t                abstime,
	uint64_t                *deadline)
{
	assert(interval != 0);

	// *deadline += interval;
	if (os_add_overflow(*deadline, interval, deadline)) {
		*deadline = UINT64_MAX;
	}

	if (*deadline <= abstime) {
		// *deadline = abstime + interval;
		if (os_add_overflow(abstime, interval, deadline)) {
			*deadline = UINT64_MAX;
		}

		abstime = mach_absolute_time();
		if (*deadline <= abstime) {
			// *deadline = abstime + interval;
			if (os_add_overflow(abstime, interval, deadline)) {
				*deadline = UINT64_MAX;
			}
		}
	}
}
uint64_t
mach_continuous_time(void)
{
#if HIBERNATION && HAS_CONTINUOUS_HWCLOCK
	return ml_get_hwclock() + hwclock_conttime_offset;
#elif HAS_CONTINUOUS_HWCLOCK
	return ml_get_hwclock();
#else
	while (1) {
		uint64_t read1 = mach_absolutetime_asleep;
		uint64_t absolute = mach_absolute_time();
		OSMemoryBarrier();
		uint64_t read2 = mach_absolutetime_asleep;

		if (__builtin_expect(read1 == read2, 1)) {
			return absolute + read1;
		}
	}
#endif
}
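/*
 * Illustrative note (not part of the original source): the software fallback
 * above is a lock-free double read.  The sleep offset is sampled before and
 * after reading the absolute clock; if the two samples match, no sleep-offset
 * update raced with the read and absolute + offset is a consistent
 * continuous-time value, otherwise the loop retries.  The same pattern with
 * hypothetical names:
 */
#if 0   /* example only, never compiled */
static uint64_t
example_offset_clock_read(volatile const uint64_t *offset, uint64_t (*clock_read)(void))
{
	for (;;) {
		uint64_t before = *offset;
		uint64_t value  = clock_read();
		uint64_t after  = *offset;

		if (before == after) {
			return value + before;   /* consistent snapshot */
		}
		/* an offset update raced with us; retry */
	}
}
#endif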
uint64_t
mach_continuous_approximate_time(void)
{
#if HAS_CONTINUOUS_HWCLOCK
	return mach_continuous_time();
#else
	while (1) {
		uint64_t read1 = mach_absolutetime_asleep;
		uint64_t absolute = mach_approximate_time();
		OSMemoryBarrier();
		uint64_t read2 = mach_absolutetime_asleep;

		if (__builtin_expect(read1 == read2, 1)) {
			return absolute + read1;
		}
	}
#endif
}
/*
 * continuoustime_to_absolutetime
 * Must be called with interrupts disabled
 * Returned value is only valid until the next update to
 * mach_continuous_time
 */
uint64_t
continuoustime_to_absolutetime(uint64_t conttime)
{
	if (conttime <= mach_absolutetime_asleep) {
		return 0;
	} else {
		return conttime - mach_absolutetime_asleep;
	}
}
/*
 * absolutetime_to_continuoustime
 * Must be called with interrupts disabled
 * Returned value is only valid until the next update to
 * mach_continuous_time
 */
uint64_t
absolutetime_to_continuoustime(uint64_t abstime)
{
	return abstime + mach_absolutetime_asleep;
}
#if CONFIG_DTRACE

/*
 *	clock_get_calendar_nanotime_nowait
 *
 *	Description:	Non-blocking version of clock_get_calendar_nanotime()
 *
 *	Notes:		This function operates by separately tracking calendar time
 *			updates using a two element structure to copy the calendar
 *			state, which may be asynchronously modified.  It utilizes
 *			barrier instructions in the tracking process and in the local
 *			stable snapshot process in order to ensure that a consistent
 *			snapshot is used to perform the calculation.
 */
void
clock_get_calendar_nanotime_nowait(
	clock_sec_t             *secs,
	clock_nsec_t            *nanosecs)
{
	int i = 0;
	uint64_t        now;
	struct unlocked_clock_calend stable;
	struct bintime bt;

	for (;;) {
		stable = flipflop[i];           /* take snapshot */

		/*
		 *	Use a barrier instruction to ensure atomicity.  We AND
		 *	off the "in progress" bit to get the current generation
		 *	count.
		 */
		os_atomic_andnot(&stable.gen, 1, relaxed);

		/*
		 *	If an update _is_ in progress, the generation count will be
		 *	off by one, if it _was_ in progress, it will be off by two,
		 *	and if we caught it at a good time, it will be equal (and
		 *	our snapshot is therefore stable).
		 */
		if (flipflop[i].gen == stable.gen) {
			break;
		}

		/* Switch to the other element of the flipflop, and try again. */
		i ^= 1;
	}

	now = mach_absolute_time();

	bt = get_scaled_time(now);

	bintime_add(&bt, &clock_calend.bintime);

	bintime2nsclock(&bt, secs, nanosecs);
}
static void
clock_track_calend_nowait(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		struct clock_calend tmp = clock_calend;

		/*
		 *	Set the low bit of the generation count; since we use a
		 *	barrier instruction to do this, we are guaranteed that this
		 *	will flag an update in progress to an async caller trying
		 *	to examine the contents.
		 */
		os_atomic_or(&flipflop[i].gen, 1, relaxed);

		flipflop[i].calend = tmp;

		/*
		 *	Increment the generation count to clear the low bit to
		 *	signal completion.  If a caller compares the generation
		 *	count after taking a copy while in progress, the count
		 *	will be off by two.
		 */
		os_atomic_inc(&flipflop[i].gen, relaxed);
	}
}

#endif /* CONFIG_DTRACE */