/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)time.h	8.5 (Berkeley) 5/4/95
 */
#include <mach/mach_types.h>

#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <kern/host_notify.h>
#include <kern/thread_call.h>
#include <libkern/OSAtomic.h>

#include <IOKit/IOPlatformExpert.h>

#include <machine/commpage.h>
#include <machine/config.h>
#include <machine/machine_routines.h>

#include <mach/mach_traps.h>
#include <mach/mach_time.h>

#include <sys/kdebug.h>
#include <sys/timex.h>
#include <kern/arithmetic_128.h>
uint32_t	hz_tick_interval = 1;
#if !HAS_CONTINUOUS_HWCLOCK
static uint64_t has_monotonic_clock = 0;
#endif

decl_simple_lock_data(, clock_lock);
lck_grp_attr_t * settime_lock_grp_attr;
lck_grp_t * settime_lock_grp;
lck_attr_t * settime_lock_attr;
lck_mtx_t settime_lock;

#define clock_lock()	\
	simple_lock(&clock_lock, LCK_GRP_NULL)

#define clock_unlock()	\
	simple_unlock(&clock_lock)

#define clock_lock_init()	\
	simple_lock_init(&clock_lock, 0)

#ifdef kdp_simple_lock_is_acquired
boolean_t
kdp_clock_is_locked()
{
	return kdp_simple_lock_is_acquired(&clock_lock);
}
#endif
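/*
 * Note on the bintime representation used by the helpers below (illustrative
 * summary): struct bintime holds a time value as a whole number of seconds
 * (.sec) plus a 64-bit binary fraction of a second (.frac), i.e.
 * value = sec + frac / 2^64.  Converting N nanoseconds to the fractional
 * part is frac = N * 2^64 / 10^9, which the helpers approximate with the
 * precomputed constant 18446744073 = floor(2^64 / 10^9).  For example,
 * 500 ms = 500000000 ns maps to frac ~= 500000000 * 18446744073, which is
 * just under 2^63, i.e. just under one half of a second in this encoding.
 */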
static void
bintime_addx(struct bintime *_bt, uint64_t _x)
{
	uint64_t _u;

	_u = _bt->frac;
	_bt->frac += _x;
	if (_u > _bt->frac) {
		_bt->sec++;
	}
}

static void
bintime_subx(struct bintime *_bt, uint64_t _x)
{
	uint64_t _u;

	_u = _bt->frac;
	_bt->frac -= _x;
	if (_u < _bt->frac) {
		_bt->sec--;
	}
}
static void
bintime_addns(struct bintime *bt, uint64_t ns)
{
	bt->sec += ns / (uint64_t)NSEC_PER_SEC;
	ns = ns % (uint64_t)NSEC_PER_SEC;
	if (ns) {
		/* 18446744073 = int(2^64 / NSEC_PER_SEC) */
		ns = ns * (uint64_t)18446744073LL;
		bintime_addx(bt, ns);
	}
}
static void
bintime_subns(struct bintime *bt, uint64_t ns)
{
	bt->sec -= ns / (uint64_t)NSEC_PER_SEC;
	ns = ns % (uint64_t)NSEC_PER_SEC;
	if (ns) {
		/* 18446744073 = int(2^64 / NSEC_PER_SEC) */
		ns = ns * (uint64_t)18446744073LL;
		bintime_subx(bt, ns);
	}
}
static void
bintime_addxns(struct bintime *bt, uint64_t a, int64_t xns)
{
	uint64_t uxns = (xns > 0)?(uint64_t)xns : (uint64_t)-xns;
	uint64_t ns = multi_overflow(a, uxns);
	if (xns > 0) {
		if (ns) {
			bintime_addns(bt, ns);
		}
		ns = (a * uxns) / (uint64_t)NSEC_PER_SEC;
		bintime_addx(bt, ns);
	} else {
		if (ns) {
			bintime_subns(bt, ns);
		}
		ns = (a * uxns) / (uint64_t)NSEC_PER_SEC;
		bintime_subx(bt, ns);
	}
}
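/*
 * Worked reading of bintime_addxns() above (assuming multi_overflow(a, b),
 * declared in kern/arithmetic_128.h, yields the high 64 bits of the 128-bit
 * product a*b): xns is a 64-bit binary fraction of a nanosecond applied per
 * second, so the full product a*uxns is (whole ns) * 2^64 + remainder.  The
 * high half is the whole-nanosecond carry handled by bintime_addns() /
 * bintime_subns(); the low half, divided by NSEC_PER_SEC, is the residual
 * expressed as a 64-bit fraction of a second and is folded in with
 * bintime_addx() / bintime_subx().
 */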
static void
bintime_add(struct bintime *_bt, const struct bintime *_bt2)
{
	uint64_t _u;

	_u = _bt->frac;
	_bt->frac += _bt2->frac;
	if (_u > _bt->frac) {
		_bt->sec++;
	}
	_bt->sec += _bt2->sec;
}
static void
bintime_sub(struct bintime *_bt, const struct bintime *_bt2)
{
	uint64_t _u;

	_u = _bt->frac;
	_bt->frac -= _bt2->frac;
	if (_u < _bt->frac) {
		_bt->sec--;
	}
	_bt->sec -= _bt2->sec;
}
static void
clock2bintime(const clock_sec_t *secs, const clock_usec_t *microsecs, struct bintime *_bt)
{
	_bt->sec = *secs;
	/* 18446744073709 = int(2^64 / 1000000) */
	_bt->frac = *microsecs * (uint64_t)18446744073709LL;
}
static void
bintime2usclock(const struct bintime *_bt, clock_sec_t *secs, clock_usec_t *microsecs)
{
	*secs = _bt->sec;
	*microsecs = ((uint64_t)USEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32;
}
static void
bintime2nsclock(const struct bintime *_bt, clock_sec_t *secs, clock_usec_t *nanosecs)
{
	*secs = _bt->sec;
	*nanosecs = ((uint64_t)NSEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32;
}
#if !defined(HAS_CONTINUOUS_HWCLOCK)
static void
bintime2absolutetime(const struct bintime *_bt, uint64_t *abs)
{
	uint64_t nsec;
	nsec = (uint64_t) _bt->sec * (uint64_t)NSEC_PER_SEC + (((uint64_t)NSEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32);
	nanoseconds_to_absolutetime(nsec, abs);
}
struct latched_time {
	uint64_t monotonic_time_usec;
	uint64_t mach_time;
};

extern int
kernel_sysctlbyname(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen);

#endif
/*
 *	Time of day (calendar) variables.
 *
 *	TOD <- bintime + delta*scale
 *
 *	where:
 *	bintime is a cumulative offset that includes boottime and scaled time elapsed between boottime and the last scale update.
 *	delta is ticks elapsed since the last scale update.
 *	scale is computed according to an adjustment provided by ntp_kern.
 */
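/*
 * Example of the formula above: if the last scale update left
 * bintime = 1000.25 s and recorded the timebase reading in offset_count,
 * then delta = mach_absolute_time() - offset_count ticks later the calendar
 * reads approximately 1000.25 s + delta * tick_scale_x / 2^64 seconds
 * (plus the per-second adjustment terms applied by scale_delta() below),
 * since tick_scale_x expresses one tick as a 64-bit fraction of a second.
 */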
static struct clock_calend {
	uint64_t        s_scale_ns;    /* scale to apply for each second elapsed, it converts in ns */
	int64_t         s_adj_nsx;     /* additional adj to apply for each second elapsed, it is expressed in 64 bit frac of ns */
	uint64_t        tick_scale_x;  /* scale to apply for each tick elapsed, it converts in 64 bit frac of s */
	uint64_t        offset_count;  /* abs time from which the current scale factors apply */
	struct bintime  offset;        /* cumulative offset expressed in (sec, 64 bits frac of a second) */
	struct bintime  bintime;       /* cumulative offset (it includes boottime) expressed in (sec, 64 bits frac of a second) */
	struct bintime  boottime;      /* boot time expressed in (sec, 64 bits frac of a second) */
#if !HAS_CONTINUOUS_HWCLOCK
	struct bintime  basesleep;
#endif
} clock_calend;

static uint64_t ticks_per_sec; /* ticks in a second (expressed in abs time) */
#if DEVELOPMENT || DEBUG
extern int g_should_log_clock_adjustments;

static void print_all_clock_variables(const char*, clock_sec_t* pmu_secs, clock_usec_t* pmu_usec, clock_sec_t* sys_secs, clock_usec_t* sys_usec, struct clock_calend* calend_cp);
static void print_all_clock_variables_internal(const char *, struct clock_calend* calend_cp);
#else
#define print_all_clock_variables(...) do { } while (0)
#define print_all_clock_variables_internal(...) do { } while (0)
#endif
/*
 *	Unlocked calendar flipflop; this is used to track a clock_calend such
 *	that we can safely access a snapshot of a valid clock_calend structure
 *	without needing to take any locks to do it.
 *
 *	The trick is to use a generation count and set the low bit when it is
 *	being updated/read; by doing this, we guarantee, through use of the
 *	os_atomic functions, that the generation is incremented when the bit
 *	is cleared atomically (by using a 1 bit add).
 */
static struct unlocked_clock_calend {
	struct clock_calend     calend;         /* copy of calendar */
	uint32_t                gen;            /* generation count */
} flipflop[2];
static void clock_track_calend_nowait(void);

void _clock_delay_until_deadline(uint64_t interval, uint64_t deadline);
void _clock_delay_until_deadline_with_leeway(uint64_t interval, uint64_t deadline, uint64_t leeway);

/* Boottime variables */
static uint64_t clock_boottime;
static uint32_t clock_boottime_usec;
#define TIME_ADD(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN						\
	if (((rfrac) += (frac)) >= (unit)) {		\
	        (rfrac) -= (unit);			\
	        (rsecs) += 1;				\
	}						\
	(rsecs) += (secs);				\
MACRO_END

#define TIME_SUB(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN						\
	if ((int)((rfrac) -= (frac)) < 0) {		\
	        (rfrac) += (unit);			\
	        (rsecs) -= 1;				\
	}						\
	(rsecs) -= (secs);				\
MACRO_END
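/*
 * Usage sketch for the macros above: given a later reading of
 * (rsecs, rfrac) = (1005, 200000) us and an earlier reading of
 * (secs, frac) = (1000, 700000) us,
 *
 *	TIME_SUB(rsecs, secs, rfrac, frac, USEC_PER_SEC);
 *
 * leaves the delta (4, 500000): the fractional subtraction underflows, so
 * one unit (USEC_PER_SEC) is borrowed from the seconds column before the
 * whole seconds are subtracted.
 */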
/*
 *	clock_config:
 *
 *	Called once at boot to configure the clock subsystem.
 */
void
clock_config(void)
{
	clock_lock_init();

	settime_lock_grp_attr = lck_grp_attr_alloc_init();
	settime_lock_grp = lck_grp_alloc_init("settime grp", settime_lock_grp_attr);
	settime_lock_attr = lck_attr_alloc_init();
	lck_mtx_init(&settime_lock, settime_lock_grp, settime_lock_attr);

	nanoseconds_to_absolutetime((uint64_t)NSEC_PER_SEC, &ticks_per_sec);
}
/*
 *	clock_init:
 *
 *	Called on a processor each time started.
 */

/*
 *	clock_timebase_init:
 *
 *	Called by machine dependent code
 *	to initialize areas dependent on the
 *	timebase value.  May be called multiple
 *	times during start up.
 */
void
clock_timebase_init(void)
{
	uint64_t        abstime;

	nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
	hz_tick_interval = (uint32_t)abstime;

	sched_timebase_init();
}
/*
 *	mach_timebase_info_trap:
 *
 *	User trap returns timebase constant.
 */
kern_return_t
mach_timebase_info_trap(
	struct mach_timebase_info_trap_args *args)
{
	mach_vm_address_t               out_info_addr = args->info;
	mach_timebase_info_data_t       info = {};

	clock_timebase_info(&info);

	copyout((void *)&info, out_info_addr, sizeof(info));

	return KERN_SUCCESS;
}
/*
 *	clock_get_calendar_microtime:
 *
 *	Returns the current calendar value,
 *	microseconds as the fraction.
 */
void
clock_get_calendar_microtime(
	clock_sec_t             *secs,
	clock_usec_t            *microsecs)
{
	clock_get_calendar_absolute_and_microtime(secs, microsecs, NULL);
}
/*
 * get_scale_factors_from_adj:
 *
 * computes scale factors from the value given in adjustment.
 *
 * Part of the code has been taken from tc_windup of FreeBSD
 * written by Poul-Henning Kamp <phk@FreeBSD.ORG>, Julien Ridoux and
 * Konstantin Belousov.
 * https://github.com/freebsd/freebsd/blob/master/sys/kern/kern_tc.c
 */
static void
get_scale_factors_from_adj(int64_t adjustment, uint64_t* tick_scale_x, uint64_t* s_scale_ns, int64_t* s_adj_nsx)
{
	uint64_t scale;
	int64_t nano, frac;

	/*-
	 * Calculating the scaling factor.  We want the number of 1/2^64
	 * fractions of a second per period of the hardware counter, taking
	 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
	 * processing provides us with.
	 *
	 * The th_adjustment is nanoseconds per second with 32 bit binary
	 * fraction and we want 64 bit binary fraction of second:
	 *
	 *	 x = a * 2^32 / 10^9 = a * 4.294967296
	 *
	 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
	 * we can only multiply by about 850 without overflowing, that
	 * leaves no suitably precise fractions for multiply before divide.
	 *
	 * Divide before multiply with a fraction of 2199/512 results in a
	 * systematic undercompensation of 10PPM of th_adjustment.  On a
	 * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
	 *
	 * We happily sacrifice the lowest of the 64 bits of our result
	 * to the goddess of code clarity.
	 */
	scale = (uint64_t)1 << 63;
	scale += (adjustment / 1024) * 2199;
	scale /= ticks_per_sec;
	*tick_scale_x = scale * 2;

	/*
	 * The hi 32 bits of the adjustment:
	 * it contains ns (without fraction) to add to the next sec.
	 * Get ns scale factor for the next sec.
	 */
	nano = (adjustment > 0)? adjustment >> 32 : -((-adjustment) >> 32);
	scale = (uint64_t) NSEC_PER_SEC;
	scale += nano;
	*s_scale_ns = scale;

	/*
	 * The lo 32 bits of the adjustment:
	 * it contains 32 bit frac of ns to add to the next sec.
	 * Keep it as additional adjustment for the next sec.
	 */
	frac = (adjustment > 0)? ((uint32_t) adjustment) : -((uint32_t) (-adjustment));
	*s_adj_nsx = (frac > 0)? frac << 32 : -((-frac) << 32);
}
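/*
 * Numeric illustration (assuming, for example, ticks_per_sec = 24,000,000 as
 * on a 24 MHz timebase): with adjustment = 0 the function yields
 * tick_scale_x = 2 * (2^63 / 24000000), i.e. roughly 2^64 / 24000000, which
 * is exactly one 24 MHz tick expressed as a 64-bit binary fraction of a
 * second, together with s_scale_ns = NSEC_PER_SEC and s_adj_nsx = 0, so
 * elapsed seconds scale to nanoseconds unchanged.  A positive NTP adjustment
 * slightly inflates these factors so the calendar runs marginally faster
 * than the raw timebase.
 */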
/*
 * scale_delta:
 *
 * returns a bintime struct representing delta scaled according to the
 * scale factors provided to this function.
 */
static struct bintime
scale_delta(uint64_t delta, uint64_t tick_scale_x, uint64_t s_scale_ns, int64_t s_adj_nsx)
{
	uint64_t sec, new_ns, over;
	struct bintime bt;

	bt.sec = 0;
	bt.frac = 0;

	/*
	 * If more than one second is elapsed,
	 * scale fully elapsed seconds using scale factors for seconds.
	 * s_scale_ns -> scales sec to ns.
	 * s_adj_nsx -> additional adj expressed in 64 bit frac of ns to apply to each sec.
	 */
	if (delta > ticks_per_sec) {
		sec = (delta / ticks_per_sec);
		new_ns = sec * s_scale_ns;
		bintime_addns(&bt, new_ns);
		if (s_adj_nsx) {
			if (sec == 1) {
				/* shortcut, no overflow can occur */
				if (s_adj_nsx > 0) {
					bintime_addx(&bt, (uint64_t)s_adj_nsx / (uint64_t)NSEC_PER_SEC);
				} else {
					bintime_subx(&bt, (uint64_t)-s_adj_nsx / (uint64_t)NSEC_PER_SEC);
				}
			} else {
				/*
				 * s_adj_nsx is 64 bit frac of ns.
				 * sec*s_adj_nsx might overflow in int64_t.
				 * use bintime_addxns to not lose overflowed ns.
				 */
				bintime_addxns(&bt, sec, s_adj_nsx);
			}
		}
		delta = (delta % ticks_per_sec);
	}

	over = multi_overflow(tick_scale_x, delta);
	if (over) {
		bt.sec += over;
	}

	/*
	 * scale elapsed ticks using the scale factor for ticks.
	 */
	bintime_addx(&bt, delta * tick_scale_x);

	return bt;
}
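/*
 * Reading of scale_delta() above: for a delta worth, say, 2.5 seconds of
 * ticks, the two whole seconds are converted via s_scale_ns (plus the
 * fractional per-second adjustment s_adj_nsx), while the remaining half
 * second of ticks is multiplied by tick_scale_x to produce a 64-bit fraction
 * of a second; multi_overflow() recovers the whole seconds that the 64-bit
 * multiply would otherwise lose when delta * tick_scale_x exceeds 2^64.
 */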
/*
 * get_scaled_time:
 *
 * returns the scaled time of the time elapsed from the last time
 * scale factors were updated to now.
 */
static struct bintime
get_scaled_time(uint64_t now)
{
	uint64_t delta;

	/*
	 * Compute ticks elapsed since last scale update.
	 * This time will be scaled according to the value given by ntp kern.
	 */
	delta = now - clock_calend.offset_count;

	return scale_delta(delta, clock_calend.tick_scale_x, clock_calend.s_scale_ns, clock_calend.s_adj_nsx);
}
static void
clock_get_calendar_absolute_and_microtime_locked(
	clock_sec_t             *secs,
	clock_usec_t            *microsecs,
	uint64_t                *abstime)
{
	uint64_t now;
	struct bintime bt;

	now = mach_absolute_time();
	if (abstime) {
		*abstime = now;
	}

	bt = get_scaled_time(now);
	bintime_add(&bt, &clock_calend.bintime);
	bintime2usclock(&bt, secs, microsecs);
}
static void
clock_get_calendar_absolute_and_nanotime_locked(
	clock_sec_t             *secs,
	clock_usec_t            *nanosecs,
	uint64_t                *abstime)
{
	uint64_t now;
	struct bintime bt;

	now = mach_absolute_time();
	if (abstime) {
		*abstime = now;
	}

	bt = get_scaled_time(now);
	bintime_add(&bt, &clock_calend.bintime);
	bintime2nsclock(&bt, secs, nanosecs);
}
/*
 *	clock_get_calendar_absolute_and_microtime:
 *
 *	Returns the current calendar value,
 *	microseconds as the fraction. Also
 *	returns mach_absolute_time if abstime
 *	is not NULL.
 */
void
clock_get_calendar_absolute_and_microtime(
	clock_sec_t             *secs,
	clock_usec_t            *microsecs,
	uint64_t                *abstime)
{
	spl_t   s;

	s = splclock();
	clock_lock();

	clock_get_calendar_absolute_and_microtime_locked(secs, microsecs, abstime);

	clock_unlock();
	splx(s);
}
/*
 *	clock_get_calendar_nanotime:
 *
 *	Returns the current calendar value,
 *	nanoseconds as the fraction.
 *
 *	Since we do not have an interface to
 *	set the calendar with resolution greater
 *	than a microsecond, we honor that here.
 */
void
clock_get_calendar_nanotime(
	clock_sec_t             *secs,
	clock_nsec_t            *nanosecs)
{
	spl_t   s;

	s = splclock();
	clock_lock();

	clock_get_calendar_absolute_and_nanotime_locked(secs, nanosecs, NULL);

	clock_unlock();
	splx(s);
}
/*
 *	clock_gettimeofday:
 *
 *	Kernel interface for commpage implementation of
 *	gettimeofday() syscall.
 *
 *	Returns the current calendar value, and updates the
 *	commpage info as appropriate.  Because most calls to
 *	gettimeofday() are handled in user mode by the commpage,
 *	this routine should be used infrequently.
 */
void
clock_gettimeofday(
	clock_sec_t             *secs,
	clock_usec_t            *microsecs)
{
	clock_gettimeofday_and_absolute_time(secs, microsecs, NULL);
}

void
clock_gettimeofday_and_absolute_time(
	clock_sec_t             *secs,
	clock_usec_t            *microsecs,
	uint64_t                *mach_time)
{
	uint64_t                now;
	spl_t                   s;
	struct bintime          bt;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();
	bt = get_scaled_time(now);
	bintime_add(&bt, &clock_calend.bintime);
	bintime2usclock(&bt, secs, microsecs);

	clock_gettimeofday_set_commpage(now, bt.sec, bt.frac, clock_calend.tick_scale_x, ticks_per_sec);

	clock_unlock();
	splx(s);

	if (mach_time) {
		*mach_time = now;
	}
}
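/*
 * The values published by the routine above (the absolute-time anchor `now`,
 * the calendar reading bt.sec/bt.frac at that anchor, and tick_scale_x) are,
 * in outline, what the user-mode commpage gettimeofday() path needs to
 * extrapolate the calendar on its own: roughly
 * current time ~= bt + (mach_absolute_time() - now) * tick_scale_x / 2^64,
 * without trapping into the kernel.  (Sketch of the intent only; the exact
 * algorithm lives in the commpage sources.)
 */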
/*
 *	clock_set_calendar_microtime:
 *
 *	Sets the current calendar value by
 *	recalculating the epoch and offset
 *	from the system clock.
 *
 *	Also adjusts the boottime to keep the
 *	value consistent, writes the new
 *	calendar value to the platform clock,
 *	and sends calendar change notifications.
 */
void
clock_set_calendar_microtime(
	clock_sec_t             secs,
	clock_usec_t            microsecs)
{
	uint64_t                absolutesys;
	clock_sec_t             newsecs;
	clock_sec_t             oldsecs;
	clock_usec_t            newmicrosecs;
	clock_usec_t            oldmicrosecs;
	uint64_t                commpage_value;
	spl_t                   s;
	struct bintime          bt;
	clock_sec_t             deltasecs;
	clock_usec_t            deltamicrosecs;

	newsecs = secs;
	newmicrosecs = microsecs;

	/*
	 * settime_lock mtx is used to avoid that racing settimeofdays update the wall clock and
	 * the platform clock concurrently.
	 *
	 * clock_lock cannot be used for this race because it is acquired from interrupt context
	 * and it needs interrupts disabled, while updating the platform clock needs to be
	 * done with interrupts enabled.
	 */
	lck_mtx_lock(&settime_lock);

	s = splclock();
	clock_lock();

#if DEVELOPMENT || DEBUG
	struct clock_calend clock_calend_cp = clock_calend;
#endif
	commpage_disable_timestamp();

	/*
	 *	Adjust the boottime based on the delta.
	 */
	clock_get_calendar_absolute_and_microtime_locked(&oldsecs, &oldmicrosecs, &absolutesys);

#if DEVELOPMENT || DEBUG
	if (g_should_log_clock_adjustments) {
		os_log(OS_LOG_DEFAULT, "%s wall %lu s %d u computed with %llu abs\n",
		    __func__, (unsigned long)oldsecs, oldmicrosecs, absolutesys);
		os_log(OS_LOG_DEFAULT, "%s requested %lu s %d u\n",
		    __func__, (unsigned long)secs, microsecs);
	}
#endif

	if (oldsecs < secs || (oldsecs == secs && oldmicrosecs < microsecs)) {
		// moving forwards
		deltasecs = secs;
		deltamicrosecs = microsecs;

		TIME_SUB(deltasecs, oldsecs, deltamicrosecs, oldmicrosecs, USEC_PER_SEC);

		TIME_ADD(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
		clock2bintime(&deltasecs, &deltamicrosecs, &bt);
		bintime_add(&clock_calend.boottime, &bt);
	} else {
		// moving backwards
		deltasecs = oldsecs;
		deltamicrosecs = oldmicrosecs;

		TIME_SUB(deltasecs, secs, deltamicrosecs, microsecs, USEC_PER_SEC);

		TIME_SUB(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
		clock2bintime(&deltasecs, &deltamicrosecs, &bt);
		bintime_sub(&clock_calend.boottime, &bt);
	}

	clock_calend.bintime = clock_calend.boottime;
	bintime_add(&clock_calend.bintime, &clock_calend.offset);

	clock2bintime((clock_sec_t *) &secs, (clock_usec_t *) &microsecs, &bt);

	clock_gettimeofday_set_commpage(absolutesys, bt.sec, bt.frac, clock_calend.tick_scale_x, ticks_per_sec);

#if DEVELOPMENT || DEBUG
	struct clock_calend clock_calend_cp1 = clock_calend;
#endif

	commpage_value = clock_boottime * USEC_PER_SEC + clock_boottime_usec;

	clock_unlock();
	splx(s);

	/*
	 *	Set the new value for the platform clock.
	 *	This call might block, so interrupts must be enabled.
	 */
#if DEVELOPMENT || DEBUG
	uint64_t now_b = mach_absolute_time();
#endif

	PESetUTCTimeOfDay(newsecs, newmicrosecs);

#if DEVELOPMENT || DEBUG
	uint64_t now_a = mach_absolute_time();
	if (g_should_log_clock_adjustments) {
		os_log(OS_LOG_DEFAULT, "%s mach bef PESet %llu mach aft %llu \n", __func__, now_b, now_a);
	}
	print_all_clock_variables_internal(__func__, &clock_calend_cp);
	print_all_clock_variables_internal(__func__, &clock_calend_cp1);
#endif

	commpage_update_boottime(commpage_value);

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();
	host_notify_calendar_set();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif

	lck_mtx_unlock(&settime_lock);
}
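/*
 * Boottime bookkeeping example for the routine above: if the calendar
 * currently reads 1000.200000 s and settimeofday() asks for 1005.700000 s,
 * the delta of 5.500000 s is added to clock_boottime / clock_calend.boottime,
 * so that uptime derived from (calendar - boottime) is unchanged by the
 * jump; setting the clock backwards subtracts the delta instead.
 */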
uint64_t mach_absolutetime_asleep = 0;
uint64_t mach_absolutetime_last_sleep = 0;

void
clock_get_calendar_uptime(clock_sec_t *secs)
{
	uint64_t now;
	spl_t s;
	struct bintime bt;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	bt = get_scaled_time(now);
	bintime_add(&bt, &clock_calend.offset);

	clock_unlock();
	splx(s);

	*secs = bt.sec;
}
/*
 * clock_update_calendar:
 *
 * called by ntp timer to update scale factors.
 */
void
clock_update_calendar(void)
{
	uint64_t now, delta;
	struct bintime bt;
	spl_t s;
	int64_t adjustment;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	/*
	 * scale the time elapsed since the last update and
	 * add it to the offset.
	 */
	bt = get_scaled_time(now);
	bintime_add(&clock_calend.offset, &bt);

	/*
	 * update the base from which the next scale factors will be applied.
	 */
	delta = now - clock_calend.offset_count;
	clock_calend.offset_count += delta;

	clock_calend.bintime = clock_calend.offset;
	bintime_add(&clock_calend.bintime, &clock_calend.boottime);

	/*
	 * recompute next adjustment.
	 */
	ntp_update_second(&adjustment, clock_calend.bintime.sec);

#if DEVELOPMENT || DEBUG
	if (g_should_log_clock_adjustments) {
		os_log(OS_LOG_DEFAULT, "%s adjustment %lld\n", __func__, adjustment);
	}
#endif

	/*
	 * recompute the scale factors.
	 */
	get_scale_factors_from_adj(adjustment, &clock_calend.tick_scale_x, &clock_calend.s_scale_ns, &clock_calend.s_adj_nsx);

	clock_gettimeofday_set_commpage(now, clock_calend.bintime.sec, clock_calend.bintime.frac, clock_calend.tick_scale_x, ticks_per_sec);

#if DEVELOPMENT || DEBUG
	struct clock_calend calend_cp = clock_calend;
#endif

	clock_unlock();
	splx(s);

	print_all_clock_variables(__func__, NULL, NULL, NULL, NULL, &calend_cp);
}
#if DEVELOPMENT || DEBUG

void
print_all_clock_variables_internal(const char* func, struct clock_calend* clock_calend_cp)
{
	clock_sec_t     offset_secs;
	clock_usec_t    offset_microsecs;
	clock_sec_t     bintime_secs;
	clock_usec_t    bintime_microsecs;
	clock_sec_t     bootime_secs;
	clock_usec_t    bootime_microsecs;

	if (!g_should_log_clock_adjustments) {
		return;
	}

	bintime2usclock(&clock_calend_cp->offset, &offset_secs, &offset_microsecs);
	bintime2usclock(&clock_calend_cp->bintime, &bintime_secs, &bintime_microsecs);
	bintime2usclock(&clock_calend_cp->boottime, &bootime_secs, &bootime_microsecs);

	os_log(OS_LOG_DEFAULT, "%s s_scale_ns %llu s_adj_nsx %lld tick_scale_x %llu offset_count %llu\n",
	    func, clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx,
	    clock_calend_cp->tick_scale_x, clock_calend_cp->offset_count);
	os_log(OS_LOG_DEFAULT, "%s offset.sec %ld offset.frac %llu offset_secs %lu offset_microsecs %d\n",
	    func, clock_calend_cp->offset.sec, clock_calend_cp->offset.frac,
	    (unsigned long)offset_secs, offset_microsecs);
	os_log(OS_LOG_DEFAULT, "%s bintime.sec %ld bintime.frac %llu bintime_secs %lu bintime_microsecs %d\n",
	    func, clock_calend_cp->bintime.sec, clock_calend_cp->bintime.frac,
	    (unsigned long)bintime_secs, bintime_microsecs);
	os_log(OS_LOG_DEFAULT, "%s bootime.sec %ld bootime.frac %llu bootime_secs %lu bootime_microsecs %d\n",
	    func, clock_calend_cp->boottime.sec, clock_calend_cp->boottime.frac,
	    (unsigned long)bootime_secs, bootime_microsecs);

#if !HAS_CONTINUOUS_HWCLOCK
	clock_sec_t     basesleep_secs;
	clock_usec_t    basesleep_microsecs;

	bintime2usclock(&clock_calend_cp->basesleep, &basesleep_secs, &basesleep_microsecs);
	os_log(OS_LOG_DEFAULT, "%s basesleep.sec %ld basesleep.frac %llu basesleep_secs %lu basesleep_microsecs %d\n",
	    func, clock_calend_cp->basesleep.sec, clock_calend_cp->basesleep.frac,
	    (unsigned long)basesleep_secs, basesleep_microsecs);
#endif
}

void
print_all_clock_variables(const char* func, clock_sec_t* pmu_secs, clock_usec_t* pmu_usec, clock_sec_t* sys_secs, clock_usec_t* sys_usec, struct clock_calend* clock_calend_cp)
{
	if (!g_should_log_clock_adjustments) {
		return;
	}

	struct bintime  bt;
	clock_sec_t     wall_secs;
	clock_usec_t    wall_microsecs;
	uint64_t        now;
	uint64_t        delta;

	if (pmu_secs) {
		os_log(OS_LOG_DEFAULT, "%s PMU %lu s %d u \n", func, (unsigned long)*pmu_secs, *pmu_usec);
	}
	if (sys_secs) {
		os_log(OS_LOG_DEFAULT, "%s sys %lu s %d u \n", func, (unsigned long)*sys_secs, *sys_usec);
	}

	print_all_clock_variables_internal(func, clock_calend_cp);

	now = mach_absolute_time();
	delta = now - clock_calend_cp->offset_count;

	bt = scale_delta(delta, clock_calend_cp->tick_scale_x, clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx);
	bintime_add(&bt, &clock_calend_cp->bintime);
	bintime2usclock(&bt, &wall_secs, &wall_microsecs);

	os_log(OS_LOG_DEFAULT, "%s wall %lu s %d u computed with %llu abs\n",
	    func, (unsigned long)wall_secs, wall_microsecs, now);
}

#endif /* DEVELOPMENT || DEBUG */
/*
 *	clock_initialize_calendar:
 *
 *	Set the calendar and related clocks
 *	from the platform clock at boot.
 *
 *	Also sends host notifications.
 */
void
clock_initialize_calendar(void)
{
	clock_sec_t             sys;                    // sleepless time since boot in seconds
	clock_sec_t             secs;                   // Current UTC time
	clock_sec_t             utc_offset_secs;        // Difference between current UTC time and sleepless time since boot
	clock_usec_t            microsys;
	clock_usec_t            microsecs;
	clock_usec_t            utc_offset_microsecs;
	spl_t                   s;
	struct bintime          bt;
#if !HAS_CONTINUOUS_HWCLOCK
	struct bintime          monotonic_bt;
	struct latched_time     monotonic_time;
	uint64_t                monotonic_usec_total;
	clock_sec_t             sys2, monotonic_sec;
	clock_usec_t            microsys2, monotonic_usec;
	size_t                  size;
#endif

	// Get the UTC time and corresponding sys time
	PEGetUTCTimeOfDay(&secs, &microsecs);
	clock_get_system_microtime(&sys, &microsys);

#if !HAS_CONTINUOUS_HWCLOCK
	/*
	 * If the platform has a monotonic clock, use kern.monotonicclock_usecs
	 * to estimate the sleep/wake time, otherwise use the UTC time to estimate
	 * the sleep time.
	 */
	size = sizeof(monotonic_time);
	if (kernel_sysctlbyname("kern.monotonicclock_usecs", &monotonic_time, &size, NULL, 0) != 0) {
		has_monotonic_clock = 0;
		os_log(OS_LOG_DEFAULT, "%s system does not have monotonic clock\n", __func__);
	} else {
		has_monotonic_clock = 1;
		monotonic_usec_total = monotonic_time.monotonic_time_usec;
		absolutetime_to_microtime(monotonic_time.mach_time, &sys2, &microsys2);
		os_log(OS_LOG_DEFAULT, "%s system has monotonic clock\n", __func__);
	}
#endif

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	utc_offset_secs = secs;
	utc_offset_microsecs = microsecs;

	/*
	 * We normally expect the UTC clock to be always-on and produce
	 * greater readings than the tick counter.  There may be corner cases
	 * due to differing clock resolutions (UTC clock is likely lower) and
	 * errors reading the UTC clock (some implementations return 0
	 * on error) in which that doesn't hold true.  Bring the UTC measurements
	 * in-line with the tick counter measurements as a best effort in that case.
	 */
	if ((sys > secs) || ((sys == secs) && (microsys > microsecs))) {
		os_log(OS_LOG_DEFAULT, "%s WARNING: UTC time is less than sys time, (%lu s %d u) UTC (%lu s %d u) sys\n",
		    __func__, (unsigned long)secs, microsecs, (unsigned long)sys, microsys);
		secs = utc_offset_secs = sys;
		microsecs = utc_offset_microsecs = microsys;
	}

	// UTC - sys
	// This macro stores the subtraction result in utc_offset_secs and utc_offset_microsecs
	TIME_SUB(utc_offset_secs, sys, utc_offset_microsecs, microsys, USEC_PER_SEC);
	// This function converts utc_offset_secs and utc_offset_microsecs into bintime
	clock2bintime(&utc_offset_secs, &utc_offset_microsecs, &bt);

	/*
	 *	Initialize the boot time based on the platform clock.
	 */
	clock_boottime = secs;
	clock_boottime_usec = microsecs;
	commpage_update_boottime(clock_boottime * USEC_PER_SEC + clock_boottime_usec);

	nanoseconds_to_absolutetime((uint64_t)NSEC_PER_SEC, &ticks_per_sec);
	clock_calend.boottime = bt;
	clock_calend.bintime = bt;
	clock_calend.offset.sec = 0;
	clock_calend.offset.frac = 0;

	clock_calend.tick_scale_x = (uint64_t)1 << 63;
	clock_calend.tick_scale_x /= ticks_per_sec;
	clock_calend.tick_scale_x *= 2;

	clock_calend.s_scale_ns = NSEC_PER_SEC;
	clock_calend.s_adj_nsx = 0;

#if !HAS_CONTINUOUS_HWCLOCK
	if (has_monotonic_clock) {
		monotonic_sec = monotonic_usec_total / (clock_sec_t)USEC_PER_SEC;
		monotonic_usec = monotonic_usec_total % (clock_usec_t)USEC_PER_SEC;

		// monotonic clock - sys
		// This macro stores the subtraction result in monotonic_sec and monotonic_usec
		TIME_SUB(monotonic_sec, sys2, monotonic_usec, microsys2, USEC_PER_SEC);
		clock2bintime(&monotonic_sec, &monotonic_usec, &monotonic_bt);

		// set basesleep to the difference monotonic clock - sys
		clock_calend.basesleep = monotonic_bt;
	}
#endif
	commpage_update_mach_continuous_time(mach_absolutetime_asleep);

#if DEVELOPMENT || DEBUG
	struct clock_calend clock_calend_cp = clock_calend;
#endif

	clock_unlock();
	splx(s);

	print_all_clock_variables(__func__, &secs, &microsecs, &sys, &microsys, &clock_calend_cp);

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}
#if HAS_CONTINUOUS_HWCLOCK

static void
scale_sleep_time(void)
{
	/* Apply the current NTP frequency adjustment to the time slept.
	 * The frequency adjustment remains stable between calls to ntp_adjtime(),
	 * and should thus provide a reasonable approximation of the total adjustment
	 * required for the time slept. */
	struct bintime sleep_time;
	uint64_t tick_scale_x, s_scale_ns;
	int64_t s_adj_nsx;
	int64_t sleep_adj = ntp_get_freq();
	if (sleep_adj) {
		get_scale_factors_from_adj(sleep_adj, &tick_scale_x, &s_scale_ns, &s_adj_nsx);
		sleep_time = scale_delta(mach_absolutetime_last_sleep, tick_scale_x, s_scale_ns, s_adj_nsx);
	} else {
		tick_scale_x = (uint64_t)1 << 63;
		tick_scale_x /= ticks_per_sec;
		tick_scale_x *= 2;
		sleep_time.sec = mach_absolutetime_last_sleep / ticks_per_sec;
		sleep_time.frac = (mach_absolutetime_last_sleep % ticks_per_sec) * tick_scale_x;
	}
	bintime_add(&clock_calend.offset, &sleep_time);
	bintime_add(&clock_calend.bintime, &sleep_time);
}

void
clock_wakeup_calendar(void)
{
	spl_t   s;

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	uint64_t abstime = mach_absolute_time();
	uint64_t total_sleep_time = ml_get_hwclock() - abstime;

	mach_absolutetime_last_sleep = total_sleep_time - mach_absolutetime_asleep;
	mach_absolutetime_asleep = total_sleep_time;

	scale_sleep_time();

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_CLOCK, MACH_EPOCH_CHANGE) | DBG_FUNC_NONE,
		(uintptr_t) mach_absolutetime_last_sleep,
		(uintptr_t) mach_absolutetime_asleep,
		(uintptr_t) (mach_absolutetime_last_sleep >> 32),
		(uintptr_t) (mach_absolutetime_asleep >> 32),
		0);

	commpage_update_mach_continuous_time(mach_absolutetime_asleep);
	adjust_cont_time_thread_calls();

	clock_unlock();
	splx(s);

	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}

#else /* HAS_CONTINUOUS_HWCLOCK */
void
clock_wakeup_calendar(void)
{
	clock_sec_t             wake_sys_sec;
	clock_usec_t            wake_sys_usec;
	clock_sec_t             wake_sec;
	clock_usec_t            wake_usec;
	clock_sec_t             wall_time_sec;
	clock_usec_t            wall_time_usec;
	clock_sec_t             diff_sec;
	clock_usec_t            diff_usec;
	clock_sec_t             var_s;
	clock_usec_t            var_us;
	spl_t                   s;
	struct bintime          bt, last_sleep_bt;
	struct latched_time     monotonic_time;
	uint64_t                monotonic_usec_total;
	uint64_t                wake_abs;
	size_t                  size;

	/*
	 * If the platform has the monotonic clock, use that to
	 * compute the sleep time. The monotonic clock does not have an offset
	 * that can be modified, so neither the kernel nor userspace can change the time
	 * of this clock; it can only monotonically increase over time.
	 * During sleep mach_absolute_time (sys time) does not tick,
	 * so the sleep time is the difference between the current monotonic time
	 * less the absolute time and the previous difference stored at wake time.
	 *
	 * basesleep = (monotonic - sys) ---> computed at last wake
	 * sleep_time = (monotonic - sys) - basesleep
	 *
	 * If the platform does not support the monotonic clock, we set the wall time to what the
	 * UTC clock returns us.
	 * Setting the wall time to UTC time implies that we lose all the adjustments
	 * done during wake time through adjtime/ntp_adjtime.
	 * The UTC time is the monotonic clock + an offset that can be set
	 * by the kernel.
	 * The time slept in this case is the difference between wall time and UTC
	 * at wake.
	 *
	 * We assume that only the kernel is setting the offset of the PMU/RTC and that
	 * it is doing it only through the settimeofday interface.
	 */
	if (has_monotonic_clock) {
#if DEVELOPMENT || DEBUG
		/*
		 * Just for debugging, get the wake UTC time.
		 */
		PEGetUTCTimeOfDay(&var_s, &var_us);
#endif
		/*
		 * Get monotonic time with corresponding sys time
		 */
		size = sizeof(monotonic_time);
		if (kernel_sysctlbyname("kern.monotonicclock_usecs", &monotonic_time, &size, NULL, 0) != 0) {
			panic("%s: could not call kern.monotonicclock_usecs", __func__);
		}
		wake_abs = monotonic_time.mach_time;
		absolutetime_to_microtime(wake_abs, &wake_sys_sec, &wake_sys_usec);

		monotonic_usec_total = monotonic_time.monotonic_time_usec;
		wake_sec = monotonic_usec_total / (clock_sec_t)USEC_PER_SEC;
		wake_usec = monotonic_usec_total % (clock_usec_t)USEC_PER_SEC;
	} else {
		/*
		 * Get UTC time and corresponding sys time
		 */
		PEGetUTCTimeOfDay(&wake_sec, &wake_usec);
		wake_abs = mach_absolute_time();
		absolutetime_to_microtime(wake_abs, &wake_sys_sec, &wake_sys_usec);
	}

#if DEVELOPMENT || DEBUG
	os_log(OS_LOG_DEFAULT, "time at wake %lu s %d u from %s clock, abs %llu\n", (unsigned long)wake_sec, wake_usec, (has_monotonic_clock)?"monotonic":"UTC", wake_abs);
	if (has_monotonic_clock) {
		os_log(OS_LOG_DEFAULT, "UTC time %lu s %d u\n", (unsigned long)var_s, var_us);
	}
#endif /* DEVELOPMENT || DEBUG */

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

#if DEVELOPMENT || DEBUG
	struct clock_calend clock_calend_cp1 = clock_calend;
#endif /* DEVELOPMENT || DEBUG */

	/*
	 * We normally expect the UTC/monotonic clock to be always-on and produce
	 * greater readings than the sys counter.  There may be corner cases
	 * due to differing clock resolutions (UTC/monotonic clock is likely lower) and
	 * errors reading the UTC/monotonic clock (some implementations return 0
	 * on error) in which that doesn't hold true.
	 */
	if ((wake_sys_sec > wake_sec) || ((wake_sys_sec == wake_sec) && (wake_sys_usec > wake_usec))) {
		os_log_error(OS_LOG_DEFAULT, "WARNING: %s clock is less than sys clock at wake: %lu s %d u vs %lu s %d u, defaulting sleep time to zero\n", (has_monotonic_clock)?"monotonic":"UTC", (unsigned long)wake_sec, wake_usec, (unsigned long)wake_sys_sec, wake_sys_usec);
		mach_absolutetime_last_sleep = 0;
		goto done;
	}

	if (has_monotonic_clock) {
		/*
		 * compute the difference monotonic - sys;
		 * we already checked that monotonic time is
		 * greater than sys.
		 */
		diff_sec = wake_sec;
		diff_usec = wake_usec;
		// This macro stores the subtraction result in diff_sec and diff_usec
		TIME_SUB(diff_sec, wake_sys_sec, diff_usec, wake_sys_usec, USEC_PER_SEC);
		// This function converts diff_sec and diff_usec into bintime
		clock2bintime(&diff_sec, &diff_usec, &bt);

		/*
		 * Safety belt: the monotonic clock will likely have a lower resolution than the sys counter.
		 * It's also possible that the device didn't fully transition to the powered-off state on
		 * the most recent sleep, so the sys counter may not have reset or may have only briefly
		 * turned off.  In that case it's possible for the difference between the monotonic clock and the
		 * sys counter to be less than the previously recorded value in clock.calend.basesleep.
		 * In that case simply record that we slept for 0 ticks.
		 */
		if ((bt.sec > clock_calend.basesleep.sec) ||
		    ((bt.sec == clock_calend.basesleep.sec) && (bt.frac > clock_calend.basesleep.frac))) {
			// last_sleep is the difference between (current monotonic - abs) and (last wake monotonic - abs)
			last_sleep_bt = bt;
			bintime_sub(&last_sleep_bt, &clock_calend.basesleep);

			bintime2absolutetime(&last_sleep_bt, &mach_absolutetime_last_sleep);
			mach_absolutetime_asleep += mach_absolutetime_last_sleep;

			// set basesleep to current monotonic - abs
			clock_calend.basesleep = bt;

			// update wall time
			bintime_add(&clock_calend.offset, &last_sleep_bt);
			bintime_add(&clock_calend.bintime, &last_sleep_bt);

			bintime2usclock(&last_sleep_bt, &var_s, &var_us);
			os_log(OS_LOG_DEFAULT, "time_slept (%lu s %d u)\n", (unsigned long) var_s, var_us);
		} else {
			bintime2usclock(&clock_calend.basesleep, &var_s, &var_us);
			os_log_error(OS_LOG_DEFAULT, "WARNING: last wake monotonic-sys time (%lu s %d u) is greater than current monotonic-sys time (%lu s %d u), defaulting sleep time to zero\n", (unsigned long) var_s, var_us, (unsigned long) diff_sec, diff_usec);

			mach_absolutetime_last_sleep = 0;
		}
	} else {
		/*
		 * set the wall time to UTC value
		 */
		bt = get_scaled_time(wake_abs);
		bintime_add(&bt, &clock_calend.bintime);
		bintime2usclock(&bt, &wall_time_sec, &wall_time_usec);

		if (wall_time_sec > wake_sec || (wall_time_sec == wake_sec && wall_time_usec > wake_usec)) {
			os_log(OS_LOG_DEFAULT, "WARNING: wall time (%lu s %d u) is greater than current UTC time (%lu s %d u), defaulting sleep time to zero\n", (unsigned long) wall_time_sec, wall_time_usec, (unsigned long) wake_sec, wake_usec);

			mach_absolutetime_last_sleep = 0;
		} else {
			diff_sec = wake_sec;
			diff_usec = wake_usec;
			// This macro stores the subtraction result in diff_sec and diff_usec
			TIME_SUB(diff_sec, wall_time_sec, diff_usec, wall_time_usec, USEC_PER_SEC);
			// This function converts diff_sec and diff_usec into bintime
			clock2bintime(&diff_sec, &diff_usec, &bt);

			// time slept in this case is the difference between PMU/RTC and wall time
			last_sleep_bt = bt;

			bintime2absolutetime(&last_sleep_bt, &mach_absolutetime_last_sleep);
			mach_absolutetime_asleep += mach_absolutetime_last_sleep;

			// update wall time
			bintime_add(&clock_calend.offset, &last_sleep_bt);
			bintime_add(&clock_calend.bintime, &last_sleep_bt);

			bintime2usclock(&last_sleep_bt, &var_s, &var_us);
			os_log(OS_LOG_DEFAULT, "time_slept (%lu s %d u)\n", (unsigned long)var_s, var_us);
		}
	}
done:
	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_CLOCK, MACH_EPOCH_CHANGE) | DBG_FUNC_NONE,
		(uintptr_t) mach_absolutetime_last_sleep,
		(uintptr_t) mach_absolutetime_asleep,
		(uintptr_t) (mach_absolutetime_last_sleep >> 32),
		(uintptr_t) (mach_absolutetime_asleep >> 32),
		0);

	commpage_update_mach_continuous_time(mach_absolutetime_asleep);
	adjust_cont_time_thread_calls();

#if DEVELOPMENT || DEBUG
	struct clock_calend clock_calend_cp = clock_calend;
#endif

	clock_unlock();
	splx(s);

#if DEVELOPMENT || DEBUG
	if (g_should_log_clock_adjustments) {
		print_all_clock_variables("clock_wakeup_calendar: BEFORE", NULL, NULL, NULL, NULL, &clock_calend_cp1);
		print_all_clock_variables("clock_wakeup_calendar: AFTER", NULL, NULL, NULL, NULL, &clock_calend_cp);
	}
#endif /* DEVELOPMENT || DEBUG */

	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}

#endif /* !HAS_CONTINUOUS_HWCLOCK */
/*
 *	clock_get_boottime_nanotime:
 *
 *	Return the boottime, used by sysctl.
 */
void
clock_get_boottime_nanotime(
	clock_sec_t             *secs,
	clock_nsec_t            *nanosecs)
{
	spl_t   s;

	s = splclock();
	clock_lock();

	*secs = (clock_sec_t)clock_boottime;
	*nanosecs = (clock_nsec_t)clock_boottime_usec * NSEC_PER_USEC;

	clock_unlock();
	splx(s);
}

/*
 *	clock_get_boottime_microtime:
 *
 *	Return the boottime, used by sysctl.
 */
void
clock_get_boottime_microtime(
	clock_sec_t             *secs,
	clock_usec_t            *microsecs)
{
	spl_t   s;

	s = splclock();
	clock_lock();

	*secs = (clock_sec_t)clock_boottime;
	*microsecs = (clock_nsec_t)clock_boottime_usec;

	clock_unlock();
	splx(s);
}
/*
 *	Wait / delay routines.
 */
static void
mach_wait_until_continue(
	__unused void   *parameter,
	wait_result_t   wresult)
{
	thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED : KERN_SUCCESS);
	/*NOTREACHED*/
}

/*
 * mach_wait_until_trap: Suspend execution of calling thread until the specified time has passed
 *
 * Parameters:    args->deadline          Amount of time to wait
 *
 * Returns:        0                      Success
 */
kern_return_t
mach_wait_until_trap(
	struct mach_wait_until_trap_args *args)
{
	uint64_t        deadline = args->deadline;
	wait_result_t   wresult;

	wresult = assert_wait_deadline_with_leeway((event_t)mach_wait_until_trap, THREAD_ABORTSAFE,
	    TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
	if (wresult == THREAD_WAITING) {
		wresult = thread_block(mach_wait_until_continue);
	}

	return (wresult == THREAD_INTERRUPTED)? KERN_ABORTED : KERN_SUCCESS;
}

void
clock_delay_until(
	uint64_t        deadline)
{
	uint64_t        now = mach_absolute_time();

	if (now >= deadline) {
		return;
	}

	_clock_delay_until_deadline(deadline - now, deadline);
}

/*
 * Preserve the original precise interval that the client
 * requested for comparison to the spin threshold.
 */
void
_clock_delay_until_deadline(
	uint64_t        interval,
	uint64_t        deadline)
{
	_clock_delay_until_deadline_with_leeway(interval, deadline, 0);
}

/*
 * Like _clock_delay_until_deadline, but it accepts a
 * leeway value.
 */
void
_clock_delay_until_deadline_with_leeway(
	uint64_t        interval,
	uint64_t        deadline,
	uint64_t        leeway)
{
	if (interval == 0) {
		return;
	}

	if (ml_delay_should_spin(interval) ||
	    get_preemption_level() != 0 ||
	    ml_get_interrupts_enabled() == FALSE) {
		machine_delay_until(interval, deadline);
	} else {
		/*
		 * For now, assume a leeway request of 0 means the client does not want a leeway
		 * value. We may want to change this interpretation in the future.
		 */
		if (leeway) {
			assert_wait_deadline_with_leeway((event_t)clock_delay_until, THREAD_UNINT, TIMEOUT_URGENCY_LEEWAY, deadline, leeway);
		} else {
			assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline);
		}

		thread_block(THREAD_CONTINUE_NULL);
	}
}

void
delay_for_interval(
	uint32_t        interval,
	uint32_t        scale_factor)
{
	uint64_t        abstime;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	_clock_delay_until_deadline(abstime, mach_absolute_time() + abstime);
}

void
delay_for_interval_with_leeway(
	uint32_t        interval,
	uint32_t        leeway,
	uint32_t        scale_factor)
{
	uint64_t        abstime_interval;
	uint64_t        abstime_leeway;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime_interval);
	clock_interval_to_absolutetime_interval(leeway, scale_factor, &abstime_leeway);

	_clock_delay_until_deadline_with_leeway(abstime_interval, mach_absolute_time() + abstime_interval, abstime_leeway);
}

void
delay(
	int             usec)
{
	delay_for_interval((usec < 0)? -usec : usec, NSEC_PER_USEC);
}
/*
 *	Miscellaneous routines.
 */
void
clock_interval_to_deadline(
	uint32_t                interval,
	uint32_t                scale_factor,
	uint64_t                *result)
{
	uint64_t        abstime;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	if (os_add_overflow(mach_absolute_time(), abstime, result)) {
		*result = UINT64_MAX;
	}
}

void
clock_absolutetime_interval_to_deadline(
	uint64_t                abstime,
	uint64_t                *result)
{
	if (os_add_overflow(mach_absolute_time(), abstime, result)) {
		*result = UINT64_MAX;
	}
}

void
clock_continuoustime_interval_to_deadline(
	uint64_t                conttime,
	uint64_t                *result)
{
	if (os_add_overflow(mach_continuous_time(), conttime, result)) {
		*result = UINT64_MAX;
	}
}

void
clock_get_uptime(
	uint64_t        *result)
{
	*result = mach_absolute_time();
}

void
clock_deadline_for_periodic_event(
	uint64_t                interval,
	uint64_t                abstime,
	uint64_t                *deadline)
{
	assert(interval != 0);

	// *deadline += interval;
	if (os_add_overflow(*deadline, interval, deadline)) {
		*deadline = UINT64_MAX;
	}

	if (*deadline <= abstime) {
		// *deadline = abstime + interval;
		if (os_add_overflow(abstime, interval, deadline)) {
			*deadline = UINT64_MAX;
		}

		abstime = mach_absolute_time();
		if (*deadline <= abstime) {
			// *deadline = abstime + interval;
			if (os_add_overflow(abstime, interval, deadline)) {
				*deadline = UINT64_MAX;
			}
		}
	}
}
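/*
 * Usage sketch for clock_deadline_for_periodic_event(): a periodic handler
 * that wants to fire every `interval` ticks keeps a persistent deadline and,
 * on each expiry, calls
 *
 *	clock_deadline_for_periodic_event(interval, mach_absolute_time(), &deadline);
 *
 * Normally the deadline simply advances by one interval; if it has already
 * slipped behind the supplied abstime (e.g. after a long preemption), it is
 * re-anchored to the current time instead of trying to catch up, and every
 * addition saturates at UINT64_MAX on overflow.
 */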
uint64_t
mach_continuous_time(void)
{
#if HAS_CONTINUOUS_HWCLOCK
	return ml_get_hwclock();
#else
	while (1) {
		uint64_t read1 = mach_absolutetime_asleep;
		uint64_t absolute = mach_absolute_time();
		OSMemoryBarrier();
		uint64_t read2 = mach_absolutetime_asleep;

		if (__builtin_expect(read1 == read2, 1)) {
			return absolute + read1;
		}
	}
#endif
}

uint64_t
mach_continuous_approximate_time(void)
{
#if HAS_CONTINUOUS_HWCLOCK
	return ml_get_hwclock();
#else
	while (1) {
		uint64_t read1 = mach_absolutetime_asleep;
		uint64_t absolute = mach_approximate_time();
		OSMemoryBarrier();
		uint64_t read2 = mach_absolutetime_asleep;

		if (__builtin_expect(read1 == read2, 1)) {
			return absolute + read1;
		}
	}
#endif
}
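/*
 * The loops above use a double read of mach_absolutetime_asleep around the
 * timestamp read: if the two reads agree, no sleep-accounting update slipped
 * in between and the sum (absolute + asleep) is a consistent continuous-time
 * value; otherwise the read is simply retried.
 */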
/*
 * continuoustime_to_absolutetime
 * Must be called with interrupts disabled
 * Returned value is only valid until the next update to
 * mach_continuous_time
 */
uint64_t
continuoustime_to_absolutetime(uint64_t conttime)
{
	if (conttime <= mach_absolutetime_asleep) {
		return 0;
	} else {
		return conttime - mach_absolutetime_asleep;
	}
}

/*
 * absolutetime_to_continuoustime
 * Must be called with interrupts disabled
 * Returned value is only valid until the next update to
 * mach_continuous_time
 */
uint64_t
absolutetime_to_continuoustime(uint64_t abstime)
{
	return abstime + mach_absolutetime_asleep;
}
#if CONFIG_DTRACE

/*
 * clock_get_calendar_nanotime_nowait
 *
 * Description:	Non-blocking version of clock_get_calendar_nanotime()
 *
 * Notes:	This function operates by separately tracking calendar time
 *		updates using a two element structure to copy the calendar
 *		state, which may be asynchronously modified.  It utilizes
 *		barrier instructions in the tracking process and in the local
 *		stable snapshot process in order to ensure that a consistent
 *		snapshot is used to perform the calculation.
 */
void
clock_get_calendar_nanotime_nowait(
	clock_sec_t             *secs,
	clock_nsec_t            *nanosecs)
{
	int i = 0;
	uint64_t                now;
	struct unlocked_clock_calend stable;
	struct bintime bt;

	for (;;) {
		stable = flipflop[i];           /* take snapshot */

		/*
		 * Use a barrier instruction to ensure atomicity.  We AND
		 * off the "in progress" bit to get the current generation
		 * count.
		 */
		os_atomic_andnot(&stable.gen, 1, relaxed);

		/*
		 * If an update _is_ in progress, the generation count will be
		 * off by one, if it _was_ in progress, it will be off by two,
		 * and if we caught it at a good time, it will be equal (and
		 * our snapshot is therefore stable).
		 */
		if (flipflop[i].gen == stable.gen) {
			break;
		}

		/* Switch to the other element of the flipflop, and try again. */
		i ^= 1;
	}

	now = mach_absolute_time();

	bt = get_scaled_time(now);

	bintime_add(&bt, &clock_calend.bintime);

	bintime2nsclock(&bt, secs, nanosecs);
}

static void
clock_track_calend_nowait(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		struct clock_calend tmp = clock_calend;

		/*
		 * Set the low bit of the generation count; since we use a
		 * barrier instruction to do this, we are guaranteed that this
		 * will flag an update in progress to an async caller trying
		 * to examine the contents.
		 */
		os_atomic_or(&flipflop[i].gen, 1, relaxed);

		flipflop[i].calend = tmp;

		/*
		 * Increment the generation count to clear the low bit to
		 * signal completion.  If a caller compares the generation
		 * count after taking a copy while in progress, the count
		 * will be off by two.
		 */
		os_atomic_inc(&flipflop[i].gen, relaxed);
	}
}

#endif  /* CONFIG_DTRACE */