/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)time.h	8.5 (Berkeley) 5/4/95
 */

#include <mach/mach_types.h>

#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <kern/host_notify.h>
#include <kern/thread_call.h>
#include <libkern/OSAtomic.h>

#include <IOKit/IOPlatformExpert.h>

#include <machine/commpage.h>
#include <machine/config.h>
#include <machine/machine_routines.h>

#include <mach/mach_traps.h>
#include <mach/mach_time.h>

#include <sys/kdebug.h>
#include <sys/timex.h>
#include <kern/arithmetic_128.h>
uint32_t	hz_tick_interval = 1;

decl_simple_lock_data(,clock_lock)
lck_grp_attr_t * settime_lock_grp_attr;
lck_grp_t * settime_lock_grp;
lck_attr_t * settime_lock_attr;
lck_mtx_t settime_lock;

#define clock_lock()	\
	simple_lock(&clock_lock)

#define clock_unlock()	\
	simple_unlock(&clock_lock)

#define clock_lock_init()	\
	simple_lock_init(&clock_lock, 0)

#ifdef kdp_simple_lock_is_acquired
boolean_t
kdp_clock_is_locked()
{
	return kdp_simple_lock_is_acquired(&clock_lock);
}
#endif
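/*
 * Note on the fixed-point format used by the helpers below: a struct bintime
 * carries a time value as (sec, frac), where frac counts 1/2^64 fractions of
 * a second (see the clock_calend field comments further down).  For example,
 * 1.5 s is represented as sec = 1, frac = 1 << 63.
 */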
static __inline void
bintime_addx(struct bintime *_bt, uint64_t _x)
{
	uint64_t _u;

	_u = _bt->frac;
	_bt->frac += _x;
	if (_u > _bt->frac)
		_bt->sec++;
}

static __inline void
bintime_subx(struct bintime *_bt, uint64_t _x)
{
	uint64_t _u;

	_u = _bt->frac;
	_bt->frac -= _x;
	if (_u < _bt->frac)
		_bt->sec--;
}

static __inline void
bintime_addns(struct bintime *bt, uint64_t ns)
{
	bt->sec += ns / (uint64_t)NSEC_PER_SEC;
	ns = ns % (uint64_t)NSEC_PER_SEC;
	if (ns) {
		/* 18446744073 = int(2^64 / NSEC_PER_SEC) */
		ns = ns * (uint64_t)18446744073LL;
		bintime_addx(bt, ns);
	}
}
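/*
 * Worked example for the constant used above: 2^64 / NSEC_PER_SEC =
 * 18446744073709551616 / 10^9 ~= 18446744073.7, truncated to 18446744073.
 * Multiplying the sub-second remainder (in ns) by it yields the equivalent
 * count of 1/2^64 fractions of a second; e.g. ns = 500000000 (0.5 s) maps
 * to roughly 2^63, which bintime_addx() folds into bt->frac with carry
 * into bt->sec.
 */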
static __inline void
bintime_subns(struct bintime *bt, uint64_t ns)
{
	bt->sec -= ns / (uint64_t)NSEC_PER_SEC;
	ns = ns % (uint64_t)NSEC_PER_SEC;
	if (ns) {
		/* 18446744073 = int(2^64 / NSEC_PER_SEC) */
		ns = ns * (uint64_t)18446744073LL;
		bintime_subx(bt, ns);
	}
}

static __inline void
bintime_addxns(struct bintime *bt, uint64_t a, int64_t xns)
{
	uint64_t uxns = (xns > 0) ? (uint64_t)xns : (uint64_t)-xns;
	uint64_t ns = multi_overflow(a, uxns);

	if (xns > 0) {
		if (ns)
			bintime_addns(bt, ns);
		ns = (a * uxns) / (uint64_t)NSEC_PER_SEC;
		bintime_addx(bt, ns);
	} else {
		if (ns)
			bintime_subns(bt, ns);
		ns = (a * uxns) / (uint64_t)NSEC_PER_SEC;
		bintime_subx(bt, ns);
	}
}

static __inline void
bintime_add(struct bintime *_bt, const struct bintime *_bt2)
{
	uint64_t _u;

	_u = _bt->frac;
	_bt->frac += _bt2->frac;
	if (_u > _bt->frac)
		_bt->sec++;
	_bt->sec += _bt2->sec;
}

static __inline void
bintime_sub(struct bintime *_bt, const struct bintime *_bt2)
{
	uint64_t _u;

	_u = _bt->frac;
	_bt->frac -= _bt2->frac;
	if (_u < _bt->frac)
		_bt->sec--;
	_bt->sec -= _bt2->sec;
}
static __inline void
clock2bintime(const clock_sec_t *secs, const clock_usec_t *microsecs, struct bintime *_bt)
{
	_bt->sec = *secs;
	/* 18446744073709 = int(2^64 / 1000000) */
	_bt->frac = *microsecs * (uint64_t)18446744073709LL;
}

static __inline void
bintime2usclock(const struct bintime *_bt, clock_sec_t *secs, clock_usec_t *microsecs)
{
	*secs = _bt->sec;
	*microsecs = ((uint64_t)USEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32;
}

static __inline void
bintime2nsclock(const struct bintime *_bt, clock_sec_t *secs, clock_usec_t *nanosecs)
{
	*secs = _bt->sec;
	*nanosecs = ((uint64_t)NSEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32;
}
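/*
 * The two conversions above avoid a 128-bit multiply by keeping only the
 * top 32 bits of the fraction: (_bt->frac >> 32) expresses the sub-second
 * part in 1/2^32 s units, so (USEC_PER_SEC * (frac >> 32)) >> 32 is the
 * microsecond count.  Example: frac = 1 << 63 (0.5 s) gives frac >> 32 =
 * 2^31, and (10^6 * 2^31) >> 32 = 500000.  The discarded low 32 bits are
 * worth less than a quarter of a nanosecond.
 */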
static __inline void
bintime2absolutetime(const struct bintime *_bt, uint64_t *abs)
{
	uint64_t nsec;

	nsec = (uint64_t)_bt->sec * (uint64_t)NSEC_PER_SEC + (((uint64_t)NSEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32);
	nanoseconds_to_absolutetime(nsec, abs);
}
/*
 *	Time of day (calendar) variables.
 *
 *	Algorithm:
 *
 *	TOD <- bintime + delta*scale
 *
 *	where:
 *	bintime is a cumulative offset that includes boottime and the scaled time elapsed between boottime and the last scale update.
 *	delta is ticks elapsed since the last scale update.
 *	scale is computed according to an adjustment provided by ntp_kern.
 */
static struct clock_calend {
	uint64_t	s_scale_ns;	/* scale to apply for each elapsed second; converts it to ns */
	int64_t		s_adj_nsx;	/* additional adjustment to apply for each elapsed second, expressed as a 64 bit fraction of a ns */
	uint64_t	tick_scale_x;	/* scale to apply for each elapsed tick; converts it to a 64 bit fraction of a second */
	uint64_t	offset_count;	/* abs time from which the current scales apply */
	struct bintime	offset;		/* cumulative offset expressed in (sec, 64 bit fraction of a second) */
	struct bintime	bintime;	/* cumulative offset (including boottime) expressed in (sec, 64 bit fraction of a second) */
	struct bintime	boottime;	/* boot time expressed in (sec, 64 bit fraction of a second) */
	struct bintime	basesleep;
} clock_calend;

static uint64_t ticks_per_sec;	/* ticks in a second (expressed in abs time) */
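/*
 * Illustrative read path (a minimal sketch of how the fields above combine;
 * the authoritative code is get_scaled_time() and the clock_get_calendar_*()
 * routines below):
 *
 *	now   = mach_absolute_time();
 *	delta = now - clock_calend.offset_count;	// ticks since last NTP update
 *	TOD   = clock_calend.bintime			// boottime + previously scaled time
 *		+ scale_delta(delta, clock_calend.tick_scale_x,
 *			      clock_calend.s_scale_ns, clock_calend.s_adj_nsx);
 */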
#if DEVELOPMENT || DEBUG
clock_sec_t last_utc_sec = 0;
clock_usec_t last_utc_usec = 0;
clock_sec_t max_utc_sec = 0;
clock_sec_t last_sys_sec = 0;
clock_usec_t last_sys_usec = 0;
#endif
#if	CONFIG_DTRACE

/*
 *	Unlocked calendar flipflop; this is used to track a clock_calend such
 *	that we can safely access a snapshot of a valid clock_calend structure
 *	without needing to take any locks to do it.
 *
 *	The trick is to use a generation count and set the low bit when it is
 *	being updated/read; by doing this, we guarantee, through use of the
 *	hw_atomic functions, that the generation is incremented when the bit
 *	is cleared atomically (by using a 1 bit add).
 */
static struct unlocked_clock_calend {
	struct clock_calend	calend;		/* copy of calendar */
	uint32_t		gen;		/* generation count */
} flipflop[2];
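/*
 * Sketch of the writer/reader protocol for the flipflop above (the real
 * code is clock_track_calend_nowait() and clock_get_calendar_nanotime_nowait()
 * at the bottom of this file):
 *
 *	writer, per element i:			reader, per element i:
 *	  hw_atomic_or(&gen, 1);		  stable = flipflop[i];
 *	  flipflop[i].calend = clock_calend;	  hw_atomic_and(&stable.gen, ~1);
 *	  hw_atomic_add(&gen, 1);		  accept iff flipflop[i].gen == stable.gen,
 *						  otherwise retry on the other element
 */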
static void clock_track_calend_nowait(void);

#endif

void _clock_delay_until_deadline(uint64_t interval, uint64_t deadline);
void _clock_delay_until_deadline_with_leeway(uint64_t interval, uint64_t deadline, uint64_t leeway);

/* Boottime variables */
static uint64_t clock_boottime;
static uint32_t clock_boottime_usec;

#define TIME_ADD(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN						\
	if (((rfrac) += (frac)) >= (unit)) {		\
		(rfrac) -= (unit);			\
		(rsecs) += 1;				\
	}						\
	(rsecs) += (secs);				\
MACRO_END

#define TIME_SUB(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN						\
	if ((int)((rfrac) -= (frac)) < 0) {		\
		(rfrac) += (unit);			\
		(rsecs) -= 1;				\
	}						\
	(rsecs) -= (secs);				\
MACRO_END
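/*
 * Worked example for TIME_SUB: subtracting 1.700000 s from 3.200000 s with
 * unit = USEC_PER_SEC starts with rsecs = 3, rfrac = 200000; the fraction
 * underflows, so the macro borrows one second (rfrac += 1000000, rsecs -= 1)
 * and then subtracts the whole seconds, leaving rsecs = 1, rfrac = 500000.
 * TIME_ADD carries in the opposite direction.
 */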
/*
 *	clock_config:
 *
 *	Called once at boot to configure the clock subsystem.
 */
void
clock_config(void)
{
	clock_lock_init();

	settime_lock_grp_attr = lck_grp_attr_alloc_init();
	settime_lock_grp = lck_grp_alloc_init("settime grp", settime_lock_grp_attr);
	settime_lock_attr = lck_attr_alloc_init();
	lck_mtx_init(&settime_lock, settime_lock_grp, settime_lock_attr);

	clock_oldconfig();

	ntp_init();

	nanoseconds_to_absolutetime((uint64_t)NSEC_PER_SEC, &ticks_per_sec);
}

/*
 *	clock_init:
 *
 *	Called on a processor each time started.
 */
void
clock_init(void)
{
	clock_oldinit();
}

/*
 *	clock_timebase_init:
 *
 *	Called by machine dependent code
 *	to initialize areas dependent on the
 *	timebase value.  May be called multiple
 *	times during start up.
 */
void
clock_timebase_init(void)
{
	uint64_t	abstime;

	nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
	hz_tick_interval = (uint32_t)abstime;

	sched_timebase_init();
}
/*
 *	mach_timebase_info_trap:
 *
 *	User trap returns timebase constant.
 */
kern_return_t
mach_timebase_info_trap(
	struct mach_timebase_info_trap_args *args)
{
	mach_vm_address_t		out_info_addr = args->info;
	mach_timebase_info_data_t	info;

	clock_timebase_info(&info);

	copyout((void *)&info, out_info_addr, sizeof (info));

	return (KERN_SUCCESS);
}
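/*
 * The (numer, denom) pair copied out above is the same constant user code
 * obtains from mach_timebase_info(); callers conventionally convert
 * mach_absolute_time() readings to nanoseconds as ns = abstime * numer / denom.
 */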
/*
 *	clock_get_calendar_microtime:
 *
 *	Returns the current calendar value,
 *	microseconds as the fraction.
 */
void
clock_get_calendar_microtime(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs)
{
	clock_get_calendar_absolute_and_microtime(secs, microsecs, NULL);
}

/*
 * get_scale_factors_from_adj:
 *
 * computes scale factors from the value given in adjustment.
 *
 * Part of the code has been taken from tc_windup of FreeBSD
 * written by Poul-Henning Kamp <phk@FreeBSD.ORG>, Julien Ridoux and
 * Konstantin Belousov.
 * https://github.com/freebsd/freebsd/blob/master/sys/kern/kern_tc.c
 */
static void
get_scale_factors_from_adj(int64_t adjustment, uint64_t* tick_scale_x, uint64_t* s_scale_ns, int64_t* s_adj_nsx)
{
	uint64_t scale;
	int64_t nano, frac;

	/*-
	 * Calculating the scaling factor.  We want the number of 1/2^64
	 * fractions of a second per period of the hardware counter, taking
	 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
	 * processing provides us with.
	 *
	 * The th_adjustment is nanoseconds per second with 32 bit binary
	 * fraction and we want 64 bit binary fraction of second:
	 *
	 *	 x = a * 2^32 / 10^9 = a * 4.294967296
	 *
	 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
	 * we can only multiply by about 850 without overflowing, that
	 * leaves no suitably precise fractions for multiply before divide.
	 *
	 * Divide before multiply with a fraction of 2199/512 results in a
	 * systematic undercompensation of 10PPM of th_adjustment.  On a
	 * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
	 *
	 * We happily sacrifice the lowest of the 64 bits of our result
	 * to the goddess of code clarity.
	 */
	scale = (uint64_t)1 << 63;
	scale += (adjustment / 1024) * 2199;
	scale /= ticks_per_sec;
	*tick_scale_x = scale * 2;

	/*
	 * hi part of adj
	 * it contains ns (without fraction) to add to the next sec.
	 * Get ns scale factor for the next sec.
	 */
	nano = (adjustment > 0)? adjustment >> 32 : -((-adjustment) >> 32);
	scale = (uint64_t)NSEC_PER_SEC;
	scale += nano;
	*s_scale_ns = scale;

	/*
	 * lo part of adj
	 * it contains 32 bit frac of ns to add to the next sec.
	 * Keep it as additional adjustment for the next sec.
	 */
	frac = (adjustment > 0)? ((uint32_t) adjustment) : -((uint32_t) (-adjustment));
	*s_adj_nsx = (frac > 0)? frac << 32 : -((-frac) << 32);
}
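/*
 * Worked example: with adjustment == 0 the factors reduce to the untuned
 * case: tick_scale_x = ((uint64_t)1 << 63) / ticks_per_sec * 2, i.e. about
 * 2^64 / ticks_per_sec, s_scale_ns = NSEC_PER_SEC and s_adj_nsx = 0, so
 * scale_delta() below maps delta ticks to delta / ticks_per_sec seconds
 * (up to the rounding of the integer division).  For a non-zero adjustment,
 * (adjustment / 1024) * 2199 approximates adjustment * 2^32 / 10^9 / 2
 * (2199/1024 ~= 2.147 ~= 4.294967296 / 2); the final "* 2" restores the
 * sacrificed low bit, as described in the comment above.
 */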
/*
 * scale_delta:
 *
 * returns a bintime struct representing delta scaled accordingly to the
 * scale factors provided to this function.
 */
static struct bintime
scale_delta(uint64_t delta, uint64_t tick_scale_x, uint64_t s_scale_ns, int64_t s_adj_nsx)
{
	uint64_t sec, new_ns, over;
	struct bintime bt;

	bt.sec = 0;
	bt.frac = 0;

	/*
	 * If more than one second is elapsed,
	 * scale fully elapsed seconds using scale factors for seconds.
	 * s_scale_ns -> scales sec to ns.
	 * s_adj_nsx -> additional adj expressed in 64 bit frac of ns to apply to each sec.
	 */
	if (delta > ticks_per_sec) {
		sec = (delta / ticks_per_sec);
		new_ns = sec * s_scale_ns;
		bintime_addns(&bt, new_ns);

		if (s_adj_nsx) {
			if (sec == 1) {
				/* shortcut, no overflow can occur */
				if (s_adj_nsx > 0)
					bintime_addx(&bt, (uint64_t)s_adj_nsx / (uint64_t)NSEC_PER_SEC);
				else
					bintime_subx(&bt, (uint64_t)-s_adj_nsx / (uint64_t)NSEC_PER_SEC);
			} else {
				/*
				 * s_adj_nsx is 64 bit frac of ns.
				 * sec*s_adj_nsx might overflow in int64_t.
				 * use bintime_addxns to not lose overflowed ns.
				 */
				bintime_addxns(&bt, sec, s_adj_nsx);
			}
		}
		delta = (delta % ticks_per_sec);
	}

	over = multi_overflow(tick_scale_x, delta);
	if (over) {
		bt.sec += over;
	}

	/*
	 * scale elapsed ticks using the scale factor for ticks.
	 */
	bintime_addx(&bt, delta * tick_scale_x);

	return bt;
}

/*
 * get_scaled_time:
 *
 * returns the scaled time of the time elapsed from the last time
 * scale factors were updated to now.
 */
static struct bintime
get_scaled_time(uint64_t now)
{
	uint64_t delta;

	/*
	 * Compute ticks elapsed since last scale update.
	 * This time will be scaled according to the value given by ntp kern.
	 */
	delta = now - clock_calend.offset_count;

	return scale_delta(delta, clock_calend.tick_scale_x, clock_calend.s_scale_ns, clock_calend.s_adj_nsx);
}
static void
clock_get_calendar_absolute_and_microtime_locked(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs,
	uint64_t		*abstime)
{
	uint64_t now;
	struct bintime bt;

	now = mach_absolute_time();
	if (abstime)
		*abstime = now;

	bt = get_scaled_time(now);
	bintime_add(&bt, &clock_calend.bintime);
	bintime2usclock(&bt, secs, microsecs);
}

static void
clock_get_calendar_absolute_and_nanotime_locked(
	clock_sec_t		*secs,
	clock_usec_t		*nanosecs,
	uint64_t		*abstime)
{
	uint64_t now;
	struct bintime bt;

	now = mach_absolute_time();
	if (abstime)
		*abstime = now;

	bt = get_scaled_time(now);
	bintime_add(&bt, &clock_calend.bintime);
	bintime2nsclock(&bt, secs, nanosecs);
}
/*
 *	clock_get_calendar_absolute_and_microtime:
 *
 *	Returns the current calendar value,
 *	microseconds as the fraction. Also
 *	returns mach_absolute_time if abstime
 *	is not NULL.
 */
void
clock_get_calendar_absolute_and_microtime(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs,
	uint64_t		*abstime)
{
	spl_t			s;

	s = splclock();
	clock_lock();

	clock_get_calendar_absolute_and_microtime_locked(secs, microsecs, abstime);

	clock_unlock();
	splx(s);
}

/*
 *	clock_get_calendar_nanotime:
 *
 *	Returns the current calendar value,
 *	nanoseconds as the fraction.
 *
 *	Since we do not have an interface to
 *	set the calendar with resolution greater
 *	than a microsecond, we honor that here.
 */
void
clock_get_calendar_nanotime(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	spl_t			s;

	s = splclock();
	clock_lock();

	clock_get_calendar_absolute_and_nanotime_locked(secs, nanosecs, NULL);

	clock_unlock();
	splx(s);
}

/*
 *	clock_gettimeofday:
 *
 *	Kernel interface for commpage implementation of
 *	gettimeofday() syscall.
 *
 *	Returns the current calendar value, and updates the
 *	commpage info as appropriate.  Because most calls to
 *	gettimeofday() are handled in user mode by the commpage,
 *	this routine should be used infrequently.
 */
void
clock_gettimeofday(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs)
{
	clock_gettimeofday_and_absolute_time(secs, microsecs, NULL);
}
void
clock_gettimeofday_and_absolute_time(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs,
	uint64_t		*mach_time)
{
	uint64_t		now;
	spl_t			s;
	struct bintime		bt;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();
	bt = get_scaled_time(now);
	bintime_add(&bt, &clock_calend.bintime);
	bintime2usclock(&bt, secs, microsecs);

	clock_gettimeofday_set_commpage(now, bt.sec, bt.frac, clock_calend.tick_scale_x, ticks_per_sec);

	clock_unlock();
	splx(s);

	if (mach_time) {
		*mach_time = now;
	}
}
/*
 *	clock_set_calendar_microtime:
 *
 *	Sets the current calendar value by
 *	recalculating the epoch and offset
 *	from the system clock.
 *
 *	Also adjusts the boottime to keep the
 *	value consistent, writes the new
 *	calendar value to the platform clock,
 *	and sends calendar change notifications.
 */
void
clock_set_calendar_microtime(
	clock_sec_t		secs,
	clock_usec_t		microsecs)
{
	uint64_t		absolutesys;
	clock_sec_t		newsecs;
	clock_sec_t		oldsecs;
	clock_usec_t		newmicrosecs;
	clock_usec_t		oldmicrosecs;
	uint64_t		commpage_value;
	spl_t			s;
	struct bintime		bt;
	clock_sec_t		deltasecs;
	clock_usec_t		deltamicrosecs;

	newsecs = secs;
	newmicrosecs = microsecs;

	/*
	 * settime_lock mtx is used to ensure that racing settimeofday() calls do not update the wall clock and
	 * the platform clock concurrently.
	 *
	 * clock_lock cannot be used for this race because it is acquired from interrupt context
	 * and it needs interrupts disabled, while updating the platform clock needs to be
	 * called with interrupts enabled.
	 */
	lck_mtx_lock(&settime_lock);

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	/*
	 *	Adjust the boottime based on the delta.
	 */
	clock_get_calendar_absolute_and_microtime_locked(&oldsecs, &oldmicrosecs, &absolutesys);

	if (oldsecs < secs || (oldsecs == secs && oldmicrosecs < microsecs)) {
		// moving forwards
		deltasecs = secs;
		deltamicrosecs = microsecs;

		TIME_SUB(deltasecs, oldsecs, deltamicrosecs, oldmicrosecs, USEC_PER_SEC);

		TIME_ADD(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
		clock2bintime(&deltasecs, &deltamicrosecs, &bt);
		bintime_add(&clock_calend.boottime, &bt);
		bintime_add(&clock_calend.basesleep, &bt);
	} else {
		// moving backwards
		deltasecs = oldsecs;
		deltamicrosecs = oldmicrosecs;

		TIME_SUB(deltasecs, secs, deltamicrosecs, microsecs, USEC_PER_SEC);

		TIME_SUB(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
		clock2bintime(&deltasecs, &deltamicrosecs, &bt);
		bintime_sub(&clock_calend.boottime, &bt);
		bintime_sub(&clock_calend.basesleep, &bt);
	}

	clock_calend.bintime = clock_calend.boottime;
	bintime_add(&clock_calend.bintime, &clock_calend.offset);

	clock2bintime((clock_sec_t *) &secs, (clock_usec_t *) &microsecs, &bt);

	clock_gettimeofday_set_commpage(absolutesys, bt.sec, bt.frac, clock_calend.tick_scale_x, ticks_per_sec);

	commpage_value = clock_boottime * USEC_PER_SEC + clock_boottime_usec;

	clock_unlock();
	splx(s);

	/*
	 *	Set the new value for the platform clock.
	 *	This call might block, so interrupts must be enabled.
	 */
	PESetUTCTimeOfDay(newsecs, newmicrosecs);

	commpage_update_boottime(commpage_value);

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();
	host_notify_calendar_set();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif

	lck_mtx_unlock(&settime_lock);
}
uint64_t mach_absolutetime_asleep = 0;
uint64_t mach_absolutetime_last_sleep = 0;

void
clock_get_calendar_uptime(clock_sec_t *secs)
{
	uint64_t now;
	spl_t s;
	struct bintime bt;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	bt = get_scaled_time(now);
	bintime_add(&bt, &clock_calend.offset);

	*secs = bt.sec;

	clock_unlock();
	splx(s);
}
/*
 * clock_update_calendar:
 *
 * called by ntp timer to update scale factors.
 */
void
clock_update_calendar(void)
{
	uint64_t now, delta;
	struct bintime bt;
	spl_t s;
	int64_t adjustment;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	/*
	 * scale the time elapsed since the last update and
	 * add it to the offset.
	 */
	bt = get_scaled_time(now);
	bintime_add(&clock_calend.offset, &bt);

	/*
	 * update the base from which the next scale factors apply.
	 */
	delta = now - clock_calend.offset_count;
	clock_calend.offset_count += delta;

	clock_calend.bintime = clock_calend.offset;
	bintime_add(&clock_calend.bintime, &clock_calend.boottime);

	/*
	 * recompute next adjustment.
	 */
	ntp_update_second(&adjustment, clock_calend.bintime.sec);

	/*
	 * recompute scale factors.
	 */
	get_scale_factors_from_adj(adjustment, &clock_calend.tick_scale_x, &clock_calend.s_scale_ns, &clock_calend.s_adj_nsx);

	clock_gettimeofday_set_commpage(now, clock_calend.bintime.sec, clock_calend.bintime.frac, clock_calend.tick_scale_x, ticks_per_sec);

	clock_unlock();
	splx(s);
}
/*
 *	clock_initialize_calendar:
 *
 *	Set the calendar and related clocks
 *	from the platform clock at boot or
 *	wake event.
 *
 *	Also sends host notifications.
 */
void
clock_initialize_calendar(void)
{
	clock_sec_t		sys;			// sleepless time since boot in seconds
	clock_sec_t		secs;			// Current UTC time
	clock_sec_t		utc_offset_secs;	// Difference in current UTC time and sleepless time since boot
	clock_usec_t		microsys;
	clock_usec_t		microsecs;
	clock_usec_t		utc_offset_microsecs;
	spl_t			s;
	struct bintime		bt;

	PEGetUTCTimeOfDay(&secs, &microsecs);

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	/*
	 *	Calculate the new calendar epoch based on
	 *	the platform clock and the system clock.
	 */
	clock_get_system_microtime(&sys, &microsys);
	utc_offset_secs = secs;
	utc_offset_microsecs = microsecs;

#if DEVELOPMENT || DEBUG
	last_utc_sec = secs;
	last_utc_usec = microsecs;
	last_sys_sec = sys;
	last_sys_usec = microsys;
	if (secs > max_utc_sec)
		max_utc_sec = secs;
#endif

	/*
	 * We normally expect the UTC clock to be always-on and produce
	 * greater readings than the tick counter.  There may be corner cases
	 * due to differing clock resolutions (UTC clock is likely lower) and
	 * errors reading the UTC clock (some implementations return 0
	 * on error) in which that doesn't hold true.  Bring the UTC measurements
	 * in-line with the tick counter measurements as a best effort in that case.
	 */
	if ((sys > secs) || ((sys == secs) && (microsys > microsecs))) {
		secs = utc_offset_secs = sys;
		microsecs = utc_offset_microsecs = microsys;
	}

	// This macro stores the subtraction result in utc_offset_secs and utc_offset_microsecs
	TIME_SUB(utc_offset_secs, sys, utc_offset_microsecs, microsys, USEC_PER_SEC);

	clock2bintime(&utc_offset_secs, &utc_offset_microsecs, &bt);

	/*
	 *	Initialize the boot time based on the platform clock.
	 */
	clock_boottime = secs;
	clock_boottime_usec = microsecs;
	commpage_update_boottime(clock_boottime * USEC_PER_SEC + clock_boottime_usec);

	nanoseconds_to_absolutetime((uint64_t)NSEC_PER_SEC, &ticks_per_sec);
	clock_calend.boottime = bt;
	clock_calend.bintime = bt;
	clock_calend.offset.sec = 0;
	clock_calend.offset.frac = 0;

	clock_calend.tick_scale_x = (uint64_t)1 << 63;
	clock_calend.tick_scale_x /= ticks_per_sec;
	clock_calend.tick_scale_x *= 2;

	clock_calend.s_scale_ns = NSEC_PER_SEC;
	clock_calend.s_adj_nsx = 0;

	clock_calend.basesleep = bt;

	commpage_update_mach_continuous_time(mach_absolutetime_asleep);

	clock_unlock();
	splx(s);

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}
void
clock_wakeup_calendar(void)
{
	clock_sec_t		sys;		// sleepless time since boot in seconds
	clock_sec_t		secs;		// Current UTC time
	clock_usec_t		microsys;
	clock_usec_t		microsecs;
	spl_t			s;
	struct bintime		utc_offset_bt, last_sleep_bt;

	PEGetUTCTimeOfDay(&secs, &microsecs);

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	/*
	 *	Calculate the new calendar epoch based on
	 *	the platform clock and the system clock.
	 */
	clock_get_system_microtime(&sys, &microsys);

#if DEVELOPMENT || DEBUG
	last_utc_sec = secs;
	last_utc_usec = microsecs;
	last_sys_sec = sys;
	last_sys_usec = microsys;
	if (secs > max_utc_sec)
		max_utc_sec = secs;
#endif

	/*
	 * We normally expect the UTC clock to be always-on and produce
	 * greater readings than the tick counter.  There may be corner cases
	 * due to differing clock resolutions (UTC clock is likely lower) and
	 * errors reading the UTC clock (some implementations return 0 on error)
	 * in which that doesn't hold true.  Bring the UTC measurements in-line
	 * with the tick counter measurements as a best effort in that case.
	 */
	if ((sys > secs) || ((sys == secs) && (microsys > microsecs))) {
		secs = sys;
		microsecs = microsys;
	}

	// This macro stores the subtraction result in secs and microsecs
	TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);
	clock2bintime(&secs, &microsecs, &utc_offset_bt);
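	/*
	 * At this point utc_offset_bt holds (UTC time - monotonic time since boot).
	 * That difference only grows while the tick counter is stopped, i.e. while
	 * the device is asleep, so comparing it below with the value captured at
	 * init/last wake (clock_calend.basesleep) yields the duration of the most
	 * recent sleep.  For example, basesleep = 1000.0 s and utc_offset_bt =
	 * 1012.5 s means roughly 12.5 s were spent asleep.
	 */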
	/*
	 * Safety belt: the UTC clock will likely have a lower resolution than the tick counter.
	 * It's also possible that the device didn't fully transition to the powered-off state on
	 * the most recent sleep, so the tick counter may not have reset or may have only briefly
	 * turned off.  In that case it's possible for the difference between the UTC clock and the
	 * tick counter to be less than the previously recorded value in clock_calend.basesleep.
	 * In that case simply record that we slept for 0 ticks.
	 */
	if ((utc_offset_bt.sec > clock_calend.basesleep.sec) ||
	    ((utc_offset_bt.sec == clock_calend.basesleep.sec) && (utc_offset_bt.frac > clock_calend.basesleep.frac))) {

		last_sleep_bt = utc_offset_bt;
		bintime_sub(&last_sleep_bt, &clock_calend.basesleep);
		clock_calend.basesleep = utc_offset_bt;

		bintime2absolutetime(&last_sleep_bt, &mach_absolutetime_last_sleep);
		mach_absolutetime_asleep += mach_absolutetime_last_sleep;

		bintime_add(&clock_calend.offset, &last_sleep_bt);
		bintime_add(&clock_calend.bintime, &last_sleep_bt);
	} else {
		mach_absolutetime_last_sleep = 0;
	}

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_CLOCK, MACH_EPOCH_CHANGE) | DBG_FUNC_NONE,
		(uintptr_t) mach_absolutetime_last_sleep,
		(uintptr_t) mach_absolutetime_asleep,
		(uintptr_t) (mach_absolutetime_last_sleep >> 32),
		(uintptr_t) (mach_absolutetime_asleep >> 32),
		0);

	commpage_update_mach_continuous_time(mach_absolutetime_asleep);
	adjust_cont_time_thread_calls();

	clock_unlock();
	splx(s);

	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}
/*
 *	clock_get_boottime_nanotime:
 *
 *	Return the boottime, used by sysctl.
 */
void
clock_get_boottime_nanotime(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	spl_t	s;

	s = splclock();
	clock_lock();

	*secs = (clock_sec_t)clock_boottime;
	*nanosecs = (clock_nsec_t)clock_boottime_usec * NSEC_PER_USEC;

	clock_unlock();
	splx(s);
}

/*
 *	clock_get_boottime_microtime:
 *
 *	Return the boottime, used by sysctl.
 */
void
clock_get_boottime_microtime(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs)
{
	spl_t	s;

	s = splclock();
	clock_lock();

	*secs = (clock_sec_t)clock_boottime;
	*microsecs = (clock_usec_t)clock_boottime_usec;

	clock_unlock();
	splx(s);
}
/*
 *	Wait / delay routines.
 */
static void
mach_wait_until_continue(
	__unused void	*parameter,
	wait_result_t	wresult)
{
	thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
	/*NOTREACHED*/
}

/*
 * mach_wait_until_trap: Suspend execution of calling thread until the specified time has passed
 *
 * Parameters:	args->deadline		Wake-up time, in mach absolute time units
 *
 * Returns:	0			Success
 *		!0			Not success
 */
kern_return_t
mach_wait_until_trap(
	struct mach_wait_until_trap_args	*args)
{
	uint64_t	deadline = args->deadline;
	wait_result_t	wresult;

	wresult = assert_wait_deadline_with_leeway((event_t)mach_wait_until_trap, THREAD_ABORTSAFE,
						   TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
	if (wresult == THREAD_WAITING)
		wresult = thread_block(mach_wait_until_continue);

	return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
}
void
clock_delay_until(
	uint64_t		deadline)
{
	uint64_t		now = mach_absolute_time();

	if (now >= deadline)
		return;

	_clock_delay_until_deadline(deadline - now, deadline);
}

/*
 * Preserve the original precise interval that the client
 * requested for comparison to the spin threshold.
 */
void
_clock_delay_until_deadline(
	uint64_t		interval,
	uint64_t		deadline)
{
	_clock_delay_until_deadline_with_leeway(interval, deadline, 0);
}

/*
 * Like _clock_delay_until_deadline, but it accepts a
 * leeway value.
 */
void
_clock_delay_until_deadline_with_leeway(
	uint64_t		interval,
	uint64_t		deadline,
	uint64_t		leeway)
{
	if (interval == 0)
		return;

	if (ml_delay_should_spin(interval)	||
	    get_preemption_level() != 0		||
	    ml_get_interrupts_enabled() == FALSE) {
		machine_delay_until(interval, deadline);
	} else {
		/*
		 * For now, assume a leeway request of 0 means the client does not want a leeway
		 * value. We may want to change this interpretation in the future.
		 */
		if (leeway) {
			assert_wait_deadline_with_leeway((event_t)clock_delay_until, THREAD_UNINT, TIMEOUT_URGENCY_LEEWAY, deadline, leeway);
		} else {
			assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline);
		}

		thread_block(THREAD_CONTINUE_NULL);
	}
}
void
delay_for_interval(
	uint32_t		interval,
	uint32_t		scale_factor)
{
	uint64_t		abstime;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	_clock_delay_until_deadline(abstime, mach_absolute_time() + abstime);
}

void
delay_for_interval_with_leeway(
	uint32_t		interval,
	uint32_t		leeway,
	uint32_t		scale_factor)
{
	uint64_t		abstime_interval;
	uint64_t		abstime_leeway;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime_interval);
	clock_interval_to_absolutetime_interval(leeway, scale_factor, &abstime_leeway);

	_clock_delay_until_deadline_with_leeway(abstime_interval, mach_absolute_time() + abstime_interval, abstime_leeway);
}

void
delay(
	int		usec)
{
	delay_for_interval((usec < 0)? -usec : usec, NSEC_PER_USEC);
}
/*
 *	Miscellaneous routines.
 */
void
clock_interval_to_deadline(
	uint32_t		interval,
	uint32_t		scale_factor,
	uint64_t		*result)
{
	uint64_t	abstime;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	*result = mach_absolute_time() + abstime;
}

void
clock_absolutetime_interval_to_deadline(
	uint64_t		abstime,
	uint64_t		*result)
{
	*result = mach_absolute_time() + abstime;
}

void
clock_continuoustime_interval_to_deadline(
	uint64_t		conttime,
	uint64_t		*result)
{
	*result = mach_continuous_time() + conttime;
}

void
clock_get_uptime(
	uint64_t	*result)
{
	*result = mach_absolute_time();
}
void
clock_deadline_for_periodic_event(
	uint64_t		interval,
	uint64_t		abstime,
	uint64_t		*deadline)
{
	assert(interval != 0);

	*deadline += interval;

	if (*deadline <= abstime) {
		*deadline = abstime + interval;
		abstime = mach_absolute_time();

		if (*deadline <= abstime)
			*deadline = abstime + interval;
	}
}
uint64_t
mach_continuous_time(void)
{
	while (1) {
		uint64_t read1 = mach_absolutetime_asleep;
		uint64_t absolute = mach_absolute_time();
		OSMemoryBarrier();
		uint64_t read2 = mach_absolutetime_asleep;

		if (__builtin_expect(read1 == read2, 1)) {
			return absolute + read1;
		} else {
			OSMemoryBarrier();
		}
	}
}

uint64_t
mach_continuous_approximate_time(void)
{
	while (1) {
		uint64_t read1 = mach_absolutetime_asleep;
		uint64_t absolute = mach_approximate_time();
		OSMemoryBarrier();
		uint64_t read2 = mach_absolutetime_asleep;

		if (__builtin_expect(read1 == read2, 1)) {
			return absolute + read1;
		} else {
			OSMemoryBarrier();
		}
	}
}
/*
 * continuoustime_to_absolutetime
 * Must be called with interrupts disabled
 * Returned value is only valid until the next update to
 * mach_continuous_time
 */
uint64_t
continuoustime_to_absolutetime(uint64_t conttime) {
	if (conttime <= mach_absolutetime_asleep)
		return 0;
	else
		return conttime - mach_absolutetime_asleep;
}

/*
 * absolutetime_to_continuoustime
 * Must be called with interrupts disabled
 * Returned value is only valid until the next update to
 * mach_continuous_time
 */
uint64_t
absolutetime_to_continuoustime(uint64_t abstime) {
	return abstime + mach_absolutetime_asleep;
}
#if	CONFIG_DTRACE

/*
 *	clock_get_calendar_nanotime_nowait
 *
 *	Description:	Non-blocking version of clock_get_calendar_nanotime()
 *
 *	Notes:		This function operates by separately tracking calendar time
 *			updates using a two element structure to copy the calendar
 *			state, which may be asynchronously modified.  It utilizes
 *			barrier instructions in the tracking process and in the local
 *			stable snapshot process in order to ensure that a consistent
 *			snapshot is used to perform the calculation.
 */
void
clock_get_calendar_nanotime_nowait(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	int i = 0;
	uint64_t now;
	struct unlocked_clock_calend stable;
	struct bintime bt;

	for (;;) {
		stable = flipflop[i];		/* take snapshot */

		/*
		 *	Use a barrier instruction to ensure atomicity.  We AND
		 *	off the "in progress" bit to get the current generation
		 *	count.
		 */
		(void)hw_atomic_and(&stable.gen, ~(uint32_t)1);

		/*
		 *	If an update _is_ in progress, the generation count will be
		 *	off by one, if it _was_ in progress, it will be off by two,
		 *	and if we caught it at a good time, it will be equal (and
		 *	our snapshot is therefore stable).
		 */
		if (flipflop[i].gen == stable.gen)
			break;

		/* Switch to the other element of the flipflop, and try again. */
		i ^= 1;
	}

	now = mach_absolute_time();

	bt = get_scaled_time(now);

	bintime_add(&bt, &clock_calend.bintime);

	bintime2nsclock(&bt, secs, nanosecs);
}

static void
clock_track_calend_nowait(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		struct clock_calend tmp = clock_calend;

		/*
		 *	Set the low bit of the generation count; since we use a
		 *	barrier instruction to do this, we are guaranteed that this
		 *	will flag an update in progress to an async caller trying
		 *	to examine the contents.
		 */
		(void)hw_atomic_or(&flipflop[i].gen, 1);

		flipflop[i].calend = tmp;

		/*
		 *	Increment the generation count to clear the low bit to
		 *	signal completion.  If a caller compares the generation
		 *	count after taking a copy while in progress, the count
		 *	will be off by two.
		 */
		(void)hw_atomic_add(&flipflop[i].gen, 1);
	}
}

#endif	/* CONFIG_DTRACE */