/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 */
/*-
 * Copyright (c) 1982, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)time.h	8.5 (Berkeley) 5/4/95
 * $FreeBSD$
 */

#include <mach/mach_types.h>

#include <kern/spl.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <kern/host_notify.h>
#include <kern/thread_call.h>
#include <libkern/OSAtomic.h>

#include <IOKit/IOPlatformExpert.h>

#include <machine/commpage.h>
#include <machine/config.h>
#include <machine/machine_routines.h>

#include <mach/mach_traps.h>
#include <mach/mach_time.h>

#include <sys/kdebug.h>
#include <sys/timex.h>
#include <kern/arithmetic_128.h>
#include <os/log.h>

#if HIBERNATION && HAS_CONTINUOUS_HWCLOCK
// On ARM64, the hwclock keeps ticking across a normal S2R, so we use it to reset the
// system clock after a normal wake. However, on hibernation we cut power to the hwclock,
// so we have to add an offset to the hwclock to compute continuous_time after hibernate resume.
uint64_t hwclock_conttime_offset = 0;
#endif /* HIBERNATION && HAS_CONTINUOUS_HWCLOCK */

#if HIBERNATION_USES_LEGACY_CLOCK || !HAS_CONTINUOUS_HWCLOCK
#define ENABLE_LEGACY_CLOCK_CODE 1
#endif /* HIBERNATION_USES_LEGACY_CLOCK || !HAS_CONTINUOUS_HWCLOCK */

#if HIBERNATION_USES_LEGACY_CLOCK
#include <IOKit/IOHibernatePrivate.h>
#endif /* HIBERNATION_USES_LEGACY_CLOCK */

uint32_t hz_tick_interval = 1;
#if ENABLE_LEGACY_CLOCK_CODE
static uint64_t has_monotonic_clock = 0;
#endif /* ENABLE_LEGACY_CLOCK_CODE */

SIMPLE_LOCK_DECLARE(clock_lock, 0);

static LCK_GRP_DECLARE(settime_lock_grp, "settime");
static LCK_MTX_DECLARE(settime_lock, &settime_lock_grp);

#define clock_lock()	\
	simple_lock(&clock_lock, LCK_GRP_NULL)

#define clock_unlock()	\
	simple_unlock(&clock_lock)

#ifdef kdp_simple_lock_is_acquired
boolean_t
kdp_clock_is_locked()
{
	return kdp_simple_lock_is_acquired(&clock_lock);
}
#endif

struct bintime {
	time_t   sec;
	uint64_t frac;
};
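
/*
 * Illustrative example (not used by the code below): a bintime encodes
 * sec + frac / 2^64 seconds, i.e. frac is a 64-bit binary fraction of
 * one second:
 *
 *   0.5  s -> frac = 1ULL << 63
 *   0.25 s -> frac = 1ULL << 62
 *   1.75 s -> sec = 1, frac = 3ULL << 62
 *
 * The carry/borrow between the two fields is what bintime_addx() and
 * bintime_subx() below implement.
 */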

static __inline void
bintime_addx(struct bintime *_bt, uint64_t _x)
{
	uint64_t _u;

	_u = _bt->frac;
	_bt->frac += _x;
	if (_u > _bt->frac) {
		_bt->sec++;
	}
}

static __inline void
bintime_subx(struct bintime *_bt, uint64_t _x)
{
	uint64_t _u;

	_u = _bt->frac;
	_bt->frac -= _x;
	if (_u < _bt->frac) {
		_bt->sec--;
	}
}

static __inline void
bintime_addns(struct bintime *bt, uint64_t ns)
{
	bt->sec += ns / (uint64_t)NSEC_PER_SEC;
	ns = ns % (uint64_t)NSEC_PER_SEC;
	if (ns) {
		/* 18446744073 = int(2^64 / NSEC_PER_SEC) */
		ns = ns * (uint64_t)18446744073LL;
		bintime_addx(bt, ns);
	}
}
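
/*
 * Illustrative example (values assumed): adding 1.5 * 10^9 ns to a zeroed
 * bintime. The whole second goes into sec; the 5 * 10^8 ns remainder is
 * converted to a binary fraction with the precomputed constant 2^64 / 10^9:
 *
 *   500000000 * 18446744073 = 9223372036500000000 ~= 2^63
 *
 * i.e. almost exactly half a second, so the result is sec = 1,
 * frac ~= 1ULL << 63. Because the constant truncates 2^64 / 10^9, the
 * conversion undershoots by less than one part in 10^9.
 */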

static __inline void
bintime_subns(struct bintime *bt, uint64_t ns)
{
	bt->sec -= ns / (uint64_t)NSEC_PER_SEC;
	ns = ns % (uint64_t)NSEC_PER_SEC;
	if (ns) {
		/* 18446744073 = int(2^64 / NSEC_PER_SEC) */
		ns = ns * (uint64_t)18446744073LL;
		bintime_subx(bt, ns);
	}
}

static __inline void
bintime_addxns(struct bintime *bt, uint64_t a, int64_t xns)
{
	uint64_t uxns = (xns > 0)?(uint64_t)xns:(uint64_t)-xns;
	uint64_t ns = multi_overflow(a, uxns);
	if (xns > 0) {
		if (ns) {
			bintime_addns(bt, ns);
		}
		ns = (a * uxns) / (uint64_t)NSEC_PER_SEC;
		bintime_addx(bt, ns);
	} else {
		if (ns) {
			bintime_subns(bt, ns);
		}
		ns = (a * uxns) / (uint64_t)NSEC_PER_SEC;
		bintime_subx(bt, ns);
	}
}


static __inline void
bintime_add(struct bintime *_bt, const struct bintime *_bt2)
{
	uint64_t _u;

	_u = _bt->frac;
	_bt->frac += _bt2->frac;
	if (_u > _bt->frac) {
		_bt->sec++;
	}
	_bt->sec += _bt2->sec;
}

static __inline void
bintime_sub(struct bintime *_bt, const struct bintime *_bt2)
{
	uint64_t _u;

	_u = _bt->frac;
	_bt->frac -= _bt2->frac;
	if (_u < _bt->frac) {
		_bt->sec--;
	}
	_bt->sec -= _bt2->sec;
}

static __inline void
clock2bintime(const clock_sec_t *secs, const clock_usec_t *microsecs, struct bintime *_bt)
{
	_bt->sec = *secs;
	/* 18446744073709 = int(2^64 / 1000000) */
	_bt->frac = *microsecs * (uint64_t)18446744073709LL;
}

static __inline void
bintime2usclock(const struct bintime *_bt, clock_sec_t *secs, clock_usec_t *microsecs)
{
	*secs = _bt->sec;
	*microsecs = ((uint64_t)USEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32;
}

static __inline void
bintime2nsclock(const struct bintime *_bt, clock_sec_t *secs, clock_usec_t *nanosecs)
{
	*secs = _bt->sec;
	*nanosecs = ((uint64_t)NSEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32;
}
249
f427ee49 250#if ENABLE_LEGACY_CLOCK_CODE
5ba3f43e
A
251static __inline void
252bintime2absolutetime(const struct bintime *_bt, uint64_t *abs)
253{
254 uint64_t nsec;
255 nsec = (uint64_t) _bt->sec * (uint64_t)NSEC_PER_SEC + (((uint64_t)NSEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32);
256 nanoseconds_to_absolutetime(nsec, abs);
257}
cc8bc92a
A
258
259struct latched_time {
0a7de745
A
260 uint64_t monotonic_time_usec;
261 uint64_t mach_time;
cc8bc92a
A
262};
263
264extern int
265kernel_sysctlbyname(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
266
f427ee49 267#endif /* ENABLE_LEGACY_CLOCK_CODE */
/*
 * Time of day (calendar) variables.
 *
 * Algorithm:
 *
 * TOD <- bintime + delta*scale
 *
 * where:
 *	bintime is a cumulative offset that includes boottime and scaled time elapsed between boottime and the last scale update.
 *	delta is ticks elapsed since the last scale update.
 *	scale is computed according to an adjustment provided by ntp_kern.
 */
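/*
 * Illustrative numbers (assumed, not measured): suppose the last scale
 * update happened at offset_count with bintime = 500.0 s, the timebase
 * runs at an assumed ticks_per_sec = 24000000, and NTP is applying no
 * adjustment, so scale ~= 2^64 / 24000000. Reading the clock 12000000
 * ticks later:
 *
 *   delta = 12000000
 *   delta * scale ~= 12000000 * 2^64 / 24000000 = 2^63   (0.5 s of frac)
 *   TOD = 500.0 s + 0.5 s = 500.5 s
 */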
static struct clock_calend {
	uint64_t s_scale_ns;    /* scale applied for each elapsed second; converts to ns */
	int64_t s_adj_nsx;      /* additional adjustment applied per elapsed second, expressed as a 64-bit fraction of ns */
	uint64_t tick_scale_x;  /* scale applied for each elapsed tick; converts to a 64-bit fraction of a second */
	uint64_t offset_count;  /* abs time from which the current scales apply */
	struct bintime offset;  /* cumulative offset expressed as (sec, 64-bit fraction of a second) */
	struct bintime bintime; /* cumulative offset (including boottime) expressed as (sec, 64-bit fraction of a second) */
	struct bintime boottime; /* boot time expressed as (sec, 64-bit fraction of a second) */
#if ENABLE_LEGACY_CLOCK_CODE
	struct bintime basesleep;
#endif /* ENABLE_LEGACY_CLOCK_CODE */
} clock_calend;

static uint64_t ticks_per_sec; /* ticks in a second (expressed in abs time) */

#if DEVELOPMENT || DEBUG
extern int g_should_log_clock_adjustments;

static void print_all_clock_variables(const char*, clock_sec_t* pmu_secs, clock_usec_t* pmu_usec, clock_sec_t* sys_secs, clock_usec_t* sys_usec, struct clock_calend* calend_cp);
static void print_all_clock_variables_internal(const char *, struct clock_calend* calend_cp);
#else
#define print_all_clock_variables(...) do { } while (0)
#define print_all_clock_variables_internal(...) do { } while (0)
#endif

#if CONFIG_DTRACE

/*
 * Unlocked calendar flipflop; this is used to track a clock_calend such
 * that we can safely access a snapshot of a valid clock_calend structure
 * without needing to take any locks to do it.
 *
 * The trick is to use a generation count and set the low bit when it is
 * being updated/read; by doing this, we guarantee, through use of the
 * os_atomic functions, that the generation is incremented when the bit
 * is cleared atomically (by using a 1 bit add).
 */
static struct unlocked_clock_calend {
	struct clock_calend calend; /* copy of calendar */
	uint32_t gen;               /* generation count */
} flipflop[2];

static void clock_track_calend_nowait(void);

#endif
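
/*
 * Hypothetical reader sketch (illustration only; the real consumer lives in
 * the dtrace support code): take a snapshot from the flipflop without holding
 * clock_lock by re-checking the generation around the copy and skipping
 * entries whose low bit marks an update in progress.
 */
#if 0
static boolean_t
read_calend_nowait(struct clock_calend *out)
{
	for (int i = 0; i < 2; i++) {
		uint32_t gen = flipflop[i].gen;
		if (gen == 0 || (gen & 1)) {
			continue;        /* unused or mid-update */
		}
		*out = flipflop[i].calend;
		OSMemoryBarrier();
		if (flipflop[i].gen == gen) {
			return TRUE;     /* snapshot is consistent */
		}
	}
	return FALSE;
}
#endif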

void _clock_delay_until_deadline(uint64_t interval, uint64_t deadline);
void _clock_delay_until_deadline_with_leeway(uint64_t interval, uint64_t deadline, uint64_t leeway);

/* Boottime variables */
static uint64_t clock_boottime;
static uint32_t clock_boottime_usec;

#define TIME_ADD(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN						\
	if (((rfrac) += (frac)) >= (unit)) {		\
		(rfrac) -= (unit);			\
		(rsecs) += 1;				\
	}						\
	(rsecs) += (secs);				\
MACRO_END

#define TIME_SUB(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN						\
	if ((int)((rfrac) -= (frac)) < 0) {		\
		(rfrac) += (unit);			\
		(rsecs) -= 1;				\
	}						\
	(rsecs) -= (secs);				\
MACRO_END
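
/*
 * Illustrative use (values assumed): TIME_SUB is a seconds/fraction
 * subtraction with borrow. With rsecs = 10, rfrac = 200000 us and
 * secs = 3, frac = 700000 us, unit = USEC_PER_SEC:
 *
 *   200000 - 700000 < 0  ->  rfrac = 500000, rsecs = 9
 *   rsecs -= 3           ->  result: 6 s 500000 us   (10.2 s - 3.7 s)
 */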

/*
 * clock_config:
 *
 * Called once at boot to configure the clock subsystem.
 */
void
clock_config(void)
{
	clock_oldconfig();

	ntp_init();

	nanoseconds_to_absolutetime((uint64_t)NSEC_PER_SEC, &ticks_per_sec);
}

/*
 * clock_init:
 *
 * Called on a processor each time it is started.
 */
void
clock_init(void)
{
	clock_oldinit();
}

/*
 * clock_timebase_init:
 *
 * Called by machine dependent code
 * to initialize areas dependent on the
 * timebase value. May be called multiple
 * times during start up.
 */
void
clock_timebase_init(void)
{
	uint64_t abstime;

	nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
	hz_tick_interval = (uint32_t)abstime;

	sched_timebase_init();
}

/*
 * mach_timebase_info_trap:
 *
 * User trap returns timebase constant.
 */
kern_return_t
mach_timebase_info_trap(
	struct mach_timebase_info_trap_args *args)
{
	mach_vm_address_t out_info_addr = args->info;
	mach_timebase_info_data_t info = {};

	clock_timebase_info(&info);

	copyout((void *)&info, out_info_addr, sizeof(info));

	return KERN_SUCCESS;
}
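
/*
 * Illustrative user-space counterpart (not kernel code): the numer/denom
 * pair returned above converts mach_absolute_time() ticks to nanoseconds.
 *
 *   #include <mach/mach_time.h>
 *
 *   mach_timebase_info_data_t tb;
 *   mach_timebase_info(&tb);                   // traps into the kernel once
 *   uint64_t ticks = mach_absolute_time();
 *   uint64_t ns = ticks * tb.numer / tb.denom; // may overflow for very large tick counts
 */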

/*
 * Calendar routines.
 */

/*
 * clock_get_calendar_microtime:
 *
 * Returns the current calendar value,
 * microseconds as the fraction.
 */
void
clock_get_calendar_microtime(
	clock_sec_t *secs,
	clock_usec_t *microsecs)
{
	clock_get_calendar_absolute_and_microtime(secs, microsecs, NULL);
}

/*
 * get_scale_factors_from_adj:
 *
 * computes scale factors from the value given in adjustment.
 *
 * Part of the code has been taken from tc_windup of FreeBSD
 * written by Poul-Henning Kamp <phk@FreeBSD.ORG>, Julien Ridoux and
 * Konstantin Belousov.
 * https://github.com/freebsd/freebsd/blob/master/sys/kern/kern_tc.c
 */
static void
get_scale_factors_from_adj(int64_t adjustment, uint64_t* tick_scale_x, uint64_t* s_scale_ns, int64_t* s_adj_nsx)
{
	uint64_t scale;
	int64_t nano, frac;

	/*-
	 * Calculating the scaling factor.  We want the number of 1/2^64
	 * fractions of a second per period of the hardware counter, taking
	 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
	 * processing provides us with.
	 *
	 * The th_adjustment is nanoseconds per second with 32 bit binary
	 * fraction and we want 64 bit binary fraction of second:
	 *
	 *	 x = a * 2^32 / 10^9 = a * 4.294967296
	 *
	 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
	 * we can only multiply by about 850 without overflowing, that
	 * leaves no suitably precise fractions for multiply before divide.
	 *
	 * Divide before multiply with a fraction of 2199/512 results in a
	 * systematic undercompensation of 10PPM of th_adjustment.  On a
	 * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
	 *
	 * We happily sacrifice the lowest of the 64 bits of our result
	 * to the goddess of code clarity.
	 *
	 */
	scale = (uint64_t)1 << 63;
	scale += (adjustment / 1024) * 2199;
	scale /= ticks_per_sec;
	*tick_scale_x = scale * 2;

	/*
	 * hi part of adj
	 * it contains ns (without fraction) to add to the next sec.
	 * Get ns scale factor for the next sec.
	 */
	nano = (adjustment > 0)? adjustment >> 32 : -((-adjustment) >> 32);
	scale = (uint64_t) NSEC_PER_SEC;
	scale += nano;
	*s_scale_ns = scale;

	/*
	 * lo part of adj
	 * it contains 32 bit frac of ns to add to the next sec.
	 * Keep it as additional adjustment for the next sec.
	 */
	frac = (adjustment > 0)? ((uint32_t) adjustment) : -((uint32_t) (-adjustment));
	*s_adj_nsx = (frac > 0)? ((uint64_t) frac) << 32 : -(((uint64_t) (-frac)) << 32);

	return;
}
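
/*
 * Illustrative decomposition (values assumed): adjustment is ns-per-second
 * in 32.32 fixed point. For adjustment = 0x140000000 (1.25 ns/s):
 *
 *   hi 32 bits -> nano = 1          -> s_scale_ns = NSEC_PER_SEC + 1
 *   lo 32 bits -> frac = 0x40000000 -> s_adj_nsx = 0.25 ns expressed as a
 *                                      64-bit binary fraction (0x40000000 << 32)
 *
 * tick_scale_x spreads the same rate over each tick:
 *   ((2^63 + (adjustment / 1024) * 2199) / ticks_per_sec) * 2
 */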

/*
 * scale_delta:
 *
 * returns a bintime struct representing delta scaled according to the
 * scale factors provided to this function.
 */
static struct bintime
scale_delta(uint64_t delta, uint64_t tick_scale_x, uint64_t s_scale_ns, int64_t s_adj_nsx)
{
	uint64_t sec, new_ns, over;
	struct bintime bt;

	bt.sec = 0;
	bt.frac = 0;

	/*
	 * If more than one second has elapsed,
	 * scale fully elapsed seconds using the scale factors for seconds.
	 * s_scale_ns -> scales sec to ns.
	 * s_adj_nsx -> additional adj expressed as a 64-bit frac of ns to apply to each sec.
	 */
	if (delta > ticks_per_sec) {
		sec = (delta / ticks_per_sec);
		new_ns = sec * s_scale_ns;
		bintime_addns(&bt, new_ns);
		if (s_adj_nsx) {
			if (sec == 1) {
				/* shortcut, no overflow can occur */
				if (s_adj_nsx > 0) {
					bintime_addx(&bt, (uint64_t)s_adj_nsx / (uint64_t)NSEC_PER_SEC);
				} else {
					bintime_subx(&bt, (uint64_t)-s_adj_nsx / (uint64_t)NSEC_PER_SEC);
				}
			} else {
				/*
				 * s_adj_nsx is a 64-bit frac of ns.
				 * sec*s_adj_nsx might overflow in int64_t.
				 * use bintime_addxns to not lose overflowed ns.
				 */
				bintime_addxns(&bt, sec, s_adj_nsx);
			}
		}
		delta = (delta % ticks_per_sec);
	}

	over = multi_overflow(tick_scale_x, delta);
	if (over) {
		bt.sec += over;
	}

	/*
	 * scale elapsed ticks using the scale factor for ticks.
	 */
	bintime_addx(&bt, delta * tick_scale_x);

	return bt;
}
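
/*
 * Illustrative walk-through (values assumed): with ticks_per_sec = 24000000
 * and no NTP adjustment (s_scale_ns = NSEC_PER_SEC, s_adj_nsx = 0,
 * tick_scale_x ~= 2^64 / 24000000), a delta of 36000000 ticks (1.5 s) splits
 * into:
 *
 *   full seconds: 36000000 / 24000000 = 1  -> bintime_addns(&bt, 10^9)
 *   remainder:    12000000 ticks           -> frac += 12000000 * tick_scale_x
 *                                               ~= 2^63 (0.5 s)
 *
 * yielding bt ~= 1.5 s.
 */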

/*
 * get_scaled_time:
 *
 * returns the scaled representation of the time elapsed between the
 * last scale-factor update and now.
 */
static struct bintime
get_scaled_time(uint64_t now)
{
	uint64_t delta;

	/*
	 * Compute ticks elapsed since the last scale update.
	 * This time will be scaled according to the value given by ntp kern.
	 */
	delta = now - clock_calend.offset_count;

	return scale_delta(delta, clock_calend.tick_scale_x, clock_calend.s_scale_ns, clock_calend.s_adj_nsx);
}

static void
clock_get_calendar_absolute_and_microtime_locked(
	clock_sec_t *secs,
	clock_usec_t *microsecs,
	uint64_t *abstime)
{
	uint64_t now;
	struct bintime bt;

	now = mach_absolute_time();
	if (abstime) {
		*abstime = now;
	}

	bt = get_scaled_time(now);
	bintime_add(&bt, &clock_calend.bintime);
	bintime2usclock(&bt, secs, microsecs);
}

static void
clock_get_calendar_absolute_and_nanotime_locked(
	clock_sec_t *secs,
	clock_usec_t *nanosecs,
	uint64_t *abstime)
{
	uint64_t now;
	struct bintime bt;

	now = mach_absolute_time();
	if (abstime) {
		*abstime = now;
	}

	bt = get_scaled_time(now);
	bintime_add(&bt, &clock_calend.bintime);
	bintime2nsclock(&bt, secs, nanosecs);
}

/*
 * clock_get_calendar_absolute_and_microtime:
 *
 * Returns the current calendar value,
 * microseconds as the fraction. Also
 * returns mach_absolute_time if abstime
 * is not NULL.
 */
void
clock_get_calendar_absolute_and_microtime(
	clock_sec_t *secs,
	clock_usec_t *microsecs,
	uint64_t *abstime)
{
	spl_t s;

	s = splclock();
	clock_lock();

	clock_get_calendar_absolute_and_microtime_locked(secs, microsecs, abstime);

	clock_unlock();
	splx(s);
}

/*
 * clock_get_calendar_nanotime:
 *
 * Returns the current calendar value,
 * nanoseconds as the fraction.
 *
 * Since we do not have an interface to
 * set the calendar with resolution greater
 * than a microsecond, we honor that here.
 */
void
clock_get_calendar_nanotime(
	clock_sec_t *secs,
	clock_nsec_t *nanosecs)
{
	spl_t s;

	s = splclock();
	clock_lock();

	clock_get_calendar_absolute_and_nanotime_locked(secs, nanosecs, NULL);

	clock_unlock();
	splx(s);
}

/*
 * clock_gettimeofday:
 *
 * Kernel interface for commpage implementation of
 * gettimeofday() syscall.
 *
 * Returns the current calendar value, and updates the
 * commpage info as appropriate.  Because most calls to
 * gettimeofday() are handled in user mode by the commpage,
 * this routine should be used infrequently.
 */
void
clock_gettimeofday(
	clock_sec_t *secs,
	clock_usec_t *microsecs)
{
	clock_gettimeofday_and_absolute_time(secs, microsecs, NULL);
}

void
clock_gettimeofday_and_absolute_time(
	clock_sec_t *secs,
	clock_usec_t *microsecs,
	uint64_t *mach_time)
{
	uint64_t now;
	spl_t s;
	struct bintime bt;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();
	bt = get_scaled_time(now);
	bintime_add(&bt, &clock_calend.bintime);
	bintime2usclock(&bt, secs, microsecs);

	clock_gettimeofday_set_commpage(now, bt.sec, bt.frac, clock_calend.tick_scale_x, ticks_per_sec);

	clock_unlock();
	splx(s);

	if (mach_time) {
		*mach_time = now;
	}
}
/*
 * clock_set_calendar_microtime:
 *
 * Sets the current calendar value by
 * recalculating the epoch and offset
 * from the system clock.
 *
 * Also adjusts the boottime to keep the
 * value consistent, writes the new
 * calendar value to the platform clock,
 * and sends calendar change notifications.
 */
void
clock_set_calendar_microtime(
	clock_sec_t secs,
	clock_usec_t microsecs)
{
	uint64_t absolutesys;
	clock_sec_t newsecs;
	clock_sec_t oldsecs;
	clock_usec_t newmicrosecs;
	clock_usec_t oldmicrosecs;
	uint64_t commpage_value;
	spl_t s;
	struct bintime bt;
	clock_sec_t deltasecs;
	clock_usec_t deltamicrosecs;

	newsecs = secs;
	newmicrosecs = microsecs;

	/*
	 * The settime_lock mutex prevents racing settimeofday() calls from updating
	 * the wall clock and the platform clock concurrently.
	 *
	 * clock_lock cannot be used for this race because it is acquired from interrupt
	 * context and requires interrupts disabled, whereas updating the platform clock
	 * must be done with interrupts enabled.
	 */
	lck_mtx_lock(&settime_lock);

	s = splclock();
	clock_lock();

#if DEVELOPMENT || DEBUG
	struct clock_calend clock_calend_cp = clock_calend;
#endif
	commpage_disable_timestamp();

	/*
	 * Adjust the boottime based on the delta.
	 */
	clock_get_calendar_absolute_and_microtime_locked(&oldsecs, &oldmicrosecs, &absolutesys);

#if DEVELOPMENT || DEBUG
	if (g_should_log_clock_adjustments) {
		os_log(OS_LOG_DEFAULT, "%s wall %lu s %d u computed with %llu abs\n",
		    __func__, (unsigned long)oldsecs, oldmicrosecs, absolutesys);
		os_log(OS_LOG_DEFAULT, "%s requested %lu s %d u\n",
		    __func__, (unsigned long)secs, microsecs);
	}
#endif

	if (oldsecs < secs || (oldsecs == secs && oldmicrosecs < microsecs)) {
		// moving forwards
		deltasecs = secs;
		deltamicrosecs = microsecs;

		TIME_SUB(deltasecs, oldsecs, deltamicrosecs, oldmicrosecs, USEC_PER_SEC);

		TIME_ADD(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
		clock2bintime(&deltasecs, &deltamicrosecs, &bt);
		bintime_add(&clock_calend.boottime, &bt);
	} else {
		// moving backwards
		deltasecs = oldsecs;
		deltamicrosecs = oldmicrosecs;

		TIME_SUB(deltasecs, secs, deltamicrosecs, microsecs, USEC_PER_SEC);

		TIME_SUB(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
		clock2bintime(&deltasecs, &deltamicrosecs, &bt);
		bintime_sub(&clock_calend.boottime, &bt);
	}

	clock_calend.bintime = clock_calend.boottime;
	bintime_add(&clock_calend.bintime, &clock_calend.offset);

	clock2bintime((clock_sec_t *) &secs, (clock_usec_t *) &microsecs, &bt);

	clock_gettimeofday_set_commpage(absolutesys, bt.sec, bt.frac, clock_calend.tick_scale_x, ticks_per_sec);

#if DEVELOPMENT || DEBUG
	struct clock_calend clock_calend_cp1 = clock_calend;
#endif

	commpage_value = clock_boottime * USEC_PER_SEC + clock_boottime_usec;

	clock_unlock();
	splx(s);

	/*
	 * Set the new value for the platform clock.
	 * This call might block, so interrupts must be enabled.
	 */
#if DEVELOPMENT || DEBUG
	uint64_t now_b = mach_absolute_time();
#endif

	PESetUTCTimeOfDay(newsecs, newmicrosecs);

#if DEVELOPMENT || DEBUG
	uint64_t now_a = mach_absolute_time();
	if (g_should_log_clock_adjustments) {
		os_log(OS_LOG_DEFAULT, "%s mach bef PESet %llu mach aft %llu \n", __func__, now_b, now_a);
	}
#endif

	print_all_clock_variables_internal(__func__, &clock_calend_cp);
	print_all_clock_variables_internal(__func__, &clock_calend_cp1);

	commpage_update_boottime(commpage_value);

	/*
	 * Send host notifications.
	 */
	host_notify_calendar_change();
	host_notify_calendar_set();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif

	lck_mtx_unlock(&settime_lock);
}

uint64_t mach_absolutetime_asleep = 0;
uint64_t mach_absolutetime_last_sleep = 0;

void
clock_get_calendar_uptime(clock_sec_t *secs)
{
	uint64_t now;
	spl_t s;
	struct bintime bt;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	bt = get_scaled_time(now);
	bintime_add(&bt, &clock_calend.offset);

	*secs = bt.sec;

	clock_unlock();
	splx(s);
}


/*
 * clock_update_calendar:
 *
 * called by the ntp timer to update scale factors.
 */
void
clock_update_calendar(void)
{
	uint64_t now, delta;
	struct bintime bt;
	spl_t s;
	int64_t adjustment;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	/*
	 * scale the time elapsed since the last update and
	 * add it to offset.
	 */
	bt = get_scaled_time(now);
	bintime_add(&clock_calend.offset, &bt);

	/*
	 * update the base from which the next scale factors will apply.
	 */
	delta = now - clock_calend.offset_count;
	clock_calend.offset_count += delta;

	clock_calend.bintime = clock_calend.offset;
	bintime_add(&clock_calend.bintime, &clock_calend.boottime);

	/*
	 * recompute the next adjustment.
	 */
	ntp_update_second(&adjustment, clock_calend.bintime.sec);

#if DEVELOPMENT || DEBUG
	if (g_should_log_clock_adjustments) {
		os_log(OS_LOG_DEFAULT, "%s adjustment %lld\n", __func__, adjustment);
	}
#endif

	/*
	 * recompute the scale factors.
	 */
	get_scale_factors_from_adj(adjustment, &clock_calend.tick_scale_x, &clock_calend.s_scale_ns, &clock_calend.s_adj_nsx);

	clock_gettimeofday_set_commpage(now, clock_calend.bintime.sec, clock_calend.bintime.frac, clock_calend.tick_scale_x, ticks_per_sec);

#if DEVELOPMENT || DEBUG
	struct clock_calend calend_cp = clock_calend;
#endif

	clock_unlock();
	splx(s);

	print_all_clock_variables(__func__, NULL, NULL, NULL, NULL, &calend_cp);
}


#if DEVELOPMENT || DEBUG

void
print_all_clock_variables_internal(const char* func, struct clock_calend* clock_calend_cp)
{
	clock_sec_t offset_secs;
	clock_usec_t offset_microsecs;
	clock_sec_t bintime_secs;
	clock_usec_t bintime_microsecs;
	clock_sec_t bootime_secs;
	clock_usec_t bootime_microsecs;

	if (!g_should_log_clock_adjustments) {
		return;
	}

	bintime2usclock(&clock_calend_cp->offset, &offset_secs, &offset_microsecs);
	bintime2usclock(&clock_calend_cp->bintime, &bintime_secs, &bintime_microsecs);
	bintime2usclock(&clock_calend_cp->boottime, &bootime_secs, &bootime_microsecs);

	os_log(OS_LOG_DEFAULT, "%s s_scale_ns %llu s_adj_nsx %lld tick_scale_x %llu offset_count %llu\n",
	    func, clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx,
	    clock_calend_cp->tick_scale_x, clock_calend_cp->offset_count);
	os_log(OS_LOG_DEFAULT, "%s offset.sec %ld offset.frac %llu offset_secs %lu offset_microsecs %d\n",
	    func, clock_calend_cp->offset.sec, clock_calend_cp->offset.frac,
	    (unsigned long)offset_secs, offset_microsecs);
	os_log(OS_LOG_DEFAULT, "%s bintime.sec %ld bintime.frac %llu bintime_secs %lu bintime_microsecs %d\n",
	    func, clock_calend_cp->bintime.sec, clock_calend_cp->bintime.frac,
	    (unsigned long)bintime_secs, bintime_microsecs);
	os_log(OS_LOG_DEFAULT, "%s bootime.sec %ld bootime.frac %llu bootime_secs %lu bootime_microsecs %d\n",
	    func, clock_calend_cp->boottime.sec, clock_calend_cp->boottime.frac,
	    (unsigned long)bootime_secs, bootime_microsecs);

#if !HAS_CONTINUOUS_HWCLOCK
	clock_sec_t basesleep_secs;
	clock_usec_t basesleep_microsecs;

	bintime2usclock(&clock_calend_cp->basesleep, &basesleep_secs, &basesleep_microsecs);
	os_log(OS_LOG_DEFAULT, "%s basesleep.sec %ld basesleep.frac %llu basesleep_secs %lu basesleep_microsecs %d\n",
	    func, clock_calend_cp->basesleep.sec, clock_calend_cp->basesleep.frac,
	    (unsigned long)basesleep_secs, basesleep_microsecs);
#endif
}


void
print_all_clock_variables(const char* func, clock_sec_t* pmu_secs, clock_usec_t* pmu_usec, clock_sec_t* sys_secs, clock_usec_t* sys_usec, struct clock_calend* clock_calend_cp)
{
	if (!g_should_log_clock_adjustments) {
		return;
	}

	struct bintime bt;
	clock_sec_t wall_secs;
	clock_usec_t wall_microsecs;
	uint64_t now;
	uint64_t delta;

	if (pmu_secs) {
		os_log(OS_LOG_DEFAULT, "%s PMU %lu s %d u \n", func, (unsigned long)*pmu_secs, *pmu_usec);
	}
	if (sys_secs) {
		os_log(OS_LOG_DEFAULT, "%s sys %lu s %d u \n", func, (unsigned long)*sys_secs, *sys_usec);
	}

	print_all_clock_variables_internal(func, clock_calend_cp);

	now = mach_absolute_time();
	delta = now - clock_calend_cp->offset_count;

	bt = scale_delta(delta, clock_calend_cp->tick_scale_x, clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx);
	bintime_add(&bt, &clock_calend_cp->bintime);
	bintime2usclock(&bt, &wall_secs, &wall_microsecs);

	os_log(OS_LOG_DEFAULT, "%s wall %lu s %d u computed with %llu abs\n",
	    func, (unsigned long)wall_secs, wall_microsecs, now);
}


#endif /* DEVELOPMENT || DEBUG */


/*
 * clock_initialize_calendar:
 *
 * Set the calendar and related clocks
 * from the platform clock at boot.
 *
 * Also sends host notifications.
 */
void
clock_initialize_calendar(void)
{
	clock_sec_t sys;             // sleepless time since boot in seconds
	clock_sec_t secs;            // Current UTC time
	clock_sec_t utc_offset_secs; // Difference between current UTC time and sleepless time since boot
	clock_usec_t microsys;
	clock_usec_t microsecs;
	clock_usec_t utc_offset_microsecs;
	spl_t s;
	struct bintime bt;
#if ENABLE_LEGACY_CLOCK_CODE
	struct bintime monotonic_bt;
	struct latched_time monotonic_time;
	uint64_t monotonic_usec_total;
	clock_sec_t sys2, monotonic_sec;
	clock_usec_t microsys2, monotonic_usec;
	size_t size;

#endif /* ENABLE_LEGACY_CLOCK_CODE */
	// Get the UTC time and corresponding sys time
	PEGetUTCTimeOfDay(&secs, &microsecs);
	clock_get_system_microtime(&sys, &microsys);

#if ENABLE_LEGACY_CLOCK_CODE
	/*
	 * If the platform has a monotonic clock, use kern.monotonicclock_usecs
	 * to estimate the sleep/wake time, otherwise use the UTC time to estimate
	 * the sleep time.
	 */
	size = sizeof(monotonic_time);
	if (kernel_sysctlbyname("kern.monotonicclock_usecs", &monotonic_time, &size, NULL, 0) != 0) {
		has_monotonic_clock = 0;
		os_log(OS_LOG_DEFAULT, "%s system does not have monotonic clock\n", __func__);
	} else {
		has_monotonic_clock = 1;
		monotonic_usec_total = monotonic_time.monotonic_time_usec;
		absolutetime_to_microtime(monotonic_time.mach_time, &sys2, &microsys2);
		os_log(OS_LOG_DEFAULT, "%s system has monotonic clock\n", __func__);
	}
#endif /* ENABLE_LEGACY_CLOCK_CODE */

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	utc_offset_secs = secs;
	utc_offset_microsecs = microsecs;

	/*
	 * We normally expect the UTC clock to be always-on and produce
	 * greater readings than the tick counter. There may be corner cases
	 * due to differing clock resolutions (UTC clock is likely lower) and
	 * errors reading the UTC clock (some implementations return 0
	 * on error) in which that doesn't hold true. Bring the UTC measurements
	 * in-line with the tick counter measurements as a best effort in that case.
	 */
	if ((sys > secs) || ((sys == secs) && (microsys > microsecs))) {
		os_log(OS_LOG_DEFAULT, "%s WARNING: UTC time is less than sys time, (%lu s %d u) UTC (%lu s %d u) sys\n",
		    __func__, (unsigned long) secs, microsecs, (unsigned long)sys, microsys);
		secs = utc_offset_secs = sys;
		microsecs = utc_offset_microsecs = microsys;
	}

	// UTC - sys
	// This macro stores the subtraction result in utc_offset_secs and utc_offset_microsecs
	TIME_SUB(utc_offset_secs, sys, utc_offset_microsecs, microsys, USEC_PER_SEC);
	// This function converts utc_offset_secs and utc_offset_microsecs to bintime
	clock2bintime(&utc_offset_secs, &utc_offset_microsecs, &bt);

	/*
	 * Initialize the boot time based on the platform clock.
	 */
	clock_boottime = secs;
	clock_boottime_usec = microsecs;
	commpage_update_boottime(clock_boottime * USEC_PER_SEC + clock_boottime_usec);

	nanoseconds_to_absolutetime((uint64_t)NSEC_PER_SEC, &ticks_per_sec);
	clock_calend.boottime = bt;
	clock_calend.bintime = bt;
	clock_calend.offset.sec = 0;
	clock_calend.offset.frac = 0;

	clock_calend.tick_scale_x = (uint64_t)1 << 63;
	clock_calend.tick_scale_x /= ticks_per_sec;
	clock_calend.tick_scale_x *= 2;

	clock_calend.s_scale_ns = NSEC_PER_SEC;
	clock_calend.s_adj_nsx = 0;

#if ENABLE_LEGACY_CLOCK_CODE
	if (has_monotonic_clock) {
		monotonic_sec = monotonic_usec_total / (clock_sec_t)USEC_PER_SEC;
		monotonic_usec = monotonic_usec_total % (clock_usec_t)USEC_PER_SEC;

		// monotonic clock - sys
		// This macro stores the subtraction result in monotonic_sec and monotonic_usec
		TIME_SUB(monotonic_sec, sys2, monotonic_usec, microsys2, USEC_PER_SEC);
		clock2bintime(&monotonic_sec, &monotonic_usec, &monotonic_bt);

		// set basesleep to the difference monotonic clock - sys
		clock_calend.basesleep = monotonic_bt;
	}
#endif /* ENABLE_LEGACY_CLOCK_CODE */
	commpage_update_mach_continuous_time(mach_absolutetime_asleep);

#if DEVELOPMENT || DEBUG
	struct clock_calend clock_calend_cp = clock_calend;
#endif

	clock_unlock();
	splx(s);

	print_all_clock_variables(__func__, &secs, &microsecs, &sys, &microsys, &clock_calend_cp);

	/*
	 * Send host notifications.
	 */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}

#if HAS_CONTINUOUS_HWCLOCK

static void
scale_sleep_time(void)
{
	/* Apply the current NTP frequency adjustment to the time slept.
	 * The frequency adjustment remains stable between calls to ntp_adjtime(),
	 * and should thus provide a reasonable approximation of the total adjustment
	 * required for the time slept. */
	struct bintime sleep_time;
	uint64_t tick_scale_x, s_scale_ns;
	int64_t s_adj_nsx;
	int64_t sleep_adj = ntp_get_freq();
	if (sleep_adj) {
		get_scale_factors_from_adj(sleep_adj, &tick_scale_x, &s_scale_ns, &s_adj_nsx);
		sleep_time = scale_delta(mach_absolutetime_last_sleep, tick_scale_x, s_scale_ns, s_adj_nsx);
	} else {
		tick_scale_x = (uint64_t)1 << 63;
		tick_scale_x /= ticks_per_sec;
		tick_scale_x *= 2;
		sleep_time.sec = mach_absolutetime_last_sleep / ticks_per_sec;
		sleep_time.frac = (mach_absolutetime_last_sleep % ticks_per_sec) * tick_scale_x;
	}
	bintime_add(&clock_calend.offset, &sleep_time);
	bintime_add(&clock_calend.bintime, &sleep_time);
}

static void
clock_wakeup_calendar_hwclock(void)
{
	spl_t s;

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	uint64_t abstime = mach_absolute_time();
	uint64_t total_sleep_time = mach_continuous_time() - abstime;

	mach_absolutetime_last_sleep = total_sleep_time - mach_absolutetime_asleep;
	mach_absolutetime_asleep = total_sleep_time;

	scale_sleep_time();

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_EPOCH_CHANGE),
	    (uintptr_t)mach_absolutetime_last_sleep,
	    (uintptr_t)mach_absolutetime_asleep,
	    (uintptr_t)(mach_absolutetime_last_sleep >> 32),
	    (uintptr_t)(mach_absolutetime_asleep >> 32));

	commpage_update_mach_continuous_time(mach_absolutetime_asleep);
#if HIBERNATION
	commpage_update_mach_continuous_time_hw_offset(hwclock_conttime_offset);
#endif
	adjust_cont_time_thread_calls();

	clock_unlock();
	splx(s);

	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}

#endif /* HAS_CONTINUOUS_HWCLOCK */

#if ENABLE_LEGACY_CLOCK_CODE

static void
clock_wakeup_calendar_legacy(void)
{
	clock_sec_t wake_sys_sec;
	clock_usec_t wake_sys_usec;
	clock_sec_t wake_sec;
	clock_usec_t wake_usec;
	clock_sec_t wall_time_sec;
	clock_usec_t wall_time_usec;
	clock_sec_t diff_sec;
	clock_usec_t diff_usec;
	clock_sec_t var_s;
	clock_usec_t var_us;
	spl_t s;
	struct bintime bt, last_sleep_bt;
	struct latched_time monotonic_time;
	uint64_t monotonic_usec_total;
	uint64_t wake_abs;
	size_t size;

	/*
	 * If the platform has the monotonic clock, use that to
	 * compute the sleep time. The monotonic clock does not have an offset
	 * that can be modified, so neither the kernel nor userspace can change
	 * the time of this clock; it can only monotonically increase over time.
	 * During sleep mach_absolute_time (sys time) does not tick,
	 * so the sleep time is the difference between the current monotonic time
	 * less the absolute time and the previous difference stored at wake time.
	 *
	 * basesleep = (monotonic - sys) ---> computed at last wake
	 * sleep_time = (monotonic - sys) - basesleep
	 *
	 * If the platform does not support the monotonic clock, we set the wall
	 * time to what the UTC clock returns us.
	 * Setting the wall time to UTC time implies that we lose all the
	 * adjustments done during wake time through adjtime/ntp_adjtime.
	 * The UTC time is the monotonic clock + an offset that can be set
	 * by the kernel.
	 * The time slept in this case is the difference between wall time and UTC
	 * at wake.
	 *
	 * IMPORTANT:
	 * We assume that only the kernel is setting the offset of the PMU/RTC and that
	 * it is doing it only through the settimeofday interface.
	 */
	if (has_monotonic_clock) {
#if DEVELOPMENT || DEBUG
		/*
		 * Just for debugging, get the wake UTC time.
		 */
		PEGetUTCTimeOfDay(&var_s, &var_us);
#endif
		/*
		 * Get monotonic time with corresponding sys time
		 */
		size = sizeof(monotonic_time);
		if (kernel_sysctlbyname("kern.monotonicclock_usecs", &monotonic_time, &size, NULL, 0) != 0) {
			panic("%s: could not call kern.monotonicclock_usecs", __func__);
		}
		wake_abs = monotonic_time.mach_time;
		absolutetime_to_microtime(wake_abs, &wake_sys_sec, &wake_sys_usec);

		monotonic_usec_total = monotonic_time.monotonic_time_usec;
		wake_sec = monotonic_usec_total / (clock_sec_t)USEC_PER_SEC;
		wake_usec = monotonic_usec_total % (clock_usec_t)USEC_PER_SEC;
	} else {
		/*
		 * Get UTC time and corresponding sys time
		 */
		PEGetUTCTimeOfDay(&wake_sec, &wake_usec);
		wake_abs = mach_absolute_time();
		absolutetime_to_microtime(wake_abs, &wake_sys_sec, &wake_sys_usec);
	}

#if DEVELOPMENT || DEBUG
	os_log(OS_LOG_DEFAULT, "time at wake %lu s %d u from %s clock, abs %llu\n", (unsigned long)wake_sec, wake_usec, (has_monotonic_clock)?"monotonic":"UTC", wake_abs);
	if (has_monotonic_clock) {
		os_log(OS_LOG_DEFAULT, "UTC time %lu s %d u\n", (unsigned long)var_s, var_us);
	}
#endif /* DEVELOPMENT || DEBUG */

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

#if DEVELOPMENT || DEBUG
	struct clock_calend clock_calend_cp1 = clock_calend;
#endif /* DEVELOPMENT || DEBUG */

	/*
	 * We normally expect the UTC/monotonic clock to be always-on and produce
	 * greater readings than the sys counter. There may be corner cases
	 * due to differing clock resolutions (the UTC/monotonic clock is likely lower) and
	 * errors reading the UTC/monotonic clock (some implementations return 0
	 * on error) in which that doesn't hold true.
	 */
	if ((wake_sys_sec > wake_sec) || ((wake_sys_sec == wake_sec) && (wake_sys_usec > wake_usec))) {
		os_log_error(OS_LOG_DEFAULT, "WARNING: %s clock is less than sys clock at wake: %lu s %d u vs %lu s %d u, defaulting sleep time to zero\n", (has_monotonic_clock)?"monotonic":"UTC", (unsigned long)wake_sec, wake_usec, (unsigned long)wake_sys_sec, wake_sys_usec);
		mach_absolutetime_last_sleep = 0;
		goto done;
	}

	if (has_monotonic_clock) {
		/*
		 * compute the difference monotonic - sys;
		 * we already checked that monotonic time is
		 * greater than sys.
		 */
		diff_sec = wake_sec;
		diff_usec = wake_usec;
		// This macro stores the subtraction result in diff_sec and diff_usec
		TIME_SUB(diff_sec, wake_sys_sec, diff_usec, wake_sys_usec, USEC_PER_SEC);
		// This function converts diff_sec and diff_usec to bintime
		clock2bintime(&diff_sec, &diff_usec, &bt);

		/*
		 * Safety belt: the monotonic clock will likely have a lower resolution than the sys counter.
		 * It's also possible that the device didn't fully transition to the powered-off state on
		 * the most recent sleep, so the sys counter may not have reset or may have only briefly
		 * turned off. In that case it's possible for the difference between the monotonic clock and the
		 * sys counter to be less than the previously recorded value in clock.calend.basesleep.
		 * In that case simply record that we slept for 0 ticks.
		 */
		if ((bt.sec > clock_calend.basesleep.sec) ||
		    ((bt.sec == clock_calend.basesleep.sec) && (bt.frac > clock_calend.basesleep.frac))) {
			// last_sleep is the difference between (current monotonic - abs) and (last wake monotonic - abs)
			last_sleep_bt = bt;
			bintime_sub(&last_sleep_bt, &clock_calend.basesleep);

			bintime2absolutetime(&last_sleep_bt, &mach_absolutetime_last_sleep);
			mach_absolutetime_asleep += mach_absolutetime_last_sleep;

			// set basesleep to current monotonic - abs
			clock_calend.basesleep = bt;

			// update wall time
			bintime_add(&clock_calend.offset, &last_sleep_bt);
			bintime_add(&clock_calend.bintime, &last_sleep_bt);

			bintime2usclock(&last_sleep_bt, &var_s, &var_us);
			os_log(OS_LOG_DEFAULT, "time_slept (%lu s %d u)\n", (unsigned long) var_s, var_us);
		} else {
			bintime2usclock(&clock_calend.basesleep, &var_s, &var_us);
			os_log_error(OS_LOG_DEFAULT, "WARNING: last wake monotonic-sys time (%lu s %d u) is greater than current monotonic-sys time (%lu s %d u), defaulting sleep time to zero\n", (unsigned long) var_s, var_us, (unsigned long) diff_sec, diff_usec);

			mach_absolutetime_last_sleep = 0;
		}
	} else {
		/*
		 * set the wall time to the UTC value
		 */
		bt = get_scaled_time(wake_abs);
		bintime_add(&bt, &clock_calend.bintime);
		bintime2usclock(&bt, &wall_time_sec, &wall_time_usec);

		if (wall_time_sec > wake_sec || (wall_time_sec == wake_sec && wall_time_usec > wake_usec)) {
			os_log(OS_LOG_DEFAULT, "WARNING: wall time (%lu s %d u) is greater than current UTC time (%lu s %d u), defaulting sleep time to zero\n", (unsigned long) wall_time_sec, wall_time_usec, (unsigned long) wake_sec, wake_usec);

			mach_absolutetime_last_sleep = 0;
		} else {
			diff_sec = wake_sec;
			diff_usec = wake_usec;
			// This macro stores the subtraction result in diff_sec and diff_usec
			TIME_SUB(diff_sec, wall_time_sec, diff_usec, wall_time_usec, USEC_PER_SEC);
			// This function converts diff_sec and diff_usec to bintime
			clock2bintime(&diff_sec, &diff_usec, &bt);

			// time slept in this case is the difference between PMU/RTC and wall time
			last_sleep_bt = bt;

			bintime2absolutetime(&last_sleep_bt, &mach_absolutetime_last_sleep);
			mach_absolutetime_asleep += mach_absolutetime_last_sleep;

			// update wall time
			bintime_add(&clock_calend.offset, &last_sleep_bt);
			bintime_add(&clock_calend.bintime, &last_sleep_bt);

			bintime2usclock(&last_sleep_bt, &var_s, &var_us);
			os_log(OS_LOG_DEFAULT, "time_slept (%lu s %d u)\n", (unsigned long)var_s, var_us);
		}
	}
done:
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_EPOCH_CHANGE),
	    (uintptr_t)mach_absolutetime_last_sleep,
	    (uintptr_t)mach_absolutetime_asleep,
	    (uintptr_t)(mach_absolutetime_last_sleep >> 32),
	    (uintptr_t)(mach_absolutetime_asleep >> 32));

	commpage_update_mach_continuous_time(mach_absolutetime_asleep);
	adjust_cont_time_thread_calls();

#if DEVELOPMENT || DEBUG
	struct clock_calend clock_calend_cp = clock_calend;
#endif

	clock_unlock();
	splx(s);

#if DEVELOPMENT || DEBUG
	if (g_should_log_clock_adjustments) {
		print_all_clock_variables("clock_wakeup_calendar: BEFORE", NULL, NULL, NULL, NULL, &clock_calend_cp1);
		print_all_clock_variables("clock_wakeup_calendar: AFTER", NULL, NULL, NULL, NULL, &clock_calend_cp);
	}
#endif /* DEVELOPMENT || DEBUG */

	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}

#endif /* ENABLE_LEGACY_CLOCK_CODE */

void
clock_wakeup_calendar(void)
{
#if HAS_CONTINUOUS_HWCLOCK
#if HIBERNATION_USES_LEGACY_CLOCK
	if (gIOHibernateState) {
		// if we're resuming from hibernation, we have to take the legacy wakeup path
		return clock_wakeup_calendar_legacy();
	}
#endif /* HIBERNATION_USES_LEGACY_CLOCK */
	// use the hwclock wakeup path
	return clock_wakeup_calendar_hwclock();
#elif ENABLE_LEGACY_CLOCK_CODE
	return clock_wakeup_calendar_legacy();
#else
#error "can't determine which clock code to run"
#endif
}

/*
 * clock_get_boottime_nanotime:
 *
 * Return the boottime, used by sysctl.
 */
void
clock_get_boottime_nanotime(
	clock_sec_t *secs,
	clock_nsec_t *nanosecs)
{
	spl_t s;

	s = splclock();
	clock_lock();

	*secs = (clock_sec_t)clock_boottime;
	*nanosecs = (clock_nsec_t)clock_boottime_usec * NSEC_PER_USEC;

	clock_unlock();
	splx(s);
}

/*
 * clock_get_boottime_microtime:
 *
 * Return the boottime, used by sysctl.
 */
void
clock_get_boottime_microtime(
	clock_sec_t *secs,
	clock_usec_t *microsecs)
{
	spl_t s;

	s = splclock();
	clock_lock();

	*secs = (clock_sec_t)clock_boottime;
	*microsecs = (clock_nsec_t)clock_boottime_usec;

	clock_unlock();
	splx(s);
}


/*
 * Wait / delay routines.
 */
static void
mach_wait_until_continue(
	__unused void *parameter,
	wait_result_t wresult)
{
	thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
	/*NOTREACHED*/
}

/*
 * mach_wait_until_trap: Suspend execution of calling thread until the specified time has passed
 *
 * Parameters:    args->deadline          Absolute time until which to wait
 *
 * Returns:        0                      Success
 *                !0                      Not success
 *
 */
kern_return_t
mach_wait_until_trap(
	struct mach_wait_until_trap_args *args)
{
	uint64_t deadline = args->deadline;
	wait_result_t wresult;


	wresult = assert_wait_deadline_with_leeway((event_t)mach_wait_until_trap, THREAD_ABORTSAFE,
	    TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
	if (wresult == THREAD_WAITING) {
		wresult = thread_block(mach_wait_until_continue);
	}

	return (wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS;
}

void
clock_delay_until(
	uint64_t deadline)
{
	uint64_t now = mach_absolute_time();

	if (now >= deadline) {
		return;
	}

	_clock_delay_until_deadline(deadline - now, deadline);
}

/*
 * Preserve the original precise interval that the client
 * requested for comparison to the spin threshold.
 */
void
_clock_delay_until_deadline(
	uint64_t interval,
	uint64_t deadline)
{
	_clock_delay_until_deadline_with_leeway(interval, deadline, 0);
}

/*
 * Like _clock_delay_until_deadline, but it accepts a
 * leeway value.
 */
void
_clock_delay_until_deadline_with_leeway(
	uint64_t interval,
	uint64_t deadline,
	uint64_t leeway)
{
	if (interval == 0) {
		return;
	}

	if (ml_delay_should_spin(interval) ||
	    get_preemption_level() != 0 ||
	    ml_get_interrupts_enabled() == FALSE) {
		machine_delay_until(interval, deadline);
	} else {
		/*
		 * For now, assume a leeway request of 0 means the client does not want a leeway
		 * value. We may want to change this interpretation in the future.
		 */

		if (leeway) {
			assert_wait_deadline_with_leeway((event_t)clock_delay_until, THREAD_UNINT, TIMEOUT_URGENCY_LEEWAY, deadline, leeway);
		} else {
			assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline);
		}

		thread_block(THREAD_CONTINUE_NULL);
	}
}

91447636
A
1600void
1601delay_for_interval(
0a7de745
A
1602 uint32_t interval,
1603 uint32_t scale_factor)
91447636 1604{
0a7de745 1605 uint64_t abstime;
91447636 1606
316670eb 1607 clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
91447636 1608
316670eb 1609 _clock_delay_until_deadline(abstime, mach_absolute_time() + abstime);
91447636
A
1610}
1611
3e170ce0
A
1612void
1613delay_for_interval_with_leeway(
0a7de745
A
1614 uint32_t interval,
1615 uint32_t leeway,
1616 uint32_t scale_factor)
3e170ce0 1617{
0a7de745
A
1618 uint64_t abstime_interval;
1619 uint64_t abstime_leeway;
3e170ce0
A
1620
1621 clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime_interval);
1622 clock_interval_to_absolutetime_interval(leeway, scale_factor, &abstime_leeway);
1623
1624 _clock_delay_until_deadline_with_leeway(abstime_interval, mach_absolute_time() + abstime_interval, abstime_leeway);
1625}
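
/*
 * Illustrative sketch (annotation, not part of the original source):
 * delay for roughly 10ms while allowing the wakeup to be deferred by
 * up to 1ms so it can be coalesced with other timers:
 *
 *     delay_for_interval_with_leeway(10, 1, NSEC_PER_MSEC);
 */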

void
delay(
    int usec)
{
    delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
}
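
/*
 * Illustrative sketch (annotation, not part of the original source):
 * a negative argument is treated as its magnitude, so both of these
 * delay for about 100 microseconds:
 *
 *     delay(100);
 *     delay(-100);
 */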

/*
 * Miscellaneous routines.
 */
void
clock_interval_to_deadline(
    uint32_t interval,
    uint32_t scale_factor,
    uint64_t *result)
{
    uint64_t abstime;

    clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

    if (os_add_overflow(mach_absolute_time(), abstime, result)) {
        *result = UINT64_MAX;
    }
}
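
/*
 * Illustrative sketch (annotation, not part of the original source):
 * compute a deadline 50ms from now; on arithmetic overflow the
 * deadline saturates at UINT64_MAX rather than wrapping:
 *
 *     uint64_t deadline;
 *     clock_interval_to_deadline(50, NSEC_PER_MSEC, &deadline);
 */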

void
nanoseconds_to_deadline(
    uint64_t interval,
    uint64_t *result)
{
    uint64_t abstime;

    nanoseconds_to_absolutetime(interval, &abstime);

    if (os_add_overflow(mach_absolute_time(), abstime, result)) {
        *result = UINT64_MAX;
    }
}

void
clock_absolutetime_interval_to_deadline(
    uint64_t abstime,
    uint64_t *result)
{
    if (os_add_overflow(mach_absolute_time(), abstime, result)) {
        *result = UINT64_MAX;
    }
}

void
clock_continuoustime_interval_to_deadline(
    uint64_t conttime,
    uint64_t *result)
{
    if (os_add_overflow(mach_continuous_time(), conttime, result)) {
        *result = UINT64_MAX;
    }
}

void
clock_get_uptime(
    uint64_t *result)
{
    *result = mach_absolute_time();
}

void
clock_deadline_for_periodic_event(
    uint64_t interval,
    uint64_t abstime,
    uint64_t *deadline)
{
    assert(interval != 0);

    /* Advance by one period: *deadline += interval, saturating on overflow. */
    if (os_add_overflow(*deadline, interval, deadline)) {
        *deadline = UINT64_MAX;
    }

    if (*deadline <= abstime) {
        /* We fell behind; restart from the caller-supplied time. */
        if (os_add_overflow(abstime, interval, deadline)) {
            *deadline = UINT64_MAX;
        }

        abstime = mach_absolute_time();
        if (*deadline <= abstime) {
            /* Still behind; restart from the current time. */
            if (os_add_overflow(abstime, interval, deadline)) {
                *deadline = UINT64_MAX;
            }
        }
    }
}
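
/*
 * Illustrative sketch (annotation, not part of the original source):
 * driving a periodic tick, advancing the deadline once per expiration
 * and letting the routine above resynchronize after a missed period:
 *
 *     uint64_t period, deadline = mach_absolute_time();
 *     clock_interval_to_absolutetime_interval(10, NSEC_PER_MSEC, &period);
 *     for (;;) {
 *         clock_deadline_for_periodic_event(period, mach_absolute_time(), &deadline);
 *         clock_delay_until(deadline);
 *         // ... periodic work ...
 *     }
 */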

uint64_t
mach_continuous_time(void)
{
#if HIBERNATION && HAS_CONTINUOUS_HWCLOCK
    return ml_get_hwclock() + hwclock_conttime_offset;
#elif HAS_CONTINUOUS_HWCLOCK
    return ml_get_hwclock();
#else
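    /*
     * Annotation (added, not from the original source): without a
     * hardware continuous clock, continuous time is synthesized as
     * mach_absolute_time() plus the accumulated time spent asleep.
     * mach_absolutetime_asleep is read before and after sampling the
     * clock; if the two reads differ, a sleep/wake update raced with
     * this read and the loop retries.
     */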
    while (1) {
        uint64_t read1 = mach_absolutetime_asleep;
        uint64_t absolute = mach_absolute_time();
        OSMemoryBarrier();
        uint64_t read2 = mach_absolutetime_asleep;

        if (__builtin_expect(read1 == read2, 1)) {
            return absolute + read1;
        }
    }
#endif
}

uint64_t
mach_continuous_approximate_time(void)
{
#if HAS_CONTINUOUS_HWCLOCK
    return mach_continuous_time();
#else
    while (1) {
        uint64_t read1 = mach_absolutetime_asleep;
        uint64_t absolute = mach_approximate_time();
        OSMemoryBarrier();
        uint64_t read2 = mach_absolutetime_asleep;

        if (__builtin_expect(read1 == read2, 1)) {
            return absolute + read1;
        }
    }
#endif
}

/*
 * continuoustime_to_absolutetime
 * Must be called with interrupts disabled.
 * The returned value is only valid until the next update to
 * mach_continuous_time.
 */
uint64_t
continuoustime_to_absolutetime(uint64_t conttime)
{
    if (conttime <= mach_absolutetime_asleep) {
        return 0;
    } else {
        return conttime - mach_absolutetime_asleep;
    }
}

/*
 * absolutetime_to_continuoustime
 * Must be called with interrupts disabled.
 * The returned value is only valid until the next update to
 * mach_continuous_time.
 */
uint64_t
absolutetime_to_continuoustime(uint64_t abstime)
{
    return abstime + mach_absolutetime_asleep;
}
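
/*
 * Illustrative sketch (annotation, not part of the original source):
 * with interrupts disabled, the two conversions above invert one
 * another for any time at or after the most recent wake:
 *
 *     uint64_t abs = mach_absolute_time();
 *     uint64_t cont = absolutetime_to_continuoustime(abs);
 *     assert(continuoustime_to_absolutetime(cont) == abs);
 */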

#if CONFIG_DTRACE

/*
 * clock_get_calendar_nanotime_nowait
 *
 * Description: Non-blocking version of clock_get_calendar_nanotime()
 *
 * Notes: This function operates by separately tracking calendar time
 *	updates using a two-element structure to copy the calendar
 *	state, which may be asynchronously modified. It utilizes
 *	barrier instructions in the tracking process and in the local
 *	stable snapshot process in order to ensure that a consistent
 *	snapshot is used to perform the calculation.
 */
void
clock_get_calendar_nanotime_nowait(
    clock_sec_t *secs,
    clock_nsec_t *nanosecs)
{
    int i = 0;
    uint64_t now;
    struct unlocked_clock_calend stable;
    struct bintime bt;

    for (;;) {
        stable = flipflop[i]; /* take snapshot */

        /*
         * Use barrier instructions to ensure atomicity. We AND
         * off the "in progress" bit to get the current generation
         * count.
         */
        os_atomic_andnot(&stable.gen, 1, relaxed);

        /*
         * If an update _is_ in progress, the generation count will be
         * off by one; if it _was_ in progress, it will be off by two;
         * and if we caught it at a good time, it will be equal (and
         * our snapshot is therefore stable).
         */
        if (flipflop[i].gen == stable.gen) {
            break;
        }

        /* Switch to the other element of the flipflop, and try again. */
        i ^= 1;
    }

    now = mach_absolute_time();

    bt = get_scaled_time(now);

    bintime_add(&bt, &clock_calend.bintime);

    bintime2nsclock(&bt, secs, nanosecs);
}

static void
clock_track_calend_nowait(void)
{
    int i;

    for (i = 0; i < 2; i++) {
        struct clock_calend tmp = clock_calend;

        /*
         * Set the low bit of the generation count; since we use a
         * barrier instruction to do this, we are guaranteed that this
         * will flag an update in progress to an async caller trying
         * to examine the contents.
         */
        os_atomic_or(&flipflop[i].gen, 1, relaxed);

        flipflop[i].calend = tmp;

        /*
         * Increment the generation count to clear the low bit and
         * signal completion. If a caller compares the generation
         * count after taking a copy while in progress, the count
         * will be off by two.
         */
        os_atomic_inc(&flipflop[i].gen, relaxed);
    }
}
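
/*
 * Annotation (added, not from the original source): together these two
 * routines form a two-element sequence-lock.  The writer makes the
 * generation count odd, copies the calendar state, then makes it even
 * again; the reader snapshots an element, masks off the low bit, and
 * accepts the snapshot only if the element's generation count still
 * matches.  In pseudo-form:
 *
 *     writer: gen |= 1;  calend = clock_calend;  gen++;
 *     reader: stable = flipflop[i];  stable.gen &= ~1;
 *             consistent iff flipflop[i].gen == stable.gen
 *
 * An in-progress or intervening update forces the reader over to the
 * other element of the flipflop.
 */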

#endif /* CONFIG_DTRACE */