/* [apple/xnu.git] / osfmk / kern / clock.c (xnu-6153.61.1) */
/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 */
/*-
 * Copyright (c) 1982, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)time.h	8.5 (Berkeley) 5/4/95
 * $FreeBSD$
 */

#include <mach/mach_types.h>

#include <kern/spl.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <kern/host_notify.h>
#include <kern/thread_call.h>
#include <libkern/OSAtomic.h>

#include <IOKit/IOPlatformExpert.h>

#include <machine/commpage.h>
#include <machine/config.h>
#include <machine/machine_routines.h>

#include <mach/mach_traps.h>
#include <mach/mach_time.h>

#include <sys/kdebug.h>
#include <sys/timex.h>
#include <kern/arithmetic_128.h>
#include <os/log.h>

uint32_t	hz_tick_interval = 1;
static uint64_t	has_monotonic_clock = 0;

decl_simple_lock_data(, clock_lock);
lck_grp_attr_t * settime_lock_grp_attr;
lck_grp_t * settime_lock_grp;
lck_attr_t * settime_lock_attr;
lck_mtx_t settime_lock;

#define clock_lock()	\
	simple_lock(&clock_lock, LCK_GRP_NULL)

#define clock_unlock()	\
	simple_unlock(&clock_lock)

#define clock_lock_init()	\
	simple_lock_init(&clock_lock, 0)

#ifdef kdp_simple_lock_is_acquired
boolean_t
kdp_clock_is_locked()
{
	return kdp_simple_lock_is_acquired(&clock_lock);
}
#endif

struct bintime {
	time_t		sec;
	uint64_t	frac;
};
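
/*
 * bintime is a 64.64 fixed-point value: sec holds whole seconds, and frac
 * counts 1/2^64-ths of a second. A few worked values, taken from the
 * constants used below:
 *
 *	0.5 s	<-> frac = 1ULL << 63
 *	1 us	<-> frac = 18446744073709  (int(2^64 / USEC_PER_SEC), see clock2bintime)
 *	1 ns	<-> frac = 18446744073     (int(2^64 / NSEC_PER_SEC), see bintime_addns)
 *
 * The helpers that follow implement multi-word arithmetic on (sec, frac):
 * a wrap of frac past 2^64 is detected by comparing against its previous
 * value, and a carry or borrow is then applied to sec.
 */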

static __inline void
bintime_addx(struct bintime *_bt, uint64_t _x)
{
	uint64_t _u;

	_u = _bt->frac;
	_bt->frac += _x;
	if (_u > _bt->frac) {
		_bt->sec++;
	}
}

static __inline void
bintime_subx(struct bintime *_bt, uint64_t _x)
{
	uint64_t _u;

	_u = _bt->frac;
	_bt->frac -= _x;
	if (_u < _bt->frac) {
		_bt->sec--;
	}
}

static __inline void
bintime_addns(struct bintime *bt, uint64_t ns)
{
	bt->sec += ns / (uint64_t)NSEC_PER_SEC;
	ns = ns % (uint64_t)NSEC_PER_SEC;
	if (ns) {
		/* 18446744073 = int(2^64 / NSEC_PER_SEC) */
		ns = ns * (uint64_t)18446744073LL;
		bintime_addx(bt, ns);
	}
}

static __inline void
bintime_subns(struct bintime *bt, uint64_t ns)
{
	bt->sec -= ns / (uint64_t)NSEC_PER_SEC;
	ns = ns % (uint64_t)NSEC_PER_SEC;
	if (ns) {
		/* 18446744073 = int(2^64 / NSEC_PER_SEC) */
		ns = ns * (uint64_t)18446744073LL;
		bintime_subx(bt, ns);
	}
}

static __inline void
bintime_addxns(struct bintime *bt, uint64_t a, int64_t xns)
{
	uint64_t uxns = (xns > 0)?(uint64_t)xns:(uint64_t)-xns;
	uint64_t ns = multi_overflow(a, uxns);
	if (xns > 0) {
		if (ns) {
			bintime_addns(bt, ns);
		}
		ns = (a * uxns) / (uint64_t)NSEC_PER_SEC;
		bintime_addx(bt, ns);
	} else {
		if (ns) {
			bintime_subns(bt, ns);
		}
		ns = (a * uxns) / (uint64_t)NSEC_PER_SEC;
		bintime_subx(bt, ns);
	}
}


static __inline void
bintime_add(struct bintime *_bt, const struct bintime *_bt2)
{
	uint64_t _u;

	_u = _bt->frac;
	_bt->frac += _bt2->frac;
	if (_u > _bt->frac) {
		_bt->sec++;
	}
	_bt->sec += _bt2->sec;
}

static __inline void
bintime_sub(struct bintime *_bt, const struct bintime *_bt2)
{
	uint64_t _u;

	_u = _bt->frac;
	_bt->frac -= _bt2->frac;
	if (_u < _bt->frac) {
		_bt->sec--;
	}
	_bt->sec -= _bt2->sec;
}

static __inline void
clock2bintime(const clock_sec_t *secs, const clock_usec_t *microsecs, struct bintime *_bt)
{
	_bt->sec = *secs;
	/* 18446744073709 = int(2^64 / 1000000) */
	_bt->frac = *microsecs * (uint64_t)18446744073709LL;
}

static __inline void
bintime2usclock(const struct bintime *_bt, clock_sec_t *secs, clock_usec_t *microsecs)
{
	*secs = _bt->sec;
	*microsecs = ((uint64_t)USEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32;
}

static __inline void
bintime2nsclock(const struct bintime *_bt, clock_sec_t *secs, clock_usec_t *nanosecs)
{
	*secs = _bt->sec;
	*nanosecs = ((uint64_t)NSEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32;
}

static __inline void
bintime2absolutetime(const struct bintime *_bt, uint64_t *abs)
{
	uint64_t nsec;
	nsec = (uint64_t) _bt->sec * (uint64_t)NSEC_PER_SEC + (((uint64_t)NSEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32);
	nanoseconds_to_absolutetime(nsec, abs);
}

struct latched_time {
	uint64_t monotonic_time_usec;
	uint64_t mach_time;
};

extern int
kernel_sysctlbyname(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen);

/*
 * Time of day (calendar) variables.
 *
 * Algorithm:
 *
 * TOD <- bintime + delta*scale
 *
 * where:
 *	bintime is a cumulative offset that includes boottime and scaled time elapsed between boottime and the last scale update.
 *	delta is ticks elapsed since the last scale update.
 *	scale is computed according to an adjustment provided by ntp_kern.
 */
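/*
 * A worked instance of the algorithm above: if the scale factors were last
 * updated at offset_count with bintime = (1000 s, 0), and tick_scale_x has
 * its nominal value of ~2^64 / ticks_per_sec (no NTP adjustment), then
 * after delta = ticks_per_sec / 2 ticks, delta * tick_scale_x ~= 2^63,
 * i.e. half a second of fraction, so TOD = (1000 s, 2^63) = 1000.5 s.
 * scale_delta() and get_scaled_time() below compute the delta*scale term.
 */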
static struct clock_calend {
	uint64_t	s_scale_ns;	/* scale to apply for each second elapsed; converts to ns */
	int64_t		s_adj_nsx;	/* additional adj to apply for each second elapsed, expressed in 64 bit frac of ns */
	uint64_t	tick_scale_x;	/* scale to apply for each tick elapsed; converts to 64 bit frac of s */
	uint64_t	offset_count;	/* abs time from which to apply the current scales */
	struct bintime	offset;		/* cumulative offset expressed in (sec, 64 bits frac of a second) */
	struct bintime	bintime;	/* cumulative offset (it includes boottime) expressed in (sec, 64 bits frac of a second) */
	struct bintime	boottime;	/* boot time expressed in (sec, 64 bits frac of a second) */
	struct bintime	basesleep;
} clock_calend;

static uint64_t ticks_per_sec;	/* ticks in a second (expressed in abs time) */

#if DEVELOPMENT || DEBUG
extern int g_should_log_clock_adjustments;

static void print_all_clock_variables(const char*, clock_sec_t* pmu_secs, clock_usec_t* pmu_usec, clock_sec_t* sys_secs, clock_usec_t* sys_usec, struct clock_calend* calend_cp);
static void print_all_clock_variables_internal(const char *, struct clock_calend* calend_cp);
#else
#define print_all_clock_variables(...) do { } while (0)
#define print_all_clock_variables_internal(...) do { } while (0)
#endif

#if CONFIG_DTRACE


/*
 * Unlocked calendar flipflop; this is used to track a clock_calend such
 * that we can safely access a snapshot of a valid clock_calend structure
 * without needing to take any locks to do it.
 *
 * The trick is to use a generation count and set the low bit when it is
 * being updated/read; by doing this, we guarantee, through use of the
 * os_atomic functions, that the generation is incremented when the bit
 * is cleared atomically (by using a 1 bit add).
 */
static struct unlocked_clock_calend {
	struct clock_calend	calend;	/* copy of calendar */
	uint32_t		gen;	/* generation count */
} flipflop[2];

static void clock_track_calend_nowait(void);

#endif

void _clock_delay_until_deadline(uint64_t interval, uint64_t deadline);
void _clock_delay_until_deadline_with_leeway(uint64_t interval, uint64_t deadline, uint64_t leeway);

/* Boottime variables */
static uint64_t clock_boottime;
static uint32_t clock_boottime_usec;

#define TIME_ADD(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN						\
	if (((rfrac) += (frac)) >= (unit)) {		\
		(rfrac) -= (unit);			\
		(rsecs) += 1;				\
	}						\
	(rsecs) += (secs);				\
MACRO_END

#define TIME_SUB(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN						\
	if ((int)((rfrac) -= (frac)) < 0) {		\
		(rfrac) += (unit);			\
		(rsecs) -= 1;				\
	}						\
	(rsecs) -= (secs);				\
MACRO_END
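
/*
 * Example: TIME_ADD(rsecs, 1, rfrac, 700000, USEC_PER_SEC) applied to
 * (rsecs, rfrac) = (2 s, 600000 us) first forms rfrac = 1300000, detects
 * the overflow past USEC_PER_SEC and normalizes to 300000 us with a carry,
 * giving (4 s, 300000 us). TIME_SUB borrows from rsecs the same way when
 * the fractional subtraction goes negative.
 */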

/*
 * clock_config:
 *
 * Called once at boot to configure the clock subsystem.
 */
void
clock_config(void)
{
	clock_lock_init();

	settime_lock_grp_attr = lck_grp_attr_alloc_init();
	settime_lock_grp = lck_grp_alloc_init("settime grp", settime_lock_grp_attr);
	settime_lock_attr = lck_attr_alloc_init();
	lck_mtx_init(&settime_lock, settime_lock_grp, settime_lock_attr);

	clock_oldconfig();

	ntp_init();

	nanoseconds_to_absolutetime((uint64_t)NSEC_PER_SEC, &ticks_per_sec);
}

/*
 * clock_init:
 *
 * Called on a processor each time started.
 */
void
clock_init(void)
{
	clock_oldinit();
}

/*
 * clock_timebase_init:
 *
 * Called by machine dependent code
 * to initialize areas dependent on the
 * timebase value. May be called multiple
 * times during start up.
 */
void
clock_timebase_init(void)
{
	uint64_t	abstime;

	nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
	hz_tick_interval = (uint32_t)abstime;

	sched_timebase_init();
}

/*
 * mach_timebase_info_trap:
 *
 * User trap returns timebase constant.
 */
kern_return_t
mach_timebase_info_trap(
	struct mach_timebase_info_trap_args *args)
{
	mach_vm_address_t	out_info_addr = args->info;
	mach_timebase_info_data_t	info = {};

	clock_timebase_info(&info);

	copyout((void *)&info, out_info_addr, sizeof(info));

	return KERN_SUCCESS;
}
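
/*
 * For reference, a minimal user-space sketch of how the timebase constant
 * returned by this trap is typically consumed, via the mach_timebase_info()
 * wrapper declared in <mach/mach_time.h>. Illustrative only; not part of
 * this file:
 *
 *	#include <mach/mach_time.h>
 *
 *	static uint64_t
 *	abs_to_nanos(uint64_t abs)
 *	{
 *		static mach_timebase_info_data_t tb;
 *		if (tb.denom == 0) {
 *			(void) mach_timebase_info(&tb);	// fills numer/denom
 *		}
 *		// Note: the multiply can overflow for very large abs values.
 *		return abs * tb.numer / tb.denom;
 *	}
 */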

/*
 * Calendar routines.
 */

/*
 * clock_get_calendar_microtime:
 *
 * Returns the current calendar value,
 * microseconds as the fraction.
 */
void
clock_get_calendar_microtime(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs)
{
	clock_get_calendar_absolute_and_microtime(secs, microsecs, NULL);
}

/*
 * get_scale_factors_from_adj:
 *
 * computes scale factors from the value given in adjustment.
 *
 * Part of the code has been taken from tc_windup of FreeBSD
 * written by Poul-Henning Kamp <phk@FreeBSD.ORG>, Julien Ridoux and
 * Konstantin Belousov.
 * https://github.com/freebsd/freebsd/blob/master/sys/kern/kern_tc.c
 */
static void
get_scale_factors_from_adj(int64_t adjustment, uint64_t* tick_scale_x, uint64_t* s_scale_ns, int64_t* s_adj_nsx)
{
	uint64_t scale;
	int64_t nano, frac;

	/*-
	 * Calculating the scaling factor.  We want the number of 1/2^64
	 * fractions of a second per period of the hardware counter, taking
	 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
	 * processing provides us with.
	 *
	 * The th_adjustment is nanoseconds per second with 32 bit binary
	 * fraction and we want 64 bit binary fraction of second:
	 *
	 *	 x = a * 2^32 / 10^9 = a * 4.294967296
	 *
	 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
	 * we can only multiply by about 850 without overflowing, that
	 * leaves no suitably precise fractions for multiply before divide.
	 *
	 * Divide before multiply with a fraction of 2199/512 results in a
	 * systematic undercompensation of 10PPM of th_adjustment.  On a
	 * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
	 *
	 * We happily sacrifice the lowest of the 64 bits of our result
	 * to the goddess of code clarity.
	 *
	 */
	scale = (uint64_t)1 << 63;
	scale += (adjustment / 1024) * 2199;
	scale /= ticks_per_sec;
	*tick_scale_x = scale * 2;

	/*
	 * hi part of adj
	 * it contains ns (without fraction) to add to the next sec.
	 * Get ns scale factor for the next sec.
	 */
	nano = (adjustment > 0)? adjustment >> 32 : -((-adjustment) >> 32);
	scale = (uint64_t) NSEC_PER_SEC;
	scale += nano;
	*s_scale_ns = scale;

	/*
	 * lo part of adj
	 * it contains 32 bit frac of ns to add to the next sec.
	 * Keep it as additional adjustment for the next sec.
	 */
	frac = (adjustment > 0)? ((uint32_t) adjustment) : -((uint32_t) (-adjustment));
	*s_adj_nsx = (frac > 0)? frac << 32 : -((-frac) << 32);

	return;
}
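
/*
 * Worked example: with adjustment == 0 the factors reduce to their nominal
 * values: tick_scale_x = 2 * ((2^63) / ticks_per_sec) ~= 2^64 / ticks_per_sec
 * (one tick is 1/ticks_per_sec of a second in 64 bit binary fraction),
 * s_scale_ns = NSEC_PER_SEC, and s_adj_nsx = 0. For a nonzero adjustment,
 * the divide-before-multiply with 2199/512 described above folds the ppm
 * correction into tick_scale_x, while the high and low 32 bit halves of
 * adjustment become the whole-ns (s_scale_ns) and fractional-ns (s_adj_nsx)
 * per-second corrections, respectively.
 */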

/*
 * scale_delta:
 *
 * returns a bintime struct representing delta scaled according to the
 * scale factors provided to this function.
 */
static struct bintime
scale_delta(uint64_t delta, uint64_t tick_scale_x, uint64_t s_scale_ns, int64_t s_adj_nsx)
{
	uint64_t sec, new_ns, over;
	struct bintime bt;

	bt.sec = 0;
	bt.frac = 0;

	/*
	 * If more than one second is elapsed,
	 * scale fully elapsed seconds using scale factors for seconds.
	 * s_scale_ns -> scales sec to ns.
	 * s_adj_nsx -> additional adj expressed in 64 bit frac of ns to apply to each sec.
	 */
	if (delta > ticks_per_sec) {
		sec = (delta / ticks_per_sec);
		new_ns = sec * s_scale_ns;
		bintime_addns(&bt, new_ns);
		if (s_adj_nsx) {
			if (sec == 1) {
				/* shortcut, no overflow can occur */
				if (s_adj_nsx > 0) {
					bintime_addx(&bt, (uint64_t)s_adj_nsx / (uint64_t)NSEC_PER_SEC);
				} else {
					bintime_subx(&bt, (uint64_t)-s_adj_nsx / (uint64_t)NSEC_PER_SEC);
				}
			} else {
				/*
				 * s_adj_nsx is 64 bit frac of ns.
				 * sec*s_adj_nsx might overflow in int64_t.
				 * use bintime_addxns to not lose overflowed ns.
				 */
				bintime_addxns(&bt, sec, s_adj_nsx);
			}
		}
		delta = (delta % ticks_per_sec);
	}

	over = multi_overflow(tick_scale_x, delta);
	if (over) {
		bt.sec += over;
	}

	/*
	 * scale elapsed ticks using the scale factor for ticks.
	 */
	bintime_addx(&bt, delta * tick_scale_x);

	return bt;
}

/*
 * get_scaled_time:
 *
 * returns the scaled time of the time elapsed from the last time
 * scale factors were updated to now.
 */
static struct bintime
get_scaled_time(uint64_t now)
{
	uint64_t delta;

	/*
	 * Compute ticks elapsed since last scale update.
	 * This time will be scaled according to the value given by ntp kern.
	 */
	delta = now - clock_calend.offset_count;

	return scale_delta(delta, clock_calend.tick_scale_x, clock_calend.s_scale_ns, clock_calend.s_adj_nsx);
}

static void
clock_get_calendar_absolute_and_microtime_locked(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs,
	uint64_t		*abstime)
{
	uint64_t now;
	struct bintime bt;

	now = mach_absolute_time();
	if (abstime) {
		*abstime = now;
	}

	bt = get_scaled_time(now);
	bintime_add(&bt, &clock_calend.bintime);
	bintime2usclock(&bt, secs, microsecs);
}

static void
clock_get_calendar_absolute_and_nanotime_locked(
	clock_sec_t		*secs,
	clock_usec_t		*nanosecs,
	uint64_t		*abstime)
{
	uint64_t now;
	struct bintime bt;

	now = mach_absolute_time();
	if (abstime) {
		*abstime = now;
	}

	bt = get_scaled_time(now);
	bintime_add(&bt, &clock_calend.bintime);
	bintime2nsclock(&bt, secs, nanosecs);
}

/*
 * clock_get_calendar_absolute_and_microtime:
 *
 * Returns the current calendar value,
 * microseconds as the fraction. Also
 * returns mach_absolute_time if abstime
 * is not NULL.
 */
void
clock_get_calendar_absolute_and_microtime(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs,
	uint64_t		*abstime)
{
	spl_t			s;

	s = splclock();
	clock_lock();

	clock_get_calendar_absolute_and_microtime_locked(secs, microsecs, abstime);

	clock_unlock();
	splx(s);
}

/*
 * clock_get_calendar_nanotime:
 *
 * Returns the current calendar value,
 * nanoseconds as the fraction.
 *
 * Since we do not have an interface to
 * set the calendar with resolution greater
 * than a microsecond, we honor that here.
 */
void
clock_get_calendar_nanotime(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	spl_t			s;

	s = splclock();
	clock_lock();

	clock_get_calendar_absolute_and_nanotime_locked(secs, nanosecs, NULL);

	clock_unlock();
	splx(s);
}

/*
 * clock_gettimeofday:
 *
 * Kernel interface for commpage implementation of
 * gettimeofday() syscall.
 *
 * Returns the current calendar value, and updates the
 * commpage info as appropriate.  Because most calls to
 * gettimeofday() are handled in user mode by the commpage,
 * this routine should be used infrequently.
 */
void
clock_gettimeofday(
	clock_sec_t	*secs,
	clock_usec_t	*microsecs)
{
	clock_gettimeofday_and_absolute_time(secs, microsecs, NULL);
}

void
clock_gettimeofday_and_absolute_time(
	clock_sec_t	*secs,
	clock_usec_t	*microsecs,
	uint64_t	*mach_time)
{
	uint64_t	now;
	spl_t		s;
	struct bintime	bt;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();
	bt = get_scaled_time(now);
	bintime_add(&bt, &clock_calend.bintime);
	bintime2usclock(&bt, secs, microsecs);

	clock_gettimeofday_set_commpage(now, bt.sec, bt.frac, clock_calend.tick_scale_x, ticks_per_sec);

	clock_unlock();
	splx(s);

	if (mach_time) {
		*mach_time = now;
	}
}

/*
 * clock_set_calendar_microtime:
 *
 * Sets the current calendar value by
 * recalculating the epoch and offset
 * from the system clock.
 *
 * Also adjusts the boottime to keep the
 * value consistent, writes the new
 * calendar value to the platform clock,
 * and sends calendar change notifications.
 */
void
clock_set_calendar_microtime(
	clock_sec_t		secs,
	clock_usec_t		microsecs)
{
	uint64_t		absolutesys;
	clock_sec_t		newsecs;
	clock_sec_t		oldsecs;
	clock_usec_t		newmicrosecs;
	clock_usec_t		oldmicrosecs;
	uint64_t		commpage_value;
	spl_t			s;
	struct bintime		bt;
	clock_sec_t		deltasecs;
	clock_usec_t		deltamicrosecs;

	newsecs = secs;
	newmicrosecs = microsecs;

	/*
	 * The settime_lock mutex prevents racing settimeofday() calls from
	 * updating the wall clock and the platform clock concurrently.
	 *
	 * clock_lock cannot be used for this race because it is acquired from
	 * interrupt context and must be held with interrupts disabled, while
	 * updating the platform clock has to be done with interrupts enabled.
	 */
	lck_mtx_lock(&settime_lock);

	s = splclock();
	clock_lock();

#if DEVELOPMENT || DEBUG
	struct clock_calend clock_calend_cp = clock_calend;
#endif
	commpage_disable_timestamp();

	/*
	 * Adjust the boottime based on the delta.
	 */
	clock_get_calendar_absolute_and_microtime_locked(&oldsecs, &oldmicrosecs, &absolutesys);

#if DEVELOPMENT || DEBUG
	if (g_should_log_clock_adjustments) {
		os_log(OS_LOG_DEFAULT, "%s wall %lu s %d u computed with %llu abs\n",
		    __func__, (unsigned long)oldsecs, oldmicrosecs, absolutesys);
		os_log(OS_LOG_DEFAULT, "%s requested %lu s %d u\n",
		    __func__, (unsigned long)secs, microsecs);
	}
#endif

	if (oldsecs < secs || (oldsecs == secs && oldmicrosecs < microsecs)) {
		// moving forwards
		deltasecs = secs;
		deltamicrosecs = microsecs;

		TIME_SUB(deltasecs, oldsecs, deltamicrosecs, oldmicrosecs, USEC_PER_SEC);

		TIME_ADD(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
		clock2bintime(&deltasecs, &deltamicrosecs, &bt);
		bintime_add(&clock_calend.boottime, &bt);
	} else {
		// moving backwards
		deltasecs = oldsecs;
		deltamicrosecs = oldmicrosecs;

		TIME_SUB(deltasecs, secs, deltamicrosecs, microsecs, USEC_PER_SEC);

		TIME_SUB(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
		clock2bintime(&deltasecs, &deltamicrosecs, &bt);
		bintime_sub(&clock_calend.boottime, &bt);
	}

	clock_calend.bintime = clock_calend.boottime;
	bintime_add(&clock_calend.bintime, &clock_calend.offset);

	clock2bintime((clock_sec_t *) &secs, (clock_usec_t *) &microsecs, &bt);

	clock_gettimeofday_set_commpage(absolutesys, bt.sec, bt.frac, clock_calend.tick_scale_x, ticks_per_sec);

#if DEVELOPMENT || DEBUG
	struct clock_calend clock_calend_cp1 = clock_calend;
#endif

	commpage_value = clock_boottime * USEC_PER_SEC + clock_boottime_usec;

	clock_unlock();
	splx(s);

	/*
	 * Set the new value for the platform clock.
	 * This call might block, so interrupts must be enabled.
	 */
#if DEVELOPMENT || DEBUG
	uint64_t now_b = mach_absolute_time();
#endif

	PESetUTCTimeOfDay(newsecs, newmicrosecs);

#if DEVELOPMENT || DEBUG
	uint64_t now_a = mach_absolute_time();
	if (g_should_log_clock_adjustments) {
		os_log(OS_LOG_DEFAULT, "%s mach bef PESet %llu mach aft %llu \n", __func__, now_b, now_a);
	}
#endif

	print_all_clock_variables_internal(__func__, &clock_calend_cp);
	print_all_clock_variables_internal(__func__, &clock_calend_cp1);

	commpage_update_boottime(commpage_value);

	/*
	 * Send host notifications.
	 */
	host_notify_calendar_change();
	host_notify_calendar_set();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif

	lck_mtx_unlock(&settime_lock);
}

uint64_t mach_absolutetime_asleep = 0;
uint64_t mach_absolutetime_last_sleep = 0;

void
clock_get_calendar_uptime(clock_sec_t *secs)
{
	uint64_t now;
	spl_t s;
	struct bintime bt;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	bt = get_scaled_time(now);
	bintime_add(&bt, &clock_calend.offset);

	*secs = bt.sec;

	clock_unlock();
	splx(s);
}


/*
 * clock_update_calendar:
 *
 * called by ntp timer to update scale factors.
 */
void
clock_update_calendar(void)
{
	uint64_t now, delta;
	struct bintime bt;
	spl_t s;
	int64_t adjustment;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	/*
	 * scale the time elapsed since the last update and
	 * add it to offset.
	 */
	bt = get_scaled_time(now);
	bintime_add(&clock_calend.offset, &bt);

	/*
	 * update the base from which to apply the next scale factors.
	 */
	delta = now - clock_calend.offset_count;
	clock_calend.offset_count += delta;

	clock_calend.bintime = clock_calend.offset;
	bintime_add(&clock_calend.bintime, &clock_calend.boottime);

	/*
	 * recompute next adjustment.
	 */
	ntp_update_second(&adjustment, clock_calend.bintime.sec);

#if DEVELOPMENT || DEBUG
	if (g_should_log_clock_adjustments) {
		os_log(OS_LOG_DEFAULT, "%s adjustment %lld\n", __func__, adjustment);
	}
#endif

	/*
	 * recompute scale factors.
	 */
	get_scale_factors_from_adj(adjustment, &clock_calend.tick_scale_x, &clock_calend.s_scale_ns, &clock_calend.s_adj_nsx);

	clock_gettimeofday_set_commpage(now, clock_calend.bintime.sec, clock_calend.bintime.frac, clock_calend.tick_scale_x, ticks_per_sec);

#if DEVELOPMENT || DEBUG
	struct clock_calend calend_cp = clock_calend;
#endif

	clock_unlock();
	splx(s);

	print_all_clock_variables(__func__, NULL, NULL, NULL, NULL, &calend_cp);
}


#if DEVELOPMENT || DEBUG

void
print_all_clock_variables_internal(const char* func, struct clock_calend* clock_calend_cp)
{
	clock_sec_t offset_secs;
	clock_usec_t offset_microsecs;
	clock_sec_t bintime_secs;
	clock_usec_t bintime_microsecs;
	clock_sec_t bootime_secs;
	clock_usec_t bootime_microsecs;

	if (!g_should_log_clock_adjustments) {
		return;
	}

	bintime2usclock(&clock_calend_cp->offset, &offset_secs, &offset_microsecs);
	bintime2usclock(&clock_calend_cp->bintime, &bintime_secs, &bintime_microsecs);
	bintime2usclock(&clock_calend_cp->boottime, &bootime_secs, &bootime_microsecs);

	os_log(OS_LOG_DEFAULT, "%s s_scale_ns %llu s_adj_nsx %lld tick_scale_x %llu offset_count %llu\n",
	    func, clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx,
	    clock_calend_cp->tick_scale_x, clock_calend_cp->offset_count);
	os_log(OS_LOG_DEFAULT, "%s offset.sec %ld offset.frac %llu offset_secs %lu offset_microsecs %d\n",
	    func, clock_calend_cp->offset.sec, clock_calend_cp->offset.frac,
	    (unsigned long)offset_secs, offset_microsecs);
	os_log(OS_LOG_DEFAULT, "%s bintime.sec %ld bintime.frac %llu bintime_secs %lu bintime_microsecs %d\n",
	    func, clock_calend_cp->bintime.sec, clock_calend_cp->bintime.frac,
	    (unsigned long)bintime_secs, bintime_microsecs);
	os_log(OS_LOG_DEFAULT, "%s bootime.sec %ld bootime.frac %llu bootime_secs %lu bootime_microsecs %d\n",
	    func, clock_calend_cp->boottime.sec, clock_calend_cp->boottime.frac,
	    (unsigned long)bootime_secs, bootime_microsecs);

	clock_sec_t basesleep_secs;
	clock_usec_t basesleep_microsecs;

	bintime2usclock(&clock_calend_cp->basesleep, &basesleep_secs, &basesleep_microsecs);
	os_log(OS_LOG_DEFAULT, "%s basesleep.sec %ld basesleep.frac %llu basesleep_secs %lu basesleep_microsecs %d\n",
	    func, clock_calend_cp->basesleep.sec, clock_calend_cp->basesleep.frac,
	    (unsigned long)basesleep_secs, basesleep_microsecs);
}


void
print_all_clock_variables(const char* func, clock_sec_t* pmu_secs, clock_usec_t* pmu_usec, clock_sec_t* sys_secs, clock_usec_t* sys_usec, struct clock_calend* clock_calend_cp)
{
	if (!g_should_log_clock_adjustments) {
		return;
	}

	struct bintime bt;
	clock_sec_t wall_secs;
	clock_usec_t wall_microsecs;
	uint64_t now;
	uint64_t delta;

	if (pmu_secs) {
		os_log(OS_LOG_DEFAULT, "%s PMU %lu s %d u \n", func, (unsigned long)*pmu_secs, *pmu_usec);
	}
	if (sys_secs) {
		os_log(OS_LOG_DEFAULT, "%s sys %lu s %d u \n", func, (unsigned long)*sys_secs, *sys_usec);
	}

	print_all_clock_variables_internal(func, clock_calend_cp);

	now = mach_absolute_time();
	delta = now - clock_calend_cp->offset_count;

	bt = scale_delta(delta, clock_calend_cp->tick_scale_x, clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx);
	bintime_add(&bt, &clock_calend_cp->bintime);
	bintime2usclock(&bt, &wall_secs, &wall_microsecs);

	os_log(OS_LOG_DEFAULT, "%s wall %lu s %d u computed with %llu abs\n",
	    func, (unsigned long)wall_secs, wall_microsecs, now);
}


#endif /* DEVELOPMENT || DEBUG */


/*
 * clock_initialize_calendar:
 *
 * Set the calendar and related clocks
 * from the platform clock at boot.
 *
 * Also sends host notifications.
 */
void
clock_initialize_calendar(void)
{
	clock_sec_t		sys;	// sleepless time since boot in seconds
	clock_sec_t		secs;	// Current UTC time
	clock_sec_t		utc_offset_secs;	// Difference between current UTC time and sleepless time since boot
	clock_usec_t		microsys;
	clock_usec_t		microsecs;
	clock_usec_t		utc_offset_microsecs;
	spl_t			s;
	struct bintime		bt;
	struct bintime		monotonic_bt;
	struct latched_time	monotonic_time;
	uint64_t		monotonic_usec_total;
	clock_sec_t		sys2, monotonic_sec;
	clock_usec_t		microsys2, monotonic_usec;
	size_t			size;

	// Get the UTC time and corresponding sys time
	PEGetUTCTimeOfDay(&secs, &microsecs);
	clock_get_system_microtime(&sys, &microsys);

	/*
	 * If the platform has a monotonic clock, use kern.monotonicclock_usecs
	 * to estimate the sleep/wake time, otherwise use the UTC time to estimate
	 * the sleep time.
	 */
	size = sizeof(monotonic_time);
	if (kernel_sysctlbyname("kern.monotonicclock_usecs", &monotonic_time, &size, NULL, 0) != 0) {
		has_monotonic_clock = 0;
		os_log(OS_LOG_DEFAULT, "%s system does not have monotonic clock\n", __func__);
	} else {
		has_monotonic_clock = 1;
		monotonic_usec_total = monotonic_time.monotonic_time_usec;
		absolutetime_to_microtime(monotonic_time.mach_time, &sys2, &microsys2);
		os_log(OS_LOG_DEFAULT, "%s system has monotonic clock\n", __func__);
	}

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	utc_offset_secs = secs;
	utc_offset_microsecs = microsecs;

	/*
	 * We normally expect the UTC clock to be always-on and produce
	 * greater readings than the tick counter.  There may be corner cases
	 * due to differing clock resolutions (UTC clock is likely lower) and
	 * errors reading the UTC clock (some implementations return 0 on
	 * error) in which that doesn't hold true.  Bring the UTC measurements
	 * in-line with the tick counter measurements as a best effort in that case.
	 */
	if ((sys > secs) || ((sys == secs) && (microsys > microsecs))) {
		os_log(OS_LOG_DEFAULT, "%s WARNING: UTC time is less than sys time, (%lu s %d u) UTC (%lu s %d u) sys\n",
		    __func__, (unsigned long)secs, microsecs, (unsigned long)sys, microsys);
		secs = utc_offset_secs = sys;
		microsecs = utc_offset_microsecs = microsys;
	}

	// UTC - sys
	// This macro stores the subtraction result in utc_offset_secs and utc_offset_microsecs
	TIME_SUB(utc_offset_secs, sys, utc_offset_microsecs, microsys, USEC_PER_SEC);
	// This function converts utc_offset_secs and utc_offset_microsecs into a bintime
	clock2bintime(&utc_offset_secs, &utc_offset_microsecs, &bt);

	/*
	 * Initialize the boot time based on the platform clock.
	 */
	clock_boottime = secs;
	clock_boottime_usec = microsecs;
	commpage_update_boottime(clock_boottime * USEC_PER_SEC + clock_boottime_usec);

	nanoseconds_to_absolutetime((uint64_t)NSEC_PER_SEC, &ticks_per_sec);
	clock_calend.boottime = bt;
	clock_calend.bintime = bt;
	clock_calend.offset.sec = 0;
	clock_calend.offset.frac = 0;

	clock_calend.tick_scale_x = (uint64_t)1 << 63;
	clock_calend.tick_scale_x /= ticks_per_sec;
	clock_calend.tick_scale_x *= 2;

	clock_calend.s_scale_ns = NSEC_PER_SEC;
	clock_calend.s_adj_nsx = 0;

	if (has_monotonic_clock) {
		monotonic_sec = monotonic_usec_total / (clock_sec_t)USEC_PER_SEC;
		monotonic_usec = monotonic_usec_total % (clock_usec_t)USEC_PER_SEC;

		// monotonic clock - sys
		// This macro stores the subtraction result in monotonic_sec and monotonic_usec
		TIME_SUB(monotonic_sec, sys2, monotonic_usec, microsys2, USEC_PER_SEC);
		clock2bintime(&monotonic_sec, &monotonic_usec, &monotonic_bt);

		// set basesleep to the difference monotonic clock - sys
		clock_calend.basesleep = monotonic_bt;
	}
	commpage_update_mach_continuous_time(mach_absolutetime_asleep);

#if DEVELOPMENT || DEBUG
	struct clock_calend clock_calend_cp = clock_calend;
#endif

	clock_unlock();
	splx(s);

	print_all_clock_variables(__func__, &secs, &microsecs, &sys, &microsys, &clock_calend_cp);

	/*
	 * Send host notifications.
	 */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}


void
clock_wakeup_calendar(void)
{
	clock_sec_t		wake_sys_sec;
	clock_usec_t		wake_sys_usec;
	clock_sec_t		wake_sec;
	clock_usec_t		wake_usec;
	clock_sec_t		wall_time_sec;
	clock_usec_t		wall_time_usec;
	clock_sec_t		diff_sec;
	clock_usec_t		diff_usec;
	clock_sec_t		var_s;
	clock_usec_t		var_us;
	spl_t			s;
	struct bintime		bt, last_sleep_bt;
	struct latched_time	monotonic_time;
	uint64_t		monotonic_usec_total;
	uint64_t		wake_abs;
	size_t			size;

	/*
	 * If the platform has a monotonic clock, use that to compute the
	 * sleep time. The monotonic clock does not have an offset that can
	 * be modified, so neither the kernel nor userspace can change the
	 * time of this clock; it can only monotonically increase over time.
	 * During sleep mach_absolute_time (sys time) does not tick, so the
	 * sleep time is the difference between the current (monotonic - sys)
	 * and the previous difference stored at wake time.
	 *
	 * basesleep = (monotonic - sys) ---> computed at last wake
	 * sleep_time = (monotonic - sys) - basesleep
	 *
	 * If the platform does not support a monotonic clock, we set the wall
	 * time to what the UTC clock returns us.
	 * Setting the wall time to UTC time implies that we lose all the
	 * adjustments done during wake time through adjtime/ntp_adjtime.
	 * The UTC time is the monotonic clock + an offset that can be set
	 * by the kernel.
	 * The time slept in this case is the difference between wall time
	 * and UTC at wake.
	 *
	 * IMPORTANT:
	 * We assume that only the kernel is setting the offset of the PMU/RTC
	 * and that it is doing so only through the settimeofday interface.
	 */
	if (has_monotonic_clock) {
#if DEVELOPMENT || DEBUG
		/*
		 * Just for debugging, get the wake UTC time.
		 */
		PEGetUTCTimeOfDay(&var_s, &var_us);
#endif
		/*
		 * Get monotonic time with corresponding sys time
		 */
		size = sizeof(monotonic_time);
		if (kernel_sysctlbyname("kern.monotonicclock_usecs", &monotonic_time, &size, NULL, 0) != 0) {
			panic("%s: could not call kern.monotonicclock_usecs", __func__);
		}
		wake_abs = monotonic_time.mach_time;
		absolutetime_to_microtime(wake_abs, &wake_sys_sec, &wake_sys_usec);

		monotonic_usec_total = monotonic_time.monotonic_time_usec;
		wake_sec = monotonic_usec_total / (clock_sec_t)USEC_PER_SEC;
		wake_usec = monotonic_usec_total % (clock_usec_t)USEC_PER_SEC;
	} else {
		/*
		 * Get UTC time and corresponding sys time
		 */
		PEGetUTCTimeOfDay(&wake_sec, &wake_usec);
		wake_abs = mach_absolute_time();
		absolutetime_to_microtime(wake_abs, &wake_sys_sec, &wake_sys_usec);
	}

#if DEVELOPMENT || DEBUG
	os_log(OS_LOG_DEFAULT, "time at wake %lu s %d u from %s clock, abs %llu\n", (unsigned long)wake_sec, wake_usec, (has_monotonic_clock)?"monotonic":"UTC", wake_abs);
	if (has_monotonic_clock) {
		os_log(OS_LOG_DEFAULT, "UTC time %lu s %d u\n", (unsigned long)var_s, var_us);
	}
#endif /* DEVELOPMENT || DEBUG */

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

#if DEVELOPMENT || DEBUG
	struct clock_calend clock_calend_cp1 = clock_calend;
#endif /* DEVELOPMENT || DEBUG */

	/*
	 * We normally expect the UTC/monotonic clock to be always-on and
	 * produce greater readings than the sys counter.  There may be corner
	 * cases due to differing clock resolutions (the UTC/monotonic clock
	 * is likely lower) and errors reading the UTC/monotonic clock (some
	 * implementations return 0 on error) in which that doesn't hold true.
	 */
	if ((wake_sys_sec > wake_sec) || ((wake_sys_sec == wake_sec) && (wake_sys_usec > wake_usec))) {
		os_log_error(OS_LOG_DEFAULT, "WARNING: %s clock is less than sys clock at wake: %lu s %d u vs %lu s %d u, defaulting sleep time to zero\n", (has_monotonic_clock)?"monotonic":"UTC", (unsigned long)wake_sec, wake_usec, (unsigned long)wake_sys_sec, wake_sys_usec);
		mach_absolutetime_last_sleep = 0;
		goto done;
	}

	if (has_monotonic_clock) {
		/*
		 * compute the difference monotonic - sys;
		 * we already checked that monotonic time is
		 * greater than sys.
		 */
		diff_sec = wake_sec;
		diff_usec = wake_usec;
		// This macro stores the subtraction result in diff_sec and diff_usec
		TIME_SUB(diff_sec, wake_sys_sec, diff_usec, wake_sys_usec, USEC_PER_SEC);
		// This function converts diff_sec and diff_usec into a bintime
		clock2bintime(&diff_sec, &diff_usec, &bt);

		/*
		 * Safety belt: the monotonic clock will likely have a lower resolution than the sys counter.
		 * It's also possible that the device didn't fully transition to the powered-off state on
		 * the most recent sleep, so the sys counter may not have reset or may have only briefly
		 * turned off.  In that case it's possible for the difference between the monotonic clock
		 * and the sys counter to be less than the previously recorded value in clock_calend.basesleep.
		 * In that case simply record that we slept for 0 ticks.
		 */
		if ((bt.sec > clock_calend.basesleep.sec) ||
		    ((bt.sec == clock_calend.basesleep.sec) && (bt.frac > clock_calend.basesleep.frac))) {
			// last_sleep is the difference between (current monotonic - abs) and (last wake monotonic - abs)
			last_sleep_bt = bt;
			bintime_sub(&last_sleep_bt, &clock_calend.basesleep);

			bintime2absolutetime(&last_sleep_bt, &mach_absolutetime_last_sleep);
			mach_absolutetime_asleep += mach_absolutetime_last_sleep;

			// set basesleep to current monotonic - abs
			clock_calend.basesleep = bt;

			// update wall time
			bintime_add(&clock_calend.offset, &last_sleep_bt);
			bintime_add(&clock_calend.bintime, &last_sleep_bt);

			bintime2usclock(&last_sleep_bt, &var_s, &var_us);
			os_log(OS_LOG_DEFAULT, "time_slept (%lu s %d u)\n", (unsigned long)var_s, var_us);
		} else {
			bintime2usclock(&clock_calend.basesleep, &var_s, &var_us);
			os_log_error(OS_LOG_DEFAULT, "WARNING: last wake monotonic-sys time (%lu s %d u) is greater than current monotonic-sys time (%lu s %d u), defaulting sleep time to zero\n", (unsigned long)var_s, var_us, (unsigned long)diff_sec, diff_usec);

			mach_absolutetime_last_sleep = 0;
		}
	} else {
		/*
		 * set the wall time to the UTC value
		 */
		bt = get_scaled_time(wake_abs);
		bintime_add(&bt, &clock_calend.bintime);
		bintime2usclock(&bt, &wall_time_sec, &wall_time_usec);

		if (wall_time_sec > wake_sec || (wall_time_sec == wake_sec && wall_time_usec > wake_usec)) {
			os_log(OS_LOG_DEFAULT, "WARNING: wall time (%lu s %d u) is greater than current UTC time (%lu s %d u), defaulting sleep time to zero\n", (unsigned long)wall_time_sec, wall_time_usec, (unsigned long)wake_sec, wake_usec);

			mach_absolutetime_last_sleep = 0;
		} else {
			diff_sec = wake_sec;
			diff_usec = wake_usec;
			// This macro stores the subtraction result in diff_sec and diff_usec
			TIME_SUB(diff_sec, wall_time_sec, diff_usec, wall_time_usec, USEC_PER_SEC);
			// This function converts diff_sec and diff_usec into a bintime
			clock2bintime(&diff_sec, &diff_usec, &bt);

			// time slept in this case is the difference between PMU/RTC and wall time
			last_sleep_bt = bt;

			bintime2absolutetime(&last_sleep_bt, &mach_absolutetime_last_sleep);
			mach_absolutetime_asleep += mach_absolutetime_last_sleep;

			// update wall time
			bintime_add(&clock_calend.offset, &last_sleep_bt);
			bintime_add(&clock_calend.bintime, &last_sleep_bt);

			bintime2usclock(&last_sleep_bt, &var_s, &var_us);
			os_log(OS_LOG_DEFAULT, "time_slept (%lu s %d u)\n", (unsigned long)var_s, var_us);
		}
	}
done:
	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_CLOCK, MACH_EPOCH_CHANGE) | DBG_FUNC_NONE,
		(uintptr_t) mach_absolutetime_last_sleep,
		(uintptr_t) mach_absolutetime_asleep,
		(uintptr_t) (mach_absolutetime_last_sleep >> 32),
		(uintptr_t) (mach_absolutetime_asleep >> 32),
		0);

	commpage_update_mach_continuous_time(mach_absolutetime_asleep);
	adjust_cont_time_thread_calls();

#if DEVELOPMENT || DEBUG
	struct clock_calend clock_calend_cp = clock_calend;
#endif

	clock_unlock();
	splx(s);

#if DEVELOPMENT || DEBUG
	if (g_should_log_clock_adjustments) {
		print_all_clock_variables("clock_wakeup_calendar: BEFORE", NULL, NULL, NULL, NULL, &clock_calend_cp1);
		print_all_clock_variables("clock_wakeup_calendar: AFTER", NULL, NULL, NULL, NULL, &clock_calend_cp);
	}
#endif /* DEVELOPMENT || DEBUG */

	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}


/*
 * clock_get_boottime_nanotime:
 *
 * Return the boottime, used by sysctl.
 */
void
clock_get_boottime_nanotime(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	spl_t	s;

	s = splclock();
	clock_lock();

	*secs = (clock_sec_t)clock_boottime;
	*nanosecs = (clock_nsec_t)clock_boottime_usec * NSEC_PER_USEC;

	clock_unlock();
	splx(s);
}

/*
 * clock_get_boottime_microtime:
 *
 * Return the boottime, used by sysctl.
 */
void
clock_get_boottime_microtime(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs)
{
	spl_t	s;

	s = splclock();
	clock_lock();

	*secs = (clock_sec_t)clock_boottime;
	*microsecs = (clock_nsec_t)clock_boottime_usec;

	clock_unlock();
	splx(s);
}


/*
 * Wait / delay routines.
 */
static void
mach_wait_until_continue(
	__unused void	*parameter,
	wait_result_t	wresult)
{
	thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
	/*NOTREACHED*/
}

/*
 * mach_wait_until_trap: Suspend execution of calling thread until the specified time has passed
 *
 * Parameters:    args->deadline          Amount of time to wait
 *
 * Returns:        0                      Success
 *                !0                      Not success
 *
 */
kern_return_t
mach_wait_until_trap(
	struct mach_wait_until_trap_args *args)
{
	uint64_t	deadline = args->deadline;
	wait_result_t	wresult;

	wresult = assert_wait_deadline_with_leeway((event_t)mach_wait_until_trap, THREAD_ABORTSAFE,
	    TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
	if (wresult == THREAD_WAITING) {
		wresult = thread_block(mach_wait_until_continue);
	}

	return (wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS;
}
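
/*
 * For reference, a minimal user-space sketch of the corresponding API,
 * mach_wait_until() from <mach/mach_time.h>, which enters this trap.
 * Illustrative only; not part of this file:
 *
 *	#include <mach/mach_time.h>
 *
 *	// Sleep for roughly 10 ms: convert ns to absolute time units
 *	// using the timebase, then wait until the computed deadline.
 *	mach_timebase_info_data_t tb;
 *	mach_timebase_info(&tb);
 *	uint64_t ticks = (10ULL * 1000 * 1000) * tb.denom / tb.numer;
 *	mach_wait_until(mach_absolute_time() + ticks);
 */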

void
clock_delay_until(
	uint64_t		deadline)
{
	uint64_t		now = mach_absolute_time();

	if (now >= deadline) {
		return;
	}

	_clock_delay_until_deadline(deadline - now, deadline);
}

/*
 * Preserve the original precise interval that the client
 * requested for comparison to the spin threshold.
 */
void
_clock_delay_until_deadline(
	uint64_t		interval,
	uint64_t		deadline)
{
	_clock_delay_until_deadline_with_leeway(interval, deadline, 0);
}

/*
 * Like _clock_delay_until_deadline, but it accepts a
 * leeway value.
 */
void
_clock_delay_until_deadline_with_leeway(
	uint64_t		interval,
	uint64_t		deadline,
	uint64_t		leeway)
{
	if (interval == 0) {
		return;
	}

	if (ml_delay_should_spin(interval) ||
	    get_preemption_level() != 0 ||
	    ml_get_interrupts_enabled() == FALSE) {
		machine_delay_until(interval, deadline);
	} else {
		/*
		 * For now, assume a leeway request of 0 means the client does not want a leeway
		 * value. We may want to change this interpretation in the future.
		 */

		if (leeway) {
			assert_wait_deadline_with_leeway((event_t)clock_delay_until, THREAD_UNINT, TIMEOUT_URGENCY_LEEWAY, deadline, leeway);
		} else {
			assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline);
		}

		thread_block(THREAD_CONTINUE_NULL);
	}
}

void
delay_for_interval(
	uint32_t		interval,
	uint32_t		scale_factor)
{
	uint64_t		abstime;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	_clock_delay_until_deadline(abstime, mach_absolute_time() + abstime);
}

void
delay_for_interval_with_leeway(
	uint32_t		interval,
	uint32_t		leeway,
	uint32_t		scale_factor)
{
	uint64_t		abstime_interval;
	uint64_t		abstime_leeway;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime_interval);
	clock_interval_to_absolutetime_interval(leeway, scale_factor, &abstime_leeway);

	_clock_delay_until_deadline_with_leeway(abstime_interval, mach_absolute_time() + abstime_interval, abstime_leeway);
}

void
delay(
	int		usec)
{
	delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
}

/*
 * Miscellaneous routines.
 */
void
clock_interval_to_deadline(
	uint32_t		interval,
	uint32_t		scale_factor,
	uint64_t		*result)
{
	uint64_t	abstime;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	*result = mach_absolute_time() + abstime;
}

void
clock_absolutetime_interval_to_deadline(
	uint64_t		abstime,
	uint64_t		*result)
{
	*result = mach_absolute_time() + abstime;
}

void
clock_continuoustime_interval_to_deadline(
	uint64_t		conttime,
	uint64_t		*result)
{
	*result = mach_continuous_time() + conttime;
}

void
clock_get_uptime(
	uint64_t	*result)
{
	*result = mach_absolute_time();
}

void
clock_deadline_for_periodic_event(
	uint64_t		interval,
	uint64_t		abstime,
	uint64_t		*deadline)
{
	assert(interval != 0);

	*deadline += interval;

	if (*deadline <= abstime) {
		*deadline = abstime + interval;
		abstime = mach_absolute_time();

		if (*deadline <= abstime) {
			*deadline = abstime + interval;
		}
	}
}

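/*
 * mach_continuous_time() and mach_continuous_approximate_time() below read
 * mach_absolutetime_asleep without taking clock_lock: the sleep offset is
 * read before and after sampling the timebase, with a memory barrier in
 * between, and the sample is retried until both reads agree. Two equal
 * reads guarantee the offset did not change across a sleep/wake while the
 * timebase was being sampled, so (offset, timebase) form a consistent pair.
 */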
uint64_t
mach_continuous_time(void)
{
	while (1) {
		uint64_t read1 = mach_absolutetime_asleep;
		uint64_t absolute = mach_absolute_time();
		OSMemoryBarrier();
		uint64_t read2 = mach_absolutetime_asleep;

		if (__builtin_expect(read1 == read2, 1)) {
			return absolute + read1;
		}
	}
}

uint64_t
mach_continuous_approximate_time(void)
{
	while (1) {
		uint64_t read1 = mach_absolutetime_asleep;
		uint64_t absolute = mach_approximate_time();
		OSMemoryBarrier();
		uint64_t read2 = mach_absolutetime_asleep;

		if (__builtin_expect(read1 == read2, 1)) {
			return absolute + read1;
		}
	}
}

/*
 * continuoustime_to_absolutetime
 * Must be called with interrupts disabled
 * Returned value is only valid until the next update to
 * mach_continuous_time
 */
uint64_t
continuoustime_to_absolutetime(uint64_t conttime)
{
	if (conttime <= mach_absolutetime_asleep) {
		return 0;
	} else {
		return conttime - mach_absolutetime_asleep;
	}
}

/*
 * absolutetime_to_continuoustime
 * Must be called with interrupts disabled
 * Returned value is only valid until the next update to
 * mach_continuous_time
 */
uint64_t
absolutetime_to_continuoustime(uint64_t abstime)
{
	return abstime + mach_absolutetime_asleep;
}

#if CONFIG_DTRACE

/*
 * clock_get_calendar_nanotime_nowait
 *
 * Description:	Non-blocking version of clock_get_calendar_nanotime()
 *
 * Notes:	This function operates by separately tracking calendar time
 *		updates using a two element structure to copy the calendar
 *		state, which may be asynchronously modified.  It utilizes
 *		barrier instructions in the tracking process and in the local
 *		stable snapshot process in order to ensure that a consistent
 *		snapshot is used to perform the calculation.
 */
void
clock_get_calendar_nanotime_nowait(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	int i = 0;
	uint64_t	now;
	struct unlocked_clock_calend stable;
	struct bintime bt;

	for (;;) {
		stable = flipflop[i];	/* take snapshot */

		/*
		 * Use a barrier instruction to ensure atomicity.  We AND
		 * off the "in progress" bit to get the current generation
		 * count.
		 */
		os_atomic_andnot(&stable.gen, 1, relaxed);

		/*
		 * If an update _is_ in progress, the generation count will be
		 * off by one, if it _was_ in progress, it will be off by two,
		 * and if we caught it at a good time, it will be equal (and
		 * our snapshot is therefore stable).
		 */
		if (flipflop[i].gen == stable.gen) {
			break;
		}

		/* Switch to the other element of the flipflop, and try again. */
		i ^= 1;
	}

	now = mach_absolute_time();

	bt = get_scaled_time(now);

	bintime_add(&bt, &clock_calend.bintime);

	bintime2nsclock(&bt, secs, nanosecs);
}

static void
clock_track_calend_nowait(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		struct clock_calend tmp = clock_calend;

		/*
		 * Set the low bit of the generation count; since we use a
		 * barrier instruction to do this, we are guaranteed that this
		 * will flag an update in progress to an async caller trying
		 * to examine the contents.
		 */
		os_atomic_or(&flipflop[i].gen, 1, relaxed);

		flipflop[i].calend = tmp;

		/*
		 * Increment the generation count to clear the low bit to
		 * signal completion.  If a caller compares the generation
		 * count after taking a copy while in progress, the count
		 * will be off by two.
		 */
		os_atomic_inc(&flipflop[i].gen, relaxed);
	}
}

#endif	/* CONFIG_DTRACE */