osfmk/kern/clock.c (apple/xnu.git, xnu-6153.101.6)
1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 */
33 /*-
34 * Copyright (c) 1982, 1986, 1993
35 * The Regents of the University of California. All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)time.h 8.5 (Berkeley) 5/4/95
62 * $FreeBSD$
63 */
64
65 #include <mach/mach_types.h>
66
67 #include <kern/spl.h>
68 #include <kern/sched_prim.h>
69 #include <kern/thread.h>
70 #include <kern/clock.h>
71 #include <kern/host_notify.h>
72 #include <kern/thread_call.h>
73 #include <libkern/OSAtomic.h>
74
75 #include <IOKit/IOPlatformExpert.h>
76
77 #include <machine/commpage.h>
78 #include <machine/config.h>
79 #include <machine/machine_routines.h>
80
81 #include <mach/mach_traps.h>
82 #include <mach/mach_time.h>
83
84 #include <sys/kdebug.h>
85 #include <sys/timex.h>
86 #include <kern/arithmetic_128.h>
87 #include <os/log.h>
88
89 uint32_t hz_tick_interval = 1;
90 #if !HAS_CONTINUOUS_HWCLOCK
91 static uint64_t has_monotonic_clock = 0;
92 #endif
93
94 decl_simple_lock_data(, clock_lock);
95 lck_grp_attr_t * settime_lock_grp_attr;
96 lck_grp_t * settime_lock_grp;
97 lck_attr_t * settime_lock_attr;
98 lck_mtx_t settime_lock;
99
100 #define clock_lock() \
101 simple_lock(&clock_lock, LCK_GRP_NULL)
102
103 #define clock_unlock() \
104 simple_unlock(&clock_lock)
105
106 #define clock_lock_init() \
107 simple_lock_init(&clock_lock, 0)
108
109 #ifdef kdp_simple_lock_is_acquired
110 boolean_t
111 kdp_clock_is_locked()
112 {
113 return kdp_simple_lock_is_acquired(&clock_lock);
114 }
115 #endif
116
117 struct bintime {
118 time_t sec;
119 uint64_t frac;
120 };
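/*
 * A bintime is a fixed-point timestamp: 'sec' holds whole seconds and 'frac'
 * holds the fractional part in units of 2^-64 of a second.  For example,
 * frac == (1ULL << 63) represents half a second, and one nanosecond
 * corresponds to roughly 18446744073 frac units (2^64 / NSEC_PER_SEC),
 * the constant used by bintime_addns() and bintime_subns() below.
 */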
121
122 static __inline void
123 bintime_addx(struct bintime *_bt, uint64_t _x)
124 {
125 uint64_t _u;
126
127 _u = _bt->frac;
128 _bt->frac += _x;
129 if (_u > _bt->frac) {
130 _bt->sec++;
131 }
132 }
133
134 static __inline void
135 bintime_subx(struct bintime *_bt, uint64_t _x)
136 {
137 uint64_t _u;
138
139 _u = _bt->frac;
140 _bt->frac -= _x;
141 if (_u < _bt->frac) {
142 _bt->sec--;
143 }
144 }
145
146 static __inline void
147 bintime_addns(struct bintime *bt, uint64_t ns)
148 {
149 bt->sec += ns / (uint64_t)NSEC_PER_SEC;
150 ns = ns % (uint64_t)NSEC_PER_SEC;
151 if (ns) {
152 /* 18446744073 = int(2^64 / NSEC_PER_SEC) */
153 ns = ns * (uint64_t)18446744073LL;
154 bintime_addx(bt, ns);
155 }
156 }
157
158 static __inline void
159 bintime_subns(struct bintime *bt, uint64_t ns)
160 {
161 bt->sec -= ns / (uint64_t)NSEC_PER_SEC;
162 ns = ns % (uint64_t)NSEC_PER_SEC;
163 if (ns) {
164 /* 18446744073 = int(2^64 / NSEC_PER_SEC) */
165 ns = ns * (uint64_t)18446744073LL;
166 bintime_subx(bt, ns);
167 }
168 }
169
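/*
 * bintime_addxns() adds a * xns to bt, where xns is a signed quantity of
 * nanoseconds expressed as a 64-bit binary fraction of a nanosecond
 * (i.e. the amount added is a * xns / 2^64 ns).  The high 64 bits of the
 * 128-bit product a * |xns| are whole nanoseconds; the low 64 bits, divided
 * by NSEC_PER_SEC, become a 2^-64 fraction of a second.
 */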
170 static __inline void
171 bintime_addxns(struct bintime *bt, uint64_t a, int64_t xns)
172 {
173 uint64_t uxns = (xns > 0)?(uint64_t)xns:(uint64_t)-xns;
174 uint64_t ns = multi_overflow(a, uxns);
175 if (xns > 0) {
176 if (ns) {
177 bintime_addns(bt, ns);
178 }
179 ns = (a * uxns) / (uint64_t)NSEC_PER_SEC;
180 bintime_addx(bt, ns);
181 } else {
182 if (ns) {
183 bintime_subns(bt, ns);
184 }
185 ns = (a * uxns) / (uint64_t)NSEC_PER_SEC;
186 bintime_subx(bt, ns);
187 }
188 }
189
190
191 static __inline void
192 bintime_add(struct bintime *_bt, const struct bintime *_bt2)
193 {
194 uint64_t _u;
195
196 _u = _bt->frac;
197 _bt->frac += _bt2->frac;
198 if (_u > _bt->frac) {
199 _bt->sec++;
200 }
201 _bt->sec += _bt2->sec;
202 }
203
204 static __inline void
205 bintime_sub(struct bintime *_bt, const struct bintime *_bt2)
206 {
207 uint64_t _u;
208
209 _u = _bt->frac;
210 _bt->frac -= _bt2->frac;
211 if (_u < _bt->frac) {
212 _bt->sec--;
213 }
214 _bt->sec -= _bt2->sec;
215 }
216
217 static __inline void
218 clock2bintime(const clock_sec_t *secs, const clock_usec_t *microsecs, struct bintime *_bt)
219 {
220 _bt->sec = *secs;
221 /* 18446744073709 = int(2^64 / 1000000) */
222 _bt->frac = *microsecs * (uint64_t)18446744073709LL;
223 }
224
225 static __inline void
226 bintime2usclock(const struct bintime *_bt, clock_sec_t *secs, clock_usec_t *microsecs)
227 {
228 *secs = _bt->sec;
229 *microsecs = ((uint64_t)USEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32;
230 }
231
232 static __inline void
233 bintime2nsclock(const struct bintime *_bt, clock_sec_t *secs, clock_usec_t *nanosecs)
234 {
235 *secs = _bt->sec;
236 *nanosecs = ((uint64_t)NSEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32;
237 }
238
239 #if !defined(HAS_CONTINUOUS_HWCLOCK)
240 static __inline void
241 bintime2absolutetime(const struct bintime *_bt, uint64_t *abs)
242 {
243 uint64_t nsec;
244 nsec = (uint64_t) _bt->sec * (uint64_t)NSEC_PER_SEC + (((uint64_t)NSEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32);
245 nanoseconds_to_absolutetime(nsec, abs);
246 }
247
248 struct latched_time {
249 uint64_t monotonic_time_usec;
250 uint64_t mach_time;
251 };
252
253 extern int
254 kernel_sysctlbyname(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
255
256 #endif
257 /*
258 * Time of day (calendar) variables.
259 *
260 * Algorithm:
261 *
262 * TOD <- bintime + delta*scale
263 *
264 * where :
265 * bintime is a cumulative offset that includes boottime and the scaled time elapsed between boottime and the last scale update.
266 * delta is ticks elapsed since last scale update.
267 * scale is computed according to an adjustment provided by ntp_kern.
268 */
269 static struct clock_calend {
270 uint64_t s_scale_ns; /* scale to apply for each second elapsed, it converts in ns */
271 int64_t s_adj_nsx; /* additional adj to apply for each second elapsed, it is expressed in 64 bit frac of ns */
272 uint64_t tick_scale_x; /* scale to apply for each tick elapsed, it converts in 64 bit frac of s */
273 uint64_t offset_count; /* abs time from which apply current scales */
274 struct bintime offset; /* cumulative offset expressed in (sec, 64 bits frac of a second) */
275 struct bintime bintime; /* cumulative offset (it includes bootime) expressed in (sec, 64 bits frac of a second) */
276 struct bintime boottime; /* boot time expressed in (sec, 64 bits frac of a second) */
277 #if !HAS_CONTINUOUS_HWCLOCK
278 struct bintime basesleep;
279 #endif
280 } clock_calend;
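/*
 * In terms of the fields above, the current calendar time is computed as
 *
 *   TOD(now) = clock_calend.bintime
 *            + scale_delta(now - clock_calend.offset_count,
 *                          tick_scale_x, s_scale_ns, s_adj_nsx)
 *
 * which is what get_scaled_time() followed by bintime_add() do in the
 * clock_get_calendar_* routines below.
 */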
281
282 static uint64_t ticks_per_sec; /* ticks in a second (expressed in abs time) */
283
284 #if DEVELOPMENT || DEBUG
285 extern int g_should_log_clock_adjustments;
286
287 static void print_all_clock_variables(const char*, clock_sec_t* pmu_secs, clock_usec_t* pmu_usec, clock_sec_t* sys_secs, clock_usec_t* sys_usec, struct clock_calend* calend_cp);
288 static void print_all_clock_variables_internal(const char *, struct clock_calend* calend_cp);
289 #else
290 #define print_all_clock_variables(...) do { } while (0)
291 #define print_all_clock_variables_internal(...) do { } while (0)
292 #endif
293
294 #if CONFIG_DTRACE
295
296
297 /*
298 * Unlocked calendar flipflop; this is used to track a clock_calend such
299 * that we can safely access a snapshot of a valid clock_calend structure
300 * without needing to take any locks to do it.
301 *
302 * The trick is to use a generation count and set the low bit when it is
303 * being updated/read; by doing this, we guarantee, through use of the
304 * os_atomic functions, that the generation is incremented when the bit
305 * is cleared atomically (by using a 1 bit add).
306 */
307 static struct unlocked_clock_calend {
308 struct clock_calend calend; /* copy of calendar */
309 uint32_t gen; /* generation count */
310 } flipflop[2];
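/*
 * Writer protocol (clock_track_calend_nowait): set the low bit of 'gen',
 * copy clock_calend into 'calend', then increment 'gen', which clears the
 * low bit.  Reader protocol (clock_get_calendar_nanotime_nowait): take a
 * snapshot, mask off the low bit, and compare against the live 'gen';
 * the snapshot is stable only if the two values match.
 */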
311
312 static void clock_track_calend_nowait(void);
313
314 #endif
315
316 void _clock_delay_until_deadline(uint64_t interval, uint64_t deadline);
317 void _clock_delay_until_deadline_with_leeway(uint64_t interval, uint64_t deadline, uint64_t leeway);
318
319 /* Boottime variables*/
320 static uint64_t clock_boottime;
321 static uint32_t clock_boottime_usec;
322
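/*
 * TIME_ADD adds (secs, frac) to (rsecs, rfrac), carrying into the seconds
 * when the fractional part reaches 'unit'; TIME_SUB subtracts with a borrow.
 * For example, TIME_SUB(s, s2, us, us2, USEC_PER_SEC) computes
 * (s, us) -= (s2, us2) with microsecond fractions.
 */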
323 #define TIME_ADD(rsecs, secs, rfrac, frac, unit) \
324 MACRO_BEGIN \
325 if (((rfrac) += (frac)) >= (unit)) { \
326 (rfrac) -= (unit); \
327 (rsecs) += 1; \
328 } \
329 (rsecs) += (secs); \
330 MACRO_END
331
332 #define TIME_SUB(rsecs, secs, rfrac, frac, unit) \
333 MACRO_BEGIN \
334 if ((int)((rfrac) -= (frac)) < 0) { \
335 (rfrac) += (unit); \
336 (rsecs) -= 1; \
337 } \
338 (rsecs) -= (secs); \
339 MACRO_END
340
341 /*
342 * clock_config:
343 *
344 * Called once at boot to configure the clock subsystem.
345 */
346 void
347 clock_config(void)
348 {
349 clock_lock_init();
350
351 settime_lock_grp_attr = lck_grp_attr_alloc_init();
352 settime_lock_grp = lck_grp_alloc_init("settime grp", settime_lock_grp_attr);
353 settime_lock_attr = lck_attr_alloc_init();
354 lck_mtx_init(&settime_lock, settime_lock_grp, settime_lock_attr);
355
356 clock_oldconfig();
357
358 ntp_init();
359
360 nanoseconds_to_absolutetime((uint64_t)NSEC_PER_SEC, &ticks_per_sec);
361 }
362
363 /*
364 * clock_init:
365 *
366 * Called on a processor each time started.
367 */
368 void
369 clock_init(void)
370 {
371 clock_oldinit();
372 }
373
374 /*
375 * clock_timebase_init:
376 *
377 * Called by machine dependent code
378 * to initialize areas dependent on the
379 * timebase value. May be called multiple
380 * times during start up.
381 */
382 void
383 clock_timebase_init(void)
384 {
385 uint64_t abstime;
386
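/* hz_tick_interval is one hz tick (1/100 of a second) expressed in absolute-time units. */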
387 nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
388 hz_tick_interval = (uint32_t)abstime;
389
390 sched_timebase_init();
391 }
392
393 /*
394 * mach_timebase_info_trap:
395 *
396 * User trap returns timebase constant.
397 */
398 kern_return_t
399 mach_timebase_info_trap(
400 struct mach_timebase_info_trap_args *args)
401 {
402 mach_vm_address_t out_info_addr = args->info;
403 mach_timebase_info_data_t info = {};
404
405 clock_timebase_info(&info);
406
407 copyout((void *)&info, out_info_addr, sizeof(info));
408
409 return KERN_SUCCESS;
410 }
411
412 /*
413 * Calendar routines.
414 */
415
416 /*
417 * clock_get_calendar_microtime:
418 *
419 * Returns the current calendar value,
420 * microseconds as the fraction.
421 */
422 void
423 clock_get_calendar_microtime(
424 clock_sec_t *secs,
425 clock_usec_t *microsecs)
426 {
427 clock_get_calendar_absolute_and_microtime(secs, microsecs, NULL);
428 }
429
430 /*
431 * get_scale_factors_from_adj:
432 *
433 * computes scale factors from the value given in adjustment.
434 *
435 * Part of the code has been taken from tc_windup of FreeBSD
436 * written by Poul-Henning Kamp <phk@FreeBSD.ORG>, Julien Ridoux and
437 * Konstantin Belousov.
438 * https://github.com/freebsd/freebsd/blob/master/sys/kern/kern_tc.c
439 */
440 static void
441 get_scale_factors_from_adj(int64_t adjustment, uint64_t* tick_scale_x, uint64_t* s_scale_ns, int64_t* s_adj_nsx)
442 {
443 uint64_t scale;
444 int64_t nano, frac;
445
446 /*-
447 * Calculating the scaling factor. We want the number of 1/2^64
448 * fractions of a second per period of the hardware counter, taking
449 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
450 * processing provides us with.
451 *
452 * The th_adjustment is nanoseconds per second with 32 bit binary
453 * fraction and we want 64 bit binary fraction of second:
454 *
455 * x = a * 2^32 / 10^9 = a * 4.294967296
456 *
457 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
458 * we can only multiply by about 850 without overflowing, that
459 * leaves no suitably precise fractions for multiply before divide.
460 *
461 * Divide before multiply with a fraction of 2199/512 results in a
462 * systematic undercompensation of 10PPM of th_adjustment. On a
463 * 5000PPM adjustment this is a 0.05PPM error. This is acceptable.
464 *
465 * We happily sacrifice the lowest of the 64 bits of our result
466 * to the goddess of code clarity.
467 *
468 */
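/*
 * Worked example of the fraction above: 2199/512 = 4.294921875 versus the
 * exact 2^32/10^9 = 4.294967296, a relative error of about 1.06e-5, i.e.
 * ~10PPM of the adjustment; at the maximum 5000PPM adjustment that is
 * roughly 0.05PPM of absolute error, as noted above.
 */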
469 scale = (uint64_t)1 << 63;
470 scale += (adjustment / 1024) * 2199;
471 scale /= ticks_per_sec;
472 *tick_scale_x = scale * 2;
473
474 /*
475 * hi part of adj
476 * it contains ns (without fraction) to add to the next sec.
477 * Get ns scale factor for the next sec.
478 */
479 nano = (adjustment > 0)? adjustment >> 32 : -((-adjustment) >> 32);
480 scale = (uint64_t) NSEC_PER_SEC;
481 scale += nano;
482 *s_scale_ns = scale;
483
484 /*
485 * lo part of adj
486 * it contains 32 bit frac of ns to add to the next sec.
487 * Keep it as additional adjustment for the next sec.
488 */
489 frac = (adjustment > 0)? ((uint32_t) adjustment) : -((uint32_t) (-adjustment));
490 *s_adj_nsx = (frac > 0)? frac << 32 : -((-frac) << 32);
491
492 return;
493 }
494
495 /*
496 * scale_delta:
497 *
498 * returns a bintime struct representing delta scaled accordingly to the
499 * scale factors provided to this function.
500 */
501 static struct bintime
502 scale_delta(uint64_t delta, uint64_t tick_scale_x, uint64_t s_scale_ns, int64_t s_adj_nsx)
503 {
504 uint64_t sec, new_ns, over;
505 struct bintime bt;
506
507 bt.sec = 0;
508 bt.frac = 0;
509
510 /*
511 * If more than one second has elapsed,
512 * scale fully elapsed seconds using scale factors for seconds.
513 * s_scale_ns -> scales sec to ns.
514 * s_adj_nsx -> additional adj expressed in 64 bit frac of ns to apply to each sec.
515 */
516 if (delta > ticks_per_sec) {
517 sec = (delta / ticks_per_sec);
518 new_ns = sec * s_scale_ns;
519 bintime_addns(&bt, new_ns);
520 if (s_adj_nsx) {
521 if (sec == 1) {
522 /* shortcut, no overflow can occur */
523 if (s_adj_nsx > 0) {
524 bintime_addx(&bt, (uint64_t)s_adj_nsx / (uint64_t)NSEC_PER_SEC);
525 } else {
526 bintime_subx(&bt, (uint64_t)-s_adj_nsx / (uint64_t)NSEC_PER_SEC);
527 }
528 } else {
529 /*
530 * s_adj_nsx is 64 bit frac of ns.
531 * sec*s_adj_nsx might overflow in int64_t.
532 * use bintime_addxns to not lose overflowed ns.
533 */
534 bintime_addxns(&bt, sec, s_adj_nsx);
535 }
536 }
537 delta = (delta % ticks_per_sec);
538 }
539
540 over = multi_overflow(tick_scale_x, delta);
541 if (over) {
542 bt.sec += over;
543 }
544
545 /*
546 * scale elapsed ticks using the scale factor for ticks.
547 */
548 bintime_addx(&bt, delta * tick_scale_x);
549
550 return bt;
551 }
552
553 /*
554 * get_scaled_time:
555 *
556 * returns the scaled value of the time elapsed since the last time
557 * the scale factors were updated.
558 */
559 static struct bintime
560 get_scaled_time(uint64_t now)
561 {
562 uint64_t delta;
563
564 /*
565 * Compute ticks elapsed since last scale update.
566 * This time will be scaled according to the value given by ntp kern.
567 */
568 delta = now - clock_calend.offset_count;
569
570 return scale_delta(delta, clock_calend.tick_scale_x, clock_calend.s_scale_ns, clock_calend.s_adj_nsx);
571 }
572
573 static void
574 clock_get_calendar_absolute_and_microtime_locked(
575 clock_sec_t *secs,
576 clock_usec_t *microsecs,
577 uint64_t *abstime)
578 {
579 uint64_t now;
580 struct bintime bt;
581
582 now = mach_absolute_time();
583 if (abstime) {
584 *abstime = now;
585 }
586
587 bt = get_scaled_time(now);
588 bintime_add(&bt, &clock_calend.bintime);
589 bintime2usclock(&bt, secs, microsecs);
590 }
591
592 static void
593 clock_get_calendar_absolute_and_nanotime_locked(
594 clock_sec_t *secs,
595 clock_usec_t *nanosecs,
596 uint64_t *abstime)
597 {
598 uint64_t now;
599 struct bintime bt;
600
601 now = mach_absolute_time();
602 if (abstime) {
603 *abstime = now;
604 }
605
606 bt = get_scaled_time(now);
607 bintime_add(&bt, &clock_calend.bintime);
608 bintime2nsclock(&bt, secs, nanosecs);
609 }
610
611 /*
612 * clock_get_calendar_absolute_and_microtime:
613 *
614 * Returns the current calendar value,
615 * microseconds as the fraction. Also
616 * returns mach_absolute_time if abstime
617 * is not NULL.
618 */
619 void
620 clock_get_calendar_absolute_and_microtime(
621 clock_sec_t *secs,
622 clock_usec_t *microsecs,
623 uint64_t *abstime)
624 {
625 spl_t s;
626
627 s = splclock();
628 clock_lock();
629
630 clock_get_calendar_absolute_and_microtime_locked(secs, microsecs, abstime);
631
632 clock_unlock();
633 splx(s);
634 }
635
636 /*
637 * clock_get_calendar_nanotime:
638 *
639 * Returns the current calendar value,
640 * nanoseconds as the fraction.
641 *
642 * Since we do not have an interface to
643 * set the calendar with resolution greater
644 * than a microsecond, we honor that here.
645 */
646 void
647 clock_get_calendar_nanotime(
648 clock_sec_t *secs,
649 clock_nsec_t *nanosecs)
650 {
651 spl_t s;
652
653 s = splclock();
654 clock_lock();
655
656 clock_get_calendar_absolute_and_nanotime_locked(secs, nanosecs, NULL);
657
658 clock_unlock();
659 splx(s);
660 }
661
662 /*
663 * clock_gettimeofday:
664 *
665 * Kernel interface for commpage implementation of
666 * gettimeofday() syscall.
667 *
668 * Returns the current calendar value, and updates the
669 * commpage info as appropriate. Because most calls to
670 * gettimeofday() are handled in user mode by the commpage,
671 * this routine should be used infrequently.
672 */
673 void
674 clock_gettimeofday(
675 clock_sec_t *secs,
676 clock_usec_t *microsecs)
677 {
678 clock_gettimeofday_and_absolute_time(secs, microsecs, NULL);
679 }
680
681 void
682 clock_gettimeofday_and_absolute_time(
683 clock_sec_t *secs,
684 clock_usec_t *microsecs,
685 uint64_t *mach_time)
686 {
687 uint64_t now;
688 spl_t s;
689 struct bintime bt;
690
691 s = splclock();
692 clock_lock();
693
694 now = mach_absolute_time();
695 bt = get_scaled_time(now);
696 bintime_add(&bt, &clock_calend.bintime);
697 bintime2usclock(&bt, secs, microsecs);
698
699 clock_gettimeofday_set_commpage(now, bt.sec, bt.frac, clock_calend.tick_scale_x, ticks_per_sec);
700
701 clock_unlock();
702 splx(s);
703
704 if (mach_time) {
705 *mach_time = now;
706 }
707 }
708
709 /*
710 * clock_set_calendar_microtime:
711 *
712 * Sets the current calendar value by
713 * recalculating the epoch and offset
714 * from the system clock.
715 *
716 * Also adjusts the boottime to keep the
717 * value consistent, writes the new
718 * calendar value to the platform clock,
719 * and sends calendar change notifications.
720 */
721 void
722 clock_set_calendar_microtime(
723 clock_sec_t secs,
724 clock_usec_t microsecs)
725 {
726 uint64_t absolutesys;
727 clock_sec_t newsecs;
728 clock_sec_t oldsecs;
729 clock_usec_t newmicrosecs;
730 clock_usec_t oldmicrosecs;
731 uint64_t commpage_value;
732 spl_t s;
733 struct bintime bt;
734 clock_sec_t deltasecs;
735 clock_usec_t deltamicrosecs;
736
737 newsecs = secs;
738 newmicrosecs = microsecs;
739
740 /*
741 * The settime_lock mutex is used to prevent racing settimeofday() calls from updating
742 * the wall clock and the platform clock concurrently.
743 *
744 * clock_lock cannot be used for this purpose because it is acquired from interrupt
745 * context and requires interrupts to be disabled, whereas updating the platform clock
746 * must be done with interrupts enabled.
747 */
748 lck_mtx_lock(&settime_lock);
749
750 s = splclock();
751 clock_lock();
752
753 #if DEVELOPMENT || DEBUG
754 struct clock_calend clock_calend_cp = clock_calend;
755 #endif
756 commpage_disable_timestamp();
757
758 /*
759 * Adjust the boottime based on the delta.
760 */
761 clock_get_calendar_absolute_and_microtime_locked(&oldsecs, &oldmicrosecs, &absolutesys);
762
763 #if DEVELOPMENT || DEBUG
764 if (g_should_log_clock_adjustments) {
765 os_log(OS_LOG_DEFAULT, "%s wall %lu s %d u computed with %llu abs\n",
766 __func__, (unsigned long)oldsecs, oldmicrosecs, absolutesys);
767 os_log(OS_LOG_DEFAULT, "%s requested %lu s %d u\n",
768 __func__, (unsigned long)secs, microsecs );
769 }
770 #endif
771
772 if (oldsecs < secs || (oldsecs == secs && oldmicrosecs < microsecs)) {
773 // moving forwards
774 deltasecs = secs;
775 deltamicrosecs = microsecs;
776
777 TIME_SUB(deltasecs, oldsecs, deltamicrosecs, oldmicrosecs, USEC_PER_SEC);
778
779 TIME_ADD(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
780 clock2bintime(&deltasecs, &deltamicrosecs, &bt);
781 bintime_add(&clock_calend.boottime, &bt);
782 } else {
783 // moving backwards
784 deltasecs = oldsecs;
785 deltamicrosecs = oldmicrosecs;
786
787 TIME_SUB(deltasecs, secs, deltamicrosecs, microsecs, USEC_PER_SEC);
788
789 TIME_SUB(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
790 clock2bintime(&deltasecs, &deltamicrosecs, &bt);
791 bintime_sub(&clock_calend.boottime, &bt);
792 }
793
794 clock_calend.bintime = clock_calend.boottime;
795 bintime_add(&clock_calend.bintime, &clock_calend.offset);
796
797 clock2bintime((clock_sec_t *) &secs, (clock_usec_t *) &microsecs, &bt);
798
799 clock_gettimeofday_set_commpage(absolutesys, bt.sec, bt.frac, clock_calend.tick_scale_x, ticks_per_sec);
800
801 #if DEVELOPMENT || DEBUG
802 struct clock_calend clock_calend_cp1 = clock_calend;
803 #endif
804
805 commpage_value = clock_boottime * USEC_PER_SEC + clock_boottime_usec;
806
807 clock_unlock();
808 splx(s);
809
810 /*
811 * Set the new value for the platform clock.
812 * This call might block, so interrupts must be enabled.
813 */
814 #if DEVELOPMENT || DEBUG
815 uint64_t now_b = mach_absolute_time();
816 #endif
817
818 PESetUTCTimeOfDay(newsecs, newmicrosecs);
819
820 #if DEVELOPMENT || DEBUG
821 uint64_t now_a = mach_absolute_time();
822 if (g_should_log_clock_adjustments) {
823 os_log(OS_LOG_DEFAULT, "%s mach bef PESet %llu mach aft %llu \n", __func__, now_b, now_a);
824 }
825 #endif
826
827 print_all_clock_variables_internal(__func__, &clock_calend_cp);
828 print_all_clock_variables_internal(__func__, &clock_calend_cp1);
829
830 commpage_update_boottime(commpage_value);
831
832 /*
833 * Send host notifications.
834 */
835 host_notify_calendar_change();
836 host_notify_calendar_set();
837
838 #if CONFIG_DTRACE
839 clock_track_calend_nowait();
840 #endif
841
842 lck_mtx_unlock(&settime_lock);
843 }
844
845 uint64_t mach_absolutetime_asleep = 0;
846 uint64_t mach_absolutetime_last_sleep = 0;
847
848 void
849 clock_get_calendar_uptime(clock_sec_t *secs)
850 {
851 uint64_t now;
852 spl_t s;
853 struct bintime bt;
854
855 s = splclock();
856 clock_lock();
857
858 now = mach_absolute_time();
859
860 bt = get_scaled_time(now);
861 bintime_add(&bt, &clock_calend.offset);
862
863 *secs = bt.sec;
864
865 clock_unlock();
866 splx(s);
867 }
868
869
870 /*
871 * clock_update_calendar:
872 *
873 * called by ntp timer to update scale factors.
874 */
875 void
876 clock_update_calendar(void)
877 {
878 uint64_t now, delta;
879 struct bintime bt;
880 spl_t s;
881 int64_t adjustment;
882
883 s = splclock();
884 clock_lock();
885
886 now = mach_absolute_time();
887
888 /*
889 * scale the time elapsed since the last update and
890 * add it to offset.
891 */
892 bt = get_scaled_time(now);
893 bintime_add(&clock_calend.offset, &bt);
894
895 /*
896 * update the base from which the next scale factors will be applied.
897 */
898 delta = now - clock_calend.offset_count;
899 clock_calend.offset_count += delta;
900
901 clock_calend.bintime = clock_calend.offset;
902 bintime_add(&clock_calend.bintime, &clock_calend.boottime);
903
904 /*
905 * recompute next adjustment.
906 */
907 ntp_update_second(&adjustment, clock_calend.bintime.sec);
908
909 #if DEVELOPMENT || DEBUG
910 if (g_should_log_clock_adjustments) {
911 os_log(OS_LOG_DEFAULT, "%s adjustment %lld\n", __func__, adjustment);
912 }
913 #endif
914
915 /*
916 * recompute the scale factors.
917 */
918 get_scale_factors_from_adj(adjustment, &clock_calend.tick_scale_x, &clock_calend.s_scale_ns, &clock_calend.s_adj_nsx);
919
920 clock_gettimeofday_set_commpage(now, clock_calend.bintime.sec, clock_calend.bintime.frac, clock_calend.tick_scale_x, ticks_per_sec);
921
922 #if DEVELOPMENT || DEBUG
923 struct clock_calend calend_cp = clock_calend;
924 #endif
925
926 clock_unlock();
927 splx(s);
928
929 print_all_clock_variables(__func__, NULL, NULL, NULL, NULL, &calend_cp);
930 }
931
932
933 #if DEVELOPMENT || DEBUG
934
935 void
936 print_all_clock_variables_internal(const char* func, struct clock_calend* clock_calend_cp)
937 {
938 clock_sec_t offset_secs;
939 clock_usec_t offset_microsecs;
940 clock_sec_t bintime_secs;
941 clock_usec_t bintime_microsecs;
942 clock_sec_t boottime_secs;
943 clock_usec_t boottime_microsecs;
944
945 if (!g_should_log_clock_adjustments) {
946 return;
947 }
948
949 bintime2usclock(&clock_calend_cp->offset, &offset_secs, &offset_microsecs);
950 bintime2usclock(&clock_calend_cp->bintime, &bintime_secs, &bintime_microsecs);
951 bintime2usclock(&clock_calend_cp->boottime, &boottime_secs, &boottime_microsecs);
952
953 os_log(OS_LOG_DEFAULT, "%s s_scale_ns %llu s_adj_nsx %lld tick_scale_x %llu offset_count %llu\n",
954 func, clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx,
955 clock_calend_cp->tick_scale_x, clock_calend_cp->offset_count);
956 os_log(OS_LOG_DEFAULT, "%s offset.sec %ld offset.frac %llu offset_secs %lu offset_microsecs %d\n",
957 func, clock_calend_cp->offset.sec, clock_calend_cp->offset.frac,
958 (unsigned long)offset_secs, offset_microsecs);
959 os_log(OS_LOG_DEFAULT, "%s bintime.sec %ld bintime.frac %llu bintime_secs %lu bintime_microsecs %d\n",
960 func, clock_calend_cp->bintime.sec, clock_calend_cp->bintime.frac,
961 (unsigned long)bintime_secs, bintime_microsecs);
962 os_log(OS_LOG_DEFAULT, "%s boottime.sec %ld boottime.frac %llu boottime_secs %lu boottime_microsecs %d\n",
963 func, clock_calend_cp->boottime.sec, clock_calend_cp->boottime.frac,
964 (unsigned long)boottime_secs, boottime_microsecs);
965
966 #if !HAS_CONTINUOUS_HWCLOCK
967 clock_sec_t basesleep_secs;
968 clock_usec_t basesleep_microsecs;
969
970 bintime2usclock(&clock_calend_cp->basesleep, &basesleep_secs, &basesleep_microsecs);
971 os_log(OS_LOG_DEFAULT, "%s basesleep.sec %ld basesleep.frac %llu basesleep_secs %lu basesleep_microsecs %d\n",
972 func, clock_calend_cp->basesleep.sec, clock_calend_cp->basesleep.frac,
973 (unsigned long)basesleep_secs, basesleep_microsecs);
974 #endif
975 }
976
977
978 void
979 print_all_clock_variables(const char* func, clock_sec_t* pmu_secs, clock_usec_t* pmu_usec, clock_sec_t* sys_secs, clock_usec_t* sys_usec, struct clock_calend* clock_calend_cp)
980 {
981 if (!g_should_log_clock_adjustments) {
982 return;
983 }
984
985 struct bintime bt;
986 clock_sec_t wall_secs;
987 clock_usec_t wall_microsecs;
988 uint64_t now;
989 uint64_t delta;
990
991 if (pmu_secs) {
992 os_log(OS_LOG_DEFAULT, "%s PMU %lu s %d u \n", func, (unsigned long)*pmu_secs, *pmu_usec);
993 }
994 if (sys_secs) {
995 os_log(OS_LOG_DEFAULT, "%s sys %lu s %d u \n", func, (unsigned long)*sys_secs, *sys_usec);
996 }
997
998 print_all_clock_variables_internal(func, clock_calend_cp);
999
1000 now = mach_absolute_time();
1001 delta = now - clock_calend_cp->offset_count;
1002
1003 bt = scale_delta(delta, clock_calend_cp->tick_scale_x, clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx);
1004 bintime_add(&bt, &clock_calend_cp->bintime);
1005 bintime2usclock(&bt, &wall_secs, &wall_microsecs);
1006
1007 os_log(OS_LOG_DEFAULT, "%s wall %lu s %d u computed with %llu abs\n",
1008 func, (unsigned long)wall_secs, wall_microsecs, now);
1009 }
1010
1011
1012 #endif /* DEVELOPMENT || DEBUG */
1013
1014
1015 /*
1016 * clock_initialize_calendar:
1017 *
1018 * Set the calendar and related clocks
1019 * from the platform clock at boot.
1020 *
1021 * Also sends host notifications.
1022 */
1023 void
1024 clock_initialize_calendar(void)
1025 {
1026 clock_sec_t sys; // sleepless time since boot in seconds
1027 clock_sec_t secs; // Current UTC time
1028 clock_sec_t utc_offset_secs; // Difference between the current UTC time and the sleepless time since boot
1029 clock_usec_t microsys;
1030 clock_usec_t microsecs;
1031 clock_usec_t utc_offset_microsecs;
1032 spl_t s;
1033 struct bintime bt;
1034 #if !HAS_CONTINUOUS_HWCLOCK
1035 struct bintime monotonic_bt;
1036 struct latched_time monotonic_time;
1037 uint64_t monotonic_usec_total;
1038 clock_sec_t sys2, monotonic_sec;
1039 clock_usec_t microsys2, monotonic_usec;
1040 size_t size;
1041
1042 #endif
1043 //Get the UTC time and corresponding sys time
1044 PEGetUTCTimeOfDay(&secs, &microsecs);
1045 clock_get_system_microtime(&sys, &microsys);
1046
1047 #if !HAS_CONTINUOUS_HWCLOCK
1048 /*
1049 * If the platform has a monotonic clock, use kern.monotonicclock_usecs
1050 * to estimate the sleep/wake time, otherwise use the UTC time to estimate
1051 * the sleep time.
1052 */
1053 size = sizeof(monotonic_time);
1054 if (kernel_sysctlbyname("kern.monotonicclock_usecs", &monotonic_time, &size, NULL, 0) != 0) {
1055 has_monotonic_clock = 0;
1056 os_log(OS_LOG_DEFAULT, "%s system does not have monotonic clock\n", __func__);
1057 } else {
1058 has_monotonic_clock = 1;
1059 monotonic_usec_total = monotonic_time.monotonic_time_usec;
1060 absolutetime_to_microtime(monotonic_time.mach_time, &sys2, &microsys2);
1061 os_log(OS_LOG_DEFAULT, "%s system has monotonic clock\n", __func__);
1062 }
1063 #endif
1064
1065 s = splclock();
1066 clock_lock();
1067
1068 commpage_disable_timestamp();
1069
1070 utc_offset_secs = secs;
1071 utc_offset_microsecs = microsecs;
1072
1073 /*
1074 * We normally expect the UTC clock to be always-on and produce
1075 * greater readings than the tick counter. There may be corner cases
1076 * due to differing clock resolutions (the UTC clock's is likely lower) and
1077 * errors reading the UTC clock (some implementations return 0
1078 * on error) in which that doesn't hold true. Bring the UTC measurements
1079 * in-line with the tick counter measurements as a best effort in that case.
1080 */
1081 if ((sys > secs) || ((sys == secs) && (microsys > microsecs))) {
1082 os_log(OS_LOG_DEFAULT, "%s WARNING: UTC time is less than sys time, (%lu s %d u) UTC (%lu s %d u) sys\n",
1083 __func__, (unsigned long) secs, microsecs, (unsigned long)sys, microsys);
1084 secs = utc_offset_secs = sys;
1085 microsecs = utc_offset_microsecs = microsys;
1086 }
1087
1088 // UTC - sys
1089 // This macro stores the subtraction result in utc_offset_secs and utc_offset_microsecs
1090 TIME_SUB(utc_offset_secs, sys, utc_offset_microsecs, microsys, USEC_PER_SEC);
1091 // This function converts utc_offset_secs and utc_offset_microsecs to a bintime
1092 clock2bintime(&utc_offset_secs, &utc_offset_microsecs, &bt);
1093
1094 /*
1095 * Initialize the boot time based on the platform clock.
1096 */
1097 clock_boottime = secs;
1098 clock_boottime_usec = microsecs;
1099 commpage_update_boottime(clock_boottime * USEC_PER_SEC + clock_boottime_usec);
1100
1101 nanoseconds_to_absolutetime((uint64_t)NSEC_PER_SEC, &ticks_per_sec);
1102 clock_calend.boottime = bt;
1103 clock_calend.bintime = bt;
1104 clock_calend.offset.sec = 0;
1105 clock_calend.offset.frac = 0;
1106
1107 clock_calend.tick_scale_x = (uint64_t)1 << 63;
1108 clock_calend.tick_scale_x /= ticks_per_sec;
1109 clock_calend.tick_scale_x *= 2;
1110
1111 clock_calend.s_scale_ns = NSEC_PER_SEC;
1112 clock_calend.s_adj_nsx = 0;
1113
1114 #if !HAS_CONTINUOUS_HWCLOCK
1115 if (has_monotonic_clock) {
1116 monotonic_sec = monotonic_usec_total / (clock_sec_t)USEC_PER_SEC;
1117 monotonic_usec = monotonic_usec_total % (clock_usec_t)USEC_PER_SEC;
1118
1119 // monotonic clock - sys
1120 // This macro stores the subtraction result in monotonic_sec and monotonic_usec
1121 TIME_SUB(monotonic_sec, sys2, monotonic_usec, microsys2, USEC_PER_SEC);
1122 clock2bintime(&monotonic_sec, &monotonic_usec, &monotonic_bt);
1123
1124 // set basesleep to the difference (monotonic clock - sys)
1125 clock_calend.basesleep = monotonic_bt;
1126 }
1127 #endif
1128 commpage_update_mach_continuous_time(mach_absolutetime_asleep);
1129
1130 #if DEVELOPMENT || DEBUG
1131 struct clock_calend clock_calend_cp = clock_calend;
1132 #endif
1133
1134 clock_unlock();
1135 splx(s);
1136
1137 print_all_clock_variables(__func__, &secs, &microsecs, &sys, &microsys, &clock_calend_cp);
1138
1139 /*
1140 * Send host notifications.
1141 */
1142 host_notify_calendar_change();
1143
1144 #if CONFIG_DTRACE
1145 clock_track_calend_nowait();
1146 #endif
1147 }
1148
1149 #if HAS_CONTINUOUS_HWCLOCK
1150
1151 static void
1152 scale_sleep_time(void)
1153 {
1154 /* Apply the current NTP frequency adjustment to the time slept.
1155 * The frequency adjustment remains stable between calls to ntp_adjtime(),
1156 * and should thus provide a reasonable approximation of the total adjustment
1157 * required for the time slept. */
1158 struct bintime sleep_time;
1159 uint64_t tick_scale_x, s_scale_ns;
1160 int64_t s_adj_nsx;
1161 int64_t sleep_adj = ntp_get_freq();
1162 if (sleep_adj) {
1163 get_scale_factors_from_adj(sleep_adj, &tick_scale_x, &s_scale_ns, &s_adj_nsx);
1164 sleep_time = scale_delta(mach_absolutetime_last_sleep, tick_scale_x, s_scale_ns, s_adj_nsx);
1165 } else {
1166 tick_scale_x = (uint64_t)1 << 63;
1167 tick_scale_x /= ticks_per_sec;
1168 tick_scale_x *= 2;
1169 sleep_time.sec = mach_absolutetime_last_sleep / ticks_per_sec;
1170 sleep_time.frac = (mach_absolutetime_last_sleep % ticks_per_sec) * tick_scale_x;
1171 }
1172 bintime_add(&clock_calend.offset, &sleep_time);
1173 bintime_add(&clock_calend.bintime, &sleep_time);
1174 }
1175
1176 void
1177 clock_wakeup_calendar(void)
1178 {
1179 spl_t s;
1180
1181 s = splclock();
1182 clock_lock();
1183
1184 commpage_disable_timestamp();
1185
1186 uint64_t abstime = mach_absolute_time();
1187 uint64_t total_sleep_time = ml_get_hwclock() - abstime;
1188
1189 mach_absolutetime_last_sleep = total_sleep_time - mach_absolutetime_asleep;
1190 mach_absolutetime_asleep = total_sleep_time;
1191
1192 scale_sleep_time();
1193
1194 KERNEL_DEBUG_CONSTANT(
1195 MACHDBG_CODE(DBG_MACH_CLOCK, MACH_EPOCH_CHANGE) | DBG_FUNC_NONE,
1196 (uintptr_t) mach_absolutetime_last_sleep,
1197 (uintptr_t) mach_absolutetime_asleep,
1198 (uintptr_t) (mach_absolutetime_last_sleep >> 32),
1199 (uintptr_t) (mach_absolutetime_asleep >> 32),
1200 0);
1201
1202 commpage_update_mach_continuous_time(mach_absolutetime_asleep);
1203 adjust_cont_time_thread_calls();
1204
1205 clock_unlock();
1206 splx(s);
1207
1208 host_notify_calendar_change();
1209
1210 #if CONFIG_DTRACE
1211 clock_track_calend_nowait();
1212 #endif
1213 }
1214
1215 #else /* HAS_CONTINUOUS_HWCLOCK */
1216
1217 void
1218 clock_wakeup_calendar(void)
1219 {
1220 clock_sec_t wake_sys_sec;
1221 clock_usec_t wake_sys_usec;
1222 clock_sec_t wake_sec;
1223 clock_usec_t wake_usec;
1224 clock_sec_t wall_time_sec;
1225 clock_usec_t wall_time_usec;
1226 clock_sec_t diff_sec;
1227 clock_usec_t diff_usec;
1228 clock_sec_t var_s;
1229 clock_usec_t var_us;
1230 spl_t s;
1231 struct bintime bt, last_sleep_bt;
1232 struct latched_time monotonic_time;
1233 uint64_t monotonic_usec_total;
1234 uint64_t wake_abs;
1235 size_t size;
1236
1237 /*
1238 * If the platform has a monotonic clock, use it to
1239 * compute the sleep time. The monotonic clock does not have an offset
1240 * that can be modified, so neither the kernel nor userspace can change
1241 * this clock's time; it can only increase monotonically over time.
1242 * During sleep mach_absolute_time (sys time) does not tick,
1243 * so the sleep time is the difference between the current (monotonic - sys)
1244 * value and the previous difference stored at the last wake:
1245 *
1246 * basesleep = (monotonic - sys) ---> computed at last wake
1247 * sleep_time = (monotonic - sys) - basesleep
1248 *
1249 * If the platform does not support a monotonic clock, we set the wall time
1250 * to what the UTC clock returns us.
1251 * Setting the wall time to the UTC time implies that we lose all the adjustments
1252 * done during wake time through adjtime/ntp_adjtime.
1253 * The UTC time is the monotonic clock + an offset that can be set
1254 * by the kernel.
1255 * The time slept in this case is the difference between the wall time and the
1256 * UTC time at wake.
1257 *
1258 * IMPORTANT:
1259 * We assume that only the kernel sets the offset of the PMU/RTC and that
1260 * it does so only through the settimeofday interface.
1261 */
1262 if (has_monotonic_clock) {
1263 #if DEVELOPMENT || DEBUG
1264 /*
1265 * Just for debugging, get the wake UTC time.
1266 */
1267 PEGetUTCTimeOfDay(&var_s, &var_us);
1268 #endif
1269 /*
1270 * Get monotonic time with corresponding sys time
1271 */
1272 size = sizeof(monotonic_time);
1273 if (kernel_sysctlbyname("kern.monotonicclock_usecs", &monotonic_time, &size, NULL, 0) != 0) {
1274 panic("%s: could not call kern.monotonicclock_usecs", __func__);
1275 }
1276 wake_abs = monotonic_time.mach_time;
1277 absolutetime_to_microtime(wake_abs, &wake_sys_sec, &wake_sys_usec);
1278
1279 monotonic_usec_total = monotonic_time.monotonic_time_usec;
1280 wake_sec = monotonic_usec_total / (clock_sec_t)USEC_PER_SEC;
1281 wake_usec = monotonic_usec_total % (clock_usec_t)USEC_PER_SEC;
1282 } else {
1283 /*
1284 * Get UTC time and corresponding sys time
1285 */
1286 PEGetUTCTimeOfDay(&wake_sec, &wake_usec);
1287 wake_abs = mach_absolute_time();
1288 absolutetime_to_microtime(wake_abs, &wake_sys_sec, &wake_sys_usec);
1289 }
1290
1291 #if DEVELOPMENT || DEBUG
1292 os_log(OS_LOG_DEFAULT, "time at wake %lu s %d u from %s clock, abs %llu\n", (unsigned long)wake_sec, wake_usec, (has_monotonic_clock)?"monotonic":"UTC", wake_abs);
1293 if (has_monotonic_clock) {
1294 os_log(OS_LOG_DEFAULT, "UTC time %lu s %d u\n", (unsigned long)var_s, var_us);
1295 }
1296 #endif /* DEVELOPMENT || DEBUG */
1297
1298 s = splclock();
1299 clock_lock();
1300
1301 commpage_disable_timestamp();
1302
1303 #if DEVELOPMENT || DEBUG
1304 struct clock_calend clock_calend_cp1 = clock_calend;
1305 #endif /* DEVELOPMENT || DEBUG */
1306
1307 /*
1308 * We normally expect the UTC/monotonic clock to be always-on and produce
1309 * greater readings than the sys counter. There may be corner cases
1310 * due to differing clock resolutions (the UTC/monotonic clock's is likely lower) and
1311 * errors reading the UTC/monotonic clock (some implementations return 0
1312 * on error) in which that doesn't hold true.
1313 */
1314 if ((wake_sys_sec > wake_sec) || ((wake_sys_sec == wake_sec) && (wake_sys_usec > wake_usec))) {
1315 os_log_error(OS_LOG_DEFAULT, "WARNING: %s clock is less than sys clock at wake: %lu s %d u vs %lu s %d u, defaulting sleep time to zero\n", (has_monotonic_clock)?"monotonic":"UTC", (unsigned long)wake_sec, wake_usec, (unsigned long)wake_sys_sec, wake_sys_usec);
1316 mach_absolutetime_last_sleep = 0;
1317 goto done;
1318 }
1319
1320 if (has_monotonic_clock) {
1321 /*
1322 * compute the difference monotonic - sys;
1323 * we already checked that the monotonic time is
1324 * greater than sys.
1325 */
1326 diff_sec = wake_sec;
1327 diff_usec = wake_usec;
1328 // This macro stores the subtraction result in diff_sec and diff_usec
1329 TIME_SUB(diff_sec, wake_sys_sec, diff_usec, wake_sys_usec, USEC_PER_SEC);
1330 // This function converts diff_sec and diff_usec to a bintime
1331 clock2bintime(&diff_sec, &diff_usec, &bt);
1332
1333 /*
1334 * Safety belt: the monotonic clock will likely have a lower resolution than the sys counter.
1335 * It's also possible that the device didn't fully transition to the powered-off state on
1336 * the most recent sleep, so the sys counter may not have reset or may have only briefly
1337 * turned off. In that case it's possible for the difference between the monotonic clock and the
1338 * sys counter to be less than the previously recorded value in clock.calend.basesleep.
1339 * In that case simply record that we slept for 0 ticks.
1340 */
1341 if ((bt.sec > clock_calend.basesleep.sec) ||
1342 ((bt.sec == clock_calend.basesleep.sec) && (bt.frac > clock_calend.basesleep.frac))) {
1343 //last_sleep is the difference between (current monotonic - abs) and (last wake monotonic - abs)
1344 last_sleep_bt = bt;
1345 bintime_sub(&last_sleep_bt, &clock_calend.basesleep);
1346
1347 bintime2absolutetime(&last_sleep_bt, &mach_absolutetime_last_sleep);
1348 mach_absolutetime_asleep += mach_absolutetime_last_sleep;
1349
1350 //set basesleep to current monotonic - abs
1351 clock_calend.basesleep = bt;
1352
1353 //update wall time
1354 bintime_add(&clock_calend.offset, &last_sleep_bt);
1355 bintime_add(&clock_calend.bintime, &last_sleep_bt);
1356
1357 bintime2usclock(&last_sleep_bt, &var_s, &var_us);
1358 os_log(OS_LOG_DEFAULT, "time_slept (%lu s %d u)\n", (unsigned long) var_s, var_us);
1359 } else {
1360 bintime2usclock(&clock_calend.basesleep, &var_s, &var_us);
1361 os_log_error(OS_LOG_DEFAULT, "WARNING: last wake monotonic-sys time (%lu s %d u) is greater than current monotonic-sys time (%lu s %d u), defaulting sleep time to zero\n", (unsigned long) var_s, var_us, (unsigned long) diff_sec, diff_usec);
1362
1363 mach_absolutetime_last_sleep = 0;
1364 }
1365 } else {
1366 /*
1367 * set the wall time to UTC value
1368 */
1369 bt = get_scaled_time(wake_abs);
1370 bintime_add(&bt, &clock_calend.bintime);
1371 bintime2usclock(&bt, &wall_time_sec, &wall_time_usec);
1372
1373 if (wall_time_sec > wake_sec || (wall_time_sec == wake_sec && wall_time_usec > wake_usec)) {
1374 os_log(OS_LOG_DEFAULT, "WARNING: wall time (%lu s %d u) is greater than current UTC time (%lu s %d u), defaulting sleep time to zero\n", (unsigned long) wall_time_sec, wall_time_usec, (unsigned long) wake_sec, wake_usec);
1375
1376 mach_absolutetime_last_sleep = 0;
1377 } else {
1378 diff_sec = wake_sec;
1379 diff_usec = wake_usec;
1380 // This macro stores the subtraction result in diff_sec and diff_usec
1381 TIME_SUB(diff_sec, wall_time_sec, diff_usec, wall_time_usec, USEC_PER_SEC);
1382 // This function converts diff_sec and diff_usec to a bintime
1383 clock2bintime(&diff_sec, &diff_usec, &bt);
1384
1385 //time slept in this case is the difference between PMU/RTC and wall time
1386 last_sleep_bt = bt;
1387
1388 bintime2absolutetime(&last_sleep_bt, &mach_absolutetime_last_sleep);
1389 mach_absolutetime_asleep += mach_absolutetime_last_sleep;
1390
1391 //update wall time
1392 bintime_add(&clock_calend.offset, &last_sleep_bt);
1393 bintime_add(&clock_calend.bintime, &last_sleep_bt);
1394
1395 bintime2usclock(&last_sleep_bt, &var_s, &var_us);
1396 os_log(OS_LOG_DEFAULT, "time_slept (%lu s %d u)\n", (unsigned long)var_s, var_us);
1397 }
1398 }
1399 done:
1400 KERNEL_DEBUG_CONSTANT(
1401 MACHDBG_CODE(DBG_MACH_CLOCK, MACH_EPOCH_CHANGE) | DBG_FUNC_NONE,
1402 (uintptr_t) mach_absolutetime_last_sleep,
1403 (uintptr_t) mach_absolutetime_asleep,
1404 (uintptr_t) (mach_absolutetime_last_sleep >> 32),
1405 (uintptr_t) (mach_absolutetime_asleep >> 32),
1406 0);
1407
1408 commpage_update_mach_continuous_time(mach_absolutetime_asleep);
1409 adjust_cont_time_thread_calls();
1410
1411 #if DEVELOPMENT || DEBUG
1412 struct clock_calend clock_calend_cp = clock_calend;
1413 #endif
1414
1415 clock_unlock();
1416 splx(s);
1417
1418 #if DEVELOPMENT || DEBUG
1419 if (g_should_log_clock_adjustments) {
1420 print_all_clock_variables("clock_wakeup_calendar: BEFORE", NULL, NULL, NULL, NULL, &clock_calend_cp1);
1421 print_all_clock_variables("clock_wakeup_calendar: AFTER", NULL, NULL, NULL, NULL, &clock_calend_cp);
1422 }
1423 #endif /* DEVELOPMENT || DEBUG */
1424
1425 host_notify_calendar_change();
1426
1427 #if CONFIG_DTRACE
1428 clock_track_calend_nowait();
1429 #endif
1430 }
1431
1432 #endif /* !HAS_CONTINUOUS_HWCLOCK */
1433
1434 /*
1435 * clock_get_boottime_nanotime:
1436 *
1437 * Return the boottime, used by sysctl.
1438 */
1439 void
1440 clock_get_boottime_nanotime(
1441 clock_sec_t *secs,
1442 clock_nsec_t *nanosecs)
1443 {
1444 spl_t s;
1445
1446 s = splclock();
1447 clock_lock();
1448
1449 *secs = (clock_sec_t)clock_boottime;
1450 *nanosecs = (clock_nsec_t)clock_boottime_usec * NSEC_PER_USEC;
1451
1452 clock_unlock();
1453 splx(s);
1454 }
1455
1456 /*
1457 * clock_get_boottime_microtime:
1458 *
1459 * Return the boottime, used by sysctl.
1460 */
1461 void
1462 clock_get_boottime_microtime(
1463 clock_sec_t *secs,
1464 clock_usec_t *microsecs)
1465 {
1466 spl_t s;
1467
1468 s = splclock();
1469 clock_lock();
1470
1471 *secs = (clock_sec_t)clock_boottime;
1472 *microsecs = (clock_usec_t)clock_boottime_usec;
1473
1474 clock_unlock();
1475 splx(s);
1476 }
1477
1478
1479 /*
1480 * Wait / delay routines.
1481 */
1482 static void
1483 mach_wait_until_continue(
1484 __unused void *parameter,
1485 wait_result_t wresult)
1486 {
1487 thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
1488 /*NOTREACHED*/
1489 }
1490
1491 /*
1492 * mach_wait_until_trap: Suspend execution of calling thread until the specified time has passed
1493 *
1494 * Parameters: args->deadline Absolute time until which to wait
1495 *
1496 * Returns: 0 Success
1497 * !0 Not success
1498 *
1499 */
1500 kern_return_t
1501 mach_wait_until_trap(
1502 struct mach_wait_until_trap_args *args)
1503 {
1504 uint64_t deadline = args->deadline;
1505 wait_result_t wresult;
1506
1507 wresult = assert_wait_deadline_with_leeway((event_t)mach_wait_until_trap, THREAD_ABORTSAFE,
1508 TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
1509 if (wresult == THREAD_WAITING) {
1510 wresult = thread_block(mach_wait_until_continue);
1511 }
1512
1513 return (wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS;
1514 }
1515
1516 void
1517 clock_delay_until(
1518 uint64_t deadline)
1519 {
1520 uint64_t now = mach_absolute_time();
1521
1522 if (now >= deadline) {
1523 return;
1524 }
1525
1526 _clock_delay_until_deadline(deadline - now, deadline);
1527 }
1528
1529 /*
1530 * Preserve the original precise interval that the client
1531 * requested for comparison to the spin threshold.
1532 */
1533 void
1534 _clock_delay_until_deadline(
1535 uint64_t interval,
1536 uint64_t deadline)
1537 {
1538 _clock_delay_until_deadline_with_leeway(interval, deadline, 0);
1539 }
1540
1541 /*
1542 * Like _clock_delay_until_deadline, but it accepts a
1543 * leeway value.
1544 */
1545 void
1546 _clock_delay_until_deadline_with_leeway(
1547 uint64_t interval,
1548 uint64_t deadline,
1549 uint64_t leeway)
1550 {
1551 if (interval == 0) {
1552 return;
1553 }
1554
1555 if (ml_delay_should_spin(interval) ||
1556 get_preemption_level() != 0 ||
1557 ml_get_interrupts_enabled() == FALSE) {
1558 machine_delay_until(interval, deadline);
1559 } else {
1560 /*
1561 * For now, assume a leeway request of 0 means the client does not want a leeway
1562 * value. We may want to change this interpretation in the future.
1563 */
1564
1565 if (leeway) {
1566 assert_wait_deadline_with_leeway((event_t)clock_delay_until, THREAD_UNINT, TIMEOUT_URGENCY_LEEWAY, deadline, leeway);
1567 } else {
1568 assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline);
1569 }
1570
1571 thread_block(THREAD_CONTINUE_NULL);
1572 }
1573 }
1574
1575 void
1576 delay_for_interval(
1577 uint32_t interval,
1578 uint32_t scale_factor)
1579 {
1580 uint64_t abstime;
1581
1582 clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
1583
1584 _clock_delay_until_deadline(abstime, mach_absolute_time() + abstime);
1585 }
1586
1587 void
1588 delay_for_interval_with_leeway(
1589 uint32_t interval,
1590 uint32_t leeway,
1591 uint32_t scale_factor)
1592 {
1593 uint64_t abstime_interval;
1594 uint64_t abstime_leeway;
1595
1596 clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime_interval);
1597 clock_interval_to_absolutetime_interval(leeway, scale_factor, &abstime_leeway);
1598
1599 _clock_delay_until_deadline_with_leeway(abstime_interval, mach_absolute_time() + abstime_interval, abstime_leeway);
1600 }
1601
1602 void
1603 delay(
1604 int usec)
1605 {
1606 delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
1607 }
1608
1609 /*
1610 * Miscellaneous routines.
1611 */
1612 void
1613 clock_interval_to_deadline(
1614 uint32_t interval,
1615 uint32_t scale_factor,
1616 uint64_t *result)
1617 {
1618 uint64_t abstime;
1619
1620 clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
1621
1622 if (os_add_overflow(mach_absolute_time(), abstime, result)) {
1623 *result = UINT64_MAX;
1624 }
1625 }
1626
1627 void
1628 clock_absolutetime_interval_to_deadline(
1629 uint64_t abstime,
1630 uint64_t *result)
1631 {
1632 if (os_add_overflow(mach_absolute_time(), abstime, result)) {
1633 *result = UINT64_MAX;
1634 }
1635 }
1636
1637 void
1638 clock_continuoustime_interval_to_deadline(
1639 uint64_t conttime,
1640 uint64_t *result)
1641 {
1642 if (os_add_overflow(mach_continuous_time(), conttime, result)) {
1643 *result = UINT64_MAX;
1644 }
1645 }
1646
1647 void
1648 clock_get_uptime(
1649 uint64_t *result)
1650 {
1651 *result = mach_absolute_time();
1652 }
1653
1654 void
1655 clock_deadline_for_periodic_event(
1656 uint64_t interval,
1657 uint64_t abstime,
1658 uint64_t *deadline)
1659 {
1660 assert(interval != 0);
1661
1662 // *deadline += interval;
1663 if (os_add_overflow(*deadline, interval, deadline)) {
1664 *deadline = UINT64_MAX;
1665 }
1666
1667 if (*deadline <= abstime) {
1668 // *deadline = abstime + interval;
1669 if (os_add_overflow(abstime, interval, deadline)) {
1670 *deadline = UINT64_MAX;
1671 }
1672
1673 abstime = mach_absolute_time();
1674 if (*deadline <= abstime) {
1675 // *deadline = abstime + interval;
1676 if (os_add_overflow(abstime, interval, deadline)) {
1677 *deadline = UINT64_MAX;
1678 }
1679 }
1680 }
1681 }
1682
1683 uint64_t
1684 mach_continuous_time(void)
1685 {
1686 #if HAS_CONTINUOUS_HWCLOCK
1687 return ml_get_hwclock();
1688 #else
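/*
 * mach_absolutetime_asleep is sampled before and after reading the absolute
 * clock; if the two reads match, no sleep-accounting update raced with this
 * sample and (absolute + asleep) is a consistent continuous-time value,
 * otherwise the loop retries.
 */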
1689 while (1) {
1690 uint64_t read1 = mach_absolutetime_asleep;
1691 uint64_t absolute = mach_absolute_time();
1692 OSMemoryBarrier();
1693 uint64_t read2 = mach_absolutetime_asleep;
1694
1695 if (__builtin_expect(read1 == read2, 1)) {
1696 return absolute + read1;
1697 }
1698 }
1699 #endif
1700 }
1701
1702 uint64_t
1703 mach_continuous_approximate_time(void)
1704 {
1705 #if HAS_CONTINUOUS_HWCLOCK
1706 return ml_get_hwclock();
1707 #else
1708 while (1) {
1709 uint64_t read1 = mach_absolutetime_asleep;
1710 uint64_t absolute = mach_approximate_time();
1711 OSMemoryBarrier();
1712 uint64_t read2 = mach_absolutetime_asleep;
1713
1714 if (__builtin_expect(read1 == read2, 1)) {
1715 return absolute + read1;
1716 }
1717 }
1718 #endif
1719 }
1720
1721 /*
1722 * continuoustime_to_absolutetime
1723 * Must be called with interrupts disabled
1724 * Returned value is only valid until the next update to
1725 * mach_continuous_time
1726 */
1727 uint64_t
1728 continuoustime_to_absolutetime(uint64_t conttime)
1729 {
1730 if (conttime <= mach_absolutetime_asleep) {
1731 return 0;
1732 } else {
1733 return conttime - mach_absolutetime_asleep;
1734 }
1735 }
1736
1737 /*
1738 * absolutetime_to_continuoustime
1739 * Must be called with interrupts disabled
1740 * Returned value is only valid until the next update to
1741 * mach_continuous_time
1742 */
1743 uint64_t
1744 absolutetime_to_continuoustime(uint64_t abstime)
1745 {
1746 return abstime + mach_absolutetime_asleep;
1747 }
1748
1749 #if CONFIG_DTRACE
1750
1751 /*
1752 * clock_get_calendar_nanotime_nowait
1753 *
1754 * Description: Non-blocking version of clock_get_calendar_nanotime()
1755 *
1756 * Notes: This function operates by separately tracking calendar time
1757 * updates using a two element structure to copy the calendar
1758 * state, which may be asynchronously modified. It utilizes
1759 * barrier instructions in the tracking process and in the local
1760 * stable snapshot process in order to ensure that a consistent
1761 * snapshot is used to perform the calculation.
1762 */
1763 void
1764 clock_get_calendar_nanotime_nowait(
1765 clock_sec_t *secs,
1766 clock_nsec_t *nanosecs)
1767 {
1768 int i = 0;
1769 uint64_t now;
1770 struct unlocked_clock_calend stable;
1771 struct bintime bt;
1772
1773 for (;;) {
1774 stable = flipflop[i]; /* take snapshot */
1775
1776 /*
1777 * Use a barrier instruction to ensure atomicity. We AND
1778 * off the "in progress" bit to get the current generation
1779 * count.
1780 */
1781 os_atomic_andnot(&stable.gen, 1, relaxed);
1782
1783 /*
1784 * If an update _is_ in progress, the generation count will be
1785 * off by one, if it _was_ in progress, it will be off by two,
1786 * and if we caught it at a good time, it will be equal (and
1787 * our snapshot is therefore stable).
1788 */
1789 if (flipflop[i].gen == stable.gen) {
1790 break;
1791 }
1792
1793 /* Switch to the other element of the flipflop, and try again. */
1794 i ^= 1;
1795 }
1796
1797 now = mach_absolute_time();
1798
1799 bt = get_scaled_time(now);
1800
1801 bintime_add(&bt, &clock_calend.bintime);
1802
1803 bintime2nsclock(&bt, secs, nanosecs);
1804 }
1805
1806 static void
1807 clock_track_calend_nowait(void)
1808 {
1809 int i;
1810
1811 for (i = 0; i < 2; i++) {
1812 struct clock_calend tmp = clock_calend;
1813
1814 /*
1815 * Set the low bit of the generation count; since we use a
1816 * barrier instruction to do this, we are guaranteed that this
1817 * will flag an update in progress to an async caller trying
1818 * to examine the contents.
1819 */
1820 os_atomic_or(&flipflop[i].gen, 1, relaxed);
1821
1822 flipflop[i].calend = tmp;
1823
1824 /*
1825 * Increment the generation count to clear the low bit to
1826 * signal completion. If a caller compares the generation
1827 * count after taking a copy while in progress, the count
1828 * will be off by two.
1829 */
1830 os_atomic_inc(&flipflop[i].gen, relaxed);
1831 }
1832 }
1833
1834 #endif /* CONFIG_DTRACE */