/* apple/xnu (xnu-4570.71.2) - osfmk/kern/clock.c */
/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 */
/*-
 * Copyright (c) 1982, 1986, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)time.h      8.5 (Berkeley) 5/4/95
 * $FreeBSD$
 */

#include <mach/mach_types.h>

#include <kern/spl.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <kern/host_notify.h>
#include <kern/thread_call.h>
#include <libkern/OSAtomic.h>

#include <IOKit/IOPlatformExpert.h>

#include <machine/commpage.h>
#include <machine/config.h>
#include <machine/machine_routines.h>

#include <mach/mach_traps.h>
#include <mach/mach_time.h>

#include <sys/kdebug.h>
#include <sys/timex.h>
#include <kern/arithmetic_128.h>
#include <os/log.h>

2d21ac55 89uint32_t hz_tick_interval = 1;
cc8bc92a 90static uint64_t has_monotonic_clock = 0;
2d21ac55 91
6d2010ae 92decl_simple_lock_data(,clock_lock)
5ba3f43e
A
93lck_grp_attr_t * settime_lock_grp_attr;
94lck_grp_t * settime_lock_grp;
95lck_attr_t * settime_lock_attr;
96lck_mtx_t settime_lock;
91447636 97
b0d623f7
A
98#define clock_lock() \
99 simple_lock(&clock_lock)
100
101#define clock_unlock() \
102 simple_unlock(&clock_lock)
103
104#define clock_lock_init() \
105 simple_lock_init(&clock_lock, 0)
106
39037602
A
107#ifdef kdp_simple_lock_is_acquired
108boolean_t kdp_clock_is_locked()
109{
110 return kdp_simple_lock_is_acquired(&clock_lock);
111}
112#endif
b0d623f7 113
5ba3f43e
A
114struct bintime {
115 time_t sec;
116 uint64_t frac;
117};
118
119static __inline void
120bintime_addx(struct bintime *_bt, uint64_t _x)
121{
122 uint64_t _u;
123
124 _u = _bt->frac;
125 _bt->frac += _x;
126 if (_u > _bt->frac)
127 _bt->sec++;
128}
129
130static __inline void
131bintime_subx(struct bintime *_bt, uint64_t _x)
132{
133 uint64_t _u;
134
135 _u = _bt->frac;
136 _bt->frac -= _x;
137 if (_u < _bt->frac)
138 _bt->sec--;
139}
140
141static __inline void
142bintime_addns(struct bintime *bt, uint64_t ns)
143{
144 bt->sec += ns/ (uint64_t)NSEC_PER_SEC;
145 ns = ns % (uint64_t)NSEC_PER_SEC;
146 if (ns) {
147 /* 18446744073 = int(2^64 / NSEC_PER_SEC) */
148 ns = ns * (uint64_t)18446744073LL;
149 bintime_addx(bt, ns);
150 }
151}
152
153static __inline void
154bintime_subns(struct bintime *bt, uint64_t ns)
155{
156 bt->sec -= ns/ (uint64_t)NSEC_PER_SEC;
157 ns = ns % (uint64_t)NSEC_PER_SEC;
158 if (ns) {
159 /* 18446744073 = int(2^64 / NSEC_PER_SEC) */
160 ns = ns * (uint64_t)18446744073LL;
161 bintime_subx(bt, ns);
162 }
163}
164
165static __inline void
166bintime_addxns(struct bintime *bt, uint64_t a, int64_t xns)
167{
168 uint64_t uxns = (xns > 0)?(uint64_t )xns:(uint64_t)-xns;
169 uint64_t ns = multi_overflow(a, uxns);
170 if (xns > 0) {
171 if (ns)
172 bintime_addns(bt, ns);
173 ns = (a * uxns) / (uint64_t)NSEC_PER_SEC;
174 bintime_addx(bt, ns);
175 }
176 else{
177 if (ns)
178 bintime_subns(bt, ns);
179 ns = (a * uxns) / (uint64_t)NSEC_PER_SEC;
180 bintime_subx(bt,ns);
181 }
182}
183
184
185static __inline void
186bintime_add(struct bintime *_bt, const struct bintime *_bt2)
187{
188 uint64_t _u;
189
190 _u = _bt->frac;
191 _bt->frac += _bt2->frac;
192 if (_u > _bt->frac)
193 _bt->sec++;
194 _bt->sec += _bt2->sec;
195}
196
197static __inline void
198bintime_sub(struct bintime *_bt, const struct bintime *_bt2)
199{
200 uint64_t _u;
201
202 _u = _bt->frac;
203 _bt->frac -= _bt2->frac;
204 if (_u < _bt->frac)
205 _bt->sec--;
206 _bt->sec -= _bt2->sec;
207}
208
209static __inline void
210clock2bintime(const clock_sec_t *secs, const clock_usec_t *microsecs, struct bintime *_bt)
211{
212
213 _bt->sec = *secs;
214 /* 18446744073709 = int(2^64 / 1000000) */
215 _bt->frac = *microsecs * (uint64_t)18446744073709LL;
216}
217
218static __inline void
219bintime2usclock(const struct bintime *_bt, clock_sec_t *secs, clock_usec_t *microsecs)
220{
221
222 *secs = _bt->sec;
223 *microsecs = ((uint64_t)USEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32;
224}
225
226static __inline void
227bintime2nsclock(const struct bintime *_bt, clock_sec_t *secs, clock_usec_t *nanosecs)
228{
229
230 *secs = _bt->sec;
231 *nanosecs = ((uint64_t)NSEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32;
232}
233
234static __inline void
235bintime2absolutetime(const struct bintime *_bt, uint64_t *abs)
236{
237 uint64_t nsec;
238 nsec = (uint64_t) _bt->sec * (uint64_t)NSEC_PER_SEC + (((uint64_t)NSEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32);
239 nanoseconds_to_absolutetime(nsec, abs);
240}
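
/*
 * Worked example of the bintime fixed-point format used by the helpers
 * above (illustrative only, not part of any code path): frac counts units
 * of 2^-64 seconds, so 0.5 s is 0x8000000000000000.  clock2bintime() of
 * 500000 us computes 500000 * 18446744073709 ~= 2^63, and bintime2usclock()
 * recovers it by taking the top 32 bits of frac (0x80000000), multiplying by
 * USEC_PER_SEC and shifting right by 32, giving ~500000 us back; the dropped
 * low 32 bits of frac are far below one microsecond of resolution.
 */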

struct latched_time {
    uint64_t monotonic_time_usec;
    uint64_t mach_time;
};

extern int
kernel_sysctlbyname(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen);

/*
 * Time of day (calendar) variables.
 *
 * Algorithm:
 *
 * TOD <- bintime + delta*scale
 *
 * where:
 *      bintime is a cumulative offset that includes boottime and the scaled time elapsed between boottime and the last scale update.
 *      delta is the number of ticks elapsed since the last scale update.
 *      scale is computed according to an adjustment provided by ntp_kern.
 */
static struct clock_calend {
    uint64_t        s_scale_ns;   /* scale to apply for each fully elapsed second, converts seconds to ns */
    int64_t         s_adj_nsx;    /* additional adjustment to apply for each elapsed second, expressed in 64 bit frac of ns */
    uint64_t        tick_scale_x; /* scale to apply for each elapsed tick, converts ticks to 64 bit frac of s */
    uint64_t        offset_count; /* abs time from which the current scale factors apply */
    struct bintime  offset;       /* cumulative offset expressed in (sec, 64 bits frac of a second) */
    struct bintime  bintime;      /* cumulative offset (it includes boottime) expressed in (sec, 64 bits frac of a second) */
    struct bintime  boottime;     /* boot time expressed in (sec, 64 bits frac of a second) */
    struct bintime  basesleep;
} clock_calend;
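
/*
 * Illustrative reading of the algorithm above (a sketch, not a code path in
 * this file): with no NTP adjustment, tick_scale_x is roughly
 * 2^64 / ticks_per_sec, so delta ticks scale to delta * tick_scale_x 64-bit
 * fractions of a second, i.e. delta / ticks_per_sec seconds.  A simplified
 * reader, assuming clock_lock is held and ignoring the 128-bit overflow
 * handling that scale_delta() performs, would look like:
 *
 *      uint64_t delta = mach_absolute_time() - clock_calend.offset_count;
 *      struct bintime tod = clock_calend.bintime;
 *      bintime_addx(&tod, delta * clock_calend.tick_scale_x);
 *
 * Compare get_scaled_time() below, which also applies s_scale_ns and
 * s_adj_nsx for whole elapsed seconds.
 */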

static uint64_t ticks_per_sec; /* ticks in a second (expressed in abs time) */

#if DEVELOPMENT || DEBUG
clock_sec_t last_utc_sec = 0;
clock_usec_t last_utc_usec = 0;
clock_sec_t max_utc_sec = 0;
clock_sec_t last_sys_sec = 0;
clock_usec_t last_sys_usec = 0;
#endif

#if DEVELOPMENT || DEBUG
extern int g_should_log_clock_adjustments;

static void print_all_clock_variables(const char*, clock_sec_t* pmu_secs, clock_usec_t* pmu_usec, clock_sec_t* sys_secs, clock_usec_t* sys_usec, struct clock_calend* calend_cp);
static void print_all_clock_variables_internal(const char *, struct clock_calend* calend_cp);
#else
#define print_all_clock_variables(...) do { } while (0)
#define print_all_clock_variables_internal(...) do { } while (0)
#endif

#if CONFIG_DTRACE

/*
 * Unlocked calendar flipflop; this is used to track a clock_calend such
 * that we can safely access a snapshot of a valid clock_calend structure
 * without needing to take any locks to do it.
 *
 * The trick is to use a generation count and set the low bit when it is
 * being updated/read; by doing this, we guarantee, through use of the
 * hw_atomic functions, that the generation is incremented when the bit
 * is cleared atomically (by using a 1 bit add).
 */
static struct unlocked_clock_calend {
    struct clock_calend calend;     /* copy of calendar */
    uint32_t            gen;        /* generation count */
} flipflop[2];

static void clock_track_calend_nowait(void);

#endif
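
/*
 * Worked sequence for the generation-count scheme above (illustrative):
 * gen starts even, say 4.  The writer does hw_atomic_or(&gen, 1) (gen = 5,
 * "update in progress"), copies clock_calend into calend, then
 * hw_atomic_add(&gen, 1) (gen = 6).  A lock-free reader snapshots the whole
 * element, clears the low bit of its private copy of gen, and re-reads the
 * live gen: equal values mean the snapshot is stable; a mismatch (the live
 * count is 5 or 7 relative to the snapshot's 4 or 6) means an update raced
 * the copy and the reader retries on the other flipflop element.  See
 * clock_track_calend_nowait() and clock_get_calendar_nanotime_nowait()
 * below for the actual writer and reader.
 */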

void _clock_delay_until_deadline(uint64_t interval, uint64_t deadline);
void _clock_delay_until_deadline_with_leeway(uint64_t interval, uint64_t deadline, uint64_t leeway);

/* Boottime variables */
static uint64_t clock_boottime;
static uint32_t clock_boottime_usec;

#define TIME_ADD(rsecs, secs, rfrac, frac, unit)    \
MACRO_BEGIN                                         \
    if (((rfrac) += (frac)) >= (unit)) {            \
        (rfrac) -= (unit);                          \
        (rsecs) += 1;                               \
    }                                               \
    (rsecs) += (secs);                              \
MACRO_END

#define TIME_SUB(rsecs, secs, rfrac, frac, unit)    \
MACRO_BEGIN                                         \
    if ((int)((rfrac) -= (frac)) < 0) {             \
        (rfrac) += (unit);                          \
        (rsecs) -= 1;                               \
    }                                               \
    (rsecs) -= (secs);                              \
MACRO_END
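
/*
 * Example of the carry handling in TIME_ADD/TIME_SUB above (illustrative):
 * adding 2 s 900000 us to an accumulator holding 1 s 300000 us with
 * TIME_ADD(rsecs, 2, rfrac, 900000, USEC_PER_SEC) first makes
 * rfrac = 1200000, sees that it reached USEC_PER_SEC and normalizes to
 * rfrac = 200000 with a carry into rsecs, ending at 4 s 200000 us.
 * TIME_SUB borrows from the seconds the same way when the fractional
 * subtraction goes negative.
 */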

/*
 *      clock_config:
 *
 *      Called once at boot to configure the clock subsystem.
 */
void
clock_config(void)
{
    clock_lock_init();

    settime_lock_grp_attr = lck_grp_attr_alloc_init();
    settime_lock_grp = lck_grp_alloc_init("settime grp", settime_lock_grp_attr);
    settime_lock_attr = lck_attr_alloc_init();
    lck_mtx_init(&settime_lock, settime_lock_grp, settime_lock_attr);

    clock_oldconfig();

    ntp_init();

    nanoseconds_to_absolutetime((uint64_t)NSEC_PER_SEC, &ticks_per_sec);
}

/*
 *      clock_init:
 *
 *      Called on a processor each time started.
 */
void
clock_init(void)
{
    clock_oldinit();
}

/*
 *      clock_timebase_init:
 *
 *      Called by machine dependent code
 *      to initialize areas dependent on the
 *      timebase value.  May be called multiple
 *      times during start up.
 */
void
clock_timebase_init(void)
{
    uint64_t    abstime;

    nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
    hz_tick_interval = (uint32_t)abstime;

    sched_timebase_init();
}

/*
 *      mach_timebase_info_trap:
 *
 *      User trap returns timebase constant.
 */
kern_return_t
mach_timebase_info_trap(
    struct mach_timebase_info_trap_args *args)
{
    mach_vm_address_t           out_info_addr = args->info;
    mach_timebase_info_data_t   info = {};

    clock_timebase_info(&info);

    copyout((void *)&info, out_info_addr, sizeof (info));

    return (KERN_SUCCESS);
}

/*
 *      Calendar routines.
 */

/*
 *      clock_get_calendar_microtime:
 *
 *      Returns the current calendar value,
 *      microseconds as the fraction.
 */
void
clock_get_calendar_microtime(
    clock_sec_t     *secs,
    clock_usec_t    *microsecs)
{
    clock_get_calendar_absolute_and_microtime(secs, microsecs, NULL);
}

/*
 * get_scale_factors_from_adj:
 *
 * computes scale factors from the value given in adjustment.
 *
 * Part of the code has been taken from tc_windup of FreeBSD
 * written by Poul-Henning Kamp <phk@FreeBSD.ORG>, Julien Ridoux and
 * Konstantin Belousov.
 * https://github.com/freebsd/freebsd/blob/master/sys/kern/kern_tc.c
 */
static void
get_scale_factors_from_adj(int64_t adjustment, uint64_t* tick_scale_x, uint64_t* s_scale_ns, int64_t* s_adj_nsx)
{
    uint64_t scale;
    int64_t nano, frac;

    /*-
     * Calculating the scaling factor.  We want the number of 1/2^64
     * fractions of a second per period of the hardware counter, taking
     * into account the th_adjustment factor which the NTP PLL/adjtime(2)
     * processing provides us with.
     *
     * The th_adjustment is nanoseconds per second with 32 bit binary
     * fraction and we want 64 bit binary fraction of second:
     *
     *      x = a * 2^32 / 10^9 = a * 4.294967296
     *
     * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
     * we can only multiply by about 850 without overflowing, that
     * leaves no suitably precise fractions for multiply before divide.
     *
     * Divide before multiply with a fraction of 2199/512 results in a
     * systematic undercompensation of 10PPM of th_adjustment.  On a
     * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
     *
     * We happily sacrifice the lowest of the 64 bits of our result
     * to the goddess of code clarity.
     */
    scale = (uint64_t)1 << 63;
    scale += (adjustment / 1024) * 2199;
    scale /= ticks_per_sec;
    *tick_scale_x = scale * 2;

    /*
     * hi part of adj
     * it contains ns (without fraction) to add to the next sec.
     * Get ns scale factor for the next sec.
     */
    nano = (adjustment > 0)? adjustment >> 32 : -((-adjustment) >> 32);
    scale = (uint64_t) NSEC_PER_SEC;
    scale += nano;
    *s_scale_ns = scale;

    /*
     * lo part of adj
     * it contains 32 bit frac of ns to add to the next sec.
     * Keep it as additional adjustment for the next sec.
     */
    frac = (adjustment > 0)? ((uint32_t) adjustment) : -((uint32_t) (-adjustment));
    *s_adj_nsx = (frac > 0)? frac << 32 : -((-frac) << 32);

    return;
}
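
/*
 * Worked example for get_scale_factors_from_adj() (illustrative numbers):
 * an NTP slew of +500000 ns/s (500 PPM) arrives as
 * adjustment = 500000 << 32 (ns per second in 32.32 fixed point).
 * The high 32 bits give nano = 500000, so *s_scale_ns becomes
 * NSEC_PER_SEC + 500000: each fully elapsed second is counted as
 * 1.0005e9 ns.  The low 32 bits are zero here, so *s_adj_nsx = 0.
 * For the per-tick weight, (adjustment/1024) * 2199 is about 5e-4 of 2^63,
 * so tick_scale_x comes out roughly 500 PPM larger than the unadjusted
 * 2^64 / ticks_per_sec, matching the requested slew.
 */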

/*
 * scale_delta:
 *
 * returns a bintime struct representing delta scaled according to the
 * scale factors provided to this function.
 */
static struct bintime
scale_delta(uint64_t delta, uint64_t tick_scale_x, uint64_t s_scale_ns, int64_t s_adj_nsx)
{
    uint64_t sec, new_ns, over;
    struct bintime bt;

    bt.sec = 0;
    bt.frac = 0;

    /*
     * If more than one second has elapsed,
     * scale the fully elapsed seconds using the scale factors for seconds.
     * s_scale_ns -> scales sec to ns.
     * s_adj_nsx -> additional adj expressed in 64 bit frac of ns to apply to each sec.
     */
    if (delta > ticks_per_sec) {
        sec = (delta / ticks_per_sec);
        new_ns = sec * s_scale_ns;
        bintime_addns(&bt, new_ns);
        if (s_adj_nsx) {
            if (sec == 1) {
                /* shortcut, no overflow can occur */
                if (s_adj_nsx > 0)
                    bintime_addx(&bt, (uint64_t)s_adj_nsx / (uint64_t)NSEC_PER_SEC);
                else
                    bintime_subx(&bt, (uint64_t)-s_adj_nsx / (uint64_t)NSEC_PER_SEC);
            }
            else {
                /*
                 * s_adj_nsx is 64 bit frac of ns.
                 * sec*s_adj_nsx might overflow in int64_t.
                 * use bintime_addxns to not lose overflowed ns.
                 */
                bintime_addxns(&bt, sec, s_adj_nsx);
            }
        }
        delta = (delta % ticks_per_sec);
    }

    over = multi_overflow(tick_scale_x, delta);
    if (over) {
        bt.sec += over;
    }

    /*
     * scale elapsed ticks using the scale factor for ticks.
     */
    bintime_addx(&bt, delta * tick_scale_x);

    return bt;
}

/*
 * get_scaled_time:
 *
 * returns the scaled time of the time elapsed from the last time
 * scale factors were updated to now.
 */
static struct bintime
get_scaled_time(uint64_t now)
{
    uint64_t delta;

    /*
     * Compute ticks elapsed since last scale update.
     * This time will be scaled according to the value given by ntp kern.
     */
    delta = now - clock_calend.offset_count;

    return scale_delta(delta, clock_calend.tick_scale_x, clock_calend.s_scale_ns, clock_calend.s_adj_nsx);
}

static void
clock_get_calendar_absolute_and_microtime_locked(
    clock_sec_t     *secs,
    clock_usec_t    *microsecs,
    uint64_t        *abstime)
{
    uint64_t now;
    struct bintime bt;

    now = mach_absolute_time();
    if (abstime)
        *abstime = now;

    bt = get_scaled_time(now);
    bintime_add(&bt, &clock_calend.bintime);
    bintime2usclock(&bt, secs, microsecs);
}

static void
clock_get_calendar_absolute_and_nanotime_locked(
    clock_sec_t     *secs,
    clock_usec_t    *nanosecs,
    uint64_t        *abstime)
{
    uint64_t now;
    struct bintime bt;

    now = mach_absolute_time();
    if (abstime)
        *abstime = now;

    bt = get_scaled_time(now);
    bintime_add(&bt, &clock_calend.bintime);
    bintime2nsclock(&bt, secs, nanosecs);
}

/*
 *      clock_get_calendar_absolute_and_microtime:
 *
 *      Returns the current calendar value,
 *      microseconds as the fraction.  Also
 *      returns mach_absolute_time if abstime
 *      is not NULL.
 */
void
clock_get_calendar_absolute_and_microtime(
    clock_sec_t     *secs,
    clock_usec_t    *microsecs,
    uint64_t        *abstime)
{
    spl_t s;

    s = splclock();
    clock_lock();

    clock_get_calendar_absolute_and_microtime_locked(secs, microsecs, abstime);

    clock_unlock();
    splx(s);
}

/*
 *      clock_get_calendar_nanotime:
 *
 *      Returns the current calendar value,
 *      nanoseconds as the fraction.
 *
 *      Since we do not have an interface to
 *      set the calendar with resolution greater
 *      than a microsecond, we honor that here.
 */
void
clock_get_calendar_nanotime(
    clock_sec_t     *secs,
    clock_nsec_t    *nanosecs)
{
    spl_t s;

    s = splclock();
    clock_lock();

    clock_get_calendar_absolute_and_nanotime_locked(secs, nanosecs, NULL);

    clock_unlock();
    splx(s);
}

/*
 *      clock_gettimeofday:
 *
 *      Kernel interface for commpage implementation of
 *      gettimeofday() syscall.
 *
 *      Returns the current calendar value, and updates the
 *      commpage info as appropriate.  Because most calls to
 *      gettimeofday() are handled in user mode by the commpage,
 *      this routine should be used infrequently.
 */
void
clock_gettimeofday(
    clock_sec_t     *secs,
    clock_usec_t    *microsecs)
{
    clock_gettimeofday_and_absolute_time(secs, microsecs, NULL);
}

void
clock_gettimeofday_and_absolute_time(
    clock_sec_t     *secs,
    clock_usec_t    *microsecs,
    uint64_t        *mach_time)
{
    uint64_t        now;
    spl_t           s;
    struct bintime  bt;

    s = splclock();
    clock_lock();

    now = mach_absolute_time();
    bt = get_scaled_time(now);
    bintime_add(&bt, &clock_calend.bintime);
    bintime2usclock(&bt, secs, microsecs);

    clock_gettimeofday_set_commpage(now, bt.sec, bt.frac, clock_calend.tick_scale_x, ticks_per_sec);

    clock_unlock();
    splx(s);

    if (mach_time) {
        *mach_time = now;
    }
}

static void
update_basesleep(struct bintime delta, bool forward)
{
    /*
     * Update basesleep only if the platform does not have a monotonic clock.
     * In that case the sleep time computation will use the PMU time,
     * whose offset gets modified by settimeofday.
     * We don't need this for the monotonic clock because in that case the sleep
     * time computation is independent from the offset value of the PMU.
     */
    if (!has_monotonic_clock) {
        if (forward)
            bintime_add(&clock_calend.basesleep, &delta);
        else
            bintime_sub(&clock_calend.basesleep, &delta);
    }
}

/*
 *      clock_set_calendar_microtime:
 *
 *      Sets the current calendar value by
 *      recalculating the epoch and offset
 *      from the system clock.
 *
 *      Also adjusts the boottime to keep the
 *      value consistent, writes the new
 *      calendar value to the platform clock,
 *      and sends calendar change notifications.
 */
void
clock_set_calendar_microtime(
    clock_sec_t     secs,
    clock_usec_t    microsecs)
{
    uint64_t        absolutesys;
    clock_sec_t     newsecs;
    clock_sec_t     oldsecs;
    clock_usec_t    newmicrosecs;
    clock_usec_t    oldmicrosecs;
    uint64_t        commpage_value;
    spl_t           s;
    struct bintime  bt;
    clock_sec_t     deltasecs;
    clock_usec_t    deltamicrosecs;

    newsecs = secs;
    newmicrosecs = microsecs;

    /*
     * The settime_lock mtx is used to prevent racing settimeofday calls from updating
     * the wall clock and the platform clock concurrently.
     *
     * clock_lock cannot be used for this race because it is acquired from interrupt
     * context and needs interrupts disabled, while updating the platform clock
     * instead needs to be done with interrupts enabled.
     */
    lck_mtx_lock(&settime_lock);

    s = splclock();
    clock_lock();

#if DEVELOPMENT || DEBUG
    struct clock_calend clock_calend_cp = clock_calend;
#endif
    commpage_disable_timestamp();

    /*
     * Adjust the boottime based on the delta.
     */
    clock_get_calendar_absolute_and_microtime_locked(&oldsecs, &oldmicrosecs, &absolutesys);

#if DEVELOPMENT || DEBUG
    if (g_should_log_clock_adjustments) {
        os_log(OS_LOG_DEFAULT, "%s wall %lu s %d u computed with %llu abs\n",
               __func__, (unsigned long)oldsecs, oldmicrosecs, absolutesys);
        os_log(OS_LOG_DEFAULT, "%s requested %lu s %d u\n",
               __func__, (unsigned long)secs, microsecs);
    }
#endif

    if (oldsecs < secs || (oldsecs == secs && oldmicrosecs < microsecs)) {
        // moving forwards
        deltasecs = secs;
        deltamicrosecs = microsecs;

        TIME_SUB(deltasecs, oldsecs, deltamicrosecs, oldmicrosecs, USEC_PER_SEC);

#if DEVELOPMENT || DEBUG
        if (g_should_log_clock_adjustments) {
            os_log(OS_LOG_DEFAULT, "%s delta requested %lu s %d u\n",
                   __func__, (unsigned long)deltasecs, deltamicrosecs);
        }
#endif

        TIME_ADD(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
        clock2bintime(&deltasecs, &deltamicrosecs, &bt);
        bintime_add(&clock_calend.boottime, &bt);
        update_basesleep(bt, TRUE);
    } else {
        // moving backwards
        deltasecs = oldsecs;
        deltamicrosecs = oldmicrosecs;

        TIME_SUB(deltasecs, secs, deltamicrosecs, microsecs, USEC_PER_SEC);
#if DEVELOPMENT || DEBUG
        if (g_should_log_clock_adjustments) {
            os_log(OS_LOG_DEFAULT, "%s negative delta requested %lu s %d u\n",
                   __func__, (unsigned long)deltasecs, deltamicrosecs);
        }
#endif

        TIME_SUB(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
        clock2bintime(&deltasecs, &deltamicrosecs, &bt);
        bintime_sub(&clock_calend.boottime, &bt);
        update_basesleep(bt, FALSE);
    }

    clock_calend.bintime = clock_calend.boottime;
    bintime_add(&clock_calend.bintime, &clock_calend.offset);

    clock2bintime((clock_sec_t *) &secs, (clock_usec_t *) &microsecs, &bt);

    clock_gettimeofday_set_commpage(absolutesys, bt.sec, bt.frac, clock_calend.tick_scale_x, ticks_per_sec);

#if DEVELOPMENT || DEBUG
    struct clock_calend clock_calend_cp1 = clock_calend;
#endif

    commpage_value = clock_boottime * USEC_PER_SEC + clock_boottime_usec;

    clock_unlock();
    splx(s);

    /*
     * Set the new value for the platform clock.
     * This call might block, so interrupts must be enabled.
     */
#if DEVELOPMENT || DEBUG
    uint64_t now_b = mach_absolute_time();
#endif

    PESetUTCTimeOfDay(newsecs, newmicrosecs);

#if DEVELOPMENT || DEBUG
    uint64_t now_a = mach_absolute_time();
    if (g_should_log_clock_adjustments) {
        os_log(OS_LOG_DEFAULT, "%s mach bef PESet %llu mach aft %llu \n", __func__, now_b, now_a);
    }
#endif

    print_all_clock_variables_internal(__func__, &clock_calend_cp);
    print_all_clock_variables_internal(__func__, &clock_calend_cp1);

    commpage_update_boottime(commpage_value);

    /*
     * Send host notifications.
     */
    host_notify_calendar_change();
    host_notify_calendar_set();

#if CONFIG_DTRACE
    clock_track_calend_nowait();
#endif

    lck_mtx_unlock(&settime_lock);
}

uint64_t mach_absolutetime_asleep = 0;
uint64_t mach_absolutetime_last_sleep = 0;

void
clock_get_calendar_uptime(clock_sec_t *secs)
{
    uint64_t now;
    spl_t s;
    struct bintime bt;

    s = splclock();
    clock_lock();

    now = mach_absolute_time();

    bt = get_scaled_time(now);
    bintime_add(&bt, &clock_calend.offset);

    *secs = bt.sec;

    clock_unlock();
    splx(s);
}


/*
 * clock_update_calendar:
 *
 * called by the ntp timer to update the scale factors.
 */
void
clock_update_calendar(void)
{
    uint64_t now, delta;
    struct bintime bt;
    spl_t s;
    int64_t adjustment;

    s = splclock();
    clock_lock();

    now = mach_absolute_time();

    /*
     * scale the time elapsed since the last update and
     * add it to offset.
     */
    bt = get_scaled_time(now);
    bintime_add(&clock_calend.offset, &bt);

    /*
     * update the base from which to apply the next scale factors.
     */
    delta = now - clock_calend.offset_count;
    clock_calend.offset_count += delta;

    clock_calend.bintime = clock_calend.offset;
    bintime_add(&clock_calend.bintime, &clock_calend.boottime);

    /*
     * recompute the next adjustment.
     */
    ntp_update_second(&adjustment, clock_calend.bintime.sec);

#if DEVELOPMENT || DEBUG
    if (g_should_log_clock_adjustments) {
        os_log(OS_LOG_DEFAULT, "%s adjustment %lld\n", __func__, adjustment);
    }
#endif

    /*
     * recompute the scale factors.
     */
    get_scale_factors_from_adj(adjustment, &clock_calend.tick_scale_x, &clock_calend.s_scale_ns, &clock_calend.s_adj_nsx);

    clock_gettimeofday_set_commpage(now, clock_calend.bintime.sec, clock_calend.bintime.frac, clock_calend.tick_scale_x, ticks_per_sec);

#if DEVELOPMENT || DEBUG
    struct clock_calend calend_cp = clock_calend;
#endif

    clock_unlock();
    splx(s);

    print_all_clock_variables(__func__, NULL, NULL, NULL, NULL, &calend_cp);
}


#if DEVELOPMENT || DEBUG

void print_all_clock_variables_internal(const char* func, struct clock_calend* clock_calend_cp)
{
    clock_sec_t offset_secs;
    clock_usec_t offset_microsecs;
    clock_sec_t bintime_secs;
    clock_usec_t bintime_microsecs;
    clock_sec_t bootime_secs;
    clock_usec_t bootime_microsecs;

    if (!g_should_log_clock_adjustments)
        return;

    bintime2usclock(&clock_calend_cp->offset, &offset_secs, &offset_microsecs);
    bintime2usclock(&clock_calend_cp->bintime, &bintime_secs, &bintime_microsecs);
    bintime2usclock(&clock_calend_cp->boottime, &bootime_secs, &bootime_microsecs);

    os_log(OS_LOG_DEFAULT, "%s s_scale_ns %llu s_adj_nsx %lld tick_scale_x %llu offset_count %llu\n",
           func, clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx,
           clock_calend_cp->tick_scale_x, clock_calend_cp->offset_count);
    os_log(OS_LOG_DEFAULT, "%s offset.sec %ld offset.frac %llu offset_secs %lu offset_microsecs %d\n",
           func, clock_calend_cp->offset.sec, clock_calend_cp->offset.frac,
           (unsigned long)offset_secs, offset_microsecs);
    os_log(OS_LOG_DEFAULT, "%s bintime.sec %ld bintime.frac %llu bintime_secs %lu bintime_microsecs %d\n",
           func, clock_calend_cp->bintime.sec, clock_calend_cp->bintime.frac,
           (unsigned long)bintime_secs, bintime_microsecs);
    os_log(OS_LOG_DEFAULT, "%s bootime.sec %ld bootime.frac %llu bootime_secs %lu bootime_microsecs %d\n",
           func, clock_calend_cp->boottime.sec, clock_calend_cp->boottime.frac,
           (unsigned long)bootime_secs, bootime_microsecs);

    clock_sec_t basesleep_secs;
    clock_usec_t basesleep_microsecs;

    bintime2usclock(&clock_calend_cp->basesleep, &basesleep_secs, &basesleep_microsecs);
    os_log(OS_LOG_DEFAULT, "%s basesleep.sec %ld basesleep.frac %llu basesleep_secs %lu basesleep_microsecs %d\n",
           func, clock_calend_cp->basesleep.sec, clock_calend_cp->basesleep.frac,
           (unsigned long)basesleep_secs, basesleep_microsecs);

}


void print_all_clock_variables(const char* func, clock_sec_t* pmu_secs, clock_usec_t* pmu_usec, clock_sec_t* sys_secs, clock_usec_t* sys_usec, struct clock_calend* clock_calend_cp)
{
    if (!g_should_log_clock_adjustments)
        return;

    struct bintime bt;
    clock_sec_t wall_secs;
    clock_usec_t wall_microsecs;
    uint64_t now;
    uint64_t delta;

    if (pmu_secs) {
        os_log(OS_LOG_DEFAULT, "%s PMU %lu s %d u \n", func, (unsigned long)*pmu_secs, *pmu_usec);
    }
    if (sys_secs) {
        os_log(OS_LOG_DEFAULT, "%s sys %lu s %d u \n", func, (unsigned long)*sys_secs, *sys_usec);
    }

    print_all_clock_variables_internal(func, clock_calend_cp);

    now = mach_absolute_time();
    delta = now - clock_calend_cp->offset_count;

    bt = scale_delta(delta, clock_calend_cp->tick_scale_x, clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx);
    bintime_add(&bt, &clock_calend_cp->bintime);
    bintime2usclock(&bt, &wall_secs, &wall_microsecs);

    os_log(OS_LOG_DEFAULT, "%s wall %lu s %d u computed with %llu abs\n",
           func, (unsigned long)wall_secs, wall_microsecs, now);
}


#endif /* DEVELOPMENT || DEBUG */

/*
 *      clock_initialize_calendar:
 *
 *      Set the calendar and related clocks
 *      from the platform clock at boot.
 *
 *      Also sends host notifications.
 */
void
clock_initialize_calendar(void)
{
    clock_sec_t         sys;             // sleepless time since boot in seconds
    clock_sec_t         secs;            // current UTC time
    clock_sec_t         utc_offset_secs; // difference between current UTC time and sleepless time since boot
    clock_usec_t        microsys;
    clock_usec_t        microsecs;
    clock_usec_t        utc_offset_microsecs;
    spl_t               s;
    struct bintime      bt;
    struct bintime      monotonic_bt;
    struct latched_time monotonic_time;
    uint64_t            monotonic_usec_total;
    clock_sec_t         sys2, monotonic_sec;
    clock_usec_t        microsys2, monotonic_usec;
    size_t              size;

    // Get the PMU time (with offset) and the corresponding sys time
    PEGetUTCTimeOfDay(&secs, &microsecs);
    clock_get_system_microtime(&sys, &microsys);

    /*
     * If the platform has a monotonic clock, use kern.monotonicclock_usecs
     * to estimate the sleep/wake time, otherwise use the PMU and adjustments
     * provided through settimeofday to estimate the sleep time.
     * NOTE: the latter case relies on the kernel being the only component
     * that sets the PMU offset.
     */
    size = sizeof(monotonic_time);
    if (kernel_sysctlbyname("kern.monotonicclock_usecs", &monotonic_time, &size, NULL, 0) != 0) {
        has_monotonic_clock = 0;
        os_log(OS_LOG_DEFAULT, "%s system does not have monotonic clock.\n", __func__);
    } else {
        has_monotonic_clock = 1;
        monotonic_usec_total = monotonic_time.monotonic_time_usec;
        absolutetime_to_microtime(monotonic_time.mach_time, &sys2, &microsys2);
        os_log(OS_LOG_DEFAULT, "%s system has monotonic clock.\n", __func__);
    }

    s = splclock();
    clock_lock();

    commpage_disable_timestamp();

    utc_offset_secs = secs;
    utc_offset_microsecs = microsecs;

#if DEVELOPMENT || DEBUG
    last_utc_sec = secs;
    last_utc_usec = microsecs;
    last_sys_sec = sys;
    last_sys_usec = microsys;
    if (secs > max_utc_sec)
        max_utc_sec = secs;
#endif

    /*
     * We normally expect the UTC clock to be always-on and produce
     * greater readings than the tick counter.  There may be corner cases
     * due to differing clock resolutions (UTC clock is likely lower) and
     * errors reading the UTC clock (some implementations return 0
     * on error) in which that doesn't hold true.  Bring the UTC measurements
     * in-line with the tick counter measurements as a best effort in that case.
     */
    //FIXME if the current time is prior to 1970 secs will be negative
    if ((sys > secs) || ((sys == secs) && (microsys > microsecs))) {
        os_log(OS_LOG_DEFAULT, "%s WARNING: PMU offset is less than sys PMU %lu s %d u sys %lu s %d u\n",
               __func__, (unsigned long)secs, microsecs, (unsigned long)sys, microsys);
        secs = utc_offset_secs = sys;
        microsecs = utc_offset_microsecs = microsys;
    }

    // PMU time with offset - sys
    // This macro stores the subtraction result in utc_offset_secs and utc_offset_microsecs
    TIME_SUB(utc_offset_secs, sys, utc_offset_microsecs, microsys, USEC_PER_SEC);

    clock2bintime(&utc_offset_secs, &utc_offset_microsecs, &bt);

    /*
     * Initialize the boot time based on the platform clock.
     */
    clock_boottime = secs;
    clock_boottime_usec = microsecs;
    commpage_update_boottime(clock_boottime * USEC_PER_SEC + clock_boottime_usec);

    nanoseconds_to_absolutetime((uint64_t)NSEC_PER_SEC, &ticks_per_sec);
    clock_calend.boottime = bt;
    clock_calend.bintime = bt;
    clock_calend.offset.sec = 0;
    clock_calend.offset.frac = 0;

    clock_calend.tick_scale_x = (uint64_t)1 << 63;
    clock_calend.tick_scale_x /= ticks_per_sec;
    clock_calend.tick_scale_x *= 2;

    clock_calend.s_scale_ns = NSEC_PER_SEC;
    clock_calend.s_adj_nsx = 0;

    if (has_monotonic_clock) {

        monotonic_sec = monotonic_usec_total / (clock_sec_t)USEC_PER_SEC;
        monotonic_usec = monotonic_usec_total % (clock_usec_t)USEC_PER_SEC;

        // monotonic clock (PMU time without offset) - sys
        // This macro stores the subtraction result in monotonic_sec and monotonic_usec
        TIME_SUB(monotonic_sec, sys2, monotonic_usec, microsys2, USEC_PER_SEC);
        clock2bintime(&monotonic_sec, &monotonic_usec, &monotonic_bt);

        // set basesleep to the difference monotonic clock - sys
        clock_calend.basesleep = monotonic_bt;
    } else {
        // set basesleep to the difference PMU clock - sys
        clock_calend.basesleep = bt;
    }
    commpage_update_mach_continuous_time(mach_absolutetime_asleep);

#if DEVELOPMENT || DEBUG
    struct clock_calend clock_calend_cp = clock_calend;
#endif

    clock_unlock();
    splx(s);

    print_all_clock_variables(__func__, &secs, &microsecs, &sys, &microsys, &clock_calend_cp);

    /*
     * Send host notifications.
     */
    host_notify_calendar_change();

#if CONFIG_DTRACE
    clock_track_calend_nowait();
#endif
}


void
clock_wakeup_calendar(void)
{
    clock_sec_t         sys;
    clock_sec_t         secs;
    clock_usec_t        microsys;
    clock_usec_t        microsecs;
    spl_t               s;
    struct bintime      bt, last_sleep_bt;
    clock_sec_t         basesleep_s, last_sleep_sec;
    clock_usec_t        basesleep_us, last_sleep_usec;
    struct latched_time monotonic_time;
    uint64_t            monotonic_usec_total;
    size_t              size;
    clock_sec_t         secs_copy;
    clock_usec_t        microsecs_copy;
#if DEVELOPMENT || DEBUG
    clock_sec_t         utc_sec;
    clock_usec_t        utc_usec;
    PEGetUTCTimeOfDay(&utc_sec, &utc_usec);
#endif

    /*
     * If the platform has the monotonic clock, use that to
     * compute the sleep time. The monotonic clock does not have an offset
     * that can be modified, so neither the kernel nor userspace can change
     * the time of this clock; it can only monotonically increase over time.
     * During sleep mach_absolute_time does not tick,
     * so the sleep time is the difference between the current
     * (monotonic - absolute) time and the same difference stored at the last wake.
     *
     * basesleep = monotonic - sys ---> computed at last wake
     * sleep_time = (monotonic - sys) - basesleep
     *
     * If the platform does not support monotonic time we use the PMU time
     * to compute the last sleep.
     * The PMU time is the monotonic clock + an offset that can be set
     * by the kernel.
     *
     * IMPORTANT:
     * We assume that only the kernel is setting the offset of the PMU and that
     * it is doing it only through the settimeofday interface.
     *
     * basesleep is the difference between the PMU time and mach_absolute_time
     * at wake.
     * During awake time settimeofday can change the PMU offset by a delta,
     * and basesleep is shifted by the same delta applied to the PMU. So the sleep
     * time computation becomes:
     *
     * PMU = monotonic + PMU_offset
     * basesleep = PMU - sys ---> computed at last wake
     * basesleep += settimeofday_delta
     * PMU_offset += settimeofday_delta
     * sleep_time = (PMU - sys) - basesleep
     */
    if (has_monotonic_clock) {
        // Get monotonic time with corresponding sys time
        size = sizeof(monotonic_time);
        if (kernel_sysctlbyname("kern.monotonicclock_usecs", &monotonic_time, &size, NULL, 0) != 0) {
            panic("%s: could not call kern.monotonicclock_usecs", __func__);
        }
        monotonic_usec_total = monotonic_time.monotonic_time_usec;
        absolutetime_to_microtime(monotonic_time.mach_time, &sys, &microsys);

        secs = monotonic_usec_total / (clock_sec_t)USEC_PER_SEC;
        microsecs = monotonic_usec_total % (clock_usec_t)USEC_PER_SEC;
    } else {
        // Get PMU time with offset and corresponding sys time
        PEGetUTCTimeOfDay(&secs, &microsecs);
        clock_get_system_microtime(&sys, &microsys);
    }

    s = splclock();
    clock_lock();

    commpage_disable_timestamp();

    secs_copy = secs;
    microsecs_copy = microsecs;

#if DEVELOPMENT || DEBUG
    struct clock_calend clock_calend_cp1 = clock_calend;
#endif /* DEVELOPMENT || DEBUG */

#if DEVELOPMENT || DEBUG
    last_utc_sec = secs;
    last_utc_usec = microsecs;
    last_sys_sec = sys;
    last_sys_usec = microsys;
    if (secs > max_utc_sec)
        max_utc_sec = secs;
#endif

    /*
     * We normally expect the UTC clock to be always-on and produce
     * greater readings than the tick counter.  There may be corner cases
     * due to differing clock resolutions (UTC clock is likely lower) and
     * errors reading the UTC clock (some implementations return 0
     * on error) in which that doesn't hold true.  Bring the UTC measurements
     * in-line with the tick counter measurements as a best effort in that case.
     */
    //FIXME if the current time is prior to 1970 secs will be negative
    if ((sys > secs) || ((sys == secs) && (microsys > microsecs))) {
        os_log(OS_LOG_DEFAULT, "%s WARNING: %s is less than sys %s %lu s %d u sys %lu s %d u\n",
               __func__, (has_monotonic_clock)?"monotonic":"PMU", (has_monotonic_clock)?"monotonic":"PMU", (unsigned long)secs, microsecs, (unsigned long)sys, microsys);
        secs = sys;
        microsecs = microsys;
    }

    // PMU or monotonic - sys
    // This macro stores the subtraction result in secs and microsecs
    TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);
    clock2bintime(&secs, &microsecs, &bt);

    /*
     * Safety belt: the UTC clock will likely have a lower resolution than the tick counter.
     * It's also possible that the device didn't fully transition to the powered-off state on
     * the most recent sleep, so the tick counter may not have reset or may have only briefly
     * turned off.  In that case it's possible for the difference between the UTC clock and the
     * tick counter to be less than the previously recorded value in clock_calend.basesleep.
     * In that case simply record that we slept for 0 ticks.
     */
    if ((bt.sec > clock_calend.basesleep.sec) ||
        ((bt.sec == clock_calend.basesleep.sec) && (bt.frac > clock_calend.basesleep.frac))) {

        // last_sleep is the difference between the current (PMU or monotonic - abs) and the value at last wake
        last_sleep_bt = bt;
        bintime_sub(&last_sleep_bt, &clock_calend.basesleep);

        // set basesleep to the current (PMU or monotonic - abs)
        clock_calend.basesleep = bt;
        bintime2usclock(&last_sleep_bt, &last_sleep_sec, &last_sleep_usec);

        bintime2absolutetime(&last_sleep_bt, &mach_absolutetime_last_sleep);
        mach_absolutetime_asleep += mach_absolutetime_last_sleep;

        bintime_add(&clock_calend.offset, &last_sleep_bt);
        bintime_add(&clock_calend.bintime, &last_sleep_bt);

    } else {
        mach_absolutetime_last_sleep = 0;
        last_sleep_sec = last_sleep_usec = 0;
        bintime2usclock(&clock_calend.basesleep, &basesleep_s, &basesleep_us);
        os_log(OS_LOG_DEFAULT, "%s WARNING: basesleep (%lu s %d u) > %s-sys (%lu s %d u) \n",
               __func__, (unsigned long)basesleep_s, basesleep_us, (has_monotonic_clock)?"monotonic":"PMU", (unsigned long)secs_copy, microsecs_copy);
    }

    KERNEL_DEBUG_CONSTANT(
        MACHDBG_CODE(DBG_MACH_CLOCK, MACH_EPOCH_CHANGE) | DBG_FUNC_NONE,
        (uintptr_t)mach_absolutetime_last_sleep,
        (uintptr_t)mach_absolutetime_asleep,
        (uintptr_t)(mach_absolutetime_last_sleep >> 32),
        (uintptr_t)(mach_absolutetime_asleep >> 32),
        0);

    commpage_update_mach_continuous_time(mach_absolutetime_asleep);
    adjust_cont_time_thread_calls();

#if DEVELOPMENT || DEBUG
    struct clock_calend clock_calend_cp = clock_calend;
#endif

    clock_unlock();
    splx(s);

#if DEVELOPMENT || DEBUG
    if (g_should_log_clock_adjustments) {
        os_log(OS_LOG_DEFAULT, "PMU was %lu s %d u\n", (unsigned long)utc_sec, utc_usec);
        os_log(OS_LOG_DEFAULT, "last sleep was %lu s %d u\n", (unsigned long)last_sleep_sec, last_sleep_usec);
        print_all_clock_variables("clock_wakeup_calendar:BEFORE",
                                  &secs_copy, &microsecs_copy, &sys, &microsys, &clock_calend_cp1);
        print_all_clock_variables("clock_wakeup_calendar:AFTER", NULL, NULL, NULL, NULL, &clock_calend_cp);
    }
#endif /* DEVELOPMENT || DEBUG */

    host_notify_calendar_change();

#if CONFIG_DTRACE
    clock_track_calend_nowait();
#endif
}


/*
 * clock_get_boottime_nanotime:
 *
 * Return the boottime, used by sysctl.
 */
void
clock_get_boottime_nanotime(
    clock_sec_t     *secs,
    clock_nsec_t    *nanosecs)
{
    spl_t   s;

    s = splclock();
    clock_lock();

    *secs = (clock_sec_t)clock_boottime;
    *nanosecs = (clock_nsec_t)clock_boottime_usec * NSEC_PER_USEC;

    clock_unlock();
    splx(s);
}

/*
 * clock_get_boottime_microtime:
 *
 * Return the boottime, used by sysctl.
 */
void
clock_get_boottime_microtime(
    clock_sec_t     *secs,
    clock_usec_t    *microsecs)
{
    spl_t   s;

    s = splclock();
    clock_lock();

    *secs = (clock_sec_t)clock_boottime;
    *microsecs = (clock_nsec_t)clock_boottime_usec;

    clock_unlock();
    splx(s);
}


/*
 *      Wait / delay routines.
 */
static void
mach_wait_until_continue(
    __unused void   *parameter,
    wait_result_t   wresult)
{
    thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
    /*NOTREACHED*/
}

/*
 * mach_wait_until_trap: Suspend execution of the calling thread until the specified time has passed
 *
 * Parameters:    args->deadline      Absolute time (deadline) to wait until
 *
 * Returns:        0                  Success
 *                !0                  Not success
 *
 */
kern_return_t
mach_wait_until_trap(
    struct mach_wait_until_trap_args *args)
{
    uint64_t        deadline = args->deadline;
    wait_result_t   wresult;

    wresult = assert_wait_deadline_with_leeway((event_t)mach_wait_until_trap, THREAD_ABORTSAFE,
                                               TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
    if (wresult == THREAD_WAITING)
        wresult = thread_block(mach_wait_until_continue);

    return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
}

void
clock_delay_until(
    uint64_t        deadline)
{
    uint64_t        now = mach_absolute_time();

    if (now >= deadline)
        return;

    _clock_delay_until_deadline(deadline - now, deadline);
}

/*
 * Preserve the original precise interval that the client
 * requested for comparison to the spin threshold.
 */
void
_clock_delay_until_deadline(
    uint64_t        interval,
    uint64_t        deadline)
{
    _clock_delay_until_deadline_with_leeway(interval, deadline, 0);
}

/*
 * Like _clock_delay_until_deadline, but it accepts a
 * leeway value.
 */
void
_clock_delay_until_deadline_with_leeway(
    uint64_t        interval,
    uint64_t        deadline,
    uint64_t        leeway)
{
    if (interval == 0)
        return;

    if ( ml_delay_should_spin(interval)        ||
         get_preemption_level() != 0           ||
         ml_get_interrupts_enabled() == FALSE ) {
        machine_delay_until(interval, deadline);
    } else {
        /*
         * For now, assume a leeway request of 0 means the client does not want a leeway
         * value. We may want to change this interpretation in the future.
         */

        if (leeway) {
            assert_wait_deadline_with_leeway((event_t)clock_delay_until, THREAD_UNINT, TIMEOUT_URGENCY_LEEWAY, deadline, leeway);
        } else {
            assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline);
        }

        thread_block(THREAD_CONTINUE_NULL);
    }
}

void
delay_for_interval(
    uint32_t        interval,
    uint32_t        scale_factor)
{
    uint64_t        abstime;

    clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

    _clock_delay_until_deadline(abstime, mach_absolute_time() + abstime);
}

void
delay_for_interval_with_leeway(
    uint32_t        interval,
    uint32_t        leeway,
    uint32_t        scale_factor)
{
    uint64_t        abstime_interval;
    uint64_t        abstime_leeway;

    clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime_interval);
    clock_interval_to_absolutetime_interval(leeway, scale_factor, &abstime_leeway);

    _clock_delay_until_deadline_with_leeway(abstime_interval, mach_absolute_time() + abstime_interval, abstime_leeway);
}

void
delay(
    int     usec)
{
    delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
}

/*
 *      Miscellaneous routines.
 */
void
clock_interval_to_deadline(
    uint32_t        interval,
    uint32_t        scale_factor,
    uint64_t        *result)
{
    uint64_t    abstime;

    clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

    *result = mach_absolute_time() + abstime;
}

void
clock_absolutetime_interval_to_deadline(
    uint64_t        abstime,
    uint64_t        *result)
{
    *result = mach_absolute_time() + abstime;
}

void
clock_continuoustime_interval_to_deadline(
    uint64_t        conttime,
    uint64_t        *result)
{
    *result = mach_continuous_time() + conttime;
}

void
clock_get_uptime(
    uint64_t        *result)
{
    *result = mach_absolute_time();
}

void
clock_deadline_for_periodic_event(
    uint64_t        interval,
    uint64_t        abstime,
    uint64_t        *deadline)
{
    assert(interval != 0);

    *deadline += interval;

    if (*deadline <= abstime) {
        *deadline = abstime + interval;
        abstime = mach_absolute_time();

        if (*deadline <= abstime)
            *deadline = abstime + interval;
    }
}

uint64_t
mach_continuous_time(void)
{
    while (1) {
        uint64_t read1 = mach_absolutetime_asleep;
        uint64_t absolute = mach_absolute_time();
        OSMemoryBarrier();
        uint64_t read2 = mach_absolutetime_asleep;

        if (__builtin_expect(read1 == read2, 1)) {
            return absolute + read1;
        }
    }
}
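
/*
 * The double read of mach_absolutetime_asleep above is a lock-free
 * consistency check: for example, if a wakeup bumps the accumulated sleep
 * total between the two reads, read1 != read2 and the loop retries, so the
 * returned value is always absolute time plus a sleep total that was stable
 * across the sample.  mach_continuous_approximate_time() below uses the
 * same pattern with mach_approximate_time().
 */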

uint64_t
mach_continuous_approximate_time(void)
{
    while (1) {
        uint64_t read1 = mach_absolutetime_asleep;
        uint64_t absolute = mach_approximate_time();
        OSMemoryBarrier();
        uint64_t read2 = mach_absolutetime_asleep;

        if (__builtin_expect(read1 == read2, 1)) {
            return absolute + read1;
        }
    }
}

/*
 * continuoustime_to_absolutetime
 * Must be called with interrupts disabled
 * Returned value is only valid until the next update to
 * mach_continuous_time
 */
uint64_t
continuoustime_to_absolutetime(uint64_t conttime)
{
    if (conttime <= mach_absolutetime_asleep)
        return 0;
    else
        return conttime - mach_absolutetime_asleep;
}

/*
 * absolutetime_to_continuoustime
 * Must be called with interrupts disabled
 * Returned value is only valid until the next update to
 * mach_continuous_time
 */
uint64_t
absolutetime_to_continuoustime(uint64_t abstime)
{
    return abstime + mach_absolutetime_asleep;
}

#if CONFIG_DTRACE

/*
 * clock_get_calendar_nanotime_nowait
 *
 * Description: Non-blocking version of clock_get_calendar_nanotime()
 *
 * Notes:       This function operates by separately tracking calendar time
 *              updates using a two element structure to copy the calendar
 *              state, which may be asynchronously modified.  It utilizes
 *              barrier instructions in the tracking process and in the local
 *              stable snapshot process in order to ensure that a consistent
 *              snapshot is used to perform the calculation.
 */
void
clock_get_calendar_nanotime_nowait(
    clock_sec_t     *secs,
    clock_nsec_t    *nanosecs)
{
    int i = 0;
    uint64_t now;
    struct unlocked_clock_calend stable;
    struct bintime bt;

    for (;;) {
        stable = flipflop[i];       /* take snapshot */

        /*
         * Use a barrier instruction to ensure atomicity.  We AND
         * off the "in progress" bit to get the current generation
         * count.
         */
        (void)hw_atomic_and(&stable.gen, ~(uint32_t)1);

        /*
         * If an update _is_ in progress, the generation count will be
         * off by one, if it _was_ in progress, it will be off by two,
         * and if we caught it at a good time, it will be equal (and
         * our snapshot is therefore stable).
         */
        if (flipflop[i].gen == stable.gen)
            break;

        /* Switch to the other element of the flipflop, and try again. */
        i ^= 1;
    }

    now = mach_absolute_time();

    bt = get_scaled_time(now);

    bintime_add(&bt, &clock_calend.bintime);

    bintime2nsclock(&bt, secs, nanosecs);
}

static void
clock_track_calend_nowait(void)
{
    int i;

    for (i = 0; i < 2; i++) {
        struct clock_calend tmp = clock_calend;

        /*
         * Set the low bit of the generation count; since we use a
         * barrier instruction to do this, we are guaranteed that this
         * will flag an update in progress to an async caller trying
         * to examine the contents.
         */
        (void)hw_atomic_or(&flipflop[i].gen, 1);

        flipflop[i].calend = tmp;

        /*
         * Increment the generation count to clear the low bit to
         * signal completion.  If a caller compares the generation
         * count after taking a copy while in progress, the count
         * will be off by two.
         */
        (void)hw_atomic_add(&flipflop[i].gen, 1);
    }
}

#endif /* CONFIG_DTRACE */