/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 */

#include <mach/mach_types.h>

#include <kern/lock.h>
#include <kern/spl.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <kern/host_notify.h>

#include <IOKit/IOPlatformExpert.h>

#include <machine/commpage.h>

#include <mach/mach_traps.h>
#include <mach/mach_time.h>

uint32_t	hz_tick_interval = 1;


decl_simple_lock_data(,clock_lock)

#define clock_lock()	\
	simple_lock(&clock_lock)

#define clock_unlock()	\
	simple_unlock(&clock_lock)

#define clock_lock_init()	\
	simple_lock_init(&clock_lock, 0)


/*
 *	Time of day (calendar) variables.
 *
 *	Algorithm:
 *
 *	TOD <- (seconds + epoch, fraction) <- CONV(current absolute time + offset)
 *
 *	where CONV converts absolute time units into seconds and a fraction.
 */
static struct clock_calend {
	uint64_t	epoch;
	uint64_t	offset;

	int32_t		adjdelta;	/* Nanosecond time delta for this adjustment period */
	uint64_t	adjstart;	/* Absolute time value for start of this adjustment period */
	uint32_t	adjoffset;	/* Absolute time offset for this adjustment period as absolute value */
} clock_calend;

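/*
 * Illustrative sketch (not part of the kernel build): how the algorithm
 * above turns an absolute-time reading into a calendar value.  The helper
 * name below is hypothetical, and the user-space timebase API merely stands
 * in for the kernel's CONV step (absolutetime_to_microtime).
 */
#if 0	/* example only */
#include <stdint.h>
#include <mach/mach_time.h>

static void
example_calendar_from_abstime(uint64_t epoch_secs, uint64_t offset_abs,
    uint64_t *secs, uint32_t *microsecs)
{
	mach_timebase_info_data_t	tb;
	uint64_t			now, ns;

	mach_timebase_info(&tb);			/* ticks <-> ns conversion factors */

	now = mach_absolute_time() + offset_abs;	/* current absolute time + offset */
	ns = now * tb.numer / tb.denom;			/* CONV: ticks -> ns (naive; may overflow for large values) */

	*secs = epoch_secs + ns / 1000000000ULL;	/* seconds + epoch */
	*microsecs = (uint32_t)((ns % 1000000000ULL) / 1000ULL);
}
#endif
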
#if	CONFIG_DTRACE

/*
 *	Unlocked calendar flipflop; this is used to track a clock_calend such
 *	that we can safely access a snapshot of a valid clock_calend structure
 *	without needing to take any locks to do it.
 *
 *	The trick is to use a generation count and set the low bit when it is
 *	being updated/read; by doing this, we guarantee, through use of the
 *	hw_atomic functions, that the generation is incremented when the bit
 *	is cleared atomically (by using a 1 bit add).
 */
static struct unlocked_clock_calend {
	struct clock_calend	calend;		/* copy of calendar */
	uint32_t		gen;		/* generation count */
} flipflop[2];

static void clock_track_calend_nowait(void);

#endif

/*
 *	Calendar adjustment variables and values.
 */
#define calend_adjperiod	(NSEC_PER_SEC / 100)	/* adjustment period, ns */
#define calend_adjskew		(40 * NSEC_PER_USEC)	/* "standard" skew, ns / period */
#define	calend_adjbig		(NSEC_PER_SEC)		/* use 10x skew above adjbig ns */

static int64_t			calend_adjtotal;	/* Nanosecond remaining total adjustment */
static uint64_t			calend_adjdeadline;	/* Absolute time value for next adjustment period */
static uint32_t			calend_adjinterval;	/* Absolute time interval of adjustment period */

static timer_call_data_t	calend_adjcall;
static uint32_t			calend_adjactive;

static uint32_t		calend_set_adjustment(
				long		*secs,
				int		*microsecs);

static void		calend_adjust_call(void);
static uint32_t		calend_adjust(void);

static thread_call_data_t	calend_wakecall;

extern	void	IOKitResetTime(void);

static uint64_t		clock_boottime;		/* Seconds boottime epoch */

#define TIME_ADD(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN						\
	if (((rfrac) += (frac)) >= (unit)) {		\
		(rfrac) -= (unit);			\
		(rsecs) += 1;				\
	}						\
	(rsecs) += (secs);				\
MACRO_END

#define TIME_SUB(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN						\
	if ((int)((rfrac) -= (frac)) < 0) {		\
		(rfrac) += (unit);			\
		(rsecs) -= 1;				\
	}						\
	(rsecs) -= (secs);				\
MACRO_END

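/*
 * Illustrative sketch (not part of the kernel build): the carry/borrow
 * behaviour of TIME_ADD/TIME_SUB on a (seconds, fraction) pair.  Plain C
 * stand-ins are used because MACRO_BEGIN/MACRO_END are kernel-only; the
 * (int) cast mirrors the underflow-detection trick in TIME_SUB above.
 */
#if 0	/* example only */
#include <assert.h>
#include <stdint.h>

#define EX_USEC_PER_SEC	1000000

int
main(void)
{
	long		rsecs = 10;
	uint32_t	rfrac = 200000;		/* 10.200000 s */

	/* Subtract 3.700000 s: the fraction underflows, so borrow a second. */
	if ((int)(rfrac -= 700000) < 0) {
		rfrac += EX_USEC_PER_SEC;
		rsecs -= 1;
	}
	rsecs -= 3;

	assert(rsecs == 6 && rfrac == 500000);	/* 6.500000 s */
	return 0;
}
#endif
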
/*
 *	clock_config:
 *
 *	Called once at boot to configure the clock subsystem.
 */
void
clock_config(void)
{
	clock_lock_init();

	timer_call_setup(&calend_adjcall, (timer_call_func_t)calend_adjust_call, NULL);
	thread_call_setup(&calend_wakecall, (thread_call_func_t)IOKitResetTime, NULL);

	clock_oldconfig();
}

/*
 *	clock_init:
 *
 *	Called on a processor each time it is started.
 */
void
clock_init(void)
{
	clock_oldinit();
}

/*
 *	clock_timebase_init:
 *
 *	Called by machine dependent code
 *	to initialize areas dependent on the
 *	timebase value.  May be called multiple
 *	times during start up.
 */
void
clock_timebase_init(void)
{
	uint64_t	abstime;

	nanoseconds_to_absolutetime(calend_adjperiod, &abstime);
	calend_adjinterval = (uint32_t)abstime;

	nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
	hz_tick_interval = (uint32_t)abstime;

	sched_timebase_init();
}

/*
 *	mach_timebase_info_trap:
 *
 *	User trap returns timebase constant.
 */
kern_return_t
mach_timebase_info_trap(
	struct mach_timebase_info_trap_args *args)
{
	mach_vm_address_t		out_info_addr = args->info;
	mach_timebase_info_data_t	info;

	clock_timebase_info(&info);

	copyout((void *)&info, out_info_addr, sizeof (info));

	return (KERN_SUCCESS);
}
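
/*
 * Illustrative sketch (not part of the kernel build): the user-space view of
 * the timebase constant returned by this trap, exposed through the
 * mach_timebase_info() wrapper in libsystem.
 */
#if 0	/* example only */
#include <stdio.h>
#include <stdint.h>
#include <mach/mach_time.h>

int
main(void)
{
	mach_timebase_info_data_t	tb;
	uint64_t			start, elapsed_ns;

	mach_timebase_info(&tb);		/* fills in numer/denom */

	start = mach_absolute_time();
	/* ... work being timed ... */
	elapsed_ns = (mach_absolute_time() - start) * tb.numer / tb.denom;

	printf("elapsed: %llu ns (timebase %u/%u)\n",
	    (unsigned long long)elapsed_ns, tb.numer, tb.denom);
	return 0;
}
#endif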

/*
 *	Calendar routines.
 */

/*
 *	clock_get_calendar_microtime:
 *
 *	Returns the current calendar value,
 *	microseconds as the fraction.
 */
void
clock_get_calendar_microtime(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs)
{
	uint64_t		now;
	spl_t			s;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	if (clock_calend.adjdelta < 0) {
		uint32_t	t32;

		/*
		 * Since offset is decremented during a negative adjustment,
		 * ensure that time increases monotonically without going
		 * temporarily backwards.
		 * If the delta has not yet passed, now is set to the start
		 * of the current adjustment period; otherwise, we're between
		 * the expiry of the delta and the next call to calend_adjust(),
		 * and we offset accordingly.
		 */
		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}
	}

	now += clock_calend.offset;

	absolutetime_to_microtime(now, secs, microsecs);

	*secs += (clock_sec_t)clock_calend.epoch;

	clock_unlock();
	splx(s);
}
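
/*
 * Illustrative sketch (not part of the kernel build): the clamping logic
 * above, pulled out into a hypothetical standalone helper.  During a
 * negative adjustment the calendar offset has already been reduced by
 * adjoffset, so readings taken before the adjustment period has fully
 * elapsed are pinned to adjstart to keep calendar time monotonic.
 */
#if 0	/* example only */
#include <stdint.h>

static uint64_t
example_clamp_for_negative_adj(uint64_t now, uint64_t adjstart,
    uint32_t adjoffset)
{
	if (now > adjstart) {
		uint32_t t32 = (uint32_t)(now - adjstart);

		if (t32 > adjoffset)
			now -= adjoffset;	/* period expired: back out the full offset */
		else
			now = adjstart;		/* inside the period: pin to its start */
	}
	return now;
}
#endif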

/*
 *	clock_get_calendar_nanotime:
 *
 *	Returns the current calendar value,
 *	nanoseconds as the fraction.
 *
 *	Since we do not have an interface to
 *	set the calendar with resolution greater
 *	than a microsecond, we honor that here.
 */
void
clock_get_calendar_nanotime(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	uint64_t		now;
	spl_t			s;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	if (clock_calend.adjdelta < 0) {
		uint32_t	t32;

		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}
	}

	now += clock_calend.offset;

	absolutetime_to_microtime(now, secs, nanosecs);

	*nanosecs *= NSEC_PER_USEC;

	*secs += (clock_sec_t)clock_calend.epoch;

	clock_unlock();
	splx(s);
}

/*
 *	clock_gettimeofday:
 *
 *	Kernel interface for commpage implementation of
 *	gettimeofday() syscall.
 *
 *	Returns the current calendar value, and updates the
 *	commpage info as appropriate.  Because most calls to
 *	gettimeofday() are handled in user mode by the commpage,
 *	this routine should be used infrequently.
 */
void
clock_gettimeofday(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs)
{
	uint64_t		now;
	spl_t			s;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	if (clock_calend.adjdelta >= 0) {
		clock_gettimeofday_set_commpage(now, clock_calend.epoch, clock_calend.offset, secs, microsecs);
	}
	else {
		uint32_t	t32;

		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}

		now += clock_calend.offset;

		absolutetime_to_microtime(now, secs, microsecs);

		*secs += (clock_sec_t)clock_calend.epoch;
	}

	clock_unlock();
	splx(s);
}

/*
 *	clock_set_calendar_microtime:
 *
 *	Sets the current calendar value by
 *	recalculating the epoch and offset
 *	from the system clock.
 *
 *	Also adjusts the boottime to keep the
 *	value consistent, writes the new
 *	calendar value to the platform clock,
 *	and sends calendar change notifications.
 */
void
clock_set_calendar_microtime(
	clock_sec_t		secs,
	clock_usec_t		microsecs)
{
	clock_sec_t		sys;
	clock_usec_t		microsys;
	clock_sec_t		newsecs;
	spl_t			s;

	newsecs = (microsecs < 500*USEC_PER_SEC)? secs: secs + 1;

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	/*
	 *	Calculate the new calendar epoch based on
	 *	the new value and the system clock.
	 */
	clock_get_system_microtime(&sys, &microsys);
	TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

	/*
	 *	Adjust the boottime based on the delta.
	 */
	clock_boottime += secs - clock_calend.epoch;

	/*
	 *	Set the new calendar epoch.
	 */
	clock_calend.epoch = secs;

	nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

	/*
	 *	Cancel any adjustment in progress.
	 */
	calend_adjtotal = clock_calend.adjdelta = 0;

	clock_unlock();

	/*
	 *	Set the new value for the platform clock.
	 */
	PESetGMTTimeOfDay(newsecs);

	splx(s);

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}
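
/*
 * Illustrative sketch (not part of the kernel build): setting the calendar
 * from user space.  The BSD settimeofday(2) path is assumed to deliver the
 * seconds/microseconds shown here to clock_set_calendar_microtime(); the
 * call requires appropriate privileges.
 */
#if 0	/* example only */
#include <stdio.h>
#include <sys/time.h>

int
main(void)
{
	struct timeval tv = { .tv_sec = 1300000000, .tv_usec = 250000 };

	if (settimeofday(&tv, NULL) != 0) {
		perror("settimeofday");
		return 1;
	}
	return 0;
}
#endif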

/*
 *	clock_initialize_calendar:
 *
 *	Set the calendar and related clocks
 *	from the platform clock at boot or
 *	wake event.
 *
 *	Also sends host notifications.
 */
void
clock_initialize_calendar(void)
{
	clock_sec_t		sys, secs = PEGetGMTTimeOfDay();
	clock_usec_t		microsys, microsecs = 0;
	spl_t			s;

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	if ((long)secs >= (long)clock_boottime) {
		/*
		 *	Initialize the boot time based on the platform clock.
		 */
		if (clock_boottime == 0)
			clock_boottime = secs;

		/*
		 *	Calculate the new calendar epoch based on
		 *	the platform clock and the system clock.
		 */
		clock_get_system_microtime(&sys, &microsys);
		TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

		/*
		 *	Set the new calendar epoch.
		 */
		clock_calend.epoch = secs;

		nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

		/*
		 *	Cancel any adjustment in progress.
		 */
		calend_adjtotal = clock_calend.adjdelta = 0;
	}

	clock_unlock();
	splx(s);

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}

/*
 *	clock_get_boottime_nanotime:
 *
 *	Return the boottime, used by sysctl.
 */
void
clock_get_boottime_nanotime(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	spl_t	s;

	s = splclock();
	clock_lock();

	*secs = (clock_sec_t)clock_boottime;
	*nanosecs = 0;

	clock_unlock();
	splx(s);
}
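
/*
 * Illustrative sketch (not part of the kernel build): reading the boot time
 * from user space through the kern.boottime sysctl, which is assumed to be
 * backed by the clock_boottime value maintained here.
 */
#if 0	/* example only */
#include <stdio.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/time.h>

int
main(void)
{
	int		mib[2] = { CTL_KERN, KERN_BOOTTIME };
	struct timeval	boottime;
	size_t		len = sizeof(boottime);

	if (sysctl(mib, 2, &boottime, &len, NULL, 0) != 0) {
		perror("sysctl");
		return 1;
	}
	printf("boot epoch: %ld s\n", (long)boottime.tv_sec);
	return 0;
}
#endif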

/*
 *	clock_adjtime:
 *
 *	Interface to adjtime() syscall.
 *
 *	Calculates adjustment variables and
 *	initiates adjustment.
 */
void
clock_adjtime(
	long		*secs,
	int		*microsecs)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	clock_lock();

	interval = calend_set_adjustment(secs, microsecs);
	if (interval != 0) {
		calend_adjdeadline = mach_absolute_time() + interval;
		if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_CRITICAL))
			calend_adjactive++;
	}
	else
	if (timer_call_cancel(&calend_adjcall))
		calend_adjactive--;

	clock_unlock();
	splx(s);
}
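
/*
 * Illustrative sketch (not part of the kernel build): requesting a gradual
 * correction from user space.  The BSD adjtime(2) call is assumed to feed
 * its seconds/microseconds delta into clock_adjtime() above; the previous,
 * still-uncorrected delta is returned through the second argument.
 */
#if 0	/* example only */
#include <stdio.h>
#include <sys/time.h>

int
main(void)
{
	struct timeval delta = { .tv_sec = 0, .tv_usec = -150000 };	/* slew back 150 ms */
	struct timeval olddelta;

	if (adjtime(&delta, &olddelta) != 0) {
		perror("adjtime");
		return 1;
	}
	printf("previous adjustment remaining: %ld s, %d us\n",
	    (long)olddelta.tv_sec, (int)olddelta.tv_usec);
	return 0;
}
#endif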

static uint32_t
calend_set_adjustment(
	long			*secs,
	int			*microsecs)
{
	uint64_t		now, t64;
	int64_t			total, ototal;
	uint32_t		interval = 0;

	/*
	 * Compute the total adjustment time in nanoseconds.
	 */
	total = (int64_t)*secs * NSEC_PER_SEC + *microsecs * NSEC_PER_USEC;

	/*
	 * Disable commpage gettimeofday().
	 */
	commpage_disable_timestamp();

	/*
	 * Get current absolute time.
	 */
	now = mach_absolute_time();

	/*
	 * Save the old adjustment total for later return.
	 */
	ototal = calend_adjtotal;

	/*
	 * Is a new correction specified?
	 */
	if (total != 0) {
		/*
		 * Set delta to the standard, small, adjustment skew.
		 */
		int32_t		delta = calend_adjskew;

		if (total > 0) {
			/*
			 * Positive adjustment.  If greater than the preset 'big'
			 * threshold, slew at a faster rate, capping if necessary.
			 */
			if (total > calend_adjbig)
				delta *= 10;
			if (delta > total)
				delta = (int32_t)total;

			/*
			 * Convert the delta back from ns to absolute time and store in adjoffset.
			 */
			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}
		else {
			/*
			 * Negative adjustment; therefore, negate the delta.  If
			 * greater than the preset 'big' threshold, slew at a faster
			 * rate, capping if necessary.
			 */
			if (total < -calend_adjbig)
				delta *= 10;
			delta = -delta;
			if (delta < total)
				delta = (int32_t)total;

			/*
			 * Save the current absolute time.  Subsequent time operations occurring
			 * during this negative correction can make use of this value to ensure
			 * that time increases monotonically.
			 */
			clock_calend.adjstart = now;

			/*
			 * Convert the delta back from ns to absolute time and store in adjoffset.
			 */
			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}

		/*
		 * Store the total adjustment time in ns.
		 */
		calend_adjtotal = total;

		/*
		 * Store the delta for this adjustment period in ns.
		 */
		clock_calend.adjdelta = delta;

		/*
		 * Set the interval in absolute time for later return.
		 */
		interval = calend_adjinterval;
	}
	else {
		/*
		 * No change; clear any prior adjustment.
		 */
		calend_adjtotal = clock_calend.adjdelta = 0;
	}

	/*
	 * If a prior correction was in progress, return the
	 * remaining uncorrected time from it.
	 */
	if (ototal != 0) {
		*secs = (long)(ototal / NSEC_PER_SEC);
		*microsecs = (int)((ototal % NSEC_PER_SEC) / NSEC_PER_USEC);
	}
	else
		*secs = *microsecs = 0;

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif

	return (interval);
}
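
/*
 * Illustrative sketch (not part of the kernel build): the slew rates implied
 * by the constants above.  The standard skew applies 40 us of correction per
 * 10 ms adjustment period (0.4%); corrections larger than one second use ten
 * times that skew (4%).  The numbers below are a worked example only.
 */
#if 0	/* example only */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	const int64_t	adjperiod_ns = 1000000000LL / 100;	/* 10 ms, as in calend_adjperiod */
	const int64_t	adjskew_ns   = 40 * 1000LL;		/* 40 us, as in calend_adjskew */
	const int64_t	adjbig_ns    = 1000000000LL;		/* 1 s, as in calend_adjbig */

	int64_t		total = 500 * 1000000LL;		/* requested correction: +500 ms */
	int64_t		delta = (total > adjbig_ns) ? adjskew_ns * 10 : adjskew_ns;

	int64_t		periods = (total + delta - 1) / delta;	/* adjustment periods needed */
	int64_t		wall_ns = periods * adjperiod_ns;	/* wall-clock time to complete */

	assert(periods == 12500);				/* 500 ms / 40 us */
	assert(wall_ns == 125 * 1000000000LL);			/* roughly 125 seconds */
	return 0;
}
#endif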

static void
calend_adjust_call(void)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	clock_lock();

	if (--calend_adjactive == 0) {
		interval = calend_adjust();
		if (interval != 0) {
			clock_deadline_for_periodic_event(interval, mach_absolute_time(), &calend_adjdeadline);

			if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_CRITICAL))
				calend_adjactive++;
		}
	}

	clock_unlock();
	splx(s);
}

static uint32_t
calend_adjust(void)
{
	uint64_t		now, t64;
	int32_t			delta;
	uint32_t		interval = 0;

	commpage_disable_timestamp();

	now = mach_absolute_time();

	delta = clock_calend.adjdelta;

	if (delta > 0) {
		clock_calend.offset += clock_calend.adjoffset;

		calend_adjtotal -= delta;
		if (delta > calend_adjtotal) {
			clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;

			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}
	}
	else
	if (delta < 0) {
		clock_calend.offset -= clock_calend.adjoffset;

		calend_adjtotal -= delta;
		if (delta < calend_adjtotal) {
			clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;

			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}

		if (clock_calend.adjdelta != 0)
			clock_calend.adjstart = now;
	}

	if (clock_calend.adjdelta != 0)
		interval = calend_adjinterval;

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif

	return (interval);
}

/*
 *	clock_wakeup_calendar:
 *
 *	Interface to power management, used
 *	to initiate the reset of the calendar
 *	on wake from sleep event.
 */
void
clock_wakeup_calendar(void)
{
	thread_call_enter(&calend_wakecall);
}

/*
 *	Wait / delay routines.
 */
static void
mach_wait_until_continue(
	__unused void	*parameter,
	wait_result_t	wresult)
{
	thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
	/*NOTREACHED*/
}

kern_return_t
mach_wait_until_trap(
	struct mach_wait_until_trap_args	*args)
{
	uint64_t		deadline = args->deadline;
	wait_result_t		wresult;

	wresult = assert_wait_deadline((event_t)mach_wait_until_trap, THREAD_ABORTSAFE, deadline);
	if (wresult == THREAD_WAITING)
		wresult = thread_block(mach_wait_until_continue);

	return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
}
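
/*
 * Illustrative sketch (not part of the kernel build): the user-space wrapper
 * for this trap, mach_wait_until(), which sleeps until an absolute-time
 * deadline expressed in mach_absolute_time() units.  The helper name below
 * is hypothetical.
 */
#if 0	/* example only */
#include <stdint.h>
#include <mach/mach_time.h>

static void
example_sleep_ns(uint64_t ns)
{
	mach_timebase_info_data_t	tb;
	uint64_t			deadline;

	mach_timebase_info(&tb);

	/* Convert the nanosecond interval to absolute-time units. */
	deadline = mach_absolute_time() + ns * tb.denom / tb.numer;

	(void)mach_wait_until(deadline);	/* KERN_ABORTED if interrupted */
}
#endif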

void
clock_delay_until(
	uint64_t		deadline)
{
	uint64_t		now = mach_absolute_time();

	if (now >= deadline)
		return;

	if (	(deadline - now) < (8 * sched_cswtime)	||
			get_preemption_level() != 0	||
			ml_get_interrupts_enabled() == FALSE	)
		machine_delay_until(deadline);
	else {
		assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline - sched_cswtime);

		thread_block(THREAD_CONTINUE_NULL);
	}
}

void
delay_for_interval(
	uint32_t		interval,
	uint32_t		scale_factor)
{
	uint64_t		end;

	clock_interval_to_deadline(interval, scale_factor, &end);

	clock_delay_until(end);
}

void
delay(
	int		usec)
{
	delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
}

/*
 *	Miscellaneous routines.
 */
void
clock_interval_to_deadline(
	uint32_t		interval,
	uint32_t		scale_factor,
	uint64_t		*result)
{
	uint64_t	abstime;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	*result = mach_absolute_time() + abstime;
}

void
clock_absolutetime_interval_to_deadline(
	uint64_t		abstime,
	uint64_t		*result)
{
	*result = mach_absolute_time() + abstime;
}

void
clock_get_uptime(
	uint64_t	*result)
{
	*result = mach_absolute_time();
}

void
clock_deadline_for_periodic_event(
	uint64_t		interval,
	uint64_t		abstime,
	uint64_t		*deadline)
{
	assert(interval != 0);

	*deadline += interval;

	if (*deadline <= abstime) {
		*deadline = abstime + interval;
		abstime = mach_absolute_time();

		if (*deadline <= abstime)
			*deadline = abstime + interval;
	}
}

#if	CONFIG_DTRACE

/*
 * clock_get_calendar_nanotime_nowait
 *
 * Description:	Non-blocking version of clock_get_calendar_nanotime()
 *
 * Notes:	This function operates by separately tracking calendar time
 *		updates using a two-element structure to copy the calendar
 *		state, which may be asynchronously modified.  It utilizes
 *		barrier instructions in the tracking process and in the local
 *		stable snapshot process in order to ensure that a consistent
 *		snapshot is used to perform the calculation.
 */
void
clock_get_calendar_nanotime_nowait(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	int i = 0;
	uint64_t		now;
	struct unlocked_clock_calend stable;

	for (;;) {
		stable = flipflop[i];		/* take snapshot */

		/*
		 * Use a barrier instruction to ensure atomicity.  We AND
		 * off the "in progress" bit to get the current generation
		 * count.
		 */
		(void)hw_atomic_and(&stable.gen, ~(uint32_t)1);

		/*
		 * If an update _is_ in progress, the generation count will be
		 * off by one; if it _was_ in progress, it will be off by two;
		 * and if we caught it at a good time, it will be equal (and
		 * our snapshot is therefore stable).
		 */
		if (flipflop[i].gen == stable.gen)
			break;

		/* Switch to the other element of the flipflop, and try again. */
		i ^= 1;
	}

	now = mach_absolute_time();

	if (stable.calend.adjdelta < 0) {
		uint32_t	t32;

		if (now > stable.calend.adjstart) {
			t32 = (uint32_t)(now - stable.calend.adjstart);

			if (t32 > stable.calend.adjoffset)
				now -= stable.calend.adjoffset;
			else
				now = stable.calend.adjstart;
		}
	}

	now += stable.calend.offset;

	absolutetime_to_microtime(now, secs, nanosecs);
	*nanosecs *= NSEC_PER_USEC;

	*secs += (clock_sec_t)stable.calend.epoch;
}

static void
clock_track_calend_nowait(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		struct clock_calend tmp = clock_calend;

		/*
		 * Set the low bit of the generation count; since we use a
		 * barrier instruction to do this, we are guaranteed that this
		 * will flag an update in progress to an async caller trying
		 * to examine the contents.
		 */
		(void)hw_atomic_or(&flipflop[i].gen, 1);

		flipflop[i].calend = tmp;

		/*
		 * Increment the generation count to clear the low bit to
		 * signal completion.  If a caller compares the generation
		 * count after taking a copy while in progress, the count
		 * will be off by two.
		 */
		(void)hw_atomic_add(&flipflop[i].gen, 1);
	}
}

#endif	/* CONFIG_DTRACE */