[apple/xnu.git] / osfmk / kern / clock.c (xnu-2782.40.9)
/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 */

#include <mach/mach_types.h>

#include <kern/spl.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <kern/host_notify.h>

#include <IOKit/IOPlatformExpert.h>

#include <machine/commpage.h>

#include <mach/mach_traps.h>
#include <mach/mach_time.h>

uint32_t	hz_tick_interval = 1;


decl_simple_lock_data(,clock_lock)

#define clock_lock()	\
	simple_lock(&clock_lock)

#define clock_unlock()	\
	simple_unlock(&clock_lock)

#define clock_lock_init()	\
	simple_lock_init(&clock_lock, 0)


/*
 *	Time of day (calendar) variables.
 *
 *	Algorithm:
 *
 *	TOD <- (seconds + epoch, fraction) <- CONV(current absolute time + offset)
 *
 *	where CONV converts absolute time units into seconds and a fraction.
 */
static struct clock_calend {
	uint64_t	epoch;
	uint64_t	offset;

	int32_t		adjdelta;	/* Nanosecond time delta for this adjustment period */
	uint64_t	adjstart;	/* Absolute time value for start of this adjustment period */
	uint32_t	adjoffset;	/* Absolute time offset for this adjustment period as absolute value */
} clock_calend;

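/*
 * Editor's sketch (not part of the original file): how the algorithm above
 * yields calendar time, with locking and slew handling omitted.  The
 * absolutetime_to_microtime() call is the CONV step; the function name is
 * hypothetical and for illustration only.
 */
#if 0	/* illustrative only */
static void
example_calendar_read(clock_sec_t *secs, clock_usec_t *microsecs)
{
	/* current absolute time, shifted by the calendar offset */
	uint64_t now = mach_absolute_time() + clock_calend.offset;

	/* CONV: absolute time units -> seconds and a fraction */
	absolutetime_to_microtime(now, secs, microsecs);

	/* add the epoch to obtain calendar seconds */
	*secs += (clock_sec_t)clock_calend.epoch;
}
#endif
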
#if	CONFIG_DTRACE

/*
 * Unlocked calendar flipflop; this is used to track a clock_calend such
 * that we can safely access a snapshot of a valid clock_calend structure
 * without needing to take any locks to do it.
 *
 * The trick is to use a generation count and set the low bit when it is
 * being updated/read; by doing this, we guarantee, through use of the
 * hw_atomic functions, that the generation is incremented when the bit
 * is cleared atomically (by using a 1 bit add).
 */
static struct unlocked_clock_calend {
	struct clock_calend	calend;		/* copy of calendar */
	uint32_t		gen;		/* generation count */
} flipflop[2];

static void clock_track_calend_nowait(void);

#endif

/*
 * Calendar adjustment variables and values.
 */
#define calend_adjperiod	(NSEC_PER_SEC / 100)	/* adjustment period, ns */
#define calend_adjskew		(40 * NSEC_PER_USEC)	/* "standard" skew, ns / period */
#define calend_adjbig		(NSEC_PER_SEC)		/* use 10x skew above adjbig ns */

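/*
 * Editor's note (not in the original file): with a 10 ms adjustment period
 * (NSEC_PER_SEC / 100) and the standard 40 us skew, adjtime() corrections
 * slew at 40 us per 10 ms, i.e. 4 ms per second of real time.  Corrections
 * larger than calend_adjbig (1 s) slew ten times faster, 40 ms per second.
 */
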
static int64_t			calend_adjtotal;	/* Nanosecond remaining total adjustment */
static uint64_t			calend_adjdeadline;	/* Absolute time value for next adjustment period */
static uint32_t			calend_adjinterval;	/* Absolute time interval of adjustment period */

static timer_call_data_t	calend_adjcall;
static uint32_t			calend_adjactive;

static uint32_t		calend_set_adjustment(
				long		*secs,
				int		*microsecs);

static void		calend_adjust_call(void);
static uint32_t		calend_adjust(void);

static thread_call_data_t	calend_wakecall;

extern	void	IOKitResetTime(void);

void _clock_delay_until_deadline(uint64_t	interval,
				 uint64_t	deadline);

static uint64_t		clock_boottime;		/* Seconds boottime epoch */

#define TIME_ADD(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN						\
	if (((rfrac) += (frac)) >= (unit)) {		\
		(rfrac) -= (unit);			\
		(rsecs) += 1;				\
	}						\
	(rsecs) += (secs);				\
MACRO_END

#define TIME_SUB(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN						\
	if ((int)((rfrac) -= (frac)) < 0) {		\
		(rfrac) += (unit);			\
		(rsecs) -= 1;				\
	}						\
	(rsecs) -= (secs);				\
MACRO_END
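
/*
 * Editor's worked example (not in the original file): TIME_SUB with a
 * borrow.  Subtracting 1.300000 s from 2.100000 s:
 *
 *	clock_sec_t  rsecs = 2;
 *	clock_usec_t rfrac = 100000;
 *	TIME_SUB(rsecs, 1, rfrac, 300000, USEC_PER_SEC);
 *
 * rfrac -= 300000 goes negative, so the macro borrows: rfrac += 1000000
 * (giving 800000) and rsecs -= 1; the final rsecs -= 1 yields 0.800000 s.
 */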

/*
 *	clock_config:
 *
 *	Called once at boot to configure the clock subsystem.
 */
void
clock_config(void)
{
	clock_lock_init();

	timer_call_setup(&calend_adjcall, (timer_call_func_t)calend_adjust_call, NULL);
	thread_call_setup(&calend_wakecall, (thread_call_func_t)IOKitResetTime, NULL);

	clock_oldconfig();
}

/*
 *	clock_init:
 *
 *	Called on a processor each time started.
 */
void
clock_init(void)
{
	clock_oldinit();
}

/*
 *	clock_timebase_init:
 *
 *	Called by machine dependent code
 *	to initialize areas dependent on the
 *	timebase value.  May be called multiple
 *	times during start up.
 */
void
clock_timebase_init(void)
{
	uint64_t	abstime;

	nanoseconds_to_absolutetime(calend_adjperiod, &abstime);
	calend_adjinterval = (uint32_t)abstime;

	nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
	hz_tick_interval = (uint32_t)abstime;

	sched_timebase_init();
}

/*
 *	mach_timebase_info_trap:
 *
 *	User trap returns timebase constant.
 */
kern_return_t
mach_timebase_info_trap(
	struct mach_timebase_info_trap_args *args)
{
	mach_vm_address_t		out_info_addr = args->info;
	mach_timebase_info_data_t	info;

	clock_timebase_info(&info);

	copyout((void *)&info, out_info_addr, sizeof (info));

	return (KERN_SUCCESS);
}
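
/*
 * Editor's usage sketch (not part of this file): user space reaches this
 * trap through mach_timebase_info(), and the returned numer/denom pair
 * converts mach_absolute_time() ticks to nanoseconds:
 *
 *	#include <mach/mach_time.h>
 *
 *	mach_timebase_info_data_t tb;
 *	mach_timebase_info(&tb);
 *	uint64_t ns = mach_absolute_time() * tb.numer / tb.denom;
 */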

/*
 *	Calendar routines.
 */

/*
 *	clock_get_calendar_microtime:
 *
 *	Returns the current calendar value,
 *	microseconds as the fraction.
 */
void
clock_get_calendar_microtime(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs)
{
	clock_get_calendar_absolute_and_microtime(secs, microsecs, NULL);
}

/*
 *	clock_get_calendar_absolute_and_microtime:
 *
 *	Returns the current calendar value,
 *	microseconds as the fraction.  Also
 *	returns mach_absolute_time if abstime
 *	is not NULL.
 */
void
clock_get_calendar_absolute_and_microtime(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs,
	uint64_t		*abstime)
{
	uint64_t	now;
	spl_t		s;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();
	if (abstime)
		*abstime = now;

	if (clock_calend.adjdelta < 0) {
		uint32_t	t32;

		/*
		 * Since offset is decremented during a negative adjustment,
		 * ensure that time increases monotonically without going
		 * temporarily backwards.
		 * If the delta has not yet passed, now is set to the start
		 * of the current adjustment period; otherwise, we're between
		 * the expiry of the delta and the next call to calend_adjust(),
		 * and we offset accordingly.
		 */
		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}
	}

	now += clock_calend.offset;

	absolutetime_to_microtime(now, secs, microsecs);

	*secs += (clock_sec_t)clock_calend.epoch;

	clock_unlock();
	splx(s);
}

/*
 *	clock_get_calendar_nanotime:
 *
 *	Returns the current calendar value,
 *	nanoseconds as the fraction.
 *
 *	Since we do not have an interface to
 *	set the calendar with resolution greater
 *	than a microsecond, we honor that here.
 */
void
clock_get_calendar_nanotime(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	uint64_t	now;
	spl_t		s;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	if (clock_calend.adjdelta < 0) {
		uint32_t	t32;

		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}
	}

	now += clock_calend.offset;

	absolutetime_to_microtime(now, secs, nanosecs);

	*nanosecs *= NSEC_PER_USEC;

	*secs += (clock_sec_t)clock_calend.epoch;

	clock_unlock();
	splx(s);
}

/*
 *	clock_gettimeofday:
 *
 *	Kernel interface for commpage implementation of
 *	gettimeofday() syscall.
 *
 *	Returns the current calendar value, and updates the
 *	commpage info as appropriate.  Because most calls to
 *	gettimeofday() are handled in user mode by the commpage,
 *	this routine should be used infrequently.
 */
void
clock_gettimeofday(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs)
{
	uint64_t	now;
	spl_t		s;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	if (clock_calend.adjdelta >= 0) {
		clock_gettimeofday_set_commpage(now, clock_calend.epoch, clock_calend.offset, secs, microsecs);
	}
	else {
		uint32_t	t32;

		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}

		now += clock_calend.offset;

		absolutetime_to_microtime(now, secs, microsecs);

		*secs += (clock_sec_t)clock_calend.epoch;
	}

	clock_unlock();
	splx(s);
}

/*
 *	clock_set_calendar_microtime:
 *
 *	Sets the current calendar value by
 *	recalculating the epoch and offset
 *	from the system clock.
 *
 *	Also adjusts the boottime to keep the
 *	value consistent, writes the new
 *	calendar value to the platform clock,
 *	and sends calendar change notifications.
 */
void
clock_set_calendar_microtime(
	clock_sec_t		secs,
	clock_usec_t		microsecs)
{
	clock_sec_t		sys;
	clock_usec_t		microsys;
	clock_sec_t		newsecs;
	clock_usec_t		newmicrosecs;
	spl_t			s;

	newsecs = secs;
	newmicrosecs = microsecs;

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	/*
	 * Calculate the new calendar epoch based on
	 * the new value and the system clock.
	 */
	clock_get_system_microtime(&sys, &microsys);
	TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

	/*
	 * Adjust the boottime based on the delta.
	 */
	clock_boottime += secs - clock_calend.epoch;

	/*
	 * Set the new calendar epoch.
	 */
	clock_calend.epoch = secs;

	nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

	/*
	 * Cancel any adjustment in progress.
	 */
	calend_adjtotal = clock_calend.adjdelta = 0;

	clock_unlock();

	/*
	 * Set the new value for the platform clock.
	 */
	PESetUTCTimeOfDay(newsecs, newmicrosecs);

	splx(s);

	/*
	 * Send host notifications.
	 */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}

/*
 *	clock_initialize_calendar:
 *
 *	Set the calendar and related clocks
 *	from the platform clock at boot or
 *	wake event.
 *
 *	Also sends host notifications.
 */
void
clock_initialize_calendar(void)
{
	clock_sec_t		sys, secs;
	clock_usec_t		microsys, microsecs;
	spl_t			s;

	PEGetUTCTimeOfDay(&secs, &microsecs);

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	if ((long)secs >= (long)clock_boottime) {
		/*
		 * Initialize the boot time based on the platform clock.
		 */
		if (clock_boottime == 0)
			clock_boottime = secs;

		/*
		 * Calculate the new calendar epoch based on
		 * the platform clock and the system clock.
		 */
		clock_get_system_microtime(&sys, &microsys);
		TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

		/*
		 * Set the new calendar epoch.
		 */
		clock_calend.epoch = secs;

		nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

		/*
		 * Cancel any adjustment in progress.
		 */
		calend_adjtotal = clock_calend.adjdelta = 0;
	}

	clock_unlock();
	splx(s);

	/*
	 * Send host notifications.
	 */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}

/*
 *	clock_get_boottime_nanotime:
 *
 *	Return the boottime, used by sysctl.
 */
void
clock_get_boottime_nanotime(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	spl_t	s;

	s = splclock();
	clock_lock();

	*secs = (clock_sec_t)clock_boottime;
	*nanosecs = 0;

	clock_unlock();
	splx(s);
}

/*
 *	clock_adjtime:
 *
 *	Interface to adjtime() syscall.
 *
 *	Calculates adjustment variables and
 *	initiates adjustment.
 */
void
clock_adjtime(
	long		*secs,
	int		*microsecs)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	clock_lock();

	interval = calend_set_adjustment(secs, microsecs);
	if (interval != 0) {
		calend_adjdeadline = mach_absolute_time() + interval;
		if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_SYS_CRITICAL))
			calend_adjactive++;
	}
	else
	if (timer_call_cancel(&calend_adjcall))
		calend_adjactive--;

	clock_unlock();
	splx(s);
}

static uint32_t
calend_set_adjustment(
	long		*secs,
	int		*microsecs)
{
	uint64_t	now, t64;
	int64_t		total, ototal;
	uint32_t	interval = 0;

	/*
	 * Compute the total adjustment time in nanoseconds.
	 */
	total = ((int64_t)*secs * (int64_t)NSEC_PER_SEC) + (*microsecs * (int64_t)NSEC_PER_USEC);

	/*
	 * Disable commpage gettimeofday().
	 */
	commpage_disable_timestamp();

	/*
	 * Get current absolute time.
	 */
	now = mach_absolute_time();

	/*
	 * Save the old adjustment total for later return.
	 */
	ototal = calend_adjtotal;

	/*
	 * Is a new correction specified?
	 */
	if (total != 0) {
		/*
		 * Set delta to the standard, small, adjustment skew.
		 */
		int32_t		delta = calend_adjskew;

		if (total > 0) {
			/*
			 * Positive adjustment. If greater than the preset 'big'
			 * threshold, slew at a faster rate, capping if necessary.
			 */
			if (total > (int64_t) calend_adjbig)
				delta *= 10;
			if (delta > total)
				delta = (int32_t)total;

			/*
			 * Convert the delta back from ns to absolute time and store in adjoffset.
			 */
			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}
		else {
			/*
			 * Negative adjustment; therefore, negate the delta. If
			 * greater than the preset 'big' threshold, slew at a faster
			 * rate, capping if necessary.
			 */
			if (total < (int64_t) -calend_adjbig)
				delta *= 10;
			delta = -delta;
			if (delta < total)
				delta = (int32_t)total;

			/*
			 * Save the current absolute time. Subsequent time operations occurring
			 * during this negative correction can make use of this value to ensure
			 * that time increases monotonically.
			 */
			clock_calend.adjstart = now;

			/*
			 * Convert the delta back from ns to absolute time and store in adjoffset.
			 */
			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}

		/*
		 * Store the total adjustment time in ns.
		 */
		calend_adjtotal = total;

		/*
		 * Store the delta for this adjustment period in ns.
		 */
		clock_calend.adjdelta = delta;

		/*
		 * Set the interval in absolute time for later return.
		 */
		interval = calend_adjinterval;
	}
	else {
		/*
		 * No change; clear any prior adjustment.
		 */
		calend_adjtotal = clock_calend.adjdelta = 0;
	}

	/*
	 * If a prior correction was in progress, return the
	 * remaining uncorrected time from it.
	 */
	if (ototal != 0) {
		*secs = (long)(ototal / (long)NSEC_PER_SEC);
		*microsecs = (int)((ototal % (int)NSEC_PER_SEC) / (int)NSEC_PER_USEC);
	}
	else
		*secs = *microsecs = 0;

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif

	return (interval);
}
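
/*
 * Editor's worked example (not in the original file): adjtime() of +2 s.
 * total = 2e9 ns exceeds calend_adjbig (1e9 ns), so delta becomes
 * 10 * 40 us = 400 us per 10 ms period; the full correction completes in
 * (2e9 / 4e5) periods * 10 ms = 50 seconds of slewing.
 */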

static void
calend_adjust_call(void)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	clock_lock();

	if (--calend_adjactive == 0) {
		interval = calend_adjust();
		if (interval != 0) {
			clock_deadline_for_periodic_event(interval, mach_absolute_time(), &calend_adjdeadline);

			if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_SYS_CRITICAL))
				calend_adjactive++;
		}
	}

	clock_unlock();
	splx(s);
}

static uint32_t
calend_adjust(void)
{
	uint64_t	now, t64;
	int32_t		delta;
	uint32_t	interval = 0;

	commpage_disable_timestamp();

	now = mach_absolute_time();

	delta = clock_calend.adjdelta;

	if (delta > 0) {
		clock_calend.offset += clock_calend.adjoffset;

		calend_adjtotal -= delta;
		if (delta > calend_adjtotal) {
			clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;

			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}
	}
	else
	if (delta < 0) {
		clock_calend.offset -= clock_calend.adjoffset;

		calend_adjtotal -= delta;
		if (delta < calend_adjtotal) {
			clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;

			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}

		if (clock_calend.adjdelta != 0)
			clock_calend.adjstart = now;
	}

	if (clock_calend.adjdelta != 0)
		interval = calend_adjinterval;

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif

	return (interval);
}

/*
 *	clock_wakeup_calendar:
 *
 *	Interface to power management, used
 *	to initiate the reset of the calendar
 *	on wake from sleep event.
 */
void
clock_wakeup_calendar(void)
{
	thread_call_enter(&calend_wakecall);
}

/*
 *	Wait / delay routines.
 */
static void
mach_wait_until_continue(
	__unused void	*parameter,
	wait_result_t	wresult)
{
	thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
	/*NOTREACHED*/
}

/*
 * mach_wait_until_trap: Suspend execution of calling thread until the specified time has passed
 *
 * Parameters:	args->deadline		Absolute time (in mach_absolute_time() units) to wait until
 *
 * Returns:	0			Success
 *		!0			Not success
 *
 */
kern_return_t
mach_wait_until_trap(
	struct mach_wait_until_trap_args	*args)
{
	uint64_t	deadline = args->deadline;
	wait_result_t	wresult;

	wresult = assert_wait_deadline_with_leeway((event_t)mach_wait_until_trap, THREAD_ABORTSAFE,
						   TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
	if (wresult == THREAD_WAITING)
		wresult = thread_block(mach_wait_until_continue);

	return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
}
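
/*
 * Editor's usage sketch (not part of this file): user space reaches this
 * trap through mach_wait_until(), which sleeps until an absolute deadline:
 *
 *	#include <mach/mach_time.h>
 *
 *	mach_timebase_info_data_t tb;
 *	mach_timebase_info(&tb);
 *	uint64_t one_sec = 1000000000ull * tb.denom / tb.numer;
 *	mach_wait_until(mach_absolute_time() + one_sec);
 */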

void
clock_delay_until(
	uint64_t		deadline)
{
	uint64_t		now = mach_absolute_time();

	if (now >= deadline)
		return;

	_clock_delay_until_deadline(deadline - now, deadline);
}

/*
 * Preserve the original precise interval that the client
 * requested for comparison to the spin threshold.
 */
void
_clock_delay_until_deadline(
	uint64_t		interval,
	uint64_t		deadline)
{

	if (interval == 0)
		return;

	if (	ml_delay_should_spin(interval)	||
		get_preemption_level() != 0	||
		ml_get_interrupts_enabled() == FALSE	) {
		machine_delay_until(interval, deadline);
	} else {
		assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline);

		thread_block(THREAD_CONTINUE_NULL);
	}
}


void
delay_for_interval(
	uint32_t		interval,
	uint32_t		scale_factor)
{
	uint64_t		abstime;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	_clock_delay_until_deadline(abstime, mach_absolute_time() + abstime);
}

void
delay(
	int		usec)
{
	delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
}
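
/*
 * Editor's note (not in the original file): delay(100) therefore waits
 * roughly 100 us, spinning or blocking as _clock_delay_until_deadline()
 * decides; the usec count is scaled by NSEC_PER_USEC into an absolute
 * interval first.
 */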

/*
 *	Miscellaneous routines.
 */
void
clock_interval_to_deadline(
	uint32_t		interval,
	uint32_t		scale_factor,
	uint64_t		*result)
{
	uint64_t	abstime;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	*result = mach_absolute_time() + abstime;
}

void
clock_absolutetime_interval_to_deadline(
	uint64_t		abstime,
	uint64_t		*result)
{
	*result = mach_absolute_time() + abstime;
}

void
clock_get_uptime(
	uint64_t	*result)
{
	*result = mach_absolute_time();
}

void
clock_deadline_for_periodic_event(
	uint64_t		interval,
	uint64_t		abstime,
	uint64_t		*deadline)
{
	assert(interval != 0);

	*deadline += interval;

	if (*deadline <= abstime) {
		*deadline = abstime + interval;
		abstime = mach_absolute_time();

		if (*deadline <= abstime)
			*deadline = abstime + interval;
	}
}
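
/*
 * Editor's usage sketch (not part of this file): a periodic handler can
 * re-arm itself with clock_deadline_for_periodic_event(), which skips
 * ahead when a deadline has already passed instead of firing in a burst:
 *
 *	clock_deadline_for_periodic_event(interval, mach_absolute_time(),
 *					  &next_deadline);
 *	timer_call_enter(&my_call, next_deadline, TIMER_CALL_SYS_CRITICAL);
 *
 * (my_call and next_deadline are hypothetical caller state.)
 */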

#if	CONFIG_DTRACE

/*
 * clock_get_calendar_nanotime_nowait
 *
 * Description:	Non-blocking version of clock_get_calendar_nanotime()
 *
 * Notes:	This function operates by separately tracking calendar time
 *		updates using a two element structure to copy the calendar
 *		state, which may be asynchronously modified.  It utilizes
 *		barrier instructions in the tracking process and in the local
 *		stable snapshot process in order to ensure that a consistent
 *		snapshot is used to perform the calculation.
 */
void
clock_get_calendar_nanotime_nowait(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	int i = 0;
	uint64_t	now;
	struct unlocked_clock_calend stable;

	for (;;) {
		stable = flipflop[i];		/* take snapshot */

		/*
		 * Use barrier instructions to ensure atomicity.  We AND
		 * off the "in progress" bit to get the current generation
		 * count.
		 */
		(void)hw_atomic_and(&stable.gen, ~(uint32_t)1);

		/*
		 * If an update _is_ in progress, the generation count will be
		 * off by one, if it _was_ in progress, it will be off by two,
		 * and if we caught it at a good time, it will be equal (and
		 * our snapshot is therefore stable).
		 */
		if (flipflop[i].gen == stable.gen)
			break;

		/* Switch to the other element of the flipflop, and try again. */
		i ^= 1;
	}

	now = mach_absolute_time();

	if (stable.calend.adjdelta < 0) {
		uint32_t	t32;

		if (now > stable.calend.adjstart) {
			t32 = (uint32_t)(now - stable.calend.adjstart);

			if (t32 > stable.calend.adjoffset)
				now -= stable.calend.adjoffset;
			else
				now = stable.calend.adjstart;
		}
	}

	now += stable.calend.offset;

	absolutetime_to_microtime(now, secs, nanosecs);
	*nanosecs *= NSEC_PER_USEC;

	*secs += (clock_sec_t)stable.calend.epoch;
}

static void
clock_track_calend_nowait(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		struct clock_calend tmp = clock_calend;

		/*
		 * Set the low bit of the generation count; since we use a
		 * barrier instruction to do this, we are guaranteed that this
		 * will flag an update in progress to an async caller trying
		 * to examine the contents.
		 */
		(void)hw_atomic_or(&flipflop[i].gen, 1);

		flipflop[i].calend = tmp;

		/*
		 * Increment the generation count to clear the low bit to
		 * signal completion.  If a caller compares the generation
		 * count after taking a copy while in progress, the count
		 * will be off by two.
		 */
		(void)hw_atomic_add(&flipflop[i].gen, 1);
	}
}

#endif	/* CONFIG_DTRACE */