/* apple/xnu (xnu-3248.60.10) — osfmk/kern/clock.c */
/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#include <mach/mach_types.h>

#include <kern/spl.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <kern/host_notify.h>

#include <IOKit/IOPlatformExpert.h>

#include <machine/commpage.h>

#include <mach/mach_traps.h>
#include <mach/mach_time.h>

#include <sys/kdebug.h>

uint32_t	hz_tick_interval = 1;

decl_simple_lock_data(,clock_lock)

#define clock_lock()	\
	simple_lock(&clock_lock)

#define clock_unlock()	\
	simple_unlock(&clock_lock)

#define clock_lock_init()	\
	simple_lock_init(&clock_lock, 0)

/*
 *	Time of day (calendar) variables.
 *
 *	Algorithm:
 *
 *	TOD <- (seconds + epoch, fraction) <- CONV(current absolute time + offset)
 *
 *	where CONV converts absolute time units into seconds and a fraction.
 */
static struct clock_calend {
	uint64_t	epoch;
	uint64_t	offset;
	uint64_t	epoch_absolute;

	int32_t		adjdelta;	/* Nanosecond time delta for this adjustment period */
	uint64_t	adjstart;	/* Absolute time value for start of this adjustment period */
	uint32_t	adjoffset;	/* Absolute time offset for this adjustment period as absolute value */
} clock_calend;
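
/*
 * Illustrative sketch (not part of the original source) of how the
 * fields above combine, per the algorithm comment: offset shifts the
 * free-running absolute clock, and epoch anchors whole seconds.
 *
 *	uint64_t	now = mach_absolute_time() + clock_calend.offset;
 *	clock_sec_t	secs;
 *	clock_usec_t	usecs;
 *	absolutetime_to_microtime(now, &secs, &usecs);
 *	secs += (clock_sec_t)clock_calend.epoch;   // TOD = CONV(now) + epoch
 */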

#if	CONFIG_DTRACE

/*
 *	Unlocked calendar flipflop; this is used to track a clock_calend such
 *	that we can safely access a snapshot of a valid clock_calend structure
 *	without needing to take any locks to do it.
 *
 *	The trick is to use a generation count and set the low bit when it is
 *	being updated/read; by doing this, we guarantee, through use of the
 *	hw_atomic functions, that the generation is incremented when the bit
 *	is cleared atomically (by using a 1 bit add).
 */
static struct unlocked_clock_calend {
	struct clock_calend	calend;		/* copy of calendar */
	uint32_t		gen;		/* generation count */
} flipflop[2];

static void clock_track_calend_nowait(void);

#endif
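
/*
 * Reader protocol sketch (illustrative; the real loop is in
 * clock_get_calendar_nanotime_nowait() below).  A reader copies
 * flipflop[i], masks off the low "in progress" bit of the generation
 * count, and accepts the snapshot only if the live count still matches:
 *
 *	struct unlocked_clock_calend snap = flipflop[i];
 *	(void)hw_atomic_and(&snap.gen, ~(uint32_t)1);
 *	if (flipflop[i].gen == snap.gen)
 *		;	// snapshot is stable; otherwise retry with i ^= 1
 */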

/*
 *	Calendar adjustment variables and values.
 */
#define calend_adjperiod	(NSEC_PER_SEC / 100)	/* adjustment period, ns */
#define calend_adjskew		(40 * NSEC_PER_USEC)	/* "standard" skew, ns / period */
#define	calend_adjbig		(NSEC_PER_SEC)		/* use 10x skew above adjbig ns */

static int64_t			calend_adjtotal;	/* Nanosecond remaining total adjustment */
static uint64_t			calend_adjdeadline;	/* Absolute time value for next adjustment period */
static uint32_t			calend_adjinterval;	/* Absolute time interval of adjustment period */

static timer_call_data_t	calend_adjcall;
static uint32_t			calend_adjactive;

static uint32_t		calend_set_adjustment(
				long		*secs,
				int		*microsecs);

static void		calend_adjust_call(void);
static uint32_t		calend_adjust(void);

void _clock_delay_until_deadline(uint64_t	interval,
				 uint64_t	deadline);
void _clock_delay_until_deadline_with_leeway(uint64_t	interval,
					     uint64_t	deadline,
					     uint64_t	leeway);

static uint64_t		clock_boottime;		/* Seconds boottime epoch */

#define TIME_ADD(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN						\
	if (((rfrac) += (frac)) >= (unit)) {		\
		(rfrac) -= (unit);			\
		(rsecs) += 1;				\
	}						\
	(rsecs) += (secs);				\
MACRO_END

#define TIME_SUB(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN						\
	if ((int)((rfrac) -= (frac)) < 0) {		\
		(rfrac) += (unit);			\
		(rsecs) -= 1;				\
	}						\
	(rsecs) -= (secs);				\
MACRO_END
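
/*
 * Worked example (illustrative, not from the original source):
 * subtracting 2.7 s from 5.2 s with TIME_SUB, carrying in microseconds:
 *
 *	clock_sec_t	rsecs = 5;
 *	clock_usec_t	rfrac = 200000;
 *	TIME_SUB(rsecs, 2, rfrac, 700000, USEC_PER_SEC);
 *	// rfrac underflows, so the macro adds USEC_PER_SEC and borrows
 *	// one second: rsecs == 2, rfrac == 500000, i.e. 2.5 s.
 */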

/*
 *	clock_config:
 *
 *	Called once at boot to configure the clock subsystem.
 */
void
clock_config(void)
{
	clock_lock_init();

	timer_call_setup(&calend_adjcall, (timer_call_func_t)calend_adjust_call, NULL);

	clock_oldconfig();
}

/*
 *	clock_init:
 *
 *	Called on a processor each time it is started.
 */
void
clock_init(void)
{
	clock_oldinit();
}

/*
 *	clock_timebase_init:
 *
 *	Called by machine dependent code
 *	to initialize areas dependent on the
 *	timebase value.  May be called multiple
 *	times during start up.
 */
void
clock_timebase_init(void)
{
	uint64_t	abstime;

	nanoseconds_to_absolutetime(calend_adjperiod, &abstime);
	calend_adjinterval = (uint32_t)abstime;

	nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
	hz_tick_interval = (uint32_t)abstime;

	sched_timebase_init();
}

/*
 *	mach_timebase_info_trap:
 *
 *	User trap returns timebase constant.
 */
kern_return_t
mach_timebase_info_trap(
	struct mach_timebase_info_trap_args *args)
{
	mach_vm_address_t		out_info_addr = args->info;
	mach_timebase_info_data_t	info;

	clock_timebase_info(&info);

	copyout((void *)&info, out_info_addr, sizeof (info));

	return (KERN_SUCCESS);
}
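
/*
 * Illustrative user-space counterpart (a sketch, not part of this
 * file): the numer/denom pair returned through this trap converts
 * absolute-time ticks to nanoseconds.
 *
 *	mach_timebase_info_data_t tb;
 *	mach_timebase_info(&tb);
 *	uint64_t ns = mach_absolute_time() * tb.numer / tb.denom;
 */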

/*
 *	Calendar routines.
 */

/*
 *	clock_get_calendar_microtime:
 *
 *	Returns the current calendar value,
 *	microseconds as the fraction.
 */
void
clock_get_calendar_microtime(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs)
{
	clock_get_calendar_absolute_and_microtime(secs, microsecs, NULL);
}

/*
 *	clock_get_calendar_absolute_and_microtime:
 *
 *	Returns the current calendar value,
 *	microseconds as the fraction.  Also
 *	returns mach_absolute_time if abstime
 *	is not NULL.
 */
void
clock_get_calendar_absolute_and_microtime(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs,
	uint64_t		*abstime)
{
	uint64_t	now;
	spl_t		s;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();
	if (abstime)
		*abstime = now;

	if (clock_calend.adjdelta < 0) {
		uint32_t	t32;

		/*
		 * Since offset is decremented during a negative adjustment,
		 * ensure that time increases monotonically without going
		 * temporarily backwards.
		 * If the delta has not yet passed, now is set to the start
		 * of the current adjustment period; otherwise, we're between
		 * the expiry of the delta and the next call to calend_adjust(),
		 * and we offset accordingly.
		 */
		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}
	}

	now += clock_calend.offset;

	absolutetime_to_microtime(now, secs, microsecs);

	*secs += (clock_sec_t)clock_calend.epoch;

	clock_unlock();
	splx(s);
}
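
/*
 * Worked example of the clamp above (illustrative numbers): with
 * adjstart = 1000 and adjoffset = 50, a reading at now = 1030 is
 * clamped to 1000 because only 30 of the 50 backwards units have
 * elapsed; a reading at now = 1060 yields 1010.  Reported time thus
 * never moves backwards during a negative adjustment.
 */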

/*
 *	clock_get_calendar_nanotime:
 *
 *	Returns the current calendar value,
 *	nanoseconds as the fraction.
 *
 *	Since we do not have an interface to
 *	set the calendar with resolution greater
 *	than a microsecond, we honor that here.
 */
void
clock_get_calendar_nanotime(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	uint64_t	now;
	spl_t		s;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	if (clock_calend.adjdelta < 0) {
		uint32_t	t32;

		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}
	}

	now += clock_calend.offset;

	absolutetime_to_microtime(now, secs, nanosecs);

	*nanosecs *= NSEC_PER_USEC;

	*secs += (clock_sec_t)clock_calend.epoch;

	clock_unlock();
	splx(s);
}

/*
 *	clock_gettimeofday:
 *
 *	Kernel interface for commpage implementation of
 *	gettimeofday() syscall.
 *
 *	Returns the current calendar value, and updates the
 *	commpage info as appropriate.  Because most calls to
 *	gettimeofday() are handled in user mode by the commpage,
 *	this routine should be used infrequently.
 */
void
clock_gettimeofday(
	clock_sec_t	*secs,
	clock_usec_t	*microsecs)
{
	uint64_t	now;
	spl_t		s;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	if (clock_calend.adjdelta >= 0) {
		clock_gettimeofday_set_commpage(now, clock_calend.epoch, clock_calend.offset, secs, microsecs);
	}
	else {
		uint32_t	t32;

		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}

		now += clock_calend.offset;

		absolutetime_to_microtime(now, secs, microsecs);

		*secs += (clock_sec_t)clock_calend.epoch;
	}

	clock_unlock();
	splx(s);
}

/*
 *	clock_set_calendar_microtime:
 *
 *	Sets the current calendar value by
 *	recalculating the epoch and offset
 *	from the system clock.
 *
 *	Also adjusts the boottime to keep the
 *	value consistent, writes the new
 *	calendar value to the platform clock,
 *	and sends calendar change notifications.
 */
void
clock_set_calendar_microtime(
	clock_sec_t		secs,
	clock_usec_t		microsecs)
{
	clock_sec_t		sys;
	clock_usec_t		microsys;
	clock_sec_t		newsecs;
	clock_usec_t		newmicrosecs;
	spl_t			s;

	newsecs = secs;
	newmicrosecs = microsecs;

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	/*
	 *	Calculate the new calendar epoch based on
	 *	the new value and the system clock.
	 */
	clock_get_system_microtime(&sys, &microsys);
	TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

	/*
	 *	Adjust the boottime based on the delta.
	 */
	clock_boottime += secs - clock_calend.epoch;

	/*
	 *	Set the new calendar epoch.
	 */
	clock_calend.epoch = secs;

	nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

	clock_interval_to_absolutetime_interval((uint32_t) secs, NSEC_PER_SEC, &clock_calend.epoch_absolute);
	clock_calend.epoch_absolute += clock_calend.offset;

	/*
	 *	Cancel any adjustment in progress.
	 */
	calend_adjtotal = clock_calend.adjdelta = 0;

	clock_unlock();

	/*
	 *	Set the new value for the platform clock.
	 */
	PESetUTCTimeOfDay(newsecs, newmicrosecs);

	splx(s);

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}
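
/*
 * Illustrative invariant (not stated in the original source): because
 * clock_boottime moves by the same delta as clock_calend.epoch above,
 * boottime + uptime continues to equal calendar time.  Setting the
 * clock 100 s ahead advances both values by 100 s, so the uptime
 * reported to user space is unaffected.
 */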

/*
 *	clock_initialize_calendar:
 *
 *	Set the calendar and related clocks
 *	from the platform clock at boot or
 *	wake event.
 *
 *	Also sends host notifications.
 */

uint64_t mach_absolutetime_asleep;
uint64_t mach_absolutetime_last_sleep;

void
clock_initialize_calendar(void)
{
	clock_sec_t		sys, secs;
	clock_usec_t		microsys, microsecs;
	uint64_t		new_epoch;
	spl_t			s;

	PEGetUTCTimeOfDay(&secs, &microsecs);

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	if ((long)secs >= (long)clock_boottime) {
		/*
		 *	Initialize the boot time based on the platform clock.
		 */
		if (clock_boottime == 0)
			clock_boottime = secs;

		/*
		 *	Calculate the new calendar epoch based on
		 *	the platform clock and the system clock.
		 */
		clock_get_system_microtime(&sys, &microsys);
		TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

		/*
		 *	Set the new calendar epoch.
		 */
		clock_calend.epoch = secs;

		nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

		clock_interval_to_absolutetime_interval((uint32_t) secs, NSEC_PER_SEC, &new_epoch);
		new_epoch += clock_calend.offset;

		if (clock_calend.epoch_absolute)
		{
			mach_absolutetime_last_sleep = new_epoch - clock_calend.epoch_absolute;
			mach_absolutetime_asleep += mach_absolutetime_last_sleep;
			KERNEL_DEBUG_CONSTANT(
				  MACHDBG_CODE(DBG_MACH_CLOCK,MACH_EPOCH_CHANGE) | DBG_FUNC_NONE,
				  (uintptr_t) mach_absolutetime_last_sleep,
				  (uintptr_t) mach_absolutetime_asleep,
				  (uintptr_t) (mach_absolutetime_last_sleep >> 32),
				  (uintptr_t) (mach_absolutetime_asleep >> 32),
				  0);
		}
		clock_calend.epoch_absolute = new_epoch;

		/*
		 *	Cancel any adjustment in progress.
		 */
		calend_adjtotal = clock_calend.adjdelta = 0;
	}

	clock_unlock();
	splx(s);

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}

/*
 *	clock_get_boottime_nanotime:
 *
 *	Return the boottime, used by sysctl.
 */
void
clock_get_boottime_nanotime(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	spl_t	s;

	s = splclock();
	clock_lock();

	*secs = (clock_sec_t)clock_boottime;
	*nanosecs = 0;

	clock_unlock();
	splx(s);
}

/*
 *	clock_adjtime:
 *
 *	Interface to adjtime() syscall.
 *
 *	Calculates adjustment variables and
 *	initiates adjustment.
 */
void
clock_adjtime(
	long		*secs,
	int		*microsecs)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	clock_lock();

	interval = calend_set_adjustment(secs, microsecs);
	if (interval != 0) {
		calend_adjdeadline = mach_absolute_time() + interval;
		if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_SYS_CRITICAL))
			calend_adjactive++;
	}
	else
	if (timer_call_cancel(&calend_adjcall))
		calend_adjactive--;

	clock_unlock();
	splx(s);
}
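
/*
 * Slew-rate arithmetic (illustrative, derived from the constants
 * defined above): adjustments are applied in calend_adjperiod (10 ms)
 * steps of calend_adjskew (40 us), a 0.4% slew.  A requested +1 s
 * adjustment therefore needs 1 s / 40 us = 25000 periods, or roughly
 * 250 s of wall time; totals exceeding calend_adjbig (1 s) slew ten
 * times faster.
 */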

static uint32_t
calend_set_adjustment(
	long		*secs,
	int		*microsecs)
{
	uint64_t	now, t64;
	int64_t		total, ototal;
	uint32_t	interval = 0;

	/*
	 * Compute the total adjustment time in nanoseconds.
	 */
	total = ((int64_t)*secs * (int64_t)NSEC_PER_SEC) + (*microsecs * (int64_t)NSEC_PER_USEC);

	/*
	 * Disable commpage gettimeofday().
	 */
	commpage_disable_timestamp();

	/*
	 * Get current absolute time.
	 */
	now = mach_absolute_time();

	/*
	 * Save the old adjustment total for later return.
	 */
	ototal = calend_adjtotal;

	/*
	 * Is a new correction specified?
	 */
	if (total != 0) {
		/*
		 * Set delta to the standard, small, adjustment skew.
		 */
		int32_t		delta = calend_adjskew;

		if (total > 0) {
			/*
			 * Positive adjustment.  If greater than the preset 'big'
			 * threshold, slew at a faster rate, capping if necessary.
			 */
			if (total > (int64_t) calend_adjbig)
				delta *= 10;
			if (delta > total)
				delta = (int32_t)total;

			/*
			 * Convert the delta back from ns to absolute time and store in adjoffset.
			 */
			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}
		else {
			/*
			 * Negative adjustment; therefore, negate the delta.  If
			 * greater than the preset 'big' threshold, slew at a faster
			 * rate, capping if necessary.
			 */
			if (total < (int64_t) -calend_adjbig)
				delta *= 10;
			delta = -delta;
			if (delta < total)
				delta = (int32_t)total;

			/*
			 * Save the current absolute time.  Subsequent time operations occurring
			 * during this negative correction can make use of this value to ensure
			 * that time increases monotonically.
			 */
			clock_calend.adjstart = now;

			/*
			 * Convert the delta back from ns to absolute time and store in adjoffset.
			 */
			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}

		/*
		 * Store the total adjustment time in ns.
		 */
		calend_adjtotal = total;

		/*
		 * Store the delta for this adjustment period in ns.
		 */
		clock_calend.adjdelta = delta;

		/*
		 * Set the interval in absolute time for later return.
		 */
		interval = calend_adjinterval;
	}
	else {
		/*
		 * No change; clear any prior adjustment.
		 */
		calend_adjtotal = clock_calend.adjdelta = 0;
	}

	/*
	 * If a prior correction was in progress, return the
	 * remaining uncorrected time from it.
	 */
	if (ototal != 0) {
		*secs = (long)(ototal / (long)NSEC_PER_SEC);
		*microsecs = (int)((ototal % (int)NSEC_PER_SEC) / (int)NSEC_PER_USEC);
	}
	else
		*secs = *microsecs = 0;

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif

	return (interval);
}

static void
calend_adjust_call(void)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	clock_lock();

	if (--calend_adjactive == 0) {
		interval = calend_adjust();
		if (interval != 0) {
			clock_deadline_for_periodic_event(interval, mach_absolute_time(), &calend_adjdeadline);

			if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_SYS_CRITICAL))
				calend_adjactive++;
		}
	}

	clock_unlock();
	splx(s);
}

static uint32_t
calend_adjust(void)
{
	uint64_t	now, t64;
	int32_t		delta;
	uint32_t	interval = 0;

	commpage_disable_timestamp();

	now = mach_absolute_time();

	delta = clock_calend.adjdelta;

	if (delta > 0) {
		clock_calend.offset += clock_calend.adjoffset;

		calend_adjtotal -= delta;
		if (delta > calend_adjtotal) {
			clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;

			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}
	}
	else
	if (delta < 0) {
		clock_calend.offset -= clock_calend.adjoffset;

		calend_adjtotal -= delta;
		if (delta < calend_adjtotal) {
			clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;

			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}

		if (clock_calend.adjdelta != 0)
			clock_calend.adjstart = now;
	}

	if (clock_calend.adjdelta != 0)
		interval = calend_adjinterval;

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif

	return (interval);
}

/*
 *	Wait / delay routines.
 */
static void
mach_wait_until_continue(
	__unused void	*parameter,
	wait_result_t	wresult)
{
	thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
	/*NOTREACHED*/
}

/*
 * mach_wait_until_trap: Suspend execution of calling thread until the specified time has passed
 *
 * Parameters:	args->deadline		Mach absolute time deadline to wait until
 *
 * Returns:	0			Success
 *		!0			Not success
 *
 */
kern_return_t
mach_wait_until_trap(
	struct mach_wait_until_trap_args *args)
{
	uint64_t	deadline = args->deadline;
	wait_result_t	wresult;

	wresult = assert_wait_deadline_with_leeway((event_t)mach_wait_until_trap, THREAD_ABORTSAFE,
						   TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
	if (wresult == THREAD_WAITING)
		wresult = thread_block(mach_wait_until_continue);

	return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
}
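
/*
 * Illustrative user-space counterpart (a sketch): mach_wait_until()
 * enters this trap with an absolute-time deadline.
 *
 *	mach_timebase_info_data_t tb;
 *	mach_timebase_info(&tb);
 *	uint64_t ticks = (100 * NSEC_PER_MSEC) * tb.denom / tb.numer;
 *	mach_wait_until(mach_absolute_time() + ticks);	// sleep ~100 ms
 */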

void
clock_delay_until(
	uint64_t		deadline)
{
	uint64_t		now = mach_absolute_time();

	if (now >= deadline)
		return;

	_clock_delay_until_deadline(deadline - now, deadline);
}

/*
 * Preserve the original precise interval that the client
 * requested for comparison to the spin threshold.
 */
void
_clock_delay_until_deadline(
	uint64_t		interval,
	uint64_t		deadline)
{
	_clock_delay_until_deadline_with_leeway(interval, deadline, 0);
}

/*
 * Like _clock_delay_until_deadline, but it accepts a
 * leeway value.
 */
void
_clock_delay_until_deadline_with_leeway(
	uint64_t		interval,
	uint64_t		deadline,
	uint64_t		leeway)
{

	if (interval == 0)
		return;

	if (	ml_delay_should_spin(interval)	||
		get_preemption_level() != 0	||
		ml_get_interrupts_enabled() == FALSE	) {
		machine_delay_until(interval, deadline);
	} else {
		/*
		 * For now, assume a leeway request of 0 means the client does not want a leeway
		 * value.  We may want to change this interpretation in the future.
		 */

		if (leeway) {
			assert_wait_deadline_with_leeway((event_t)clock_delay_until, THREAD_UNINT, TIMEOUT_URGENCY_LEEWAY, deadline, leeway);
		} else {
			assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline);
		}

		thread_block(THREAD_CONTINUE_NULL);
	}
}

void
delay_for_interval(
	uint32_t		interval,
	uint32_t		scale_factor)
{
	uint64_t		abstime;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	_clock_delay_until_deadline(abstime, mach_absolute_time() + abstime);
}

void
delay_for_interval_with_leeway(
	uint32_t		interval,
	uint32_t		leeway,
	uint32_t		scale_factor)
{
	uint64_t		abstime_interval;
	uint64_t		abstime_leeway;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime_interval);
	clock_interval_to_absolutetime_interval(leeway, scale_factor, &abstime_leeway);

	_clock_delay_until_deadline_with_leeway(abstime_interval, mach_absolute_time() + abstime_interval, abstime_leeway);
}
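
/*
 * Usage sketch (illustrative): scale_factor is the number of
 * nanoseconds per unit of interval, so both calls below request a
 * ~10 ms delay; the second grants the timer 1 ms of leeway for
 * coalescing.
 *
 *	delay_for_interval(10, NSEC_PER_MSEC);
 *	delay_for_interval_with_leeway(10, 1, NSEC_PER_MSEC);
 */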

void
delay(
	int usec)
{
	delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
}

/*
 *	Miscellaneous routines.
 */
void
clock_interval_to_deadline(
	uint32_t		interval,
	uint32_t		scale_factor,
	uint64_t		*result)
{
	uint64_t	abstime;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	*result = mach_absolute_time() + abstime;
}

void
clock_absolutetime_interval_to_deadline(
	uint64_t		abstime,
	uint64_t		*result)
{
	*result = mach_absolute_time() + abstime;
}

void
clock_get_uptime(
	uint64_t	*result)
{
	*result = mach_absolute_time();
}

void
clock_deadline_for_periodic_event(
	uint64_t		interval,
	uint64_t		abstime,
	uint64_t		*deadline)
{
	assert(interval != 0);

	*deadline += interval;

	if (*deadline <= abstime) {
		*deadline = abstime + interval;
		abstime = mach_absolute_time();

		if (*deadline <= abstime)
			*deadline = abstime + interval;
	}
}
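
/*
 * Worked example (illustrative numbers): with interval = 10 and a
 * prior deadline of 100, the next deadline is normally 110.  If the
 * caller fell behind and abstime is already 115, the deadline is
 * re-anchored to abstime + interval (125) rather than being scheduled
 * in the past; missed periods are skipped, not replayed.
 */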

#if	CONFIG_DTRACE

/*
 * clock_get_calendar_nanotime_nowait
 *
 * Description:	Non-blocking version of clock_get_calendar_nanotime()
 *
 * Notes:	This function operates by separately tracking calendar time
 *		updates using a two element structure to copy the calendar
 *		state, which may be asynchronously modified.  It utilizes
 *		barrier instructions in the tracking process and in the local
 *		stable snapshot process in order to ensure that a consistent
 *		snapshot is used to perform the calculation.
 */
void
clock_get_calendar_nanotime_nowait(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	int i = 0;
	uint64_t	now;
	struct unlocked_clock_calend stable;

	for (;;) {
		stable = flipflop[i];		/* take snapshot */

		/*
		 * Use barrier instructions to ensure atomicity.  We AND
		 * off the "in progress" bit to get the current generation
		 * count.
		 */
		(void)hw_atomic_and(&stable.gen, ~(uint32_t)1);

		/*
		 * If an update _is_ in progress, the generation count will be
		 * off by one, if it _was_ in progress, it will be off by two,
		 * and if we caught it at a good time, it will be equal (and
		 * our snapshot is therefore stable).
		 */
		if (flipflop[i].gen == stable.gen)
			break;

		/* Switch to the other element of the flipflop, and try again. */
		i ^= 1;
	}

	now = mach_absolute_time();

	if (stable.calend.adjdelta < 0) {
		uint32_t	t32;

		if (now > stable.calend.adjstart) {
			t32 = (uint32_t)(now - stable.calend.adjstart);

			if (t32 > stable.calend.adjoffset)
				now -= stable.calend.adjoffset;
			else
				now = stable.calend.adjstart;
		}
	}

	now += stable.calend.offset;

	absolutetime_to_microtime(now, secs, nanosecs);
	*nanosecs *= NSEC_PER_USEC;

	*secs += (clock_sec_t)stable.calend.epoch;
}

static void
clock_track_calend_nowait(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		struct clock_calend tmp = clock_calend;

		/*
		 * Set the low bit of the generation count; since we use a
		 * barrier instruction to do this, we are guaranteed that this
		 * will flag an update in progress to an async caller trying
		 * to examine the contents.
		 */
		(void)hw_atomic_or(&flipflop[i].gen, 1);

		flipflop[i].calend = tmp;

		/*
		 * Increment the generation count to clear the low bit to
		 * signal completion.  If a caller compares the generation
		 * count after taking a copy while in progress, the count
		 * will be off by two.
		 */
		(void)hw_atomic_add(&flipflop[i].gen, 1);
	}
}

#endif	/* CONFIG_DTRACE */