/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 */

#include <mach/mach_types.h>

#include <kern/lock.h>
#include <kern/spl.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <kern/host_notify.h>

#include <IOKit/IOPlatformExpert.h>

#include <machine/commpage.h>

#include <mach/mach_traps.h>
#include <mach/mach_time.h>

uint32_t	hz_tick_interval = 1;


decl_simple_lock_data(static,clock_lock)

#define clock_lock()	\
	simple_lock(&clock_lock)

#define clock_unlock()	\
	simple_unlock(&clock_lock)

#define clock_lock_init()	\
	simple_lock_init(&clock_lock, 0)


/*
 *	Time of day (calendar) variables.
 *
 *	Algorithm:
 *
 *	TOD <- (seconds + epoch, fraction) <- CONV(current absolute time + offset)
 *
 *	where CONV converts absolute time units into seconds and a fraction.
 */
static struct clock_calend {

	uint64_t	epoch;
	uint64_t	offset;

	int32_t		adjdelta;	/* Nanosecond time delta for this adjustment period */
	uint64_t	adjstart;	/* Absolute time value for start of this adjustment period */
	uint32_t	adjoffset;	/* Absolute time offset for this adjustment period as absolute value */
} clock_calend;
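
/*
 * Reading the calendar (a sketch of the algorithm above): the calendar
 * routines below take mach_absolute_time(), add clock_calend.offset,
 * convert the result to (seconds, fraction) with absolutetime_to_microtime(),
 * and add clock_calend.epoch to the seconds.  The epoch and offset are
 * established from the platform clock at boot or wake, and the adj* fields
 * describe any adjtime()-style slew currently in progress.
 */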

#if	CONFIG_DTRACE

/*
 *	Unlocked calendar flipflop; this is used to track a clock_calend such
 *	that we can safely access a snapshot of a valid clock_calend structure
 *	without needing to take any locks to do it.
 *
 *	The trick is to use a generation count and set the low bit when it is
 *	being updated/read; by doing this, we guarantee, through use of the
 *	hw_atomic functions, that the generation is incremented when the bit
 *	is cleared atomically (by using a 1 bit add).
 */
static struct unlocked_clock_calend {
	struct clock_calend	calend;		/* copy of calendar */
	uint32_t		gen;		/* generation count */
} flipflop[ 2];

static void clock_track_calend_nowait(void);

#endif

/*
 *	Calendar adjustment variables and values.
 */
#define calend_adjperiod	(NSEC_PER_SEC / 100)	/* adjustment period, ns */
#define calend_adjskew		(40 * NSEC_PER_USEC)	/* "standard" skew, ns / period */
#define	calend_adjbig		(NSEC_PER_SEC)		/* use 10x skew above adjbig ns */

static int64_t			calend_adjtotal;	/* Nanosecond remaining total adjustment */
static uint64_t			calend_adjdeadline;	/* Absolute time value for next adjustment period */
static uint32_t			calend_adjinterval;	/* Absolute time interval of adjustment period */

static timer_call_data_t	calend_adjcall;
static uint32_t			calend_adjactive;

static uint32_t		calend_set_adjustment(
						long		*secs,
						int		*microsecs);

static void		calend_adjust_call(void);
static uint32_t		calend_adjust(void);

static thread_call_data_t	calend_wakecall;

extern	void	IOKitResetTime(void);

static uint64_t		clock_boottime;		/* Seconds boottime epoch */

#define TIME_ADD(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN						\
	if (((rfrac) += (frac)) >= (unit)) {		\
		(rfrac) -= (unit);			\
		(rsecs) += 1;				\
	}						\
	(rsecs) += (secs);				\
MACRO_END

#define TIME_SUB(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN						\
	if ((int)((rfrac) -= (frac)) < 0) {		\
		(rfrac) += (unit);			\
		(rsecs) -= 1;				\
	}						\
	(rsecs) -= (secs);				\
MACRO_END
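
/*
 * Example of the borrow handling above (illustrative): with rsecs = 12,
 * secs = 3, rfrac = 200000, frac = 700000 and unit = USEC_PER_SEC,
 * TIME_SUB borrows one second and leaves rsecs = 8, rfrac = 500000.
 * TIME_ADD works the same way in the other direction, carrying a whole
 * unit out of the fraction.
 */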

/*
 *	clock_config:
 *
 *	Called once at boot to configure the clock subsystem.
 */
void
clock_config(void)
{
	clock_lock_init();

	timer_call_setup(&calend_adjcall, (timer_call_func_t)calend_adjust_call, NULL);
	thread_call_setup(&calend_wakecall, (thread_call_func_t)IOKitResetTime, NULL);

	clock_oldconfig();

	/*
	 * Initialize the timer callouts.
	 */
	timer_call_initialize();
}

/*
 *	clock_init:
 *
 *	Called on a processor each time started.
 */
void
clock_init(void)
{
	clock_oldinit();
}

/*
 *	clock_timebase_init:
 *
 *	Called by machine dependent code
 *	to initialize areas dependent on the
 *	timebase value.  May be called multiple
 *	times during start up.
 */
void
clock_timebase_init(void)
{
	uint64_t	abstime;

	nanoseconds_to_absolutetime(calend_adjperiod, &abstime);
	calend_adjinterval = (uint32_t)abstime;

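	/*
	 * hz_tick_interval: length of one 10 ms (100 Hz) tick, expressed in
	 * the absolute-time units of the timebase established above.
	 */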
	nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
	hz_tick_interval = (uint32_t)abstime;

	sched_timebase_init();
}

/*
 *	mach_timebase_info_trap:
 *
 *	User trap returns timebase constant.
 */
kern_return_t
mach_timebase_info_trap(
	struct mach_timebase_info_trap_args *args)
{
	mach_vm_address_t		out_info_addr = args->info;
	mach_timebase_info_data_t	info;

	clock_timebase_info(&info);

	copyout((void *)&info, out_info_addr, sizeof (info));

	return (KERN_SUCCESS);
}
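
/*
 * The constant returned above backs the user-level mach_timebase_info()
 * API.  A typical (illustrative) user-space use is converting
 * mach_absolute_time() deltas to nanoseconds:
 *
 *	mach_timebase_info_data_t tb;
 *	mach_timebase_info(&tb);
 *	uint64_t elapsed_ns = elapsed_abs * tb.numer / tb.denom;
 */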

/*
 *	Calendar routines.
 */

/*
 *	clock_get_calendar_microtime:
 *
 *	Returns the current calendar value,
 *	microseconds as the fraction.
 */
void
clock_get_calendar_microtime(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs)
{
	uint64_t		now;
	spl_t			s;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

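	/*
	 * If a negative adjustment (a backwards adjtime() slew) is in
	 * progress, withhold up to adjoffset of the time elapsed since
	 * adjstart so the value reported here does not move backwards when
	 * clock_calend.offset is reduced at the end of the period.  The
	 * same clamp is repeated in the other calendar read paths below.
	 */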
	if (clock_calend.adjdelta < 0) {
		uint32_t	t32;

		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}
	}

	now += clock_calend.offset;

	absolutetime_to_microtime(now, secs, microsecs);

	*secs += (clock_sec_t)clock_calend.epoch;

	clock_unlock();
	splx(s);
}

/*
 *	clock_get_calendar_nanotime:
 *
 *	Returns the current calendar value,
 *	nanoseconds as the fraction.
 *
 *	Since we do not have an interface to
 *	set the calendar with resolution greater
 *	than a microsecond, we honor that here.
 */
void
clock_get_calendar_nanotime(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	uint64_t		now;
	spl_t			s;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	if (clock_calend.adjdelta < 0) {
		uint32_t	t32;

		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}
	}

	now += clock_calend.offset;

	absolutetime_to_microtime(now, secs, nanosecs);
	*nanosecs *= NSEC_PER_USEC;

	*secs += (clock_sec_t)clock_calend.epoch;

	clock_unlock();
	splx(s);
}

/*
 *	clock_gettimeofday:
 *
 *	Kernel interface for commpage implementation of
 *	gettimeofday() syscall.
 *
 *	Returns the current calendar value, and updates the
 *	commpage info as appropriate.  Because most calls to
 *	gettimeofday() are handled in user mode by the commpage,
 *	this routine should be used infrequently.
 */
void
clock_gettimeofday(
	clock_sec_t	*secs,
	clock_usec_t	*microsecs)
{
	uint64_t	now;
	spl_t		s;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

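	/*
	 * Refresh the commpage timestamp only when no negative adjustment is
	 * in progress; the backwards-slew clamping below is not something the
	 * commpage fast path can reproduce, so while such an adjustment runs
	 * user-mode gettimeofday() keeps falling back into this routine.
	 */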
	if (clock_calend.adjdelta >= 0) {
		clock_gettimeofday_set_commpage(now, clock_calend.epoch, clock_calend.offset, secs, microsecs);
	}
	else {
		uint32_t	t32;

		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}

		now += clock_calend.offset;

		absolutetime_to_microtime(now, secs, microsecs);

		*secs += (clock_sec_t)clock_calend.epoch;
	}

	clock_unlock();
	splx(s);
}

/*
 *	clock_set_calendar_microtime:
 *
 *	Sets the current calendar value by
 *	recalculating the epoch and offset
 *	from the system clock.
 *
 *	Also adjusts the boottime to keep the
 *	value consistent, writes the new
 *	calendar value to the platform clock,
 *	and sends calendar change notifications.
 */
void
clock_set_calendar_microtime(
	clock_sec_t		secs,
	clock_usec_t		microsecs)
{
	clock_sec_t		sys;
	clock_usec_t		microsys;
	clock_sec_t		newsecs;
	spl_t			s;

	newsecs = (microsecs < 500*USEC_PER_SEC)? secs: secs + 1;

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	/*
	 *	Calculate the new calendar epoch based on
	 *	the new value and the system clock.
	 */
	clock_get_system_microtime(&sys, &microsys);
	TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

	/*
	 *	Adjust the boottime based on the delta.
	 */
	clock_boottime += secs - clock_calend.epoch;

	/*
	 *	Set the new calendar epoch.
	 */
	clock_calend.epoch = secs;
	nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

	/*
	 *	Cancel any adjustment in progress.
	 */
	calend_adjtotal = clock_calend.adjdelta = 0;

	clock_unlock();

	/*
	 *	Set the new value for the platform clock.
	 */
	PESetGMTTimeOfDay(newsecs);

	splx(s);

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}

/*
 *	clock_initialize_calendar:
 *
 *	Set the calendar and related clocks
 *	from the platform clock at boot or
 *	wake event.
 *
 *	Also sends host notifications.
 */
void
clock_initialize_calendar(void)
{
	clock_sec_t		sys, secs = PEGetGMTTimeOfDay();
	clock_usec_t		microsys, microsecs = 0;
	spl_t			s;

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	if ((long)secs >= (long)clock_boottime) {
		/*
		 *	Initialize the boot time based on the platform clock.
		 */
		if (clock_boottime == 0)
			clock_boottime = secs;

		/*
		 *	Calculate the new calendar epoch based on
		 *	the platform clock and the system clock.
		 */
		clock_get_system_microtime(&sys, &microsys);
		TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

		/*
		 *	Set the new calendar epoch.
		 */
		clock_calend.epoch = secs;
		nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

		/*
		 *	Cancel any adjustment in progress.
		 */
		calend_adjtotal = clock_calend.adjdelta = 0;
	}

	clock_unlock();
	splx(s);

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}

/*
 *	clock_get_boottime_nanotime:
 *
 *	Return the boottime, used by sysctl.
 */
void
clock_get_boottime_nanotime(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	spl_t	s;

	s = splclock();
	clock_lock();

	*secs = (clock_sec_t)clock_boottime;
	*nanosecs = 0;

	clock_unlock();
	splx(s);
}

/*
 *	clock_adjtime:
 *
 *	Interface to adjtime() syscall.
 *
 *	Calculates adjustment variables and
 *	initiates adjustment.
 */
void
clock_adjtime(
	long		*secs,
	int		*microsecs)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	clock_lock();

	interval = calend_set_adjustment(secs, microsecs);
	if (interval != 0) {
		calend_adjdeadline = mach_absolute_time() + interval;
		if (!timer_call_enter(&calend_adjcall, calend_adjdeadline))
			calend_adjactive++;
	}
	else
	if (timer_call_cancel(&calend_adjcall))
		calend_adjactive--;

	clock_unlock();
	splx(s);
}

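/*
 * How the adjustment runs (an informal sketch of the routines below):
 * calend_set_adjustment() converts the requested adjtime() delta into a
 * signed per-period step (clock_calend.adjdelta) plus its absolute-time
 * equivalent (clock_calend.adjoffset), and clock_adjtime() arms
 * calend_adjcall.  Every calend_adjperiod thereafter calend_adjust()
 * applies one step to clock_calend.offset, reduces calend_adjtotal and
 * re-arms the timer until the remaining total has been consumed.
 */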
static uint32_t
calend_set_adjustment(
	long		*secs,
	int		*microsecs)
{
	uint64_t	now, t64;
	int64_t		total, ototal;
	uint32_t	interval = 0;

	total = (int64_t)*secs * NSEC_PER_SEC + *microsecs * NSEC_PER_USEC;

	commpage_disable_timestamp();

	now = mach_absolute_time();

	ototal = calend_adjtotal;

	if (total != 0) {
		int32_t		delta = calend_adjskew;

		if (total > 0) {
			if (total > calend_adjbig)
				delta *= 10;
			if (delta > total)
				delta = (int32_t)total;

			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}
		else {
			if (total < -calend_adjbig)
				delta *= 10;
			delta = -delta;
			if (delta < total)
				delta = (int32_t)total;

			clock_calend.adjstart = now;

			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}

		calend_adjtotal = total;
		clock_calend.adjdelta = delta;

		interval = calend_adjinterval;
	}
	else
		calend_adjtotal = clock_calend.adjdelta = 0;

	if (ototal != 0) {
		*secs = (long)(ototal / NSEC_PER_SEC);
		*microsecs = (int)((ototal % NSEC_PER_SEC) / NSEC_PER_USEC);
	}
	else
		*secs = *microsecs = 0;

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif

	return (interval);
}

static void
calend_adjust_call(void)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	clock_lock();

	if (--calend_adjactive == 0) {
		interval = calend_adjust();
		if (interval != 0) {
			clock_deadline_for_periodic_event(interval, mach_absolute_time(), &calend_adjdeadline);

			if (!timer_call_enter(&calend_adjcall, calend_adjdeadline))
				calend_adjactive++;
		}
	}

	clock_unlock();
	splx(s);
}

static uint32_t
calend_adjust(void)
{
	uint64_t	now, t64;
	int32_t		delta;
	uint32_t	interval = 0;

	commpage_disable_timestamp();

	now = mach_absolute_time();

	delta = clock_calend.adjdelta;

	if (delta > 0) {
		clock_calend.offset += clock_calend.adjoffset;

		calend_adjtotal -= delta;
		if (delta > calend_adjtotal) {
			clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;

			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}
	}
	else
	if (delta < 0) {
		clock_calend.offset -= clock_calend.adjoffset;

		calend_adjtotal -= delta;
		if (delta < calend_adjtotal) {
			clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;

			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}

		if (clock_calend.adjdelta != 0)
			clock_calend.adjstart = now;
	}

	if (clock_calend.adjdelta != 0)
		interval = calend_adjinterval;

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif

	return (interval);
}

/*
 *	clock_wakeup_calendar:
 *
 *	Interface to power management, used
 *	to initiate the reset of the calendar
 *	on wake from sleep event.
 */
void
clock_wakeup_calendar(void)
{
	thread_call_enter(&calend_wakecall);
}

/*
 *	Wait / delay routines.
 */
static void
mach_wait_until_continue(
	__unused void	*parameter,
	wait_result_t	wresult)
{
	thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
	/*NOTREACHED*/
}

kern_return_t
mach_wait_until_trap(
	struct mach_wait_until_trap_args	*args)
{
	uint64_t	deadline = args->deadline;
	wait_result_t	wresult;

	wresult = assert_wait_deadline((event_t)mach_wait_until_trap, THREAD_ABORTSAFE, deadline);
	if (wresult == THREAD_WAITING)
		wresult = thread_block(mach_wait_until_continue);

	return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
}

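/*
 * The trap above backs the user-level mach_wait_until() call; a typical
 * (illustrative) use is an absolute-time sleep such as
 * mach_wait_until(mach_absolute_time() + delay_abs), where delay_abs has
 * been scaled from nanoseconds using the mach_timebase_info() constants.
 */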
void
clock_delay_until(
	uint64_t		deadline)
{
	uint64_t		now = mach_absolute_time();

	if (now >= deadline)
		return;

	if (	(deadline - now) < (8 * sched_cswtime)	||
			get_preemption_level() != 0		||
			ml_get_interrupts_enabled() == FALSE	)
		machine_delay_until(deadline);
	else {
		assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline - sched_cswtime);

		thread_block(THREAD_CONTINUE_NULL);
	}
}

void
delay_for_interval(
	uint32_t		interval,
	uint32_t		scale_factor)
{
	uint64_t		end;

	clock_interval_to_deadline(interval, scale_factor, &end);

	clock_delay_until(end);
}

void
delay(
	int		usec)
{
	delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
}

/*
 *	Miscellaneous routines.
 */
void
clock_interval_to_deadline(
	uint32_t		interval,
	uint32_t		scale_factor,
	uint64_t		*result)
{
	uint64_t	abstime;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	*result = mach_absolute_time() + abstime;
}

void
clock_absolutetime_interval_to_deadline(
	uint64_t		abstime,
	uint64_t		*result)
{
	*result = mach_absolute_time() + abstime;
}

void
clock_get_uptime(
	uint64_t	*result)
{
	*result = mach_absolute_time();
}

void
clock_deadline_for_periodic_event(
	uint64_t	interval,
	uint64_t	abstime,
	uint64_t	*deadline)
{
	assert(interval != 0);

	*deadline += interval;

	if (*deadline <= abstime) {
		*deadline = abstime + interval;
		abstime = mach_absolute_time();

		if (*deadline <= abstime)
			*deadline = abstime + interval;
	}
}
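
/*
 * Typical usage (see calend_adjust_call() above): a periodic callout
 * advances its deadline by one interval each time it fires; whenever the
 * advanced deadline has already passed, this routine resynchronizes it to
 * "abstime + interval" so that a late or missed period does not trigger a
 * burst of catch-up firings.
 */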

#if	CONFIG_DTRACE

/*
 * clock_get_calendar_nanotime_nowait
 *
 * Description:	Non-blocking version of clock_get_calendar_nanotime()
 *
 * Notes:	This function operates by separately tracking calendar time
 *		updates using a two element structure to copy the calendar
 *		state, which may be asynchronously modified.  It utilizes
 *		barrier instructions in the tracking process and in the local
 *		stable snapshot process in order to ensure that a consistent
 *		snapshot is used to perform the calculation.
 */
void
clock_get_calendar_nanotime_nowait(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	int i = 0;
	uint64_t	now;
	struct unlocked_clock_calend stable;

	for (;;) {
		stable = flipflop[i];		/* take snapshot */

		/*
		 * Use barrier instructions to ensure atomicity.  We AND
		 * off the "in progress" bit to get the current generation
		 * count.
		 */
		(void)hw_atomic_and(&stable.gen, ~(uint32_t)1);

		/*
		 * If an update _is_ in progress, the generation count will be
		 * off by one, if it _was_ in progress, it will be off by two,
		 * and if we caught it at a good time, it will be equal (and
		 * our snapshot is therefore stable).
		 */
		if (flipflop[i].gen == stable.gen)
			break;

		/* Switch to the other element of the flipflop, and try again. */
		i ^= 1;
	}

	now = mach_absolute_time();

	if (stable.calend.adjdelta < 0) {
		uint32_t	t32;

		if (now > stable.calend.adjstart) {
			t32 = (uint32_t)(now - stable.calend.adjstart);

			if (t32 > stable.calend.adjoffset)
				now -= stable.calend.adjoffset;
			else
				now = stable.calend.adjstart;
		}
	}

	now += stable.calend.offset;

	absolutetime_to_microtime(now, secs, nanosecs);
	*nanosecs *= NSEC_PER_USEC;

	*secs += (clock_sec_t)stable.calend.epoch;
}

static void
clock_track_calend_nowait(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		struct clock_calend tmp = clock_calend;

		/*
		 * Set the low bit of the generation count; since we use a
		 * barrier instruction to do this, we are guaranteed that this
		 * will flag an update in progress to an async caller trying
		 * to examine the contents.
		 */
		(void)hw_atomic_or(&flipflop[i].gen, 1);

		flipflop[i].calend = tmp;

		/*
		 * Increment the generation count to clear the low bit and
		 * signal completion.  If a caller compares the generation
		 * count after taking a copy while in progress, the count
		 * will be off by two.
		 */
		(void)hw_atomic_add(&flipflop[i].gen, 1);
	}
}

#endif	/* CONFIG_DTRACE */